From ce23e6c77c2276351224441d05eedc8a8fe8df00 Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Tue, 17 Sep 2019 12:46:29 +0800
Subject: [PATCH 001/307] init commit

---
 .gitignore             |   3 +
 __init__.py            |   1 +
 connections.py         | 105 +++++++++++++++++++++++++++++
 exception_codes.py     |   3 +
 exceptions.py          |  11 ++++
 settings.py            |  31 +++++++++
 utils/__init__.py      |   0
 utils/logger_helper.py | 145 +++++++++++++++++++++++++++++++++++++++++
 8 files changed, 299 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 __init__.py
 create mode 100644 connections.py
 create mode 100644 exception_codes.py
 create mode 100644 exceptions.py
 create mode 100644 settings.py
 create mode 100644 utils/__init__.py
 create mode 100644 utils/logger_helper.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000..624eb4fa58
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+.env
+
+__pycache__/
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000000..7db5c41bd0
--- /dev/null
+++ b/__init__.py
@@ -0,0 +1 @@
+import settings
diff --git a/connections.py b/connections.py
new file mode 100644
index 0000000000..727864ef98
--- /dev/null
+++ b/connections.py
@@ -0,0 +1,105 @@
+import logging
+from milvus import Milvus
+from functools import wraps
+from contextlib import contextmanager
+
+import exceptions
+
+logger = logging.getLogger(__name__)
+
+class Connection:
+    def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs):
+        self.name = name
+        self.uri = uri
+        self.max_retry = max_retry
+        self.retried = 0
+        self.conn = Milvus()
+        self.error_handlers = [] if not error_handlers else error_handlers
+        self.on_retry_func = kwargs.get('on_retry_func', None)
+
+    def __str__(self):
+        return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri)
+
+    def _connect(self):
+        try:
+            self.conn.connect(uri=self.uri)
+        except Exception as e:
+            if not self.error_handlers:
+                raise exceptions.ConnectionConnectError(message='')
+            for handler in self.error_handlers:
+                handler(e)
+
+    @property
+    def can_retry(self):
+        return self.retried <= self.max_retry
+
+    @property
+    def connected(self):
+        return self.conn.connected()
+
+    def on_retry(self):
+        if self.on_retry_func:
+            self.on_retry_func(self)
+        else:
+            logger.warn('{} is retrying {}'.format(self, self.retried))
+
+    def on_connect(self):
+        while not self.connected and self.can_retry:
+            self.retried += 1
+            self.on_retry()
+            self._connect()
+
+        if not self.can_retry and not self.connected:
+            raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry))
+
+        self.retried = 0
+
+    def connect(self, func, exception_handler=None):
+        @wraps(func)
+        def inner(*args, **kwargs):
+            self.on_connect()
+            try:
+                return func(*args, **kwargs)
+            except Exception as e:
+                if exception_handler:
+                    exception_handler(e)
+                else:
+                    raise e
+        return inner
+
+if __name__ == '__main__':
+    class Conn:
+        def __init__(self, state):
+            self.state = state
+
+        def connect(self, uri):
+            return self.state
+
+        def connected(self):
+            return self.state
+
+    fail_conn = Conn(False)
+    success_conn = Conn(True)
+
+    class Retry:
+        def __init__(self):
+            self.times = 0
+
+        def __call__(self, conn):
+            self.times += 1
+            print('Retrying {}'.format(self.times))
+
+
+    retry_obj = Retry()
+    c = Connection('client', uri='localhost', on_retry_func=retry_obj)
+    c.conn = fail_conn
+
+    def f():
+        print('ffffffff')
+
+    # m = c.connect(func=f)
+    # m()
+
+    c.conn = success_conn
+    m = c.connect(func=f)
+    m()
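Note: Connection.connect above is a decorator factory, not an immediate dial — it returns a wrapper that runs on_connect() (with bounded retries) before every call. A minimal usage sketch against this API; the URI, table parameters, and the create_table call on the wrapped pymilvus client are illustrative assumptions:

    from connections import Connection

    conn = Connection('writer', uri='tcp://127.0.0.1:19530', max_retry=3)

    def create_demo_table():
        # proxies to the underlying Milvus client held in conn.conn
        return conn.conn.create_table({'table_name': 'demo', 'dimension': 128})

    guarded = conn.connect(func=create_demo_table)  # wrap; nothing is called yet
    guarded()  # retries the connection as needed, then invokes the function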
diff --git a/exception_codes.py b/exception_codes.py
new file mode 100644
index 0000000000..5369389e84
--- /dev/null
+++ b/exception_codes.py
@@ -0,0 +1,3 @@
+INVALID_CODE = -1
+
+CONNECT_ERROR_CODE = 10001
diff --git a/exceptions.py b/exceptions.py
new file mode 100644
index 0000000000..7178c4ebdc
--- /dev/null
+++ b/exceptions.py
@@ -0,0 +1,11 @@
+import exception_codes as codes
+
+class BaseException(Exception):
+    code = codes.INVALID_CODE
+    message = 'BaseException'
+    def __init__(self, message='', code=None):
+        self.message = self.__class__.__name__ if not message else message
+        self.code = self.code if code is None else code
+
+class ConnectionConnectError(BaseException):
+    code = codes.CONNECT_ERROR_CODE
diff --git a/settings.py b/settings.py
new file mode 100644
index 0000000000..e1a45262c8
--- /dev/null
+++ b/settings.py
@@ -0,0 +1,31 @@
+import sys
+import os
+
+from environs import Env
+
+env = Env()
+env.read_env()
+
+DEBUG = env.bool('DEBUG', False)
+TESTING = env.bool('TESTING', False)
+
+METADATA_URI = env.str('METADATA_URI', '')
+
+LOG_LEVEL = env.str('LOG_LEVEL', 'DEBUG' if DEBUG else 'INFO')
+LOG_PATH = env.str('LOG_PATH', '/tmp/mishards')
+LOG_NAME = env.str('LOG_NAME', 'logfile')
+TIMEZONE = env.str('TIMEZONE', 'UTC')
+
+from utils.logger_helper import config
+config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE)
+
+TIMEOUT = env.int('TIMEOUT', 60)
+
+
+if __name__ == '__main__':
+    import logging
+    logger = logging.getLogger(__name__)
+    logger.debug('DEBUG')
+    logger.info('INFO')
+    logger.warn('WARN')
+    logger.error('ERROR')
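Note: settings.py resolves every knob through environs, so configuration comes from the process environment or a local .env file picked up by read_env(). The pattern in miniature (variable names illustrative):

    from environs import Env

    env = Env()
    env.read_env()                    # reads a .env file if one exists

    DEBUG = env.bool('DEBUG', False)  # typed accessor with a default
    PORT = env.int('SERVER_PORT', 19530)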
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/utils/logger_helper.py b/utils/logger_helper.py
new file mode 100644
index 0000000000..1b59aa40ec
--- /dev/null
+++ b/utils/logger_helper.py
@@ -0,0 +1,145 @@
+import os
+import datetime
+from pytz import timezone
+from logging import Filter
+import logging.config
+
+
+class InfoFilter(logging.Filter):
+    def filter(self, rec):
+        return rec.levelno == logging.INFO
+
+class DebugFilter(logging.Filter):
+    def filter(self, rec):
+        return rec.levelno == logging.DEBUG
+
+class WarnFilter(logging.Filter):
+    def filter(self, rec):
+        return rec.levelno == logging.WARN
+
+class ErrorFilter(logging.Filter):
+    def filter(self, rec):
+        return rec.levelno == logging.ERROR
+
+class CriticalFilter(logging.Filter):
+    def filter(self, rec):
+        return rec.levelno == logging.CRITICAL
+
+
+COLORS = {
+    'HEADER': '\033[95m',
+    'INFO': '\033[92m',
+    'DEBUG': '\033[94m',
+    'WARNING': '\033[93m',
+    'ERROR': '\033[95m',
+    'CRITICAL': '\033[91m',
+    'ENDC': '\033[0m',
+}
+
+class ColorFulFormatColMixin:
+    def format_col(self, message_str, level_name):
+        if level_name in COLORS.keys():
+            message_str = COLORS.get(level_name) + message_str + COLORS.get(
+                'ENDC')
+        return message_str
+
+class ColorfulFormatter(logging.Formatter, ColorFulFormatColMixin):
+    def format(self, record):
+        message_str = super(ColorfulFormatter, self).format(record)
+
+        return self.format_col(message_str, level_name=record.levelname)
+
+def config(log_level, log_path, name, tz='UTC'):
+    def build_log_file(level, log_path, name, tz):
+        utc_now = datetime.datetime.utcnow()
+        utc_tz = timezone('UTC')
+        local_tz = timezone(tz)
+        tznow = utc_now.replace(tzinfo=utc_tz).astimezone(local_tz)
+        return '{}-{}-{}.log'.format(os.path.join(log_path, name), tznow.strftime("%m-%d-%Y-%H:%M:%S"),
+                                     level)
+
+    if not os.path.exists(log_path):
+        os.makedirs(log_path)
+
+    LOGGING = {
+        'version': 1,
+        'disable_existing_loggers': False,
+        'formatters': {
+            'default': {
+                'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)'
+            },
+            'colorful_console': {
+                'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)',
+                '()': ColorfulFormatter,
+            },
+        },
+        'filters': {
+            'InfoFilter': {
+                '()': InfoFilter,
+            },
+            'DebugFilter': {
+                '()': DebugFilter,
+            },
+            'WarnFilter': {
+                '()': WarnFilter,
+            },
+            'ErrorFilter': {
+                '()': ErrorFilter,
+            },
+            'CriticalFilter': {
+                '()': CriticalFilter,
+            },
+        },
+        'handlers': {
+            'milvus_celery_console': {
+                'class': 'logging.StreamHandler',
+                'formatter': 'colorful_console',
+            },
+            'milvus_debug_file': {
+                'level': 'DEBUG',
+                'filters': ['DebugFilter'],
+                'class': 'logging.handlers.RotatingFileHandler',
+                'formatter': 'default',
+                'filename': build_log_file('debug', log_path, name, tz)
+            },
+            'milvus_info_file': {
+                'level': 'INFO',
+                'filters': ['InfoFilter'],
+                'class': 'logging.handlers.RotatingFileHandler',
+                'formatter': 'default',
+                'filename': build_log_file('info', log_path, name, tz)
+            },
+            'milvus_warn_file': {
+                'level': 'WARN',
+                'filters': ['WarnFilter'],
+                'class': 'logging.handlers.RotatingFileHandler',
+                'formatter': 'default',
+                'filename': build_log_file('warn', log_path, name, tz)
+            },
+            'milvus_error_file': {
+                'level': 'ERROR',
+                'filters': ['ErrorFilter'],
+                'class': 'logging.handlers.RotatingFileHandler',
+                'formatter': 'default',
+                'filename': build_log_file('error', log_path, name, tz)
+            },
+            'milvus_critical_file': {
+                'level': 'CRITICAL',
+                'filters': ['CriticalFilter'],
+                'class': 'logging.handlers.RotatingFileHandler',
+                'formatter': 'default',
+                'filename': build_log_file('critical', log_path, name, tz)
+            },
+        },
+        'loggers': {
+            '': {
+                'handlers': ['milvus_celery_console', 'milvus_info_file', 'milvus_debug_file', 'milvus_warn_file', \
+                             'milvus_error_file', 'milvus_critical_file'],
+                'level': log_level,
+                'propagate': False
+            },
+        },
+        'propagate': False,
+    }
+
+    logging.config.dictConfig(LOGGING)
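Note: logger_helper fans each severity out to its own rotating file by pairing every handler with a level-equality Filter; a handler's level alone only sets a floor, so without the filter the DEBUG file would also collect INFO and above. Stripped to its core:

    import logging

    class OnlyInfo(logging.Filter):
        def filter(self, rec):
            return rec.levelno == logging.INFO  # pass records at exactly INFO

    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)   # floor: INFO and above
    handler.addFilter(OnlyInfo())    # equality: INFO only
    logging.getLogger().addHandler(handler)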
From 17bb7841843403516acf803157a6e5820511db19 Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Tue, 17 Sep 2019 12:52:32 +0800
Subject: [PATCH 002/307] (exception): change exception definition

---
 connections.py | 6 +++---
 exceptions.py  | 3 +--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/connections.py b/connections.py
index 727864ef98..ea446d5ad3 100644
--- a/connections.py
+++ b/connections.py
@@ -25,7 +25,7 @@ class Connection:
             self.conn.connect(uri=self.uri)
         except Exception as e:
             if not self.error_handlers:
-                raise exceptions.ConnectionConnectError(message='')
+                raise exceptions.ConnectionConnectError()
             for handler in self.error_handlers:
                 handler(e)
 
@@ -97,8 +97,8 @@ if __name__ == '__main__':
     def f():
         print('ffffffff')
 
-    # m = c.connect(func=f)
-    # m()
+    m = c.connect(func=f)
+    m()
 
     c.conn = success_conn
     m = c.connect(func=f)
diff --git a/exceptions.py b/exceptions.py
index 7178c4ebdc..50db4474c4 100644
--- a/exceptions.py
+++ b/exceptions.py
@@ -3,9 +3,8 @@ import exception_codes as codes
 class BaseException(Exception):
     code = codes.INVALID_CODE
     message = 'BaseException'
-    def __init__(self, message='', code=None):
+    def __init__(self, message=''):
         self.message = self.__class__.__name__ if not message else message
-        self.code = self.code if code is None else code
 
 class ConnectionConnectError(BaseException):
     code = codes.CONNECT_ERROR_CODE

From 052d79a58da5fc91b1d36089947634c7d7528e2c Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Tue, 17 Sep 2019 14:28:34 +0800
Subject: [PATCH 003/307] (feat): update connections

---
 connections.py     | 105 ++++++++++++++++++++++++++++++++++++++++++---
 exception_codes.py |   1 +
 exceptions.py      |   3 ++
 service_handler.py |  11 +++++
 settings.py        |   1 +
 utils/__init__.py  |  10 +++++
 6 files changed, 126 insertions(+), 5 deletions(-)
 create mode 100644 service_handler.py

diff --git a/connections.py b/connections.py
index ea446d5ad3..c52a1c5f85 100644
--- a/connections.py
+++ b/connections.py
@@ -1,9 +1,12 @@
 import logging
-from milvus import Milvus
+import threading
 from functools import wraps
 from contextlib import contextmanager
 
+from milvus import Milvus
+import settings
 import exceptions
+from utils import singleton
 
 logger = logging.getLogger(__name__)
 
@@ -16,6 +19,7 @@ class Connection:
         self.conn = Milvus()
         self.error_handlers = [] if not error_handlers else error_handlers
         self.on_retry_func = kwargs.get('on_retry_func', None)
+        self._connect()
 
     def __str__(self):
         return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri)
@@ -67,6 +71,79 @@ class Connection:
                     raise e
         return inner
 
+@singleton
+class ConnectionMgr:
+    def __init__(self):
+        self.metas = {}
+        self.conns = {}
+
+    def conn(self, name, throw=False):
+        c = self.conns.get(name, None)
+        if not c:
+            url = self.metas.get(name, None)
+            if not url:
+                if not throw:
+                    return None
+                raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name))
+            this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY)
+            threaded = {
+                threading.get_ident() : this_conn
+            }
+            c[name] = threaded
+            return this_conn
+
+        tid = threading.get_ident()
+        rconn = c.get(tid, None)
+        if not rconn:
+            url = self.metas.get(name, None)
+            if not url:
+                if not throw:
+                    return None
+                raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name))
+            this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY)
+            c[tid] = this_conn
+            return this_conn
+
+        return rconn
+
+    def on_new_meta(self, name, url):
+        self.metas[name] = url
+
+    def on_duplicate_meta(self, name, url):
+        if self.metas[name] == url:
+            return self.on_same_meta(name, url)
+
+        return self.on_diff_meta(name, url)
+
+    def on_same_meta(self, name, url):
+        logger.warn('Register same meta: {}:{}'.format(name, url))
+
+    def on_diff_meta(self, name, url):
+        logger.warn('Received {} with diff url={}'.format(name, url))
+        self.metas[name] = url
+        self.conns[name] = {}
+
+    def on_unregister_meta(self, name, url):
+        logger.info('Unregister name={};url={}'.format(name, url))
+        self.conns.pop(name, None)
+
+    def on_nonexisted_meta(self, name):
+        logger.warn('Non-existed meta: {}'.format(name))
+
+    def register(self, name, url):
+        meta = self.metas.get(name)
+        if not meta:
+            return self.on_new_meta(name, url)
+        else:
+            return self.on_duplicate_meta(name, url)
+
+    def unregister(self, name):
+        url = self.metas.pop(name, None)
+        if url is None:
+            return self.on_nonexisted_meta(name)
+        return self.on_unregister_meta(name, url)
+
 if __name__ == '__main__':
     class Conn:
         def __init__(self, state):
@@ -91,15 +168,33 @@ if __name__ == '__main__':
 
     retry_obj = Retry()
-    c = Connection('client', uri='localhost', on_retry_func=retry_obj)
-    c.conn = fail_conn
+    c = Connection('client', uri='', on_retry_func=retry_obj)
 
     def f():
         print('ffffffff')
 
-    m = c.connect(func=f)
-    m()
+    # c.conn = fail_conn
+    # m = c.connect(func=f)
+    # m()
 
     c.conn = success_conn
     m = c.connect(func=f)
     m()
+
+    mgr = ConnectionMgr()
+    mgr.register('pod1', '111')
+    mgr.register('pod2', '222')
+    mgr.register('pod2', '222')
+    mgr.register('pod2', 'tcp://127.0.0.1:19530')
+
+    pod3 = mgr.conn('pod3')
+    print(pod3)
+
+    pod2 = mgr.conn('pod2')
+    print(pod2)
+    print(pod2.connected)
+
+    mgr.unregister('pod1')
+
+    logger.info(mgr.metas)
+    logger.info(mgr.conns)
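Note: ConnectionMgr.conn keys the per-name connection map by threading.get_ident(), so each thread gets its own Milvus channel instead of sharing one client object across threads. The lookup reduces to this sketch, where factory stands in for the Connection constructor:

    import threading

    conns = {}  # name -> {thread_id: connection}

    def conn_for(name, factory):
        per_thread = conns.setdefault(name, {})
        tid = threading.get_ident()
        if tid not in per_thread:
            per_thread[tid] = factory(name)  # one connection per (name, thread)
        return per_thread[tid]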
diff --git a/exception_codes.py b/exception_codes.py
index 5369389e84..c8cfd81dab 100644
--- a/exception_codes.py
+++ b/exception_codes.py
@@ -1,3 +1,4 @@
 INVALID_CODE = -1
 
 CONNECT_ERROR_CODE = 10001
+CONNECTTION_NOT_FOUND_CODE = 10002
diff --git a/exceptions.py b/exceptions.py
index 50db4474c4..a25fb2c4ae 100644
--- a/exceptions.py
+++ b/exceptions.py
@@ -8,3 +8,6 @@ class BaseException(Exception):
 
 class ConnectionConnectError(BaseException):
     code = codes.CONNECT_ERROR_CODE
+
+class ConnectionNotFoundError(BaseException):
+    code = codes.CONNECTTION_NOT_FOUND_CODE
diff --git a/service_handler.py b/service_handler.py
new file mode 100644
index 0000000000..d5018a54d8
--- /dev/null
+++ b/service_handler.py
@@ -0,0 +1,11 @@
+import logging
+
+import grpco
+from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2
+
+logger = logging.getLogger(__name__)
+
+
+class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
+    def __init__(self, connections, *args, **kwargs):
+        self.connections = self.connections
diff --git a/settings.py b/settings.py
index e1a45262c8..4ad00e66cb 100644
--- a/settings.py
+++ b/settings.py
@@ -20,6 +20,7 @@ from utils.logger_helper import config
 config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE)
 
 TIMEOUT = env.int('TIMEOUT', 60)
+MAX_RETRY = env.int('MAX_RETRY', 3)
 
 
 if __name__ == '__main__':
diff --git a/utils/__init__.py b/utils/__init__.py
index e69de29bb2..ec7f32bcbc 100644
--- a/utils/__init__.py
+++ b/utils/__init__.py
@@ -0,0 +1,10 @@
+from functools import wraps
+
+def singleton(cls):
+    instances = {}
+    @wraps(cls)
+    def getinstance(*args, **kw):
+        if cls not in instances:
+            instances[cls] = cls(*args, **kw)
+        return instances[cls]
+    return getinstance
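Note: the singleton decorator added to utils caches one instance per decorated class, so every ConnectionMgr() call in the process hands back the same registry object:

    from utils import singleton

    @singleton
    class Registry:
        pass

    assert Registry() is Registry()  # the second call returns the cached instance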
From 4fc6f0a520159ed09d3e4513a547c0ab6fddde3d Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Tue, 17 Sep 2019 20:48:08 +0800
Subject: [PATCH 004/307] add grpc server

---
 __init__.py                                |   1 -
 mishards/__init__.py                       |   6 +
 connections.py => mishards/connections.py  |   2 +-
 .../exception_codes.py                     |   0
 exceptions.py => mishards/exceptions.py    |   0
 mishards/grpc_utils/__init__.py            |   0
 mishards/grpc_utils/grpc_args_parser.py    | 101 ++++++
 mishards/grpc_utils/grpc_args_wrapper.py   |   4 +
 mishards/main.py                           |  14 +
 mishards/server.py                         |  47 +++
 mishards/service_handler.py                | 327 ++++++++++++++++++
 settings.py => mishards/settings.py        |   2 +
 {utils => mishards/utils}/__init__.py      |   0
 {utils => mishards/utils}/logger_helper.py |   0
 15 files changed, 502 insertions(+), 13 deletions(-)
 delete mode 100644 __init__.py
 create mode 100644 mishards/__init__.py
 rename connections.py => mishards/connections.py (99%)
 rename exception_codes.py => mishards/exception_codes.py (100%)
 rename exceptions.py => mishards/exceptions.py (100%)
 create mode 100644 mishards/grpc_utils/__init__.py
 create mode 100644 mishards/grpc_utils/grpc_args_parser.py
 create mode 100644 mishards/grpc_utils/grpc_args_wrapper.py
 create mode 100644 mishards/main.py
 create mode 100644 mishards/server.py
 create mode 100644 mishards/service_handler.py
 rename settings.py => mishards/settings.py (90%)
 rename {utils => mishards/utils}/__init__.py (100%)
 rename {utils => mishards/utils}/logger_helper.py (100%)
 delete mode 100644 service_handler.py

diff --git a/__init__.py b/__init__.py
deleted file mode 100644
index 7db5c41bd0..0000000000
--- a/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-import settings
diff --git a/mishards/__init__.py b/mishards/__init__.py
new file mode 100644
index 0000000000..700dd4238c
--- /dev/null
+++ b/mishards/__init__.py
@@ -0,0 +1,6 @@
+import settings
+from connections import ConnectionMgr
+connect_mgr = ConnectionMgr()
+
+from server import Server
+grpc_server = Server(conn_mgr=connect_mgr)
diff --git a/connections.py b/mishards/connections.py
similarity index 99%
rename from connections.py
rename to mishards/connections.py
index c52a1c5f85..06d5f3ff16 100644
--- a/connections.py
+++ b/mishards/connections.py
@@ -89,7 +89,7 @@ class ConnectionMgr:
             threaded = {
                 threading.get_ident() : this_conn
             }
-            c[name] = threaded
+            self.conns[name] = threaded
             return this_conn
 
         tid = threading.get_ident()
diff --git a/exception_codes.py b/mishards/exception_codes.py
similarity index 100%
rename from exception_codes.py
rename to mishards/exception_codes.py
diff --git a/exceptions.py b/mishards/exceptions.py
similarity index 100%
rename from exceptions.py
rename to mishards/exceptions.py
diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/mishards/grpc_utils/grpc_args_parser.py b/mishards/grpc_utils/grpc_args_parser.py
new file mode 100644
index 0000000000..c8dc9d71d9
--- /dev/null
+++ b/mishards/grpc_utils/grpc_args_parser.py
@@ -0,0 +1,101 @@
+from milvus import Status
+from functools import wraps
+
+
+def error_status(func):
+    @wraps(func)
+    def inner(*args, **kwargs):
+        try:
+            results = func(*args, **kwargs)
+        except Exception as e:
+            return Status(code=Status.UNEXPECTED_ERROR, message=str(e)), None
+
+        return Status(code=0, message="Success"), results
+
+    return inner
+
+
+class GrpcArgsParser(object):
+
+    @classmethod
+    @error_status
+    def parse_proto_TableSchema(cls, param):
+        _table_schema = {
+            'table_name': param.table_name.table_name,
+            'dimension': param.dimension,
+            'index_file_size': param.index_file_size,
+            'metric_type': param.metric_type
+        }
+
+        return _table_schema
+
+    @classmethod
+    @error_status
+    def parse_proto_TableName(cls, param):
+        return param.table_name
+
+    @classmethod
+    @error_status
+    def parse_proto_Index(cls, param):
+        _index = {
+            'index_type': param.index_type,
+            'nlist': param.nlist
+        }
+
+        return _index
+
+    @classmethod
+    @error_status
+    def parse_proto_IndexParam(cls, param):
+        _table_name = param.table_name.table_name
+        _status, _index = cls.parse_proto_Index(param.index)
+
+        if not _status.OK():
+            raise Exception("Argument parse error")
+
+        return _table_name, _index
+
+    @classmethod
+    @error_status
+    def parse_proto_Command(cls, param):
+        _cmd = param.cmd
+
+        return _cmd
+
+    @classmethod
+    @error_status
+    def parse_proto_Range(cls, param):
+        _start_value = param.start_value
+        _end_value = param.end_value
+
+        return _start_value, _end_value
+
+    @classmethod
+    @error_status
+    def parse_proto_RowRecord(cls, param):
+        return list(param.vector_data)
+
+    @classmethod
+    @error_status
+    def parse_proto_SearchParam(cls, param):
+        _table_name = param.table_name
+        _topk = param.topk
+        _nprobe = param.nprobe
+        _status, _range = cls.parse_proto_Range(param.query_range_array)
+
+        if not _status.OK():
+            raise Exception("Argument parse error")
+
+        _row_record = param.query_record_array
+
+        return _table_name, _row_record, _range, _topk
+
+    @classmethod
+    @error_status
+    def parse_proto_DeleteByRangeParam(cls, param):
+        _table_name = param.table_name
+        _range = param.range
+        _start_value = _range.start_value
+        _end_value = _range.end_value
+
+        return _table_name, _start_value, _end_value
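Note: every parser above is wrapped by error_status, which normalizes the return into a (Status, result) pair and converts exceptions into a non-OK status rather than letting them propagate. Call sites therefore always unpack two values, as in this sketch:

    status, table_schema = GrpcArgsParser.parse_proto_TableSchema(request)
    if not status.OK():
        # a parse failure arrives as a status, not as a raised exception
        return status_pb2.Status(error_code=status.code, reason=status.message)
    # ... proceed with table_schema ...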
diff --git a/mishards/grpc_utils/grpc_args_wrapper.py b/mishards/grpc_utils/grpc_args_wrapper.py
new file mode 100644
index 0000000000..a864b1e400
--- /dev/null
+++ b/mishards/grpc_utils/grpc_args_wrapper.py
@@ -0,0 +1,4 @@
+# class GrpcArgsWrapper(object):
+
+    # @classmethod
+    # def proto_TableName(cls):
\ No newline at end of file
diff --git a/mishards/main.py b/mishards/main.py
new file mode 100644
index 0000000000..0185e6ac1d
--- /dev/null
+++ b/mishards/main.py
@@ -0,0 +1,14 @@
+import sys
+import os
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+import settings
+from mishards import connect_mgr, grpc_server as server
+
+def main():
+    connect_mgr.register('WOSERVER', settings.WOSERVER)
+    server.run(port=settings.SERVER_PORT)
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/mishards/server.py b/mishards/server.py
new file mode 100644
index 0000000000..59ea7db46b
--- /dev/null
+++ b/mishards/server.py
@@ -0,0 +1,47 @@
+import logging
+import grpc
+import time
+from concurrent import futures
+from grpc._cython import cygrpc
+from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server
+from service_handler import ServiceHandler
+import settings
+
+logger = logging.getLogger(__name__)
+
+
+class Server:
+    def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs):
+        self.exit_flag = False
+        self.port = int(port)
+        self.conn_mgr = conn_mgr
+        self.server_impl = grpc.server(
+            thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers),
+            options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
+                     (cygrpc.ChannelArgKey.max_receive_message_length, -1)]
+        )
+
+    def start(self, port=None):
+        add_MilvusServiceServicer_to_server(ServiceHandler(conn_mgr=self.conn_mgr), self.server_impl)
+        self.server_impl.add_insecure_port("[::]:{}".format(str(port or self._port)))
+        self.server_impl.start()
+
+    def run(self, port):
+        logger.info('Milvus server start ......')
+        port = port or self.port
+
+        self.start(port)
+        logger.info('Successfully')
+        logger.info('Listening on port {}'.format(port))
+
+        try:
+            while not self.exit_flag:
+                time.sleep(5)
+        except KeyboardInterrupt:
+            self.stop()
+
+    def stop(self):
+        logger.info('Server is shuting down ......')
+        self.exit_flag = True
+        self.server.stop(0)
+        logger.info('Server is closed')
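Note: once Server.run is listening, the proxy speaks the same wire protocol as a plain Milvus server, so any generated stub can call it. A minimal client-side sketch — the address is illustrative and the Command message shape is assumed from the handler code below:

    import grpc
    from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc

    channel = grpc.insecure_channel('127.0.0.1:19530')
    stub = milvus_pb2_grpc.MilvusServiceStub(channel)
    reply = stub.Cmd(milvus_pb2.Command(cmd='version'))  # handled by ServiceHandler.Cmd
    print(reply.string_reply)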
diff --git a/mishards/service_handler.py b/mishards/service_handler.py
new file mode 100644
index 0000000000..ead8d14d88
--- /dev/null
+++ b/mishards/service_handler.py
@@ -0,0 +1,327 @@
+import logging
+from contextlib import contextmanager
+from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2
+
+from grpc_utils.grpc_args_parser import GrpcArgsParser as Parser
+
+logger = logging.getLogger(__name__)
+
+
+class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
+    def __init__(self, conn_mgr, *args, **kwargs):
+        self.conn_mgr = conn_mgr
+        self.table_meta = {}
+
+    @property
+    def connection(self):
+        conn = self.conn_mgr.conn('WOSERVER')
+        if conn:
+            conn.on_connect()
+        return conn.conn
+
+    def CreateTable(self, request, context):
+        _status, _table_schema = Parser.parse_proto_TableSchema(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+        logger.info('CreateTable {}'.format(_table_schema['table_name']))
+
+        _status = self.connection.create_table(_table_schema)
+
+        return status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+    def HasTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return milvus_pb2.BoolReply(
+                status=status_pb2.Status(error_code=_status.code, reason=_status.message),
+                bool_reply=False
+            )
+
+        logger.info('HasTable {}'.format(_table_name))
+
+        _bool = self.connection.has_table(_table_name)
+
+        return milvus_pb2.BoolReply(
+            status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="OK"),
+            bool_reply=_bool
+        )
+
+    def DropTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+        logger.info('DropTable {}'.format(_table_name))
+
+        _status = self.connection.delete_table(_table_name)
+
+        return status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+    def CreateIndex(self, request, context):
+        _status, unpacks = Parser.parse_proto_IndexParam(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+        _table_name, _index = unpacks
+
+        logger.info('CreateIndex {}'.format(_table_name))
+
+        # TODO: interface create_table incompleted
+        _status = self.connection.create_index(_table_name, _index)
+
+        return status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+    def Insert(self, request, context):
+        logger.info('Insert')
+        # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array'
+        _status, _ids = self.connection.add_vectors(None, None, insert_param=request)
+        return milvus_pb2.VectorIds(
+            status=status_pb2.Status(error_code=_status.code, reason=_status.message),
+            vector_id_array=_ids
+        )
+
+    def Search(self, request, context):
+
+        try:
+            table_name = request.table_name
+
+            topk = request.topk
+            nprobe = request.nprobe
+
+            logger.info('Search {}: topk={} nprobe={}'.format(table_name, topk, nprobe))
+
+            if nprobe > 2048 or nprobe <= 0:
+                raise exceptions.GRPCInvlidArgument('Invalid nprobe: {}'.format(nprobe))
+
+            table_meta = self.table_meta.get(table_name, None)
+            if not table_meta:
+                status, info = self.connection.describe_table(table_name)
+                if not status.OK():
+                    raise TableNotFoundException(table_name)
+
+                self.table_meta[table_name] = info
+                table_meta = info
+
+            start = time.time()
+
+            query_record_array = []
+
+            for query_record in request.query_record_array:
+                query_record_array.append(list(query_record.vector_data))
+
+            query_range_array = []
+            for query_range in request.query_range_array:
+                query_range_array.append(
+                    Range(query_range.start_value, query_range.end_value))
+        except (TableNotFoundException, exceptions.GRPCInvlidArgument) as exc:
+            return milvus_pb2.TopKQueryResultList(
+                status=status_pb2.Status(error_code=exc.code, reason=exc.message)
+            )
+        except Exception as e:
+            return milvus_pb2.TopKQueryResultList(
+                status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e))
+            )
+
+        try:
+            results = workflow.query_vectors(table_name, table_meta, query_record_array, topk,
+                                             nprobe, query_range_array)
+        except (exceptions.GRPCQueryInvalidRangeException, TableNotFoundException) as exc:
+            return milvus_pb2.TopKQueryResultList(
+                status=status_pb2.Status(error_code=exc.code, reason=exc.message)
+            )
+        except exceptions.ServiceNotFoundException as exc:
+            return milvus_pb2.TopKQueryResultList(
+                status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=exc.message)
+            )
+        except Exception as e:
+            logger.error(e)
+            results = workflow.query_vectors(table_name, table_meta, query_record_array,
                                             topk, nprobe, query_range_array)
+
+        now = time.time()
+        logger.info('SearchVector Ends @{}'.format(now))
+        logger.info('SearchVector takes: {}'.format(now - start))
+
+        topk_result_list = milvus_pb2.TopKQueryResultList(
+            status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success"),
+            topk_query_result=results
+        )
+        return topk_result_list
+    def SearchInFiles(self, request, context):
+        try:
+            file_id_array = list(request.file_id_array)
+            search_param = request.search_param
+            table_name = search_param.table_name
+            topk = search_param.topk
+            nprobe = search_param.nprobe
+
+            query_record_array = []
+
+            for query_record in search_param.query_record_array:
+                query_record_array.append(list(query_record))
+
+            query_range_array = []
+            for query_range in search_param.query_range_array:
+                query_range_array.append("")
+        except Exception as e:
+            milvus_pb2.TopKQueryResultList(
+                status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e)),
+            )
+
+        res = search_vector_in_files.delay(table_name=table_name,
+                                           file_id_array=file_id_array,
+                                           query_record_array=query_record_array,
+                                           query_range_array=query_range_array,
+                                           topk=topk,
+                                           nprobe=nprobe)
+        status, result = res.get(timeout=1)
+
+        if not status.OK():
+            raise ThriftException(code=status.code, reason=status.message)
+        res = TopKQueryResult()
+        for top_k_query_results in result:
+            res.query_result_arrays.append([QueryResult(id=qr.id, distance=qr.distance)
+                                            for qr in top_k_query_results])
+        return res
+
+    def DescribeTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            table_name = milvus_pb2.TableName(
+                status=status_pb2.Status(error_code=_status.code, reason=_status.message)
+            )
+            return milvus_pb2.TableSchema(
+                table_name=table_name
+            )
+
+        logger.info('DescribeTable {}'.format(_table_name))
+        _status, _table = self.connection.describe_table(_table_name)
+
+        if _status.OK():
+            _grpc_table_name = milvus_pb2.TableName(
+                status=status_pb2.Status(error_code=_status.code, reason=_status.message),
+                table_name=_table.table_name
+            )
+
+            return milvus_pb2.TableSchema(
+                table_name=_grpc_table_name,
+                index_file_size=_table.index_file_size,
+                dimension=_table.dimension,
+                metric_type=_table.metric_type
+            )
+
+        return milvus_pb2.TableSchema(
+            table_name=milvus_pb2.TableName(
+                status=status_pb2.Status(error_code=_status.code, reason=_status.message)
+            )
+        )
+
+    def CountTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            status = status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+            return milvus_pb2.TableRowCount(
+                status=status
+            )
+
+        logger.info('CountTable {}'.format(_table_name))
+
+        _status, _count = self.connection.get_table_row_count(_table_name)
+
+        return milvus_pb2.TableRowCount(
+            status=status_pb2.Status(error_code=_status.code, reason=_status.message),
+            table_row_count=_count if isinstance(_count, int) else -1)
+
+    def Cmd(self, request, context):
+        _status, _cmd = Parser.parse_proto_Command(request)
+        logger.info('Cmd: {}'.format(_cmd))
+
+        if not _status.OK():
+            return milvus_pb2.StringReply(
+                status_pb2.Status(error_code=_status.code, reason=_status.message)
+            )
+
+        if _cmd == 'version':
+            _status, _reply = self.connection.server_version()
+        else:
+            _status, _reply = self.connection.server_status()
+
+        return milvus_pb2.StringReply(
+            status=status_pb2.Status(error_code=_status.code, reason=_status.message),
+            string_reply=_reply
+        )
+    def ShowTables(self, request, context):
+        logger.info('ShowTables')
+        _status, _results = self.connection.show_tables()
+
+        if not _status.OK():
+            _results = []
+
+        for _result in _results:
+            yield milvus_pb2.TableName(
+                status=status_pb2.Status(error_code=_status.code, reason=_status.message),
+                table_name=_result
+            )
+
+    def DeleteByRange(self, request, context):
+        _status, unpacks = \
+            Parser.parse_proto_DeleteByRangeParam(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+        _table_name, _start_date, _end_date = unpacks
+
+        logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, _end_date))
+        _status = self.connection.delete_vectors_by_range(_table_name, _start_date, _end_date)
+        return status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+    def PreloadTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+        logger.info('PreloadTable {}'.format(_table_name))
+        _status = self.connection.preload_table(_table_name)
+        return status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+    def DescribeIndex(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return milvus_pb2.IndexParam(
+                table_name=milvus_pb2.TableName(
+                    status=status_pb2.Status(error_code=_status.code, reason=_status.message)
+                )
+            )
+
+        logger.info('DescribeIndex {}'.format(_table_name))
+        _status, _index_param = self.connection.describe_index(_table_name)
+
+        _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist)
+        _tablename = milvus_pb2.TableName(
+            status=status_pb2.Status(error_code=_status.code, reason=_status.message),
+            table_name=_table_name)
+
+        return milvus_pb2.IndexParam(table_name=_tablename, index=_index)
+
+    def DropIndex(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code, reason=_status.message)
+
+        logger.info('DropIndex {}'.format(_table_name))
+        _status = self.connection.drop_index(_table_name)
+        return status_pb2.Status(error_code=_status.code, reason=_status.message)
diff --git a/settings.py b/mishards/settings.py
similarity index 90%
rename from settings.py
rename to mishards/settings.py
index 4ad00e66cb..0566cf066f 100644
--- a/settings.py
+++ b/mishards/settings.py
@@ -22,6 +22,8 @@ config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE)
 TIMEOUT = env.int('TIMEOUT', 60)
 MAX_RETRY = env.int('MAX_RETRY', 3)
+SERVER_PORT = env.int('SERVER_PORT', 19530)
+WOSERVER = env.str('WOSERVER')
 
 
 if __name__ == '__main__':
     import logging
diff --git a/utils/__init__.py b/mishards/utils/__init__.py
similarity index 100%
rename from utils/__init__.py
rename to mishards/utils/__init__.py
diff --git a/utils/logger_helper.py b/mishards/utils/logger_helper.py
similarity index 100%
rename from utils/logger_helper.py
rename to mishards/utils/logger_helper.py
diff --git a/service_handler.py b/service_handler.py
deleted file mode 100644
index d5018a54d8..0000000000
--- a/service_handler.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import logging
-
-import grpco
-from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2
-
-logger = logging.getLogger(__name__)
-
-
-class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
-    def __init__(self, connections, *args, **kwargs):
-        self.connections = self.connections
From 86a893cb0462f7822aa1d4da2aef3f478b67db83 Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Wed, 18 Sep 2019 11:56:00 +0800
Subject: [PATCH 005/307] impl part of search

---
 mishards/exception_codes.py |   2 +
 mishards/exceptions.py      |   3 +
 mishards/main.py            |   1 +
 mishards/service_handler.py | 232 +++++++++++++++++++++++-------------
 mishards/settings.py        |   1 +
 5 files changed, 157 insertions(+), 82 deletions(-)

diff --git a/mishards/exception_codes.py b/mishards/exception_codes.py
index c8cfd81dab..32b29bdfab 100644
--- a/mishards/exception_codes.py
+++ b/mishards/exception_codes.py
@@ -2,3 +2,5 @@ INVALID_CODE = -1
 
 CONNECT_ERROR_CODE = 10001
 CONNECTTION_NOT_FOUND_CODE = 10002
+
+TABLE_NOT_FOUND_CODE = 20001
diff --git a/mishards/exceptions.py b/mishards/exceptions.py
index a25fb2c4ae..1445d18769 100644
--- a/mishards/exceptions.py
+++ b/mishards/exceptions.py
@@ -11,3 +11,6 @@ class ConnectionConnectError(BaseException):
 
 class ConnectionNotFoundError(BaseException):
     code = codes.CONNECTTION_NOT_FOUND_CODE
+
+class TableNotFoundError(BaseException):
+    code = codes.TABLE_NOT_FOUND_CODE
diff --git a/mishards/main.py b/mishards/main.py
index 0185e6ac1d..2ba3f14697 100644
--- a/mishards/main.py
+++ b/mishards/main.py
@@ -7,6 +7,7 @@ from mishards import connect_mgr, grpc_server as server
 
 def main():
     connect_mgr.register('WOSERVER', settings.WOSERVER)
+    connect_mgr.register('TEST', 'tcp://127.0.0.1:19530')
     server.run(port=settings.SERVER_PORT)
     return 0
 
diff --git a/mishards/service_handler.py b/mishards/service_handler.py
index ead8d14d88..89ae2cd36c 100644
--- a/mishards/service_handler.py
+++ b/mishards/service_handler.py
@@ -1,13 +1,22 @@
 import logging
+import time
+import datetime
 from contextlib import contextmanager
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+
 from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2
+from milvus.grpc_gen.milvus_pb2 import TopKQueryResult
+from milvus.client import types
 
+import settings
 from grpc_utils.grpc_args_parser import GrpcArgsParser as Parser
 
 logger = logging.getLogger(__name__)
 
 
 class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
+    MAX_NPROBE = 2048
     def __init__(self, conn_mgr, *args, **kwargs):
         self.conn_mgr = conn_mgr
         self.table_meta = {}
@@ -19,6 +28,99 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
             conn.on_connect()
         return conn.conn
 
+    def query_conn(self, name):
+        conn = self.conn_mgr.conn(name)
+        conn and conn.on_connect()
+        return conn.conn
+
+    def _format_date(self, start, end):
+        return ((start.year-1900)*10000 + (start.month-1)*100 + start.day
+                , (end.year-1900)*10000 + (end.month-1)*100 + end.day)
+
+    def _range_to_date(self, range_obj):
+        try:
+            start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d')
+            end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d')
+            assert start >= end
+        except (ValueError, AssertionError):
+            raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format(
+                range_obj.start_date, range_obj.end_date
+            ))
+
+        return self._format_date(start, end)
+
+    def _get_routing_file_ids(self, table_id, range_array):
+        return {
+            'TEST': {
+                'table_id': table_id,
+                'file_ids': [123]
+            }
+        }
+
+    def _do_merge(self, files_n_topk_results, topk, reverse=False):
+        if not files_n_topk_results:
+            return []
+
+        request_results = defaultdict(list)
+
+        calc_time = time.time()
+        for files_collection in files_n_topk_results:
+            for request_pos, each_request_results in enumerate(files_collection.topk_query_result):
+                request_results[request_pos].extend(each_request_results.query_result_arrays)
+                request_results[request_pos] = sorted(request_results[request_pos], key=lambda x: x.distance,
+                                                      reverse=reverse)[:topk]
+
+        calc_time = time.time() - calc_time
+        logger.info('Merge takes {}'.format(calc_time))
+
+        results = sorted(request_results.items())
+        topk_query_result = []
+
+        for result in results:
+            query_result = TopKQueryResult(query_result_arrays=result[1])
+            topk_query_result.append(query_result)
+
+        return topk_query_result
+
+    def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs):
+        range_array = [self._range_to_date(r) for r in range_array] if range_array else None
+        routing = self._get_routing_file_ids(table_id, range_array)
+        logger.debug(routing)
+
+        rs = []
+        all_topk_results = []
+
+        workers = settings.SEARCH_WORKER_SIZE
+
+        def search(addr, query_params, vectors, topk, nprobe, **kwargs):
+            logger.info('Send Search Request: addr={};params={};nq={};topk={};nprobe={}'.format(
+                addr, query_params, len(vectors), topk, nprobe
+            ))
+
+            conn = self.query_conn(addr)
+            start = time.time()
+            ret = conn.search_vectors_in_files(table_name=query_params['table_id'],
+                                               file_ids=query_params['file_ids'],
+                                               query_records=vectors,
+                                               top_k=topk,
+                                               nprobe=nprobe,
+                                               lazy=True)
+            end = time.time()
+            logger.info('search_vectors_in_files takes: {}'.format(end - start))
+
+            all_topk_results.append(ret)
+
+        with ThreadPoolExecutor(max_workers=workers) as pool:
+            for addr, params in routing.items():
+                res = pool.submit(search, addr, params, vectors, topk, nprobe)
+                rs.append(res)
+
+            for res in rs:
+                res.result()
+
+        reverse = table_meta.metric_type == types.MetricType.L2
+        return self._do_merge(all_topk_results, topk, reverse=reverse)
+
     def CreateTable(self, request, context):
         _status, _table_schema = Parser.parse_proto_TableSchema(request)
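Note: _do_query scatters one request to every routed server on a thread pool, and _do_merge gathers the per-file top-k lists back into a single top-k per query position — concatenate candidates, re-sort by distance (direction depends on the metric), truncate to topk. The merge in isolation, over (id, distance) pairs:

    from collections import defaultdict

    def merge_topk(per_server_hits, topk, reverse=False):
        # per_server_hits: one list per server; each holds per-query candidate lists
        merged = defaultdict(list)
        for hits in per_server_hits:
            for pos, candidates in enumerate(hits):
                merged[pos].extend(candidates)
                merged[pos] = sorted(merged[pos], key=lambda x: x[1], reverse=reverse)[:topk]
        return [merged[pos] for pos in sorted(merged)]

    print(merge_topk([[[(1, 0.2), (2, 0.5)]], [[(3, 0.1)]]], topk=2))
    # -> [[(3, 0.1), (1, 0.2)]]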
@@ -87,103 +188,68 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
 
     def Search(self, request, context):
 
-        try:
-            table_name = request.table_name
+        table_name = request.table_name
 
-            topk = request.topk
-            nprobe = request.nprobe
+        topk = request.topk
+        nprobe = request.nprobe
 
-            logger.info('Search {}: topk={} nprobe={}'.format(table_name, topk, nprobe))
+        logger.info('Search {}: topk={} nprobe={}'.format(table_name, topk, nprobe))
 
-            if nprobe > 2048 or nprobe <= 0:
-                raise exceptions.GRPCInvlidArgument('Invalid nprobe: {}'.format(nprobe))
+        if nprobe > self.MAX_NPROBE or nprobe <= 0:
+            raise exceptions.GRPCInvlidArgument('Invalid nprobe: {}'.format(nprobe))
 
-            table_meta = self.table_meta.get(table_name, None)
-            if not table_meta:
-                status, info = self.connection.describe_table(table_name)
-                if not status.OK():
-                    raise TableNotFoundException(table_name)
+        table_meta = self.table_meta.get(table_name, None)
+        if not table_meta:
+            status, info = self.connection.describe_table(table_name)
+            if not status.OK():
+                raise exceptions.TableNotFoundError(table_name)
 
-                self.table_meta[table_name] = info
-                table_meta = info
+            self.table_meta[table_name] = info
+            table_meta = info
 
-            start = time.time()
+        start = time.time()
 
-            query_record_array = []
+        query_record_array = []
 
-            for query_record in request.query_record_array:
-                query_record_array.append(list(query_record.vector_data))
+        for query_record in request.query_record_array:
+            query_record_array.append(list(query_record.vector_data))
 
-            query_range_array = []
-            for query_range in request.query_range_array:
-                query_range_array.append(
-                    Range(query_range.start_value, query_range.end_value))
-        except (TableNotFoundException, exceptions.GRPCInvlidArgument) as exc:
-            return milvus_pb2.TopKQueryResultList(
-                status=status_pb2.Status(error_code=exc.code, reason=exc.message)
-            )
-        except Exception as e:
-            return milvus_pb2.TopKQueryResultList(
-                status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e))
-            )
+        query_range_array = []
+        for query_range in request.query_range_array:
+            query_range_array.append(
+                Range(query_range.start_value, query_range.end_value))
+        # except (TableNotFoundException, exceptions.GRPCInvlidArgument) as exc:
+        #     return milvus_pb2.TopKQueryResultList(
+        #         status=status_pb2.Status(error_code=exc.code, reason=exc.message)
+        #     )
+        # except Exception as e:
+        #     return milvus_pb2.TopKQueryResultList(
+        #         status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e))
+        #     )
 
-        try:
-            results = workflow.query_vectors(table_name, table_meta, query_record_array, topk,
-                                             nprobe, query_range_array)
-        except (exceptions.GRPCQueryInvalidRangeException, TableNotFoundException) as exc:
-            return milvus_pb2.TopKQueryResultList(
-                status=status_pb2.Status(error_code=exc.code, reason=exc.message)
-            )
-        except exceptions.ServiceNotFoundException as exc:
-            return milvus_pb2.TopKQueryResultList(
-                status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=exc.message)
-            )
-        except Exception as e:
-            logger.error(e)
-            results = workflow.query_vectors(table_name, table_meta, query_record_array,
-                                             topk, nprobe, query_range_array)
+        results = self._do_query(table_name, table_meta, query_record_array, topk,
+                                 nprobe, query_range_array)
+        # try:
+        #     results = workflow.query_vectors(table_name, table_meta, query_record_array, topk,
+        #                                      nprobe, query_range_array)
+        # except (exceptions.GRPCQueryInvalidRangeException, TableNotFoundException) as exc:
+        #     return milvus_pb2.TopKQueryResultList(
+        #         status=status_pb2.Status(error_code=exc.code, reason=exc.message)
+        #     )
+        # except exceptions.ServiceNotFoundException as exc:
+        #     return milvus_pb2.TopKQueryResultList(
+        #         status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=exc.message)
+        #     )
+        # except Exception as e:
+        #     logger.error(e)
+        #     results = workflow.query_vectors(table_name, table_meta, query_record_array,
+        #                                      topk, nprobe, query_range_array)
 
         now = time.time()
-        logger.info('SearchVector Ends @{}'.format(now))
         logger.info('SearchVector takes: {}'.format(now - start))
 
         topk_result_list = milvus_pb2.TopKQueryResultList(
             status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success"),
             topk_query_result=results
         )
         return topk_result_list
 
     def SearchInFiles(self, request, context):
-        try:
-            file_id_array = list(request.file_id_array)
-            search_param = request.search_param
-            table_name = search_param.table_name
-            topk = search_param.topk
-            nprobe = search_param.nprobe
-
-            query_record_array = []
-
-            for query_record in search_param.query_record_array:
-                query_record_array.append(list(query_record))
-
-            query_range_array = []
-            for query_range in search_param.query_range_array:
-                query_range_array.append("")
-        except Exception as e:
-            milvus_pb2.TopKQueryResultList(
-                status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e)),
-            )
-
-        res = search_vector_in_files.delay(table_name=table_name,
-                                           file_id_array=file_id_array,
-                                           query_record_array=query_record_array,
-                                           query_range_array=query_range_array,
-                                           topk=topk,
-                                           nprobe=nprobe)
-        status, result = res.get(timeout=1)
-
-        if not status.OK():
-            raise ThriftException(code=status.code, reason=status.message)
-        res = TopKQueryResult()
-        for top_k_query_results in result:
-            res.query_result_arrays.append([QueryResult(id=qr.id, distance=qr.distance)
-                                            for qr in top_k_query_results])
-        return res
+        raise NotImplemented()
 
     def DescribeTable(self, request, context):
         _status, _table_name = Parser.parse_proto_TableName(request)
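Note: _format_date (added in the hunk above) packs a calendar date into the integer layout the metadata store uses for partition dates: (year - 1900) * 10000 + (month - 1) * 100 + day. Worked example: 2019-09-18 encodes as 119 * 10000 + 8 * 100 + 18 = 1190818.

    import datetime

    def encode(d):
        return (d.year - 1900) * 10000 + (d.month - 1) * 100 + d.day

    assert encode(datetime.date(2019, 9, 18)) == 1190818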
diff --git a/mishards/settings.py b/mishards/settings.py
index 0566cf066f..4d87e69fe3 100644
--- a/mishards/settings.py
+++ b/mishards/settings.py
@@ -21,6 +21,7 @@ config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE)
 
 TIMEOUT = env.int('TIMEOUT', 60)
 MAX_RETRY = env.int('MAX_RETRY', 3)
+SEARCH_WORKER_SIZE = env.int('SEARCH_WORKER_SIZE', 10)
 SERVER_PORT = env.int('SERVER_PORT', 19530)
 WOSERVER = env.str('WOSERVER')
 

From deb4a5fb62ff540eb06003d9b2940d09b8aeeb16 Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Wed, 18 Sep 2019 14:50:36 +0800
Subject: [PATCH 006/307] update for service discovery

---
 mishards/__init__.py        |   8 ++
 mishards/connections.py     |   9 +-
 mishards/main.py            |  16 ++-
 mishards/server.py          |   2 +-
 mishards/service_founder.py | 273 ++++++++++++++++++++++++++++++++++++
 mishards/service_handler.py |   7 +-
 mishards/settings.py        |  11 +-
 7 files changed, 315 insertions(+), 11 deletions(-)
 create mode 100644 mishards/service_founder.py

diff --git a/mishards/__init__.py b/mishards/__init__.py
index 700dd4238c..b3a14cf7e3 100644
--- a/mishards/__init__.py
+++ b/mishards/__init__.py
@@ -2,5 +2,13 @@ import settings
 from connections import ConnectionMgr
 connect_mgr = ConnectionMgr()
 
+from service_founder import ServiceFounder
+discover = ServiceFounder(namespace=settings.SD_NAMESPACE,
+                          conn_mgr=connect_mgr,
+                          pod_patt=settings.SD_ROSERVER_POD_PATT,
+                          label_selector=settings.SD_LABEL_SELECTOR,
+                          in_cluster=settings.SD_IN_CLUSTER,
+                          poll_interval=settings.SD_POLL_INTERVAL)
+
 from server import Server
 grpc_server = Server(conn_mgr=connect_mgr)
diff --git a/mishards/connections.py b/mishards/connections.py
index 06d5f3ff16..82dd082eac 100644
--- a/mishards/connections.py
+++ b/mishards/connections.py
@@ -29,7 +29,7 @@ class Connection:
             self.conn.connect(uri=self.uri)
         except Exception as e:
             if not self.error_handlers:
-                raise exceptions.ConnectionConnectError()
+                raise exceptions.ConnectionConnectError(e)
             for handler in self.error_handlers:
                 handler(e)
 
@@ -77,6 +77,10 @@ class ConnectionMgr:
         self.metas = {}
         self.conns = {}
 
+    @property
+    def conn_names(self):
+        return set(self.metas.keys()) - set(['WOSERVER'])
+
     def conn(self, name, throw=False):
         c = self.conns.get(name, None)
         if not c:
@@ -116,7 +120,8 @@ class ConnectionMgr:
         return self.on_diff_meta(name, url)
 
     def on_same_meta(self, name, url):
-        logger.warn('Register same meta: {}:{}'.format(name, url))
+        # logger.warn('Register same meta: {}:{}'.format(name, url))
+        pass
 
     def on_diff_meta(self, name, url):
         logger.warn('Received {} with diff url={}'.format(name, url))
diff --git a/mishards/main.py b/mishards/main.py
index 2ba3f14697..0526f87ff8 100644
--- a/mishards/main.py
+++ b/mishards/main.py
@@ -3,13 +3,19 @@ import os
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 import settings
-from mishards import connect_mgr, grpc_server as server
+from mishards import (connect_mgr,
+                      discover,
+                      grpc_server as server)
 
 def main():
-    connect_mgr.register('WOSERVER', settings.WOSERVER)
-    connect_mgr.register('TEST', 'tcp://127.0.0.1:19530')
-    server.run(port=settings.SERVER_PORT)
-    return 0
+    try:
+        discover.start()
+        connect_mgr.register('WOSERVER', settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER)
+        server.run(port=settings.SERVER_PORT)
+        return 0
+    except Exception as e:
+        logger.error(e)
+        return 1
 
 if __name__ == '__main__':
     sys.exit(main())
diff --git a/mishards/server.py b/mishards/server.py
index 59ea7db46b..d2f88cf592 100644
--- a/mishards/server.py
+++ b/mishards/server.py
@@ -43,5 +43,5 @@ class Server:
     def stop(self):
         logger.info('Server is shuting down ......')
         self.exit_flag = True
-        self.server.stop(0)
+        self.server_impl.stop(0)
         logger.info('Server is closed')
diff --git a/mishards/service_founder.py b/mishards/service_founder.py
new file mode 100644
index 0000000000..7fc47639e7
--- /dev/null
+++ b/mishards/service_founder.py
@@ -0,0 +1,273 @@
+import os, sys
+if __name__ == '__main__':
+    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+import re
+import logging
+import time
+import copy
+import threading
+import queue
+from functools import wraps
+from kubernetes import client, config, watch
+
+from mishards.utils import singleton
+
+logger = logging.getLogger(__name__)
+
+incluster_namespace_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
+
+
+class K8SMixin:
+    def __init__(self, namespace, in_cluster=False, **kwargs):
+        self.namespace = namespace
+        self.in_cluster = in_cluster
+        self.kwargs = kwargs
+        self.v1 = kwargs.get('v1', None)
+        if not self.namespace:
+            self.namespace = open(incluster_namespace_path).read()
+
+        if not self.v1:
+            config.load_incluster_config() if self.in_cluster else config.load_kube_config()
+            self.v1 = client.CoreV1Api()
+
+
+class K8SServiceDiscover(threading.Thread, K8SMixin):
+    def __init__(self, message_queue, namespace, label_selector, in_cluster=False, **kwargs):
+        K8SMixin.__init__(self, namespace=namespace, in_cluster=in_cluster, **kwargs)
+        threading.Thread.__init__(self)
+        self.queue = message_queue
+        self.terminate = False
+        self.label_selector = label_selector
+        self.poll_interval = kwargs.get('poll_interval', 5)
+
+    def run(self):
+        while not self.terminate:
+            try:
+                pods = self.v1.list_namespaced_pod(namespace=self.namespace, label_selector=self.label_selector)
+                event_message = {
+                    'eType': 'PodHeartBeat',
+                    'events': []
+                }
+                for item in pods.items:
+                    pod = self.v1.read_namespaced_pod(name=item.metadata.name, namespace=self.namespace)
+                    name = pod.metadata.name
+                    ip = pod.status.pod_ip
+                    phase = pod.status.phase
+                    reason = pod.status.reason
+                    message = pod.status.message
+                    ready = True if phase == 'Running' else False
+
+                    pod_event = dict(
+                        pod=name,
+                        ip=ip,
+                        ready=ready,
+                        reason=reason,
+                        message=message
+                    )
+
+                    event_message['events'].append(pod_event)
+
+                self.queue.put(event_message)
+
+            except Exception as exc:
+                logger.error(exc)
+
+            time.sleep(self.poll_interval)
+
+    def stop(self):
+        self.terminate = True
+
+
+class K8SEventListener(threading.Thread, K8SMixin):
+    def __init__(self, message_queue, namespace, in_cluster=False, **kwargs):
+        K8SMixin.__init__(self, namespace=namespace, in_cluster=in_cluster, **kwargs)
+        threading.Thread.__init__(self)
+        self.queue = message_queue
+        self.terminate = False
+        self.at_start_up = True
+        self._stop_event = threading.Event()
+
+    def stop(self):
+        self.terminate = True
+        self._stop_event.set()
+
+    def run(self):
+        resource_version = ''
+        w = watch.Watch()
+        for event in w.stream(self.v1.list_namespaced_event, namespace=self.namespace,
+                              field_selector='involvedObject.kind=Pod'):
+            if self.terminate:
+                break
+
+            resource_version = int(event['object'].metadata.resource_version)
+
+            info = dict(
+                eType='WatchEvent',
+                pod=event['object'].involved_object.name,
+                reason=event['object'].reason,
+                message=event['object'].message,
+                start_up=self.at_start_up,
+            )
+            self.at_start_up = False
+            # logger.info('Received event: {}'.format(info))
+            self.queue.put(info)
+
+
+class EventHandler(threading.Thread):
+    def __init__(self, mgr, message_queue, namespace, pod_patt, **kwargs):
+        threading.Thread.__init__(self)
+        self.mgr = mgr
+        self.queue = message_queue
+        self.kwargs = kwargs
+        self.terminate = False
+        self.pod_patt = re.compile(pod_patt)
+        self.namespace = namespace
+
+    def stop(self):
+        self.terminate = True
+
+    def on_drop(self, event, **kwargs):
+        pass
+
+    def on_pod_started(self, event, **kwargs):
+        try_cnt = 3
+        pod = None
+        while try_cnt > 0:
+            try_cnt -= 1
+            try:
+                pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], namespace=self.namespace)
+                if not pod.status.pod_ip:
+                    time.sleep(0.5)
+                    continue
+                break
+            except client.rest.ApiException as exc:
+                time.sleep(0.5)
+
+        if try_cnt <= 0 and not pod:
+            if not event['start_up']:
+                logger.error('Pod {} is started but cannot read pod'.format(event['pod']))
+            return
+        elif try_cnt <= 0 and not pod.status.pod_ip:
+            logger.warn('NoPodIPFoundError')
+            return
+
+        logger.info('Register POD {} with IP {}'.format(pod.metadata.name, pod.status.pod_ip))
+        self.mgr.add_pod(name=pod.metadata.name, ip=pod.status.pod_ip)
+
+    def on_pod_killing(self, event, **kwargs):
+        logger.info('Unregister POD {}'.format(event['pod']))
+        self.mgr.delete_pod(name=event['pod'])
+
+    def on_pod_heartbeat(self, event, **kwargs):
+        names = self.mgr.conn_mgr.conn_names
+
+        running_names = set()
+        for each_event in event['events']:
+            if each_event['ready']:
+                self.mgr.add_pod(name=each_event['pod'], ip=each_event['ip'])
+                running_names.add(each_event['pod'])
+            else:
+                self.mgr.delete_pod(name=each_event['pod'])
+
+        to_delete = names - running_names
+        for name in to_delete:
+            self.mgr.delete_pod(name)
+
+        logger.info(self.mgr.conn_mgr.conn_names)
+
+    def handle_event(self, event):
+        if event['eType'] == 'PodHeartBeat':
+            return self.on_pod_heartbeat(event)
+
+        if not event or (event['reason'] not in ('Started', 'Killing')):
+            return self.on_drop(event)
+
+        if not re.match(self.pod_patt, event['pod']):
+            return self.on_drop(event)
+
+        logger.info('Handling event: {}'.format(event))
+
+        if event['reason'] == 'Started':
+            return self.on_pod_started(event)
+
+        return self.on_pod_killing(event)
+
+    def run(self):
+        while not self.terminate:
+            try:
+                event = self.queue.get(timeout=1)
+                self.handle_event(event)
+            except queue.Empty:
+                continue
+
+@singleton
+class ServiceFounder(object):
+    def __init__(self, conn_mgr, namespace, pod_patt, label_selector, in_cluster=False, **kwargs):
+        self.namespace = namespace
+        self.kwargs = kwargs
+        self.queue = queue.Queue()
+        self.in_cluster = in_cluster
+
+        self.conn_mgr = conn_mgr
+
+        if not self.namespace:
+            self.namespace = open(incluster_namespace_path).read()
+
+        config.load_incluster_config() if self.in_cluster else config.load_kube_config()
+        self.v1 = client.CoreV1Api()
+
+        self.listener = K8SEventListener(
+            message_queue=self.queue,
+            namespace=self.namespace,
+            in_cluster=self.in_cluster,
+            v1=self.v1,
+            **kwargs
+        )
+
+        self.pod_heartbeater = K8SServiceDiscover(
+            message_queue=self.queue,
+            namespace=namespace,
+            label_selector=label_selector,
+            in_cluster=self.in_cluster,
+            v1=self.v1,
+            **kwargs
+        )
+
+        self.event_handler = EventHandler(mgr=self,
+                                          message_queue=self.queue,
+                                          namespace=self.namespace,
+                                          pod_patt=pod_patt, **kwargs)
+
+    def add_pod(self, name, ip):
+        self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip))
+
+    def delete_pod(self, name):
+        self.conn_mgr.unregister(name)
+
+    def start(self):
+        self.listener.daemon = True
+        self.listener.start()
+        self.event_handler.start()
+        while self.listener.at_start_up:
+            time.sleep(1)
+
+        self.pod_heartbeater.start()
+
+    def stop(self):
+        self.listener.stop()
+        self.pod_heartbeater.stop()
+        self.event_handler.stop()
+
+
+if __name__ == '__main__':
+    from mishards import connect_mgr
+    logging.basicConfig(level=logging.INFO)
+    t = ServiceFounder(namespace='xp', conn_mgr=connect_mgr, pod_patt=".*-ro-servers-.*", label_selector='tier=ro-servers', in_cluster=False)
+    t.start()
+    cnt = 2
+    while cnt > 0:
+        time.sleep(2)
+        cnt -= 1
+    t.stop()
diff --git a/mishards/service_handler.py b/mishards/service_handler.py
index 89ae2cd36c..516359f27c 100644
--- a/mishards/service_handler.py
+++ b/mishards/service_handler.py
@@ -11,6 +11,7 @@ from milvus.client import types
 
 import settings
 from grpc_utils.grpc_args_parser import GrpcArgsParser as Parser
+import exceptions
 
 logger = logging.getLogger(__name__)
 
@@ -30,7 +31,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
         return conn.conn
 
     def query_conn(self, name):
         conn = self.conn_mgr.conn(name)
-        conn and conn.on_connect()
+        if not conn:
+            raise exceptions.ConnectionNotFoundError(name)
+        conn.on_connect()
         return conn.conn
 
@@ -51,7 +54,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
 
     def _get_routing_file_ids(self, table_id, range_array):
         return {
-            'TEST': {
+            'milvus-ro-servers-0': {
                 'table_id': table_id,
                 'file_ids': [123]
             }
diff --git a/mishards/settings.py b/mishards/settings.py
index 4d87e69fe3..c4466da6ec 100644
--- a/mishards/settings.py
+++ b/mishards/settings.py
@@ -7,7 +7,6 @@ env = Env()
 env.read_env()
 
 DEBUG = env.bool('DEBUG', False)
-TESTING = env.bool('TESTING', False)
 
 METADATA_URI = env.str('METADATA_URI', '')
 
@@ -26,6 +25,16 @@ SEARCH_WORKER_SIZE = env.int('SEARCH_WORKER_SIZE', 10)
 SERVER_PORT = env.int('SERVER_PORT', 19530)
 WOSERVER = env.str('WOSERVER')
 
+SD_NAMESPACE = env.str('SD_NAMESPACE', '')
+SD_IN_CLUSTER = env.bool('SD_IN_CLUSTER', False)
+SD_POLL_INTERVAL = env.int('SD_POLL_INTERVAL', 5)
+SD_ROSERVER_POD_PATT = env.str('SD_ROSERVER_POD_PATT', '')
+SD_LABEL_SELECTOR = env.str('SD_LABEL_SELECTOR', '')
+
+TESTING = env.bool('TESTING', False)
+TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530')
+
+
 if __name__ == '__main__':
     import logging
     logger = logging.getLogger(__name__)
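Note: ServiceFounder layers two Kubernetes sources — a watch on pod events for fast Started/Killing transitions, plus the periodic PodHeartBeat listing that reconciles anything the watch missed. The heartbeat half reduces to a poll like this sketch (namespace and selector are illustrative):

    from kubernetes import client, config

    config.load_kube_config()  # or config.load_incluster_config() inside a pod
    v1 = client.CoreV1Api()
    pods = v1.list_namespaced_pod(namespace='xp', label_selector='tier=ro-servers')
    ready = {p.metadata.name: p.status.pod_ip
             for p in pods.items if p.status.phase == 'Running'}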
@classmethod + def create_all(cls): + db.create_all() + + @classmethod + def drop_all(cls): + db.drop_all() + +if __name__ == '__main__': + fire.Fire(DBHandler) diff --git a/mishards/__init__.py b/mishards/__init__.py index b3a14cf7e3..c799e42fa4 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -1,8 +1,13 @@ -import settings -from connections import ConnectionMgr +from mishards import settings + +from mishards.db_base import DB +db = DB() +db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI) + +from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() -from service_founder import ServiceFounder +from mishards.service_founder import ServiceFounder discover = ServiceFounder(namespace=settings.SD_NAMESPACE, conn_mgr=connect_mgr, pod_patt=settings.SD_ROSERVER_POD_PATT, @@ -10,5 +15,5 @@ discover = ServiceFounder(namespace=settings.SD_NAMESPACE, in_cluster=settings.SD_IN_CLUSTER, poll_interval=settings.SD_POLL_INTERVAL) -from server import Server +from mishards.server import Server grpc_server = Server(conn_mgr=connect_mgr) diff --git a/mishards/connections.py b/mishards/connections.py index 82dd082eac..9201ea2b08 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -4,9 +4,8 @@ from functools import wraps from contextlib import contextmanager from milvus import Milvus -import settings -import exceptions -from utils import singleton +from mishards import (settings, exceptions) +from mishards.utils import singleton logger = logging.getLogger(__name__) diff --git a/mishards/db_base.py b/mishards/db_base.py new file mode 100644 index 0000000000..702c9e57e9 --- /dev/null +++ b/mishards/db_base.py @@ -0,0 +1,27 @@ +from sqlalchemy import create_engine +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker, scoped_session + +class DB: + Model = declarative_base() + def __init__(self, uri=None): + uri and self.init_db(uri) + + def init_db(self, uri): + self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, + pool_pre_ping=True, + max_overflow=0) + self.uri = uri + session = sessionmaker() + session.configure(bind=self.engine) + self.db_session = session() + + @property + def Session(self): + return self.db_session + + def drop_all(self): + self.Model.metadata.drop_all(self.engine) + + def create_all(self): + self.Model.metadata.create_all(self.engine) diff --git a/mishards/exceptions.py b/mishards/exceptions.py index 1445d18769..0f89ecb52d 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -1,4 +1,4 @@ -import exception_codes as codes +import mishards.exception_codes as codes class BaseException(Exception): code = codes.INVALID_CODE diff --git a/mishards/main.py b/mishards/main.py index 0526f87ff8..5d96d8b499 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -1,21 +1,17 @@ -import sys -import os +import os, sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -import settings -from mishards import (connect_mgr, +from mishards import ( + settings, + db, connect_mgr, discover, grpc_server as server) def main(): - try: - discover.start() - connect_mgr.register('WOSERVER', settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER) - server.run(port=settings.SERVER_PORT) - return 0 - except Exception as e: - logger.error(e) - return 1 + discover.start() + connect_mgr.register('WOSERVER', settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER) + server.run(port=settings.SERVER_PORT) + return 0 if __name__ == 
'__main__': sys.exit(main())
diff --git a/mishards/models.py b/mishards/models.py new file mode 100644 index 0000000000..c699f490dd --- /dev/null +++ b/mishards/models.py @@ -0,0 +1,75 @@ +import logging +from sqlalchemy import (Integer, Boolean, Text, + String, BigInteger, func, and_, or_, + Column) +from sqlalchemy.orm import relationship, backref + +from mishards import db + +logger = logging.getLogger(__name__) + +class TableFiles(db.Model): + FILE_TYPE_NEW = 0 + FILE_TYPE_RAW = 1 + FILE_TYPE_TO_INDEX = 2 + FILE_TYPE_INDEX = 3 + FILE_TYPE_TO_DELETE = 4 + FILE_TYPE_NEW_MERGE = 5 + FILE_TYPE_NEW_INDEX = 6 + FILE_TYPE_BACKUP = 7 + + __tablename__ = 'TableFiles' + + id = Column(BigInteger, primary_key=True, autoincrement=True) + table_id = Column(String(50)) + engine_type = Column(Integer) + file_id = Column(String(50)) + file_type = Column(Integer) + file_size = Column(Integer, default=0) + row_count = Column(Integer, default=0) + updated_time = Column(BigInteger) + created_on = Column(BigInteger) + date = Column(Integer) + + table = relationship( + 'Table', + primaryjoin='and_(foreign(TableFile.table_id) == Table.table_id)', + backref=backref('files', uselist=True, lazy='dynamic') + ) + + +class Tables(db.Model): + TO_DELETE = 1 + NORMAL = 0 + + __tablename__ = 'Tables' + + id = Column(BigInteger, primary_key=True, autoincrement=True) + table_id = Column(String(50), unique=True) + state = Column(Integer) + dimension = Column(Integer) + created_on = Column(Integer) + flag = Column(Integer, default=0) + index_file_size = Column(Integer) + engine_type = Column(Integer) + nlist = Column(Integer) + metric_type = Column(Integer) + + def files_to_search(self, date_range=None): + cond = or_( + TableFile.file_type==TableFile.FILE_TYPE_RAW, + TableFile.file_type==TableFile.FILE_TYPE_TO_INDEX, + TableFile.file_type==TableFile.FILE_TYPE_INDEX, + ) + if date_range: + cond = and_( + cond, + or_( + and_(TableFile.date>=d[0], TableFile.date<d[1]) for d in date_range + ) + ) + return self.files.filter(cond)
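The files_to_search helper above narrows a table's files to the searchable types (raw, to-index, index), optionally restricted to packed-integer date windows of the form (year-1900)*10000 + (month-1)*100 + day used throughout mishards. A minimal usage sketch, assuming a seeded database; the table id and date are illustrative values, not taken from these patches:

    # Illustrative only: fetch one day's searchable files for an assumed table id.
    from sqlalchemy import and_
    from mishards import db
    from mishards.models import Tables

    day = (2019 - 1900) * 10000 + (9 - 1) * 100 + 18   # 2019-09-18 -> 1190818
    table = db.Session.query(Tables).filter(and_(
        Tables.table_id == 'demo_table',               # assumed id
        Tables.state != Tables.TO_DELETE)).first()
    files = table.files_to_search(date_range=[(day, day + 1)]) if table else []

From: "peng.xu" Date: Wed, 18 Sep 2019 16:59:04 +0800 Subject: [PATCH 008/307] update for models --- manager.py | 13 ++++ mishards/__init__.py | 2 +- mishards/db_base.py | 11 ++- mishards/factories.py | 49 ++++++++++++ mishards/hash_ring.py | 150 ++++++++++++++++++++++++++++++++++++ mishards/models.py | 12 +-- mishards/service_founder.py | 4 +- mishards/service_handler.py | 39 ++++++++-- mishards/settings.py | 1 + 9 files changed, 262 insertions(+), 19 deletions(-) create mode 100644 mishards/factories.py create mode 100644 mishards/hash_ring.py
diff --git a/manager.py b/manager.py index 0a2acad26f..31f5894d2d 100644 --- a/manager.py +++ b/manager.py @@ -1,5 +1,6 @@ import fire from mishards import db +from sqlalchemy import and_ class DBHandler: @classmethod @@ -10,5 +11,17 @@ class DBHandler: def drop_all(cls): db.drop_all() + @classmethod + def fun(cls, tid): + from mishards.factories import TablesFactory, TableFilesFactory, Tables + f = db.Session.query(Tables).filter(and_( + Tables.table_id==tid, + Tables.state!=Tables.TO_DELETE) + ).first() + print(f) + + # f1 = TableFilesFactory() + + if __name__ == '__main__': fire.Fire(DBHandler)
diff --git a/mishards/__init__.py b/mishards/__init__.py index c799e42fa4..a792cd5ce9 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -2,7 +2,7 @@ from mishards import settings from mishards.db_base import DB db = DB() -db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI) +db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI, echo=settings.SQL_ECHO) from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() diff --git 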
a/mishards/db_base.py b/mishards/db_base.py index 702c9e57e9..5ad1c394d7 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -1,15 +1,20 @@ +import logging from sqlalchemy import create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker, scoped_session +logger = logging.getLogger(__name__) + class DB: Model = declarative_base() - def __init__(self, uri=None): - uri and self.init_db(uri) + def __init__(self, uri=None, echo=False): + self.echo = echo + uri and self.init_db(uri, echo) - def init_db(self, uri): + def init_db(self, uri, echo=False): self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, pool_pre_ping=True, + echo=echo, max_overflow=0) self.uri = uri session = sessionmaker() diff --git a/mishards/factories.py b/mishards/factories.py new file mode 100644 index 0000000000..5bd059654a --- /dev/null +++ b/mishards/factories.py @@ -0,0 +1,49 @@ +import time +import datetime +import random +import factory +from factory.alchemy import SQLAlchemyModelFactory +from faker import Faker +from faker.providers import BaseProvider + +from mishards import db +from mishards.models import Tables, TableFiles + +class FakerProvider(BaseProvider): + def this_date(self): + t = datetime.datetime.today() + return (t.year - 1900) * 10000 + (t.month-1)*100 + t.day + +factory.Faker.add_provider(FakerProvider) + +class TablesFactory(SQLAlchemyModelFactory): + class Meta: + model = Tables + sqlalchemy_session = db.Session + sqlalchemy_session_persistence = 'commit' + + id = factory.Faker('random_number', digits=16, fix_len=True) + table_id = factory.Faker('uuid4') + state = factory.Faker('random_element', elements=(0,1,2,3)) + dimension = factory.Faker('random_element', elements=(256,512)) + created_on = int(time.time()) + index_file_size = 0 + engine_type = factory.Faker('random_element', elements=(0,1,2,3)) + metric_type = factory.Faker('random_element', elements=(0,1)) + nlist = 16384 + +class TableFilesFactory(SQLAlchemyModelFactory): + class Meta: + model = TableFiles + sqlalchemy_session = db.Session + sqlalchemy_session_persistence = 'commit' + + id = factory.Faker('random_number', digits=16, fix_len=True) + table = factory.SubFactory(TablesFactory) + engine_type = factory.Faker('random_element', elements=(0,1,2,3)) + file_id = factory.Faker('uuid4') + file_type = factory.Faker('random_element', elements=(0,1,2,3,4)) + file_size = factory.Faker('random_number') + updated_time = int(time.time()) + created_on = int(time.time()) + date = factory.Faker('this_date') diff --git a/mishards/hash_ring.py b/mishards/hash_ring.py new file mode 100644 index 0000000000..bfec108c5c --- /dev/null +++ b/mishards/hash_ring.py @@ -0,0 +1,150 @@ +import math +import sys +from bisect import bisect + +if sys.version_info >= (2, 5): + import hashlib + md5_constructor = hashlib.md5 +else: + import md5 + md5_constructor = md5.new + +class HashRing(object): + + def __init__(self, nodes=None, weights=None): + """`nodes` is a list of objects that have a proper __str__ representation. + `weights` is dictionary that sets weights to the nodes. The default + weight is that all nodes are equal. + """ + self.ring = dict() + self._sorted_keys = [] + + self.nodes = nodes + + if not weights: + weights = {} + self.weights = weights + + self._generate_circle() + + def _generate_circle(self): + """Generates the circle. 
+ """ + total_weight = 0 + for node in self.nodes: + total_weight += self.weights.get(node, 1) + + for node in self.nodes: + weight = 1 + + if node in self.weights: + weight = self.weights.get(node) + + factor = math.floor((40*len(self.nodes)*weight) / total_weight); + + for j in range(0, int(factor)): + b_key = self._hash_digest( '%s-%s' % (node, j) ) + + for i in range(0, 3): + key = self._hash_val(b_key, lambda x: x+i*4) + self.ring[key] = node + self._sorted_keys.append(key) + + self._sorted_keys.sort() + + def get_node(self, string_key): + """Given a string key a corresponding node in the hash ring is returned. + + If the hash ring is empty, `None` is returned. + """ + pos = self.get_node_pos(string_key) + if pos is None: + return None + return self.ring[ self._sorted_keys[pos] ] + + def get_node_pos(self, string_key): + """Given a string key a corresponding node in the hash ring is returned + along with it's position in the ring. + + If the hash ring is empty, (`None`, `None`) is returned. + """ + if not self.ring: + return None + + key = self.gen_key(string_key) + + nodes = self._sorted_keys + pos = bisect(nodes, key) + + if pos == len(nodes): + return 0 + else: + return pos + + def iterate_nodes(self, string_key, distinct=True): + """Given a string key it returns the nodes as a generator that can hold the key. + + The generator iterates one time through the ring + starting at the correct position. + + if `distinct` is set, then the nodes returned will be unique, + i.e. no virtual copies will be returned. + """ + if not self.ring: + yield None, None + + returned_values = set() + def distinct_filter(value): + if str(value) not in returned_values: + returned_values.add(str(value)) + return value + + pos = self.get_node_pos(string_key) + for key in self._sorted_keys[pos:]: + val = distinct_filter(self.ring[key]) + if val: + yield val + + for i, key in enumerate(self._sorted_keys): + if i < pos: + val = distinct_filter(self.ring[key]) + if val: + yield val + + def gen_key(self, key): + """Given a string key it returns a long value, + this long value represents a place on the hash ring. + + md5 is currently used because it mixes well. 
+ """ + b_key = self._hash_digest(key) + return self._hash_val(b_key, lambda x: x) + + def _hash_val(self, b_key, entry_fn): + return (( b_key[entry_fn(3)] << 24) + |(b_key[entry_fn(2)] << 16) + |(b_key[entry_fn(1)] << 8) + | b_key[entry_fn(0)] ) + + def _hash_digest(self, key): + m = md5_constructor() + key = key.encode() + m.update(key) + return m.digest() + +if __name__ == '__main__': + from collections import defaultdict + servers = ['192.168.0.246:11212', + '192.168.0.247:11212', + '192.168.0.248:11212', + '192.168.0.249:11212'] + + ring = HashRing(servers) + keys = ['{}'.format(i) for i in range(100)] + mapped = defaultdict(list) + for k in keys: + server = ring.get_node(k) + mapped[server].append(k) + + for k,v in mapped.items(): + print(k, v) diff --git a/mishards/models.py b/mishards/models.py index c699f490dd..0f7bb603ae 100644 --- a/mishards/models.py +++ b/mishards/models.py @@ -32,8 +32,8 @@ class TableFiles(db.Model): date = Column(Integer) table = relationship( - 'Table', - primaryjoin='and_(foreign(TableFile.table_id) == Table.table_id)', + 'Tables', + primaryjoin='and_(foreign(TableFiles.table_id) == Tables.table_id)', backref=backref('files', uselist=True, lazy='dynamic') ) @@ -57,15 +57,15 @@ class Tables(db.Model): def files_to_search(self, date_range=None): cond = or_( - TableFile.file_type==TableFile.FILE_TYPE_RAW, - TableFile.file_type==TableFile.FILE_TYPE_TO_INDEX, - TableFile.file_type==TableFile.FILE_TYPE_INDEX, + TableFiles.file_type==TableFiles.FILE_TYPE_RAW, + TableFiles.file_type==TableFiles.FILE_TYPE_TO_INDEX, + TableFiles.file_type==TableFiles.FILE_TYPE_INDEX, ) if date_range: cond = and_( cond, or_( - and_(TableFile.date>=d[0], TableFile.date=d[0], TableFiles.date Date: Wed, 18 Sep 2019 17:09:03 +0800 Subject: [PATCH 009/307] fix session bug --- mishards/db_base.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mishards/db_base.py b/mishards/db_base.py index 5ad1c394d7..ffbe29f94f 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -17,13 +17,12 @@ class DB: echo=echo, max_overflow=0) self.uri = uri - session = sessionmaker() - session.configure(bind=self.engine) - self.db_session = session() + self.session = sessionmaker() + self.session.configure(bind=self.engine) @property def Session(self): - return self.db_session + return self.session() def drop_all(self): self.Model.metadata.drop_all(self.engine) From f22204878a1b7fefda9cb258ce4002c01100a86f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 17:09:38 +0800 Subject: [PATCH 010/307] fix session bug --- mishards/service_handler.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 7dd4380d97..eb2951be5e 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -60,7 +60,6 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): Tables.table_id==table_id, Tables.state!=Tables.TO_DELETE )).first() - logger.error(table) if not table: raise exceptions.TableNotFoundError(table_id) From 0ad5c32c46f29fd5486d02e30f74cc06f17c4eb6 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 18:00:30 +0800 Subject: [PATCH 011/307] update requirements.txt --- requirements.txt | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 requirements.txt diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000..8cedabdf7b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,33 @@ +environs==4.2.0 +factory-boy==2.12.0 
+Faker==1.0.7 +fire==0.1.3 +google-auth==1.6.3 +grpcio==1.22.0 +grpcio-tools==1.22.0 +kubernetes==10.0.1 +MarkupSafe==1.1.1 +marshmallow==2.19.5 +pymysql==0.9.3 +protobuf==3.9.1 +py==1.8.0 +pyasn1==0.4.7 +pyasn1-modules==0.2.6 +pylint==2.3.1 +#pymilvus-test==0.2.15 +pymilvus==0.2.0 +pyparsing==2.4.0 +pytest==4.6.3 +pytest-level==0.1.1 +pytest-print==0.1.2 +pytest-repeat==0.8.0 +pytest-timeout==1.3.3 +python-dateutil==2.8.0 +python-dotenv==0.10.3 +pytz==2019.1 +requests==2.22.0 +requests-oauthlib==1.2.0 +rsa==4.0 +six==1.12.0 +SQLAlchemy==1.3.5 +urllib3==1.25.3 From c042d2f3234038e01a00e7bc0631b2e653387642 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 18:16:51 +0800 Subject: [PATCH 012/307] add dockerfile --- Dockerfile | 10 ++++++++++ build.sh | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 Dockerfile create mode 100755 build.sh diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..594640619e --- /dev/null +++ b/Dockerfile @@ -0,0 +1,10 @@ +FROM python:3.6 +RUN apt update && apt install -y \ + less \ + telnet +RUN mkdir /source +WORKDIR /source +ADD ./requirements.txt ./ +RUN pip install -r requirements.txt +COPY . . +CMD python mishards/main.py diff --git a/build.sh b/build.sh new file mode 100755 index 0000000000..2b3c89bbf9 --- /dev/null +++ b/build.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +BOLD=`tput bold` +NORMAL=`tput sgr0` +YELLOW='\033[1;33m' +ENDC='\033[0m' + +function build_image() { + dockerfile=$1 + remote_registry=$2 + tagged=$2 + buildcmd="docker build -t ${tagged} -f ${dockerfile} ." + echo -e "${BOLD}$buildcmd${NORMAL}" + $buildcmd + pushcmd="docker push ${remote_registry}" + echo -e "${BOLD}$pushcmd${NORMAL}" + $pushcmd + echo -e "${YELLOW}${BOLD}Image: ${remote_registry}${NORMAL}${ENDC}" +} + +case "$1" in + +all) + version="" + [[ ! -z $2 ]] && version=":${2}" + build_image "Dockerfile" "registry.zilliz.com/milvus/mishards${version}" "registry.zilliz.com/milvus/mishards" + ;; +*) + echo "Usage: [option...] 
{base | apps}" + echo "all, Usage: build.sh all [tagname|] => registry.zilliz.com/milvus/mishards:\${tagname}" + ;; +esac From dd59127e9722fcdc9d4b19f17358fb65a73691d4 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 18:17:51 +0800 Subject: [PATCH 013/307] add env example --- mishards/.env.example | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 mishards/.env.example diff --git a/mishards/.env.example b/mishards/.env.example new file mode 100644 index 0000000000..22406c7f34 --- /dev/null +++ b/mishards/.env.example @@ -0,0 +1,14 @@ +DEBUG=False + +WOSERVER=tcp://127.0.0.1:19530 +TESTING_WOSERVER=tcp://127.0.0.1:19530 +SERVER_PORT=19531 + +SD_NAMESPACE=xp +SD_IN_CLUSTER=False +SD_POLL_INTERVAL=5 +SD_ROSERVER_POD_PATT=.*-ro-servers-.* +SD_LABEL_SELECTOR=tier=ro-servers + +SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQL_ECHO=True From cee3d7e20ce1141eb01091d6c262d6e0a771fbf1 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 20:16:02 +0800 Subject: [PATCH 014/307] remove dummy settings --- mishards/settings.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/mishards/settings.py b/mishards/settings.py index 62948e2fa9..2bf7e96a8f 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -8,8 +8,6 @@ env.read_env() DEBUG = env.bool('DEBUG', False) -METADATA_URI = env.str('METADATA_URI', '') - LOG_LEVEL = env.str('LOG_LEVEL', 'DEBUG' if DEBUG else 'INFO') LOG_PATH = env.str('LOG_PATH', '/tmp/mishards') LOG_NAME = env.str('LOG_NAME', 'logfile') From e04e00df4b0d5c1358da941267203880c5f2bd96 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 20:16:18 +0800 Subject: [PATCH 015/307] add docker ignore file --- .dockerignore | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .dockerignore diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..d1012a3afd --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +.git +.gitignore +.env + +mishards/.env From e242a1cc91fe4b3afea1dc88f4a42b1817f5b5b2 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 20:16:46 +0800 Subject: [PATCH 016/307] temp support dns addr --- mishards/connections.py | 1 + mishards/main.py | 8 +++++++- mishards/service_handler.py | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/mishards/connections.py b/mishards/connections.py index 9201ea2b08..c6323f66f8 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -1,5 +1,6 @@ import logging import threading +import socket from functools import wraps from contextlib import contextmanager from milvus import Milvus diff --git a/mishards/main.py b/mishards/main.py index 5d96d8b499..e9c47f9edf 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -1,6 +1,9 @@ import os, sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +from urllib.parse import urlparse +import socket + from mishards import ( settings, db, connect_mgr, @@ -9,7 +12,10 @@ from mishards import ( def main(): discover.start() - connect_mgr.register('WOSERVER', settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER) + woserver = settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER + url = urlparse(woserver) + connect_mgr.register('WOSERVER', + '{}://{}:{}'.format(url.scheme, socket.gethostbyname(url.hostname), url.port)) server.run(port=settings.SERVER_PORT) return 0 diff --git a/mishards/service_handler.py b/mishards/service_handler.py index eb2951be5e..ac70440c47 100644 
--- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -145,7 +145,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): for res in rs: res.result() - reverse = table_meta.metric_type == types.MetricType.L2 + reverse = table_meta.metric_type == types.MetricType.IP return self._do_merge(all_topk_results, topk, reverse=reverse) def CreateTable(self, request, context):
From 512e2b31c46708401c3cba3f3f65c0cc092feef6 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 19 Sep 2019 10:17:00 +0800 Subject: [PATCH 017/307] add pre run handlers --- mishards/main.py | 10 ---------- mishards/server.py | 28 +++++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 11 deletions(-)
diff --git a/mishards/main.py b/mishards/main.py index e9c47f9edf..7fac55dfa2 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -1,21 +1,11 @@ import os, sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from urllib.parse import urlparse -import socket - from mishards import ( settings, - db, connect_mgr, - discover, grpc_server as server) def main(): - discover.start() - woserver = settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER - url = urlparse(woserver) - connect_mgr.register('WOSERVER', - '{}://{}:{}'.format(url.scheme, socket.gethostbyname(url.hostname), url.port)) server.run(port=settings.SERVER_PORT) return 0
diff --git a/mishards/server.py b/mishards/server.py index 185ed3c957..19cca2c18a 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -1,17 +1,21 @@ import logging import grpc import time +import socket +from urllib.parse import urlparse +from functools import wraps from concurrent import futures from grpc._cython import cygrpc from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.service_handler import ServiceHandler -import mishards.settings +from mishards import settings, discover logger = logging.getLogger(__name__) class Server: def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs): + self.pre_run_handlers = set() self.exit_flag = False self.port = int(port) self.conn_mgr = conn_mgr @@ -21,6 +25,27 @@ class Server: (cygrpc.ChannelArgKey.max_receive_message_length, -1)] ) + self.register_pre_run_handler(self.pre_run_handler) + + def pre_run_handler(self): + woserver = settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER + url = urlparse(woserver) + ip = socket.gethostbyname(url.hostname) + logger.error(ip) + socket.inet_pton(socket.AF_INET, ip) + self.conn_mgr.register('WOSERVER', + '{}://{}:{}'.format(url.scheme, ip, url.port)) + + def register_pre_run_handler(self, func): + logger.info('Registering {} into server pre_run_handlers'.format(func)) + self.pre_run_handlers.add(func) + return func + + def on_pre_run(self): + for handler in self.pre_run_handlers: + handler() + discover.start() + def start(self, port=None): add_MilvusServiceServicer_to_server(ServiceHandler(conn_mgr=self.conn_mgr), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format(str(port or self._port))) self.server_impl.start() def run(self, port): logger.info('Milvus server start ......') port = port or self.port + self.on_pre_run() self.start(port) logger.info('Successfully')
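Because register_pre_run_handler returns the function it receives, it also works as a decorator; every registered callable runs once in on_pre_run(), just before discover.start() and the gRPC server boot. A hedged sketch of adding a second startup hook next to the built-in WOSERVER registration (the hook name and body are invented for illustration):

    # Illustrative only: an extra pre-run hook registered via the decorator form.
    from mishards import grpc_server

    @grpc_server.register_pre_run_handler
    def warm_up():
        print('mishards pre-run checks passed')   # hypothetical hook body

From d3e79f539ea64e78e6b05910fd607f16c1221e71 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 19 Sep 2019 10:18:46 +0800 Subject: [PATCH 018/307] add pre run handlers --- mishards/server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mishards/server.py 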
b/mishards/server.py index 19cca2c18a..9966360d47 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -31,7 +31,6 @@ class Server: woserver = settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER url = urlparse(woserver) ip = socket.gethostbyname(url.hostname) - logger.error(ip) socket.inet_pton(socket.AF_INET, ip) self.conn_mgr.register('WOSERVER', '{}://{}:{}'.format(url.scheme, ip, url.port)) From 5249b80b0da577bde03da99f884957a5e6d3aad0 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 19 Sep 2019 10:22:07 +0800 Subject: [PATCH 019/307] remove dummy commented code --- mishards/service_handler.py | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index ac70440c47..f88655d2d6 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -246,32 +246,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): for query_range in request.query_range_array: query_range_array.append( Range(query_range.start_value, query_range.end_value)) - # except (TableNotFoundException, exceptions.GRPCInvlidArgument) as exc: - # return milvus_pb2.TopKQueryResultList( - # status=status_pb2.Status(error_code=exc.code, reason=exc.message) - # ) - # except Exception as e: - # return milvus_pb2.TopKQueryResultList( - # status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e)) - # ) results = self._do_query(table_name, table_meta, query_record_array, topk, nprobe, query_range_array) - # try: - # results = workflow.query_vectors(table_name, table_meta, query_record_array, topk, - # nprobe, query_range_array) - # except (exceptions.GRPCQueryInvalidRangeException, TableNotFoundException) as exc: - # return milvus_pb2.TopKQueryResultList( - # status=status_pb2.Status(error_code=exc.code, reason=exc.message) - # ) - # except exceptions.ServiceNotFoundException as exc: - # return milvus_pb2.TopKQueryResultList( - # status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=exc.message) - # ) - # except Exception as e: - # logger.error(e) - # results = workflow.query_vectors(table_name, table_meta, query_record_array, - # topk, nprobe, query_range_array) now = time.time() logger.info('SearchVector takes: {}'.format(now - start)) From 09d3e7844936dfcab6ad99e93218a581e4eb095c Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 19 Sep 2019 19:41:20 +0800 Subject: [PATCH 020/307] add exception handler --- mishards/connections.py | 21 ++++++++------ mishards/exception_handlers.py | 35 +++++++++++++++++++++++ mishards/exceptions.py | 3 +- mishards/server.py | 10 +++++++ mishards/service_handler.py | 51 ++++++++++++++++++++-------------- 5 files changed, 89 insertions(+), 31 deletions(-) create mode 100644 mishards/exception_handlers.py diff --git a/mishards/connections.py b/mishards/connections.py index c6323f66f8..365dc60125 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -24,14 +24,14 @@ class Connection: def __str__(self): return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri) - def _connect(self): + def _connect(self, metadata=None): try: self.conn.connect(uri=self.uri) except Exception as e: if not self.error_handlers: - raise exceptions.ConnectionConnectError(e) + raise exceptions.ConnectionConnectError(message=str(e), metadata=metadata) for handler in self.error_handlers: - handler(e) + handler(e, metadata=metadata) @property def can_retry(self): @@ -47,14 +47,15 @@ class Connection: else: logger.warn('{} is retrying 
{}'.format(self, self.retried)) - def on_connect(self): + def on_connect(self, metadata=None): while not self.connected and self.can_retry: self.retried += 1 self.on_retry() - self._connect() + self._connect(metadata=metadata) if not self.can_retry and not self.connected: - raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry)) + raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry), + metadata=metadata) self.retried = 0 @@ -81,14 +82,15 @@ class ConnectionMgr: def conn_names(self): return set(self.metas.keys()) - set(['WOSERVER']) - def conn(self, name, throw=False): + def conn(self, name, metadata, throw=False): c = self.conns.get(name, None) if not c: url = self.metas.get(name, None) if not url: if not throw: return None - raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name)) + raise exceptions.ConnectionNotFoundError(message='Connection {} not found'.format(name), + metadata=metadata) this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) threaded = { threading.get_ident() : this_conn } @@ -103,7 +105,8 @@ class ConnectionMgr: if not url: if not throw: return None - raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name), + metadata=metadata) this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) c[tid] = this_conn return this_conn
diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py new file mode 100644 index 0000000000..3de0918be4 --- /dev/null +++ b/mishards/exception_handlers.py @@ -0,0 +1,35 @@ +import logging +from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 +from mishards import server, exceptions + +logger = logging.getLogger(__name__) + +def resp_handler(err, error_code): + if not isinstance(err, exceptions.BaseException): + return status_pb2.Status(error_code=error_code, reason=str(err)) + + status = status_pb2.Status(error_code=error_code, reason=err.message) + + if err.metadata is None: + return status + + resp_class = err.metadata.get('resp_class', None) + if not resp_class: + return status + + if resp_class == milvus_pb2.BoolReply: + return resp_class(status=status, bool_reply=False) + + if resp_class == milvus_pb2.VectorIds: + return resp_class(status=status, vector_id_array=[]) + + if resp_class == milvus_pb2.TopKQueryResultList: + return resp_class(status=status, topk_query_result=[]) + + status.error_code = status_pb2.UNEXPECTED_ERROR + return status + +@server.error_handler(exceptions.TableNotFoundError) +def TableNotFoundErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.TABLE_NOT_EXISTS)
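The resp_class dispatch in resp_handler above is what lets a single error handler answer any RPC with a correctly typed empty reply instead of a bare Status. A sketch of the path a failed HasTable call would take; the table id is an assumed value:

    # Illustrative only: build the typed failure reply the way resp_handler does.
    from milvus.grpc_gen import milvus_pb2, status_pb2
    from mishards import exceptions
    from mishards.exception_handlers import resp_handler

    err = exceptions.TableNotFoundError('demo_table',   # assumed table id
                                        metadata={'resp_class': milvus_pb2.BoolReply})
    reply = resp_handler(err, status_pb2.TABLE_NOT_EXISTS)
    # reply.status carries TABLE_NOT_EXISTS and reply.bool_reply is False,
    # so the client still parses a well-formed BoolReply.

diff --git a/mishards/exceptions.py b/mishards/exceptions.py index 0f89ecb52d..1579fefcf4 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -1,4 +1,4 @@ import mishards.exception_codes as codes class BaseException(Exception): code = codes.INVALID_CODE message = 'BaseException' - def __init__(self, message=''): + def __init__(self, message='', metadata=None): self.message = self.__class__.__name__ if not message else message + self.metadata = metadata class ConnectionConnectError(BaseException): code = codes.CONNECT_ERROR_CODE
diff --git a/mishards/server.py b/mishards/server.py index 9966360d47..b000016e29 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -2,6 +2,7 @@ import logging import grpc import time import socket +import inspect from 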
urllib.parse import urlparse from functools import wraps from concurrent import futures @@ -16,6 +17,7 @@ logger = logging.getLogger(__name__) class Server: def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs): self.pre_run_handlers = set() + self.error_handler = {} self.exit_flag = False self.port = int(port) self.conn_mgr = conn_mgr @@ -40,6 +42,14 @@ class Server: self.pre_run_handlers.add(func) return func + def errorhandler(self, exception): + if inspect.isclass(exception) and issubclass(exception, Exception): + def wrapper(func): + self.error_handlers[exception] = func + return func + return wrapper + return exception + def on_pre_run(self): for handler in self.pre_run_handlers: handler() diff --git a/mishards/service_handler.py b/mishards/service_handler.py index f88655d2d6..5346be91d8 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -25,18 +25,17 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): self.conn_mgr = conn_mgr self.table_meta = {} - @property - def connection(self): + def connection(self, metadata=None): conn = self.conn_mgr.conn('WOSERVER') if conn: - conn.on_connect() + conn.on_connect(metadata=metadata) return conn.conn - def query_conn(self, name): - conn = self.conn_mgr.conn(name) + def query_conn(self, name, metadata=None): + conn = self.conn_mgr.conn(name, metadata=metadata) if not conn: - raise exceptions.ConnectionNotFoundError(name) - conn.on_connect() + raise exceptions.ConnectionNotFoundError(name, metadata=metadata) + conn.on_connect(metadata=metadata) return conn.conn def _format_date(self, start, end): @@ -55,14 +54,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return self._format_date(start, end) - def _get_routing_file_ids(self, table_id, range_array): + def _get_routing_file_ids(self, table_id, range_array, metadata=None): table = db.Session.query(Tables).filter(and_( Tables.table_id==table_id, Tables.state!=Tables.TO_DELETE )).first() if not table: - raise exceptions.TableNotFoundError(table_id) + raise exceptions.TableNotFoundError(table_id, metadata=metadata) files = table.files_to_search(range_array) servers = self.conn_mgr.conn_names @@ -84,7 +83,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return routing - def _do_merge(self, files_n_topk_results, topk, reverse=False): + def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): if not files_n_topk_results: return [] @@ -111,9 +110,11 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): range_array = [self._range_to_date(r) for r in range_array] if range_array else None - routing = self._get_routing_file_ids(table_id, range_array) + routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) + metadata = kwargs.get('metadata', None) + rs = [] all_topk_results = [] @@ -124,7 +125,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): addr, query_params, len(vectors), topk, nprobe )) - conn = self.query_conn(addr) + conn = self.query_conn(addr, metadata=metadata) start = time.time() ret = conn.search_vectors_in_files(table_name=query_params['table_id'], file_ids=query_params['file_ids'], @@ -146,7 +147,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): res.result() reverse = table_meta.metric_type == types.MetricType.IP - return self._do_merge(all_topk_results, topk, reverse=reverse) + return 
self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) def CreateTable(self, request, context): _status, _table_schema = Parser.parse_proto_TableSchema(request) @@ -156,7 +157,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('CreateTable {}'.format(_table_schema['table_name'])) - _status = self.connection.create_table(_table_schema) + _status = self.connection().create_table(_table_schema) return status_pb2.Status(error_code=_status.code, reason=_status.message) @@ -171,7 +172,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('HasTable {}'.format(_table_name)) - _bool = self.connection.has_table(_table_name) + _bool = self.connection(metadata={ + 'resp_class': milvus_pb2.BoolReply + }).has_table(_table_name) return milvus_pb2.BoolReply( status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="OK"), @@ -186,7 +189,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('DropTable {}'.format(_table_name)) - _status = self.connection.delete_table(_table_name) + _status = self.connection().delete_table(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) @@ -201,14 +204,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('CreateIndex {}'.format(_table_name)) # TODO: interface create_table incompleted - _status = self.connection.create_index(_table_name, _index) + _status = self.connection().create_index(_table_name, _index) return status_pb2.Status(error_code=_status.code, reason=_status.message) def Insert(self, request, context): logger.info('Insert') # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' - _status, _ids = self.connection.add_vectors(None, None, insert_param=request) + _status, _ids = self.connection(metadata={ + 'resp_class': milvus_pb2.VectorIds + }).add_vectors(None, None, insert_param=request) return milvus_pb2.VectorIds( status=status_pb2.Status(error_code=_status.code, reason=_status.message), vector_id_array=_ids @@ -227,10 +232,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): raise exceptions.GRPCInvlidArgument('Invalid nprobe: {}'.format(nprobe)) table_meta = self.table_meta.get(table_name, None) + + metadata = { + 'resp_class': milvus_pb2.TopKQueryResultList + } if not table_meta: - status, info = self.connection.describe_table(table_name) + status, info = self.connection(metadata=metadata).describe_table(table_name) if not status.OK(): - raise exceptions.TableNotFoundError(table_name) + raise exceptions.TableNotFoundError(table_name, metadata=metadata) self.table_meta[table_name] = info table_meta = info @@ -248,7 +257,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): Range(query_range.start_value, query_range.end_value)) results = self._do_query(table_name, table_meta, query_record_array, topk, - nprobe, query_range_array) + nprobe, query_range_array, metadata=metadata) now = time.time() logger.info('SearchVector takes: {}'.format(now - start)) From eb9174f2d91355c218c4e256a7361d68e776b79e Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 09:56:19 +0800 Subject: [PATCH 021/307] optimize exception handlers --- mishards/__init__.py | 2 ++ mishards/exception_codes.py | 1 + mishards/exception_handlers.py | 12 +++++++++-- mishards/exceptions.py | 3 +++ mishards/grpc_utils/__init__.py | 3 +++ mishards/server.py | 26 ++++++++++++++++++++++-- mishards/service_handler.py | 36 ++++++++++++++++++++++++++------- 7 files changed, 72 
insertions(+), 11 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index a792cd5ce9..8105e7edc8 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -17,3 +17,5 @@ discover = ServiceFounder(namespace=settings.SD_NAMESPACE, from mishards.server import Server grpc_server = Server(conn_mgr=connect_mgr) + +from mishards import exception_handlers diff --git a/mishards/exception_codes.py b/mishards/exception_codes.py index 32b29bdfab..37492f25d4 100644 --- a/mishards/exception_codes.py +++ b/mishards/exception_codes.py @@ -4,3 +4,4 @@ CONNECT_ERROR_CODE = 10001 CONNECTTION_NOT_FOUND_CODE = 10002 TABLE_NOT_FOUND_CODE = 20001 +INVALID_ARGUMENT = 20002 diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index 3de0918be4..6207f2088c 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -1,6 +1,6 @@ import logging from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 -from mishards import server, exceptions +from mishards import grpc_server as server, exceptions logger = logging.getLogger(__name__) @@ -26,10 +26,18 @@ def resp_handler(err, error_code): if resp_class == milvus_pb2.TopKQueryResultList: return resp_class(status=status, topk_query_result=[]) + if resp_class == milvus_pb2.TableRowCount: + return resp_class(status=status, table_row_count=-1) + status.error_code = status_pb2.UNEXPECTED_ERROR return status -@server.error_handler(exceptions.TableNotFoundError) +@server.errorhandler(exceptions.TableNotFoundError) def TableNotFoundErrorHandler(err): logger.error(err) return resp_handler(err, status_pb2.TABLE_NOT_EXISTS) + +@server.errorhandler(exceptions.InvalidArgumentError) +def InvalidArgumentErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_ARGUMENT) diff --git a/mishards/exceptions.py b/mishards/exceptions.py index 1579fefcf4..4686cf674f 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -15,3 +15,6 @@ class ConnectionNotFoundError(BaseException): class TableNotFoundError(BaseException): code = codes.TABLE_NOT_FOUND_CODE + +class InvalidArgumentError(BaseException): + code = codes.INVALID_ARGUMENT diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py index e69de29bb2..959d5549c7 100644 --- a/mishards/grpc_utils/__init__.py +++ b/mishards/grpc_utils/__init__.py @@ -0,0 +1,3 @@ +def mark_grpc_method(func): + setattr(func, 'grpc_method', True) + return func diff --git a/mishards/server.py b/mishards/server.py index b000016e29..9cca096b6b 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -7,6 +7,7 @@ from urllib.parse import urlparse from functools import wraps from concurrent import futures from grpc._cython import cygrpc +from grpc._channel import _Rendezvous, _UnaryUnaryMultiCallable from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.service_handler import ServiceHandler from mishards import settings, discover @@ -17,7 +18,8 @@ logger = logging.getLogger(__name__) class Server: def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs): self.pre_run_handlers = set() - self.error_handler = {} + self.grpc_methods = set() + self.error_handlers = {} self.exit_flag = False self.port = int(port) self.conn_mgr = conn_mgr @@ -42,6 +44,18 @@ class Server: self.pre_run_handlers.add(func) return func + def wrap_method_with_errorhandler(self, func): + @wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + if 
e.__class__ in self.error_handlers: + return self.error_handlers[e.__class__](e) + raise + + return wrapper + def errorhandler(self, exception): if inspect.isclass(exception) and issubclass(exception, Exception): def wrapper(func): @@ -56,7 +70,8 @@ class Server: discover.start() def start(self, port=None): - add_MilvusServiceServicer_to_server(ServiceHandler(conn_mgr=self.conn_mgr), self.server_impl) + handler_class = self.add_error_handlers(ServiceHandler) + add_MilvusServiceServicer_to_server(handler_class(conn_mgr=self.conn_mgr), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format(str(port or self._port))) self.server_impl.start() @@ -80,3 +95,10 @@ class Server: self.exit_flag = True self.server_impl.stop(0) logger.info('Server is closed') + + def add_error_handlers(self, target): + for key, attr in target.__dict__.items(): + is_grpc_method = getattr(attr, 'grpc_method', False) + if is_grpc_method: + setattr(target, key, self.wrap_method_with_errorhandler(attr)) + return target diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 5346be91d8..acc04c5eee 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -12,6 +12,7 @@ from milvus.grpc_gen.milvus_pb2 import TopKQueryResult from milvus.client import types from mishards import (db, settings, exceptions) +from mishards.grpc_utils import mark_grpc_method from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser from mishards.models import Tables, TableFiles from mishards.hash_ring import HashRing @@ -24,9 +25,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def __init__(self, conn_mgr, *args, **kwargs): self.conn_mgr = conn_mgr self.table_meta = {} + self.error_handlers = {} def connection(self, metadata=None): - conn = self.conn_mgr.conn('WOSERVER') + conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) if conn: conn.on_connect(metadata=metadata) return conn.conn @@ -149,6 +151,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reverse = table_meta.metric_type == types.MetricType.IP return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) + @mark_grpc_method def CreateTable(self, request, context): _status, _table_schema = Parser.parse_proto_TableSchema(request) @@ -161,6 +164,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) + @mark_grpc_method def HasTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -181,6 +185,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): bool_reply=_bool ) + @mark_grpc_method def DropTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -193,6 +198,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) + @mark_grpc_method def CreateIndex(self, request, context): _status, unpacks = Parser.parse_proto_IndexParam(request) @@ -208,6 +214,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) + @mark_grpc_method def Insert(self, request, context): logger.info('Insert') # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' @@ -219,6 +226,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): vector_id_array=_ids ) + @mark_grpc_method def Search(self, request, context): table_name = 
request.table_name @@ -228,14 +236,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('Search {}: topk={} nprobe={}'.format(table_name, topk, nprobe)) - if nprobe > self.MAX_NPROBE or nprobe <= 0: - raise exceptions.GRPCInvlidArgument('Invalid nprobe: {}'.format(nprobe)) - - table_meta = self.table_meta.get(table_name, None) - metadata = { 'resp_class': milvus_pb2.TopKQueryResultList } + + if nprobe > self.MAX_NPROBE or nprobe <= 0: + raise exceptions.InvalidArgumentError(message='Invalid nprobe: {}'.format(nprobe), + metadata=metadata) + + table_meta = self.table_meta.get(table_name, None) + if not table_meta: status, info = self.connection(metadata=metadata).describe_table(table_name) if not status.OK(): @@ -268,9 +278,11 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): ) return topk_result_list + @mark_grpc_method def SearchInFiles(self, request, context): raise NotImplementedError() + @mark_grpc_method def DescribeTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -304,6 +316,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): ) ) + @mark_grpc_method def CountTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -316,12 +329,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('CountTable {}'.format(_table_name)) - _status, _count = self.connection.get_table_row_count(_table_name) + metadata = { + 'resp_class': milvus_pb2.TableRowCount + } + _status, _count = self.connection(metadata=metadata).get_table_row_count(_table_name) return milvus_pb2.TableRowCount( status=status_pb2.Status(error_code=_status.code, reason=_status.message), table_row_count=_count if isinstance(_count, int) else -1) + @mark_grpc_method def Cmd(self, request, context): _status, _cmd = Parser.parse_proto_Command(request) logger.info('Cmd: {}'.format(_cmd)) @@ -341,6 +358,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): string_reply=_reply ) + @mark_grpc_method def ShowTables(self, request, context): logger.info('ShowTables') _status, _results = self.connection.show_tables() @@ -354,6 +372,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): table_name=_result ) + @mark_grpc_method def DeleteByRange(self, request, context): _status, unpacks = \ Parser.parse_proto_DeleteByRangeParam(request) @@ -367,6 +386,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status = self.connection.delete_vectors_by_range(_table_name, _start_date, _end_date) return status_pb2.Status(error_code=_status.code, reason=_status.message) + @mark_grpc_method def PreloadTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -377,6 +397,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status = self.connection.preload_table(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) + @mark_grpc_method def DescribeIndex(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -397,6 +418,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return milvus_pb2.IndexParam(table_name=_table_name, index=_index) + @mark_grpc_method def DropIndex(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request)
From 1144f6798dcef8ec6422a373f169ba72ddd11f34 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 10:20:25 +0800 Subject: [PATCH 022/307] fix bug in service handler --- 
mishards/service_handler.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index acc04c5eee..128667d9b6 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -112,6 +112,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): range_array = [self._range_to_date(r) for r in range_array] if range_array else None + metadata = kwargs.get('metadata', None) routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) From 33fe3b1bdee22e56a4288a1f65cff50263323954 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 10:44:26 +0800 Subject: [PATCH 023/307] add more exception handlers --- mishards/exception_codes.py | 4 +++- mishards/exception_handlers.py | 13 +++++++++++++ mishards/exceptions.py | 8 +++++++- mishards/service_handler.py | 25 +++++++++++++++++-------- 4 files changed, 40 insertions(+), 10 deletions(-) diff --git a/mishards/exception_codes.py b/mishards/exception_codes.py index 37492f25d4..ecb2469562 100644 --- a/mishards/exception_codes.py +++ b/mishards/exception_codes.py @@ -2,6 +2,8 @@ INVALID_CODE = -1 CONNECT_ERROR_CODE = 10001 CONNECTTION_NOT_FOUND_CODE = 10002 +DB_ERROR_CODE = 10003 TABLE_NOT_FOUND_CODE = 20001 -INVALID_ARGUMENT = 20002 +INVALID_ARGUMENT_CODE = 20002 +INVALID_DATE_RANGE_CODE = 20003 diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index 6207f2088c..2518b64b3e 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -29,6 +29,9 @@ def resp_handler(err, error_code): if resp_class == milvus_pb2.TableRowCount: return resp_class(status=status, table_row_count=-1) + if resp_class == milvus_pb2.TableName: + return resp_class(status=status, table_name=[]) + status.error_code = status_pb2.UNEXPECTED_ERROR return status @@ -41,3 +44,13 @@ def TableNotFoundErrorHandler(err): def InvalidArgumentErrorHandler(err): logger.error(err) return resp_handler(err, status_pb2.ILLEGAL_ARGUMENT) + +@server.errorhandler(exceptions.DBError) +def DBErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.UNEXPECTED_ERROR) + +@server.errorhandler(exceptions.InvalidRangeError) +def InvalidArgumentErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_RANGE) diff --git a/mishards/exceptions.py b/mishards/exceptions.py index 4686cf674f..2aa2b39eb9 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -13,8 +13,14 @@ class ConnectionConnectError(BaseException): class ConnectionNotFoundError(BaseException): code = codes.CONNECTTION_NOT_FOUND_CODE +class DBError(BaseException): + code = codes.DB_ERROR_CODE + class TableNotFoundError(BaseException): code = codes.TABLE_NOT_FOUND_CODE class InvalidArgumentError(BaseException): - code = codes.INVALID_ARGUMENT + code = codes.INVALID_ARGUMENT_CODE + +class InvalidRangeError(BaseException): + code = codes.INVALID_DATE_RANGE_CODE diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 128667d9b6..536a17c4e3 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -5,10 +5,12 @@ from contextlib import contextmanager from collections import defaultdict from sqlalchemy import and_ +from sqlalchemy import exc as sqlalchemy_exc from concurrent.futures import ThreadPoolExecutor from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 
from milvus.grpc_gen.milvus_pb2 import TopKQueryResult +from milvus.client.Abstract import Range from milvus.client import types from mishards import (db, settings, exceptions) @@ -44,7 +46,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return ((start.year-1900)*10000 + (start.month-1)*100 + start.day , (end.year-1900)*10000 + (end.month-1)*100 + end.day) - def _range_to_date(self, range_obj): + def _range_to_date(self, range_obj, metadata=None): try: start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') @@ -52,15 +54,19 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): except (ValueError, AssertionError): raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( range_obj.start_date, range_obj.end_date - )) + ), metadata=metadata) return self._format_date(start, end) def _get_routing_file_ids(self, table_id, range_array, metadata=None): - table = db.Session.query(Tables).filter(and_( - Tables.table_id==table_id, - Tables.state!=Tables.TO_DELETE - )).first() + # PXU TODO: Implement Thread-local Context + try: + table = db.Session.query(Tables).filter(and_( + Tables.table_id==table_id, + Tables.state!=Tables.TO_DELETE + )).first() + except sqlalchemy_exc.SQLAlchemyError as e: + raise exceptions.DBError(message=str(e), metadata=metadata) if not table: raise exceptions.TableNotFoundError(table_id, metadata=metadata) @@ -111,8 +117,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return topk_query_result def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): - range_array = [self._range_to_date(r) for r in range_array] if range_array else None metadata = kwargs.get('metadata', None) + range_array = [self._range_to_date(r, metadata=metadata) for r in range_array] if range_array else None routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) @@ -362,7 +368,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): @mark_grpc_method def ShowTables(self, request, context): logger.info('ShowTables') - _status, _results = self.connection.show_tables() + metadata = { + 'resp_class': milvus_pb2.TableName + } + _status, _results = self.connection(metadata=metadata).show_tables() if not _status.OK(): _results = [] From 1e2cc2eb6622a46aaa0ff17d230350605b430687 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 11:00:35 +0800 Subject: [PATCH 024/307] refactor sd --- mishards/__init__.py | 2 +- sd/__init__.py | 0 {mishards => sd}/service_founder.py | 0 3 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 sd/__init__.py rename {mishards => sd}/service_founder.py (100%) diff --git a/mishards/__init__.py b/mishards/__init__.py index 8105e7edc8..3158afa5b3 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -7,7 +7,7 @@ db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI, echo=settings.SQL_ECHO) from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() -from mishards.service_founder import ServiceFounder +from sd.service_founder import ServiceFounder discover = ServiceFounder(namespace=settings.SD_NAMESPACE, conn_mgr=connect_mgr, pod_patt=settings.SD_ROSERVER_POD_PATT, diff --git a/sd/__init__.py b/sd/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/mishards/service_founder.py b/sd/service_founder.py similarity index 100% rename from mishards/service_founder.py rename to 
sd/service_founder.py From 8569309644e752b128af402fa95d5575e3096604 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 11:08:14 +0800 Subject: [PATCH 025/307] refactor utils --- mishards/connections.py | 2 +- mishards/settings.py | 2 +- sd/service_founder.py | 2 +- {mishards/utils => utils}/__init__.py | 0 {mishards/utils => utils}/logger_helper.py | 0 5 files changed, 3 insertions(+), 3 deletions(-) rename {mishards/utils => utils}/__init__.py (100%) rename {mishards/utils => utils}/logger_helper.py (100%) diff --git a/mishards/connections.py b/mishards/connections.py index 365dc60125..7307c2a489 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -6,7 +6,7 @@ from contextlib import contextmanager from milvus import Milvus from mishards import (settings, exceptions) -from mishards.utils import singleton +from utils import singleton logger = logging.getLogger(__name__) diff --git a/mishards/settings.py b/mishards/settings.py index 2bf7e96a8f..f99bd3b3c6 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -13,7 +13,7 @@ LOG_PATH = env.str('LOG_PATH', '/tmp/mishards') LOG_NAME = env.str('LOG_NAME', 'logfile') TIMEZONE = env.str('TIMEZONE', 'UTC') -from mishards.utils.logger_helper import config +from utils.logger_helper import config config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') diff --git a/sd/service_founder.py b/sd/service_founder.py index f1a37a440b..79292d452f 100644 --- a/sd/service_founder.py +++ b/sd/service_founder.py @@ -11,7 +11,7 @@ import queue from functools import wraps from kubernetes import client, config, watch -from mishards.utils import singleton +from utils import singleton logger = logging.getLogger(__name__) diff --git a/mishards/utils/__init__.py b/utils/__init__.py similarity index 100% rename from mishards/utils/__init__.py rename to utils/__init__.py diff --git a/mishards/utils/logger_helper.py b/utils/logger_helper.py similarity index 100% rename from mishards/utils/logger_helper.py rename to utils/logger_helper.py From b4ed4b2e35c3119290b29f1539c2cf37aca7cebd Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 12:17:13 +0800 Subject: [PATCH 026/307] refactor kubernetes service provider --- mishards/__init__.py | 11 ++-- mishards/settings.py | 16 +++-- sd/__init__.py | 27 ++++++++ ...vice_founder.py => kubernetes_provider.py} | 62 ++++++++++++++----- 4 files changed, 90 insertions(+), 26 deletions(-) rename sd/{service_founder.py => kubernetes_provider.py} (83%) diff --git a/mishards/__init__.py b/mishards/__init__.py index 3158afa5b3..55b24c082c 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -7,13 +7,10 @@ db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI, echo=settings.SQL_ECHO) from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() -from sd.service_founder import ServiceFounder -discover = ServiceFounder(namespace=settings.SD_NAMESPACE, - conn_mgr=connect_mgr, - pod_patt=settings.SD_ROSERVER_POD_PATT, - label_selector=settings.SD_LABEL_SELECTOR, - in_cluster=settings.SD_IN_CLUSTER, - poll_interval=settings.SD_POLL_INTERVAL) +from sd import ProviderManager + +sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) +discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) from mishards.server import Server grpc_server = Server(conn_mgr=connect_mgr) diff --git a/mishards/settings.py b/mishards/settings.py index f99bd3b3c6..046508f92c 100644 --- 
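The relocated utils package exports the singleton decorator used on the providers below; its body is not shown in this series, but its use as a class decorator suggests something like the following hypothetical sketch (not the actual utils code). Note the decorator order on the providers: register_service_provider is applied first and stores the raw class, while singleton wraps whatever the module-level name ends up pointing at.

def singleton(cls):
    instances = {}

    def get_instance(*args, **kwargs):
        # construct on first call, then hand back the cached instance
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]
    return get_instance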
a/mishards/settings.py +++ b/mishards/settings.py @@ -26,11 +26,17 @@ SEARCH_WORKER_SIZE = env.int('SEARCH_WORKER_SIZE', 10) SERVER_PORT = env.int('SERVER_PORT', 19530) WOSERVER = env.str('WOSERVER') -SD_NAMESPACE = env.str('SD_NAMESPACE', '') -SD_IN_CLUSTER = env.bool('SD_IN_CLUSTER', False) -SD_POLL_INTERVAL = env.int('SD_POLL_INTERVAL', 5) -SD_ROSERVER_POD_PATT = env.str('SD_ROSERVER_POD_PATT', '') -SD_LABEL_SELECTOR = env.str('SD_LABEL_SELECTOR', '') +SD_PROVIDER_SETTINGS = None +SD_PROVIDER = env.str('SD_PROVIDER', 'Kubernetes') +if SD_PROVIDER == 'Kubernetes': + from sd.kubernetes_provider import KubernetesProviderSettings + SD_PROVIDER_SETTINGS = KubernetesProviderSettings( + namespace=env.str('SD_NAMESPACE', ''), + in_cluster=env.bool('SD_IN_CLUSTER', False), + poll_interval=env.int('SD_POLL_INTERVAL', 5), + pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), + label_selector=env.str('SD_LABEL_SELECTOR', '') + ) TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') diff --git a/sd/__init__.py b/sd/__init__.py index e69de29bb2..5c37bc621b 100644 --- a/sd/__init__.py +++ b/sd/__init__.py @@ -0,0 +1,27 @@ +import logging +import inspect +# from utils import singleton + +logger = logging.getLogger(__name__) + + +class ProviderManager: + PROVIDERS = {} + + @classmethod + def register_service_provider(cls, target): + if inspect.isfunction(target): + cls.PROVIDERS[target.__name__] = target + elif inspect.isclass(target): + name = target.__dict__.get('NAME', None) + name = name if name else target.__class__.__name__ + cls.PROVIDERS[name] = target + else: + assert False, 'Cannot register_service_provider for: {}'.format(target) + return target + + @classmethod + def get_provider(cls, name): + return cls.PROVIDERS.get(name, None) + +from sd import kubernetes_provider diff --git a/sd/service_founder.py b/sd/kubernetes_provider.py similarity index 83% rename from sd/service_founder.py rename to sd/kubernetes_provider.py index 79292d452f..51665a0cb5 100644 --- a/sd/service_founder.py +++ b/sd/kubernetes_provider.py @@ -12,6 +12,7 @@ from functools import wraps from kubernetes import client, config, watch from utils import singleton +from sd import ProviderManager logger = logging.getLogger(__name__) @@ -32,7 +33,7 @@ class K8SMixin: self.v1 = client.CoreV1Api() -class K8SServiceDiscover(threading.Thread, K8SMixin): +class K8SHeartbeatHandler(threading.Thread, K8SMixin): def __init__(self, message_queue, namespace, label_selector, in_cluster=False, **kwargs): K8SMixin.__init__(self, namespace=namespace, in_cluster=in_cluster, **kwargs) threading.Thread.__init__(self) @@ -202,13 +203,26 @@ class EventHandler(threading.Thread): except queue.Empty: continue -@singleton -class ServiceFounder(object): - def __init__(self, conn_mgr, namespace, pod_patt, label_selector, in_cluster=False, **kwargs): +class KubernetesProviderSettings: + def __init__(self, namespace, pod_patt, label_selector, in_cluster, poll_interval, **kwargs): self.namespace = namespace + self.pod_patt = pod_patt + self.label_selector = label_selector + self.in_cluster = in_cluster + self.poll_interval = poll_interval + +@singleton +@ProviderManager.register_service_provider +class KubernetesProvider(object): + NAME = 'Kubernetes' + def __init__(self, settings, conn_mgr, **kwargs): + self.namespace = settings.namespace + self.pod_patt = settings.pod_patt + self.label_selector = settings.label_selector + self.in_cluster = settings.in_cluster + self.poll_interval = settings.poll_interval 
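The ProviderManager registry is easy to exercise in isolation. One subtlety in register_service_provider: for a class without a NAME attribute, the fallback target.__class__.__name__ is the metaclass name ('type'), not the class's own name, which is presumably why both providers declare NAME explicitly. A quick check with a stand-in class:

from sd import ProviderManager

class DummyProvider:
    NAME = 'Dummy'   # required; the fallback would register this class as 'type'

    def __init__(self, settings, conn_mgr, **kwargs):
        self.settings = settings
        self.conn_mgr = conn_mgr

ProviderManager.register_service_provider(DummyProvider)
assert ProviderManager.get_provider('Dummy') is DummyProvider
assert ProviderManager.get_provider('missing') is None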
self.kwargs = kwargs self.queue = queue.Queue() - self.in_cluster = in_cluster self.conn_mgr = conn_mgr @@ -226,19 +240,20 @@ class ServiceFounder(object): **kwargs ) - self.pod_heartbeater = K8SServiceDiscover( + self.pod_heartbeater = K8SHeartbeatHandler( message_queue=self.queue, - namespace=namespace, - label_selector=label_selector, + namespace=self.namespace, + label_selector=self.label_selector, in_cluster=self.in_cluster, v1=self.v1, + poll_interval=self.poll_interval, **kwargs ) self.event_handler = EventHandler(mgr=self, message_queue=self.queue, namespace=self.namespace, - pod_patt=pod_patt, **kwargs) + pod_patt=self.pod_patt, **kwargs) def add_pod(self, name, ip): self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) @@ -250,8 +265,6 @@ class ServiceFounder(object): self.listener.daemon = True self.listener.start() self.event_handler.start() - # while self.listener.at_start_up: - # time.sleep(1) self.pod_heartbeater.start() @@ -262,11 +275,32 @@ class ServiceFounder(object): if __name__ == '__main__': - from mishards import connect_mgr logging.basicConfig(level=logging.INFO) - t = ServiceFounder(namespace='xp', conn_mgr=connect_mgr, pod_patt=".*-ro-servers-.*", label_selector='tier=ro-servers', in_cluster=False) + class Connect: + def register(self, name, value): + logger.error('Register: {} - {}'.format(name, value)) + def unregister(self, name): + logger.error('Unregister: {}'.format(name)) + + @property + def conn_names(self): + return set() + + connect_mgr = Connect() + + settings = KubernetesProviderSettings( + namespace='xp', + pod_patt=".*-ro-servers-.*", + label_selector='tier=ro-servers', + poll_interval=5, + in_cluster=False) + + provider_class = ProviderManager.get_provider('Kubernetes') + t = provider_class(conn_mgr=connect_mgr, + settings=settings + ) t.start() - cnt = 2 + cnt = 100 while cnt > 0: time.sleep(2) cnt -= 1 From 6acddae13095080d8a60abfcafa6e6cca354a6bf Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 12:53:13 +0800 Subject: [PATCH 027/307] add static provider --- mishards/settings.py | 5 +++++ sd/__init__.py | 2 +- sd/static_provider.py | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 sd/static_provider.py diff --git a/mishards/settings.py b/mishards/settings.py index 046508f92c..46221c5f98 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -37,6 +37,11 @@ if SD_PROVIDER == 'Kubernetes': pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), label_selector=env.str('SD_LABEL_SELECTOR', '') ) +elif SD_PROVIDER == 'Static': + from sd.static_provider import StaticProviderSettings + SD_PROVIDER_SETTINGS = StaticProviderSettings( + hosts=env.list('SD_STATIC_HOSTS', []) + ) TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') diff --git a/sd/__init__.py b/sd/__init__.py index 5c37bc621b..6dfba5ddc1 100644 --- a/sd/__init__.py +++ b/sd/__init__.py @@ -24,4 +24,4 @@ class ProviderManager: def get_provider(cls, name): return cls.PROVIDERS.get(name, None) -from sd import kubernetes_provider +from sd import kubernetes_provider, static_provider diff --git a/sd/static_provider.py b/sd/static_provider.py new file mode 100644 index 0000000000..73ae483b34 --- /dev/null +++ b/sd/static_provider.py @@ -0,0 +1,32 @@ +import os, sys +if __name__ == '__main__': + sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from utils import singleton +from sd import ProviderManager + +class StaticProviderSettings: + def 
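The __main__ demo shows the whole surface a provider needs from its connection manager: register(name, url), unregister(name), and the conn_names property. A recording stand-in like this one (hypothetical, for tests) is enough to exercise any provider without a cluster:

class RecordingConnMgr:
    def __init__(self):
        self.registered = {}

    def register(self, name, url):
        self.registered[name] = url

    def unregister(self, name):
        self.registered.pop(name, None)

    @property
    def conn_names(self):
        return set(self.registered)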
__init__(self, hosts): + self.hosts = hosts + +@singleton +@ProviderManager.register_service_provider +class KubernetesProvider(object): + NAME = 'Static' + def __init__(self, settings, conn_mgr, **kwargs): + self.conn_mgr = conn_mgr + self.hosts = settings.hosts + + def start(self): + for host in self.hosts: + self.add_pod(host, host) + + def stop(self): + for host in self.hosts: + self.delete_pod(host) + + def add_pod(self, name, ip): + self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) + + def delete_pod(self, name): + self.conn_mgr.unregister(name) From ce95b50143ed4a57cacd414eeece12cb6d1fe638 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 13:32:29 +0800 Subject: [PATCH 028/307] support sqlite --- mishards/db_base.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mishards/db_base.py b/mishards/db_base.py index ffbe29f94f..3b2c699864 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -1,5 +1,6 @@ import logging from sqlalchemy import create_engine +from sqlalchemy.engine.url import make_url from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker, scoped_session @@ -12,7 +13,11 @@ class DB: uri and self.init_db(uri, echo) def init_db(self, uri, echo=False): - self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, + url = make_url(uri) + if url.get_backend_name() == 'sqlite': + self.engine = create_engine(url) + else: + self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, pool_pre_ping=True, echo=echo, max_overflow=0) From 76eb24484765a3771797701f3498af7ab37b744e Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 14:08:57 +0800 Subject: [PATCH 029/307] fix exception handler used in service handler --- mishards/exception_handlers.py | 18 ++++++++++++++++++ mishards/service_handler.py | 28 ++++++++++++++++++++-------- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index 2518b64b3e..a2659f91af 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -32,6 +32,24 @@ def resp_handler(err, error_code): if resp_class == milvus_pb2.TableName: return resp_class(status=status, table_name=[]) + if resp_class == milvus_pb2.StringReply: + return resp_class(status=status, string_reply='') + + if resp_class == milvus_pb2.TableSchema: + table_name = milvus_pb2.TableName( + status=status + ) + return milvus_pb2.TableSchema( + table_name=table_name + ) + + if resp_class == milvus_pb2.IndexParam: + return milvus_pb2.IndexParam( + table_name=milvus_pb2.TableName( + status=status + ) + ) + status.error_code = status_pb2.UNEXPECTED_ERROR return status diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 536a17c4e3..f39ad3ef46 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -50,7 +50,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): try: start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') - assert start >= end + assert start < end except (ValueError, AssertionError): raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( range_obj.start_date, range_obj.end_date @@ -301,8 +301,12 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): table_name=table_name ) + metadata = { + 'resp_class': milvus_pb2.TableSchema + } + logger.info('DescribeTable {}'.format(_table_name)) - 
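The engine branch added in patch 028, condensed: queue-pool arguments such as pool_size are not valid for SQLite, whose dialect uses a different pool class, so the URI backend has to be inspected before building the engine. A standalone sketch, assuming the SQLAlchemy 1.3 pinned in requirements.txt:

from sqlalchemy import create_engine
from sqlalchemy.engine.url import make_url

def engine_for(uri, echo=False):
    url = make_url(uri)
    if url.get_backend_name() == 'sqlite':
        # sqlite (e.g. sqlite:///:memory: for tests) rejects QueuePool kwargs
        return create_engine(url)
    return create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30,
                         pool_pre_ping=True, echo=echo, max_overflow=0)

engine_for('sqlite:///:memory:')   # plain engine, test friendly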
_status, _table = self.connection.describe_table(_table_name) + _status, _table = self.connection(metadata=metadata).describe_table(_table_name) if _status.OK(): _grpc_table_name = milvus_pb2.TableName( @@ -355,10 +359,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): status_pb2.Status(error_code=_status.code, reason=_status.message) ) + metadata = { + 'resp_class': milvus_pb2.StringReply + } + if _cmd == 'version': - _status, _reply = self.connection.server_version() + _status, _reply = self.connection(metadata=metadata).server_version() else: - _status, _reply = self.connection.server_status() + _status, _reply = self.connection(metadata=metadata).server_status() return milvus_pb2.StringReply( status=status_pb2.Status(error_code=_status.code, reason=_status.message), @@ -393,7 +401,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _table_name, _start_date, _end_date = unpacks logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, _end_date)) - _status = self.connection.delete_vectors_by_range(_table_name, _start_date, _end_date) + _status = self.connection().delete_vectors_by_range(_table_name, _start_date, _end_date) return status_pb2.Status(error_code=_status.code, reason=_status.message) @mark_grpc_method @@ -404,7 +412,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) logger.info('PreloadTable {}'.format(_table_name)) - _status = self.connection.preload_table(_table_name) + _status = self.connection().preload_table(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) @mark_grpc_method @@ -418,8 +426,12 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): ) ) + metadata = { + 'resp_class': milvus_pb2.IndexParam + } + logger.info('DescribeIndex {}'.format(_table_name)) - _status, _index_param = self.connection.describe_index(_table_name) + _status, _index_param = self.connection(metadata=metadata).describe_index(_table_name) _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) _tablename = milvus_pb2.TableName( @@ -436,5 +448,5 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) logger.info('DropIndex {}'.format(_table_name)) - _status = self.connection.drop_index(_table_name) + _status = self.connection().drop_index(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) From bc056a282929dab4b0e45f2101b3dbef8a28e0a7 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 14:13:53 +0800 Subject: [PATCH 030/307] add more print info at startup --- mishards/connections.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mishards/connections.py b/mishards/connections.py index 7307c2a489..35c5d6c3bd 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -139,6 +139,7 @@ class ConnectionMgr: logger.warn('Non-existed meta: {}'.format(name)) def register(self, name, url): + logger.info('Register Connection: name={};url={}'.format(name, url)) meta = self.metas.get(name) if not meta: return self.on_new_meta(name, url) @@ -146,6 +147,7 @@ class ConnectionMgr: return self.on_duplicate_meta(name, url) def unregister(self, name): + logger.info('Unregister Connection: name={}'.format(name)) url = self.metas.pop(name, None) if url is None: return self.on_nonexisted_meta(name) From a0a5965fc6c826accf02a64c743d45e636f5b687 Mon Sep 17 00:00:00 
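Worth isolating from patch 029: _range_to_date now asserts start < end, where the previous revision asserted start >= end and therefore rejected every valid range. A minimal reproduction of the corrected check (a sketch, raising a plain ValueError in place of InvalidRangeError):

import datetime

def validate_range(start_date, end_date):
    start = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    end = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    if not start < end:
        raise ValueError('Invalid time range: {} {}'.format(start_date, end_date))
    return start, end

validate_range('2019-09-01', '2019-09-21')     # ok
# validate_range('2019-09-21', '2019-09-01')   # raises: start must precede end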
2001 From: "peng.xu" Date: Wed, 25 Sep 2019 16:23:02 +0800 Subject: [PATCH 031/307] add tracing --- mishards/server.py | 34 ++++++++++++++++++++++++++++++++++ mishards/settings.py | 9 +++++++++ requirements.txt | 2 ++ 3 files changed, 45 insertions(+) diff --git a/mishards/server.py b/mishards/server.py index 9cca096b6b..4e44731f0e 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -8,12 +8,17 @@ from functools import wraps from concurrent import futures from grpc._cython import cygrpc from grpc._channel import _Rendezvous, _UnaryUnaryMultiCallable +from jaeger_client import Config +from grpc_opentracing import open_tracing_server_interceptor +from grpc_opentracing.grpcext import intercept_server from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.service_handler import ServiceHandler from mishards import settings, discover logger = logging.getLogger(__name__) +def empty_server_interceptor_decorator(target_server, interceptor): + return target_server class Server: def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs): @@ -23,12 +28,40 @@ class Server: self.exit_flag = False self.port = int(port) self.conn_mgr = conn_mgr + tracer_interceptor = None + self.tracer = None + interceptor_decorator = empty_server_interceptor_decorator + + if settings.TRACING_ENABLED: + tracer_config = Config(config={ + 'sampler': { + 'type': 'const', + 'param': 1, + }, + 'local_agent': { + 'reporting_host': settings.TracingConfig.TRACING_REPORTING_HOST, + 'reporting_port': settings.TracingConfig.TRACING_REPORTING_PORT + }, + 'logging': settings.TracingConfig.TRACING_LOGGING, + }, + service_name=settings.TracingConfig.TRACING_SERVICE_NAME, + validate=settings.TracingConfig.TRACING_VALIDATE + ) + + self.tracer = tracer_config.initialize_tracer() + tracer_interceptor = open_tracing_server_interceptor(self.tracer, + log_payloads=settings.TracingConfig.TRACING_LOG_PAYLOAD) + + interceptor_decorator = intercept_server + self.server_impl = grpc.server( thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), options=[(cygrpc.ChannelArgKey.max_send_message_length, -1), (cygrpc.ChannelArgKey.max_receive_message_length, -1)] ) + self.server_impl = interceptor_decorator(self.server_impl, tracer_interceptor) + self.register_pre_run_handler(self.pre_run_handler) def pre_run_handler(self): @@ -94,6 +127,7 @@ class Server: logger.info('Server is shuting down ......') self.exit_flag = True self.server_impl.stop(0) + self.tracer and self.tracer.close() logger.info('Server is closed') def add_error_handlers(self, target): diff --git a/mishards/settings.py b/mishards/settings.py index 46221c5f98..94b8998881 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -46,6 +46,15 @@ elif SD_PROVIDER == 'Static': TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') +TRACING_ENABLED = env.bool('TRACING_ENABLED', False) +class TracingConfig: + TRACING_LOGGING = env.bool('TRACING_LOGGING', True), + TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') + TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) + TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', DEBUG) + TRACING_REPORTING_HOST = env.str('TRACING_REPORTING_HOST', '127.0.0.1') + TRACING_REPORTING_PORT = env.str('TRACING_REPORTING_PORT', '5775') + if __name__ == '__main__': import logging diff --git a/requirements.txt b/requirements.txt index 8cedabdf7b..03db7aeed3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -31,3 +31,5 
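The tracer bootstrap that patch 031 embeds in Server, as a standalone snippet (assuming a local jaeger agent on the default 127.0.0.1:5775; the 'const'/1 sampler reports every span, which suits a demo more than production):

from jaeger_client import Config

config = Config(
    config={
        'sampler': {'type': 'const', 'param': 1},
        'local_agent': {'reporting_host': '127.0.0.1', 'reporting_port': '5775'},
        'logging': True,
    },
    service_name='mishards',
    validate=True,
)
tracer = config.initialize_tracer()   # an opentracing-compatible tracer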
@@ rsa==4.0 six==1.12.0 SQLAlchemy==1.3.5 urllib3==1.25.3 +jaeger-client>=3.4.0 +grpcio-opentracing>=1.0 From d4fb05688aa819f0761ed1017717a74e52a78873 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 25 Sep 2019 17:14:18 +0800 Subject: [PATCH 032/307] refactor tracing --- mishards/__init__.py | 5 ++++- mishards/server.py | 35 ++++------------------------------- mishards/settings.py | 2 +- tracing/__init__.py | 17 +++++++++++++++++ tracing/factory.py | 39 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 65 insertions(+), 33 deletions(-) create mode 100644 tracing/__init__.py create mode 100644 tracing/factory.py diff --git a/mishards/__init__.py b/mishards/__init__.py index 55b24c082c..640293c265 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -12,7 +12,10 @@ from sd import ProviderManager sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) +from tracing.factory import TracerFactory +tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig) + from mishards.server import Server -grpc_server = Server(conn_mgr=connect_mgr) +grpc_server = Server(conn_mgr=connect_mgr, tracer=tracer) from mishards import exception_handlers diff --git a/mishards/server.py b/mishards/server.py index 4e44731f0e..93d7e38826 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -9,19 +9,15 @@ from concurrent import futures from grpc._cython import cygrpc from grpc._channel import _Rendezvous, _UnaryUnaryMultiCallable from jaeger_client import Config -from grpc_opentracing import open_tracing_server_interceptor -from grpc_opentracing.grpcext import intercept_server from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.service_handler import ServiceHandler from mishards import settings, discover logger = logging.getLogger(__name__) -def empty_server_interceptor_decorator(target_server, interceptor): - return target_server class Server: - def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs): + def __init__(self, conn_mgr, tracer, port=19530, max_workers=10, **kwargs): self.pre_run_handlers = set() self.grpc_methods = set() self.error_handlers = {} @@ -29,30 +25,7 @@ class Server: self.port = int(port) self.conn_mgr = conn_mgr tracer_interceptor = None - self.tracer = None - interceptor_decorator = empty_server_interceptor_decorator - - if settings.TRACING_ENABLED: - tracer_config = Config(config={ - 'sampler': { - 'type': 'const', - 'param': 1, - }, - 'local_agent': { - 'reporting_host': settings.TracingConfig.TRACING_REPORTING_HOST, - 'reporting_port': settings.TracingConfig.TRACING_REPORTING_PORT - }, - 'logging': settings.TracingConfig.TRACING_LOGGING, - }, - service_name=settings.TracingConfig.TRACING_SERVICE_NAME, - validate=settings.TracingConfig.TRACING_VALIDATE - ) - - self.tracer = tracer_config.initialize_tracer() - tracer_interceptor = open_tracing_server_interceptor(self.tracer, - log_payloads=settings.TracingConfig.TRACING_LOG_PAYLOAD) - - interceptor_decorator = intercept_server + self.tracer = tracer self.server_impl = grpc.server( thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), @@ -60,7 +33,7 @@ class Server: (cygrpc.ChannelArgKey.max_receive_message_length, -1)] ) - self.server_impl = interceptor_decorator(self.server_impl, tracer_interceptor) + self.server_impl = self.tracer.decorate(self.server_impl) self.register_pre_run_handler(self.pre_run_handler) @@ -127,7 +100,7 @@ 
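The interception shape that patch 032 factors out of Server, reduced to its essentials (assumes the grpcio-opentracing package added to requirements.txt above):

import grpc
from concurrent import futures
from grpc_opentracing import open_tracing_server_interceptor
from grpc_opentracing.grpcext import intercept_server

def traced_server(tracer, max_workers=10):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
    interceptor = open_tracing_server_interceptor(tracer, log_payloads=False)
    # wrap the server so every RPC runs inside a server-side span
    return intercept_server(server, interceptor)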
class Server: logger.info('Server is shuting down ......') self.exit_flag = True self.server_impl.stop(0) - self.tracer and self.tracer.close() + self.tracer.close() logger.info('Server is closed') def add_error_handlers(self, target): diff --git a/mishards/settings.py b/mishards/settings.py index 94b8998881..9a8e770f11 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -46,7 +46,7 @@ elif SD_PROVIDER == 'Static': TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') -TRACING_ENABLED = env.bool('TRACING_ENABLED', False) +TRACING_TYPE = env.str('TRACING_TYPE', '') class TracingConfig: TRACING_LOGGING = env.bool('TRACING_LOGGING', True), TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') diff --git a/tracing/__init__.py b/tracing/__init__.py new file mode 100644 index 0000000000..3edddea9df --- /dev/null +++ b/tracing/__init__.py @@ -0,0 +1,17 @@ + +def empty_server_interceptor_decorator(target_server, interceptor): + return target_server + +class Tracer: + def __init__(self, tracer=None, + interceptor=None, + server_decorator=empty_server_interceptor_decorator): + self.tracer = tracer + self.interceptor = interceptor + self.server_decorator=server_decorator + + def decorate(self, server): + return self.server_decorator(server, self.interceptor) + + def close(self): + self.tracer and self.tracer.close() diff --git a/tracing/factory.py b/tracing/factory.py new file mode 100644 index 0000000000..f00a537e78 --- /dev/null +++ b/tracing/factory.py @@ -0,0 +1,39 @@ +import logging +from jaeger_client import Config +from grpc_opentracing.grpcext import intercept_server +from grpc_opentracing import open_tracing_server_interceptor + +from tracing import Tracer, empty_server_interceptor_decorator + +logger = logging.getLogger(__name__) + + +class TracerFactory: + @classmethod + def new_tracer(cls, tracer_type, tracer_config, **kwargs): + if not tracer_type: + return Tracer() + + if tracer_type.lower() == 'jaeger': + config = Config(config={ + 'sampler': { + 'type': 'const', + 'param': 1, + }, + 'local_agent': { + 'reporting_host': tracer_config.TRACING_REPORTING_HOST, + 'reporting_port': tracer_config.TRACING_REPORTING_PORT + }, + 'logging': tracer_config.TRACING_LOGGING, + }, + service_name=tracer_config.TRACING_SERVICE_NAME, + validate=tracer_config.TRACING_VALIDATE + ) + + tracer = config.initialize_tracer() + tracer_interceptor = open_tracing_server_interceptor(tracer, + log_payloads=tracer_config.TRACING_LOG_PAYLOAD) + + return Tracer(tracer, tracer_interceptor, intercept_server) + + assert False, 'Unsupported tracer type: {}'.format(tracer_type) From 63d3372b4c8931bc0258f378dee00509dc1080ef Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 25 Sep 2019 18:36:19 +0800 Subject: [PATCH 033/307] convert hostname to ip to avoid pymilvus dns domain name parse bug --- mishards/server.py | 2 +- sd/static_provider.py | 3 ++- start_services.yml | 28 ++++++++++++++++++++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 start_services.yml diff --git a/mishards/server.py b/mishards/server.py index 93d7e38826..679d5f996e 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -43,7 +43,7 @@ class Server: ip = socket.gethostbyname(url.hostname) socket.inet_pton(socket.AF_INET, ip) self.conn_mgr.register('WOSERVER', - '{}://{}:{}'.format(url.scheme, ip, url.port)) + '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) def register_pre_run_handler(self, func): logger.info('Regiterring {} into 
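With TRACING_TYPE unset, TracerFactory.new_tracer hands back a bare Tracer(); decorate then returns the server unchanged and close is guarded by "self.tracer and ...", so the Server code stays branch-free. In effect this is a null-object pattern; a sketch of both paths:

import grpc
from concurrent import futures
from mishards import settings
from tracing.factory import TracerFactory

server_impl = grpc.server(futures.ThreadPoolExecutor(max_workers=2))

noop = TracerFactory.new_tracer('', settings.TracingConfig)
server_impl = noop.decorate(server_impl)    # returned as-is
noop.close()                                # no-op

jaeger = TracerFactory.new_tracer('jaeger', settings.TracingConfig)
server_impl = jaeger.decorate(server_impl)  # interceptor-wrapped server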
server pre_run_handlers'.format(func)) diff --git a/sd/static_provider.py b/sd/static_provider.py index 73ae483b34..423d6c4d60 100644 --- a/sd/static_provider.py +++ b/sd/static_provider.py @@ -2,6 +2,7 @@ import os, sys if __name__ == '__main__': sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +import socket from utils import singleton from sd import ProviderManager @@ -15,7 +16,7 @@ class KubernetesProvider(object): NAME = 'Static' def __init__(self, settings, conn_mgr, **kwargs): self.conn_mgr = conn_mgr - self.hosts = settings.hosts + self.hosts = [socket.gethostbyname(host) for host in settings.hosts] def start(self): for host in self.hosts: diff --git a/start_services.yml b/start_services.yml new file mode 100644 index 0000000000..e2cd0653c3 --- /dev/null +++ b/start_services.yml @@ -0,0 +1,28 @@ +version: "2.3" +services: + milvus: + runtime: nvidia + restart: always + image: registry.zilliz.com/milvus/engine:branch-0.4.0-release-c58ca6 + # ports: + # - "0.0.0.0:19530:19530" + volumes: + - /tmp/milvus/db:/opt/milvus/db + + mishards: + restart: always + image: registry.zilliz.com/milvus/mishards:v0.0.2 + ports: + - "0.0.0.0:19530:19531" + - "0.0.0.0:19532:19532" + volumes: + - /tmp/milvus/db:/tmp/milvus/db + - /tmp/mishards_env:/source/mishards/.env + command: ["python", "mishards/main.py"] + environment: + DEBUG: 'true' + SERVER_PORT: 19531 + WOSERVER: tcp://milvus:19530 + SD_STATIC_HOSTS: milvus + depends_on: + - milvus From d96e601ab83f8b62992e0d16e66741cf2c0d59a5 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 25 Sep 2019 19:37:25 +0800 Subject: [PATCH 034/307] add jaeger in start_services.yml --- start_services.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/start_services.yml b/start_services.yml index e2cd0653c3..5c779c5b82 100644 --- a/start_services.yml +++ b/start_services.yml @@ -9,6 +9,16 @@ services: volumes: - /tmp/milvus/db:/opt/milvus/db + jaeger: + restart: always + image: jaegertracing/all-in-one:1.14 + ports: + - "0.0.0.0:5775:5775/udp" + - "0.0.0.0:16686:16686" + - "0.0.0.0:9441:9441" + environment: + COLLECTOR_ZIPKIN_HTTP_PORT: 9411 + mishards: restart: always image: registry.zilliz.com/milvus/mishards:v0.0.2 @@ -24,5 +34,11 @@ services: SERVER_PORT: 19531 WOSERVER: tcp://milvus:19530 SD_STATIC_HOSTS: milvus + TRACING_TYPE: jaeger + TRACING_SERVICE_NAME: mishards-demo + TRACING_REPORTING_HOST: jaeger + TRACING_REPORTING_PORT: 5775 + depends_on: - milvus + - jaeger From dc2a60f0808701521c3876edf26b5ac26eab90b8 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 10:33:38 +0800 Subject: [PATCH 035/307] fix bug in jaeger tracing settings --- mishards/settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mishards/settings.py b/mishards/settings.py index 9a8e770f11..eb6e1e5964 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -48,7 +48,7 @@ TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') TRACING_TYPE = env.str('TRACING_TYPE', '') class TracingConfig: - TRACING_LOGGING = env.bool('TRACING_LOGGING', True), + TRACING_LOGGING = env.bool('TRACING_LOGGING', True) TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', DEBUG) From 4c9cd6dc8ed1ba440bd9839e097c507668b1743f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 11:02:05 +0800 Subject: [PATCH 036/307] add span decorator --- mishards/server.py | 1 - 
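Patch 035 is a one-character fix worth spelling out: the trailing comma in the old settings line turned the assignment into a 1-tuple, so TRACING_LOGGING was (True,) rather than True, the wrong type for jaeger's 'logging' flag even though it happens to be truthy:

TRACING_LOGGING = True,   # tuple: (True,)
assert isinstance(TRACING_LOGGING, tuple) and TRACING_LOGGING != True

TRACING_LOGGING = True    # bool, as intended
assert TRACING_LOGGING is True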
tracing/__init__.py | 12 ++++++++++++ tracing/factory.py | 7 +++++-- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/mishards/server.py b/mishards/server.py index 679d5f996e..9dc09d6f05 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -24,7 +24,6 @@ class Server: self.exit_flag = False self.port = int(port) self.conn_mgr = conn_mgr - tracer_interceptor = None self.tracer = tracer self.server_impl = grpc.server( diff --git a/tracing/__init__.py b/tracing/__init__.py index 3edddea9df..04975c4cfd 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -1,3 +1,15 @@ +from grpc_opentracing import SpanDecorator + +class GrpcSpanDecorator(SpanDecorator): + def __call__(self, span, rpc_info): + if rpc_info.response.status.error_code == 0: + return + span.set_tag('error', True) + error_log = {'event': 'error', + 'error.kind': str(rpc_info.response.status.error_code), + 'message': rpc_info.response.status.reason + } + span.log_kv(error_log) def empty_server_interceptor_decorator(target_server, interceptor): return target_server diff --git a/tracing/factory.py b/tracing/factory.py index f00a537e78..f692563e7b 100644 --- a/tracing/factory.py +++ b/tracing/factory.py @@ -3,7 +3,9 @@ from jaeger_client import Config from grpc_opentracing.grpcext import intercept_server from grpc_opentracing import open_tracing_server_interceptor -from tracing import Tracer, empty_server_interceptor_decorator +from tracing import (Tracer, + GrpcSpanDecorator, + empty_server_interceptor_decorator) logger = logging.getLogger(__name__) @@ -32,7 +34,8 @@ class TracerFactory: tracer = config.initialize_tracer() tracer_interceptor = open_tracing_server_interceptor(tracer, - log_payloads=tracer_config.TRACING_LOG_PAYLOAD) + log_payloads=tracer_config.TRACING_LOG_PAYLOAD, + span_decorator=GrpcSpanDecorator()) return Tracer(tracer, tracer_interceptor, intercept_server) From 48f172facb6db3f27684fd8be4c8c3936cb6e148 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 11:38:31 +0800 Subject: [PATCH 037/307] refactor tracing --- mishards/__init__.py | 4 +++- mishards/grpc_utils/__init__.py | 21 +++++++++++++++++++++ mishards/settings.py | 17 ++++++++++++----- tracing/__init__.py | 13 ------------- tracing/factory.py | 17 +++-------------- 5 files changed, 39 insertions(+), 33 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index 640293c265..c1cea84861 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -13,7 +13,9 @@ sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) from tracing.factory import TracerFactory -tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig) +from grpc_utils import GrpcSpanDecorator +tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, + span_decorator=GrpcSpanDecorator()) from mishards.server import Server grpc_server = Server(conn_mgr=connect_mgr, tracer=tracer) diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py index 959d5549c7..9ee7d22f37 100644 --- a/mishards/grpc_utils/__init__.py +++ b/mishards/grpc_utils/__init__.py @@ -1,3 +1,24 @@ +from grpc_opentracing import SpanDecorator +from milvus.grpc_gen import status_pb2 + + +class GrpcSpanDecorator(SpanDecorator): + def __call__(self, span, rpc_info): + status = None + if isinstance(rpc_info.response, status_pb2.Status): + status = rpc_info.response + else: + status = rpc_info.response.status + 
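The SpanDecorator contract assumed here, in brief: grpcio-opentracing invokes the decorator once per RPC with the active server span and an rpc_info object carrying the request and response, so anything reachable from those can be tagged or logged onto the trace. A minimal custom decorator (hypothetical example, not mishards code):

from grpc_opentracing import SpanDecorator

class ResponseTypeDecorator(SpanDecorator):
    def __call__(self, span, rpc_info):
        # tag every span with the concrete response message type
        span.set_tag('response_type', type(rpc_info.response).__name__)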
if status.error_code == 0: + return + span.set_tag('error', True) + span.set_tag('error_code', status.error_code) + error_log = {'event': 'error', + 'request': rpc_info.request, + 'response': rpc_info.response + } + span.log_kv(error_log) + def mark_grpc_method(func): setattr(func, 'grpc_method', True) return func diff --git a/mishards/settings.py b/mishards/settings.py index eb6e1e5964..4a70d44561 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -48,13 +48,20 @@ TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') TRACING_TYPE = env.str('TRACING_TYPE', '') class TracingConfig: - TRACING_LOGGING = env.bool('TRACING_LOGGING', True) TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) - TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', DEBUG) - TRACING_REPORTING_HOST = env.str('TRACING_REPORTING_HOST', '127.0.0.1') - TRACING_REPORTING_PORT = env.str('TRACING_REPORTING_PORT', '5775') - + TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', False) + TRACING_CONFIG = { + 'sampler': { + 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), + 'param': env.str('TRACING_SAMPLER_PARAM', "1"), + }, + 'local_agent': { + 'reporting_host': env.str('TRACING_REPORTING_HOST', '127.0.0.1'), + 'reporting_port': env.str('TRACING_REPORTING_PORT', '5775') + }, + 'logging': env.bool('TRACING_LOGGING', True) + } if __name__ == '__main__': import logging diff --git a/tracing/__init__.py b/tracing/__init__.py index 04975c4cfd..0aebf6ffba 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -1,16 +1,3 @@ -from grpc_opentracing import SpanDecorator - -class GrpcSpanDecorator(SpanDecorator): - def __call__(self, span, rpc_info): - if rpc_info.response.status.error_code == 0: - return - span.set_tag('error', True) - error_log = {'event': 'error', - 'error.kind': str(rpc_info.response.status.error_code), - 'message': rpc_info.response.status.reason - } - span.log_kv(error_log) - def empty_server_interceptor_decorator(target_server, interceptor): return target_server diff --git a/tracing/factory.py b/tracing/factory.py index f692563e7b..fd06fe3cac 100644 --- a/tracing/factory.py +++ b/tracing/factory.py @@ -4,7 +4,6 @@ from grpc_opentracing.grpcext import intercept_server from grpc_opentracing import open_tracing_server_interceptor from tracing import (Tracer, - GrpcSpanDecorator, empty_server_interceptor_decorator) logger = logging.getLogger(__name__) @@ -12,22 +11,12 @@ logger = logging.getLogger(__name__) class TracerFactory: @classmethod - def new_tracer(cls, tracer_type, tracer_config, **kwargs): + def new_tracer(cls, tracer_type, tracer_config, span_decorator=None, **kwargs): if not tracer_type: return Tracer() if tracer_type.lower() == 'jaeger': - config = Config(config={ - 'sampler': { - 'type': 'const', - 'param': 1, - }, - 'local_agent': { - 'reporting_host': tracer_config.TRACING_REPORTING_HOST, - 'reporting_port': tracer_config.TRACING_REPORTING_PORT - }, - 'logging': tracer_config.TRACING_LOGGING, - }, + config = Config(config=tracer_config.TRACING_CONFIG, service_name=tracer_config.TRACING_SERVICE_NAME, validate=tracer_config.TRACING_VALIDATE ) @@ -35,7 +24,7 @@ class TracerFactory: tracer = config.initialize_tracer() tracer_interceptor = open_tracing_server_interceptor(tracer, log_payloads=tracer_config.TRACING_LOG_PAYLOAD, - span_decorator=GrpcSpanDecorator()) + span_decorator=span_decorator) return Tracer(tracer, tracer_interceptor, intercept_server) From 
bdbb70f63f2c72e070b98330e3ced1d959d9c366 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 12:06:38 +0800 Subject: [PATCH 038/307] change grpc decorator --- mishards/grpc_utils/__init__.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py index 9ee7d22f37..ba9a5e175d 100644 --- a/mishards/grpc_utils/__init__.py +++ b/mishards/grpc_utils/__init__.py @@ -5,18 +5,24 @@ from milvus.grpc_gen import status_pb2 class GrpcSpanDecorator(SpanDecorator): def __call__(self, span, rpc_info): status = None + if not rpc_info.response: + return if isinstance(rpc_info.response, status_pb2.Status): status = rpc_info.response else: - status = rpc_info.response.status + try: + status = rpc_info.response.status + except Exception as e: + status = status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, + reason='Should not happen') + if status.error_code == 0: return - span.set_tag('error', True) - span.set_tag('error_code', status.error_code) error_log = {'event': 'error', 'request': rpc_info.request, 'response': rpc_info.response } + span.set_tag('error', True) span.log_kv(error_log) def mark_grpc_method(func): From 11ba6beb40f2e6b9ef4351cbcffa1b4810b7e5d9 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 15:22:00 +0800 Subject: [PATCH 039/307] update for search error handling --- mishards/service_handler.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index f39ad3ef46..cb904f4e42 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -92,13 +92,17 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return routing def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): + status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success") if not files_n_topk_results: - return [] + return status, [] request_results = defaultdict(list) calc_time = time.time() for files_collection in files_n_topk_results: + if isinstance(files_collection, tuple): + status, _ = files_collection + return status, [] for request_pos, each_request_results in enumerate(files_collection.topk_query_result): request_results[request_pos].extend(each_request_results.query_result_arrays) request_results[request_pos] = sorted(request_results[request_pos], key=lambda x: x.distance, @@ -114,7 +118,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): query_result = TopKQueryResult(query_result_arrays=result[1]) topk_query_result.append(query_result) - return topk_query_result + return status, topk_query_result def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): metadata = kwargs.get('metadata', None) @@ -273,14 +277,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): query_range_array.append( Range(query_range.start_value, query_range.end_value)) - results = self._do_query(table_name, table_meta, query_record_array, topk, + status, results = self._do_query(table_name, table_meta, query_record_array, topk, nprobe, query_range_array, metadata=metadata) now = time.time() logger.info('SearchVector takes: {}'.format(now - start)) topk_result_list = milvus_pb2.TopKQueryResultList( - status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success"), + status=status, topk_query_result=results ) return topk_result_list From 110e56c1b7f20574db351eea6a3c3d812ad21fc3 Mon Sep 17 00:00:00 2001 From: "peng.xu" 
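The merge step that _do_merge performs in patch 039, reduced to its core: each shard returns its own top-k per query, and the coordinator concatenates, re-sorts by distance, and truncates; reverse=True corresponds to inner-product metrics, where larger scores are better. A sketch with a stand-in record type:

from collections import namedtuple

Hit = namedtuple('Hit', ['id', 'distance'])

def merge_topk(per_shard_results, topk, reverse=False):
    merged = [hit for shard in per_shard_results for hit in shard]
    merged.sort(key=lambda h: h.distance, reverse=reverse)
    return merged[:topk]

assert merge_topk([[Hit(1, 0.1), Hit(2, 0.4)], [Hit(3, 0.2)]], topk=2) == \
    [Hit(1, 0.1), Hit(3, 0.2)]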
Date: Thu, 26 Sep 2019 18:34:02 +0800 Subject: [PATCH 040/307] add more child span for search --- mishards/server.py | 2 +- mishards/service_handler.py | 31 ++++++++++++++++++------------- tracing/__init__.py | 6 ++++++ 3 files changed, 25 insertions(+), 14 deletions(-) diff --git a/mishards/server.py b/mishards/server.py index 9dc09d6f05..876424089c 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -76,7 +76,7 @@ class Server: def start(self, port=None): handler_class = self.add_error_handlers(ServiceHandler) - add_MilvusServiceServicer_to_server(handler_class(conn_mgr=self.conn_mgr), self.server_impl) + add_MilvusServiceServicer_to_server(handler_class(conn_mgr=self.conn_mgr, tracer=self.tracer), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format(str(port or self._port))) self.server_impl.start() diff --git a/mishards/service_handler.py b/mishards/service_handler.py index cb904f4e42..72ae73932c 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -24,10 +24,11 @@ logger = logging.getLogger(__name__) class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): MAX_NPROBE = 2048 - def __init__(self, conn_mgr, *args, **kwargs): + def __init__(self, conn_mgr, tracer, *args, **kwargs): self.conn_mgr = conn_mgr self.table_meta = {} self.error_handlers = {} + self.tracer = tracer def connection(self, metadata=None): conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) @@ -120,7 +121,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status, topk_query_result - def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): + def _do_query(self, context, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): metadata = kwargs.get('metadata', None) range_array = [self._range_to_date(r, metadata=metadata) for r in range_array] if range_array else None routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) @@ -140,16 +141,18 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): conn = self.query_conn(addr, metadata=metadata) start = time.time() - ret = conn.search_vectors_in_files(table_name=query_params['table_id'], - file_ids=query_params['file_ids'], - query_records=vectors, - top_k=topk, - nprobe=nprobe, - lazy=True) - end = time.time() - logger.info('search_vectors_in_files takes: {}'.format(end - start)) + with self.tracer.start_span('search_{}_span'.format(addr), + child_of=context.get_active_span().context): + ret = conn.search_vectors_in_files(table_name=query_params['table_id'], + file_ids=query_params['file_ids'], + query_records=vectors, + top_k=topk, + nprobe=nprobe, + lazy=True) + end = time.time() + logger.info('search_vectors_in_files takes: {}'.format(end - start)) - all_topk_results.append(ret) + all_topk_results.append(ret) with ThreadPoolExecutor(max_workers=workers) as pool: for addr, params in routing.items(): @@ -160,7 +163,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): res.result() reverse = table_meta.metric_type == types.MetricType.IP - return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) + with self.tracer.start_span('do_merge', + child_of=context.get_active_span().context): + return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) @mark_grpc_method def CreateTable(self, request, context): @@ -277,7 +282,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): query_range_array.append( Range(query_range.start_value, 
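The scatter/gather shape used in _do_query, stripped of tracing and Milvus specifics (a sketch): one worker per shard address, results gathered by blocking on the futures, so a failure in any shard surfaces in the caller when result() re-raises it.

from concurrent.futures import ThreadPoolExecutor

def scatter(routing, do_search, workers=10):
    with ThreadPoolExecutor(max_workers=workers) as pool:
        futures = [pool.submit(do_search, addr, params)
                   for addr, params in routing.items()]
        return [f.result() for f in futures]   # re-raises the first worker error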
query_range.end_value)) - status, results = self._do_query(table_name, table_meta, query_record_array, topk, + status, results = self._do_query(context, table_name, table_meta, query_record_array, topk, nprobe, query_range_array, metadata=metadata) now = time.time() diff --git a/tracing/__init__.py b/tracing/__init__.py index 0aebf6ffba..27c57473db 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -14,3 +14,9 @@ class Tracer: def close(self): self.tracer and self.tracer.close() + + def start_span(self, operation_name=None, + child_of=None, references=None, tags=None, + start_time=None, ignore_active_span=False): + return self.tracer.start_span(operation_name, child_of, + references, tags, start_time, ignore_active_span) From a6a1ff2f13dbdadb178ae91582a50b50df12e9a2 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 19:23:15 +0800 Subject: [PATCH 041/307] add routing span --- mishards/service_handler.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 72ae73932c..cafe4be60f 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -124,7 +124,11 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def _do_query(self, context, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): metadata = kwargs.get('metadata', None) range_array = [self._range_to_date(r, metadata=metadata) for r in range_array] if range_array else None - routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) + + routing = {} + with self.tracer.start_span('get_routing', + child_of=context.get_active_span().context): + routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) metadata = kwargs.get('metadata', None) From 81a78a40cb9647d78b59505997f0e02ba936e737 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 27 Sep 2019 10:21:17 +0800 Subject: [PATCH 042/307] more detail tracing in search --- mishards/service_handler.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index cafe4be60f..ddff2903b8 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -145,7 +145,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): conn = self.query_conn(addr, metadata=metadata) start = time.time() - with self.tracer.start_span('search_{}_span'.format(addr), + span = kwargs.get('span', None) + span = span if span else context.get_active_span().context + with self.tracer.start_span('search_{}'.format(addr), child_of=context.get_active_span().context): ret = conn.search_vectors_in_files(table_name=query_params['table_id'], file_ids=query_params['file_ids'], @@ -158,13 +160,15 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): all_topk_results.append(ret) - with ThreadPoolExecutor(max_workers=workers) as pool: - for addr, params in routing.items(): - res = pool.submit(search, addr, params, vectors, topk, nprobe) - rs.append(res) + with self.tracer.start_span('do_search', + child_of=context.get_active_span().context) as span: + with ThreadPoolExecutor(max_workers=workers) as pool: + for addr, params in routing.items(): + res = pool.submit(search, addr, params, vectors, topk, nprobe, span=span) + rs.append(res) - for res in rs: - res.result() + for res in rs: + res.result() reverse = table_meta.metric_type == types.MetricType.IP with 
self.tracer.start_span('do_merge', From 98d49b803d76daf40a3bfc5c2f142ba29ddc0433 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 27 Sep 2019 11:29:22 +0800 Subject: [PATCH 043/307] update for proto update --- mishards/exception_handlers.py | 5 +---- mishards/grpc_utils/grpc_args_parser.py | 5 +++-- mishards/service_handler.py | 30 ++++++++----------------- requirements.txt | 4 ++-- 4 files changed, 15 insertions(+), 29 deletions(-) diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index a2659f91af..16ba34a3b1 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -36,11 +36,8 @@ def resp_handler(err, error_code): return resp_class(status=status, string_reply='') if resp_class == milvus_pb2.TableSchema: - table_name = milvus_pb2.TableName( - status=status - ) return milvus_pb2.TableSchema( - table_name=table_name + status=status ) if resp_class == milvus_pb2.IndexParam: diff --git a/mishards/grpc_utils/grpc_args_parser.py b/mishards/grpc_utils/grpc_args_parser.py index c8dc9d71d9..039299803d 100644 --- a/mishards/grpc_utils/grpc_args_parser.py +++ b/mishards/grpc_utils/grpc_args_parser.py @@ -21,7 +21,8 @@ class GrpcArgsParser(object): @error_status def parse_proto_TableSchema(cls, param): _table_schema = { - 'table_name': param.table_name.table_name, + 'status': param.status, + 'table_name': param.table_name, 'dimension': param.dimension, 'index_file_size': param.index_file_size, 'metric_type': param.metric_type @@ -47,7 +48,7 @@ class GrpcArgsParser(object): @classmethod @error_status def parse_proto_IndexParam(cls, param): - _table_name = param.table_name.table_name + _table_name = param.table_name _status, _index = cls.parse_proto_Index(param.index) if not _status.OK(): diff --git a/mishards/service_handler.py b/mishards/service_handler.py index ddff2903b8..81217b52be 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -311,11 +311,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - table_name = milvus_pb2.TableName( - status=status_pb2.Status(error_code=_status.code, reason=_status.message) - ) return milvus_pb2.TableSchema( - table_name=table_name + status=status_pb2.Status(error_code=_status.code, reason=_status.message), ) metadata = { @@ -326,22 +323,17 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table = self.connection(metadata=metadata).describe_table(_table_name) if _status.OK(): - _grpc_table_name = milvus_pb2.TableName( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_name=_table.table_name - ) - return milvus_pb2.TableSchema( - table_name=_grpc_table_name, + table_name=_table_name, index_file_size=_table.index_file_size, dimension=_table.dimension, - metric_type=_table.metric_type + metric_type=_table.metric_type, + status=status_pb2.Status(error_code=_status.code, reason=_status.message), ) return milvus_pb2.TableSchema( - table_name=milvus_pb2.TableName( - status=status_pb2.Status(error_code=_status.code, reason=_status.message) - ) + table_name=_table_name, + status=status_pb2.Status(error_code=_status.code, reason=_status.message), ) @mark_grpc_method @@ -398,14 +390,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): } _status, _results = self.connection(metadata=metadata).show_tables() - if not _status.OK(): - _results = [] - - for _result in _results: - yield milvus_pb2.TableName( + return 
milvus_pb2.TableNameList( status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_name=_result - ) + table_names=_results + ) @mark_grpc_method def DeleteByRange(self, request, context): diff --git a/requirements.txt b/requirements.txt index 03db7aeed3..e94f8d1597 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,8 +14,8 @@ py==1.8.0 pyasn1==0.4.7 pyasn1-modules==0.2.6 pylint==2.3.1 -#pymilvus-test==0.2.15 -pymilvus==0.2.0 +pymilvus-test==0.2.15 +#pymilvus==0.2.0 pyparsing==2.4.0 pytest==4.6.3 pytest-level==0.1.1 From 76581d0641f55907f0dd7d8a5b35b4f8b1175e11 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 27 Sep 2019 11:39:24 +0800 Subject: [PATCH 044/307] update DecribeIndex for proto changes --- mishards/service_handler.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 81217b52be..60d64cef37 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -426,9 +426,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if not _status.OK(): return milvus_pb2.IndexParam( - table_name=milvus_pb2.TableName( status=status_pb2.Status(error_code=_status.code, reason=_status.message) - ) ) metadata = { @@ -439,11 +437,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _index_param = self.connection(metadata=metadata).describe_index(_table_name) _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) - _tablename = milvus_pb2.TableName( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_name=_table_name) - return milvus_pb2.IndexParam(table_name=_tablename, index=_index) + return milvus_pb2.IndexParam(status=status_pb2.Status(error_code=_status.code, reason=_status.message), + table_name=_table_name, index=_index) @mark_grpc_method def DropIndex(self, request, context): From 663f9a2312997fda9dad71135a49dd307b20898e Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 27 Sep 2019 14:03:46 +0800 Subject: [PATCH 045/307] small refactor in server --- mishards/grpc_utils/__init__.py | 5 +++++ mishards/server.py | 14 +++++++------- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py index ba9a5e175d..550913ed60 100644 --- a/mishards/grpc_utils/__init__.py +++ b/mishards/grpc_utils/__init__.py @@ -28,3 +28,8 @@ class GrpcSpanDecorator(SpanDecorator): def mark_grpc_method(func): setattr(func, 'grpc_method', True) return func + +def is_grpc_method(func): + if not func: + return False + return getattr(func, 'grpc_method', False) diff --git a/mishards/server.py b/mishards/server.py index 876424089c..1f72a8812d 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -10,6 +10,7 @@ from grpc._cython import cygrpc from grpc._channel import _Rendezvous, _UnaryUnaryMultiCallable from jaeger_client import Config from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server +from mishards.grpc_utils import is_grpc_method from mishards.service_handler import ServiceHandler from mishards import settings, discover @@ -75,7 +76,7 @@ class Server: discover.start() def start(self, port=None): - handler_class = self.add_error_handlers(ServiceHandler) + handler_class = self.decorate_handler(ServiceHandler) add_MilvusServiceServicer_to_server(handler_class(conn_mgr=self.conn_mgr, tracer=self.tracer), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format(str(port or 
self._port))) self.server_impl.start() @@ -102,9 +103,8 @@ class Server: self.tracer.close() logger.info('Server is closed') - def add_error_handlers(self, target): - for key, attr in target.__dict__.items(): - is_grpc_method = getattr(attr, 'grpc_method', False) - if is_grpc_method: - setattr(target, key, self.wrap_method_with_errorhandler(attr)) - return target + def decorate_handler(self, handler): + for key, attr in handler.__dict__.items(): + if is_grpc_method(attr): + setattr(handler, key, self.wrap_method_with_errorhandler(attr)) + return handler From 7220af2cd172ac6a4304b75f6f5e48d409671e70 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 11:01:52 +0800 Subject: [PATCH 046/307] refactor settings --- mishards/__init__.py | 39 +++++++++++++++++++++++---------------- mishards/main.py | 4 ++-- mishards/server.py | 9 ++++++--- mishards/settings.py | 12 +++++++++--- 4 files changed, 40 insertions(+), 24 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index c1cea84861..76f3168b51 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -2,22 +2,29 @@ from mishards import settings from mishards.db_base import DB db = DB() -db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI, echo=settings.SQL_ECHO) - -from mishards.connections import ConnectionMgr -connect_mgr = ConnectionMgr() - -from sd import ProviderManager - -sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) -discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) - -from tracing.factory import TracerFactory -from grpc_utils import GrpcSpanDecorator -tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, - span_decorator=GrpcSpanDecorator()) from mishards.server import Server -grpc_server = Server(conn_mgr=connect_mgr, tracer=tracer) +grpc_server = Server() -from mishards import exception_handlers +def create_app(testing_config=None): + config = testing_config if testing_config else settings.DefaultConfig + db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) + + from mishards.connections import ConnectionMgr + connect_mgr = ConnectionMgr() + + from sd import ProviderManager + + sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) + discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) + + from tracing.factory import TracerFactory + from grpc_utils import GrpcSpanDecorator + tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, + span_decorator=GrpcSpanDecorator()) + + grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, discover=discover) + + from mishards import exception_handlers + + return grpc_server diff --git a/mishards/main.py b/mishards/main.py index 7fac55dfa2..9197fbf598 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -2,10 +2,10 @@ import os, sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from mishards import ( - settings, - grpc_server as server) + settings, create_app) def main(): + server = create_app() server.run(port=settings.SERVER_PORT) return 0 diff --git a/mishards/server.py b/mishards/server.py index 1f72a8812d..0ca4a8f866 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -12,20 +12,23 @@ from jaeger_client import Config from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.grpc_utils import is_grpc_method from mishards.service_handler import ServiceHandler -from mishards import settings, discover +from 
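Patch 045's marking/wrapping pattern, reduced: gRPC entry points are tagged with an attribute at definition time, and the server rewrites only tagged callables on the handler class at startup. A sketch (the list() around __dict__.items() is a small defensive addition, since setattr mutates the class dict being walked):

def mark_grpc_method(func):
    setattr(func, 'grpc_method', True)
    return func

def is_grpc_method(func):
    return bool(func) and getattr(func, 'grpc_method', False)

def decorate_handler(handler_cls, wrapper):
    for key, attr in list(handler_cls.__dict__.items()):
        if is_grpc_method(attr):
            setattr(handler_cls, key, wrapper(attr))
    return handler_cls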
mishards import settings logger = logging.getLogger(__name__) class Server: - def __init__(self, conn_mgr, tracer, port=19530, max_workers=10, **kwargs): + def __init__(self): self.pre_run_handlers = set() self.grpc_methods = set() self.error_handlers = {} self.exit_flag = False + + def init_app(self, conn_mgr, tracer, discover, port=19530, max_workers=10, **kwargs): self.port = int(port) self.conn_mgr = conn_mgr self.tracer = tracer + self.discover = discover self.server_impl = grpc.server( thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), @@ -73,7 +76,7 @@ class Server: def on_pre_run(self): for handler in self.pre_run_handlers: handler() - discover.start() + self.discover.start() def start(self, port=None): handler_class = self.decorate_handler(ServiceHandler) diff --git a/mishards/settings.py b/mishards/settings.py index 4a70d44561..b42cb791f6 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -16,9 +16,6 @@ TIMEZONE = env.str('TIMEZONE', 'UTC') from utils.logger_helper import config config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) -SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') -SQL_ECHO = env.bool('SQL_ECHO', False) - TIMEOUT = env.int('TIMEOUT', 60) MAX_RETRY = env.int('MAX_RETRY', 3) SEARCH_WORKER_SIZE = env.int('SEARCH_WORKER_SIZE', 10) @@ -63,6 +60,15 @@ class TracingConfig: 'logging': env.bool('TRACING_LOGGING', True) } +class DefaultConfig: + SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') + SQL_ECHO = env.bool('SQL_ECHO', False) + +# class TestingConfig(DefaultConfig): +# SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') +# SQL_ECHO = env.bool('SQL_TEST_ECHO', False) + + if __name__ == '__main__': import logging logger = logging.getLogger(__name__) From 4051cf7e07b54d79c6303f8b0fb7f9311aadd850 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 11:22:46 +0800 Subject: [PATCH 047/307] update for testing config --- mishards/__init__.py | 4 ++++ mishards/db_base.py | 4 ++++ mishards/main.py | 2 +- mishards/server.py | 1 - mishards/settings.py | 8 +++++--- 5 files changed, 14 insertions(+), 5 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index 76f3168b51..8682b6eba6 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -7,8 +7,12 @@ from mishards.server import Server grpc_server = Server() def create_app(testing_config=None): + import logging + logger = logging.getLogger() + config = testing_config if testing_config else settings.DefaultConfig db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) + logger.info(db) from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() diff --git a/mishards/db_base.py b/mishards/db_base.py index 3b2c699864..1006f21f55 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -24,6 +24,10 @@ class DB: self.uri = uri self.session = sessionmaker() self.session.configure(bind=self.engine) + self.url = url + + def __str__(self): + return ''.format(self.url.get_backend_name(), self.url.database) @property def Session(self): diff --git a/mishards/main.py b/mishards/main.py index 9197fbf598..5d8db0a179 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -5,7 +5,7 @@ from mishards import ( settings, create_app) def main(): - server = create_app() + server = create_app(settings.TestingConfig if settings.TESTING else settings.DefaultConfig) server.run(port=settings.SERVER_PORT) return 0 diff --git a/mishards/server.py b/mishards/server.py index 0ca4a8f866..c044bbb7ad 100644 --- a/mishards/server.py 
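The settings refactor in PATCHes 046 and 047 moves the per-environment values into config classes resolved from the environment, and TestingConfig is only defined when TESTING is set, so a missing SQLALCHEMY_DATABASE_TEST_URI cannot break production startup. A hedged sketch using environs, with variable names matching the patch and defaults that are illustrative only:

    from environs import Env

    env = Env()
    env.read_env()

    class DefaultConfig:
        SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI',
                                          'sqlite:///:memory:')
        SQL_ECHO = env.bool('SQL_ECHO', False)

    if env.bool('TESTING', False):
        class TestingConfig(DefaultConfig):
            # raises only when TESTING is set and the test URI is absent
            SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI')
            SQL_ECHO = env.bool('SQL_TEST_ECHO', False)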
+++ b/mishards/server.py @@ -90,7 +90,6 @@ class Server: self.on_pre_run() self.start(port) - logger.info('Successfully') logger.info('Listening on port {}'.format(port)) try: diff --git a/mishards/settings.py b/mishards/settings.py index b42cb791f6..71e94b76a2 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -64,9 +64,11 @@ class DefaultConfig: SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') SQL_ECHO = env.bool('SQL_ECHO', False) -# class TestingConfig(DefaultConfig): -# SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') -# SQL_ECHO = env.bool('SQL_TEST_ECHO', False) +TESTING = env.bool('TESTING', False) +if TESTING: + class TestingConfig(DefaultConfig): + SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') + SQL_ECHO = env.bool('SQL_TEST_ECHO', False) if __name__ == '__main__': From 71231205659444422fcc505c4cd7d5cadae70aa7 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 15:54:55 +0800 Subject: [PATCH 048/307] update db session and related factory impl --- mishards/__init__.py | 7 +++---- mishards/db_base.py | 13 ++++++++++--- mishards/factories.py | 4 ++-- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index 8682b6eba6..b351986cba 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -1,4 +1,6 @@ +import logging from mishards import settings +logger = logging.getLogger() from mishards.db_base import DB db = DB() @@ -7,9 +9,6 @@ from mishards.server import Server grpc_server = Server() def create_app(testing_config=None): - import logging - logger = logging.getLogger() - config = testing_config if testing_config else settings.DefaultConfig db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) logger.info(db) @@ -23,7 +22,7 @@ def create_app(testing_config=None): discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) from tracing.factory import TracerFactory - from grpc_utils import GrpcSpanDecorator + from mishards.grpc_utils import GrpcSpanDecorator tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, span_decorator=GrpcSpanDecorator()) diff --git a/mishards/db_base.py b/mishards/db_base.py index 1006f21f55..b1492aa8f5 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -3,14 +3,23 @@ from sqlalchemy import create_engine from sqlalchemy.engine.url import make_url from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker, scoped_session +from sqlalchemy.orm.session import Session as SessionBase logger = logging.getLogger(__name__) + +class LocalSession(SessionBase): + def __init__(self, db, autocommit=False, autoflush=True, **options): + self.db = db + bind = options.pop('bind', None) or db.engine + SessionBase.__init__(self, autocommit=autocommit, autoflush=autoflush, bind=bind, **options) + class DB: Model = declarative_base() def __init__(self, uri=None, echo=False): self.echo = echo uri and self.init_db(uri, echo) + self.session_factory = scoped_session(sessionmaker(class_=LocalSession, db=self)) def init_db(self, uri, echo=False): url = make_url(uri) @@ -22,8 +31,6 @@ class DB: echo=echo, max_overflow=0) self.uri = uri - self.session = sessionmaker() - self.session.configure(bind=self.engine) self.url = url def __str__(self): @@ -31,7 +38,7 @@ class DB: @property def Session(self): - return self.session() + return self.session_factory() def drop_all(self): self.Model.metadata.drop_all(self.engine) diff --git 
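PATCH 048 replaces the bare sessionmaker with a scoped_session built around a custom Session subclass, so each session carries a back-reference to its owning DB wrapper and callers on the same thread share one session. A condensed sketch of that wiring (SQLAlchemy 1.x-style API, as the project uses):

    from sqlalchemy import create_engine
    from sqlalchemy.orm import scoped_session, sessionmaker
    from sqlalchemy.orm.session import Session as SessionBase

    class LocalSession(SessionBase):
        def __init__(self, db, **options):
            self.db = db                              # back-reference to the wrapper
            bind = options.pop('bind', None) or db.engine
            SessionBase.__init__(self, bind=bind, **options)

    class DB:
        def __init__(self, uri):
            self.engine = create_engine(uri)
            # extra sessionmaker kwargs (db=self) are forwarded to LocalSession
            self.session_factory = scoped_session(
                sessionmaker(class_=LocalSession, db=self))

        @property
        def Session(self):
            return self.session_factory()             # one session per thread

    db = DB('sqlite://')
    assert db.Session is db.Session                   # scoped: same object per thread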
a/mishards/factories.py b/mishards/factories.py index 5bd059654a..26e9ab2619 100644 --- a/mishards/factories.py +++ b/mishards/factories.py @@ -19,7 +19,7 @@ factory.Faker.add_provider(FakerProvider) class TablesFactory(SQLAlchemyModelFactory): class Meta: model = Tables - sqlalchemy_session = db.Session + sqlalchemy_session = db.session_factory sqlalchemy_session_persistence = 'commit' id = factory.Faker('random_number', digits=16, fix_len=True) @@ -35,7 +35,7 @@ class TablesFactory(SQLAlchemyModelFactory): class TableFilesFactory(SQLAlchemyModelFactory): class Meta: model = TableFiles - sqlalchemy_session = db.Session + sqlalchemy_session = db.session_factory sqlalchemy_session_persistence = 'commit' id = factory.Faker('random_number', digits=16, fix_len=True) From 13bad105e201172d6a072174ffb07ecddf326bfa Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 16:42:05 +0800 Subject: [PATCH 049/307] add unit test --- conftest.py | 22 +++++++++++++++++++++ mishards/test_connections.py | 0 mishards/test_models.py | 38 ++++++++++++++++++++++++++++++++++++ setup.cfg | 4 ++++ 4 files changed, 64 insertions(+) create mode 100644 conftest.py create mode 100644 mishards/test_connections.py create mode 100644 mishards/test_models.py create mode 100644 setup.cfg diff --git a/conftest.py b/conftest.py new file mode 100644 index 0000000000..630ff0ba31 --- /dev/null +++ b/conftest.py @@ -0,0 +1,22 @@ +import logging +import pytest +from mishards import settings, db, create_app + +logger = logging.getLogger(__name__) + +def clear_data(session): + meta = db.metadata + for table in reversed(meta.sorted_tables): + session.execute(table.delete()) + session.commit() + +# @pytest.fixture(scope="module") +@pytest.fixture +def app(request): + app = create_app(settings.TestingConfig) + db.drop_all() + db.create_all() + + yield app + + db.drop_all() diff --git a/mishards/test_connections.py b/mishards/test_connections.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/mishards/test_models.py b/mishards/test_models.py new file mode 100644 index 0000000000..85dcc246aa --- /dev/null +++ b/mishards/test_models.py @@ -0,0 +1,38 @@ +import logging +import pytest +from mishards.factories import TableFiles, Tables, TableFilesFactory, TablesFactory +from mishards import db, create_app, settings +from mishards.factories import ( + Tables, TableFiles, + TablesFactory, TableFilesFactory + ) + +logger = logging.getLogger(__name__) + +@pytest.mark.usefixtures('app') +class TestModels: + def test_files_to_search(self): + table = TablesFactory() + new_files_cnt = 5 + to_index_cnt = 10 + raw_cnt = 20 + backup_cnt = 12 + to_delete_cnt = 9 + index_cnt = 8 + new_index_cnt = 6 + new_merge_cnt = 11 + + new_files = TableFilesFactory.create_batch(new_files_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW, date=110) + to_index_files = TableFilesFactory.create_batch(to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX, date=110) + raw_files = TableFilesFactory.create_batch(raw_cnt, table=table, file_type=TableFiles.FILE_TYPE_RAW, date=120) + backup_files = TableFilesFactory.create_batch(backup_cnt, table=table, file_type=TableFiles.FILE_TYPE_BACKUP, date=110) + index_files = TableFilesFactory.create_batch(index_cnt, table=table, file_type=TableFiles.FILE_TYPE_INDEX, date=110) + new_index_files = TableFilesFactory.create_batch(new_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_INDEX, date=110) + new_merge_files = TableFilesFactory.create_batch(new_merge_cnt, table=table, 
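PATCH 049's conftest.py builds the app from the testing config and brackets every test with a schema reset. A self-contained sketch of the fixture's life cycle, where FakeDB and create_app stand in for the real mishards objects:

    import pytest

    class FakeDB:                              # stand-in for mishards.db
        def create_all(self):
            print('create schema')

        def drop_all(self):
            print('drop schema')

    db = FakeDB()

    def create_app(config):                    # stand-in for mishards.create_app
        return object()

    @pytest.fixture
    def app():
        application = create_app('TestingConfig')
        db.drop_all()                          # always start from a clean slate
        db.create_all()
        yield application                      # the test body runs here
        db.drop_all()                          # teardown runs even on test failure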
file_type=TableFiles.FILE_TYPE_NEW_MERGE, date=110) + to_delete_files = TableFilesFactory.create_batch(to_delete_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_DELETE, date=110) + assert table.files_to_search().count() == raw_cnt + index_cnt + to_index_cnt + + assert table.files_to_search([(100, 115)]).count() == index_cnt + to_index_cnt + assert table.files_to_search([(111, 120)]).count() == 0 + assert table.files_to_search([(111, 121)]).count() == raw_cnt + assert table.files_to_search([(110, 121)]).count() == raw_cnt + index_cnt + to_index_cnt diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000..4a88432914 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,4 @@ +[tool:pytest] +testpaths = mishards +log_cli=true +log_cli_level=info From dd38d54d647816516479782404a9c71805cf05b9 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 17:26:28 +0800 Subject: [PATCH 050/307] add connection tests --- conftest.py | 7 ---- mishards/connections.py | 10 ++--- mishards/test_connections.py | 73 ++++++++++++++++++++++++++++++++++++ 3 files changed, 78 insertions(+), 12 deletions(-) diff --git a/conftest.py b/conftest.py index 630ff0ba31..c4fed5cc7e 100644 --- a/conftest.py +++ b/conftest.py @@ -4,13 +4,6 @@ from mishards import settings, db, create_app logger = logging.getLogger(__name__) -def clear_data(session): - meta = db.metadata - for table in reversed(meta.sorted_tables): - session.execute(table.delete()) - session.commit() - -# @pytest.fixture(scope="module") @pytest.fixture def app(request): app = create_app(settings.TestingConfig) diff --git a/mishards/connections.py b/mishards/connections.py index 35c5d6c3bd..caaf9629dd 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -35,7 +35,7 @@ class Connection: @property def can_retry(self): - return self.retried <= self.max_retry + return self.retried < self.max_retry @property def connected(self): @@ -45,7 +45,7 @@ class Connection: if self.on_retry_func: self.on_retry_func(self) else: - logger.warn('{} is retrying {}'.format(self, self.retried)) + logger.warning('{} is retrying {}'.format(self, self.retried)) def on_connect(self, metadata=None): while not self.connected and self.can_retry: @@ -123,11 +123,11 @@ class ConnectionMgr: return self.on_diff_meta(name, url) def on_same_meta(self, name, url): - # logger.warn('Register same meta: {}:{}'.format(name, url)) + # logger.warning('Register same meta: {}:{}'.format(name, url)) pass def on_diff_meta(self, name, url): - logger.warn('Received {} with diff url={}'.format(name, url)) + logger.warning('Received {} with diff url={}'.format(name, url)) self.metas[name] = url self.conns[name] = {} @@ -136,7 +136,7 @@ class ConnectionMgr: self.conns.pop(name, None) def on_nonexisted_meta(self, name): - logger.warn('Non-existed meta: {}'.format(name)) + logger.warning('Non-existed meta: {}'.format(name)) def register(self, name, url): logger.info('Register Connection: name={};url={}'.format(name, url)) diff --git a/mishards/test_connections.py b/mishards/test_connections.py index e69de29bb2..1f46b60f8b 100644 --- a/mishards/test_connections.py +++ b/mishards/test_connections.py @@ -0,0 +1,73 @@ +import logging +import pytest + +from mishards.connections import (ConnectionMgr, Connection) +from mishards import exceptions + +logger = logging.getLogger(__name__) + +@pytest.mark.usefixtures('app') +class TestConnection: + def test_manager(self): + mgr = ConnectionMgr() + + mgr.register('pod1', '111') + mgr.register('pod2', '222') + mgr.register('pod2', '222') + 
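The connections.py change in PATCH 050 is subtle: can_retry now uses retried < max_retry instead of <=, which caps the loop at exactly max_retry attempts, and the new test pins that behavior with assert RetryObj.times == max_retry. A toy reduction of the counting:

    def attempts(max_retry):
        retried = 0
        tries = 0
        while retried < max_retry:   # the post-patch can_retry condition
            retried += 1             # incremented before each retry callback
            tries += 1
        return tries

    assert attempts(3) == 3          # with '<=' this would have been 4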
mgr.register('pod2', '2222') + assert len(mgr.conn_names) == 2 + + mgr.unregister('pod1') + assert len(mgr.conn_names) == 1 + + mgr.unregister('pod2') + assert len(mgr.conn_names) == 0 + + mgr.register('WOSERVER', 'xxxx') + assert len(mgr.conn_names) == 0 + + def test_connection(self): + class Conn: + def __init__(self, state): + self.state = state + def connect(self, uri): + return self.state + def connected(self): + return self.state + FAIL_CONN = Conn(False) + PASS_CONN = Conn(True) + + class Retry: + def __init__(self): + self.times = 0 + + def __call__(self, conn): + self.times += 1 + logger.info('Retrying {}'.format(self.times)) + + class Func(): + def __init__(self): + self.executed = False + def __call__(self): + self.executed = True + + max_retry = 3 + + RetryObj = Retry() + c = Connection('client', uri='', + max_retry=max_retry, + on_retry_func=RetryObj) + c.conn = FAIL_CONN + ff = Func() + this_connect = c.connect(func=ff) + with pytest.raises(exceptions.ConnectionConnectError): + this_connect() + assert RetryObj.times == max_retry + assert not ff.executed + RetryObj = Retry() + + c.conn = PASS_CONN + this_connect = c.connect(func=ff) + this_connect() + assert ff.executed + assert RetryObj.times == 0 From 7d1590c691a8aa518290614de6f9df2ca3af21af Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 17:27:33 +0800 Subject: [PATCH 051/307] remove dummy code --- mishards/connections.py | 56 ----------------------------------------- 1 file changed, 56 deletions(-) diff --git a/mishards/connections.py b/mishards/connections.py index caaf9629dd..22524c3a20 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -152,59 +152,3 @@ class ConnectionMgr: if url is None: return self.on_nonexisted_meta(name) return self.on_unregister_meta(name, url) - - -if __name__ == '__main__': - class Conn: - def __init__(self, state): - self.state = state - - def connect(self, uri): - return self.state - - def connected(self): - return self.state - - fail_conn = Conn(False) - success_conn = Conn(True) - - class Retry: - def __init__(self): - self.times = 0 - - def __call__(self, conn): - self.times += 1 - print('Retrying {}'.format(self.times)) - - - retry_obj = Retry() - c = Connection('client', uri='', on_retry_func=retry_obj) - - def f(): - print('ffffffff') - - # c.conn = fail_conn - # m = c.connect(func=f) - # m() - - c.conn = success_conn - m = c.connect(func=f) - m() - - mgr = ConnectionMgr() - mgr.register('pod1', '111') - mgr.register('pod2', '222') - mgr.register('pod2', '222') - mgr.register('pod2', 'tcp://127.0.0.1:19530') - - pod3 = mgr.conn('pod3') - print(pod3) - - pod2 = mgr.conn('pod2') - print(pod2) - print(pod2.connected) - - mgr.unregister('pod1') - - logger.info(mgr.metas) - logger.info(mgr.conns) From 6d25b23e39e8233b18ec2ac95371aa3abb0f4716 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 18:21:33 +0800 Subject: [PATCH 052/307] update env example --- mishards/.env.example | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/mishards/.env.example b/mishards/.env.example index 22406c7f34..76b1810759 100644 --- a/mishards/.env.example +++ b/mishards/.env.example @@ -1,8 +1,10 @@ -DEBUG=False +DEBUG=True WOSERVER=tcp://127.0.0.1:19530 TESTING_WOSERVER=tcp://127.0.0.1:19530 -SERVER_PORT=19531 +SERVER_PORT=19532 + +SD_PROVIDER=Static SD_NAMESPACE=xp SD_IN_CLUSTER=False @@ -10,5 +12,21 @@ SD_POLL_INTERVAL=5 SD_ROSERVER_POD_PATT=.*-ro-servers-.* SD_LABEL_SELECTOR=tier=ro-servers +SD_STATIC_HOSTS=127.0.0.1 + 
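test_manager also asserts that registering the reserved WOSERVER name leaves conn_names untouched. The property's body is not shown in this series, so the following is an assumed reading of what the tests imply, not the project's actual implementation:

    # Assumed shape of ConnectionMgr.conn_names implied by the tests:
    # registered metas minus the reserved write-server alias.
    class ConnectionMgr:
        def __init__(self):
            self.metas = {}

        @property
        def conn_names(self):
            return set(self.metas.keys()) - {'WOSERVER'}   # assumption

        def register(self, name, url):
            self.metas[name] = url

        def unregister(self, name):
            self.metas.pop(name, None)

    mgr = ConnectionMgr()
    mgr.register('pod1', '111')
    mgr.register('WOSERVER', 'xxxx')
    assert mgr.conn_names == {'pod1'}      # matches the test's expectations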
SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +#SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_ECHO=True + +TESTING=True +#SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False +SQL_TEST_ECHO=False + +TRACING_TYPE=jaeger +TRACING_SERVICE_NAME=fortest +TRACING_SAMPLER_TYPE=const +TRACING_SAMPLER_PARAM=1 +TRACING_LOG_PAYLOAD=True +#TRACING_SAMPLER_TYPE=probabilistic +#TRACING_SAMPLER_PARAM=0.5 From 498f3e9c8c89a916a8af44f491ccffb8ccd5a068 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 11:20:45 +0800 Subject: [PATCH 053/307] load env example by default --- mishards/settings.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mishards/settings.py b/mishards/settings.py index 71e94b76a2..f5028cbbc7 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -1,10 +1,12 @@ import sys import os -from environs import Env +from dotenv import load_dotenv +load_dotenv('./mishards/.env.example') +from environs import Env env = Env() -env.read_env() +env.read_env(override=True) DEBUG = env.bool('DEBUG', False) From bef93edab9921f04d15747a7e245f8649597e4a7 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 11:29:41 +0800 Subject: [PATCH 054/307] update default sql url --- mishards/.env.example | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mishards/.env.example b/mishards/.env.example index 76b1810759..47a4549f04 100644 --- a/mishards/.env.example +++ b/mishards/.env.example @@ -14,8 +14,8 @@ SD_LABEL_SELECTOR=tier=ro-servers SD_STATIC_HOSTS=127.0.0.1 -SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 -#SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False +#SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_ECHO=True TESTING=True From 71c67f59a3b1d348c0e27c49a642bf64b0227a5a Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 13:42:12 +0800 Subject: [PATCH 055/307] update for code style --- conftest.py | 1 + manager.py | 7 ++- mishards/__init__.py | 5 +- mishards/connections.py | 10 ++-- mishards/db_base.py | 8 ++- mishards/exception_handlers.py | 5 ++ mishards/exceptions.py | 8 +++ mishards/factories.py | 18 +++--- mishards/grpc_utils/__init__.py | 10 ++-- mishards/grpc_utils/grpc_args_wrapper.py | 4 +- mishards/hash_ring.py | 28 +++++----- mishards/main.py | 11 ++-- mishards/models.py | 15 ++--- mishards/server.py | 4 +- mishards/service_handler.py | 64 ++++++++++----------- mishards/settings.py | 10 +++- mishards/test_connections.py | 8 ++- mishards/test_models.py | 7 ++- sd/__init__.py | 1 + sd/kubernetes_provider.py | 71 +++++++++++++----------- sd/static_provider.py | 6 +- tracing/__init__.py | 13 +++-- tracing/factory.py | 12 ++-- utils/__init__.py | 1 + utils/logger_helper.py | 17 ++++-- 25 files changed, 201 insertions(+), 143 deletions(-) diff --git a/conftest.py b/conftest.py index c4fed5cc7e..d6c9f3acc7 100644 --- a/conftest.py +++ b/conftest.py @@ -4,6 +4,7 @@ from mishards import settings, db, create_app logger = logging.getLogger(__name__) + @pytest.fixture def app(request): app = create_app(settings.TestingConfig) diff --git a/manager.py b/manager.py index 31f5894d2d..931c90ebc8 100644 --- 
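PATCH 053 layers the environment in two passes: the checked-in .env.example seeds a default for every known key, then env.read_env(override=True) re-reads a real .env on top, so its values replace the example defaults. A minimal sketch of that load order, with the path as in the patch:

    from dotenv import load_dotenv     # python-dotenv
    from environs import Env

    load_dotenv('./mishards/.env.example')   # pass 1: defaults for every key

    env = Env()
    env.read_env(override=True)              # pass 2: values from .env win

    DEBUG = env.bool('DEBUG', False)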
a/manager.py +++ b/manager.py @@ -2,6 +2,7 @@ import fire from mishards import db from sqlalchemy import and_ + class DBHandler: @classmethod def create_all(cls): @@ -15,9 +16,9 @@ class DBHandler: def fun(cls, tid): from mishards.factories import TablesFactory, TableFilesFactory, Tables f = db.Session.query(Tables).filter(and_( - Tables.table_id==tid, - Tables.state!=Tables.TO_DELETE) - ).first() + Tables.table_id == tid, + Tables.state != Tables.TO_DELETE) + ).first() print(f) # f1 = TableFilesFactory() diff --git a/mishards/__init__.py b/mishards/__init__.py index b351986cba..47d8adb6e3 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -1,4 +1,4 @@ -import logging +import logging from mishards import settings logger = logging.getLogger() @@ -8,6 +8,7 @@ db = DB() from mishards.server import Server grpc_server = Server() + def create_app(testing_config=None): config = testing_config if testing_config else settings.DefaultConfig db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) @@ -24,7 +25,7 @@ def create_app(testing_config=None): from tracing.factory import TracerFactory from mishards.grpc_utils import GrpcSpanDecorator tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, - span_decorator=GrpcSpanDecorator()) + span_decorator=GrpcSpanDecorator()) grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, discover=discover) diff --git a/mishards/connections.py b/mishards/connections.py index 22524c3a20..ccd8e7e81b 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -10,6 +10,7 @@ from utils import singleton logger = logging.getLogger(__name__) + class Connection: def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs): self.name = name @@ -55,7 +56,7 @@ class Connection: if not self.can_retry and not self.connected: raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry, - metadata=metadata)) + metadata=metadata)) self.retried = 0 @@ -72,6 +73,7 @@ class Connection: raise e return inner + @singleton class ConnectionMgr: def __init__(self): @@ -90,10 +92,10 @@ class ConnectionMgr: if not throw: return None raise exceptions.ConnectionNotFoundError(message='Connection {} not found'.format(name), - metadata=metadata) + metadata=metadata) this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) threaded = { - threading.get_ident() : this_conn + threading.get_ident(): this_conn } self.conns[name] = threaded return this_conn @@ -106,7 +108,7 @@ class ConnectionMgr: if not throw: return None raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name), - metadata=metadata) + metadata=metadata) this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) c[tid] = this_conn return this_conn diff --git a/mishards/db_base.py b/mishards/db_base.py index b1492aa8f5..6fb3aef4e1 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -14,8 +14,10 @@ class LocalSession(SessionBase): bind = options.pop('bind', None) or db.engine SessionBase.__init__(self, autocommit=autocommit, autoflush=autoflush, bind=bind, **options) + class DB: Model = declarative_base() + def __init__(self, uri=None, echo=False): self.echo = echo uri and self.init_db(uri, echo) @@ -27,9 +29,9 @@ class DB: self.engine = create_engine(url) else: self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, - pool_pre_ping=True, - echo=echo, - max_overflow=0) + pool_pre_ping=True, + echo=echo, + max_overflow=0) self.uri = uri 
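Buried in the code-style pass, ConnectionMgr's conn() makes its caching scheme visible: one Milvus client per (connection name, thread), keyed by threading.get_ident(). A stripped-down sketch of that idea:

    import threading

    class PerThreadCache:
        def __init__(self):
            self.conns = {}                   # name -> {thread_id: connection}

        def get(self, name, factory):
            tid = threading.get_ident()
            threaded = self.conns.setdefault(name, {})
            if tid not in threaded:
                threaded[tid] = factory()     # first use on this thread
            return threaded[tid]

    cache = PerThreadCache()
    conn = cache.get('pod1', object)
    assert cache.get('pod1', object) is conn  # same thread, same connection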
self.url = url diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index 16ba34a3b1..1e5ffb3529 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -4,6 +4,7 @@ from mishards import grpc_server as server, exceptions logger = logging.getLogger(__name__) + def resp_handler(err, error_code): if not isinstance(err, exceptions.BaseException): return status_pb2.Status(error_code=error_code, reason=str(err)) @@ -50,21 +51,25 @@ def resp_handler(err, error_code): status.error_code = status_pb2.UNEXPECTED_ERROR return status + @server.errorhandler(exceptions.TableNotFoundError) def TableNotFoundErrorHandler(err): logger.error(err) return resp_handler(err, status_pb2.TABLE_NOT_EXISTS) + @server.errorhandler(exceptions.InvalidArgumentError) def InvalidArgumentErrorHandler(err): logger.error(err) return resp_handler(err, status_pb2.ILLEGAL_ARGUMENT) + @server.errorhandler(exceptions.DBError) def DBErrorHandler(err): logger.error(err) return resp_handler(err, status_pb2.UNEXPECTED_ERROR) + @server.errorhandler(exceptions.InvalidRangeError) def InvalidArgumentErrorHandler(err): logger.error(err) diff --git a/mishards/exceptions.py b/mishards/exceptions.py index 2aa2b39eb9..acd9372d6a 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -1,26 +1,34 @@ import mishards.exception_codes as codes + class BaseException(Exception): code = codes.INVALID_CODE message = 'BaseException' + def __init__(self, message='', metadata=None): self.message = self.__class__.__name__ if not message else message self.metadata = metadata + class ConnectionConnectError(BaseException): code = codes.CONNECT_ERROR_CODE + class ConnectionNotFoundError(BaseException): code = codes.CONNECTTION_NOT_FOUND_CODE + class DBError(BaseException): code = codes.DB_ERROR_CODE + class TableNotFoundError(BaseException): code = codes.TABLE_NOT_FOUND_CODE + class InvalidArgumentError(BaseException): code = codes.INVALID_ARGUMENT_CODE + class InvalidRangeError(BaseException): code = codes.INVALID_DATE_RANGE_CODE diff --git a/mishards/factories.py b/mishards/factories.py index 26e9ab2619..c4037fe2d7 100644 --- a/mishards/factories.py +++ b/mishards/factories.py @@ -9,13 +9,16 @@ from faker.providers import BaseProvider from mishards import db from mishards.models import Tables, TableFiles + class FakerProvider(BaseProvider): def this_date(self): t = datetime.datetime.today() - return (t.year - 1900) * 10000 + (t.month-1)*100 + t.day + return (t.year - 1900) * 10000 + (t.month - 1) * 100 + t.day + factory.Faker.add_provider(FakerProvider) + class TablesFactory(SQLAlchemyModelFactory): class Meta: model = Tables @@ -24,14 +27,15 @@ class TablesFactory(SQLAlchemyModelFactory): id = factory.Faker('random_number', digits=16, fix_len=True) table_id = factory.Faker('uuid4') - state = factory.Faker('random_element', elements=(0,1,2,3)) - dimension = factory.Faker('random_element', elements=(256,512)) + state = factory.Faker('random_element', elements=(0, 1, 2, 3)) + dimension = factory.Faker('random_element', elements=(256, 512)) created_on = int(time.time()) index_file_size = 0 - engine_type = factory.Faker('random_element', elements=(0,1,2,3)) - metric_type = factory.Faker('random_element', elements=(0,1)) + engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) + metric_type = factory.Faker('random_element', elements=(0, 1)) nlist = 16384 + class TableFilesFactory(SQLAlchemyModelFactory): class Meta: model = TableFiles @@ -40,9 +44,9 @@ class 
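mishards funnels errors through one exception base carrying a numeric code plus optional gRPC metadata; exception_handlers then maps each class onto a status_pb2 code. A compressed sketch (the base is renamed here to avoid shadowing the builtin BaseException, and the numeric code is a placeholder, not the real value from exception_codes.py):

    class BaseAppException(Exception):
        code = -1

        def __init__(self, message='', metadata=None):
            self.message = message or self.__class__.__name__
            self.metadata = metadata        # e.g. the expected response class

    class TableNotFoundError(BaseAppException):
        code = 20001                        # placeholder code

    try:
        raise TableNotFoundError(metadata={'resp_class': 'TableSchema'})
    except BaseAppException as err:
        print(err.code, err.message, err.metadata)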
TableFilesFactory(SQLAlchemyModelFactory): id = factory.Faker('random_number', digits=16, fix_len=True) table = factory.SubFactory(TablesFactory) - engine_type = factory.Faker('random_element', elements=(0,1,2,3)) + engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) file_id = factory.Faker('uuid4') - file_type = factory.Faker('random_element', elements=(0,1,2,3,4)) + file_type = factory.Faker('random_element', elements=(0, 1, 2, 3, 4)) file_size = factory.Faker('random_number') updated_time = int(time.time()) created_on = int(time.time()) diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py index 550913ed60..f5225b2a66 100644 --- a/mishards/grpc_utils/__init__.py +++ b/mishards/grpc_utils/__init__.py @@ -14,21 +14,23 @@ class GrpcSpanDecorator(SpanDecorator): status = rpc_info.response.status except Exception as e: status = status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, - reason='Should not happen') + reason='Should not happen') if status.error_code == 0: return error_log = {'event': 'error', - 'request': rpc_info.request, - 'response': rpc_info.response - } + 'request': rpc_info.request, + 'response': rpc_info.response + } span.set_tag('error', True) span.log_kv(error_log) + def mark_grpc_method(func): setattr(func, 'grpc_method', True) return func + def is_grpc_method(func): if not func: return False diff --git a/mishards/grpc_utils/grpc_args_wrapper.py b/mishards/grpc_utils/grpc_args_wrapper.py index a864b1e400..7447dbd995 100644 --- a/mishards/grpc_utils/grpc_args_wrapper.py +++ b/mishards/grpc_utils/grpc_args_wrapper.py @@ -1,4 +1,4 @@ # class GrpcArgsWrapper(object): - # @classmethod - # def proto_TableName(cls): \ No newline at end of file +# @classmethod +# def proto_TableName(cls): diff --git a/mishards/hash_ring.py b/mishards/hash_ring.py index bfec108c5c..a97f3f580e 100644 --- a/mishards/hash_ring.py +++ b/mishards/hash_ring.py @@ -9,8 +9,8 @@ else: import md5 md5_constructor = md5.new -class HashRing(object): +class HashRing(object): def __init__(self, nodes=None, weights=None): """`nodes` is a list of objects that have a proper __str__ representation. `weights` is dictionary that sets weights to the nodes. 
The default @@ -40,13 +40,13 @@ class HashRing(object): if node in self.weights: weight = self.weights.get(node) - factor = math.floor((40*len(self.nodes)*weight) / total_weight); + factor = math.floor((40 * len(self.nodes) * weight) / total_weight) for j in range(0, int(factor)): - b_key = self._hash_digest( '%s-%s' % (node, j) ) + b_key = self._hash_digest('%s-%s' % (node, j)) for i in range(0, 3): - key = self._hash_val(b_key, lambda x: x+i*4) + key = self._hash_val(b_key, lambda x: x + i * 4) self.ring[key] = node self._sorted_keys.append(key) @@ -60,7 +60,7 @@ class HashRing(object): pos = self.get_node_pos(string_key) if pos is None: return None - return self.ring[ self._sorted_keys[pos] ] + return self.ring[self._sorted_keys[pos]] def get_node_pos(self, string_key): """Given a string key a corresponding node in the hash ring is returned @@ -94,6 +94,7 @@ class HashRing(object): yield None, None returned_values = set() + def distinct_filter(value): if str(value) not in returned_values: returned_values.add(str(value)) @@ -121,10 +122,8 @@ class HashRing(object): return self._hash_val(b_key, lambda x: x) def _hash_val(self, b_key, entry_fn): - return (( b_key[entry_fn(3)] << 24) - |(b_key[entry_fn(2)] << 16) - |(b_key[entry_fn(1)] << 8) - | b_key[entry_fn(0)] ) + return (b_key[entry_fn(3)] << 24) | (b_key[entry_fn(2)] << 16) | ( + b_key[entry_fn(1)] << 8) | b_key[entry_fn(0)] def _hash_digest(self, key): m = md5_constructor() @@ -132,12 +131,13 @@ class HashRing(object): m.update(key) return m.digest() + if __name__ == '__main__': from collections import defaultdict - servers = ['192.168.0.246:11212', - '192.168.0.247:11212', - '192.168.0.248:11212', - '192.168.0.249:11212'] + servers = [ + '192.168.0.246:11212', '192.168.0.247:11212', '192.168.0.248:11212', + '192.168.0.249:11212' + ] ring = HashRing(servers) keys = ['{}'.format(i) for i in range(100)] @@ -146,5 +146,5 @@ if __name__ == '__main__': server = ring.get_node(k) mapped[server].append(k) - for k,v in mapped.items(): + for k, v in mapped.items(): print(k, v) diff --git a/mishards/main.py b/mishards/main.py index 5d8db0a179..3f69484ee4 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -1,13 +1,16 @@ -import os, sys +import os +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from mishards import ( - settings, create_app) +from mishards import (settings, create_app) + def main(): - server = create_app(settings.TestingConfig if settings.TESTING else settings.DefaultConfig) + server = create_app( + settings.TestingConfig if settings.TESTING else settings.DefaultConfig) server.run(port=settings.SERVER_PORT) return 0 + if __name__ == '__main__': sys.exit(main()) diff --git a/mishards/models.py b/mishards/models.py index 0f7bb603ae..54cf5f8ed9 100644 --- a/mishards/models.py +++ b/mishards/models.py @@ -1,13 +1,14 @@ import logging from sqlalchemy import (Integer, Boolean, Text, - String, BigInteger, func, and_, or_, - Column) + String, BigInteger, func, and_, or_, + Column) from sqlalchemy.orm import relationship, backref from mishards import db logger = logging.getLogger(__name__) + class TableFiles(db.Model): FILE_TYPE_NEW = 0 FILE_TYPE_RAW = 1 @@ -57,16 +58,16 @@ class Tables(db.Model): def files_to_search(self, date_range=None): cond = or_( - TableFiles.file_type==TableFiles.FILE_TYPE_RAW, - TableFiles.file_type==TableFiles.FILE_TYPE_TO_INDEX, - TableFiles.file_type==TableFiles.FILE_TYPE_INDEX, + TableFiles.file_type == TableFiles.FILE_TYPE_RAW, + TableFiles.file_type == 
TableFiles.FILE_TYPE_TO_INDEX, + TableFiles.file_type == TableFiles.FILE_TYPE_INDEX, ) if date_range: cond = and_( cond, or_( - and_(TableFiles.date>=d[0], TableFiles.date= d[0], TableFiles.date < d[1]) for d in date_range + ) ) files = self.files.filter(cond) diff --git a/mishards/server.py b/mishards/server.py index c044bbb7ad..032d101cba 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -33,7 +33,7 @@ class Server: self.server_impl = grpc.server( thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), options=[(cygrpc.ChannelArgKey.max_send_message_length, -1), - (cygrpc.ChannelArgKey.max_receive_message_length, -1)] + (cygrpc.ChannelArgKey.max_receive_message_length, -1)] ) self.server_impl = self.tracer.decorate(self.server_impl) @@ -46,7 +46,7 @@ class Server: ip = socket.gethostbyname(url.hostname) socket.inet_pton(socket.AF_INET, ip) self.conn_mgr.register('WOSERVER', - '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) + '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) def register_pre_run_handler(self, func): logger.info('Regiterring {} into server pre_run_handlers'.format(func)) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 60d64cef37..2a1e0eef02 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -11,7 +11,7 @@ from concurrent.futures import ThreadPoolExecutor from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 from milvus.grpc_gen.milvus_pb2 import TopKQueryResult from milvus.client.Abstract import Range -from milvus.client import types +from milvus.client import types as Types from mishards import (db, settings, exceptions) from mishards.grpc_utils import mark_grpc_method @@ -24,6 +24,7 @@ logger = logging.getLogger(__name__) class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): MAX_NPROBE = 2048 + def __init__(self, conn_mgr, tracer, *args, **kwargs): self.conn_mgr = conn_mgr self.table_meta = {} @@ -44,8 +45,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return conn.conn def _format_date(self, start, end): - return ((start.year-1900)*10000 + (start.month-1)*100 + start.day - , (end.year-1900)*10000 + (end.month-1)*100 + end.day) + return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) def _range_to_date(self, range_obj, metadata=None): try: @@ -54,8 +54,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): assert start < end except (ValueError, AssertionError): raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( - range_obj.start_date, range_obj.end_date - ), metadata=metadata) + range_obj.start_date, range_obj.end_date + ), metadata=metadata) return self._format_date(start, end) @@ -63,9 +63,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): # PXU TODO: Implement Thread-local Context try: table = db.Session.query(Tables).filter(and_( - Tables.table_id==table_id, - Tables.state!=Tables.TO_DELETE - )).first() + Tables.table_id == table_id, + Tables.state != Tables.TO_DELETE + )).first() except sqlalchemy_exc.SQLAlchemyError as e: raise exceptions.DBError(message=str(e), metadata=metadata) @@ -93,7 +93,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return routing def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): - status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success") + status = status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success") if not 
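_format_date makes the integer date scheme explicit: (year - 1900) * 10000 + (month - 1) * 100 + day, the same encoding the factories use for TableFiles.date, with ranges treated as half-open. A quick check of the arithmetic:

    import datetime

    def encode(d):
        return (d.year - 1900) * 10000 + (d.month - 1) * 100 + d.day

    start = encode(datetime.date(2019, 10, 14))   # -> 1190914
    end = encode(datetime.date(2019, 10, 15))     # -> 1190915
    # files_to_search treats a range as half-open: start <= date < end
    assert start <= 1190914 < end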
files_n_topk_results: return status, [] @@ -107,7 +107,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): for request_pos, each_request_results in enumerate(files_collection.topk_query_result): request_results[request_pos].extend(each_request_results.query_result_arrays) request_results[request_pos] = sorted(request_results[request_pos], key=lambda x: x.distance, - reverse=reverse)[:topk] + reverse=reverse)[:topk] calc_time = time.time() - calc_time logger.info('Merge takes {}'.format(calc_time)) @@ -127,7 +127,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): routing = {} with self.tracer.start_span('get_routing', - child_of=context.get_active_span().context): + child_of=context.get_active_span().context): routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) @@ -140,28 +140,28 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def search(addr, query_params, vectors, topk, nprobe, **kwargs): logger.info('Send Search Request: addr={};params={};nq={};topk={};nprobe={}'.format( - addr, query_params, len(vectors), topk, nprobe - )) + addr, query_params, len(vectors), topk, nprobe + )) conn = self.query_conn(addr, metadata=metadata) start = time.time() span = kwargs.get('span', None) span = span if span else context.get_active_span().context with self.tracer.start_span('search_{}'.format(addr), - child_of=context.get_active_span().context): + child_of=context.get_active_span().context): ret = conn.search_vectors_in_files(table_name=query_params['table_id'], - file_ids=query_params['file_ids'], - query_records=vectors, - top_k=topk, - nprobe=nprobe, - lazy=True) + file_ids=query_params['file_ids'], + query_records=vectors, + top_k=topk, + nprobe=nprobe, + lazy=True) end = time.time() logger.info('search_vectors_in_files takes: {}'.format(end - start)) all_topk_results.append(ret) with self.tracer.start_span('do_search', - child_of=context.get_active_span().context) as span: + child_of=context.get_active_span().context) as span: with ThreadPoolExecutor(max_workers=workers) as pool: for addr, params in routing.items(): res = pool.submit(search, addr, params, vectors, topk, nprobe, span=span) @@ -170,9 +170,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): for res in rs: res.result() - reverse = table_meta.metric_type == types.MetricType.IP + reverse = table_meta.metric_type == Types.MetricType.IP with self.tracer.start_span('do_merge', - child_of=context.get_active_span().context): + child_of=context.get_active_span().context): return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) @mark_grpc_method @@ -201,8 +201,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('HasTable {}'.format(_table_name)) _bool = self.connection(metadata={ - 'resp_class': milvus_pb2.BoolReply - }).has_table(_table_name) + 'resp_class': milvus_pb2.BoolReply + }).has_table(_table_name) return milvus_pb2.BoolReply( status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="OK"), @@ -244,7 +244,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' _status, _ids = self.connection(metadata={ 'resp_class': milvus_pb2.VectorIds - }).add_vectors(None, None, insert_param=request) + }).add_vectors(None, None, insert_param=request) return milvus_pb2.VectorIds( status=status_pb2.Status(error_code=_status.code, reason=_status.message), vector_id_array=_ids @@ 
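The search path fans one request out to every routed shard through a thread pool, then _do_merge concatenates the per-shard top-k lists and re-sorts them, descending when the metric is inner product. A toy version of the merge step on (id, distance) tuples:

    def merge_topk(per_shard_results, topk, reverse=False):
        merged = [hit for shard in per_shard_results for hit in shard]
        # reverse=True for inner-product metrics, where larger is better
        return sorted(merged, key=lambda hit: hit[1], reverse=reverse)[:topk]

    shard_a = [('id1', 0.1), ('id7', 0.4)]
    shard_b = [('id3', 0.2), ('id9', 0.9)]
    assert merge_topk([shard_a, shard_b], 2) == [('id1', 0.1), ('id3', 0.2)]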
-266,7 +266,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if nprobe > self.MAX_NPROBE or nprobe <= 0: raise exceptions.InvalidArgumentError(message='Invalid nprobe: {}'.format(nprobe), - metadata=metadata) + metadata=metadata) table_meta = self.table_meta.get(table_name, None) @@ -332,8 +332,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): ) return milvus_pb2.TableSchema( - table_name=_table_name, - status=status_pb2.Status(error_code=_status.code, reason=_status.message), + table_name=_table_name, + status=status_pb2.Status(error_code=_status.code, reason=_status.message), ) @mark_grpc_method @@ -391,8 +391,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _results = self.connection(metadata=metadata).show_tables() return milvus_pb2.TableNameList( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_names=_results + status=status_pb2.Status(error_code=_status.code, reason=_status.message), + table_names=_results ) @mark_grpc_method @@ -426,7 +426,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if not _status.OK(): return milvus_pb2.IndexParam( - status=status_pb2.Status(error_code=_status.code, reason=_status.message) + status=status_pb2.Status(error_code=_status.code, reason=_status.message) ) metadata = { @@ -439,7 +439,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) return milvus_pb2.IndexParam(status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_name=_table_name, index=_index) + table_name=_table_name, index=_index) @mark_grpc_method def DropIndex(self, request, context): diff --git a/mishards/settings.py b/mishards/settings.py index f5028cbbc7..4563538a08 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -39,13 +39,15 @@ if SD_PROVIDER == 'Kubernetes': elif SD_PROVIDER == 'Static': from sd.static_provider import StaticProviderSettings SD_PROVIDER_SETTINGS = StaticProviderSettings( - hosts=env.list('SD_STATIC_HOSTS', []) - ) + hosts=env.list('SD_STATIC_HOSTS', []) + ) TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') TRACING_TYPE = env.str('TRACING_TYPE', '') + + class TracingConfig: TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) @@ -54,7 +56,7 @@ class TracingConfig: 'sampler': { 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), 'param': env.str('TRACING_SAMPLER_PARAM', "1"), - }, + }, 'local_agent': { 'reporting_host': env.str('TRACING_REPORTING_HOST', '127.0.0.1'), 'reporting_port': env.str('TRACING_REPORTING_PORT', '5775') @@ -62,10 +64,12 @@ class TracingConfig: 'logging': env.bool('TRACING_LOGGING', True) } + class DefaultConfig: SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') SQL_ECHO = env.bool('SQL_ECHO', False) + TESTING = env.bool('TESTING', False) if TESTING: class TestingConfig(DefaultConfig): diff --git a/mishards/test_connections.py b/mishards/test_connections.py index 1f46b60f8b..f1c54f0c61 100644 --- a/mishards/test_connections.py +++ b/mishards/test_connections.py @@ -6,6 +6,7 @@ from mishards import exceptions logger = logging.getLogger(__name__) + @pytest.mark.usefixtures('app') class TestConnection: def test_manager(self): @@ -30,8 +31,10 @@ class TestConnection: class Conn: def __init__(self, state): self.state = state + def connect(self, uri): return 
self.state + def connected(self): return self.state FAIL_CONN = Conn(False) @@ -48,6 +51,7 @@ class TestConnection: class Func(): def __init__(self): self.executed = False + def __call__(self): self.executed = True @@ -55,8 +59,8 @@ class TestConnection: RetryObj = Retry() c = Connection('client', uri='', - max_retry=max_retry, - on_retry_func=RetryObj) + max_retry=max_retry, + on_retry_func=RetryObj) c.conn = FAIL_CONN ff = Func() this_connect = c.connect(func=ff) diff --git a/mishards/test_models.py b/mishards/test_models.py index 85dcc246aa..d60b62713e 100644 --- a/mishards/test_models.py +++ b/mishards/test_models.py @@ -3,12 +3,13 @@ import pytest from mishards.factories import TableFiles, Tables, TableFilesFactory, TablesFactory from mishards import db, create_app, settings from mishards.factories import ( - Tables, TableFiles, - TablesFactory, TableFilesFactory - ) + Tables, TableFiles, + TablesFactory, TableFilesFactory +) logger = logging.getLogger(__name__) + @pytest.mark.usefixtures('app') class TestModels: def test_files_to_search(self): diff --git a/sd/__init__.py b/sd/__init__.py index 6dfba5ddc1..7943887d0f 100644 --- a/sd/__init__.py +++ b/sd/__init__.py @@ -24,4 +24,5 @@ class ProviderManager: def get_provider(cls, name): return cls.PROVIDERS.get(name, None) + from sd import kubernetes_provider, static_provider diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py index 51665a0cb5..924f1fc8a4 100644 --- a/sd/kubernetes_provider.py +++ b/sd/kubernetes_provider.py @@ -1,4 +1,5 @@ -import os, sys +import os +import sys if __name__ == '__main__': sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) @@ -71,7 +72,6 @@ class K8SHeartbeatHandler(threading.Thread, K8SMixin): self.queue.put(event_message) - except Exception as exc: logger.error(exc) @@ -98,18 +98,18 @@ class K8SEventListener(threading.Thread, K8SMixin): resource_version = '' w = watch.Watch() for event in w.stream(self.v1.list_namespaced_event, namespace=self.namespace, - field_selector='involvedObject.kind=Pod'): + field_selector='involvedObject.kind=Pod'): if self.terminate: break resource_version = int(event['object'].metadata.resource_version) info = dict( - eType='WatchEvent', - pod=event['object'].involved_object.name, - reason=event['object'].reason, - message=event['object'].message, - start_up=self.at_start_up, + eType='WatchEvent', + pod=event['object'].involved_object.name, + reason=event['object'].reason, + message=event['object'].message, + start_up=self.at_start_up, ) self.at_start_up = False # logger.info('Received event: {}'.format(info)) @@ -135,7 +135,7 @@ class EventHandler(threading.Thread): def on_pod_started(self, event, **kwargs): try_cnt = 3 pod = None - while try_cnt > 0: + while try_cnt > 0: try_cnt -= 1 try: pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], namespace=self.namespace) @@ -203,6 +203,7 @@ class EventHandler(threading.Thread): except queue.Empty: continue + class KubernetesProviderSettings: def __init__(self, namespace, pod_patt, label_selector, in_cluster, poll_interval, **kwargs): self.namespace = namespace @@ -211,10 +212,12 @@ class KubernetesProviderSettings: self.in_cluster = in_cluster self.poll_interval = poll_interval + @singleton @ProviderManager.register_service_provider class KubernetesProvider(object): NAME = 'Kubernetes' + def __init__(self, settings, conn_mgr, **kwargs): self.namespace = settings.namespace self.pod_patt = settings.pod_patt @@ -233,27 +236,27 @@ class KubernetesProvider(object): self.v1 = 
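The sd package resolves its discovery backend by name: each provider class declares NAME and registers itself through a class decorator, and settings.py then picks one with ProviderManager.get_provider. The register_service_provider body is not shown in these hunks, so this sketch infers its likely shape from how it is used:

    class ProviderManager:
        PROVIDERS = {}

        @classmethod
        def register_service_provider(cls, target):
            cls.PROVIDERS[target.NAME] = target   # keyed by the NAME attribute
            return target

        @classmethod
        def get_provider(cls, name):
            return cls.PROVIDERS.get(name, None)

    @ProviderManager.register_service_provider
    class StaticProvider:
        NAME = 'Static'

    assert ProviderManager.get_provider('Static') is StaticProvider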
client.CoreV1Api() self.listener = K8SEventListener( - message_queue=self.queue, - namespace=self.namespace, - in_cluster=self.in_cluster, - v1=self.v1, - **kwargs - ) + message_queue=self.queue, + namespace=self.namespace, + in_cluster=self.in_cluster, + v1=self.v1, + **kwargs + ) self.pod_heartbeater = K8SHeartbeatHandler( - message_queue=self.queue, - namespace=self.namespace, - label_selector=self.label_selector, - in_cluster=self.in_cluster, - v1=self.v1, - poll_interval=self.poll_interval, - **kwargs - ) + message_queue=self.queue, + namespace=self.namespace, + label_selector=self.label_selector, + in_cluster=self.in_cluster, + v1=self.v1, + poll_interval=self.poll_interval, + **kwargs + ) self.event_handler = EventHandler(mgr=self, - message_queue=self.queue, - namespace=self.namespace, - pod_patt=self.pod_patt, **kwargs) + message_queue=self.queue, + namespace=self.namespace, + pod_patt=self.pod_patt, **kwargs) def add_pod(self, name, ip): self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) @@ -276,9 +279,11 @@ class KubernetesProvider(object): if __name__ == '__main__': logging.basicConfig(level=logging.INFO) + class Connect: def register(self, name, value): logger.error('Register: {} - {}'.format(name, value)) + def unregister(self, name): logger.error('Unregister: {}'.format(name)) @@ -289,16 +294,16 @@ if __name__ == '__main__': connect_mgr = Connect() settings = KubernetesProviderSettings( - namespace='xp', - pod_patt=".*-ro-servers-.*", - label_selector='tier=ro-servers', - poll_interval=5, - in_cluster=False) + namespace='xp', + pod_patt=".*-ro-servers-.*", + label_selector='tier=ro-servers', + poll_interval=5, + in_cluster=False) provider_class = ProviderManager.get_provider('Kubernetes') t = provider_class(conn_mgr=connect_mgr, - settings=settings - ) + settings=settings + ) t.start() cnt = 100 while cnt > 0: diff --git a/sd/static_provider.py b/sd/static_provider.py index 423d6c4d60..5c97c4efd0 100644 --- a/sd/static_provider.py +++ b/sd/static_provider.py @@ -1,4 +1,5 @@ -import os, sys +import os +import sys if __name__ == '__main__': sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) @@ -6,14 +7,17 @@ import socket from utils import singleton from sd import ProviderManager + class StaticProviderSettings: def __init__(self, hosts): self.hosts = hosts + @singleton @ProviderManager.register_service_provider class KubernetesProvider(object): NAME = 'Static' + def __init__(self, settings, conn_mgr, **kwargs): self.conn_mgr = conn_mgr self.hosts = [socket.gethostbyname(host) for host in settings.hosts] diff --git a/tracing/__init__.py b/tracing/__init__.py index 27c57473db..5014309a52 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -1,13 +1,14 @@ def empty_server_interceptor_decorator(target_server, interceptor): return target_server + class Tracer: def __init__(self, tracer=None, - interceptor=None, - server_decorator=empty_server_interceptor_decorator): + interceptor=None, + server_decorator=empty_server_interceptor_decorator): self.tracer = tracer self.interceptor = interceptor - self.server_decorator=server_decorator + self.server_decorator = server_decorator def decorate(self, server): return self.server_decorator(server, self.interceptor) @@ -16,7 +17,7 @@ class Tracer: self.tracer and self.tracer.close() def start_span(self, operation_name=None, - child_of=None, references=None, tags=None, - start_time=None, ignore_active_span=False): + child_of=None, references=None, tags=None, + start_time=None, 
ignore_active_span=False): return self.tracer.start_span(operation_name, child_of, - references, tags, start_time, ignore_active_span) + references, tags, start_time, ignore_active_span) diff --git a/tracing/factory.py b/tracing/factory.py index fd06fe3cac..648dfa291e 100644 --- a/tracing/factory.py +++ b/tracing/factory.py @@ -4,7 +4,7 @@ from grpc_opentracing.grpcext import intercept_server from grpc_opentracing import open_tracing_server_interceptor from tracing import (Tracer, - empty_server_interceptor_decorator) + empty_server_interceptor_decorator) logger = logging.getLogger(__name__) @@ -17,14 +17,14 @@ class TracerFactory: if tracer_type.lower() == 'jaeger': config = Config(config=tracer_config.TRACING_CONFIG, - service_name=tracer_config.TRACING_SERVICE_NAME, - validate=tracer_config.TRACING_VALIDATE - ) + service_name=tracer_config.TRACING_SERVICE_NAME, + validate=tracer_config.TRACING_VALIDATE + ) tracer = config.initialize_tracer() tracer_interceptor = open_tracing_server_interceptor(tracer, - log_payloads=tracer_config.TRACING_LOG_PAYLOAD, - span_decorator=span_decorator) + log_payloads=tracer_config.TRACING_LOG_PAYLOAD, + span_decorator=span_decorator) return Tracer(tracer, tracer_interceptor, intercept_server) diff --git a/utils/__init__.py b/utils/__init__.py index ec7f32bcbc..c1d55e76c0 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,5 +1,6 @@ from functools import wraps + def singleton(cls): instances = {} @wraps(cls) diff --git a/utils/logger_helper.py b/utils/logger_helper.py index 1b59aa40ec..55ce3206ab 100644 --- a/utils/logger_helper.py +++ b/utils/logger_helper.py @@ -9,18 +9,22 @@ class InfoFilter(logging.Filter): def filter(self, rec): return rec.levelno == logging.INFO + class DebugFilter(logging.Filter): def filter(self, rec): return rec.levelno == logging.DEBUG + class WarnFilter(logging.Filter): def filter(self, rec): return rec.levelno == logging.WARN + class ErrorFilter(logging.Filter): def filter(self, rec): return rec.levelno == logging.ERROR + class CriticalFilter(logging.Filter): def filter(self, rec): return rec.levelno == logging.CRITICAL @@ -36,6 +40,7 @@ COLORS = { 'ENDC': '\033[0m', } + class ColorFulFormatColMixin: def format_col(self, message_str, level_name): if level_name in COLORS.keys(): @@ -43,12 +48,14 @@ class ColorFulFormatColMixin: 'ENDC') return message_str + class ColorfulFormatter(logging.Formatter, ColorFulFormatColMixin): def format(self, record): message_str = super(ColorfulFormatter, self).format(record) return self.format_col(message_str, level_name=record.levelname) + def config(log_level, log_path, name, tz='UTC'): def build_log_file(level, log_path, name, tz): utc_now = datetime.datetime.utcnow() @@ -56,7 +63,7 @@ def config(log_level, log_path, name, tz='UTC'): local_tz = timezone(tz) tznow = utc_now.replace(tzinfo=utc_tz).astimezone(local_tz) return '{}-{}-{}.log'.format(os.path.join(log_path, name), tznow.strftime("%m-%d-%Y-%H:%M:%S"), - level) + level) if not os.path.exists(log_path): os.makedirs(log_path) @@ -66,10 +73,10 @@ def config(log_level, log_path, name, tz='UTC'): 'disable_existing_loggers': False, 'formatters': { 'default': { - 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)' + 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)' }, 'colorful_console': { - 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)', + 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s 
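utils.singleton appears only partially in the diff context (an instances dict under an @wraps(cls) wrapper). A conventional completion consistent with what is visible, offered as an assumption rather than the project's exact code:

    from functools import wraps

    def singleton(cls):
        instances = {}

        @wraps(cls)
        def get_instance(*args, **kwargs):
            if cls not in instances:
                instances[cls] = cls(*args, **kwargs)   # first call constructs
            return instances[cls]
        return get_instance

    @singleton
    class Config:
        pass

    assert Config() is Config()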
(%(filename)s:%(lineno)s)', '()': ColorfulFormatter, }, }, @@ -133,8 +140,8 @@ def config(log_level, log_path, name, tz='UTC'): }, 'loggers': { '': { - 'handlers': ['milvus_celery_console', 'milvus_info_file', 'milvus_debug_file', 'milvus_warn_file', \ - 'milvus_error_file', 'milvus_critical_file'], + 'handlers': ['milvus_celery_console', 'milvus_info_file', 'milvus_debug_file', 'milvus_warn_file', + 'milvus_error_file', 'milvus_critical_file'], 'level': log_level, 'propagate': False }, From 4455f539fab8fbf0343b7678a1b1182ac7afb2a3 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 13:54:37 +0800 Subject: [PATCH 056/307] code refactor for unused import --- mishards/connections.py | 2 -- mishards/models.py | 2 +- mishards/server.py | 2 -- mishards/service_handler.py | 1 - sd/kubernetes_provider.py | 5 ++--- 5 files changed, 3 insertions(+), 9 deletions(-) diff --git a/mishards/connections.py b/mishards/connections.py index ccd8e7e81b..22263e9e7e 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -1,8 +1,6 @@ import logging import threading -import socket from functools import wraps -from contextlib import contextmanager from milvus import Milvus from mishards import (settings, exceptions) diff --git a/mishards/models.py b/mishards/models.py index 54cf5f8ed9..4b6c8f9ef4 100644 --- a/mishards/models.py +++ b/mishards/models.py @@ -1,6 +1,6 @@ import logging from sqlalchemy import (Integer, Boolean, Text, - String, BigInteger, func, and_, or_, + String, BigInteger, and_, or_, Column) from sqlalchemy.orm import relationship, backref diff --git a/mishards/server.py b/mishards/server.py index 032d101cba..feb2176e86 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -7,8 +7,6 @@ from urllib.parse import urlparse from functools import wraps from concurrent import futures from grpc._cython import cygrpc -from grpc._channel import _Rendezvous, _UnaryUnaryMultiCallable -from jaeger_client import Config from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.grpc_utils import is_grpc_method from mishards.service_handler import ServiceHandler diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 2a1e0eef02..9d851ecfcb 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -1,7 +1,6 @@ import logging import time import datetime -from contextlib import contextmanager from collections import defaultdict from sqlalchemy import and_ diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py index 924f1fc8a4..8ee1588ec4 100644 --- a/sd/kubernetes_provider.py +++ b/sd/kubernetes_provider.py @@ -9,7 +9,6 @@ import time import copy import threading import queue -from functools import wraps from kubernetes import client, config, watch from utils import singleton @@ -17,7 +16,7 @@ from sd import ProviderManager logger = logging.getLogger(__name__) -incluster_namespace_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' +INCLUSTER_NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' class K8SMixin: @@ -27,7 +26,7 @@ class K8SMixin: self.kwargs = kwargs self.v1 = kwargs.get('v1', None) if not self.namespace: - self.namespace = open(incluster_namespace_path).read() + self.namespace = open(INCLUSTER_NAMESPACE_PATH).read() if not self.v1: config.load_incluster_config() if self.in_cluster else config.load_kube_config() From 7ccab1640f78ceb1555cc3633d5d6d140f693f7f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:04:37 +0800 Subject: 
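logger_helper's scheme is one file handler per severity, each guarded by a logging.Filter that accepts exactly one levelno, so INFO records land only in the info file and so on. A compact runnable sketch of the mechanism, using a single generic filter in place of the per-level classes:

    import logging

    class LevelFilter(logging.Filter):
        def __init__(self, levelno):
            super().__init__()
            self.levelno = levelno

        def filter(self, record):
            return record.levelno == self.levelno   # exactly this severity

    handler = logging.StreamHandler()
    handler.addFilter(LevelFilter(logging.WARNING))

    logger = logging.getLogger('demo')
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    logger.addHandler(handler)

    logger.warning('kept: matches the handler filter')
    logger.error('dropped: ERROR != WARNING for this handler')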
[PATCH 057/307] update pymilvus version --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e94f8d1597..ea338d0723 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ py==1.8.0 pyasn1==0.4.7 pyasn1-modules==0.2.6 pylint==2.3.1 -pymilvus-test==0.2.15 +pymilvus-test==0.2.21 #pymilvus==0.2.0 pyparsing==2.4.0 pytest==4.6.3 From f32d269eed453aa8dab638fc05c6d2f051fa7bd4 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:04:55 +0800 Subject: [PATCH 058/307] update for docker-compose --- start_services.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/start_services.yml b/start_services.yml index 5c779c5b82..b2d4d97cb6 100644 --- a/start_services.yml +++ b/start_services.yml @@ -3,7 +3,7 @@ services: milvus: runtime: nvidia restart: always - image: registry.zilliz.com/milvus/engine:branch-0.4.0-release-c58ca6 + image: registry.zilliz.com/milvus/engine:branch-0.5.0-release-4316de # ports: # - "0.0.0.0:19530:19530" volumes: @@ -21,13 +21,13 @@ services: mishards: restart: always - image: registry.zilliz.com/milvus/mishards:v0.0.2 + image: registry.zilliz.com/milvus/mishards:v0.0.3 ports: - "0.0.0.0:19530:19531" - "0.0.0.0:19532:19532" volumes: - /tmp/milvus/db:/tmp/milvus/db - - /tmp/mishards_env:/source/mishards/.env + # - /tmp/mishards_env:/source/mishards/.env command: ["python", "mishards/main.py"] environment: DEBUG: 'true' From fd735cc62efbd29980839454e1113afe95633178 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:17:08 +0800 Subject: [PATCH 059/307] change read .env and read .env.example --- mishards/.env.example | 2 +- mishards/settings.py | 18 ++++++++++-------- start_services.yml | 1 + 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/mishards/.env.example b/mishards/.env.example index 47a4549f04..bfea0a3edc 100644 --- a/mishards/.env.example +++ b/mishards/.env.example @@ -18,7 +18,7 @@ SD_STATIC_HOSTS=127.0.0.1 SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_ECHO=True -TESTING=True +TESTING=False #SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_TEST_ECHO=False diff --git a/mishards/settings.py b/mishards/settings.py index 4563538a08..1982a508e7 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -1,12 +1,15 @@ import sys import os -from dotenv import load_dotenv -load_dotenv('./mishards/.env.example') - from environs import Env env = Env() -env.read_env(override=True) + +FROM_EXAMPLE = env.bool('FROM_EXAMPLE', False) +if FROM_EXAMPLE: + from dotenv import load_dotenv + load_dotenv('./mishards/.env.example') +else: + env.read_env() DEBUG = env.bool('DEBUG', False) @@ -34,13 +37,11 @@ if SD_PROVIDER == 'Kubernetes': in_cluster=env.bool('SD_IN_CLUSTER', False), poll_interval=env.int('SD_POLL_INTERVAL', 5), pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), - label_selector=env.str('SD_LABEL_SELECTOR', '') - ) + label_selector=env.str('SD_LABEL_SELECTOR', '')) elif SD_PROVIDER == 'Static': from sd.static_provider import StaticProviderSettings SD_PROVIDER_SETTINGS = StaticProviderSettings( - hosts=env.list('SD_STATIC_HOSTS', []) - ) + hosts=env.list('SD_STATIC_HOSTS', [])) TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') @@ -72,6 +73,7 @@ class DefaultConfig: TESTING = 
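# ---- Editor's note: how the FROM_EXAMPLE switch introduced above behaves at
# runtime. docker-compose passes environment values as strings; environs
# parses them. A minimal sketch assuming the environs and python-dotenv
# packages already used by settings.py:
import os
from environs import Env

os.environ['FROM_EXAMPLE'] = 'true'         # as set in start_services.yml
env = Env()
if env.bool('FROM_EXAMPLE', False):         # the string 'true' parses to True
    from dotenv import load_dotenv
    load_dotenv('./mishards/.env.example')  # fall back to the checked-in example
else:
    env.read_env()                          # otherwise read a real .env file

DEBUG = env.bool('DEBUG', False)
# ---- end note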
env.bool('TESTING', False) if TESTING: + class TestingConfig(DefaultConfig): SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') SQL_ECHO = env.bool('SQL_TEST_ECHO', False) diff --git a/start_services.yml b/start_services.yml index b2d4d97cb6..c7a3c36f51 100644 --- a/start_services.yml +++ b/start_services.yml @@ -30,6 +30,7 @@ services: # - /tmp/mishards_env:/source/mishards/.env command: ["python", "mishards/main.py"] environment: + FROM_EXAMPLE: 'true' DEBUG: 'true' SERVER_PORT: 19531 WOSERVER: tcp://milvus:19530 From 4dd19f607d4ff23276864bd1b935fe415eaaa515 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:35:29 +0800 Subject: [PATCH 060/307] update build.sh --- build.sh | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/build.sh b/build.sh index 2b3c89bbf9..c46b6a8ea9 100755 --- a/build.sh +++ b/build.sh @@ -5,6 +5,8 @@ NORMAL=`tput sgr0` YELLOW='\033[1;33m' ENDC='\033[0m' +echo -e "${BOLD}MISHARDS_REGISTRY=${MISHARDS_REGISTRY}${ENDC}" + function build_image() { dockerfile=$1 remote_registry=$2 @@ -21,12 +23,17 @@ function build_image() { case "$1" in all) + [[ -z $MISHARDS_REGISTRY ]] && { + echo -e "${YELLOW}Error: Please set docker registry first:${ENDC}\n\t${BOLD}export MISHARDS_REGISTRY=xxxx${ENDC}" + exit 1 + } + version="" [[ ! -z $2 ]] && version=":${2}" - build_image "Dockerfile" "registry.zilliz.com/milvus/mishards${version}" "registry.zilliz.com/milvus/mishards" + build_image "Dockerfile" "${MISHARDS_REGISTRY}${version}" "${MISHARDS_REGISTRY}" ;; *) echo "Usage: [option...] {base | apps}" - echo "all, Usage: build.sh all [tagname|] => registry.zilliz.com/milvus/mishards:\${tagname}" + echo "all, Usage: build.sh all [tagname|] => ${MISHARDS_REGISTRY}:\${tagname}" ;; esac From 66fc20ee54f3040f22ee3b4a5f48d11e84c21056 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:35:29 +0800 Subject: [PATCH 061/307] update build.sh update build.sh --- build.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/build.sh b/build.sh index 2b3c89bbf9..8e142d0115 100755 --- a/build.sh +++ b/build.sh @@ -21,12 +21,17 @@ function build_image() { case "$1" in all) + [[ -z $MISHARDS_REGISTRY ]] && { + echo -e "${YELLOW}Error: Please set docker registry first:${ENDC}\n\t${BOLD}export MISHARDS_REGISTRY=xxxx\n${ENDC}" + exit 1 + } + version="" [[ ! -z $2 ]] && version=":${2}" - build_image "Dockerfile" "registry.zilliz.com/milvus/mishards${version}" "registry.zilliz.com/milvus/mishards" + build_image "Dockerfile" "${MISHARDS_REGISTRY}${version}" "${MISHARDS_REGISTRY}" ;; *) echo "Usage: [option...] 
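# ---- Editor's note: the MISHARDS_REGISTRY guard added to build.sh above is
# plain fail-fast validation of required configuration. The same shape in
# Python, with a hypothetical helper name:
import os
import sys

def require_env(name):
    value = os.environ.get(name)
    if not value:
        sys.exit('Error: Please set {0} first: export {0}=xxxx'.format(name))
    return value

# registry = require_env('MISHARDS_REGISTRY')
# ---- end note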
{base | apps}" - echo "all, Usage: build.sh all [tagname|] => registry.zilliz.com/milvus/mishards:\${tagname}" + echo "all, Usage: build.sh all [tagname|] => {docker_registry}:\${tagname}" ;; esac From 8a432bc472d903e7d783d71f84e2d61768813518 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:56:47 +0800 Subject: [PATCH 062/307] update k8s provider for sd --- sd/kubernetes_provider.py | 108 ++++++++++++++++++++++---------------- 1 file changed, 63 insertions(+), 45 deletions(-) diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py index 8ee1588ec4..9a15b2fa78 100644 --- a/sd/kubernetes_provider.py +++ b/sd/kubernetes_provider.py @@ -1,7 +1,8 @@ import os import sys if __name__ == '__main__': - sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + sys.path.append(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))) import re import logging @@ -9,6 +10,7 @@ import time import copy import threading import queue +import enum from kubernetes import client, config, watch from utils import singleton @@ -19,6 +21,11 @@ logger = logging.getLogger(__name__) INCLUSTER_NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' +class EventType(enum.Enum): + PodHeartBeat = 1 + Watch = 2 + + class K8SMixin: def __init__(self, namespace, in_cluster=False, **kwargs): self.namespace = namespace @@ -29,13 +36,22 @@ class K8SMixin: self.namespace = open(INCLUSTER_NAMESPACE_PATH).read() if not self.v1: - config.load_incluster_config() if self.in_cluster else config.load_kube_config() + config.load_incluster_config( + ) if self.in_cluster else config.load_kube_config() self.v1 = client.CoreV1Api() class K8SHeartbeatHandler(threading.Thread, K8SMixin): - def __init__(self, message_queue, namespace, label_selector, in_cluster=False, **kwargs): - K8SMixin.__init__(self, namespace=namespace, in_cluster=in_cluster, **kwargs) + def __init__(self, + message_queue, + namespace, + label_selector, + in_cluster=False, + **kwargs): + K8SMixin.__init__(self, + namespace=namespace, + in_cluster=in_cluster, + **kwargs) threading.Thread.__init__(self) self.queue = message_queue self.terminate = False @@ -45,13 +61,13 @@ class K8SHeartbeatHandler(threading.Thread, K8SMixin): def run(self): while not self.terminate: try: - pods = self.v1.list_namespaced_pod(namespace=self.namespace, label_selector=self.label_selector) - event_message = { - 'eType': 'PodHeartBeat', - 'events': [] - } + pods = self.v1.list_namespaced_pod( + namespace=self.namespace, + label_selector=self.label_selector) + event_message = {'eType': EventType.PodHeartBeat, 'events': []} for item in pods.items: - pod = self.v1.read_namespaced_pod(name=item.metadata.name, namespace=self.namespace) + pod = self.v1.read_namespaced_pod(name=item.metadata.name, + namespace=self.namespace) name = pod.metadata.name ip = pod.status.pod_ip phase = pod.status.phase @@ -59,13 +75,11 @@ class K8SHeartbeatHandler(threading.Thread, K8SMixin): message = pod.status.message ready = True if phase == 'Running' else False - pod_event = dict( - pod=name, - ip=ip, - ready=ready, - reason=reason, - message=message - ) + pod_event = dict(pod=name, + ip=ip, + ready=ready, + reason=reason, + message=message) event_message['events'].append(pod_event) @@ -82,7 +96,10 @@ class K8SHeartbeatHandler(threading.Thread, K8SMixin): class K8SEventListener(threading.Thread, K8SMixin): def __init__(self, message_queue, namespace, in_cluster=False, **kwargs): - K8SMixin.__init__(self, namespace=namespace, 
in_cluster=in_cluster, **kwargs) + K8SMixin.__init__(self, + namespace=namespace, + in_cluster=in_cluster, + **kwargs) threading.Thread.__init__(self) self.queue = message_queue self.terminate = False @@ -96,7 +113,8 @@ class K8SEventListener(threading.Thread, K8SMixin): def run(self): resource_version = '' w = watch.Watch() - for event in w.stream(self.v1.list_namespaced_event, namespace=self.namespace, + for event in w.stream(self.v1.list_namespaced_event, + namespace=self.namespace, field_selector='involvedObject.kind=Pod'): if self.terminate: break @@ -104,7 +122,7 @@ class K8SEventListener(threading.Thread, K8SMixin): resource_version = int(event['object'].metadata.resource_version) info = dict( - eType='WatchEvent', + eType=EventType.Watch, pod=event['object'].involved_object.name, reason=event['object'].reason, message=event['object'].message, @@ -137,7 +155,8 @@ class EventHandler(threading.Thread): while try_cnt > 0: try_cnt -= 1 try: - pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], namespace=self.namespace) + pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], + namespace=self.namespace) if not pod.status.pod_ip: time.sleep(0.5) continue @@ -147,13 +166,15 @@ class EventHandler(threading.Thread): if try_cnt <= 0 and not pod: if not event['start_up']: - logger.error('Pod {} is started but cannot read pod'.format(event['pod'])) + logger.error('Pod {} is started but cannot read pod'.format( + event['pod'])) return elif try_cnt <= 0 and not pod.status.pod_ip: logger.warn('NoPodIPFoundError') return - logger.info('Register POD {} with IP {}'.format(pod.metadata.name, pod.status.pod_ip)) + logger.info('Register POD {} with IP {}'.format( + pod.metadata.name, pod.status.pod_ip)) self.mgr.add_pod(name=pod.metadata.name, ip=pod.status.pod_ip) def on_pod_killing(self, event, **kwargs): @@ -178,7 +199,7 @@ class EventHandler(threading.Thread): logger.info(self.mgr.conn_mgr.conn_names) def handle_event(self, event): - if event['eType'] == 'PodHeartBeat': + if event['eType'] == EventType.PodHeartBeat: return self.on_pod_heartbeat(event) if not event or (event['reason'] not in ('Started', 'Killing')): @@ -204,7 +225,8 @@ class EventHandler(threading.Thread): class KubernetesProviderSettings: - def __init__(self, namespace, pod_patt, label_selector, in_cluster, poll_interval, **kwargs): + def __init__(self, namespace, pod_patt, label_selector, in_cluster, + poll_interval, **kwargs): self.namespace = namespace self.pod_patt = pod_patt self.label_selector = label_selector @@ -231,16 +253,15 @@ class KubernetesProvider(object): if not self.namespace: self.namespace = open(incluster_namespace_path).read() - config.load_incluster_config() if self.in_cluster else config.load_kube_config() + config.load_incluster_config( + ) if self.in_cluster else config.load_kube_config() self.v1 = client.CoreV1Api() - self.listener = K8SEventListener( - message_queue=self.queue, - namespace=self.namespace, - in_cluster=self.in_cluster, - v1=self.v1, - **kwargs - ) + self.listener = K8SEventListener(message_queue=self.queue, + namespace=self.namespace, + in_cluster=self.in_cluster, + v1=self.v1, + **kwargs) self.pod_heartbeater = K8SHeartbeatHandler( message_queue=self.queue, @@ -249,13 +270,13 @@ class KubernetesProvider(object): in_cluster=self.in_cluster, v1=self.v1, poll_interval=self.poll_interval, - **kwargs - ) + **kwargs) self.event_handler = EventHandler(mgr=self, message_queue=self.queue, namespace=self.namespace, - pod_patt=self.pod_patt, **kwargs) + pod_patt=self.pod_patt, + **kwargs) def 
add_pod(self, name, ip): self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) @@ -292,17 +313,14 @@ if __name__ == '__main__': connect_mgr = Connect() - settings = KubernetesProviderSettings( - namespace='xp', - pod_patt=".*-ro-servers-.*", - label_selector='tier=ro-servers', - poll_interval=5, - in_cluster=False) + settings = KubernetesProviderSettings(namespace='xp', + pod_patt=".*-ro-servers-.*", + label_selector='tier=ro-servers', + poll_interval=5, + in_cluster=False) provider_class = ProviderManager.get_provider('Kubernetes') - t = provider_class(conn_mgr=connect_mgr, - settings=settings - ) + t = provider_class(conn_mgr=connect_mgr, settings=settings) t.start() cnt = 100 while cnt > 0: From c4f7b7c4b2d206f0051cf79ac193ffa3500f7b58 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 16:11:40 +0800 Subject: [PATCH 063/307] update docker and git ignore --- .dockerignore | 2 ++ .gitignore | 2 ++ 2 files changed, 4 insertions(+) diff --git a/.dockerignore b/.dockerignore index d1012a3afd..7f608f71d6 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,7 @@ .git .gitignore .env +.coverage +cov_html/ mishards/.env diff --git a/.gitignore b/.gitignore index 624eb4fa58..8919efeb01 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ .env +.coverage +cov_html/ __pycache__/ From 8ad5d6c2d95a06df5e39200d6e7c9419789ecc2e Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 17:05:11 +0800 Subject: [PATCH 064/307] add test_grpc --- mishards/grpc_utils/test_grpc.py | 77 ++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 mishards/grpc_utils/test_grpc.py diff --git a/mishards/grpc_utils/test_grpc.py b/mishards/grpc_utils/test_grpc.py new file mode 100644 index 0000000000..068ee391e7 --- /dev/null +++ b/mishards/grpc_utils/test_grpc.py @@ -0,0 +1,77 @@ +import logging +import opentracing +from mishards.grpc_utils import GrpcSpanDecorator, is_grpc_method +from milvus.grpc_gen import status_pb2, milvus_pb2 + + +logger = logging.getLogger(__name__) + + +class TestTracer(opentracing.Tracer): + pass + +class TestSpan(opentracing.Span): + def __init__(self, context, tracer, **kwargs): + super(TestSpan, self).__init__(tracer, context) + self.reset() + + def set_tag(self, key, value): + self.tags.append({key:value}) + + def log_kv(self, key_values, timestamp=None): + self.logs.append(key_values) + + def reset(self): + self.tags = [] + self.logs = [] + + +class TestRpcInfo: + def __init__(self, request, response): + self.request = request + self.response = response + + +class TestGrpcUtils: + def test_span_deco(self): + request = 'request' + OK = status_pb2.Status(error_code=status_pb2.SUCCESS, reason='Success') + response = OK + rpc_info = TestRpcInfo(request=request, response=response) + span = TestSpan(context=None, tracer=TestTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + response = milvus_pb2.BoolReply(status=OK, bool_reply=False) + rpc_info = TestRpcInfo(request=request, response=response) + span = TestSpan(context=None, tracer=TestTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + response = 1 + rpc_info = TestRpcInfo(request=request, response=response) + span = TestSpan(context=None, tracer=TestTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + logger.error(span.logs) + assert len(span.logs) == 1 + assert len(span.tags) == 1 + + response = 0 + 
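# ---- Editor's note: the Test*-prefixed helper classes above are what patch
# 066 later renames to Fake*: in a test module, pytest collects classes named
# Test* and warns when they define __init__. Minimal illustration:

class TestHelper:        # pytest tries to collect this and warns on __init__
    def __init__(self):
        pass

class FakeHelper:        # invisible to pytest collection
    def __init__(self):
        pass
# ---- end note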
rpc_info = TestRpcInfo(request=request, response=response) + span = TestSpan(context=None, tracer=TestTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + logger.error(span.logs) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + def test_is_grpc_method(self): + target = 1 + assert not is_grpc_method(target) + target = None + assert not is_grpc_method(target) From 4aa29968a68ad16abefe29941e43c5148c99164b Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 16 Oct 2019 14:19:01 +0800 Subject: [PATCH 065/307] update for TESTING changes --- conftest.py | 10 ++++++++++ mishards/__init__.py | 2 +- mishards/connections.py | 2 +- mishards/grpc_utils/test_grpc.py | 2 -- mishards/main.py | 3 +-- mishards/server.py | 2 +- mishards/settings.py | 22 ++++++++++++---------- mishards/test_connections.py | 26 +++++++++++++++++++++++++- requirements.txt | 1 + tracing/factory.py | 12 ++++++++---- 10 files changed, 60 insertions(+), 22 deletions(-) diff --git a/conftest.py b/conftest.py index d6c9f3acc7..1aba5b32cf 100644 --- a/conftest.py +++ b/conftest.py @@ -1,5 +1,6 @@ import logging import pytest +import grpc from mishards import settings, db, create_app logger = logging.getLogger(__name__) @@ -14,3 +15,12 @@ def app(request): yield app db.drop_all() + +@pytest.fixture +def started_app(app): + app.on_pre_run() + app.start(app.port) + + yield app + + app.stop() diff --git a/mishards/__init__.py b/mishards/__init__.py index 47d8adb6e3..4bd77d8c60 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -24,7 +24,7 @@ def create_app(testing_config=None): from tracing.factory import TracerFactory from mishards.grpc_utils import GrpcSpanDecorator - tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, + tracer = TracerFactory.new_tracer(config.TRACING_TYPE, settings.TracingConfig, span_decorator=GrpcSpanDecorator()) grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, discover=discover) diff --git a/mishards/connections.py b/mishards/connections.py index 22263e9e7e..7db271381c 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -18,7 +18,7 @@ class Connection: self.conn = Milvus() self.error_handlers = [] if not error_handlers else error_handlers self.on_retry_func = kwargs.get('on_retry_func', None) - self._connect() + # self._connect() def __str__(self): return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri) diff --git a/mishards/grpc_utils/test_grpc.py b/mishards/grpc_utils/test_grpc.py index 068ee391e7..d8511c8d6c 100644 --- a/mishards/grpc_utils/test_grpc.py +++ b/mishards/grpc_utils/test_grpc.py @@ -57,7 +57,6 @@ class TestGrpcUtils: span = TestSpan(context=None, tracer=TestTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) - logger.error(span.logs) assert len(span.logs) == 1 assert len(span.tags) == 1 @@ -66,7 +65,6 @@ class TestGrpcUtils: span = TestSpan(context=None, tracer=TestTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) - logger.error(span.logs) assert len(span.logs) == 0 assert len(span.tags) == 0 diff --git a/mishards/main.py b/mishards/main.py index 3f69484ee4..c0d142607b 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -6,8 +6,7 @@ from mishards import (settings, create_app) def main(): - server = create_app( - settings.TestingConfig if settings.TESTING else settings.DefaultConfig) + server = create_app(settings.DefaultConfig) server.run(port=settings.SERVER_PORT) return 0 diff --git a/mishards/server.py b/mishards/server.py index 
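# ---- Editor's note: the started_app fixture above follows the standard
# pytest yield-fixture shape: everything before `yield` is setup, everything
# after is teardown, and teardown runs even when the test fails. Generic
# sketch with stand-in names:
import pytest

@pytest.fixture
def started_server():
    server = {'running': True}   # stand-in for app.on_pre_run() + app.start(port)
    yield server                 # the test body executes here
    server['running'] = False    # stand-in for app.stop()
# ---- end note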
feb2176e86..dcaacd0fbc 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -39,7 +39,7 @@ class Server: self.register_pre_run_handler(self.pre_run_handler) def pre_run_handler(self): - woserver = settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER + woserver = settings.WOSERVER url = urlparse(woserver) ip = socket.gethostbyname(url.hostname) socket.inet_pton(socket.AF_INET, ip) diff --git a/mishards/settings.py b/mishards/settings.py index 1982a508e7..c9b62717d4 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -43,10 +43,7 @@ elif SD_PROVIDER == 'Static': SD_PROVIDER_SETTINGS = StaticProviderSettings( hosts=env.list('SD_STATIC_HOSTS', [])) -TESTING = env.bool('TESTING', False) -TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') - -TRACING_TYPE = env.str('TRACING_TYPE', '') +# TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') class TracingConfig: @@ -64,19 +61,24 @@ class TracingConfig: }, 'logging': env.bool('TRACING_LOGGING', True) } + DEFAULT_TRACING_CONFIG = { + 'sampler': { + 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), + 'param': env.str('TRACING_SAMPLER_PARAM', "0"), + } + } class DefaultConfig: SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') SQL_ECHO = env.bool('SQL_ECHO', False) + TRACING_TYPE = env.str('TRACING_TYPE', '') -TESTING = env.bool('TESTING', False) -if TESTING: - - class TestingConfig(DefaultConfig): - SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') - SQL_ECHO = env.bool('SQL_TEST_ECHO', False) +class TestingConfig(DefaultConfig): + SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') + SQL_ECHO = env.bool('SQL_TEST_ECHO', False) + TRACING_TYPE = env.str('TRACING_TEST_TYPE', '') if __name__ == '__main__': diff --git a/mishards/test_connections.py b/mishards/test_connections.py index f1c54f0c61..819d2e03da 100644 --- a/mishards/test_connections.py +++ b/mishards/test_connections.py @@ -1,6 +1,8 @@ import logging import pytest +import mock +from milvus import Milvus from mishards.connections import (ConnectionMgr, Connection) from mishards import exceptions @@ -27,6 +29,12 @@ class TestConnection: mgr.register('WOSERVER', 'xxxx') assert len(mgr.conn_names) == 0 + assert not mgr.conn('XXXX', None) + with pytest.raises(exceptions.ConnectionNotFoundError): + mgr.conn('XXXX', None, True) + + mgr.conn('WOSERVER', None) + def test_connection(self): class Conn: def __init__(self, state): @@ -37,6 +45,7 @@ class TestConnection: def connected(self): return self.state + FAIL_CONN = Conn(False) PASS_CONN = Conn(True) @@ -58,7 +67,9 @@ class TestConnection: max_retry = 3 RetryObj = Retry() - c = Connection('client', uri='', + + c = Connection('client', + uri='xx', max_retry=max_retry, on_retry_func=RetryObj) c.conn = FAIL_CONN @@ -75,3 +86,16 @@ class TestConnection: this_connect() assert ff.executed assert RetryObj.times == 0 + + this_connect = c.connect(func=None) + with pytest.raises(TypeError): + this_connect() + + errors = [] + + def error_handler(err): + errors.append(err) + + this_connect = c.connect(func=None, exception_handler=error_handler) + this_connect() + assert len(errors) == 1 diff --git a/requirements.txt b/requirements.txt index ea338d0723..133cfac8ab 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,3 +33,4 @@ SQLAlchemy==1.3.5 urllib3==1.25.3 jaeger-client>=3.4.0 grpcio-opentracing>=1.0 +mock==2.0.0 diff --git a/tracing/factory.py b/tracing/factory.py index 648dfa291e..0c14d9d536 100644 --- a/tracing/factory.py +++ 
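# ---- Editor's note: a 'const' sampler with param 0 samples no spans, so the
# DEFAULT_TRACING_CONFIG above keeps jaeger inert until explicitly enabled.
# Minimal jaeger_client initialization mirroring what TracerFactory does; the
# service name is illustrative:
from jaeger_client import Config

config = Config(config={'sampler': {'type': 'const', 'param': 0},
                        'logging': True},
                service_name='mishards-demo',
                validate=True)
tracer = config.initialize_tracer()   # one opentracing tracer per process
with tracer.start_span('demo-span') as span:
    span.set_tag('demo', True)
tracer.close()
# ---- end note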
b/tracing/factory.py @@ -12,13 +12,17 @@ logger = logging.getLogger(__name__) class TracerFactory: @classmethod def new_tracer(cls, tracer_type, tracer_config, span_decorator=None, **kwargs): + config = tracer_config.TRACING_CONFIG + service_name = tracer_config.TRACING_SERVICE_NAME + validate=tracer_config.TRACING_VALIDATE if not tracer_type: - return Tracer() + tracer_type = 'jaeger' + config = tracer_config.DEFAULT_TRACING_CONFIG if tracer_type.lower() == 'jaeger': - config = Config(config=tracer_config.TRACING_CONFIG, - service_name=tracer_config.TRACING_SERVICE_NAME, - validate=tracer_config.TRACING_VALIDATE + config = Config(config=config, + service_name=service_name, + validate=validate ) tracer = config.initialize_tracer() From 9012f47a101228f956d04cc2eae804f38ca4e50e Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 16 Oct 2019 17:38:34 +0800 Subject: [PATCH 066/307] changes for unit test --- mishards/grpc_utils/test_grpc.py | 24 +++++++-------- mishards/service_handler.py | 51 ++++++++++++++++++++++++-------- tracing/factory.py | 8 +++-- 3 files changed, 56 insertions(+), 27 deletions(-) diff --git a/mishards/grpc_utils/test_grpc.py b/mishards/grpc_utils/test_grpc.py index d8511c8d6c..314fccfe00 100644 --- a/mishards/grpc_utils/test_grpc.py +++ b/mishards/grpc_utils/test_grpc.py @@ -7,12 +7,12 @@ from milvus.grpc_gen import status_pb2, milvus_pb2 logger = logging.getLogger(__name__) -class TestTracer(opentracing.Tracer): +class FakeTracer(opentracing.Tracer): pass -class TestSpan(opentracing.Span): +class FakeSpan(opentracing.Span): def __init__(self, context, tracer, **kwargs): - super(TestSpan, self).__init__(tracer, context) + super(FakeSpan, self).__init__(tracer, context) self.reset() def set_tag(self, key, value): @@ -26,7 +26,7 @@ class TestSpan(opentracing.Span): self.logs = [] -class TestRpcInfo: +class FakeRpcInfo: def __init__(self, request, response): self.request = request self.response = response @@ -37,32 +37,32 @@ class TestGrpcUtils: request = 'request' OK = status_pb2.Status(error_code=status_pb2.SUCCESS, reason='Success') response = OK - rpc_info = TestRpcInfo(request=request, response=response) - span = TestSpan(context=None, tracer=TestTracer()) + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) assert len(span.logs) == 0 assert len(span.tags) == 0 response = milvus_pb2.BoolReply(status=OK, bool_reply=False) - rpc_info = TestRpcInfo(request=request, response=response) - span = TestSpan(context=None, tracer=TestTracer()) + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) assert len(span.logs) == 0 assert len(span.tags) == 0 response = 1 - rpc_info = TestRpcInfo(request=request, response=response) - span = TestSpan(context=None, tracer=TestTracer()) + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) assert len(span.logs) == 1 assert len(span.tags) == 1 response = 0 - rpc_info = TestRpcInfo(request=request, response=response) - span = TestSpan(context=None, tracer=TestTracer()) + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) assert len(span.logs) == 0 diff --git 
a/mishards/service_handler.py b/mishards/service_handler.py index 9d851ecfcb..113ec3ca20 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -237,13 +237,15 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) + def _add_vectors(self, param, metadata=None): + return self.connection(metadata=metadata).add_vectors(None, None, insert_param=param) + @mark_grpc_method def Insert(self, request, context): logger.info('Insert') # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' - _status, _ids = self.connection(metadata={ - 'resp_class': milvus_pb2.VectorIds - }).add_vectors(None, None, insert_param=request) + _status, _ids = self._add_vectors(metadata={ + 'resp_class': milvus_pb2.VectorIds}, param=request) return milvus_pb2.VectorIds( status=status_pb2.Status(error_code=_status.code, reason=_status.message), vector_id_array=_ids @@ -305,6 +307,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def SearchInFiles(self, request, context): raise NotImplemented() + def _describe_table(self, table_name, metadata=None): + return self.connection(metadata=metadata).describe_table(table_name) + @mark_grpc_method def DescribeTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -319,7 +324,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): } logger.info('DescribeTable {}'.format(_table_name)) - _status, _table = self.connection(metadata=metadata).describe_table(_table_name) + _status, _table = self._describe_table(metadata=metadata, table_name=_table_name) if _status.OK(): return milvus_pb2.TableSchema( @@ -335,6 +340,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): status=status_pb2.Status(error_code=_status.code, reason=_status.message), ) + def _count_table(self, table_name, metadata=None): + return self.connection(metadata=metadata).get_table_row_count(table_name) + @mark_grpc_method def CountTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -351,12 +359,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): metadata = { 'resp_class': milvus_pb2.TableRowCount } - _status, _count = self.connection(metadata=metadata).get_table_row_count(_table_name) + _status, _count = self._count_table(_table_name, metadata=metadata) return milvus_pb2.TableRowCount( status=status_pb2.Status(error_code=_status.code, reason=_status.message), table_row_count=_count if isinstance(_count, int) else -1) + + def _get_server_version(self, metadata=None): + return self.connection(metadata=metadata).server_version() + @mark_grpc_method def Cmd(self, request, context): _status, _cmd = Parser.parse_proto_Command(request) @@ -364,7 +376,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if not _status.OK(): return milvus_pb2.StringReply( - status_pb2.Status(error_code=_status.code, reason=_status.message) + status=status_pb2.Status(error_code=_status.code, reason=_status.message) ) metadata = { @@ -372,7 +384,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): } if _cmd == 'version': - _status, _reply = self.connection(metadata=metadata).server_version() + _status, _reply = self._get_server_version(metadata=metadata) else: _status, _reply = self.connection(metadata=metadata).server_status() @@ -381,19 +393,25 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): string_reply=_reply ) + def _show_tables(self): + 
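# ---- Editor's note: these one-line _describe_table/_count_table/... wrappers
# exist so tests can patch the backend boundary without a live Milvus, which
# is exactly what test_server.py does below. The pattern in isolation, using
# the stdlib equivalent of the mock package from requirements.txt:
from unittest import mock

class Handler:
    def _count_table(self, table_name):
        raise RuntimeError('would call the real backend')

    def CountTable(self, table_name):
        status, count = self._count_table(table_name)
        return count if status == 'OK' else -1

Handler._count_table = mock.MagicMock(return_value=('OK', 42))
assert Handler().CountTable('demo_table') == 42
# ---- end note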
return self.connection(metadata=metadata).show_tables() + @mark_grpc_method def ShowTables(self, request, context): logger.info('ShowTables') metadata = { 'resp_class': milvus_pb2.TableName } - _status, _results = self.connection(metadata=metadata).show_tables() + _status, _results = self._show_tables() return milvus_pb2.TableNameList( status=status_pb2.Status(error_code=_status.code, reason=_status.message), table_names=_results ) + def _delete_by_range(self, table_name, start_date, end_date): + return self.connection().delete_vectors_by_range(table_name, start_date, end_date) + @mark_grpc_method def DeleteByRange(self, request, context): _status, unpacks = \ @@ -405,9 +423,12 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _table_name, _start_date, _end_date = unpacks logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, _end_date)) - _status = self.connection().delete_vectors_by_range(_table_name, _start_date, _end_date) + _status = self._delete_by_range(_table_name, _start_date, _end_date) return status_pb2.Status(error_code=_status.code, reason=_status.message) + def _preload_table(self, table_name): + return self.connection().preload_table(table_name) + @mark_grpc_method def PreloadTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -416,9 +437,12 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) logger.info('PreloadTable {}'.format(_table_name)) - _status = self.connection().preload_table(_table_name) + _status = self._preload_table(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) + def _describe_index(self, table_name, metadata=None): + return self.connection(metadata=metadata).describe_index(table_name) + @mark_grpc_method def DescribeIndex(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -433,13 +457,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): } logger.info('DescribeIndex {}'.format(_table_name)) - _status, _index_param = self.connection(metadata=metadata).describe_index(_table_name) + _status, _index_param = self._describe_index(table_name=_table_name, metadata=metadata) _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) return milvus_pb2.IndexParam(status=status_pb2.Status(error_code=_status.code, reason=_status.message), table_name=_table_name, index=_index) + def _drop_index(self, table_name): + return self.connection().drop_index(table_name) + @mark_grpc_method def DropIndex(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -448,5 +475,5 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) logger.info('DropIndex {}'.format(_table_name)) - _status = self.connection().drop_index(_table_name) + _status = self._drop_index(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) diff --git a/tracing/factory.py b/tracing/factory.py index 0c14d9d536..61cd75fcd6 100644 --- a/tracing/factory.py +++ b/tracing/factory.py @@ -12,12 +12,14 @@ logger = logging.getLogger(__name__) class TracerFactory: @classmethod def new_tracer(cls, tracer_type, tracer_config, span_decorator=None, **kwargs): + if not tracer_type: + return Tracer() config = tracer_config.TRACING_CONFIG service_name = tracer_config.TRACING_SERVICE_NAME 
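# ---- Editor's note: the do_search block above fans one query out to every
# routed shard in a thread pool and gathers per-shard top-k results before
# merging. A dependency-free sketch of that scatter/gather shape:
from concurrent.futures import ThreadPoolExecutor

def search_one_shard(addr, file_ids):
    # Stand-in for conn.search_vectors_in_files(...) against one server.
    return [(addr, file_id) for file_id in file_ids]

routing = {'10.0.0.1:19530': ['1', '2'], '10.0.0.2:19530': ['3']}
all_topk_results = []
with ThreadPoolExecutor(max_workers=len(routing)) as pool:
    futures = [pool.submit(search_one_shard, addr, file_ids)
               for addr, file_ids in routing.items()]
    for future in futures:
        all_topk_results.append(future.result())
print(all_topk_results)   # one partial result list per shard, merged afterwards
# ---- end note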
validate=tracer_config.TRACING_VALIDATE - if not tracer_type: - tracer_type = 'jaeger' - config = tracer_config.DEFAULT_TRACING_CONFIG + # if not tracer_type: + # tracer_type = 'jaeger' + # config = tracer_config.DEFAULT_TRACING_CONFIG if tracer_type.lower() == 'jaeger': config = Config(config=config, From e0498e081df88eecb646c9d86cf744412f908902 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 17 Oct 2019 14:13:50 +0800 Subject: [PATCH 067/307] update for server test update for server test --- mishards/factories.py | 5 +- mishards/service_handler.py | 37 +++-- mishards/test_server.py | 279 ++++++++++++++++++++++++++++++++++++ tracing/__init__.py | 13 ++ 4 files changed, 320 insertions(+), 14 deletions(-) create mode 100644 mishards/test_server.py diff --git a/mishards/factories.py b/mishards/factories.py index c4037fe2d7..52c0253b39 100644 --- a/mishards/factories.py +++ b/mishards/factories.py @@ -6,6 +6,7 @@ from factory.alchemy import SQLAlchemyModelFactory from faker import Faker from faker.providers import BaseProvider +from milvus.client.types import MetricType from mishards import db from mishards.models import Tables, TableFiles @@ -27,12 +28,12 @@ class TablesFactory(SQLAlchemyModelFactory): id = factory.Faker('random_number', digits=16, fix_len=True) table_id = factory.Faker('uuid4') - state = factory.Faker('random_element', elements=(0, 1, 2, 3)) + state = factory.Faker('random_element', elements=(0, 1)) dimension = factory.Faker('random_element', elements=(256, 512)) created_on = int(time.time()) index_file_size = 0 engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) - metric_type = factory.Faker('random_element', elements=(0, 1)) + metric_type = factory.Faker('random_element', elements=(MetricType.L2, MetricType.IP)) nlist = 16384 diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 113ec3ca20..e04965c12a 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -125,8 +125,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): range_array = [self._range_to_date(r, metadata=metadata) for r in range_array] if range_array else None routing = {} + p_span = None if self.tracer.empty else context.get_active_span().context with self.tracer.start_span('get_routing', - child_of=context.get_active_span().context): + child_of=p_span): routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) @@ -145,9 +146,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): conn = self.query_conn(addr, metadata=metadata) start = time.time() span = kwargs.get('span', None) - span = span if span else context.get_active_span().context + span = span if span else (None if self.tracer.empty else context.get_active_span().context) + with self.tracer.start_span('search_{}'.format(addr), - child_of=context.get_active_span().context): + child_of=span): ret = conn.search_vectors_in_files(table_name=query_params['table_id'], file_ids=query_params['file_ids'], query_records=vectors, @@ -160,7 +162,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): all_topk_results.append(ret) with self.tracer.start_span('do_search', - child_of=context.get_active_span().context) as span: + child_of=p_span) as span: with ThreadPoolExecutor(max_workers=workers) as pool: for addr, params in routing.items(): res = pool.submit(search, addr, params, vectors, topk, nprobe, span=span) @@ -171,9 +173,12 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reverse = 
table_meta.metric_type == Types.MetricType.IP with self.tracer.start_span('do_merge', - child_of=context.get_active_span().context): + child_of=p_span): return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) + def _create_table(self, table_schema): + return self.connection().create_table(table_schema) + @mark_grpc_method def CreateTable(self, request, context): _status, _table_schema = Parser.parse_proto_TableSchema(request) @@ -183,10 +188,13 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('CreateTable {}'.format(_table_schema['table_name'])) - _status = self.connection().create_table(_table_schema) + _status = self._create_table(_table_schema) return status_pb2.Status(error_code=_status.code, reason=_status.message) + def _has_table(self, table_name, metadata=None): + return self.connection(metadata=metadata).has_table(table_name) + @mark_grpc_method def HasTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -199,15 +207,17 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('HasTable {}'.format(_table_name)) - _bool = self.connection(metadata={ - 'resp_class': milvus_pb2.BoolReply - }).has_table(_table_name) + _bool = self._has_table(_table_name, metadata={ + 'resp_class': milvus_pb2.BoolReply}) return milvus_pb2.BoolReply( status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="OK"), bool_reply=_bool ) + def _delete_table(self, table_name): + return self.connection().delete_table(table_name) + @mark_grpc_method def DropTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -217,10 +227,13 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('DropTable {}'.format(_table_name)) - _status = self.connection().delete_table(_table_name) + _status = self._delete_table(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) + def _create_index(self, table_name, index): + return self.connection().create_index(table_name, index) + @mark_grpc_method def CreateIndex(self, request, context): _status, unpacks = Parser.parse_proto_IndexParam(request) @@ -233,7 +246,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('CreateIndex {}'.format(_table_name)) # TODO: interface create_table incompleted - _status = self.connection().create_index(_table_name, _index) + _status = self._create_index(_table_name, _index) return status_pb2.Status(error_code=_status.code, reason=_status.message) @@ -298,7 +311,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('SearchVector takes: {}'.format(now - start)) topk_result_list = milvus_pb2.TopKQueryResultList( - status=status, + status=status_pb2.Status(error_code=status.error_code, reason=status.reason), topk_query_result=results ) return topk_result_list diff --git a/mishards/test_server.py b/mishards/test_server.py new file mode 100644 index 0000000000..e9a7c0d878 --- /dev/null +++ b/mishards/test_server.py @@ -0,0 +1,279 @@ +import logging +import pytest +import mock +import datetime +import random +import faker +import inspect +from milvus import Milvus +from milvus.client.types import Status, IndexType, MetricType +from milvus.client.Abstract import IndexParam, TableSchema +from milvus.grpc_gen import status_pb2, milvus_pb2 +from mishards import db, create_app, settings +from mishards.service_handler import ServiceHandler +from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as 
Parser +from mishards.factories import TableFilesFactory, TablesFactory, TableFiles, Tables + +logger = logging.getLogger(__name__) + +OK = Status(code=Status.SUCCESS, message='Success') +BAD = Status(code=Status.PERMISSION_DENIED, message='Fail') + + +@pytest.mark.usefixtures('started_app') +class TestServer: + def client(self, port): + m = Milvus() + m.connect(host='localhost', port=port) + return m + + def test_server_start(self, started_app): + assert started_app.conn_mgr.metas.get('WOSERVER') == settings.WOSERVER + + def test_cmd(self, started_app): + ServiceHandler._get_server_version = mock.MagicMock(return_value=(OK, + '')) + status, _ = self.client(started_app.port).server_version() + assert status.OK() + + Parser.parse_proto_Command = mock.MagicMock(return_value=(BAD, 'cmd')) + status, _ = self.client(started_app.port).server_version() + assert not status.OK() + + def test_drop_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + ServiceHandler._drop_index = mock.MagicMock(return_value=OK) + status = self.client(started_app.port).drop_index(table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client(started_app.port).drop_index(table_name) + assert not status.OK() + + def test_describe_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + index_type = IndexType.FLAT + nlist = 1 + index_param = IndexParam(table_name=table_name, + index_type=index_type, + nlist=nlist) + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._describe_index = mock.MagicMock( + return_value=(OK, index_param)) + status, ret = self.client(started_app.port).describe_index(table_name) + assert status.OK() + assert ret._table_name == index_param._table_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status, _ = self.client(started_app.port).describe_index(table_name) + assert not status.OK() + + def test_preload(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._preload_table = mock.MagicMock(return_value=OK) + status = self.client(started_app.port).preload_table(table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client(started_app.port).preload_table(table_name) + assert not status.OK() + + def test_delete_by_range(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + unpacked = table_name, datetime.datetime.today( + ), datetime.datetime.today() + + Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( + return_value=(OK, unpacked)) + ServiceHandler._delete_by_range = mock.MagicMock(return_value=OK) + status = self.client(started_app.port).delete_vectors_by_range( + *unpacked) + assert status.OK() + + Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( + return_value=(BAD, unpacked)) + status = self.client(started_app.port).delete_vectors_by_range( + *unpacked) + assert not status.OK() + + def test_count_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + count = random.randint(100, 200) + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._count_table = mock.MagicMock(return_value=(OK, count)) + status, ret = self.client( + started_app.port).get_table_row_count(table_name) + 
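# ---- Editor's note: each test above derives its table name from the running
# test function via inspect, which keeps table names unique per test with no
# extra bookkeeping. In isolation:
import inspect

def test_count_table():
    table_name = inspect.currentframe().f_code.co_name
    assert table_name == 'test_count_table'
# ---- end note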
assert status.OK() + assert ret == count + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status, _ = self.client( + started_app.port).get_table_row_count(table_name) + assert not status.OK() + + def test_show_tables(self, started_app): + tables = ['t1', 't2'] + ServiceHandler._show_tables = mock.MagicMock(return_value=(OK, tables)) + status, ret = self.client(started_app.port).show_tables() + assert status.OK() + assert ret == tables + + def test_describe_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + dimension = 128 + nlist = 1 + table_schema = TableSchema(table_name=table_name, + index_file_size=100, + metric_type=MetricType.L2, + dimension=dimension) + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_schema.table_name)) + ServiceHandler._describe_table = mock.MagicMock( + return_value=(OK, table_schema)) + status, _ = self.client(started_app.port).describe_table(table_name) + assert status.OK() + + ServiceHandler._describe_table = mock.MagicMock( + return_value=(BAD, table_schema)) + status, _ = self.client(started_app.port).describe_table(table_name) + assert not status.OK() + + Parser.parse_proto_TableName = mock.MagicMock(return_value=(BAD, + 'cmd')) + status, ret = self.client(started_app.port).describe_table(table_name) + assert not status.OK() + + def test_insert(self, started_app): + table_name = inspect.currentframe().f_code.co_name + vectors = [[random.random() for _ in range(16)] for _ in range(10)] + ids = [random.randint(1000000, 20000000) for _ in range(10)] + ServiceHandler._add_vectors = mock.MagicMock(return_value=(OK, ids)) + status, ret = self.client(started_app.port).add_vectors( + table_name=table_name, records=vectors) + assert status.OK() + assert ids == ret + + def test_create_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + unpacks = table_name, None + Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(OK, + unpacks)) + ServiceHandler._create_index = mock.MagicMock(return_value=OK) + status = self.client( + started_app.port).create_index(table_name=table_name) + assert status.OK() + + Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(BAD, + None)) + status = self.client( + started_app.port).create_index(table_name=table_name) + assert not status.OK() + + def test_drop_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._delete_table = mock.MagicMock(return_value=OK) + status = self.client( + started_app.port).delete_table(table_name=table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client( + started_app.port).delete_table(table_name=table_name) + assert not status.OK() + + def test_has_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._has_table = mock.MagicMock(return_value=True) + has = self.client(started_app.port).has_table(table_name=table_name) + assert has + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + has = self.client(started_app.port).has_table(table_name=table_name) + assert not has + + def test_create_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + dimension = 128 + table_schema = 
dict(table_name=table_name, + index_file_size=100, + metric_type=MetricType.L2, + dimension=dimension) + + ServiceHandler._create_table = mock.MagicMock(return_value=OK) + status = self.client(started_app.port).create_table(table_schema) + assert status.OK() + + Parser.parse_proto_TableSchema = mock.MagicMock(return_value=(BAD, + None)) + status = self.client(started_app.port).create_table(table_schema) + assert not status.OK() + + def random_data(self, n, dimension): + return [[random.random() for _ in range(dimension)] for _ in range(n)] + + def test_search(self, started_app): + table_name = inspect.currentframe().f_code.co_name + to_index_cnt = random.randint(10, 20) + table = TablesFactory(table_id=table_name, state=Tables.NORMAL) + to_index_files = TableFilesFactory.create_batch( + to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX) + topk = random.randint(5, 10) + nq = random.randint(5, 10) + param = { + 'table_name': table_name, + 'query_records': self.random_data(nq, table.dimension), + 'top_k': topk, + 'nprobe': 2049 + } + + result = [ + milvus_pb2.TopKQueryResult(query_result_arrays=[ + milvus_pb2.QueryResult(id=i, distance=random.random()) + for i in range(topk) + ]) for i in range(nq) + ] + + mock_results = milvus_pb2.TopKQueryResultList(status=status_pb2.Status( + error_code=status_pb2.SUCCESS, reason="Success"), + topk_query_result=result) + + table_schema = TableSchema(table_name=table_name, + index_file_size=table.index_file_size, + metric_type=table.metric_type, + dimension=table.dimension) + + status, _ = self.client(started_app.port).search_vectors(**param) + assert status.code == Status.ILLEGAL_ARGUMENT + + param['nprobe'] = 2048 + Milvus.describe_table = mock.MagicMock(return_value=(BAD, + table_schema)) + status, ret = self.client(started_app.port).search_vectors(**param) + assert status.code == Status.TABLE_NOT_EXISTS + + Milvus.describe_table = mock.MagicMock(return_value=(OK, table_schema)) + Milvus.search_vectors_in_files = mock.MagicMock( + return_value=mock_results) + + status, ret = self.client(started_app.port).search_vectors(**param) + assert status.OK() + assert len(ret) == nq diff --git a/tracing/__init__.py b/tracing/__init__.py index 5014309a52..a1974e2204 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -1,6 +1,13 @@ +from contextlib import contextmanager + def empty_server_interceptor_decorator(target_server, interceptor): return target_server +@contextmanager +def EmptySpan(*args, **kwargs): + yield None + return + class Tracer: def __init__(self, tracer=None, @@ -13,11 +20,17 @@ class Tracer: def decorate(self, server): return self.server_decorator(server, self.interceptor) + @property + def empty(self): + return self.tracer is None + def close(self): self.tracer and self.tracer.close() def start_span(self, operation_name=None, child_of=None, references=None, tags=None, start_time=None, ignore_active_span=False): + if self.empty: + return EmptySpan() return self.tracer.start_span(operation_name, child_of, references, tags, start_time, ignore_active_span) From 24b2e73e5ae132f0e2f0a391895b3031165098e7 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 17 Oct 2019 14:20:09 +0800 Subject: [PATCH 068/307] code style format --- conftest.py | 1 + mishards/grpc_utils/test_grpc.py | 4 ++-- mishards/service_handler.py | 3 +-- mishards/test_server.py | 2 +- tracing/__init__.py | 19 +++++++++++++------ tracing/factory.py | 21 ++++++++++++--------- 6 files changed, 30 insertions(+), 20 deletions(-) diff --git a/conftest.py 
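# ---- Editor's note: EmptySpan above is a null object: when no tracer is
# configured, Tracer.start_span() still returns something usable in a `with`
# block, so call sites never branch on whether tracing is enabled. Behavior
# check:
from contextlib import contextmanager

@contextmanager
def EmptySpan(*args, **kwargs):
    yield None

with EmptySpan('do_search', child_of=None) as span:
    assert span is None   # callers get None instead of a real span
# ---- end note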
b/conftest.py index 1aba5b32cf..ebe8276cea 100644 --- a/conftest.py +++ b/conftest.py @@ -16,6 +16,7 @@ def app(request): db.drop_all() + @pytest.fixture def started_app(app): app.on_pre_run() diff --git a/mishards/grpc_utils/test_grpc.py b/mishards/grpc_utils/test_grpc.py index 314fccfe00..9af09e5d0d 100644 --- a/mishards/grpc_utils/test_grpc.py +++ b/mishards/grpc_utils/test_grpc.py @@ -3,20 +3,20 @@ import opentracing from mishards.grpc_utils import GrpcSpanDecorator, is_grpc_method from milvus.grpc_gen import status_pb2, milvus_pb2 - logger = logging.getLogger(__name__) class FakeTracer(opentracing.Tracer): pass + class FakeSpan(opentracing.Span): def __init__(self, context, tracer, **kwargs): super(FakeSpan, self).__init__(tracer, context) self.reset() def set_tag(self, key, value): - self.tags.append({key:value}) + self.tags.append({key: value}) def log_kv(self, key_values, timestamp=None): self.logs.append(key_values) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index e04965c12a..0172f73126 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -232,7 +232,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) def _create_index(self, table_name, index): - return self.connection().create_index(table_name, index) + return self.connection().create_index(table_name, index) @mark_grpc_method def CreateIndex(self, request, context): @@ -378,7 +378,6 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): status=status_pb2.Status(error_code=_status.code, reason=_status.message), table_row_count=_count if isinstance(_count, int) else -1) - def _get_server_version(self, metadata=None): return self.connection(metadata=metadata).server_version() diff --git a/mishards/test_server.py b/mishards/test_server.py index e9a7c0d878..a2677847da 100644 --- a/mishards/test_server.py +++ b/mishards/test_server.py @@ -254,7 +254,7 @@ class TestServer: mock_results = milvus_pb2.TopKQueryResultList(status=status_pb2.Status( error_code=status_pb2.SUCCESS, reason="Success"), - topk_query_result=result) + topk_query_result=result) table_schema = TableSchema(table_name=table_name, index_file_size=table.index_file_size, diff --git a/tracing/__init__.py b/tracing/__init__.py index a1974e2204..64a5b50d15 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -1,8 +1,10 @@ from contextlib import contextmanager + def empty_server_interceptor_decorator(target_server, interceptor): return target_server + @contextmanager def EmptySpan(*args, **kwargs): yield None @@ -10,7 +12,8 @@ def EmptySpan(*args, **kwargs): class Tracer: - def __init__(self, tracer=None, + def __init__(self, + tracer=None, interceptor=None, server_decorator=empty_server_interceptor_decorator): self.tracer = tracer @@ -27,10 +30,14 @@ class Tracer: def close(self): self.tracer and self.tracer.close() - def start_span(self, operation_name=None, - child_of=None, references=None, tags=None, - start_time=None, ignore_active_span=False): + def start_span(self, + operation_name=None, + child_of=None, + references=None, + tags=None, + start_time=None, + ignore_active_span=False): if self.empty: return EmptySpan() - return self.tracer.start_span(operation_name, child_of, - references, tags, start_time, ignore_active_span) + return self.tracer.start_span(operation_name, child_of, references, + tags, start_time, ignore_active_span) diff --git a/tracing/factory.py b/tracing/factory.py index 61cd75fcd6..14fcde2eb3 
100644 --- a/tracing/factory.py +++ b/tracing/factory.py @@ -3,20 +3,23 @@ from jaeger_client import Config from grpc_opentracing.grpcext import intercept_server from grpc_opentracing import open_tracing_server_interceptor -from tracing import (Tracer, - empty_server_interceptor_decorator) +from tracing import (Tracer, empty_server_interceptor_decorator) logger = logging.getLogger(__name__) class TracerFactory: @classmethod - def new_tracer(cls, tracer_type, tracer_config, span_decorator=None, **kwargs): + def new_tracer(cls, + tracer_type, + tracer_config, + span_decorator=None, + **kwargs): if not tracer_type: return Tracer() config = tracer_config.TRACING_CONFIG service_name = tracer_config.TRACING_SERVICE_NAME - validate=tracer_config.TRACING_VALIDATE + validate = tracer_config.TRACING_VALIDATE # if not tracer_type: # tracer_type = 'jaeger' # config = tracer_config.DEFAULT_TRACING_CONFIG @@ -24,13 +27,13 @@ class TracerFactory: if tracer_type.lower() == 'jaeger': config = Config(config=config, service_name=service_name, - validate=validate - ) + validate=validate) tracer = config.initialize_tracer() - tracer_interceptor = open_tracing_server_interceptor(tracer, - log_payloads=tracer_config.TRACING_LOG_PAYLOAD, - span_decorator=span_decorator) + tracer_interceptor = open_tracing_server_interceptor( + tracer, + log_payloads=tracer_config.TRACING_LOG_PAYLOAD, + span_decorator=span_decorator) return Tracer(tracer, tracer_interceptor, intercept_server) From 560c4310ae15a8326ca90e1df153e89fc4befb6b Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 10:19:39 +0800 Subject: [PATCH 069/307] small refactor --- mishards/service_handler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 0172f73126..1396466568 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -405,7 +405,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): string_reply=_reply ) - def _show_tables(self): + def _show_tables(self, metadata=None): return self.connection(metadata=metadata).show_tables() @mark_grpc_method @@ -414,7 +414,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): metadata = { 'resp_class': milvus_pb2.TableName } - _status, _results = self._show_tables() + _status, _results = self._show_tables(metadata=metadata) return milvus_pb2.TableNameList( status=status_pb2.Status(error_code=_status.code, reason=_status.message), From a3409be0dc4330923dd5bab2d647d1f11dc3d538 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 13:38:19 +0800 Subject: [PATCH 070/307] add router in impl --- mishards/__init__.py | 5 +- mishards/routings.py | 81 +++++++++ mishards/server.py | 27 ++- mishards/service_handler.py | 331 +++++++++++++++++------------------- mishards/settings.py | 2 + mishards/utilities.py | 20 +++ 6 files changed, 287 insertions(+), 179 deletions(-) create mode 100644 mishards/routings.py create mode 100644 mishards/utilities.py diff --git a/mishards/__init__.py b/mishards/__init__.py index 4bd77d8c60..759e8c2e5a 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -27,7 +27,10 @@ def create_app(testing_config=None): tracer = TracerFactory.new_tracer(config.TRACING_TYPE, settings.TracingConfig, span_decorator=GrpcSpanDecorator()) - grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, discover=discover) + from mishards.routings import RouterFactory + router = RouterFactory.new_router(config.ROUTER_CLASS_NAME, connect_mgr) + + 
grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, router=router, discover=discover) from mishards import exception_handlers diff --git a/mishards/routings.py b/mishards/routings.py new file mode 100644 index 0000000000..a61352f40b --- /dev/null +++ b/mishards/routings.py @@ -0,0 +1,81 @@ +import logging +from sqlalchemy import exc as sqlalchemy_exc +from sqlalchemy import and_ + +from mishards import exceptions, db +from mishards.hash_ring import HashRing +from mishards.models import Tables + +logger = logging.getLogger(__name__) + + +class RouteManager: + ROUTER_CLASSES = {} + + @classmethod + def register_router_class(cls, target): + name = target.__dict__.get('NAME', None) + name = name if name else target.__class__.__name__ + cls.ROUTER_CLASSES[name] = target + return target + + @classmethod + def get_router_class(cls, name): + return cls.ROUTER_CLASSES.get(name, None) + + +class RouterFactory: + @classmethod + def new_router(cls, name, conn_mgr, **kwargs): + router_class = RouteManager.get_router_class(name) + assert router_class + return router_class(conn_mgr, **kwargs) + + +class RouterMixin: + def __init__(self, conn_mgr): + self.conn_mgr = conn_mgr + + def routing(self, table_name, metadata=None, **kwargs): + raise NotImplemented() + + +@RouteManager.register_router_class +class FileBasedHashRingRouter(RouterMixin): + NAME = 'FileBasedHashRingRouter' + + def __init__(self, conn_mgr, **kwargs): + super(FileBasedHashRingRouter, self).__init__(conn_mgr) + + def routing(self, table_name, metadata=None, **kwargs): + range_array = kwargs.pop('range_array', None) + return self._route(table_name, range_array, metadata, **kwargs) + + def _route(self, table_name, range_array, metadata=None, **kwargs): + # PXU TODO: Implement Thread-local Context + try: + table = db.Session.query(Tables).filter( + and_(Tables.table_id == table_name, + Tables.state != Tables.TO_DELETE)).first() + except sqlalchemy_exc.SQLAlchemyError as e: + raise exceptions.DBError(message=str(e), metadata=metadata) + + if not table: + raise exceptions.TableNotFoundError(table_name, metadata=metadata) + files = table.files_to_search(range_array) + + servers = self.conn_mgr.conn_names + logger.info('Available servers: {}'.format(servers)) + + ring = HashRing(servers) + + routing = {} + + for f in files: + target_host = ring.get_node(str(f.id)) + sub = routing.get(target_host, None) + if not sub: + routing[target_host] = {'table_id': table_name, 'file_ids': []} + routing[target_host]['file_ids'].append(str(f.id)) + + return routing diff --git a/mishards/server.py b/mishards/server.py index dcaacd0fbc..20be8f1746 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -22,17 +22,24 @@ class Server: self.error_handlers = {} self.exit_flag = False - def init_app(self, conn_mgr, tracer, discover, port=19530, max_workers=10, **kwargs): + def init_app(self, + conn_mgr, + tracer, + router, + discover, + port=19530, + max_workers=10, + **kwargs): self.port = int(port) self.conn_mgr = conn_mgr self.tracer = tracer + self.router = router self.discover = discover self.server_impl = grpc.server( thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), options=[(cygrpc.ChannelArgKey.max_send_message_length, -1), - (cygrpc.ChannelArgKey.max_receive_message_length, -1)] - ) + (cygrpc.ChannelArgKey.max_receive_message_length, -1)]) self.server_impl = self.tracer.decorate(self.server_impl) @@ -43,8 +50,8 @@ class Server: url = urlparse(woserver) ip = socket.gethostbyname(url.hostname) socket.inet_pton(socket.AF_INET, ip) - 
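# The RouteManager registry above makes routers pluggable through
# ROUTER_CLASS_NAME. A hypothetical minimal router registered the same
# way (name and routing logic illustrative only). Two caveats visible in
# the registry code: RouterMixin.routing should raise
# NotImplementedError rather than call NotImplemented, which is not
# callable, and the NAME fallback uses target.__class__.__name__ (which
# for a class is 'type'); defining NAME explicitly sidesteps both.
@RouteManager.register_router_class
class SingleServerRouter(RouterMixin):
    NAME = 'SingleServerRouter'        # matched against ROUTER_CLASS_NAME

    def routing(self, table_name, metadata=None, **kwargs):
        servers = self.conn_mgr.conn_names
        first = next(iter(servers))    # send everything to one backend
        return {first: {'table_id': table_name, 'file_ids': []}}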
self.conn_mgr.register('WOSERVER', - '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) + self.conn_mgr.register( + 'WOSERVER', '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) def register_pre_run_handler(self, func): logger.info('Regiterring {} into server pre_run_handlers'.format(func)) @@ -65,9 +72,11 @@ class Server: def errorhandler(self, exception): if inspect.isclass(exception) and issubclass(exception, Exception): + def wrapper(func): self.error_handlers[exception] = func return func + return wrapper return exception @@ -78,8 +87,12 @@ class Server: def start(self, port=None): handler_class = self.decorate_handler(ServiceHandler) - add_MilvusServiceServicer_to_server(handler_class(conn_mgr=self.conn_mgr, tracer=self.tracer), self.server_impl) - self.server_impl.add_insecure_port("[::]:{}".format(str(port or self._port))) + add_MilvusServiceServicer_to_server( + handler_class(conn_mgr=self.conn_mgr, + tracer=self.tracer, + router=self.router), self.server_impl) + self.server_impl.add_insecure_port("[::]:{}".format( + str(port or self._port))) self.server_impl.start() def run(self, port): diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 1396466568..e26f2bfd74 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -3,9 +3,6 @@ import time import datetime from collections import defaultdict -from sqlalchemy import and_ -from sqlalchemy import exc as sqlalchemy_exc - from concurrent.futures import ThreadPoolExecutor from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 from milvus.grpc_gen.milvus_pb2 import TopKQueryResult @@ -15,8 +12,7 @@ from milvus.client import types as Types from mishards import (db, settings, exceptions) from mishards.grpc_utils import mark_grpc_method from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser -from mishards.models import Tables, TableFiles -from mishards.hash_ring import HashRing +from mishards import utilities logger = logging.getLogger(__name__) @@ -24,11 +20,12 @@ logger = logging.getLogger(__name__) class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): MAX_NPROBE = 2048 - def __init__(self, conn_mgr, tracer, *args, **kwargs): + def __init__(self, conn_mgr, tracer, router, *args, **kwargs): self.conn_mgr = conn_mgr self.table_meta = {} self.error_handlers = {} self.tracer = tracer + self.router = router def connection(self, metadata=None): conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) @@ -43,56 +40,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): conn.on_connect(metadata=metadata) return conn.conn - def _format_date(self, start, end): - return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) - - def _range_to_date(self, range_obj, metadata=None): - try: - start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') - end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') - assert start < end - except (ValueError, AssertionError): - raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( - range_obj.start_date, range_obj.end_date - ), metadata=metadata) - - return self._format_date(start, end) - - def _get_routing_file_ids(self, table_id, range_array, metadata=None): - # PXU TODO: Implement Thread-local Context - try: - table = db.Session.query(Tables).filter(and_( - Tables.table_id == table_id, - Tables.state != Tables.TO_DELETE - )).first() - except sqlalchemy_exc.SQLAlchemyError as e: - raise 
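# Usage sketch of Server.errorhandler above: it registers a handler
# keyed by exception class, exactly as mishards/exception_handlers.py
# does elsewhere in this series (resp_handler is the response helper
# defined there):
@server.errorhandler(exceptions.TableNotFoundError)
def handle_table_not_found(err):
    return resp_handler(err, status_pb2.TABLE_NOT_EXISTS)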
exceptions.DBError(message=str(e), metadata=metadata) - - if not table: - raise exceptions.TableNotFoundError(table_id, metadata=metadata) - files = table.files_to_search(range_array) - - servers = self.conn_mgr.conn_names - logger.info('Available servers: {}'.format(servers)) - - ring = HashRing(servers) - - routing = {} - - for f in files: - target_host = ring.get_node(str(f.id)) - sub = routing.get(target_host, None) - if not sub: - routing[target_host] = { - 'table_id': table_id, - 'file_ids': [] - } - routing[target_host]['file_ids'].append(str(f.id)) - - return routing - def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): - status = status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success") + status = status_pb2.Status(error_code=status_pb2.SUCCESS, + reason="Success") if not files_n_topk_results: return status, [] @@ -103,10 +53,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if isinstance(files_collection, tuple): status, _ = files_collection return status, [] - for request_pos, each_request_results in enumerate(files_collection.topk_query_result): - request_results[request_pos].extend(each_request_results.query_result_arrays) - request_results[request_pos] = sorted(request_results[request_pos], key=lambda x: x.distance, - reverse=reverse)[:topk] + for request_pos, each_request_results in enumerate( + files_collection.topk_query_result): + request_results[request_pos].extend( + each_request_results.query_result_arrays) + request_results[request_pos] = sorted( + request_results[request_pos], + key=lambda x: x.distance, + reverse=reverse)[:topk] calc_time = time.time() - calc_time logger.info('Merge takes {}'.format(calc_time)) @@ -120,15 +74,27 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status, topk_query_result - def _do_query(self, context, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): + def _do_query(self, + context, + table_id, + table_meta, + vectors, + topk, + nprobe, + range_array=None, + **kwargs): metadata = kwargs.get('metadata', None) - range_array = [self._range_to_date(r, metadata=metadata) for r in range_array] if range_array else None + range_array = [ + utilities.range_to_date(r, metadata=metadata) for r in range_array + ] if range_array else None routing = {} - p_span = None if self.tracer.empty else context.get_active_span().context - with self.tracer.start_span('get_routing', - child_of=p_span): - routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) + p_span = None if self.tracer.empty else context.get_active_span( + ).context + with self.tracer.start_span('get_routing', child_of=p_span): + routing = self.router.routing(table_id, + range_array=range_array, + metadata=metadata) logger.info('Routing: {}'.format(routing)) metadata = kwargs.get('metadata', None) @@ -139,42 +105,51 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): workers = settings.SEARCH_WORKER_SIZE def search(addr, query_params, vectors, topk, nprobe, **kwargs): - logger.info('Send Search Request: addr={};params={};nq={};topk={};nprobe={}'.format( - addr, query_params, len(vectors), topk, nprobe - )) + logger.info( + 'Send Search Request: addr={};params={};nq={};topk={};nprobe={}' + .format(addr, query_params, len(vectors), topk, nprobe)) conn = self.query_conn(addr, metadata=metadata) start = time.time() span = kwargs.get('span', None) - span = span if span else (None if self.tracer.empty else context.get_active_span().context) + span = span if span else 
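# Shape of the fan-out used by _do_query above, reduced to its core: one
# search task per routed backend, gathered by blocking on each future so
# any backend exception re-raises in the request thread (simplified
# sketch of the code above):
from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=len(routing) or 1) as pool:
    futures = [pool.submit(search, addr, params, vectors, topk, nprobe)
               for addr, params in routing.items()]
    for f in futures:
        f.result()   # surfaces the first failure instead of swallowing it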
(None if self.tracer.empty else + context.get_active_span().context) with self.tracer.start_span('search_{}'.format(addr), child_of=span): - ret = conn.search_vectors_in_files(table_name=query_params['table_id'], - file_ids=query_params['file_ids'], - query_records=vectors, - top_k=topk, - nprobe=nprobe, - lazy=True) + ret = conn.search_vectors_in_files( + table_name=query_params['table_id'], + file_ids=query_params['file_ids'], + query_records=vectors, + top_k=topk, + nprobe=nprobe, + lazy=True) end = time.time() logger.info('search_vectors_in_files takes: {}'.format(end - start)) all_topk_results.append(ret) - with self.tracer.start_span('do_search', - child_of=p_span) as span: + with self.tracer.start_span('do_search', child_of=p_span) as span: with ThreadPoolExecutor(max_workers=workers) as pool: for addr, params in routing.items(): - res = pool.submit(search, addr, params, vectors, topk, nprobe, span=span) + res = pool.submit(search, + addr, + params, + vectors, + topk, + nprobe, + span=span) rs.append(res) for res in rs: res.result() reverse = table_meta.metric_type == Types.MetricType.IP - with self.tracer.start_span('do_merge', - child_of=p_span): - return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) + with self.tracer.start_span('do_merge', child_of=p_span): + return self._do_merge(all_topk_results, + topk, + reverse=reverse, + metadata=metadata) def _create_table(self, table_schema): return self.connection().create_table(table_schema) @@ -184,13 +159,15 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_schema = Parser.parse_proto_TableSchema(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) logger.info('CreateTable {}'.format(_table_schema['table_name'])) _status = self._create_table(_table_schema) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) def _has_table(self, table_name, metadata=None): return self.connection(metadata=metadata).has_table(table_name) @@ -200,20 +177,18 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return milvus_pb2.BoolReply( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - bool_reply=False - ) + return milvus_pb2.BoolReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + bool_reply=False) logger.info('HasTable {}'.format(_table_name)) - _bool = self._has_table(_table_name, metadata={ - 'resp_class': milvus_pb2.BoolReply}) + _bool = self._has_table(_table_name, + metadata={'resp_class': milvus_pb2.BoolReply}) - return milvus_pb2.BoolReply( - status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="OK"), - bool_reply=_bool - ) + return milvus_pb2.BoolReply(status=status_pb2.Status( + error_code=status_pb2.SUCCESS, reason="OK"), + bool_reply=_bool) def _delete_table(self, table_name): return self.connection().delete_table(table_name) @@ -223,13 +198,15 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) logger.info('DropTable {}'.format(_table_name)) 
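# Merge rule implemented by _do_merge above, per request position:
# concatenate every backend's partial results, sort by distance, keep the
# best topk. For MetricType.IP a larger distance is better, hence
# reverse=True; for L2 the natural ascending order applies:
merged = sorted(partial_a + partial_b,
                key=lambda x: x.distance,
                reverse=reverse)[:topk]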
_status = self._delete_table(_table_name) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) def _create_index(self, table_name, index): return self.connection().create_index(table_name, index) @@ -239,7 +216,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, unpacks = Parser.parse_proto_IndexParam(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) _table_name, _index = unpacks @@ -248,21 +226,22 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): # TODO: interface create_table incompleted _status = self._create_index(_table_name, _index) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) def _add_vectors(self, param, metadata=None): - return self.connection(metadata=metadata).add_vectors(None, None, insert_param=param) + return self.connection(metadata=metadata).add_vectors( + None, None, insert_param=param) @mark_grpc_method def Insert(self, request, context): logger.info('Insert') # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' - _status, _ids = self._add_vectors(metadata={ - 'resp_class': milvus_pb2.VectorIds}, param=request) - return milvus_pb2.VectorIds( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - vector_id_array=_ids - ) + _status, _ids = self._add_vectors( + metadata={'resp_class': milvus_pb2.VectorIds}, param=request) + return milvus_pb2.VectorIds(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + vector_id_array=_ids) @mark_grpc_method def Search(self, request, context): @@ -272,22 +251,23 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): topk = request.topk nprobe = request.nprobe - logger.info('Search {}: topk={} nprobe={}'.format(table_name, topk, nprobe)) + logger.info('Search {}: topk={} nprobe={}'.format( + table_name, topk, nprobe)) - metadata = { - 'resp_class': milvus_pb2.TopKQueryResultList - } + metadata = {'resp_class': milvus_pb2.TopKQueryResultList} if nprobe > self.MAX_NPROBE or nprobe <= 0: - raise exceptions.InvalidArgumentError(message='Invalid nprobe: {}'.format(nprobe), - metadata=metadata) + raise exceptions.InvalidArgumentError( + message='Invalid nprobe: {}'.format(nprobe), metadata=metadata) table_meta = self.table_meta.get(table_name, None) if not table_meta: - status, info = self.connection(metadata=metadata).describe_table(table_name) + status, info = self.connection( + metadata=metadata).describe_table(table_name) if not status.OK(): - raise exceptions.TableNotFoundError(table_name, metadata=metadata) + raise exceptions.TableNotFoundError(table_name, + metadata=metadata) self.table_meta[table_name] = info table_meta = info @@ -304,16 +284,22 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): query_range_array.append( Range(query_range.start_value, query_range.end_value)) - status, results = self._do_query(context, table_name, table_meta, query_record_array, topk, - nprobe, query_range_array, metadata=metadata) + status, results = self._do_query(context, + table_name, + table_meta, + query_record_array, + topk, + nprobe, + query_range_array, + metadata=metadata) now = time.time() logger.info('SearchVector takes: {}'.format(now - start)) topk_result_list = 
milvus_pb2.TopKQueryResultList( - status=status_pb2.Status(error_code=status.error_code, reason=status.reason), - topk_query_result=results - ) + status=status_pb2.Status(error_code=status.error_code, + reason=status.reason), + topk_query_result=results) return topk_result_list @mark_grpc_method @@ -328,16 +314,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return milvus_pb2.TableSchema( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - ) + return milvus_pb2.TableSchema(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), ) - metadata = { - 'resp_class': milvus_pb2.TableSchema - } + metadata = {'resp_class': milvus_pb2.TableSchema} logger.info('DescribeTable {}'.format(_table_name)) - _status, _table = self._describe_table(metadata=metadata, table_name=_table_name) + _status, _table = self._describe_table(metadata=metadata, + table_name=_table_name) if _status.OK(): return milvus_pb2.TableSchema( @@ -345,37 +329,38 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): index_file_size=_table.index_file_size, dimension=_table.dimension, metric_type=_table.metric_type, - status=status_pb2.Status(error_code=_status.code, reason=_status.message), + status=status_pb2.Status(error_code=_status.code, + reason=_status.message), ) return milvus_pb2.TableSchema( table_name=_table_name, - status=status_pb2.Status(error_code=_status.code, reason=_status.message), + status=status_pb2.Status(error_code=_status.code, + reason=_status.message), ) def _count_table(self, table_name, metadata=None): - return self.connection(metadata=metadata).get_table_row_count(table_name) + return self.connection( + metadata=metadata).get_table_row_count(table_name) @mark_grpc_method def CountTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - status = status_pb2.Status(error_code=_status.code, reason=_status.message) + status = status_pb2.Status(error_code=_status.code, + reason=_status.message) - return milvus_pb2.TableRowCount( - status=status - ) + return milvus_pb2.TableRowCount(status=status) logger.info('CountTable {}'.format(_table_name)) - metadata = { - 'resp_class': milvus_pb2.TableRowCount - } + metadata = {'resp_class': milvus_pb2.TableRowCount} _status, _count = self._count_table(_table_name, metadata=metadata) return milvus_pb2.TableRowCount( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), + status=status_pb2.Status(error_code=_status.code, + reason=_status.message), table_row_count=_count if isinstance(_count, int) else -1) def _get_server_version(self, metadata=None): @@ -387,23 +372,20 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('Cmd: {}'.format(_cmd)) if not _status.OK(): - return milvus_pb2.StringReply( - status=status_pb2.Status(error_code=_status.code, reason=_status.message) - ) + return milvus_pb2.StringReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message)) - metadata = { - 'resp_class': milvus_pb2.StringReply - } + metadata = {'resp_class': milvus_pb2.StringReply} if _cmd == 'version': _status, _reply = self._get_server_version(metadata=metadata) else: - _status, _reply = self.connection(metadata=metadata).server_status() + _status, _reply = self.connection( + metadata=metadata).server_status() - return milvus_pb2.StringReply( - status=status_pb2.Status(error_code=_status.code, 
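# Client-side shape of the Search RPC handled above, as exercised via
# pymilvus in mishards/test_server.py (parameter values illustrative; on
# the first call the handler also caches the describe_table result in
# self.table_meta, so later searches skip that round trip):
status, results = client.search_vectors(table_name='demo',
                                        query_records=vectors,
                                        top_k=10,
                                        nprobe=16)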
reason=_status.message), - string_reply=_reply - ) + return milvus_pb2.StringReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + string_reply=_reply) def _show_tables(self, metadata=None): return self.connection(metadata=metadata).show_tables() @@ -411,18 +393,17 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): @mark_grpc_method def ShowTables(self, request, context): logger.info('ShowTables') - metadata = { - 'resp_class': milvus_pb2.TableName - } + metadata = {'resp_class': milvus_pb2.TableName} _status, _results = self._show_tables(metadata=metadata) - return milvus_pb2.TableNameList( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_names=_results - ) + return milvus_pb2.TableNameList(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + table_names=_results) def _delete_by_range(self, table_name, start_date, end_date): - return self.connection().delete_vectors_by_range(table_name, start_date, end_date) + return self.connection().delete_vectors_by_range(table_name, + start_date, + end_date) @mark_grpc_method def DeleteByRange(self, request, context): @@ -430,13 +411,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): Parser.parse_proto_DeleteByRangeParam(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) _table_name, _start_date, _end_date = unpacks - logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, _end_date)) + logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, + _end_date)) _status = self._delete_by_range(_table_name, _start_date, _end_date) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) def _preload_table(self, table_name): return self.connection().preload_table(table_name) @@ -446,11 +430,13 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) logger.info('PreloadTable {}'.format(_table_name)) _status = self._preload_table(_table_name) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) def _describe_index(self, table_name, metadata=None): return self.connection(metadata=metadata).describe_index(table_name) @@ -460,21 +446,22 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return milvus_pb2.IndexParam( - status=status_pb2.Status(error_code=_status.code, reason=_status.message) - ) + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message)) - metadata = { - 'resp_class': milvus_pb2.IndexParam - } + metadata = {'resp_class': milvus_pb2.IndexParam} logger.info('DescribeIndex {}'.format(_table_name)) - _status, _index_param = self._describe_index(table_name=_table_name, metadata=metadata) + _status, _index_param = self._describe_index(table_name=_table_name, + metadata=metadata) - _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) + _index = 
milvus_pb2.Index(index_type=_index_param._index_type, + nlist=_index_param._nlist) - return milvus_pb2.IndexParam(status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_name=_table_name, index=_index) + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + table_name=_table_name, + index=_index) def _drop_index(self, table_name): return self.connection().drop_index(table_name) @@ -484,8 +471,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) logger.info('DropIndex {}'.format(_table_name)) _status = self._drop_index(_table_name) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) diff --git a/mishards/settings.py b/mishards/settings.py index c9b62717d4..5e81a1a8ad 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -73,12 +73,14 @@ class DefaultConfig: SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') SQL_ECHO = env.bool('SQL_ECHO', False) TRACING_TYPE = env.str('TRACING_TYPE', '') + ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_NAME', 'FileBasedHashRingRouter') class TestingConfig(DefaultConfig): SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') SQL_ECHO = env.bool('SQL_TEST_ECHO', False) TRACING_TYPE = env.str('TRACING_TEST_TYPE', '') + ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_TEST_NAME', 'FileBasedHashRingRouter') if __name__ == '__main__': diff --git a/mishards/utilities.py b/mishards/utilities.py new file mode 100644 index 0000000000..c08d0d42df --- /dev/null +++ b/mishards/utilities.py @@ -0,0 +1,20 @@ +import datetime +from mishards import exceptions + + +def format_date(self, start, end): + return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, + (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) + + +def range_to_date(self, range_obj, metadata=None): + try: + start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') + end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') + assert start < end + except (ValueError, AssertionError): + raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( + range_obj.start_date, range_obj.end_date), + metadata=metadata) + + return self.format_date(start, end) From fb5e6ab3b809754fd425770fd5cf48a704135ad0 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 13:46:09 +0800 Subject: [PATCH 071/307] refactor max workers in handler --- mishards/service_handler.py | 8 ++++---- mishards/settings.py | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index e26f2bfd74..669d96802a 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -3,6 +3,7 @@ import time import datetime from collections import defaultdict +import multiprocessing from concurrent.futures import ThreadPoolExecutor from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 from milvus.grpc_gen.milvus_pb2 import TopKQueryResult @@ -20,12 +21,13 @@ logger = logging.getLogger(__name__) class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): MAX_NPROBE = 2048 - def __init__(self, conn_mgr, tracer, router, *args, **kwargs): + def __init__(self, 
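# Worked example of the packing in format_date above:
# (year - 1900) * 10000 + (month - 1) * 100 + day, so 2019-10-18 becomes
# (2019 - 1900) * 10000 + (10 - 1) * 100 + 18 = 1190918. Note the stray
# `self` parameters on these module-level helpers; a later patch in this
# series drops them.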
conn_mgr, tracer, router, max_workers=multiprocessing.cpu_count(), **kwargs): self.conn_mgr = conn_mgr self.table_meta = {} self.error_handlers = {} self.tracer = tracer self.router = router + self.max_workers = max_workers def connection(self, metadata=None): conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) @@ -102,8 +104,6 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): rs = [] all_topk_results = [] - workers = settings.SEARCH_WORKER_SIZE - def search(addr, query_params, vectors, topk, nprobe, **kwargs): logger.info( 'Send Search Request: addr={};params={};nq={};topk={};nprobe={}' @@ -130,7 +130,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): all_topk_results.append(ret) with self.tracer.start_span('do_search', child_of=p_span) as span: - with ThreadPoolExecutor(max_workers=workers) as pool: + with ThreadPoolExecutor(max_workers=self.max_workers) as pool: for addr, params in routing.items(): res = pool.submit(search, addr, diff --git a/mishards/settings.py b/mishards/settings.py index 5e81a1a8ad..fd07d9d436 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -23,7 +23,6 @@ config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) TIMEOUT = env.int('TIMEOUT', 60) MAX_RETRY = env.int('MAX_RETRY', 3) -SEARCH_WORKER_SIZE = env.int('SEARCH_WORKER_SIZE', 10) SERVER_PORT = env.int('SERVER_PORT', 19530) WOSERVER = env.str('WOSERVER') From bafa336410619817bb733c805f90ba3428c4cdf1 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 13:55:22 +0800 Subject: [PATCH 072/307] change retry count logic --- mishards/connections.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mishards/connections.py b/mishards/connections.py index 7db271381c..915454711f 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -44,7 +44,7 @@ class Connection: if self.on_retry_func: self.on_retry_func(self) else: - logger.warning('{} is retrying {}'.format(self, self.retried)) + self.retried > 1 and logger.warning('{} is retrying {}'.format(self, self.retried)) def on_connect(self, metadata=None): while not self.connected and self.can_retry: From 3fb602c83fffea7dd39dd46cdd93a00b3ed98c32 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 13:55:34 +0800 Subject: [PATCH 073/307] change log format --- utils/logger_helper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/logger_helper.py b/utils/logger_helper.py index 55ce3206ab..b4e3b9c5b6 100644 --- a/utils/logger_helper.py +++ b/utils/logger_helper.py @@ -73,10 +73,10 @@ def config(log_level, log_path, name, tz='UTC'): 'disable_existing_loggers': False, 'formatters': { 'default': { - 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)' + 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', }, 'colorful_console': { - 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)', + 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', '()': ColorfulFormatter, }, }, From 4231328e0e75cdcc4cba55e2f340c09d40e5d34f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 13:57:57 +0800 Subject: [PATCH 074/307] smaill code changes for logging --- mishards/__init__.py | 1 - sd/kubernetes_provider.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index 759e8c2e5a..7db3d8cb5e 100644 --- a/mishards/__init__.py +++ 
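# With the formatter above, records render along these lines (timestamp,
# thread, and location illustrative):
#   2019-10-18 13:55:34,123 | WARNING | mishards.connections | Thread-3: Connection:name="srv1";... is retrying 2 (connections.py:47)
# The retry guard `self.retried > 1 and logger.warning(...)` leans on
# boolean short-circuiting to skip the first attempt's log line; an
# explicit `if self.retried > 1:` would read more plainly and avoid the
# discarded expression value.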
b/mishards/__init__.py @@ -12,7 +12,6 @@ grpc_server = Server() def create_app(testing_config=None): config = testing_config if testing_config else settings.DefaultConfig db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) - logger.info(db) from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py index 9a15b2fa78..ca593a3682 100644 --- a/sd/kubernetes_provider.py +++ b/sd/kubernetes_provider.py @@ -170,7 +170,7 @@ class EventHandler(threading.Thread): event['pod'])) return elif try_cnt <= 0 and not pod.status.pod_ip: - logger.warn('NoPodIPFoundError') + logger.warning('NoPodIPFoundError') return logger.info('Register POD {} with IP {}'.format( From 2b8a6f43debb99e904968fb13cc351b5d0b32dbd Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 16:26:53 +0800 Subject: [PATCH 075/307] set test sql uri default value --- mishards/settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mishards/settings.py b/mishards/settings.py index fd07d9d436..773c04f083 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -76,7 +76,7 @@ class DefaultConfig: class TestingConfig(DefaultConfig): - SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') + SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI', '') SQL_ECHO = env.bool('SQL_TEST_ECHO', False) TRACING_TYPE = env.str('TRACING_TEST_TYPE', '') ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_TEST_NAME', 'FileBasedHashRingRouter') From 9b2a9193908443f1a5c545cc01b5e5953e969383 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 17:12:30 +0800 Subject: [PATCH 076/307] ignore pyc files --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 8919efeb01..60d9da8c38 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ .env .coverage +*.pyc cov_html/ __pycache__/ From c40b72df960b464756c62e52a9a18c89e3c3a40b Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 17:12:58 +0800 Subject: [PATCH 077/307] change heartbeat log --- mishards/connections.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mishards/connections.py b/mishards/connections.py index 915454711f..618690a099 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -114,6 +114,7 @@ class ConnectionMgr: return rconn def on_new_meta(self, name, url): + logger.info('Register Connection: name={};url={}'.format(name, url)) self.metas[name] = url def on_duplicate_meta(self, name, url): @@ -139,7 +140,6 @@ class ConnectionMgr: logger.warning('Non-existed meta: {}'.format(name)) def register(self, name, url): - logger.info('Register Connection: name={};url={}'.format(name, url)) meta = self.metas.get(name) if not meta: return self.on_new_meta(name, url) From bdff52021d115facf1a6f4ce8c54759b370e1a60 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 17:13:28 +0800 Subject: [PATCH 078/307] db session bug fix for multi-threading scenario --- mishards/db_base.py | 3 +++ mishards/routings.py | 2 ++ 2 files changed, 5 insertions(+) diff --git a/mishards/db_base.py b/mishards/db_base.py index 6fb3aef4e1..5f2eee9ba1 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -42,6 +42,9 @@ class DB: def Session(self): return self.session_factory() + def remove_session(self): + self.session_factory.remove() + def drop_all(self): self.Model.metadata.drop_all(self.engine) diff --git a/mishards/routings.py b/mishards/routings.py index 
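# Why remove_session() above matters under multi-threading: the
# session_factory is evidently a scoped_session registry (only
# scoped_session exposes remove()), so db.Session hands each thread its
# own Session; calling remove() when a unit of work finishes returns the
# connection to the pool and drops that thread's identity map. Typical
# shape (query illustrative):
try:
    table = db.Session.query(Tables).first()
finally:
    db.remove_session()   # discard this thread's session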
a61352f40b..f04f3d2484 100644 --- a/mishards/routings.py +++ b/mishards/routings.py @@ -53,6 +53,7 @@ class FileBasedHashRingRouter(RouterMixin): def _route(self, table_name, range_array, metadata=None, **kwargs): # PXU TODO: Implement Thread-local Context + # PXU TODO: Session life mgt try: table = db.Session.query(Tables).filter( and_(Tables.table_id == table_name, @@ -63,6 +64,7 @@ class FileBasedHashRingRouter(RouterMixin): if not table: raise exceptions.TableNotFoundError(table_name, metadata=metadata) files = table.files_to_search(range_array) + db.remove_session() servers = self.conn_mgr.conn_names logger.info('Available servers: {}'.format(servers)) From 46210920818662372a22d184823dd0370cbf7f27 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 19 Oct 2019 11:21:53 +0800 Subject: [PATCH 079/307] remove conn_mgr from handler --- mishards/routings.py | 13 ++++++++++ mishards/server.py | 3 +-- mishards/service_handler.py | 48 +++++++++++++------------------------ 3 files changed, 31 insertions(+), 33 deletions(-) diff --git a/mishards/routings.py b/mishards/routings.py index f04f3d2484..823972726f 100644 --- a/mishards/routings.py +++ b/mishards/routings.py @@ -39,6 +39,19 @@ class RouterMixin: def routing(self, table_name, metadata=None, **kwargs): raise NotImplemented() + def connection(self, metadata=None): + conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) + if conn: + conn.on_connect(metadata=metadata) + return conn.conn + + def query_conn(self, name, metadata=None): + conn = self.conn_mgr.conn(name, metadata=metadata) + if not conn: + raise exceptions.ConnectionNotFoundError(name, metadata=metadata) + conn.on_connect(metadata=metadata) + return conn.conn + @RouteManager.register_router_class class FileBasedHashRingRouter(RouterMixin): diff --git a/mishards/server.py b/mishards/server.py index 20be8f1746..6eb0e92582 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -88,8 +88,7 @@ class Server: def start(self, port=None): handler_class = self.decorate_handler(ServiceHandler) add_MilvusServiceServicer_to_server( - handler_class(conn_mgr=self.conn_mgr, - tracer=self.tracer, + handler_class(tracer=self.tracer, router=self.router), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format( str(port or self._port))) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 669d96802a..04e74415a1 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -21,27 +21,13 @@ logger = logging.getLogger(__name__) class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): MAX_NPROBE = 2048 - def __init__(self, conn_mgr, tracer, router, max_workers=multiprocessing.cpu_count(), **kwargs): - self.conn_mgr = conn_mgr + def __init__(self, tracer, router, max_workers=multiprocessing.cpu_count(), **kwargs): self.table_meta = {} self.error_handlers = {} self.tracer = tracer self.router = router self.max_workers = max_workers - def connection(self, metadata=None): - conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) - if conn: - conn.on_connect(metadata=metadata) - return conn.conn - - def query_conn(self, name, metadata=None): - conn = self.conn_mgr.conn(name, metadata=metadata) - if not conn: - raise exceptions.ConnectionNotFoundError(name, metadata=metadata) - conn.on_connect(metadata=metadata) - return conn.conn - def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): status = status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success") @@ -109,7 +95,7 @@ class 
ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): 'Send Search Request: addr={};params={};nq={};topk={};nprobe={}' .format(addr, query_params, len(vectors), topk, nprobe)) - conn = self.query_conn(addr, metadata=metadata) + conn = self.router.query_conn(addr, metadata=metadata) start = time.time() span = kwargs.get('span', None) span = span if span else (None if self.tracer.empty else @@ -152,7 +138,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): metadata=metadata) def _create_table(self, table_schema): - return self.connection().create_table(table_schema) + return self.router.connection().create_table(table_schema) @mark_grpc_method def CreateTable(self, request, context): @@ -170,7 +156,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reason=_status.message) def _has_table(self, table_name, metadata=None): - return self.connection(metadata=metadata).has_table(table_name) + return self.router.connection(metadata=metadata).has_table(table_name) @mark_grpc_method def HasTable(self, request, context): @@ -191,7 +177,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): bool_reply=_bool) def _delete_table(self, table_name): - return self.connection().delete_table(table_name) + return self.router.connection().delete_table(table_name) @mark_grpc_method def DropTable(self, request, context): @@ -209,7 +195,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reason=_status.message) def _create_index(self, table_name, index): - return self.connection().create_index(table_name, index) + return self.router.connection().create_index(table_name, index) @mark_grpc_method def CreateIndex(self, request, context): @@ -230,7 +216,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reason=_status.message) def _add_vectors(self, param, metadata=None): - return self.connection(metadata=metadata).add_vectors( + return self.router.connection(metadata=metadata).add_vectors( None, None, insert_param=param) @mark_grpc_method @@ -263,7 +249,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): table_meta = self.table_meta.get(table_name, None) if not table_meta: - status, info = self.connection( + status, info = self.router.connection( metadata=metadata).describe_table(table_name) if not status.OK(): raise exceptions.TableNotFoundError(table_name, @@ -307,7 +293,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): raise NotImplemented() def _describe_table(self, table_name, metadata=None): - return self.connection(metadata=metadata).describe_table(table_name) + return self.router.connection(metadata=metadata).describe_table(table_name) @mark_grpc_method def DescribeTable(self, request, context): @@ -340,7 +326,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): ) def _count_table(self, table_name, metadata=None): - return self.connection( + return self.router.connection( metadata=metadata).get_table_row_count(table_name) @mark_grpc_method @@ -364,7 +350,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): table_row_count=_count if isinstance(_count, int) else -1) def _get_server_version(self, metadata=None): - return self.connection(metadata=metadata).server_version() + return self.router.connection(metadata=metadata).server_version() @mark_grpc_method def Cmd(self, request, context): @@ -380,7 +366,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if _cmd == 'version': _status, _reply = self._get_server_version(metadata=metadata) else: - _status, _reply = 
self.connection( + _status, _reply = self.router.connection( metadata=metadata).server_status() return milvus_pb2.StringReply(status=status_pb2.Status( @@ -388,7 +374,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): string_reply=_reply) def _show_tables(self, metadata=None): - return self.connection(metadata=metadata).show_tables() + return self.router.connection(metadata=metadata).show_tables() @mark_grpc_method def ShowTables(self, request, context): @@ -401,7 +387,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): table_names=_results) def _delete_by_range(self, table_name, start_date, end_date): - return self.connection().delete_vectors_by_range(table_name, + return self.router.connection().delete_vectors_by_range(table_name, start_date, end_date) @@ -423,7 +409,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reason=_status.message) def _preload_table(self, table_name): - return self.connection().preload_table(table_name) + return self.router.connection().preload_table(table_name) @mark_grpc_method def PreloadTable(self, request, context): @@ -439,7 +425,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reason=_status.message) def _describe_index(self, table_name, metadata=None): - return self.connection(metadata=metadata).describe_index(table_name) + return self.router.connection(metadata=metadata).describe_index(table_name) @mark_grpc_method def DescribeIndex(self, request, context): @@ -464,7 +450,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): index=_index) def _drop_index(self, table_name): - return self.connection().drop_index(table_name) + return self.router.connection().drop_index(table_name) @mark_grpc_method def DropIndex(self, request, context): From 43bc2cc60c8b1c5428cb990f7300c91f81a63ead Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 19 Oct 2019 11:21:53 +0800 Subject: [PATCH 080/307] remove conn_mgr from handler remove conn_mgr from handler --- mishards/service_handler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 04e74415a1..485aa8b211 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -388,8 +388,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def _delete_by_range(self, table_name, start_date, end_date): return self.router.connection().delete_vectors_by_range(table_name, - start_date, - end_date) + start_date, + end_date) @mark_grpc_method def DeleteByRange(self, request, context): From 3ddd181dd2225c1166d3989249d984ae7677538a Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 19 Oct 2019 14:02:48 +0800 Subject: [PATCH 081/307] update for better test --- conftest.py | 2 +- mishards/server.py | 2 +- mishards/settings.py | 7 +++++-- sd/kubernetes_provider.py | 6 ++++-- sd/static_provider.py | 6 ++++-- 5 files changed, 15 insertions(+), 8 deletions(-) diff --git a/conftest.py b/conftest.py index ebe8276cea..34e22af693 100644 --- a/conftest.py +++ b/conftest.py @@ -20,7 +20,7 @@ def app(request): @pytest.fixture def started_app(app): app.on_pre_run() - app.start(app.port) + app.start(settings.SERVER_TEST_PORT) yield app diff --git a/mishards/server.py b/mishards/server.py index 6eb0e92582..599a00e455 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -91,7 +91,7 @@ class Server: handler_class(tracer=self.tracer, router=self.router), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format( - str(port or self._port))) + str(port 
or self.port))) self.server_impl.start() def run(self, port): diff --git a/mishards/settings.py b/mishards/settings.py index 773c04f083..21a3bb7a65 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -25,6 +25,7 @@ TIMEOUT = env.int('TIMEOUT', 60) MAX_RETRY = env.int('MAX_RETRY', 3) SERVER_PORT = env.int('SERVER_PORT', 19530) +SERVER_TEST_PORT = env.int('SERVER_TEST_PORT', 19530) WOSERVER = env.str('WOSERVER') SD_PROVIDER_SETTINGS = None @@ -36,11 +37,13 @@ if SD_PROVIDER == 'Kubernetes': in_cluster=env.bool('SD_IN_CLUSTER', False), poll_interval=env.int('SD_POLL_INTERVAL', 5), pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), - label_selector=env.str('SD_LABEL_SELECTOR', '')) + label_selector=env.str('SD_LABEL_SELECTOR', ''), + port=env.int('SD_PORT', 19530)) elif SD_PROVIDER == 'Static': from sd.static_provider import StaticProviderSettings SD_PROVIDER_SETTINGS = StaticProviderSettings( - hosts=env.list('SD_STATIC_HOSTS', [])) + hosts=env.list('SD_STATIC_HOSTS', []), + port=env.int('SD_STATIC_PORT', 19530)) # TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py index ca593a3682..eb113db007 100644 --- a/sd/kubernetes_provider.py +++ b/sd/kubernetes_provider.py @@ -226,12 +226,13 @@ class EventHandler(threading.Thread): class KubernetesProviderSettings: def __init__(self, namespace, pod_patt, label_selector, in_cluster, - poll_interval, **kwargs): + poll_interval, port=None, **kwargs): self.namespace = namespace self.pod_patt = pod_patt self.label_selector = label_selector self.in_cluster = in_cluster self.poll_interval = poll_interval + self.port = int(port) if port else 19530 @singleton @@ -245,6 +246,7 @@ class KubernetesProvider(object): self.label_selector = settings.label_selector self.in_cluster = settings.in_cluster self.poll_interval = settings.poll_interval + self.port = settings.port self.kwargs = kwargs self.queue = queue.Queue() @@ -279,7 +281,7 @@ class KubernetesProvider(object): **kwargs) def add_pod(self, name, ip): - self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) + self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) def delete_pod(self, name): self.conn_mgr.unregister(name) diff --git a/sd/static_provider.py b/sd/static_provider.py index 5c97c4efd0..e88780740f 100644 --- a/sd/static_provider.py +++ b/sd/static_provider.py @@ -9,8 +9,9 @@ from sd import ProviderManager class StaticProviderSettings: - def __init__(self, hosts): + def __init__(self, hosts, port=None): self.hosts = hosts + self.port = int(port) if port else 19530 @singleton @@ -21,6 +22,7 @@ class KubernetesProvider(object): def __init__(self, settings, conn_mgr, **kwargs): self.conn_mgr = conn_mgr self.hosts = [socket.gethostbyname(host) for host in settings.hosts] + self.port = settings.port def start(self): for host in self.hosts: @@ -31,7 +33,7 @@ class KubernetesProvider(object): self.delete_pod(host) def add_pod(self, name, ip): - self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) + self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) def delete_pod(self, name): self.conn_mgr.unregister(name) From 9dc45d650c713caa8876b7693d526e66922db629 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 19 Oct 2019 14:03:06 +0800 Subject: [PATCH 082/307] update test_server --- mishards/test_server.py | 70 ++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/mishards/test_server.py b/mishards/test_server.py index 
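# The discovery providers above now honor a configurable backend port;
# the matching .env entries would look like this (values illustrative):
#   SD_PROVIDER=Static
#   SD_STATIC_HOSTS=127.0.0.1,10.0.0.6
#   SD_STATIC_PORT=19530
# env.list() splits SD_STATIC_HOSTS on commas; each host is resolved via
# socket.gethostbyname and registered as tcp://<ip>:<port>.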
a2677847da..2f24a1167b 100644 --- a/mishards/test_server.py +++ b/mishards/test_server.py @@ -13,6 +13,7 @@ from mishards import db, create_app, settings from mishards.service_handler import ServiceHandler from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser from mishards.factories import TableFilesFactory, TablesFactory, TableFiles, Tables +from mishards.routings import RouterMixin logger = logging.getLogger(__name__) @@ -22,9 +23,10 @@ BAD = Status(code=Status.PERMISSION_DENIED, message='Fail') @pytest.mark.usefixtures('started_app') class TestServer: - def client(self, port): + @property + def client(self): m = Milvus() - m.connect(host='localhost', port=port) + m.connect(host='localhost', port=settings.SERVER_TEST_PORT) return m def test_server_start(self, started_app): @@ -33,22 +35,22 @@ class TestServer: def test_cmd(self, started_app): ServiceHandler._get_server_version = mock.MagicMock(return_value=(OK, '')) - status, _ = self.client(started_app.port).server_version() + status, _ = self.client.server_version() assert status.OK() Parser.parse_proto_Command = mock.MagicMock(return_value=(BAD, 'cmd')) - status, _ = self.client(started_app.port).server_version() + status, _ = self.client.server_version() assert not status.OK() def test_drop_index(self, started_app): table_name = inspect.currentframe().f_code.co_name ServiceHandler._drop_index = mock.MagicMock(return_value=OK) - status = self.client(started_app.port).drop_index(table_name) + status = self.client.drop_index(table_name) assert status.OK() Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - status = self.client(started_app.port).drop_index(table_name) + status = self.client.drop_index(table_name) assert not status.OK() def test_describe_index(self, started_app): @@ -62,13 +64,13 @@ class TestServer: return_value=(OK, table_name)) ServiceHandler._describe_index = mock.MagicMock( return_value=(OK, index_param)) - status, ret = self.client(started_app.port).describe_index(table_name) + status, ret = self.client.describe_index(table_name) assert status.OK() assert ret._table_name == index_param._table_name Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - status, _ = self.client(started_app.port).describe_index(table_name) + status, _ = self.client.describe_index(table_name) assert not status.OK() def test_preload(self, started_app): @@ -77,12 +79,12 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(OK, table_name)) ServiceHandler._preload_table = mock.MagicMock(return_value=OK) - status = self.client(started_app.port).preload_table(table_name) + status = self.client.preload_table(table_name) assert status.OK() Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - status = self.client(started_app.port).preload_table(table_name) + status = self.client.preload_table(table_name) assert not status.OK() def test_delete_by_range(self, started_app): @@ -94,13 +96,13 @@ class TestServer: Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( return_value=(OK, unpacked)) ServiceHandler._delete_by_range = mock.MagicMock(return_value=OK) - status = self.client(started_app.port).delete_vectors_by_range( + status = self.client.delete_vectors_by_range( *unpacked) assert status.OK() Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( return_value=(BAD, unpacked)) - status = self.client(started_app.port).delete_vectors_by_range( + status = self.client.delete_vectors_by_range( *unpacked) assert not 
status.OK() @@ -111,21 +113,19 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(OK, table_name)) ServiceHandler._count_table = mock.MagicMock(return_value=(OK, count)) - status, ret = self.client( - started_app.port).get_table_row_count(table_name) + status, ret = self.client.get_table_row_count(table_name) assert status.OK() assert ret == count Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - status, _ = self.client( - started_app.port).get_table_row_count(table_name) + status, _ = self.client.get_table_row_count(table_name) assert not status.OK() def test_show_tables(self, started_app): tables = ['t1', 't2'] ServiceHandler._show_tables = mock.MagicMock(return_value=(OK, tables)) - status, ret = self.client(started_app.port).show_tables() + status, ret = self.client.show_tables() assert status.OK() assert ret == tables @@ -141,17 +141,17 @@ class TestServer: return_value=(OK, table_schema.table_name)) ServiceHandler._describe_table = mock.MagicMock( return_value=(OK, table_schema)) - status, _ = self.client(started_app.port).describe_table(table_name) + status, _ = self.client.describe_table(table_name) assert status.OK() ServiceHandler._describe_table = mock.MagicMock( return_value=(BAD, table_schema)) - status, _ = self.client(started_app.port).describe_table(table_name) + status, _ = self.client.describe_table(table_name) assert not status.OK() Parser.parse_proto_TableName = mock.MagicMock(return_value=(BAD, 'cmd')) - status, ret = self.client(started_app.port).describe_table(table_name) + status, ret = self.client.describe_table(table_name) assert not status.OK() def test_insert(self, started_app): @@ -159,7 +159,7 @@ class TestServer: vectors = [[random.random() for _ in range(16)] for _ in range(10)] ids = [random.randint(1000000, 20000000) for _ in range(10)] ServiceHandler._add_vectors = mock.MagicMock(return_value=(OK, ids)) - status, ret = self.client(started_app.port).add_vectors( + status, ret = self.client.add_vectors( table_name=table_name, records=vectors) assert status.OK() assert ids == ret @@ -170,14 +170,12 @@ class TestServer: Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(OK, unpacks)) ServiceHandler._create_index = mock.MagicMock(return_value=OK) - status = self.client( - started_app.port).create_index(table_name=table_name) + status = self.client.create_index(table_name=table_name) assert status.OK() Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(BAD, None)) - status = self.client( - started_app.port).create_index(table_name=table_name) + status = self.client.create_index(table_name=table_name) assert not status.OK() def test_drop_table(self, started_app): @@ -186,14 +184,12 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(OK, table_name)) ServiceHandler._delete_table = mock.MagicMock(return_value=OK) - status = self.client( - started_app.port).delete_table(table_name=table_name) + status = self.client.delete_table(table_name=table_name) assert status.OK() Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - status = self.client( - started_app.port).delete_table(table_name=table_name) + status = self.client.delete_table(table_name=table_name) assert not status.OK() def test_has_table(self, started_app): @@ -202,12 +198,12 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(OK, table_name)) ServiceHandler._has_table = mock.MagicMock(return_value=True) - has = 
self.client(started_app.port).has_table(table_name=table_name) + has = self.client.has_table(table_name=table_name) assert has Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - has = self.client(started_app.port).has_table(table_name=table_name) + has = self.client.has_table(table_name=table_name) assert not has def test_create_table(self, started_app): @@ -219,12 +215,12 @@ class TestServer: dimension=dimension) ServiceHandler._create_table = mock.MagicMock(return_value=OK) - status = self.client(started_app.port).create_table(table_schema) + status = self.client.create_table(table_schema) assert status.OK() Parser.parse_proto_TableSchema = mock.MagicMock(return_value=(BAD, None)) - status = self.client(started_app.port).create_table(table_schema) + status = self.client.create_table(table_schema) assert not status.OK() def random_data(self, n, dimension): @@ -261,19 +257,21 @@ class TestServer: metric_type=table.metric_type, dimension=table.dimension) - status, _ = self.client(started_app.port).search_vectors(**param) + status, _ = self.client.search_vectors(**param) assert status.code == Status.ILLEGAL_ARGUMENT param['nprobe'] = 2048 + RouterMixin.connection = mock.MagicMock(return_value=Milvus()) + RouterMixin.query_conn = mock.MagicMock(return_value=Milvus()) Milvus.describe_table = mock.MagicMock(return_value=(BAD, table_schema)) - status, ret = self.client(started_app.port).search_vectors(**param) + status, ret = self.client.search_vectors(**param) assert status.code == Status.TABLE_NOT_EXISTS Milvus.describe_table = mock.MagicMock(return_value=(OK, table_schema)) Milvus.search_vectors_in_files = mock.MagicMock( return_value=mock_results) - status, ret = self.client(started_app.port).search_vectors(**param) + status, ret = self.client.search_vectors(**param) assert status.OK() assert len(ret) == nq From 4efa4506a99e044cd6a3d39e7713f1ef78fc4877 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 19 Oct 2019 14:06:35 +0800 Subject: [PATCH 083/307] update .env.example --- mishards/.env.example | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mishards/.env.example b/mishards/.env.example index bfea0a3edc..0a23c0cf56 100644 --- a/mishards/.env.example +++ b/mishards/.env.example @@ -1,8 +1,8 @@ DEBUG=True WOSERVER=tcp://127.0.0.1:19530 -TESTING_WOSERVER=tcp://127.0.0.1:19530 SERVER_PORT=19532 +SERVER_TEST_PORT=19888 SD_PROVIDER=Static @@ -13,16 +13,17 @@ SD_ROSERVER_POD_PATT=.*-ro-servers-.* SD_LABEL_SELECTOR=tier=ro-servers SD_STATIC_HOSTS=127.0.0.1 +SD_STATIC_PORT=19530 #SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_ECHO=True -TESTING=False #SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_TEST_ECHO=False +# TRACING_TEST_TYPE=jaeger TRACING_TYPE=jaeger TRACING_SERVICE_NAME=fortest TRACING_SAMPLER_TYPE=const From a27eef278b538ed21010a0719885c49c7ec597e2 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 11:42:54 +0800 Subject: [PATCH 084/307] update for new sdk --- mishards/service_handler.py | 4 ++-- mishards/test_server.py | 6 ++++-- requirements.txt | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 485aa8b211..4519afbaa0 100644 --- a/mishards/service_handler.py +++ 
b/mishards/service_handler.py @@ -7,7 +7,7 @@ import multiprocessing from concurrent.futures import ThreadPoolExecutor from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 from milvus.grpc_gen.milvus_pb2 import TopKQueryResult -from milvus.client.Abstract import Range +from milvus.client.abstract import Range from milvus.client import types as Types from mishards import (db, settings, exceptions) @@ -109,7 +109,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): query_records=vectors, top_k=topk, nprobe=nprobe, - lazy=True) + lazy_=True) end = time.time() logger.info('search_vectors_in_files takes: {}'.format(end - start)) diff --git a/mishards/test_server.py b/mishards/test_server.py index 2f24a1167b..a7fec615c9 100644 --- a/mishards/test_server.py +++ b/mishards/test_server.py @@ -7,7 +7,7 @@ import faker import inspect from milvus import Milvus from milvus.client.types import Status, IndexType, MetricType -from milvus.client.Abstract import IndexParam, TableSchema +from milvus.client.abstract import IndexParam, TableSchema from milvus.grpc_gen import status_pb2, milvus_pb2 from mishards import db, create_app, settings from mishards.service_handler import ServiceHandler @@ -87,6 +87,7 @@ class TestServer: status = self.client.preload_table(table_name) assert not status.OK() + @pytest.mark.skip def test_delete_by_range(self, started_app): table_name = inspect.currentframe().f_code.co_name @@ -203,7 +204,8 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - has = self.client.has_table(table_name=table_name) + status, has = self.client.has_table(table_name=table_name) + assert not status.OK() assert not has def test_create_table(self, started_app): diff --git a/requirements.txt b/requirements.txt index 133cfac8ab..ae224e92ed 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ py==1.8.0 pyasn1==0.4.7 pyasn1-modules==0.2.6 pylint==2.3.1 -pymilvus-test==0.2.21 +pymilvus-test==0.2.28 #pymilvus==0.2.0 pyparsing==2.4.0 pytest==4.6.3 From 703371efa379c9eba1c0c36004db25e7e9b22521 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 12:08:45 +0800 Subject: [PATCH 085/307] check return index param in DescribeIndex --- mishards/service_handler.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 4519afbaa0..0c6b41ece6 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -441,6 +441,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _index_param = self._describe_index(table_name=_table_name, metadata=metadata) + if not _index_param: + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message)) + _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) From 26b3adfcc37d4b0e18b953786d47f9fcb39c89a3 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 14:34:12 +0800 Subject: [PATCH 086/307] update for new sdk changes --- mishards/service_handler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 0c6b41ece6..44e1d8cf7b 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -169,11 +169,11 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('HasTable {}'.format(_table_name)) - _bool = self._has_table(_table_name, + _status, _bool = self._has_table(_table_name, 
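PATCH 084 tracks two renames in the pymilvus SDK: the module path milvus.client.Abstract becomes milvus.client.abstract, and the keyword lazy becomes lazy_. If a deployment has to straddle both SDK generations for a while, a guarded import keeps call sites unchanged; a sketch, assuming only the module path differs between versions:

    try:
        # pymilvus-test >= 0.2.28: lowercase module name
        from milvus.client.abstract import Range, IndexParam, TableSchema
    except ImportError:
        # older SDKs exported the same classes from client.Abstract
        from milvus.client.Abstract import Range, IndexParam, TableSchema

The keyword change cannot be shimmed as cheaply; pinning pymilvus-test==0.2.28 in requirements.txt, as the patch does, is what actually keeps the two sides in step.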
metadata={'resp_class': milvus_pb2.BoolReply}) return milvus_pb2.BoolReply(status=status_pb2.Status( - error_code=status_pb2.SUCCESS, reason="OK"), + error_code=_status.code, reason=_status.message), bool_reply=_bool) def _delete_table(self, table_name): From c4a5c5c69b5f2bb4d8b7f016e230a74d5ddfd2d5 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 14:58:39 +0800 Subject: [PATCH 087/307] bug fix for time range and topk check in search --- mishards/exception_codes.py | 1 + mishards/exception_handlers.py | 6 ++++++ mishards/exceptions.py | 4 ++++ mishards/service_handler.py | 5 +++++ mishards/utilities.py | 6 +++--- 5 files changed, 19 insertions(+), 3 deletions(-) diff --git a/mishards/exception_codes.py b/mishards/exception_codes.py index ecb2469562..bdd4572dd5 100644 --- a/mishards/exception_codes.py +++ b/mishards/exception_codes.py @@ -7,3 +7,4 @@ DB_ERROR_CODE = 10003 TABLE_NOT_FOUND_CODE = 20001 INVALID_ARGUMENT_CODE = 20002 INVALID_DATE_RANGE_CODE = 20003 +INVALID_TOPK_CODE = 20004 diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index 1e5ffb3529..c79a6db5a3 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -58,6 +58,12 @@ def TableNotFoundErrorHandler(err): return resp_handler(err, status_pb2.TABLE_NOT_EXISTS) +@server.errorhandler(exceptions.InvalidTopKError) +def InvalidTopKErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_TOPK) + + @server.errorhandler(exceptions.InvalidArgumentError) def InvalidArgumentErrorHandler(err): logger.error(err) diff --git a/mishards/exceptions.py b/mishards/exceptions.py index acd9372d6a..72839f88d2 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -26,6 +26,10 @@ class TableNotFoundError(BaseException): code = codes.TABLE_NOT_FOUND_CODE +class InvalidTopKError(BaseException): + code = codes.INVALID_TOPK_CODE + + class InvalidArgumentError(BaseException): code = codes.INVALID_ARGUMENT_CODE diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 44e1d8cf7b..5e91c14f14 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -20,6 +20,7 @@ logger = logging.getLogger(__name__) class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): MAX_NPROBE = 2048 + MAX_TOPK = 2048 def __init__(self, tracer, router, max_workers=multiprocessing.cpu_count(), **kwargs): self.table_meta = {} @@ -246,6 +247,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): raise exceptions.InvalidArgumentError( message='Invalid nprobe: {}'.format(nprobe), metadata=metadata) + if topk > self.MAX_TOPK or topk <= 0: + raise exceptions.InvalidTopKError( + message='Invalid topk: {}'.format(topk), metadata=metadata) + table_meta = self.table_meta.get(table_name, None) if not table_meta: diff --git a/mishards/utilities.py b/mishards/utilities.py index c08d0d42df..42e982b5f1 100644 --- a/mishards/utilities.py +++ b/mishards/utilities.py @@ -2,12 +2,12 @@ import datetime from mishards import exceptions -def format_date(self, start, end): +def format_date(start, end): return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) -def range_to_date(self, range_obj, metadata=None): +def range_to_date(range_obj, metadata=None): try: start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') @@ -17,4 +17,4 @@ def range_to_date(self, range_obj, metadata=None): 
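PATCH 087 rejects an out-of-range topk before any work is scheduled, mirroring the existing nprobe guard, and gives the failure its own error path (ILLEGAL_TOPK) instead of a generic argument error. The shape of the check as a standalone sketch (toy exception class, bounds as in the patch):

    MAX_TOPK = 2048

    class InvalidTopKError(Exception):
        pass

    def check_topk(topk):
        # fail fast, before the query fans out to any shard
        if topk > MAX_TOPK or topk <= 0:
            raise InvalidTopKError('Invalid topk: {}'.format(topk))

    check_topk(10)            # passes silently
    try:
        check_topk(4096)      # exceeds MAX_TOPK
    except InvalidTopKError as e:
        print(e)              # Invalid topk: 4096

The utilities.py hunk in the same patch fixes a plainer bug: format_date and range_to_date are module-level functions, so their stray self parameters made every call fail. The date they pack is (year - 1900) * 10000 + (month - 1) * 100 + day, so 2019-10-21 encodes to 1190921.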
range_obj.start_date, range_obj.end_date), metadata=metadata) - return self.format_date(start, end) + return format_date(start, end) From e47f3ec28a89715745be8949c160e81f416fcd9f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 15:06:58 +0800 Subject: [PATCH 088/307] update to latest image --- start_services.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/start_services.yml b/start_services.yml index c7a3c36f51..57fe061bb7 100644 --- a/start_services.yml +++ b/start_services.yml @@ -21,7 +21,7 @@ services: mishards: restart: always - image: registry.zilliz.com/milvus/mishards:v0.0.3 + image: registry.zilliz.com/milvus/mishards:v0.0.4 ports: - "0.0.0.0:19530:19531" - "0.0.0.0:19532:19532" From 7b0a731e047b571c1154ca0dba37f8be8f867c8d Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 15:45:25 +0800 Subject: [PATCH 089/307] fix bug in test_server --- mishards/test_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mishards/test_server.py b/mishards/test_server.py index a7fec615c9..efd3912076 100644 --- a/mishards/test_server.py +++ b/mishards/test_server.py @@ -198,7 +198,7 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(OK, table_name)) - ServiceHandler._has_table = mock.MagicMock(return_value=True) + ServiceHandler._has_table = mock.MagicMock(return_value=(OK, True)) has = self.client.has_table(table_name=table_name) assert has From 9a4c732563323cd8814a11a5eda8891745e264ba Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 16:20:29 +0800 Subject: [PATCH 090/307] fix bug in test_server --- Dockerfile | 10 - build.sh | 39 -- conftest.py | 27 -- manager.py | 28 -- mishards/.env.example | 33 -- mishards/__init__.py | 36 -- mishards/connections.py | 154 -------- mishards/db_base.py | 52 --- mishards/exception_codes.py | 10 - mishards/exception_handlers.py | 82 ---- mishards/exceptions.py | 38 -- mishards/factories.py | 54 --- mishards/grpc_utils/__init__.py | 37 -- mishards/grpc_utils/grpc_args_parser.py | 102 ----- mishards/grpc_utils/grpc_args_wrapper.py | 4 - mishards/grpc_utils/test_grpc.py | 75 ---- mishards/hash_ring.py | 150 ------- mishards/main.py | 15 - mishards/models.py | 76 ---- mishards/routings.py | 96 ----- mishards/server.py | 122 ------ mishards/service_handler.py | 475 ----------------------- mishards/settings.py | 94 ----- mishards/test_connections.py | 101 ----- mishards/test_models.py | 39 -- mishards/test_server.py | 279 ------------- mishards/utilities.py | 20 - requirements.txt | 36 -- sd/__init__.py | 28 -- sd/kubernetes_provider.py | 331 ---------------- sd/static_provider.py | 39 -- setup.cfg | 4 - start_services.yml | 45 --- tracing/__init__.py | 43 -- tracing/factory.py | 40 -- utils/__init__.py | 11 - utils/logger_helper.py | 152 -------- 37 files changed, 2977 deletions(-) delete mode 100644 Dockerfile delete mode 100755 build.sh delete mode 100644 conftest.py delete mode 100644 manager.py delete mode 100644 mishards/.env.example delete mode 100644 mishards/__init__.py delete mode 100644 mishards/connections.py delete mode 100644 mishards/db_base.py delete mode 100644 mishards/exception_codes.py delete mode 100644 mishards/exception_handlers.py delete mode 100644 mishards/exceptions.py delete mode 100644 mishards/factories.py delete mode 100644 mishards/grpc_utils/__init__.py delete mode 100644 mishards/grpc_utils/grpc_args_parser.py delete mode 100644 mishards/grpc_utils/grpc_args_wrapper.py delete mode 100644 mishards/grpc_utils/test_grpc.py 
delete mode 100644 mishards/hash_ring.py delete mode 100644 mishards/main.py delete mode 100644 mishards/models.py delete mode 100644 mishards/routings.py delete mode 100644 mishards/server.py delete mode 100644 mishards/service_handler.py delete mode 100644 mishards/settings.py delete mode 100644 mishards/test_connections.py delete mode 100644 mishards/test_models.py delete mode 100644 mishards/test_server.py delete mode 100644 mishards/utilities.py delete mode 100644 requirements.txt delete mode 100644 sd/__init__.py delete mode 100644 sd/kubernetes_provider.py delete mode 100644 sd/static_provider.py delete mode 100644 setup.cfg delete mode 100644 start_services.yml delete mode 100644 tracing/__init__.py delete mode 100644 tracing/factory.py delete mode 100644 utils/__init__.py delete mode 100644 utils/logger_helper.py diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 594640619e..0000000000 --- a/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM python:3.6 -RUN apt update && apt install -y \ - less \ - telnet -RUN mkdir /source -WORKDIR /source -ADD ./requirements.txt ./ -RUN pip install -r requirements.txt -COPY . . -CMD python mishards/main.py diff --git a/build.sh b/build.sh deleted file mode 100755 index fad30518f2..0000000000 --- a/build.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -BOLD=`tput bold` -NORMAL=`tput sgr0` -YELLOW='\033[1;33m' -ENDC='\033[0m' - -echo -e "${BOLD}MISHARDS_REGISTRY=${MISHARDS_REGISTRY}${ENDC}" - -function build_image() { - dockerfile=$1 - remote_registry=$2 - tagged=$2 - buildcmd="docker build -t ${tagged} -f ${dockerfile} ." - echo -e "${BOLD}$buildcmd${NORMAL}" - $buildcmd - pushcmd="docker push ${remote_registry}" - echo -e "${BOLD}$pushcmd${NORMAL}" - $pushcmd - echo -e "${YELLOW}${BOLD}Image: ${remote_registry}${NORMAL}${ENDC}" -} - -case "$1" in - -all) - [[ -z $MISHARDS_REGISTRY ]] && { - echo -e "${YELLOW}Error: Please set docker registry first:${ENDC}\n\t${BOLD}export MISHARDS_REGISTRY=xxxx\n${ENDC}" - exit 1 - } - - version="" - [[ ! -z $2 ]] && version=":${2}" - build_image "Dockerfile" "${MISHARDS_REGISTRY}${version}" "${MISHARDS_REGISTRY}" - ;; -*) - echo "Usage: [option...] 
{base | apps}" - echo "all, Usage: build.sh all [tagname|] => {docker_registry}:\${tagname}" - ;; -esac diff --git a/conftest.py b/conftest.py deleted file mode 100644 index 34e22af693..0000000000 --- a/conftest.py +++ /dev/null @@ -1,27 +0,0 @@ -import logging -import pytest -import grpc -from mishards import settings, db, create_app - -logger = logging.getLogger(__name__) - - -@pytest.fixture -def app(request): - app = create_app(settings.TestingConfig) - db.drop_all() - db.create_all() - - yield app - - db.drop_all() - - -@pytest.fixture -def started_app(app): - app.on_pre_run() - app.start(settings.SERVER_TEST_PORT) - - yield app - - app.stop() diff --git a/manager.py b/manager.py deleted file mode 100644 index 931c90ebc8..0000000000 --- a/manager.py +++ /dev/null @@ -1,28 +0,0 @@ -import fire -from mishards import db -from sqlalchemy import and_ - - -class DBHandler: - @classmethod - def create_all(cls): - db.create_all() - - @classmethod - def drop_all(cls): - db.drop_all() - - @classmethod - def fun(cls, tid): - from mishards.factories import TablesFactory, TableFilesFactory, Tables - f = db.Session.query(Tables).filter(and_( - Tables.table_id == tid, - Tables.state != Tables.TO_DELETE) - ).first() - print(f) - - # f1 = TableFilesFactory() - - -if __name__ == '__main__': - fire.Fire(DBHandler) diff --git a/mishards/.env.example b/mishards/.env.example deleted file mode 100644 index 0a23c0cf56..0000000000 --- a/mishards/.env.example +++ /dev/null @@ -1,33 +0,0 @@ -DEBUG=True - -WOSERVER=tcp://127.0.0.1:19530 -SERVER_PORT=19532 -SERVER_TEST_PORT=19888 - -SD_PROVIDER=Static - -SD_NAMESPACE=xp -SD_IN_CLUSTER=False -SD_POLL_INTERVAL=5 -SD_ROSERVER_POD_PATT=.*-ro-servers-.* -SD_LABEL_SELECTOR=tier=ro-servers - -SD_STATIC_HOSTS=127.0.0.1 -SD_STATIC_PORT=19530 - -#SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 -SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False -SQL_ECHO=True - -#SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 -SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False -SQL_TEST_ECHO=False - -# TRACING_TEST_TYPE=jaeger -TRACING_TYPE=jaeger -TRACING_SERVICE_NAME=fortest -TRACING_SAMPLER_TYPE=const -TRACING_SAMPLER_PARAM=1 -TRACING_LOG_PAYLOAD=True -#TRACING_SAMPLER_TYPE=probabilistic -#TRACING_SAMPLER_PARAM=0.5 diff --git a/mishards/__init__.py b/mishards/__init__.py deleted file mode 100644 index 7db3d8cb5e..0000000000 --- a/mishards/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -import logging -from mishards import settings -logger = logging.getLogger() - -from mishards.db_base import DB -db = DB() - -from mishards.server import Server -grpc_server = Server() - - -def create_app(testing_config=None): - config = testing_config if testing_config else settings.DefaultConfig - db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) - - from mishards.connections import ConnectionMgr - connect_mgr = ConnectionMgr() - - from sd import ProviderManager - - sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) - discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) - - from tracing.factory import TracerFactory - from mishards.grpc_utils import GrpcSpanDecorator - tracer = TracerFactory.new_tracer(config.TRACING_TYPE, settings.TracingConfig, - span_decorator=GrpcSpanDecorator()) - - from mishards.routings import RouterFactory - router = 
RouterFactory.new_router(config.ROUTER_CLASS_NAME, connect_mgr) - - grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, router=router, discover=discover) - - from mishards import exception_handlers - - return grpc_server diff --git a/mishards/connections.py b/mishards/connections.py deleted file mode 100644 index 618690a099..0000000000 --- a/mishards/connections.py +++ /dev/null @@ -1,154 +0,0 @@ -import logging -import threading -from functools import wraps -from milvus import Milvus - -from mishards import (settings, exceptions) -from utils import singleton - -logger = logging.getLogger(__name__) - - -class Connection: - def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs): - self.name = name - self.uri = uri - self.max_retry = max_retry - self.retried = 0 - self.conn = Milvus() - self.error_handlers = [] if not error_handlers else error_handlers - self.on_retry_func = kwargs.get('on_retry_func', None) - # self._connect() - - def __str__(self): - return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri) - - def _connect(self, metadata=None): - try: - self.conn.connect(uri=self.uri) - except Exception as e: - if not self.error_handlers: - raise exceptions.ConnectionConnectError(message=str(e), metadata=metadata) - for handler in self.error_handlers: - handler(e, metadata=metadata) - - @property - def can_retry(self): - return self.retried < self.max_retry - - @property - def connected(self): - return self.conn.connected() - - def on_retry(self): - if self.on_retry_func: - self.on_retry_func(self) - else: - self.retried > 1 and logger.warning('{} is retrying {}'.format(self, self.retried)) - - def on_connect(self, metadata=None): - while not self.connected and self.can_retry: - self.retried += 1 - self.on_retry() - self._connect(metadata=metadata) - - if not self.can_retry and not self.connected: - raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry, - metadata=metadata)) - - self.retried = 0 - - def connect(self, func, exception_handler=None): - @wraps(func) - def inner(*args, **kwargs): - self.on_connect() - try: - return func(*args, **kwargs) - except Exception as e: - if exception_handler: - exception_handler(e) - else: - raise e - return inner - - -@singleton -class ConnectionMgr: - def __init__(self): - self.metas = {} - self.conns = {} - - @property - def conn_names(self): - return set(self.metas.keys()) - set(['WOSERVER']) - - def conn(self, name, metadata, throw=False): - c = self.conns.get(name, None) - if not c: - url = self.metas.get(name, None) - if not url: - if not throw: - return None - raise exceptions.ConnectionNotFoundError(message='Connection {} not found'.format(name), - metadata=metadata) - this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) - threaded = { - threading.get_ident(): this_conn - } - self.conns[name] = threaded - return this_conn - - tid = threading.get_ident() - rconn = c.get(tid, None) - if not rconn: - url = self.metas.get(name, None) - if not url: - if not throw: - return None - raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name), - metadata=metadata) - this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) - c[tid] = this_conn - return this_conn - - return rconn - - def on_new_meta(self, name, url): - logger.info('Register Connection: name={};url={}'.format(name, url)) - self.metas[name] = url - - def on_duplicate_meta(self, name, url): - if self.metas[name] == url: - return self.on_same_meta(name, url) 
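ConnectionMgr.conn above keys every cached Connection by threading.get_ident(), so each worker thread in the pool talks over its own client instead of sharing one socket. The caching shape reduces to a few lines (toy factory standing in for Connection/Milvus):

    import threading

    class PerThreadConns:
        def __init__(self, factory):
            self.factory = factory
            self.by_name = {}                        # name -> {thread_id: conn}

        def conn(self, name):
            threaded = self.by_name.setdefault(name, {})
            tid = threading.get_ident()
            if tid not in threaded:
                threaded[tid] = self.factory(name)   # first use on this thread
            return threaded[tid]

    conns = PerThreadConns(lambda name: object())
    c = conns.conn('pod1')
    assert conns.conn('pod1') is c                   # same thread, same connection

threading.local would express the same per-thread storage more idiomatically, but the explicit dict keeps every thread's connection reachable from one place, which is what lets on_diff_meta drop them wholesale when a server's URL changes.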
- - return self.on_diff_meta(name, url) - - def on_same_meta(self, name, url): - # logger.warning('Register same meta: {}:{}'.format(name, url)) - pass - - def on_diff_meta(self, name, url): - logger.warning('Received {} with diff url={}'.format(name, url)) - self.metas[name] = url - self.conns[name] = {} - - def on_unregister_meta(self, name, url): - logger.info('Unregister name={};url={}'.format(name, url)) - self.conns.pop(name, None) - - def on_nonexisted_meta(self, name): - logger.warning('Non-existed meta: {}'.format(name)) - - def register(self, name, url): - meta = self.metas.get(name) - if not meta: - return self.on_new_meta(name, url) - else: - return self.on_duplicate_meta(name, url) - - def unregister(self, name): - logger.info('Unregister Connection: name={}'.format(name)) - url = self.metas.pop(name, None) - if url is None: - return self.on_nonexisted_meta(name) - return self.on_unregister_meta(name, url) diff --git a/mishards/db_base.py b/mishards/db_base.py deleted file mode 100644 index 5f2eee9ba1..0000000000 --- a/mishards/db_base.py +++ /dev/null @@ -1,52 +0,0 @@ -import logging -from sqlalchemy import create_engine -from sqlalchemy.engine.url import make_url -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker, scoped_session -from sqlalchemy.orm.session import Session as SessionBase - -logger = logging.getLogger(__name__) - - -class LocalSession(SessionBase): - def __init__(self, db, autocommit=False, autoflush=True, **options): - self.db = db - bind = options.pop('bind', None) or db.engine - SessionBase.__init__(self, autocommit=autocommit, autoflush=autoflush, bind=bind, **options) - - -class DB: - Model = declarative_base() - - def __init__(self, uri=None, echo=False): - self.echo = echo - uri and self.init_db(uri, echo) - self.session_factory = scoped_session(sessionmaker(class_=LocalSession, db=self)) - - def init_db(self, uri, echo=False): - url = make_url(uri) - if url.get_backend_name() == 'sqlite': - self.engine = create_engine(url) - else: - self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, - pool_pre_ping=True, - echo=echo, - max_overflow=0) - self.uri = uri - self.url = url - - def __str__(self): - return ''.format(self.url.get_backend_name(), self.url.database) - - @property - def Session(self): - return self.session_factory() - - def remove_session(self): - self.session_factory.remove() - - def drop_all(self): - self.Model.metadata.drop_all(self.engine) - - def create_all(self): - self.Model.metadata.create_all(self.engine) diff --git a/mishards/exception_codes.py b/mishards/exception_codes.py deleted file mode 100644 index bdd4572dd5..0000000000 --- a/mishards/exception_codes.py +++ /dev/null @@ -1,10 +0,0 @@ -INVALID_CODE = -1 - -CONNECT_ERROR_CODE = 10001 -CONNECTTION_NOT_FOUND_CODE = 10002 -DB_ERROR_CODE = 10003 - -TABLE_NOT_FOUND_CODE = 20001 -INVALID_ARGUMENT_CODE = 20002 -INVALID_DATE_RANGE_CODE = 20003 -INVALID_TOPK_CODE = 20004 diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py deleted file mode 100644 index c79a6db5a3..0000000000 --- a/mishards/exception_handlers.py +++ /dev/null @@ -1,82 +0,0 @@ -import logging -from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 -from mishards import grpc_server as server, exceptions - -logger = logging.getLogger(__name__) - - -def resp_handler(err, error_code): - if not isinstance(err, exceptions.BaseException): - return status_pb2.Status(error_code=error_code, reason=str(err)) - - status = 
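db_base.py above builds the classic scoped-session setup: one shared engine, a sessionmaker bound to it, and a scoped_session registry so each thread transparently gets its own Session (remove_session is what the router calls once routing is done). The wiring, compressed to a runnable sketch:

    from sqlalchemy import create_engine
    from sqlalchemy.orm import scoped_session, sessionmaker

    engine = create_engine('sqlite://')                  # in-memory, for the sketch
    Session = scoped_session(sessionmaker(bind=engine))  # one session per thread

    s1 = Session()
    assert Session() is s1       # same thread -> the same session object
    Session.remove()             # discard this thread's session
    assert Session() is not s1   # the next access builds a fresh one

The engine branch in init_db matters as well: SQLite gets a bare create_engine, while server databases get pool_pre_ping and recycling so long-lived pooled connections survive being dropped.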
status_pb2.Status(error_code=error_code, reason=err.message) - - if err.metadata is None: - return status - - resp_class = err.metadata.get('resp_class', None) - if not resp_class: - return status - - if resp_class == milvus_pb2.BoolReply: - return resp_class(status=status, bool_reply=False) - - if resp_class == milvus_pb2.VectorIds: - return resp_class(status=status, vector_id_array=[]) - - if resp_class == milvus_pb2.TopKQueryResultList: - return resp_class(status=status, topk_query_result=[]) - - if resp_class == milvus_pb2.TableRowCount: - return resp_class(status=status, table_row_count=-1) - - if resp_class == milvus_pb2.TableName: - return resp_class(status=status, table_name=[]) - - if resp_class == milvus_pb2.StringReply: - return resp_class(status=status, string_reply='') - - if resp_class == milvus_pb2.TableSchema: - return milvus_pb2.TableSchema( - status=status - ) - - if resp_class == milvus_pb2.IndexParam: - return milvus_pb2.IndexParam( - table_name=milvus_pb2.TableName( - status=status - ) - ) - - status.error_code = status_pb2.UNEXPECTED_ERROR - return status - - -@server.errorhandler(exceptions.TableNotFoundError) -def TableNotFoundErrorHandler(err): - logger.error(err) - return resp_handler(err, status_pb2.TABLE_NOT_EXISTS) - - -@server.errorhandler(exceptions.InvalidTopKError) -def InvalidTopKErrorHandler(err): - logger.error(err) - return resp_handler(err, status_pb2.ILLEGAL_TOPK) - - -@server.errorhandler(exceptions.InvalidArgumentError) -def InvalidArgumentErrorHandler(err): - logger.error(err) - return resp_handler(err, status_pb2.ILLEGAL_ARGUMENT) - - -@server.errorhandler(exceptions.DBError) -def DBErrorHandler(err): - logger.error(err) - return resp_handler(err, status_pb2.UNEXPECTED_ERROR) - - -@server.errorhandler(exceptions.InvalidRangeError) -def InvalidArgumentErrorHandler(err): - logger.error(err) - return resp_handler(err, status_pb2.ILLEGAL_RANGE) diff --git a/mishards/exceptions.py b/mishards/exceptions.py deleted file mode 100644 index 72839f88d2..0000000000 --- a/mishards/exceptions.py +++ /dev/null @@ -1,38 +0,0 @@ -import mishards.exception_codes as codes - - -class BaseException(Exception): - code = codes.INVALID_CODE - message = 'BaseException' - - def __init__(self, message='', metadata=None): - self.message = self.__class__.__name__ if not message else message - self.metadata = metadata - - -class ConnectionConnectError(BaseException): - code = codes.CONNECT_ERROR_CODE - - -class ConnectionNotFoundError(BaseException): - code = codes.CONNECTTION_NOT_FOUND_CODE - - -class DBError(BaseException): - code = codes.DB_ERROR_CODE - - -class TableNotFoundError(BaseException): - code = codes.TABLE_NOT_FOUND_CODE - - -class InvalidTopKError(BaseException): - code = codes.INVALID_TOPK_CODE - - -class InvalidArgumentError(BaseException): - code = codes.INVALID_ARGUMENT_CODE - - -class InvalidRangeError(BaseException): - code = codes.INVALID_DATE_RANGE_CODE diff --git a/mishards/factories.py b/mishards/factories.py deleted file mode 100644 index 52c0253b39..0000000000 --- a/mishards/factories.py +++ /dev/null @@ -1,54 +0,0 @@ -import time -import datetime -import random -import factory -from factory.alchemy import SQLAlchemyModelFactory -from faker import Faker -from faker.providers import BaseProvider - -from milvus.client.types import MetricType -from mishards import db -from mishards.models import Tables, TableFiles - - -class FakerProvider(BaseProvider): - def this_date(self): - t = datetime.datetime.today() - return (t.year - 1900) * 10000 + (t.month - 
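exceptions.py above keeps the taxonomy declarative: every subclass only overrides a class-level code, and the message defaults to the class name, which is what lets resp_handler serialize any of them uniformly while the metadata's resp_class picks the right reply type. In miniature:

    class BaseError(Exception):
        code = -1

        def __init__(self, message='', metadata=None):
            # empty messages fall back to the class name
            self.message = message or self.__class__.__name__
            self.metadata = metadata

    class TableNotFoundError(BaseError):
        code = 20001

    err = TableNotFoundError(metadata={'resp_class': 'BoolReply'})
    assert (err.code, err.message) == (20001, 'TableNotFoundError')

The resp_class lookup is what keeps gRPC clients happy: an error raised during HasTable must still come back as a BoolReply rather than a bare Status, or the generated stubs cannot decode the response.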
1) * 100 + t.day - - -factory.Faker.add_provider(FakerProvider) - - -class TablesFactory(SQLAlchemyModelFactory): - class Meta: - model = Tables - sqlalchemy_session = db.session_factory - sqlalchemy_session_persistence = 'commit' - - id = factory.Faker('random_number', digits=16, fix_len=True) - table_id = factory.Faker('uuid4') - state = factory.Faker('random_element', elements=(0, 1)) - dimension = factory.Faker('random_element', elements=(256, 512)) - created_on = int(time.time()) - index_file_size = 0 - engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) - metric_type = factory.Faker('random_element', elements=(MetricType.L2, MetricType.IP)) - nlist = 16384 - - -class TableFilesFactory(SQLAlchemyModelFactory): - class Meta: - model = TableFiles - sqlalchemy_session = db.session_factory - sqlalchemy_session_persistence = 'commit' - - id = factory.Faker('random_number', digits=16, fix_len=True) - table = factory.SubFactory(TablesFactory) - engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) - file_id = factory.Faker('uuid4') - file_type = factory.Faker('random_element', elements=(0, 1, 2, 3, 4)) - file_size = factory.Faker('random_number') - updated_time = int(time.time()) - created_on = int(time.time()) - date = factory.Faker('this_date') diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py deleted file mode 100644 index f5225b2a66..0000000000 --- a/mishards/grpc_utils/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -from grpc_opentracing import SpanDecorator -from milvus.grpc_gen import status_pb2 - - -class GrpcSpanDecorator(SpanDecorator): - def __call__(self, span, rpc_info): - status = None - if not rpc_info.response: - return - if isinstance(rpc_info.response, status_pb2.Status): - status = rpc_info.response - else: - try: - status = rpc_info.response.status - except Exception as e: - status = status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, - reason='Should not happen') - - if status.error_code == 0: - return - error_log = {'event': 'error', - 'request': rpc_info.request, - 'response': rpc_info.response - } - span.set_tag('error', True) - span.log_kv(error_log) - - -def mark_grpc_method(func): - setattr(func, 'grpc_method', True) - return func - - -def is_grpc_method(func): - if not func: - return False - return getattr(func, 'grpc_method', False) diff --git a/mishards/grpc_utils/grpc_args_parser.py b/mishards/grpc_utils/grpc_args_parser.py deleted file mode 100644 index 039299803d..0000000000 --- a/mishards/grpc_utils/grpc_args_parser.py +++ /dev/null @@ -1,102 +0,0 @@ -from milvus import Status -from functools import wraps - - -def error_status(func): - @wraps(func) - def inner(*args, **kwargs): - try: - results = func(*args, **kwargs) - except Exception as e: - return Status(code=Status.UNEXPECTED_ERROR, message=str(e)), None - - return Status(code=0, message="Success"), results - - return inner - - -class GrpcArgsParser(object): - - @classmethod - @error_status - def parse_proto_TableSchema(cls, param): - _table_schema = { - 'status': param.status, - 'table_name': param.table_name, - 'dimension': param.dimension, - 'index_file_size': param.index_file_size, - 'metric_type': param.metric_type - } - - return _table_schema - - @classmethod - @error_status - def parse_proto_TableName(cls, param): - return param.table_name - - @classmethod - @error_status - def parse_proto_Index(cls, param): - _index = { - 'index_type': param.index_type, - 'nlist': param.nlist - } - - return _index - - @classmethod - @error_status - 
def parse_proto_IndexParam(cls, param): - _table_name = param.table_name - _status, _index = cls.parse_proto_Index(param.index) - - if not _status.OK(): - raise Exception("Argument parse error") - - return _table_name, _index - - @classmethod - @error_status - def parse_proto_Command(cls, param): - _cmd = param.cmd - - return _cmd - - @classmethod - @error_status - def parse_proto_Range(cls, param): - _start_value = param.start_value - _end_value = param.end_value - - return _start_value, _end_value - - @classmethod - @error_status - def parse_proto_RowRecord(cls, param): - return list(param.vector_data) - - @classmethod - @error_status - def parse_proto_SearchParam(cls, param): - _table_name = param.table_name - _topk = param.topk - _nprobe = param.nprobe - _status, _range = cls.parse_proto_Range(param.query_range_array) - - if not _status.OK(): - raise Exception("Argument parse error") - - _row_record = param.query_record_array - - return _table_name, _row_record, _range, _topk - - @classmethod - @error_status - def parse_proto_DeleteByRangeParam(cls, param): - _table_name = param.table_name - _range = param.range - _start_value = _range.start_value - _end_value = _range.end_value - - return _table_name, _start_value, _end_value diff --git a/mishards/grpc_utils/grpc_args_wrapper.py b/mishards/grpc_utils/grpc_args_wrapper.py deleted file mode 100644 index 7447dbd995..0000000000 --- a/mishards/grpc_utils/grpc_args_wrapper.py +++ /dev/null @@ -1,4 +0,0 @@ -# class GrpcArgsWrapper(object): - -# @classmethod -# def proto_TableName(cls): diff --git a/mishards/grpc_utils/test_grpc.py b/mishards/grpc_utils/test_grpc.py deleted file mode 100644 index 9af09e5d0d..0000000000 --- a/mishards/grpc_utils/test_grpc.py +++ /dev/null @@ -1,75 +0,0 @@ -import logging -import opentracing -from mishards.grpc_utils import GrpcSpanDecorator, is_grpc_method -from milvus.grpc_gen import status_pb2, milvus_pb2 - -logger = logging.getLogger(__name__) - - -class FakeTracer(opentracing.Tracer): - pass - - -class FakeSpan(opentracing.Span): - def __init__(self, context, tracer, **kwargs): - super(FakeSpan, self).__init__(tracer, context) - self.reset() - - def set_tag(self, key, value): - self.tags.append({key: value}) - - def log_kv(self, key_values, timestamp=None): - self.logs.append(key_values) - - def reset(self): - self.tags = [] - self.logs = [] - - -class FakeRpcInfo: - def __init__(self, request, response): - self.request = request - self.response = response - - -class TestGrpcUtils: - def test_span_deco(self): - request = 'request' - OK = status_pb2.Status(error_code=status_pb2.SUCCESS, reason='Success') - response = OK - rpc_info = FakeRpcInfo(request=request, response=response) - span = FakeSpan(context=None, tracer=FakeTracer()) - span_deco = GrpcSpanDecorator() - span_deco(span, rpc_info) - assert len(span.logs) == 0 - assert len(span.tags) == 0 - - response = milvus_pb2.BoolReply(status=OK, bool_reply=False) - rpc_info = FakeRpcInfo(request=request, response=response) - span = FakeSpan(context=None, tracer=FakeTracer()) - span_deco = GrpcSpanDecorator() - span_deco(span, rpc_info) - assert len(span.logs) == 0 - assert len(span.tags) == 0 - - response = 1 - rpc_info = FakeRpcInfo(request=request, response=response) - span = FakeSpan(context=None, tracer=FakeTracer()) - span_deco = GrpcSpanDecorator() - span_deco(span, rpc_info) - assert len(span.logs) == 1 - assert len(span.tags) == 1 - - response = 0 - rpc_info = FakeRpcInfo(request=request, response=response) - span = FakeSpan(context=None, 
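Every parser above shares one calling convention, (Status, result), and the error_status decorator is what provides it: any exception inside a parse becomes an UNEXPECTED_ERROR status instead of propagating into the servicer. The decorator in isolation (toy Status class in place of the SDK's):

    from functools import wraps

    class Status:
        UNEXPECTED_ERROR = 1

        def __init__(self, code=0, message='Success'):
            self.code, self.message = code, message

        def OK(self):
            return self.code == 0

    def error_status(func):
        @wraps(func)
        def inner(*args, **kwargs):
            try:
                result = func(*args, **kwargs)
            except Exception as e:
                return Status(Status.UNEXPECTED_ERROR, str(e)), None
            return Status(), result
        return inner

    @error_status
    def parse(param):
        return param['table_name']        # a KeyError becomes an error Status

    status, name = parse({'table_name': 't1'})
    assert status.OK() and name == 't1'
    status, name = parse({})
    assert not status.OK() and name is None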
tracer=FakeTracer()) - span_deco = GrpcSpanDecorator() - span_deco(span, rpc_info) - assert len(span.logs) == 0 - assert len(span.tags) == 0 - - def test_is_grpc_method(self): - target = 1 - assert not is_grpc_method(target) - target = None - assert not is_grpc_method(target) diff --git a/mishards/hash_ring.py b/mishards/hash_ring.py deleted file mode 100644 index a97f3f580e..0000000000 --- a/mishards/hash_ring.py +++ /dev/null @@ -1,150 +0,0 @@ -import math -import sys -from bisect import bisect - -if sys.version_info >= (2, 5): - import hashlib - md5_constructor = hashlib.md5 -else: - import md5 - md5_constructor = md5.new - - -class HashRing(object): - def __init__(self, nodes=None, weights=None): - """`nodes` is a list of objects that have a proper __str__ representation. - `weights` is dictionary that sets weights to the nodes. The default - weight is that all nodes are equal. - """ - self.ring = dict() - self._sorted_keys = [] - - self.nodes = nodes - - if not weights: - weights = {} - self.weights = weights - - self._generate_circle() - - def _generate_circle(self): - """Generates the circle. - """ - total_weight = 0 - for node in self.nodes: - total_weight += self.weights.get(node, 1) - - for node in self.nodes: - weight = 1 - - if node in self.weights: - weight = self.weights.get(node) - - factor = math.floor((40 * len(self.nodes) * weight) / total_weight) - - for j in range(0, int(factor)): - b_key = self._hash_digest('%s-%s' % (node, j)) - - for i in range(0, 3): - key = self._hash_val(b_key, lambda x: x + i * 4) - self.ring[key] = node - self._sorted_keys.append(key) - - self._sorted_keys.sort() - - def get_node(self, string_key): - """Given a string key a corresponding node in the hash ring is returned. - - If the hash ring is empty, `None` is returned. - """ - pos = self.get_node_pos(string_key) - if pos is None: - return None - return self.ring[self._sorted_keys[pos]] - - def get_node_pos(self, string_key): - """Given a string key a corresponding node in the hash ring is returned - along with it's position in the ring. - - If the hash ring is empty, (`None`, `None`) is returned. - """ - if not self.ring: - return None - - key = self.gen_key(string_key) - - nodes = self._sorted_keys - pos = bisect(nodes, key) - - if pos == len(nodes): - return 0 - else: - return pos - - def iterate_nodes(self, string_key, distinct=True): - """Given a string key it returns the nodes as a generator that can hold the key. - - The generator iterates one time through the ring - starting at the correct position. - - if `distinct` is set, then the nodes returned will be unique, - i.e. no virtual copies will be returned. - """ - if not self.ring: - yield None, None - - returned_values = set() - - def distinct_filter(value): - if str(value) not in returned_values: - returned_values.add(str(value)) - return value - - pos = self.get_node_pos(string_key) - for key in self._sorted_keys[pos:]: - val = distinct_filter(self.ring[key]) - if val: - yield val - - for i, key in enumerate(self._sorted_keys): - if i < pos: - val = distinct_filter(self.ring[key]) - if val: - yield val - - def gen_key(self, key): - """Given a string key it returns a long value, - this long value represents a place on the hash ring. - - md5 is currently used because it mixes well. 
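HashRing._hash_val above assembles a 32-bit integer from four bytes of the md5 digest, and _generate_circle takes three such integers per virtual node (offsets 0, 4, 8), so each 16-byte digest yields three ring positions. The shifting is just a little-endian read; a quick equivalence check, assuming the same digest slicing:

    import hashlib

    def hash_val(b_key, offset=0):
        # HashRing._hash_val with entry_fn = lambda x: x + offset
        return ((b_key[offset + 3] << 24) | (b_key[offset + 2] << 16) |
                (b_key[offset + 1] << 8) | b_key[offset + 0])

    digest = hashlib.md5(b'node-0').digest()
    for offset in (0, 4, 8):
        expected = int.from_bytes(digest[offset:offset + 4], 'little')
        assert hash_val(digest, offset) == expected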
- """ - b_key = self._hash_digest(key) - return self._hash_val(b_key, lambda x: x) - - def _hash_val(self, b_key, entry_fn): - return (b_key[entry_fn(3)] << 24) | (b_key[entry_fn(2)] << 16) | ( - b_key[entry_fn(1)] << 8) | b_key[entry_fn(0)] - - def _hash_digest(self, key): - m = md5_constructor() - key = key.encode() - m.update(key) - return m.digest() - - -if __name__ == '__main__': - from collections import defaultdict - servers = [ - '192.168.0.246:11212', '192.168.0.247:11212', '192.168.0.248:11212', - '192.168.0.249:11212' - ] - - ring = HashRing(servers) - keys = ['{}'.format(i) for i in range(100)] - mapped = defaultdict(list) - for k in keys: - server = ring.get_node(k) - mapped[server].append(k) - - for k, v in mapped.items(): - print(k, v) diff --git a/mishards/main.py b/mishards/main.py deleted file mode 100644 index c0d142607b..0000000000 --- a/mishards/main.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import sys -sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - -from mishards import (settings, create_app) - - -def main(): - server = create_app(settings.DefaultConfig) - server.run(port=settings.SERVER_PORT) - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/mishards/models.py b/mishards/models.py deleted file mode 100644 index 4b6c8f9ef4..0000000000 --- a/mishards/models.py +++ /dev/null @@ -1,76 +0,0 @@ -import logging -from sqlalchemy import (Integer, Boolean, Text, - String, BigInteger, and_, or_, - Column) -from sqlalchemy.orm import relationship, backref - -from mishards import db - -logger = logging.getLogger(__name__) - - -class TableFiles(db.Model): - FILE_TYPE_NEW = 0 - FILE_TYPE_RAW = 1 - FILE_TYPE_TO_INDEX = 2 - FILE_TYPE_INDEX = 3 - FILE_TYPE_TO_DELETE = 4 - FILE_TYPE_NEW_MERGE = 5 - FILE_TYPE_NEW_INDEX = 6 - FILE_TYPE_BACKUP = 7 - - __tablename__ = 'TableFiles' - - id = Column(BigInteger, primary_key=True, autoincrement=True) - table_id = Column(String(50)) - engine_type = Column(Integer) - file_id = Column(String(50)) - file_type = Column(Integer) - file_size = Column(Integer, default=0) - row_count = Column(Integer, default=0) - updated_time = Column(BigInteger) - created_on = Column(BigInteger) - date = Column(Integer) - - table = relationship( - 'Tables', - primaryjoin='and_(foreign(TableFiles.table_id) == Tables.table_id)', - backref=backref('files', uselist=True, lazy='dynamic') - ) - - -class Tables(db.Model): - TO_DELETE = 1 - NORMAL = 0 - - __tablename__ = 'Tables' - - id = Column(BigInteger, primary_key=True, autoincrement=True) - table_id = Column(String(50), unique=True) - state = Column(Integer) - dimension = Column(Integer) - created_on = Column(Integer) - flag = Column(Integer, default=0) - index_file_size = Column(Integer) - engine_type = Column(Integer) - nlist = Column(Integer) - metric_type = Column(Integer) - - def files_to_search(self, date_range=None): - cond = or_( - TableFiles.file_type == TableFiles.FILE_TYPE_RAW, - TableFiles.file_type == TableFiles.FILE_TYPE_TO_INDEX, - TableFiles.file_type == TableFiles.FILE_TYPE_INDEX, - ) - if date_range: - cond = and_( - cond, - or_( - and_(TableFiles.date >= d[0], TableFiles.date < d[1]) for d in date_range - ) - ) - - files = self.files.filter(cond) - - logger.debug('DATE_RANGE: {}'.format(date_range)) - return files diff --git a/mishards/routings.py b/mishards/routings.py deleted file mode 100644 index 823972726f..0000000000 --- a/mishards/routings.py +++ /dev/null @@ -1,96 +0,0 @@ -import logging -from sqlalchemy import exc as sqlalchemy_exc 
-from sqlalchemy import and_ - -from mishards import exceptions, db -from mishards.hash_ring import HashRing -from mishards.models import Tables - -logger = logging.getLogger(__name__) - - -class RouteManager: - ROUTER_CLASSES = {} - - @classmethod - def register_router_class(cls, target): - name = target.__dict__.get('NAME', None) - name = name if name else target.__class__.__name__ - cls.ROUTER_CLASSES[name] = target - return target - - @classmethod - def get_router_class(cls, name): - return cls.ROUTER_CLASSES.get(name, None) - - -class RouterFactory: - @classmethod - def new_router(cls, name, conn_mgr, **kwargs): - router_class = RouteManager.get_router_class(name) - assert router_class - return router_class(conn_mgr, **kwargs) - - -class RouterMixin: - def __init__(self, conn_mgr): - self.conn_mgr = conn_mgr - - def routing(self, table_name, metadata=None, **kwargs): - raise NotImplemented() - - def connection(self, metadata=None): - conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) - if conn: - conn.on_connect(metadata=metadata) - return conn.conn - - def query_conn(self, name, metadata=None): - conn = self.conn_mgr.conn(name, metadata=metadata) - if not conn: - raise exceptions.ConnectionNotFoundError(name, metadata=metadata) - conn.on_connect(metadata=metadata) - return conn.conn - - -@RouteManager.register_router_class -class FileBasedHashRingRouter(RouterMixin): - NAME = 'FileBasedHashRingRouter' - - def __init__(self, conn_mgr, **kwargs): - super(FileBasedHashRingRouter, self).__init__(conn_mgr) - - def routing(self, table_name, metadata=None, **kwargs): - range_array = kwargs.pop('range_array', None) - return self._route(table_name, range_array, metadata, **kwargs) - - def _route(self, table_name, range_array, metadata=None, **kwargs): - # PXU TODO: Implement Thread-local Context - # PXU TODO: Session life mgt - try: - table = db.Session.query(Tables).filter( - and_(Tables.table_id == table_name, - Tables.state != Tables.TO_DELETE)).first() - except sqlalchemy_exc.SQLAlchemyError as e: - raise exceptions.DBError(message=str(e), metadata=metadata) - - if not table: - raise exceptions.TableNotFoundError(table_name, metadata=metadata) - files = table.files_to_search(range_array) - db.remove_session() - - servers = self.conn_mgr.conn_names - logger.info('Available servers: {}'.format(servers)) - - ring = HashRing(servers) - - routing = {} - - for f in files: - target_host = ring.get_node(str(f.id)) - sub = routing.get(target_host, None) - if not sub: - routing[target_host] = {'table_id': table_name, 'file_ids': []} - routing[target_host]['file_ids'].append(str(f.id)) - - return routing diff --git a/mishards/server.py b/mishards/server.py deleted file mode 100644 index 599a00e455..0000000000 --- a/mishards/server.py +++ /dev/null @@ -1,122 +0,0 @@ -import logging -import grpc -import time -import socket -import inspect -from urllib.parse import urlparse -from functools import wraps -from concurrent import futures -from grpc._cython import cygrpc -from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server -from mishards.grpc_utils import is_grpc_method -from mishards.service_handler import ServiceHandler -from mishards import settings - -logger = logging.getLogger(__name__) - - -class Server: - def __init__(self): - self.pre_run_handlers = set() - self.grpc_methods = set() - self.error_handlers = {} - self.exit_flag = False - - def init_app(self, - conn_mgr, - tracer, - router, - discover, - port=19530, - max_workers=10, - **kwargs): - self.port = 
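RouteManager above is a plain string-keyed class registry: the decorator records each router under its NAME so settings can choose one with ROUTER_CLASS_NAME. One wrinkle worth noting: the fallback target.__class__.__name__ evaluates to 'type' for a class object (target.__name__ would be the class's own name), which never bites here only because every registered router declares NAME. The mechanics, reduced:

    class Registry:
        classes = {}

        @classmethod
        def register(cls, target):
            # prefer an explicit NAME; fall back to the class's own name
            cls.classes[getattr(target, 'NAME', target.__name__)] = target
            return target

    @Registry.register
    class FileBasedHashRingRouter:
        NAME = 'FileBasedHashRingRouter'

    assert Registry.classes['FileBasedHashRingRouter'] is FileBasedHashRingRouter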
int(port) - self.conn_mgr = conn_mgr - self.tracer = tracer - self.router = router - self.discover = discover - - self.server_impl = grpc.server( - thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), - options=[(cygrpc.ChannelArgKey.max_send_message_length, -1), - (cygrpc.ChannelArgKey.max_receive_message_length, -1)]) - - self.server_impl = self.tracer.decorate(self.server_impl) - - self.register_pre_run_handler(self.pre_run_handler) - - def pre_run_handler(self): - woserver = settings.WOSERVER - url = urlparse(woserver) - ip = socket.gethostbyname(url.hostname) - socket.inet_pton(socket.AF_INET, ip) - self.conn_mgr.register( - 'WOSERVER', '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) - - def register_pre_run_handler(self, func): - logger.info('Regiterring {} into server pre_run_handlers'.format(func)) - self.pre_run_handlers.add(func) - return func - - def wrap_method_with_errorhandler(self, func): - @wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception as e: - if e.__class__ in self.error_handlers: - return self.error_handlers[e.__class__](e) - raise - - return wrapper - - def errorhandler(self, exception): - if inspect.isclass(exception) and issubclass(exception, Exception): - - def wrapper(func): - self.error_handlers[exception] = func - return func - - return wrapper - return exception - - def on_pre_run(self): - for handler in self.pre_run_handlers: - handler() - self.discover.start() - - def start(self, port=None): - handler_class = self.decorate_handler(ServiceHandler) - add_MilvusServiceServicer_to_server( - handler_class(tracer=self.tracer, - router=self.router), self.server_impl) - self.server_impl.add_insecure_port("[::]:{}".format( - str(port or self.port))) - self.server_impl.start() - - def run(self, port): - logger.info('Milvus server start ......') - port = port or self.port - self.on_pre_run() - - self.start(port) - logger.info('Listening on port {}'.format(port)) - - try: - while not self.exit_flag: - time.sleep(5) - except KeyboardInterrupt: - self.stop() - - def stop(self): - logger.info('Server is shuting down ......') - self.exit_flag = True - self.server_impl.stop(0) - self.tracer.close() - logger.info('Server is closed') - - def decorate_handler(self, handler): - for key, attr in handler.__dict__.items(): - if is_grpc_method(attr): - setattr(handler, key, self.wrap_method_with_errorhandler(attr)) - return handler diff --git a/mishards/service_handler.py b/mishards/service_handler.py deleted file mode 100644 index 5e91c14f14..0000000000 --- a/mishards/service_handler.py +++ /dev/null @@ -1,475 +0,0 @@ -import logging -import time -import datetime -from collections import defaultdict - -import multiprocessing -from concurrent.futures import ThreadPoolExecutor -from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 -from milvus.grpc_gen.milvus_pb2 import TopKQueryResult -from milvus.client.abstract import Range -from milvus.client import types as Types - -from mishards import (db, settings, exceptions) -from mishards.grpc_utils import mark_grpc_method -from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser -from mishards import utilities - -logger = logging.getLogger(__name__) - - -class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): - MAX_NPROBE = 2048 - MAX_TOPK = 2048 - - def __init__(self, tracer, router, max_workers=multiprocessing.cpu_count(), **kwargs): - self.table_meta = {} - self.error_handlers = {} - self.tracer = tracer - self.router = router - 
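Server.decorate_handler above is the counterpart of @mark_grpc_method: at startup it walks the servicer class and wraps only the tagged methods, so an exception raised inside a handler is converted by whichever @server.errorhandler was registered for its type. The mark-and-wrap mechanics as a sketch:

    from functools import wraps

    def mark_grpc_method(func):
        func.grpc_method = True
        return func

    def with_errorhandler(func, handlers):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                if e.__class__ in handlers:
                    return handlers[e.__class__](e)
                raise
        return wrapper

    class Handler:
        @mark_grpc_method
        def Search(self):
            raise ValueError('boom')

    handlers = {ValueError: lambda e: 'error-reply'}
    for key, attr in list(Handler.__dict__.items()):
        if getattr(attr, 'grpc_method', False):
            setattr(Handler, key, with_errorhandler(attr, handlers))

    assert Handler().Search() == 'error-reply'

Note the lookup is exact-type, matching the original: a subclass of a registered exception falls through to raise.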
self.max_workers = max_workers - - def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): - status = status_pb2.Status(error_code=status_pb2.SUCCESS, - reason="Success") - if not files_n_topk_results: - return status, [] - - request_results = defaultdict(list) - - calc_time = time.time() - for files_collection in files_n_topk_results: - if isinstance(files_collection, tuple): - status, _ = files_collection - return status, [] - for request_pos, each_request_results in enumerate( - files_collection.topk_query_result): - request_results[request_pos].extend( - each_request_results.query_result_arrays) - request_results[request_pos] = sorted( - request_results[request_pos], - key=lambda x: x.distance, - reverse=reverse)[:topk] - - calc_time = time.time() - calc_time - logger.info('Merge takes {}'.format(calc_time)) - - results = sorted(request_results.items()) - topk_query_result = [] - - for result in results: - query_result = TopKQueryResult(query_result_arrays=result[1]) - topk_query_result.append(query_result) - - return status, topk_query_result - - def _do_query(self, - context, - table_id, - table_meta, - vectors, - topk, - nprobe, - range_array=None, - **kwargs): - metadata = kwargs.get('metadata', None) - range_array = [ - utilities.range_to_date(r, metadata=metadata) for r in range_array - ] if range_array else None - - routing = {} - p_span = None if self.tracer.empty else context.get_active_span( - ).context - with self.tracer.start_span('get_routing', child_of=p_span): - routing = self.router.routing(table_id, - range_array=range_array, - metadata=metadata) - logger.info('Routing: {}'.format(routing)) - - metadata = kwargs.get('metadata', None) - - rs = [] - all_topk_results = [] - - def search(addr, query_params, vectors, topk, nprobe, **kwargs): - logger.info( - 'Send Search Request: addr={};params={};nq={};topk={};nprobe={}' - .format(addr, query_params, len(vectors), topk, nprobe)) - - conn = self.router.query_conn(addr, metadata=metadata) - start = time.time() - span = kwargs.get('span', None) - span = span if span else (None if self.tracer.empty else - context.get_active_span().context) - - with self.tracer.start_span('search_{}'.format(addr), - child_of=span): - ret = conn.search_vectors_in_files( - table_name=query_params['table_id'], - file_ids=query_params['file_ids'], - query_records=vectors, - top_k=topk, - nprobe=nprobe, - lazy_=True) - end = time.time() - logger.info('search_vectors_in_files takes: {}'.format(end - start)) - - all_topk_results.append(ret) - - with self.tracer.start_span('do_search', child_of=p_span) as span: - with ThreadPoolExecutor(max_workers=self.max_workers) as pool: - for addr, params in routing.items(): - res = pool.submit(search, - addr, - params, - vectors, - topk, - nprobe, - span=span) - rs.append(res) - - for res in rs: - res.result() - - reverse = table_meta.metric_type == Types.MetricType.IP - with self.tracer.start_span('do_merge', child_of=p_span): - return self._do_merge(all_topk_results, - topk, - reverse=reverse, - metadata=metadata) - - def _create_table(self, table_schema): - return self.router.connection().create_table(table_schema) - - @mark_grpc_method - def CreateTable(self, request, context): - _status, _table_schema = Parser.parse_proto_TableSchema(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - logger.info('CreateTable {}'.format(_table_schema['table_name'])) - - _status = self._create_table(_table_schema) - - return 
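_do_merge above is the gather half of scatter-gather search: each shard returns a full top-k per request, and the proxy concatenates results per request position, re-sorts by distance, and truncates back to k (descending when larger means closer, as with inner product). The reduction, stripped of the gRPC types:

    from collections import defaultdict

    def merge_topk(per_shard_results, topk, reverse=False):
        # per_shard_results: one list per shard, each a list (one per request)
        # of (id, distance) hits; reverse=True for similarity metrics like IP
        merged = defaultdict(list)
        for shard in per_shard_results:
            for req_pos, hits in enumerate(shard):
                merged[req_pos].extend(hits)
        return [sorted(hits, key=lambda h: h[1], reverse=reverse)[:topk]
                for _, hits in sorted(merged.items())]

    shard_a = [[(1, 0.1), (2, 0.4)]]
    shard_b = [[(3, 0.2), (4, 0.9)]]
    assert merge_topk([shard_a, shard_b], 2) == [[(1, 0.1), (3, 0.2)]]

In the handler, reverse is derived from table_meta.metric_type == Types.MetricType.IP, which is one reason Search caches describe_table results before merging.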
status_pb2.Status(error_code=_status.code, - reason=_status.message) - - def _has_table(self, table_name, metadata=None): - return self.router.connection(metadata=metadata).has_table(table_name) - - @mark_grpc_method - def HasTable(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return milvus_pb2.BoolReply(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - bool_reply=False) - - logger.info('HasTable {}'.format(_table_name)) - - _status, _bool = self._has_table(_table_name, - metadata={'resp_class': milvus_pb2.BoolReply}) - - return milvus_pb2.BoolReply(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - bool_reply=_bool) - - def _delete_table(self, table_name): - return self.router.connection().delete_table(table_name) - - @mark_grpc_method - def DropTable(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - logger.info('DropTable {}'.format(_table_name)) - - _status = self._delete_table(_table_name) - - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - def _create_index(self, table_name, index): - return self.router.connection().create_index(table_name, index) - - @mark_grpc_method - def CreateIndex(self, request, context): - _status, unpacks = Parser.parse_proto_IndexParam(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - _table_name, _index = unpacks - - logger.info('CreateIndex {}'.format(_table_name)) - - # TODO: interface create_table incompleted - _status = self._create_index(_table_name, _index) - - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - def _add_vectors(self, param, metadata=None): - return self.router.connection(metadata=metadata).add_vectors( - None, None, insert_param=param) - - @mark_grpc_method - def Insert(self, request, context): - logger.info('Insert') - # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' - _status, _ids = self._add_vectors( - metadata={'resp_class': milvus_pb2.VectorIds}, param=request) - return milvus_pb2.VectorIds(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - vector_id_array=_ids) - - @mark_grpc_method - def Search(self, request, context): - - table_name = request.table_name - - topk = request.topk - nprobe = request.nprobe - - logger.info('Search {}: topk={} nprobe={}'.format( - table_name, topk, nprobe)) - - metadata = {'resp_class': milvus_pb2.TopKQueryResultList} - - if nprobe > self.MAX_NPROBE or nprobe <= 0: - raise exceptions.InvalidArgumentError( - message='Invalid nprobe: {}'.format(nprobe), metadata=metadata) - - if topk > self.MAX_TOPK or topk <= 0: - raise exceptions.InvalidTopKError( - message='Invalid topk: {}'.format(topk), metadata=metadata) - - table_meta = self.table_meta.get(table_name, None) - - if not table_meta: - status, info = self.router.connection( - metadata=metadata).describe_table(table_name) - if not status.OK(): - raise exceptions.TableNotFoundError(table_name, - metadata=metadata) - - self.table_meta[table_name] = info - table_meta = info - - start = time.time() - - query_record_array = [] - - for query_record in request.query_record_array: - query_record_array.append(list(query_record.vector_data)) - - query_range_array = [] - for query_range in 
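Search above memoizes describe_table results in self.table_meta, so only the first query against a table pays the extra round trip; later queries read the metric type straight from the cache. The memo shape, with a hypothetical fetch callable standing in for the connection's describe_table:

    class MetaCache:
        def __init__(self, fetch):
            self.fetch = fetch          # e.g. lambda t: conn.describe_table(t)
            self.table_meta = {}

        def get(self, table_name):
            meta = self.table_meta.get(table_name)
            if meta is None:
                meta = self.fetch(table_name)    # one round trip per table
                self.table_meta[table_name] = meta
            return meta

    calls = []
    cache = MetaCache(lambda t: calls.append(t) or {'metric_type': 'L2'})
    cache.get('t1')
    cache.get('t1')
    assert calls == ['t1']              # the second lookup never hit the server

The trade-off is staleness: nothing invalidates the entry, so a table dropped and recreated with a different metric keeps its old metadata until the process restarts.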
request.query_range_array: - query_range_array.append( - Range(query_range.start_value, query_range.end_value)) - - status, results = self._do_query(context, - table_name, - table_meta, - query_record_array, - topk, - nprobe, - query_range_array, - metadata=metadata) - - now = time.time() - logger.info('SearchVector takes: {}'.format(now - start)) - - topk_result_list = milvus_pb2.TopKQueryResultList( - status=status_pb2.Status(error_code=status.error_code, - reason=status.reason), - topk_query_result=results) - return topk_result_list - - @mark_grpc_method - def SearchInFiles(self, request, context): - raise NotImplemented() - - def _describe_table(self, table_name, metadata=None): - return self.router.connection(metadata=metadata).describe_table(table_name) - - @mark_grpc_method - def DescribeTable(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return milvus_pb2.TableSchema(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), ) - - metadata = {'resp_class': milvus_pb2.TableSchema} - - logger.info('DescribeTable {}'.format(_table_name)) - _status, _table = self._describe_table(metadata=metadata, - table_name=_table_name) - - if _status.OK(): - return milvus_pb2.TableSchema( - table_name=_table_name, - index_file_size=_table.index_file_size, - dimension=_table.dimension, - metric_type=_table.metric_type, - status=status_pb2.Status(error_code=_status.code, - reason=_status.message), - ) - - return milvus_pb2.TableSchema( - table_name=_table_name, - status=status_pb2.Status(error_code=_status.code, - reason=_status.message), - ) - - def _count_table(self, table_name, metadata=None): - return self.router.connection( - metadata=metadata).get_table_row_count(table_name) - - @mark_grpc_method - def CountTable(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - status = status_pb2.Status(error_code=_status.code, - reason=_status.message) - - return milvus_pb2.TableRowCount(status=status) - - logger.info('CountTable {}'.format(_table_name)) - - metadata = {'resp_class': milvus_pb2.TableRowCount} - _status, _count = self._count_table(_table_name, metadata=metadata) - - return milvus_pb2.TableRowCount( - status=status_pb2.Status(error_code=_status.code, - reason=_status.message), - table_row_count=_count if isinstance(_count, int) else -1) - - def _get_server_version(self, metadata=None): - return self.router.connection(metadata=metadata).server_version() - - @mark_grpc_method - def Cmd(self, request, context): - _status, _cmd = Parser.parse_proto_Command(request) - logger.info('Cmd: {}'.format(_cmd)) - - if not _status.OK(): - return milvus_pb2.StringReply(status=status_pb2.Status( - error_code=_status.code, reason=_status.message)) - - metadata = {'resp_class': milvus_pb2.StringReply} - - if _cmd == 'version': - _status, _reply = self._get_server_version(metadata=metadata) - else: - _status, _reply = self.router.connection( - metadata=metadata).server_status() - - return milvus_pb2.StringReply(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - string_reply=_reply) - - def _show_tables(self, metadata=None): - return self.router.connection(metadata=metadata).show_tables() - - @mark_grpc_method - def ShowTables(self, request, context): - logger.info('ShowTables') - metadata = {'resp_class': milvus_pb2.TableName} - _status, _results = self._show_tables(metadata=metadata) - - return 
milvus_pb2.TableNameList(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - table_names=_results) - - def _delete_by_range(self, table_name, start_date, end_date): - return self.router.connection().delete_vectors_by_range(table_name, - start_date, - end_date) - - @mark_grpc_method - def DeleteByRange(self, request, context): - _status, unpacks = \ - Parser.parse_proto_DeleteByRangeParam(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - _table_name, _start_date, _end_date = unpacks - - logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, - _end_date)) - _status = self._delete_by_range(_table_name, _start_date, _end_date) - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - def _preload_table(self, table_name): - return self.router.connection().preload_table(table_name) - - @mark_grpc_method - def PreloadTable(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - logger.info('PreloadTable {}'.format(_table_name)) - _status = self._preload_table(_table_name) - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - def _describe_index(self, table_name, metadata=None): - return self.router.connection(metadata=metadata).describe_index(table_name) - - @mark_grpc_method - def DescribeIndex(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return milvus_pb2.IndexParam(status=status_pb2.Status( - error_code=_status.code, reason=_status.message)) - - metadata = {'resp_class': milvus_pb2.IndexParam} - - logger.info('DescribeIndex {}'.format(_table_name)) - _status, _index_param = self._describe_index(table_name=_table_name, - metadata=metadata) - - if not _index_param: - return milvus_pb2.IndexParam(status=status_pb2.Status( - error_code=_status.code, reason=_status.message)) - - _index = milvus_pb2.Index(index_type=_index_param._index_type, - nlist=_index_param._nlist) - - return milvus_pb2.IndexParam(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - table_name=_table_name, - index=_index) - - def _drop_index(self, table_name): - return self.router.connection().drop_index(table_name) - - @mark_grpc_method - def DropIndex(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - logger.info('DropIndex {}'.format(_table_name)) - _status = self._drop_index(_table_name) - return status_pb2.Status(error_code=_status.code, - reason=_status.message) diff --git a/mishards/settings.py b/mishards/settings.py deleted file mode 100644 index 21a3bb7a65..0000000000 --- a/mishards/settings.py +++ /dev/null @@ -1,94 +0,0 @@ -import sys -import os - -from environs import Env -env = Env() - -FROM_EXAMPLE = env.bool('FROM_EXAMPLE', False) -if FROM_EXAMPLE: - from dotenv import load_dotenv - load_dotenv('./mishards/.env.example') -else: - env.read_env() - -DEBUG = env.bool('DEBUG', False) - -LOG_LEVEL = env.str('LOG_LEVEL', 'DEBUG' if DEBUG else 'INFO') -LOG_PATH = env.str('LOG_PATH', '/tmp/mishards') -LOG_NAME = env.str('LOG_NAME', 'logfile') -TIMEZONE = env.str('TIMEZONE', 'UTC') - -from utils.logger_helper import config -config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) - -TIMEOUT = 
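[Editor's note: DropTable, PreloadTable and DropIndex above all share one shape: parse the TableName, fail fast on a bad parse, delegate, then wrap the result in a protobuf Status. A sketch of how that boilerplate could be factored; this helper does not exist in the patch.]

# Hypothetical refactor only: parse, check, delegate, wrap in one place.
def run_table_name_rpc(request, action):
    _status, _table_name = Parser.parse_proto_TableName(request)
    if not _status.OK():
        return status_pb2.Status(error_code=_status.code,
                                 reason=_status.message)
    _status = action(_table_name)  # e.g. self._drop_index
    return status_pb2.Status(error_code=_status.code,
                             reason=_status.message)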
env.int('TIMEOUT', 60) -MAX_RETRY = env.int('MAX_RETRY', 3) - -SERVER_PORT = env.int('SERVER_PORT', 19530) -SERVER_TEST_PORT = env.int('SERVER_TEST_PORT', 19530) -WOSERVER = env.str('WOSERVER') - -SD_PROVIDER_SETTINGS = None -SD_PROVIDER = env.str('SD_PROVIDER', 'Kubernetes') -if SD_PROVIDER == 'Kubernetes': - from sd.kubernetes_provider import KubernetesProviderSettings - SD_PROVIDER_SETTINGS = KubernetesProviderSettings( - namespace=env.str('SD_NAMESPACE', ''), - in_cluster=env.bool('SD_IN_CLUSTER', False), - poll_interval=env.int('SD_POLL_INTERVAL', 5), - pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), - label_selector=env.str('SD_LABEL_SELECTOR', ''), - port=env.int('SD_PORT', 19530)) -elif SD_PROVIDER == 'Static': - from sd.static_provider import StaticProviderSettings - SD_PROVIDER_SETTINGS = StaticProviderSettings( - hosts=env.list('SD_STATIC_HOSTS', []), - port=env.int('SD_STATIC_PORT', 19530)) - -# TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') - - -class TracingConfig: - TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') - TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) - TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', False) - TRACING_CONFIG = { - 'sampler': { - 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), - 'param': env.str('TRACING_SAMPLER_PARAM', "1"), - }, - 'local_agent': { - 'reporting_host': env.str('TRACING_REPORTING_HOST', '127.0.0.1'), - 'reporting_port': env.str('TRACING_REPORTING_PORT', '5775') - }, - 'logging': env.bool('TRACING_LOGGING', True) - } - DEFAULT_TRACING_CONFIG = { - 'sampler': { - 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), - 'param': env.str('TRACING_SAMPLER_PARAM', "0"), - } - } - - -class DefaultConfig: - SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') - SQL_ECHO = env.bool('SQL_ECHO', False) - TRACING_TYPE = env.str('TRACING_TYPE', '') - ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_NAME', 'FileBasedHashRingRouter') - - -class TestingConfig(DefaultConfig): - SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI', '') - SQL_ECHO = env.bool('SQL_TEST_ECHO', False) - TRACING_TYPE = env.str('TRACING_TEST_TYPE', '') - ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_TEST_NAME', 'FileBasedHashRingRouter') - - -if __name__ == '__main__': - import logging - logger = logging.getLogger(__name__) - logger.debug('DEBUG') - logger.info('INFO') - logger.warn('WARN') - logger.error('ERROR') diff --git a/mishards/test_connections.py b/mishards/test_connections.py deleted file mode 100644 index 819d2e03da..0000000000 --- a/mishards/test_connections.py +++ /dev/null @@ -1,101 +0,0 @@ -import logging -import pytest -import mock - -from milvus import Milvus -from mishards.connections import (ConnectionMgr, Connection) -from mishards import exceptions - -logger = logging.getLogger(__name__) - - -@pytest.mark.usefixtures('app') -class TestConnection: - def test_manager(self): - mgr = ConnectionMgr() - - mgr.register('pod1', '111') - mgr.register('pod2', '222') - mgr.register('pod2', '222') - mgr.register('pod2', '2222') - assert len(mgr.conn_names) == 2 - - mgr.unregister('pod1') - assert len(mgr.conn_names) == 1 - - mgr.unregister('pod2') - assert len(mgr.conn_names) == 0 - - mgr.register('WOSERVER', 'xxxx') - assert len(mgr.conn_names) == 0 - - assert not mgr.conn('XXXX', None) - with pytest.raises(exceptions.ConnectionNotFoundError): - mgr.conn('XXXX', None, True) - - mgr.conn('WOSERVER', None) - - def test_connection(self): - class Conn: - def __init__(self, state): - self.state = state - - def 
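[Editor's note: settings.py leans on environs for typed configuration. A small sketch of the semantics relied on here: values come from the process environment or a .env file, and missing keys fall back to the supplied defaults.]

from environs import Env

env = Env()
env.read_env()                            # pick up a .env file if present
DEBUG = env.bool('DEBUG', False)          # 'true'/'false' strings -> bool
MAX_RETRY = env.int('MAX_RETRY', 3)       # missing key -> default 3
HOSTS = env.list('SD_STATIC_HOSTS', [])   # 'a,b' -> ['a', 'b']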
connect(self, uri): - return self.state - - def connected(self): - return self.state - - FAIL_CONN = Conn(False) - PASS_CONN = Conn(True) - - class Retry: - def __init__(self): - self.times = 0 - - def __call__(self, conn): - self.times += 1 - logger.info('Retrying {}'.format(self.times)) - - class Func(): - def __init__(self): - self.executed = False - - def __call__(self): - self.executed = True - - max_retry = 3 - - RetryObj = Retry() - - c = Connection('client', - uri='xx', - max_retry=max_retry, - on_retry_func=RetryObj) - c.conn = FAIL_CONN - ff = Func() - this_connect = c.connect(func=ff) - with pytest.raises(exceptions.ConnectionConnectError): - this_connect() - assert RetryObj.times == max_retry - assert not ff.executed - RetryObj = Retry() - - c.conn = PASS_CONN - this_connect = c.connect(func=ff) - this_connect() - assert ff.executed - assert RetryObj.times == 0 - - this_connect = c.connect(func=None) - with pytest.raises(TypeError): - this_connect() - - errors = [] - - def error_handler(err): - errors.append(err) - - this_connect = c.connect(func=None, exception_handler=error_handler) - this_connect() - assert len(errors) == 1 diff --git a/mishards/test_models.py b/mishards/test_models.py deleted file mode 100644 index d60b62713e..0000000000 --- a/mishards/test_models.py +++ /dev/null @@ -1,39 +0,0 @@ -import logging -import pytest -from mishards.factories import TableFiles, Tables, TableFilesFactory, TablesFactory -from mishards import db, create_app, settings -from mishards.factories import ( - Tables, TableFiles, - TablesFactory, TableFilesFactory -) - -logger = logging.getLogger(__name__) - - -@pytest.mark.usefixtures('app') -class TestModels: - def test_files_to_search(self): - table = TablesFactory() - new_files_cnt = 5 - to_index_cnt = 10 - raw_cnt = 20 - backup_cnt = 12 - to_delete_cnt = 9 - index_cnt = 8 - new_index_cnt = 6 - new_merge_cnt = 11 - - new_files = TableFilesFactory.create_batch(new_files_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW, date=110) - to_index_files = TableFilesFactory.create_batch(to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX, date=110) - raw_files = TableFilesFactory.create_batch(raw_cnt, table=table, file_type=TableFiles.FILE_TYPE_RAW, date=120) - backup_files = TableFilesFactory.create_batch(backup_cnt, table=table, file_type=TableFiles.FILE_TYPE_BACKUP, date=110) - index_files = TableFilesFactory.create_batch(index_cnt, table=table, file_type=TableFiles.FILE_TYPE_INDEX, date=110) - new_index_files = TableFilesFactory.create_batch(new_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_INDEX, date=110) - new_merge_files = TableFilesFactory.create_batch(new_merge_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_MERGE, date=110) - to_delete_files = TableFilesFactory.create_batch(to_delete_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_DELETE, date=110) - assert table.files_to_search().count() == raw_cnt + index_cnt + to_index_cnt - - assert table.files_to_search([(100, 115)]).count() == index_cnt + to_index_cnt - assert table.files_to_search([(111, 120)]).count() == 0 - assert table.files_to_search([(111, 121)]).count() == raw_cnt - assert table.files_to_search([(110, 121)]).count() == raw_cnt + index_cnt + to_index_cnt diff --git a/mishards/test_server.py b/mishards/test_server.py deleted file mode 100644 index efd3912076..0000000000 --- a/mishards/test_server.py +++ /dev/null @@ -1,279 +0,0 @@ -import logging -import pytest -import mock -import datetime -import random -import faker -import inspect 
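[Editor's note: the files_to_search assertions above pin down the window semantics. With searchable files at dates 110 and 120, (111, 120) matches nothing while (111, 121) matches the date-120 files, so ranges behave as [start, end). A sketch of a filter consistent with those assertions; the real implementation lives in mishards/models.py and is not shown in this section.]

# Assumed [start, end) window over the packed integer dates, limited to
# the searchable file types (raw, index, to-index).
query = session.query(TableFiles).filter(
    TableFiles.file_type.in_(searchable_types),
    TableFiles.date >= start,   # inclusive start: (110, 121) matches 110
    TableFiles.date < end)      # exclusive end: (111, 120) misses 120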
-from milvus import Milvus -from milvus.client.types import Status, IndexType, MetricType -from milvus.client.abstract import IndexParam, TableSchema -from milvus.grpc_gen import status_pb2, milvus_pb2 -from mishards import db, create_app, settings -from mishards.service_handler import ServiceHandler -from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser -from mishards.factories import TableFilesFactory, TablesFactory, TableFiles, Tables -from mishards.routings import RouterMixin - -logger = logging.getLogger(__name__) - -OK = Status(code=Status.SUCCESS, message='Success') -BAD = Status(code=Status.PERMISSION_DENIED, message='Fail') - - -@pytest.mark.usefixtures('started_app') -class TestServer: - @property - def client(self): - m = Milvus() - m.connect(host='localhost', port=settings.SERVER_TEST_PORT) - return m - - def test_server_start(self, started_app): - assert started_app.conn_mgr.metas.get('WOSERVER') == settings.WOSERVER - - def test_cmd(self, started_app): - ServiceHandler._get_server_version = mock.MagicMock(return_value=(OK, - '')) - status, _ = self.client.server_version() - assert status.OK() - - Parser.parse_proto_Command = mock.MagicMock(return_value=(BAD, 'cmd')) - status, _ = self.client.server_version() - assert not status.OK() - - def test_drop_index(self, started_app): - table_name = inspect.currentframe().f_code.co_name - ServiceHandler._drop_index = mock.MagicMock(return_value=OK) - status = self.client.drop_index(table_name) - assert status.OK() - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status = self.client.drop_index(table_name) - assert not status.OK() - - def test_describe_index(self, started_app): - table_name = inspect.currentframe().f_code.co_name - index_type = IndexType.FLAT - nlist = 1 - index_param = IndexParam(table_name=table_name, - index_type=index_type, - nlist=nlist) - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_name)) - ServiceHandler._describe_index = mock.MagicMock( - return_value=(OK, index_param)) - status, ret = self.client.describe_index(table_name) - assert status.OK() - assert ret._table_name == index_param._table_name - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status, _ = self.client.describe_index(table_name) - assert not status.OK() - - def test_preload(self, started_app): - table_name = inspect.currentframe().f_code.co_name - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_name)) - ServiceHandler._preload_table = mock.MagicMock(return_value=OK) - status = self.client.preload_table(table_name) - assert status.OK() - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status = self.client.preload_table(table_name) - assert not status.OK() - - @pytest.mark.skip - def test_delete_by_range(self, started_app): - table_name = inspect.currentframe().f_code.co_name - - unpacked = table_name, datetime.datetime.today( - ), datetime.datetime.today() - - Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( - return_value=(OK, unpacked)) - ServiceHandler._delete_by_range = mock.MagicMock(return_value=OK) - status = self.client.delete_vectors_by_range( - *unpacked) - assert status.OK() - - Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( - return_value=(BAD, unpacked)) - status = self.client.delete_vectors_by_range( - *unpacked) - assert not status.OK() - - def test_count_table(self, started_app): - table_name = 
inspect.currentframe().f_code.co_name - count = random.randint(100, 200) - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_name)) - ServiceHandler._count_table = mock.MagicMock(return_value=(OK, count)) - status, ret = self.client.get_table_row_count(table_name) - assert status.OK() - assert ret == count - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status, _ = self.client.get_table_row_count(table_name) - assert not status.OK() - - def test_show_tables(self, started_app): - tables = ['t1', 't2'] - ServiceHandler._show_tables = mock.MagicMock(return_value=(OK, tables)) - status, ret = self.client.show_tables() - assert status.OK() - assert ret == tables - - def test_describe_table(self, started_app): - table_name = inspect.currentframe().f_code.co_name - dimension = 128 - nlist = 1 - table_schema = TableSchema(table_name=table_name, - index_file_size=100, - metric_type=MetricType.L2, - dimension=dimension) - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_schema.table_name)) - ServiceHandler._describe_table = mock.MagicMock( - return_value=(OK, table_schema)) - status, _ = self.client.describe_table(table_name) - assert status.OK() - - ServiceHandler._describe_table = mock.MagicMock( - return_value=(BAD, table_schema)) - status, _ = self.client.describe_table(table_name) - assert not status.OK() - - Parser.parse_proto_TableName = mock.MagicMock(return_value=(BAD, - 'cmd')) - status, ret = self.client.describe_table(table_name) - assert not status.OK() - - def test_insert(self, started_app): - table_name = inspect.currentframe().f_code.co_name - vectors = [[random.random() for _ in range(16)] for _ in range(10)] - ids = [random.randint(1000000, 20000000) for _ in range(10)] - ServiceHandler._add_vectors = mock.MagicMock(return_value=(OK, ids)) - status, ret = self.client.add_vectors( - table_name=table_name, records=vectors) - assert status.OK() - assert ids == ret - - def test_create_index(self, started_app): - table_name = inspect.currentframe().f_code.co_name - unpacks = table_name, None - Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(OK, - unpacks)) - ServiceHandler._create_index = mock.MagicMock(return_value=OK) - status = self.client.create_index(table_name=table_name) - assert status.OK() - - Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(BAD, - None)) - status = self.client.create_index(table_name=table_name) - assert not status.OK() - - def test_drop_table(self, started_app): - table_name = inspect.currentframe().f_code.co_name - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_name)) - ServiceHandler._delete_table = mock.MagicMock(return_value=OK) - status = self.client.delete_table(table_name=table_name) - assert status.OK() - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status = self.client.delete_table(table_name=table_name) - assert not status.OK() - - def test_has_table(self, started_app): - table_name = inspect.currentframe().f_code.co_name - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_name)) - ServiceHandler._has_table = mock.MagicMock(return_value=(OK, True)) - has = self.client.has_table(table_name=table_name) - assert has - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status, has = self.client.has_table(table_name=table_name) - assert not status.OK() - assert not has - - def test_create_table(self, started_app): - 
table_name = inspect.currentframe().f_code.co_name - dimension = 128 - table_schema = dict(table_name=table_name, - index_file_size=100, - metric_type=MetricType.L2, - dimension=dimension) - - ServiceHandler._create_table = mock.MagicMock(return_value=OK) - status = self.client.create_table(table_schema) - assert status.OK() - - Parser.parse_proto_TableSchema = mock.MagicMock(return_value=(BAD, - None)) - status = self.client.create_table(table_schema) - assert not status.OK() - - def random_data(self, n, dimension): - return [[random.random() for _ in range(dimension)] for _ in range(n)] - - def test_search(self, started_app): - table_name = inspect.currentframe().f_code.co_name - to_index_cnt = random.randint(10, 20) - table = TablesFactory(table_id=table_name, state=Tables.NORMAL) - to_index_files = TableFilesFactory.create_batch( - to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX) - topk = random.randint(5, 10) - nq = random.randint(5, 10) - param = { - 'table_name': table_name, - 'query_records': self.random_data(nq, table.dimension), - 'top_k': topk, - 'nprobe': 2049 - } - - result = [ - milvus_pb2.TopKQueryResult(query_result_arrays=[ - milvus_pb2.QueryResult(id=i, distance=random.random()) - for i in range(topk) - ]) for i in range(nq) - ] - - mock_results = milvus_pb2.TopKQueryResultList(status=status_pb2.Status( - error_code=status_pb2.SUCCESS, reason="Success"), - topk_query_result=result) - - table_schema = TableSchema(table_name=table_name, - index_file_size=table.index_file_size, - metric_type=table.metric_type, - dimension=table.dimension) - - status, _ = self.client.search_vectors(**param) - assert status.code == Status.ILLEGAL_ARGUMENT - - param['nprobe'] = 2048 - RouterMixin.connection = mock.MagicMock(return_value=Milvus()) - RouterMixin.query_conn = mock.MagicMock(return_value=Milvus()) - Milvus.describe_table = mock.MagicMock(return_value=(BAD, - table_schema)) - status, ret = self.client.search_vectors(**param) - assert status.code == Status.TABLE_NOT_EXISTS - - Milvus.describe_table = mock.MagicMock(return_value=(OK, table_schema)) - Milvus.search_vectors_in_files = mock.MagicMock( - return_value=mock_results) - - status, ret = self.client.search_vectors(**param) - assert status.OK() - assert len(ret) == nq diff --git a/mishards/utilities.py b/mishards/utilities.py deleted file mode 100644 index 42e982b5f1..0000000000 --- a/mishards/utilities.py +++ /dev/null @@ -1,20 +0,0 @@ -import datetime -from mishards import exceptions - - -def format_date(start, end): - return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, - (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) - - -def range_to_date(range_obj, metadata=None): - try: - start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') - end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') - assert start < end - except (ValueError, AssertionError): - raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( - range_obj.start_date, range_obj.end_date), - metadata=metadata) - - return format_date(start, end) diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index ae224e92ed..0000000000 --- a/requirements.txt +++ /dev/null @@ -1,36 +0,0 @@ -environs==4.2.0 -factory-boy==2.12.0 -Faker==1.0.7 -fire==0.1.3 -google-auth==1.6.3 -grpcio==1.22.0 -grpcio-tools==1.22.0 -kubernetes==10.0.1 -MarkupSafe==1.1.1 -marshmallow==2.19.5 -pymysql==0.9.3 -protobuf==3.9.1 -py==1.8.0 -pyasn1==0.4.7 -pyasn1-modules==0.2.6 -pylint==2.3.1 
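[Editor's note: utilities.format_date packs a calendar date into one integer as (year - 1900) * 10000 + (month - 1) * 100 + day, the same encoding the model tests and factories use for the date column. A worked example:]

import datetime

d = datetime.datetime(2019, 9, 17)
encoded = (d.year - 1900) * 10000 + (d.month - 1) * 100 + d.day
assert encoded == 1190817   # 119 (years) | 08 (zero-based month) | 17 (day)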
-pymilvus-test==0.2.28 -#pymilvus==0.2.0 -pyparsing==2.4.0 -pytest==4.6.3 -pytest-level==0.1.1 -pytest-print==0.1.2 -pytest-repeat==0.8.0 -pytest-timeout==1.3.3 -python-dateutil==2.8.0 -python-dotenv==0.10.3 -pytz==2019.1 -requests==2.22.0 -requests-oauthlib==1.2.0 -rsa==4.0 -six==1.12.0 -SQLAlchemy==1.3.5 -urllib3==1.25.3 -jaeger-client>=3.4.0 -grpcio-opentracing>=1.0 -mock==2.0.0 diff --git a/sd/__init__.py b/sd/__init__.py deleted file mode 100644 index 7943887d0f..0000000000 --- a/sd/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging -import inspect -# from utils import singleton - -logger = logging.getLogger(__name__) - - -class ProviderManager: - PROVIDERS = {} - - @classmethod - def register_service_provider(cls, target): - if inspect.isfunction(target): - cls.PROVIDERS[target.__name__] = target - elif inspect.isclass(target): - name = target.__dict__.get('NAME', None) - name = name if name else target.__class__.__name__ - cls.PROVIDERS[name] = target - else: - assert False, 'Cannot register_service_provider for: {}'.format(target) - return target - - @classmethod - def get_provider(cls, name): - return cls.PROVIDERS.get(name, None) - - -from sd import kubernetes_provider, static_provider diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py deleted file mode 100644 index eb113db007..0000000000 --- a/sd/kubernetes_provider.py +++ /dev/null @@ -1,331 +0,0 @@ -import os -import sys -if __name__ == '__main__': - sys.path.append(os.path.dirname(os.path.dirname( - os.path.abspath(__file__)))) - -import re -import logging -import time -import copy -import threading -import queue -import enum -from kubernetes import client, config, watch - -from utils import singleton -from sd import ProviderManager - -logger = logging.getLogger(__name__) - -INCLUSTER_NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' - - -class EventType(enum.Enum): - PodHeartBeat = 1 - Watch = 2 - - -class K8SMixin: - def __init__(self, namespace, in_cluster=False, **kwargs): - self.namespace = namespace - self.in_cluster = in_cluster - self.kwargs = kwargs - self.v1 = kwargs.get('v1', None) - if not self.namespace: - self.namespace = open(INCLUSTER_NAMESPACE_PATH).read() - - if not self.v1: - config.load_incluster_config( - ) if self.in_cluster else config.load_kube_config() - self.v1 = client.CoreV1Api() - - -class K8SHeartbeatHandler(threading.Thread, K8SMixin): - def __init__(self, - message_queue, - namespace, - label_selector, - in_cluster=False, - **kwargs): - K8SMixin.__init__(self, - namespace=namespace, - in_cluster=in_cluster, - **kwargs) - threading.Thread.__init__(self) - self.queue = message_queue - self.terminate = False - self.label_selector = label_selector - self.poll_interval = kwargs.get('poll_interval', 5) - - def run(self): - while not self.terminate: - try: - pods = self.v1.list_namespaced_pod( - namespace=self.namespace, - label_selector=self.label_selector) - event_message = {'eType': EventType.PodHeartBeat, 'events': []} - for item in pods.items: - pod = self.v1.read_namespaced_pod(name=item.metadata.name, - namespace=self.namespace) - name = pod.metadata.name - ip = pod.status.pod_ip - phase = pod.status.phase - reason = pod.status.reason - message = pod.status.message - ready = True if phase == 'Running' else False - - pod_event = dict(pod=name, - ip=ip, - ready=ready, - reason=reason, - message=message) - - event_message['events'].append(pod_event) - - self.queue.put(event_message) - - except Exception as exc: - logger.error(exc) - - 
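[Editor's note: sd/__init__.py registers discovery providers through a class decorator keyed by an explicit NAME attribute. Note that the fallback target.__class__.__name__ evaluates to 'type' when target is a class object, so in practice providers must set NAME, as both bundled providers do. A usage sketch; MyProvider is illustrative only.]

@ProviderManager.register_service_provider
class MyProvider:               # hypothetical provider, not in the patch
    NAME = 'My'                 # the explicit name used for lookups

assert ProviderManager.get_provider('My') is MyProvider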
time.sleep(self.poll_interval) - - def stop(self): - self.terminate = True - - -class K8SEventListener(threading.Thread, K8SMixin): - def __init__(self, message_queue, namespace, in_cluster=False, **kwargs): - K8SMixin.__init__(self, - namespace=namespace, - in_cluster=in_cluster, - **kwargs) - threading.Thread.__init__(self) - self.queue = message_queue - self.terminate = False - self.at_start_up = True - self._stop_event = threading.Event() - - def stop(self): - self.terminate = True - self._stop_event.set() - - def run(self): - resource_version = '' - w = watch.Watch() - for event in w.stream(self.v1.list_namespaced_event, - namespace=self.namespace, - field_selector='involvedObject.kind=Pod'): - if self.terminate: - break - - resource_version = int(event['object'].metadata.resource_version) - - info = dict( - eType=EventType.Watch, - pod=event['object'].involved_object.name, - reason=event['object'].reason, - message=event['object'].message, - start_up=self.at_start_up, - ) - self.at_start_up = False - # logger.info('Received event: {}'.format(info)) - self.queue.put(info) - - -class EventHandler(threading.Thread): - def __init__(self, mgr, message_queue, namespace, pod_patt, **kwargs): - threading.Thread.__init__(self) - self.mgr = mgr - self.queue = message_queue - self.kwargs = kwargs - self.terminate = False - self.pod_patt = re.compile(pod_patt) - self.namespace = namespace - - def stop(self): - self.terminate = True - - def on_drop(self, event, **kwargs): - pass - - def on_pod_started(self, event, **kwargs): - try_cnt = 3 - pod = None - while try_cnt > 0: - try_cnt -= 1 - try: - pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], - namespace=self.namespace) - if not pod.status.pod_ip: - time.sleep(0.5) - continue - break - except client.rest.ApiException as exc: - time.sleep(0.5) - - if try_cnt <= 0 and not pod: - if not event['start_up']: - logger.error('Pod {} is started but cannot read pod'.format( - event['pod'])) - return - elif try_cnt <= 0 and not pod.status.pod_ip: - logger.warning('NoPodIPFoundError') - return - - logger.info('Register POD {} with IP {}'.format( - pod.metadata.name, pod.status.pod_ip)) - self.mgr.add_pod(name=pod.metadata.name, ip=pod.status.pod_ip) - - def on_pod_killing(self, event, **kwargs): - logger.info('Unregister POD {}'.format(event['pod'])) - self.mgr.delete_pod(name=event['pod']) - - def on_pod_heartbeat(self, event, **kwargs): - names = self.mgr.conn_mgr.conn_names - - running_names = set() - for each_event in event['events']: - if each_event['ready']: - self.mgr.add_pod(name=each_event['pod'], ip=each_event['ip']) - running_names.add(each_event['pod']) - else: - self.mgr.delete_pod(name=each_event['pod']) - - to_delete = names - running_names - for name in to_delete: - self.mgr.delete_pod(name) - - logger.info(self.mgr.conn_mgr.conn_names) - - def handle_event(self, event): - if event['eType'] == EventType.PodHeartBeat: - return self.on_pod_heartbeat(event) - - if not event or (event['reason'] not in ('Started', 'Killing')): - return self.on_drop(event) - - if not re.match(self.pod_patt, event['pod']): - return self.on_drop(event) - - logger.info('Handling event: {}'.format(event)) - - if event['reason'] == 'Started': - return self.on_pod_started(event) - - return self.on_pod_killing(event) - - def run(self): - while not self.terminate: - try: - event = self.queue.get(timeout=1) - self.handle_event(event) - except queue.Empty: - continue - - -class KubernetesProviderSettings: - def __init__(self, namespace, pod_patt, label_selector, 
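[Editor's note: on_pod_heartbeat above reconciles registered connections with the latest poll: ready pods are re-registered, unready ones dropped, and anything no longer reported is removed. A condensed restatement of that logic as a free function; this is a sketch, not the patch's code.]

def reconcile(current_names, events, add_pod, delete_pod):
    running = set()
    for ev in events:
        if ev['ready']:                   # pod phase == 'Running'
            add_pod(ev['pod'], ev['ip'])
            running.add(ev['pod'])
        else:
            delete_pod(ev['pod'])
    for name in current_names - running:  # seen before, absent now
        delete_pod(name)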
in_cluster, - poll_interval, port=None, **kwargs): - self.namespace = namespace - self.pod_patt = pod_patt - self.label_selector = label_selector - self.in_cluster = in_cluster - self.poll_interval = poll_interval - self.port = int(port) if port else 19530 - - -@singleton -@ProviderManager.register_service_provider -class KubernetesProvider(object): - NAME = 'Kubernetes' - - def __init__(self, settings, conn_mgr, **kwargs): - self.namespace = settings.namespace - self.pod_patt = settings.pod_patt - self.label_selector = settings.label_selector - self.in_cluster = settings.in_cluster - self.poll_interval = settings.poll_interval - self.port = settings.port - self.kwargs = kwargs - self.queue = queue.Queue() - - self.conn_mgr = conn_mgr - - if not self.namespace: - self.namespace = open(incluster_namespace_path).read() - - config.load_incluster_config( - ) if self.in_cluster else config.load_kube_config() - self.v1 = client.CoreV1Api() - - self.listener = K8SEventListener(message_queue=self.queue, - namespace=self.namespace, - in_cluster=self.in_cluster, - v1=self.v1, - **kwargs) - - self.pod_heartbeater = K8SHeartbeatHandler( - message_queue=self.queue, - namespace=self.namespace, - label_selector=self.label_selector, - in_cluster=self.in_cluster, - v1=self.v1, - poll_interval=self.poll_interval, - **kwargs) - - self.event_handler = EventHandler(mgr=self, - message_queue=self.queue, - namespace=self.namespace, - pod_patt=self.pod_patt, - **kwargs) - - def add_pod(self, name, ip): - self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) - - def delete_pod(self, name): - self.conn_mgr.unregister(name) - - def start(self): - self.listener.daemon = True - self.listener.start() - self.event_handler.start() - - self.pod_heartbeater.start() - - def stop(self): - self.listener.stop() - self.pod_heartbeater.stop() - self.event_handler.stop() - - -if __name__ == '__main__': - logging.basicConfig(level=logging.INFO) - - class Connect: - def register(self, name, value): - logger.error('Register: {} - {}'.format(name, value)) - - def unregister(self, name): - logger.error('Unregister: {}'.format(name)) - - @property - def conn_names(self): - return set() - - connect_mgr = Connect() - - settings = KubernetesProviderSettings(namespace='xp', - pod_patt=".*-ro-servers-.*", - label_selector='tier=ro-servers', - poll_interval=5, - in_cluster=False) - - provider_class = ProviderManager.get_provider('Kubernetes') - t = provider_class(conn_mgr=connect_mgr, settings=settings) - t.start() - cnt = 100 - while cnt > 0: - time.sleep(2) - cnt -= 1 - t.stop() diff --git a/sd/static_provider.py b/sd/static_provider.py deleted file mode 100644 index e88780740f..0000000000 --- a/sd/static_provider.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import sys -if __name__ == '__main__': - sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - -import socket -from utils import singleton -from sd import ProviderManager - - -class StaticProviderSettings: - def __init__(self, hosts, port=None): - self.hosts = hosts - self.port = int(port) if port else 19530 - - -@singleton -@ProviderManager.register_service_provider -class KubernetesProvider(object): - NAME = 'Static' - - def __init__(self, settings, conn_mgr, **kwargs): - self.conn_mgr = conn_mgr - self.hosts = [socket.gethostbyname(host) for host in settings.hosts] - self.port = settings.port - - def start(self): - for host in self.hosts: - self.add_pod(host, host) - - def stop(self): - for host in self.hosts: - self.delete_pod(host) - - def 
add_pod(self, name, ip): - self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) - - def delete_pod(self, name): - self.conn_mgr.unregister(name) diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 4a88432914..0000000000 --- a/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[tool:pytest] -testpaths = mishards -log_cli=true -log_cli_level=info diff --git a/start_services.yml b/start_services.yml deleted file mode 100644 index 57fe061bb7..0000000000 --- a/start_services.yml +++ /dev/null @@ -1,45 +0,0 @@ -version: "2.3" -services: - milvus: - runtime: nvidia - restart: always - image: registry.zilliz.com/milvus/engine:branch-0.5.0-release-4316de - # ports: - # - "0.0.0.0:19530:19530" - volumes: - - /tmp/milvus/db:/opt/milvus/db - - jaeger: - restart: always - image: jaegertracing/all-in-one:1.14 - ports: - - "0.0.0.0:5775:5775/udp" - - "0.0.0.0:16686:16686" - - "0.0.0.0:9441:9441" - environment: - COLLECTOR_ZIPKIN_HTTP_PORT: 9411 - - mishards: - restart: always - image: registry.zilliz.com/milvus/mishards:v0.0.4 - ports: - - "0.0.0.0:19530:19531" - - "0.0.0.0:19532:19532" - volumes: - - /tmp/milvus/db:/tmp/milvus/db - # - /tmp/mishards_env:/source/mishards/.env - command: ["python", "mishards/main.py"] - environment: - FROM_EXAMPLE: 'true' - DEBUG: 'true' - SERVER_PORT: 19531 - WOSERVER: tcp://milvus:19530 - SD_STATIC_HOSTS: milvus - TRACING_TYPE: jaeger - TRACING_SERVICE_NAME: mishards-demo - TRACING_REPORTING_HOST: jaeger - TRACING_REPORTING_PORT: 5775 - - depends_on: - - milvus - - jaeger diff --git a/tracing/__init__.py b/tracing/__init__.py deleted file mode 100644 index 64a5b50d15..0000000000 --- a/tracing/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -from contextlib import contextmanager - - -def empty_server_interceptor_decorator(target_server, interceptor): - return target_server - - -@contextmanager -def EmptySpan(*args, **kwargs): - yield None - return - - -class Tracer: - def __init__(self, - tracer=None, - interceptor=None, - server_decorator=empty_server_interceptor_decorator): - self.tracer = tracer - self.interceptor = interceptor - self.server_decorator = server_decorator - - def decorate(self, server): - return self.server_decorator(server, self.interceptor) - - @property - def empty(self): - return self.tracer is None - - def close(self): - self.tracer and self.tracer.close() - - def start_span(self, - operation_name=None, - child_of=None, - references=None, - tags=None, - start_time=None, - ignore_active_span=False): - if self.empty: - return EmptySpan() - return self.tracer.start_span(operation_name, child_of, references, - tags, start_time, ignore_active_span) diff --git a/tracing/factory.py b/tracing/factory.py deleted file mode 100644 index 14fcde2eb3..0000000000 --- a/tracing/factory.py +++ /dev/null @@ -1,40 +0,0 @@ -import logging -from jaeger_client import Config -from grpc_opentracing.grpcext import intercept_server -from grpc_opentracing import open_tracing_server_interceptor - -from tracing import (Tracer, empty_server_interceptor_decorator) - -logger = logging.getLogger(__name__) - - -class TracerFactory: - @classmethod - def new_tracer(cls, - tracer_type, - tracer_config, - span_decorator=None, - **kwargs): - if not tracer_type: - return Tracer() - config = tracer_config.TRACING_CONFIG - service_name = tracer_config.TRACING_SERVICE_NAME - validate = tracer_config.TRACING_VALIDATE - # if not tracer_type: - # tracer_type = 'jaeger' - # config = tracer_config.DEFAULT_TRACING_CONFIG - - if tracer_type.lower() == 'jaeger': - config = 
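[Editor's note: tracing/__init__.py uses a null-object pattern. When no tracer is configured, start_span returns EmptySpan, a context manager yielding None, so call sites never need an "is tracing enabled" branch. A usage sketch:]

tracer = Tracer()                     # no backend configured => empty
with tracer.start_span('op') as span:
    # span is None on the no-op path; real spans appear transparently
    # once TracerFactory injects a jaeger tracer.
    assert span is None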
Config(config=config, - service_name=service_name, - validate=validate) - - tracer = config.initialize_tracer() - tracer_interceptor = open_tracing_server_interceptor( - tracer, - log_payloads=tracer_config.TRACING_LOG_PAYLOAD, - span_decorator=span_decorator) - - return Tracer(tracer, tracer_interceptor, intercept_server) - - assert False, 'Unsupported tracer type: {}'.format(tracer_type) diff --git a/utils/__init__.py b/utils/__init__.py deleted file mode 100644 index c1d55e76c0..0000000000 --- a/utils/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from functools import wraps - - -def singleton(cls): - instances = {} - @wraps(cls) - def getinstance(*args, **kw): - if cls not in instances: - instances[cls] = cls(*args, **kw) - return instances[cls] - return getinstance diff --git a/utils/logger_helper.py b/utils/logger_helper.py deleted file mode 100644 index b4e3b9c5b6..0000000000 --- a/utils/logger_helper.py +++ /dev/null @@ -1,152 +0,0 @@ -import os -import datetime -from pytz import timezone -from logging import Filter -import logging.config - - -class InfoFilter(logging.Filter): - def filter(self, rec): - return rec.levelno == logging.INFO - - -class DebugFilter(logging.Filter): - def filter(self, rec): - return rec.levelno == logging.DEBUG - - -class WarnFilter(logging.Filter): - def filter(self, rec): - return rec.levelno == logging.WARN - - -class ErrorFilter(logging.Filter): - def filter(self, rec): - return rec.levelno == logging.ERROR - - -class CriticalFilter(logging.Filter): - def filter(self, rec): - return rec.levelno == logging.CRITICAL - - -COLORS = { - 'HEADER': '\033[95m', - 'INFO': '\033[92m', - 'DEBUG': '\033[94m', - 'WARNING': '\033[93m', - 'ERROR': '\033[95m', - 'CRITICAL': '\033[91m', - 'ENDC': '\033[0m', -} - - -class ColorFulFormatColMixin: - def format_col(self, message_str, level_name): - if level_name in COLORS.keys(): - message_str = COLORS.get(level_name) + message_str + COLORS.get( - 'ENDC') - return message_str - - -class ColorfulFormatter(logging.Formatter, ColorFulFormatColMixin): - def format(self, record): - message_str = super(ColorfulFormatter, self).format(record) - - return self.format_col(message_str, level_name=record.levelname) - - -def config(log_level, log_path, name, tz='UTC'): - def build_log_file(level, log_path, name, tz): - utc_now = datetime.datetime.utcnow() - utc_tz = timezone('UTC') - local_tz = timezone(tz) - tznow = utc_now.replace(tzinfo=utc_tz).astimezone(local_tz) - return '{}-{}-{}.log'.format(os.path.join(log_path, name), tznow.strftime("%m-%d-%Y-%H:%M:%S"), - level) - - if not os.path.exists(log_path): - os.makedirs(log_path) - - LOGGING = { - 'version': 1, - 'disable_existing_loggers': False, - 'formatters': { - 'default': { - 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', - }, - 'colorful_console': { - 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', - '()': ColorfulFormatter, - }, - }, - 'filters': { - 'InfoFilter': { - '()': InfoFilter, - }, - 'DebugFilter': { - '()': DebugFilter, - }, - 'WarnFilter': { - '()': WarnFilter, - }, - 'ErrorFilter': { - '()': ErrorFilter, - }, - 'CriticalFilter': { - '()': CriticalFilter, - }, - }, - 'handlers': { - 'milvus_celery_console': { - 'class': 'logging.StreamHandler', - 'formatter': 'colorful_console', - }, - 'milvus_debug_file': { - 'level': 'DEBUG', - 'filters': ['DebugFilter'], - 'class': 'logging.handlers.RotatingFileHandler', - 'formatter': 'default', - 'filename': 
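[Editor's note: utils.singleton memoizes one instance per decorated class, keyed by the class object itself. A usage sketch; Registry is illustrative only.]

@singleton
class Registry:                  # hypothetical class, not from the patch
    def __init__(self):
        self.items = {}

assert Registry() is Registry()  # every call returns the first instance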
build_log_file('debug', log_path, name, tz) - }, - 'milvus_info_file': { - 'level': 'INFO', - 'filters': ['InfoFilter'], - 'class': 'logging.handlers.RotatingFileHandler', - 'formatter': 'default', - 'filename': build_log_file('info', log_path, name, tz) - }, - 'milvus_warn_file': { - 'level': 'WARN', - 'filters': ['WarnFilter'], - 'class': 'logging.handlers.RotatingFileHandler', - 'formatter': 'default', - 'filename': build_log_file('warn', log_path, name, tz) - }, - 'milvus_error_file': { - 'level': 'ERROR', - 'filters': ['ErrorFilter'], - 'class': 'logging.handlers.RotatingFileHandler', - 'formatter': 'default', - 'filename': build_log_file('error', log_path, name, tz) - }, - 'milvus_critical_file': { - 'level': 'CRITICAL', - 'filters': ['CriticalFilter'], - 'class': 'logging.handlers.RotatingFileHandler', - 'formatter': 'default', - 'filename': build_log_file('critical', log_path, name, tz) - }, - }, - 'loggers': { - '': { - 'handlers': ['milvus_celery_console', 'milvus_info_file', 'milvus_debug_file', 'milvus_warn_file', - 'milvus_error_file', 'milvus_critical_file'], - 'level': log_level, - 'propagate': False - }, - }, - 'propagate': False, - } - - logging.config.dictConfig(LOGGING) From 8553d1c332352d2b044e1f26136369fa71157247 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 16:21:32 +0800 Subject: [PATCH 091/307] Preparing to merge into milvus --- shards/Dockerfile | 10 + shards/build.sh | 39 ++ shards/conftest.py | 27 + shards/manager.py | 28 ++ shards/mishards/.env.example | 33 ++ shards/mishards/__init__.py | 36 ++ shards/mishards/connections.py | 154 ++++++ shards/mishards/db_base.py | 52 ++ shards/mishards/exception_codes.py | 10 + shards/mishards/exception_handlers.py | 82 +++ shards/mishards/exceptions.py | 38 ++ shards/mishards/factories.py | 54 ++ shards/mishards/grpc_utils/__init__.py | 37 ++ .../mishards/grpc_utils/grpc_args_parser.py | 102 ++++ .../mishards/grpc_utils/grpc_args_wrapper.py | 4 + shards/mishards/grpc_utils/test_grpc.py | 75 +++ shards/mishards/hash_ring.py | 150 ++++++ shards/mishards/main.py | 15 + shards/mishards/models.py | 76 +++ shards/mishards/routings.py | 96 ++++ shards/mishards/server.py | 122 +++++ shards/mishards/service_handler.py | 475 ++++++++++++++++++ shards/mishards/settings.py | 94 ++++ shards/mishards/test_connections.py | 101 ++++ shards/mishards/test_models.py | 39 ++ shards/mishards/test_server.py | 279 ++++++++++ shards/mishards/utilities.py | 20 + shards/requirements.txt | 36 ++ shards/sd/__init__.py | 28 ++ shards/sd/kubernetes_provider.py | 331 ++++++++++++ shards/sd/static_provider.py | 39 ++ shards/setup.cfg | 4 + shards/start_services.yml | 45 ++ shards/tracing/__init__.py | 43 ++ shards/tracing/factory.py | 40 ++ shards/utils/__init__.py | 11 + shards/utils/logger_helper.py | 152 ++++++ 37 files changed, 2977 insertions(+) create mode 100644 shards/Dockerfile create mode 100755 shards/build.sh create mode 100644 shards/conftest.py create mode 100644 shards/manager.py create mode 100644 shards/mishards/.env.example create mode 100644 shards/mishards/__init__.py create mode 100644 shards/mishards/connections.py create mode 100644 shards/mishards/db_base.py create mode 100644 shards/mishards/exception_codes.py create mode 100644 shards/mishards/exception_handlers.py create mode 100644 shards/mishards/exceptions.py create mode 100644 shards/mishards/factories.py create mode 100644 shards/mishards/grpc_utils/__init__.py create mode 100644 shards/mishards/grpc_utils/grpc_args_parser.py create mode 100644 
shards/mishards/grpc_utils/grpc_args_wrapper.py create mode 100644 shards/mishards/grpc_utils/test_grpc.py create mode 100644 shards/mishards/hash_ring.py create mode 100644 shards/mishards/main.py create mode 100644 shards/mishards/models.py create mode 100644 shards/mishards/routings.py create mode 100644 shards/mishards/server.py create mode 100644 shards/mishards/service_handler.py create mode 100644 shards/mishards/settings.py create mode 100644 shards/mishards/test_connections.py create mode 100644 shards/mishards/test_models.py create mode 100644 shards/mishards/test_server.py create mode 100644 shards/mishards/utilities.py create mode 100644 shards/requirements.txt create mode 100644 shards/sd/__init__.py create mode 100644 shards/sd/kubernetes_provider.py create mode 100644 shards/sd/static_provider.py create mode 100644 shards/setup.cfg create mode 100644 shards/start_services.yml create mode 100644 shards/tracing/__init__.py create mode 100644 shards/tracing/factory.py create mode 100644 shards/utils/__init__.py create mode 100644 shards/utils/logger_helper.py diff --git a/shards/Dockerfile b/shards/Dockerfile new file mode 100644 index 0000000000..594640619e --- /dev/null +++ b/shards/Dockerfile @@ -0,0 +1,10 @@ +FROM python:3.6 +RUN apt update && apt install -y \ + less \ + telnet +RUN mkdir /source +WORKDIR /source +ADD ./requirements.txt ./ +RUN pip install -r requirements.txt +COPY . . +CMD python mishards/main.py diff --git a/shards/build.sh b/shards/build.sh new file mode 100755 index 0000000000..fad30518f2 --- /dev/null +++ b/shards/build.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +BOLD=`tput bold` +NORMAL=`tput sgr0` +YELLOW='\033[1;33m' +ENDC='\033[0m' + +echo -e "${BOLD}MISHARDS_REGISTRY=${MISHARDS_REGISTRY}${ENDC}" + +function build_image() { + dockerfile=$1 + remote_registry=$2 + tagged=$2 + buildcmd="docker build -t ${tagged} -f ${dockerfile} ." + echo -e "${BOLD}$buildcmd${NORMAL}" + $buildcmd + pushcmd="docker push ${remote_registry}" + echo -e "${BOLD}$pushcmd${NORMAL}" + $pushcmd + echo -e "${YELLOW}${BOLD}Image: ${remote_registry}${NORMAL}${ENDC}" +} + +case "$1" in + +all) + [[ -z $MISHARDS_REGISTRY ]] && { + echo -e "${YELLOW}Error: Please set docker registry first:${ENDC}\n\t${BOLD}export MISHARDS_REGISTRY=xxxx\n${ENDC}" + exit 1 + } + + version="" + [[ ! -z $2 ]] && version=":${2}" + build_image "Dockerfile" "${MISHARDS_REGISTRY}${version}" "${MISHARDS_REGISTRY}" + ;; +*) + echo "Usage: [option...] 
{base | apps}" + echo "all, Usage: build.sh all [tagname|] => {docker_registry}:\${tagname}" + ;; +esac diff --git a/shards/conftest.py b/shards/conftest.py new file mode 100644 index 0000000000..34e22af693 --- /dev/null +++ b/shards/conftest.py @@ -0,0 +1,27 @@ +import logging +import pytest +import grpc +from mishards import settings, db, create_app + +logger = logging.getLogger(__name__) + + +@pytest.fixture +def app(request): + app = create_app(settings.TestingConfig) + db.drop_all() + db.create_all() + + yield app + + db.drop_all() + + +@pytest.fixture +def started_app(app): + app.on_pre_run() + app.start(settings.SERVER_TEST_PORT) + + yield app + + app.stop() diff --git a/shards/manager.py b/shards/manager.py new file mode 100644 index 0000000000..931c90ebc8 --- /dev/null +++ b/shards/manager.py @@ -0,0 +1,28 @@ +import fire +from mishards import db +from sqlalchemy import and_ + + +class DBHandler: + @classmethod + def create_all(cls): + db.create_all() + + @classmethod + def drop_all(cls): + db.drop_all() + + @classmethod + def fun(cls, tid): + from mishards.factories import TablesFactory, TableFilesFactory, Tables + f = db.Session.query(Tables).filter(and_( + Tables.table_id == tid, + Tables.state != Tables.TO_DELETE) + ).first() + print(f) + + # f1 = TableFilesFactory() + + +if __name__ == '__main__': + fire.Fire(DBHandler) diff --git a/shards/mishards/.env.example b/shards/mishards/.env.example new file mode 100644 index 0000000000..0a23c0cf56 --- /dev/null +++ b/shards/mishards/.env.example @@ -0,0 +1,33 @@ +DEBUG=True + +WOSERVER=tcp://127.0.0.1:19530 +SERVER_PORT=19532 +SERVER_TEST_PORT=19888 + +SD_PROVIDER=Static + +SD_NAMESPACE=xp +SD_IN_CLUSTER=False +SD_POLL_INTERVAL=5 +SD_ROSERVER_POD_PATT=.*-ro-servers-.* +SD_LABEL_SELECTOR=tier=ro-servers + +SD_STATIC_HOSTS=127.0.0.1 +SD_STATIC_PORT=19530 + +#SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False +SQL_ECHO=True + +#SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False +SQL_TEST_ECHO=False + +# TRACING_TEST_TYPE=jaeger +TRACING_TYPE=jaeger +TRACING_SERVICE_NAME=fortest +TRACING_SAMPLER_TYPE=const +TRACING_SAMPLER_PARAM=1 +TRACING_LOG_PAYLOAD=True +#TRACING_SAMPLER_TYPE=probabilistic +#TRACING_SAMPLER_PARAM=0.5 diff --git a/shards/mishards/__init__.py b/shards/mishards/__init__.py new file mode 100644 index 0000000000..7db3d8cb5e --- /dev/null +++ b/shards/mishards/__init__.py @@ -0,0 +1,36 @@ +import logging +from mishards import settings +logger = logging.getLogger() + +from mishards.db_base import DB +db = DB() + +from mishards.server import Server +grpc_server = Server() + + +def create_app(testing_config=None): + config = testing_config if testing_config else settings.DefaultConfig + db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) + + from mishards.connections import ConnectionMgr + connect_mgr = ConnectionMgr() + + from sd import ProviderManager + + sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) + discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) + + from tracing.factory import TracerFactory + from mishards.grpc_utils import GrpcSpanDecorator + tracer = TracerFactory.new_tracer(config.TRACING_TYPE, settings.TracingConfig, + span_decorator=GrpcSpanDecorator()) + + from mishards.routings 
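[Editor's note: conftest.py stacks yield fixtures: app rebuilds the database schema around every test, and started_app additionally boots the gRPC server on SERVER_TEST_PORT. The generic shape, as a sketch with hypothetical build/teardown helpers:]

@pytest.fixture
def resource():
    r = build()      # setup runs before the test body
    yield r          # the test executes here
    r.teardown()     # cleanup runs afterwards, pass or fail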
import RouterFactory + router = RouterFactory.new_router(config.ROUTER_CLASS_NAME, connect_mgr) + + grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, router=router, discover=discover) + + from mishards import exception_handlers + + return grpc_server diff --git a/shards/mishards/connections.py b/shards/mishards/connections.py new file mode 100644 index 0000000000..618690a099 --- /dev/null +++ b/shards/mishards/connections.py @@ -0,0 +1,154 @@ +import logging +import threading +from functools import wraps +from milvus import Milvus + +from mishards import (settings, exceptions) +from utils import singleton + +logger = logging.getLogger(__name__) + + +class Connection: + def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs): + self.name = name + self.uri = uri + self.max_retry = max_retry + self.retried = 0 + self.conn = Milvus() + self.error_handlers = [] if not error_handlers else error_handlers + self.on_retry_func = kwargs.get('on_retry_func', None) + # self._connect() + + def __str__(self): + return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri) + + def _connect(self, metadata=None): + try: + self.conn.connect(uri=self.uri) + except Exception as e: + if not self.error_handlers: + raise exceptions.ConnectionConnectError(message=str(e), metadata=metadata) + for handler in self.error_handlers: + handler(e, metadata=metadata) + + @property + def can_retry(self): + return self.retried < self.max_retry + + @property + def connected(self): + return self.conn.connected() + + def on_retry(self): + if self.on_retry_func: + self.on_retry_func(self) + else: + self.retried > 1 and logger.warning('{} is retrying {}'.format(self, self.retried)) + + def on_connect(self, metadata=None): + while not self.connected and self.can_retry: + self.retried += 1 + self.on_retry() + self._connect(metadata=metadata) + + if not self.can_retry and not self.connected: + raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry, + metadata=metadata)) + + self.retried = 0 + + def connect(self, func, exception_handler=None): + @wraps(func) + def inner(*args, **kwargs): + self.on_connect() + try: + return func(*args, **kwargs) + except Exception as e: + if exception_handler: + exception_handler(e) + else: + raise e + return inner + + +@singleton +class ConnectionMgr: + def __init__(self): + self.metas = {} + self.conns = {} + + @property + def conn_names(self): + return set(self.metas.keys()) - set(['WOSERVER']) + + def conn(self, name, metadata, throw=False): + c = self.conns.get(name, None) + if not c: + url = self.metas.get(name, None) + if not url: + if not throw: + return None + raise exceptions.ConnectionNotFoundError(message='Connection {} not found'.format(name), + metadata=metadata) + this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) + threaded = { + threading.get_ident(): this_conn + } + self.conns[name] = threaded + return this_conn + + tid = threading.get_ident() + rconn = c.get(tid, None) + if not rconn: + url = self.metas.get(name, None) + if not url: + if not throw: + return None + raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name), + metadata=metadata) + this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) + c[tid] = this_conn + return this_conn + + return rconn + + def on_new_meta(self, name, url): + logger.info('Register Connection: name={};url={}'.format(name, url)) + self.metas[name] = url + + def on_duplicate_meta(self, name, url): + if 
self.metas[name] == url: + return self.on_same_meta(name, url) + + return self.on_diff_meta(name, url) + + def on_same_meta(self, name, url): + # logger.warning('Register same meta: {}:{}'.format(name, url)) + pass + + def on_diff_meta(self, name, url): + logger.warning('Received {} with diff url={}'.format(name, url)) + self.metas[name] = url + self.conns[name] = {} + + def on_unregister_meta(self, name, url): + logger.info('Unregister name={};url={}'.format(name, url)) + self.conns.pop(name, None) + + def on_nonexisted_meta(self, name): + logger.warning('Non-existed meta: {}'.format(name)) + + def register(self, name, url): + meta = self.metas.get(name) + if not meta: + return self.on_new_meta(name, url) + else: + return self.on_duplicate_meta(name, url) + + def unregister(self, name): + logger.info('Unregister Connection: name={}'.format(name)) + url = self.metas.pop(name, None) + if url is None: + return self.on_nonexisted_meta(name) + return self.on_unregister_meta(name, url) diff --git a/shards/mishards/db_base.py b/shards/mishards/db_base.py new file mode 100644 index 0000000000..5f2eee9ba1 --- /dev/null +++ b/shards/mishards/db_base.py @@ -0,0 +1,52 @@ +import logging +from sqlalchemy import create_engine +from sqlalchemy.engine.url import make_url +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker, scoped_session +from sqlalchemy.orm.session import Session as SessionBase + +logger = logging.getLogger(__name__) + + +class LocalSession(SessionBase): + def __init__(self, db, autocommit=False, autoflush=True, **options): + self.db = db + bind = options.pop('bind', None) or db.engine + SessionBase.__init__(self, autocommit=autocommit, autoflush=autoflush, bind=bind, **options) + + +class DB: + Model = declarative_base() + + def __init__(self, uri=None, echo=False): + self.echo = echo + uri and self.init_db(uri, echo) + self.session_factory = scoped_session(sessionmaker(class_=LocalSession, db=self)) + + def init_db(self, uri, echo=False): + url = make_url(uri) + if url.get_backend_name() == 'sqlite': + self.engine = create_engine(url) + else: + self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, + pool_pre_ping=True, + echo=echo, + max_overflow=0) + self.uri = uri + self.url = url + + def __str__(self): + return ''.format(self.url.get_backend_name(), self.url.database) + + @property + def Session(self): + return self.session_factory() + + def remove_session(self): + self.session_factory.remove() + + def drop_all(self): + self.Model.metadata.drop_all(self.engine) + + def create_all(self): + self.Model.metadata.create_all(self.engine) diff --git a/shards/mishards/exception_codes.py b/shards/mishards/exception_codes.py new file mode 100644 index 0000000000..bdd4572dd5 --- /dev/null +++ b/shards/mishards/exception_codes.py @@ -0,0 +1,10 @@ +INVALID_CODE = -1 + +CONNECT_ERROR_CODE = 10001 +CONNECTTION_NOT_FOUND_CODE = 10002 +DB_ERROR_CODE = 10003 + +TABLE_NOT_FOUND_CODE = 20001 +INVALID_ARGUMENT_CODE = 20002 +INVALID_DATE_RANGE_CODE = 20003 +INVALID_TOPK_CODE = 20004 diff --git a/shards/mishards/exception_handlers.py b/shards/mishards/exception_handlers.py new file mode 100644 index 0000000000..c79a6db5a3 --- /dev/null +++ b/shards/mishards/exception_handlers.py @@ -0,0 +1,82 @@ +import logging +from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 +from mishards import grpc_server as server, exceptions + +logger = logging.getLogger(__name__) + + +def resp_handler(err, error_code): + if not 
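[Editor's note: ConnectionMgr.conn caches one Connection per (name, thread), keyed by threading.get_ident(), so each worker thread talks through its own Milvus client while sharing the registered URI. The cache shape, condensed into a sketch equivalent to the lookup above, assuming the name is already registered in self.metas:]

# conns: name -> {thread_id: Connection}
tid = threading.get_ident()
threaded = self.conns.setdefault(name, {})
if tid not in threaded:
    threaded[tid] = Connection(name=name, uri=self.metas[name],
                               max_retry=settings.MAX_RETRY)
return threaded[tid]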
isinstance(err, exceptions.BaseException): + return status_pb2.Status(error_code=error_code, reason=str(err)) + + status = status_pb2.Status(error_code=error_code, reason=err.message) + + if err.metadata is None: + return status + + resp_class = err.metadata.get('resp_class', None) + if not resp_class: + return status + + if resp_class == milvus_pb2.BoolReply: + return resp_class(status=status, bool_reply=False) + + if resp_class == milvus_pb2.VectorIds: + return resp_class(status=status, vector_id_array=[]) + + if resp_class == milvus_pb2.TopKQueryResultList: + return resp_class(status=status, topk_query_result=[]) + + if resp_class == milvus_pb2.TableRowCount: + return resp_class(status=status, table_row_count=-1) + + if resp_class == milvus_pb2.TableName: + return resp_class(status=status, table_name=[]) + + if resp_class == milvus_pb2.StringReply: + return resp_class(status=status, string_reply='') + + if resp_class == milvus_pb2.TableSchema: + return milvus_pb2.TableSchema( + status=status + ) + + if resp_class == milvus_pb2.IndexParam: + return milvus_pb2.IndexParam( + table_name=milvus_pb2.TableName( + status=status + ) + ) + + status.error_code = status_pb2.UNEXPECTED_ERROR + return status + + +@server.errorhandler(exceptions.TableNotFoundError) +def TableNotFoundErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.TABLE_NOT_EXISTS) + + +@server.errorhandler(exceptions.InvalidTopKError) +def InvalidTopKErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_TOPK) + + +@server.errorhandler(exceptions.InvalidArgumentError) +def InvalidArgumentErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_ARGUMENT) + + +@server.errorhandler(exceptions.DBError) +def DBErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.UNEXPECTED_ERROR) + + +@server.errorhandler(exceptions.InvalidRangeError) +def InvalidArgumentErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_RANGE) diff --git a/shards/mishards/exceptions.py b/shards/mishards/exceptions.py new file mode 100644 index 0000000000..72839f88d2 --- /dev/null +++ b/shards/mishards/exceptions.py @@ -0,0 +1,38 @@ +import mishards.exception_codes as codes + + +class BaseException(Exception): + code = codes.INVALID_CODE + message = 'BaseException' + + def __init__(self, message='', metadata=None): + self.message = self.__class__.__name__ if not message else message + self.metadata = metadata + + +class ConnectionConnectError(BaseException): + code = codes.CONNECT_ERROR_CODE + + +class ConnectionNotFoundError(BaseException): + code = codes.CONNECTTION_NOT_FOUND_CODE + + +class DBError(BaseException): + code = codes.DB_ERROR_CODE + + +class TableNotFoundError(BaseException): + code = codes.TABLE_NOT_FOUND_CODE + + +class InvalidTopKError(BaseException): + code = codes.INVALID_TOPK_CODE + + +class InvalidArgumentError(BaseException): + code = codes.INVALID_ARGUMENT_CODE + + +class InvalidRangeError(BaseException): + code = codes.INVALID_DATE_RANGE_CODE diff --git a/shards/mishards/factories.py b/shards/mishards/factories.py new file mode 100644 index 0000000000..52c0253b39 --- /dev/null +++ b/shards/mishards/factories.py @@ -0,0 +1,54 @@ +import time +import datetime +import random +import factory +from factory.alchemy import SQLAlchemyModelFactory +from faker import Faker +from faker.providers import BaseProvider + +from milvus.client.types import MetricType +from mishards import db +from mishards.models import 
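[Editor's note: resp_handler reads a 'resp_class' hint out of the exception metadata so a failing RPC still returns the message type its caller expects: a BoolReply for HasTable, a TopKQueryResultList for Search, and so on. Raising with that hint looks like this:]

# The registered TableNotFoundErrorHandler turns this into a BoolReply
# carrying a TABLE_NOT_EXISTS status instead of a bare Status message.
raise exceptions.TableNotFoundError(
    'table not found: demo_table',
    metadata={'resp_class': milvus_pb2.BoolReply})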
Tables, TableFiles + + +class FakerProvider(BaseProvider): + def this_date(self): + t = datetime.datetime.today() + return (t.year - 1900) * 10000 + (t.month - 1) * 100 + t.day + + +factory.Faker.add_provider(FakerProvider) + + +class TablesFactory(SQLAlchemyModelFactory): + class Meta: + model = Tables + sqlalchemy_session = db.session_factory + sqlalchemy_session_persistence = 'commit' + + id = factory.Faker('random_number', digits=16, fix_len=True) + table_id = factory.Faker('uuid4') + state = factory.Faker('random_element', elements=(0, 1)) + dimension = factory.Faker('random_element', elements=(256, 512)) + created_on = int(time.time()) + index_file_size = 0 + engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) + metric_type = factory.Faker('random_element', elements=(MetricType.L2, MetricType.IP)) + nlist = 16384 + + +class TableFilesFactory(SQLAlchemyModelFactory): + class Meta: + model = TableFiles + sqlalchemy_session = db.session_factory + sqlalchemy_session_persistence = 'commit' + + id = factory.Faker('random_number', digits=16, fix_len=True) + table = factory.SubFactory(TablesFactory) + engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) + file_id = factory.Faker('uuid4') + file_type = factory.Faker('random_element', elements=(0, 1, 2, 3, 4)) + file_size = factory.Faker('random_number') + updated_time = int(time.time()) + created_on = int(time.time()) + date = factory.Faker('this_date') diff --git a/shards/mishards/grpc_utils/__init__.py b/shards/mishards/grpc_utils/__init__.py new file mode 100644 index 0000000000..f5225b2a66 --- /dev/null +++ b/shards/mishards/grpc_utils/__init__.py @@ -0,0 +1,37 @@ +from grpc_opentracing import SpanDecorator +from milvus.grpc_gen import status_pb2 + + +class GrpcSpanDecorator(SpanDecorator): + def __call__(self, span, rpc_info): + status = None + if not rpc_info.response: + return + if isinstance(rpc_info.response, status_pb2.Status): + status = rpc_info.response + else: + try: + status = rpc_info.response.status + except Exception as e: + status = status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, + reason='Should not happen') + + if status.error_code == 0: + return + error_log = {'event': 'error', + 'request': rpc_info.request, + 'response': rpc_info.response + } + span.set_tag('error', True) + span.log_kv(error_log) + + +def mark_grpc_method(func): + setattr(func, 'grpc_method', True) + return func + + +def is_grpc_method(func): + if not func: + return False + return getattr(func, 'grpc_method', False) diff --git a/shards/mishards/grpc_utils/grpc_args_parser.py b/shards/mishards/grpc_utils/grpc_args_parser.py new file mode 100644 index 0000000000..039299803d --- /dev/null +++ b/shards/mishards/grpc_utils/grpc_args_parser.py @@ -0,0 +1,102 @@ +from milvus import Status +from functools import wraps + + +def error_status(func): + @wraps(func) + def inner(*args, **kwargs): + try: + results = func(*args, **kwargs) + except Exception as e: + return Status(code=Status.UNEXPECTED_ERROR, message=str(e)), None + + return Status(code=0, message="Success"), results + + return inner + + +class GrpcArgsParser(object): + + @classmethod + @error_status + def parse_proto_TableSchema(cls, param): + _table_schema = { + 'status': param.status, + 'table_name': param.table_name, + 'dimension': param.dimension, + 'index_file_size': param.index_file_size, + 'metric_type': param.metric_type + } + + return _table_schema + + @classmethod + @error_status + def parse_proto_TableName(cls, param): + return param.table_name + + 
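+    # Editor's note: every parse_proto_* classmethod in this class is wrapped
+    # by @error_status above, so callers always receive a (Status, result)
+    # pair, and a parser that raises maps to (Status(UNEXPECTED_ERROR), None).
+    # A hypothetical call site:
+    #
+    #     status, table_name = GrpcArgsParser.parse_proto_TableName(request)
+    #     if not status.OK():
+    #         ...bail out with status.code / status.message...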
@classmethod + @error_status + def parse_proto_Index(cls, param): + _index = { + 'index_type': param.index_type, + 'nlist': param.nlist + } + + return _index + + @classmethod + @error_status + def parse_proto_IndexParam(cls, param): + _table_name = param.table_name + _status, _index = cls.parse_proto_Index(param.index) + + if not _status.OK(): + raise Exception("Argument parse error") + + return _table_name, _index + + @classmethod + @error_status + def parse_proto_Command(cls, param): + _cmd = param.cmd + + return _cmd + + @classmethod + @error_status + def parse_proto_Range(cls, param): + _start_value = param.start_value + _end_value = param.end_value + + return _start_value, _end_value + + @classmethod + @error_status + def parse_proto_RowRecord(cls, param): + return list(param.vector_data) + + @classmethod + @error_status + def parse_proto_SearchParam(cls, param): + _table_name = param.table_name + _topk = param.topk + _nprobe = param.nprobe + _status, _range = cls.parse_proto_Range(param.query_range_array) + + if not _status.OK(): + raise Exception("Argument parse error") + + _row_record = param.query_record_array + + return _table_name, _row_record, _range, _topk + + @classmethod + @error_status + def parse_proto_DeleteByRangeParam(cls, param): + _table_name = param.table_name + _range = param.range + _start_value = _range.start_value + _end_value = _range.end_value + + return _table_name, _start_value, _end_value diff --git a/shards/mishards/grpc_utils/grpc_args_wrapper.py b/shards/mishards/grpc_utils/grpc_args_wrapper.py new file mode 100644 index 0000000000..7447dbd995 --- /dev/null +++ b/shards/mishards/grpc_utils/grpc_args_wrapper.py @@ -0,0 +1,4 @@ +# class GrpcArgsWrapper(object): + +# @classmethod +# def proto_TableName(cls): diff --git a/shards/mishards/grpc_utils/test_grpc.py b/shards/mishards/grpc_utils/test_grpc.py new file mode 100644 index 0000000000..9af09e5d0d --- /dev/null +++ b/shards/mishards/grpc_utils/test_grpc.py @@ -0,0 +1,75 @@ +import logging +import opentracing +from mishards.grpc_utils import GrpcSpanDecorator, is_grpc_method +from milvus.grpc_gen import status_pb2, milvus_pb2 + +logger = logging.getLogger(__name__) + + +class FakeTracer(opentracing.Tracer): + pass + + +class FakeSpan(opentracing.Span): + def __init__(self, context, tracer, **kwargs): + super(FakeSpan, self).__init__(tracer, context) + self.reset() + + def set_tag(self, key, value): + self.tags.append({key: value}) + + def log_kv(self, key_values, timestamp=None): + self.logs.append(key_values) + + def reset(self): + self.tags = [] + self.logs = [] + + +class FakeRpcInfo: + def __init__(self, request, response): + self.request = request + self.response = response + + +class TestGrpcUtils: + def test_span_deco(self): + request = 'request' + OK = status_pb2.Status(error_code=status_pb2.SUCCESS, reason='Success') + response = OK + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + response = milvus_pb2.BoolReply(status=OK, bool_reply=False) + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + response = 1 + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) + span_deco = 
GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 1 + assert len(span.tags) == 1 + + response = 0 + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + def test_is_grpc_method(self): + target = 1 + assert not is_grpc_method(target) + target = None + assert not is_grpc_method(target) diff --git a/shards/mishards/hash_ring.py b/shards/mishards/hash_ring.py new file mode 100644 index 0000000000..a97f3f580e --- /dev/null +++ b/shards/mishards/hash_ring.py @@ -0,0 +1,150 @@ +import math +import sys +from bisect import bisect + +if sys.version_info >= (2, 5): + import hashlib + md5_constructor = hashlib.md5 +else: + import md5 + md5_constructor = md5.new + + +class HashRing(object): + def __init__(self, nodes=None, weights=None): + """`nodes` is a list of objects that have a proper __str__ representation. + `weights` is dictionary that sets weights to the nodes. The default + weight is that all nodes are equal. + """ + self.ring = dict() + self._sorted_keys = [] + + self.nodes = nodes + + if not weights: + weights = {} + self.weights = weights + + self._generate_circle() + + def _generate_circle(self): + """Generates the circle. + """ + total_weight = 0 + for node in self.nodes: + total_weight += self.weights.get(node, 1) + + for node in self.nodes: + weight = 1 + + if node in self.weights: + weight = self.weights.get(node) + + factor = math.floor((40 * len(self.nodes) * weight) / total_weight) + + for j in range(0, int(factor)): + b_key = self._hash_digest('%s-%s' % (node, j)) + + for i in range(0, 3): + key = self._hash_val(b_key, lambda x: x + i * 4) + self.ring[key] = node + self._sorted_keys.append(key) + + self._sorted_keys.sort() + + def get_node(self, string_key): + """Given a string key a corresponding node in the hash ring is returned. + + If the hash ring is empty, `None` is returned. + """ + pos = self.get_node_pos(string_key) + if pos is None: + return None + return self.ring[self._sorted_keys[pos]] + + def get_node_pos(self, string_key): + """Given a string key a corresponding node in the hash ring is returned + along with it's position in the ring. + + If the hash ring is empty, (`None`, `None`) is returned. + """ + if not self.ring: + return None + + key = self.gen_key(string_key) + + nodes = self._sorted_keys + pos = bisect(nodes, key) + + if pos == len(nodes): + return 0 + else: + return pos + + def iterate_nodes(self, string_key, distinct=True): + """Given a string key it returns the nodes as a generator that can hold the key. + + The generator iterates one time through the ring + starting at the correct position. + + if `distinct` is set, then the nodes returned will be unique, + i.e. no virtual copies will be returned. + """ + if not self.ring: + yield None, None + + returned_values = set() + + def distinct_filter(value): + if str(value) not in returned_values: + returned_values.add(str(value)) + return value + + pos = self.get_node_pos(string_key) + for key in self._sorted_keys[pos:]: + val = distinct_filter(self.ring[key]) + if val: + yield val + + for i, key in enumerate(self._sorted_keys): + if i < pos: + val = distinct_filter(self.ring[key]) + if val: + yield val + + def gen_key(self, key): + """Given a string key it returns a long value, + this long value represents a place on the hash ring. + + md5 is currently used because it mixes well. 
+ """ + b_key = self._hash_digest(key) + return self._hash_val(b_key, lambda x: x) + + def _hash_val(self, b_key, entry_fn): + return (b_key[entry_fn(3)] << 24) | (b_key[entry_fn(2)] << 16) | ( + b_key[entry_fn(1)] << 8) | b_key[entry_fn(0)] + + def _hash_digest(self, key): + m = md5_constructor() + key = key.encode() + m.update(key) + return m.digest() + + +if __name__ == '__main__': + from collections import defaultdict + servers = [ + '192.168.0.246:11212', '192.168.0.247:11212', '192.168.0.248:11212', + '192.168.0.249:11212' + ] + + ring = HashRing(servers) + keys = ['{}'.format(i) for i in range(100)] + mapped = defaultdict(list) + for k in keys: + server = ring.get_node(k) + mapped[server].append(k) + + for k, v in mapped.items(): + print(k, v) diff --git a/shards/mishards/main.py b/shards/mishards/main.py new file mode 100644 index 0000000000..c0d142607b --- /dev/null +++ b/shards/mishards/main.py @@ -0,0 +1,15 @@ +import os +import sys +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from mishards import (settings, create_app) + + +def main(): + server = create_app(settings.DefaultConfig) + server.run(port=settings.SERVER_PORT) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/shards/mishards/models.py b/shards/mishards/models.py new file mode 100644 index 0000000000..4b6c8f9ef4 --- /dev/null +++ b/shards/mishards/models.py @@ -0,0 +1,76 @@ +import logging +from sqlalchemy import (Integer, Boolean, Text, + String, BigInteger, and_, or_, + Column) +from sqlalchemy.orm import relationship, backref + +from mishards import db + +logger = logging.getLogger(__name__) + + +class TableFiles(db.Model): + FILE_TYPE_NEW = 0 + FILE_TYPE_RAW = 1 + FILE_TYPE_TO_INDEX = 2 + FILE_TYPE_INDEX = 3 + FILE_TYPE_TO_DELETE = 4 + FILE_TYPE_NEW_MERGE = 5 + FILE_TYPE_NEW_INDEX = 6 + FILE_TYPE_BACKUP = 7 + + __tablename__ = 'TableFiles' + + id = Column(BigInteger, primary_key=True, autoincrement=True) + table_id = Column(String(50)) + engine_type = Column(Integer) + file_id = Column(String(50)) + file_type = Column(Integer) + file_size = Column(Integer, default=0) + row_count = Column(Integer, default=0) + updated_time = Column(BigInteger) + created_on = Column(BigInteger) + date = Column(Integer) + + table = relationship( + 'Tables', + primaryjoin='and_(foreign(TableFiles.table_id) == Tables.table_id)', + backref=backref('files', uselist=True, lazy='dynamic') + ) + + +class Tables(db.Model): + TO_DELETE = 1 + NORMAL = 0 + + __tablename__ = 'Tables' + + id = Column(BigInteger, primary_key=True, autoincrement=True) + table_id = Column(String(50), unique=True) + state = Column(Integer) + dimension = Column(Integer) + created_on = Column(Integer) + flag = Column(Integer, default=0) + index_file_size = Column(Integer) + engine_type = Column(Integer) + nlist = Column(Integer) + metric_type = Column(Integer) + + def files_to_search(self, date_range=None): + cond = or_( + TableFiles.file_type == TableFiles.FILE_TYPE_RAW, + TableFiles.file_type == TableFiles.FILE_TYPE_TO_INDEX, + TableFiles.file_type == TableFiles.FILE_TYPE_INDEX, + ) + if date_range: + cond = and_( + cond, + or_( + and_(TableFiles.date >= d[0], TableFiles.date < d[1]) for d in date_range + ) + ) + + files = self.files.filter(cond) + + logger.debug('DATE_RANGE: {}'.format(date_range)) + return files diff --git a/shards/mishards/routings.py b/shards/mishards/routings.py new file mode 100644 index 0000000000..823972726f --- /dev/null +++ b/shards/mishards/routings.py @@ -0,0 +1,96 @@ +import 
diff --git a/shards/mishards/routings.py b/shards/mishards/routings.py
new file mode 100644
index 0000000000..823972726f
--- /dev/null
+++ b/shards/mishards/routings.py
@@ -0,0 +1,96 @@
+import logging
+from sqlalchemy import exc as sqlalchemy_exc
+from sqlalchemy import and_
+
+from mishards import exceptions, db
+from mishards.hash_ring import HashRing
+from mishards.models import Tables
+
+logger = logging.getLogger(__name__)
+
+
+class RouteManager:
+    ROUTER_CLASSES = {}
+
+    @classmethod
+    def register_router_class(cls, target):
+        name = target.__dict__.get('NAME', None)
+        name = name if name else target.__name__
+        cls.ROUTER_CLASSES[name] = target
+        return target
+
+    @classmethod
+    def get_router_class(cls, name):
+        return cls.ROUTER_CLASSES.get(name, None)
+
+
+class RouterFactory:
+    @classmethod
+    def new_router(cls, name, conn_mgr, **kwargs):
+        router_class = RouteManager.get_router_class(name)
+        assert router_class
+        return router_class(conn_mgr, **kwargs)
+
+
+class RouterMixin:
+    def __init__(self, conn_mgr):
+        self.conn_mgr = conn_mgr
+
+    def routing(self, table_name, metadata=None, **kwargs):
+        raise NotImplementedError()
+
+    def connection(self, metadata=None):
+        conn = self.conn_mgr.conn('WOSERVER', metadata=metadata)
+        if conn:
+            conn.on_connect(metadata=metadata)
+        return conn.conn
+
+    def query_conn(self, name, metadata=None):
+        conn = self.conn_mgr.conn(name, metadata=metadata)
+        if not conn:
+            raise exceptions.ConnectionNotFoundError(name, metadata=metadata)
+        conn.on_connect(metadata=metadata)
+        return conn.conn
+
+
+@RouteManager.register_router_class
+class FileBasedHashRingRouter(RouterMixin):
+    NAME = 'FileBasedHashRingRouter'
+
+    def __init__(self, conn_mgr, **kwargs):
+        super(FileBasedHashRingRouter, self).__init__(conn_mgr)
+
+    def routing(self, table_name, metadata=None, **kwargs):
+        range_array = kwargs.pop('range_array', None)
+        return self._route(table_name, range_array, metadata, **kwargs)
+
+    def _route(self, table_name, range_array, metadata=None, **kwargs):
+        # PXU TODO: Implement Thread-local Context
+        # PXU TODO: Session life mgt
+        try:
+            table = db.Session.query(Tables).filter(
+                and_(Tables.table_id == table_name,
+                     Tables.state != Tables.TO_DELETE)).first()
+        except sqlalchemy_exc.SQLAlchemyError as e:
+            raise exceptions.DBError(message=str(e), metadata=metadata)
+
+        if not table:
+            raise exceptions.TableNotFoundError(table_name, metadata=metadata)
+        files = table.files_to_search(range_array)
+        db.remove_session()
+
+        servers = self.conn_mgr.conn_names
+        logger.info('Available servers: {}'.format(servers))
+
+        ring = HashRing(servers)
+
+        routing = {}
+
+        for f in files:
+            target_host = ring.get_node(str(f.id))
+            sub = routing.get(target_host, None)
+            if not sub:
+                routing[target_host] = {'table_id': table_name, 'file_ids': []}
+            routing[target_host]['file_ids'].append(str(f.id))
+
+        return routing
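Editor's note: routings.py buckets every candidate file onto a backend with
the consistent hash ring from hash_ring.py. A minimal sketch of that bucketing
step, assuming the shards directory is on sys.path; host names and file ids
are made up.

from collections import defaultdict
from mishards.hash_ring import HashRing

hosts = ['ro-server-0', 'ro-server-1', 'ro-server-2']
file_ids = [str(i) for i in range(10)]

ring = HashRing(hosts)
routing = defaultdict(lambda: {'table_id': 'demo_table', 'file_ids': []})
for file_id in file_ids:
    host = ring.get_node(file_id)          # stable host choice per file id
    routing[host]['file_ids'].append(file_id)

for host, sub in routing.items():
    print(host, sub['file_ids'])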
diff --git a/shards/mishards/server.py b/shards/mishards/server.py
new file mode 100644
index 0000000000..599a00e455
--- /dev/null
+++ b/shards/mishards/server.py
@@ -0,0 +1,122 @@
+import logging
+import grpc
+import time
+import socket
+import inspect
+from urllib.parse import urlparse
+from functools import wraps
+from concurrent import futures
+from grpc._cython import cygrpc
+from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server
+from mishards.grpc_utils import is_grpc_method
+from mishards.service_handler import ServiceHandler
+from mishards import settings
+
+logger = logging.getLogger(__name__)
+
+
+class Server:
+    def __init__(self):
+        self.pre_run_handlers = set()
+        self.grpc_methods = set()
+        self.error_handlers = {}
+        self.exit_flag = False
+
+    def init_app(self,
+                 conn_mgr,
+                 tracer,
+                 router,
+                 discover,
+                 port=19530,
+                 max_workers=10,
+                 **kwargs):
+        self.port = int(port)
+        self.conn_mgr = conn_mgr
+        self.tracer = tracer
+        self.router = router
+        self.discover = discover
+
+        self.server_impl = grpc.server(
+            thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers),
+            options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
+                     (cygrpc.ChannelArgKey.max_receive_message_length, -1)])
+
+        self.server_impl = self.tracer.decorate(self.server_impl)
+
+        self.register_pre_run_handler(self.pre_run_handler)
+
+    def pre_run_handler(self):
+        woserver = settings.WOSERVER
+        url = urlparse(woserver)
+        ip = socket.gethostbyname(url.hostname)
+        socket.inet_pton(socket.AF_INET, ip)
+        self.conn_mgr.register(
+            'WOSERVER', '{}://{}:{}'.format(url.scheme, ip, url.port or 80))
+
+    def register_pre_run_handler(self, func):
+        logger.info('Registering {} into server pre_run_handlers'.format(func))
+        self.pre_run_handlers.add(func)
+        return func
+
+    def wrap_method_with_errorhandler(self, func):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            try:
+                return func(*args, **kwargs)
+            except Exception as e:
+                if e.__class__ in self.error_handlers:
+                    return self.error_handlers[e.__class__](e)
+                raise
+
+        return wrapper
+
+    def errorhandler(self, exception):
+        if inspect.isclass(exception) and issubclass(exception, Exception):
+
+            def wrapper(func):
+                self.error_handlers[exception] = func
+                return func
+
+            return wrapper
+        return exception
+
+    def on_pre_run(self):
+        for handler in self.pre_run_handlers:
+            handler()
+        self.discover.start()
+
+    def start(self, port=None):
+        handler_class = self.decorate_handler(ServiceHandler)
+        add_MilvusServiceServicer_to_server(
+            handler_class(tracer=self.tracer,
+                          router=self.router), self.server_impl)
+        self.server_impl.add_insecure_port("[::]:{}".format(
+            str(port or self.port)))
+        self.server_impl.start()
+
+    def run(self, port):
+        logger.info('Milvus server start ......')
+        port = port or self.port
+        self.on_pre_run()
+
+        self.start(port)
+        logger.info('Listening on port {}'.format(port))
+
+        try:
+            while not self.exit_flag:
+                time.sleep(5)
+        except KeyboardInterrupt:
+            self.stop()
+
+    def stop(self):
+        logger.info('Server is shutting down ......')
+        self.exit_flag = True
+        self.server_impl.stop(0)
+        self.tracer.close()
+        logger.info('Server is closed')
+
+    def decorate_handler(self, handler):
+        for key, attr in handler.__dict__.items():
+            if is_grpc_method(attr):
+                setattr(handler, key, self.wrap_method_with_errorhandler(attr))
+        return handler
diff --git a/shards/mishards/service_handler.py b/shards/mishards/service_handler.py
new file mode 100644
index 0000000000..5e91c14f14
--- /dev/null
+++ b/shards/mishards/service_handler.py
@@ -0,0 +1,475 @@
+import logging
+import time
+import datetime
+from collections import defaultdict
+
+import multiprocessing
+from concurrent.futures import ThreadPoolExecutor
+from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2
+from milvus.grpc_gen.milvus_pb2 import TopKQueryResult
+from milvus.client.abstract import Range
+from milvus.client import types as Types
+
+from mishards import (db, settings, exceptions)
+from mishards.grpc_utils import mark_grpc_method
+from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser
+from mishards import utilities
+
+logger = logging.getLogger(__name__)
+
+
+class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
+    MAX_NPROBE = 2048
+    MAX_TOPK = 2048
+
+    def __init__(self, tracer, router, max_workers=multiprocessing.cpu_count(), **kwargs):
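+        # Editor's note: ServiceHandler is the scatter-gather core of
+        # mishards: Search() asks the router for a {host: file_ids} plan,
+        # fans the request out on a thread pool (_do_query), then merges the
+        # per-host top-k hits into one result set (_do_merge), sorting by
+        # distance ascending for L2 and descending for IP metrics.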
self.table_meta = {} + self.error_handlers = {} + self.tracer = tracer + self.router = router + self.max_workers = max_workers + + def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): + status = status_pb2.Status(error_code=status_pb2.SUCCESS, + reason="Success") + if not files_n_topk_results: + return status, [] + + request_results = defaultdict(list) + + calc_time = time.time() + for files_collection in files_n_topk_results: + if isinstance(files_collection, tuple): + status, _ = files_collection + return status, [] + for request_pos, each_request_results in enumerate( + files_collection.topk_query_result): + request_results[request_pos].extend( + each_request_results.query_result_arrays) + request_results[request_pos] = sorted( + request_results[request_pos], + key=lambda x: x.distance, + reverse=reverse)[:topk] + + calc_time = time.time() - calc_time + logger.info('Merge takes {}'.format(calc_time)) + + results = sorted(request_results.items()) + topk_query_result = [] + + for result in results: + query_result = TopKQueryResult(query_result_arrays=result[1]) + topk_query_result.append(query_result) + + return status, topk_query_result + + def _do_query(self, + context, + table_id, + table_meta, + vectors, + topk, + nprobe, + range_array=None, + **kwargs): + metadata = kwargs.get('metadata', None) + range_array = [ + utilities.range_to_date(r, metadata=metadata) for r in range_array + ] if range_array else None + + routing = {} + p_span = None if self.tracer.empty else context.get_active_span( + ).context + with self.tracer.start_span('get_routing', child_of=p_span): + routing = self.router.routing(table_id, + range_array=range_array, + metadata=metadata) + logger.info('Routing: {}'.format(routing)) + + metadata = kwargs.get('metadata', None) + + rs = [] + all_topk_results = [] + + def search(addr, query_params, vectors, topk, nprobe, **kwargs): + logger.info( + 'Send Search Request: addr={};params={};nq={};topk={};nprobe={}' + .format(addr, query_params, len(vectors), topk, nprobe)) + + conn = self.router.query_conn(addr, metadata=metadata) + start = time.time() + span = kwargs.get('span', None) + span = span if span else (None if self.tracer.empty else + context.get_active_span().context) + + with self.tracer.start_span('search_{}'.format(addr), + child_of=span): + ret = conn.search_vectors_in_files( + table_name=query_params['table_id'], + file_ids=query_params['file_ids'], + query_records=vectors, + top_k=topk, + nprobe=nprobe, + lazy_=True) + end = time.time() + logger.info('search_vectors_in_files takes: {}'.format(end - start)) + + all_topk_results.append(ret) + + with self.tracer.start_span('do_search', child_of=p_span) as span: + with ThreadPoolExecutor(max_workers=self.max_workers) as pool: + for addr, params in routing.items(): + res = pool.submit(search, + addr, + params, + vectors, + topk, + nprobe, + span=span) + rs.append(res) + + for res in rs: + res.result() + + reverse = table_meta.metric_type == Types.MetricType.IP + with self.tracer.start_span('do_merge', child_of=p_span): + return self._do_merge(all_topk_results, + topk, + reverse=reverse, + metadata=metadata) + + def _create_table(self, table_schema): + return self.router.connection().create_table(table_schema) + + @mark_grpc_method + def CreateTable(self, request, context): + _status, _table_schema = Parser.parse_proto_TableSchema(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + logger.info('CreateTable 
{}'.format(_table_schema['table_name'])) + + _status = self._create_table(_table_schema) + + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + def _has_table(self, table_name, metadata=None): + return self.router.connection(metadata=metadata).has_table(table_name) + + @mark_grpc_method + def HasTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return milvus_pb2.BoolReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + bool_reply=False) + + logger.info('HasTable {}'.format(_table_name)) + + _status, _bool = self._has_table(_table_name, + metadata={'resp_class': milvus_pb2.BoolReply}) + + return milvus_pb2.BoolReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + bool_reply=_bool) + + def _delete_table(self, table_name): + return self.router.connection().delete_table(table_name) + + @mark_grpc_method + def DropTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + logger.info('DropTable {}'.format(_table_name)) + + _status = self._delete_table(_table_name) + + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + def _create_index(self, table_name, index): + return self.router.connection().create_index(table_name, index) + + @mark_grpc_method + def CreateIndex(self, request, context): + _status, unpacks = Parser.parse_proto_IndexParam(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + _table_name, _index = unpacks + + logger.info('CreateIndex {}'.format(_table_name)) + + # TODO: interface create_table incompleted + _status = self._create_index(_table_name, _index) + + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + def _add_vectors(self, param, metadata=None): + return self.router.connection(metadata=metadata).add_vectors( + None, None, insert_param=param) + + @mark_grpc_method + def Insert(self, request, context): + logger.info('Insert') + # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' + _status, _ids = self._add_vectors( + metadata={'resp_class': milvus_pb2.VectorIds}, param=request) + return milvus_pb2.VectorIds(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + vector_id_array=_ids) + + @mark_grpc_method + def Search(self, request, context): + + table_name = request.table_name + + topk = request.topk + nprobe = request.nprobe + + logger.info('Search {}: topk={} nprobe={}'.format( + table_name, topk, nprobe)) + + metadata = {'resp_class': milvus_pb2.TopKQueryResultList} + + if nprobe > self.MAX_NPROBE or nprobe <= 0: + raise exceptions.InvalidArgumentError( + message='Invalid nprobe: {}'.format(nprobe), metadata=metadata) + + if topk > self.MAX_TOPK or topk <= 0: + raise exceptions.InvalidTopKError( + message='Invalid topk: {}'.format(topk), metadata=metadata) + + table_meta = self.table_meta.get(table_name, None) + + if not table_meta: + status, info = self.router.connection( + metadata=metadata).describe_table(table_name) + if not status.OK(): + raise exceptions.TableNotFoundError(table_name, + metadata=metadata) + + self.table_meta[table_name] = info + table_meta = info + + start = time.time() + + query_record_array = [] + + for query_record in request.query_record_array: + 
query_record_array.append(list(query_record.vector_data))
+
+        query_range_array = []
+        for query_range in request.query_range_array:
+            query_range_array.append(
+                Range(query_range.start_value, query_range.end_value))
+
+        status, results = self._do_query(context,
+                                         table_name,
+                                         table_meta,
+                                         query_record_array,
+                                         topk,
+                                         nprobe,
+                                         query_range_array,
+                                         metadata=metadata)
+
+        now = time.time()
+        logger.info('SearchVector takes: {}'.format(now - start))
+
+        topk_result_list = milvus_pb2.TopKQueryResultList(
+            status=status_pb2.Status(error_code=status.error_code,
+                                     reason=status.reason),
+            topk_query_result=results)
+        return topk_result_list
+
+    @mark_grpc_method
+    def SearchInFiles(self, request, context):
+        raise NotImplementedError()
+
+    def _describe_table(self, table_name, metadata=None):
+        return self.router.connection(metadata=metadata).describe_table(table_name)
+
+    @mark_grpc_method
+    def DescribeTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return milvus_pb2.TableSchema(status=status_pb2.Status(
+                error_code=_status.code, reason=_status.message), )
+
+        metadata = {'resp_class': milvus_pb2.TableSchema}
+
+        logger.info('DescribeTable {}'.format(_table_name))
+        _status, _table = self._describe_table(metadata=metadata,
+                                               table_name=_table_name)
+
+        if _status.OK():
+            return milvus_pb2.TableSchema(
+                table_name=_table_name,
+                index_file_size=_table.index_file_size,
+                dimension=_table.dimension,
+                metric_type=_table.metric_type,
+                status=status_pb2.Status(error_code=_status.code,
+                                         reason=_status.message),
+            )
+
+        return milvus_pb2.TableSchema(
+            table_name=_table_name,
+            status=status_pb2.Status(error_code=_status.code,
+                                     reason=_status.message),
+        )
+
+    def _count_table(self, table_name, metadata=None):
+        return self.router.connection(
+            metadata=metadata).get_table_row_count(table_name)
+
+    @mark_grpc_method
+    def CountTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            status = status_pb2.Status(error_code=_status.code,
+                                       reason=_status.message)
+
+            return milvus_pb2.TableRowCount(status=status)
+
+        logger.info('CountTable {}'.format(_table_name))
+
+        metadata = {'resp_class': milvus_pb2.TableRowCount}
+        _status, _count = self._count_table(_table_name, metadata=metadata)
+
+        return milvus_pb2.TableRowCount(
+            status=status_pb2.Status(error_code=_status.code,
+                                     reason=_status.message),
+            table_row_count=_count if isinstance(_count, int) else -1)
+
+    def _get_server_version(self, metadata=None):
+        return self.router.connection(metadata=metadata).server_version()
+
+    @mark_grpc_method
+    def Cmd(self, request, context):
+        _status, _cmd = Parser.parse_proto_Command(request)
+        logger.info('Cmd: {}'.format(_cmd))
+
+        if not _status.OK():
+            return milvus_pb2.StringReply(status=status_pb2.Status(
+                error_code=_status.code, reason=_status.message))
+
+        metadata = {'resp_class': milvus_pb2.StringReply}
+
+        if _cmd == 'version':
+            _status, _reply = self._get_server_version(metadata=metadata)
+        else:
+            _status, _reply = self.router.connection(
+                metadata=metadata).server_status()
+
+        return milvus_pb2.StringReply(status=status_pb2.Status(
+            error_code=_status.code, reason=_status.message),
+                                      string_reply=_reply)
+
+    def _show_tables(self, metadata=None):
+        return self.router.connection(metadata=metadata).show_tables()
+
+    @mark_grpc_method
+    def ShowTables(self, request, context):
+        logger.info('ShowTables')
+        metadata = {'resp_class': milvus_pb2.TableName}
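+        # Editor's note: the 'resp_class' metadata travels with any exception
+        # raised while this RPC runs; the handlers in exception_handlers.py
+        # use it to wrap the failure Status in the reply type this method
+        # must return, so clients never get a bare Status where a typed
+        # reply is expected.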
_status, _results = self._show_tables(metadata=metadata) + + return milvus_pb2.TableNameList(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + table_names=_results) + + def _delete_by_range(self, table_name, start_date, end_date): + return self.router.connection().delete_vectors_by_range(table_name, + start_date, + end_date) + + @mark_grpc_method + def DeleteByRange(self, request, context): + _status, unpacks = \ + Parser.parse_proto_DeleteByRangeParam(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + _table_name, _start_date, _end_date = unpacks + + logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, + _end_date)) + _status = self._delete_by_range(_table_name, _start_date, _end_date) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + def _preload_table(self, table_name): + return self.router.connection().preload_table(table_name) + + @mark_grpc_method + def PreloadTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + logger.info('PreloadTable {}'.format(_table_name)) + _status = self._preload_table(_table_name) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + def _describe_index(self, table_name, metadata=None): + return self.router.connection(metadata=metadata).describe_index(table_name) + + @mark_grpc_method + def DescribeIndex(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message)) + + metadata = {'resp_class': milvus_pb2.IndexParam} + + logger.info('DescribeIndex {}'.format(_table_name)) + _status, _index_param = self._describe_index(table_name=_table_name, + metadata=metadata) + + if not _index_param: + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message)) + + _index = milvus_pb2.Index(index_type=_index_param._index_type, + nlist=_index_param._nlist) + + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + table_name=_table_name, + index=_index) + + def _drop_index(self, table_name): + return self.router.connection().drop_index(table_name) + + @mark_grpc_method + def DropIndex(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + logger.info('DropIndex {}'.format(_table_name)) + _status = self._drop_index(_table_name) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) diff --git a/shards/mishards/settings.py b/shards/mishards/settings.py new file mode 100644 index 0000000000..21a3bb7a65 --- /dev/null +++ b/shards/mishards/settings.py @@ -0,0 +1,94 @@ +import sys +import os + +from environs import Env +env = Env() + +FROM_EXAMPLE = env.bool('FROM_EXAMPLE', False) +if FROM_EXAMPLE: + from dotenv import load_dotenv + load_dotenv('./mishards/.env.example') +else: + env.read_env() + +DEBUG = env.bool('DEBUG', False) + +LOG_LEVEL = env.str('LOG_LEVEL', 'DEBUG' if DEBUG else 'INFO') +LOG_PATH = env.str('LOG_PATH', '/tmp/mishards') +LOG_NAME = env.str('LOG_NAME', 'logfile') +TIMEZONE = env.str('TIMEZONE', 'UTC') + +from 
utils.logger_helper import config +config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) + +TIMEOUT = env.int('TIMEOUT', 60) +MAX_RETRY = env.int('MAX_RETRY', 3) + +SERVER_PORT = env.int('SERVER_PORT', 19530) +SERVER_TEST_PORT = env.int('SERVER_TEST_PORT', 19530) +WOSERVER = env.str('WOSERVER') + +SD_PROVIDER_SETTINGS = None +SD_PROVIDER = env.str('SD_PROVIDER', 'Kubernetes') +if SD_PROVIDER == 'Kubernetes': + from sd.kubernetes_provider import KubernetesProviderSettings + SD_PROVIDER_SETTINGS = KubernetesProviderSettings( + namespace=env.str('SD_NAMESPACE', ''), + in_cluster=env.bool('SD_IN_CLUSTER', False), + poll_interval=env.int('SD_POLL_INTERVAL', 5), + pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), + label_selector=env.str('SD_LABEL_SELECTOR', ''), + port=env.int('SD_PORT', 19530)) +elif SD_PROVIDER == 'Static': + from sd.static_provider import StaticProviderSettings + SD_PROVIDER_SETTINGS = StaticProviderSettings( + hosts=env.list('SD_STATIC_HOSTS', []), + port=env.int('SD_STATIC_PORT', 19530)) + +# TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') + + +class TracingConfig: + TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') + TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) + TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', False) + TRACING_CONFIG = { + 'sampler': { + 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), + 'param': env.str('TRACING_SAMPLER_PARAM', "1"), + }, + 'local_agent': { + 'reporting_host': env.str('TRACING_REPORTING_HOST', '127.0.0.1'), + 'reporting_port': env.str('TRACING_REPORTING_PORT', '5775') + }, + 'logging': env.bool('TRACING_LOGGING', True) + } + DEFAULT_TRACING_CONFIG = { + 'sampler': { + 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), + 'param': env.str('TRACING_SAMPLER_PARAM', "0"), + } + } + + +class DefaultConfig: + SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') + SQL_ECHO = env.bool('SQL_ECHO', False) + TRACING_TYPE = env.str('TRACING_TYPE', '') + ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_NAME', 'FileBasedHashRingRouter') + + +class TestingConfig(DefaultConfig): + SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI', '') + SQL_ECHO = env.bool('SQL_TEST_ECHO', False) + TRACING_TYPE = env.str('TRACING_TEST_TYPE', '') + ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_TEST_NAME', 'FileBasedHashRingRouter') + + +if __name__ == '__main__': + import logging + logger = logging.getLogger(__name__) + logger.debug('DEBUG') + logger.info('INFO') + logger.warn('WARN') + logger.error('ERROR') diff --git a/shards/mishards/test_connections.py b/shards/mishards/test_connections.py new file mode 100644 index 0000000000..819d2e03da --- /dev/null +++ b/shards/mishards/test_connections.py @@ -0,0 +1,101 @@ +import logging +import pytest +import mock + +from milvus import Milvus +from mishards.connections import (ConnectionMgr, Connection) +from mishards import exceptions + +logger = logging.getLogger(__name__) + + +@pytest.mark.usefixtures('app') +class TestConnection: + def test_manager(self): + mgr = ConnectionMgr() + + mgr.register('pod1', '111') + mgr.register('pod2', '222') + mgr.register('pod2', '222') + mgr.register('pod2', '2222') + assert len(mgr.conn_names) == 2 + + mgr.unregister('pod1') + assert len(mgr.conn_names) == 1 + + mgr.unregister('pod2') + assert len(mgr.conn_names) == 0 + + mgr.register('WOSERVER', 'xxxx') + assert len(mgr.conn_names) == 0 + + assert not mgr.conn('XXXX', None) + with pytest.raises(exceptions.ConnectionNotFoundError): + mgr.conn('XXXX', None, True) + + 
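+        # Editor's note: as the asserts above show, 'WOSERVER' is kept out of
+        # conn_names (the searchable read-only pool) even though conn() can
+        # still return it; the write-oriented master connection is tracked
+        # separately from the discovered search servers.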
mgr.conn('WOSERVER', None) + + def test_connection(self): + class Conn: + def __init__(self, state): + self.state = state + + def connect(self, uri): + return self.state + + def connected(self): + return self.state + + FAIL_CONN = Conn(False) + PASS_CONN = Conn(True) + + class Retry: + def __init__(self): + self.times = 0 + + def __call__(self, conn): + self.times += 1 + logger.info('Retrying {}'.format(self.times)) + + class Func(): + def __init__(self): + self.executed = False + + def __call__(self): + self.executed = True + + max_retry = 3 + + RetryObj = Retry() + + c = Connection('client', + uri='xx', + max_retry=max_retry, + on_retry_func=RetryObj) + c.conn = FAIL_CONN + ff = Func() + this_connect = c.connect(func=ff) + with pytest.raises(exceptions.ConnectionConnectError): + this_connect() + assert RetryObj.times == max_retry + assert not ff.executed + RetryObj = Retry() + + c.conn = PASS_CONN + this_connect = c.connect(func=ff) + this_connect() + assert ff.executed + assert RetryObj.times == 0 + + this_connect = c.connect(func=None) + with pytest.raises(TypeError): + this_connect() + + errors = [] + + def error_handler(err): + errors.append(err) + + this_connect = c.connect(func=None, exception_handler=error_handler) + this_connect() + assert len(errors) == 1 diff --git a/shards/mishards/test_models.py b/shards/mishards/test_models.py new file mode 100644 index 0000000000..d60b62713e --- /dev/null +++ b/shards/mishards/test_models.py @@ -0,0 +1,39 @@ +import logging +import pytest +from mishards.factories import TableFiles, Tables, TableFilesFactory, TablesFactory +from mishards import db, create_app, settings +from mishards.factories import ( + Tables, TableFiles, + TablesFactory, TableFilesFactory +) + +logger = logging.getLogger(__name__) + + +@pytest.mark.usefixtures('app') +class TestModels: + def test_files_to_search(self): + table = TablesFactory() + new_files_cnt = 5 + to_index_cnt = 10 + raw_cnt = 20 + backup_cnt = 12 + to_delete_cnt = 9 + index_cnt = 8 + new_index_cnt = 6 + new_merge_cnt = 11 + + new_files = TableFilesFactory.create_batch(new_files_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW, date=110) + to_index_files = TableFilesFactory.create_batch(to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX, date=110) + raw_files = TableFilesFactory.create_batch(raw_cnt, table=table, file_type=TableFiles.FILE_TYPE_RAW, date=120) + backup_files = TableFilesFactory.create_batch(backup_cnt, table=table, file_type=TableFiles.FILE_TYPE_BACKUP, date=110) + index_files = TableFilesFactory.create_batch(index_cnt, table=table, file_type=TableFiles.FILE_TYPE_INDEX, date=110) + new_index_files = TableFilesFactory.create_batch(new_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_INDEX, date=110) + new_merge_files = TableFilesFactory.create_batch(new_merge_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_MERGE, date=110) + to_delete_files = TableFilesFactory.create_batch(to_delete_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_DELETE, date=110) + assert table.files_to_search().count() == raw_cnt + index_cnt + to_index_cnt + + assert table.files_to_search([(100, 115)]).count() == index_cnt + to_index_cnt + assert table.files_to_search([(111, 120)]).count() == 0 + assert table.files_to_search([(111, 121)]).count() == raw_cnt + assert table.files_to_search([(110, 121)]).count() == raw_cnt + index_cnt + to_index_cnt diff --git a/shards/mishards/test_server.py b/shards/mishards/test_server.py new file mode 100644 index 0000000000..efd3912076 --- 
/dev/null +++ b/shards/mishards/test_server.py @@ -0,0 +1,279 @@ +import logging +import pytest +import mock +import datetime +import random +import faker +import inspect +from milvus import Milvus +from milvus.client.types import Status, IndexType, MetricType +from milvus.client.abstract import IndexParam, TableSchema +from milvus.grpc_gen import status_pb2, milvus_pb2 +from mishards import db, create_app, settings +from mishards.service_handler import ServiceHandler +from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser +from mishards.factories import TableFilesFactory, TablesFactory, TableFiles, Tables +from mishards.routings import RouterMixin + +logger = logging.getLogger(__name__) + +OK = Status(code=Status.SUCCESS, message='Success') +BAD = Status(code=Status.PERMISSION_DENIED, message='Fail') + + +@pytest.mark.usefixtures('started_app') +class TestServer: + @property + def client(self): + m = Milvus() + m.connect(host='localhost', port=settings.SERVER_TEST_PORT) + return m + + def test_server_start(self, started_app): + assert started_app.conn_mgr.metas.get('WOSERVER') == settings.WOSERVER + + def test_cmd(self, started_app): + ServiceHandler._get_server_version = mock.MagicMock(return_value=(OK, + '')) + status, _ = self.client.server_version() + assert status.OK() + + Parser.parse_proto_Command = mock.MagicMock(return_value=(BAD, 'cmd')) + status, _ = self.client.server_version() + assert not status.OK() + + def test_drop_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + ServiceHandler._drop_index = mock.MagicMock(return_value=OK) + status = self.client.drop_index(table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client.drop_index(table_name) + assert not status.OK() + + def test_describe_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + index_type = IndexType.FLAT + nlist = 1 + index_param = IndexParam(table_name=table_name, + index_type=index_type, + nlist=nlist) + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._describe_index = mock.MagicMock( + return_value=(OK, index_param)) + status, ret = self.client.describe_index(table_name) + assert status.OK() + assert ret._table_name == index_param._table_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status, _ = self.client.describe_index(table_name) + assert not status.OK() + + def test_preload(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._preload_table = mock.MagicMock(return_value=OK) + status = self.client.preload_table(table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client.preload_table(table_name) + assert not status.OK() + + @pytest.mark.skip + def test_delete_by_range(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + unpacked = table_name, datetime.datetime.today( + ), datetime.datetime.today() + + Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( + return_value=(OK, unpacked)) + ServiceHandler._delete_by_range = mock.MagicMock(return_value=OK) + status = self.client.delete_vectors_by_range( + *unpacked) + assert status.OK() + + Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( + return_value=(BAD, unpacked)) + status = 
self.client.delete_vectors_by_range( + *unpacked) + assert not status.OK() + + def test_count_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + count = random.randint(100, 200) + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._count_table = mock.MagicMock(return_value=(OK, count)) + status, ret = self.client.get_table_row_count(table_name) + assert status.OK() + assert ret == count + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status, _ = self.client.get_table_row_count(table_name) + assert not status.OK() + + def test_show_tables(self, started_app): + tables = ['t1', 't2'] + ServiceHandler._show_tables = mock.MagicMock(return_value=(OK, tables)) + status, ret = self.client.show_tables() + assert status.OK() + assert ret == tables + + def test_describe_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + dimension = 128 + nlist = 1 + table_schema = TableSchema(table_name=table_name, + index_file_size=100, + metric_type=MetricType.L2, + dimension=dimension) + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_schema.table_name)) + ServiceHandler._describe_table = mock.MagicMock( + return_value=(OK, table_schema)) + status, _ = self.client.describe_table(table_name) + assert status.OK() + + ServiceHandler._describe_table = mock.MagicMock( + return_value=(BAD, table_schema)) + status, _ = self.client.describe_table(table_name) + assert not status.OK() + + Parser.parse_proto_TableName = mock.MagicMock(return_value=(BAD, + 'cmd')) + status, ret = self.client.describe_table(table_name) + assert not status.OK() + + def test_insert(self, started_app): + table_name = inspect.currentframe().f_code.co_name + vectors = [[random.random() for _ in range(16)] for _ in range(10)] + ids = [random.randint(1000000, 20000000) for _ in range(10)] + ServiceHandler._add_vectors = mock.MagicMock(return_value=(OK, ids)) + status, ret = self.client.add_vectors( + table_name=table_name, records=vectors) + assert status.OK() + assert ids == ret + + def test_create_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + unpacks = table_name, None + Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(OK, + unpacks)) + ServiceHandler._create_index = mock.MagicMock(return_value=OK) + status = self.client.create_index(table_name=table_name) + assert status.OK() + + Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(BAD, + None)) + status = self.client.create_index(table_name=table_name) + assert not status.OK() + + def test_drop_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._delete_table = mock.MagicMock(return_value=OK) + status = self.client.delete_table(table_name=table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client.delete_table(table_name=table_name) + assert not status.OK() + + def test_has_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._has_table = mock.MagicMock(return_value=(OK, True)) + has = self.client.has_table(table_name=table_name) + assert has + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status, has = 
self.client.has_table(table_name=table_name) + assert not status.OK() + assert not has + + def test_create_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + dimension = 128 + table_schema = dict(table_name=table_name, + index_file_size=100, + metric_type=MetricType.L2, + dimension=dimension) + + ServiceHandler._create_table = mock.MagicMock(return_value=OK) + status = self.client.create_table(table_schema) + assert status.OK() + + Parser.parse_proto_TableSchema = mock.MagicMock(return_value=(BAD, + None)) + status = self.client.create_table(table_schema) + assert not status.OK() + + def random_data(self, n, dimension): + return [[random.random() for _ in range(dimension)] for _ in range(n)] + + def test_search(self, started_app): + table_name = inspect.currentframe().f_code.co_name + to_index_cnt = random.randint(10, 20) + table = TablesFactory(table_id=table_name, state=Tables.NORMAL) + to_index_files = TableFilesFactory.create_batch( + to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX) + topk = random.randint(5, 10) + nq = random.randint(5, 10) + param = { + 'table_name': table_name, + 'query_records': self.random_data(nq, table.dimension), + 'top_k': topk, + 'nprobe': 2049 + } + + result = [ + milvus_pb2.TopKQueryResult(query_result_arrays=[ + milvus_pb2.QueryResult(id=i, distance=random.random()) + for i in range(topk) + ]) for i in range(nq) + ] + + mock_results = milvus_pb2.TopKQueryResultList(status=status_pb2.Status( + error_code=status_pb2.SUCCESS, reason="Success"), + topk_query_result=result) + + table_schema = TableSchema(table_name=table_name, + index_file_size=table.index_file_size, + metric_type=table.metric_type, + dimension=table.dimension) + + status, _ = self.client.search_vectors(**param) + assert status.code == Status.ILLEGAL_ARGUMENT + + param['nprobe'] = 2048 + RouterMixin.connection = mock.MagicMock(return_value=Milvus()) + RouterMixin.query_conn = mock.MagicMock(return_value=Milvus()) + Milvus.describe_table = mock.MagicMock(return_value=(BAD, + table_schema)) + status, ret = self.client.search_vectors(**param) + assert status.code == Status.TABLE_NOT_EXISTS + + Milvus.describe_table = mock.MagicMock(return_value=(OK, table_schema)) + Milvus.search_vectors_in_files = mock.MagicMock( + return_value=mock_results) + + status, ret = self.client.search_vectors(**param) + assert status.OK() + assert len(ret) == nq diff --git a/shards/mishards/utilities.py b/shards/mishards/utilities.py new file mode 100644 index 0000000000..42e982b5f1 --- /dev/null +++ b/shards/mishards/utilities.py @@ -0,0 +1,20 @@ +import datetime +from mishards import exceptions + + +def format_date(start, end): + return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, + (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) + + +def range_to_date(range_obj, metadata=None): + try: + start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') + end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') + assert start < end + except (ValueError, AssertionError): + raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( + range_obj.start_date, range_obj.end_date), + metadata=metadata) + + return format_date(start, end) diff --git a/shards/requirements.txt b/shards/requirements.txt new file mode 100644 index 0000000000..ae224e92ed --- /dev/null +++ b/shards/requirements.txt @@ -0,0 +1,36 @@ +environs==4.2.0 +factory-boy==2.12.0 +Faker==1.0.7 +fire==0.1.3 +google-auth==1.6.3 +grpcio==1.22.0 
+grpcio-tools==1.22.0 +kubernetes==10.0.1 +MarkupSafe==1.1.1 +marshmallow==2.19.5 +pymysql==0.9.3 +protobuf==3.9.1 +py==1.8.0 +pyasn1==0.4.7 +pyasn1-modules==0.2.6 +pylint==2.3.1 +pymilvus-test==0.2.28 +#pymilvus==0.2.0 +pyparsing==2.4.0 +pytest==4.6.3 +pytest-level==0.1.1 +pytest-print==0.1.2 +pytest-repeat==0.8.0 +pytest-timeout==1.3.3 +python-dateutil==2.8.0 +python-dotenv==0.10.3 +pytz==2019.1 +requests==2.22.0 +requests-oauthlib==1.2.0 +rsa==4.0 +six==1.12.0 +SQLAlchemy==1.3.5 +urllib3==1.25.3 +jaeger-client>=3.4.0 +grpcio-opentracing>=1.0 +mock==2.0.0 diff --git a/shards/sd/__init__.py b/shards/sd/__init__.py new file mode 100644 index 0000000000..7943887d0f --- /dev/null +++ b/shards/sd/__init__.py @@ -0,0 +1,28 @@ +import logging +import inspect +# from utils import singleton + +logger = logging.getLogger(__name__) + + +class ProviderManager: + PROVIDERS = {} + + @classmethod + def register_service_provider(cls, target): + if inspect.isfunction(target): + cls.PROVIDERS[target.__name__] = target + elif inspect.isclass(target): + name = target.__dict__.get('NAME', None) + name = name if name else target.__class__.__name__ + cls.PROVIDERS[name] = target + else: + assert False, 'Cannot register_service_provider for: {}'.format(target) + return target + + @classmethod + def get_provider(cls, name): + return cls.PROVIDERS.get(name, None) + + +from sd import kubernetes_provider, static_provider diff --git a/shards/sd/kubernetes_provider.py b/shards/sd/kubernetes_provider.py new file mode 100644 index 0000000000..eb113db007 --- /dev/null +++ b/shards/sd/kubernetes_provider.py @@ -0,0 +1,331 @@ +import os +import sys +if __name__ == '__main__': + sys.path.append(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))) + +import re +import logging +import time +import copy +import threading +import queue +import enum +from kubernetes import client, config, watch + +from utils import singleton +from sd import ProviderManager + +logger = logging.getLogger(__name__) + +INCLUSTER_NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' + + +class EventType(enum.Enum): + PodHeartBeat = 1 + Watch = 2 + + +class K8SMixin: + def __init__(self, namespace, in_cluster=False, **kwargs): + self.namespace = namespace + self.in_cluster = in_cluster + self.kwargs = kwargs + self.v1 = kwargs.get('v1', None) + if not self.namespace: + self.namespace = open(INCLUSTER_NAMESPACE_PATH).read() + + if not self.v1: + config.load_incluster_config( + ) if self.in_cluster else config.load_kube_config() + self.v1 = client.CoreV1Api() + + +class K8SHeartbeatHandler(threading.Thread, K8SMixin): + def __init__(self, + message_queue, + namespace, + label_selector, + in_cluster=False, + **kwargs): + K8SMixin.__init__(self, + namespace=namespace, + in_cluster=in_cluster, + **kwargs) + threading.Thread.__init__(self) + self.queue = message_queue + self.terminate = False + self.label_selector = label_selector + self.poll_interval = kwargs.get('poll_interval', 5) + + def run(self): + while not self.terminate: + try: + pods = self.v1.list_namespaced_pod( + namespace=self.namespace, + label_selector=self.label_selector) + event_message = {'eType': EventType.PodHeartBeat, 'events': []} + for item in pods.items: + pod = self.v1.read_namespaced_pod(name=item.metadata.name, + namespace=self.namespace) + name = pod.metadata.name + ip = pod.status.pod_ip + phase = pod.status.phase + reason = pod.status.reason + message = pod.status.message + ready = True if phase == 'Running' else False + + pod_event = 
dict(pod=name, + ip=ip, + ready=ready, + reason=reason, + message=message) + + event_message['events'].append(pod_event) + + self.queue.put(event_message) + + except Exception as exc: + logger.error(exc) + + time.sleep(self.poll_interval) + + def stop(self): + self.terminate = True + + +class K8SEventListener(threading.Thread, K8SMixin): + def __init__(self, message_queue, namespace, in_cluster=False, **kwargs): + K8SMixin.__init__(self, + namespace=namespace, + in_cluster=in_cluster, + **kwargs) + threading.Thread.__init__(self) + self.queue = message_queue + self.terminate = False + self.at_start_up = True + self._stop_event = threading.Event() + + def stop(self): + self.terminate = True + self._stop_event.set() + + def run(self): + resource_version = '' + w = watch.Watch() + for event in w.stream(self.v1.list_namespaced_event, + namespace=self.namespace, + field_selector='involvedObject.kind=Pod'): + if self.terminate: + break + + resource_version = int(event['object'].metadata.resource_version) + + info = dict( + eType=EventType.Watch, + pod=event['object'].involved_object.name, + reason=event['object'].reason, + message=event['object'].message, + start_up=self.at_start_up, + ) + self.at_start_up = False + # logger.info('Received event: {}'.format(info)) + self.queue.put(info) + + +class EventHandler(threading.Thread): + def __init__(self, mgr, message_queue, namespace, pod_patt, **kwargs): + threading.Thread.__init__(self) + self.mgr = mgr + self.queue = message_queue + self.kwargs = kwargs + self.terminate = False + self.pod_patt = re.compile(pod_patt) + self.namespace = namespace + + def stop(self): + self.terminate = True + + def on_drop(self, event, **kwargs): + pass + + def on_pod_started(self, event, **kwargs): + try_cnt = 3 + pod = None + while try_cnt > 0: + try_cnt -= 1 + try: + pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], + namespace=self.namespace) + if not pod.status.pod_ip: + time.sleep(0.5) + continue + break + except client.rest.ApiException as exc: + time.sleep(0.5) + + if try_cnt <= 0 and not pod: + if not event['start_up']: + logger.error('Pod {} is started but cannot read pod'.format( + event['pod'])) + return + elif try_cnt <= 0 and not pod.status.pod_ip: + logger.warning('NoPodIPFoundError') + return + + logger.info('Register POD {} with IP {}'.format( + pod.metadata.name, pod.status.pod_ip)) + self.mgr.add_pod(name=pod.metadata.name, ip=pod.status.pod_ip) + + def on_pod_killing(self, event, **kwargs): + logger.info('Unregister POD {}'.format(event['pod'])) + self.mgr.delete_pod(name=event['pod']) + + def on_pod_heartbeat(self, event, **kwargs): + names = self.mgr.conn_mgr.conn_names + + running_names = set() + for each_event in event['events']: + if each_event['ready']: + self.mgr.add_pod(name=each_event['pod'], ip=each_event['ip']) + running_names.add(each_event['pod']) + else: + self.mgr.delete_pod(name=each_event['pod']) + + to_delete = names - running_names + for name in to_delete: + self.mgr.delete_pod(name) + + logger.info(self.mgr.conn_mgr.conn_names) + + def handle_event(self, event): + if event['eType'] == EventType.PodHeartBeat: + return self.on_pod_heartbeat(event) + + if not event or (event['reason'] not in ('Started', 'Killing')): + return self.on_drop(event) + + if not re.match(self.pod_patt, event['pod']): + return self.on_drop(event) + + logger.info('Handling event: {}'.format(event)) + + if event['reason'] == 'Started': + return self.on_pod_started(event) + + return self.on_pod_killing(event) + + def run(self): + while not 
self.terminate:
+            try:
+                event = self.queue.get(timeout=1)
+                self.handle_event(event)
+            except queue.Empty:
+                continue
+
+
+class KubernetesProviderSettings:
+    def __init__(self, namespace, pod_patt, label_selector, in_cluster,
+                 poll_interval, port=None, **kwargs):
+        self.namespace = namespace
+        self.pod_patt = pod_patt
+        self.label_selector = label_selector
+        self.in_cluster = in_cluster
+        self.poll_interval = poll_interval
+        self.port = int(port) if port else 19530
+
+
+@singleton
+@ProviderManager.register_service_provider
+class KubernetesProvider(object):
+    NAME = 'Kubernetes'
+
+    def __init__(self, settings, conn_mgr, **kwargs):
+        self.namespace = settings.namespace
+        self.pod_patt = settings.pod_patt
+        self.label_selector = settings.label_selector
+        self.in_cluster = settings.in_cluster
+        self.poll_interval = settings.poll_interval
+        self.port = settings.port
+        self.kwargs = kwargs
+        self.queue = queue.Queue()
+
+        self.conn_mgr = conn_mgr
+
+        if not self.namespace:
+            self.namespace = open(INCLUSTER_NAMESPACE_PATH).read()
+
+        config.load_incluster_config(
+        ) if self.in_cluster else config.load_kube_config()
+        self.v1 = client.CoreV1Api()
+
+        self.listener = K8SEventListener(message_queue=self.queue,
+                                         namespace=self.namespace,
+                                         in_cluster=self.in_cluster,
+                                         v1=self.v1,
+                                         **kwargs)
+
+        self.pod_heartbeater = K8SHeartbeatHandler(
+            message_queue=self.queue,
+            namespace=self.namespace,
+            label_selector=self.label_selector,
+            in_cluster=self.in_cluster,
+            v1=self.v1,
+            poll_interval=self.poll_interval,
+            **kwargs)
+
+        self.event_handler = EventHandler(mgr=self,
+                                          message_queue=self.queue,
+                                          namespace=self.namespace,
+                                          pod_patt=self.pod_patt,
+                                          **kwargs)
+
+    def add_pod(self, name, ip):
+        self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port))
+
+    def delete_pod(self, name):
+        self.conn_mgr.unregister(name)
+
+    def start(self):
+        self.listener.daemon = True
+        self.listener.start()
+        self.event_handler.start()
+
+        self.pod_heartbeater.start()
+
+    def stop(self):
+        self.listener.stop()
+        self.pod_heartbeater.stop()
+        self.event_handler.stop()
+
+
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.INFO)
+
+    class Connect:
+        def register(self, name, value):
+            logger.error('Register: {} - {}'.format(name, value))
+
+        def unregister(self, name):
+            logger.error('Unregister: {}'.format(name))
+
+        @property
+        def conn_names(self):
+            return set()
+
+    connect_mgr = Connect()
+
+    settings = KubernetesProviderSettings(namespace='xp',
+                                          pod_patt=".*-ro-servers-.*",
+                                          label_selector='tier=ro-servers',
+                                          poll_interval=5,
+                                          in_cluster=False)
+
+    provider_class = ProviderManager.get_provider('Kubernetes')
+    t = provider_class(conn_mgr=connect_mgr, settings=settings)
+    t.start()
+    cnt = 100
+    while cnt > 0:
+        time.sleep(2)
+        cnt -= 1
+    t.stop()
diff --git a/shards/sd/static_provider.py b/shards/sd/static_provider.py
new file mode 100644
index 0000000000..e88780740f
--- /dev/null
+++ b/shards/sd/static_provider.py
@@ -0,0 +1,39 @@
+import os
+import sys
+if __name__ == '__main__':
+    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+import socket
+from utils import singleton
+from sd import ProviderManager
+
+
+class StaticProviderSettings:
+    def __init__(self, hosts, port=None):
+        self.hosts = hosts
+        self.port = int(port) if port else 19530
+
+
+@singleton
+@ProviderManager.register_service_provider
+class StaticProvider(object):
+    NAME = 'Static'
+
+    def __init__(self, settings, conn_mgr, **kwargs):
+        self.conn_mgr = conn_mgr
+        self.hosts = 
[socket.gethostbyname(host) for host in settings.hosts] + self.port = settings.port + + def start(self): + for host in self.hosts: + self.add_pod(host, host) + + def stop(self): + for host in self.hosts: + self.delete_pod(host) + + def add_pod(self, name, ip): + self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) + + def delete_pod(self, name): + self.conn_mgr.unregister(name) diff --git a/shards/setup.cfg b/shards/setup.cfg new file mode 100644 index 0000000000..4a88432914 --- /dev/null +++ b/shards/setup.cfg @@ -0,0 +1,4 @@ +[tool:pytest] +testpaths = mishards +log_cli=true +log_cli_level=info diff --git a/shards/start_services.yml b/shards/start_services.yml new file mode 100644 index 0000000000..57fe061bb7 --- /dev/null +++ b/shards/start_services.yml @@ -0,0 +1,45 @@ +version: "2.3" +services: + milvus: + runtime: nvidia + restart: always + image: registry.zilliz.com/milvus/engine:branch-0.5.0-release-4316de + # ports: + # - "0.0.0.0:19530:19530" + volumes: + - /tmp/milvus/db:/opt/milvus/db + + jaeger: + restart: always + image: jaegertracing/all-in-one:1.14 + ports: + - "0.0.0.0:5775:5775/udp" + - "0.0.0.0:16686:16686" + - "0.0.0.0:9441:9441" + environment: + COLLECTOR_ZIPKIN_HTTP_PORT: 9411 + + mishards: + restart: always + image: registry.zilliz.com/milvus/mishards:v0.0.4 + ports: + - "0.0.0.0:19530:19531" + - "0.0.0.0:19532:19532" + volumes: + - /tmp/milvus/db:/tmp/milvus/db + # - /tmp/mishards_env:/source/mishards/.env + command: ["python", "mishards/main.py"] + environment: + FROM_EXAMPLE: 'true' + DEBUG: 'true' + SERVER_PORT: 19531 + WOSERVER: tcp://milvus:19530 + SD_STATIC_HOSTS: milvus + TRACING_TYPE: jaeger + TRACING_SERVICE_NAME: mishards-demo + TRACING_REPORTING_HOST: jaeger + TRACING_REPORTING_PORT: 5775 + + depends_on: + - milvus + - jaeger diff --git a/shards/tracing/__init__.py b/shards/tracing/__init__.py new file mode 100644 index 0000000000..64a5b50d15 --- /dev/null +++ b/shards/tracing/__init__.py @@ -0,0 +1,43 @@ +from contextlib import contextmanager + + +def empty_server_interceptor_decorator(target_server, interceptor): + return target_server + + +@contextmanager +def EmptySpan(*args, **kwargs): + yield None + return + + +class Tracer: + def __init__(self, + tracer=None, + interceptor=None, + server_decorator=empty_server_interceptor_decorator): + self.tracer = tracer + self.interceptor = interceptor + self.server_decorator = server_decorator + + def decorate(self, server): + return self.server_decorator(server, self.interceptor) + + @property + def empty(self): + return self.tracer is None + + def close(self): + self.tracer and self.tracer.close() + + def start_span(self, + operation_name=None, + child_of=None, + references=None, + tags=None, + start_time=None, + ignore_active_span=False): + if self.empty: + return EmptySpan() + return self.tracer.start_span(operation_name, child_of, references, + tags, start_time, ignore_active_span) diff --git a/shards/tracing/factory.py b/shards/tracing/factory.py new file mode 100644 index 0000000000..14fcde2eb3 --- /dev/null +++ b/shards/tracing/factory.py @@ -0,0 +1,40 @@ +import logging +from jaeger_client import Config +from grpc_opentracing.grpcext import intercept_server +from grpc_opentracing import open_tracing_server_interceptor + +from tracing import (Tracer, empty_server_interceptor_decorator) + +logger = logging.getLogger(__name__) + + +class TracerFactory: + @classmethod + def new_tracer(cls, + tracer_type, + tracer_config, + span_decorator=None, + **kwargs): + if not tracer_type: + return 
Tracer() + config = tracer_config.TRACING_CONFIG + service_name = tracer_config.TRACING_SERVICE_NAME + validate = tracer_config.TRACING_VALIDATE + # if not tracer_type: + # tracer_type = 'jaeger' + # config = tracer_config.DEFAULT_TRACING_CONFIG + + if tracer_type.lower() == 'jaeger': + config = Config(config=config, + service_name=service_name, + validate=validate) + + tracer = config.initialize_tracer() + tracer_interceptor = open_tracing_server_interceptor( + tracer, + log_payloads=tracer_config.TRACING_LOG_PAYLOAD, + span_decorator=span_decorator) + + return Tracer(tracer, tracer_interceptor, intercept_server) + + assert False, 'Unsupported tracer type: {}'.format(tracer_type) diff --git a/shards/utils/__init__.py b/shards/utils/__init__.py new file mode 100644 index 0000000000..c1d55e76c0 --- /dev/null +++ b/shards/utils/__init__.py @@ -0,0 +1,11 @@ +from functools import wraps + + +def singleton(cls): + instances = {} + @wraps(cls) + def getinstance(*args, **kw): + if cls not in instances: + instances[cls] = cls(*args, **kw) + return instances[cls] + return getinstance diff --git a/shards/utils/logger_helper.py b/shards/utils/logger_helper.py new file mode 100644 index 0000000000..b4e3b9c5b6 --- /dev/null +++ b/shards/utils/logger_helper.py @@ -0,0 +1,152 @@ +import os +import datetime +from pytz import timezone +from logging import Filter +import logging.config + + +class InfoFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.INFO + + +class DebugFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.DEBUG + + +class WarnFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.WARN + + +class ErrorFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.ERROR + + +class CriticalFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.CRITICAL + + +COLORS = { + 'HEADER': '\033[95m', + 'INFO': '\033[92m', + 'DEBUG': '\033[94m', + 'WARNING': '\033[93m', + 'ERROR': '\033[95m', + 'CRITICAL': '\033[91m', + 'ENDC': '\033[0m', +} + + +class ColorFulFormatColMixin: + def format_col(self, message_str, level_name): + if level_name in COLORS.keys(): + message_str = COLORS.get(level_name) + message_str + COLORS.get( + 'ENDC') + return message_str + + +class ColorfulFormatter(logging.Formatter, ColorFulFormatColMixin): + def format(self, record): + message_str = super(ColorfulFormatter, self).format(record) + + return self.format_col(message_str, level_name=record.levelname) + + +def config(log_level, log_path, name, tz='UTC'): + def build_log_file(level, log_path, name, tz): + utc_now = datetime.datetime.utcnow() + utc_tz = timezone('UTC') + local_tz = timezone(tz) + tznow = utc_now.replace(tzinfo=utc_tz).astimezone(local_tz) + return '{}-{}-{}.log'.format(os.path.join(log_path, name), tznow.strftime("%m-%d-%Y-%H:%M:%S"), + level) + + if not os.path.exists(log_path): + os.makedirs(log_path) + + LOGGING = { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + 'default': { + 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', + }, + 'colorful_console': { + 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', + '()': ColorfulFormatter, + }, + }, + 'filters': { + 'InfoFilter': { + '()': InfoFilter, + }, + 'DebugFilter': { + '()': DebugFilter, + }, + 'WarnFilter': { + '()': WarnFilter, + }, + 'ErrorFilter': { + '()': ErrorFilter, + }, + 
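+        # One filter per level: each file handler below receives records of
+        # exactly its own severity, instead of everything at or above its
+        # 'level' threshold.
+        # Note: RotatingFileHandler defaults to maxBytes=0, i.e. it never
+        # rolls over unless maxBytes/backupCount are also configured here.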
'CriticalFilter': { + '()': CriticalFilter, + }, + }, + 'handlers': { + 'milvus_celery_console': { + 'class': 'logging.StreamHandler', + 'formatter': 'colorful_console', + }, + 'milvus_debug_file': { + 'level': 'DEBUG', + 'filters': ['DebugFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('debug', log_path, name, tz) + }, + 'milvus_info_file': { + 'level': 'INFO', + 'filters': ['InfoFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('info', log_path, name, tz) + }, + 'milvus_warn_file': { + 'level': 'WARN', + 'filters': ['WarnFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('warn', log_path, name, tz) + }, + 'milvus_error_file': { + 'level': 'ERROR', + 'filters': ['ErrorFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('error', log_path, name, tz) + }, + 'milvus_critical_file': { + 'level': 'CRITICAL', + 'filters': ['CriticalFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('critical', log_path, name, tz) + }, + }, + 'loggers': { + '': { + 'handlers': ['milvus_celery_console', 'milvus_info_file', 'milvus_debug_file', 'milvus_warn_file', + 'milvus_error_file', 'milvus_critical_file'], + 'level': log_level, + 'propagate': False + }, + }, + 'propagate': False, + } + + logging.config.dictConfig(LOGGING) From af319887d4c28605cd4c42c3b8e7f7a86b0eebe7 Mon Sep 17 00:00:00 2001 From: jinhai Date: Tue, 5 Nov 2019 01:48:11 +0800 Subject: [PATCH 092/307] Update CHANGELOG.md --- CHANGELOG.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f5f9725825..95ce8cd886 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,17 @@ Please mark all change in change log and use the ticket from JIRA. 
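For reference, a minimal usage sketch for the config helper defined in shards/utils/logger_helper.py above. The log path, logger name, and timezone are illustrative, and the import assumes the shards/ directory is on sys.path:

    import logging
    from utils import logger_helper

    # Illustrative values; any writable directory and pytz timezone name work.
    logger_helper.config(log_level='DEBUG', log_path='/tmp/mishards_logs',
                         name='mishards', tz='UTC')

    logger = logging.getLogger(__name__)
    logger.info('service started')       # console + the per-level info file
    logger.error('backend unreachable')  # console + the per-level error file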
+# Milvus 0.6.0 (TODO) + +## Bug + +## Feature + +## Improvement + +## Task + + # Milvus 0.5.2 (TODO) ## Bug From 4ca6b59b912770d2832ffd81fc0ca9a2ef157900 Mon Sep 17 00:00:00 2001 From: youny626 Date: Tue, 5 Nov 2019 10:23:59 +0800 Subject: [PATCH 093/307] add CPU version --- core/CMakeLists.txt | 242 +++++---- core/build.sh | 18 +- core/cmake/DefineOptions.cmake | 5 + core/conf/server_config.template | 13 +- core/src/CMakeLists.txt | 42 +- core/src/config.h | 3 + core/src/config.h.in | 3 + core/src/db/DB.h | 7 +- core/src/db/DBImpl.cpp | 18 +- core/src/db/DBImpl.h | 9 +- core/src/db/Types.h | 8 +- core/src/db/engine/ExecutionEngineImpl.cpp | 38 +- core/src/index/CMakeLists.txt | 47 +- core/src/index/cmake/DefineOptionsCore.cmake | 11 +- .../index/cmake/ThirdPartyPackagesCore.cmake | 322 ++++++----- core/src/index/knowhere/CMakeLists.txt | 57 +- .../index/vector_index/IndexGPUIDMAP.cpp | 113 ++++ .../index/vector_index/IndexGPUIDMAP.h | 47 ++ .../index/vector_index/IndexIDMAP.cpp | 385 ++++++-------- .../knowhere/index/vector_index/IndexIDMAP.h | 29 - .../knowhere/index/vector_index/IndexIVF.cpp | 24 +- .../index/vector_index/IndexIVFSQ.cpp | 17 +- .../knowhere/index/vector_index/IndexNSG.cpp | 8 +- core/src/index/unittest/CMakeLists.txt | 86 +-- core/src/index/unittest/Helper.h | 32 +- .../unittest/faiss_benchmark/CMakeLists.txt | 64 ++- .../index/unittest/faiss_ori/CMakeLists.txt | 63 ++- core/src/index/unittest/test_idmap.cpp | 6 +- core/src/index/unittest/test_ivf.cpp | 86 +-- .../index/unittest/test_nsg/CMakeLists.txt | 3 +- core/src/index/unittest/test_nsg/test_nsg.cpp | 6 + core/src/main.cpp | 33 +- core/src/metrics/SystemInfo.cpp | 25 +- core/src/scheduler/JobMgr.cpp | 31 +- core/src/scheduler/SchedInst.cpp | 13 +- core/src/scheduler/SchedInst.h | 1 + core/src/scheduler/TaskCreator.cpp | 11 +- core/src/scheduler/Utils.cpp | 4 + .../scheduler/action/PushTaskToNeighbour.cpp | 102 ++-- core/src/scheduler/job/BuildIndexJob.cpp | 5 +- core/src/scheduler/job/SearchJob.cpp | 16 +- core/src/scheduler/job/SearchJob.h | 22 +- core/src/scheduler/optimizer/OnlyGPUPass.cpp | 2 +- core/src/scheduler/task/SearchTask.cpp | 115 ++-- core/src/scheduler/task/SearchTask.h | 5 +- core/src/sdk/grpc/ClientProxy.cpp | 2 +- core/src/server/Config.cpp | 75 +-- core/src/server/Config.h | 4 +- core/src/server/Server.cpp | 2 +- core/src/server/grpc_impl/GrpcRequestTask.cpp | 28 +- core/src/utils/ValidationUtil.cpp | 10 + core/src/wrapper/ConfAdapter.cpp | 2 +- core/src/wrapper/KnowhereResource.cpp | 9 + core/src/wrapper/VecImpl.cpp | 501 +++++++----------- core/src/wrapper/VecImpl.h | 35 -- core/src/wrapper/VecIndex.cpp | 47 +- .../WrapperException.cpp} | 16 +- core/src/wrapper/WrapperException.h | 36 ++ core/src/wrapper/gpu/GPUVecImpl.cpp | 164 ++++++ core/src/wrapper/gpu/GPUVecImpl.h | 66 +++ core/thirdparty/versions.txt | 1 + core/ubuntu_build_deps.sh | 6 + core/unittest/CMakeLists.txt | 20 +- core/unittest/db/CMakeLists.txt | 2 +- core/unittest/db/test_db.cpp | 47 +- core/unittest/db/test_db_mysql.cpp | 22 +- core/unittest/db/test_mem.cpp | 24 +- core/unittest/db/test_search.cpp | 168 +++--- core/unittest/db/utils.cpp | 13 +- core/unittest/metrics/test_metrics.cpp | 5 +- core/unittest/scheduler/CMakeLists.txt | 2 +- core/unittest/server/CMakeLists.txt | 2 +- core/unittest/server/test_config.cpp | 120 ----- core/unittest/server/test_rpc.cpp | 2 +- core/unittest/server/test_util.cpp | 2 + core/unittest/server/utils.cpp | 8 +- core/unittest/wrapper/CMakeLists.txt | 8 +- core/unittest/wrapper/test_wrapper.cpp 
| 61 ++- core/unittest/wrapper/utils.cpp | 6 + 79 files changed, 2103 insertions(+), 1610 deletions(-) create mode 100644 core/src/config.h create mode 100644 core/src/config.h.in create mode 100644 core/src/index/knowhere/knowhere/index/vector_index/IndexGPUIDMAP.cpp create mode 100644 core/src/index/knowhere/knowhere/index/vector_index/IndexGPUIDMAP.h rename core/src/{version.h.macro => wrapper/WrapperException.cpp} (74%) create mode 100644 core/src/wrapper/WrapperException.h create mode 100644 core/src/wrapper/gpu/GPUVecImpl.cpp create mode 100644 core/src/wrapper/gpu/GPUVecImpl.h diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index 402e65fb10..a59e80a6e8 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -23,9 +23,9 @@ message(STATUS "Building using CMake version: ${CMAKE_VERSION}") set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") -MACRO (GET_CURRENT_TIME CURRENT_TIME) +MACRO(GET_CURRENT_TIME CURRENT_TIME) execute_process(COMMAND "date" +"%Y-%m-%d %H:%M.%S" OUTPUT_VARIABLE ${CURRENT_TIME}) -ENDMACRO (GET_CURRENT_TIME) +ENDMACRO(GET_CURRENT_TIME) GET_CURRENT_TIME(BUILD_TIME) string(REGEX REPLACE "\n" "" BUILD_TIME ${BUILD_TIME}) @@ -42,23 +42,20 @@ GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME) message(STATUS "GIT_BRANCH_NAME = ${GIT_BRANCH_NAME}") if(NOT GIT_BRANCH_NAME STREQUAL "") string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME}) -endif() +endif () -set(MILVUS_VERSION "0.5.1") +set(MILVUS_VERSION "${GIT_BRANCH_NAME}") string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]" MILVUS_VERSION "${MILVUS_VERSION}") -find_package(ClangTools) -set(BUILD_SUPPORT_DIR "${CMAKE_SOURCE_DIR}/build-support") - -if(CMAKE_BUILD_TYPE STREQUAL "Release") +if (CMAKE_BUILD_TYPE STREQUAL "Release") set(BUILD_TYPE "Release") -else() +else () set(BUILD_TYPE "Debug") -endif() +endif () message(STATUS "Build type = ${BUILD_TYPE}") project(milvus VERSION "${MILVUS_VERSION}") -project(milvus_engine LANGUAGES CUDA CXX) +project(milvus_engine LANGUAGES CXX) unset(CMAKE_EXPORT_COMPILE_COMMANDS CACHE) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) @@ -67,15 +64,15 @@ set(MILVUS_VERSION_MAJOR "${milvus_VERSION_MAJOR}") set(MILVUS_VERSION_MINOR "${milvus_VERSION_MINOR}") set(MILVUS_VERSION_PATCH "${milvus_VERSION_PATCH}") -if(MILVUS_VERSION_MAJOR STREQUAL "" +if (MILVUS_VERSION_MAJOR STREQUAL "" OR MILVUS_VERSION_MINOR STREQUAL "" OR MILVUS_VERSION_PATCH STREQUAL "") message(WARNING "Failed to determine Milvus version from git branch name") set(MILVUS_VERSION "0.5.0") -endif() +endif () message(STATUS "Build version = ${MILVUS_VERSION}") -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/version.h.macro ${CMAKE_CURRENT_SOURCE_DIR}/src/version.h) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/config.h.in ${CMAKE_CURRENT_SOURCE_DIR}/src/config.h @ONLY) message(STATUS "Milvus version: " "${MILVUS_VERSION_MAJOR}.${MILVUS_VERSION_MINOR}.${MILVUS_VERSION_PATCH} " @@ -84,46 +81,33 @@ message(STATUS "Milvus version: " set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED on) -if(CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)") +if (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)") message(STATUS "Building milvus_engine on x86 architecture") set(MILVUS_BUILD_ARCH x86_64) -elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc)") +elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc)") message(STATUS "Building milvus_engine on ppc architecture") set(MILVUS_BUILD_ARCH ppc64le) -else() +else () message(WARNING "Unknown processor type") message(WARNING 
"CMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR}") set(MILVUS_BUILD_ARCH unknown) -endif() - -find_package (Python COMPONENTS Interpreter Development) - -find_package(CUDA) -set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda") - -if(CMAKE_BUILD_TYPE STREQUAL "Release") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp") - set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3") -else() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp") - set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O0 -g") -endif() +endif () # Ensure that a default make is set -if("${MAKE}" STREQUAL "") - if(NOT MSVC) +if ("${MAKE}" STREQUAL "") + if (NOT MSVC) find_program(MAKE make) - endif() -endif() + endif () +endif () find_path(MYSQL_INCLUDE_DIR - NAMES "mysql.h" - PATH_SUFFIXES "mysql") + NAMES "mysql.h" + PATH_SUFFIXES "mysql") if (${MYSQL_INCLUDE_DIR} STREQUAL "MYSQL_INCLUDE_DIR-NOTFOUND") message(FATAL_ERROR "Could not found MySQL include directory") -else() +else () include_directories(${MYSQL_INCLUDE_DIR}) -endif() +endif () set(MILVUS_SOURCE_DIR ${PROJECT_SOURCE_DIR}) set(MILVUS_BINARY_DIR ${PROJECT_BINARY_DIR}) @@ -134,26 +118,50 @@ include(DefineOptions) include(BuildUtils) include(ThirdPartyPackages) -config_summary() +set(MILVUS_GPU_VERSION false) +if (MILVUS_CPU_VERSION) + message(STATUS "Building Milvus CPU version") + add_compile_definitions("MILVUS_CPU_VERSION") +else () + message(STATUS "Building Milvus GPU version") + set(MILVUS_GPU_VERSION true) + add_compile_definitions("MILVUS_GPU_VERSION") + enable_language(CUDA) + find_package(CUDA 10 REQUIRED) + set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda") +endif () + +if (CMAKE_BUILD_TYPE STREQUAL "Release") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp") + if (MILVUS_GPU_VERSION) + set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3") + endif () +else () + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp") + if (MILVUS_GPU_VERSION) + set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O0 -g") + endif () +endif () if (CUSTOMIZATION) add_definitions(-DCUSTOMIZATION) endif (CUSTOMIZATION) +config_summary() add_subdirectory(src) if (BUILD_UNIT_TEST STREQUAL "ON") if (BUILD_COVERAGE STREQUAL "ON") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage") - endif() + endif () add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/unittest) -endif() +endif () add_custom_target(Clean-All COMMAND ${CMAKE_BUILD_TOOL} clean) -if("${MILVUS_DB_PATH}" STREQUAL "") +if ("${MILVUS_DB_PATH}" STREQUAL "") set(MILVUS_DB_PATH "/tmp/milvus") -endif() +endif () configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.conf) @@ -169,19 +177,22 @@ install(FILES DESTINATION conf) +find_package(Python COMPONENTS Interpreter Development) +find_package(ClangTools) +set(BUILD_SUPPORT_DIR "${CMAKE_SOURCE_DIR}/build-support") # # "make lint" target # -if(NOT MILVUS_VERBOSE_LINT) - set(MILVUS_LINT_QUIET "--quiet") -endif() +if (NOT MILVUS_VERBOSE_LINT) + set(MILVUS_LINT_QUIET "--quiet") +endif () -if(NOT LINT_EXCLUSIONS_FILE) - # source files matching a glob from a line in this file - # will be excluded from linting (cpplint, clang-tidy, clang-format) - set(LINT_EXCLUSIONS_FILE 
${BUILD_SUPPORT_DIR}/lint_exclusions.txt) -endif() +if (NOT LINT_EXCLUSIONS_FILE) + # source files matching a glob from a line in this file + # will be excluded from linting (cpplint, clang-tidy, clang-format) + set(LINT_EXCLUSIONS_FILE ${BUILD_SUPPORT_DIR}/lint_exclusions.txt) +endif () find_program(CPPLINT_BIN NAMES cpplint cpplint.py HINTS ${BUILD_SUPPORT_DIR}) message(STATUS "Found cpplint executable at ${CPPLINT_BIN}") @@ -190,77 +201,76 @@ message(STATUS "Found cpplint executable at ${CPPLINT_BIN}") # "make lint" targets # add_custom_target(lint - ${PYTHON_EXECUTABLE} - ${BUILD_SUPPORT_DIR}/run_cpplint.py - --cpplint_binary - ${CPPLINT_BIN} - --exclude_globs - ${LINT_EXCLUSIONS_FILE} - --source_dir - ${CMAKE_CURRENT_SOURCE_DIR} - ${MILVUS_LINT_QUIET}) + ${PYTHON_EXECUTABLE} + ${BUILD_SUPPORT_DIR}/run_cpplint.py + --cpplint_binary + ${CPPLINT_BIN} + --exclude_globs + ${LINT_EXCLUSIONS_FILE} + --source_dir + ${CMAKE_CURRENT_SOURCE_DIR} + ${MILVUS_LINT_QUIET}) # # "make clang-format" and "make check-clang-format" targets # -if(${CLANG_FORMAT_FOUND}) - # runs clang format and updates files in place. - add_custom_target(clang-format - ${PYTHON_EXECUTABLE} - ${BUILD_SUPPORT_DIR}/run_clang_format.py - --clang_format_binary - ${CLANG_FORMAT_BIN} - --exclude_globs - ${LINT_EXCLUSIONS_FILE} - --source_dir - ${CMAKE_CURRENT_SOURCE_DIR}/src - --fix - ${MILVUS_LINT_QUIET}) +if (${CLANG_FORMAT_FOUND}) + # runs clang format and updates files in place. + add_custom_target(clang-format + ${PYTHON_EXECUTABLE} + ${BUILD_SUPPORT_DIR}/run_clang_format.py + --clang_format_binary + ${CLANG_FORMAT_BIN} + --exclude_globs + ${LINT_EXCLUSIONS_FILE} + --source_dir + ${CMAKE_CURRENT_SOURCE_DIR}/src + --fix + ${MILVUS_LINT_QUIET}) - # runs clang format and exits with a non-zero exit code if any files need to be reformatted - add_custom_target(check-clang-format - ${PYTHON_EXECUTABLE} - ${BUILD_SUPPORT_DIR}/run_clang_format.py - --clang_format_binary - ${CLANG_FORMAT_BIN} - --exclude_globs - ${LINT_EXCLUSIONS_FILE} - --source_dir - ${CMAKE_CURRENT_SOURCE_DIR}/src - ${MILVUS_LINT_QUIET}) -endif() + # runs clang format and exits with a non-zero exit code if any files need to be reformatted + add_custom_target(check-clang-format + ${PYTHON_EXECUTABLE} + ${BUILD_SUPPORT_DIR}/run_clang_format.py + --clang_format_binary + ${CLANG_FORMAT_BIN} + --exclude_globs + ${LINT_EXCLUSIONS_FILE} + --source_dir + ${CMAKE_CURRENT_SOURCE_DIR}/src + ${MILVUS_LINT_QUIET}) +endif () # # "make clang-tidy" and "make check-clang-tidy" targets # -if(${CLANG_TIDY_FOUND}) - # runs clang-tidy and attempts to fix any warning automatically - add_custom_target(clang-tidy - ${PYTHON_EXECUTABLE} - ${BUILD_SUPPORT_DIR}/run_clang_tidy.py - --clang_tidy_binary - ${CLANG_TIDY_BIN} - --exclude_globs - ${LINT_EXCLUSIONS_FILE} - --compile_commands - ${CMAKE_BINARY_DIR}/compile_commands.json - --source_dir - ${CMAKE_CURRENT_SOURCE_DIR}/src - --fix - ${MILVUS_LINT_QUIET}) - - # runs clang-tidy and exits with a non-zero exit code if any errors are found. 
- add_custom_target(check-clang-tidy - ${PYTHON_EXECUTABLE} - ${BUILD_SUPPORT_DIR}/run_clang_tidy.py - --clang_tidy_binary - ${CLANG_TIDY_BIN} - --exclude_globs - ${LINT_EXCLUSIONS_FILE} - --compile_commands - ${CMAKE_BINARY_DIR}/compile_commands.json - --source_dir - ${CMAKE_CURRENT_SOURCE_DIR}/src - ${MILVUS_LINT_QUIET}) -endif() +if (${CLANG_TIDY_FOUND}) + # runs clang-tidy and attempts to fix any warning automatically + add_custom_target(clang-tidy + ${PYTHON_EXECUTABLE} + ${BUILD_SUPPORT_DIR}/run_clang_tidy.py + --clang_tidy_binary + ${CLANG_TIDY_BIN} + --exclude_globs + ${LINT_EXCLUSIONS_FILE} + --compile_commands + ${CMAKE_BINARY_DIR}/compile_commands.json + --source_dir + ${CMAKE_CURRENT_SOURCE_DIR}/src + --fix + ${MILVUS_LINT_QUIET}) + # runs clang-tidy and exits with a non-zero exit code if any errors are found. + add_custom_target(check-clang-tidy + ${PYTHON_EXECUTABLE} + ${BUILD_SUPPORT_DIR}/run_clang_tidy.py + --clang_tidy_binary + ${CLANG_TIDY_BIN} + --exclude_globs + ${LINT_EXCLUSIONS_FILE} + --compile_commands + ${CMAKE_BINARY_DIR}/compile_commands.json + --source_dir + ${CMAKE_CURRENT_SOURCE_DIR}/src + ${MILVUS_LINT_QUIET}) +endif () \ No newline at end of file diff --git a/core/build.sh b/core/build.sh index a662fe497a..819278b94a 100755 --- a/core/build.sh +++ b/core/build.sh @@ -12,6 +12,8 @@ USE_JFROG_CACHE="OFF" RUN_CPPLINT="OFF" CUSTOMIZATION="OFF" # default use ori faiss CUDA_COMPILER=/usr/local/cuda/bin/nvcc +CPU_VERSION="OFF" +WITH_MKL="OFF" CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}" wget -q --method HEAD ${CUSTOMIZED_FAISS_URL} @@ -21,7 +23,7 @@ else CUSTOMIZATION="OFF" fi -while getopts "p:d:t:ulrcgjhx" arg +while getopts "p:d:t:ulrcgjhxzm" arg do case $arg in p) @@ -58,6 +60,12 @@ do x) CUSTOMIZATION="OFF" # force use ori faiss ;; + z) + CPU_VERSION="ON" + ;; + m) + WITH_MKL="ON" + ;; h) # help echo " @@ -71,10 +79,12 @@ parameter: -c: code coverage(default: OFF) -g: profiling(default: OFF) -j: use jfrog cache build directory(default: OFF) +-z: build pure CPU version(default: OFF) +-m: build with MKL(default: OFF) -h: help usage: -./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-g] [-j] [-h] +./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-g] [-j] [-z] [-m] [-h] " exit 0 ;; @@ -106,6 +116,8 @@ CMAKE_CMD="cmake \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ -DCUSTOMIZATION=${CUSTOMIZATION} \ -DFAISS_URL=${CUSTOMIZED_FAISS_URL} \ +-DMILVUS_CPU_VERSION=${CPU_VERSION} \ +-DBUILD_FAISS_WITH_MKL=${WITH_MKL} \ ../" echo ${CMAKE_CMD} ${CMAKE_CMD} @@ -147,4 +159,4 @@ else # compile and build make -j 8 install || exit 1 -fi \ No newline at end of file +fi diff --git a/core/cmake/DefineOptions.cmake b/core/cmake/DefineOptions.cmake index 167b6e9d66..b90f41fe8e 100644 --- a/core/cmake/DefineOptions.cmake +++ b/core/cmake/DefineOptions.cmake @@ -40,6 +40,11 @@ macro(define_option_string name description default) endif() endmacro() +#---------------------------------------------------------------------- +set_option_category("CPU version") + +define_option(MILVUS_CPU_VERSION "Build CPU version only" ON) + #---------------------------------------------------------------------- set_option_category("Thirdparty") diff --git a/core/conf/server_config.template b/core/conf/server_config.template index 8dfb30f534..07aaeb88e9 100644 --- a/core/conf/server_config.template +++ b/core/conf/server_config.template @@ -27,18 +27,21 @@ metric_config: port: 8080 # port prometheus uses to fetch metrics, must in range [1025, 65534] cache_config: + 
cpu_cache_capacity: 16 # GB, CPU memory used for cache, must be a positive integer cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] - gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer - gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] cache_insert_data: false # whether to load inserted data into cache, must be a boolean +# Uncomment the following config if you are using GPU version +# gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer +# gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] + engine_config: use_blas_threshold: 20 # if nq < use_blas_threshold, use SSE, faster with fluctuated response times # if nq >= use_blas_threshold, use OpenBlas, slower with stable response times gpu_search_threshold: 1000 # threshold beyond which the search computation is executed on GPUs only resource_config: - search_resources: # define the GPUs used for search computation, must be in format: gpux - - gpu0 - index_build_device: gpu0 # GPU used for building index, must be in format: gpux \ No newline at end of file + search_resources: # define the CPU / GPUs used for search computation, must be in format: cpu / gpux + - cpu + index_build_device: cpu # CPU / GPU used for building index, must be in format: cpu / gpux diff --git a/core/src/CMakeLists.txt b/core/src/CMakeLists.txt index ae3a458987..9e4065d646 100644 --- a/core/src/CMakeLists.txt +++ b/core/src/CMakeLists.txt @@ -20,11 +20,9 @@ include_directories(${MILVUS_SOURCE_DIR}) include_directories(${MILVUS_ENGINE_SRC}) -include_directories(${CUDA_TOOLKIT_ROOT_DIR}/include) include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-status) include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-milvus) -#this statement must put here, since the INDEX_INCLUDE_DIRS is defined in code/CMakeList.txt add_subdirectory(index) set(INDEX_INCLUDE_DIRS ${INDEX_INCLUDE_DIRS} PARENT_SCOPE) @@ -109,35 +107,45 @@ set(boost_lib libboost_serialization.a ) -set(cuda_lib - ${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so - cudart - cublas - ) - set(third_party_libs sqlite ${client_grpc_lib} yaml-cpp ${prometheus_lib} - ${cuda_lib} mysqlpp zlib ${boost_lib} ) -if (MILVUS_ENABLE_PROFILING STREQUAL "ON") +if (MILVUS_GPU_VERSION) + include_directories(${CUDA_INCLUDE_DIRS}) + link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64") + set(cuda_lib + ${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so + cudart + cublas + ) set(third_party_libs ${third_party_libs} - gperftools - libunwind - ) + ${cuda_lib} + ) + aux_source_directory(${MILVUS_ENGINE_SRC}/wrapper/gpu wrapper_gpu_files) + set(engine_files ${engine_files} + ${wrapper_gpu_files} + ) +endif () + +if (MILVUS_ENABLE_PROFILING STREQUAL "ON") + set(third_party_libs ${third_party_libs} + gperftools + libunwind + ) endif () -link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64") set(engine_libs pthread libgomp.a libgfortran.a + dl ) if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") @@ -147,11 +155,11 @@ if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") ) endif () -cuda_add_library(milvus_engine STATIC ${engine_files}) +add_library(milvus_engine STATIC ${engine_files}) target_link_libraries(milvus_engine knowhere - ${engine_libs} ${third_party_libs} + ${engine_libs} ) add_library(metrics STATIC ${metrics_files}) @@ -165,8 +173,6 @@ 
target_link_libraries(metrics ${metrics_lib}) set(server_libs milvus_engine - pthread - dl metrics ) diff --git a/core/src/config.h b/core/src/config.h new file mode 100644 index 0000000000..b031713328 --- /dev/null +++ b/core/src/config.h @@ -0,0 +1,3 @@ +#define MILVUS_VERSION "0.5.0" +#define BUILD_TYPE "Debug" +#define BUILD_TIME "2019-11-05 10:23.18" diff --git a/core/src/config.h.in b/core/src/config.h.in new file mode 100644 index 0000000000..9e8821881f --- /dev/null +++ b/core/src/config.h.in @@ -0,0 +1,3 @@ +#cmakedefine MILVUS_VERSION "@MILVUS_VERSION@" +#cmakedefine BUILD_TYPE "@BUILD_TYPE@" +#cmakedefine BUILD_TIME @BUILD_TIME@ \ No newline at end of file diff --git a/core/src/db/DB.h b/core/src/db/DB.h index 07fe30babd..a790fadb50 100644 --- a/core/src/db/DB.h +++ b/core/src/db/DB.h @@ -67,16 +67,15 @@ class DB { virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - ResultIds& result_ids, ResultDistances& result_distances) = 0; + QueryResults& results) = 0; virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - const meta::DatesT& dates, ResultIds& result_ids, ResultDistances& result_distances) = 0; + const meta::DatesT& dates, QueryResults& results) = 0; virtual Status Query(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, - ResultDistances& result_distances) = 0; + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) = 0; virtual Status Size(uint64_t& result) = 0; diff --git a/core/src/db/DBImpl.cpp b/core/src/db/DBImpl.cpp index fc31846bd3..6995de3d14 100644 --- a/core/src/db/DBImpl.cpp +++ b/core/src/db/DBImpl.cpp @@ -336,20 +336,20 @@ DBImpl::DropIndex(const std::string& table_id) { Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - ResultIds& result_ids, ResultDistances& result_distances) { + QueryResults& results) { if (shutting_down_.load(std::memory_order_acquire)) { return Status(DB_ERROR, "Milsvus server is shutdown!"); } meta::DatesT dates = {utils::GetDate()}; - Status result = Query(table_id, k, nq, nprobe, vectors, dates, result_ids, result_distances); + Status result = Query(table_id, k, nq, nprobe, vectors, dates, results); return result; } Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - const meta::DatesT& dates, ResultIds& result_ids, ResultDistances& result_distances) { + const meta::DatesT& dates, QueryResults& results) { if (shutting_down_.load(std::memory_order_acquire)) { return Status(DB_ERROR, "Milsvus server is shutdown!"); } @@ -372,15 +372,14 @@ DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t npr } cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query - status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, result_ids, result_distances); + status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, results); cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after query return status; } Status DBImpl::Query(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, - ResultDistances& result_distances) { + uint64_t nprobe, const 
float* vectors, const meta::DatesT& dates, QueryResults& results) { if (shutting_down_.load(std::memory_order_acquire)) { return Status(DB_ERROR, "Milsvus server is shutdown!"); } @@ -414,7 +413,7 @@ DBImpl::Query(const std::string& table_id, const std::vector& file_ } cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query - status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, result_ids, result_distances); + status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, results); cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after query return status; } @@ -433,7 +432,7 @@ DBImpl::Size(uint64_t& result) { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) { + uint64_t nprobe, const float* vectors, QueryResults& results) { server::CollectQueryMetrics metrics(nq); TimeRecorder rc(""); @@ -454,8 +453,7 @@ DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& fi } // step 3: construct results - result_ids = job->GetResultIds(); - result_distances = job->GetResultDistances(); + results = job->GetResult(); rc.ElapseFromBegin("Engine query totally cost"); return Status::OK(); diff --git a/core/src/db/DBImpl.h b/core/src/db/DBImpl.h index ad9c574bb1..e1e030cc32 100644 --- a/core/src/db/DBImpl.h +++ b/core/src/db/DBImpl.h @@ -91,16 +91,15 @@ class DBImpl : public DB { Status Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - ResultIds& result_ids, ResultDistances& result_distances) override; + QueryResults& results) override; Status Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - const meta::DatesT& dates, ResultIds& result_ids, ResultDistances& result_distances) override; + const meta::DatesT& dates, QueryResults& results) override; Status Query(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, - ResultDistances& result_distances) override; + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) override; Status Size(uint64_t& result) override; @@ -108,7 +107,7 @@ class DBImpl : public DB { private: Status QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances); + uint64_t nprobe, const float* vectors, QueryResults& results); void BackgroundTimerTask(); diff --git a/core/src/db/Types.h b/core/src/db/Types.h index cc2eab0383..94528a9a8a 100644 --- a/core/src/db/Types.h +++ b/core/src/db/Types.h @@ -19,7 +19,6 @@ #include "db/engine/ExecutionEngine.h" -#include #include #include #include @@ -27,13 +26,12 @@ namespace milvus { namespace engine { -using IDNumber = faiss::Index::idx_t; - +typedef int64_t IDNumber; typedef IDNumber* IDNumberPtr; typedef std::vector IDNumbers; -typedef std::vector ResultIds; -typedef std::vector ResultDistances; +typedef std::vector> QueryResult; +typedef std::vector QueryResults; struct TableIndex { int32_t engine_type_ = (int)EngineType::FAISS_IDMAP; diff --git 
a/core/src/db/engine/ExecutionEngineImpl.cpp b/core/src/db/engine/ExecutionEngineImpl.cpp index ee04191fef..19c699bda7 100644 --- a/core/src/db/engine/ExecutionEngineImpl.cpp +++ b/core/src/db/engine/ExecutionEngineImpl.cpp @@ -25,6 +25,7 @@ #include "utils/CommonUtil.h" #include "utils/Exception.h" #include "utils/Log.h" + #include "wrapper/ConfAdapter.h" #include "wrapper/ConfAdapterMgr.h" #include "wrapper/VecImpl.h" @@ -92,11 +93,19 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) { break; } case EngineType::FAISS_IVFFLAT: { +#ifdef MILVUS_CPU_VERSION + index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_CPU); +#else index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_MIX); +#endif break; } case EngineType::FAISS_IVFSQ8: { +#ifdef MILVUS_CPU_VERSION + index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_CPU); +#else index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_MIX); +#endif break; } case EngineType::NSG_MIX: { @@ -309,13 +318,30 @@ ExecutionEngineImpl::CopyToGpu(uint64_t device_id, bool hybrid) { return Status::OK(); } #endif - try { - index_ = index_->CopyToGpu(device_id); - ENGINE_LOG_DEBUG << "CPU to GPU" << device_id; - } catch (std::exception& e) { - ENGINE_LOG_ERROR << e.what(); - return Status(DB_ERROR, e.what()); + + auto index = std::static_pointer_cast(cache::GpuCacheMgr::GetInstance(device_id)->GetIndex(location_)); + bool already_in_cache = (index != nullptr); + if (already_in_cache) { + index_ = index; + } else { + if (index_ == nullptr) { + ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to copy to gpu"; + return Status(DB_ERROR, "index is null"); + } + + try { + index_ = index_->CopyToGpu(device_id); + ENGINE_LOG_DEBUG << "CPU to GPU" << device_id; + } catch (std::exception& e) { + ENGINE_LOG_ERROR << e.what(); + return Status(DB_ERROR, e.what()); + } } + + if (!already_in_cache) { + GpuCache(device_id); + } + return Status::OK(); } diff --git a/core/src/index/CMakeLists.txt b/core/src/index/CMakeLists.txt index f570752b68..86e5df8eca 100644 --- a/core/src/index/CMakeLists.txt +++ b/core/src/index/CMakeLists.txt @@ -19,12 +19,12 @@ cmake_minimum_required(VERSION 3.14) -message(STATUS "---------------core--------------") +message(STATUS "------------------------------KNOWHERE-----------------------------------") message(STATUS "Building using CMake version: ${CMAKE_VERSION}") -set(KNOWHERE_VERSION "0.1.0") +set(KNOWHERE_VERSION "0.5.0") string(REGEX MATCH "^[0-9]+\\.[0-9]+\\.[0-9]+" KNOWHERE_BASE_VERSION "${KNOWHERE_VERSION}") -project(knowhere VERSION "${KNOWHERE_BASE_VERSION}" LANGUAGES CUDA C CXX) +project(knowhere VERSION "${KNOWHERE_BASE_VERSION}" LANGUAGES C CXX) set(CMAKE_CXX_STANDARD 14) set(KNOWHERE_VERSION_MAJOR "${knowhere_VERSION_MAJOR}") @@ -45,17 +45,6 @@ if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE Release) endif(NOT CMAKE_BUILD_TYPE) -if(CMAKE_BUILD_TYPE STREQUAL "Release") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp") - set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3") -else() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp") - set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O0 -g") -endif() -MESSAGE(STATUS "CMAKE_CXX_FLAGS" ${CMAKE_CXX_FLAGS}) - -find_package(CUDA) - if(CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)") message(STATUS "building milvus_engine on x86 architecture") set(KNOWHERE_BUILD_ARCH x86_64) @@ -77,15 +66,39 @@ message(STATUS "Build type = ${BUILD_TYPE}") set(INDEX_SOURCE_DIR ${PROJECT_SOURCE_DIR}) set(INDEX_BINARY_DIR 
${PROJECT_BINARY_DIR}) -message(STATUS "Core source dir: ${PROJECT_SOURCE_DIR}") -message(STATUS "Core binary dir: ${PROJECT_BINARY_DIR}") set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${INDEX_SOURCE_DIR}/cmake") include(ExternalProject) include(DefineOptionsCore) include(BuildUtilsCore) + +set(KNOWHERE_GPU_VERSION false) +if (MILVUS_CPU_VERSION OR KNOWHERE_CPU_VERSION) + message(STATUS "Building Knowhere CPU version") + add_compile_definitions("MILVUS_CPU_VERSION") +else () + message(STATUS "Building Knowhere GPU version") + add_compile_definitions("MILVUS_GPU_VERSION") + set(KNOWHERE_GPU_VERSION true) + enable_language(CUDA) + find_package(CUDA 10 REQUIRED) + set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda") +endif () + include(ThirdPartyPackagesCore) +if (CMAKE_BUILD_TYPE STREQUAL "Release") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp") + if (KNOWHERE_GPU_VERSION) + set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3") + endif () +else () + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp") + if (KNOWHERE_GPU_VERSION) + set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O0 -g") + endif () +endif () + add_subdirectory(knowhere) if (BUILD_COVERAGE STREQUAL "ON") @@ -94,7 +107,7 @@ endif() set(INDEX_INCLUDE_DIRS ${INDEX_INCLUDE_DIRS} PARENT_SCOPE) -if(BUILD_UNIT_TEST STREQUAL "ON") +if(KNOWHERE_BUILD_TESTS) add_subdirectory(unittest) endif() diff --git a/core/src/index/cmake/DefineOptionsCore.cmake b/core/src/index/cmake/DefineOptionsCore.cmake index cf5e8ea328..ba29fea207 100644 --- a/core/src/index/cmake/DefineOptionsCore.cmake +++ b/core/src/index/cmake/DefineOptionsCore.cmake @@ -40,6 +40,15 @@ macro(define_option_string name description default) endif() endmacro() +#---------------------------------------------------------------------- +set_option_category("CPU version") + +if(MILVUS_CPU_VERSION) + define_option(KNOWHERE_CPU_VERSION "Build CPU version only" ON) +else() + define_option(KNOWHERE_CPU_VERSION "Build CPU version only" OFF) +endif() + #---------------------------------------------------------------------- set_option_category("Thirdparty") @@ -70,7 +79,7 @@ define_option(KNOWHERE_WITH_FAISS "Build with FAISS library" ON) define_option(KNOWHERE_WITH_FAISS_GPU_VERSION "Build with FAISS GPU version" ON) -define_option(KNOWHERE_WITH_OPENBLAS "Build with OpenBLAS library" ON) +define_option(BUILD_FAISS_WITH_MKL "Build FAISS with MKL" OFF) #---------------------------------------------------------------------- if(MSVC) diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index 66ad5e9bbe..54d6ab568b 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -26,24 +26,24 @@ set(KNOWHERE_THIRDPARTY_DEPENDENCIES message(STATUS "Using ${KNOWHERE_DEPENDENCY_SOURCE} approach to find dependencies") # For each dependency, set dependency source to global default, if unset -foreach(DEPENDENCY ${KNOWHERE_THIRDPARTY_DEPENDENCIES}) - if("${${DEPENDENCY}_SOURCE}" STREQUAL "") +foreach (DEPENDENCY ${KNOWHERE_THIRDPARTY_DEPENDENCIES}) + if ("${${DEPENDENCY}_SOURCE}" STREQUAL "") set(${DEPENDENCY}_SOURCE ${KNOWHERE_DEPENDENCY_SOURCE}) - endif() -endforeach() + endif () +endforeach () macro(build_dependency DEPENDENCY_NAME) - if("${DEPENDENCY_NAME}" STREQUAL "ARROW") + if ("${DEPENDENCY_NAME}" STREQUAL "ARROW") build_arrow() - elseif("${DEPENDENCY_NAME}" STREQUAL 
"LAPACK") + elseif ("${DEPENDENCY_NAME}" STREQUAL "LAPACK") build_lapack() elseif ("${DEPENDENCY_NAME}" STREQUAL "GTest") build_gtest() elseif ("${DEPENDENCY_NAME}" STREQUAL "OpenBLAS") build_openblas() - elseif("${DEPENDENCY_NAME}" STREQUAL "FAISS") + elseif ("${DEPENDENCY_NAME}" STREQUAL "FAISS") build_faiss() - else() + else () message(FATAL_ERROR "Unknown thirdparty dependency to build: ${DEPENDENCY_NAME}") endif () endmacro() @@ -51,7 +51,7 @@ endmacro() macro(resolve_dependency DEPENDENCY_NAME) if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO") #message(STATUS "Finding ${DEPENDENCY_NAME} package") - #message(STATUS "${DEPENDENCY_NAME} package not found") + #message(STATUS "${DEPENDENCY_NAME} package not found") build_dependency(${DEPENDENCY_NAME}) elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "BUNDLED") build_dependency(${DEPENDENCY_NAME}) @@ -64,28 +64,28 @@ endmacro() # Identify OS if (UNIX) if (APPLE) - set (CMAKE_OS_NAME "osx" CACHE STRING "Operating system name" FORCE) + set(CMAKE_OS_NAME "osx" CACHE STRING "Operating system name" FORCE) else (APPLE) ## Check for Debian GNU/Linux ________________ - find_file (DEBIAN_FOUND debian_version debconf.conf + find_file(DEBIAN_FOUND debian_version debconf.conf PATHS /etc ) if (DEBIAN_FOUND) - set (CMAKE_OS_NAME "debian" CACHE STRING "Operating system name" FORCE) + set(CMAKE_OS_NAME "debian" CACHE STRING "Operating system name" FORCE) endif (DEBIAN_FOUND) ## Check for Fedora _________________________ - find_file (FEDORA_FOUND fedora-release + find_file(FEDORA_FOUND fedora-release PATHS /etc ) if (FEDORA_FOUND) - set (CMAKE_OS_NAME "fedora" CACHE STRING "Operating system name" FORCE) + set(CMAKE_OS_NAME "fedora" CACHE STRING "Operating system name" FORCE) endif (FEDORA_FOUND) ## Check for RedHat _________________________ - find_file (REDHAT_FOUND redhat-release inittab.RH + find_file(REDHAT_FOUND redhat-release inittab.RH PATHS /etc ) if (REDHAT_FOUND) - set (CMAKE_OS_NAME "redhat" CACHE STRING "Operating system name" FORCE) + set(CMAKE_OS_NAME "redhat" CACHE STRING "Operating system name" FORCE) endif (REDHAT_FOUND) ## Extra check for Ubuntu ____________________ if (DEBIAN_FOUND) @@ -94,18 +94,18 @@ if (UNIX) ## a first superficial inspection a system will ## be considered as Debian, which signifies an ## extra check is required. 
- find_file (UBUNTU_EXTRA legal issue + find_file(UBUNTU_EXTRA legal issue PATHS /etc ) if (UBUNTU_EXTRA) ## Scan contents of file - file (STRINGS ${UBUNTU_EXTRA} UBUNTU_FOUND + file(STRINGS ${UBUNTU_EXTRA} UBUNTU_FOUND REGEX Ubuntu ) ## Check result of string search if (UBUNTU_FOUND) - set (CMAKE_OS_NAME "ubuntu" CACHE STRING "Operating system name" FORCE) - set (DEBIAN_FOUND FALSE) + set(CMAKE_OS_NAME "ubuntu" CACHE STRING "Operating system name" FORCE) + set(DEBIAN_FOUND FALSE) endif (UBUNTU_FOUND) endif (UBUNTU_EXTRA) endif (DEBIAN_FOUND) @@ -119,17 +119,17 @@ set(THIRDPARTY_DIR "${INDEX_SOURCE_DIR}/thirdparty") # ---------------------------------------------------------------------- # JFrog -if(NOT DEFINED USE_JFROG_CACHE) +if (NOT DEFINED USE_JFROG_CACHE) set(USE_JFROG_CACHE "OFF") -endif() -if(USE_JFROG_CACHE STREQUAL "ON") +endif () +if (USE_JFROG_CACHE STREQUAL "ON") set(JFROG_ARTFACTORY_CACHE_URL "${JFROG_ARTFACTORY_URL}/milvus/thirdparty/cache/${CMAKE_OS_NAME}/${KNOWHERE_BUILD_ARCH}/${BUILD_TYPE}") set(THIRDPARTY_PACKAGE_CACHE "${THIRDPARTY_DIR}/cache") - if(NOT EXISTS ${THIRDPARTY_PACKAGE_CACHE}) + if (NOT EXISTS ${THIRDPARTY_PACKAGE_CACHE}) message(STATUS "Will create cached directory: ${THIRDPARTY_PACKAGE_CACHE}") file(MAKE_DIRECTORY ${THIRDPARTY_PACKAGE_CACHE}) - endif() -endif() + endif () +endif () macro(resolve_dependency DEPENDENCY_NAME) if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO") @@ -150,11 +150,11 @@ string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_BUILD_TYPE) set(EP_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${UPPERCASE_BUILD_TYPE}}") set(EP_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${UPPERCASE_BUILD_TYPE}}") -if(NOT MSVC) +if (NOT MSVC) # Set -fPIC on all external projects set(EP_CXX_FLAGS "${EP_CXX_FLAGS} -fPIC") set(EP_C_FLAGS "${EP_C_FLAGS} -fPIC") -endif() +endif () # CC/CXX environment variables are captured on the first invocation of the # builder (e.g make or ninja) instead of when CMake is invoked into to build @@ -164,13 +164,13 @@ endif() set(EP_COMMON_TOOLCHAIN -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}) -if(CMAKE_AR) +if (CMAKE_AR) set(EP_COMMON_TOOLCHAIN ${EP_COMMON_TOOLCHAIN} -DCMAKE_AR=${CMAKE_AR}) -endif() +endif () -if(CMAKE_RANLIB) +if (CMAKE_RANLIB) set(EP_COMMON_TOOLCHAIN ${EP_COMMON_TOOLCHAIN} -DCMAKE_RANLIB=${CMAKE_RANLIB}) -endif() +endif () # External projects are still able to override the following declarations. 
# cmake command line will favor the last defined variable when a duplicate is @@ -184,18 +184,18 @@ set(EP_COMMON_CMAKE_ARGS -DCMAKE_CXX_FLAGS=${EP_CXX_FLAGS} -DCMAKE_CXX_FLAGS_${UPPERCASE_BUILD_TYPE}=${EP_CXX_FLAGS}) -if(NOT KNOWHERE_VERBOSE_THIRDPARTY_BUILD) +if (NOT KNOWHERE_VERBOSE_THIRDPARTY_BUILD) set(EP_LOG_OPTIONS LOG_CONFIGURE 1 LOG_BUILD 1 LOG_INSTALL 1 LOG_DOWNLOAD 1) -else() +else () set(EP_LOG_OPTIONS) -endif() +endif () # Ensure that a default make is set -if("${MAKE}" STREQUAL "") - if(NOT MSVC) +if ("${MAKE}" STREQUAL "") + if (NOT MSVC) find_program(MAKE make) - endif() -endif() + endif () +endif () set(MAKE_BUILD_ARGS "-j8") @@ -212,32 +212,32 @@ find_package(Threads REQUIRED) # Read toolchain versions from cpp/thirdparty/versions.txt file(STRINGS "${THIRDPARTY_DIR}/versions.txt" TOOLCHAIN_VERSIONS_TXT) -foreach(_VERSION_ENTRY ${TOOLCHAIN_VERSIONS_TXT}) +foreach (_VERSION_ENTRY ${TOOLCHAIN_VERSIONS_TXT}) # Exclude comments - if(NOT _VERSION_ENTRY MATCHES "^[^#][A-Za-z0-9-_]+_VERSION=") + if (NOT _VERSION_ENTRY MATCHES "^[^#][A-Za-z0-9-_]+_VERSION=") continue() - endif() + endif () string(REGEX MATCH "^[^=]*" _LIB_NAME ${_VERSION_ENTRY}) string(REPLACE "${_LIB_NAME}=" "" _LIB_VERSION ${_VERSION_ENTRY}) # Skip blank or malformed lines - if(${_LIB_VERSION} STREQUAL "") + if (${_LIB_VERSION} STREQUAL "") continue() - endif() + endif () # For debugging #message(STATUS "${_LIB_NAME}: ${_LIB_VERSION}") set(${_LIB_NAME} "${_LIB_VERSION}") -endforeach() +endforeach () -if(CUSTOMIZATION) +if (CUSTOMIZATION) execute_process(COMMAND wget -q --method HEAD ${FAISS_URL} RESULT_VARIABLE return_code) message(STATUS "Check the remote cache file ${FAISS_URL}. return code = ${return_code}") if (NOT return_code EQUAL 0) MESSAGE(FATAL_ERROR "Can't access to ${FAISS_URL}") - else() + else () set(FAISS_SOURCE_URL ${FAISS_URL}) # set(FAISS_MD5 "a589663865a8558205533c8ac414278c") # set(FAISS_MD5 "57da9c4f599cc8fa4260488b1c96e1cc") # commit-id 6dbdf75987c34a2c853bd172ea0d384feea8358c branch-0.2.0 @@ -246,36 +246,35 @@ if(CUSTOMIZATION) # set(FAISS_MD5 "c89ea8e655f5cdf58f42486f13614714") # commit-id 9c28a1cbb88f41fa03b03d7204106201ad33276b branch-0.2.1 # set(FAISS_MD5 "87fdd86351ffcaf3f80dc26ade63c44b") # commit-id 841a156e67e8e22cd8088e1b58c00afbf2efc30b branch-0.2.1 # set(FAISS_MD5 "f3b2ce3364c3fa7febd3aa7fdd0fe380") # commit-id 694e03458e6b69ce8a62502f71f69a614af5af8f branch-0.3.0 - # set(FAISS_MD5 "bb30722c22390ce5f6759ccb216c1b2a") # commit-id d324db297475286afe107847c7fb7a0f9dc7e90e branch-0.3.0 - set(FAISS_MD5 "2293cdb209c3718e3b19f3edae8b32b3") # commit-id a13c1205dc52977a9ad3b33a14efa958604a8bff branch-0.3.0 - endif() -else() + set(FAISS_MD5 "bb30722c22390ce5f6759ccb216c1b2a") # commit-id d324db297475286afe107847c7fb7a0f9dc7e90e branch-0.3.0 + endif () +else () set(FAISS_SOURCE_URL "https://github.com/milvus-io/faiss/archive/1.6.0.tar.gz") set(FAISS_MD5 "eb96d84f98b078a9eec04a796f5c792e") -endif() +endif () message(STATUS "FAISS URL = ${FAISS_SOURCE_URL}") -if(DEFINED ENV{KNOWHERE_ARROW_URL}) +if (DEFINED ENV{KNOWHERE_ARROW_URL}) set(ARROW_SOURCE_URL "$ENV{KNOWHERE_ARROW_URL}") -else() +else () set(ARROW_SOURCE_URL "https://github.com/apache/arrow.git" ) -endif() +endif () if (DEFINED ENV{KNOWHERE_GTEST_URL}) set(GTEST_SOURCE_URL "$ENV{KNOWHERE_GTEST_URL}") else () set(GTEST_SOURCE_URL "https://github.com/google/googletest/archive/release-${GTEST_VERSION}.tar.gz") -endif() +endif () set(GTEST_MD5 "2e6fbeb6a91310a16efe181886c59596") -if(DEFINED ENV{KNOWHERE_LAPACK_URL}) +if (DEFINED 
ENV{KNOWHERE_LAPACK_URL}) set(LAPACK_SOURCE_URL "$ENV{KNOWHERE_LAPACK_URL}") -else() +else () set(LAPACK_SOURCE_URL "https://github.com/Reference-LAPACK/lapack/archive/${LAPACK_VERSION}.tar.gz") -endif() +endif () set(LAPACK_MD5 "96591affdbf58c450d45c1daa540dbd2") if (DEFINED ENV{KNOWHERE_OPENBLAS_URL}) @@ -283,7 +282,7 @@ if (DEFINED ENV{KNOWHERE_OPENBLAS_URL}) else () set(OPENBLAS_SOURCE_URL "https://github.com/xianyi/OpenBLAS/archive/${OPENBLAS_VERSION}.tar.gz") -endif() +endif () set(OPENBLAS_MD5 "8a110a25b819a4b94e8a9580702b6495") # ---------------------------------------------------------------------- @@ -293,10 +292,10 @@ set(ARROW_PREFIX "${INDEX_BINARY_DIR}/arrow_ep-prefix/src/arrow_ep/cpp") macro(build_arrow) message(STATUS "Building Apache ARROW-${ARROW_VERSION} from source") set(ARROW_STATIC_LIB_NAME arrow) - set(ARROW_STATIC_LIB + set(ARROW_STATIC_LIB "${ARROW_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${ARROW_STATIC_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}" ) - set(ARROW_INCLUDE_DIR "${ARROW_PREFIX}/include") + set(ARROW_INCLUDE_DIR "${ARROW_PREFIX}/include") set(ARROW_CMAKE_ARGS ${EP_COMMON_CMAKE_ARGS} @@ -326,10 +325,10 @@ macro(build_arrow) -DBOOST_SOURCE=AUTO #try to find BOOST in the system default locations and build from source if not found ) - - if(USE_JFROG_CACHE STREQUAL "ON") + + if (USE_JFROG_CACHE STREQUAL "ON") execute_process(COMMAND sh -c "git ls-remote --heads --tags ${ARROW_SOURCE_URL} ${ARROW_VERSION} | cut -f 1" OUTPUT_VARIABLE ARROW_LAST_COMMIT_ID) - if(${ARROW_LAST_COMMIT_ID} MATCHES "^[^#][a-z0-9]+") + if (${ARROW_LAST_COMMIT_ID} MATCHES "^[^#][a-z0-9]+") string(MD5 ARROW_COMBINE_MD5 "${ARROW_LAST_COMMIT_ID}") set(ARROW_CACHE_PACKAGE_NAME "arrow_${ARROW_COMBINE_MD5}.tar.gz") set(ARROW_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${ARROW_CACHE_PACKAGE_NAME}") @@ -359,18 +358,18 @@ macro(build_arrow) ) ExternalProject_Create_Cache(arrow_ep ${ARROW_CACHE_PACKAGE_PATH} "${INDEX_BINARY_DIR}/arrow_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${ARROW_CACHE_URL}) - else() + else () file(DOWNLOAD ${ARROW_CACHE_URL} ${ARROW_CACHE_PACKAGE_PATH} STATUS status) list(GET status 0 status_code) message(STATUS "DOWNLOADING FROM ${ARROW_CACHE_URL} TO ${ARROW_CACHE_PACKAGE_PATH}. 
STATUS = ${status_code}") if (status_code EQUAL 0) ExternalProject_Use_Cache(arrow_ep ${ARROW_CACHE_PACKAGE_PATH} ${INDEX_BINARY_DIR}) - endif() - endif() - else() + endif () + endif () + else () message(FATAL_ERROR "The last commit ID of \"${ARROW_SOURCE_URL}\" repository don't match!") - endif() - else() + endif () + else () externalproject_add(arrow_ep GIT_REPOSITORY ${ARROW_SOURCE_URL} @@ -390,14 +389,14 @@ macro(build_arrow) BUILD_BYPRODUCTS "${ARROW_STATIC_LIB}" ) - endif() + endif () file(MAKE_DIRECTORY "${ARROW_PREFIX}/include") add_library(arrow STATIC IMPORTED) set_target_properties(arrow PROPERTIES IMPORTED_LOCATION "${ARROW_STATIC_LIB}" INTERFACE_INCLUDE_DIRECTORIES "${ARROW_INCLUDE_DIR}") - add_dependencies(arrow arrow_ep) + add_dependencies(arrow arrow_ep) set(JEMALLOC_PREFIX "${INDEX_BINARY_DIR}/arrow_ep-prefix/src/arrow_ep-build/jemalloc_ep-prefix/src/jemalloc_ep") @@ -408,13 +407,13 @@ macro(build_arrow) endmacro() -if(KNOWHERE_WITH_ARROW AND NOT TARGET arrow_ep) +if (KNOWHERE_WITH_ARROW AND NOT TARGET arrow_ep) resolve_dependency(ARROW) link_directories(SYSTEM ${ARROW_PREFIX}/lib/) include_directories(SYSTEM ${ARROW_INCLUDE_DIR}) -endif() +endif () # ---------------------------------------------------------------------- # OpenBLAS @@ -428,7 +427,7 @@ macro(build_openblas) set(OPENBLAS_REAL_STATIC_LIB "${OPENBLAS_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}openblas_haswellp-r0.3.6${CMAKE_STATIC_LIBRARY_SUFFIX}") - if(USE_JFROG_CACHE STREQUAL "ON") + if (USE_JFROG_CACHE STREQUAL "ON") set(OPENBLAS_CACHE_PACKAGE_NAME "openblas_${OPENBLAS_MD5}.tar.gz") set(OPENBLAS_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${OPENBLAS_CACHE_PACKAGE_NAME}") set(OPENBLAS_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${OPENBLAS_CACHE_PACKAGE_NAME}") @@ -455,15 +454,15 @@ macro(build_openblas) ${OPENBLAS_STATIC_LIB}) ExternalProject_Create_Cache(openblas_ep ${OPENBLAS_CACHE_PACKAGE_PATH} "${INDEX_BINARY_DIR}/openblas_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${OPENBLAS_CACHE_URL}) - else() + else () file(DOWNLOAD ${OPENBLAS_CACHE_URL} ${OPENBLAS_CACHE_PACKAGE_PATH} STATUS status) list(GET status 0 status_code) message(STATUS "DOWNLOADING FROM ${OPENBLAS_CACHE_URL} TO ${OPENBLAS_CACHE_PACKAGE_PATH}. STATUS = ${status_code}") if (status_code EQUAL 0) ExternalProject_Use_Cache(openblas_ep ${OPENBLAS_CACHE_PACKAGE_PATH} ${INDEX_BINARY_DIR}) - endif() - endif() - else() + endif () + endif () + else () externalproject_add(openblas_ep URL ${OPENBLAS_SOURCE_URL} @@ -481,7 +480,7 @@ macro(build_openblas) install BUILD_BYPRODUCTS ${OPENBLAS_STATIC_LIB}) - endif() + endif () file(MAKE_DIRECTORY "${OPENBLAS_INCLUDE_DIR}") add_library(openblas STATIC IMPORTED) @@ -510,7 +509,7 @@ macro(build_lapack) "-DCMAKE_INSTALL_PREFIX=${LAPACK_PREFIX}" -DCMAKE_INSTALL_LIBDIR=lib) - if(USE_JFROG_CACHE STREQUAL "ON") + if (USE_JFROG_CACHE STREQUAL "ON") set(LAPACK_CACHE_PACKAGE_NAME "lapack_${LAPACK_MD5}.tar.gz") set(LAPACK_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${LAPACK_CACHE_PACKAGE_NAME}") set(LAPACK_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${LAPACK_CACHE_PACKAGE_NAME}") @@ -531,15 +530,15 @@ macro(build_lapack) ${LAPACK_STATIC_LIB}) ExternalProject_Create_Cache(lapack_ep ${LAPACK_CACHE_PACKAGE_PATH} "${INDEX_BINARY_DIR}/lapack_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${LAPACK_CACHE_URL}) - else() + else () file(DOWNLOAD ${LAPACK_CACHE_URL} ${LAPACK_CACHE_PACKAGE_PATH} STATUS status) list(GET status 0 status_code) message(STATUS "DOWNLOADING FROM ${LAPACK_CACHE_URL} TO ${LAPACK_CACHE_PACKAGE_PATH}. 
STATUS = ${status_code}") if (status_code EQUAL 0) ExternalProject_Use_Cache(lapack_ep ${LAPACK_CACHE_PACKAGE_PATH} ${INDEX_BINARY_DIR}) - endif() - endif() - else() + endif () + endif () + else () externalproject_add(lapack_ep URL ${LAPACK_SOURCE_URL} @@ -551,7 +550,7 @@ macro(build_lapack) ${MAKE_BUILD_ARGS} BUILD_BYPRODUCTS ${LAPACK_STATIC_LIB}) - endif() + endif () file(MAKE_DIRECTORY "${LAPACK_INCLUDE_DIR}") add_library(lapack STATIC IMPORTED) @@ -571,13 +570,13 @@ macro(build_gtest) set(GTEST_VENDORED TRUE) set(GTEST_CMAKE_CXX_FLAGS "${EP_CXX_FLAGS}") - if(APPLE) + if (APPLE) set(GTEST_CMAKE_CXX_FLAGS ${GTEST_CMAKE_CXX_FLAGS} -DGTEST_USE_OWN_TR1_TUPLE=1 -Wno-unused-value -Wno-ignored-attributes) - endif() + endif () set(GTEST_PREFIX "${INDEX_BINARY_DIR}/googletest_ep-prefix/src/googletest_ep") set(GTEST_INCLUDE_DIR "${GTEST_PREFIX}/include") @@ -596,10 +595,10 @@ macro(build_gtest) set(GMOCK_INCLUDE_DIR "${GTEST_PREFIX}/include") set(GMOCK_STATIC_LIB "${GTEST_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}gmock${CMAKE_STATIC_LIBRARY_SUFFIX}" - ) + ) - if(USE_JFROG_CACHE STREQUAL "ON") + if (USE_JFROG_CACHE STREQUAL "ON") set(GTEST_CACHE_PACKAGE_NAME "googletest_${GTEST_MD5}.tar.gz") set(GTEST_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${GTEST_CACHE_PACKAGE_NAME}") set(GTEST_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${GTEST_CACHE_PACKAGE_NAME}") @@ -622,15 +621,15 @@ macro(build_gtest) ${EP_LOG_OPTIONS}) ExternalProject_Create_Cache(googletest_ep ${GTEST_CACHE_PACKAGE_PATH} "${INDEX_BINARY_DIR}/googletest_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${GTEST_CACHE_URL}) - else() + else () file(DOWNLOAD ${GTEST_CACHE_URL} ${GTEST_CACHE_PACKAGE_PATH} STATUS status) list(GET status 0 status_code) message(STATUS "DOWNLOADING FROM ${GTEST_CACHE_URL} TO ${GTEST_CACHE_PACKAGE_PATH}. STATUS = ${status_code}") if (status_code EQUAL 0) ExternalProject_Use_Cache(googletest_ep ${GTEST_CACHE_PACKAGE_PATH} ${INDEX_BINARY_DIR}) - endif() - endif() - else() + endif () + endif () + else () ExternalProject_Add(googletest_ep URL ${GTEST_SOURCE_URL} @@ -644,20 +643,20 @@ macro(build_gtest) CMAKE_ARGS ${GTEST_CMAKE_ARGS} ${EP_LOG_OPTIONS}) - endif() + endif () # The include directory must exist before it is referenced by a target. 
file(MAKE_DIRECTORY "${GTEST_INCLUDE_DIR}") add_library(gtest STATIC IMPORTED) set_target_properties(gtest - PROPERTIES IMPORTED_LOCATION "${GTEST_STATIC_LIB}" - INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIR}") + PROPERTIES IMPORTED_LOCATION "${GTEST_STATIC_LIB}" + INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIR}") add_library(gtest_main STATIC IMPORTED) set_target_properties(gtest_main - PROPERTIES IMPORTED_LOCATION "${GTEST_MAIN_STATIC_LIB}" - INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIR}") + PROPERTIES IMPORTED_LOCATION "${GTEST_MAIN_STATIC_LIB}" + INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIR}") add_library(gmock STATIC IMPORTED) set_target_properties(gmock @@ -673,44 +672,88 @@ endmacro() if (KNOWHERE_BUILD_TESTS AND NOT TARGET googletest_ep) resolve_dependency(GTest) - if(NOT GTEST_VENDORED) - endif() + if (NOT GTEST_VENDORED) + endif () # TODO: Don't use global includes but rather target_include_directories get_target_property(GTEST_INCLUDE_DIR gtest INTERFACE_INCLUDE_DIRECTORIES) link_directories(SYSTEM "${GTEST_PREFIX}/lib") include_directories(SYSTEM ${GTEST_INCLUDE_DIR}) -endif() +endif () # ---------------------------------------------------------------------- # FAISS macro(build_faiss) message(STATUS "Building FAISS-${FAISS_VERSION} from source") + + if (NOT DEFINED BUILD_FAISS_WITH_MKL) + set(BUILD_FAISS_WITH_MKL OFF) + endif () + + if (EXISTS "/proc/cpuinfo") + FILE(READ /proc/cpuinfo PROC_CPUINFO) + + SET(VENDOR_ID_RX "vendor_id[ \t]*:[ \t]*([a-zA-Z]+)\n") + STRING(REGEX MATCH "${VENDOR_ID_RX}" VENDOR_ID "${PROC_CPUINFO}") + STRING(REGEX REPLACE "${VENDOR_ID_RX}" "\\1" VENDOR_ID "${VENDOR_ID}") + + if (NOT ${VENDOR_ID} STREQUAL "GenuineIntel") + set(BUILD_FAISS_WITH_MKL OFF) + endif () + endif () + set(FAISS_PREFIX "${INDEX_BINARY_DIR}/faiss_ep-prefix/src/faiss_ep") set(FAISS_INCLUDE_DIR "${FAISS_PREFIX}/include") set(FAISS_STATIC_LIB "${FAISS_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}faiss${CMAKE_STATIC_LIBRARY_SUFFIX}") - set(FAISS_CONFIGURE_ARGS "--prefix=${FAISS_PREFIX}" "CFLAGS=${EP_C_FLAGS}" "CXXFLAGS=${EP_CXX_FLAGS}" - "LDFLAGS=-L${OPENBLAS_PREFIX}/lib -L${LAPACK_PREFIX}/lib -lopenblas -llapack" --without-python) - - if(${KNOWHERE_WITH_FAISS_GPU_VERSION} STREQUAL "ON") + set(FAISS_CFLAGS ${EP_C_FLAGS}) + set(FAISS_CXXFLAGS ${EP_CXX_FLAGS}) + + if (BUILD_FAISS_WITH_MKL) + + find_path(MKL_LIB_PATH + NAMES "libmkl_intel_ilp64.a" "libmkl_gnu_thread.a" "libmkl_core.a" + PATH_SUFFIXES "intel/compilers_and_libraries_${MKL_VERSION}/linux/mkl/lib/intel64/") + if (${MKL_LIB_PATH} STREQUAL "MKL_LIB_PATH-NOTFOUND") + message(FATAL_ERROR "Could not find MKL libraries") + endif () + message(STATUS "Build Faiss with MKL. 
MKL lib path = ${MKL_LIB_PATH}") + + set(MKL_LIBS + ${MKL_LIB_PATH}/libmkl_intel_ilp64.a + ${MKL_LIB_PATH}/libmkl_gnu_thread.a + ${MKL_LIB_PATH}/libmkl_core.a + ) + + set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS} + "CPPFLAGS=-DFINTEGER=long -DMKL_ILP64 -m64 -I${MKL_LIB_PATH}/../../include" + "LDFLAGS=-L${MKL_LIB_PATH}" + ) + + else () + message(STATUS "Build Faiss with OpenBlas/LAPACK") + set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS} + "LDFLAGS=-L${OPENBLAS_PREFIX}/lib -L${LAPACK_PREFIX}/lib") + endif () + + if (KNOWHERE_GPU_VERSION) set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS} "--with-cuda=${CUDA_TOOLKIT_ROOT_DIR}" "--with-cuda-arch=-gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_61,code=sm_61 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75" ) - else() + else () set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS} --without-cuda) - endif() + endif () - if(USE_JFROG_CACHE STREQUAL "ON") + if (USE_JFROG_CACHE STREQUAL "ON") string(MD5 FAISS_COMBINE_MD5 "${FAISS_MD5}${LAPACK_MD5}${OPENBLAS_MD5}") set(FAISS_CACHE_PACKAGE_NAME "faiss_${FAISS_COMBINE_MD5}.tar.gz") set(FAISS_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${FAISS_CACHE_PACKAGE_NAME}") @@ -735,18 +778,20 @@ macro(build_faiss) BUILD_BYPRODUCTS ${FAISS_STATIC_LIB}) - ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep) + if (NOT BUILD_FAISS_WITH_MKL) + ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep) + endif () ExternalProject_Create_Cache(faiss_ep ${FAISS_CACHE_PACKAGE_PATH} "${INDEX_BINARY_DIR}/faiss_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${FAISS_CACHE_URL}) - else() + else () file(DOWNLOAD ${FAISS_CACHE_URL} ${FAISS_CACHE_PACKAGE_PATH} STATUS status) list(GET status 0 status_code) message(STATUS "DOWNLOADING FROM ${FAISS_CACHE_URL} TO ${FAISS_CACHE_PACKAGE_PATH}. 
STATUS = ${status_code}") if (status_code EQUAL 0) ExternalProject_Use_Cache(faiss_ep ${FAISS_CACHE_PACKAGE_PATH} ${INDEX_BINARY_DIR}) - endif() - endif() - else() + endif () + endif () + else () externalproject_add(faiss_ep URL ${FAISS_SOURCE_URL} @@ -763,35 +808,54 @@ macro(build_faiss) BUILD_BYPRODUCTS ${FAISS_STATIC_LIB}) - ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep) - endif() + if (NOT BUILD_FAISS_WITH_MKL) + ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep) + endif () + + endif () file(MAKE_DIRECTORY "${FAISS_INCLUDE_DIR}") add_library(faiss STATIC IMPORTED) + set_target_properties( faiss - PROPERTIES IMPORTED_LOCATION "${FAISS_STATIC_LIB}" + PROPERTIES + IMPORTED_LOCATION "${FAISS_STATIC_LIB}" INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}" - INTERFACE_LINK_LIBRARIES "openblas;lapack" ) + ) + if (BUILD_FAISS_WITH_MKL) + set_target_properties( + faiss + PROPERTIES + INTERFACE_LINK_LIBRARIES "${MKL_LIBS}") + else () + set_target_properties( + faiss + PROPERTIES + INTERFACE_LINK_LIBRARIES "openblas;lapack") + endif () + add_dependencies(faiss faiss_ep) endmacro() -if(KNOWHERE_WITH_FAISS AND NOT TARGET faiss_ep) +if (KNOWHERE_WITH_FAISS AND NOT TARGET faiss_ep) - resolve_dependency(OpenBLAS) - get_target_property(OPENBLAS_INCLUDE_DIR openblas INTERFACE_INCLUDE_DIRECTORIES) - include_directories(SYSTEM "${OPENBLAS_INCLUDE_DIR}") - link_directories(SYSTEM ${OPENBLAS_PREFIX}/lib) + if (NOT BUILD_FAISS_WITH_MKL) + resolve_dependency(OpenBLAS) + get_target_property(OPENBLAS_INCLUDE_DIR openblas INTERFACE_INCLUDE_DIRECTORIES) + include_directories(SYSTEM "${OPENBLAS_INCLUDE_DIR}") + link_directories(SYSTEM ${OPENBLAS_PREFIX}/lib) - resolve_dependency(LAPACK) - get_target_property(LAPACK_INCLUDE_DIR lapack INTERFACE_INCLUDE_DIRECTORIES) - include_directories(SYSTEM "${LAPACK_INCLUDE_DIR}") - link_directories(SYSTEM "${LAPACK_PREFIX}/lib") + resolve_dependency(LAPACK) + get_target_property(LAPACK_INCLUDE_DIR lapack INTERFACE_INCLUDE_DIRECTORIES) + include_directories(SYSTEM "${LAPACK_INCLUDE_DIR}") + link_directories(SYSTEM "${LAPACK_PREFIX}/lib") + endif () resolve_dependency(FAISS) get_target_property(FAISS_INCLUDE_DIR faiss INTERFACE_INCLUDE_DIRECTORIES) include_directories(SYSTEM "${FAISS_INCLUDE_DIR}") link_directories(SYSTEM ${FAISS_PREFIX}/lib/) -endif() +endif () diff --git a/core/src/index/knowhere/CMakeLists.txt b/core/src/index/knowhere/CMakeLists.txt index bece9058a9..11c79e5466 100644 --- a/core/src/index/knowhere/CMakeLists.txt +++ b/core/src/index/knowhere/CMakeLists.txt @@ -1,6 +1,3 @@ -include_directories(${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}) -link_directories(${CUDA_TOOLKIT_ROOT_DIR}/lib64) - include_directories(${INDEX_SOURCE_DIR}/knowhere) include_directories(${INDEX_SOURCE_DIR}/thirdparty) include_directories(${INDEX_SOURCE_DIR}/thirdparty/SPTAG/AnnService) @@ -19,9 +16,9 @@ file(GLOB SRC_FILES ${SPTAG_SOURCE_DIR}/AnnService/src/Core/KDT/*.cpp ${SPTAG_SOURCE_DIR}/AnnService/src/Helper/*.cpp) -if(NOT TARGET SPTAGLibStatic) +if (NOT TARGET SPTAGLibStatic) add_library(SPTAGLibStatic STATIC ${SRC_FILES} ${HDR_FILES}) -endif() +endif () set(external_srcs knowhere/adapter/SptagAdapter.cpp @@ -36,19 +33,13 @@ set(index_srcs knowhere/index/vector_index/IndexKDT.cpp knowhere/index/vector_index/IndexIDMAP.cpp knowhere/index/vector_index/IndexIVF.cpp - knowhere/index/vector_index/IndexGPUIVF.cpp knowhere/index/vector_index/helpers/KDTParameterMgr.cpp knowhere/index/vector_index/IndexNSG.cpp 
knowhere/index/vector_index/nsg/NSG.cpp
    knowhere/index/vector_index/nsg/NSGIO.cpp
    knowhere/index/vector_index/nsg/NSGHelper.cpp
-   knowhere/index/vector_index/helpers/Cloner.cpp
-   knowhere/index/vector_index/helpers/FaissGpuResourceMgr.cpp
    knowhere/index/vector_index/IndexIVFSQ.cpp
-   knowhere/index/vector_index/IndexGPUIVFSQ.cpp
-   knowhere/index/vector_index/IndexIVFSQHybrid.cpp
    knowhere/index/vector_index/IndexIVFPQ.cpp
-   knowhere/index/vector_index/IndexGPUIVFPQ.cpp
    knowhere/index/vector_index/FaissBaseIndex.cpp
    knowhere/index/vector_index/helpers/FaissIO.cpp
    knowhere/index/vector_index/helpers/IndexParameter.cpp
@@ -57,24 +48,56 @@ set(index_srcs

set(depend_libs
    SPTAGLibStatic
    faiss
-   openblas
-   lapack
    arrow
    ${ARROW_PREFIX}/lib/libjemalloc_pic.a
-   cudart
-   cublas
    gomp
    gfortran
    pthread
    )
+if (BUILD_FAISS_WITH_MKL)
+    set(depend_libs ${depend_libs}
+            "-Wl,--start-group \
+            ${MKL_LIB_PATH}/libmkl_intel_ilp64.a \
+            ${MKL_LIB_PATH}/libmkl_gnu_thread.a \
+            ${MKL_LIB_PATH}/libmkl_core.a \
+            -Wl,--end-group -lgomp -lpthread -lm -ldl"
+            )
+else ()
+    set(depend_libs ${depend_libs}
+            lapack
+            openblas)
+endif ()

-if(NOT TARGET knowhere)
+if (KNOWHERE_GPU_VERSION)
+    include_directories(${CUDA_INCLUDE_DIRS})
+    link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
+    set(cuda_lib
+            cudart
+            cublas
+            )
+    set(depend_libs ${depend_libs}
+            ${cuda_lib}
+            )
+
+    set(index_srcs ${index_srcs}
+            knowhere/index/vector_index/IndexGPUIVF.cpp
+            knowhere/index/vector_index/helpers/Cloner.cpp
+            knowhere/index/vector_index/helpers/FaissGpuResourceMgr.cpp
+            knowhere/index/vector_index/IndexGPUIVFSQ.cpp
+            knowhere/index/vector_index/IndexIVFSQHybrid.cpp
+            knowhere/index/vector_index/IndexGPUIVFPQ.cpp
+            knowhere/index/vector_index/IndexGPUIDMAP.cpp
+            )
+
+endif ()
+
+if (NOT TARGET knowhere)
add_library(
    knowhere STATIC
    ${external_srcs}
    ${index_srcs}
    )
-endif()
+endif ()

target_link_libraries(
    knowhere
diff --git a/core/src/index/knowhere/knowhere/index/vector_index/IndexGPUIDMAP.cpp b/core/src/index/knowhere/knowhere/index/vector_index/IndexGPUIDMAP.cpp
new file mode 100644
index 0000000000..5c7edd73fe
--- /dev/null
+++ b/core/src/index/knowhere/knowhere/index/vector_index/IndexGPUIDMAP.cpp
@@ -0,0 +1,113 @@
+#include "IndexGPUIDMAP.h"
+
+#include
+#include
+#include
+#include
+
+#ifdef MILVUS_GPU_VERSION
+
+#include
+
+#endif
+
+#include "knowhere/adapter/VectorAdapter.h"
+#include "knowhere/common/Exception.h"
+#include "knowhere/index/vector_index/IndexIDMAP.h"
+#include "knowhere/index/vector_index/helpers/FaissIO.h"
+
+namespace knowhere {
+
+    VectorIndexPtr
+    GPUIDMAP::CopyGpuToCpu(const Config &config) {
+        std::lock_guard<std::mutex> lk(mutex_);
+
+        faiss::Index *device_index = index_.get();
+        faiss::Index *host_index = faiss::gpu::index_gpu_to_cpu(device_index);
+
+        std::shared_ptr<faiss::Index> new_index;
+        new_index.reset(host_index);
+        return std::make_shared<IDMAP>(new_index);
+    }
+
+    VectorIndexPtr
+    GPUIDMAP::Clone() {
+        auto cpu_idx = CopyGpuToCpu(Config());
+
+        if (auto idmap = std::dynamic_pointer_cast<IDMAP>(cpu_idx)) {
+            return idmap->CopyCpuToGpu(gpu_id_, Config());
+        } else {
+            KNOWHERE_THROW_MSG("IndexType not supported for GpuClone");
+        }
+    }
+
+    BinarySet
+    GPUIDMAP::SerializeImpl() {
+        try {
+            MemoryIOWriter writer;
+            {
+                faiss::Index *index = index_.get();
+                faiss::Index *host_index = faiss::gpu::index_gpu_to_cpu(index);
+
+                faiss::write_index(host_index, &writer);
+                delete host_index;
+            }
+            auto data = std::make_shared<uint8_t>();
+            data.reset(writer.data_);
+
+            BinarySet res_set;
+            res_set.Append("IVF", data, writer.rp);
+
+            return res_set;
+        } catch (std::exception &e) {
+            KNOWHERE_THROW_MSG(e.what());
+        }
+    }
+
+    void
+    GPUIDMAP::LoadImpl(const BinarySet &index_binary) {
+        auto binary = index_binary.GetByName("IVF");
+        MemoryIOReader reader;
+        {
+            reader.total = binary->size;
+            reader.data_ = binary->data.get();
+
+            faiss::Index *index = faiss::read_index(&reader);
+
+            if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(gpu_id_)) {
+                ResScope rs(res, gpu_id_, false);
+                auto device_index = faiss::gpu::index_cpu_to_gpu(res->faiss_res.get(), gpu_id_, index);
+                index_.reset(device_index);
+                res_ = res;
+            } else {
+                KNOWHERE_THROW_MSG("Load error, can't get gpu resource");
+            }
+
+            delete index;
+        }
+    }
+
+    VectorIndexPtr
+    GPUIDMAP::CopyGpuToGpu(const int64_t &device_id, const Config &config) {
+        auto cpu_index = CopyGpuToCpu(config);
+        return std::static_pointer_cast<IDMAP>(cpu_index)->CopyCpuToGpu(device_id, config);
+    }
+
+    float *
+    GPUIDMAP::GetRawVectors() {
+        KNOWHERE_THROW_MSG("Not supported");
+    }
+
+    int64_t *
+    GPUIDMAP::GetRawIds() {
+        KNOWHERE_THROW_MSG("Not supported");
+    }
+
+    void
+    GPUIDMAP::search_impl(int64_t n, const float *data, int64_t k, float *distances, int64_t *labels,
+                          const Config &cfg) {
+        ResScope rs(res_, gpu_id_);
+        index_->search(n, (float *) data, k, distances, labels);
+    }
+
+} // namespace knowhere
diff --git a/core/src/index/knowhere/knowhere/index/vector_index/IndexGPUIDMAP.h b/core/src/index/knowhere/knowhere/index/vector_index/IndexGPUIDMAP.h
new file mode 100644
index 0000000000..3272067cf0
--- /dev/null
+++ b/core/src/index/knowhere/knowhere/index/vector_index/IndexGPUIDMAP.h
@@ -0,0 +1,47 @@
+#pragma once
+
+#include "IndexGPUIVF.h"
+#include "IndexIVF.h"
+#include "IndexIDMAP.h"
+
+#include
+#include
+
+namespace knowhere {
+
+    class GPUIDMAP : public IDMAP, public GPUIndex {
+    public:
+        explicit GPUIDMAP(std::shared_ptr<faiss::Index> index, const int64_t &device_id, ResPtr &res)
+                : IDMAP(std::move(index)), GPUIndex(device_id, res) {
+        }
+
+        VectorIndexPtr
+        CopyGpuToCpu(const Config &config) override;
+
+        float *
+        GetRawVectors() override;
+
+        int64_t *
+        GetRawIds() override;
+
+        VectorIndexPtr
+        Clone() override;
+
+        VectorIndexPtr
+        CopyGpuToGpu(const int64_t &device_id, const Config &config) override;
+
+    protected:
+        void
+        search_impl(int64_t n, const float *data, int64_t k, float *distances, int64_t *labels,
+                    const Config &cfg) override;
+
+        BinarySet
+        SerializeImpl() override;
+
+        void
+        LoadImpl(const BinarySet &index_binary) override;
+    };
+
+    using GPUIDMAPPtr = std::shared_ptr<GPUIDMAP>;
+
+} // namespace knowhere
\ No newline at end of file
diff --git a/core/src/index/knowhere/knowhere/index/vector_index/IndexIDMAP.cpp b/core/src/index/knowhere/knowhere/index/vector_index/IndexIDMAP.cpp
index f926951736..f02752abf2
--- a/core/src/index/knowhere/knowhere/index/vector_index/IndexIDMAP.cpp
+++ b/core/src/index/knowhere/knowhere/index/vector_index/IndexIDMAP.cpp
@@ -17,10 +17,18 @@
 #include
 #include
-#include
+
 #include
+#include
+#include
 #include
+#ifdef MILVUS_GPU_VERSION
+
+#include
+
+#endif
+
 #include

 #include "knowhere/adapter/VectorAdapter.h"
@@ -28,244 +36,167 @@
 #include "knowhere/index/vector_index/IndexIDMAP.h"
 #include "knowhere/index/vector_index/helpers/FaissIO.h"

+#ifdef MILVUS_GPU_VERSION
+
+#include "knowhere/index/vector_index/helpers/FaissGpuResourceMgr.h"
+#include "knowhere/index/vector_index/IndexGPUIDMAP.h"
+
+#endif
+
 namespace knowhere {

-BinarySet
-IDMAP::Serialize() {
-    if (!index_) {
-        KNOWHERE_THROW_MSG("index not initialize");
-    }
-
-    std::lock_guard<std::mutex> lk(mutex_);
-    return SerializeImpl();
-}
-
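// Usage sketch for the relocated GPU path (hedged: Train/Add/Search and
// CopyCpuToGpu are the methods defined in this file; the device id and the
// config contents are hypothetical):
//
//     auto index = std::make_shared<knowhere::IDMAP>();
//     index->Train(config);                                // builds "IDMap,Flat"
//     index->Add(base_dataset, config);
//     auto result = index->Search(query_dataset, config);
// #ifdef MILVUS_GPU_VERSION
//     auto gpu_index = index->CopyCpuToGpu(0, config);     // clone to device 0
// #endif
//
// On a CPU-only build CopyCpuToGpu() still compiles but throws at runtime.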
-void -IDMAP::Load(const BinarySet& index_binary) { - std::lock_guard lk(mutex_); - LoadImpl(index_binary); -} - -DatasetPtr -IDMAP::Search(const DatasetPtr& dataset, const Config& config) { - if (!index_) { - KNOWHERE_THROW_MSG("index not initialize"); - } - - config->CheckValid(); - // auto metric_type = config["metric_type"].as_string() == "L2" ? - // faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT; - // index_->metric_type = metric_type; - - GETTENSOR(dataset) - - auto elems = rows * config->k; - auto res_ids = (int64_t*)malloc(sizeof(int64_t) * elems); - auto res_dis = (float*)malloc(sizeof(float) * elems); - - search_impl(rows, (float*)p_data, config->k, res_dis, res_ids, Config()); - - auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems); - auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems); - - std::vector id_bufs{nullptr, id_buf}; - std::vector dist_bufs{nullptr, dist_buf}; - - auto int64_type = std::make_shared(); - auto float_type = std::make_shared(); - - auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs); - auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs); - - auto ids = std::make_shared>(id_array_data); - auto dists = std::make_shared>(dist_array_data); - std::vector array{ids, dists}; - - return std::make_shared(array, nullptr); -} - -void -IDMAP::search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) { - index_->search(n, (float*)data, k, distances, labels); -} - -void -IDMAP::Add(const DatasetPtr& dataset, const Config& config) { - if (!index_) { - KNOWHERE_THROW_MSG("index not initialize"); - } - - std::lock_guard lk(mutex_); - GETTENSOR(dataset) - - // TODO: magic here. - auto array = dataset->array()[0]; - auto p_ids = array->data()->GetValues(1, 0); - - index_->add_with_ids(rows, (float*)p_data, p_ids); -} - -int64_t -IDMAP::Count() { - return index_->ntotal; -} - -int64_t -IDMAP::Dimension() { - return index_->d; -} - -// TODO(linxj): return const pointer -float* -IDMAP::GetRawVectors() { - try { - auto file_index = dynamic_cast(index_.get()); - auto flat_index = dynamic_cast(file_index->index); - return flat_index->xb.data(); - } catch (std::exception& e) { - KNOWHERE_THROW_MSG(e.what()); - } -} - -// TODO(linxj): return const pointer -int64_t* -IDMAP::GetRawIds() { - try { - auto file_index = dynamic_cast(index_.get()); - return file_index->id_map.data(); - } catch (std::exception& e) { - KNOWHERE_THROW_MSG(e.what()); - } -} - -const char* type = "IDMap,Flat"; - -void -IDMAP::Train(const Config& config) { - config->CheckValid(); - - auto index = faiss::index_factory(config->d, type, GetMetricType(config->metric_type)); - index_.reset(index); -} - -VectorIndexPtr -IDMAP::Clone() { - std::lock_guard lk(mutex_); - - auto clone_index = faiss::clone_index(index_.get()); - std::shared_ptr new_index; - new_index.reset(clone_index); - return std::make_shared(new_index); -} - -VectorIndexPtr -IDMAP::CopyCpuToGpu(const int64_t& device_id, const Config& config) { - if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(device_id)) { - ResScope rs(res, device_id, false); - auto gpu_index = faiss::gpu::index_cpu_to_gpu(res->faiss_res.get(), device_id, index_.get()); - - std::shared_ptr device_index; - device_index.reset(gpu_index); - return std::make_shared(device_index, device_id, res); - } else { - KNOWHERE_THROW_MSG("CopyCpuToGpu Error, can't get gpu_resource"); - } -} - -void -IDMAP::Seal() { - // do nothing -} - 
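// Ownership note for the GPU<->CPU copies that follow (and for their new
// home in IndexGPUIDMAP.cpp): faiss::gpu::index_gpu_to_cpu() hands back a
// raw, heap-allocated host index, so every call site wraps it immediately:
//
//     faiss::Index* host_index = faiss::gpu::index_gpu_to_cpu(device_index);
//     std::shared_ptr<faiss::Index> owned(host_index);  // RAII from here on
//
// SerializeImpl() is the one exception: it deletes its temporary host copy
// by hand once faiss::write_index() has flushed it into the MemoryIOWriter.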
-VectorIndexPtr -GPUIDMAP::CopyGpuToCpu(const Config& config) { - std::lock_guard lk(mutex_); - - faiss::Index* device_index = index_.get(); - faiss::Index* host_index = faiss::gpu::index_gpu_to_cpu(device_index); - - std::shared_ptr new_index; - new_index.reset(host_index); - return std::make_shared(new_index); -} - -VectorIndexPtr -GPUIDMAP::Clone() { - auto cpu_idx = CopyGpuToCpu(Config()); - - if (auto idmap = std::dynamic_pointer_cast(cpu_idx)) { - return idmap->CopyCpuToGpu(gpu_id_, Config()); - } else { - KNOWHERE_THROW_MSG("IndexType not Support GpuClone"); - } -} - -BinarySet -GPUIDMAP::SerializeImpl() { - try { - MemoryIOWriter writer; - { - faiss::Index* index = index_.get(); - faiss::Index* host_index = faiss::gpu::index_gpu_to_cpu(index); - - faiss::write_index(host_index, &writer); - delete host_index; + BinarySet + IDMAP::Serialize() { + if (!index_) { + KNOWHERE_THROW_MSG("index not initialize"); } - auto data = std::make_shared(); - data.reset(writer.data_); - BinarySet res_set; - res_set.Append("IVF", data, writer.rp); - - return res_set; - } catch (std::exception& e) { - KNOWHERE_THROW_MSG(e.what()); + std::lock_guard lk(mutex_); + return SerializeImpl(); } -} -void -GPUIDMAP::LoadImpl(const BinarySet& index_binary) { - auto binary = index_binary.GetByName("IVF"); - MemoryIOReader reader; - { - reader.total = binary->size; - reader.data_ = binary->data.get(); + void + IDMAP::Load(const BinarySet &index_binary) { + std::lock_guard lk(mutex_); + LoadImpl(index_binary); + } - faiss::Index* index = faiss::read_index(&reader); + DatasetPtr + IDMAP::Search(const DatasetPtr &dataset, const Config &config) { + if (!index_) { + KNOWHERE_THROW_MSG("index not initialize"); + } - if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(gpu_id_)) { - ResScope rs(res, gpu_id_, false); - auto device_index = faiss::gpu::index_cpu_to_gpu(res->faiss_res.get(), gpu_id_, index); - index_.reset(device_index); - res_ = res; + config->CheckValid(); + // auto metric_type = config["metric_type"].as_string() == "L2" ? + // faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT; + // index_->metric_type = metric_type; + + GETTENSOR(dataset) + + auto elems = rows * config->k; + auto res_ids = (int64_t *) malloc(sizeof(int64_t) * elems); + auto res_dis = (float *) malloc(sizeof(float) * elems); + + search_impl(rows, (float *) p_data, config->k, res_dis, res_ids, Config()); + + auto id_buf = MakeMutableBufferSmart((uint8_t *) res_ids, sizeof(int64_t) * elems); + auto dist_buf = MakeMutableBufferSmart((uint8_t *) res_dis, sizeof(float) * elems); + + std::vector id_bufs{nullptr, id_buf}; + std::vector dist_bufs{nullptr, dist_buf}; + + auto int64_type = std::make_shared(); + auto float_type = std::make_shared(); + + auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs); + auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs); + + auto ids = std::make_shared>(id_array_data); + auto dists = std::make_shared>(dist_array_data); + std::vector array{ids, dists}; + + return std::make_shared(array, nullptr); + } + + void + IDMAP::search_impl(int64_t n, const float *data, int64_t k, float *distances, int64_t *labels, const Config &cfg) { + index_->search(n, (float *) data, k, distances, labels); + } + + void + IDMAP::Add(const DatasetPtr &dataset, const Config &config) { + if (!index_) { + KNOWHERE_THROW_MSG("index not initialize"); + } + + std::lock_guard lk(mutex_); + GETTENSOR(dataset) + + // TODO: magic here. 
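+        // Arrow layout note on the "magic" below: in an arrow::ArrayData the
+        // buffer at slot 0 is the validity bitmap and slot 1 holds the raw
+        // values, so GetValues(1, 0) reads the id column starting at offset 0.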
+ auto array = dataset->array()[0]; + auto p_ids = array->data()->GetValues(1, 0); + + index_->add_with_ids(rows, (float *) p_data, p_ids); + } + + int64_t + IDMAP::Count() { + return index_->ntotal; + } + + int64_t + IDMAP::Dimension() { + return index_->d; + } + +// TODO(linxj): return const pointer + float * + IDMAP::GetRawVectors() { + try { + auto file_index = dynamic_cast(index_.get()); + auto flat_index = dynamic_cast(file_index->index); + return flat_index->xb.data(); + } catch (std::exception &e) { + KNOWHERE_THROW_MSG(e.what()); + } + } + +// TODO(linxj): return const pointer + int64_t * + IDMAP::GetRawIds() { + try { + auto file_index = dynamic_cast(index_.get()); + return file_index->id_map.data(); + } catch (std::exception &e) { + KNOWHERE_THROW_MSG(e.what()); + } + } + + const char *type = "IDMap,Flat"; + + void + IDMAP::Train(const Config &config) { + config->CheckValid(); + + auto index = faiss::index_factory(config->d, type, GetMetricType(config->metric_type)); + index_.reset(index); + } + + VectorIndexPtr + IDMAP::Clone() { + std::lock_guard lk(mutex_); + + auto clone_index = faiss::clone_index(index_.get()); + std::shared_ptr new_index; + new_index.reset(clone_index); + return std::make_shared(new_index); + } + + VectorIndexPtr + IDMAP::CopyCpuToGpu(const int64_t &device_id, const Config &config) { + +#ifdef MILVUS_GPU_VERSION + + if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(device_id)) { + ResScope rs(res, device_id, false); + auto gpu_index = faiss::gpu::index_cpu_to_gpu(res->faiss_res.get(), device_id, index_.get()); + + std::shared_ptr device_index; + device_index.reset(gpu_index); + return std::make_shared(device_index, device_id, res); } else { - KNOWHERE_THROW_MSG("Load error, can't get gpu resource"); + KNOWHERE_THROW_MSG("CopyCpuToGpu Error, can't get gpu_resource"); } +#else + KNOWHERE_THROW_MSG("Calling IDMAP::CopyCpuToGpu when we are using CPU version"); +#endif - delete index; } -} -VectorIndexPtr -GPUIDMAP::CopyGpuToGpu(const int64_t& device_id, const Config& config) { - auto cpu_index = CopyGpuToCpu(config); - return std::static_pointer_cast(cpu_index)->CopyCpuToGpu(device_id, config); -} - -float* -GPUIDMAP::GetRawVectors() { - KNOWHERE_THROW_MSG("Not support"); -} - -int64_t* -GPUIDMAP::GetRawIds() { - KNOWHERE_THROW_MSG("Not support"); -} - -void -GPUIDMAP::search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) { - ResScope rs(res_, gpu_id_); - index_->search(n, (float*)data, k, distances, labels); -} + void + IDMAP::Seal() { + // do nothing + } } // namespace knowhere diff --git a/core/src/index/knowhere/knowhere/index/vector_index/IndexIDMAP.h b/core/src/index/knowhere/knowhere/index/vector_index/IndexIDMAP.h index ec1cbb9e77..0f66e8fac0 100644 --- a/core/src/index/knowhere/knowhere/index/vector_index/IndexIDMAP.h +++ b/core/src/index/knowhere/knowhere/index/vector_index/IndexIDMAP.h @@ -17,7 +17,6 @@ #pragma once -#include "IndexGPUIVF.h" #include "IndexIVF.h" #include @@ -67,32 +66,4 @@ class IDMAP : public VectorIndex, public FaissBaseIndex { using IDMAPPtr = std::shared_ptr; -class GPUIDMAP : public IDMAP, public GPUIndex { - public: - explicit GPUIDMAP(std::shared_ptr index, const int64_t& device_id, ResPtr& res) - : IDMAP(std::move(index)), GPUIndex(device_id, res) { - } - - VectorIndexPtr - CopyGpuToCpu(const Config& config) override; - float* - GetRawVectors() override; - int64_t* - GetRawIds() override; - VectorIndexPtr - Clone() override; - VectorIndexPtr - CopyGpuToGpu(const 
int64_t& device_id, const Config& config) override; - - protected: - void - search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) override; - BinarySet - SerializeImpl() override; - void - LoadImpl(const BinarySet& index_binary) override; -}; - -using GPUIDMAPPtr = std::shared_ptr; - } // namespace knowhere diff --git a/core/src/index/knowhere/knowhere/index/vector_index/IndexIVF.cpp b/core/src/index/knowhere/knowhere/index/vector_index/IndexIVF.cpp index 6da5db38ec..1b1d1a86f0 100644 --- a/core/src/index/knowhere/knowhere/index/vector_index/IndexIVF.cpp +++ b/core/src/index/knowhere/knowhere/index/vector_index/IndexIVF.cpp @@ -15,11 +15,19 @@ // specific language governing permissions and limitations // under the License. +#include #include #include #include #include +#include +#include +#include +#include +#ifdef MILVUS_GPU_VERSION +#include #include +#endif #include #include @@ -29,7 +37,9 @@ #include "knowhere/adapter/VectorAdapter.h" #include "knowhere/common/Exception.h" #include "knowhere/common/Log.h" +#ifdef MILVUS_GPU_VERSION #include "knowhere/index/vector_index/IndexGPUIVF.h" +#endif #include "knowhere/index/vector_index/IndexIVF.h" namespace knowhere { @@ -221,16 +231,18 @@ IVF::search_impl(int64_t n, const float* data, int64_t k, float* distances, int6 faiss::ivflib::search_with_parameters(index_.get(), n, (float*)data, k, distances, labels, params.get()); stdclock::time_point after = stdclock::now(); double search_cost = (std::chrono::duration(after - before)).count(); - KNOWHERE_LOG_DEBUG << "K=" << k << " NQ=" << n << " NL=" << faiss::indexIVF_stats.nlist - << " ND=" << faiss::indexIVF_stats.ndis << " NH=" << faiss::indexIVF_stats.nheap_updates - << " Q=" << faiss::indexIVF_stats.quantization_time - << " S=" << faiss::indexIVF_stats.search_time; + KNOWHERE_LOG_DEBUG << "IVF search cost: " << search_cost + << ", quantization cost: " << faiss::indexIVF_stats.quantization_time + << ", data search cost: " << faiss::indexIVF_stats.search_time; faiss::indexIVF_stats.quantization_time = 0; faiss::indexIVF_stats.search_time = 0; } VectorIndexPtr IVF::CopyCpuToGpu(const int64_t& device_id, const Config& config) { + +#ifdef MILVUS_GPU_VERSION + if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(device_id)) { ResScope rs(res, device_id, false); auto gpu_index = faiss::gpu::index_cpu_to_gpu(res->faiss_res.get(), device_id, index_.get()); @@ -241,6 +253,10 @@ IVF::CopyCpuToGpu(const int64_t& device_id, const Config& config) { } else { KNOWHERE_THROW_MSG("CopyCpuToGpu Error, can't get gpu_resource"); } + +#else + KNOWHERE_THROW_MSG("Calling IVF::CopyCpuToGpu when we are using CPU version"); +#endif } VectorIndexPtr diff --git a/core/src/index/knowhere/knowhere/index/vector_index/IndexIVFSQ.cpp b/core/src/index/knowhere/knowhere/index/vector_index/IndexIVFSQ.cpp index 6e9a1d94da..4abeaf4385 100644 --- a/core/src/index/knowhere/knowhere/index/vector_index/IndexIVFSQ.cpp +++ b/core/src/index/knowhere/knowhere/index/vector_index/IndexIVFSQ.cpp @@ -15,15 +15,23 @@ // specific language governing permissions and limitations // under the License. 
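// The same compile-time split recurs in each index below: GPU headers are
// guarded, and CopyCpuToGpu() stays in the CPU build but fails loudly. A
// sketch of the pattern (SomeIndex is a placeholder for IVFSQ, IVF, ...):
//
//     VectorIndexPtr
//     SomeIndex::CopyCpuToGpu(const int64_t& device_id, const Config& config) {
//     #ifdef MILVUS_GPU_VERSION
//         // ... clone onto the device via FaissGpuResourceMgr ...
//     #else
//         KNOWHERE_THROW_MSG("Calling SomeIndex::CopyCpuToGpu when we are using CPU version");
//     #endif
//     }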
+#ifdef MILVUS_GPU_VERSION +#include #include +#endif #include + #include #include "knowhere/adapter/VectorAdapter.h" #include "knowhere/common/Exception.h" -#include "knowhere/index/vector_index/IndexGPUIVFSQ.h" #include "knowhere/index/vector_index/IndexIVFSQ.h" + +#ifdef MILVUS_GPU_VERSION +#include "knowhere/index/vector_index/IndexGPUIVFSQ.h" #include "knowhere/index/vector_index/helpers/FaissGpuResourceMgr.h" +#endif +#include "knowhere/index/vector_index/IndexIVFSQ.h" namespace knowhere { @@ -54,6 +62,9 @@ IVFSQ::Clone_impl(const std::shared_ptr& index) { VectorIndexPtr IVFSQ::CopyCpuToGpu(const int64_t& device_id, const Config& config) { + +#ifdef MILVUS_GPU_VERSION + if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(device_id)) { ResScope rs(res, device_id, false); @@ -65,6 +76,10 @@ IVFSQ::CopyCpuToGpu(const int64_t& device_id, const Config& config) { } else { KNOWHERE_THROW_MSG("CopyCpuToGpu Error, can't get gpu_resource"); } + +#else + KNOWHERE_THROW_MSG("Calling IVFSQ::CopyCpuToGpu when we are using CPU version"); +#endif } } // namespace knowhere diff --git a/core/src/index/knowhere/knowhere/index/vector_index/IndexNSG.cpp b/core/src/index/knowhere/knowhere/index/vector_index/IndexNSG.cpp index f5519b8240..8f6d93d7ff 100644 --- a/core/src/index/knowhere/knowhere/index/vector_index/IndexNSG.cpp +++ b/core/src/index/knowhere/knowhere/index/vector_index/IndexNSG.cpp @@ -19,8 +19,10 @@ #include "knowhere/adapter/VectorAdapter.h" #include "knowhere/common/Exception.h" #include "knowhere/common/Timer.h" +#ifdef MILVUS_GPU_VERSION #include "knowhere/index/vector_index/IndexGPUIVF.h" -#include "knowhere/index/vector_index/IndexIDMAP.h" +#endif + #include "knowhere/index/vector_index/IndexIVF.h" #include "knowhere/index/vector_index/nsg/NSG.h" #include "knowhere/index/vector_index/nsg/NSGIO.h" @@ -117,7 +119,11 @@ NSG::Train(const DatasetPtr& dataset, const Config& config) { } // TODO(linxj): dev IndexFactory, support more IndexType +#ifdef MILVUS_GPU_VERSION auto preprocess_index = std::make_shared(build_cfg->gpu_id); +#else + auto preprocess_index = std::make_shared(); +#endif auto model = preprocess_index->Train(dataset, config); preprocess_index->set_index_model(model); preprocess_index->AddWithoutIds(dataset, config); diff --git a/core/src/index/unittest/CMakeLists.txt b/core/src/index/unittest/CMakeLists.txt index 2e84908cd7..145278a636 100644 --- a/core/src/index/unittest/CMakeLists.txt +++ b/core/src/index/unittest/CMakeLists.txt @@ -2,26 +2,32 @@ include_directories(${INDEX_SOURCE_DIR}/thirdparty) include_directories(${INDEX_SOURCE_DIR}/thirdparty/SPTAG/AnnService) include_directories(${INDEX_SOURCE_DIR}/knowhere) include_directories(${INDEX_SOURCE_DIR}) -include_directories(/usr/local/cuda/include) -link_directories(/usr/local/cuda/lib64) - -message(STATUS "arrow prefix: ${ARROW_PREFIX}") -message(STATUS "libjemalloc_pic path: ${ARROW_PREFIX}/lib/libjemalloc_pic.a") set(depend_libs gtest gmock gtest_main gmock_main - faiss openblas lapack + faiss arrow "${ARROW_PREFIX}/lib/libjemalloc_pic.a" ) +if (BUILD_FAISS_WITH_MKL) + set(depend_libs ${depend_libs} + "-Wl,--start-group \ + ${MKL_LIB_PATH}/libmkl_intel_ilp64.a \ + ${MKL_LIB_PATH}/libmkl_gnu_thread.a \ + ${MKL_LIB_PATH}/libmkl_core.a \ + -Wl,--end-group -lgomp -lpthread -lm -ldl" + ) +else () + set(depend_libs ${depend_libs} + lapack + openblas) +endif () set(basic_libs - cudart cublas gomp gfortran pthread ) set(util_srcs ${MILVUS_ENGINE_SRC}/external/easyloggingpp/easylogging++.cc - 
${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/helpers/FaissGpuResourceMgr.cpp ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/helpers/FaissIO.cpp ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/helpers/IndexParameter.cpp ${INDEX_SOURCE_DIR}/knowhere/knowhere/adapter/Structure.cpp @@ -31,32 +37,49 @@ set(util_srcs ${INDEX_SOURCE_DIR}/unittest/utils.cpp ) +if (KNOWHERE_GPU_VERSION) + include_directories(${CUDA_INCLUDE_DIRS}) + link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64") + set(cuda_lib + cudart + cublas + ) + set(basic_libs ${basic_libs} + ${cuda_lib} + ) + set(util_srcs ${util_srcs} + ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/helpers/FaissGpuResourceMgr.cpp + ) +endif () + # set(ivf_srcs - ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/helpers/Cloner.cpp ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexIVF.cpp - ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexGPUIVF.cpp ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexIVFSQ.cpp - ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexGPUIVFSQ.cpp ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexIVFPQ.cpp - ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexGPUIVFPQ.cpp - ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexIVFSQHybrid.cpp ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexIDMAP.cpp ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/FaissBaseIndex.cpp ) -if(NOT TARGET test_ivf) +if (KNOWHERE_GPU_VERSION) + set(ivf_srcs ${ivf_srcs} + ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexGPUIDMAP.cpp + ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/helpers/Cloner.cpp + ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexGPUIVF.cpp + ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexGPUIVFSQ.cpp + ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexGPUIVFPQ.cpp + ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexIVFSQHybrid.cpp + ) +endif () +if (NOT TARGET test_ivf) add_executable(test_ivf test_ivf.cpp ${ivf_srcs} ${util_srcs}) -endif() +endif () target_link_libraries(test_ivf ${depend_libs} ${unittest_libs} ${basic_libs}) # -set(idmap_srcs - ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexIDMAP.cpp - ) -if(NOT TARGET test_idmap) - add_executable(test_idmap test_idmap.cpp ${idmap_srcs} ${ivf_srcs} ${util_srcs}) -endif() +if (NOT TARGET test_idmap) + add_executable(test_idmap test_idmap.cpp ${ivf_srcs} ${util_srcs}) +endif () target_link_libraries(test_idmap ${depend_libs} ${unittest_libs} ${basic_libs}) # @@ -66,25 +89,28 @@ set(kdt_srcs ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/helpers/KDTParameterMgr.cpp ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/IndexKDT.cpp ) -if(NOT TARGET test_kdt) +if (NOT TARGET test_kdt) add_executable(test_kdt test_kdt.cpp ${kdt_srcs} ${util_srcs}) -endif() +endif () target_link_libraries(test_kdt SPTAGLibStatic ${depend_libs} ${unittest_libs} ${basic_libs}) -add_executable(test_gpuresource test_gpuresource.cpp ${util_srcs} ${ivf_srcs}) -target_link_libraries(test_gpuresource ${depend_libs} ${unittest_libs} ${basic_libs}) +if (KNOWHERE_GPU_VERSION) + add_executable(test_gpuresource test_gpuresource.cpp ${util_srcs} ${ivf_srcs}) + target_link_libraries(test_gpuresource ${depend_libs} ${unittest_libs} ${basic_libs}) -add_executable(test_customized_index test_customized_index.cpp ${util_srcs} ${ivf_srcs}) 
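# Why the -Wl,--start-group/--end-group wrapper used for MKL above: the three
# static MKL archives reference symbols in one another, so the linker has to
# rescan the group until no new symbols resolve. A sketch of the equivalent
# raw link line (paths taken from the MKL_LIB_PATH found during configure;
# the object file list is a placeholder):
#
#   g++ test_objects.o -Wl,--start-group \
#       ${MKL_LIB_PATH}/libmkl_intel_ilp64.a \
#       ${MKL_LIB_PATH}/libmkl_gnu_thread.a \
#       ${MKL_LIB_PATH}/libmkl_core.a \
#       -Wl,--end-group -lgomp -lpthread -lm -ldl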
-target_link_libraries(test_customized_index ${depend_libs} ${unittest_libs} ${basic_libs}) + add_executable(test_customized_index test_customized_index.cpp ${util_srcs} ${ivf_srcs}) + target_link_libraries(test_customized_index ${depend_libs} ${unittest_libs} ${basic_libs}) +endif () install(TARGETS test_ivf DESTINATION unittest) install(TARGETS test_idmap DESTINATION unittest) install(TARGETS test_kdt DESTINATION unittest) -install(TARGETS test_gpuresource DESTINATION unittest) -install(TARGETS test_customized_index DESTINATION unittest) - +if (KNOWHERE_GPU_VERSION) + install(TARGETS test_gpuresource DESTINATION unittest) + install(TARGETS test_customized_index DESTINATION unittest) +endif () #add_subdirectory(faiss_ori) #add_subdirectory(faiss_benchmark) add_subdirectory(test_nsg) diff --git a/core/src/index/unittest/Helper.h b/core/src/index/unittest/Helper.h index 8d4bb0f4ae..074c9548aa 100644 --- a/core/src/index/unittest/Helper.h +++ b/core/src/index/unittest/Helper.h @@ -18,13 +18,16 @@ #include #include -#include "knowhere/index/vector_index/IndexGPUIVF.h" -#include "knowhere/index/vector_index/IndexGPUIVFPQ.h" -#include "knowhere/index/vector_index/IndexGPUIVFSQ.h" #include "knowhere/index/vector_index/IndexIVF.h" #include "knowhere/index/vector_index/IndexIVFPQ.h" #include "knowhere/index/vector_index/IndexIVFSQ.h" + +#ifdef MILVUS_GPU_VERSION +#include "knowhere/index/vector_index/IndexGPUIVF.h" +#include "knowhere/index/vector_index/IndexGPUIVFPQ.h" +#include "knowhere/index/vector_index/IndexGPUIVFSQ.h" #include "knowhere/index/vector_index/IndexIVFSQHybrid.h" +#endif int DEVICEID = 0; constexpr int64_t DIM = 128; @@ -36,22 +39,25 @@ constexpr int64_t TEMPMEM = 1024 * 1024 * 300; constexpr int64_t RESNUM = 2; knowhere::IVFIndexPtr -IndexFactory(const std::string& type) { +IndexFactory(const std::string &type) { if (type == "IVF") { return std::make_shared(); } else if (type == "IVFPQ") { return std::make_shared(); - } else if (type == "GPUIVF") { + } else if (type == "IVFSQ") { + return std::make_shared(); + } +#ifdef MILVUS_GPU_VERSION + else if (type == "GPUIVF") { return std::make_shared(DEVICEID); } else if (type == "GPUIVFPQ") { return std::make_shared(DEVICEID); - } else if (type == "IVFSQ") { - return std::make_shared(); } else if (type == "GPUIVFSQ") { return std::make_shared(DEVICEID); } else if (type == "IVFSQHybrid") { return std::make_shared(DEVICEID); } +#endif } enum class ParameterType { @@ -61,15 +67,15 @@ enum class ParameterType { }; class ParamGenerator { - public: - static ParamGenerator& +public: + static ParamGenerator & GetInstance() { static ParamGenerator instance; return instance; } knowhere::Config - Gen(const ParameterType& type) { + Gen(const ParameterType &type) { if (type == ParameterType::ivf) { auto tempconf = std::make_shared(); tempconf->d = DIM; @@ -107,14 +113,18 @@ class ParamGenerator { #include class TestGpuIndexBase : public ::testing::Test { - protected: +protected: void SetUp() override { +#ifdef MILVUS_GPU_VERSION knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, PINMEM, TEMPMEM, RESNUM); +#endif } void TearDown() override { +#ifdef MILVUS_GPU_VERSION knowhere::FaissGpuResourceMgr::GetInstance().Free(); +#endif } }; diff --git a/core/src/index/unittest/faiss_benchmark/CMakeLists.txt b/core/src/index/unittest/faiss_benchmark/CMakeLists.txt index 556364b68a..72eb7e7a7e 100644 --- a/core/src/index/unittest/faiss_benchmark/CMakeLists.txt +++ b/core/src/index/unittest/faiss_benchmark/CMakeLists.txt @@ -1,24 +1,50 @@ 
-include_directories(${INDEX_SOURCE_DIR}/thirdparty) -include_directories(${INDEX_SOURCE_DIR}/include) -include_directories(/usr/local/cuda/include) -include_directories(/usr/local/hdf5/include) +if (KNOWHERE_GPU_VERSION) -link_directories(/usr/local/cuda/lib64) -link_directories(/usr/local/hdf5/lib) + include_directories(${INDEX_SOURCE_DIR}/thirdparty) + include_directories(${INDEX_SOURCE_DIR}/include) + include_directories(/usr/local/cuda/include) + include_directories(/usr/local/hdf5/include) -set(unittest_libs - gtest gmock gtest_main gmock_main) + link_directories(/usr/local/cuda/lib64) + link_directories(/usr/local/hdf5/lib) -set(depend_libs - faiss openblas lapack hdf5 - arrow ${ARROW_PREFIX}/lib/libjemalloc_pic.a - ) + set(unittest_libs + gtest gmock gtest_main gmock_main) -set(basic_libs - cudart cublas - gomp gfortran pthread - ) + set(depend_libs + faiss hdf5 + arrow ${ARROW_PREFIX}/lib/libjemalloc_pic.a + ) + if (BUILD_FAISS_WITH_MKL) + set(depend_libs ${depend_libs} + "-Wl,--start-group \ + ${MKL_LIB_PATH}/libmkl_intel_ilp64.a \ + ${MKL_LIB_PATH}/libmkl_gnu_thread.a \ + ${MKL_LIB_PATH}/libmkl_core.a \ + -Wl,--end-group -lgomp -lpthread -lm -ldl" + ) + else () + set(depend_libs ${depend_libs} + lapack + openblas) + endif () -add_executable(test_faiss_benchmark faiss_benchmark_test.cpp) -target_link_libraries(test_faiss_benchmark ${depend_libs} ${unittest_libs} ${basic_libs}) -install(TARGETS test_faiss_benchmark DESTINATION unittest) + set(basic_libs + gomp gfortran pthread + ) + + include_directories(${CUDA_INCLUDE_DIRS}) + link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64") + set(cuda_lib + cudart + cublas + ) + set(basic_libs ${basic_libs} + ${cuda_lib} + ) + + add_executable(test_faiss_benchmark faiss_benchmark_test.cpp) + target_link_libraries(test_faiss_benchmark ${depend_libs} ${unittest_libs} ${basic_libs}) + install(TARGETS test_faiss_benchmark DESTINATION unittest) + +endif () diff --git a/core/src/index/unittest/faiss_ori/CMakeLists.txt b/core/src/index/unittest/faiss_ori/CMakeLists.txt index d01463aaab..8216764ab7 100644 --- a/core/src/index/unittest/faiss_ori/CMakeLists.txt +++ b/core/src/index/unittest/faiss_ori/CMakeLists.txt @@ -1,26 +1,49 @@ -include_directories(${INDEX_SOURCE_DIR}/thirdparty) -include_directories(${INDEX_SOURCE_DIR}/include) -include_directories(/usr/local/cuda/include) -link_directories(/usr/local/cuda/lib64) +if (KNOWHERE_GPU_VERSION) -set(unittest_libs - gtest gmock gtest_main gmock_main) + include_directories(${INDEX_SOURCE_DIR}/thirdparty) + include_directories(${INDEX_SOURCE_DIR}/include) -set(depend_libs - faiss openblas lapack - arrow ${ARROW_PREFIX}/lib/libjemalloc_pic.a - ) + set(unittest_libs + gtest gmock gtest_main gmock_main) -set(basic_libs - cudart cublas - gomp gfortran pthread - ) + set(depend_libs + faiss + arrow ${ARROW_PREFIX}/lib/libjemalloc_pic.a + ) + if (BUILD_FAISS_WITH_MKL) + set(depend_libs ${depend_libs} + "-Wl,--start-group \ + ${MKL_LIB_PATH}/libmkl_intel_ilp64.a \ + ${MKL_LIB_PATH}/libmkl_gnu_thread.a \ + ${MKL_LIB_PATH}/libmkl_core.a \ + -Wl,--end-group -lgomp -lpthread -lm -ldl" + ) + else () + set(depend_libs ${depend_libs} + lapack + openblas) + endif () + set(basic_libs + gomp gfortran pthread + ) -# -if(NOT TARGET test_gpu) - add_executable(test_gpu gpuresource_test.cpp) -endif() -target_link_libraries(test_gpu ${depend_libs} ${unittest_libs} ${basic_libs}) + include_directories(${CUDA_INCLUDE_DIRS}) + link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64") + set(cuda_lib + cudart + cublas + ) + set(basic_libs 
${basic_libs} + ${cuda_lib} + ) -install(TARGETS test_gpu DESTINATION unittest) \ No newline at end of file + # + if (NOT TARGET test_gpu) + add_executable(test_gpu gpuresource_test.cpp) + endif () + target_link_libraries(test_gpu ${depend_libs} ${unittest_libs} ${basic_libs}) + + install(TARGETS test_gpu DESTINATION unittest) + +endif () \ No newline at end of file diff --git a/core/src/index/unittest/test_idmap.cpp b/core/src/index/unittest/test_idmap.cpp index d1ff3ee046..b6e51361b9 100644 --- a/core/src/index/unittest/test_idmap.cpp +++ b/core/src/index/unittest/test_idmap.cpp @@ -21,8 +21,10 @@ #include "knowhere/adapter/Structure.h" #include "knowhere/common/Exception.h" #include "knowhere/index/vector_index/IndexIDMAP.h" +#ifdef MILVUS_GPU_VERSION #include "knowhere/index/vector_index/helpers/Cloner.h" - +#include "knowhere/index/vector_index/IndexGPUIDMAP.h" +#endif #include "Helper.h" #include "unittest/utils.h" @@ -116,6 +118,7 @@ TEST_F(IDMAPTest, idmap_serialize) { } } +#ifdef MILVUS_GPU_VERSION TEST_F(IDMAPTest, copy_test) { ASSERT_TRUE(!xb.empty()); @@ -175,3 +178,4 @@ TEST_F(IDMAPTest, copy_test) { AssertAnns(device_result, nq, k); } } +#endif diff --git a/core/src/index/unittest/test_ivf.cpp b/core/src/index/unittest/test_ivf.cpp index 8b17e08272..953884bc2f 100644 --- a/core/src/index/unittest/test_ivf.cpp +++ b/core/src/index/unittest/test_ivf.cpp @@ -20,13 +20,24 @@ #include #include +#ifdef MILVUS_GPU_VERSION #include +#endif #include "knowhere/common/Exception.h" #include "knowhere/common/Timer.h" -#include "knowhere/index/vector_index/IndexGPUIVF.h" + #include "knowhere/index/vector_index/IndexIVF.h" +#include "knowhere/index/vector_index/IndexIVFPQ.h" +#include "knowhere/index/vector_index/IndexIVFSQ.h" + +#ifdef MILVUS_GPU_VERSION +#include "knowhere/index/vector_index/IndexGPUIVF.h" +#include "knowhere/index/vector_index/IndexGPUIVFPQ.h" +#include "knowhere/index/vector_index/IndexGPUIVFSQ.h" +#include "knowhere/index/vector_index/IndexIVFSQHybrid.h" #include "knowhere/index/vector_index/helpers/Cloner.h" +#endif #include "unittest/Helper.h" #include "unittest/utils.h" @@ -36,11 +47,12 @@ using ::testing::TestWithParam; using ::testing::Values; class IVFTest : public DataGen, public TestWithParam<::std::tuple> { - protected: +protected: void SetUp() override { +#ifdef MILVUS_GPU_VERSION knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, PINMEM, TEMPMEM, RESNUM); - +#endif ParameterType parameter_type; std::tie(index_type, parameter_type) = GetParam(); // Init_with_default(); @@ -54,7 +66,9 @@ class IVFTest : public DataGen, public TestWithParam<::std::tuple(bin->data.get()), bin->size); + writer(static_cast(bin->data.get()), bin->size); FileIOReader reader(filename); reader(ret, bin->size); @@ -148,6 +165,7 @@ TEST_P(IVFTest, ivf_serialize) { } } +#ifdef MILVUS_GPU_VERSION TEST_P(IVFTest, clone_test) { assert(!xb.empty()); @@ -198,18 +216,18 @@ TEST_P(IVFTest, clone_test) { auto finder = std::find(support_idx_vec.cbegin(), support_idx_vec.cend(), index_type); if (finder != support_idx_vec.cend()) { EXPECT_NO_THROW({ - auto clone_index = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config()); - auto clone_result = clone_index->Search(query_dataset, conf); - AssertEqual(result, clone_result); - std::cout << "clone G <=> C [" << index_type << "] success" << std::endl; - }); + auto clone_index = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config()); + auto clone_result = clone_index->Search(query_dataset, conf); + AssertEqual(result, 
clone_result); + std::cout << "clone G <=> C [" << index_type << "] success" << std::endl; + }); } else { EXPECT_THROW( - { - std::cout << "clone G <=> C [" << index_type << "] failed" << std::endl; - auto clone_index = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config()); - }, - knowhere::KnowhereException); + { + std::cout << "clone G <=> C [" << index_type << "] failed" << std::endl; + auto clone_index = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config()); + }, + knowhere::KnowhereException); } } @@ -223,22 +241,24 @@ TEST_P(IVFTest, clone_test) { auto finder = std::find(support_idx_vec.cbegin(), support_idx_vec.cend(), index_type); if (finder != support_idx_vec.cend()) { EXPECT_NO_THROW({ - auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, DEVICEID, knowhere::Config()); - auto clone_result = clone_index->Search(query_dataset, conf); - AssertEqual(result, clone_result); - std::cout << "clone C <=> G [" << index_type << "] success" << std::endl; - }); + auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, DEVICEID, knowhere::Config()); + auto clone_result = clone_index->Search(query_dataset, conf); + AssertEqual(result, clone_result); + std::cout << "clone C <=> G [" << index_type << "] success" << std::endl; + }); } else { EXPECT_THROW( - { - std::cout << "clone C <=> G [" << index_type << "] failed" << std::endl; - auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, DEVICEID, knowhere::Config()); - }, - knowhere::KnowhereException); + { + std::cout << "clone C <=> G [" << index_type << "] failed" << std::endl; + auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, DEVICEID, knowhere::Config()); + }, + knowhere::KnowhereException); } } } +#endif +#ifdef MILVUS_GPU_VERSION #ifdef CUSTOMIZATION TEST_P(IVFTest, gpu_seal_test) { std::vector support_idx_vec{"GPUIVF", "GPUIVFSQ", "IVFSQHybrid"}; @@ -271,5 +291,5 @@ TEST_P(IVFTest, gpu_seal_test) { auto with_seal = tc.RecordSection("With seal"); ASSERT_GE(without_seal, with_seal); } - +#endif #endif diff --git a/core/src/index/unittest/test_nsg/CMakeLists.txt b/core/src/index/unittest/test_nsg/CMakeLists.txt index 3d22051d82..01227ea745 100644 --- a/core/src/index/unittest/test_nsg/CMakeLists.txt +++ b/core/src/index/unittest/test_nsg/CMakeLists.txt @@ -4,14 +4,13 @@ add_definitions(-std=c++11 -O3 -lboost -march=native -Wall -DINFO) -find_package(OpenMP) +find_package(OpenMP REQUIRED) if (OPENMP_FOUND) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") else () message(FATAL_ERROR "no OpenMP supprot") endif () -message(${OpenMP_CXX_FLAGS}) include_directories(${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/nsg) aux_source_directory(${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/nsg nsg_src) diff --git a/core/src/index/unittest/test_nsg/test_nsg.cpp b/core/src/index/unittest/test_nsg/test_nsg.cpp index 11b9becce4..47c014e691 100644 --- a/core/src/index/unittest/test_nsg/test_nsg.cpp +++ b/core/src/index/unittest/test_nsg/test_nsg.cpp @@ -21,7 +21,9 @@ #include "knowhere/common/Exception.h" #include "knowhere/index/vector_index/FaissBaseIndex.h" #include "knowhere/index/vector_index/IndexNSG.h" +#ifdef MILVUS_GPU_VERSION #include "knowhere/index/vector_index/helpers/FaissGpuResourceMgr.h" +#endif #include "knowhere/index/vector_index/nsg/NSGIO.h" #include "unittest/utils.h" @@ -37,7 +39,9 @@ class NSGInterfaceTest : public DataGen, public ::testing::Test { void SetUp() override { // Init_with_default(); +#ifdef 
MILVUS_GPU_VERSION knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, 1024 * 1024 * 200, 1024 * 1024 * 600, 2); +#endif Generate(256, 1000000 / 100, 1); index_ = std::make_shared(); @@ -60,7 +64,9 @@ class NSGInterfaceTest : public DataGen, public ::testing::Test { void TearDown() override { +#ifdef MILVUS_GPU_VERSION knowhere::FaissGpuResourceMgr::GetInstance().Free(); +#endif } protected: diff --git a/core/src/main.cpp b/core/src/main.cpp index 85f91469bb..cd00024afe 100644 --- a/core/src/main.cpp +++ b/core/src/main.cpp @@ -25,14 +25,14 @@ #include "external/easyloggingpp/easylogging++.h" #include "metrics/Metrics.h" #include "server/Server.h" -#include "src/version.h" +#include "src/config.h" #include "utils/CommonUtil.h" #include "utils/SignalUtil.h" INITIALIZE_EASYLOGGINGPP void -print_help(const std::string& app_name) { +print_help(const std::string &app_name) { std::cout << std::endl << "Usage: " << app_name << " [OPTIONS]" << std::endl << std::endl; std::cout << " Options:" << std::endl; std::cout << " -h --help Print this help" << std::endl; @@ -52,19 +52,24 @@ print_banner() { std::cout << std::endl; std::cout << "Welcome to Milvus!" << std::endl; std::cout << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME << std::endl; +#ifdef MILVUS_CPU_VERSION + std::cout << "You are using Milvus CPU version" << std::endl; +#else + std::cout << "You are using Milvus GPU version" << std::endl; +#endif std::cout << std::endl; } int -main(int argc, char* argv[]) { +main(int argc, char *argv[]) { print_banner(); - static struct option long_options[] = {{"conf_file", required_argument, nullptr, 'c'}, + static struct option long_options[] = {{"conf_file", required_argument, nullptr, 'c'}, {"log_conf_file", required_argument, nullptr, 'l'}, - {"help", no_argument, nullptr, 'h'}, - {"daemon", no_argument, nullptr, 'd'}, - {"pid_file", required_argument, nullptr, 'p'}, - {nullptr, 0, nullptr, 0}}; + {"help", no_argument, nullptr, 'h'}, + {"daemon", no_argument, nullptr, 'd'}, + {"pid_file", required_argument, nullptr, 'p'}, + {nullptr, 0, nullptr, 0}}; int option_index = 0; int64_t start_daemonized = 0; @@ -73,7 +78,7 @@ main(int argc, char* argv[]) { std::string pid_filename; std::string app_name = argv[0]; - milvus::server::Server& server = milvus::server::Server::GetInstance(); + milvus::server::Server &server = milvus::server::Server::GetInstance(); milvus::Status s; if (argc < 2) { @@ -85,21 +90,21 @@ main(int argc, char* argv[]) { while ((value = getopt_long(argc, argv, "c:l:p:dh", long_options, &option_index)) != -1) { switch (value) { case 'c': { - char* config_filename_ptr = strdup(optarg); + char *config_filename_ptr = strdup(optarg); config_filename = config_filename_ptr; free(config_filename_ptr); std::cout << "Loading configuration from: " << config_filename << std::endl; break; } case 'l': { - char* log_filename_ptr = strdup(optarg); + char *log_filename_ptr = strdup(optarg); log_config_file = log_filename_ptr; free(log_filename_ptr); - std::cout << "Initial log config from: " << log_config_file << std::endl; + std::cout << "Initializing log config from: " << log_config_file << std::endl; break; } case 'p': { - char* pid_filename_ptr = strdup(optarg); + char *pid_filename_ptr = strdup(optarg); pid_filename = pid_filename_ptr; free(pid_filename_ptr); std::cout << pid_filename << std::endl; @@ -142,7 +147,7 @@ main(int argc, char* argv[]) { return EXIT_SUCCESS; -FAIL: + FAIL: std::cout << "Milvus server exit..." 
<< std::endl; return EXIT_FAILURE; } diff --git a/core/src/metrics/SystemInfo.cpp b/core/src/metrics/SystemInfo.cpp index 154f7b0797..b0d1577d36 100644 --- a/core/src/metrics/SystemInfo.cpp +++ b/core/src/metrics/SystemInfo.cpp @@ -19,7 +19,6 @@ #include "utils/Log.h" #include -#include #include #include #include @@ -29,6 +28,10 @@ #include #include +#ifdef MILVUS_GPU_VERSION +#include +#endif + namespace milvus { namespace server { @@ -60,6 +63,7 @@ SystemInfo::Init() { total_ram_ = GetPhysicalMemory(); fclose(file); +#ifdef MILVUS_GPU_VERSION // initialize GPU information nvmlReturn_t nvmlresult; nvmlresult = nvmlInit(); @@ -72,6 +76,7 @@ SystemInfo::Init() { SERVER_LOG_ERROR << "Unable to get devidce number"; return; } +#endif // initialize network traffic information std::pair in_and_out_octets = Octets(); @@ -209,10 +214,14 @@ SystemInfo::CPUPercent() { std::vector SystemInfo::GPUMemoryTotal() { + // get GPU usage percent if (!initialized_) Init(); std::vector result; + +#ifdef MILVUS_GPU_VERSION + nvmlMemory_t nvmlMemory; for (int i = 0; i < num_device_; ++i) { nvmlDevice_t device; @@ -220,6 +229,8 @@ SystemInfo::GPUMemoryTotal() { nvmlDeviceGetMemoryInfo(device, &nvmlMemory); result.push_back(nvmlMemory.total); } +#endif + return result; } @@ -228,6 +239,9 @@ SystemInfo::GPUTemperature() { if (!initialized_) Init(); std::vector result; + +#ifdef MILVUS_GPU_VERSION + for (int i = 0; i < num_device_; i++) { nvmlDevice_t device; nvmlDeviceGetHandleByIndex(i, &device); @@ -235,6 +249,9 @@ SystemInfo::GPUTemperature() { nvmlDeviceGetTemperature(device, NVML_TEMPERATURE_GPU, &temp); result.push_back(temp); } + +#endif + return result; } @@ -283,6 +300,9 @@ SystemInfo::GPUMemoryUsed() { Init(); std::vector result; + +#ifdef MILVUS_GPU_VERSION + nvmlMemory_t nvmlMemory; for (int i = 0; i < num_device_; ++i) { nvmlDevice_t device; @@ -290,6 +310,9 @@ SystemInfo::GPUMemoryUsed() { nvmlDeviceGetMemoryInfo(device, &nvmlMemory); result.push_back(nvmlMemory.used); } + +#endif + return result; } diff --git a/core/src/scheduler/JobMgr.cpp b/core/src/scheduler/JobMgr.cpp index 4404d95763..794f6a0f37 100644 --- a/core/src/scheduler/JobMgr.cpp +++ b/core/src/scheduler/JobMgr.cpp @@ -104,25 +104,20 @@ JobMgr::build_task(const JobPtr& job) { void JobMgr::calculate_path(const TaskPtr& task) { - if (task->type_ == TaskType::SearchTask) { - if (task->label()->Type() != TaskLabelType::SPECIFIED_RESOURCE) { - return; - } - - std::vector path; - auto spec_label = std::static_pointer_cast(task->label()); - auto src = res_mgr_->GetDiskResources()[0]; - auto dest = spec_label->resource(); - ShortestPath(src.lock(), dest.lock(), res_mgr_, path); - task->path() = Path(path, path.size() - 1); - } else if (task->type_ == TaskType::BuildIndexTask) { - auto spec_label = std::static_pointer_cast(task->label()); - auto src = res_mgr_->GetDiskResources()[0]; - auto dest = spec_label->resource(); - std::vector path; - ShortestPath(src.lock(), dest.lock(), res_mgr_, path); - task->path() = Path(path, path.size() - 1); + if (task->type_ != TaskType::SearchTask) { + return; } + + if (task->label()->Type() != TaskLabelType::SPECIFIED_RESOURCE) { + return; + } + + std::vector path; + auto spec_label = std::static_pointer_cast(task->label()); + auto src = res_mgr_->GetDiskResources()[0]; + auto dest = spec_label->resource(); + ShortestPath(src.lock(), dest.lock(), res_mgr_, path); + task->path() = Path(path, path.size() - 1); } } // namespace scheduler diff --git a/core/src/scheduler/SchedInst.cpp 
b/core/src/scheduler/SchedInst.cpp index f0c00c2d2a..61e0c09759 100644 --- a/core/src/scheduler/SchedInst.cpp +++ b/core/src/scheduler/SchedInst.cpp @@ -18,7 +18,6 @@ #include "scheduler/SchedInst.h" #include "ResourceFactory.h" #include "Utils.h" -#include "knowhere/index/vector_index/IndexGPUIVF.h" #include "server/Config.h" #include @@ -55,8 +54,8 @@ load_simple_config() { // get resources auto gpu_ids = get_gpu_pool(); - int32_t index_build_device_id; - config.GetResourceConfigIndexBuildDevice(index_build_device_id); + int32_t build_gpu_id; + config.GetResourceConfigIndexBuildDevice(build_gpu_id); // create and connect ResMgrInst::GetInstance()->Add(ResourceFactory::Create("disk", "DISK", 0, true, false)); @@ -70,15 +69,15 @@ load_simple_config() { for (auto& gpu_id : gpu_ids) { ResMgrInst::GetInstance()->Add(ResourceFactory::Create(std::to_string(gpu_id), "GPU", gpu_id, true, true)); ResMgrInst::GetInstance()->Connect("cpu", std::to_string(gpu_id), pcie); - if (index_build_device_id == gpu_id) { + if (build_gpu_id == gpu_id) { find_build_gpu_id = true; } } - if (not find_build_gpu_id && index_build_device_id != server::CPU_DEVICE_ID) { + if (not find_build_gpu_id) { ResMgrInst::GetInstance()->Add( - ResourceFactory::Create(std::to_string(index_build_device_id), "GPU", index_build_device_id, true, true)); - ResMgrInst::GetInstance()->Connect("cpu", std::to_string(index_build_device_id), pcie); + ResourceFactory::Create(std::to_string(build_gpu_id), "GPU", build_gpu_id, true, true)); + ResMgrInst::GetInstance()->Connect("cpu", std::to_string(build_gpu_id), pcie); } } diff --git a/core/src/scheduler/SchedInst.h b/core/src/scheduler/SchedInst.h index d51611af26..a3048069f9 100644 --- a/core/src/scheduler/SchedInst.h +++ b/core/src/scheduler/SchedInst.h @@ -106,6 +106,7 @@ class OptimizerInst { has_cpu = true; } } + std::vector pass_list; pass_list.push_back(std::make_shared()); pass_list.push_back(std::make_shared()); diff --git a/core/src/scheduler/TaskCreator.cpp b/core/src/scheduler/TaskCreator.cpp index 9f3bc2ae9a..40cfa9aac6 100644 --- a/core/src/scheduler/TaskCreator.cpp +++ b/core/src/scheduler/TaskCreator.cpp @@ -70,15 +70,8 @@ TaskCreator::Create(const DeleteJobPtr& job) { std::vector TaskCreator::Create(const BuildIndexJobPtr& job) { std::vector tasks; - server::Config& config = server::Config::GetInstance(); - int32_t build_index_id; - Status stat = config.GetResourceConfigIndexBuildDevice(build_index_id); - ResourcePtr res_ptr; - if (build_index_id == server::CPU_DEVICE_ID) { - res_ptr = ResMgrInst::GetInstance()->GetResource("cpu"); - } else { - res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, build_index_id); - } + // TODO(yukun): remove "disk" hardcode here + ResourcePtr res_ptr = ResMgrInst::GetInstance()->GetResource("disk"); for (auto& to_index_file : job->to_index_files()) { auto label = std::make_shared(std::weak_ptr(res_ptr)); diff --git a/core/src/scheduler/Utils.cpp b/core/src/scheduler/Utils.cpp index 998e545ba5..2fd573e47a 100644 --- a/core/src/scheduler/Utils.cpp +++ b/core/src/scheduler/Utils.cpp @@ -19,7 +19,9 @@ #include "server/Config.h" #include "utils/Log.h" +#ifdef MILVUS_GPU_VERSION #include +#endif #include #include #include @@ -38,7 +40,9 @@ get_current_timestamp() { uint64_t get_num_gpu() { int n_devices = 0; +#ifdef MILVUS_GPU_VERSION cudaGetDeviceCount(&n_devices); +#endif return n_devices; } diff --git a/core/src/scheduler/action/PushTaskToNeighbour.cpp b/core/src/scheduler/action/PushTaskToNeighbour.cpp index 
9aed678937..b8a4a1164b 100644 --- a/core/src/scheduler/action/PushTaskToNeighbour.cpp +++ b/core/src/scheduler/action/PushTaskToNeighbour.cpp @@ -138,41 +138,73 @@ Action::SpecifiedResourceLabelTaskScheduler(const ResourceMgrPtr& res_mgr, Resou std::shared_ptr event) { auto task_item = event->task_table_item_; auto task = event->task_table_item_->task; - // if (resource->type() == ResourceType::DISK) { - // // step 1: calculate shortest path per resource, from disk to compute resource - // auto compute_resources = res_mgr->GetComputeResources(); - // std::vector> paths; - // std::vector transport_costs; - // for (auto& res : compute_resources) { - // std::vector path; - // uint64_t transport_cost = ShortestPath(resource, res, res_mgr, path); - // transport_costs.push_back(transport_cost); - // paths.emplace_back(path); - // } - // if (task->job_.lock()->type() == JobType::BUILD) { - // // step2: Read device id in config - // // get build index gpu resource - // server::Config& config = server::Config::GetInstance(); - // int32_t build_index_gpu; - // Status stat = config.GetResourceConfigIndexBuildDevice(build_index_gpu); - // - // bool find_gpu_res = false; - // if (res_mgr->GetResource(ResourceType::GPU, build_index_gpu) != nullptr) { - // for (uint64_t i = 0; i < compute_resources.size(); ++i) { - // if (compute_resources[i]->name() == - // res_mgr->GetResource(ResourceType::GPU, build_index_gpu)->name()) { - // find_gpu_res = true; - // Path task_path(paths[i], paths[i].size() - 1); - // task->path() = task_path; - // break; - // } - // } - // } - // if (not find_gpu_res) { - // task->path() = Path(paths[0], paths[0].size() - 1); - // } - // } - // } + if (resource->type() == ResourceType::DISK) { + // step 1: calculate shortest path per resource, from disk to compute resource + auto compute_resources = res_mgr->GetComputeResources(); + std::vector> paths; + std::vector transport_costs; + for (auto& res : compute_resources) { + std::vector path; + uint64_t transport_cost = ShortestPath(resource, res, res_mgr, path); + transport_costs.push_back(transport_cost); + paths.emplace_back(path); + } + // if (task->job_.lock()->type() == JobType::SEARCH) { + // auto label = task->label(); + // auto spec_label = std::static_pointer_cast(label); + // if (spec_label->resource().lock()->type() == ResourceType::CPU) { + // std::vector spec_path; + // spec_path.push_back(spec_label->resource().lock()->name()); + // spec_path.push_back(resource->name()); + // task->path() = Path(spec_path, spec_path.size() - 1); + // } else { + // // step 2: select min cost, cost(resource) = avg_cost * task_to_do + transport_cost + // uint64_t min_cost = std::numeric_limits::max(); + // uint64_t min_cost_idx = 0; + // for (uint64_t i = 0; i < compute_resources.size(); ++i) { + // if (compute_resources[i]->TotalTasks() == 0) { + // min_cost_idx = i; + // break; + // } + // uint64_t cost = compute_resources[i]->TaskAvgCost() * + // compute_resources[i]->NumOfTaskToExec() + + // transport_costs[i]; + // if (min_cost > cost) { + // min_cost = cost; + // min_cost_idx = i; + // } + // } + // + // // step 3: set path in task + // Path task_path(paths[min_cost_idx], paths[min_cost_idx].size() - 1); + // task->path() = task_path; + // } + // + // } else + if (task->job_.lock()->type() == JobType::BUILD) { + // step2: Read device id in config + // get build index gpu resource + server::Config& config = server::Config::GetInstance(); + int32_t build_index_gpu; + Status stat = 
config.GetResourceConfigIndexBuildDevice(build_index_gpu); + + bool find_gpu_res = false; + if (res_mgr->GetResource(ResourceType::GPU, build_index_gpu) != nullptr) { + for (uint64_t i = 0; i < compute_resources.size(); ++i) { + if (compute_resources[i]->name() == + res_mgr->GetResource(ResourceType::GPU, build_index_gpu)->name()) { + find_gpu_res = true; + Path task_path(paths[i], paths[i].size() - 1); + task->path() = task_path; + break; + } + } + } + if (not find_gpu_res) { + task->path() = Path(paths[0], paths[0].size() - 1); + } + } + } if (resource->name() == task->path().Last()) { resource->WakeupExecutor(); diff --git a/core/src/scheduler/job/BuildIndexJob.cpp b/core/src/scheduler/job/BuildIndexJob.cpp index 3247383db3..4c4c3b5054 100644 --- a/core/src/scheduler/job/BuildIndexJob.cpp +++ b/core/src/scheduler/job/BuildIndexJob.cpp @@ -50,10 +50,7 @@ void BuildIndexJob::BuildIndexDone(size_t to_index_id) { std::unique_lock lock(mutex_); to_index_files_.erase(to_index_id); - if (to_index_files_.empty()) { - cv_.notify_all(); - } - + cv_.notify_all(); SERVER_LOG_DEBUG << "BuildIndexJob " << id() << " finish index file: " << to_index_id; } diff --git a/core/src/scheduler/job/SearchJob.cpp b/core/src/scheduler/job/SearchJob.cpp index ec93c69f55..47c825c122 100644 --- a/core/src/scheduler/job/SearchJob.cpp +++ b/core/src/scheduler/job/SearchJob.cpp @@ -49,21 +49,13 @@ void SearchJob::SearchDone(size_t index_id) { std::unique_lock lock(mutex_); index_files_.erase(index_id); - if (index_files_.empty()) { - cv_.notify_all(); - } - + cv_.notify_all(); SERVER_LOG_DEBUG << "SearchJob " << id() << " finish index file: " << index_id; } -ResultIds& -SearchJob::GetResultIds() { - return result_ids_; -} - -ResultDistances& -SearchJob::GetResultDistances() { - return result_distances_; +ResultSet& +SearchJob::GetResult() { + return result_; } Status& diff --git a/core/src/scheduler/job/SearchJob.h b/core/src/scheduler/job/SearchJob.h index ff5ab34131..1e586090b9 100644 --- a/core/src/scheduler/job/SearchJob.h +++ b/core/src/scheduler/job/SearchJob.h @@ -29,7 +29,6 @@ #include #include "Job.h" -#include "db/Types.h" #include "db/meta/MetaTypes.h" namespace milvus { @@ -38,9 +37,9 @@ namespace scheduler { using engine::meta::TableFileSchemaPtr; using Id2IndexMap = std::unordered_map; - -using ResultIds = engine::ResultIds; -using ResultDistances = engine::ResultDistances; +using IdDistPair = std::pair; +using Id2DistVec = std::vector; +using ResultSet = std::vector; class SearchJob : public Job { public: @@ -56,11 +55,8 @@ class SearchJob : public Job { void SearchDone(size_t index_id); - ResultIds& - GetResultIds(); - - ResultDistances& - GetResultDistances(); + ResultSet& + GetResult(); Status& GetStatus(); @@ -94,11 +90,6 @@ class SearchJob : public Job { return index_files_; } - std::mutex& - mutex() { - return mutex_; - } - private: uint64_t topk_ = 0; uint64_t nq_ = 0; @@ -108,8 +99,7 @@ class SearchJob : public Job { Id2IndexMap index_files_; // TODO: column-base better ? 
- ResultIds result_ids_; - ResultDistances result_distances_; + ResultSet result_; Status status_; std::mutex mutex_; diff --git a/core/src/scheduler/optimizer/OnlyGPUPass.cpp b/core/src/scheduler/optimizer/OnlyGPUPass.cpp index e5d3c71fd3..3fcda0e8a3 100644 --- a/core/src/scheduler/optimizer/OnlyGPUPass.cpp +++ b/core/src/scheduler/optimizer/OnlyGPUPass.cpp @@ -46,7 +46,7 @@ OnlyGPUPass::Run(const TaskPtr& task) { auto label = std::make_shared(std::weak_ptr(res_ptr)); task->label() = label; - specified_gpu_id_ = (specified_gpu_id_ + 1) % gpu_id.size(); + specified_gpu_id_ = specified_gpu_id_++ % gpu_id.size(); return true; } diff --git a/core/src/scheduler/task/SearchTask.cpp b/core/src/scheduler/task/SearchTask.cpp index 08bc6525aa..1bf1caff76 100644 --- a/core/src/scheduler/task/SearchTask.cpp +++ b/core/src/scheduler/task/SearchTask.cpp @@ -219,11 +219,8 @@ XSearchTask::Execute() { // step 3: pick up topk result auto spec_k = index_engine_->Count() < topk ? index_engine_->Count() : topk; - { - std::unique_lock lock(search_job->mutex()); - XSearchTask::MergeTopkToResultSet(output_ids, output_distance, spec_k, nq, topk, metric_l2, - search_job->GetResultIds(), search_job->GetResultDistances()); - } + XSearchTask::MergeTopkToResultSet(output_ids, output_distance, spec_k, nq, topk, metric_l2, + search_job->GetResult()); span = rc.RecordSection(hdr + ", reduce topk"); // search_job->AccumReduceCost(span); @@ -243,69 +240,71 @@ XSearchTask::Execute() { } void -XSearchTask::MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const scheduler::ResultDistances& src_distances, - size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids, - scheduler::ResultDistances& tar_distances) { - if (src_ids.empty()) { - return; +XSearchTask::MergeTopkToResultSet(const std::vector& input_ids, const std::vector& input_distance, + uint64_t input_k, uint64_t nq, uint64_t topk, bool ascending, + scheduler::ResultSet& result) { + if (result.empty()) { + result.resize(nq); } - size_t tar_k = tar_ids.size() / nq; - size_t buf_k = std::min(topk, src_k + tar_k); - - scheduler::ResultIds buf_ids(nq * buf_k, -1); - scheduler::ResultDistances buf_distances(nq * buf_k, 0.0); - for (uint64_t i = 0; i < nq; i++) { - size_t buf_k_j = 0, src_k_j = 0, tar_k_j = 0; - size_t buf_idx, src_idx, tar_idx; + scheduler::Id2DistVec result_buf; + auto& result_i = result[i]; - size_t buf_k_multi_i = buf_k * i; - size_t src_k_multi_i = topk * i; - size_t tar_k_multi_i = tar_k * i; - - while (buf_k_j < buf_k && src_k_j < src_k && tar_k_j < tar_k) { - src_idx = src_k_multi_i + src_k_j; - tar_idx = tar_k_multi_i + tar_k_j; - buf_idx = buf_k_multi_i + buf_k_j; - - if ((ascending && src_distances[src_idx] < tar_distances[tar_idx]) || - (!ascending && src_distances[src_idx] > tar_distances[tar_idx])) { - buf_ids[buf_idx] = src_ids[src_idx]; - buf_distances[buf_idx] = src_distances[src_idx]; - src_k_j++; - } else { - buf_ids[buf_idx] = tar_ids[tar_idx]; - buf_distances[buf_idx] = tar_distances[tar_idx]; - tar_k_j++; + if (result[i].empty()) { + result_buf.resize(input_k, scheduler::IdDistPair(-1, 0.0)); + uint64_t input_k_multi_i = topk * i; + for (auto k = 0; k < input_k; ++k) { + uint64_t idx = input_k_multi_i + k; + auto& result_buf_item = result_buf[k]; + result_buf_item.first = input_ids[idx]; + result_buf_item.second = input_distance[idx]; } - buf_k_j++; - } - - if (buf_k_j < buf_k) { - if (src_k_j < src_k) { - while (buf_k_j < buf_k && src_k_j < src_k) { - buf_idx = buf_k_multi_i + buf_k_j; - src_idx = 
src_k_multi_i + src_k_j; - buf_ids[buf_idx] = src_ids[src_idx]; - buf_distances[buf_idx] = src_distances[src_idx]; - src_k_j++; - buf_k_j++; + } else { + size_t tar_size = result_i.size(); + uint64_t output_k = std::min(topk, input_k + tar_size); + result_buf.resize(output_k, scheduler::IdDistPair(-1, 0.0)); + size_t buf_k = 0, src_k = 0, tar_k = 0; + uint64_t src_idx; + uint64_t input_k_multi_i = topk * i; + while (buf_k < output_k && src_k < input_k && tar_k < tar_size) { + src_idx = input_k_multi_i + src_k; + auto& result_buf_item = result_buf[buf_k]; + auto& result_item = result_i[tar_k]; + if ((ascending && input_distance[src_idx] < result_item.second) || + (!ascending && input_distance[src_idx] > result_item.second)) { + result_buf_item.first = input_ids[src_idx]; + result_buf_item.second = input_distance[src_idx]; + src_k++; + } else { + result_buf_item = result_item; + tar_k++; } - } else { - while (buf_k_j < buf_k && tar_k_j < tar_k) { - buf_idx = buf_k_multi_i + buf_k_j; - tar_idx = tar_k_multi_i + tar_k_j; - buf_ids[buf_idx] = tar_ids[tar_idx]; - buf_distances[buf_idx] = tar_distances[tar_idx]; - tar_k_j++; - buf_k_j++; + buf_k++; + } + + if (buf_k < output_k) { + if (src_k < input_k) { + while (buf_k < output_k && src_k < input_k) { + src_idx = input_k_multi_i + src_k; + auto& result_buf_item = result_buf[buf_k]; + result_buf_item.first = input_ids[src_idx]; + result_buf_item.second = input_distance[src_idx]; + src_k++; + buf_k++; + } + } else { + while (buf_k < output_k && tar_k < tar_size) { + result_buf[buf_k] = result_i[tar_k]; + tar_k++; + buf_k++; + } } } } + + result_i.swap(result_buf); } - tar_ids.swap(buf_ids); - tar_distances.swap(buf_distances); } // void diff --git a/core/src/scheduler/task/SearchTask.h b/core/src/scheduler/task/SearchTask.h index bd51137341..bbc8b5bd8f 100644 --- a/core/src/scheduler/task/SearchTask.h +++ b/core/src/scheduler/task/SearchTask.h @@ -39,9 +39,8 @@ class XSearchTask : public Task { public: static void - MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const scheduler::ResultDistances& src_distances, - size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids, - scheduler::ResultDistances& tar_distances); + MergeTopkToResultSet(const std::vector& input_ids, const std::vector& input_distance, + uint64_t input_k, uint64_t nq, uint64_t topk, bool ascending, scheduler::ResultSet& result); // static void // MergeTopkArray(std::vector& tar_ids, std::vector& tar_distance, uint64_t& tar_input_k, diff --git a/core/src/sdk/grpc/ClientProxy.cpp b/core/src/sdk/grpc/ClientProxy.cpp index c726cfc532..91a11adf8c 100644 --- a/core/src/sdk/grpc/ClientProxy.cpp +++ b/core/src/sdk/grpc/ClientProxy.cpp @@ -17,7 +17,7 @@ #include "sdk/grpc/ClientProxy.h" #include "grpc/gen-milvus/milvus.grpc.pb.h" -#include "src/version.h" +#include "src/config.h" #include #include diff --git a/core/src/server/Config.cpp b/core/src/server/Config.cpp index cc88dccffa..81a0fd7042 100644 --- a/core/src/server/Config.cpp +++ b/core/src/server/Config.cpp @@ -162,6 +162,7 @@ Config::ValidateConfig() { return s; } +#ifdef MILVUS_GPU_VERSION int64_t cache_gpu_cache_capacity; s = GetCacheConfigGpuCacheCapacity(cache_gpu_cache_capacity); if (!s.ok()) { @@ -173,6 +174,7 @@ Config::ValidateConfig() { if (!s.ok()) { return s; } +#endif bool cache_insert_data; s = GetCacheConfigCacheInsertData(cache_insert_data); @@ -401,7 +403,8 @@ Status Config::CheckServerConfigDeployMode(const std::string& value) { if (value != "single" && value != 
"cluster_readonly" && value != "cluster_writable") { return Status(SERVER_INVALID_ARGUMENT, - "server_config.deploy_mode is not one of single, cluster_readonly, and cluster_writable."); + "server_config.deploy_mode is not one of " + "single, cluster_readonly, and cluster_writable."); } return Status::OK(); } @@ -589,18 +592,15 @@ Config::CheckCacheConfigGpuCacheCapacity(const std::string& value) { return Status(SERVER_INVALID_ARGUMENT, msg); } else { uint64_t gpu_cache_capacity = std::stoi(value) * GB; - int device_id; - Status s = GetResourceConfigIndexBuildDevice(device_id); + int gpu_index; + Status s = GetResourceConfigIndexBuildDevice(gpu_index); if (!s.ok()) { return s; } - if (device_id == server::CPU_DEVICE_ID) - return Status::OK(); - size_t gpu_memory; - if (!ValidationUtil::GetGpuMemory(device_id, gpu_memory).ok()) { - std::string msg = "Fail to get GPU memory for GPU device: " + std::to_string(device_id); + if (!ValidationUtil::GetGpuMemory(gpu_index, gpu_memory).ok()) { + std::string msg = "Fail to get GPU memory for GPU device: " + std::to_string(gpu_index); return Status(SERVER_UNEXPECTED_ERROR, msg); } else if (gpu_cache_capacity >= gpu_memory) { std::string msg = "Invalid gpu cache capacity: " + value + @@ -689,21 +689,31 @@ Config::CheckResourceConfigMode(const std::string& value) { } Status -CheckGpuDevice(const std::string& value) { - const std::regex pat("gpu(\\d+)"); - std::cmatch m; - if (!std::regex_match(value.c_str(), m, pat)) { - std::string msg = "Invalid gpu device: " + value + - ". Possible reason: resource_config.search_resources does not match your hardware."; +CheckResource(const std::string& value) { + std::string s = value; + std::transform(s.begin(), s.end(), s.begin(), ::tolower); +#ifdef MILVUS_CPU_VERSION + if (s != "cpu") { + return Status(SERVER_INVALID_ARGUMENT, "Invalid CPU resource: " + s); + } +#else + const std::regex pat("cpu|gpu(\\d+)"); + std::smatch m; + if (!std::regex_match(s, m, pat)) { + std::string msg = "Invalid search resource: " + value + + ". Possible reason: resource_config.search_resources is not in the format of cpux or gpux"; return Status(SERVER_INVALID_ARGUMENT, msg); } - int32_t gpu_index = std::stoi(value.substr(3)); - if (!ValidationUtil::ValidateGpuIndex(gpu_index).ok()) { - std::string msg = "Invalid gpu device: " + value + + if (s.compare(0, 3, "gpu") == 0) { + int32_t gpu_index = std::stoi(s.substr(3)); + if (!ValidationUtil::ValidateGpuIndex(gpu_index).ok()) { + std::string msg = "Invalid search resource: " + value + ". Possible reason: resource_config.search_resources does not match your hardware."; - return Status(SERVER_INVALID_ARGUMENT, msg); + return Status(SERVER_INVALID_ARGUMENT, msg); + } } +#endif return Status::OK(); } @@ -716,14 +726,10 @@ Config::CheckResourceConfigSearchResources(const std::vector& value return Status(SERVER_INVALID_ARGUMENT, msg); } - for (auto& device : value) { - if (device == "cpu") { - continue; - } - if (!CheckGpuDevice(device).ok()) { - std::string msg = "Invalid search resource: " + device + - ". 
Possible reason: resource_config.search_resources does not match your hardware."; - return Status(SERVER_INVALID_ARGUMENT, msg); + for (auto& resource : value) { + auto status = CheckResource(resource); + if (!status.ok()) { + return Status(SERVER_INVALID_ARGUMENT, status.message()); } } return Status::OK(); @@ -731,13 +737,9 @@ Config::CheckResourceConfigSearchResources(const std::vector& value Status Config::CheckResourceConfigIndexBuildDevice(const std::string& value) { - if (value == "cpu") { - return Status::OK(); - } - if (!CheckGpuDevice(value).ok()) { - std::string msg = "Invalid index build device: " + value + - ". Possible reason: resource_config.index_build_device does not match your hardware."; - return Status(SERVER_INVALID_ARGUMENT, msg); + auto status = CheckResource(value); + if (!status.ok()) { + return Status(SERVER_INVALID_ARGUMENT, status.message()); } return Status::OK(); } @@ -1015,11 +1017,12 @@ Config::GetResourceConfigIndexBuildDevice(int32_t& value) { return s; } - if (str == "cpu") { - value = CPU_DEVICE_ID; - } else { + if (str != "cpu") { value = std::stoi(str.substr(3)); } + else { + value = -1; + } return Status::OK(); } diff --git a/core/src/server/Config.h b/core/src/server/Config.h index 45591fb5ec..e8a396d568 100644 --- a/core/src/server/Config.h +++ b/core/src/server/Config.h @@ -93,9 +93,7 @@ static const char* CONFIG_RESOURCE_MODE = "mode"; static const char* CONFIG_RESOURCE_MODE_DEFAULT = "simple"; static const char* CONFIG_RESOURCE_SEARCH_RESOURCES = "search_resources"; static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE = "index_build_device"; -static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT = "gpu0"; - -const int32_t CPU_DEVICE_ID = -1; +static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT = "cpu"; class Config { public: diff --git a/core/src/server/Server.cpp b/core/src/server/Server.cpp index 5676504722..eb2b077829 100644 --- a/core/src/server/Server.cpp +++ b/core/src/server/Server.cpp @@ -25,7 +25,7 @@ #include "server/DBWrapper.h" #include "server/Server.h" #include "server/grpc_impl/GrpcServer.h" -#include "src/version.h" +#include "src/config.h" #include "utils/Log.h" #include "utils/LogUtil.h" #include "utils/SignalUtil.h" diff --git a/core/src/server/grpc_impl/GrpcRequestTask.cpp b/core/src/server/grpc_impl/GrpcRequestTask.cpp index 77f262bda6..0816d45750 100644 --- a/core/src/server/grpc_impl/GrpcRequestTask.cpp +++ b/core/src/server/grpc_impl/GrpcRequestTask.cpp @@ -28,7 +28,7 @@ #include "scheduler/SchedInst.h" #include "server/DBWrapper.h" #include "server/Server.h" -#include "src/version.h" +#include "src/config.h" #include "utils/CommonUtil.h" #include "utils/Log.h" #include "utils/TimeRecorder.h" @@ -637,8 +637,7 @@ SearchTask::OnExecute() { rc.RecordSection("prepare vector data"); // step 6: search vectors - engine::ResultIds result_ids; - engine::ResultDistances result_distances; + engine::QueryResults results; auto record_count = (uint64_t)search_param_->query_record_array().size(); #ifdef MILVUS_ENABLE_PROFILING @@ -648,11 +647,11 @@ SearchTask::OnExecute() { #endif if (file_id_array_.empty()) { - status = DBWrapper::DB()->Query(table_name_, (size_t)top_k, record_count, nprobe, vec_f.data(), dates, - result_ids, result_distances); + status = + DBWrapper::DB()->Query(table_name_, (size_t)top_k, record_count, nprobe, vec_f.data(), dates, results); } else { status = DBWrapper::DB()->Query(table_name_, file_id_array_, (size_t)top_k, record_count, nprobe, - vec_f.data(), dates, result_ids, result_distances); 
+ vec_f.data(), dates, results); } #ifdef MILVUS_ENABLE_PROFILING @@ -664,20 +663,23 @@ SearchTask::OnExecute() { return status; } - if (result_ids.empty()) { + if (results.empty()) { return Status::OK(); // empty table } - size_t result_k = result_ids.size() / record_count; + if (results.size() != record_count) { + std::string msg = "Search " + std::to_string(record_count) + " vectors but only return " + + std::to_string(results.size()) + " results"; + return Status(SERVER_ILLEGAL_SEARCH_RESULT, msg); + } // step 7: construct result array - for (size_t i = 0; i < record_count; i++) { + for (auto& result : results) { ::milvus::grpc::TopKQueryResult* topk_query_result = topk_result_list->add_topk_query_result(); - for (size_t j = 0; j < result_k; j++) { + for (auto& pair : result) { ::milvus::grpc::QueryResult* grpc_result = topk_query_result->add_query_result_arrays(); - size_t idx = i * result_k + j; - grpc_result->set_id(result_ids[idx]); - grpc_result->set_distance(result_distances[idx]); + grpc_result->set_id(pair.first); + grpc_result->set_distance(pair.second); } } diff --git a/core/src/utils/ValidationUtil.cpp b/core/src/utils/ValidationUtil.cpp index 68088d6c93..347ba44e85 100644 --- a/core/src/utils/ValidationUtil.cpp +++ b/core/src/utils/ValidationUtil.cpp @@ -20,7 +20,9 @@ #include "db/engine/ExecutionEngine.h" #include +#ifdef MILVUS_GPU_VERSION #include +#endif #include #include #include @@ -168,6 +170,8 @@ ValidationUtil::ValidateSearchNprobe(int64_t nprobe, const engine::meta::TableSc Status ValidationUtil::ValidateGpuIndex(uint32_t gpu_index) { + +#ifdef MILVUS_GPU_VERSION int num_devices = 0; auto cuda_err = cudaGetDeviceCount(&num_devices); if (cuda_err != cudaSuccess) { @@ -181,12 +185,16 @@ ValidationUtil::ValidateGpuIndex(uint32_t gpu_index) { SERVER_LOG_ERROR << msg; return Status(SERVER_INVALID_ARGUMENT, msg); } +#endif return Status::OK(); } Status ValidationUtil::GetGpuMemory(uint32_t gpu_index, size_t& memory) { + +#ifdef MILVUS_GPU_VERSION + cudaDeviceProp deviceProp; auto cuda_err = cudaGetDeviceProperties(&deviceProp, gpu_index); if (cuda_err) { @@ -196,6 +204,8 @@ ValidationUtil::GetGpuMemory(uint32_t gpu_index, size_t& memory) { } memory = deviceProp.totalGlobalMem; +#endif + return Status::OK(); } diff --git a/core/src/wrapper/ConfAdapter.cpp b/core/src/wrapper/ConfAdapter.cpp index 4ac8e22f52..2dcf6bab7e 100644 --- a/core/src/wrapper/ConfAdapter.cpp +++ b/core/src/wrapper/ConfAdapter.cpp @@ -109,7 +109,7 @@ IVFSQConfAdapter::Match(const TempMetaConf& metaconf) { conf->nlist = MatchNlist(metaconf.size, metaconf.nlist); conf->d = metaconf.dim; conf->metric_type = metaconf.metric_type; - conf->gpu_id = metaconf.gpu_id; + conf->gpu_id = conf->gpu_id; conf->nbits = 8; MatchBase(conf); return conf; diff --git a/core/src/wrapper/KnowhereResource.cpp b/core/src/wrapper/KnowhereResource.cpp index 650ae727c1..de3b909b08 100644 --- a/core/src/wrapper/KnowhereResource.cpp +++ b/core/src/wrapper/KnowhereResource.cpp @@ -16,7 +16,9 @@ // under the License. 
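A note on the SearchJob/BuildIndexJob hunks above: SearchDone and BuildIndexDone now call cv_.notify_all() after every finished file instead of only when the pending set drains. That is safe only because the waiting side re-checks the emptiness predicate under the lock. A minimal self-contained sketch of the handshake, with member names mirroring the diff; the AddFile/WaitResult pair is assumed from context:

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>
    #include <set>

    class JobSketch {
     public:
        void
        AddFile(size_t id) {
            std::unique_lock<std::mutex> lock(mutex_);
            index_files_.insert(id);
        }

        // Mirrors SearchDone: erase one finished file, then wake all waiters.
        void
        Done(size_t id) {
            std::unique_lock<std::mutex> lock(mutex_);
            index_files_.erase(id);
            cv_.notify_all();
        }

        // The waiter re-tests the predicate, so early or spurious wakeups are harmless.
        void
        WaitResult() {
            std::unique_lock<std::mutex> lock(mutex_);
            cv_.wait(lock, [this] { return index_files_.empty(); });
        }

     private:
        std::set<size_t> index_files_;
        std::mutex mutex_;
        std::condition_variable cv_;
    };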
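The OnlyGPUPass hunk rewrites the rotation as `specified_gpu_id_ = specified_gpu_id_++ % gpu_id.size();`, which modifies specified_gpu_id_ twice in one expression: undefined behavior before C++17, and even under C++17 sequencing the assignment overwrites the increment, so the counter never advances. The replaced form, `(specified_gpu_id_ + 1) % gpu_id.size()`, is the well-defined rotation; a sketch:

    #include <cstdint>
    #include <vector>

    // Round-robin device selection with a single modification per step.
    uint64_t
    NextGpu(uint64_t& specified_gpu_id, const std::vector<int64_t>& gpu_ids) {
        uint64_t chosen = specified_gpu_id;
        specified_gpu_id = (specified_gpu_id + 1) % gpu_ids.size();  // well-defined update
        return static_cast<uint64_t>(gpu_ids[chosen]);
    }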
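The MergeTopkToResultSet rewrite replaces the flat id/distance arrays with a per-query vector of (id, distance) pairs, but the core algorithm is unchanged: merge two sorted runs front to front and truncate at topk. A standalone sketch of that per-query step; IdDistPair/Id2DistVec follow the typedefs in SearchJob.h, with element types assumed int64_t/float from the surrounding code, and `ascending` is true for L2, false for inner product:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using IdDistPair = std::pair<int64_t, float>;
    using Id2DistVec = std::vector<IdDistPair>;

    Id2DistVec
    MergeTopk(const Id2DistVec& incoming, const Id2DistVec& accumulated, size_t topk, bool ascending) {
        size_t out_k = std::min(topk, incoming.size() + accumulated.size());
        Id2DistVec merged;
        merged.reserve(out_k);
        size_t i = 0, j = 0;
        // Both runs are already sorted by distance, so a single pass suffices.
        while (merged.size() < out_k && i < incoming.size() && j < accumulated.size()) {
            bool take_incoming = ascending ? incoming[i].second < accumulated[j].second
                                           : incoming[i].second > accumulated[j].second;
            merged.push_back(take_incoming ? incoming[i++] : accumulated[j++]);
        }
        // Drain whichever run still has candidates, up to topk.
        while (merged.size() < out_k && i < incoming.size()) merged.push_back(incoming[i++]);
        while (merged.size() < out_k && j < accumulated.size()) merged.push_back(accumulated[j++]);
        return merged;
    }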
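CheckResource in the Config.cpp hunk collapses the old cpu special-case and gpu regex into one validator. A self-contained version of the same check; the MILVUS_CPU_VERSION/MILVUS_GPU_VERSION split mirrors the diff, and the hardware lookup behind ValidateGpuIndex is elided here:

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <regex>
    #include <string>

    bool
    IsValidResource(std::string s) {
        std::transform(s.begin(), s.end(), s.begin(), ::tolower);
    #ifdef MILVUS_CPU_VERSION
        return s == "cpu";  // CPU builds accept nothing else
    #else
        const std::regex pat("cpu|gpu(\\d+)");
        std::smatch m;
        return std::regex_match(s, m, pat);  // "cpu", "gpu0", "gpu12", ...
    #endif
    }

    int
    main() {
        for (const std::string& v : {"cpu", "GPU0", "gpu12", "tpu1"}) {
            std::cout << v << " -> " << (IsValidResource(v) ? "ok" : "invalid") << std::endl;
        }
        return 0;
    }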
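One hunk worth a second look: IVFSQConfAdapter::Match now reads `conf->gpu_id = conf->gpu_id;`, a self-assignment that drops the caller's device choice; the pre-patch right-hand side was `metaconf.gpu_id`. A minimal reproduction, with stand-in struct layouts:

    #include <cassert>
    #include <cstdint>

    struct TempMetaConf { int64_t gpu_id = 1; };
    struct IVFSQCfg    { int64_t gpu_id = -1; };

    int
    main() {
        TempMetaConf metaconf;
        IVFSQCfg conf;
        conf.gpu_id = conf.gpu_id;      // what the hunk writes: a no-op, gpu_id stays -1
        assert(conf.gpu_id == -1);
        conf.gpu_id = metaconf.gpu_id;  // presumably the intended assignment
        assert(conf.gpu_id == 1);
        return 0;
    }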
#include "wrapper/KnowhereResource.h" +#ifdef MILVUS_GPU_VERSION #include "knowhere/index/vector_index/helpers/FaissGpuResourceMgr.h" +#endif #include "server/Config.h" #include @@ -32,6 +34,9 @@ constexpr int64_t M_BYTE = 1024 * 1024; Status KnowhereResource::Initialize() { + +#ifdef MILVUS_GPU_VERSION + struct GpuResourceSetting { int64_t pinned_memory = 300 * M_BYTE; int64_t temp_memory = 300 * M_BYTE; @@ -73,12 +78,16 @@ KnowhereResource::Initialize() { iter->second.temp_memory, iter->second.resource_num); } +#endif + return Status::OK(); } Status KnowhereResource::Finalize() { +#ifdef MILVUS_GPU_VERSION knowhere::FaissGpuResourceMgr::GetInstance().Free(); // free gpu resource. +#endif return Status::OK(); } diff --git a/core/src/wrapper/VecImpl.cpp b/core/src/wrapper/VecImpl.cpp index c97900f839..8bf979e668 100644 --- a/core/src/wrapper/VecImpl.cpp +++ b/core/src/wrapper/VecImpl.cpp @@ -18,11 +18,16 @@ #include "wrapper/VecImpl.h" #include "DataTransfer.h" #include "knowhere/common/Exception.h" -#include "knowhere/index/vector_index/IndexGPUIVF.h" #include "knowhere/index/vector_index/IndexIDMAP.h" -#include "knowhere/index/vector_index/IndexIVFSQHybrid.h" -#include "knowhere/index/vector_index/helpers/Cloner.h" #include "utils/Log.h" +#include "wrapper/WrapperException.h" + +#ifdef MILVUS_GPU_VERSION + +#include +#include + +#endif /* * no parameter check in this layer. @@ -30,326 +35,216 @@ */ namespace milvus { -namespace engine { + namespace engine { -Status -VecIndexImpl::BuildAll(const int64_t& nb, const float* xb, const int64_t* ids, const Config& cfg, const int64_t& nt, - const float* xt) { - try { - dim = cfg->d; - auto dataset = GenDatasetWithIds(nb, dim, xb, ids); + Status + VecIndexImpl::BuildAll(const int64_t &nb, const float *xb, const int64_t *ids, const Config &cfg, + const int64_t &nt, + const float *xt) { + try { + dim = cfg->d; + auto dataset = GenDatasetWithIds(nb, dim, xb, ids); - auto preprocessor = index_->BuildPreprocessor(dataset, cfg); - index_->set_preprocessor(preprocessor); - auto model = index_->Train(dataset, cfg); - index_->set_index_model(model); - index_->Add(dataset, cfg); - } catch (knowhere::KnowhereException& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); - } catch (std::exception& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_ERROR, e.what()); - } - return Status::OK(); -} + auto preprocessor = index_->BuildPreprocessor(dataset, cfg); + index_->set_preprocessor(preprocessor); + auto model = index_->Train(dataset, cfg); + index_->set_index_model(model); + index_->Add(dataset, cfg); + } catch (knowhere::KnowhereException &e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); + } catch (std::exception &e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_ERROR, e.what()); + } + return Status::OK(); + } -Status -VecIndexImpl::Add(const int64_t& nb, const float* xb, const int64_t* ids, const Config& cfg) { - try { - auto dataset = GenDatasetWithIds(nb, dim, xb, ids); + Status + VecIndexImpl::Add(const int64_t &nb, const float *xb, const int64_t *ids, const Config &cfg) { + try { + auto dataset = GenDatasetWithIds(nb, dim, xb, ids); - index_->Add(dataset, cfg); - } catch (knowhere::KnowhereException& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); - } catch (std::exception& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_ERROR, e.what()); - } - return Status::OK(); -} + 
index_->Add(dataset, cfg); + } catch (knowhere::KnowhereException &e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); + } catch (std::exception &e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_ERROR, e.what()); + } + return Status::OK(); + } -Status -VecIndexImpl::Search(const int64_t& nq, const float* xq, float* dist, int64_t* ids, const Config& cfg) { - try { - auto k = cfg->k; - auto dataset = GenDataset(nq, dim, xq); + Status + VecIndexImpl::Search(const int64_t &nq, const float *xq, float *dist, int64_t *ids, const Config &cfg) { + try { + auto k = cfg->k; + auto dataset = GenDataset(nq, dim, xq); - Config search_cfg = cfg; + Config search_cfg = cfg; - auto res = index_->Search(dataset, search_cfg); - auto ids_array = res->array()[0]; - auto dis_array = res->array()[1]; + auto res = index_->Search(dataset, search_cfg); + auto ids_array = res->array()[0]; + auto dis_array = res->array()[1]; - //{ - // auto& ids = ids_array; - // auto& dists = dis_array; - // std::stringstream ss_id; - // std::stringstream ss_dist; - // for (auto i = 0; i < 10; i++) { - // for (auto j = 0; j < k; ++j) { - // ss_id << *(ids->data()->GetValues(1, i * k + j)) << " "; - // ss_dist << *(dists->data()->GetValues(1, i * k + j)) << " "; - // } - // ss_id << std::endl; - // ss_dist << std::endl; - // } - // std::cout << "id\n" << ss_id.str() << std::endl; - // std::cout << "dist\n" << ss_dist.str() << std::endl; - //} + //{ + // auto& ids = ids_array; + // auto& dists = dis_array; + // std::stringstream ss_id; + // std::stringstream ss_dist; + // for (auto i = 0; i < 10; i++) { + // for (auto j = 0; j < k; ++j) { + // ss_id << *(ids->data()->GetValues(1, i * k + j)) << " "; + // ss_dist << *(dists->data()->GetValues(1, i * k + j)) << " "; + // } + // ss_id << std::endl; + // ss_dist << std::endl; + // } + // std::cout << "id\n" << ss_id.str() << std::endl; + // std::cout << "dist\n" << ss_dist.str() << std::endl; + //} - auto p_ids = ids_array->data()->GetValues(1, 0); - auto p_dist = dis_array->data()->GetValues(1, 0); + auto p_ids = ids_array->data()->GetValues(1, 0); + auto p_dist = dis_array->data()->GetValues(1, 0); - // TODO(linxj): avoid copy here. - memcpy(ids, p_ids, sizeof(int64_t) * nq * k); - memcpy(dist, p_dist, sizeof(float) * nq * k); - } catch (knowhere::KnowhereException& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); - } catch (std::exception& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_ERROR, e.what()); - } - return Status::OK(); -} + // TODO(linxj): avoid copy here. 
+ memcpy(ids, p_ids, sizeof(int64_t) * nq * k); + memcpy(dist, p_dist, sizeof(float) * nq * k); + } catch (knowhere::KnowhereException &e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); + } catch (std::exception &e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_ERROR, e.what()); + } + return Status::OK(); + } -knowhere::BinarySet -VecIndexImpl::Serialize() { - type = ConvertToCpuIndexType(type); - return index_->Serialize(); -} - -Status -VecIndexImpl::Load(const knowhere::BinarySet& index_binary) { - index_->Load(index_binary); - dim = Dimension(); - return Status::OK(); -} - -int64_t -VecIndexImpl::Dimension() { - return index_->Dimension(); -} - -int64_t -VecIndexImpl::Count() { - return index_->Count(); -} - -IndexType -VecIndexImpl::GetType() { - return type; -} - -VecIndexPtr -VecIndexImpl::CopyToGpu(const int64_t& device_id, const Config& cfg) { - // TODO(linxj): exception handle - auto gpu_index = knowhere::cloner::CopyCpuToGpu(index_, device_id, cfg); - auto new_index = std::make_shared(gpu_index, ConvertToGpuIndexType(type)); - new_index->dim = dim; - return new_index; -} - -VecIndexPtr -VecIndexImpl::CopyToCpu(const Config& cfg) { - // TODO(linxj): exception handle - auto cpu_index = knowhere::cloner::CopyGpuToCpu(index_, cfg); - auto new_index = std::make_shared(cpu_index, ConvertToCpuIndexType(type)); - new_index->dim = dim; - return new_index; -} - -VecIndexPtr -VecIndexImpl::Clone() { - // TODO(linxj): exception handle - auto clone_index = std::make_shared(index_->Clone(), type); - clone_index->dim = dim; - return clone_index; -} - -int64_t -VecIndexImpl::GetDeviceId() { - if (auto device_idx = std::dynamic_pointer_cast(index_)) { - return device_idx->GetGpuDevice(); - } - // else - return -1; // -1 == cpu -} - -float* -BFIndex::GetRawVectors() { - auto raw_index = std::dynamic_pointer_cast(index_); - if (raw_index) { - return raw_index->GetRawVectors(); - } - return nullptr; -} - -int64_t* -BFIndex::GetRawIds() { - return std::static_pointer_cast(index_)->GetRawIds(); -} - -ErrorCode -BFIndex::Build(const Config& cfg) { - try { - dim = cfg->d; - std::static_pointer_cast(index_)->Train(cfg); - } catch (knowhere::KnowhereException& e) { - WRAPPER_LOG_ERROR << e.what(); - return KNOWHERE_UNEXPECTED_ERROR; - } catch (std::exception& e) { - WRAPPER_LOG_ERROR << e.what(); - return KNOWHERE_ERROR; - } - return KNOWHERE_SUCCESS; -} - -Status -BFIndex::BuildAll(const int64_t& nb, const float* xb, const int64_t* ids, const Config& cfg, const int64_t& nt, - const float* xt) { - try { - dim = cfg->d; - auto dataset = GenDatasetWithIds(nb, dim, xb, ids); - - std::static_pointer_cast(index_)->Train(cfg); - index_->Add(dataset, cfg); - } catch (knowhere::KnowhereException& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); - } catch (std::exception& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_ERROR, e.what()); - } - return Status::OK(); -} - -// TODO(linxj): add lock here. 
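The VecImpl.cpp hunk above is mostly a reindent, but the error-handling idiom it preserves is worth isolating: every wrapper entry point maps knowhere::KnowhereException to KNOWHERE_UNEXPECTED_ERROR and any other std::exception to KNOWHERE_ERROR. A minimal sketch of that two-tier mapping; the exception type and codes below are stand-ins for the real ones:

    #include <exception>
    #include <iostream>
    #include <stdexcept>

    struct KnowhereException : std::runtime_error {
        using std::runtime_error::runtime_error;
    };

    enum { KNOWHERE_SUCCESS = 0, KNOWHERE_ERROR = 1, KNOWHERE_UNEXPECTED_ERROR = 2 };

    int
    StatusOf(void (*op)()) {
        try {
            op();
        } catch (KnowhereException& e) {
            std::cerr << e.what() << std::endl;  // WRAPPER_LOG_ERROR in the real code
            return KNOWHERE_UNEXPECTED_ERROR;    // library-level failure
        } catch (std::exception& e) {
            std::cerr << e.what() << std::endl;
            return KNOWHERE_ERROR;               // anything else
        }
        return KNOWHERE_SUCCESS;
    }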
-Status -IVFMixIndex::BuildAll(const int64_t& nb, const float* xb, const int64_t* ids, const Config& cfg, const int64_t& nt, - const float* xt) { - try { - dim = cfg->d; - auto dataset = GenDatasetWithIds(nb, dim, xb, ids); - - auto preprocessor = index_->BuildPreprocessor(dataset, cfg); - index_->set_preprocessor(preprocessor); - auto model = index_->Train(dataset, cfg); - index_->set_index_model(model); - index_->Add(dataset, cfg); - - if (auto device_index = std::dynamic_pointer_cast(index_)) { - auto host_index = device_index->CopyGpuToCpu(Config()); - index_ = host_index; + knowhere::BinarySet + VecIndexImpl::Serialize() { type = ConvertToCpuIndexType(type); - } else { - WRAPPER_LOG_ERROR << "Build IVFMIXIndex Failed"; - return Status(KNOWHERE_ERROR, "Build IVFMIXIndex Failed"); + return index_->Serialize(); } - } catch (knowhere::KnowhereException& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); - } catch (std::exception& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_ERROR, e.what()); - } - return Status::OK(); -} -Status -IVFMixIndex::Load(const knowhere::BinarySet& index_binary) { - index_->Load(index_binary); - dim = Dimension(); - return Status::OK(); -} - -knowhere::QuantizerPtr -IVFHybridIndex::LoadQuantizer(const Config& conf) { - // TODO(linxj): Hardcode here - if (auto new_idx = std::dynamic_pointer_cast(index_)) { - return new_idx->LoadQuantizer(conf); - } else { - WRAPPER_LOG_ERROR << "Hybrid mode not support for index type: " << int(type); - } -} - -Status -IVFHybridIndex::SetQuantizer(const knowhere::QuantizerPtr& q) { - try { - // TODO(linxj): Hardcode here - if (auto new_idx = std::dynamic_pointer_cast(index_)) { - new_idx->SetQuantizer(q); - } else { - WRAPPER_LOG_ERROR << "Hybrid mode not support for index type: " << int(type); - return Status(KNOWHERE_ERROR, "not support"); + Status + VecIndexImpl::Load(const knowhere::BinarySet &index_binary) { + index_->Load(index_binary); + dim = Dimension(); + return Status::OK(); } - } catch (knowhere::KnowhereException& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); - } catch (std::exception& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_ERROR, e.what()); - } - return Status::OK(); -} -Status -IVFHybridIndex::UnsetQuantizer() { - try { - // TODO(linxj): Hardcode here - if (auto new_idx = std::dynamic_pointer_cast(index_)) { - new_idx->UnsetQuantizer(); - } else { - WRAPPER_LOG_ERROR << "Hybrid mode not support for index type: " << int(type); - return Status(KNOWHERE_ERROR, "not support"); + int64_t + VecIndexImpl::Dimension() { + return index_->Dimension(); } - } catch (knowhere::KnowhereException& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); - } catch (std::exception& e) { - WRAPPER_LOG_ERROR << e.what(); - return Status(KNOWHERE_ERROR, e.what()); - } - return Status::OK(); -} -VecIndexPtr -IVFHybridIndex::LoadData(const knowhere::QuantizerPtr& q, const Config& conf) { - try { - // TODO(linxj): Hardcode here - if (auto new_idx = std::dynamic_pointer_cast(index_)) { - return std::make_shared(new_idx->LoadData(q, conf), type); - } else { - WRAPPER_LOG_ERROR << "Hybrid mode not support for index type: " << int(type); + int64_t + VecIndexImpl::Count() { + return index_->Count(); } - } catch (knowhere::KnowhereException& e) { - WRAPPER_LOG_ERROR << e.what(); - } catch (std::exception& e) { - WRAPPER_LOG_ERROR << e.what(); - } - return nullptr; -} -std::pair 
-IVFHybridIndex::CopyToGpuWithQuantizer(const int64_t& device_id, const Config& cfg) { - try { - // TODO(linxj): Hardcode here - if (auto hybrid_idx = std::dynamic_pointer_cast(index_)) { - auto pair = hybrid_idx->CopyCpuToGpuWithQuantizer(device_id, cfg); - auto new_idx = std::make_shared(pair.first, type); - return std::make_pair(new_idx, pair.second); - } else { - WRAPPER_LOG_ERROR << "Hybrid mode not support for index type: " << int(type); + IndexType + VecIndexImpl::GetType() { + return type; } - } catch (knowhere::KnowhereException& e) { - WRAPPER_LOG_ERROR << e.what(); - } catch (std::exception& e) { - WRAPPER_LOG_ERROR << e.what(); - } - return std::make_pair(nullptr, nullptr); -} -} // namespace engine + VecIndexPtr + VecIndexImpl::CopyToGpu(const int64_t &device_id, const Config &cfg) { + // TODO(linxj): exception handle +#ifdef MILVUS_GPU_VERSION + auto gpu_index = knowhere::cloner::CopyCpuToGpu(index_, device_id, cfg); + auto new_index = std::make_shared(gpu_index, ConvertToGpuIndexType(type)); + new_index->dim = dim; + return new_index; +#else + WRAPPER_LOG_ERROR << "Calling VecIndexImpl::CopyToGpu when we are using CPU version"; + throw WrapperException("Calling VecIndexImpl::CopyToGpu when we are using CPU version"); +#endif + } + + VecIndexPtr + VecIndexImpl::CopyToCpu(const Config &cfg) { + // TODO(linxj): exception handle +#ifdef MILVUS_GPU_VERSION + auto cpu_index = knowhere::cloner::CopyGpuToCpu(index_, cfg); + auto new_index = std::make_shared(cpu_index, ConvertToCpuIndexType(type)); + new_index->dim = dim; + return new_index; +#else + WRAPPER_LOG_ERROR << "Calling VecIndexImpl::CopyToCpu when we are using CPU version"; + throw WrapperException("Calling VecIndexImpl::CopyToCpu when we are using CPU version"); +#endif + + } + + VecIndexPtr + VecIndexImpl::Clone() { + // TODO(linxj): exception handle + auto clone_index = std::make_shared(index_->Clone(), type); + clone_index->dim = dim; + return clone_index; + } + + int64_t + VecIndexImpl::GetDeviceId() { +#ifdef MILVUS_GPU_VERSION + if (auto device_idx = std::dynamic_pointer_cast(index_)) { + return device_idx->GetGpuDevice(); + } +#else + // else + return -1; // -1 == cpu +#endif + } + + float * + BFIndex::GetRawVectors() { + auto raw_index = std::dynamic_pointer_cast(index_); + if (raw_index) { + return raw_index->GetRawVectors(); + } + return nullptr; + } + + int64_t * + BFIndex::GetRawIds() { + return std::static_pointer_cast(index_)->GetRawIds(); + } + + ErrorCode + BFIndex::Build(const Config &cfg) { + try { + dim = cfg->d; + std::static_pointer_cast(index_)->Train(cfg); + } catch (knowhere::KnowhereException &e) { + WRAPPER_LOG_ERROR << e.what(); + return KNOWHERE_UNEXPECTED_ERROR; + } catch (std::exception &e) { + WRAPPER_LOG_ERROR << e.what(); + return KNOWHERE_ERROR; + } + return KNOWHERE_SUCCESS; + } + + Status + BFIndex::BuildAll(const int64_t &nb, const float *xb, const int64_t *ids, const Config &cfg, const int64_t &nt, + const float *xt) { + try { + dim = cfg->d; + auto dataset = GenDatasetWithIds(nb, dim, xb, ids); + + std::static_pointer_cast(index_)->Train(cfg); + index_->Add(dataset, cfg); + } catch (knowhere::KnowhereException &e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); + } catch (std::exception &e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_ERROR, e.what()); + } + return Status::OK(); + } + + } // namespace engine } // namespace milvus diff --git a/core/src/wrapper/VecImpl.h b/core/src/wrapper/VecImpl.h index 22d734cf92..2b6f07827e 
100644 --- a/core/src/wrapper/VecImpl.h +++ b/core/src/wrapper/VecImpl.h @@ -77,41 +77,6 @@ class VecIndexImpl : public VecIndex { std::shared_ptr index_ = nullptr; }; -class IVFMixIndex : public VecIndexImpl { - public: - explicit IVFMixIndex(std::shared_ptr index, const IndexType& type) - : VecIndexImpl(std::move(index), type) { - } - - Status - BuildAll(const int64_t& nb, const float* xb, const int64_t* ids, const Config& cfg, const int64_t& nt, - const float* xt) override; - - Status - Load(const knowhere::BinarySet& index_binary) override; -}; - -class IVFHybridIndex : public IVFMixIndex { - public: - explicit IVFHybridIndex(std::shared_ptr index, const IndexType& type) - : IVFMixIndex(std::move(index), type) { - } - - knowhere::QuantizerPtr - LoadQuantizer(const Config& conf) override; - - Status - SetQuantizer(const knowhere::QuantizerPtr& q) override; - - Status - UnsetQuantizer() override; - std::pair - CopyToGpuWithQuantizer(const int64_t& device_id, const Config& cfg) override; - - VecIndexPtr - LoadData(const knowhere::QuantizerPtr& q, const Config& conf) override; -}; - class BFIndex : public VecIndexImpl { public: explicit BFIndex(std::shared_ptr index) diff --git a/core/src/wrapper/VecIndex.cpp b/core/src/wrapper/VecIndex.cpp index abf97e69e5..d9e252d7ed 100644 --- a/core/src/wrapper/VecIndex.cpp +++ b/core/src/wrapper/VecIndex.cpp @@ -18,19 +18,23 @@ #include "wrapper/VecIndex.h" #include "VecImpl.h" #include "knowhere/common/Exception.h" -#include "knowhere/index/vector_index/IndexGPUIVF.h" -#include "knowhere/index/vector_index/IndexGPUIVFPQ.h" -#include "knowhere/index/vector_index/IndexGPUIVFSQ.h" #include "knowhere/index/vector_index/IndexIDMAP.h" #include "knowhere/index/vector_index/IndexIVF.h" #include "knowhere/index/vector_index/IndexIVFPQ.h" #include "knowhere/index/vector_index/IndexIVFSQ.h" -#include "knowhere/index/vector_index/IndexIVFSQHybrid.h" #include "knowhere/index/vector_index/IndexKDT.h" #include "knowhere/index/vector_index/IndexNSG.h" #include "utils/Log.h" +#ifdef MILVUS_GPU_VERSION #include +#include "wrapper/gpu/GPUVecImpl.h" +#include "knowhere/index/vector_index/IndexGPUIVF.h" +#include "knowhere/index/vector_index/IndexGPUIVFPQ.h" +#include "knowhere/index/vector_index/IndexGPUIVFSQ.h" +#include "knowhere/index/vector_index/IndexGPUIDMAP.h" +#include "knowhere/index/vector_index/IndexIVFSQHybrid.h" +#endif namespace milvus { namespace engine { @@ -119,43 +123,46 @@ GetVecIndexFactory(const IndexType& type, const Config& cfg) { index = std::make_shared(); break; } - case IndexType::FAISS_IVFFLAT_GPU: { - index = std::make_shared(gpu_device); - break; - } - case IndexType::FAISS_IVFFLAT_MIX: { - index = std::make_shared(gpu_device); - return std::make_shared(index, IndexType::FAISS_IVFFLAT_MIX); - } case IndexType::FAISS_IVFPQ_CPU: { index = std::make_shared(); break; } - case IndexType::FAISS_IVFPQ_GPU: { - index = std::make_shared(gpu_device); - break; - } case IndexType::SPTAG_KDT_RNT_CPU: { index = std::make_shared(); break; } - case IndexType::FAISS_IVFSQ8_MIX: { - index = std::make_shared(gpu_device); - return std::make_shared(index, IndexType::FAISS_IVFSQ8_MIX); - } case IndexType::FAISS_IVFSQ8_CPU: { index = std::make_shared(); break; } + +#ifdef MILVUS_GPU_VERSION + case IndexType::FAISS_IVFFLAT_GPU: { + index = std::make_shared(gpu_device); + break; + } + case IndexType::FAISS_IVFPQ_GPU: { + index = std::make_shared(gpu_device); + break; + } + case IndexType::FAISS_IVFSQ8_MIX: { + index = std::make_shared(gpu_device); + return 
std::make_shared(index, IndexType::FAISS_IVFSQ8_MIX); + } case IndexType::FAISS_IVFSQ8_GPU: { index = std::make_shared(gpu_device); break; } + case IndexType::FAISS_IVFFLAT_MIX: { + index = std::make_shared(gpu_device); + return std::make_shared(index, IndexType::FAISS_IVFFLAT_MIX); + } #ifdef CUSTOMIZATION case IndexType::FAISS_IVFSQ8_HYBRID: { index = std::make_shared(gpu_device); return std::make_shared(index, IndexType::FAISS_IVFSQ8_HYBRID); } +#endif #endif case IndexType::NSG_MIX: { // TODO(linxj): bug. index = std::make_shared(gpu_device); diff --git a/core/src/version.h.macro b/core/src/wrapper/WrapperException.cpp similarity index 74% rename from core/src/version.h.macro rename to core/src/wrapper/WrapperException.cpp index 454d8a990a..611a3571da 100644 --- a/core/src/version.h.macro +++ b/core/src/wrapper/WrapperException.cpp @@ -15,8 +15,16 @@ // specific language governing permissions and limitations // under the License. -#pragma once +#include "wrapper/WrapperException.h" -#define MILVUS_VERSION "@MILVUS_VERSION@" -#define BUILD_TYPE "@BUILD_TYPE@" -#define BUILD_TIME @BUILD_TIME@ \ No newline at end of file +namespace milvus { +namespace engine { + +WrapperException::WrapperException(const std::string &msg) : msg(msg) {} + +const char *WrapperException::what() const noexcept { + return msg.c_str(); +} + +} // namespace engine +} // namespace milvus diff --git a/core/src/wrapper/WrapperException.h b/core/src/wrapper/WrapperException.h new file mode 100644 index 0000000000..c2e2748a43 --- /dev/null +++ b/core/src/wrapper/WrapperException.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +namespace milvus { +namespace engine { + +class WrapperException : public std::exception { + public: + explicit WrapperException(const std::string& msg); + + const char* what() const noexcept override; + + const std::string msg; +}; + +} // namespace engine +} // namespace milvus diff --git a/core/src/wrapper/gpu/GPUVecImpl.cpp b/core/src/wrapper/gpu/GPUVecImpl.cpp new file mode 100644 index 0000000000..4ff5e665cc --- /dev/null +++ b/core/src/wrapper/gpu/GPUVecImpl.cpp @@ -0,0 +1,164 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "wrapper/VecImpl.h" +#include "GPUVecImpl.h" +#include "src/wrapper/DataTransfer.h" +#include "knowhere/common/Exception.h" +#include "knowhere/index/vector_index/IndexIDMAP.h" +#include "knowhere/index/vector_index/IndexGPUIVF.h" +#include "knowhere/index/vector_index/IndexGPUIDMAP.h" +#include "knowhere/index/vector_index/IndexIVFSQHybrid.h" +#include "knowhere/index/vector_index/helpers/Cloner.h" +#include "utils/Log.h" + +/* + * no parameter check in this layer. + * only responible for index combination + */ + +namespace milvus { +namespace engine { + + +// TODO(linxj): add lock here. +Status +IVFMixIndex::BuildAll(const int64_t& nb, const float* xb, const int64_t* ids, const Config& cfg, const int64_t& nt, + const float* xt) { + try { + dim = cfg->d; + auto dataset = GenDatasetWithIds(nb, dim, xb, ids); + + auto preprocessor = index_->BuildPreprocessor(dataset, cfg); + index_->set_preprocessor(preprocessor); + auto model = index_->Train(dataset, cfg); + index_->set_index_model(model); + index_->Add(dataset, cfg); + + if (auto device_index = std::dynamic_pointer_cast(index_)) { + auto host_index = device_index->CopyGpuToCpu(Config()); + index_ = host_index; + type = ConvertToCpuIndexType(type); + } else { + WRAPPER_LOG_ERROR << "Build IVFMIXIndex Failed"; + return Status(KNOWHERE_ERROR, "Build IVFMIXIndex Failed"); + } + } catch (knowhere::KnowhereException& e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); + } catch (std::exception& e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_ERROR, e.what()); + } + return Status::OK(); +} + +Status +IVFMixIndex::Load(const knowhere::BinarySet& index_binary) { + index_->Load(index_binary); + dim = Dimension(); + return Status::OK(); +} + +knowhere::QuantizerPtr +IVFHybridIndex::LoadQuantizer(const Config& conf) { + // TODO(linxj): Hardcode here + if (auto new_idx = std::dynamic_pointer_cast(index_)) { + return new_idx->LoadQuantizer(conf); + } else { + WRAPPER_LOG_ERROR << "Hybrid mode not support for index type: " << int(type); + } +} + +Status +IVFHybridIndex::SetQuantizer(const knowhere::QuantizerPtr& q) { + try { + // TODO(linxj): Hardcode here + if (auto new_idx = std::dynamic_pointer_cast(index_)) { + new_idx->SetQuantizer(q); + } else { + WRAPPER_LOG_ERROR << "Hybrid mode not support for index type: " << int(type); + return Status(KNOWHERE_ERROR, "not support"); + } + } catch (knowhere::KnowhereException& e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_UNEXPECTED_ERROR, e.what()); + } catch (std::exception& e) { + WRAPPER_LOG_ERROR << e.what(); + return Status(KNOWHERE_ERROR, e.what()); + } + return Status::OK(); +} + +Status +IVFHybridIndex::UnsetQuantizer() { + try { + // TODO(linxj): Hardcode here + if (auto new_idx = std::dynamic_pointer_cast(index_)) { + new_idx->UnsetQuantizer(); + } else { + WRAPPER_LOG_ERROR << "Hybrid mode not support for index type: " << int(type); + return Status(KNOWHERE_ERROR, "not support"); + } + } catch (knowhere::KnowhereException& e) { + WRAPPER_LOG_ERROR << e.what(); + return 
+Status
+IVFHybridIndex::UnsetQuantizer() {
+    try {
+        // TODO(linxj): Hardcode here
+        if (auto new_idx = std::dynamic_pointer_cast<knowhere::IVFSQHybrid>(index_)) {
+            new_idx->UnsetQuantizer();
+        } else {
+            WRAPPER_LOG_ERROR << "Hybrid mode not support for index type: " << int(type);
+            return Status(KNOWHERE_ERROR, "not support");
+        }
+    } catch (knowhere::KnowhereException& e) {
+        WRAPPER_LOG_ERROR << e.what();
+        return Status(KNOWHERE_UNEXPECTED_ERROR, e.what());
+    } catch (std::exception& e) {
+        WRAPPER_LOG_ERROR << e.what();
+        return Status(KNOWHERE_ERROR, e.what());
+    }
+    return Status::OK();
+}
+
+VecIndexPtr
+IVFHybridIndex::LoadData(const knowhere::QuantizerPtr& q, const Config& conf) {
+    try {
+        // TODO(linxj): Hardcode here
+        if (auto new_idx = std::dynamic_pointer_cast<knowhere::IVFSQHybrid>(index_)) {
+            return std::make_shared<IVFHybridIndex>(new_idx->LoadData(q, conf), type);
+        } else {
+            WRAPPER_LOG_ERROR << "Hybrid mode not support for index type: " << int(type);
+        }
+    } catch (knowhere::KnowhereException& e) {
+        WRAPPER_LOG_ERROR << e.what();
+    } catch (std::exception& e) {
+        WRAPPER_LOG_ERROR << e.what();
+    }
+    return nullptr;
+}
+
+std::pair<VecIndexPtr, knowhere::QuantizerPtr>
+IVFHybridIndex::CopyToGpuWithQuantizer(const int64_t& device_id, const Config& cfg) {
+    try {
+        // TODO(linxj): Hardcode here
+        if (auto hybrid_idx = std::dynamic_pointer_cast<knowhere::IVFSQHybrid>(index_)) {
+            auto pair = hybrid_idx->CopyCpuToGpuWithQuantizer(device_id, cfg);
+            auto new_idx = std::make_shared<IVFHybridIndex>(pair.first, type);
+            return std::make_pair(new_idx, pair.second);
+        } else {
+            WRAPPER_LOG_ERROR << "Hybrid mode not support for index type: " << int(type);
+        }
+    } catch (knowhere::KnowhereException& e) {
+        WRAPPER_LOG_ERROR << e.what();
+    } catch (std::exception& e) {
+        WRAPPER_LOG_ERROR << e.what();
+    }
+    return std::make_pair(nullptr, nullptr);
+}
+
+}  // namespace engine
+}  // namespace milvus
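As a reading aid rather than part of the diff: the quantizer methods above are meant to be driven in a load/attach/detach sequence. A rough sketch, where `index` (a VecIndexPtr) and an already-populated `conf` are assumed to come from the surrounding engine code:

    // Stage the SQ8H quantizer on the GPU, attach it for hybrid search, then release it.
    if (auto hybrid = std::dynamic_pointer_cast<milvus::engine::IVFHybridIndex>(index)) {
        auto quantizer = hybrid->LoadQuantizer(conf);  // copy quantizer data to the GPU
        hybrid->SetQuantizer(quantizer);               // searches now use the GPU-resident quantizer
        // ... run searches ...
        hybrid->UnsetQuantizer();                      // detach so the GPU memory can be reclaimed
    }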
diff --git a/core/src/wrapper/gpu/GPUVecImpl.h b/core/src/wrapper/gpu/GPUVecImpl.h
new file mode 100644
index 0000000000..39df216185
--- /dev/null
+++ b/core/src/wrapper/gpu/GPUVecImpl.h
@@ -0,0 +1,66 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <utility>
+
+#include "wrapper/VecIndex.h"
+#include "knowhere/index/vector_index/VectorIndex.h"
+
+namespace milvus {
+namespace engine {
+
+class IVFMixIndex : public VecIndexImpl {
+public:
+    explicit IVFMixIndex(std::shared_ptr<knowhere::VectorIndex> index, const IndexType &type)
+        : VecIndexImpl(std::move(index), type) {
+    }
+
+    Status
+    BuildAll(const int64_t &nb, const float *xb, const int64_t *ids, const Config &cfg, const int64_t &nt,
+             const float *xt) override;
+
+    Status
+    Load(const knowhere::BinarySet &index_binary) override;
+};
+
+class IVFHybridIndex : public IVFMixIndex {
+public:
+    explicit IVFHybridIndex(std::shared_ptr<knowhere::VectorIndex> index, const IndexType &type)
+        : IVFMixIndex(std::move(index), type) {
+    }
+
+    knowhere::QuantizerPtr
+    LoadQuantizer(const Config &conf) override;
+
+    Status
+    SetQuantizer(const knowhere::QuantizerPtr &q) override;
+
+    Status
+    UnsetQuantizer() override;
+
+    std::pair<VecIndexPtr, knowhere::QuantizerPtr>
+    CopyToGpuWithQuantizer(const int64_t &device_id, const Config &cfg) override;
+
+    VecIndexPtr
+    LoadData(const knowhere::QuantizerPtr &q, const Config &conf) override;
+};
+
+}  // namespace engine
+}  // namespace milvus
diff --git a/core/thirdparty/versions.txt b/core/thirdparty/versions.txt
index 4faaf119e4..68023d4072 100644
--- a/core/thirdparty/versions.txt
+++ b/core/thirdparty/versions.txt
@@ -9,5 +9,6 @@ LIBUNWIND_VERSION=1.3.1
 GPERFTOOLS_VERSION=2.7
 GRPC_VERSION=master
 ZLIB_VERSION=v1.2.11
+MKL_VERSION=2019.5.281
 
 # vim: set filetype=sh:
diff --git a/core/ubuntu_build_deps.sh b/core/ubuntu_build_deps.sh
index e454a147ac..911046aa1f 100755
--- a/core/ubuntu_build_deps.sh
+++ b/core/ubuntu_build_deps.sh
@@ -1,5 +1,11 @@
 #!/bin/bash
 
+wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB
+sudo apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB
+
+sudo sh -c 'echo deb https://apt.repos.intel.com/mkl all main > /etc/apt/sources.list.d/intel-mkl.list'
+sudo apt-get -y update && sudo apt-get -y install intel-mkl-gnu-2019.5-281 intel-mkl-core-2019.5-281
+
 sudo apt-get install -y gfortran libmysqlclient-dev mysql-client libcurl4-openssl-dev libboost-system-dev \
     libboost-filesystem-dev libboost-serialization-dev libboost-regex-dev
diff --git a/core/unittest/CMakeLists.txt b/core/unittest/CMakeLists.txt
index 62b5bdf256..1ec300a7bc 100644
--- a/core/unittest/CMakeLists.txt
+++ b/core/unittest/CMakeLists.txt
@@ -16,7 +16,6 @@
 # specific language governing permissions and limitations
 # under the License.
 #-------------------------------------------------------------------------------
-include_directories("${CUDA_TOOLKIT_ROOT_DIR}/include")
 
 foreach(dir ${INDEX_INCLUDE_DIRS})
     include_directories(${dir})
@@ -26,8 +25,6 @@ include_directories(${MILVUS_SOURCE_DIR})
 include_directories(${MILVUS_ENGINE_SRC})
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
 
-link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
-
 aux_source_directory(${MILVUS_ENGINE_SRC}/cache cache_files)
 aux_source_directory(${MILVUS_ENGINE_SRC}/config config_files)
 aux_source_directory(${MILVUS_ENGINE_SRC}/metrics metrics_files)
@@ -116,11 +113,22 @@ set(unittest_libs
         prometheus-cpp-core
         dl
         z
-        ${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
-        cudart
-        cublas
         )
 
+if (MILVUS_GPU_VERSION)
+    include_directories("${CUDA_INCLUDE_DIRS}")
+    link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
+    set(unittest_libs ${unittest_libs}
+            ${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
+            cudart
+            cublas
+            )
+    aux_source_directory(${MILVUS_ENGINE_SRC}/wrapper/gpu wrapper_gpu_files)
+    set(common_files ${common_files}
+            ${wrapper_gpu_files}
+            )
+endif()
+
 add_subdirectory(db)
 add_subdirectory(wrapper)
 add_subdirectory(metrics)
diff --git a/core/unittest/db/CMakeLists.txt b/core/unittest/db/CMakeLists.txt
index 3954dd8656..83f242a31a 100644
--- a/core/unittest/db/CMakeLists.txt
+++ b/core/unittest/db/CMakeLists.txt
@@ -20,7 +20,7 @@
 
 aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR} test_files)
 
-cuda_add_executable(test_db
+add_executable(test_db
     ${common_files}
     ${test_files}
     )
diff --git a/core/unittest/db/test_db.cpp b/core/unittest/db/test_db.cpp
index f9e8da9c0f..b869d17388 100644
--- a/core/unittest/db/test_db.cpp
+++ b/core/unittest/db/test_db.cpp
@@ -175,8 +175,7 @@ TEST_F(DBTest, DB_TEST) {
     BuildVectors(qb, qxb);
 
     std::thread search([&]() {
-        milvus::engine::ResultIds result_ids;
-        milvus::engine::ResultDistances result_distances;
+        milvus::engine::QueryResults results;
         int k = 10;
         std::this_thread::sleep_for(std::chrono::seconds(2));
@@ -191,17 +190,17 @@ TEST_F(DBTest, DB_TEST) {
             prev_count = count;
 
             START_TIMER;
-            stat = db_->Query(TABLE_NAME, k, qb, 10, qxb.data(), result_ids, result_distances);
+            stat = db_->Query(TABLE_NAME, k, qb, 10, qxb.data(), results);
             ss << "Search " << j << " With Size " << count / milvus::engine::M << " M";
             STOP_TIMER(ss.str());
 
             ASSERT_TRUE(stat.ok());
-            for (auto i = 0; i < qb; ++i) {
-                ASSERT_EQ(result_ids[i*k], target_ids[i]);
+            for (auto k = 0; k < qb; ++k) {
+                ASSERT_EQ(results[k][0].first, target_ids[k]);
                 ss.str("");
-                ss << "Result [" << i << "]:";
-                for (auto t = 0; t < k; t++) {
-                    ss << result_ids[i * k + t] << " ";
+                ss << "Result [" << k << "]:";
+                for (auto result : results[k]) {
+                    ss << result.first << " ";
                 }
                 /* LOG(DEBUG) << ss.str(); */
             }
@@ -285,18 +284,16 @@ TEST_F(DBTest, SEARCH_TEST) {
     db_->CreateIndex(TABLE_NAME, index);  // wait until build index finish
 
     {
-        milvus::engine::ResultIds result_ids;
-        milvus::engine::ResultDistances result_distances;
-        stat = db_->Query(TABLE_NAME, k, nq, 10, xq.data(), result_ids, result_distances);
+        milvus::engine::QueryResults results;
+        stat = db_->Query(TABLE_NAME, k, nq, 10, xq.data(), results);
         ASSERT_TRUE(stat.ok());
     }
 
     {//search by specify index file
         milvus::engine::meta::DatesT dates;
         std::vector<std::string> file_ids = {"1", "2", "3", "4", "5", "6"};
-        milvus::engine::ResultIds result_ids;
-        milvus::engine::ResultDistances result_distances;
-        stat = db_->Query(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, result_ids, result_distances);
+        milvus::engine::QueryResults results;
+        stat = db_->Query(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, results);
         ASSERT_TRUE(stat.ok());
     }
 
@@ -306,25 +303,22 @@
     db_->CreateIndex(TABLE_NAME, index);  // wait until build index finish
 
     {
-        milvus::engine::ResultIds result_ids;
-        milvus::engine::ResultDistances result_distances;
-        stat = db_->Query(TABLE_NAME, k, nq, 10, xq.data(), result_ids, result_distances);
+        milvus::engine::QueryResults results;
+        stat = db_->Query(TABLE_NAME, k, nq, 10, xq.data(), results);
         ASSERT_TRUE(stat.ok());
     }
 
     {
-        milvus::engine::ResultIds result_ids;
-        milvus::engine::ResultDistances result_distances;
-        stat = db_->Query(TABLE_NAME, k, 200, 10, xq.data(), result_ids, result_distances);
+        milvus::engine::QueryResults large_nq_results;
+        stat = db_->Query(TABLE_NAME, k, 200, 10, xq.data(), large_nq_results);
         ASSERT_TRUE(stat.ok());
     }
 
     {//search by specify index file
         milvus::engine::meta::DatesT dates;
         std::vector<std::string> file_ids = {"1", "2", "3", "4", "5", "6"};
-        milvus::engine::ResultIds result_ids;
-        milvus::engine::ResultDistances result_distances;
-        stat = db_->Query(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, result_ids, result_distances);
+        milvus::engine::QueryResults results;
+        stat = db_->Query(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, results);
         ASSERT_TRUE(stat.ok());
     }
 
@@ -397,12 +391,11 @@ TEST_F(DBTest, SHUTDOWN_TEST) {
     ASSERT_FALSE(stat.ok());
 
     milvus::engine::meta::DatesT dates;
-    milvus::engine::ResultIds result_ids;
-    milvus::engine::ResultDistances result_distances;
-    stat = db_->Query(table_info.table_id_, 1, 1, 1, nullptr, dates, result_ids, result_distances);
+    milvus::engine::QueryResults results;
+    stat = db_->Query(table_info.table_id_, 1, 1, 1, nullptr, dates, results);
     ASSERT_FALSE(stat.ok());
 
     std::vector<std::string> file_ids;
-    stat = db_->Query(table_info.table_id_, file_ids, 1, 1, 1, nullptr, dates, result_ids, result_distances);
+    stat = db_->Query(table_info.table_id_, file_ids, 1, 1, 1, nullptr, dates, results);
     ASSERT_FALSE(stat.ok());
 
     stat = db_->DeleteTable(table_info.table_id_, dates);
diff --git a/core/unittest/db/test_db_mysql.cpp b/core/unittest/db/test_db_mysql.cpp
index 30a616e662..ae1da8012a 100644
--- a/core/unittest/db/test_db_mysql.cpp
+++ b/core/unittest/db/test_db_mysql.cpp
@@ -81,8 +81,7 @@ TEST_F(MySqlDBTest, DB_TEST) {
     ASSERT_EQ(target_ids.size(), qb);
 
     std::thread search([&]() {
-        milvus::engine::ResultIds result_ids;
-        milvus::engine::ResultDistances result_distances;
+        milvus::engine::QueryResults results;
         int k = 10;
         std::this_thread::sleep_for(std::chrono::seconds(5));
@@ -97,25 +96,25 @@ TEST_F(MySqlDBTest, DB_TEST) {
             prev_count = count;
 
             START_TIMER;
-            stat = db_->Query(TABLE_NAME, k, qb, 10, qxb.data(), result_ids, result_distances);
+            stat = db_->Query(TABLE_NAME, k, qb, 10, qxb.data(), results);
             ss << "Search " << j << " With Size " << count / milvus::engine::M << " M";
             STOP_TIMER(ss.str());
 
             ASSERT_TRUE(stat.ok());
-            for (auto i = 0; i < qb; ++i) {
+            for (auto k = 0; k < qb; ++k) {
+//                std::cout << results[k][0].first << " " << target_ids[k] << std::endl;
+//                ASSERT_EQ(results[k][0].first, target_ids[k]);
                 bool exists = false;
-                for (auto t = 0; t < k; t++) {
-                    if (result_ids[i * k + t] == target_ids[i]) {
+                for (auto &result : results[k]) {
+                    if (result.first == target_ids[k]) {
                         exists = true;
                     }
                 }
                 ASSERT_TRUE(exists);
                 ss.str("");
-                ss << "Result [" << i << "]:";
-                for (auto t = 0; t < k; t++) {
-                    ss << result_ids[i * k + t] << " ";
+                ss << "Result [" << k << "]:";
+                for (auto result : results[k]) {
+                    ss << result.first << " ";
                 }
                 /* LOG(DEBUG) << ss.str(); */
             }
@@ -189,9 +188,8 @@ TEST_F(MySqlDBTest, SEARCH_TEST) {
     sleep(2);  // wait until build index finish
 
-    milvus::engine::ResultIds result_ids;
-    milvus::engine::ResultDistances result_distances;
-    stat = db_->Query(TABLE_NAME, k, nq, 10, xq.data(), result_ids, result_distances);
+    milvus::engine::QueryResults results;
+    stat = db_->Query(TABLE_NAME, k, nq, 10, xq.data(), results);
     ASSERT_TRUE(stat.ok());
 }
diff --git a/core/unittest/db/test_mem.cpp b/core/unittest/db/test_mem.cpp
index 939e61246c..e05811ff9e 100644
--- a/core/unittest/db/test_mem.cpp
+++ b/core/unittest/db/test_mem.cpp
@@ -259,11 +259,10 @@ TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) {
     int topk = 10, nprobe = 10;
     for (auto& pair : search_vectors) {
         auto& search = pair.second;
-        milvus::engine::ResultIds result_ids;
-        milvus::engine::ResultDistances result_distances;
-        stat = db_->Query(GetTableName(), topk, 1, nprobe, search.data(), result_ids, result_distances);
-        ASSERT_EQ(result_ids[0], pair.first);
-        ASSERT_LT(result_distances[0], 1e-4);
+        milvus::engine::QueryResults results;
+        stat = db_->Query(GetTableName(), topk, 1, nprobe, search.data(), results);
+        ASSERT_EQ(results[0][0].first, pair.first);
+        ASSERT_LT(results[0][0].second, 1e-4);
     }
 }
 
@@ -315,8 +314,7 @@ TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) {
    BuildVectors(qb, qxb);
 
    std::thread search([&]() {
-        milvus::engine::ResultIds result_ids;
-        milvus::engine::ResultDistances result_distances;
+        milvus::engine::QueryResults results;
        int k = 10;
        std::this_thread::sleep_for(std::chrono::seconds(2));
@@ -331,17 +329,17 @@ TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) {
            prev_count = count;
 
            START_TIMER;
-            stat = db_->Query(GetTableName(), k, qb, 10, qxb.data(), result_ids, result_distances);
+            stat = db_->Query(GetTableName(), k, qb, 10, qxb.data(), results);
            ss << "Search " << j << " With Size " << count / milvus::engine::M << " M";
            STOP_TIMER(ss.str());
 
            ASSERT_TRUE(stat.ok());
-            for (auto i = 0; i < qb; ++i) {
-                ASSERT_EQ(result_ids[i * k], target_ids[i]);
+            for (auto k = 0; k < qb; ++k) {
+                ASSERT_EQ(results[k][0].first, target_ids[k]);
                ss.str("");
-                ss << "Result [" << i << "]:";
-                for (auto t = 0; t < k; t++) {
-                    ss << result_ids[i * k + t] << " ";
+                ss << "Result [" << k << "]:";
+                for (auto result : results[k]) {
+                    ss << result.first << " ";
                }
                /* LOG(DEBUG) << ss.str(); */
            }
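Not part of the patch, but useful when reading the converted assertions: judging from how `results` is indexed above (results[k][0].first / results[k][0].second), the QueryResults type the tests move back to is a per-query list of (id, distance) hits. A hedged sketch of the assumed aliases:

    // Assumed shape, inferred from usage in the tests above:
    using Id2DistPair = std::pair<int64_t, double>;
    using QueryResult = std::vector<Id2DistPair>;   // hits for one query, best match first
    using QueryResults = std::vector<QueryResult>;  // one entry per query vector

    // So after db_->Query(..., results):
    //   results[q][0].first   -> id of the best match for query q
    //   results[q][0].second  -> its distance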
diff --git a/core/unittest/db/test_search.cpp b/core/unittest/db/test_search.cpp
index 1d1d9a677a..b8cf08b3e2 100644
--- a/core/unittest/db/test_search.cpp
+++ b/core/unittest/db/test_search.cpp
@@ -19,7 +19,6 @@
 #include <gtest/gtest.h>
 #include <cmath>
 
-#include "scheduler/job/SearchJob.h"
 #include "scheduler/task/SearchTask.h"
 #include "utils/TimeRecorder.h"
 #include "utils/ThreadPool.h"
@@ -29,80 +28,74 @@ namespace {
 namespace ms = milvus::scheduler;
 
 void
-BuildResult(ms::ResultIds& output_ids,
-            ms::ResultDistances & output_distances,
-            size_t input_k,
-            size_t topk,
-            size_t nq,
+BuildResult(std::vector<int64_t>& output_ids,
+            std::vector<float>& output_distance,
+            uint64_t input_k,
+            uint64_t topk,
+            uint64_t nq,
             bool ascending) {
     output_ids.clear();
     output_ids.resize(nq * topk);
-    output_distances.clear();
-    output_distances.resize(nq * topk);
+    output_distance.clear();
+    output_distance.resize(nq * topk);
 
-    for (size_t i = 0; i < nq; i++) {
+    for (uint64_t i = 0; i < nq; i++) {
         //insert valid items
-        for (size_t j = 0; j < input_k; j++) {
+        for (uint64_t j = 0; j < input_k; j++) {
             output_ids[i * topk + j] = (int64_t)(drand48() * 100000);
-            output_distances[i * topk + j] = ascending ? (j + drand48()) : ((input_k - j) + drand48());
+            output_distance[i * topk + j] = ascending ? (j + drand48()) : ((input_k - j) + drand48());
         }
 
         //insert invalid items
-        for (size_t j = input_k; j < topk; j++) {
+        for (uint64_t j = input_k; j < topk; j++) {
             output_ids[i * topk + j] = -1;
-            output_distances[i * topk + j] = -1.0;
+            output_distance[i * topk + j] = -1.0;
         }
     }
 }
 
 void
-CopyResult(ms::ResultIds& output_ids,
-           ms::ResultDistances& output_distances,
-           size_t output_topk,
-           ms::ResultIds& input_ids,
-           ms::ResultDistances& input_distances,
-           size_t input_topk,
-           size_t nq) {
+CopyResult(std::vector<int64_t>& output_ids,
+           std::vector<float>& output_distance,
+           uint64_t output_topk,
+           std::vector<int64_t>& input_ids,
+           std::vector<float>& input_distance,
+           uint64_t input_topk,
+           uint64_t nq) {
     ASSERT_TRUE(input_ids.size() >= nq * input_topk);
-    ASSERT_TRUE(input_distances.size() >= nq * input_topk);
+    ASSERT_TRUE(input_distance.size() >= nq * input_topk);
     ASSERT_TRUE(output_topk <= input_topk);
     output_ids.clear();
     output_ids.resize(nq * output_topk);
-    output_distances.clear();
-    output_distances.resize(nq * output_topk);
+    output_distance.clear();
+    output_distance.resize(nq * output_topk);
 
-    for (size_t i = 0; i < nq; i++) {
-        for (size_t j = 0; j < output_topk; j++) {
+    for (uint64_t i = 0; i < nq; i++) {
+        for (uint64_t j = 0; j < output_topk; j++) {
             output_ids[i * output_topk + j] = input_ids[i * input_topk + j];
-            output_distances[i * output_topk + j] = input_distances[i * input_topk + j];
+            output_distance[i * output_topk + j] = input_distance[i * input_topk + j];
         }
     }
 }
 
 void
-CheckTopkResult(const ms::ResultIds& input_ids_1,
-                const ms::ResultDistances& input_distances_1,
-                size_t input_k_1,
-                const ms::ResultIds& input_ids_2,
-                const ms::ResultDistances& input_distances_2,
-                size_t input_k_2,
-                size_t topk,
-                size_t nq,
+CheckTopkResult(const std::vector<int64_t>& input_ids_1,
+                const std::vector<float>& input_distance_1,
+                const std::vector<int64_t>& input_ids_2,
+                const std::vector<float>& input_distance_2,
+                uint64_t topk,
+                uint64_t nq,
                 bool ascending,
-                const ms::ResultIds& result_ids,
-                const ms::ResultDistances& result_distances) {
-    ASSERT_EQ(result_ids.size(), result_distances.size());
-    ASSERT_EQ(input_ids_1.size(), input_distances_1.size());
-    ASSERT_EQ(input_ids_2.size(), input_distances_2.size());
+                const milvus::scheduler::ResultSet& result) {
+    ASSERT_EQ(result.size(), nq);
+    ASSERT_EQ(input_ids_1.size(), input_distance_1.size());
+    ASSERT_EQ(input_ids_2.size(), input_distance_2.size());
 
-    size_t result_k = result_distances.size() / nq;
-    ASSERT_EQ(result_k, std::min(topk, input_k_1 + input_k_2));
-
-    for (size_t i = 0; i < nq; i++) {
+    for (int64_t i = 0; i < nq; i++) {
         std::vector<float>
-            src_vec(input_distances_1.begin() + i * topk, input_distances_1.begin() + (i + 1) * topk);
-        src_vec.insert(src_vec.end(),
-                       input_distances_2.begin() + i * topk,
-                       input_distances_2.begin() + (i + 1) * topk);
+            src_vec(input_distance_1.begin() + i * topk, input_distance_1.begin() + (i + 1) * topk);
+        src_vec.insert(src_vec.end(),
+                       input_distance_2.begin() + i * topk,
+                       input_distance_2.begin() + (i + 1) * topk);
         if (ascending) {
             std::sort(src_vec.begin(), src_vec.end());
         } else {
@@ -118,16 +111,15 @@ CheckTopkResult(const ms::ResultIds& input_ids_1,
             ++iter;
         }
 
-        size_t n = std::min(topk, result_ids.size() / nq);
-        for (size_t j = 0; j < n; j++) {
-            size_t idx = i * n + j;
-            if (result_ids[idx] < 0) {
+        uint64_t n = std::min(topk, result[i].size());
+        for (uint64_t j = 0; j < n; j++) {
+            if (result[i][j].first < 0) {
                 continue;
             }
-            if (src_vec[j] != result_distances[idx]) {
-                std::cout << src_vec[j] << " " << result_distances[idx] << std::endl;
+            if (src_vec[j] != result[i][j].second) {
+                std::cout << src_vec[j] << " " << result[i][j].second << std::endl;
             }
-            ASSERT_TRUE(src_vec[j] == result_distances[idx]);
+            ASSERT_TRUE(src_vec[j] == result[i][j].second);
         }
     }
 }
@@ -135,21 +127,20 @@
 }  // namespace
 
 void
-MergeTopkToResultSetTest(size_t topk_1, size_t topk_2, size_t nq, size_t topk, bool ascending) {
-    ms::ResultIds ids1, ids2;
-    ms::ResultDistances dist1, dist2;
-    ms::ResultIds result_ids;
-    ms::ResultDistances result_distances;
+MergeTopkToResultSetTest(uint64_t topk_1, uint64_t topk_2, uint64_t nq, uint64_t topk, bool ascending) {
+    std::vector<int64_t> ids1, ids2;
+    std::vector<float> dist1, dist2;
+    ms::ResultSet result;
     BuildResult(ids1, dist1, topk_1, topk, nq, ascending);
     BuildResult(ids2, dist2, topk_2, topk, nq, ascending);
-    ms::XSearchTask::MergeTopkToResultSet(ids1, dist1, topk_1, nq, topk, ascending, result_ids, result_distances);
-    ms::XSearchTask::MergeTopkToResultSet(ids2, dist2, topk_2, nq, topk, ascending, result_ids, result_distances);
-    CheckTopkResult(ids1, dist1, topk_1, ids2, dist2, topk_2, topk, nq, ascending, result_ids, result_distances);
+    ms::XSearchTask::MergeTopkToResultSet(ids1, dist1, topk_1, nq, topk, ascending, result);
+    ms::XSearchTask::MergeTopkToResultSet(ids2, dist2, topk_2, nq, topk, ascending, result);
+    CheckTopkResult(ids1, dist1, ids2, dist2, topk, nq, ascending, result);
 }
 
 TEST(DBSearchTest, MERGE_RESULT_SET_TEST) {
-    size_t NQ = 15;
-    size_t TOP_K = 64;
+    uint64_t NQ = 15;
+    uint64_t TOP_K = 64;
 
     /* test1, id1/dist1 valid, id2/dist2 empty */
     MergeTopkToResultSetTest(TOP_K, 0, NQ, TOP_K, true);
@@ -168,21 +159,21 @@ TEST(DBSearchTest, MERGE_RESULT_SET_TEST) {
     MergeTopkToResultSetTest(TOP_K / 2, TOP_K / 3, NQ, TOP_K, false);
 }
 
-//void MergeTopkArrayTest(size_t topk_1, size_t topk_2, size_t nq, size_t topk, bool ascending) {
+//void MergeTopkArrayTest(uint64_t topk_1, uint64_t topk_2, uint64_t nq, uint64_t topk, bool ascending) {
//    std::vector<int64_t> ids1, ids2;
//    std::vector<float> dist1, dist2;
//    ms::ResultSet result;
//    BuildResult(ids1, dist1, topk_1, topk, nq, ascending);
//    BuildResult(ids2, dist2, topk_2, topk, nq, ascending);
-//    size_t result_topk = std::min(topk, topk_1 + topk_2);
+//    uint64_t result_topk = std::min(topk, topk_1 + topk_2);
//    ms::XSearchTask::MergeTopkArray(ids1, dist1, topk_1, ids2, dist2, topk_2, nq, topk, ascending);
//    if (ids1.size() != result_topk * nq) {
//        std::cout << ids1.size() << " " << result_topk * nq << std::endl;
//    }
//    ASSERT_TRUE(ids1.size() == result_topk * nq);
//    ASSERT_TRUE(dist1.size() == result_topk * nq);
-//    for (size_t i = 0; i < nq; i++) {
-//        for (size_t k = 1; k < result_topk; k++) {
+//    for (uint64_t i = 0; i < nq; i++) {
+//        for (uint64_t k = 1; k < result_topk; k++) {
//            float f0 = dist1[i * topk + k - 1];
//            float f1 = dist1[i * topk + k];
//            if (ascending) {
@@ -201,8 +192,8 @@
//}
 
//TEST(DBSearchTest, MERGE_ARRAY_TEST) {
-//    size_t NQ = 15;
-//    size_t TOP_K = 64;
+//    uint64_t NQ = 15;
+//    uint64_t TOP_K = 64;
//
//    /* test1, id1/dist1 valid, id2/dist2 empty */
//    MergeTopkArrayTest(TOP_K, 0, NQ, TOP_K, true);
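For intuition about what MergeTopkToResultSet is being tested for (a sketch only, not the patched implementation, which works on flat id/distance arrays): each call folds one batch of per-query hits into the running result set while keeping the overall top-k. A minimal per-query version, assuming ascending distances:

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using IdDist = std::pair<int64_t, double>;

    // Append the new batch, re-sort by distance, truncate to topk.
    std::vector<IdDist> MergeTopk(std::vector<IdDist> result,
                                  const std::vector<IdDist>& batch, size_t topk) {
        result.insert(result.end(), batch.begin(), batch.end());
        std::sort(result.begin(), result.end(),
                  [](const IdDist& a, const IdDist& b) { return a.second < b.second; });
        if (result.size() > topk) {
            result.resize(topk);
        }
        return result;
    }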
@@ -231,23 +222,23 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) {
     int32_t index_file_num = 478;   /* sift1B dataset, index files num */
     bool ascending = true;
 
-    std::vector<size_t> thread_vec = {4};
-    std::vector<size_t> nq_vec = {1000};
-    std::vector<size_t> topk_vec = {64};
-    size_t NQ = nq_vec[nq_vec.size() - 1];
-    size_t TOPK = topk_vec[topk_vec.size() - 1];
+    std::vector<int32_t> thread_vec = {4, 8};
+    std::vector<int32_t> nq_vec = {1, 10, 100};
+    std::vector<int32_t> topk_vec = {1, 4, 16, 64};
+    int32_t NQ = nq_vec[nq_vec.size() - 1];
+    int32_t TOPK = topk_vec[topk_vec.size() - 1];
 
-    std::vector<ms::ResultIds> id_vec;
-    std::vector<ms::ResultDistances> dist_vec;
-    ms::ResultIds input_ids;
-    ms::ResultDistances input_distances;
+    std::vector<std::vector<int64_t>> id_vec;
+    std::vector<std::vector<float>> dist_vec;
+    std::vector<int64_t> input_ids;
+    std::vector<float> input_distance;
     int32_t i, k, step;
 
     /* generate testing data */
     for (i = 0; i < index_file_num; i++) {
-        BuildResult(input_ids, input_distances, TOPK, TOPK, NQ, ascending);
+        BuildResult(input_ids, input_distance, TOPK, TOPK, NQ, ascending);
         id_vec.push_back(input_ids);
-        dist_vec.push_back(input_distances);
+        dist_vec.push_back(input_distance);
     }
 
     for (int32_t max_thread_num : thread_vec) {
@@ -256,11 +247,10 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) {
 
         for (int32_t nq : nq_vec) {
             for (int32_t top_k : topk_vec) {
-                ms::ResultIds final_result_ids, final_result_ids_2, final_result_ids_3;
-                ms::ResultDistances final_result_distances, final_result_distances_2, final_result_distances_3;
+                ms::ResultSet final_result, final_result_2, final_result_3;
 
-                std::vector<ms::ResultIds> id_vec_1(index_file_num);
-                std::vector<ms::ResultDistances> dist_vec_1(index_file_num);
+                std::vector<std::vector<int64_t>> id_vec_1(index_file_num);
+                std::vector<std::vector<float>> dist_vec_1(index_file_num);
                 for (i = 0; i < index_file_num; i++) {
                     CopyResult(id_vec_1[i], dist_vec_1[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
                 }
@@ -278,10 +268,8 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) {
                                                           nq,
                                                           top_k,
                                                           ascending,
-                                                          final_result_ids,
-                                                          final_result_distances);
-                    ASSERT_EQ(final_result_ids.size(), nq * top_k);
-                    ASSERT_EQ(final_result_distances.size(), nq * top_k);
+                                                          final_result);
+                    ASSERT_EQ(final_result.size(), nq);
                 }
                 rc1.RecordSection("reduce done");
 
@@ -290,7 +278,7 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) {
//                /* method-2 */
//                std::vector<std::vector<int64_t>> id_vec_2(index_file_num);
//                std::vector<std::vector<float>> dist_vec_2(index_file_num);
-//                std::vector<size_t> k_vec_2(index_file_num);
+//                std::vector<uint64_t> k_vec_2(index_file_num);
//                for (i = 0; i < index_file_num; i++) {
//                    CopyResult(id_vec_2[i], dist_vec_2[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
//                    k_vec_2[i] = top_k;
@@ -333,7 +321,7 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) {
//                /* method-3 parallel */
//                std::vector<std::vector<int64_t>> id_vec_3(index_file_num);
//                std::vector<std::vector<float>> dist_vec_3(index_file_num);
-//                std::vector<size_t> k_vec_3(index_file_num);
+//                std::vector<uint64_t> k_vec_3(index_file_num);
//                for (i = 0; i < index_file_num; i++) {
//                    CopyResult(id_vec_3[i], dist_vec_3[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
//                    k_vec_3[i] = top_k;
diff --git a/core/unittest/db/utils.cpp b/core/unittest/db/utils.cpp
index 16e195079c..a78aecd046 100644
--- a/core/unittest/db/utils.cpp
+++ b/core/unittest/db/utils.cpp
@@ -27,7 +27,9 @@
 #include "cache/CpuCacheMgr.h"
 #include "db/DBFactory.h"
 #include "db/Options.h"
+#ifdef MILVUS_GPU_VERSION
 #include "knowhere/index/vector_index/helpers/FaissGpuResourceMgr.h"
+#endif
 #include "utils/CommonUtil.h"
 
 INITIALIZE_EASYLOGGINGPP
@@ -68,9 +70,15 @@ static const char
     "    use_blas_threshold: 20\n"
     "\n"
     "resource_config:\n"
+#ifdef MILVUS_CPU_VERSION
+    "  search_resources:\n"
+    "    - cpu\n"
+    "  index_build_device: cpu    # CPU used for building index";
+#else
     "  search_resources:\n"
     "    - gpu0\n"
     "  index_build_device: gpu0    # GPU used for building index";
+#endif
 
 void
 WriteToFile(const std::string& file_path, const char* content) {
@@ -118,15 +126,18 @@
BaseTest::InitLog() { void BaseTest::SetUp() { InitLog(); - +#ifdef MILVUS_GPU_VERSION knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(0, 1024 * 1024 * 200, 1024 * 1024 * 300, 2); +#endif } void BaseTest::TearDown() { milvus::cache::CpuCacheMgr::GetInstance()->ClearCache(); milvus::cache::GpuCacheMgr::GetInstance(0)->ClearCache(); +#ifdef MILVUS_GPU_VERSION knowhere::FaissGpuResourceMgr::GetInstance().Free(); +#endif } milvus::engine::DBOptions diff --git a/core/unittest/metrics/test_metrics.cpp b/core/unittest/metrics/test_metrics.cpp index 1b26ad097b..c0d1044bb4 100644 --- a/core/unittest/metrics/test_metrics.cpp +++ b/core/unittest/metrics/test_metrics.cpp @@ -75,8 +75,7 @@ TEST_F(MetricTest, METRIC_TEST) { } std::thread search([&]() { -// milvus::engine::ResultIds result_ids; -// milvus::engine::ResultDistances result_distances; + milvus::engine::QueryResults results; int k = 10; std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -91,7 +90,7 @@ TEST_F(MetricTest, METRIC_TEST) { prev_count = count; START_TIMER; -// stat = db_->Query(group_name, k, qb, qxb, result_ids, result_distances); +// stat = db_->Query(group_name, k, qb, qxb, results); ss << "Search " << j << " With Size " << (float) (count * group_dim * sizeof(float)) / (1024 * 1024) << " M"; diff --git a/core/unittest/scheduler/CMakeLists.txt b/core/unittest/scheduler/CMakeLists.txt index 087f93f017..77404f3cba 100644 --- a/core/unittest/scheduler/CMakeLists.txt +++ b/core/unittest/scheduler/CMakeLists.txt @@ -19,7 +19,7 @@ aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR} test_files) -cuda_add_executable(test_scheduler +add_executable(test_scheduler ${common_files} ${entry_file} ${test_files} diff --git a/core/unittest/server/CMakeLists.txt b/core/unittest/server/CMakeLists.txt index 1f89de8d3f..38309611da 100644 --- a/core/unittest/server/CMakeLists.txt +++ b/core/unittest/server/CMakeLists.txt @@ -47,7 +47,7 @@ set(server_test_files ${test_files} ) -cuda_add_executable(test_server ${server_test_files}) +add_executable(test_server ${server_test_files}) set(client_grpc_lib grpcpp_channelz diff --git a/core/unittest/server/test_config.cpp b/core/unittest/server/test_config.cpp index 123ddf5265..76230cbcc3 100644 --- a/core/unittest/server/test_config.cpp +++ b/core/unittest/server/test_config.cpp @@ -112,123 +112,3 @@ TEST_F(ConfigTest, SERVER_CONFIG_TEST) { s = config.ResetDefaultConfig(); ASSERT_TRUE(s.ok()); } - -TEST_F(ConfigTest, SERVER_CONFIG_INVALID_TEST) { - std::string config_path(CONFIG_PATH); - milvus::server::Config& config = milvus::server::Config::GetInstance(); - milvus::Status s; - - s = config.LoadConfigFile(""); - ASSERT_FALSE(s.ok()); - - s = config.LoadConfigFile(config_path + INVALID_CONFIG_FILE); - ASSERT_FALSE(s.ok()); - s = config.LoadConfigFile(config_path + "dummy.yaml"); - ASSERT_FALSE(s.ok()); - - /* server config */ - s = config.SetServerConfigAddress("0.0.0"); - ASSERT_FALSE(s.ok()); - s = config.SetServerConfigAddress("0.0.0.256"); - ASSERT_FALSE(s.ok()); - - s = config.SetServerConfigPort("a"); - ASSERT_FALSE(s.ok()); - s = config.SetServerConfigPort("99999"); - ASSERT_FALSE(s.ok()); - - s = config.SetServerConfigDeployMode("cluster"); - ASSERT_FALSE(s.ok()); - - s = config.SetServerConfigTimeZone("GM"); - ASSERT_FALSE(s.ok()); - s = config.SetServerConfigTimeZone("GMT8"); - ASSERT_FALSE(s.ok()); - s = config.SetServerConfigTimeZone("UTCA"); - ASSERT_FALSE(s.ok()); - - /* db config */ - s = config.SetDBConfigPrimaryPath(""); - ASSERT_FALSE(s.ok()); - -// s = 
config.SetDBConfigSecondaryPath(""); -// ASSERT_FALSE(s.ok()); - - s = config.SetDBConfigBackendUrl("http://www.google.com"); - ASSERT_FALSE(s.ok()); - s = config.SetDBConfigBackendUrl("sqlite://:@:"); - ASSERT_FALSE(s.ok()); - s = config.SetDBConfigBackendUrl("mysql://root:123456@127.0.0.1/milvus"); - ASSERT_FALSE(s.ok()); - - s = config.SetDBConfigArchiveDiskThreshold("0x10"); - ASSERT_FALSE(s.ok()); - - s = config.SetDBConfigArchiveDaysThreshold("0x10"); - ASSERT_FALSE(s.ok()); - - s = config.SetDBConfigInsertBufferSize("a"); - ASSERT_FALSE(s.ok()); - s = config.SetDBConfigInsertBufferSize("0"); - ASSERT_FALSE(s.ok()); - s = config.SetDBConfigInsertBufferSize("2048"); - ASSERT_FALSE(s.ok()); - - /* metric config */ - s = config.SetMetricConfigEnableMonitor("Y"); - ASSERT_FALSE(s.ok()); - - s = config.SetMetricConfigCollector("zilliz"); - ASSERT_FALSE(s.ok()); - - s = config.SetMetricConfigPrometheusPort("0xff"); - ASSERT_FALSE(s.ok()); - - /* cache config */ - s = config.SetCacheConfigCpuCacheCapacity("a"); - ASSERT_FALSE(s.ok()); - s = config.SetCacheConfigCpuCacheCapacity("0"); - ASSERT_FALSE(s.ok()); - s = config.SetCacheConfigCpuCacheCapacity("2048"); - ASSERT_FALSE(s.ok()); - - s = config.SetCacheConfigCpuCacheThreshold("a"); - ASSERT_FALSE(s.ok()); - s = config.SetCacheConfigCpuCacheThreshold("1.0"); - ASSERT_FALSE(s.ok()); - - s = config.SetCacheConfigGpuCacheCapacity("a"); - ASSERT_FALSE(s.ok()); - s = config.SetCacheConfigGpuCacheCapacity("128"); - ASSERT_FALSE(s.ok()); - - s = config.SetCacheConfigGpuCacheThreshold("a"); - ASSERT_FALSE(s.ok()); - s = config.SetCacheConfigGpuCacheThreshold("1.0"); - ASSERT_FALSE(s.ok()); - - s = config.SetCacheConfigCacheInsertData("N"); - ASSERT_FALSE(s.ok()); - - /* engine config */ - s = config.SetEngineConfigUseBlasThreshold("0xff"); - ASSERT_FALSE(s.ok()); - - s = config.SetEngineConfigOmpThreadNum("a"); - ASSERT_FALSE(s.ok()); - s = config.SetEngineConfigOmpThreadNum("10000"); - ASSERT_FALSE(s.ok()); - - s = config.SetEngineConfigGpuSearchThreshold("-1"); - ASSERT_FALSE(s.ok()); - - /* resource config */ - s = config.SetResourceConfigMode("default"); - ASSERT_FALSE(s.ok()); - - s = config.SetResourceConfigIndexBuildDevice("gup2"); - ASSERT_FALSE(s.ok()); - s = config.SetResourceConfigIndexBuildDevice("gpu16"); - ASSERT_FALSE(s.ok()); -} - diff --git a/core/unittest/server/test_rpc.cpp b/core/unittest/server/test_rpc.cpp index ebbcd810c1..a1de370650 100644 --- a/core/unittest/server/test_rpc.cpp +++ b/core/unittest/server/test_rpc.cpp @@ -23,7 +23,7 @@ #include "server/grpc_impl/GrpcRequestHandler.h" #include "server/grpc_impl/GrpcRequestScheduler.h" #include "server/grpc_impl/GrpcRequestTask.h" -#include "src/version.h" +#include "src/config.h" #include "grpc/gen-milvus/milvus.grpc.pb.h" #include "grpc/gen-status/status.pb.h" diff --git a/core/unittest/server/test_util.cpp b/core/unittest/server/test_util.cpp index 24482740bc..87c2d2f5f2 100644 --- a/core/unittest/server/test_util.cpp +++ b/core/unittest/server/test_util.cpp @@ -314,6 +314,7 @@ TEST(ValidationUtilTest, VALIDATE_NPROBE_TEST) { ASSERT_NE(milvus::server::ValidationUtil::ValidateSearchNprobe(101, schema).code(), milvus::SERVER_SUCCESS); } +#ifdef MILVUS_GPU_VERSION TEST(ValidationUtilTest, VALIDATE_GPU_TEST) { ASSERT_EQ(milvus::server::ValidationUtil::ValidateGpuIndex(0).code(), milvus::SERVER_SUCCESS); ASSERT_NE(milvus::server::ValidationUtil::ValidateGpuIndex(100).code(), milvus::SERVER_SUCCESS); @@ -322,6 +323,7 @@ TEST(ValidationUtilTest, VALIDATE_GPU_TEST) { 
     ASSERT_EQ(milvus::server::ValidationUtil::GetGpuMemory(0, memory).code(), milvus::SERVER_SUCCESS);
     ASSERT_NE(milvus::server::ValidationUtil::GetGpuMemory(100, memory).code(), milvus::SERVER_SUCCESS);
 }
+#endif
 
 TEST(ValidationUtilTest, VALIDATE_IPADDRESS_TEST) {
     ASSERT_EQ(milvus::server::ValidationUtil::ValidateIpAddress("127.0.0.1").code(), milvus::SERVER_SUCCESS);
diff --git a/core/unittest/server/utils.cpp b/core/unittest/server/utils.cpp
index 4c03da6ad9..1c99736173 100644
--- a/core/unittest/server/utils.cpp
+++ b/core/unittest/server/utils.cpp
@@ -60,9 +60,15 @@ static const char
     "    use_blas_threshold: 20 \n"
     "\n"
     "resource_config:\n"
-    "  search_resources: \n"
+#ifdef MILVUS_CPU_VERSION
+    "  search_resources:\n"
+    "    - cpu\n"
+    "  index_build_device: cpu    # CPU used for building index";
+#else
+    "  search_resources:\n"
     "    - gpu0\n"
     "  index_build_device: gpu0    # GPU used for building index";
+#endif
 
 static const char* INVALID_CONFIG_STR = "*INVALID*";
 
diff --git a/core/unittest/wrapper/CMakeLists.txt b/core/unittest/wrapper/CMakeLists.txt
index ef145a9f50..232abf6e1a 100644
--- a/core/unittest/wrapper/CMakeLists.txt
+++ b/core/unittest/wrapper/CMakeLists.txt
@@ -17,7 +17,13 @@
 # under the License.
 #-------------------------------------------------------------------------------
 
-aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR} test_files)
+set(test_files
+        test_knowhere.cpp
+        test_wrapper.cpp)
+if (MILVUS_GPU_VERSION)
+    set(test_files ${test_files}
+            test_hybrid_index.cpp)
+endif ()
 
 set(wrapper_files
         ${MILVUS_ENGINE_SRC}/wrapper/DataTransfer.cpp
diff --git a/core/unittest/wrapper/test_wrapper.cpp b/core/unittest/wrapper/test_wrapper.cpp
index 4ceb07ddb4..97eb3a3a68 100644
--- a/core/unittest/wrapper/test_wrapper.cpp
+++ b/core/unittest/wrapper/test_wrapper.cpp
@@ -17,7 +17,11 @@
 
 #include "external/easyloggingpp/easylogging++.h"
 #include "wrapper/VecIndex.h"
+
+#ifdef MILVUS_GPU_VERSION
 #include "knowhere/index/vector_index/helpers/FaissGpuResourceMgr.h"
+#endif
+
 #include "knowhere/index/vector_index/helpers/IndexParameter.h"
 #include "wrapper/utils.h"
 
@@ -30,11 +34,13 @@ using ::testing::Values;
 using ::testing::Combine;
 
 class KnowhereWrapperTest
-    : public DataGenBase,
-      public TestWithParam<::std::tuple<milvus::engine::IndexType, std::string, int, int, int, int>> {
- protected:
+        : public DataGenBase,
+          public TestWithParam<::std::tuple<milvus::engine::IndexType, std::string, int, int, int, int>> {
+protected:
     void SetUp() override {
+#ifdef MILVUS_GPU_VERSION
         knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, PINMEM, TEMPMEM, RESNUM);
+#endif
 
         std::string generator_type;
         std::tie(index_type, generator_type, dim, nb, nq, k) = GetParam();
@@ -48,10 +54,12 @@ class KnowhereWrapperTest
     }
 
     void TearDown() override {
+#ifdef MILVUS_GPU_VERSION
         knowhere::FaissGpuResourceMgr::GetInstance().Free();
+#endif
     }
 
- protected:
+protected:
     milvus::engine::IndexType index_type;
     milvus::engine::VecIndexPtr index_ = nullptr;
     knowhere::Config conf;
@@ -59,27 +67,30 @@ class KnowhereWrapperTest
 
 INSTANTIATE_TEST_CASE_P(WrapperParam, KnowhereWrapperTest,
                         Values(
-                            //["Index type", "Generator type", "dim", "nb", "nq", "k", "build config", "search config"]
-                            std::make_tuple(milvus::engine::IndexType::FAISS_IVFFLAT_CPU,
-                                            "Default",
-                                            64,
-                                            100000,
-                                            10,
-                                            10),
-                            std::make_tuple(milvus::engine::IndexType::FAISS_IVFFLAT_GPU, "Default", DIM, NB, 10, 10),
-                            std::make_tuple(milvus::engine::IndexType::FAISS_IVFFLAT_MIX,
-                                            "Default",
-                                            64,
-                                            100000,
-                                            10,
-                                            10),
-                            std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_CPU, "Default", DIM, NB, 10, 10),
-                            std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_GPU, "Default", 
DIM, NB, 10, 10), - std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_MIX, "Default", DIM, NB, 10, 10), + //["Index type", "Generator type", "dim", "nb", "nq", "k", "build config", "search config"] +#ifdef MILVUS_GPU_VERSION + std::make_tuple(milvus::engine::IndexType::FAISS_IVFFLAT_GPU, "Default", DIM, NB, 10, 10), + std::make_tuple(milvus::engine::IndexType::FAISS_IVFFLAT_MIX, + "Default", + 64, + 100000, + 10, + 10), +// std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_GPU, "Default", DIM, NB, 10, 10), + std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_GPU, "Default", DIM, NB, 10, 10), + std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_MIX, "Default", DIM, NB, 10, 10), // std::make_tuple(IndexType::NSG_MIX, "Default", 128, 250000, 10, 10), +#endif // std::make_tuple(IndexType::SPTAG_KDT_RNT_CPU, "Default", 128, 250000, 10, 10), - std::make_tuple(milvus::engine::IndexType::FAISS_IDMAP, "Default", 64, 100000, 10, 10) - ) + std::make_tuple(milvus::engine::IndexType::FAISS_IDMAP, "Default", 64, 100000, 10, 10), + std::make_tuple(milvus::engine::IndexType::FAISS_IVFFLAT_CPU, + "Default", + 64, + 100000, + 10, + 10), + std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_CPU, "Default", DIM, NB, 10, 10) +) ); TEST_P(KnowhereWrapperTest, BASE_TEST) { @@ -94,6 +105,8 @@ TEST_P(KnowhereWrapperTest, BASE_TEST) { AssertResult(res_ids, res_dis); } +#ifdef MILVUS_GPU_VERSION + TEST_P(KnowhereWrapperTest, TO_GPU_TEST) { EXPECT_EQ(index_->GetType(), index_type); @@ -125,6 +138,7 @@ TEST_P(KnowhereWrapperTest, TO_GPU_TEST) { AssertResult(res_ids, res_dis); } } +#endif TEST_P(KnowhereWrapperTest, SERIALIZE_TEST) { EXPECT_EQ(index_->GetType(), index_type); @@ -166,6 +180,7 @@ TEST_P(KnowhereWrapperTest, SERIALIZE_TEST) { } #include "wrapper/ConfAdapter.h" + TEST(whatever, test_config) { milvus::engine::TempMetaConf conf; auto nsg_conf = std::make_shared(); diff --git a/core/unittest/wrapper/utils.cpp b/core/unittest/wrapper/utils.cpp index b397a35d7c..7caa662343 100644 --- a/core/unittest/wrapper/utils.cpp +++ b/core/unittest/wrapper/utils.cpp @@ -58,9 +58,15 @@ static const char " blas_threshold: 20\n" "\n" "resource_config:\n" +#ifdef MILVUS_CPU_VERSION + " search_resources:\n" + " - cpu\n" + " index_build_device: cpu # CPU used for building index"; +#else " search_resources:\n" " - gpu0\n" " index_build_device: gpu0 # GPU used for building index"; +#endif void WriteToFile(const std::string& file_path, const char* content) { From ac1caec612e426bb62deb40b0dd16ec424088121 Mon Sep 17 00:00:00 2001 From: youny626 Date: Tue, 5 Nov 2019 10:26:20 +0800 Subject: [PATCH 094/307] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 95ce8cd886..193f2405ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Please mark all change in change log and use the ticket from JIRA. 
## Bug ## Feature +- \#12 - Pure CPU version for Milvus ## Improvement From 29e8ef110fbcedabda4ba8db9485cd7130dd960e Mon Sep 17 00:00:00 2001 From: youny626 Date: Tue, 5 Nov 2019 13:21:32 +0800 Subject: [PATCH 095/307] change default cpu version option to off --- core/cmake/DefineOptions.cmake | 2 +- core/src/config.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/cmake/DefineOptions.cmake b/core/cmake/DefineOptions.cmake index b90f41fe8e..c7f4f73d94 100644 --- a/core/cmake/DefineOptions.cmake +++ b/core/cmake/DefineOptions.cmake @@ -43,7 +43,7 @@ endmacro() #---------------------------------------------------------------------- set_option_category("CPU version") -define_option(MILVUS_CPU_VERSION "Build CPU version only" ON) +define_option(MILVUS_CPU_VERSION "Build CPU version only" OFF) #---------------------------------------------------------------------- set_option_category("Thirdparty") diff --git a/core/src/config.h b/core/src/config.h index b031713328..5f604f53b3 100644 --- a/core/src/config.h +++ b/core/src/config.h @@ -1,3 +1,3 @@ #define MILVUS_VERSION "0.5.0" #define BUILD_TYPE "Debug" -#define BUILD_TIME "2019-11-05 10:23.18" +#define BUILD_TIME "2019-11-05 13:20.31" From bede77751e83f4136c7ba17f280c43a4b4738fcc Mon Sep 17 00:00:00 2001 From: youny626 Date: Tue, 5 Nov 2019 13:35:24 +0800 Subject: [PATCH 096/307] clang format --- core/src/cache/Cache.inl | 86 +- core/src/cache/CacheMgr.inl | 42 +- core/src/config.h.in | 17 + core/src/db/meta/SqliteMetaImpl.cpp | 671 +- .../external/easyloggingpp/easylogging++.cc | 5107 +++---- .../external/easyloggingpp/easylogging++.h | 6756 +++++----- core/src/external/nlohmann/json.hpp | 10958 +++++++--------- core/src/index/CMakeLists.txt | 28 +- core/src/index/cmake/BuildUtilsCore.cmake | 116 +- core/src/index/cmake/DefineOptionsCore.cmake | 76 +- .../index/vector_index/IndexGPUIDMAP.cpp | 174 +- .../index/vector_index/IndexGPUIDMAP.h | 70 +- .../index/vector_index/IndexIDMAP.cpp | 234 +- .../knowhere/index/vector_index/IndexIVF.cpp | 5 +- .../index/vector_index/IndexIVFSQ.cpp | 1 - core/src/index/unittest/Helper.h | 10 +- core/src/index/unittest/test_idmap.cpp | 2 +- core/src/index/unittest/test_ivf.cpp | 59 +- .../index/unittest/test_nsg/CMakeLists.txt | 4 +- core/src/main.cpp | 24 +- core/src/metrics/SystemInfo.cpp | 1 - core/src/scheduler/ResourceMgr.cpp | 4 +- core/src/scheduler/Scheduler.cpp | 4 +- core/src/scheduler/resource/Resource.cpp | 4 +- core/src/server/Config.cpp | 5 +- core/src/utils/BlockingQueue.inl | 40 +- core/src/utils/ValidationUtil.cpp | 2 - core/src/wrapper/KnowhereResource.cpp | 1 - core/src/wrapper/VecImpl.cpp | 356 +- core/src/wrapper/VecIndex.cpp | 16 +- core/src/wrapper/WrapperException.cpp | 6 +- core/src/wrapper/WrapperException.h | 3 +- core/src/wrapper/gpu/GPUVecImpl.cpp | 9 +- core/src/wrapper/gpu/GPUVecImpl.h | 29 +- core/unittest/CMakeLists.txt | 6 +- core/unittest/db/test_db.cpp | 75 +- core/unittest/db/test_db_mysql.cpp | 67 +- core/unittest/db/test_engine.cpp | 59 +- core/unittest/db/test_mem.cpp | 54 +- core/unittest/db/test_meta.cpp | 35 +- core/unittest/db/test_meta_mysql.cpp | 47 +- core/unittest/db/test_misc.cpp | 31 +- core/unittest/db/test_search.cpp | 278 +- core/unittest/db/utils.cpp | 109 +- core/unittest/db/utils.h | 68 +- core/unittest/main.cpp | 4 +- core/unittest/metrics/test_metricbase.cpp | 1 - core/unittest/metrics/test_metrics.cpp | 35 +- core/unittest/metrics/test_prometheus.cpp | 1 - core/unittest/metrics/utils.cpp | 38 +- core/unittest/metrics/utils.h | 
32 +- core/unittest/scheduler/task_test.cpp | 12 +- core/unittest/scheduler/test_algorithm.cpp | 13 +- core/unittest/scheduler/test_event.cpp | 13 +- core/unittest/scheduler/test_normal.cpp | 9 +- core/unittest/scheduler/test_resource.cpp | 23 +- .../scheduler/test_resource_factory.cpp | 5 +- core/unittest/scheduler/test_resource_mgr.cpp | 28 +- core/unittest/scheduler/test_scheduler.cpp | 71 +- core/unittest/server/test_cache.cpp | 68 +- core/unittest/server/test_config.cpp | 8 +- core/unittest/server/test_rpc.cpp | 100 +- core/unittest/server/test_util.cpp | 48 +- core/unittest/server/utils.cpp | 91 +- core/unittest/server/utils.h | 13 +- core/unittest/wrapper/test_hybrid_index.cpp | 15 +- core/unittest/wrapper/test_knowhere.cpp | 2 +- core/unittest/wrapper/test_wrapper.cpp | 46 +- core/unittest/wrapper/utils.cpp | 118 +- core/unittest/wrapper/utils.h | 93 +- 70 files changed, 12797 insertions(+), 13839 deletions(-) diff --git a/core/src/cache/Cache.inl b/core/src/cache/Cache.inl index 3a60dd288f..9ac7ff21e6 100644 --- a/core/src/cache/Cache.inl +++ b/core/src/cache/Cache.inl @@ -15,24 +15,18 @@ // specific language governing permissions and limitations // under the License. - - - namespace milvus { namespace cache { constexpr double DEFAULT_THRESHHOLD_PERCENT = 0.85; -template +template Cache::Cache(int64_t capacity, uint64_t cache_max_count) - : usage_(0), - capacity_(capacity), - freemem_percent_(DEFAULT_THRESHHOLD_PERCENT), - lru_(cache_max_count) { -// AGENT_LOG_DEBUG << "Construct Cache with capacity " << std::to_string(mem_capacity) + : usage_(0), capacity_(capacity), freemem_percent_(DEFAULT_THRESHHOLD_PERCENT), lru_(cache_max_count) { + // AGENT_LOG_DEBUG << "Construct Cache with capacity " << std::to_string(mem_capacity) } -template +template void Cache::set_capacity(int64_t capacity) { if (capacity > 0) { @@ -41,23 +35,23 @@ Cache::set_capacity(int64_t capacity) { } } -template +template size_t Cache::size() const { std::lock_guard lock(mutex_); return lru_.size(); } -template +template bool -Cache::exists(const std::string &key) { +Cache::exists(const std::string& key) { std::lock_guard lock(mutex_); return lru_.exists(key); } -template +template ItemObj -Cache::get(const std::string &key) { +Cache::get(const std::string& key) { std::lock_guard lock(mutex_); if (!lru_.exists(key)) { return nullptr; @@ -66,60 +60,59 @@ Cache::get(const std::string &key) { return lru_.get(key); } -template +template void -Cache::insert(const std::string &key, const ItemObj &item) { +Cache::insert(const std::string& key, const ItemObj& item) { if (item == nullptr) { return; } -// if(item->size() > capacity_) { -// SERVER_LOG_ERROR << "Item size " << item->size() -// << " is too large to insert into cache, capacity " << capacity_; -// return; -// } + // if(item->size() > capacity_) { + // SERVER_LOG_ERROR << "Item size " << item->size() + // << " is too large to insert into cache, capacity " << capacity_; + // return; + // } - //calculate usage + // calculate usage { std::lock_guard lock(mutex_); - //if key already exist, subtract old item size + // if key already exist, subtract old item size if (lru_.exists(key)) { - const ItemObj &old_item = lru_.get(key); + const ItemObj& old_item = lru_.get(key); usage_ -= old_item->Size(); } - //plus new item size + // plus new item size usage_ += item->Size(); } - //if usage exceed capacity, free some items + // if usage exceed capacity, free some items if (usage_ > capacity_) { - SERVER_LOG_DEBUG << "Current usage " << usage_ - << " exceeds cache capacity 
" << capacity_ + SERVER_LOG_DEBUG << "Current usage " << usage_ << " exceeds cache capacity " << capacity_ << ", start free memory"; free_memory(); } - //insert new item + // insert new item { std::lock_guard lock(mutex_); lru_.put(key, item); - SERVER_LOG_DEBUG << "Insert " << key << " size:" << item->Size() - << " bytes into cache, usage: " << usage_ << " bytes"; + SERVER_LOG_DEBUG << "Insert " << key << " size:" << item->Size() << " bytes into cache, usage: " << usage_ + << " bytes"; } } -template +template void -Cache::erase(const std::string &key) { +Cache::erase(const std::string& key) { std::lock_guard lock(mutex_); if (!lru_.exists(key)) { return; } - const ItemObj &old_item = lru_.get(key); + const ItemObj& old_item = lru_.get(key); usage_ -= old_item->Size(); SERVER_LOG_DEBUG << "Erase " << key << " size: " << old_item->Size(); @@ -127,7 +120,7 @@ Cache::erase(const std::string &key) { lru_.erase(key); } -template +template void Cache::clear() { std::lock_guard lock(mutex_); @@ -137,15 +130,16 @@ Cache::clear() { } /* free memory space when CACHE occupation exceed its capacity */ -template +template void Cache::free_memory() { - if (usage_ <= capacity_) return; + if (usage_ <= capacity_) + return; int64_t threshhold = capacity_ * freemem_percent_; int64_t delta_size = usage_ - threshhold; if (delta_size <= 0) { - delta_size = 1;//ensure at least one item erased + delta_size = 1; // ensure at least one item erased } std::set key_array; @@ -156,8 +150,8 @@ Cache::free_memory() { auto it = lru_.rbegin(); while (it != lru_.rend() && released_size < delta_size) { - auto &key = it->first; - auto &obj_ptr = it->second; + auto& key = it->first; + auto& obj_ptr = it->second; key_array.emplace(key); released_size += obj_ptr->Size(); @@ -167,14 +161,14 @@ Cache::free_memory() { SERVER_LOG_DEBUG << "to be released memory size: " << released_size; - for (auto &key : key_array) { + for (auto& key : key_array) { erase(key); } print(); } -template +template void Cache::print() { size_t cache_count = 0; @@ -188,7 +182,5 @@ Cache::print() { SERVER_LOG_DEBUG << "[Cache capacity]: " << capacity_ << " bytes"; } -} // namespace cache -} // namespace milvus - - +} // namespace cache +} // namespace milvus diff --git a/core/src/cache/CacheMgr.inl b/core/src/cache/CacheMgr.inl index 23b2f0df74..30d2940d87 100644 --- a/core/src/cache/CacheMgr.inl +++ b/core/src/cache/CacheMgr.inl @@ -15,21 +15,18 @@ // specific language governing permissions and limitations // under the License. 
- - namespace milvus { namespace cache { -template +template CacheMgr::CacheMgr() { } -template +template CacheMgr::~CacheMgr() { - } -template +template uint64_t CacheMgr::ItemCount() const { if (cache_ == nullptr) { @@ -37,12 +34,12 @@ CacheMgr::ItemCount() const { return 0; } - return (uint64_t) (cache_->size()); + return (uint64_t)(cache_->size()); } -template +template bool -CacheMgr::ItemExists(const std::string &key) { +CacheMgr::ItemExists(const std::string& key) { if (cache_ == nullptr) { SERVER_LOG_ERROR << "Cache doesn't exist"; return false; @@ -51,9 +48,9 @@ CacheMgr::ItemExists(const std::string &key) { return cache_->exists(key); } -template +template ItemObj -CacheMgr::GetItem(const std::string &key) { +CacheMgr::GetItem(const std::string& key) { if (cache_ == nullptr) { SERVER_LOG_ERROR << "Cache doesn't exist"; return nullptr; @@ -62,9 +59,9 @@ CacheMgr::GetItem(const std::string &key) { return cache_->get(key); } -template +template void -CacheMgr::InsertItem(const std::string &key, const ItemObj &data) { +CacheMgr::InsertItem(const std::string& key, const ItemObj& data) { if (cache_ == nullptr) { SERVER_LOG_ERROR << "Cache doesn't exist"; return; @@ -74,9 +71,9 @@ CacheMgr::InsertItem(const std::string &key, const ItemObj &data) { server::Metrics::GetInstance().CacheAccessTotalIncrement(); } -template +template void -CacheMgr::EraseItem(const std::string &key) { +CacheMgr::EraseItem(const std::string& key) { if (cache_ == nullptr) { SERVER_LOG_ERROR << "Cache doesn't exist"; return; @@ -86,7 +83,7 @@ CacheMgr::EraseItem(const std::string &key) { server::Metrics::GetInstance().CacheAccessTotalIncrement(); } -template +template void CacheMgr::PrintInfo() { if (cache_ == nullptr) { @@ -97,7 +94,7 @@ CacheMgr::PrintInfo() { cache_->print(); } -template +template void CacheMgr::ClearCache() { if (cache_ == nullptr) { @@ -108,7 +105,7 @@ CacheMgr::ClearCache() { cache_->clear(); } -template +template int64_t CacheMgr::CacheUsage() const { if (cache_ == nullptr) { @@ -119,7 +116,7 @@ CacheMgr::CacheUsage() const { return cache_->usage(); } -template +template int64_t CacheMgr::CacheCapacity() const { if (cache_ == nullptr) { @@ -130,7 +127,7 @@ CacheMgr::CacheCapacity() const { return cache_->capacity(); } -template +template void CacheMgr::SetCapacity(int64_t capacity) { if (cache_ == nullptr) { @@ -140,6 +137,5 @@ CacheMgr::SetCapacity(int64_t capacity) { cache_->set_capacity(capacity); } -} // namespace cache -} // namespace milvus - +} // namespace cache +} // namespace milvus diff --git a/core/src/config.h.in b/core/src/config.h.in index 9e8821881f..989010cde8 100644 --- a/core/src/config.h.in +++ b/core/src/config.h.in @@ -1,3 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ #cmakedefine MILVUS_VERSION "@MILVUS_VERSION@" #cmakedefine BUILD_TYPE "@BUILD_TYPE@" #cmakedefine BUILD_TIME @BUILD_TIME@ \ No newline at end of file diff --git a/core/src/db/meta/SqliteMetaImpl.cpp b/core/src/db/meta/SqliteMetaImpl.cpp index 3fed2a81d4..0211e4db61 100644 --- a/core/src/db/meta/SqliteMetaImpl.cpp +++ b/core/src/db/meta/SqliteMetaImpl.cpp @@ -16,24 +16,23 @@ // under the License. #include "db/meta/SqliteMetaImpl.h" +#include "MetaConsts.h" #include "db/IDGenerator.h" #include "db/Utils.h" -#include "utils/Log.h" -#include "utils/Exception.h" -#include "MetaConsts.h" #include "metrics/Metrics.h" +#include "utils/Exception.h" +#include "utils/Log.h" +#include #include -#include -#include #include #include #include -#include +#include #include +#include #include -#include - +#include namespace milvus { namespace engine { @@ -44,7 +43,7 @@ using namespace sqlite_orm; namespace { Status -HandleException(const std::string &desc, const char *what = nullptr) { +HandleException(const std::string& desc, const char* what = nullptr) { if (what == nullptr) { ENGINE_LOG_ERROR << desc; return Status(DB_META_TRANSACTION_FAILED, desc); @@ -55,40 +54,35 @@ HandleException(const std::string &desc, const char *what = nullptr) { } } -} // namespace +} // namespace inline auto -StoragePrototype(const std::string &path) { - return make_storage(path, - make_table(META_TABLES, - make_column("id", &TableSchema::id_, primary_key()), - make_column("table_id", &TableSchema::table_id_, unique()), - make_column("state", &TableSchema::state_), - make_column("dimension", &TableSchema::dimension_), - make_column("created_on", &TableSchema::created_on_), - make_column("flag", &TableSchema::flag_, default_value(0)), - make_column("index_file_size", &TableSchema::index_file_size_), - make_column("engine_type", &TableSchema::engine_type_), - make_column("nlist", &TableSchema::nlist_), - make_column("metric_type", &TableSchema::metric_type_)), - make_table(META_TABLEFILES, - make_column("id", &TableFileSchema::id_, primary_key()), - make_column("table_id", &TableFileSchema::table_id_), - make_column("engine_type", &TableFileSchema::engine_type_), - make_column("file_id", &TableFileSchema::file_id_), - make_column("file_type", &TableFileSchema::file_type_), - make_column("file_size", &TableFileSchema::file_size_, default_value(0)), - make_column("row_count", &TableFileSchema::row_count_, default_value(0)), - make_column("updated_time", &TableFileSchema::updated_time_), - make_column("created_on", &TableFileSchema::created_on_), - make_column("date", &TableFileSchema::date_))); +StoragePrototype(const std::string& path) { + return make_storage( + path, + make_table(META_TABLES, make_column("id", &TableSchema::id_, primary_key()), + make_column("table_id", &TableSchema::table_id_, unique()), + make_column("state", &TableSchema::state_), make_column("dimension", &TableSchema::dimension_), + make_column("created_on", &TableSchema::created_on_), + make_column("flag", &TableSchema::flag_, default_value(0)), + make_column("index_file_size", &TableSchema::index_file_size_), + make_column("engine_type", &TableSchema::engine_type_), make_column("nlist", &TableSchema::nlist_), + make_column("metric_type", &TableSchema::metric_type_)), + make_table( + META_TABLEFILES, make_column("id", &TableFileSchema::id_, primary_key()), + make_column("table_id", &TableFileSchema::table_id_), + make_column("engine_type", &TableFileSchema::engine_type_), + make_column("file_id", &TableFileSchema::file_id_), make_column("file_type", 
&TableFileSchema::file_type_), + make_column("file_size", &TableFileSchema::file_size_, default_value(0)), + make_column("row_count", &TableFileSchema::row_count_, default_value(0)), + make_column("updated_time", &TableFileSchema::updated_time_), + make_column("created_on", &TableFileSchema::created_on_), make_column("date", &TableFileSchema::date_))); } using ConnectorT = decltype(StoragePrototype("")); static std::unique_ptr ConnectorPtr; -SqliteMetaImpl::SqliteMetaImpl(const DBMetaOptions &options) - : options_(options) { +SqliteMetaImpl::SqliteMetaImpl(const DBMetaOptions& options) : options_(options) { Initialize(); } @@ -96,7 +90,7 @@ SqliteMetaImpl::~SqliteMetaImpl() { } Status -SqliteMetaImpl::NextTableId(std::string &table_id) { +SqliteMetaImpl::NextTableId(std::string& table_id) { std::stringstream ss; SimpleIDGenerator g; ss << g.GetNextIDNumber(); @@ -105,7 +99,7 @@ SqliteMetaImpl::NextTableId(std::string &table_id) { } Status -SqliteMetaImpl::NextFileId(std::string &file_id) { +SqliteMetaImpl::NextFileId(std::string& file_id) { std::stringstream ss; SimpleIDGenerator g; ss << g.GetNextIDNumber(); @@ -119,14 +113,14 @@ SqliteMetaImpl::ValidateMetaSchema() { return; } - //old meta could be recreated since schema changed, throw exception if meta schema is not compatible + // old meta could be recreated since schema changed, throw exception if meta schema is not compatible auto ret = ConnectorPtr->sync_schema_simulate(); - if (ret.find(META_TABLES) != ret.end() - && sqlite_orm::sync_schema_result::dropped_and_recreated == ret[META_TABLES]) { + if (ret.find(META_TABLES) != ret.end() && + sqlite_orm::sync_schema_result::dropped_and_recreated == ret[META_TABLES]) { throw Exception(DB_INCOMPATIB_META, "Meta Tables schema is created by Milvus old version"); } - if (ret.find(META_TABLEFILES) != ret.end() - && sqlite_orm::sync_schema_result::dropped_and_recreated == ret[META_TABLEFILES]) { + if (ret.find(META_TABLEFILES) != ret.end() && + sqlite_orm::sync_schema_result::dropped_and_recreated == ret[META_TABLEFILES]) { throw Exception(DB_INCOMPATIB_META, "Meta TableFiles schema is created by Milvus old version"); } } @@ -147,8 +141,8 @@ SqliteMetaImpl::Initialize() { ValidateMetaSchema(); ConnectorPtr->sync_schema(); - ConnectorPtr->open_forever(); // thread safe option - ConnectorPtr->pragma.journal_mode(journal_mode::WAL); // WAL => write ahead log + ConnectorPtr->open_forever(); // thread safe option + ConnectorPtr->pragma.journal_mode(journal_mode::WAL); // WAL => write ahead log CleanUp(); @@ -157,8 +151,7 @@ SqliteMetaImpl::Initialize() { // TODO(myh): Delete single vecotor by id Status -SqliteMetaImpl::DropPartitionsByDates(const std::string &table_id, - const DatesT &dates) { +SqliteMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& dates) { if (dates.empty()) { return Status::OK(); } @@ -171,38 +164,35 @@ SqliteMetaImpl::DropPartitionsByDates(const std::string &table_id, } try { - //sqlite_orm has a bug, 'in' statement cannot handle too many elements - //so we split one query into multi-queries, this is a work-around!! + // sqlite_orm has a bug, 'in' statement cannot handle too many elements + // so we split one query into multi-queries, this is a work-around!! 
std::vector<DatesT> split_dates; split_dates.push_back(DatesT()); const size_t batch_size = 30; - for(DateT date : dates) { + for (DateT date : dates) { DatesT& last_batch = *split_dates.rbegin(); last_batch.push_back(date); - if(last_batch.size() > batch_size) { + if (last_batch.size() > batch_size) { split_dates.push_back(DatesT()); } } - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard<std::mutex> meta_lock(meta_mutex_); - for(auto& batch_dates : split_dates) { - if(batch_dates.empty()) { + for (auto& batch_dates : split_dates) { + if (batch_dates.empty()) { continue; } ConnectorPtr->update_all( - set( - c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, + set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where( - c(&TableFileSchema::table_id_) == table_id and - in(&TableFileSchema::date_, batch_dates))); + where(c(&TableFileSchema::table_id_) == table_id and in(&TableFileSchema::date_, batch_dates))); } ENGINE_LOG_DEBUG << "Successfully drop partitions, table id = " << table_schema.table_id_; - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when drop partition", e.what()); } @@ -210,11 +200,11 @@ SqliteMetaImpl::DropPartitionsByDates(const std::string &table_id, } Status -SqliteMetaImpl::CreateTable(TableSchema &table_schema) { +SqliteMetaImpl::CreateTable(TableSchema& table_schema) { try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard<std::mutex> meta_lock(meta_mutex_); if (table_schema.table_id_ == "") { @@ -238,36 +228,33 @@ SqliteMetaImpl::CreateTable(TableSchema &table_schema) { try { auto id = ConnectorPtr->insert(table_schema); table_schema.id_ = id; - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when create table", e.what()); } ENGINE_LOG_DEBUG << "Successfully create table: " << table_schema.table_id_; return utils::CreateTablePath(options_, table_schema.table_id_); - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when create table", e.what()); } } Status -SqliteMetaImpl::DeleteTable(const std::string &table_id) { +SqliteMetaImpl::DeleteTable(const std::string& table_id) { try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard<std::mutex> meta_lock(meta_mutex_); - //soft delete table + // soft delete table ConnectorPtr->update_all( - set( - c(&TableSchema::state_) = (int) TableSchema::TO_DELETE), - where( - c(&TableSchema::table_id_) == table_id and - c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + set(c(&TableSchema::state_) = (int)TableSchema::TO_DELETE), + where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); ENGINE_LOG_DEBUG << "Successfully delete table, table id = " << table_id; - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when delete table", e.what()); } @@ -275,24 
+262,21 @@ SqliteMetaImpl::DeleteTable(const std::string &table_id) { } Status -SqliteMetaImpl::DeleteTableFiles(const std::string &table_id) { +SqliteMetaImpl::DeleteTableFiles(const std::string& table_id) { try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - //soft delete table files - ConnectorPtr->update_all( - set( - c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where( - c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE)); + // soft delete table files + ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where(c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); ENGINE_LOG_DEBUG << "Successfully delete table files, table id = " << table_id; - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when delete table files", e.what()); } @@ -300,21 +284,16 @@ SqliteMetaImpl::DeleteTableFiles(const std::string &table_id) { } Status -SqliteMetaImpl::DescribeTable(TableSchema &table_schema) { +SqliteMetaImpl::DescribeTable(TableSchema& table_schema) { try { server::MetricCollector metric; - auto groups = ConnectorPtr->select(columns(&TableSchema::id_, - &TableSchema::state_, - &TableSchema::dimension_, - &TableSchema::created_on_, - &TableSchema::flag_, - &TableSchema::index_file_size_, - &TableSchema::engine_type_, - &TableSchema::nlist_, - &TableSchema::metric_type_), - where(c(&TableSchema::table_id_) == table_schema.table_id_ - and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + auto groups = + ConnectorPtr->select(columns(&TableSchema::id_, &TableSchema::state_, &TableSchema::dimension_, + &TableSchema::created_on_, &TableSchema::flag_, &TableSchema::index_file_size_, + &TableSchema::engine_type_, &TableSchema::nlist_, &TableSchema::metric_type_), + where(c(&TableSchema::table_id_) == table_schema.table_id_ and + c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); if (groups.size() == 1) { table_schema.id_ = std::get<0>(groups[0]); @@ -329,7 +308,7 @@ SqliteMetaImpl::DescribeTable(TableSchema &table_schema) { } else { return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found"); } - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when describe table", e.what()); } @@ -337,41 +316,47 @@ SqliteMetaImpl::DescribeTable(TableSchema &table_schema) { } Status -SqliteMetaImpl::FilesByType(const std::string &table_id, - const std::vector &file_types, - std::vector &file_ids) { +SqliteMetaImpl::FilesByType(const std::string& table_id, const std::vector& file_types, + std::vector& file_ids) { if (file_types.empty()) { return Status(DB_ERROR, "file types array is empty"); } try { file_ids.clear(); - auto selected = ConnectorPtr->select(columns(&TableFileSchema::file_id_, - &TableFileSchema::file_type_), - where(in(&TableFileSchema::file_type_, file_types) - and c(&TableFileSchema::table_id_) == table_id)); + auto selected = ConnectorPtr->select( + columns(&TableFileSchema::file_id_, 
&TableFileSchema::file_type_), + where(in(&TableFileSchema::file_type_, file_types) and c(&TableFileSchema::table_id_) == table_id)); if (selected.size() >= 1) { int raw_count = 0, new_count = 0, new_merge_count = 0, new_index_count = 0; int to_index_count = 0, index_count = 0, backup_count = 0; - for (auto &file : selected) { + for (auto& file : selected) { file_ids.push_back(std::get<0>(file)); switch (std::get<1>(file)) { - case (int) TableFileSchema::RAW:raw_count++; + case (int)TableFileSchema::RAW: + raw_count++; break; - case (int) TableFileSchema::NEW:new_count++; + case (int)TableFileSchema::NEW: + new_count++; break; - case (int) TableFileSchema::NEW_MERGE:new_merge_count++; + case (int)TableFileSchema::NEW_MERGE: + new_merge_count++; break; - case (int) TableFileSchema::NEW_INDEX:new_index_count++; + case (int)TableFileSchema::NEW_INDEX: + new_index_count++; break; - case (int) TableFileSchema::TO_INDEX:to_index_count++; + case (int)TableFileSchema::TO_INDEX: + to_index_count++; break; - case (int) TableFileSchema::INDEX:index_count++; + case (int)TableFileSchema::INDEX: + index_count++; break; - case (int) TableFileSchema::BACKUP:backup_count++; + case (int)TableFileSchema::BACKUP: + backup_count++; + break; + default: break; - default:break; } } @@ -380,28 +365,24 @@ SqliteMetaImpl::FilesByType(const std::string &table_id, << " new_index files:" << new_index_count << " to_index files:" << to_index_count << " index files:" << index_count << " backup files:" << backup_count; } - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when check non index files", e.what()); } return Status::OK(); } Status -SqliteMetaImpl::UpdateTableIndex(const std::string &table_id, const TableIndex &index) { +SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& index) { try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - auto tables = ConnectorPtr->select(columns(&TableSchema::id_, - &TableSchema::state_, - &TableSchema::dimension_, - &TableSchema::created_on_, - &TableSchema::flag_, - &TableSchema::index_file_size_), - where(c(&TableSchema::table_id_) == table_id - and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + auto tables = ConnectorPtr->select( + columns(&TableSchema::id_, &TableSchema::state_, &TableSchema::dimension_, &TableSchema::created_on_, + &TableSchema::flag_, &TableSchema::index_file_size_), + where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); if (tables.size() > 0) { meta::TableSchema table_schema; @@ -421,17 +402,14 @@ SqliteMetaImpl::UpdateTableIndex(const std::string &table_id, const TableIndex & return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); } - //set all backup file to raw - ConnectorPtr->update_all( - set( - c(&TableFileSchema::file_type_) = (int) TableFileSchema::RAW, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where( - c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int) TableFileSchema::BACKUP)); + // set all backup file to raw + ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::RAW, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + 
where(c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) == (int)TableFileSchema::BACKUP)); ENGINE_LOG_DEBUG << "Successfully update table index, table id = " << table_id; - } catch (std::exception &e) { + } catch (std::exception& e) { std::string msg = "Encounter exception when update table index: table_id = " + table_id; return HandleException(msg, e.what()); } @@ -440,18 +418,14 @@ SqliteMetaImpl::UpdateTableIndex(const std::string &table_id, const TableIndex & } Status -SqliteMetaImpl::UpdateTableFlag(const std::string &table_id, int64_t flag) { +SqliteMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { try { server::MetricCollector metric; - //set all backup file to raw - ConnectorPtr->update_all( - set( - c(&TableSchema::flag_) = flag), - where( - c(&TableSchema::table_id_) == table_id)); + // set all backup file to raw + ConnectorPtr->update_all(set(c(&TableSchema::flag_) = flag), where(c(&TableSchema::table_id_) == table_id)); ENGINE_LOG_DEBUG << "Successfully update table flag, table id = " << table_id; - } catch (std::exception &e) { + } catch (std::exception& e) { std::string msg = "Encounter exception when update table flag: table_id = " + table_id; return HandleException(msg, e.what()); } @@ -460,15 +434,13 @@ SqliteMetaImpl::UpdateTableFlag(const std::string &table_id, int64_t flag) { } Status -SqliteMetaImpl::DescribeTableIndex(const std::string &table_id, TableIndex &index) { +SqliteMetaImpl::DescribeTableIndex(const std::string& table_id, TableIndex& index) { try { server::MetricCollector metric; - auto groups = ConnectorPtr->select(columns(&TableSchema::engine_type_, - &TableSchema::nlist_, - &TableSchema::metric_type_), - where(c(&TableSchema::table_id_) == table_id - and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + auto groups = ConnectorPtr->select( + columns(&TableSchema::engine_type_, &TableSchema::nlist_, &TableSchema::metric_type_), + where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); if (groups.size() == 1) { index.engine_type_ = std::get<0>(groups[0]); @@ -477,7 +449,7 @@ SqliteMetaImpl::DescribeTableIndex(const std::string &table_id, TableIndex &inde } else { return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); } - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when describe index", e.what()); } @@ -485,42 +457,33 @@ SqliteMetaImpl::DescribeTableIndex(const std::string &table_id, TableIndex &inde } Status -SqliteMetaImpl::DropTableIndex(const std::string &table_id) { +SqliteMetaImpl::DropTableIndex(const std::string& table_id) { try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - //soft delete index files - ConnectorPtr->update_all( - set( - c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where( - c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int) TableFileSchema::INDEX)); + // soft delete index files + ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where(c(&TableFileSchema::table_id_) == table_id and + 
c(&TableFileSchema::file_type_) == (int)TableFileSchema::INDEX)); - //set all backup file to raw - ConnectorPtr->update_all( - set( - c(&TableFileSchema::file_type_) = (int) TableFileSchema::RAW, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where( - c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int) TableFileSchema::BACKUP)); + // set all backup file to raw + ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::RAW, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where(c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) == (int)TableFileSchema::BACKUP)); - //set table index type to raw + // set table index type to raw ConnectorPtr->update_all( - set( - c(&TableSchema::engine_type_) = DEFAULT_ENGINE_TYPE, - c(&TableSchema::nlist_) = DEFAULT_NLIST, + set(c(&TableSchema::engine_type_) = DEFAULT_ENGINE_TYPE, c(&TableSchema::nlist_) = DEFAULT_NLIST, c(&TableSchema::metric_type_) = DEFAULT_METRIC_TYPE), - where( - c(&TableSchema::table_id_) == table_id)); + where(c(&TableSchema::table_id_) == table_id)); ENGINE_LOG_DEBUG << "Successfully drop table index, table id = " << table_id; - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when delete table index files", e.what()); } @@ -528,20 +491,20 @@ SqliteMetaImpl::DropTableIndex(const std::string &table_id) { } Status -SqliteMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) { +SqliteMetaImpl::HasTable(const std::string& table_id, bool& has_or_not) { has_or_not = false; try { server::MetricCollector metric; - auto tables = ConnectorPtr->select(columns(&TableSchema::id_), - where(c(&TableSchema::table_id_) == table_id - and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + auto tables = ConnectorPtr->select( + columns(&TableSchema::id_), + where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); if (tables.size() == 1) { has_or_not = true; } else { has_or_not = false; } - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when lookup table", e.what()); } @@ -549,21 +512,16 @@ SqliteMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) { } Status -SqliteMetaImpl::AllTables(std::vector &table_schema_array) { +SqliteMetaImpl::AllTables(std::vector& table_schema_array) { try { server::MetricCollector metric; - auto selected = ConnectorPtr->select(columns(&TableSchema::id_, - &TableSchema::table_id_, - &TableSchema::dimension_, - &TableSchema::created_on_, - &TableSchema::flag_, - &TableSchema::index_file_size_, - &TableSchema::engine_type_, - &TableSchema::nlist_, - &TableSchema::metric_type_), - where(c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); - for (auto &table : selected) { + auto selected = + ConnectorPtr->select(columns(&TableSchema::id_, &TableSchema::table_id_, &TableSchema::dimension_, + &TableSchema::created_on_, &TableSchema::flag_, &TableSchema::index_file_size_, + &TableSchema::engine_type_, &TableSchema::nlist_, &TableSchema::metric_type_), + where(c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + for (auto& table : selected) { TableSchema schema; schema.id_ = std::get<0>(table); schema.table_id_ = std::get<1>(table); @@ -577,7 +535,7 @@ SqliteMetaImpl::AllTables(std::vector &table_schema_array) { table_schema_array.emplace_back(schema); } - } catch (std::exception 
&e) { + } catch (std::exception& e) { return HandleException("Encounter exception when lookup all tables", e.what()); } @@ -585,7 +543,7 @@ SqliteMetaImpl::AllTables(std::vector &table_schema_array) { } Status -SqliteMetaImpl::CreateTableFile(TableFileSchema &file_schema) { +SqliteMetaImpl::CreateTableFile(TableFileSchema& file_schema) { if (file_schema.date_ == EmptyDate) { file_schema.date_ = utils::GetDate(); } @@ -610,7 +568,7 @@ SqliteMetaImpl::CreateTableFile(TableFileSchema &file_schema) { file_schema.nlist_ = table_schema.nlist_; file_schema.metric_type_ = table_schema.metric_type_; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); auto id = ConnectorPtr->insert(file_schema); @@ -618,7 +576,7 @@ SqliteMetaImpl::CreateTableFile(TableFileSchema &file_schema) { ENGINE_LOG_DEBUG << "Successfully create table file, file id = " << file_schema.file_id_; return utils::CreateTableFilePath(options_, file_schema); - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when create table file", e.what()); } @@ -626,29 +584,23 @@ SqliteMetaImpl::CreateTableFile(TableFileSchema &file_schema) { } Status -SqliteMetaImpl::FilesToIndex(TableFilesSchema &files) { +SqliteMetaImpl::FilesToIndex(TableFilesSchema& files) { files.clear(); try { server::MetricCollector metric; - auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_, - &TableFileSchema::table_id_, - &TableFileSchema::file_id_, - &TableFileSchema::file_type_, - &TableFileSchema::file_size_, - &TableFileSchema::row_count_, - &TableFileSchema::date_, - &TableFileSchema::engine_type_, - &TableFileSchema::created_on_), - where(c(&TableFileSchema::file_type_) - == (int) TableFileSchema::TO_INDEX)); + auto selected = ConnectorPtr->select( + columns(&TableFileSchema::id_, &TableFileSchema::table_id_, &TableFileSchema::file_id_, + &TableFileSchema::file_type_, &TableFileSchema::file_size_, &TableFileSchema::row_count_, + &TableFileSchema::date_, &TableFileSchema::engine_type_, &TableFileSchema::created_on_), + where(c(&TableFileSchema::file_type_) == (int)TableFileSchema::TO_INDEX)); std::map groups; TableFileSchema table_file; Status ret; - for (auto &file : selected) { + for (auto& file : selected) { table_file.id_ = std::get<0>(file); table_file.table_id_ = std::get<1>(file); table_file.file_id_ = std::get<2>(file); @@ -684,57 +636,50 @@ SqliteMetaImpl::FilesToIndex(TableFilesSchema &files) { ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-index files"; } return ret; - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when iterate raw files", e.what()); } } Status -SqliteMetaImpl::FilesToSearch(const std::string &table_id, - const std::vector &ids, - const DatesT &dates, - DatePartionedTableFilesSchema &files) { +SqliteMetaImpl::FilesToSearch(const std::string& table_id, const std::vector& ids, const DatesT& dates, + DatePartionedTableFilesSchema& files) { files.clear(); server::MetricCollector metric; try { - auto select_columns = columns(&TableFileSchema::id_, - &TableFileSchema::table_id_, - &TableFileSchema::file_id_, - &TableFileSchema::file_type_, - &TableFileSchema::file_size_, - &TableFileSchema::row_count_, - &TableFileSchema::date_, - &TableFileSchema::engine_type_); + auto select_columns = + columns(&TableFileSchema::id_, 
&TableFileSchema::table_id_, &TableFileSchema::file_id_, + &TableFileSchema::file_type_, &TableFileSchema::file_size_, &TableFileSchema::row_count_, + &TableFileSchema::date_, &TableFileSchema::engine_type_); auto match_tableid = c(&TableFileSchema::table_id_) == table_id; - std::vector file_types = { - (int) TableFileSchema::RAW, - (int) TableFileSchema::TO_INDEX, - (int) TableFileSchema::INDEX - }; + std::vector file_types = {(int)TableFileSchema::RAW, (int)TableFileSchema::TO_INDEX, + (int)TableFileSchema::INDEX}; auto match_type = in(&TableFileSchema::file_type_, file_types); TableSchema table_schema; table_schema.table_id_ = table_id; auto status = DescribeTable(table_schema); - if (!status.ok()) { return status; } + if (!status.ok()) { + return status; + } - //sqlite_orm has a bug, 'in' statement cannot handle too many elements - //so we split one query into multi-queries, this is a work-around!! + // sqlite_orm has a bug, 'in' statement cannot handle too many elements + // so we split one query into multi-queries, this is a work-around!! std::vector split_dates; split_dates.push_back(DatesT()); const size_t batch_size = 30; - for(DateT date : dates) { + for (DateT date : dates) { DatesT& last_batch = *split_dates.rbegin(); last_batch.push_back(date); - if(last_batch.size() > batch_size) { + if (last_batch.size() > batch_size) { split_dates.push_back(DatesT()); } } - //perform query + // perform query decltype(ConnectorPtr->select(select_columns)) selected; if (dates.empty() && ids.empty()) { auto filter = where(match_tableid and match_type); @@ -744,28 +689,28 @@ SqliteMetaImpl::FilesToSearch(const std::string &table_id, auto filter = where(match_tableid and match_fileid and match_type); selected = ConnectorPtr->select(select_columns, filter); } else if (!dates.empty() && ids.empty()) { - for(auto& batch_dates : split_dates) { - if(batch_dates.empty()) { + for (auto& batch_dates : split_dates) { + if (batch_dates.empty()) { continue; } auto match_date = in(&TableFileSchema::date_, batch_dates); auto filter = where(match_tableid and match_date and match_type); auto batch_selected = ConnectorPtr->select(select_columns, filter); - for (auto &file : batch_selected) { + for (auto& file : batch_selected) { selected.push_back(file); } } } else if (!dates.empty() && !ids.empty()) { - for(auto& batch_dates : split_dates) { - if(batch_dates.empty()) { + for (auto& batch_dates : split_dates) { + if (batch_dates.empty()) { continue; } auto match_fileid = in(&TableFileSchema::id_, ids); auto match_date = in(&TableFileSchema::date_, batch_dates); auto filter = where(match_tableid and match_fileid and match_date and match_type); auto batch_selected = ConnectorPtr->select(select_columns, filter); - for (auto &file : batch_selected) { + for (auto& file : batch_selected) { selected.push_back(file); } } @@ -773,7 +718,7 @@ SqliteMetaImpl::FilesToSearch(const std::string &table_id, Status ret; TableFileSchema table_file; - for (auto &file : selected) { + for (auto& file : selected) { table_file.id_ = std::get<0>(file); table_file.table_id_ = std::get<1>(file); table_file.file_id_ = std::get<2>(file); @@ -806,20 +751,19 @@ SqliteMetaImpl::FilesToSearch(const std::string &table_id, ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-search files"; } return ret; - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when iterate index files", e.what()); } } Status -SqliteMetaImpl::FilesToMerge(const std::string &table_id, - 
DatePartionedTableFilesSchema &files) { +SqliteMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFilesSchema& files) { files.clear(); try { server::MetricCollector metric; - //check table existence + // check table existence TableSchema table_schema; table_schema.table_id_ = table_id; auto status = DescribeTable(table_schema); @@ -827,25 +771,21 @@ SqliteMetaImpl::FilesToMerge(const std::string &table_id, return status; } - //get files to merge - auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_, - &TableFileSchema::table_id_, - &TableFileSchema::file_id_, - &TableFileSchema::file_type_, - &TableFileSchema::file_size_, - &TableFileSchema::row_count_, - &TableFileSchema::date_, - &TableFileSchema::created_on_), - where(c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW and - c(&TableFileSchema::table_id_) == table_id), - order_by(&TableFileSchema::file_size_).desc()); + // get files to merge + auto selected = ConnectorPtr->select( + columns(&TableFileSchema::id_, &TableFileSchema::table_id_, &TableFileSchema::file_id_, + &TableFileSchema::file_type_, &TableFileSchema::file_size_, &TableFileSchema::row_count_, + &TableFileSchema::date_, &TableFileSchema::created_on_), + where(c(&TableFileSchema::file_type_) == (int)TableFileSchema::RAW and + c(&TableFileSchema::table_id_) == table_id), + order_by(&TableFileSchema::file_size_).desc()); Status result; - for (auto &file : selected) { + for (auto& file : selected) { TableFileSchema table_file; table_file.file_size_ = std::get<4>(file); if (table_file.file_size_ >= table_schema.index_file_size_) { - continue;//skip large file + continue; // skip large file } table_file.id_ = std::get<0>(file); @@ -876,28 +816,22 @@ SqliteMetaImpl::FilesToMerge(const std::string &table_id, ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-merge files"; } return result; - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when iterate merge files", e.what()); } } Status -SqliteMetaImpl::GetTableFiles(const std::string &table_id, - const std::vector &ids, - TableFilesSchema &table_files) { +SqliteMetaImpl::GetTableFiles(const std::string& table_id, const std::vector& ids, + TableFilesSchema& table_files) { try { table_files.clear(); - auto files = ConnectorPtr->select(columns(&TableFileSchema::id_, - &TableFileSchema::file_id_, - &TableFileSchema::file_type_, - &TableFileSchema::file_size_, - &TableFileSchema::row_count_, - &TableFileSchema::date_, - &TableFileSchema::engine_type_, - &TableFileSchema::created_on_), - where(c(&TableFileSchema::table_id_) == table_id and - in(&TableFileSchema::id_, ids) and - c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE)); + auto files = ConnectorPtr->select( + columns(&TableFileSchema::id_, &TableFileSchema::file_id_, &TableFileSchema::file_type_, + &TableFileSchema::file_size_, &TableFileSchema::row_count_, &TableFileSchema::date_, + &TableFileSchema::engine_type_, &TableFileSchema::created_on_), + where(c(&TableFileSchema::table_id_) == table_id and in(&TableFileSchema::id_, ids) and + c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); TableSchema table_schema; table_schema.table_id_ = table_id; @@ -907,7 +841,7 @@ SqliteMetaImpl::GetTableFiles(const std::string &table_id, } Status result; - for (auto &file : files) { + for (auto& file : files) { TableFileSchema file_schema; file_schema.table_id_ = table_id; file_schema.id_ = std::get<0>(file); @@ -930,7 +864,7 @@ 
SqliteMetaImpl::GetTableFiles(const std::string &table_id, ENGINE_LOG_DEBUG << "Get table files by id"; return result; - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when lookup table files", e.what()); } } @@ -938,28 +872,25 @@ SqliteMetaImpl::GetTableFiles(const std::string &table_id, // TODO(myh): Support swap to cloud storage Status SqliteMetaImpl::Archive() { - auto &criterias = options_.archive_conf_.GetCriterias(); + auto& criterias = options_.archive_conf_.GetCriterias(); if (criterias.size() == 0) { return Status::OK(); } for (auto kv : criterias) { - auto &criteria = kv.first; - auto &limit = kv.second; + auto& criteria = kv.first; + auto& limit = kv.second; if (criteria == engine::ARCHIVE_CONF_DAYS) { int64_t usecs = limit * D_SEC * US_PS; int64_t now = utils::GetMicroSecTimeStamp(); try { - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard<std::mutex> meta_lock(meta_mutex_); - ConnectorPtr->update_all( - set( - c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE), - where( - c(&TableFileSchema::created_on_) < (int64_t) (now - usecs) and - c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE)); - } catch (std::exception &e) { + ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE), + where(c(&TableFileSchema::created_on_) < (int64_t)(now - usecs) and + c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); + } catch (std::exception& e) { return HandleException("Encounter exception when update table files", e.what()); } @@ -969,7 +900,7 @@ SqliteMetaImpl::Archive() { uint64_t sum = 0; Size(sum); - int64_t to_delete = (int64_t) sum - limit * G; + int64_t to_delete = (int64_t)sum - limit * G; DiscardFiles(to_delete); ENGINE_LOG_DEBUG << "Archive files to free disk"; @@ -980,19 +911,18 @@ SqliteMetaImpl::Archive() { } Status -SqliteMetaImpl::Size(uint64_t &result) { +SqliteMetaImpl::Size(uint64_t& result) { result = 0; try { auto selected = ConnectorPtr->select(columns(sum(&TableFileSchema::file_size_)), - where( - c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE)); - for (auto &total_size : selected) { + where(c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); + for (auto& total_size : selected) { if (!std::get<0>(total_size)) { continue; } - result += (uint64_t) (*std::get<0>(total_size)); + result += (uint64_t)(*std::get<0>(total_size)); } - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when calculate db size", e.what()); } @@ -1010,22 +940,21 @@ SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) { try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard<std::mutex> meta_lock(meta_mutex_); auto commited = ConnectorPtr->transaction([&]() mutable { - auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_, - &TableFileSchema::file_size_), - where(c(&TableFileSchema::file_type_) - != (int) TableFileSchema::TO_DELETE), - order_by(&TableFileSchema::id_), - limit(10)); + auto selected = + ConnectorPtr->select(columns(&TableFileSchema::id_, &TableFileSchema::file_size_), + where(c(&TableFileSchema::file_type_) != 
(int)TableFileSchema::TO_DELETE), + order_by(&TableFileSchema::id_), limit(10)); std::vector ids; TableFileSchema table_file; - for (auto &file : selected) { - if (to_discard_size <= 0) break; + for (auto& file : selected) { + if (to_discard_size <= 0) + break; table_file.id_ = std::get<0>(file); table_file.file_size_ = std::get<1>(file); ids.push_back(table_file.id_); @@ -1038,12 +967,9 @@ SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) { return true; } - ConnectorPtr->update_all( - set( - c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where( - in(&TableFileSchema::id_, ids))); + ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where(in(&TableFileSchema::id_, ids))); return true; }); @@ -1051,7 +977,7 @@ SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) { if (!commited) { return HandleException("DiscardFiles error: sqlite transaction failed"); } - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when discard table file", e.what()); } @@ -1059,51 +985,48 @@ SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) { } Status -SqliteMetaImpl::UpdateTableFile(TableFileSchema &file_schema) { +SqliteMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); auto tables = ConnectorPtr->select(columns(&TableSchema::state_), where(c(&TableSchema::table_id_) == file_schema.table_id_)); - //if the table has been deleted, just mark the table file as TO_DELETE - //clean thread will delete the file later - if (tables.size() < 1 || std::get<0>(tables[0]) == (int) TableSchema::TO_DELETE) { + // if the table has been deleted, just mark the table file as TO_DELETE + // clean thread will delete the file later + if (tables.size() < 1 || std::get<0>(tables[0]) == (int)TableSchema::TO_DELETE) { file_schema.file_type_ = TableFileSchema::TO_DELETE; } ConnectorPtr->update(file_schema); ENGINE_LOG_DEBUG << "Update single table file, file id = " << file_schema.file_id_; - } catch (std::exception &e) { - std::string msg = "Exception update table file: table_id = " + file_schema.table_id_ - + " file_id = " + file_schema.file_id_; + } catch (std::exception& e) { + std::string msg = + "Exception update table file: table_id = " + file_schema.table_id_ + " file_id = " + file_schema.file_id_; return HandleException(msg, e.what()); } return Status::OK(); } Status -SqliteMetaImpl::UpdateTableFilesToIndex(const std::string &table_id) { +SqliteMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - ConnectorPtr->update_all( - set( - c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_INDEX), - where( - c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW)); + 
ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_INDEX), + where(c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) == (int)TableFileSchema::RAW)); ENGINE_LOG_DEBUG << "Update files to to_index, table id = " << table_id; - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when update table files to to_index", e.what()); } @@ -1111,21 +1034,21 @@ SqliteMetaImpl::UpdateTableFilesToIndex(const std::string &table_id) { } Status -SqliteMetaImpl::UpdateTableFiles(TableFilesSchema &files) { +SqliteMetaImpl::UpdateTableFiles(TableFilesSchema& files) { try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); std::map has_tables; - for (auto &file : files) { + for (auto& file : files) { if (has_tables.find(file.table_id_) != has_tables.end()) { continue; } auto tables = ConnectorPtr->select(columns(&TableSchema::id_), - where(c(&TableSchema::table_id_) == file.table_id_ - and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + where(c(&TableSchema::table_id_) == file.table_id_ and + c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); if (tables.size() >= 1) { has_tables[file.table_id_] = true; } else { @@ -1134,7 +1057,7 @@ SqliteMetaImpl::UpdateTableFiles(TableFilesSchema &files) { } auto commited = ConnectorPtr->transaction([&]() mutable { - for (auto &file : files) { + for (auto& file : files) { if (!has_tables[file.table_id_]) { file.file_type_ = TableFileSchema::TO_DELETE; } @@ -1150,7 +1073,7 @@ SqliteMetaImpl::UpdateTableFiles(TableFilesSchema &files) { } ENGINE_LOG_DEBUG << "Update " << files.size() << " table files"; - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when update table files", e.what()); } return Status::OK(); @@ -1161,27 +1084,21 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { auto now = utils::GetMicroSecTimeStamp(); std::set table_ids; - //remove to_delete files + // remove to_delete files try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - auto files = ConnectorPtr->select(columns(&TableFileSchema::id_, - &TableFileSchema::table_id_, - &TableFileSchema::file_id_, - &TableFileSchema::date_), - where( - c(&TableFileSchema::file_type_) == - (int) TableFileSchema::TO_DELETE - and - c(&TableFileSchema::updated_time_) - < now - seconds * US_PS)); + auto files = ConnectorPtr->select(columns(&TableFileSchema::id_, &TableFileSchema::table_id_, + &TableFileSchema::file_id_, &TableFileSchema::date_), + where(c(&TableFileSchema::file_type_) == (int)TableFileSchema::TO_DELETE and + c(&TableFileSchema::updated_time_) < now - seconds * US_PS)); auto commited = ConnectorPtr->transaction([&]() mutable { TableFileSchema table_file; - for (auto &file : files) { + for (auto& file : files) { table_file.id_ = std::get<0>(file); table_file.table_id_ = std::get<1>(file); table_file.file_id_ = std::get<2>(file); @@ -1203,24 +1120,23 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { if (files.size() > 0) { ENGINE_LOG_DEBUG << "Clean " << files.size() << " 
files deleted in " << seconds << " seconds"; } - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when clean table files", e.what()); } - //remove to_delete tables + // remove to_delete tables try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - auto tables = ConnectorPtr->select(columns(&TableSchema::id_, - &TableSchema::table_id_), - where(c(&TableSchema::state_) == (int) TableSchema::TO_DELETE)); + auto tables = ConnectorPtr->select(columns(&TableSchema::id_, &TableSchema::table_id_), + where(c(&TableSchema::state_) == (int)TableSchema::TO_DELETE)); auto commited = ConnectorPtr->transaction([&]() mutable { - for (auto &table : tables) { - utils::DeleteTablePath(options_, std::get<1>(table), false);//only delete empty folder + for (auto& table : tables) { + utils::DeleteTablePath(options_, std::get<1>(table), false); // only delete empty folder ConnectorPtr->remove(std::get<0>(table)); } @@ -1234,16 +1150,16 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { if (tables.size() > 0) { ENGINE_LOG_DEBUG << "Remove " << tables.size() << " tables from meta"; } - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when clean table files", e.what()); } - //remove deleted table folder - //don't remove table folder until all its files has been deleted + // remove deleted table folder + // don't remove table folder until all its files has been deleted try { server::MetricCollector metric; - for (auto &table_id : table_ids) { + for (auto& table_id : table_ids) { auto selected = ConnectorPtr->select(columns(&TableFileSchema::file_id_), where(c(&TableFileSchema::table_id_) == table_id)); if (selected.size() == 0) { @@ -1254,7 +1170,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { if (table_ids.size() > 0) { ENGINE_LOG_DEBUG << "Remove " << table_ids.size() << " tables folder"; } - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when delete table folder", e.what()); } @@ -1266,19 +1182,16 @@ SqliteMetaImpl::CleanUp() { try { server::MetricCollector metric; - //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - std::vector file_types = { - (int) TableFileSchema::NEW, - (int) TableFileSchema::NEW_INDEX, - (int) TableFileSchema::NEW_MERGE - }; + std::vector file_types = {(int)TableFileSchema::NEW, (int)TableFileSchema::NEW_INDEX, + (int)TableFileSchema::NEW_MERGE}; auto files = ConnectorPtr->select(columns(&TableFileSchema::id_), where(in(&TableFileSchema::file_type_, file_types))); auto commited = ConnectorPtr->transaction([&]() mutable { - for (auto &file : files) { + for (auto& file : files) { ENGINE_LOG_DEBUG << "Remove table file type as NEW"; ConnectorPtr->remove(std::get<0>(file)); } @@ -1292,7 +1205,7 @@ SqliteMetaImpl::CleanUp() { if (files.size() > 0) { ENGINE_LOG_DEBUG << "Clean " << files.size() << " files"; } - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when clean table file", e.what()); } @@ -1300,18 +1213,15 @@ SqliteMetaImpl::CleanUp() { } Status 
-SqliteMetaImpl::Count(const std::string &table_id, uint64_t &result) { +SqliteMetaImpl::Count(const std::string& table_id, uint64_t& result) { try { server::MetricCollector metric; - std::vector file_types = { - (int) TableFileSchema::RAW, - (int) TableFileSchema::TO_INDEX, - (int) TableFileSchema::INDEX - }; - auto selected = ConnectorPtr->select(columns(&TableFileSchema::row_count_), - where(in(&TableFileSchema::file_type_, file_types) - and c(&TableFileSchema::table_id_) == table_id)); + std::vector file_types = {(int)TableFileSchema::RAW, (int)TableFileSchema::TO_INDEX, + (int)TableFileSchema::INDEX}; + auto selected = ConnectorPtr->select( + columns(&TableFileSchema::row_count_), + where(in(&TableFileSchema::file_type_, file_types) and c(&TableFileSchema::table_id_) == table_id)); TableSchema table_schema; table_schema.table_id_ = table_id; @@ -1322,10 +1232,10 @@ SqliteMetaImpl::Count(const std::string &table_id, uint64_t &result) { } result = 0; - for (auto &file : selected) { + for (auto& file : selected) { result += std::get<0>(file); } - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when calculate table file size", e.what()); } return Status::OK(); @@ -1338,14 +1248,13 @@ SqliteMetaImpl::DropAll() { try { ConnectorPtr->drop_table(META_TABLES); ConnectorPtr->drop_table(META_TABLEFILES); - } catch (std::exception &e) { + } catch (std::exception& e) { return HandleException("Encounter exception when drop all meta", e.what()); } return Status::OK(); } -} // namespace meta -} // namespace engine -} // namespace milvus - +} // namespace meta +} // namespace engine +} // namespace milvus diff --git a/core/src/external/easyloggingpp/easylogging++.cc b/core/src/external/easyloggingpp/easylogging++.cc index 0c3bb0d375..4c6df12686 100644 --- a/core/src/external/easyloggingpp/easylogging++.cc +++ b/core/src/external/easyloggingpp/easylogging++.cc @@ -29,609 +29,624 @@ namespace consts { // Level log values - These are values that are replaced in place of %level format specifier // Extra spaces after format specifiers are only for readability purposes in log files -static const base::type::char_t* kInfoLevelLogValue = ELPP_LITERAL("INFO"); -static const base::type::char_t* kDebugLevelLogValue = ELPP_LITERAL("DEBUG"); -static const base::type::char_t* kWarningLevelLogValue = ELPP_LITERAL("WARNING"); -static const base::type::char_t* kErrorLevelLogValue = ELPP_LITERAL("ERROR"); -static const base::type::char_t* kFatalLevelLogValue = ELPP_LITERAL("FATAL"); -static const base::type::char_t* kVerboseLevelLogValue = - ELPP_LITERAL("VERBOSE"); // will become VERBOSE-x where x = verbose level -static const base::type::char_t* kTraceLevelLogValue = ELPP_LITERAL("TRACE"); -static const base::type::char_t* kInfoLevelShortLogValue = ELPP_LITERAL("I"); -static const base::type::char_t* kDebugLevelShortLogValue = ELPP_LITERAL("D"); -static const base::type::char_t* kWarningLevelShortLogValue = ELPP_LITERAL("W"); -static const base::type::char_t* kErrorLevelShortLogValue = ELPP_LITERAL("E"); -static const base::type::char_t* kFatalLevelShortLogValue = ELPP_LITERAL("F"); -static const base::type::char_t* kVerboseLevelShortLogValue = ELPP_LITERAL("V"); -static const base::type::char_t* kTraceLevelShortLogValue = ELPP_LITERAL("T"); +static const base::type::char_t* kInfoLevelLogValue = ELPP_LITERAL("INFO"); +static const base::type::char_t* kDebugLevelLogValue = ELPP_LITERAL("DEBUG"); +static const base::type::char_t* kWarningLevelLogValue = 
ELPP_LITERAL("WARNING"); +static const base::type::char_t* kErrorLevelLogValue = ELPP_LITERAL("ERROR"); +static const base::type::char_t* kFatalLevelLogValue = ELPP_LITERAL("FATAL"); +static const base::type::char_t* kVerboseLevelLogValue = + ELPP_LITERAL("VERBOSE"); // will become VERBOSE-x where x = verbose level +static const base::type::char_t* kTraceLevelLogValue = ELPP_LITERAL("TRACE"); +static const base::type::char_t* kInfoLevelShortLogValue = ELPP_LITERAL("I"); +static const base::type::char_t* kDebugLevelShortLogValue = ELPP_LITERAL("D"); +static const base::type::char_t* kWarningLevelShortLogValue = ELPP_LITERAL("W"); +static const base::type::char_t* kErrorLevelShortLogValue = ELPP_LITERAL("E"); +static const base::type::char_t* kFatalLevelShortLogValue = ELPP_LITERAL("F"); +static const base::type::char_t* kVerboseLevelShortLogValue = ELPP_LITERAL("V"); +static const base::type::char_t* kTraceLevelShortLogValue = ELPP_LITERAL("T"); // Format specifiers - These are used to define log format -static const base::type::char_t* kAppNameFormatSpecifier = ELPP_LITERAL("%app"); -static const base::type::char_t* kLoggerIdFormatSpecifier = ELPP_LITERAL("%logger"); -static const base::type::char_t* kThreadIdFormatSpecifier = ELPP_LITERAL("%thread"); -static const base::type::char_t* kSeverityLevelFormatSpecifier = ELPP_LITERAL("%level"); -static const base::type::char_t* kSeverityLevelShortFormatSpecifier = ELPP_LITERAL("%levshort"); -static const base::type::char_t* kDateTimeFormatSpecifier = ELPP_LITERAL("%datetime"); -static const base::type::char_t* kLogFileFormatSpecifier = ELPP_LITERAL("%file"); -static const base::type::char_t* kLogFileBaseFormatSpecifier = ELPP_LITERAL("%fbase"); -static const base::type::char_t* kLogLineFormatSpecifier = ELPP_LITERAL("%line"); -static const base::type::char_t* kLogLocationFormatSpecifier = ELPP_LITERAL("%loc"); -static const base::type::char_t* kLogFunctionFormatSpecifier = ELPP_LITERAL("%func"); -static const base::type::char_t* kCurrentUserFormatSpecifier = ELPP_LITERAL("%user"); -static const base::type::char_t* kCurrentHostFormatSpecifier = ELPP_LITERAL("%host"); -static const base::type::char_t* kMessageFormatSpecifier = ELPP_LITERAL("%msg"); -static const base::type::char_t* kVerboseLevelFormatSpecifier = ELPP_LITERAL("%vlevel"); -static const char* kDateTimeFormatSpecifierForFilename = "%datetime"; +static const base::type::char_t* kAppNameFormatSpecifier = ELPP_LITERAL("%app"); +static const base::type::char_t* kLoggerIdFormatSpecifier = ELPP_LITERAL("%logger"); +static const base::type::char_t* kThreadIdFormatSpecifier = ELPP_LITERAL("%thread"); +static const base::type::char_t* kSeverityLevelFormatSpecifier = ELPP_LITERAL("%level"); +static const base::type::char_t* kSeverityLevelShortFormatSpecifier = ELPP_LITERAL("%levshort"); +static const base::type::char_t* kDateTimeFormatSpecifier = ELPP_LITERAL("%datetime"); +static const base::type::char_t* kLogFileFormatSpecifier = ELPP_LITERAL("%file"); +static const base::type::char_t* kLogFileBaseFormatSpecifier = ELPP_LITERAL("%fbase"); +static const base::type::char_t* kLogLineFormatSpecifier = ELPP_LITERAL("%line"); +static const base::type::char_t* kLogLocationFormatSpecifier = ELPP_LITERAL("%loc"); +static const base::type::char_t* kLogFunctionFormatSpecifier = ELPP_LITERAL("%func"); +static const base::type::char_t* kCurrentUserFormatSpecifier = ELPP_LITERAL("%user"); +static const base::type::char_t* kCurrentHostFormatSpecifier = ELPP_LITERAL("%host"); +static const 
base::type::char_t* kMessageFormatSpecifier = ELPP_LITERAL("%msg"); +static const base::type::char_t* kVerboseLevelFormatSpecifier = ELPP_LITERAL("%vlevel"); +static const char* kDateTimeFormatSpecifierForFilename = "%datetime"; // Date/time -static const char* kDays[7] = { "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" }; -static const char* kDaysAbbrev[7] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; -static const char* kMonths[12] = { "January", "February", "March", "April", "May", "June", "July", "August", - "September", "October", "November", "December" - }; -static const char* kMonthsAbbrev[12] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; -static const char* kDefaultDateTimeFormat = "%Y-%M-%d %H:%m:%s,%g"; -static const char* kDefaultDateTimeFormatInFilename = "%Y-%M-%d_%H-%m"; -static const int kYearBase = 1900; -static const char* kAm = "AM"; -static const char* kPm = "PM"; +static const char* kDays[7] = {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}; +static const char* kDaysAbbrev[7] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}; +static const char* kMonths[12] = {"January", "February", "March", "April", "May", "June", + "July", "August", "September", "October", "November", "December"}; +static const char* kMonthsAbbrev[12] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; +static const char* kDefaultDateTimeFormat = "%Y-%M-%d %H:%m:%s,%g"; +static const char* kDefaultDateTimeFormatInFilename = "%Y-%M-%d_%H-%m"; +static const int kYearBase = 1900; +static const char* kAm = "AM"; +static const char* kPm = "PM"; // Miscellaneous constants -static const char* kNullPointer = "nullptr"; +static const char* kNullPointer = "nullptr"; #if ELPP_VARIADIC_TEMPLATES_SUPPORTED #endif // ELPP_VARIADIC_TEMPLATES_SUPPORTED -static const base::type::VerboseLevel kMaxVerboseLevel = 9; -static const char* kUnknownUser = "user"; -static const char* kUnknownHost = "unknown-host"; - +static const base::type::VerboseLevel kMaxVerboseLevel = 9; +static const char* kUnknownUser = "user"; +static const char* kUnknownHost = "unknown-host"; //---------------- DEFAULT LOG FILE ----------------------- #if defined(ELPP_NO_DEFAULT_LOG_FILE) -# if ELPP_OS_UNIX -static const char* kDefaultLogFile = "/dev/null"; -# elif ELPP_OS_WINDOWS -static const char* kDefaultLogFile = "nul"; -# endif // ELPP_OS_UNIX +#if ELPP_OS_UNIX +static const char* kDefaultLogFile = "/dev/null"; +#elif ELPP_OS_WINDOWS +static const char* kDefaultLogFile = "nul"; +#endif // ELPP_OS_UNIX #elif defined(ELPP_DEFAULT_LOG_FILE) -static const char* kDefaultLogFile = ELPP_DEFAULT_LOG_FILE; +static const char* kDefaultLogFile = ELPP_DEFAULT_LOG_FILE; #else -static const char* kDefaultLogFile = "myeasylog.log"; -#endif // defined(ELPP_NO_DEFAULT_LOG_FILE) - +static const char* kDefaultLogFile = "myeasylog.log"; +#endif // defined(ELPP_NO_DEFAULT_LOG_FILE) #if !defined(ELPP_DISABLE_LOG_FILE_FROM_ARG) -static const char* kDefaultLogFileParam = "--default-log-file"; +static const char* kDefaultLogFileParam = "--default-log-file"; #endif // !defined(ELPP_DISABLE_LOG_FILE_FROM_ARG) #if defined(ELPP_LOGGING_FLAGS_FROM_ARG) -static const char* kLoggingFlagsParam = "--logging-flags"; +static const char* kLoggingFlagsParam = "--logging-flags"; #endif // defined(ELPP_LOGGING_FLAGS_FROM_ARG) -static const char* kValidLoggerIdSymbols = - 
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._"; -static const char* kConfigurationComment = "##"; -static const char* kConfigurationLevel = "*"; -static const char* kConfigurationLoggerId = "--"; -} +static const char* kValidLoggerIdSymbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._"; +static const char* kConfigurationComment = "##"; +static const char* kConfigurationLevel = "*"; +static const char* kConfigurationLoggerId = "--"; +} // namespace consts // el::base::utils namespace utils { /// @brief Aborts application due with user-defined status -static void abort(int status, const std::string& reason) { - // Both status and reason params are there for debugging with tools like gdb etc - ELPP_UNUSED(status); - ELPP_UNUSED(reason); +static void +abort(int status, const std::string& reason) { + // Both status and reason params are there for debugging with tools like gdb etc + ELPP_UNUSED(status); + ELPP_UNUSED(reason); #if defined(ELPP_COMPILER_MSVC) && defined(_M_IX86) && defined(_DEBUG) - // Ignore msvc critical error dialog - break instead (on debug mode) - _asm int 3 + // Ignore msvc critical error dialog - break instead (on debug mode) + _asm int 3 #else - ::abort(); + ::abort(); #endif // defined(ELPP_COMPILER_MSVC) && defined(_M_IX86) && defined(_DEBUG) } -} // namespace utils -} // namespace base +} // namespace utils +} // namespace base // el // LevelHelper -const char* LevelHelper::convertToString(Level level) { - // Do not use switch over strongly typed enums because Intel C++ compilers dont support them yet. - if (level == Level::Global) return "GLOBAL"; - if (level == Level::Debug) return "DEBUG"; - if (level == Level::Info) return "INFO"; - if (level == Level::Warning) return "WARNING"; - if (level == Level::Error) return "ERROR"; - if (level == Level::Fatal) return "FATAL"; - if (level == Level::Verbose) return "VERBOSE"; - if (level == Level::Trace) return "TRACE"; - return "UNKNOWN"; +const char* +LevelHelper::convertToString(Level level) { + // Do not use switch over strongly typed enums because Intel C++ compilers dont support them yet. 
+ if (level == Level::Global) + return "GLOBAL"; + if (level == Level::Debug) + return "DEBUG"; + if (level == Level::Info) + return "INFO"; + if (level == Level::Warning) + return "WARNING"; + if (level == Level::Error) + return "ERROR"; + if (level == Level::Fatal) + return "FATAL"; + if (level == Level::Verbose) + return "VERBOSE"; + if (level == Level::Trace) + return "TRACE"; + return "UNKNOWN"; } struct StringToLevelItem { - const char* levelString; - Level level; + const char* levelString; + Level level; }; static struct StringToLevelItem stringToLevelMap[] = { - { "global", Level::Global }, - { "debug", Level::Debug }, - { "info", Level::Info }, - { "warning", Level::Warning }, - { "error", Level::Error }, - { "fatal", Level::Fatal }, - { "verbose", Level::Verbose }, - { "trace", Level::Trace } -}; + {"global", Level::Global}, {"debug", Level::Debug}, {"info", Level::Info}, {"warning", Level::Warning}, + {"error", Level::Error}, {"fatal", Level::Fatal}, {"verbose", Level::Verbose}, {"trace", Level::Trace}}; -Level LevelHelper::convertFromString(const char* levelStr) { - for (auto& item : stringToLevelMap) { - if (base::utils::Str::cStringCaseEq(levelStr, item.levelString)) { - return item.level; +Level +LevelHelper::convertFromString(const char* levelStr) { + for (auto& item : stringToLevelMap) { + if (base::utils::Str::cStringCaseEq(levelStr, item.levelString)) { + return item.level; + } } - } - return Level::Unknown; + return Level::Unknown; } -void LevelHelper::forEachLevel(base::type::EnumType* startIndex, const std::function<bool(void)>& fn) { - base::type::EnumType lIndexMax = LevelHelper::kMaxValid; - do { - if (fn()) { - break; - } - *startIndex = static_cast<base::type::EnumType>(*startIndex << 1); - } while (*startIndex <= lIndexMax); +void +LevelHelper::forEachLevel(base::type::EnumType* startIndex, const std::function<bool(void)>& fn) { + base::type::EnumType lIndexMax = LevelHelper::kMaxValid; + do { + if (fn()) { + break; + } + *startIndex = static_cast<base::type::EnumType>(*startIndex << 1); + } while (*startIndex <= lIndexMax); } // ConfigurationTypeHelper -const char* ConfigurationTypeHelper::convertToString(ConfigurationType configurationType) { - // Do not use switch over strongly typed enums because Intel C++ compilers don't support them yet. - if (configurationType == ConfigurationType::Enabled) return "ENABLED"; - if (configurationType == ConfigurationType::Filename) return "FILENAME"; - if (configurationType == ConfigurationType::Format) return "FORMAT"; - if (configurationType == ConfigurationType::ToFile) return "TO_FILE"; - if (configurationType == ConfigurationType::ToStandardOutput) return "TO_STANDARD_OUTPUT"; - if (configurationType == ConfigurationType::SubsecondPrecision) return "SUBSECOND_PRECISION"; - if (configurationType == ConfigurationType::PerformanceTracking) return "PERFORMANCE_TRACKING"; - if (configurationType == ConfigurationType::MaxLogFileSize) return "MAX_LOG_FILE_SIZE"; - if (configurationType == ConfigurationType::LogFlushThreshold) return "LOG_FLUSH_THRESHOLD"; - return "UNKNOWN"; +const char* +ConfigurationTypeHelper::convertToString(ConfigurationType configurationType) { + // Do not use switch over strongly typed enums because Intel C++ compilers don't support them yet. 
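// [Editorial note, not part of this patch.] convertFromString further below performs the
// reverse mapping by scanning configStringToTypeMap with a case-insensitive string compare
// (base::utils::Str::cStringCaseEq), so a hypothetical caller gets the same result either way:
//
//     ConfigurationTypeHelper::convertFromString("to_file");  // ConfigurationType::ToFile
//     ConfigurationTypeHelper::convertFromString("TO_FILE");  // same result, case-insensitive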
+ if (configurationType == ConfigurationType::Enabled) + return "ENABLED"; + if (configurationType == ConfigurationType::Filename) + return "FILENAME"; + if (configurationType == ConfigurationType::Format) + return "FORMAT"; + if (configurationType == ConfigurationType::ToFile) + return "TO_FILE"; + if (configurationType == ConfigurationType::ToStandardOutput) + return "TO_STANDARD_OUTPUT"; + if (configurationType == ConfigurationType::SubsecondPrecision) + return "SUBSECOND_PRECISION"; + if (configurationType == ConfigurationType::PerformanceTracking) + return "PERFORMANCE_TRACKING"; + if (configurationType == ConfigurationType::MaxLogFileSize) + return "MAX_LOG_FILE_SIZE"; + if (configurationType == ConfigurationType::LogFlushThreshold) + return "LOG_FLUSH_THRESHOLD"; + return "UNKNOWN"; } struct ConfigurationStringToTypeItem { - const char* configString; - ConfigurationType configType; + const char* configString; + ConfigurationType configType; }; static struct ConfigurationStringToTypeItem configStringToTypeMap[] = { - { "enabled", ConfigurationType::Enabled }, - { "to_file", ConfigurationType::ToFile }, - { "to_standard_output", ConfigurationType::ToStandardOutput }, - { "format", ConfigurationType::Format }, - { "filename", ConfigurationType::Filename }, - { "subsecond_precision", ConfigurationType::SubsecondPrecision }, - { "milliseconds_width", ConfigurationType::MillisecondsWidth }, - { "performance_tracking", ConfigurationType::PerformanceTracking }, - { "max_log_file_size", ConfigurationType::MaxLogFileSize }, - { "log_flush_threshold", ConfigurationType::LogFlushThreshold }, + {"enabled", ConfigurationType::Enabled}, + {"to_file", ConfigurationType::ToFile}, + {"to_standard_output", ConfigurationType::ToStandardOutput}, + {"format", ConfigurationType::Format}, + {"filename", ConfigurationType::Filename}, + {"subsecond_precision", ConfigurationType::SubsecondPrecision}, + {"milliseconds_width", ConfigurationType::MillisecondsWidth}, + {"performance_tracking", ConfigurationType::PerformanceTracking}, + {"max_log_file_size", ConfigurationType::MaxLogFileSize}, + {"log_flush_threshold", ConfigurationType::LogFlushThreshold}, }; -ConfigurationType ConfigurationTypeHelper::convertFromString(const char* configStr) { - for (auto& item : configStringToTypeMap) { - if (base::utils::Str::cStringCaseEq(configStr, item.configString)) { - return item.configType; +ConfigurationType +ConfigurationTypeHelper::convertFromString(const char* configStr) { + for (auto& item : configStringToTypeMap) { + if (base::utils::Str::cStringCaseEq(configStr, item.configString)) { + return item.configType; + } } - } - return ConfigurationType::Unknown; + return ConfigurationType::Unknown; } -void ConfigurationTypeHelper::forEachConfigType(base::type::EnumType* startIndex, const std::function& fn) { - base::type::EnumType cIndexMax = ConfigurationTypeHelper::kMaxValid; - do { - if (fn()) { - break; - } - *startIndex = static_cast(*startIndex << 1); - } while (*startIndex <= cIndexMax); +void +ConfigurationTypeHelper::forEachConfigType(base::type::EnumType* startIndex, const std::function& fn) { + base::type::EnumType cIndexMax = ConfigurationTypeHelper::kMaxValid; + do { + if (fn()) { + break; + } + *startIndex = static_cast(*startIndex << 1); + } while (*startIndex <= cIndexMax); } // Configuration -Configuration::Configuration(const Configuration& c) : - m_level(c.m_level), - m_configurationType(c.m_configurationType), - m_value(c.m_value) { +Configuration::Configuration(const Configuration& c) + : 
m_level(c.m_level), m_configurationType(c.m_configurationType), m_value(c.m_value) { } -Configuration& Configuration::operator=(const Configuration& c) { - if (&c != this) { - m_level = c.m_level; - m_configurationType = c.m_configurationType; - m_value = c.m_value; - } - return *this; +Configuration& +Configuration::operator=(const Configuration& c) { + if (&c != this) { + m_level = c.m_level; + m_configurationType = c.m_configurationType; + m_value = c.m_value; + } + return *this; } /// @brief Full constructor used to sets value of configuration -Configuration::Configuration(Level level, ConfigurationType configurationType, const std::string& value) : - m_level(level), - m_configurationType(configurationType), - m_value(value) { +Configuration::Configuration(Level level, ConfigurationType configurationType, const std::string& value) + : m_level(level), m_configurationType(configurationType), m_value(value) { } -void Configuration::log(el::base::type::ostream_t& os) const { - os << LevelHelper::convertToString(m_level) - << ELPP_LITERAL(" ") << ConfigurationTypeHelper::convertToString(m_configurationType) - << ELPP_LITERAL(" = ") << m_value.c_str(); +void +Configuration::log(el::base::type::ostream_t& os) const { + os << LevelHelper::convertToString(m_level) << ELPP_LITERAL(" ") + << ConfigurationTypeHelper::convertToString(m_configurationType) << ELPP_LITERAL(" = ") << m_value.c_str(); } /// @brief Used to find configuration from configuration (pointers) repository. Avoid using it. -Configuration::Predicate::Predicate(Level level, ConfigurationType configurationType) : - m_level(level), - m_configurationType(configurationType) { +Configuration::Predicate::Predicate(Level level, ConfigurationType configurationType) + : m_level(level), m_configurationType(configurationType) { } -bool Configuration::Predicate::operator()(const Configuration* conf) const { - return ((conf != nullptr) && (conf->level() == m_level) && (conf->configurationType() == m_configurationType)); +bool +Configuration::Predicate::operator()(const Configuration* conf) const { + return ((conf != nullptr) && (conf->level() == m_level) && (conf->configurationType() == m_configurationType)); } // Configurations -Configurations::Configurations(void) : - m_configurationFile(std::string()), - m_isFromFile(false) { +Configurations::Configurations(void) : m_configurationFile(std::string()), m_isFromFile(false) { } -Configurations::Configurations(const std::string& configurationFile, bool useDefaultsForRemaining, - Configurations* base) : - m_configurationFile(configurationFile), - m_isFromFile(false) { - parseFromFile(configurationFile, base); - if (useDefaultsForRemaining) { - setRemainingToDefault(); - } -} - -bool Configurations::parseFromFile(const std::string& configurationFile, Configurations* base) { - // We initial assertion with true because if we have assertion diabled, we want to pass this - // check and if assertion is enabled we will have values re-assigned any way. 
-  bool assertionPassed = true;
-  ELPP_ASSERT((assertionPassed = base::utils::File::pathExists(configurationFile.c_str(), true)) == true,
-              "Configuration file [" << configurationFile << "] does not exist!");
-  if (!assertionPassed) {
-    return false;
-  }
-  bool success = Parser::parseFromFile(configurationFile, this, base);
-  m_isFromFile = success;
-  return success;
-}
-
-bool Configurations::parseFromText(const std::string& configurationsString, Configurations* base) {
-  bool success = Parser::parseFromText(configurationsString, this, base);
-  if (success) {
-    m_isFromFile = false;
-  }
-  return success;
-}
-
-void Configurations::setFromBase(Configurations* base) {
-  if (base == nullptr || base == this) {
-    return;
-  }
-  base::threading::ScopedLock scopedLock(base->lock());
-  for (Configuration*& conf : base->list()) {
-    set(conf);
-  }
-}
-
-bool Configurations::hasConfiguration(ConfigurationType configurationType) {
-  base::type::EnumType lIndex = LevelHelper::kMinValid;
-  bool result = false;
-  LevelHelper::forEachLevel(&lIndex, [&](void) -> bool {
-    if (hasConfiguration(LevelHelper::castFromInt(lIndex), configurationType)) {
-      result = true;
+Configurations::Configurations(const std::string& configurationFile, bool useDefaultsForRemaining, Configurations* base)
+    : m_configurationFile(configurationFile), m_isFromFile(false) {
+    parseFromFile(configurationFile, base);
+    if (useDefaultsForRemaining) {
+        setRemainingToDefault();
     }
-    return result;
-  });
-  return result;
 }
 
-bool Configurations::hasConfiguration(Level level, ConfigurationType configurationType) {
-  base::threading::ScopedLock scopedLock(lock());
+bool
+Configurations::parseFromFile(const std::string& configurationFile, Configurations* base) {
+    // We initialize assertionPassed to true so that this check passes when assertions are
+    // disabled; when assertions are enabled the value is re-assigned below anyway.
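+    // Illustrative usage (hypothetical path): given el::Configurations c;
+    // c.parseFromFile("/tmp/logger.conf") returns false when the file is missing.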
+    bool assertionPassed = true;
+    ELPP_ASSERT((assertionPassed = base::utils::File::pathExists(configurationFile.c_str(), true)) == true,
+                "Configuration file [" << configurationFile << "] does not exist!");
+    if (!assertionPassed) {
+        return false;
+    }
+    bool success = Parser::parseFromFile(configurationFile, this, base);
+    m_isFromFile = success;
+    return success;
+}
+
+bool
+Configurations::parseFromText(const std::string& configurationsString, Configurations* base) {
+    bool success = Parser::parseFromText(configurationsString, this, base);
+    if (success) {
+        m_isFromFile = false;
+    }
+    return success;
+}
+
+void
+Configurations::setFromBase(Configurations* base) {
+    if (base == nullptr || base == this) {
+        return;
+    }
+    base::threading::ScopedLock scopedLock(base->lock());
+    for (Configuration*& conf : base->list()) {
+        set(conf);
+    }
+}
+
+bool
+Configurations::hasConfiguration(ConfigurationType configurationType) {
+    base::type::EnumType lIndex = LevelHelper::kMinValid;
+    bool result = false;
+    LevelHelper::forEachLevel(&lIndex, [&](void) -> bool {
+        if (hasConfiguration(LevelHelper::castFromInt(lIndex), configurationType)) {
+            result = true;
+        }
+        return result;
+    });
+    return result;
+}
+
+bool
+Configurations::hasConfiguration(Level level, ConfigurationType configurationType) {
+    base::threading::ScopedLock scopedLock(lock());
 #if ELPP_COMPILER_INTEL
-  // We cant specify template types here, Intel C++ throws compilation error
-  // "error: type name is not allowed"
-  return RegistryWithPred::get(level, configurationType) != nullptr;
+    // We can't specify template types here, Intel C++ throws compilation error
+    // "error: type name is not allowed"
+    return RegistryWithPred::get(level, configurationType) != nullptr;
 #else
-  return RegistryWithPred<Configuration, Configuration::Predicate>::get(level, configurationType) != nullptr;
+    return RegistryWithPred<Configuration, Configuration::Predicate>::get(level, configurationType) != nullptr;
 #endif  // ELPP_COMPILER_INTEL
 }
 
-void Configurations::set(Level level, ConfigurationType configurationType, const std::string& value) {
-  base::threading::ScopedLock scopedLock(lock());
-  unsafeSet(level, configurationType, value);  // This is not unsafe anymore as we have locked mutex
-  if (level == Level::Global) {
-    unsafeSetGlobally(configurationType, value, false);  // Again this is not unsafe either
-  }
+void
+Configurations::set(Level level, ConfigurationType configurationType, const std::string& value) {
+    base::threading::ScopedLock scopedLock(lock());
+    unsafeSet(level, configurationType, value);  // This is not unsafe anymore as we have locked the mutex
+    if (level == Level::Global) {
+        unsafeSetGlobally(configurationType, value, false);  // Again, this is not unsafe either
+    }
 }
 
-void Configurations::set(Configuration* conf) {
-  if (conf == nullptr) {
-    return;
-  }
-  set(conf->level(), conf->configurationType(), conf->value());
+void
+Configurations::set(Configuration* conf) {
+    if (conf == nullptr) {
+        return;
+    }
+    set(conf->level(), conf->configurationType(), conf->value());
 }
 
-void Configurations::setToDefault(void) {
-  setGlobally(ConfigurationType::Enabled, std::string("true"), true);
-  setGlobally(ConfigurationType::Filename, std::string(base::consts::kDefaultLogFile), true);
+void
+Configurations::setToDefault(void) {
+    setGlobally(ConfigurationType::Enabled, std::string("true"), true);
+    setGlobally(ConfigurationType::Filename, std::string(base::consts::kDefaultLogFile), true);
 #if defined(ELPP_NO_LOG_TO_FILE)
-  setGlobally(ConfigurationType::ToFile, std::string("false"), true);
+    setGlobally(ConfigurationType::ToFile,
std::string("false"), true); #else - setGlobally(ConfigurationType::ToFile, std::string("true"), true); -#endif // defined(ELPP_NO_LOG_TO_FILE) - setGlobally(ConfigurationType::ToStandardOutput, std::string("true"), true); - setGlobally(ConfigurationType::SubsecondPrecision, std::string("3"), true); - setGlobally(ConfigurationType::PerformanceTracking, std::string("true"), true); - setGlobally(ConfigurationType::MaxLogFileSize, std::string("0"), true); - setGlobally(ConfigurationType::LogFlushThreshold, std::string("0"), true); + setGlobally(ConfigurationType::ToFile, std::string("true"), true); +#endif // defined(ELPP_NO_LOG_TO_FILE) + setGlobally(ConfigurationType::ToStandardOutput, std::string("true"), true); + setGlobally(ConfigurationType::SubsecondPrecision, std::string("3"), true); + setGlobally(ConfigurationType::PerformanceTracking, std::string("true"), true); + setGlobally(ConfigurationType::MaxLogFileSize, std::string("0"), true); + setGlobally(ConfigurationType::LogFlushThreshold, std::string("0"), true); - setGlobally(ConfigurationType::Format, std::string("%datetime %level [%logger] %msg"), true); - set(Level::Debug, ConfigurationType::Format, - std::string("%datetime %level [%logger] [%user@%host] [%func] [%loc] %msg")); - // INFO and WARNING are set to default by Level::Global - set(Level::Error, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); - set(Level::Fatal, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); - set(Level::Verbose, ConfigurationType::Format, std::string("%datetime %level-%vlevel [%logger] %msg")); - set(Level::Trace, ConfigurationType::Format, std::string("%datetime %level [%logger] [%func] [%loc] %msg")); + setGlobally(ConfigurationType::Format, std::string("%datetime %level [%logger] %msg"), true); + set(Level::Debug, ConfigurationType::Format, + std::string("%datetime %level [%logger] [%user@%host] [%func] [%loc] %msg")); + // INFO and WARNING are set to default by Level::Global + set(Level::Error, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); + set(Level::Fatal, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); + set(Level::Verbose, ConfigurationType::Format, std::string("%datetime %level-%vlevel [%logger] %msg")); + set(Level::Trace, ConfigurationType::Format, std::string("%datetime %level [%logger] [%func] [%loc] %msg")); } -void Configurations::setRemainingToDefault(void) { - base::threading::ScopedLock scopedLock(lock()); +void +Configurations::setRemainingToDefault(void) { + base::threading::ScopedLock scopedLock(lock()); #if defined(ELPP_NO_LOG_TO_FILE) - unsafeSetIfNotExist(Level::Global, ConfigurationType::Enabled, std::string("false")); + unsafeSetIfNotExist(Level::Global, ConfigurationType::Enabled, std::string("false")); #else - unsafeSetIfNotExist(Level::Global, ConfigurationType::Enabled, std::string("true")); -#endif // defined(ELPP_NO_LOG_TO_FILE) - unsafeSetIfNotExist(Level::Global, ConfigurationType::Filename, std::string(base::consts::kDefaultLogFile)); - unsafeSetIfNotExist(Level::Global, ConfigurationType::ToStandardOutput, std::string("true")); - unsafeSetIfNotExist(Level::Global, ConfigurationType::SubsecondPrecision, std::string("3")); - unsafeSetIfNotExist(Level::Global, ConfigurationType::PerformanceTracking, std::string("true")); - unsafeSetIfNotExist(Level::Global, ConfigurationType::MaxLogFileSize, std::string("0")); - unsafeSetIfNotExist(Level::Global, ConfigurationType::Format, std::string("%datetime %level 
[%logger] %msg")); - unsafeSetIfNotExist(Level::Debug, ConfigurationType::Format, - std::string("%datetime %level [%logger] [%user@%host] [%func] [%loc] %msg")); - // INFO and WARNING are set to default by Level::Global - unsafeSetIfNotExist(Level::Error, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); - unsafeSetIfNotExist(Level::Fatal, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); - unsafeSetIfNotExist(Level::Verbose, ConfigurationType::Format, std::string("%datetime %level-%vlevel [%logger] %msg")); - unsafeSetIfNotExist(Level::Trace, ConfigurationType::Format, - std::string("%datetime %level [%logger] [%func] [%loc] %msg")); + unsafeSetIfNotExist(Level::Global, ConfigurationType::Enabled, std::string("true")); +#endif // defined(ELPP_NO_LOG_TO_FILE) + unsafeSetIfNotExist(Level::Global, ConfigurationType::Filename, std::string(base::consts::kDefaultLogFile)); + unsafeSetIfNotExist(Level::Global, ConfigurationType::ToStandardOutput, std::string("true")); + unsafeSetIfNotExist(Level::Global, ConfigurationType::SubsecondPrecision, std::string("3")); + unsafeSetIfNotExist(Level::Global, ConfigurationType::PerformanceTracking, std::string("true")); + unsafeSetIfNotExist(Level::Global, ConfigurationType::MaxLogFileSize, std::string("0")); + unsafeSetIfNotExist(Level::Global, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); + unsafeSetIfNotExist(Level::Debug, ConfigurationType::Format, + std::string("%datetime %level [%logger] [%user@%host] [%func] [%loc] %msg")); + // INFO and WARNING are set to default by Level::Global + unsafeSetIfNotExist(Level::Error, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); + unsafeSetIfNotExist(Level::Fatal, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); + unsafeSetIfNotExist(Level::Verbose, ConfigurationType::Format, + std::string("%datetime %level-%vlevel [%logger] %msg")); + unsafeSetIfNotExist(Level::Trace, ConfigurationType::Format, + std::string("%datetime %level [%logger] [%func] [%loc] %msg")); } -bool Configurations::Parser::parseFromFile(const std::string& configurationFile, Configurations* sender, - Configurations* base) { - sender->setFromBase(base); - std::ifstream fileStream_(configurationFile.c_str(), std::ifstream::in); - ELPP_ASSERT(fileStream_.is_open(), "Unable to open configuration file [" << configurationFile << "] for parsing."); - bool parsedSuccessfully = false; - std::string line = std::string(); - Level currLevel = Level::Unknown; - std::string currConfigStr = std::string(); - std::string currLevelStr = std::string(); - while (fileStream_.good()) { - std::getline(fileStream_, line); - parsedSuccessfully = parseLine(&line, &currConfigStr, &currLevelStr, &currLevel, sender); - ELPP_ASSERT(parsedSuccessfully, "Unable to parse configuration line: " << line); - } - return parsedSuccessfully; -} - -bool Configurations::Parser::parseFromText(const std::string& configurationsString, Configurations* sender, - Configurations* base) { - sender->setFromBase(base); - bool parsedSuccessfully = false; - std::stringstream ss(configurationsString); - std::string line = std::string(); - Level currLevel = Level::Unknown; - std::string currConfigStr = std::string(); - std::string currLevelStr = std::string(); - while (std::getline(ss, line)) { - parsedSuccessfully = parseLine(&line, &currConfigStr, &currLevelStr, &currLevel, sender); - ELPP_ASSERT(parsedSuccessfully, "Unable to parse configuration line: " << line); - 
} - return parsedSuccessfully; -} - -void Configurations::Parser::ignoreComments(std::string* line) { - std::size_t foundAt = 0; - std::size_t quotesStart = line->find("\""); - std::size_t quotesEnd = std::string::npos; - if (quotesStart != std::string::npos) { - quotesEnd = line->find("\"", quotesStart + 1); - while (quotesEnd != std::string::npos && line->at(quotesEnd - 1) == '\\') { - // Do not erase slash yet - we will erase it in parseLine(..) while loop - quotesEnd = line->find("\"", quotesEnd + 2); +bool +Configurations::Parser::parseFromFile(const std::string& configurationFile, Configurations* sender, + Configurations* base) { + sender->setFromBase(base); + std::ifstream fileStream_(configurationFile.c_str(), std::ifstream::in); + ELPP_ASSERT(fileStream_.is_open(), "Unable to open configuration file [" << configurationFile << "] for parsing."); + bool parsedSuccessfully = false; + std::string line = std::string(); + Level currLevel = Level::Unknown; + std::string currConfigStr = std::string(); + std::string currLevelStr = std::string(); + while (fileStream_.good()) { + std::getline(fileStream_, line); + parsedSuccessfully = parseLine(&line, &currConfigStr, &currLevelStr, &currLevel, sender); + ELPP_ASSERT(parsedSuccessfully, "Unable to parse configuration line: " << line); } - } - if ((foundAt = line->find(base::consts::kConfigurationComment)) != std::string::npos) { - if (foundAt < quotesEnd) { - foundAt = line->find(base::consts::kConfigurationComment, quotesEnd + 1); + return parsedSuccessfully; +} + +bool +Configurations::Parser::parseFromText(const std::string& configurationsString, Configurations* sender, + Configurations* base) { + sender->setFromBase(base); + bool parsedSuccessfully = false; + std::stringstream ss(configurationsString); + std::string line = std::string(); + Level currLevel = Level::Unknown; + std::string currConfigStr = std::string(); + std::string currLevelStr = std::string(); + while (std::getline(ss, line)) { + parsedSuccessfully = parseLine(&line, &currConfigStr, &currLevelStr, &currLevel, sender); + ELPP_ASSERT(parsedSuccessfully, "Unable to parse configuration line: " << line); } - *line = line->substr(0, foundAt); - } + return parsedSuccessfully; } -bool Configurations::Parser::isLevel(const std::string& line) { - return base::utils::Str::startsWith(line, std::string(base::consts::kConfigurationLevel)); -} - -bool Configurations::Parser::isComment(const std::string& line) { - return base::utils::Str::startsWith(line, std::string(base::consts::kConfigurationComment)); -} - -bool Configurations::Parser::isConfig(const std::string& line) { - std::size_t assignment = line.find('='); - return line != "" && - ((line[0] >= 'A' && line[0] <= 'Z') || (line[0] >= 'a' && line[0] <= 'z')) && - (assignment != std::string::npos) && - (line.size() > assignment); -} - -bool Configurations::Parser::parseLine(std::string* line, std::string* currConfigStr, std::string* currLevelStr, - Level* currLevel, - Configurations* conf) { - ConfigurationType currConfig = ConfigurationType::Unknown; - std::string currValue = std::string(); - *line = base::utils::Str::trim(*line); - if (isComment(*line)) return true; - ignoreComments(line); - *line = base::utils::Str::trim(*line); - if (line->empty()) { - // Comment ignored - return true; - } - if (isLevel(*line)) { - if (line->size() <= 2) { - return true; - } - *currLevelStr = line->substr(1, line->size() - 2); - *currLevelStr = base::utils::Str::toUpper(*currLevelStr); - *currLevelStr = base::utils::Str::trim(*currLevelStr); - 
*currLevel = LevelHelper::convertFromString(currLevelStr->c_str()); - return true; - } - if (isConfig(*line)) { - std::size_t assignment = line->find('='); - *currConfigStr = line->substr(0, assignment); - *currConfigStr = base::utils::Str::toUpper(*currConfigStr); - *currConfigStr = base::utils::Str::trim(*currConfigStr); - currConfig = ConfigurationTypeHelper::convertFromString(currConfigStr->c_str()); - currValue = line->substr(assignment + 1); - currValue = base::utils::Str::trim(currValue); - std::size_t quotesStart = currValue.find("\"", 0); +void +Configurations::Parser::ignoreComments(std::string* line) { + std::size_t foundAt = 0; + std::size_t quotesStart = line->find("\""); std::size_t quotesEnd = std::string::npos; if (quotesStart != std::string::npos) { - quotesEnd = currValue.find("\"", quotesStart + 1); - while (quotesEnd != std::string::npos && currValue.at(quotesEnd - 1) == '\\') { - currValue = currValue.erase(quotesEnd - 1, 1); - quotesEnd = currValue.find("\"", quotesEnd + 2); - } + quotesEnd = line->find("\"", quotesStart + 1); + while (quotesEnd != std::string::npos && line->at(quotesEnd - 1) == '\\') { + // Do not erase slash yet - we will erase it in parseLine(..) while loop + quotesEnd = line->find("\"", quotesEnd + 2); + } } - if (quotesStart != std::string::npos && quotesEnd != std::string::npos) { - // Quote provided - check and strip if valid - ELPP_ASSERT((quotesStart < quotesEnd), "Configuration error - No ending quote found in [" - << currConfigStr << "]"); - ELPP_ASSERT((quotesStart + 1 != quotesEnd), "Empty configuration value for [" << currConfigStr << "]"); - if ((quotesStart != quotesEnd) && (quotesStart + 1 != quotesEnd)) { - // Explicit check in case if assertion is disabled - currValue = currValue.substr(quotesStart + 1, quotesEnd - 1); - } + if ((foundAt = line->find(base::consts::kConfigurationComment)) != std::string::npos) { + if (foundAt < quotesEnd) { + foundAt = line->find(base::consts::kConfigurationComment, quotesEnd + 1); + } + *line = line->substr(0, foundAt); } - } - ELPP_ASSERT(*currLevel != Level::Unknown, "Unrecognized severity level [" << *currLevelStr << "]"); - ELPP_ASSERT(currConfig != ConfigurationType::Unknown, "Unrecognized configuration [" << *currConfigStr << "]"); - if (*currLevel == Level::Unknown || currConfig == ConfigurationType::Unknown) { - return false; // unrecognizable level or config - } - conf->set(*currLevel, currConfig, currValue); - return true; } -void Configurations::unsafeSetIfNotExist(Level level, ConfigurationType configurationType, const std::string& value) { - Configuration* conf = RegistryWithPred::get(level, configurationType); - if (conf == nullptr) { - unsafeSet(level, configurationType, value); - } +bool +Configurations::Parser::isLevel(const std::string& line) { + return base::utils::Str::startsWith(line, std::string(base::consts::kConfigurationLevel)); } -void Configurations::unsafeSet(Level level, ConfigurationType configurationType, const std::string& value) { - Configuration* conf = RegistryWithPred::get(level, configurationType); - if (conf == nullptr) { - registerNew(new Configuration(level, configurationType, value)); - } else { - conf->setValue(value); - } - if (level == Level::Global) { - unsafeSetGlobally(configurationType, value, false); - } +bool +Configurations::Parser::isComment(const std::string& line) { + return base::utils::Str::startsWith(line, std::string(base::consts::kConfigurationComment)); } -void Configurations::setGlobally(ConfigurationType configurationType, const 
std::string& value, - bool includeGlobalLevel) { - if (includeGlobalLevel) { - set(Level::Global, configurationType, value); - } - base::type::EnumType lIndex = LevelHelper::kMinValid; - LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { - set(LevelHelper::castFromInt(lIndex), configurationType, value); - return false; // Do not break lambda function yet as we need to set all levels regardless - }); +bool +Configurations::Parser::isConfig(const std::string& line) { + std::size_t assignment = line.find('='); + return line != "" && ((line[0] >= 'A' && line[0] <= 'Z') || (line[0] >= 'a' && line[0] <= 'z')) && + (assignment != std::string::npos) && (line.size() > assignment); } -void Configurations::unsafeSetGlobally(ConfigurationType configurationType, const std::string& value, - bool includeGlobalLevel) { - if (includeGlobalLevel) { - unsafeSet(Level::Global, configurationType, value); - } - base::type::EnumType lIndex = LevelHelper::kMinValid; - LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { - unsafeSet(LevelHelper::castFromInt(lIndex), configurationType, value); - return false; // Do not break lambda function yet as we need to set all levels regardless - }); +bool +Configurations::Parser::parseLine(std::string* line, std::string* currConfigStr, std::string* currLevelStr, + Level* currLevel, Configurations* conf) { + ConfigurationType currConfig = ConfigurationType::Unknown; + std::string currValue = std::string(); + *line = base::utils::Str::trim(*line); + if (isComment(*line)) + return true; + ignoreComments(line); + *line = base::utils::Str::trim(*line); + if (line->empty()) { + // Comment ignored + return true; + } + if (isLevel(*line)) { + if (line->size() <= 2) { + return true; + } + *currLevelStr = line->substr(1, line->size() - 2); + *currLevelStr = base::utils::Str::toUpper(*currLevelStr); + *currLevelStr = base::utils::Str::trim(*currLevelStr); + *currLevel = LevelHelper::convertFromString(currLevelStr->c_str()); + return true; + } + if (isConfig(*line)) { + std::size_t assignment = line->find('='); + *currConfigStr = line->substr(0, assignment); + *currConfigStr = base::utils::Str::toUpper(*currConfigStr); + *currConfigStr = base::utils::Str::trim(*currConfigStr); + currConfig = ConfigurationTypeHelper::convertFromString(currConfigStr->c_str()); + currValue = line->substr(assignment + 1); + currValue = base::utils::Str::trim(currValue); + std::size_t quotesStart = currValue.find("\"", 0); + std::size_t quotesEnd = std::string::npos; + if (quotesStart != std::string::npos) { + quotesEnd = currValue.find("\"", quotesStart + 1); + while (quotesEnd != std::string::npos && currValue.at(quotesEnd - 1) == '\\') { + currValue = currValue.erase(quotesEnd - 1, 1); + quotesEnd = currValue.find("\"", quotesEnd + 2); + } + } + if (quotesStart != std::string::npos && quotesEnd != std::string::npos) { + // Quote provided - check and strip if valid + ELPP_ASSERT((quotesStart < quotesEnd), + "Configuration error - No ending quote found in [" << currConfigStr << "]"); + ELPP_ASSERT((quotesStart + 1 != quotesEnd), "Empty configuration value for [" << currConfigStr << "]"); + if ((quotesStart != quotesEnd) && (quotesStart + 1 != quotesEnd)) { + // Explicit check in case if assertion is disabled + currValue = currValue.substr(quotesStart + 1, quotesEnd - 1); + } + } + } + ELPP_ASSERT(*currLevel != Level::Unknown, "Unrecognized severity level [" << *currLevelStr << "]"); + ELPP_ASSERT(currConfig != ConfigurationType::Unknown, "Unrecognized configuration [" << *currConfigStr << "]"); + if 
(*currLevel == Level::Unknown || currConfig == ConfigurationType::Unknown) { + return false; // unrecognizable level or config + } + conf->set(*currLevel, currConfig, currValue); + return true; +} + +void +Configurations::unsafeSetIfNotExist(Level level, ConfigurationType configurationType, const std::string& value) { + Configuration* conf = RegistryWithPred::get(level, configurationType); + if (conf == nullptr) { + unsafeSet(level, configurationType, value); + } +} + +void +Configurations::unsafeSet(Level level, ConfigurationType configurationType, const std::string& value) { + Configuration* conf = RegistryWithPred::get(level, configurationType); + if (conf == nullptr) { + registerNew(new Configuration(level, configurationType, value)); + } else { + conf->setValue(value); + } + if (level == Level::Global) { + unsafeSetGlobally(configurationType, value, false); + } +} + +void +Configurations::setGlobally(ConfigurationType configurationType, const std::string& value, bool includeGlobalLevel) { + if (includeGlobalLevel) { + set(Level::Global, configurationType, value); + } + base::type::EnumType lIndex = LevelHelper::kMinValid; + LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { + set(LevelHelper::castFromInt(lIndex), configurationType, value); + return false; // Do not break lambda function yet as we need to set all levels regardless + }); +} + +void +Configurations::unsafeSetGlobally(ConfigurationType configurationType, const std::string& value, + bool includeGlobalLevel) { + if (includeGlobalLevel) { + unsafeSet(Level::Global, configurationType, value); + } + base::type::EnumType lIndex = LevelHelper::kMinValid; + LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { + unsafeSet(LevelHelper::castFromInt(lIndex), configurationType, value); + return false; // Do not break lambda function yet as we need to set all levels regardless + }); } // LogBuilder -void LogBuilder::convertToColoredOutput(base::type::string_t* logLine, Level level) { - if (!m_termSupportsColor) return; - const base::type::char_t* resetColor = ELPP_LITERAL("\x1b[0m"); - if (level == Level::Error || level == Level::Fatal) - *logLine = ELPP_LITERAL("\x1b[31m") + *logLine + resetColor; - else if (level == Level::Warning) - *logLine = ELPP_LITERAL("\x1b[33m") + *logLine + resetColor; - else if (level == Level::Debug) - *logLine = ELPP_LITERAL("\x1b[32m") + *logLine + resetColor; - else if (level == Level::Info) - *logLine = ELPP_LITERAL("\x1b[36m") + *logLine + resetColor; - else if (level == Level::Trace) - *logLine = ELPP_LITERAL("\x1b[35m") + *logLine + resetColor; +void +LogBuilder::convertToColoredOutput(base::type::string_t* logLine, Level level) { + if (!m_termSupportsColor) + return; + const base::type::char_t* resetColor = ELPP_LITERAL("\x1b[0m"); + if (level == Level::Error || level == Level::Fatal) + *logLine = ELPP_LITERAL("\x1b[31m") + *logLine + resetColor; + else if (level == Level::Warning) + *logLine = ELPP_LITERAL("\x1b[33m") + *logLine + resetColor; + else if (level == Level::Debug) + *logLine = ELPP_LITERAL("\x1b[32m") + *logLine + resetColor; + else if (level == Level::Info) + *logLine = ELPP_LITERAL("\x1b[36m") + *logLine + resetColor; + else if (level == Level::Trace) + *logLine = ELPP_LITERAL("\x1b[35m") + *logLine + resetColor; } // Logger -Logger::Logger(const std::string& id, base::LogStreamsReferenceMap* logStreamsReference) : - m_id(id), - m_typedConfigurations(nullptr), - m_parentApplicationName(std::string()), - m_isConfigured(false), - m_logStreamsReference(logStreamsReference) { - 
initUnflushedCount(); +Logger::Logger(const std::string& id, base::LogStreamsReferenceMap* logStreamsReference) + : m_id(id), + m_typedConfigurations(nullptr), + m_parentApplicationName(std::string()), + m_isConfigured(false), + m_logStreamsReference(logStreamsReference) { + initUnflushedCount(); } Logger::Logger(const std::string& id, const Configurations& configurations, - base::LogStreamsReferenceMap* logStreamsReference) : - m_id(id), - m_typedConfigurations(nullptr), - m_parentApplicationName(std::string()), - m_isConfigured(false), - m_logStreamsReference(logStreamsReference) { - initUnflushedCount(); - configure(configurations); + base::LogStreamsReferenceMap* logStreamsReference) + : m_id(id), + m_typedConfigurations(nullptr), + m_parentApplicationName(std::string()), + m_isConfigured(false), + m_logStreamsReference(logStreamsReference) { + initUnflushedCount(); + configure(configurations); } Logger::Logger(const Logger& logger) { - base::utils::safeDelete(m_typedConfigurations); - m_id = logger.m_id; - m_typedConfigurations = logger.m_typedConfigurations; - m_parentApplicationName = logger.m_parentApplicationName; - m_isConfigured = logger.m_isConfigured; - m_configurations = logger.m_configurations; - m_unflushedCount = logger.m_unflushedCount; - m_logStreamsReference = logger.m_logStreamsReference; -} - -Logger& Logger::operator=(const Logger& logger) { - if (&logger != this) { base::utils::safeDelete(m_typedConfigurations); m_id = logger.m_id; m_typedConfigurations = logger.m_typedConfigurations; @@ -640,84 +655,104 @@ Logger& Logger::operator=(const Logger& logger) { m_configurations = logger.m_configurations; m_unflushedCount = logger.m_unflushedCount; m_logStreamsReference = logger.m_logStreamsReference; - } - return *this; } -void Logger::configure(const Configurations& configurations) { - m_isConfigured = false; // we set it to false in case if we fail - initUnflushedCount(); - if (m_typedConfigurations != nullptr) { - Configurations* c = const_cast(m_typedConfigurations->configurations()); - if (c->hasConfiguration(Level::Global, ConfigurationType::Filename)) { - flush(); +Logger& +Logger::operator=(const Logger& logger) { + if (&logger != this) { + base::utils::safeDelete(m_typedConfigurations); + m_id = logger.m_id; + m_typedConfigurations = logger.m_typedConfigurations; + m_parentApplicationName = logger.m_parentApplicationName; + m_isConfigured = logger.m_isConfigured; + m_configurations = logger.m_configurations; + m_unflushedCount = logger.m_unflushedCount; + m_logStreamsReference = logger.m_logStreamsReference; } - } - base::threading::ScopedLock scopedLock(lock()); - if (m_configurations != configurations) { - m_configurations.setFromBase(const_cast(&configurations)); - } - base::utils::safeDelete(m_typedConfigurations); - m_typedConfigurations = new base::TypedConfigurations(&m_configurations, m_logStreamsReference); - resolveLoggerFormatSpec(); - m_isConfigured = true; + return *this; } -void Logger::reconfigure(void) { - ELPP_INTERNAL_INFO(1, "Reconfiguring logger [" << m_id << "]"); - configure(m_configurations); -} - -bool Logger::isValidId(const std::string& id) { - for (std::string::const_iterator it = id.begin(); it != id.end(); ++it) { - if (!base::utils::Str::contains(base::consts::kValidLoggerIdSymbols, *it)) { - return false; +void +Logger::configure(const Configurations& configurations) { + m_isConfigured = false; // we set it to false in case if we fail + initUnflushedCount(); + if (m_typedConfigurations != nullptr) { + Configurations* c = 
const_cast(m_typedConfigurations->configurations()); + if (c->hasConfiguration(Level::Global, ConfigurationType::Filename)) { + flush(); + } } - } - return true; -} - -void Logger::flush(void) { - ELPP_INTERNAL_INFO(3, "Flushing logger [" << m_id << "] all levels"); - base::threading::ScopedLock scopedLock(lock()); - base::type::EnumType lIndex = LevelHelper::kMinValid; - LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { - flush(LevelHelper::castFromInt(lIndex), nullptr); - return false; - }); -} - -void Logger::flush(Level level, base::type::fstream_t* fs) { - if (fs == nullptr && m_typedConfigurations->toFile(level)) { - fs = m_typedConfigurations->fileStream(level); - } - if (fs != nullptr) { - fs->flush(); - std::unordered_map::iterator iter = m_unflushedCount.find(level); - if (iter != m_unflushedCount.end()) { - iter->second = 0; + base::threading::ScopedLock scopedLock(lock()); + if (m_configurations != configurations) { + m_configurations.setFromBase(const_cast(&configurations)); } - Helpers::validateFileRolling(this, level); - } + base::utils::safeDelete(m_typedConfigurations); + m_typedConfigurations = new base::TypedConfigurations(&m_configurations, m_logStreamsReference); + resolveLoggerFormatSpec(); + m_isConfigured = true; } -void Logger::initUnflushedCount(void) { - m_unflushedCount.clear(); - base::type::EnumType lIndex = LevelHelper::kMinValid; - LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { - m_unflushedCount.insert(std::make_pair(LevelHelper::castFromInt(lIndex), 0)); - return false; - }); +void +Logger::reconfigure(void) { + ELPP_INTERNAL_INFO(1, "Reconfiguring logger [" << m_id << "]"); + configure(m_configurations); } -void Logger::resolveLoggerFormatSpec(void) const { - base::type::EnumType lIndex = LevelHelper::kMinValid; - LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { - base::LogFormat* logFormat = - const_cast(&m_typedConfigurations->logFormat(LevelHelper::castFromInt(lIndex))); - base::utils::Str::replaceFirstWithEscape(logFormat->m_format, base::consts::kLoggerIdFormatSpecifier, m_id); - return false; - }); +bool +Logger::isValidId(const std::string& id) { + for (std::string::const_iterator it = id.begin(); it != id.end(); ++it) { + if (!base::utils::Str::contains(base::consts::kValidLoggerIdSymbols, *it)) { + return false; + } + } + return true; +} + +void +Logger::flush(void) { + ELPP_INTERNAL_INFO(3, "Flushing logger [" << m_id << "] all levels"); + base::threading::ScopedLock scopedLock(lock()); + base::type::EnumType lIndex = LevelHelper::kMinValid; + LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { + flush(LevelHelper::castFromInt(lIndex), nullptr); + return false; + }); +} + +void +Logger::flush(Level level, base::type::fstream_t* fs) { + if (fs == nullptr && m_typedConfigurations->toFile(level)) { + fs = m_typedConfigurations->fileStream(level); + } + if (fs != nullptr) { + fs->flush(); + std::unordered_map::iterator iter = m_unflushedCount.find(level); + if (iter != m_unflushedCount.end()) { + iter->second = 0; + } + Helpers::validateFileRolling(this, level); + } +} + +void +Logger::initUnflushedCount(void) { + m_unflushedCount.clear(); + base::type::EnumType lIndex = LevelHelper::kMinValid; + LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { + m_unflushedCount.insert(std::make_pair(LevelHelper::castFromInt(lIndex), 0)); + return false; + }); +} + +void +Logger::resolveLoggerFormatSpec(void) const { + base::type::EnumType lIndex = LevelHelper::kMinValid; + LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { + 
base::LogFormat* logFormat = + const_cast(&m_typedConfigurations->logFormat(LevelHelper::castFromInt(lIndex))); + base::utils::Str::replaceFirstWithEscape(logFormat->m_format, base::consts::kLoggerIdFormatSpecifier, m_id); + return false; + }); } // el::base @@ -728,305 +763,323 @@ namespace utils { // File -base::type::fstream_t* File::newFileStream(const std::string& filename) { - base::type::fstream_t *fs = new base::type::fstream_t(filename.c_str(), - base::type::fstream_t::out +base::type::fstream_t* +File::newFileStream(const std::string& filename) { + base::type::fstream_t* fs = new base::type::fstream_t(filename.c_str(), base::type::fstream_t::out #if !defined(ELPP_FRESH_LOG_FILE) - | base::type::fstream_t::app + | base::type::fstream_t::app #endif - ); + ); #if defined(ELPP_UNICODE) - std::locale elppUnicodeLocale(""); -# if ELPP_OS_WINDOWS - std::locale elppUnicodeLocaleWindows(elppUnicodeLocale, new std::codecvt_utf8_utf16); - elppUnicodeLocale = elppUnicodeLocaleWindows; -# endif // ELPP_OS_WINDOWS - fs->imbue(elppUnicodeLocale); + std::locale elppUnicodeLocale(""); +#if ELPP_OS_WINDOWS + std::locale elppUnicodeLocaleWindows(elppUnicodeLocale, new std::codecvt_utf8_utf16); + elppUnicodeLocale = elppUnicodeLocaleWindows; +#endif // ELPP_OS_WINDOWS + fs->imbue(elppUnicodeLocale); #endif // defined(ELPP_UNICODE) - if (fs->is_open()) { - fs->flush(); - } else { - base::utils::safeDelete(fs); - ELPP_INTERNAL_ERROR("Bad file [" << filename << "]", true); - } - return fs; + if (fs->is_open()) { + fs->flush(); + } else { + base::utils::safeDelete(fs); + ELPP_INTERNAL_ERROR("Bad file [" << filename << "]", true); + } + return fs; } -std::size_t File::getSizeOfFile(base::type::fstream_t* fs) { - if (fs == nullptr) { - return 0; - } - // Since the file stream is appended to or truncated, the current - // offset is the file size. - std::size_t size = static_cast(fs->tellg()); - return size; +std::size_t +File::getSizeOfFile(base::type::fstream_t* fs) { + if (fs == nullptr) { + return 0; + } + // Since the file stream is appended to or truncated, the current + // offset is the file size. + std::size_t size = static_cast(fs->tellg()); + return size; } -bool File::pathExists(const char* path, bool considerFile) { - if (path == nullptr) { - return false; - } +bool +File::pathExists(const char* path, bool considerFile) { + if (path == nullptr) { + return false; + } #if ELPP_OS_UNIX - ELPP_UNUSED(considerFile); - struct stat st; - return (stat(path, &st) == 0); + ELPP_UNUSED(considerFile); + struct stat st; + return (stat(path, &st) == 0); #elif ELPP_OS_WINDOWS - DWORD fileType = GetFileAttributesA(path); - if (fileType == INVALID_FILE_ATTRIBUTES) { - return false; - } - return considerFile ? true : ((fileType & FILE_ATTRIBUTE_DIRECTORY) == 0 ? false : true); + DWORD fileType = GetFileAttributesA(path); + if (fileType == INVALID_FILE_ATTRIBUTES) { + return false; + } + return considerFile ? true : ((fileType & FILE_ATTRIBUTE_DIRECTORY) == 0 ? 
false : true); #endif // ELPP_OS_UNIX } -bool File::createPath(const std::string& path) { - if (path.empty()) { - return false; - } - if (base::utils::File::pathExists(path.c_str())) { +bool +File::createPath(const std::string& path) { + if (path.empty()) { + return false; + } + if (base::utils::File::pathExists(path.c_str())) { + return true; + } + int status = -1; + + char* currPath = const_cast(path.c_str()); + std::string builtPath = std::string(); +#if ELPP_OS_UNIX + if (path[0] == '/') { + builtPath = "/"; + } + currPath = STRTOK(currPath, base::consts::kFilePathSeperator, 0); +#elif ELPP_OS_WINDOWS + // Use secure functions API + char* nextTok_ = nullptr; + currPath = STRTOK(currPath, base::consts::kFilePathSeperator, &nextTok_); + ELPP_UNUSED(nextTok_); +#endif // ELPP_OS_UNIX + while (currPath != nullptr) { + builtPath.append(currPath); + builtPath.append(base::consts::kFilePathSeperator); +#if ELPP_OS_UNIX + status = mkdir(builtPath.c_str(), ELPP_LOG_PERMS); + currPath = STRTOK(nullptr, base::consts::kFilePathSeperator, 0); +#elif ELPP_OS_WINDOWS + status = _mkdir(builtPath.c_str()); + currPath = STRTOK(nullptr, base::consts::kFilePathSeperator, &nextTok_); +#endif // ELPP_OS_UNIX + } + if (status == -1) { + ELPP_INTERNAL_ERROR("Error while creating path [" << path << "]", true); + return false; + } return true; - } - int status = -1; - - char* currPath = const_cast(path.c_str()); - std::string builtPath = std::string(); -#if ELPP_OS_UNIX - if (path[0] == '/') { - builtPath = "/"; - } - currPath = STRTOK(currPath, base::consts::kFilePathSeperator, 0); -#elif ELPP_OS_WINDOWS - // Use secure functions API - char* nextTok_ = nullptr; - currPath = STRTOK(currPath, base::consts::kFilePathSeperator, &nextTok_); - ELPP_UNUSED(nextTok_); -#endif // ELPP_OS_UNIX - while (currPath != nullptr) { - builtPath.append(currPath); - builtPath.append(base::consts::kFilePathSeperator); -#if ELPP_OS_UNIX - status = mkdir(builtPath.c_str(), ELPP_LOG_PERMS); - currPath = STRTOK(nullptr, base::consts::kFilePathSeperator, 0); -#elif ELPP_OS_WINDOWS - status = _mkdir(builtPath.c_str()); - currPath = STRTOK(nullptr, base::consts::kFilePathSeperator, &nextTok_); -#endif // ELPP_OS_UNIX - } - if (status == -1) { - ELPP_INTERNAL_ERROR("Error while creating path [" << path << "]", true); - return false; - } - return true; } -std::string File::extractPathFromFilename(const std::string& fullPath, const char* separator) { - if ((fullPath == "") || (fullPath.find(separator) == std::string::npos)) { - return fullPath; - } - std::size_t lastSlashAt = fullPath.find_last_of(separator); - if (lastSlashAt == 0) { - return std::string(separator); - } - return fullPath.substr(0, lastSlashAt + 1); -} - -void File::buildStrippedFilename(const char* filename, char buff[], std::size_t limit) { - std::size_t sizeOfFilename = strlen(filename); - if (sizeOfFilename >= limit) { - filename += (sizeOfFilename - limit); - if (filename[0] != '.' && filename[1] != '.') { // prepend if not already - filename += 3; // 3 = '..' 
- STRCAT(buff, "..", limit); +std::string +File::extractPathFromFilename(const std::string& fullPath, const char* separator) { + if ((fullPath == "") || (fullPath.find(separator) == std::string::npos)) { + return fullPath; } - } - STRCAT(buff, filename, limit); + std::size_t lastSlashAt = fullPath.find_last_of(separator); + if (lastSlashAt == 0) { + return std::string(separator); + } + return fullPath.substr(0, lastSlashAt + 1); } -void File::buildBaseFilename(const std::string& fullPath, char buff[], std::size_t limit, const char* separator) { - const char *filename = fullPath.c_str(); - std::size_t lastSlashAt = fullPath.find_last_of(separator); - filename += lastSlashAt ? lastSlashAt+1 : 0; - std::size_t sizeOfFilename = strlen(filename); - if (sizeOfFilename >= limit) { - filename += (sizeOfFilename - limit); - if (filename[0] != '.' && filename[1] != '.') { // prepend if not already - filename += 3; // 3 = '..' - STRCAT(buff, "..", limit); +void +File::buildStrippedFilename(const char* filename, char buff[], std::size_t limit) { + std::size_t sizeOfFilename = strlen(filename); + if (sizeOfFilename >= limit) { + filename += (sizeOfFilename - limit); + if (filename[0] != '.' && filename[1] != '.') { // prepend if not already + filename += 3; // 3 = '..' + STRCAT(buff, "..", limit); + } } - } - STRCAT(buff, filename, limit); + STRCAT(buff, filename, limit); +} + +void +File::buildBaseFilename(const std::string& fullPath, char buff[], std::size_t limit, const char* separator) { + const char* filename = fullPath.c_str(); + std::size_t lastSlashAt = fullPath.find_last_of(separator); + filename += lastSlashAt ? lastSlashAt + 1 : 0; + std::size_t sizeOfFilename = strlen(filename); + if (sizeOfFilename >= limit) { + filename += (sizeOfFilename - limit); + if (filename[0] != '.' && filename[1] != '.') { // prepend if not already + filename += 3; // 3 = '..' 
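+            // Keep only the tail of an over-long name; the ".." appended below marks the truncation.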
+ STRCAT(buff, "..", limit); + } + } + STRCAT(buff, filename, limit); } // Str -bool Str::wildCardMatch(const char* str, const char* pattern) { - while (*pattern) { - switch (*pattern) { - case '?': - if (!*str) - return false; - ++str; - ++pattern; - break; - case '*': - if (wildCardMatch(str, pattern + 1)) - return true; - if (*str && wildCardMatch(str + 1, pattern)) - return true; - return false; - default: - if (*str++ != *pattern++) - return false; - break; +bool +Str::wildCardMatch(const char* str, const char* pattern) { + while (*pattern) { + switch (*pattern) { + case '?': + if (!*str) + return false; + ++str; + ++pattern; + break; + case '*': + if (wildCardMatch(str, pattern + 1)) + return true; + if (*str && wildCardMatch(str + 1, pattern)) + return true; + return false; + default: + if (*str++ != *pattern++) + return false; + break; + } } - } - return !*str && !*pattern; + return !*str && !*pattern; } -std::string& Str::ltrim(std::string& str) { - str.erase(str.begin(), std::find_if(str.begin(), str.end(), [](char c) { - return !std::isspace(c); - } )); - return str; -} - -std::string& Str::rtrim(std::string& str) { - str.erase(std::find_if(str.rbegin(), str.rend(), [](char c) { - return !std::isspace(c); - }).base(), str.end()); - return str; -} - -std::string& Str::trim(std::string& str) { - return ltrim(rtrim(str)); -} - -bool Str::startsWith(const std::string& str, const std::string& start) { - return (str.length() >= start.length()) && (str.compare(0, start.length(), start) == 0); -} - -bool Str::endsWith(const std::string& str, const std::string& end) { - return (str.length() >= end.length()) && (str.compare(str.length() - end.length(), end.length(), end) == 0); -} - -std::string& Str::replaceAll(std::string& str, char replaceWhat, char replaceWith) { - std::replace(str.begin(), str.end(), replaceWhat, replaceWith); - return str; -} - -std::string& Str::replaceAll(std::string& str, const std::string& replaceWhat, - const std::string& replaceWith) { - if (replaceWhat == replaceWith) +std::string& +Str::ltrim(std::string& str) { + str.erase(str.begin(), std::find_if(str.begin(), str.end(), [](char c) { return !std::isspace(c); })); return str; - std::size_t foundAt = std::string::npos; - while ((foundAt = str.find(replaceWhat, foundAt + 1)) != std::string::npos) { - str.replace(foundAt, replaceWhat.length(), replaceWith); - } - return str; } -void Str::replaceFirstWithEscape(base::type::string_t& str, const base::type::string_t& replaceWhat, - const base::type::string_t& replaceWith) { - std::size_t foundAt = base::type::string_t::npos; - while ((foundAt = str.find(replaceWhat, foundAt + 1)) != base::type::string_t::npos) { - if (foundAt > 0 && str[foundAt - 1] == base::consts::kFormatSpecifierChar) { - str.erase(foundAt - 1, 1); - ++foundAt; - } else { - str.replace(foundAt, replaceWhat.length(), replaceWith); - return; +std::string& +Str::rtrim(std::string& str) { + str.erase(std::find_if(str.rbegin(), str.rend(), [](char c) { return !std::isspace(c); }).base(), str.end()); + return str; +} + +std::string& +Str::trim(std::string& str) { + return ltrim(rtrim(str)); +} + +bool +Str::startsWith(const std::string& str, const std::string& start) { + return (str.length() >= start.length()) && (str.compare(0, start.length(), start) == 0); +} + +bool +Str::endsWith(const std::string& str, const std::string& end) { + return (str.length() >= end.length()) && (str.compare(str.length() - end.length(), end.length(), end) == 0); +} + +std::string& +Str::replaceAll(std::string& str, char 
replaceWhat, char replaceWith) { + std::replace(str.begin(), str.end(), replaceWhat, replaceWith); + return str; +} + +std::string& +Str::replaceAll(std::string& str, const std::string& replaceWhat, const std::string& replaceWith) { + if (replaceWhat == replaceWith) + return str; + std::size_t foundAt = std::string::npos; + while ((foundAt = str.find(replaceWhat, foundAt + 1)) != std::string::npos) { + str.replace(foundAt, replaceWhat.length(), replaceWith); + } + return str; +} + +void +Str::replaceFirstWithEscape(base::type::string_t& str, const base::type::string_t& replaceWhat, + const base::type::string_t& replaceWith) { + std::size_t foundAt = base::type::string_t::npos; + while ((foundAt = str.find(replaceWhat, foundAt + 1)) != base::type::string_t::npos) { + if (foundAt > 0 && str[foundAt - 1] == base::consts::kFormatSpecifierChar) { + str.erase(foundAt - 1, 1); + ++foundAt; + } else { + str.replace(foundAt, replaceWhat.length(), replaceWith); + return; + } } - } } #if defined(ELPP_UNICODE) -void Str::replaceFirstWithEscape(base::type::string_t& str, const base::type::string_t& replaceWhat, - const std::string& replaceWith) { - replaceFirstWithEscape(str, replaceWhat, base::type::string_t(replaceWith.begin(), replaceWith.end())); +void +Str::replaceFirstWithEscape(base::type::string_t& str, const base::type::string_t& replaceWhat, + const std::string& replaceWith) { + replaceFirstWithEscape(str, replaceWhat, base::type::string_t(replaceWith.begin(), replaceWith.end())); } #endif // defined(ELPP_UNICODE) -std::string& Str::toUpper(std::string& str) { - std::transform(str.begin(), str.end(), str.begin(), - [](char c) { - return static_cast(::toupper(c)); - }); - return str; +std::string& +Str::toUpper(std::string& str) { + std::transform(str.begin(), str.end(), str.begin(), [](char c) { return static_cast(::toupper(c)); }); + return str; } -bool Str::cStringEq(const char* s1, const char* s2) { - if (s1 == nullptr && s2 == nullptr) return true; - if (s1 == nullptr || s2 == nullptr) return false; - return strcmp(s1, s2) == 0; +bool +Str::cStringEq(const char* s1, const char* s2) { + if (s1 == nullptr && s2 == nullptr) + return true; + if (s1 == nullptr || s2 == nullptr) + return false; + return strcmp(s1, s2) == 0; } -bool Str::cStringCaseEq(const char* s1, const char* s2) { - if (s1 == nullptr && s2 == nullptr) return true; - if (s1 == nullptr || s2 == nullptr) return false; +bool +Str::cStringCaseEq(const char* s1, const char* s2) { + if (s1 == nullptr && s2 == nullptr) + return true; + if (s1 == nullptr || s2 == nullptr) + return false; - // With thanks to cygwin for this code - int d = 0; + // With thanks to cygwin for this code + int d = 0; - while (true) { - const int c1 = toupper(*s1++); - const int c2 = toupper(*s2++); + while (true) { + const int c1 = toupper(*s1++); + const int c2 = toupper(*s2++); - if (((d = c1 - c2) != 0) || (c2 == '\0')) { - break; + if (((d = c1 - c2) != 0) || (c2 == '\0')) { + break; + } } - } - return d == 0; + return d == 0; } -bool Str::contains(const char* str, char c) { - for (; *str; ++str) { - if (*str == c) - return true; - } - return false; +bool +Str::contains(const char* str, char c) { + for (; *str; ++str) { + if (*str == c) + return true; + } + return false; } -char* Str::convertAndAddToBuff(std::size_t n, int len, char* buf, const char* bufLim, bool zeroPadded) { - char localBuff[10] = ""; - char* p = localBuff + sizeof(localBuff) - 2; - if (n > 0) { - for (; n > 0 && p > localBuff && len > 0; n /= 10, --len) - *--p = static_cast(n % 10 + 
'0');
-  } else {
-    *--p = '0';
-    --len;
-  }
-  if (zeroPadded)
-    while (p > localBuff && len-- > 0) *--p = static_cast<char>('0');
-  return addToBuff(p, buf, bufLim);
+char*
+Str::convertAndAddToBuff(std::size_t n, int len, char* buf, const char* bufLim, bool zeroPadded) {
+    char localBuff[10] = "";
+    char* p = localBuff + sizeof(localBuff) - 2;
+    if (n > 0) {
+        for (; n > 0 && p > localBuff && len > 0; n /= 10, --len) *--p = static_cast<char>(n % 10 + '0');
+    } else {
+        *--p = '0';
+        --len;
+    }
+    if (zeroPadded)
+        while (p > localBuff && len-- > 0) *--p = static_cast<char>('0');
+    return addToBuff(p, buf, bufLim);
 }
 
-char* Str::addToBuff(const char* str, char* buf, const char* bufLim) {
-  while ((buf < bufLim) && ((*buf = *str++) != '\0'))
-    ++buf;
-  return buf;
+char*
+Str::addToBuff(const char* str, char* buf, const char* bufLim) {
+    while ((buf < bufLim) && ((*buf = *str++) != '\0')) ++buf;
+    return buf;
 }
 
-char* Str::clearBuff(char buff[], std::size_t lim) {
-  STRCPY(buff, "", lim);
-  ELPP_UNUSED(lim);  // For *nix we dont have anything using lim in above STRCPY macro
-  return buff;
+char*
+Str::clearBuff(char buff[], std::size_t lim) {
+    STRCPY(buff, "", lim);
+    ELPP_UNUSED(lim);  // For *nix we don't have anything using lim in the above STRCPY macro
+    return buff;
 }
 
 /// @brief Converts wchar_t* to char*
 /// NOTE: Need to free return value after use!
-char* Str::wcharPtrToCharPtr(const wchar_t* line) {
-  std::size_t len_ = wcslen(line) + 1;
-  char* buff_ = static_cast<char*>(malloc(len_ + 1));
-# if ELPP_OS_UNIX || (ELPP_OS_WINDOWS && !ELPP_CRT_DBG_WARNINGS)
-  std::wcstombs(buff_, line, len_);
-# elif ELPP_OS_WINDOWS
-  std::size_t convCount_ = 0;
-  mbstate_t mbState_;
-  ::memset(static_cast<void*>(&mbState_), 0, sizeof(mbState_));
-  wcsrtombs_s(&convCount_, buff_, len_, &line, len_, &mbState_);
-# endif  // ELPP_OS_UNIX || (ELPP_OS_WINDOWS && !ELPP_CRT_DBG_WARNINGS)
-  return buff_;
+char*
+Str::wcharPtrToCharPtr(const wchar_t* line) {
+    std::size_t len_ = wcslen(line) + 1;
+    char* buff_ = static_cast<char*>(malloc(len_ + 1));
+#if ELPP_OS_UNIX || (ELPP_OS_WINDOWS && !ELPP_CRT_DBG_WARNINGS)
+    std::wcstombs(buff_, line, len_);
+#elif ELPP_OS_WINDOWS
+    std::size_t convCount_ = 0;
+    mbstate_t mbState_;
+    ::memset(static_cast<void*>(&mbState_), 0, sizeof(mbState_));
+    wcsrtombs_s(&convCount_, buff_, len_, &line, len_, &mbState_);
+#endif  // ELPP_OS_UNIX || (ELPP_OS_WINDOWS && !ELPP_CRT_DBG_WARNINGS)
+    return buff_;
 }
 
 // OS
@@ -1036,901 +1089,954 @@ char* Str::wcharPtrToCharPtr(const wchar_t* line) {
 /// We are not using getenv(const char*) because of CRT deprecation
 /// @param varname Variable name to get environment variable value for
 /// @return The value of the variable if it exists, otherwise nullptr
-const char* OS::getWindowsEnvironmentVariable(const char* varname) {
-  const DWORD bufferLen = 50;
-  static char buffer[bufferLen];
-  if (GetEnvironmentVariableA(varname, buffer, bufferLen)) {
-    return buffer;
-  }
-  return nullptr;
+const char*
+OS::getWindowsEnvironmentVariable(const char* varname) {
+    const DWORD bufferLen = 50;
+    static char buffer[bufferLen];
+    if (GetEnvironmentVariableA(varname, buffer, bufferLen)) {
+        return buffer;
+    }
+    return nullptr;
 }
 #endif  // ELPP_OS_WINDOWS
 #if ELPP_OS_ANDROID
-std::string OS::getProperty(const char* prop) {
-  char propVal[PROP_VALUE_MAX + 1];
-  int ret = __system_property_get(prop, propVal);
-  return ret == 0 ?
std::string() : std::string(propVal); +std::string +OS::getProperty(const char* prop) { + char propVal[PROP_VALUE_MAX + 1]; + int ret = __system_property_get(prop, propVal); + return ret == 0 ? std::string() : std::string(propVal); } -std::string OS::getDeviceName(void) { - std::stringstream ss; - std::string manufacturer = getProperty("ro.product.manufacturer"); - std::string model = getProperty("ro.product.model"); - if (manufacturer.empty() || model.empty()) { - return std::string(); - } - ss << manufacturer << "-" << model; - return ss.str(); +std::string +OS::getDeviceName(void) { + std::stringstream ss; + std::string manufacturer = getProperty("ro.product.manufacturer"); + std::string model = getProperty("ro.product.model"); + if (manufacturer.empty() || model.empty()) { + return std::string(); + } + ss << manufacturer << "-" << model; + return ss.str(); } #endif // ELPP_OS_ANDROID -const std::string OS::getBashOutput(const char* command) { +const std::string +OS::getBashOutput(const char* command) { #if (ELPP_OS_UNIX && !ELPP_OS_ANDROID && !ELPP_CYGWIN) - if (command == nullptr) { - return std::string(); - } - FILE* proc = nullptr; - if ((proc = popen(command, "r")) == nullptr) { - ELPP_INTERNAL_ERROR("\nUnable to run command [" << command << "]", true); - return std::string(); - } - char hBuff[4096]; - if (fgets(hBuff, sizeof(hBuff), proc) != nullptr) { - pclose(proc); - const std::size_t buffLen = strlen(hBuff); - if (buffLen > 0 && hBuff[buffLen - 1] == '\n') { - hBuff[buffLen - 1] = '\0'; + if (command == nullptr) { + return std::string(); } - return std::string(hBuff); - } else { - pclose(proc); - } - return std::string(); + FILE* proc = nullptr; + if ((proc = popen(command, "r")) == nullptr) { + ELPP_INTERNAL_ERROR("\nUnable to run command [" << command << "]", true); + return std::string(); + } + char hBuff[4096]; + if (fgets(hBuff, sizeof(hBuff), proc) != nullptr) { + pclose(proc); + const std::size_t buffLen = strlen(hBuff); + if (buffLen > 0 && hBuff[buffLen - 1] == '\n') { + hBuff[buffLen - 1] = '\0'; + } + return std::string(hBuff); + } else { + pclose(proc); + } + return std::string(); #else - ELPP_UNUSED(command); - return std::string(); + ELPP_UNUSED(command); + return std::string(); #endif // (ELPP_OS_UNIX && !ELPP_OS_ANDROID && !ELPP_CYGWIN) } -std::string OS::getEnvironmentVariable(const char* variableName, const char* defaultVal, - const char* alternativeBashCommand) { +std::string +OS::getEnvironmentVariable(const char* variableName, const char* defaultVal, const char* alternativeBashCommand) { #if ELPP_OS_UNIX - const char* val = getenv(variableName); + const char* val = getenv(variableName); #elif ELPP_OS_WINDOWS - const char* val = getWindowsEnvironmentVariable(variableName); + const char* val = getWindowsEnvironmentVariable(variableName); #endif // ELPP_OS_UNIX - if ((val == nullptr) || ((strcmp(val, "") == 0))) { + if ((val == nullptr) || ((strcmp(val, "") == 0))) { #if ELPP_OS_UNIX && defined(ELPP_FORCE_ENV_VAR_FROM_BASH) - // Try harder on unix-based systems - std::string valBash = base::utils::OS::getBashOutput(alternativeBashCommand); - if (valBash.empty()) { - return std::string(defaultVal); - } else { - return valBash; - } + // Try harder on unix-based systems + std::string valBash = base::utils::OS::getBashOutput(alternativeBashCommand); + if (valBash.empty()) { + return std::string(defaultVal); + } else { + return valBash; + } #elif ELPP_OS_WINDOWS || ELPP_OS_UNIX - ELPP_UNUSED(alternativeBashCommand); - return std::string(defaultVal); + 
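The lookup order in getEnvironmentVariable above — read the named variable, optionally shell out for a value on unix, then fall back to the caller's default — can be shown in isolation. A minimal standalone sketch assuming only the C++ standard library; get_env_or and the demo values are illustrative names, not part of this patch:

    #include <cstdlib>
    #include <iostream>
    #include <string>

    // Return the value of an environment variable, or a fallback default
    // when it is unset or empty (mirroring the empty-value check above).
    static std::string get_env_or(const char* name, const std::string& fallback) {
        const char* val = std::getenv(name);
        if (val == nullptr || *val == '\0') {
            return fallback;
        }
        return std::string(val);
    }

    int main() {
        // Falls back to "unknown-user" when USER is not exported.
        std::cout << get_env_or("USER", "unknown-user") << '\n';
        return 0;
    }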
ELPP_UNUSED(alternativeBashCommand); + return std::string(defaultVal); #endif // ELPP_OS_UNIX && defined(ELPP_FORCE_ENV_VAR_FROM_BASH) - } - return std::string(val); + } + return std::string(val); } -std::string OS::currentUser(void) { +std::string +OS::currentUser(void) { #if ELPP_OS_UNIX && !ELPP_OS_ANDROID - return getEnvironmentVariable("USER", base::consts::kUnknownUser, "whoami"); + return getEnvironmentVariable("USER", base::consts::kUnknownUser, "whoami"); #elif ELPP_OS_WINDOWS - return getEnvironmentVariable("USERNAME", base::consts::kUnknownUser); + return getEnvironmentVariable("USERNAME", base::consts::kUnknownUser); #elif ELPP_OS_ANDROID - ELPP_UNUSED(base::consts::kUnknownUser); - return std::string("android"); + ELPP_UNUSED(base::consts::kUnknownUser); + return std::string("android"); #else - return std::string(); + return std::string(); #endif // ELPP_OS_UNIX && !ELPP_OS_ANDROID } -std::string OS::currentHost(void) { +std::string +OS::currentHost(void) { #if ELPP_OS_UNIX && !ELPP_OS_ANDROID - return getEnvironmentVariable("HOSTNAME", base::consts::kUnknownHost, "hostname"); + return getEnvironmentVariable("HOSTNAME", base::consts::kUnknownHost, "hostname"); #elif ELPP_OS_WINDOWS - return getEnvironmentVariable("COMPUTERNAME", base::consts::kUnknownHost); + return getEnvironmentVariable("COMPUTERNAME", base::consts::kUnknownHost); #elif ELPP_OS_ANDROID - ELPP_UNUSED(base::consts::kUnknownHost); - return getDeviceName(); + ELPP_UNUSED(base::consts::kUnknownHost); + return getDeviceName(); #else - return std::string(); + return std::string(); #endif // ELPP_OS_UNIX && !ELPP_OS_ANDROID } -bool OS::termSupportsColor(void) { - std::string term = getEnvironmentVariable("TERM", ""); - return term == "xterm" || term == "xterm-color" || term == "xterm-256color" - || term == "screen" || term == "linux" || term == "cygwin" - || term == "screen-256color"; +bool +OS::termSupportsColor(void) { + std::string term = getEnvironmentVariable("TERM", ""); + return term == "xterm" || term == "xterm-color" || term == "xterm-256color" || term == "screen" || + term == "linux" || term == "cygwin" || term == "screen-256color"; } // DateTime -void DateTime::gettimeofday(struct timeval* tv) { +void +DateTime::gettimeofday(struct timeval* tv) { #if ELPP_OS_WINDOWS - if (tv != nullptr) { -# if ELPP_COMPILER_MSVC || defined(_MSC_EXTENSIONS) - const unsigned __int64 delta_ = 11644473600000000Ui64; -# else - const unsigned __int64 delta_ = 11644473600000000ULL; -# endif // ELPP_COMPILER_MSVC || defined(_MSC_EXTENSIONS) - const double secOffSet = 0.000001; - const unsigned long usecOffSet = 1000000; - FILETIME fileTime; - GetSystemTimeAsFileTime(&fileTime); - unsigned __int64 present = 0; - present |= fileTime.dwHighDateTime; - present = present << 32; - present |= fileTime.dwLowDateTime; - present /= 10; // mic-sec - // Subtract the difference - present -= delta_; - tv->tv_sec = static_cast(present * secOffSet); - tv->tv_usec = static_cast(present % usecOffSet); - } + if (tv != nullptr) { +#if ELPP_COMPILER_MSVC || defined(_MSC_EXTENSIONS) + const unsigned __int64 delta_ = 11644473600000000Ui64; #else - ::gettimeofday(tv, nullptr); + const unsigned __int64 delta_ = 11644473600000000ULL; +#endif // ELPP_COMPILER_MSVC || defined(_MSC_EXTENSIONS) + const double secOffSet = 0.000001; + const unsigned long usecOffSet = 1000000; + FILETIME fileTime; + GetSystemTimeAsFileTime(&fileTime); + unsigned __int64 present = 0; + present |= fileTime.dwHighDateTime; + present = present << 32; + present |= 
fileTime.dwLowDateTime;
+ present /= 10; // mic-sec
+ // Subtract the difference
+ present -= delta_;
+ tv->tv_sec = static_cast<long>(present * secOffSet);
+ tv->tv_usec = static_cast<long>(present % usecOffSet);
+ }
+#else
+ ::gettimeofday(tv, nullptr);
 #endif // ELPP_OS_WINDOWS
 }
-std::string DateTime::getDateTime(const char* format, const base::SubsecondPrecision* ssPrec) {
- struct timeval currTime;
- gettimeofday(&currTime);
- return timevalToString(currTime, format, ssPrec);
+std::string
+DateTime::getDateTime(const char* format, const base::SubsecondPrecision* ssPrec) {
+ struct timeval currTime;
+ gettimeofday(&currTime);
+ return timevalToString(currTime, format, ssPrec);
 }
-std::string DateTime::timevalToString(struct timeval tval, const char* format,
- const el::base::SubsecondPrecision* ssPrec) {
- struct ::tm timeInfo;
- buildTimeInfo(&tval, &timeInfo);
- const int kBuffSize = 30;
- char buff_[kBuffSize] = "";
- parseFormat(buff_, kBuffSize, format, &timeInfo, static_cast<std::size_t>(tval.tv_usec / ssPrec->m_offset),
- ssPrec);
- return std::string(buff_);
+std::string
+DateTime::timevalToString(struct timeval tval, const char* format, const el::base::SubsecondPrecision* ssPrec) {
+ struct ::tm timeInfo;
+ buildTimeInfo(&tval, &timeInfo);
+ const int kBuffSize = 30;
+ char buff_[kBuffSize] = "";
+ parseFormat(buff_, kBuffSize, format, &timeInfo, static_cast<std::size_t>(tval.tv_usec / ssPrec->m_offset), ssPrec);
+ return std::string(buff_);
 }
-base::type::string_t DateTime::formatTime(unsigned long long time, base::TimestampUnit timestampUnit) {
- base::type::EnumType start = static_cast<base::type::EnumType>(timestampUnit);
- const base::type::char_t* unit = base::consts::kTimeFormats[start].unit;
- for (base::type::EnumType i = start; i < base::consts::kTimeFormatsCount - 1; ++i) {
- if (time <= base::consts::kTimeFormats[i].value) {
- break;
+base::type::string_t
+DateTime::formatTime(unsigned long long time, base::TimestampUnit timestampUnit) {
+ base::type::EnumType start = static_cast<base::type::EnumType>(timestampUnit);
+ const base::type::char_t* unit = base::consts::kTimeFormats[start].unit;
+ for (base::type::EnumType i = start; i < base::consts::kTimeFormatsCount - 1; ++i) {
+ if (time <= base::consts::kTimeFormats[i].value) {
+ break;
+ }
+ if (base::consts::kTimeFormats[i].value == 1000.0f && time / 1000.0f < 1.9f) {
+ break;
+ }
+ time /= static_cast<decltype(time)>(base::consts::kTimeFormats[i].value);
+ unit = base::consts::kTimeFormats[i + 1].unit;
 }
- if (base::consts::kTimeFormats[i].value == 1000.0f && time / 1000.0f < 1.9f) {
- break;
+ base::type::stringstream_t ss;
+ ss << time << " " << unit;
+ return ss.str();
+}
+
+unsigned long long
+DateTime::getTimeDifference(const struct timeval& endTime, const struct timeval& startTime,
+ base::TimestampUnit timestampUnit) {
+ if (timestampUnit == base::TimestampUnit::Microsecond) {
+ return static_cast<unsigned long long>(
+ static_cast<unsigned long long>(1000000 * endTime.tv_sec + endTime.tv_usec) -
+ static_cast<unsigned long long>(1000000 * startTime.tv_sec + startTime.tv_usec));
 }
- time /= static_cast<decltype(time)>(base::consts::kTimeFormats[i].value);
- unit = base::consts::kTimeFormats[i + 1].unit;
- }
- base::type::stringstream_t ss;
- ss << time << " " << unit;
- return ss.str();
+ // milliseconds
+ auto conv = [](const struct timeval& tim) {
+ return static_cast<unsigned long long>((tim.tv_sec * 1000) + (tim.tv_usec / 1000));
+ };
+ return static_cast<unsigned long long>(conv(endTime) - conv(startTime));
 }
-unsigned long long DateTime::getTimeDifference(const struct timeval& endTime, const struct timeval& startTime,
- base::TimestampUnit timestampUnit) {
- if (timestampUnit ==
base::TimestampUnit::Microsecond) { - return static_cast(static_cast(1000000 * endTime.tv_sec + endTime.tv_usec) - - static_cast(1000000 * startTime.tv_sec + startTime.tv_usec)); - } - // milliseconds - auto conv = [](const struct timeval& tim) { - return static_cast((tim.tv_sec * 1000) + (tim.tv_usec / 1000)); - }; - return static_cast(conv(endTime) - conv(startTime)); -} - -struct ::tm* DateTime::buildTimeInfo(struct timeval* currTime, struct ::tm* timeInfo) { +struct ::tm* +DateTime::buildTimeInfo(struct timeval* currTime, struct ::tm* timeInfo) { #if ELPP_OS_UNIX - time_t rawTime = currTime->tv_sec; - ::elpptime_r(&rawTime, timeInfo); - return timeInfo; + time_t rawTime = currTime->tv_sec; + ::elpptime_r(&rawTime, timeInfo); + return timeInfo; #else -# if ELPP_COMPILER_MSVC - ELPP_UNUSED(currTime); - time_t t; -# if defined(_USE_32BIT_TIME_T) - _time32(&t); -# else - _time64(&t); -# endif - elpptime_s(timeInfo, &t); - return timeInfo; -# else - // For any other compilers that don't have CRT warnings issue e.g, MinGW or TDM GCC- we use different method - time_t rawTime = currTime->tv_sec; - struct tm* tmInf = elpptime(&rawTime); - *timeInfo = *tmInf; - return timeInfo; -# endif // ELPP_COMPILER_MSVC +#if ELPP_COMPILER_MSVC + ELPP_UNUSED(currTime); + time_t t; +#if defined(_USE_32BIT_TIME_T) + _time32(&t); +#else + _time64(&t); +#endif + elpptime_s(timeInfo, &t); + return timeInfo; +#else + // For any other compilers that don't have CRT warnings issue e.g, MinGW or TDM GCC- we use different method + time_t rawTime = currTime->tv_sec; + struct tm* tmInf = elpptime(&rawTime); + *timeInfo = *tmInf; + return timeInfo; +#endif // ELPP_COMPILER_MSVC #endif // ELPP_OS_UNIX } -char* DateTime::parseFormat(char* buf, std::size_t bufSz, const char* format, const struct tm* tInfo, - std::size_t msec, const base::SubsecondPrecision* ssPrec) { - const char* bufLim = buf + bufSz; - for (; *format; ++format) { - if (*format == base::consts::kFormatSpecifierChar) { - switch (*++format) { - case base::consts::kFormatSpecifierChar: // Escape - break; - case '\0': // End - --format; - break; - case 'd': // Day - buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_mday, 2, buf, bufLim); - continue; - case 'a': // Day of week (short) - buf = base::utils::Str::addToBuff(base::consts::kDaysAbbrev[tInfo->tm_wday], buf, bufLim); - continue; - case 'A': // Day of week (long) - buf = base::utils::Str::addToBuff(base::consts::kDays[tInfo->tm_wday], buf, bufLim); - continue; - case 'M': // month - buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_mon + 1, 2, buf, bufLim); - continue; - case 'b': // month (short) - buf = base::utils::Str::addToBuff(base::consts::kMonthsAbbrev[tInfo->tm_mon], buf, bufLim); - continue; - case 'B': // month (long) - buf = base::utils::Str::addToBuff(base::consts::kMonths[tInfo->tm_mon], buf, bufLim); - continue; - case 'y': // year (two digits) - buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_year + base::consts::kYearBase, 2, buf, bufLim); - continue; - case 'Y': // year (four digits) - buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_year + base::consts::kYearBase, 4, buf, bufLim); - continue; - case 'h': // hour (12-hour) - buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_hour % 12, 2, buf, bufLim); - continue; - case 'H': // hour (24-hour) - buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_hour, 2, buf, bufLim); - continue; - case 'm': // minute - buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_min, 2, buf, bufLim); - continue; - case 's': // second 
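parseFormat dispatches on each %-specifier and appends a zero-padded numeric or named field into a bounded buffer. The same dispatch idea in a self-contained sketch; format_clock and its three-specifier set are a hypothetical reduction of the real specifier table:

    #include <cstdio>
    #include <ctime>
    #include <string>

    // Tiny strftime-style formatter: supports %H, %m, %s only (hypothetical
    // reduced specifier set; the real parser handles many more).
    static std::string format_clock(const std::string& fmt, const std::tm& t) {
        std::string out;
        char num[3];
        for (std::size_t i = 0; i < fmt.size(); ++i) {
            if (fmt[i] != '%' || i + 1 == fmt.size()) {
                out += fmt[i];
                continue;
            }
            int v = -1;
            switch (fmt[++i]) {
                case 'H': v = t.tm_hour; break;  // hour (24-hour)
                case 'm': v = t.tm_min;  break;  // minute
                case 's': v = t.tm_sec;  break;  // second
                default:  continue;              // unknown specifier: drop it
            }
            std::snprintf(num, sizeof(num), "%02d", v);
            out += num;
        }
        return out;
    }

    int main() {
        std::time_t now = std::time(nullptr);
        std::tm t = *std::localtime(&now);
        std::printf("%s\n", format_clock("%H:%m:%s", t).c_str());
        return 0;
    }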
- buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_sec, 2, buf, bufLim); - continue; - case 'z': // subsecond part - case 'g': - buf = base::utils::Str::convertAndAddToBuff(msec, ssPrec->m_width, buf, bufLim); - continue; - case 'F': // AM/PM - buf = base::utils::Str::addToBuff((tInfo->tm_hour >= 12) ? base::consts::kPm : base::consts::kAm, buf, bufLim); - continue; - default: - continue; - } +char* +DateTime::parseFormat(char* buf, std::size_t bufSz, const char* format, const struct tm* tInfo, std::size_t msec, + const base::SubsecondPrecision* ssPrec) { + const char* bufLim = buf + bufSz; + for (; *format; ++format) { + if (*format == base::consts::kFormatSpecifierChar) { + switch (*++format) { + case base::consts::kFormatSpecifierChar: // Escape + break; + case '\0': // End + --format; + break; + case 'd': // Day + buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_mday, 2, buf, bufLim); + continue; + case 'a': // Day of week (short) + buf = base::utils::Str::addToBuff(base::consts::kDaysAbbrev[tInfo->tm_wday], buf, bufLim); + continue; + case 'A': // Day of week (long) + buf = base::utils::Str::addToBuff(base::consts::kDays[tInfo->tm_wday], buf, bufLim); + continue; + case 'M': // month + buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_mon + 1, 2, buf, bufLim); + continue; + case 'b': // month (short) + buf = base::utils::Str::addToBuff(base::consts::kMonthsAbbrev[tInfo->tm_mon], buf, bufLim); + continue; + case 'B': // month (long) + buf = base::utils::Str::addToBuff(base::consts::kMonths[tInfo->tm_mon], buf, bufLim); + continue; + case 'y': // year (two digits) + buf = + base::utils::Str::convertAndAddToBuff(tInfo->tm_year + base::consts::kYearBase, 2, buf, bufLim); + continue; + case 'Y': // year (four digits) + buf = + base::utils::Str::convertAndAddToBuff(tInfo->tm_year + base::consts::kYearBase, 4, buf, bufLim); + continue; + case 'h': // hour (12-hour) + buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_hour % 12, 2, buf, bufLim); + continue; + case 'H': // hour (24-hour) + buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_hour, 2, buf, bufLim); + continue; + case 'm': // minute + buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_min, 2, buf, bufLim); + continue; + case 's': // second + buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_sec, 2, buf, bufLim); + continue; + case 'z': // subsecond part + case 'g': + buf = base::utils::Str::convertAndAddToBuff(msec, ssPrec->m_width, buf, bufLim); + continue; + case 'F': // AM/PM + buf = base::utils::Str::addToBuff((tInfo->tm_hour >= 12) ? 
base::consts::kPm : base::consts::kAm,
+ buf, bufLim);
+ continue;
+ default:
+ continue;
+ }
+ }
+ if (buf == bufLim)
+ break;
+ *buf++ = *format;
 }
- if (buf == bufLim) break;
- *buf++ = *format;
- }
- return buf;
+ return buf;
 }
 // CommandLineArgs
-void CommandLineArgs::setArgs(int argc, char** argv) {
- m_params.clear();
- m_paramsWithValue.clear();
- if (argc == 0 || argv == nullptr) {
- return;
- }
- m_argc = argc;
- m_argv = argv;
- for (int i = 1; i < m_argc; ++i) {
- const char* v = (strstr(m_argv[i], "="));
- if (v != nullptr && strlen(v) > 0) {
- std::string key = std::string(m_argv[i]);
- key = key.substr(0, key.find_first_of('='));
- if (hasParamWithValue(key.c_str())) {
- ELPP_INTERNAL_INFO(1, "Skipping [" << key << "] arg since it already has value ["
- << getParamValue(key.c_str()) << "]");
- } else {
- m_paramsWithValue.insert(std::make_pair(key, std::string(v + 1)));
- }
+void
+CommandLineArgs::setArgs(int argc, char** argv) {
+ m_params.clear();
+ m_paramsWithValue.clear();
+ if (argc == 0 || argv == nullptr) {
+ return;
 }
- if (v == nullptr) {
- if (hasParam(m_argv[i])) {
- ELPP_INTERNAL_INFO(1, "Skipping [" << m_argv[i] << "] arg since it already exists");
- } else {
- m_params.push_back(std::string(m_argv[i]));
- }
+ m_argc = argc;
+ m_argv = argv;
+ for (int i = 1; i < m_argc; ++i) {
+ const char* v = (strstr(m_argv[i], "="));
+ if (v != nullptr && strlen(v) > 0) {
+ std::string key = std::string(m_argv[i]);
+ key = key.substr(0, key.find_first_of('='));
+ if (hasParamWithValue(key.c_str())) {
+ ELPP_INTERNAL_INFO(1, "Skipping [" << key << "] arg since it already has value ["
+ << getParamValue(key.c_str()) << "]");
+ } else {
+ m_paramsWithValue.insert(std::make_pair(key, std::string(v + 1)));
+ }
+ }
+ if (v == nullptr) {
+ if (hasParam(m_argv[i])) {
+ ELPP_INTERNAL_INFO(1, "Skipping [" << m_argv[i] << "] arg since it already exists");
+ } else {
+ m_params.push_back(std::string(m_argv[i]));
+ }
+ }
 }
- }
 }
-bool CommandLineArgs::hasParamWithValue(const char* paramKey) const {
- return m_paramsWithValue.find(std::string(paramKey)) != m_paramsWithValue.end();
+bool
+CommandLineArgs::hasParamWithValue(const char* paramKey) const {
+ return m_paramsWithValue.find(std::string(paramKey)) != m_paramsWithValue.end();
 }
-const char* CommandLineArgs::getParamValue(const char* paramKey) const {
- std::unordered_map<std::string, std::string>::const_iterator iter = m_paramsWithValue.find(std::string(paramKey));
- return iter != m_paramsWithValue.end() ? iter->second.c_str() : "";
+const char*
+CommandLineArgs::getParamValue(const char* paramKey) const {
+ std::unordered_map<std::string, std::string>::const_iterator iter = m_paramsWithValue.find(std::string(paramKey));
+ return iter != m_paramsWithValue.end() ?
iter->second.c_str() : ""; } -bool CommandLineArgs::hasParam(const char* paramKey) const { - return std::find(m_params.begin(), m_params.end(), std::string(paramKey)) != m_params.end(); +bool +CommandLineArgs::hasParam(const char* paramKey) const { + return std::find(m_params.begin(), m_params.end(), std::string(paramKey)) != m_params.end(); } -bool CommandLineArgs::empty(void) const { - return m_params.empty() && m_paramsWithValue.empty(); +bool +CommandLineArgs::empty(void) const { + return m_params.empty() && m_paramsWithValue.empty(); } -std::size_t CommandLineArgs::size(void) const { - return m_params.size() + m_paramsWithValue.size(); +std::size_t +CommandLineArgs::size(void) const { + return m_params.size() + m_paramsWithValue.size(); } -base::type::ostream_t& operator<<(base::type::ostream_t& os, const CommandLineArgs& c) { - for (int i = 1; i < c.m_argc; ++i) { - os << ELPP_LITERAL("[") << c.m_argv[i] << ELPP_LITERAL("]"); - if (i < c.m_argc - 1) { - os << ELPP_LITERAL(" "); +base::type::ostream_t& +operator<<(base::type::ostream_t& os, const CommandLineArgs& c) { + for (int i = 1; i < c.m_argc; ++i) { + os << ELPP_LITERAL("[") << c.m_argv[i] << ELPP_LITERAL("]"); + if (i < c.m_argc - 1) { + os << ELPP_LITERAL(" "); + } } - } - return os; + return os; } -} // namespace utils +} // namespace utils // el::base::threading namespace threading { #if ELPP_THREADING_ENABLED -# if ELPP_USE_STD_THREADING -# if ELPP_ASYNC_LOGGING -static void msleep(int ms) { - // Only when async logging enabled - this is because async is strict on compiler -# if defined(ELPP_NO_SLEEP_FOR) - usleep(ms * 1000); -# else - std::this_thread::sleep_for(std::chrono::milliseconds(ms)); -# endif // defined(ELPP_NO_SLEEP_FOR) +#if ELPP_USE_STD_THREADING +#if ELPP_ASYNC_LOGGING +static void +msleep(int ms) { + // Only when async logging enabled - this is because async is strict on compiler +#if defined(ELPP_NO_SLEEP_FOR) + usleep(ms * 1000); +#else + std::this_thread::sleep_for(std::chrono::milliseconds(ms)); +#endif // defined(ELPP_NO_SLEEP_FOR) } -# endif // ELPP_ASYNC_LOGGING -# endif // !ELPP_USE_STD_THREADING +#endif // ELPP_ASYNC_LOGGING +#endif // !ELPP_USE_STD_THREADING #endif // ELPP_THREADING_ENABLED -} // namespace threading +} // namespace threading // el::base // SubsecondPrecision -void SubsecondPrecision::init(int width) { - if (width < 1 || width > 6) { - width = base::consts::kDefaultSubsecondPrecision; - } - m_width = width; - switch (m_width) { - case 3: - m_offset = 1000; - break; - case 4: - m_offset = 100; - break; - case 5: - m_offset = 10; - break; - case 6: - m_offset = 1; - break; - default: - m_offset = 1000; - break; - } +void +SubsecondPrecision::init(int width) { + if (width < 1 || width > 6) { + width = base::consts::kDefaultSubsecondPrecision; + } + m_width = width; + switch (m_width) { + case 3: + m_offset = 1000; + break; + case 4: + m_offset = 100; + break; + case 5: + m_offset = 10; + break; + case 6: + m_offset = 1; + break; + default: + m_offset = 1000; + break; + } } // LogFormat -LogFormat::LogFormat(void) : - m_level(Level::Unknown), - m_userFormat(base::type::string_t()), - m_format(base::type::string_t()), - m_dateTimeFormat(std::string()), - m_flags(0x0), - m_currentUser(base::utils::OS::currentUser()), - m_currentHost(base::utils::OS::currentHost()) { +LogFormat::LogFormat(void) + : m_level(Level::Unknown), + m_userFormat(base::type::string_t()), + m_format(base::type::string_t()), + m_dateTimeFormat(std::string()), + m_flags(0x0), + 
m_currentUser(base::utils::OS::currentUser()), + m_currentHost(base::utils::OS::currentHost()) { } LogFormat::LogFormat(Level level, const base::type::string_t& format) - : m_level(level), m_userFormat(format), m_currentUser(base::utils::OS::currentUser()), - m_currentHost(base::utils::OS::currentHost()) { - parseFromFormat(m_userFormat); + : m_level(level), + m_userFormat(format), + m_currentUser(base::utils::OS::currentUser()), + m_currentHost(base::utils::OS::currentHost()) { + parseFromFormat(m_userFormat); } -LogFormat::LogFormat(const LogFormat& logFormat): - m_level(logFormat.m_level), - m_userFormat(logFormat.m_userFormat), - m_format(logFormat.m_format), - m_dateTimeFormat(logFormat.m_dateTimeFormat), - m_flags(logFormat.m_flags), - m_currentUser(logFormat.m_currentUser), - m_currentHost(logFormat.m_currentHost) { +LogFormat::LogFormat(const LogFormat& logFormat) + : m_level(logFormat.m_level), + m_userFormat(logFormat.m_userFormat), + m_format(logFormat.m_format), + m_dateTimeFormat(logFormat.m_dateTimeFormat), + m_flags(logFormat.m_flags), + m_currentUser(logFormat.m_currentUser), + m_currentHost(logFormat.m_currentHost) { } LogFormat::LogFormat(LogFormat&& logFormat) { - m_level = std::move(logFormat.m_level); - m_userFormat = std::move(logFormat.m_userFormat); - m_format = std::move(logFormat.m_format); - m_dateTimeFormat = std::move(logFormat.m_dateTimeFormat); - m_flags = std::move(logFormat.m_flags); - m_currentUser = std::move(logFormat.m_currentUser); - m_currentHost = std::move(logFormat.m_currentHost); + m_level = std::move(logFormat.m_level); + m_userFormat = std::move(logFormat.m_userFormat); + m_format = std::move(logFormat.m_format); + m_dateTimeFormat = std::move(logFormat.m_dateTimeFormat); + m_flags = std::move(logFormat.m_flags); + m_currentUser = std::move(logFormat.m_currentUser); + m_currentHost = std::move(logFormat.m_currentHost); } -LogFormat& LogFormat::operator=(const LogFormat& logFormat) { - if (&logFormat != this) { - m_level = logFormat.m_level; - m_userFormat = logFormat.m_userFormat; - m_dateTimeFormat = logFormat.m_dateTimeFormat; - m_flags = logFormat.m_flags; - m_currentUser = logFormat.m_currentUser; - m_currentHost = logFormat.m_currentHost; - } - return *this; +LogFormat& +LogFormat::operator=(const LogFormat& logFormat) { + if (&logFormat != this) { + m_level = logFormat.m_level; + m_userFormat = logFormat.m_userFormat; + m_dateTimeFormat = logFormat.m_dateTimeFormat; + m_flags = logFormat.m_flags; + m_currentUser = logFormat.m_currentUser; + m_currentHost = logFormat.m_currentHost; + } + return *this; } -bool LogFormat::operator==(const LogFormat& other) { - return m_level == other.m_level && m_userFormat == other.m_userFormat && m_format == other.m_format && - m_dateTimeFormat == other.m_dateTimeFormat && m_flags == other.m_flags; +bool +LogFormat::operator==(const LogFormat& other) { + return m_level == other.m_level && m_userFormat == other.m_userFormat && m_format == other.m_format && + m_dateTimeFormat == other.m_dateTimeFormat && m_flags == other.m_flags; } /// @brief Updates format to be used while logging. /// @param userFormat User provided format -void LogFormat::parseFromFormat(const base::type::string_t& userFormat) { - // We make copy because we will be changing the format - // i.e, removing user provided date format from original format - // and then storing it. 
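parseFromFormat below scans the user's pattern once per known specifier and ignores occurrences escaped with the format-specifier character. A minimal sketch of that scan, assuming '%' as the escape character; has_specifier is an illustrative helper, not the library's API:

    #include <iostream>
    #include <string>

    // Report whether `spec` occurs in `fmt` unescaped, i.e. not immediately
    // preceded by '%'. "%%level" is an escaped literal, "%level" is a match.
    static bool has_specifier(const std::string& fmt, const std::string& spec) {
        std::size_t at = std::string::npos;
        while ((at = fmt.find(spec, at + 1)) != std::string::npos) {
            if (at > 0 && fmt[at - 1] == '%') {
                continue;  // escaped occurrence, keep scanning
            }
            return true;
        }
        return false;
    }

    int main() {
        std::cout << has_specifier("%datetime %level %msg", "%level") << '\n';   // 1
        std::cout << has_specifier("%datetime %%level %msg", "%level") << '\n';  // 0
        return 0;
    }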
- base::type::string_t formatCopy = userFormat; - m_flags = 0x0; - auto conditionalAddFlag = [&](const base::type::char_t* specifier, base::FormatFlags flag) { - std::size_t foundAt = base::type::string_t::npos; - while ((foundAt = formatCopy.find(specifier, foundAt + 1)) != base::type::string_t::npos) { - if (foundAt > 0 && formatCopy[foundAt - 1] == base::consts::kFormatSpecifierChar) { - if (hasFlag(flag)) { - // If we already have flag we remove the escape chars so that '%%' is turned to '%' - // even after specifier resolution - this is because we only replaceFirst specifier - formatCopy.erase(foundAt - 1, 1); - ++foundAt; +void +LogFormat::parseFromFormat(const base::type::string_t& userFormat) { + // We make copy because we will be changing the format + // i.e, removing user provided date format from original format + // and then storing it. + base::type::string_t formatCopy = userFormat; + m_flags = 0x0; + auto conditionalAddFlag = [&](const base::type::char_t* specifier, base::FormatFlags flag) { + std::size_t foundAt = base::type::string_t::npos; + while ((foundAt = formatCopy.find(specifier, foundAt + 1)) != base::type::string_t::npos) { + if (foundAt > 0 && formatCopy[foundAt - 1] == base::consts::kFormatSpecifierChar) { + if (hasFlag(flag)) { + // If we already have flag we remove the escape chars so that '%%' is turned to '%' + // even after specifier resolution - this is because we only replaceFirst specifier + formatCopy.erase(foundAt - 1, 1); + ++foundAt; + } + } else { + if (!hasFlag(flag)) + addFlag(flag); + } + } + }; + conditionalAddFlag(base::consts::kAppNameFormatSpecifier, base::FormatFlags::AppName); + conditionalAddFlag(base::consts::kSeverityLevelFormatSpecifier, base::FormatFlags::Level); + conditionalAddFlag(base::consts::kSeverityLevelShortFormatSpecifier, base::FormatFlags::LevelShort); + conditionalAddFlag(base::consts::kLoggerIdFormatSpecifier, base::FormatFlags::LoggerId); + conditionalAddFlag(base::consts::kThreadIdFormatSpecifier, base::FormatFlags::ThreadId); + conditionalAddFlag(base::consts::kLogFileFormatSpecifier, base::FormatFlags::File); + conditionalAddFlag(base::consts::kLogFileBaseFormatSpecifier, base::FormatFlags::FileBase); + conditionalAddFlag(base::consts::kLogLineFormatSpecifier, base::FormatFlags::Line); + conditionalAddFlag(base::consts::kLogLocationFormatSpecifier, base::FormatFlags::Location); + conditionalAddFlag(base::consts::kLogFunctionFormatSpecifier, base::FormatFlags::Function); + conditionalAddFlag(base::consts::kCurrentUserFormatSpecifier, base::FormatFlags::User); + conditionalAddFlag(base::consts::kCurrentHostFormatSpecifier, base::FormatFlags::Host); + conditionalAddFlag(base::consts::kMessageFormatSpecifier, base::FormatFlags::LogMessage); + conditionalAddFlag(base::consts::kVerboseLevelFormatSpecifier, base::FormatFlags::VerboseLevel); + // For date/time we need to extract user's date format first + std::size_t dateIndex = std::string::npos; + if ((dateIndex = formatCopy.find(base::consts::kDateTimeFormatSpecifier)) != std::string::npos) { + while (dateIndex > 0 && formatCopy[dateIndex - 1] == base::consts::kFormatSpecifierChar) { + dateIndex = formatCopy.find(base::consts::kDateTimeFormatSpecifier, dateIndex + 1); + } + if (dateIndex != std::string::npos) { + addFlag(base::FormatFlags::DateTime); + updateDateFormat(dateIndex, formatCopy); } - } else { - if (!hasFlag(flag)) addFlag(flag); - } } - }; - conditionalAddFlag(base::consts::kAppNameFormatSpecifier, base::FormatFlags::AppName); - 
conditionalAddFlag(base::consts::kSeverityLevelFormatSpecifier, base::FormatFlags::Level); - conditionalAddFlag(base::consts::kSeverityLevelShortFormatSpecifier, base::FormatFlags::LevelShort); - conditionalAddFlag(base::consts::kLoggerIdFormatSpecifier, base::FormatFlags::LoggerId); - conditionalAddFlag(base::consts::kThreadIdFormatSpecifier, base::FormatFlags::ThreadId); - conditionalAddFlag(base::consts::kLogFileFormatSpecifier, base::FormatFlags::File); - conditionalAddFlag(base::consts::kLogFileBaseFormatSpecifier, base::FormatFlags::FileBase); - conditionalAddFlag(base::consts::kLogLineFormatSpecifier, base::FormatFlags::Line); - conditionalAddFlag(base::consts::kLogLocationFormatSpecifier, base::FormatFlags::Location); - conditionalAddFlag(base::consts::kLogFunctionFormatSpecifier, base::FormatFlags::Function); - conditionalAddFlag(base::consts::kCurrentUserFormatSpecifier, base::FormatFlags::User); - conditionalAddFlag(base::consts::kCurrentHostFormatSpecifier, base::FormatFlags::Host); - conditionalAddFlag(base::consts::kMessageFormatSpecifier, base::FormatFlags::LogMessage); - conditionalAddFlag(base::consts::kVerboseLevelFormatSpecifier, base::FormatFlags::VerboseLevel); - // For date/time we need to extract user's date format first - std::size_t dateIndex = std::string::npos; - if ((dateIndex = formatCopy.find(base::consts::kDateTimeFormatSpecifier)) != std::string::npos) { - while (dateIndex > 0 && formatCopy[dateIndex - 1] == base::consts::kFormatSpecifierChar) { - dateIndex = formatCopy.find(base::consts::kDateTimeFormatSpecifier, dateIndex + 1); - } - if (dateIndex != std::string::npos) { - addFlag(base::FormatFlags::DateTime); - updateDateFormat(dateIndex, formatCopy); - } - } - m_format = formatCopy; - updateFormatSpec(); + m_format = formatCopy; + updateFormatSpec(); } -void LogFormat::updateDateFormat(std::size_t index, base::type::string_t& currFormat) { - if (hasFlag(base::FormatFlags::DateTime)) { - index += ELPP_STRLEN(base::consts::kDateTimeFormatSpecifier); - } - const base::type::char_t* ptr = currFormat.c_str() + index; - if ((currFormat.size() > index) && (ptr[0] == '{')) { - // User has provided format for date/time - ++ptr; - int count = 1; // Start by 1 in order to remove starting brace - std::stringstream ss; - for (; *ptr; ++ptr, ++count) { - if (*ptr == '}') { - ++count; // In order to remove ending brace - break; - } - ss << static_cast(*ptr); - } - currFormat.erase(index, count); - m_dateTimeFormat = ss.str(); - } else { - // No format provided, use default +void +LogFormat::updateDateFormat(std::size_t index, base::type::string_t& currFormat) { if (hasFlag(base::FormatFlags::DateTime)) { - m_dateTimeFormat = std::string(base::consts::kDefaultDateTimeFormat); + index += ELPP_STRLEN(base::consts::kDateTimeFormatSpecifier); } - } -} - -void LogFormat::updateFormatSpec(void) { - // Do not use switch over strongly typed enums because Intel C++ compilers dont support them yet. 
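updateFormatSpec pre-resolves %level and its short form once per logger level via Str::replaceFirstWithEscape, which replaces the first unescaped occurrence and demotes an escaped "%%level" to a literal "%level". A reduced sketch of that behavior; replace_first_unescaped is a hypothetical stand-in with '%' hard-coded as the escape character:

    #include <iostream>
    #include <string>

    // Replace the first unescaped occurrence of `what` with `with`; an
    // occurrence preceded by '%' instead loses its escape and stays literal.
    static void replace_first_unescaped(std::string& s, const std::string& what, const std::string& with) {
        std::size_t at = std::string::npos;
        while ((at = s.find(what, at + 1)) != std::string::npos) {
            if (at > 0 && s[at - 1] == '%') {
                s.erase(at - 1, 1);  // drop the escape, keep the literal text
                ++at;
            } else {
                s.replace(at, what.length(), with);
                return;
            }
        }
    }

    int main() {
        std::string fmt = "%%level resolved: %level";
        replace_first_unescaped(fmt, "%level", "INFO");
        std::cout << fmt << '\n';  // prints: %level resolved: INFO
        return 0;
    }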
- if (m_level == Level::Debug) { - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, - base::consts::kDebugLevelLogValue); - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, - base::consts::kDebugLevelShortLogValue); - } else if (m_level == Level::Info) { - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, - base::consts::kInfoLevelLogValue); - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, - base::consts::kInfoLevelShortLogValue); - } else if (m_level == Level::Warning) { - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, - base::consts::kWarningLevelLogValue); - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, - base::consts::kWarningLevelShortLogValue); - } else if (m_level == Level::Error) { - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, - base::consts::kErrorLevelLogValue); - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, - base::consts::kErrorLevelShortLogValue); - } else if (m_level == Level::Fatal) { - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, - base::consts::kFatalLevelLogValue); - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, - base::consts::kFatalLevelShortLogValue); - } else if (m_level == Level::Verbose) { - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, - base::consts::kVerboseLevelLogValue); - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, - base::consts::kVerboseLevelShortLogValue); - } else if (m_level == Level::Trace) { - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, - base::consts::kTraceLevelLogValue); - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, - base::consts::kTraceLevelShortLogValue); - } - if (hasFlag(base::FormatFlags::User)) { - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kCurrentUserFormatSpecifier, - m_currentUser); - } - if (hasFlag(base::FormatFlags::Host)) { - base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kCurrentHostFormatSpecifier, - m_currentHost); - } - // Ignore Level::Global and Level::Unknown -} - -// TypedConfigurations - -TypedConfigurations::TypedConfigurations(Configurations* configurations, - base::LogStreamsReferenceMap* logStreamsReference) { - m_configurations = configurations; - m_logStreamsReference = logStreamsReference; - build(m_configurations); -} - -TypedConfigurations::TypedConfigurations(const TypedConfigurations& other) { - this->m_configurations = other.m_configurations; - this->m_logStreamsReference = other.m_logStreamsReference; - build(m_configurations); -} - -bool TypedConfigurations::enabled(Level level) { - return getConfigByVal(level, &m_enabledMap, "enabled"); -} - -bool TypedConfigurations::toFile(Level level) { - return getConfigByVal(level, &m_toFileMap, "toFile"); -} - -const std::string& TypedConfigurations::filename(Level level) { - return getConfigByRef(level, &m_filenameMap, "filename"); -} - -bool TypedConfigurations::toStandardOutput(Level level) { - return 
getConfigByVal(level, &m_toStandardOutputMap, "toStandardOutput"); -} - -const base::LogFormat& TypedConfigurations::logFormat(Level level) { - return getConfigByRef(level, &m_logFormatMap, "logFormat"); -} - -const base::SubsecondPrecision& TypedConfigurations::subsecondPrecision(Level level) { - return getConfigByRef(level, &m_subsecondPrecisionMap, "subsecondPrecision"); -} - -const base::MillisecondsWidth& TypedConfigurations::millisecondsWidth(Level level) { - return getConfigByRef(level, &m_subsecondPrecisionMap, "millisecondsWidth"); -} - -bool TypedConfigurations::performanceTracking(Level level) { - return getConfigByVal(level, &m_performanceTrackingMap, "performanceTracking"); -} - -base::type::fstream_t* TypedConfigurations::fileStream(Level level) { - return getConfigByRef(level, &m_fileStreamMap, "fileStream").get(); -} - -std::size_t TypedConfigurations::maxLogFileSize(Level level) { - return getConfigByVal(level, &m_maxLogFileSizeMap, "maxLogFileSize"); -} - -std::size_t TypedConfigurations::logFlushThreshold(Level level) { - return getConfigByVal(level, &m_logFlushThresholdMap, "logFlushThreshold"); -} - -void TypedConfigurations::build(Configurations* configurations) { - base::threading::ScopedLock scopedLock(lock()); - auto getBool = [] (std::string boolStr) -> bool { // Pass by value for trimming - base::utils::Str::trim(boolStr); - return (boolStr == "TRUE" || boolStr == "true" || boolStr == "1"); - }; - std::vector withFileSizeLimit; - for (Configurations::const_iterator it = configurations->begin(); it != configurations->end(); ++it) { - Configuration* conf = *it; - // We cannot use switch on strong enums because Intel C++ dont support them yet - if (conf->configurationType() == ConfigurationType::Enabled) { - setValue(conf->level(), getBool(conf->value()), &m_enabledMap); - } else if (conf->configurationType() == ConfigurationType::ToFile) { - setValue(conf->level(), getBool(conf->value()), &m_toFileMap); - } else if (conf->configurationType() == ConfigurationType::ToStandardOutput) { - setValue(conf->level(), getBool(conf->value()), &m_toStandardOutputMap); - } else if (conf->configurationType() == ConfigurationType::Filename) { - // We do not yet configure filename but we will configure in another - // loop. This is because if file cannot be created, we will force ToFile - // to be false. 
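The getBool helper above trims the raw configuration value and accepts TRUE, true, or 1. A standalone sketch of the same tolerant parse, assuming only the standard library; parse_bool is an illustrative name:

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <string>

    // Trim surrounding whitespace, then accept "TRUE", "true" or "1"
    // (the same tolerant parse the configuration builder uses).
    static bool parse_bool(std::string s) {
        auto not_space = [](unsigned char c) { return !std::isspace(c); };
        s.erase(s.begin(), std::find_if(s.begin(), s.end(), not_space));
        s.erase(std::find_if(s.rbegin(), s.rend(), not_space).base(), s.end());
        return s == "TRUE" || s == "true" || s == "1";
    }

    int main() {
        std::cout << parse_bool("  true ") << parse_bool("0") << '\n';  // 10
        return 0;
    }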
Because configuring logger is not necessarily performance - // sensative operation, we can live with another loop; (by the way this loop - // is not very heavy either) - } else if (conf->configurationType() == ConfigurationType::Format) { - setValue(conf->level(), base::LogFormat(conf->level(), - base::type::string_t(conf->value().begin(), conf->value().end())), &m_logFormatMap); - } else if (conf->configurationType() == ConfigurationType::SubsecondPrecision) { - setValue(Level::Global, - base::SubsecondPrecision(static_cast(getULong(conf->value()))), &m_subsecondPrecisionMap); - } else if (conf->configurationType() == ConfigurationType::PerformanceTracking) { - setValue(Level::Global, getBool(conf->value()), &m_performanceTrackingMap); - } else if (conf->configurationType() == ConfigurationType::MaxLogFileSize) { - auto v = getULong(conf->value()); - setValue(conf->level(), static_cast(v), &m_maxLogFileSizeMap); - if (v != 0) { - withFileSizeLimit.push_back(conf); - } - } else if (conf->configurationType() == ConfigurationType::LogFlushThreshold) { - setValue(conf->level(), static_cast(getULong(conf->value())), &m_logFlushThresholdMap); - } - } - // As mentioned earlier, we will now set filename configuration in separate loop to deal with non-existent files - for (Configurations::const_iterator it = configurations->begin(); it != configurations->end(); ++it) { - Configuration* conf = *it; - if (conf->configurationType() == ConfigurationType::Filename) { - insertFile(conf->level(), conf->value()); - } - } - for (std::vector::iterator conf = withFileSizeLimit.begin(); - conf != withFileSizeLimit.end(); ++conf) { - // This is not unsafe as mutex is locked in currect scope - unsafeValidateFileRolling((*conf)->level(), base::defaultPreRollOutCallback); - } -} - -unsigned long TypedConfigurations::getULong(std::string confVal) { - bool valid = true; - base::utils::Str::trim(confVal); - valid = !confVal.empty() && std::find_if(confVal.begin(), confVal.end(), - [](char c) { - return !base::utils::Str::isDigit(c); - }) == confVal.end(); - if (!valid) { - valid = false; - ELPP_ASSERT(valid, "Configuration value not a valid integer [" << confVal << "]"); - return 0; - } - return atol(confVal.c_str()); -} - -std::string TypedConfigurations::resolveFilename(const std::string& filename) { - std::string resultingFilename = filename; - std::size_t dateIndex = std::string::npos; - std::string dateTimeFormatSpecifierStr = std::string(base::consts::kDateTimeFormatSpecifierForFilename); - if ((dateIndex = resultingFilename.find(dateTimeFormatSpecifierStr.c_str())) != std::string::npos) { - while (dateIndex > 0 && resultingFilename[dateIndex - 1] == base::consts::kFormatSpecifierChar) { - dateIndex = resultingFilename.find(dateTimeFormatSpecifierStr.c_str(), dateIndex + 1); - } - if (dateIndex != std::string::npos) { - const char* ptr = resultingFilename.c_str() + dateIndex; - // Goto end of specifier - ptr += dateTimeFormatSpecifierStr.size(); - std::string fmt; - if ((resultingFilename.size() > dateIndex) && (ptr[0] == '{')) { + const base::type::char_t* ptr = currFormat.c_str() + index; + if ((currFormat.size() > index) && (ptr[0] == '{')) { // User has provided format for date/time ++ptr; int count = 1; // Start by 1 in order to remove starting brace std::stringstream ss; for (; *ptr; ++ptr, ++count) { - if (*ptr == '}') { - ++count; // In order to remove ending brace - break; - } - ss << *ptr; + if (*ptr == '}') { + ++count; // In order to remove ending brace + break; + } + ss << static_cast(*ptr); } - 
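resolveFilename, shown here, extracts an optional brace-delimited pattern following the %datetime specifier, renders it, and splices the stamp back into the filename. A compact sketch of the same idea built on std::strftime rather than the library's own date formatter; stamp_filename is a hypothetical helper:

    #include <ctime>
    #include <iostream>
    #include <string>

    // Expand "%datetime{<fmt>}" in a filename into the current time, e.g.
    // "app-%datetime{%Y%m%d}.log" -> "app-20190917.log" (hypothetical helper).
    static std::string stamp_filename(std::string name) {
        const std::string spec = "%datetime";
        std::size_t at = name.find(spec);
        if (at == std::string::npos) {
            return name;
        }
        std::string fmt = "%Y-%m-%d";  // default when no {...} is given
        std::size_t len = spec.size();
        if (at + len < name.size() && name[at + len] == '{') {
            std::size_t close = name.find('}', at + len + 1);
            if (close != std::string::npos) {
                fmt = name.substr(at + len + 1, close - (at + len + 1));
                len = close - at + 1;
            }
        }
        char buf[64] = "";
        std::time_t now = std::time(nullptr);
        std::strftime(buf, sizeof(buf), fmt.c_str(), std::localtime(&now));
        return name.replace(at, len, buf);
    }

    int main() {
        std::cout << stamp_filename("app-%datetime{%Y%m%d}.log") << '\n';
        return 0;
    }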
resultingFilename.erase(dateIndex + dateTimeFormatSpecifierStr.size(), count); - fmt = ss.str(); - } else { - fmt = std::string(base::consts::kDefaultDateTimeFormatInFilename); - } - base::SubsecondPrecision ssPrec(3); - std::string now = base::utils::DateTime::getDateTime(fmt.c_str(), &ssPrec); - base::utils::Str::replaceAll(now, '/', '-'); // Replace path element since we are dealing with filename - base::utils::Str::replaceAll(resultingFilename, dateTimeFormatSpecifierStr, now); - } - } - return resultingFilename; -} - -void TypedConfigurations::insertFile(Level level, const std::string& fullFilename) { - std::string resolvedFilename = resolveFilename(fullFilename); - if (resolvedFilename.empty()) { - std::cerr << "Could not load empty file for logging, please re-check your configurations for level [" - << LevelHelper::convertToString(level) << "]"; - } - std::string filePath = base::utils::File::extractPathFromFilename(resolvedFilename, base::consts::kFilePathSeperator); - if (filePath.size() < resolvedFilename.size()) { - base::utils::File::createPath(filePath); - } - auto create = [&](Level level) { - base::LogStreamsReferenceMap::iterator filestreamIter = m_logStreamsReference->find(resolvedFilename); - base::type::fstream_t* fs = nullptr; - if (filestreamIter == m_logStreamsReference->end()) { - // We need a completely new stream, nothing to share with - fs = base::utils::File::newFileStream(resolvedFilename); - m_filenameMap.insert(std::make_pair(level, resolvedFilename)); - m_fileStreamMap.insert(std::make_pair(level, base::FileStreamPtr(fs))); - m_logStreamsReference->insert(std::make_pair(resolvedFilename, base::FileStreamPtr(m_fileStreamMap.at(level)))); + currFormat.erase(index, count); + m_dateTimeFormat = ss.str(); } else { - // Woops! we have an existing one, share it! - m_filenameMap.insert(std::make_pair(level, filestreamIter->first)); - m_fileStreamMap.insert(std::make_pair(level, base::FileStreamPtr(filestreamIter->second))); - fs = filestreamIter->second.get(); + // No format provided, use default + if (hasFlag(base::FormatFlags::DateTime)) { + m_dateTimeFormat = std::string(base::consts::kDefaultDateTimeFormat); + } } - if (fs == nullptr) { - // We display bad file error from newFileStream() - ELPP_INTERNAL_ERROR("Setting [TO_FILE] of [" - << LevelHelper::convertToString(level) << "] to FALSE", false); - setValue(level, false, &m_toFileMap); - } - }; - // If we dont have file conf for any level, create it for Level::Global first - // otherwise create for specified level - create(m_filenameMap.empty() && m_fileStreamMap.empty() ? 
Level::Global : level); } -bool TypedConfigurations::unsafeValidateFileRolling(Level level, const PreRollOutCallback& preRollOutCallback) { - base::type::fstream_t* fs = unsafeGetConfigByRef(level, &m_fileStreamMap, "fileStream").get(); - if (fs == nullptr) { - return true; - } - std::size_t maxLogFileSize = unsafeGetConfigByVal(level, &m_maxLogFileSizeMap, "maxLogFileSize"); - std::size_t currFileSize = base::utils::File::getSizeOfFile(fs); - if (maxLogFileSize != 0 && currFileSize >= maxLogFileSize) { - std::string fname = unsafeGetConfigByRef(level, &m_filenameMap, "filename"); - ELPP_INTERNAL_INFO(1, "Truncating log file [" << fname << "] as a result of configurations for level [" - << LevelHelper::convertToString(level) << "]"); - fs->close(); - preRollOutCallback(fname.c_str(), currFileSize, level); - fs->open(fname, std::fstream::out | std::fstream::trunc); - return true; - } - return false; +void +LogFormat::updateFormatSpec(void) { + // Do not use switch over strongly typed enums because Intel C++ compilers dont support them yet. + if (m_level == Level::Debug) { + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, + base::consts::kDebugLevelLogValue); + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, + base::consts::kDebugLevelShortLogValue); + } else if (m_level == Level::Info) { + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, + base::consts::kInfoLevelLogValue); + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, + base::consts::kInfoLevelShortLogValue); + } else if (m_level == Level::Warning) { + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, + base::consts::kWarningLevelLogValue); + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, + base::consts::kWarningLevelShortLogValue); + } else if (m_level == Level::Error) { + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, + base::consts::kErrorLevelLogValue); + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, + base::consts::kErrorLevelShortLogValue); + } else if (m_level == Level::Fatal) { + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, + base::consts::kFatalLevelLogValue); + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, + base::consts::kFatalLevelShortLogValue); + } else if (m_level == Level::Verbose) { + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, + base::consts::kVerboseLevelLogValue); + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, + base::consts::kVerboseLevelShortLogValue); + } else if (m_level == Level::Trace) { + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier, + base::consts::kTraceLevelLogValue); + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier, + base::consts::kTraceLevelShortLogValue); + } + if (hasFlag(base::FormatFlags::User)) { + base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kCurrentUserFormatSpecifier, m_currentUser); + } + if (hasFlag(base::FormatFlags::Host)) { + 
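hasFlag and addFlag treat m_flags as a plain bitmask keyed by FormatFlags. The pattern in miniature; the FormatFlag values below are hypothetical powers of two, not the library's actual enumerators:

    #include <iostream>

    // Minimal bitmask-flag pattern: each flag is a distinct power of two,
    // stored together in one integer (as LogFormat does with m_flags).
    enum FormatFlag : unsigned int {
        kAppName = 1 << 0,
        kLevel   = 1 << 1,
        kHost    = 1 << 2,
    };

    struct Flags {
        unsigned int bits = 0x0;
        void add(FormatFlag f) { bits |= f; }
        bool has(FormatFlag f) const { return (bits & f) != 0; }
    };

    int main() {
        Flags flags;
        flags.add(kLevel);
        flags.add(kHost);
        std::cout << flags.has(kLevel) << flags.has(kAppName) << '\n';  // 10
        return 0;
    }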
base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kCurrentHostFormatSpecifier, m_currentHost);
+ }
+ // Ignore Level::Global and Level::Unknown
+}
+
+// TypedConfigurations
+
+TypedConfigurations::TypedConfigurations(Configurations* configurations,
+ base::LogStreamsReferenceMap* logStreamsReference) {
+ m_configurations = configurations;
+ m_logStreamsReference = logStreamsReference;
+ build(m_configurations);
+}
+
+TypedConfigurations::TypedConfigurations(const TypedConfigurations& other) {
+ this->m_configurations = other.m_configurations;
+ this->m_logStreamsReference = other.m_logStreamsReference;
+ build(m_configurations);
+}
+
+bool
+TypedConfigurations::enabled(Level level) {
+ return getConfigByVal<bool>(level, &m_enabledMap, "enabled");
+}
+
+bool
+TypedConfigurations::toFile(Level level) {
+ return getConfigByVal<bool>(level, &m_toFileMap, "toFile");
+}
+
+const std::string&
+TypedConfigurations::filename(Level level) {
+ return getConfigByRef<std::string>(level, &m_filenameMap, "filename");
+}
+
+bool
+TypedConfigurations::toStandardOutput(Level level) {
+ return getConfigByVal<bool>(level, &m_toStandardOutputMap, "toStandardOutput");
+}
+
+const base::LogFormat&
+TypedConfigurations::logFormat(Level level) {
+ return getConfigByRef<base::LogFormat>(level, &m_logFormatMap, "logFormat");
+}
+
+const base::SubsecondPrecision&
+TypedConfigurations::subsecondPrecision(Level level) {
+ return getConfigByRef<base::SubsecondPrecision>(level, &m_subsecondPrecisionMap, "subsecondPrecision");
+}
+
+const base::MillisecondsWidth&
+TypedConfigurations::millisecondsWidth(Level level) {
+ return getConfigByRef<base::MillisecondsWidth>(level, &m_subsecondPrecisionMap, "millisecondsWidth");
+}
+
+bool
+TypedConfigurations::performanceTracking(Level level) {
+ return getConfigByVal<bool>(level, &m_performanceTrackingMap, "performanceTracking");
+}
+
+base::type::fstream_t*
+TypedConfigurations::fileStream(Level level) {
+ return getConfigByRef<base::FileStreamPtr>(level, &m_fileStreamMap, "fileStream").get();
+}
+
+std::size_t
+TypedConfigurations::maxLogFileSize(Level level) {
+ return getConfigByVal<std::size_t>(level, &m_maxLogFileSizeMap, "maxLogFileSize");
+}
+
+std::size_t
+TypedConfigurations::logFlushThreshold(Level level) {
+ return getConfigByVal<std::size_t>(level, &m_logFlushThresholdMap, "logFlushThreshold");
+}
+
+void
+TypedConfigurations::build(Configurations* configurations) {
+ base::threading::ScopedLock scopedLock(lock());
+ auto getBool = [](std::string boolStr) -> bool { // Pass by value for trimming
+ base::utils::Str::trim(boolStr);
+ return (boolStr == "TRUE" || boolStr == "true" || boolStr == "1");
+ };
+ std::vector<Configuration*> withFileSizeLimit;
+ for (Configurations::const_iterator it = configurations->begin(); it != configurations->end(); ++it) {
+ Configuration* conf = *it;
+ // We cannot use switch on strong enums because Intel C++ doesn't support them yet
+ if (conf->configurationType() == ConfigurationType::Enabled) {
+ setValue(conf->level(), getBool(conf->value()), &m_enabledMap);
+ } else if (conf->configurationType() == ConfigurationType::ToFile) {
+ setValue(conf->level(), getBool(conf->value()), &m_toFileMap);
+ } else if (conf->configurationType() == ConfigurationType::ToStandardOutput) {
+ setValue(conf->level(), getBool(conf->value()), &m_toStandardOutputMap);
+ } else if (conf->configurationType() == ConfigurationType::Filename) {
+ // We do not yet configure filename but we will configure in another
+ // loop. This is because if file cannot be created, we will force ToFile
+ to be false.
Because configuring a logger is not necessarily a performance
+ sensitive operation, we can live with another loop; (by the way this loop
+ is not very heavy either)
+ } else if (conf->configurationType() == ConfigurationType::Format) {
+ setValue(conf->level(),
+ base::LogFormat(conf->level(), base::type::string_t(conf->value().begin(), conf->value().end())),
+ &m_logFormatMap);
+ } else if (conf->configurationType() == ConfigurationType::SubsecondPrecision) {
+ setValue(Level::Global, base::SubsecondPrecision(static_cast<int>(getULong(conf->value()))),
+ &m_subsecondPrecisionMap);
+ } else if (conf->configurationType() == ConfigurationType::PerformanceTracking) {
+ setValue(Level::Global, getBool(conf->value()), &m_performanceTrackingMap);
+ } else if (conf->configurationType() == ConfigurationType::MaxLogFileSize) {
+ auto v = getULong(conf->value());
+ setValue(conf->level(), static_cast<std::size_t>(v), &m_maxLogFileSizeMap);
+ if (v != 0) {
+ withFileSizeLimit.push_back(conf);
+ }
+ } else if (conf->configurationType() == ConfigurationType::LogFlushThreshold) {
+ setValue(conf->level(), static_cast<std::size_t>(getULong(conf->value())), &m_logFlushThresholdMap);
+ }
+ }
+ // As mentioned earlier, we will now set filename configuration in separate loop to deal with non-existent files
+ for (Configurations::const_iterator it = configurations->begin(); it != configurations->end(); ++it) {
+ Configuration* conf = *it;
+ if (conf->configurationType() == ConfigurationType::Filename) {
+ insertFile(conf->level(), conf->value());
+ }
+ }
+ for (std::vector<Configuration*>::iterator conf = withFileSizeLimit.begin(); conf != withFileSizeLimit.end();
+ ++conf) {
+ // This is not unsafe as mutex is locked in current scope
+ unsafeValidateFileRolling((*conf)->level(), base::defaultPreRollOutCallback);
+ }
+}
+
+unsigned long
+TypedConfigurations::getULong(std::string confVal) {
+ bool valid = true;
+ base::utils::Str::trim(confVal);
+ valid = !confVal.empty() && std::find_if(confVal.begin(), confVal.end(),
+ [](char c) { return !base::utils::Str::isDigit(c); }) == confVal.end();
+ if (!valid) {
+ valid = false;
+ ELPP_ASSERT(valid, "Configuration value not a valid integer [" << confVal << "]");
+ return 0;
+ }
+ return atol(confVal.c_str());
+}
+
+std::string
+TypedConfigurations::resolveFilename(const std::string& filename) {
+ std::string resultingFilename = filename;
+ std::size_t dateIndex = std::string::npos;
+ std::string dateTimeFormatSpecifierStr = std::string(base::consts::kDateTimeFormatSpecifierForFilename);
+ if ((dateIndex = resultingFilename.find(dateTimeFormatSpecifierStr.c_str())) != std::string::npos) {
+ while (dateIndex > 0 && resultingFilename[dateIndex - 1] == base::consts::kFormatSpecifierChar) {
+ dateIndex = resultingFilename.find(dateTimeFormatSpecifierStr.c_str(), dateIndex + 1);
+ }
+ if (dateIndex != std::string::npos) {
+ const char* ptr = resultingFilename.c_str() + dateIndex;
+ // Go to end of specifier
+ ptr += dateTimeFormatSpecifierStr.size();
+ std::string fmt;
+ if ((resultingFilename.size() > dateIndex) && (ptr[0] == '{')) {
+ // User has provided format for date/time
+ ++ptr;
+ int count = 1; // Start by 1 in order to remove starting brace
+ std::stringstream ss;
+ for (; *ptr; ++ptr, ++count) {
+ if (*ptr == '}') {
+ ++count; // In order to remove ending brace
+ break;
+ }
+ ss << *ptr;
+ }
+ resultingFilename.erase(dateIndex + dateTimeFormatSpecifierStr.size(), count);
+ fmt = ss.str();
+ } else {
+ fmt = std::string(base::consts::kDefaultDateTimeFormatInFilename);
+ }
+ base::SubsecondPrecision
ssPrec(3);
+ std::string now = base::utils::DateTime::getDateTime(fmt.c_str(), &ssPrec);
+ base::utils::Str::replaceAll(now, '/', '-'); // Replace path element since we are dealing with filename
+ base::utils::Str::replaceAll(resultingFilename, dateTimeFormatSpecifierStr, now);
+ }
+ }
+ return resultingFilename;
+}
+
+void
+TypedConfigurations::insertFile(Level level, const std::string& fullFilename) {
+ std::string resolvedFilename = resolveFilename(fullFilename);
+ if (resolvedFilename.empty()) {
+ std::cerr << "Could not load empty file for logging, please re-check your configurations for level ["
+ << LevelHelper::convertToString(level) << "]";
+ }
+ std::string filePath =
+ base::utils::File::extractPathFromFilename(resolvedFilename, base::consts::kFilePathSeperator);
+ if (filePath.size() < resolvedFilename.size()) {
+ base::utils::File::createPath(filePath);
+ }
+ auto create = [&](Level level) {
+ base::LogStreamsReferenceMap::iterator filestreamIter = m_logStreamsReference->find(resolvedFilename);
+ base::type::fstream_t* fs = nullptr;
+ if (filestreamIter == m_logStreamsReference->end()) {
+ // We need a completely new stream, nothing to share with
+ fs = base::utils::File::newFileStream(resolvedFilename);
+ m_filenameMap.insert(std::make_pair(level, resolvedFilename));
+ m_fileStreamMap.insert(std::make_pair(level, base::FileStreamPtr(fs)));
+ m_logStreamsReference->insert(
+ std::make_pair(resolvedFilename, base::FileStreamPtr(m_fileStreamMap.at(level))));
+ } else {
+ // Whoops! We have an existing one, share it!
+ m_filenameMap.insert(std::make_pair(level, filestreamIter->first));
+ m_fileStreamMap.insert(std::make_pair(level, base::FileStreamPtr(filestreamIter->second)));
+ fs = filestreamIter->second.get();
+ }
+ if (fs == nullptr) {
+ // We display bad file error from newFileStream()
+ ELPP_INTERNAL_ERROR("Setting [TO_FILE] of [" << LevelHelper::convertToString(level) << "] to FALSE", false);
+ setValue(level, false, &m_toFileMap);
+ }
+ };
+ // If we don't have file conf for any level, create it for Level::Global first
+ // otherwise create for specified level
+ create(m_filenameMap.empty() && m_fileStreamMap.empty() ?
Level::Global : level); +} + +bool +TypedConfigurations::unsafeValidateFileRolling(Level level, const PreRollOutCallback& preRollOutCallback) { + base::type::fstream_t* fs = unsafeGetConfigByRef(level, &m_fileStreamMap, "fileStream").get(); + if (fs == nullptr) { + return true; + } + std::size_t maxLogFileSize = unsafeGetConfigByVal(level, &m_maxLogFileSizeMap, "maxLogFileSize"); + std::size_t currFileSize = base::utils::File::getSizeOfFile(fs); + if (maxLogFileSize != 0 && currFileSize >= maxLogFileSize) { + std::string fname = unsafeGetConfigByRef(level, &m_filenameMap, "filename"); + ELPP_INTERNAL_INFO(1, "Truncating log file [" << fname << "] as a result of configurations for level [" + << LevelHelper::convertToString(level) << "]"); + fs->close(); + preRollOutCallback(fname.c_str(), currFileSize, level); + fs->open(fname, std::fstream::out | std::fstream::trunc); + return true; + } + return false; } // RegisteredHitCounters -bool RegisteredHitCounters::validateEveryN(const char* filename, base::type::LineNumber lineNumber, std::size_t n) { - base::threading::ScopedLock scopedLock(lock()); - base::HitCounter* counter = get(filename, lineNumber); - if (counter == nullptr) { - registerNew(counter = new base::HitCounter(filename, lineNumber)); - } - counter->validateHitCounts(n); - bool result = (n >= 1 && counter->hitCounts() != 0 && counter->hitCounts() % n == 0); - return result; +bool +RegisteredHitCounters::validateEveryN(const char* filename, base::type::LineNumber lineNumber, std::size_t n) { + base::threading::ScopedLock scopedLock(lock()); + base::HitCounter* counter = get(filename, lineNumber); + if (counter == nullptr) { + registerNew(counter = new base::HitCounter(filename, lineNumber)); + } + counter->validateHitCounts(n); + bool result = (n >= 1 && counter->hitCounts() != 0 && counter->hitCounts() % n == 0); + return result; } /// @brief Validates counter for hits >= N, i.e, registers new if does not exist otherwise updates original one /// @return True if validation resulted in triggering hit. Meaning logs should be written everytime true is returned -bool RegisteredHitCounters::validateAfterN(const char* filename, base::type::LineNumber lineNumber, std::size_t n) { - base::threading::ScopedLock scopedLock(lock()); - base::HitCounter* counter = get(filename, lineNumber); - if (counter == nullptr) { - registerNew(counter = new base::HitCounter(filename, lineNumber)); - } - // Do not use validateHitCounts here since we do not want to reset counter here - // Note the >= instead of > because we are incrementing - // after this check - if (counter->hitCounts() >= n) - return true; - counter->increment(); - return false; +bool +RegisteredHitCounters::validateAfterN(const char* filename, base::type::LineNumber lineNumber, std::size_t n) { + base::threading::ScopedLock scopedLock(lock()); + base::HitCounter* counter = get(filename, lineNumber); + if (counter == nullptr) { + registerNew(counter = new base::HitCounter(filename, lineNumber)); + } + // Do not use validateHitCounts here since we do not want to reset counter here + // Note the >= instead of > because we are incrementing + // after this check + if (counter->hitCounts() >= n) + return true; + counter->increment(); + return false; } /// @brief Validates counter for hits are <= n, i.e, registers new if does not exist otherwise updates original one /// @return True if validation resulted in triggering hit. 
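// ---------------------------------------------------------------------------
// Editor's note: self-contained sketch (not part of this patch) for the hit
// counters validated here; validateEveryN() backs LOG_EVERY_N, validateAfterN()
// backs LOG_AFTER_N, and validateNTimes() just below backs LOG_N_TIMES.
#include "easylogging++.h"

INITIALIZE_EASYLOGGINGPP

int main() {
    for (int i = 1; i <= 100; ++i) {
        LOG_EVERY_N(10, INFO) << "iteration " << i;    // fires at i = 10, 20, ..., 100
        LOG_AFTER_N(90, WARNING) << "tail iteration";  // fires from the 91st call on
        LOG_N_TIMES(3, DEBUG) << "warm-up";            // fires only for the first 3 calls
    }
    return 0;
}
// ---------------------------------------------------------------------------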
Meaning logs should be written everytime true is returned -bool RegisteredHitCounters::validateNTimes(const char* filename, base::type::LineNumber lineNumber, std::size_t n) { - base::threading::ScopedLock scopedLock(lock()); - base::HitCounter* counter = get(filename, lineNumber); - if (counter == nullptr) { - registerNew(counter = new base::HitCounter(filename, lineNumber)); - } - counter->increment(); - // Do not use validateHitCounts here since we do not want to reset counter here - if (counter->hitCounts() <= n) - return true; - return false; +bool +RegisteredHitCounters::validateNTimes(const char* filename, base::type::LineNumber lineNumber, std::size_t n) { + base::threading::ScopedLock scopedLock(lock()); + base::HitCounter* counter = get(filename, lineNumber); + if (counter == nullptr) { + registerNew(counter = new base::HitCounter(filename, lineNumber)); + } + counter->increment(); + // Do not use validateHitCounts here since we do not want to reset counter here + if (counter->hitCounts() <= n) + return true; + return false; } // RegisteredLoggers -RegisteredLoggers::RegisteredLoggers(const LogBuilderPtr& defaultLogBuilder) : - m_defaultLogBuilder(defaultLogBuilder) { - m_defaultConfigurations.setToDefault(); +RegisteredLoggers::RegisteredLoggers(const LogBuilderPtr& defaultLogBuilder) : m_defaultLogBuilder(defaultLogBuilder) { + m_defaultConfigurations.setToDefault(); } -Logger* RegisteredLoggers::get(const std::string& id, bool forceCreation) { - base::threading::ScopedLock scopedLock(lock()); - Logger* logger_ = base::utils::Registry::get(id); - if (logger_ == nullptr && forceCreation) { - bool validId = Logger::isValidId(id); - if (!validId) { - ELPP_ASSERT(validId, "Invalid logger ID [" << id << "]. Not registering this logger."); - return nullptr; +Logger* +RegisteredLoggers::get(const std::string& id, bool forceCreation) { + base::threading::ScopedLock scopedLock(lock()); + Logger* logger_ = base::utils::Registry::get(id); + if (logger_ == nullptr && forceCreation) { + bool validId = Logger::isValidId(id); + if (!validId) { + ELPP_ASSERT(validId, "Invalid logger ID [" << id << "]. 
Not registering this logger."); + return nullptr; + } + logger_ = new Logger(id, m_defaultConfigurations, &m_logStreamsReference); + logger_->m_logBuilder = m_defaultLogBuilder; + registerNew(id, logger_); + LoggerRegistrationCallback* callback = nullptr; + for (const std::pair& h : + m_loggerRegistrationCallbacks) { + callback = h.second.get(); + if (callback != nullptr && callback->enabled()) { + callback->handle(logger_); + } + } } - logger_ = new Logger(id, m_defaultConfigurations, &m_logStreamsReference); - logger_->m_logBuilder = m_defaultLogBuilder; - registerNew(id, logger_); - LoggerRegistrationCallback* callback = nullptr; - for (const std::pair& h - : m_loggerRegistrationCallbacks) { - callback = h.second.get(); - if (callback != nullptr && callback->enabled()) { - callback->handle(logger_); - } + return logger_; +} + +bool +RegisteredLoggers::remove(const std::string& id) { + if (id == base::consts::kDefaultLoggerId) { + return false; } - } - return logger_; + // get has internal lock + Logger* logger = base::utils::Registry::get(id); + if (logger != nullptr) { + // unregister has internal lock + unregister(logger); + } + return true; } -bool RegisteredLoggers::remove(const std::string& id) { - if (id == base::consts::kDefaultLoggerId) { - return false; - } - // get has internal lock - Logger* logger = base::utils::Registry::get(id); - if (logger != nullptr) { - // unregister has internal lock - unregister(logger); - } - return true; -} - -void RegisteredLoggers::unsafeFlushAll(void) { - ELPP_INTERNAL_INFO(1, "Flushing all log files"); - for (base::LogStreamsReferenceMap::iterator it = m_logStreamsReference.begin(); - it != m_logStreamsReference.end(); ++it) { - if (it->second.get() == nullptr) continue; - it->second->flush(); - } +void +RegisteredLoggers::unsafeFlushAll(void) { + ELPP_INTERNAL_INFO(1, "Flushing all log files"); + for (base::LogStreamsReferenceMap::iterator it = m_logStreamsReference.begin(); it != m_logStreamsReference.end(); + ++it) { + if (it->second.get() == nullptr) + continue; + it->second->flush(); + } } // VRegistry @@ -1939,334 +2045,358 @@ VRegistry::VRegistry(base::type::VerboseLevel level, base::type::EnumType* pFlag } /// @brief Sets verbose level. 
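// ---------------------------------------------------------------------------
// Editor's note: self-contained sketch (not part of this patch) for
// RegisteredLoggers::get()/remove() above: getLogger() registers a logger on
// first use unless the ID fails Logger::isValidId(), and the default logger
// can never be removed.
#include "easylogging++.h"

INITIALIZE_EASYLOGGINGPP

int main() {
    el::Logger* net = el::Loggers::getLogger("network");  // force-creates and registers
    if (net != nullptr) {
        CLOG(INFO, "network") << "routed through the new logger";
    }
    el::Loggers::unregisterLogger("network");  // fine; "default" would be refused
    el::Loggers::flushAll();                   // unsafeFlushAll() under the registry lock
    return 0;
}
// ---------------------------------------------------------------------------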
Accepted range is 0-9 -void VRegistry::setLevel(base::type::VerboseLevel level) { - base::threading::ScopedLock scopedLock(lock()); - if (level > 9) - m_level = base::consts::kMaxVerboseLevel; - else - m_level = level; +void +VRegistry::setLevel(base::type::VerboseLevel level) { + base::threading::ScopedLock scopedLock(lock()); + if (level > 9) + m_level = base::consts::kMaxVerboseLevel; + else + m_level = level; } -void VRegistry::setModules(const char* modules) { - base::threading::ScopedLock scopedLock(lock()); - auto addSuffix = [](std::stringstream& ss, const char* sfx, const char* prev) { - if (prev != nullptr && base::utils::Str::endsWith(ss.str(), std::string(prev))) { - std::string chr(ss.str().substr(0, ss.str().size() - strlen(prev))); - ss.str(std::string("")); - ss << chr; - } - if (base::utils::Str::endsWith(ss.str(), std::string(sfx))) { - std::string chr(ss.str().substr(0, ss.str().size() - strlen(sfx))); - ss.str(std::string("")); - ss << chr; - } - ss << sfx; - }; - auto insert = [&](std::stringstream& ss, base::type::VerboseLevel level) { - if (!base::utils::hasFlag(LoggingFlag::DisableVModulesExtensions, *m_pFlags)) { - addSuffix(ss, ".h", nullptr); - m_modules.insert(std::make_pair(ss.str(), level)); - addSuffix(ss, ".c", ".h"); - m_modules.insert(std::make_pair(ss.str(), level)); - addSuffix(ss, ".cpp", ".c"); - m_modules.insert(std::make_pair(ss.str(), level)); - addSuffix(ss, ".cc", ".cpp"); - m_modules.insert(std::make_pair(ss.str(), level)); - addSuffix(ss, ".cxx", ".cc"); - m_modules.insert(std::make_pair(ss.str(), level)); - addSuffix(ss, ".-inl.h", ".cxx"); - m_modules.insert(std::make_pair(ss.str(), level)); - addSuffix(ss, ".hxx", ".-inl.h"); - m_modules.insert(std::make_pair(ss.str(), level)); - addSuffix(ss, ".hpp", ".hxx"); - m_modules.insert(std::make_pair(ss.str(), level)); - addSuffix(ss, ".hh", ".hpp"); - } - m_modules.insert(std::make_pair(ss.str(), level)); - }; - bool isMod = true; - bool isLevel = false; - std::stringstream ss; - int level = -1; - for (; *modules; ++modules) { - switch (*modules) { - case '=': - isLevel = true; - isMod = false; - break; - case ',': - isLevel = false; - isMod = true; - if (!ss.str().empty() && level != -1) { - insert(ss, static_cast(level)); - ss.str(std::string("")); - level = -1; - } - break; - default: - if (isMod) { - ss << *modules; - } else if (isLevel) { - if (isdigit(*modules)) { - level = static_cast(*modules) - 48; +void +VRegistry::setModules(const char* modules) { + base::threading::ScopedLock scopedLock(lock()); + auto addSuffix = [](std::stringstream& ss, const char* sfx, const char* prev) { + if (prev != nullptr && base::utils::Str::endsWith(ss.str(), std::string(prev))) { + std::string chr(ss.str().substr(0, ss.str().size() - strlen(prev))); + ss.str(std::string("")); + ss << chr; + } + if (base::utils::Str::endsWith(ss.str(), std::string(sfx))) { + std::string chr(ss.str().substr(0, ss.str().size() - strlen(sfx))); + ss.str(std::string("")); + ss << chr; + } + ss << sfx; + }; + auto insert = [&](std::stringstream& ss, base::type::VerboseLevel level) { + if (!base::utils::hasFlag(LoggingFlag::DisableVModulesExtensions, *m_pFlags)) { + addSuffix(ss, ".h", nullptr); + m_modules.insert(std::make_pair(ss.str(), level)); + addSuffix(ss, ".c", ".h"); + m_modules.insert(std::make_pair(ss.str(), level)); + addSuffix(ss, ".cpp", ".c"); + m_modules.insert(std::make_pair(ss.str(), level)); + addSuffix(ss, ".cc", ".cpp"); + m_modules.insert(std::make_pair(ss.str(), level)); + addSuffix(ss, ".cxx", ".cc"); + 
m_modules.insert(std::make_pair(ss.str(), level)); + addSuffix(ss, ".-inl.h", ".cxx"); + m_modules.insert(std::make_pair(ss.str(), level)); + addSuffix(ss, ".hxx", ".-inl.h"); + m_modules.insert(std::make_pair(ss.str(), level)); + addSuffix(ss, ".hpp", ".hxx"); + m_modules.insert(std::make_pair(ss.str(), level)); + addSuffix(ss, ".hh", ".hpp"); + } + m_modules.insert(std::make_pair(ss.str(), level)); + }; + bool isMod = true; + bool isLevel = false; + std::stringstream ss; + int level = -1; + for (; *modules; ++modules) { + switch (*modules) { + case '=': + isLevel = true; + isMod = false; + break; + case ',': + isLevel = false; + isMod = true; + if (!ss.str().empty() && level != -1) { + insert(ss, static_cast(level)); + ss.str(std::string("")); + level = -1; + } + break; + default: + if (isMod) { + ss << *modules; + } else if (isLevel) { + if (isdigit(*modules)) { + level = static_cast(*modules) - 48; + } + } + break; } - } - break; } - } - if (!ss.str().empty() && level != -1) { - insert(ss, static_cast(level)); - } + if (!ss.str().empty() && level != -1) { + insert(ss, static_cast(level)); + } } -bool VRegistry::allowed(base::type::VerboseLevel vlevel, const char* file) { - base::threading::ScopedLock scopedLock(lock()); - if (m_modules.empty() || file == nullptr) { - return vlevel <= m_level; - } else { - char baseFilename[base::consts::kSourceFilenameMaxLength] = ""; - base::utils::File::buildBaseFilename(file, baseFilename); - std::unordered_map::iterator it = m_modules.begin(); - for (; it != m_modules.end(); ++it) { - if (base::utils::Str::wildCardMatch(baseFilename, it->first.c_str())) { - return vlevel <= it->second; - } +bool +VRegistry::allowed(base::type::VerboseLevel vlevel, const char* file) { + base::threading::ScopedLock scopedLock(lock()); + if (m_modules.empty() || file == nullptr) { + return vlevel <= m_level; + } else { + char baseFilename[base::consts::kSourceFilenameMaxLength] = ""; + base::utils::File::buildBaseFilename(file, baseFilename); + std::unordered_map::iterator it = m_modules.begin(); + for (; it != m_modules.end(); ++it) { + if (base::utils::Str::wildCardMatch(baseFilename, it->first.c_str())) { + return vlevel <= it->second; + } + } + if (base::utils::hasFlag(LoggingFlag::AllowVerboseIfModuleNotSpecified, *m_pFlags)) { + return true; + } + return false; } - if (base::utils::hasFlag(LoggingFlag::AllowVerboseIfModuleNotSpecified, *m_pFlags)) { - return true; - } - return false; - } } -void VRegistry::setFromArgs(const base::utils::CommandLineArgs* commandLineArgs) { - if (commandLineArgs->hasParam("-v") || commandLineArgs->hasParam("--verbose") || - commandLineArgs->hasParam("-V") || commandLineArgs->hasParam("--VERBOSE")) { - setLevel(base::consts::kMaxVerboseLevel); - } else if (commandLineArgs->hasParamWithValue("--v")) { - setLevel(static_cast(atoi(commandLineArgs->getParamValue("--v")))); - } else if (commandLineArgs->hasParamWithValue("--V")) { - setLevel(static_cast(atoi(commandLineArgs->getParamValue("--V")))); - } else if ((commandLineArgs->hasParamWithValue("-vmodule")) && vModulesEnabled()) { - setModules(commandLineArgs->getParamValue("-vmodule")); - } else if (commandLineArgs->hasParamWithValue("-VMODULE") && vModulesEnabled()) { - setModules(commandLineArgs->getParamValue("-VMODULE")); - } +void +VRegistry::setFromArgs(const base::utils::CommandLineArgs* commandLineArgs) { + if (commandLineArgs->hasParam("-v") || commandLineArgs->hasParam("--verbose") || commandLineArgs->hasParam("-V") || + commandLineArgs->hasParam("--VERBOSE")) { + 
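// ---------------------------------------------------------------------------
// Editor's note: self-contained sketch (not part of this patch) for
// VRegistry::setModules()/allowed() above and the command-line handling in
// setFromArgs(). Unless LoggingFlag::DisableVModulesExtensions is set, each
// module pattern is expanded with the source suffixes (.h, .c, .cpp, ...)
// added by the insert() lambda, and matching is a wildcard match on the base
// filename.
#include "easylogging++.h"

INITIALIZE_EASYLOGGINGPP

int main(int argc, char** argv) {
    START_EASYLOGGINGPP(argc, argv);        // honors -v/--verbose, --v=N, -vmodule=...
    el::Loggers::setVModules("parser*=2");  // same effect as passing -vmodule=parser*=2
    VLOG(2) << "emitted when this file matches parser* at verbose level >= 2";
    // Files matching no pattern fall back to LoggingFlag::AllowVerboseIfModuleNotSpecified,
    // which Storage switches on by default (see its constructor below).
    return 0;
}
// ---------------------------------------------------------------------------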
setLevel(base::consts::kMaxVerboseLevel);
+    } else if (commandLineArgs->hasParamWithValue("--v")) {
+        setLevel(static_cast<base::type::VerboseLevel>(atoi(commandLineArgs->getParamValue("--v"))));
+    } else if (commandLineArgs->hasParamWithValue("--V")) {
+        setLevel(static_cast<base::type::VerboseLevel>(atoi(commandLineArgs->getParamValue("--V"))));
+    } else if ((commandLineArgs->hasParamWithValue("-vmodule")) && vModulesEnabled()) {
+        setModules(commandLineArgs->getParamValue("-vmodule"));
+    } else if (commandLineArgs->hasParamWithValue("-VMODULE") && vModulesEnabled()) {
+        setModules(commandLineArgs->getParamValue("-VMODULE"));
+    }
 }

 #if !defined(ELPP_DEFAULT_LOGGING_FLAGS)
-# define ELPP_DEFAULT_LOGGING_FLAGS 0x0
-#endif // !defined(ELPP_DEFAULT_LOGGING_FLAGS)
+#define ELPP_DEFAULT_LOGGING_FLAGS 0x0
+#endif // !defined(ELPP_DEFAULT_LOGGING_FLAGS)

 // Storage
 #if ELPP_ASYNC_LOGGING
-Storage::Storage(const LogBuilderPtr& defaultLogBuilder, base::IWorker* asyncDispatchWorker) :
+Storage::Storage(const LogBuilderPtr& defaultLogBuilder, base::IWorker* asyncDispatchWorker)
+    :
 #else
-Storage::Storage(const LogBuilderPtr& defaultLogBuilder) :
+Storage::Storage(const LogBuilderPtr& defaultLogBuilder)
+    :
 #endif  // ELPP_ASYNC_LOGGING
-  m_registeredHitCounters(new base::RegisteredHitCounters()),
-  m_registeredLoggers(new base::RegisteredLoggers(defaultLogBuilder)),
-  m_flags(ELPP_DEFAULT_LOGGING_FLAGS),
-  m_vRegistry(new base::VRegistry(0, &m_flags)),
+      m_registeredHitCounters(new base::RegisteredHitCounters()),
+      m_registeredLoggers(new base::RegisteredLoggers(defaultLogBuilder)),
+      m_flags(ELPP_DEFAULT_LOGGING_FLAGS),
+      m_vRegistry(new base::VRegistry(0, &m_flags)),
 #if ELPP_ASYNC_LOGGING
-  m_asyncLogQueue(new base::AsyncLogQueue()),
-  m_asyncDispatchWorker(asyncDispatchWorker),
+      m_asyncLogQueue(new base::AsyncLogQueue()),
+      m_asyncDispatchWorker(asyncDispatchWorker),
 #endif  // ELPP_ASYNC_LOGGING
-  m_preRollOutCallback(base::defaultPreRollOutCallback) {
-  // Register default logger
-  m_registeredLoggers->get(std::string(base::consts::kDefaultLoggerId));
-  // We register default logger anyway (worse case it's not going to register) just in case
-  m_registeredLoggers->get("default");
+      m_preRollOutCallback(base::defaultPreRollOutCallback) {
+    // Register default logger
+    m_registeredLoggers->get(std::string(base::consts::kDefaultLoggerId));
+    // We register default logger anyway (worst case it's not going to register) just in case
+    m_registeredLoggers->get("default");
 #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
-  // Register performance logger and reconfigure format
-  Logger* performanceLogger = m_registeredLoggers->get(std::string(base::consts::kPerformanceLoggerId));
-  m_registeredLoggers->get("performance");
-  performanceLogger->configurations()->setGlobally(ConfigurationType::Format, std::string("%datetime %level %msg"));
-  performanceLogger->reconfigure();
-#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
+    // Register performance logger and reconfigure format
+    Logger* performanceLogger = m_registeredLoggers->get(std::string(base::consts::kPerformanceLoggerId));
+    m_registeredLoggers->get("performance");
+    performanceLogger->configurations()->setGlobally(ConfigurationType::Format, std::string("%datetime %level %msg"));
+    performanceLogger->reconfigure();
+#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
 #if defined(ELPP_SYSLOG)
-  // Register syslog logger and reconfigure format
-  Logger* sysLogLogger =
m_registeredLoggers->get(std::string(base::consts::kSysLogLoggerId)); - sysLogLogger->configurations()->setGlobally(ConfigurationType::Format, std::string("%level: %msg")); - sysLogLogger->reconfigure(); -#endif // defined(ELPP_SYSLOG) - addFlag(LoggingFlag::AllowVerboseIfModuleNotSpecified); + // Register syslog logger and reconfigure format + Logger* sysLogLogger = m_registeredLoggers->get(std::string(base::consts::kSysLogLoggerId)); + sysLogLogger->configurations()->setGlobally(ConfigurationType::Format, std::string("%level: %msg")); + sysLogLogger->reconfigure(); +#endif // defined(ELPP_SYSLOG) + addFlag(LoggingFlag::AllowVerboseIfModuleNotSpecified); #if ELPP_ASYNC_LOGGING - installLogDispatchCallback(std::string("AsyncLogDispatchCallback")); + installLogDispatchCallback(std::string("AsyncLogDispatchCallback")); #else - installLogDispatchCallback(std::string("DefaultLogDispatchCallback")); + installLogDispatchCallback(std::string("DefaultLogDispatchCallback")); #endif // ELPP_ASYNC_LOGGING #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING) - installPerformanceTrackingCallback - (std::string("DefaultPerformanceTrackingCallback")); -#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING) - ELPP_INTERNAL_INFO(1, "Easylogging++ has been initialized"); + installPerformanceTrackingCallback( + std::string("DefaultPerformanceTrackingCallback")); +#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING) + ELPP_INTERNAL_INFO(1, "Easylogging++ has been initialized"); #if ELPP_ASYNC_LOGGING - m_asyncDispatchWorker->start(); + m_asyncDispatchWorker->start(); #endif // ELPP_ASYNC_LOGGING } Storage::~Storage(void) { - ELPP_INTERNAL_INFO(4, "Destroying storage"); + ELPP_INTERNAL_INFO(4, "Destroying storage"); #if ELPP_ASYNC_LOGGING - ELPP_INTERNAL_INFO(5, "Replacing log dispatch callback to synchronous"); - uninstallLogDispatchCallback(std::string("AsyncLogDispatchCallback")); - installLogDispatchCallback(std::string("DefaultLogDispatchCallback")); - ELPP_INTERNAL_INFO(5, "Destroying asyncDispatchWorker"); - base::utils::safeDelete(m_asyncDispatchWorker); - ELPP_INTERNAL_INFO(5, "Destroying asyncLogQueue"); - base::utils::safeDelete(m_asyncLogQueue); + ELPP_INTERNAL_INFO(5, "Replacing log dispatch callback to synchronous"); + uninstallLogDispatchCallback(std::string("AsyncLogDispatchCallback")); + installLogDispatchCallback(std::string("DefaultLogDispatchCallback")); + ELPP_INTERNAL_INFO(5, "Destroying asyncDispatchWorker"); + base::utils::safeDelete(m_asyncDispatchWorker); + ELPP_INTERNAL_INFO(5, "Destroying asyncLogQueue"); + base::utils::safeDelete(m_asyncLogQueue); #endif // ELPP_ASYNC_LOGGING - ELPP_INTERNAL_INFO(5, "Destroying registeredHitCounters"); - base::utils::safeDelete(m_registeredHitCounters); - ELPP_INTERNAL_INFO(5, "Destroying registeredLoggers"); - base::utils::safeDelete(m_registeredLoggers); - ELPP_INTERNAL_INFO(5, "Destroying vRegistry"); - base::utils::safeDelete(m_vRegistry); + ELPP_INTERNAL_INFO(5, "Destroying registeredHitCounters"); + base::utils::safeDelete(m_registeredHitCounters); + ELPP_INTERNAL_INFO(5, "Destroying registeredLoggers"); + base::utils::safeDelete(m_registeredLoggers); + ELPP_INTERNAL_INFO(5, "Destroying vRegistry"); + base::utils::safeDelete(m_vRegistry); } -bool Storage::hasCustomFormatSpecifier(const char* formatSpecifier) { - base::threading::ScopedLock scopedLock(customFormatSpecifiersLock()); - return std::find(m_customFormatSpecifiers.begin(), m_customFormatSpecifiers.end(), 
- formatSpecifier) != m_customFormatSpecifiers.end(); +bool +Storage::hasCustomFormatSpecifier(const char* formatSpecifier) { + base::threading::ScopedLock scopedLock(customFormatSpecifiersLock()); + return std::find(m_customFormatSpecifiers.begin(), m_customFormatSpecifiers.end(), formatSpecifier) != + m_customFormatSpecifiers.end(); } -void Storage::installCustomFormatSpecifier(const CustomFormatSpecifier& customFormatSpecifier) { - if (hasCustomFormatSpecifier(customFormatSpecifier.formatSpecifier())) { - return; - } - base::threading::ScopedLock scopedLock(customFormatSpecifiersLock()); - m_customFormatSpecifiers.push_back(customFormatSpecifier); -} - -bool Storage::uninstallCustomFormatSpecifier(const char* formatSpecifier) { - base::threading::ScopedLock scopedLock(customFormatSpecifiersLock()); - std::vector::iterator it = std::find(m_customFormatSpecifiers.begin(), - m_customFormatSpecifiers.end(), formatSpecifier); - if (it != m_customFormatSpecifiers.end() && strcmp(formatSpecifier, it->formatSpecifier()) == 0) { - m_customFormatSpecifiers.erase(it); - return true; - } - return false; -} - -void Storage::setApplicationArguments(int argc, char** argv) { - m_commandLineArgs.setArgs(argc, argv); - m_vRegistry->setFromArgs(commandLineArgs()); - // default log file -#if !defined(ELPP_DISABLE_LOG_FILE_FROM_ARG) - if (m_commandLineArgs.hasParamWithValue(base::consts::kDefaultLogFileParam)) { - Configurations c; - c.setGlobally(ConfigurationType::Filename, - std::string(m_commandLineArgs.getParamValue(base::consts::kDefaultLogFileParam))); - registeredLoggers()->setDefaultConfigurations(c); - for (base::RegisteredLoggers::iterator it = registeredLoggers()->begin(); - it != registeredLoggers()->end(); ++it) { - it->second->configure(c); +void +Storage::installCustomFormatSpecifier(const CustomFormatSpecifier& customFormatSpecifier) { + if (hasCustomFormatSpecifier(customFormatSpecifier.formatSpecifier())) { + return; + } + base::threading::ScopedLock scopedLock(customFormatSpecifiersLock()); + m_customFormatSpecifiers.push_back(customFormatSpecifier); +} + +bool +Storage::uninstallCustomFormatSpecifier(const char* formatSpecifier) { + base::threading::ScopedLock scopedLock(customFormatSpecifiersLock()); + std::vector::iterator it = + std::find(m_customFormatSpecifiers.begin(), m_customFormatSpecifiers.end(), formatSpecifier); + if (it != m_customFormatSpecifiers.end() && strcmp(formatSpecifier, it->formatSpecifier()) == 0) { + m_customFormatSpecifiers.erase(it); + return true; + } + return false; +} + +void +Storage::setApplicationArguments(int argc, char** argv) { + m_commandLineArgs.setArgs(argc, argv); + m_vRegistry->setFromArgs(commandLineArgs()); + // default log file +#if !defined(ELPP_DISABLE_LOG_FILE_FROM_ARG) + if (m_commandLineArgs.hasParamWithValue(base::consts::kDefaultLogFileParam)) { + Configurations c; + c.setGlobally(ConfigurationType::Filename, + std::string(m_commandLineArgs.getParamValue(base::consts::kDefaultLogFileParam))); + registeredLoggers()->setDefaultConfigurations(c); + for (base::RegisteredLoggers::iterator it = registeredLoggers()->begin(); it != registeredLoggers()->end(); + ++it) { + it->second->configure(c); + } } - } #endif // !defined(ELPP_DISABLE_LOG_FILE_FROM_ARG) #if defined(ELPP_LOGGING_FLAGS_FROM_ARG) - if (m_commandLineArgs.hasParamWithValue(base::consts::kLoggingFlagsParam)) { - int userInput = atoi(m_commandLineArgs.getParamValue(base::consts::kLoggingFlagsParam)); - if (ELPP_DEFAULT_LOGGING_FLAGS == 0x0) { - m_flags = userInput; - } else { - 
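// ---------------------------------------------------------------------------
// Editor's note: self-contained sketch (not part of this patch) for the
// registry behind install/uninstallCustomFormatSpecifier() above. The "%ip"
// specifier and resolveIp() are illustrative names, not library API.
#include "easylogging++.h"

INITIALIZE_EASYLOGGINGPP

static std::string resolveIp(const el::LogMessage*) {
    return "127.0.0.1";  // stand-in; a real resolver would inspect the message/context
}

int main() {
    el::Helpers::installCustomFormatSpecifier(el::CustomFormatSpecifier("%ip", resolveIp));
    el::Loggers::reconfigureAllLoggers(el::ConfigurationType::Format, "%datetime %ip %level %msg");
    LOG(INFO) << "this line carries the resolved %ip value";
    el::Helpers::uninstallCustomFormatSpecifier("%ip");
    return 0;
}
// ---------------------------------------------------------------------------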
base::utils::addFlag(userInput, &m_flags); + if (m_commandLineArgs.hasParamWithValue(base::consts::kLoggingFlagsParam)) { + int userInput = atoi(m_commandLineArgs.getParamValue(base::consts::kLoggingFlagsParam)); + if (ELPP_DEFAULT_LOGGING_FLAGS == 0x0) { + m_flags = userInput; + } else { + base::utils::addFlag(userInput, &m_flags); + } } - } #endif // defined(ELPP_LOGGING_FLAGS_FROM_ARG) } -} // namespace base +} // namespace base // LogDispatchCallback -void LogDispatchCallback::handle(const LogDispatchData* data) { +void +LogDispatchCallback::handle(const LogDispatchData* data) { #if defined(ELPP_THREAD_SAFE) - base::threading::ScopedLock scopedLock(m_fileLocksMapLock); - std::string filename = data->logMessage()->logger()->typedConfigurations()->filename(data->logMessage()->level()); - auto lock = m_fileLocks.find(filename); - if (lock == m_fileLocks.end()) { - m_fileLocks.emplace(std::make_pair(filename, std::unique_ptr(new base::threading::Mutex))); - } + base::threading::ScopedLock scopedLock(m_fileLocksMapLock); + std::string filename = data->logMessage()->logger()->typedConfigurations()->filename(data->logMessage()->level()); + auto lock = m_fileLocks.find(filename); + if (lock == m_fileLocks.end()) { + m_fileLocks.emplace( + std::make_pair(filename, std::unique_ptr(new base::threading::Mutex))); + } #endif } -base::threading::Mutex& LogDispatchCallback::fileHandle(const LogDispatchData* data) { - auto it = m_fileLocks.find(data->logMessage()->logger()->typedConfigurations()->filename(data->logMessage()->level())); - return *(it->second.get()); +base::threading::Mutex& +LogDispatchCallback::fileHandle(const LogDispatchData* data) { + auto it = + m_fileLocks.find(data->logMessage()->logger()->typedConfigurations()->filename(data->logMessage()->level())); + return *(it->second.get()); } namespace base { // DefaultLogDispatchCallback -void DefaultLogDispatchCallback::handle(const LogDispatchData* data) { +void +DefaultLogDispatchCallback::handle(const LogDispatchData* data) { #if defined(ELPP_THREAD_SAFE) - LogDispatchCallback::handle(data); - base::threading::ScopedLock scopedLock(fileHandle(data)); + LogDispatchCallback::handle(data); + base::threading::ScopedLock scopedLock(fileHandle(data)); #endif - m_data = data; - dispatch(m_data->logMessage()->logger()->logBuilder()->build(m_data->logMessage(), - m_data->dispatchAction() == base::DispatchAction::NormalLog)); + m_data = data; + dispatch(m_data->logMessage()->logger()->logBuilder()->build( + m_data->logMessage(), m_data->dispatchAction() == base::DispatchAction::NormalLog)); } -void DefaultLogDispatchCallback::dispatch(base::type::string_t&& logLine) { - if (m_data->dispatchAction() == base::DispatchAction::NormalLog) { - if (m_data->logMessage()->logger()->m_typedConfigurations->toFile(m_data->logMessage()->level())) { - base::type::fstream_t* fs = m_data->logMessage()->logger()->m_typedConfigurations->fileStream( - m_data->logMessage()->level()); - if (fs != nullptr) { - fs->write(logLine.c_str(), logLine.size()); - if (fs->fail()) { - ELPP_INTERNAL_ERROR("Unable to write log to file [" - << m_data->logMessage()->logger()->m_typedConfigurations->filename(m_data->logMessage()->level()) << "].\n" - << "Few possible reasons (could be something else):\n" << " * Permission denied\n" - << " * Disk full\n" << " * Disk is not writable", true); - } else { - if (ELPP->hasFlag(LoggingFlag::ImmediateFlush) - || (m_data->logMessage()->logger()->isFlushNeeded(m_data->logMessage()->level()))) { - 
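// ---------------------------------------------------------------------------
// Editor's note: self-contained sketch (not part of this patch) showing a user
// callback on the same dispatch hook as the DefaultLogDispatchCallback below;
// the class name and registration ID are illustrative.
#include "easylogging++.h"

INITIALIZE_EASYLOGGINGPP

class CountingCallback : public el::LogDispatchCallback {
public:
    static int dispatched;

protected:
    void handle(const el::LogDispatchData* data) noexcept override {
        (void)data;    // the message, its logger, and the dispatch action are available here
        ++dispatched;  // one call per dispatched log line
    }
};
int CountingCallback::dispatched = 0;

int main() {
    el::Helpers::installLogDispatchCallback<CountingCallback>("CountingCallback");
    LOG(INFO) << "counted";
    el::Helpers::uninstallLogDispatchCallback<CountingCallback>("CountingCallback");
    return 0;
}
// ---------------------------------------------------------------------------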
m_data->logMessage()->logger()->flush(m_data->logMessage()->level(), fs); - } +void +DefaultLogDispatchCallback::dispatch(base::type::string_t&& logLine) { + if (m_data->dispatchAction() == base::DispatchAction::NormalLog) { + if (m_data->logMessage()->logger()->m_typedConfigurations->toFile(m_data->logMessage()->level())) { + base::type::fstream_t* fs = + m_data->logMessage()->logger()->m_typedConfigurations->fileStream(m_data->logMessage()->level()); + if (fs != nullptr) { + fs->write(logLine.c_str(), logLine.size()); + if (fs->fail()) { + ELPP_INTERNAL_ERROR("Unable to write log to file [" + << m_data->logMessage()->logger()->m_typedConfigurations->filename( + m_data->logMessage()->level()) + << "].\n" + << "Few possible reasons (could be something else):\n" + << " * Permission denied\n" + << " * Disk full\n" + << " * Disk is not writable", + true); + } else { + if (ELPP->hasFlag(LoggingFlag::ImmediateFlush) || + (m_data->logMessage()->logger()->isFlushNeeded(m_data->logMessage()->level()))) { + m_data->logMessage()->logger()->flush(m_data->logMessage()->level(), fs); + } + } + } else { + ELPP_INTERNAL_ERROR("Log file for [" + << LevelHelper::convertToString(m_data->logMessage()->level()) << "] " + << "has not been configured but [TO_FILE] is configured to TRUE. [Logger ID: " + << m_data->logMessage()->logger()->id() << "]", + false); + } + } + if (m_data->logMessage()->logger()->m_typedConfigurations->toStandardOutput(m_data->logMessage()->level())) { + if (ELPP->hasFlag(LoggingFlag::ColoredTerminalOutput)) + m_data->logMessage()->logger()->logBuilder()->convertToColoredOutput(&logLine, + m_data->logMessage()->level()); + ELPP_COUT << ELPP_COUT_LINE(logLine); } - } else { - ELPP_INTERNAL_ERROR("Log file for [" << LevelHelper::convertToString(m_data->logMessage()->level()) << "] " - << "has not been configured but [TO_FILE] is configured to TRUE. 
[Logger ID: " - << m_data->logMessage()->logger()->id() << "]", false); - } } - if (m_data->logMessage()->logger()->m_typedConfigurations->toStandardOutput(m_data->logMessage()->level())) { - if (ELPP->hasFlag(LoggingFlag::ColoredTerminalOutput)) - m_data->logMessage()->logger()->logBuilder()->convertToColoredOutput(&logLine, m_data->logMessage()->level()); - ELPP_COUT << ELPP_COUT_LINE(logLine); - } - } #if defined(ELPP_SYSLOG) - else if (m_data->dispatchAction() == base::DispatchAction::SysLog) { - // Determine syslog priority - int sysLogPriority = 0; - if (m_data->logMessage()->level() == Level::Fatal) - sysLogPriority = LOG_EMERG; - else if (m_data->logMessage()->level() == Level::Error) - sysLogPriority = LOG_ERR; - else if (m_data->logMessage()->level() == Level::Warning) - sysLogPriority = LOG_WARNING; - else if (m_data->logMessage()->level() == Level::Info) - sysLogPriority = LOG_INFO; - else if (m_data->logMessage()->level() == Level::Debug) - sysLogPriority = LOG_DEBUG; - else - sysLogPriority = LOG_NOTICE; -# if defined(ELPP_UNICODE) - char* line = base::utils::Str::wcharPtrToCharPtr(logLine.c_str()); - syslog(sysLogPriority, "%s", line); - free(line); -# else - syslog(sysLogPriority, "%s", logLine.c_str()); -# endif - } + else if (m_data->dispatchAction() == base::DispatchAction::SysLog) { + // Determine syslog priority + int sysLogPriority = 0; + if (m_data->logMessage()->level() == Level::Fatal) + sysLogPriority = LOG_EMERG; + else if (m_data->logMessage()->level() == Level::Error) + sysLogPriority = LOG_ERR; + else if (m_data->logMessage()->level() == Level::Warning) + sysLogPriority = LOG_WARNING; + else if (m_data->logMessage()->level() == Level::Info) + sysLogPriority = LOG_INFO; + else if (m_data->logMessage()->level() == Level::Debug) + sysLogPriority = LOG_DEBUG; + else + sysLogPriority = LOG_NOTICE; +#if defined(ELPP_UNICODE) + char* line = base::utils::Str::wcharPtrToCharPtr(logLine.c_str()); + syslog(sysLogPriority, "%s", line); + free(line); +#else + syslog(sysLogPriority, "%s", logLine.c_str()); +#endif + } #endif // defined(ELPP_SYSLOG) } @@ -2274,487 +2404,516 @@ void DefaultLogDispatchCallback::dispatch(base::type::string_t&& logLine) { // AsyncLogDispatchCallback -void AsyncLogDispatchCallback::handle(const LogDispatchData* data) { - base::type::string_t logLine = data->logMessage()->logger()->logBuilder()->build(data->logMessage(), - data->dispatchAction() == base::DispatchAction::NormalLog); - if (data->dispatchAction() == base::DispatchAction::NormalLog - && data->logMessage()->logger()->typedConfigurations()->toStandardOutput(data->logMessage()->level())) { - if (ELPP->hasFlag(LoggingFlag::ColoredTerminalOutput)) - data->logMessage()->logger()->logBuilder()->convertToColoredOutput(&logLine, data->logMessage()->level()); - ELPP_COUT << ELPP_COUT_LINE(logLine); - } - // Save resources and only queue if we want to write to file otherwise just ignore handler - if (data->logMessage()->logger()->typedConfigurations()->toFile(data->logMessage()->level())) { - ELPP->asyncLogQueue()->push(AsyncLogItem(*(data->logMessage()), *data, logLine)); - } +void +AsyncLogDispatchCallback::handle(const LogDispatchData* data) { + base::type::string_t logLine = data->logMessage()->logger()->logBuilder()->build( + data->logMessage(), data->dispatchAction() == base::DispatchAction::NormalLog); + if (data->dispatchAction() == base::DispatchAction::NormalLog && + data->logMessage()->logger()->typedConfigurations()->toStandardOutput(data->logMessage()->level())) { + if 
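// ---------------------------------------------------------------------------
// Editor's note: self-contained sketch (not part of this patch) for the
// ELPP_SYSLOG branch above; it only compiles with -DELPP_SYSLOG on a platform
// providing <syslog.h>. The priority mapping follows the code above
// (Fatal -> LOG_EMERG, Error -> LOG_ERR, ..., default LOG_NOTICE).
#include "easylogging++.h"

INITIALIZE_EASYLOGGINGPP

int main() {
    SYSLOG(INFO) << "forwarded to syslog with priority LOG_INFO";
    return 0;
}
// ---------------------------------------------------------------------------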
(ELPP->hasFlag(LoggingFlag::ColoredTerminalOutput)) + data->logMessage()->logger()->logBuilder()->convertToColoredOutput(&logLine, data->logMessage()->level()); + ELPP_COUT << ELPP_COUT_LINE(logLine); + } + // Save resources and only queue if we want to write to file otherwise just ignore handler + if (data->logMessage()->logger()->typedConfigurations()->toFile(data->logMessage()->level())) { + ELPP->asyncLogQueue()->push(AsyncLogItem(*(data->logMessage()), *data, logLine)); + } } // AsyncDispatchWorker AsyncDispatchWorker::AsyncDispatchWorker() { - setContinueRunning(false); + setContinueRunning(false); } AsyncDispatchWorker::~AsyncDispatchWorker() { - setContinueRunning(false); - ELPP_INTERNAL_INFO(6, "Stopping dispatch worker - Cleaning log queue"); - clean(); - ELPP_INTERNAL_INFO(6, "Log queue cleaned"); + setContinueRunning(false); + ELPP_INTERNAL_INFO(6, "Stopping dispatch worker - Cleaning log queue"); + clean(); + ELPP_INTERNAL_INFO(6, "Log queue cleaned"); } -bool AsyncDispatchWorker::clean(void) { - std::mutex m; - std::unique_lock lk(m); - cv.wait(lk, [] { return !ELPP->asyncLogQueue()->empty(); }); - emptyQueue(); - lk.unlock(); - cv.notify_one(); - return ELPP->asyncLogQueue()->empty(); -} - -void AsyncDispatchWorker::emptyQueue(void) { - while (!ELPP->asyncLogQueue()->empty()) { - AsyncLogItem data = ELPP->asyncLogQueue()->next(); - handle(&data); - base::threading::msleep(100); - } -} - -void AsyncDispatchWorker::start(void) { - base::threading::msleep(5000); // 5s (why?) - setContinueRunning(true); - std::thread t1(&AsyncDispatchWorker::run, this); - t1.join(); -} - -void AsyncDispatchWorker::handle(AsyncLogItem* logItem) { - LogDispatchData* data = logItem->data(); - LogMessage* logMessage = logItem->logMessage(); - Logger* logger = logMessage->logger(); - base::TypedConfigurations* conf = logger->typedConfigurations(); - base::type::string_t logLine = logItem->logLine(); - if (data->dispatchAction() == base::DispatchAction::NormalLog) { - if (conf->toFile(logMessage->level())) { - base::type::fstream_t* fs = conf->fileStream(logMessage->level()); - if (fs != nullptr) { - fs->write(logLine.c_str(), logLine.size()); - if (fs->fail()) { - ELPP_INTERNAL_ERROR("Unable to write log to file [" - << conf->filename(logMessage->level()) << "].\n" - << "Few possible reasons (could be something else):\n" << " * Permission denied\n" - << " * Disk full\n" << " * Disk is not writable", true); - } else { - if (ELPP->hasFlag(LoggingFlag::ImmediateFlush) || (logger->isFlushNeeded(logMessage->level()))) { - logger->flush(logMessage->level(), fs); - } - } - } else { - ELPP_INTERNAL_ERROR("Log file for [" << LevelHelper::convertToString(logMessage->level()) << "] " - << "has not been configured but [TO_FILE] is configured to TRUE. 
[Logger ID: " << logger->id() << "]", false); - } - } - } -# if defined(ELPP_SYSLOG) - else if (data->dispatchAction() == base::DispatchAction::SysLog) { - // Determine syslog priority - int sysLogPriority = 0; - if (logMessage->level() == Level::Fatal) - sysLogPriority = LOG_EMERG; - else if (logMessage->level() == Level::Error) - sysLogPriority = LOG_ERR; - else if (logMessage->level() == Level::Warning) - sysLogPriority = LOG_WARNING; - else if (logMessage->level() == Level::Info) - sysLogPriority = LOG_INFO; - else if (logMessage->level() == Level::Debug) - sysLogPriority = LOG_DEBUG; - else - sysLogPriority = LOG_NOTICE; -# if defined(ELPP_UNICODE) - char* line = base::utils::Str::wcharPtrToCharPtr(logLine.c_str()); - syslog(sysLogPriority, "%s", line); - free(line); -# else - syslog(sysLogPriority, "%s", logLine.c_str()); -# endif - } -# endif // defined(ELPP_SYSLOG) -} - -void AsyncDispatchWorker::run(void) { - while (continueRunning()) { +bool +AsyncDispatchWorker::clean(void) { + std::mutex m; + std::unique_lock lk(m); + cv.wait(lk, [] { return !ELPP->asyncLogQueue()->empty(); }); emptyQueue(); - base::threading::msleep(10); // 10ms - } + lk.unlock(); + cv.notify_one(); + return ELPP->asyncLogQueue()->empty(); +} + +void +AsyncDispatchWorker::emptyQueue(void) { + while (!ELPP->asyncLogQueue()->empty()) { + AsyncLogItem data = ELPP->asyncLogQueue()->next(); + handle(&data); + base::threading::msleep(100); + } +} + +void +AsyncDispatchWorker::start(void) { + base::threading::msleep(5000); // 5s (why?) + setContinueRunning(true); + std::thread t1(&AsyncDispatchWorker::run, this); + t1.join(); +} + +void +AsyncDispatchWorker::handle(AsyncLogItem* logItem) { + LogDispatchData* data = logItem->data(); + LogMessage* logMessage = logItem->logMessage(); + Logger* logger = logMessage->logger(); + base::TypedConfigurations* conf = logger->typedConfigurations(); + base::type::string_t logLine = logItem->logLine(); + if (data->dispatchAction() == base::DispatchAction::NormalLog) { + if (conf->toFile(logMessage->level())) { + base::type::fstream_t* fs = conf->fileStream(logMessage->level()); + if (fs != nullptr) { + fs->write(logLine.c_str(), logLine.size()); + if (fs->fail()) { + ELPP_INTERNAL_ERROR("Unable to write log to file [" + << conf->filename(logMessage->level()) << "].\n" + << "Few possible reasons (could be something else):\n" + << " * Permission denied\n" + << " * Disk full\n" + << " * Disk is not writable", + true); + } else { + if (ELPP->hasFlag(LoggingFlag::ImmediateFlush) || (logger->isFlushNeeded(logMessage->level()))) { + logger->flush(logMessage->level(), fs); + } + } + } else { + ELPP_INTERNAL_ERROR("Log file for [" + << LevelHelper::convertToString(logMessage->level()) << "] " + << "has not been configured but [TO_FILE] is configured to TRUE. 
[Logger ID: " + << logger->id() << "]", + false); + } + } + } +#if defined(ELPP_SYSLOG) + else if (data->dispatchAction() == base::DispatchAction::SysLog) { + // Determine syslog priority + int sysLogPriority = 0; + if (logMessage->level() == Level::Fatal) + sysLogPriority = LOG_EMERG; + else if (logMessage->level() == Level::Error) + sysLogPriority = LOG_ERR; + else if (logMessage->level() == Level::Warning) + sysLogPriority = LOG_WARNING; + else if (logMessage->level() == Level::Info) + sysLogPriority = LOG_INFO; + else if (logMessage->level() == Level::Debug) + sysLogPriority = LOG_DEBUG; + else + sysLogPriority = LOG_NOTICE; +#if defined(ELPP_UNICODE) + char* line = base::utils::Str::wcharPtrToCharPtr(logLine.c_str()); + syslog(sysLogPriority, "%s", line); + free(line); +#else + syslog(sysLogPriority, "%s", logLine.c_str()); +#endif + } +#endif // defined(ELPP_SYSLOG) +} + +void +AsyncDispatchWorker::run(void) { + while (continueRunning()) { + emptyQueue(); + base::threading::msleep(10); // 10ms + } } #endif // ELPP_ASYNC_LOGGING // DefaultLogBuilder -base::type::string_t DefaultLogBuilder::build(const LogMessage* logMessage, bool appendNewLine) const { - base::TypedConfigurations* tc = logMessage->logger()->typedConfigurations(); - const base::LogFormat* logFormat = &tc->logFormat(logMessage->level()); - base::type::string_t logLine = logFormat->format(); - char buff[base::consts::kSourceFilenameMaxLength + base::consts::kSourceLineMaxLength] = ""; - const char* bufLim = buff + sizeof(buff); - if (logFormat->hasFlag(base::FormatFlags::AppName)) { - // App name - base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kAppNameFormatSpecifier, - logMessage->logger()->parentApplicationName()); - } - if (logFormat->hasFlag(base::FormatFlags::ThreadId)) { - // Thread ID - base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kThreadIdFormatSpecifier, - ELPP->getThreadName(base::threading::getCurrentThreadId())); - } - if (logFormat->hasFlag(base::FormatFlags::DateTime)) { - // DateTime - base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kDateTimeFormatSpecifier, - base::utils::DateTime::getDateTime(logFormat->dateTimeFormat().c_str(), - &tc->subsecondPrecision(logMessage->level()))); - } - if (logFormat->hasFlag(base::FormatFlags::Function)) { - // Function - base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogFunctionFormatSpecifier, logMessage->func()); - } - if (logFormat->hasFlag(base::FormatFlags::File)) { - // File - base::utils::Str::clearBuff(buff, base::consts::kSourceFilenameMaxLength); - base::utils::File::buildStrippedFilename(logMessage->file().c_str(), buff); - base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogFileFormatSpecifier, std::string(buff)); - } - if (logFormat->hasFlag(base::FormatFlags::FileBase)) { - // FileBase - base::utils::Str::clearBuff(buff, base::consts::kSourceFilenameMaxLength); - base::utils::File::buildBaseFilename(logMessage->file(), buff); - base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogFileBaseFormatSpecifier, std::string(buff)); - } - if (logFormat->hasFlag(base::FormatFlags::Line)) { - // Line - char* buf = base::utils::Str::clearBuff(buff, base::consts::kSourceLineMaxLength); - buf = base::utils::Str::convertAndAddToBuff(logMessage->line(), base::consts::kSourceLineMaxLength, buf, bufLim, false); - base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogLineFormatSpecifier, std::string(buff)); - } - if 
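// ---------------------------------------------------------------------------
// Editor's note: sketch (not part of this patch). The async machinery here is
// compiled only when ELPP_ASYNC_LOGGING is on (enabled by building with
// -DELPP_EXPERIMENTAL_ASYNC); call sites are unchanged, messages are queued by
// AsyncLogDispatchCallback and drained by AsyncDispatchWorker::run() in 10 ms
// polling steps. Note the 5 s startup sleep (and the join) in start() above;
// upstream marks this feature experimental.
#include "easylogging++.h"

INITIALIZE_EASYLOGGINGPP

int main() {
    LOG(INFO) << "queued, then written to file by the dispatch worker";
    return 0;  // Storage teardown swaps back to the synchronous callback and drains the queue
}
// ---------------------------------------------------------------------------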
(logFormat->hasFlag(base::FormatFlags::Location)) { - // Location - char* buf = base::utils::Str::clearBuff(buff, - base::consts::kSourceFilenameMaxLength + base::consts::kSourceLineMaxLength); - base::utils::File::buildStrippedFilename(logMessage->file().c_str(), buff); - buf = base::utils::Str::addToBuff(buff, buf, bufLim); - buf = base::utils::Str::addToBuff(":", buf, bufLim); - buf = base::utils::Str::convertAndAddToBuff(logMessage->line(), base::consts::kSourceLineMaxLength, buf, bufLim, - false); - base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogLocationFormatSpecifier, std::string(buff)); - } - if (logMessage->level() == Level::Verbose && logFormat->hasFlag(base::FormatFlags::VerboseLevel)) { - // Verbose level - char* buf = base::utils::Str::clearBuff(buff, 1); - buf = base::utils::Str::convertAndAddToBuff(logMessage->verboseLevel(), 1, buf, bufLim, false); - base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kVerboseLevelFormatSpecifier, std::string(buff)); - } - if (logFormat->hasFlag(base::FormatFlags::LogMessage)) { - // Log message - base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kMessageFormatSpecifier, logMessage->message()); - } +base::type::string_t +DefaultLogBuilder::build(const LogMessage* logMessage, bool appendNewLine) const { + base::TypedConfigurations* tc = logMessage->logger()->typedConfigurations(); + const base::LogFormat* logFormat = &tc->logFormat(logMessage->level()); + base::type::string_t logLine = logFormat->format(); + char buff[base::consts::kSourceFilenameMaxLength + base::consts::kSourceLineMaxLength] = ""; + const char* bufLim = buff + sizeof(buff); + if (logFormat->hasFlag(base::FormatFlags::AppName)) { + // App name + base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kAppNameFormatSpecifier, + logMessage->logger()->parentApplicationName()); + } + if (logFormat->hasFlag(base::FormatFlags::ThreadId)) { + // Thread ID + base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kThreadIdFormatSpecifier, + ELPP->getThreadName(base::threading::getCurrentThreadId())); + } + if (logFormat->hasFlag(base::FormatFlags::DateTime)) { + // DateTime + base::utils::Str::replaceFirstWithEscape( + logLine, base::consts::kDateTimeFormatSpecifier, + base::utils::DateTime::getDateTime(logFormat->dateTimeFormat().c_str(), + &tc->subsecondPrecision(logMessage->level()))); + } + if (logFormat->hasFlag(base::FormatFlags::Function)) { + // Function + base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogFunctionFormatSpecifier, + logMessage->func()); + } + if (logFormat->hasFlag(base::FormatFlags::File)) { + // File + base::utils::Str::clearBuff(buff, base::consts::kSourceFilenameMaxLength); + base::utils::File::buildStrippedFilename(logMessage->file().c_str(), buff); + base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogFileFormatSpecifier, std::string(buff)); + } + if (logFormat->hasFlag(base::FormatFlags::FileBase)) { + // FileBase + base::utils::Str::clearBuff(buff, base::consts::kSourceFilenameMaxLength); + base::utils::File::buildBaseFilename(logMessage->file(), buff); + base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogFileBaseFormatSpecifier, std::string(buff)); + } + if (logFormat->hasFlag(base::FormatFlags::Line)) { + // Line + char* buf = base::utils::Str::clearBuff(buff, base::consts::kSourceLineMaxLength); + buf = base::utils::Str::convertAndAddToBuff(logMessage->line(), base::consts::kSourceLineMaxLength, buf, bufLim, + false); + 
base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogLineFormatSpecifier, std::string(buff)); + } + if (logFormat->hasFlag(base::FormatFlags::Location)) { + // Location + char* buf = base::utils::Str::clearBuff( + buff, base::consts::kSourceFilenameMaxLength + base::consts::kSourceLineMaxLength); + base::utils::File::buildStrippedFilename(logMessage->file().c_str(), buff); + buf = base::utils::Str::addToBuff(buff, buf, bufLim); + buf = base::utils::Str::addToBuff(":", buf, bufLim); + buf = base::utils::Str::convertAndAddToBuff(logMessage->line(), base::consts::kSourceLineMaxLength, buf, bufLim, + false); + base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogLocationFormatSpecifier, std::string(buff)); + } + if (logMessage->level() == Level::Verbose && logFormat->hasFlag(base::FormatFlags::VerboseLevel)) { + // Verbose level + char* buf = base::utils::Str::clearBuff(buff, 1); + buf = base::utils::Str::convertAndAddToBuff(logMessage->verboseLevel(), 1, buf, bufLim, false); + base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kVerboseLevelFormatSpecifier, + std::string(buff)); + } + if (logFormat->hasFlag(base::FormatFlags::LogMessage)) { + // Log message + base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kMessageFormatSpecifier, logMessage->message()); + } #if !defined(ELPP_DISABLE_CUSTOM_FORMAT_SPECIFIERS) - el::base::threading::ScopedLock lock_(ELPP->customFormatSpecifiersLock()); - ELPP_UNUSED(lock_); - for (std::vector::const_iterator it = ELPP->customFormatSpecifiers()->begin(); - it != ELPP->customFormatSpecifiers()->end(); ++it) { - std::string fs(it->formatSpecifier()); - base::type::string_t wcsFormatSpecifier(fs.begin(), fs.end()); - base::utils::Str::replaceFirstWithEscape(logLine, wcsFormatSpecifier, it->resolver()(logMessage)); - } + el::base::threading::ScopedLock lock_(ELPP->customFormatSpecifiersLock()); + ELPP_UNUSED(lock_); + for (std::vector::const_iterator it = ELPP->customFormatSpecifiers()->begin(); + it != ELPP->customFormatSpecifiers()->end(); ++it) { + std::string fs(it->formatSpecifier()); + base::type::string_t wcsFormatSpecifier(fs.begin(), fs.end()); + base::utils::Str::replaceFirstWithEscape(logLine, wcsFormatSpecifier, it->resolver()(logMessage)); + } #endif // !defined(ELPP_DISABLE_CUSTOM_FORMAT_SPECIFIERS) - if (appendNewLine) logLine += ELPP_LITERAL("\n"); - return logLine; + if (appendNewLine) + logLine += ELPP_LITERAL("\n"); + return logLine; } // LogDispatcher -void LogDispatcher::dispatch(void) { - if (m_proceed && m_dispatchAction == base::DispatchAction::None) { - m_proceed = false; - } - if (!m_proceed) { - return; - } -#ifndef ELPP_NO_GLOBAL_LOCK - // see https://github.com/muflihun/easyloggingpp/issues/580 - // global lock is turned off by default unless - // ELPP_NO_GLOBAL_LOCK is defined - base::threading::ScopedLock scopedLock(ELPP->lock()); -#endif - base::TypedConfigurations* tc = m_logMessage->logger()->m_typedConfigurations; - if (ELPP->hasFlag(LoggingFlag::StrictLogFileSizeCheck)) { - tc->validateFileRolling(m_logMessage->level(), ELPP->preRollOutCallback()); - } - LogDispatchCallback* callback = nullptr; - LogDispatchData data; - for (const std::pair& h - : ELPP->m_logDispatchCallbacks) { - callback = h.second.get(); - if (callback != nullptr && callback->enabled()) { - data.setLogMessage(m_logMessage); - data.setDispatchAction(m_dispatchAction); - callback->handle(&data); +void +LogDispatcher::dispatch(void) { + if (m_proceed && m_dispatchAction == base::DispatchAction::None) { + 
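// ---------------------------------------------------------------------------
// Editor's note: self-contained sketch (not part of this patch) exercising the
// format specifiers substituted by DefaultLogBuilder::build() above.
#include "easylogging++.h"

INITIALIZE_EASYLOGGINGPP

int main() {
    el::Loggers::reconfigureAllLoggers(
        el::ConfigurationType::Format,
        "%datetime{%H:%m:%s.%g} %level [%fbase:%line] %func %msg");  // %g = subsecond digits
    LOG(INFO) << "rendered through the log builder";
    return 0;
}
// ---------------------------------------------------------------------------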
m_proceed = false; + } + if (!m_proceed) { + return; + } +#ifndef ELPP_NO_GLOBAL_LOCK + // see https://github.com/muflihun/easyloggingpp/issues/580 + // global lock is turned off by default unless + // ELPP_NO_GLOBAL_LOCK is defined + base::threading::ScopedLock scopedLock(ELPP->lock()); +#endif + base::TypedConfigurations* tc = m_logMessage->logger()->m_typedConfigurations; + if (ELPP->hasFlag(LoggingFlag::StrictLogFileSizeCheck)) { + tc->validateFileRolling(m_logMessage->level(), ELPP->preRollOutCallback()); + } + LogDispatchCallback* callback = nullptr; + LogDispatchData data; + for (const std::pair& h : ELPP->m_logDispatchCallbacks) { + callback = h.second.get(); + if (callback != nullptr && callback->enabled()) { + data.setLogMessage(m_logMessage); + data.setDispatchAction(m_dispatchAction); + callback->handle(&data); + } } - } } // MessageBuilder -void MessageBuilder::initialize(Logger* logger) { - m_logger = logger; - m_containerLogSeperator = ELPP->hasFlag(LoggingFlag::NewLineForContainer) ? - ELPP_LITERAL("\n ") : ELPP_LITERAL(", "); +void +MessageBuilder::initialize(Logger* logger) { + m_logger = logger; + m_containerLogSeperator = + ELPP->hasFlag(LoggingFlag::NewLineForContainer) ? ELPP_LITERAL("\n ") : ELPP_LITERAL(", "); } -MessageBuilder& MessageBuilder::operator<<(const wchar_t* msg) { - if (msg == nullptr) { - m_logger->stream() << base::consts::kNullPointer; +MessageBuilder& +MessageBuilder::operator<<(const wchar_t* msg) { + if (msg == nullptr) { + m_logger->stream() << base::consts::kNullPointer; + return *this; + } +#if defined(ELPP_UNICODE) + m_logger->stream() << msg; +#else + char* buff_ = base::utils::Str::wcharPtrToCharPtr(msg); + m_logger->stream() << buff_; + free(buff_); +#endif + if (ELPP->hasFlag(LoggingFlag::AutoSpacing)) { + m_logger->stream() << " "; + } return *this; - } -# if defined(ELPP_UNICODE) - m_logger->stream() << msg; -# else - char* buff_ = base::utils::Str::wcharPtrToCharPtr(msg); - m_logger->stream() << buff_; - free(buff_); -# endif - if (ELPP->hasFlag(LoggingFlag::AutoSpacing)) { - m_logger->stream() << " "; - } - return *this; } // Writer -Writer& Writer::construct(Logger* logger, bool needLock) { - m_logger = logger; - initializeLogger(logger->id(), false, needLock); - m_messageBuilder.initialize(m_logger); - return *this; +Writer& +Writer::construct(Logger* logger, bool needLock) { + m_logger = logger; + initializeLogger(logger->id(), false, needLock); + m_messageBuilder.initialize(m_logger); + return *this; } -Writer& Writer::construct(int count, const char* loggerIds, ...) { - if (ELPP->hasFlag(LoggingFlag::MultiLoggerSupport)) { - va_list loggersList; - va_start(loggersList, loggerIds); - const char* id = loggerIds; - m_loggerIds.reserve(count); - for (int i = 0; i < count; ++i) { - m_loggerIds.push_back(std::string(id)); - id = va_arg(loggersList, const char*); - } - va_end(loggersList); - initializeLogger(m_loggerIds.at(0)); - } else { - initializeLogger(std::string(loggerIds)); - } - m_messageBuilder.initialize(m_logger); - return *this; -} - -void Writer::initializeLogger(const std::string& loggerId, bool lookup, bool needLock) { - if (lookup) { - m_logger = ELPP->registeredLoggers()->get(loggerId, ELPP->hasFlag(LoggingFlag::CreateLoggerAutomatically)); - } - if (m_logger == nullptr) { - { - if (!ELPP->registeredLoggers()->has(std::string(base::consts::kDefaultLoggerId))) { - // Somehow default logger has been unregistered. Not good! 
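// ---------------------------------------------------------------------------
// Editor's note: self-contained sketch (not part of this patch) for the
// variadic Writer::construct(int count, const char* loggerIds, ...) below;
// LoggingFlag::MultiLoggerSupport gates this path.
#include "easylogging++.h"

INITIALIZE_EASYLOGGINGPP

int main() {
    el::Loggers::addFlag(el::LoggingFlag::MultiLoggerSupport);
    el::Loggers::getLogger("network");  // make sure the second logger exists
    CLOG(INFO, "default", "network") << "dispatched once per listed logger";
    return 0;
}
// ---------------------------------------------------------------------------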
Register again - ELPP->registeredLoggers()->get(std::string(base::consts::kDefaultLoggerId)); - } - } - Writer(Level::Debug, m_file, m_line, m_func).construct(1, base::consts::kDefaultLoggerId) - << "Logger [" << loggerId << "] is not registered yet!"; - m_proceed = false; - } else { - if (needLock) { - m_logger->acquireLock(); // This should not be unlocked by checking m_proceed because - // m_proceed can be changed by lines below - } - if (ELPP->hasFlag(LoggingFlag::HierarchicalLogging)) { - m_proceed = m_level == Level::Verbose ? m_logger->enabled(m_level) : - LevelHelper::castToInt(m_level) >= LevelHelper::castToInt(ELPP->m_loggingLevel); - } else { - m_proceed = m_logger->enabled(m_level); - } - } -} - -void Writer::processDispatch() { -#if ELPP_LOGGING_ENABLED - if (ELPP->hasFlag(LoggingFlag::MultiLoggerSupport)) { - bool firstDispatched = false; - base::type::string_t logMessage; - std::size_t i = 0; - do { - if (m_proceed) { - if (firstDispatched) { - m_logger->stream() << logMessage; - } else { - firstDispatched = true; - if (m_loggerIds.size() > 1) { - logMessage = m_logger->stream().str(); - } +Writer& +Writer::construct(int count, const char* loggerIds, ...) { + if (ELPP->hasFlag(LoggingFlag::MultiLoggerSupport)) { + va_list loggersList; + va_start(loggersList, loggerIds); + const char* id = loggerIds; + m_loggerIds.reserve(count); + for (int i = 0; i < count; ++i) { + m_loggerIds.push_back(std::string(id)); + id = va_arg(loggersList, const char*); } - triggerDispatch(); - } else if (m_logger != nullptr) { + va_end(loggersList); + initializeLogger(m_loggerIds.at(0)); + } else { + initializeLogger(std::string(loggerIds)); + } + m_messageBuilder.initialize(m_logger); + return *this; +} + +void +Writer::initializeLogger(const std::string& loggerId, bool lookup, bool needLock) { + if (lookup) { + m_logger = ELPP->registeredLoggers()->get(loggerId, ELPP->hasFlag(LoggingFlag::CreateLoggerAutomatically)); + } + if (m_logger == nullptr) { + { + if (!ELPP->registeredLoggers()->has(std::string(base::consts::kDefaultLoggerId))) { + // Somehow default logger has been unregistered. Not good! Register again + ELPP->registeredLoggers()->get(std::string(base::consts::kDefaultLoggerId)); + } + } + Writer(Level::Debug, m_file, m_line, m_func).construct(1, base::consts::kDefaultLoggerId) + << "Logger [" << loggerId << "] is not registered yet!"; + m_proceed = false; + } else { + if (needLock) { + m_logger->acquireLock(); // This should not be unlocked by checking m_proceed because + // m_proceed can be changed by lines below + } + if (ELPP->hasFlag(LoggingFlag::HierarchicalLogging)) { + m_proceed = m_level == Level::Verbose + ? 
m_logger->enabled(m_level) + : LevelHelper::castToInt(m_level) >= LevelHelper::castToInt(ELPP->m_loggingLevel); + } else { + m_proceed = m_logger->enabled(m_level); + } + } +} + +void +Writer::processDispatch() { +#if ELPP_LOGGING_ENABLED + if (ELPP->hasFlag(LoggingFlag::MultiLoggerSupport)) { + bool firstDispatched = false; + base::type::string_t logMessage; + std::size_t i = 0; + do { + if (m_proceed) { + if (firstDispatched) { + m_logger->stream() << logMessage; + } else { + firstDispatched = true; + if (m_loggerIds.size() > 1) { + logMessage = m_logger->stream().str(); + } + } + triggerDispatch(); + } else if (m_logger != nullptr) { + m_logger->stream().str(ELPP_LITERAL("")); + m_logger->releaseLock(); + } + if (i + 1 < m_loggerIds.size()) { + initializeLogger(m_loggerIds.at(i + 1)); + } + } while (++i < m_loggerIds.size()); + } else { + if (m_proceed) { + triggerDispatch(); + } else if (m_logger != nullptr) { + m_logger->stream().str(ELPP_LITERAL("")); + m_logger->releaseLock(); + } + } +#else + if (m_logger != nullptr) { m_logger->stream().str(ELPP_LITERAL("")); m_logger->releaseLock(); - } - if (i + 1 < m_loggerIds.size()) { - initializeLogger(m_loggerIds.at(i + 1)); - } - } while (++i < m_loggerIds.size()); - } else { - if (m_proceed) { - triggerDispatch(); - } else if (m_logger != nullptr) { - m_logger->stream().str(ELPP_LITERAL("")); - m_logger->releaseLock(); } - } -#else - if (m_logger != nullptr) { - m_logger->stream().str(ELPP_LITERAL("")); - m_logger->releaseLock(); - } -#endif // ELPP_LOGGING_ENABLED +#endif // ELPP_LOGGING_ENABLED } -void Writer::triggerDispatch(void) { - if (m_proceed) { - if (m_msg == nullptr) { - LogMessage msg(m_level, m_file, m_line, m_func, m_verboseLevel, - m_logger); - base::LogDispatcher(m_proceed, &msg, m_dispatchAction).dispatch(); - } else { - base::LogDispatcher(m_proceed, m_msg, m_dispatchAction).dispatch(); +void +Writer::triggerDispatch(void) { + if (m_proceed) { + if (m_msg == nullptr) { + LogMessage msg(m_level, m_file, m_line, m_func, m_verboseLevel, m_logger); + base::LogDispatcher(m_proceed, &msg, m_dispatchAction).dispatch(); + } else { + base::LogDispatcher(m_proceed, m_msg, m_dispatchAction).dispatch(); + } } - } - if (m_logger != nullptr) { - m_logger->stream().str(ELPP_LITERAL("")); - m_logger->releaseLock(); - } - if (m_proceed && m_level == Level::Fatal - && !ELPP->hasFlag(LoggingFlag::DisableApplicationAbortOnFatalLog)) { - base::Writer(Level::Warning, m_file, m_line, m_func).construct(1, base::consts::kDefaultLoggerId) - << "Aborting application. Reason: Fatal log at [" << m_file << ":" << m_line << "]"; - std::stringstream reasonStream; - reasonStream << "Fatal log at [" << m_file << ":" << m_line << "]" - << " If you wish to disable 'abort on fatal log' please use " - << "el::Loggers::addFlag(el::LoggingFlag::DisableApplicationAbortOnFatalLog)"; - base::utils::abort(1, reasonStream.str()); - } - m_proceed = false; + if (m_logger != nullptr) { + m_logger->stream().str(ELPP_LITERAL("")); + m_logger->releaseLock(); + } + if (m_proceed && m_level == Level::Fatal && !ELPP->hasFlag(LoggingFlag::DisableApplicationAbortOnFatalLog)) { + base::Writer(Level::Warning, m_file, m_line, m_func).construct(1, base::consts::kDefaultLoggerId) + << "Aborting application. 
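// ---------------------------------------------------------------------------
// Editor's note: self-contained sketch (not part of this patch) for the
// Fatal-abort path in Writer::triggerDispatch() and the errno suffix appended
// by PErrorWriter above.
#include "easylogging++.h"
#include <cstdio>

INITIALIZE_EASYLOGGINGPP

int main() {
    el::Loggers::addFlag(el::LoggingFlag::DisableApplicationAbortOnFatalLog);
    LOG(FATAL) << "logged, but the process survives because of the flag";
    if (std::fopen("/no/such/file", "r") == nullptr) {
        PLOG(ERROR) << "fopen failed";  // appends ": <strerror(errno)> [errno]"
    }
    return 0;
}
// ---------------------------------------------------------------------------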
Reason: Fatal log at [" << m_file << ":" << m_line << "]";
+        std::stringstream reasonStream;
+        reasonStream << "Fatal log at [" << m_file << ":" << m_line << "]"
+                     << " If you wish to disable 'abort on fatal log' please use "
+                     << "el::Loggers::addFlag(el::LoggingFlag::DisableApplicationAbortOnFatalLog)";
+        base::utils::abort(1, reasonStream.str());
+    }
+    m_proceed = false;
 }
 
 // PErrorWriter
 PErrorWriter::~PErrorWriter(void) {
-  if (m_proceed) {
+    if (m_proceed) {
 #if ELPP_COMPILER_MSVC
-    char buff[256];
-    strerror_s(buff, 256, errno);
-    m_logger->stream() << ": " << buff << " [" << errno << "]";
+        char buff[256];
+        strerror_s(buff, 256, errno);
+        m_logger->stream() << ": " << buff << " [" << errno << "]";
 #else
-    m_logger->stream() << ": " << strerror(errno) << " [" << errno << "]";
+        m_logger->stream() << ": " << strerror(errno) << " [" << errno << "]";
 #endif
-  }
+    }
 }
 
 // PerformanceTracker
 #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
-PerformanceTracker::PerformanceTracker(const std::string& blockName,
-                                       base::TimestampUnit timestampUnit,
-                                       const std::string& loggerId,
-                                       bool scopedLog, Level level) :
-  m_blockName(blockName), m_timestampUnit(timestampUnit), m_loggerId(loggerId), m_scopedLog(scopedLog),
-  m_level(level), m_hasChecked(false), m_lastCheckpointId(std::string()), m_enabled(false) {
+PerformanceTracker::PerformanceTracker(const std::string& blockName, base::TimestampUnit timestampUnit,
+                                       const std::string& loggerId, bool scopedLog, Level level)
+    : m_blockName(blockName),
+      m_timestampUnit(timestampUnit),
+      m_loggerId(loggerId),
+      m_scopedLog(scopedLog),
+      m_level(level),
+      m_hasChecked(false),
+      m_lastCheckpointId(std::string()),
+      m_enabled(false) {
 #if !defined(ELPP_DISABLE_PERFORMANCE_TRACKING) && ELPP_LOGGING_ENABLED
-  // We store it locally so that if user happen to change configuration by the end of scope
-  // or before calling checkpoint, we still depend on state of configuraton at time of construction
-  el::Logger* loggerPtr = ELPP->registeredLoggers()->get(loggerId, false);
-  m_enabled = loggerPtr != nullptr && loggerPtr->m_typedConfigurations->performanceTracking(m_level);
-  if (m_enabled) {
-    base::utils::DateTime::gettimeofday(&m_startTime);
-  }
+    // We store it locally so that if user happen to change configuration by the end of scope
+    // or before calling checkpoint, we still depend on state of configuraton at time of construction
+    el::Logger* loggerPtr = ELPP->registeredLoggers()->get(loggerId, false);
+    m_enabled = loggerPtr != nullptr && loggerPtr->m_typedConfigurations->performanceTracking(m_level);
+    if (m_enabled) {
+        base::utils::DateTime::gettimeofday(&m_startTime);
+    }
 #endif  // !defined(ELPP_DISABLE_PERFORMANCE_TRACKING) && ELPP_LOGGING_ENABLED
 }
 
 PerformanceTracker::~PerformanceTracker(void) {
 #if !defined(ELPP_DISABLE_PERFORMANCE_TRACKING) && ELPP_LOGGING_ENABLED
-  if (m_enabled) {
-    base::threading::ScopedLock scopedLock(lock());
-    if (m_scopedLog) {
-      base::utils::DateTime::gettimeofday(&m_endTime);
-      base::type::string_t formattedTime = getFormattedTimeTaken();
-      PerformanceTrackingData data(PerformanceTrackingData::DataType::Complete);
-      data.init(this);
-      data.m_formattedTimeTaken = formattedTime;
-      PerformanceTrackingCallback* callback = nullptr;
-      for (const std::pair<std::string, base::type::PerformanceTrackingCallbackPtr>& h
-           : ELPP->m_performanceTrackingCallbacks) {
-        callback = h.second.get();
-        if (callback != nullptr && callback->enabled()) {
-          callback->handle(&data);
-        }
-      }
-    }
-  }
+    if (m_enabled) {
+        base::threading::ScopedLock scopedLock(lock());
+        if (m_scopedLog) {
+            base::utils::DateTime::gettimeofday(&m_endTime);
+            base::type::string_t formattedTime = getFormattedTimeTaken();
+            PerformanceTrackingData data(PerformanceTrackingData::DataType::Complete);
+            data.init(this);
+            data.m_formattedTimeTaken = formattedTime;
+            PerformanceTrackingCallback* callback = nullptr;
+            for (const std::pair<std::string, base::type::PerformanceTrackingCallbackPtr>& h :
+                 ELPP->m_performanceTrackingCallbacks) {
+                callback = h.second.get();
+                if (callback != nullptr && callback->enabled()) {
+                    callback->handle(&data);
+                }
+            }
+        }
+    }
 #endif  // !defined(ELPP_DISABLE_PERFORMANCE_TRACKING)
 }
 
-void PerformanceTracker::checkpoint(const std::string& id, const char* file, base::type::LineNumber line,
-                                    const char* func) {
+void
+PerformanceTracker::checkpoint(const std::string& id, const char* file, base::type::LineNumber line, const char* func) {
 #if !defined(ELPP_DISABLE_PERFORMANCE_TRACKING) && ELPP_LOGGING_ENABLED
-  if (m_enabled) {
-    base::threading::ScopedLock scopedLock(lock());
-    base::utils::DateTime::gettimeofday(&m_endTime);
-    base::type::string_t formattedTime = m_hasChecked ? getFormattedTimeTaken(m_lastCheckpointTime) : ELPP_LITERAL("");
-    PerformanceTrackingData data(PerformanceTrackingData::DataType::Checkpoint);
-    data.init(this);
-    data.m_checkpointId = id;
-    data.m_file = file;
-    data.m_line = line;
-    data.m_func = func;
-    data.m_formattedTimeTaken = formattedTime;
-    PerformanceTrackingCallback* callback = nullptr;
-    for (const std::pair<std::string, base::type::PerformanceTrackingCallbackPtr>& h
-         : ELPP->m_performanceTrackingCallbacks) {
-      callback = h.second.get();
-      if (callback != nullptr && callback->enabled()) {
-        callback->handle(&data);
-      }
-    }
-    base::utils::DateTime::gettimeofday(&m_lastCheckpointTime);
-    m_hasChecked = true;
-    m_lastCheckpointId = id;
-  }
+    if (m_enabled) {
+        base::threading::ScopedLock scopedLock(lock());
+        base::utils::DateTime::gettimeofday(&m_endTime);
+        base::type::string_t formattedTime =
+            m_hasChecked ? getFormattedTimeTaken(m_lastCheckpointTime) : ELPP_LITERAL("");
+        PerformanceTrackingData data(PerformanceTrackingData::DataType::Checkpoint);
+        data.init(this);
+        data.m_checkpointId = id;
+        data.m_file = file;
+        data.m_line = line;
+        data.m_func = func;
+        data.m_formattedTimeTaken = formattedTime;
+        PerformanceTrackingCallback* callback = nullptr;
+        for (const std::pair<std::string, base::type::PerformanceTrackingCallbackPtr>& h :
+             ELPP->m_performanceTrackingCallbacks) {
+            callback = h.second.get();
+            if (callback != nullptr && callback->enabled()) {
+                callback->handle(&data);
+            }
+        }
+        base::utils::DateTime::gettimeofday(&m_lastCheckpointTime);
+        m_hasChecked = true;
+        m_lastCheckpointId = id;
+    }
 #endif  // !defined(ELPP_DISABLE_PERFORMANCE_TRACKING) && ELPP_LOGGING_ENABLED
-  ELPP_UNUSED(id);
-  ELPP_UNUSED(file);
-  ELPP_UNUSED(line);
-  ELPP_UNUSED(func);
+    ELPP_UNUSED(id);
+    ELPP_UNUSED(file);
+    ELPP_UNUSED(line);
+    ELPP_UNUSED(func);
 }
 
-const base::type::string_t PerformanceTracker::getFormattedTimeTaken(struct timeval startTime) const {
-  if (ELPP->hasFlag(LoggingFlag::FixedTimeFormat)) {
-    base::type::stringstream_t ss;
-    ss << base::utils::DateTime::getTimeDifference(m_endTime,
-        startTime, m_timestampUnit) << " " << base::consts::kTimeFormats[static_cast<base::type::EnumType>
-            (m_timestampUnit)].unit;
-    return ss.str();
-  }
-  return base::utils::DateTime::formatTime(base::utils::DateTime::getTimeDifference(m_endTime,
-         startTime, m_timestampUnit), m_timestampUnit);
+const base::type::string_t
+PerformanceTracker::getFormattedTimeTaken(struct timeval startTime) const {
+    if (ELPP->hasFlag(LoggingFlag::FixedTimeFormat)) {
+        base::type::stringstream_t ss;
+        ss << base::utils::DateTime::getTimeDifference(m_endTime, startTime, m_timestampUnit) << " "
+           << base::consts::kTimeFormats[static_cast<base::type::EnumType>(m_timestampUnit)].unit;
+        return ss.str();
+    }
+    return base::utils::DateTime::formatTime(
+        base::utils::DateTime::getTimeDifference(m_endTime, startTime, m_timestampUnit), m_timestampUnit);
 }
-#endif  // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
+#endif  // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
 
 namespace debug {
 #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG)
@@ -2762,155 +2921,159 @@ namespace debug {
 // StackTrace
 StackTrace::StackTraceEntry::StackTraceEntry(std::size_t index, const std::string& loc, const std::string& demang,
-    const std::string& hex,
-    const std::string& addr) :
-  m_index(index),
-  m_location(loc),
-  m_demangled(demang),
-  m_hex(hex),
-  m_addr(addr) {
+                                             const std::string& hex, const std::string& addr)
+    : m_index(index), m_location(loc), m_demangled(demang), m_hex(hex), m_addr(addr) {
 }
 
-std::ostream& operator<<(std::ostream& ss, const StackTrace::StackTraceEntry& si) {
-  ss << "[" << si.m_index << "] " << si.m_location << (si.m_hex.empty() ? "" : "+") << si.m_hex << " " << si.m_addr <<
-     (si.m_demangled.empty() ? "" : ":") << si.m_demangled;
-  return ss;
+std::ostream&
+operator<<(std::ostream& ss, const StackTrace::StackTraceEntry& si) {
+    ss << "[" << si.m_index << "] " << si.m_location << (si.m_hex.empty() ? "" : "+") << si.m_hex << " " << si.m_addr
+       << (si.m_demangled.empty() ? "" : ":") << si.m_demangled;
+    return ss;
}
 
-std::ostream& operator<<(std::ostream& os, const StackTrace& st) {
-  std::vector<StackTrace::StackTraceEntry>::const_iterator it = st.m_stack.begin();
-  while (it != st.m_stack.end()) {
-    os << "    " << *it++ << "\n";
-  }
-  return os;
-}
-
-void StackTrace::generateNew(void) {
-#if ELPP_STACKTRACE
-  m_stack.clear();
-  void* stack[kMaxStack];
-  unsigned int size = backtrace(stack, kMaxStack);
-  char** strings = backtrace_symbols(stack, size);
-  if (size > kStackStart) {  // Skip StackTrace c'tor and generateNew
-    for (std::size_t i = kStackStart; i < size; ++i) {
-      std::string mangName;
-      std::string location;
-      std::string hex;
-      std::string addr;
-
-      // entry: 2   crash.cpp.bin   0x0000000101552be5 _ZN2el4base5debug10StackTraceC1Ev + 21
-      const std::string line(strings[i]);
-      auto p = line.find("_");
-      if (p != std::string::npos) {
-        mangName = line.substr(p);
-        mangName = mangName.substr(0, mangName.find(" +"));
-      }
-      p = line.find("0x");
-      if (p != std::string::npos) {
-        addr = line.substr(p);
-        addr = addr.substr(0, addr.find("_"));
-      }
-      // Perform demangling if parsed properly
-      if (!mangName.empty()) {
-        int status = 0;
-        char* demangName = abi::__cxa_demangle(mangName.data(), 0, 0, &status);
-        // if demangling is successful, output the demangled function name
-        if (status == 0) {
-          // Success (see http://gcc.gnu.org/onlinedocs/libstdc++/libstdc++-html-USERS-4.3/a01696.html)
-          StackTraceEntry entry(i - 1, location, demangName, hex, addr);
-          m_stack.push_back(entry);
-        } else {
-          // Not successful - we will use mangled name
-          StackTraceEntry entry(i - 1, location, mangName, hex, addr);
-          m_stack.push_back(entry);
-        }
-        free(demangName);
-      } else {
-        StackTraceEntry entry(i - 1, line);
-        m_stack.push_back(entry);
-      }
-    }
-  }
-  free(strings);
+std::ostream&
+operator<<(std::ostream& os, const StackTrace& st) {
+    std::vector<StackTrace::StackTraceEntry>::const_iterator it = st.m_stack.begin();
+    while (it != st.m_stack.end()) {
+        os << "    " << *it++ << "\n";
+    }
+    return os;
+}
+
+void
+StackTrace::generateNew(void) { +#if ELPP_STACKTRACE + m_stack.clear(); + void* stack[kMaxStack]; + unsigned int size = backtrace(stack, kMaxStack); + char** strings = backtrace_symbols(stack, size); + if (size > kStackStart) { // Skip StackTrace c'tor and generateNew + for (std::size_t i = kStackStart; i < size; ++i) { + std::string mangName; + std::string location; + std::string hex; + std::string addr; + + // entry: 2 crash.cpp.bin 0x0000000101552be5 _ZN2el4base5debug10StackTraceC1Ev + 21 + const std::string line(strings[i]); + auto p = line.find("_"); + if (p != std::string::npos) { + mangName = line.substr(p); + mangName = mangName.substr(0, mangName.find(" +")); + } + p = line.find("0x"); + if (p != std::string::npos) { + addr = line.substr(p); + addr = addr.substr(0, addr.find("_")); + } + // Perform demangling if parsed properly + if (!mangName.empty()) { + int status = 0; + char* demangName = abi::__cxa_demangle(mangName.data(), 0, 0, &status); + // if demangling is successful, output the demangled function name + if (status == 0) { + // Success (see http://gcc.gnu.org/onlinedocs/libstdc++/libstdc++-html-USERS-4.3/a01696.html) + StackTraceEntry entry(i - 1, location, demangName, hex, addr); + m_stack.push_back(entry); + } else { + // Not successful - we will use mangled name + StackTraceEntry entry(i - 1, location, mangName, hex, addr); + m_stack.push_back(entry); + } + free(demangName); + } else { + StackTraceEntry entry(i - 1, line); + m_stack.push_back(entry); + } + } + } + free(strings); #else - ELPP_INTERNAL_INFO(1, "Stacktrace generation not supported for selected compiler"); + ELPP_INTERNAL_INFO(1, "Stacktrace generation not supported for selected compiler"); #endif // ELPP_STACKTRACE } // Static helper functions -static std::string crashReason(int sig) { - std::stringstream ss; - bool foundReason = false; - for (int i = 0; i < base::consts::kCrashSignalsCount; ++i) { - if (base::consts::kCrashSignals[i].numb == sig) { - ss << "Application has crashed due to [" << base::consts::kCrashSignals[i].name << "] signal"; - if (ELPP->hasFlag(el::LoggingFlag::LogDetailedCrashReason)) { - ss << std::endl << - " " << base::consts::kCrashSignals[i].brief << std::endl << - " " << base::consts::kCrashSignals[i].detail; - } - foundReason = true; +static std::string +crashReason(int sig) { + std::stringstream ss; + bool foundReason = false; + for (int i = 0; i < base::consts::kCrashSignalsCount; ++i) { + if (base::consts::kCrashSignals[i].numb == sig) { + ss << "Application has crashed due to [" << base::consts::kCrashSignals[i].name << "] signal"; + if (ELPP->hasFlag(el::LoggingFlag::LogDetailedCrashReason)) { + ss << std::endl + << " " << base::consts::kCrashSignals[i].brief << std::endl + << " " << base::consts::kCrashSignals[i].detail; + } + foundReason = true; + } } - } - if (!foundReason) { - ss << "Application has crashed due to unknown signal [" << sig << "]"; - } - return ss.str(); + if (!foundReason) { + ss << "Application has crashed due to unknown signal [" << sig << "]"; + } + return ss.str(); } /// @brief Logs reason of crash from sig -static void logCrashReason(int sig, bool stackTraceIfAvailable, Level level, const char* logger) { - if (sig == SIGINT && ELPP->hasFlag(el::LoggingFlag::IgnoreSigInt)) { - return; - } - std::stringstream ss; - ss << "CRASH HANDLED; "; - ss << crashReason(sig); +static void +logCrashReason(int sig, bool stackTraceIfAvailable, Level level, const char* logger) { + if (sig == SIGINT && ELPP->hasFlag(el::LoggingFlag::IgnoreSigInt)) { + return; + } 
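A minimal usage sketch for the crash-logging path above (standard easylogging++ calls; the handler name onCrash is an illustrative placeholder, and a custom handler is optional since defaultCrashHandler is installed when none is set):

    #include "easylogging++.h"
    INITIALIZE_EASYLOGGINGPP

    // Hypothetical custom handler: log the reason plus backtrace, then abort,
    // mirroring what defaultCrashHandler() does.
    void onCrash(int sig) {
        el::Helpers::logCrashReason(sig, true);
        el::Helpers::crashAbort(sig);
    }

    int main(int argc, char** argv) {
        START_EASYLOGGINGPP(argc, argv);
        el::Loggers::addFlag(el::LoggingFlag::LogDetailedCrashReason);  // richer crashReason() text
        el::Loggers::addFlag(el::LoggingFlag::IgnoreSigInt);            // don't treat Ctrl+C as a crash
        el::Helpers::setCrashHandler(onCrash);
        return 0;
    }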
+ std::stringstream ss; + ss << "CRASH HANDLED; "; + ss << crashReason(sig); #if ELPP_STACKTRACE - if (stackTraceIfAvailable) { - ss << std::endl << " ======= Backtrace: =========" << std::endl << base::debug::StackTrace(); - } + if (stackTraceIfAvailable) { + ss << std::endl << " ======= Backtrace: =========" << std::endl << base::debug::StackTrace(); + } #else - ELPP_UNUSED(stackTraceIfAvailable); + ELPP_UNUSED(stackTraceIfAvailable); #endif // ELPP_STACKTRACE - ELPP_WRITE_LOG(el::base::Writer, level, base::DispatchAction::NormalLog, logger) << ss.str(); + ELPP_WRITE_LOG(el::base::Writer, level, base::DispatchAction::NormalLog, logger) << ss.str(); } -static inline void crashAbort(int sig) { - base::utils::abort(sig, std::string()); +static inline void +crashAbort(int sig) { + base::utils::abort(sig, std::string()); } /// @brief Default application crash handler /// -/// @detail This function writes log using 'default' logger, prints stack trace for GCC based compilers and aborts program. -static inline void defaultCrashHandler(int sig) { - base::debug::logCrashReason(sig, true, Level::Fatal, base::consts::kDefaultLoggerId); - base::debug::crashAbort(sig); +/// @detail This function writes log using 'default' logger, prints stack trace for GCC based compilers and aborts +/// program. +static inline void +defaultCrashHandler(int sig) { + base::debug::logCrashReason(sig, true, Level::Fatal, base::consts::kDefaultLoggerId); + base::debug::crashAbort(sig); } // CrashHandler CrashHandler::CrashHandler(bool useDefault) { - if (useDefault) { - setHandler(defaultCrashHandler); - } + if (useDefault) { + setHandler(defaultCrashHandler); + } } -void CrashHandler::setHandler(const Handler& cHandler) { - m_handler = cHandler; +void +CrashHandler::setHandler(const Handler& cHandler) { + m_handler = cHandler; #if defined(ELPP_HANDLE_SIGABRT) - int i = 0; // SIGABRT is at base::consts::kCrashSignals[0] + int i = 0; // SIGABRT is at base::consts::kCrashSignals[0] #else - int i = 1; + int i = 1; #endif // defined(ELPP_HANDLE_SIGABRT) - for (; i < base::consts::kCrashSignalsCount; ++i) { - m_handler = signal(base::consts::kCrashSignals[i].numb, cHandler); - } + for (; i < base::consts::kCrashSignalsCount; ++i) { + m_handler = signal(base::consts::kCrashSignals[i].numb, cHandler); + } } -#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) +#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) } // namespace debug -} // namespace base +} // namespace base // el @@ -2918,195 +3081,219 @@ void CrashHandler::setHandler(const Handler& cHandler) { #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) -void Helpers::crashAbort(int sig, const char* sourceFile, unsigned int long line) { - std::stringstream ss; - ss << base::debug::crashReason(sig).c_str(); - ss << " - [Called el::Helpers::crashAbort(" << sig << ")]"; - if (sourceFile != nullptr && strlen(sourceFile) > 0) { - ss << " - Source: " << sourceFile; - if (line > 0) - ss << ":" << line; - else - ss << " (line number not specified)"; - } - base::utils::abort(sig, ss.str()); +void +Helpers::crashAbort(int sig, const char* sourceFile, unsigned int long line) { + std::stringstream ss; + ss << base::debug::crashReason(sig).c_str(); + ss << " - [Called el::Helpers::crashAbort(" << sig << ")]"; + if (sourceFile != nullptr && strlen(sourceFile) > 0) { + ss << " - Source: " << sourceFile; + if (line > 0) + ss << ":" << line; + else + ss << " (line number not specified)"; + } + base::utils::abort(sig, ss.str()); } -void 
Helpers::logCrashReason(int sig, bool stackTraceIfAvailable, Level level, const char* logger) { - el::base::debug::logCrashReason(sig, stackTraceIfAvailable, level, logger); +void +Helpers::logCrashReason(int sig, bool stackTraceIfAvailable, Level level, const char* logger) { + el::base::debug::logCrashReason(sig, stackTraceIfAvailable, level, logger); } -#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) +#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) // Loggers -Logger* Loggers::getLogger(const std::string& identity, bool registerIfNotAvailable) { - return ELPP->registeredLoggers()->get(identity, registerIfNotAvailable); +Logger* +Loggers::getLogger(const std::string& identity, bool registerIfNotAvailable) { + return ELPP->registeredLoggers()->get(identity, registerIfNotAvailable); } -void Loggers::setDefaultLogBuilder(el::LogBuilderPtr& logBuilderPtr) { - ELPP->registeredLoggers()->setDefaultLogBuilder(logBuilderPtr); +void +Loggers::setDefaultLogBuilder(el::LogBuilderPtr& logBuilderPtr) { + ELPP->registeredLoggers()->setDefaultLogBuilder(logBuilderPtr); } -bool Loggers::unregisterLogger(const std::string& identity) { - return ELPP->registeredLoggers()->remove(identity); +bool +Loggers::unregisterLogger(const std::string& identity) { + return ELPP->registeredLoggers()->remove(identity); } -bool Loggers::hasLogger(const std::string& identity) { - return ELPP->registeredLoggers()->has(identity); +bool +Loggers::hasLogger(const std::string& identity) { + return ELPP->registeredLoggers()->has(identity); } -Logger* Loggers::reconfigureLogger(Logger* logger, const Configurations& configurations) { - if (!logger) return nullptr; - logger->configure(configurations); - return logger; +Logger* +Loggers::reconfigureLogger(Logger* logger, const Configurations& configurations) { + if (!logger) + return nullptr; + logger->configure(configurations); + return logger; } -Logger* Loggers::reconfigureLogger(const std::string& identity, const Configurations& configurations) { - return Loggers::reconfigureLogger(Loggers::getLogger(identity), configurations); +Logger* +Loggers::reconfigureLogger(const std::string& identity, const Configurations& configurations) { + return Loggers::reconfigureLogger(Loggers::getLogger(identity), configurations); } -Logger* Loggers::reconfigureLogger(const std::string& identity, ConfigurationType configurationType, - const std::string& value) { - Logger* logger = Loggers::getLogger(identity); - if (logger == nullptr) { - return nullptr; - } - logger->configurations()->set(Level::Global, configurationType, value); - logger->reconfigure(); - return logger; -} - -void Loggers::reconfigureAllLoggers(const Configurations& configurations) { - for (base::RegisteredLoggers::iterator it = ELPP->registeredLoggers()->begin(); - it != ELPP->registeredLoggers()->end(); ++it) { - Loggers::reconfigureLogger(it->second, configurations); - } -} - -void Loggers::reconfigureAllLoggers(Level level, ConfigurationType configurationType, - const std::string& value) { - for (base::RegisteredLoggers::iterator it = ELPP->registeredLoggers()->begin(); - it != ELPP->registeredLoggers()->end(); ++it) { - Logger* logger = it->second; - logger->configurations()->set(level, configurationType, value); - logger->reconfigure(); - } -} - -void Loggers::setDefaultConfigurations(const Configurations& configurations, bool reconfigureExistingLoggers) { - ELPP->registeredLoggers()->setDefaultConfigurations(configurations); - if (reconfigureExistingLoggers) { - 
Loggers::reconfigureAllLoggers(configurations); - } -} - -const Configurations* Loggers::defaultConfigurations(void) { - return ELPP->registeredLoggers()->defaultConfigurations(); -} - -const base::LogStreamsReferenceMap* Loggers::logStreamsReference(void) { - return ELPP->registeredLoggers()->logStreamsReference(); -} - -base::TypedConfigurations Loggers::defaultTypedConfigurations(void) { - return base::TypedConfigurations( - ELPP->registeredLoggers()->defaultConfigurations(), - ELPP->registeredLoggers()->logStreamsReference()); -} - -std::vector* Loggers::populateAllLoggerIds(std::vector* targetList) { - targetList->clear(); - for (base::RegisteredLoggers::iterator it = ELPP->registeredLoggers()->list().begin(); - it != ELPP->registeredLoggers()->list().end(); ++it) { - targetList->push_back(it->first); - } - return targetList; -} - -void Loggers::configureFromGlobal(const char* globalConfigurationFilePath) { - std::ifstream gcfStream(globalConfigurationFilePath, std::ifstream::in); - ELPP_ASSERT(gcfStream.is_open(), "Unable to open global configuration file [" << globalConfigurationFilePath - << "] for parsing."); - std::string line = std::string(); - std::stringstream ss; - Logger* logger = nullptr; - auto configure = [&](void) { - ELPP_INTERNAL_INFO(8, "Configuring logger: '" << logger->id() << "' with configurations \n" << ss.str() - << "\n--------------"); - Configurations c; - c.parseFromText(ss.str()); - logger->configure(c); - }; - while (gcfStream.good()) { - std::getline(gcfStream, line); - ELPP_INTERNAL_INFO(1, "Parsing line: " << line); - base::utils::Str::trim(line); - if (Configurations::Parser::isComment(line)) continue; - Configurations::Parser::ignoreComments(&line); - base::utils::Str::trim(line); - if (line.size() > 2 && base::utils::Str::startsWith(line, std::string(base::consts::kConfigurationLoggerId))) { - if (!ss.str().empty() && logger != nullptr) { - configure(); - } - ss.str(std::string("")); - line = line.substr(2); - base::utils::Str::trim(line); - if (line.size() > 1) { - ELPP_INTERNAL_INFO(1, "Getting logger: '" << line << "'"); - logger = getLogger(line); - } - } else { - ss << line << "\n"; +Logger* +Loggers::reconfigureLogger(const std::string& identity, ConfigurationType configurationType, const std::string& value) { + Logger* logger = Loggers::getLogger(identity); + if (logger == nullptr) { + return nullptr; } - } - if (!ss.str().empty() && logger != nullptr) { - configure(); - } + logger->configurations()->set(Level::Global, configurationType, value); + logger->reconfigure(); + return logger; } -bool Loggers::configureFromArg(const char* argKey) { +void +Loggers::reconfigureAllLoggers(const Configurations& configurations) { + for (base::RegisteredLoggers::iterator it = ELPP->registeredLoggers()->begin(); + it != ELPP->registeredLoggers()->end(); ++it) { + Loggers::reconfigureLogger(it->second, configurations); + } +} + +void +Loggers::reconfigureAllLoggers(Level level, ConfigurationType configurationType, const std::string& value) { + for (base::RegisteredLoggers::iterator it = ELPP->registeredLoggers()->begin(); + it != ELPP->registeredLoggers()->end(); ++it) { + Logger* logger = it->second; + logger->configurations()->set(level, configurationType, value); + logger->reconfigure(); + } +} + +void +Loggers::setDefaultConfigurations(const Configurations& configurations, bool reconfigureExistingLoggers) { + ELPP->registeredLoggers()->setDefaultConfigurations(configurations); + if (reconfigureExistingLoggers) { + 
Loggers::reconfigureAllLoggers(configurations); + } +} + +const Configurations* +Loggers::defaultConfigurations(void) { + return ELPP->registeredLoggers()->defaultConfigurations(); +} + +const base::LogStreamsReferenceMap* +Loggers::logStreamsReference(void) { + return ELPP->registeredLoggers()->logStreamsReference(); +} + +base::TypedConfigurations +Loggers::defaultTypedConfigurations(void) { + return base::TypedConfigurations(ELPP->registeredLoggers()->defaultConfigurations(), + ELPP->registeredLoggers()->logStreamsReference()); +} + +std::vector* +Loggers::populateAllLoggerIds(std::vector* targetList) { + targetList->clear(); + for (base::RegisteredLoggers::iterator it = ELPP->registeredLoggers()->list().begin(); + it != ELPP->registeredLoggers()->list().end(); ++it) { + targetList->push_back(it->first); + } + return targetList; +} + +void +Loggers::configureFromGlobal(const char* globalConfigurationFilePath) { + std::ifstream gcfStream(globalConfigurationFilePath, std::ifstream::in); + ELPP_ASSERT(gcfStream.is_open(), + "Unable to open global configuration file [" << globalConfigurationFilePath << "] for parsing."); + std::string line = std::string(); + std::stringstream ss; + Logger* logger = nullptr; + auto configure = [&](void) { + ELPP_INTERNAL_INFO(8, "Configuring logger: '" << logger->id() << "' with configurations \n" + << ss.str() << "\n--------------"); + Configurations c; + c.parseFromText(ss.str()); + logger->configure(c); + }; + while (gcfStream.good()) { + std::getline(gcfStream, line); + ELPP_INTERNAL_INFO(1, "Parsing line: " << line); + base::utils::Str::trim(line); + if (Configurations::Parser::isComment(line)) + continue; + Configurations::Parser::ignoreComments(&line); + base::utils::Str::trim(line); + if (line.size() > 2 && base::utils::Str::startsWith(line, std::string(base::consts::kConfigurationLoggerId))) { + if (!ss.str().empty() && logger != nullptr) { + configure(); + } + ss.str(std::string("")); + line = line.substr(2); + base::utils::Str::trim(line); + if (line.size() > 1) { + ELPP_INTERNAL_INFO(1, "Getting logger: '" << line << "'"); + logger = getLogger(line); + } + } else { + ss << line << "\n"; + } + } + if (!ss.str().empty() && logger != nullptr) { + configure(); + } +} + +bool +Loggers::configureFromArg(const char* argKey) { #if defined(ELPP_DISABLE_CONFIGURATION_FROM_PROGRAM_ARGS) - ELPP_UNUSED(argKey); + ELPP_UNUSED(argKey); #else - if (!Helpers::commandLineArgs()->hasParamWithValue(argKey)) { - return false; - } - configureFromGlobal(Helpers::commandLineArgs()->getParamValue(argKey)); + if (!Helpers::commandLineArgs()->hasParamWithValue(argKey)) { + return false; + } + configureFromGlobal(Helpers::commandLineArgs()->getParamValue(argKey)); #endif // defined(ELPP_DISABLE_CONFIGURATION_FROM_PROGRAM_ARGS) - return true; + return true; } -void Loggers::flushAll(void) { - ELPP->registeredLoggers()->flushAll(); +void +Loggers::flushAll(void) { + ELPP->registeredLoggers()->flushAll(); } -void Loggers::setVerboseLevel(base::type::VerboseLevel level) { - ELPP->vRegistry()->setLevel(level); +void +Loggers::setVerboseLevel(base::type::VerboseLevel level) { + ELPP->vRegistry()->setLevel(level); } -base::type::VerboseLevel Loggers::verboseLevel(void) { - return ELPP->vRegistry()->level(); +base::type::VerboseLevel +Loggers::verboseLevel(void) { + return ELPP->vRegistry()->level(); } -void Loggers::setVModules(const char* modules) { - if (ELPP->vRegistry()->vModulesEnabled()) { - ELPP->vRegistry()->setModules(modules); - } +void +Loggers::setVModules(const char* 
modules) { + if (ELPP->vRegistry()->vModulesEnabled()) { + ELPP->vRegistry()->setModules(modules); + } } -void Loggers::clearVModules(void) { - ELPP->vRegistry()->clearModules(); +void +Loggers::clearVModules(void) { + ELPP->vRegistry()->clearModules(); } // VersionInfo -const std::string VersionInfo::version(void) { - return std::string("9.96.7"); +const std::string +VersionInfo::version(void) { + return std::string("9.96.7"); } /// @brief Release date of current version -const std::string VersionInfo::releaseDate(void) { - return std::string("24-11-2018 0728hrs"); +const std::string +VersionInfo::releaseDate(void) { + return std::string("24-11-2018 0728hrs"); } -} // namespace el +} // namespace el diff --git a/core/src/external/easyloggingpp/easylogging++.h b/core/src/external/easyloggingpp/easylogging++.h index 62a7c5a423..ce6c7ece48 100644 --- a/core/src/external/easyloggingpp/easylogging++.h +++ b/core/src/external/easyloggingpp/easylogging++.h @@ -18,220 +18,242 @@ #define EASYLOGGINGPP_H // Compilers and C++0x/C++11 Evaluation #if __cplusplus >= 201103L -# define ELPP_CXX11 1 +#define ELPP_CXX11 1 #endif // __cplusplus >= 201103L #if (defined(__GNUC__)) -# define ELPP_COMPILER_GCC 1 +#define ELPP_COMPILER_GCC 1 #else -# define ELPP_COMPILER_GCC 0 +#define ELPP_COMPILER_GCC 0 #endif #if ELPP_COMPILER_GCC -# define ELPP_GCC_VERSION (__GNUC__ * 10000 \ -+ __GNUC_MINOR__ * 100 \ -+ __GNUC_PATCHLEVEL__) -# if defined(__GXX_EXPERIMENTAL_CXX0X__) -# define ELPP_CXX0X 1 -# endif +#define ELPP_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +#if defined(__GXX_EXPERIMENTAL_CXX0X__) +#define ELPP_CXX0X 1 +#endif #endif // Visual C++ #if defined(_MSC_VER) -# define ELPP_COMPILER_MSVC 1 +#define ELPP_COMPILER_MSVC 1 #else -# define ELPP_COMPILER_MSVC 0 +#define ELPP_COMPILER_MSVC 0 #endif #define ELPP_CRT_DBG_WARNINGS ELPP_COMPILER_MSVC #if ELPP_COMPILER_MSVC -# if (_MSC_VER == 1600) -# define ELPP_CXX0X 1 -# elif(_MSC_VER >= 1700) -# define ELPP_CXX11 1 -# endif +#if (_MSC_VER == 1600) +#define ELPP_CXX0X 1 +#elif (_MSC_VER >= 1700) +#define ELPP_CXX11 1 +#endif #endif // Clang++ #if (defined(__clang__) && (__clang__ == 1)) -# define ELPP_COMPILER_CLANG 1 +#define ELPP_COMPILER_CLANG 1 #else -# define ELPP_COMPILER_CLANG 0 +#define ELPP_COMPILER_CLANG 0 #endif #if ELPP_COMPILER_CLANG -# if __has_include() -# include // Make __GLIBCXX__ defined when using libstdc++ -# if !defined(__GLIBCXX__) || __GLIBCXX__ >= 20150426 -# define ELPP_CLANG_SUPPORTS_THREAD -# endif // !defined(__GLIBCXX__) || __GLIBCXX__ >= 20150426 -# endif // __has_include() +#if __has_include() +#include // Make __GLIBCXX__ defined when using libstdc++ +#if !defined(__GLIBCXX__) || __GLIBCXX__ >= 20150426 +#define ELPP_CLANG_SUPPORTS_THREAD +#endif // !defined(__GLIBCXX__) || __GLIBCXX__ >= 20150426 +#endif // __has_include() #endif #if (defined(__MINGW32__) || defined(__MINGW64__)) -# define ELPP_MINGW 1 +#define ELPP_MINGW 1 #else -# define ELPP_MINGW 0 +#define ELPP_MINGW 0 #endif #if (defined(__CYGWIN__) && (__CYGWIN__ == 1)) -# define ELPP_CYGWIN 1 +#define ELPP_CYGWIN 1 #else -# define ELPP_CYGWIN 0 +#define ELPP_CYGWIN 0 #endif #if (defined(__INTEL_COMPILER)) -# define ELPP_COMPILER_INTEL 1 +#define ELPP_COMPILER_INTEL 1 #else -# define ELPP_COMPILER_INTEL 0 +#define ELPP_COMPILER_INTEL 0 #endif // Operating System Evaluation // Windows #if (defined(_WIN32) || defined(_WIN64)) -# define ELPP_OS_WINDOWS 1 +#define ELPP_OS_WINDOWS 1 #else -# define ELPP_OS_WINDOWS 0 +#define ELPP_OS_WINDOWS 0 
#endif // Linux #if (defined(__linux) || defined(__linux__)) -# define ELPP_OS_LINUX 1 +#define ELPP_OS_LINUX 1 #else -# define ELPP_OS_LINUX 0 +#define ELPP_OS_LINUX 0 #endif #if (defined(__APPLE__)) -# define ELPP_OS_MAC 1 +#define ELPP_OS_MAC 1 #else -# define ELPP_OS_MAC 0 +#define ELPP_OS_MAC 0 #endif #if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) -# define ELPP_OS_FREEBSD 1 +#define ELPP_OS_FREEBSD 1 #else -# define ELPP_OS_FREEBSD 0 +#define ELPP_OS_FREEBSD 0 #endif #if (defined(__sun)) -# define ELPP_OS_SOLARIS 1 +#define ELPP_OS_SOLARIS 1 #else -# define ELPP_OS_SOLARIS 0 +#define ELPP_OS_SOLARIS 0 #endif #if (defined(_AIX)) -# define ELPP_OS_AIX 1 +#define ELPP_OS_AIX 1 #else -# define ELPP_OS_AIX 0 +#define ELPP_OS_AIX 0 #endif #if (defined(__NetBSD__)) -# define ELPP_OS_NETBSD 1 +#define ELPP_OS_NETBSD 1 #else -# define ELPP_OS_NETBSD 0 +#define ELPP_OS_NETBSD 0 #endif #if defined(__EMSCRIPTEN__) -# define ELPP_OS_EMSCRIPTEN 1 +#define ELPP_OS_EMSCRIPTEN 1 #else -# define ELPP_OS_EMSCRIPTEN 0 +#define ELPP_OS_EMSCRIPTEN 0 #endif // Unix -#if ((ELPP_OS_LINUX || ELPP_OS_MAC || ELPP_OS_FREEBSD || ELPP_OS_NETBSD || ELPP_OS_SOLARIS || ELPP_OS_AIX || ELPP_OS_EMSCRIPTEN) && (!ELPP_OS_WINDOWS)) -# define ELPP_OS_UNIX 1 +#if ((ELPP_OS_LINUX || ELPP_OS_MAC || ELPP_OS_FREEBSD || ELPP_OS_NETBSD || ELPP_OS_SOLARIS || ELPP_OS_AIX || \ + ELPP_OS_EMSCRIPTEN) && \ + (!ELPP_OS_WINDOWS)) +#define ELPP_OS_UNIX 1 #else -# define ELPP_OS_UNIX 0 +#define ELPP_OS_UNIX 0 #endif #if (defined(__ANDROID__)) -# define ELPP_OS_ANDROID 1 +#define ELPP_OS_ANDROID 1 #else -# define ELPP_OS_ANDROID 0 +#define ELPP_OS_ANDROID 0 #endif // Evaluating Cygwin as *nix OS #if !ELPP_OS_UNIX && !ELPP_OS_WINDOWS && ELPP_CYGWIN -# undef ELPP_OS_UNIX -# undef ELPP_OS_LINUX -# define ELPP_OS_UNIX 1 -# define ELPP_OS_LINUX 1 -#endif // !ELPP_OS_UNIX && !ELPP_OS_WINDOWS && ELPP_CYGWIN +#undef ELPP_OS_UNIX +#undef ELPP_OS_LINUX +#define ELPP_OS_UNIX 1 +#define ELPP_OS_LINUX 1 +#endif // !ELPP_OS_UNIX && !ELPP_OS_WINDOWS && ELPP_CYGWIN #if !defined(ELPP_INTERNAL_DEBUGGING_OUT_INFO) -# define ELPP_INTERNAL_DEBUGGING_OUT_INFO std::cout -#endif // !defined(ELPP_INTERNAL_DEBUGGING_OUT) +#define ELPP_INTERNAL_DEBUGGING_OUT_INFO std::cout +#endif // !defined(ELPP_INTERNAL_DEBUGGING_OUT) #if !defined(ELPP_INTERNAL_DEBUGGING_OUT_ERROR) -# define ELPP_INTERNAL_DEBUGGING_OUT_ERROR std::cerr -#endif // !defined(ELPP_INTERNAL_DEBUGGING_OUT) +#define ELPP_INTERNAL_DEBUGGING_OUT_ERROR std::cerr +#endif // !defined(ELPP_INTERNAL_DEBUGGING_OUT) #if !defined(ELPP_INTERNAL_DEBUGGING_ENDL) -# define ELPP_INTERNAL_DEBUGGING_ENDL std::endl -#endif // !defined(ELPP_INTERNAL_DEBUGGING_OUT) +#define ELPP_INTERNAL_DEBUGGING_ENDL std::endl +#endif // !defined(ELPP_INTERNAL_DEBUGGING_OUT) #if !defined(ELPP_INTERNAL_DEBUGGING_MSG) -# define ELPP_INTERNAL_DEBUGGING_MSG(msg) msg -#endif // !defined(ELPP_INTERNAL_DEBUGGING_OUT) +#define ELPP_INTERNAL_DEBUGGING_MSG(msg) msg +#endif // !defined(ELPP_INTERNAL_DEBUGGING_OUT) // Internal Assertions and errors #if !defined(ELPP_DISABLE_ASSERT) -# if (defined(ELPP_DEBUG_ASSERT_FAILURE)) -# define ELPP_ASSERT(expr, msg) if (!(expr)) { \ -std::stringstream internalInfoStream; internalInfoStream << msg; \ -ELPP_INTERNAL_DEBUGGING_OUT_ERROR \ -<< "EASYLOGGING++ ASSERTION FAILED (LINE: " << __LINE__ << ") [" #expr << "] WITH MESSAGE \"" \ -<< ELPP_INTERNAL_DEBUGGING_MSG(internalInfoStream.str()) << "\"" << ELPP_INTERNAL_DEBUGGING_ENDL; base::utils::abort(1, \ -"ELPP Assertion failure, please define 
ELPP_DEBUG_ASSERT_FAILURE"); } -# else -# define ELPP_ASSERT(expr, msg) if (!(expr)) { \ -std::stringstream internalInfoStream; internalInfoStream << msg; \ -ELPP_INTERNAL_DEBUGGING_OUT_ERROR\ -<< "ASSERTION FAILURE FROM EASYLOGGING++ (LINE: " \ -<< __LINE__ << ") [" #expr << "] WITH MESSAGE \"" << ELPP_INTERNAL_DEBUGGING_MSG(internalInfoStream.str()) << "\"" \ -<< ELPP_INTERNAL_DEBUGGING_ENDL; } -# endif // (defined(ELPP_DEBUG_ASSERT_FAILURE)) +#if (defined(ELPP_DEBUG_ASSERT_FAILURE)) +#define ELPP_ASSERT(expr, msg) \ + if (!(expr)) { \ + std::stringstream internalInfoStream; \ + internalInfoStream << msg; \ + ELPP_INTERNAL_DEBUGGING_OUT_ERROR \ + << "EASYLOGGING++ ASSERTION FAILED (LINE: " << __LINE__ << ") [" #expr << "] WITH MESSAGE \"" \ + << ELPP_INTERNAL_DEBUGGING_MSG(internalInfoStream.str()) << "\"" << ELPP_INTERNAL_DEBUGGING_ENDL; \ + base::utils::abort(1, "ELPP Assertion failure, please define ELPP_DEBUG_ASSERT_FAILURE"); \ + } #else -# define ELPP_ASSERT(x, y) +#define ELPP_ASSERT(expr, msg) \ + if (!(expr)) { \ + std::stringstream internalInfoStream; \ + internalInfoStream << msg; \ + ELPP_INTERNAL_DEBUGGING_OUT_ERROR \ + << "ASSERTION FAILURE FROM EASYLOGGING++ (LINE: " << __LINE__ << ") [" #expr << "] WITH MESSAGE \"" \ + << ELPP_INTERNAL_DEBUGGING_MSG(internalInfoStream.str()) << "\"" << ELPP_INTERNAL_DEBUGGING_ENDL; \ + } +#endif // (defined(ELPP_DEBUG_ASSERT_FAILURE)) +#else +#define ELPP_ASSERT(x, y) #endif //(!defined(ELPP_DISABLE_ASSERT) #if ELPP_COMPILER_MSVC -# define ELPP_INTERNAL_DEBUGGING_WRITE_PERROR \ -{ char buff[256]; strerror_s(buff, 256, errno); \ -ELPP_INTERNAL_DEBUGGING_OUT_ERROR << ": " << buff << " [" << errno << "]";} (void)0 +#define ELPP_INTERNAL_DEBUGGING_WRITE_PERROR \ + { \ + char buff[256]; \ + strerror_s(buff, 256, errno); \ + ELPP_INTERNAL_DEBUGGING_OUT_ERROR << ": " << buff << " [" << errno << "]"; \ + } \ + (void)0 #else -# define ELPP_INTERNAL_DEBUGGING_WRITE_PERROR \ -ELPP_INTERNAL_DEBUGGING_OUT_ERROR << ": " << strerror(errno) << " [" << errno << "]"; (void)0 +#define ELPP_INTERNAL_DEBUGGING_WRITE_PERROR \ + ELPP_INTERNAL_DEBUGGING_OUT_ERROR << ": " << strerror(errno) << " [" << errno << "]"; \ + (void)0 #endif // ELPP_COMPILER_MSVC #if defined(ELPP_DEBUG_ERRORS) -# if !defined(ELPP_INTERNAL_ERROR) -# define ELPP_INTERNAL_ERROR(msg, pe) { \ -std::stringstream internalInfoStream; internalInfoStream << " " << msg; \ -ELPP_INTERNAL_DEBUGGING_OUT_ERROR \ -<< "ERROR FROM EASYLOGGING++ (LINE: " << __LINE__ << ") " \ -<< ELPP_INTERNAL_DEBUGGING_MSG(internalInfoStream.str()) << ELPP_INTERNAL_DEBUGGING_ENDL; \ -if (pe) { ELPP_INTERNAL_DEBUGGING_OUT_ERROR << " "; ELPP_INTERNAL_DEBUGGING_WRITE_PERROR; }} (void)0 -# endif +#if !defined(ELPP_INTERNAL_ERROR) +#define ELPP_INTERNAL_ERROR(msg, pe) \ + { \ + std::stringstream internalInfoStream; \ + internalInfoStream << " " << msg; \ + ELPP_INTERNAL_DEBUGGING_OUT_ERROR << "ERROR FROM EASYLOGGING++ (LINE: " << __LINE__ << ") " \ + << ELPP_INTERNAL_DEBUGGING_MSG(internalInfoStream.str()) \ + << ELPP_INTERNAL_DEBUGGING_ENDL; \ + if (pe) { \ + ELPP_INTERNAL_DEBUGGING_OUT_ERROR << " "; \ + ELPP_INTERNAL_DEBUGGING_WRITE_PERROR; \ + } \ + } \ + (void)0 +#endif #else -# undef ELPP_INTERNAL_INFO -# define ELPP_INTERNAL_ERROR(msg, pe) +#undef ELPP_INTERNAL_INFO +#define ELPP_INTERNAL_ERROR(msg, pe) #endif // defined(ELPP_DEBUG_ERRORS) #if (defined(ELPP_DEBUG_INFO)) -# if !(defined(ELPP_INTERNAL_INFO_LEVEL)) -# define ELPP_INTERNAL_INFO_LEVEL 9 -# endif // !(defined(ELPP_INTERNAL_INFO_LEVEL)) -# if 
!defined(ELPP_INTERNAL_INFO) -# define ELPP_INTERNAL_INFO(lvl, msg) { if (lvl <= ELPP_INTERNAL_INFO_LEVEL) { \ -std::stringstream internalInfoStream; internalInfoStream << " " << msg; \ -ELPP_INTERNAL_DEBUGGING_OUT_INFO << ELPP_INTERNAL_DEBUGGING_MSG(internalInfoStream.str()) \ -<< ELPP_INTERNAL_DEBUGGING_ENDL; }} -# endif +#if !(defined(ELPP_INTERNAL_INFO_LEVEL)) +#define ELPP_INTERNAL_INFO_LEVEL 9 +#endif // !(defined(ELPP_INTERNAL_INFO_LEVEL)) +#if !defined(ELPP_INTERNAL_INFO) +#define ELPP_INTERNAL_INFO(lvl, msg) \ + { \ + if (lvl <= ELPP_INTERNAL_INFO_LEVEL) { \ + std::stringstream internalInfoStream; \ + internalInfoStream << " " << msg; \ + ELPP_INTERNAL_DEBUGGING_OUT_INFO << ELPP_INTERNAL_DEBUGGING_MSG(internalInfoStream.str()) \ + << ELPP_INTERNAL_DEBUGGING_ENDL; \ + } \ + } +#endif #else -# undef ELPP_INTERNAL_INFO -# define ELPP_INTERNAL_INFO(lvl, msg) +#undef ELPP_INTERNAL_INFO +#define ELPP_INTERNAL_INFO(lvl, msg) #endif // (defined(ELPP_DEBUG_INFO)) #if (defined(ELPP_FEATURE_ALL)) || (defined(ELPP_FEATURE_CRASH_LOG)) -# if (ELPP_COMPILER_GCC && !ELPP_MINGW && !ELPP_OS_ANDROID && !ELPP_OS_EMSCRIPTEN) -# define ELPP_STACKTRACE 1 -# else -# if ELPP_COMPILER_MSVC -# pragma message("Stack trace not available for this compiler") -# else -# warning "Stack trace not available for this compiler"; -# endif // ELPP_COMPILER_MSVC -# define ELPP_STACKTRACE 0 -# endif // ELPP_COMPILER_GCC +#if (ELPP_COMPILER_GCC && !ELPP_MINGW && !ELPP_OS_ANDROID && !ELPP_OS_EMSCRIPTEN) +#define ELPP_STACKTRACE 1 #else -# define ELPP_STACKTRACE 0 +#if ELPP_COMPILER_MSVC +#pragma message("Stack trace not available for this compiler") +#else +#warning "Stack trace not available for this compiler"; +#endif // ELPP_COMPILER_MSVC +#define ELPP_STACKTRACE 0 +#endif // ELPP_COMPILER_GCC +#else +#define ELPP_STACKTRACE 0 #endif // (defined(ELPP_FEATURE_ALL)) || (defined(ELPP_FEATURE_CRASH_LOG)) // Miscellaneous macros #define ELPP_UNUSED(x) (void)x #if ELPP_OS_UNIX // Log file permissions for unix-based systems -# define ELPP_LOG_PERMS S_IRUSR | S_IWUSR | S_IXUSR | S_IWGRP | S_IRGRP | S_IXGRP | S_IWOTH | S_IXOTH +#define ELPP_LOG_PERMS S_IRUSR | S_IWUSR | S_IXUSR | S_IWGRP | S_IRGRP | S_IXGRP | S_IWOTH | S_IXOTH #endif // ELPP_OS_UNIX #if defined(ELPP_AS_DLL) && ELPP_COMPILER_MSVC -# if defined(ELPP_EXPORT_SYMBOLS) -# define ELPP_EXPORT __declspec(dllexport) -# else -# define ELPP_EXPORT __declspec(dllimport) -# endif // defined(ELPP_EXPORT_SYMBOLS) +#if defined(ELPP_EXPORT_SYMBOLS) +#define ELPP_EXPORT __declspec(dllexport) #else -# define ELPP_EXPORT +#define ELPP_EXPORT __declspec(dllimport) +#endif // defined(ELPP_EXPORT_SYMBOLS) +#else +#define ELPP_EXPORT #endif // defined(ELPP_AS_DLL) && ELPP_COMPILER_MSVC // Some special functions that are VC++ specific #undef STRTOK @@ -239,65 +261,64 @@ ELPP_INTERNAL_DEBUGGING_OUT_INFO << ELPP_INTERNAL_DEBUGGING_MSG(internalInfoStre #undef STRCAT #undef STRCPY #if ELPP_CRT_DBG_WARNINGS -# define STRTOK(a, b, c) strtok_s(a, b, c) -# define STRERROR(a, b, c) strerror_s(a, b, c) -# define STRCAT(a, b, len) strcat_s(a, len, b) -# define STRCPY(a, b, len) strcpy_s(a, len, b) +#define STRTOK(a, b, c) strtok_s(a, b, c) +#define STRERROR(a, b, c) strerror_s(a, b, c) +#define STRCAT(a, b, len) strcat_s(a, len, b) +#define STRCPY(a, b, len) strcpy_s(a, len, b) #else -# define STRTOK(a, b, c) strtok(a, b) -# define STRERROR(a, b, c) strerror(c) -# define STRCAT(a, b, len) strcat(a, b) -# define STRCPY(a, b, len) strcpy(a, b) +#define STRTOK(a, b, c) strtok(a, b) +#define 
STRERROR(a, b, c) strerror(c) +#define STRCAT(a, b, len) strcat(a, b) +#define STRCPY(a, b, len) strcpy(a, b) #endif // Compiler specific support evaluations #if (ELPP_MINGW && !defined(ELPP_FORCE_USE_STD_THREAD)) -# define ELPP_USE_STD_THREADING 0 +#define ELPP_USE_STD_THREADING 0 #else -# if ((ELPP_COMPILER_CLANG && defined(ELPP_CLANG_SUPPORTS_THREAD)) || \ - (!ELPP_COMPILER_CLANG && defined(ELPP_CXX11)) || \ - defined(ELPP_FORCE_USE_STD_THREAD)) -# define ELPP_USE_STD_THREADING 1 -# else -# define ELPP_USE_STD_THREADING 0 -# endif +#if ((ELPP_COMPILER_CLANG && defined(ELPP_CLANG_SUPPORTS_THREAD)) || (!ELPP_COMPILER_CLANG && defined(ELPP_CXX11)) || \ + defined(ELPP_FORCE_USE_STD_THREAD)) +#define ELPP_USE_STD_THREADING 1 +#else +#define ELPP_USE_STD_THREADING 0 +#endif #endif #undef ELPP_FINAL #if ELPP_COMPILER_INTEL || (ELPP_GCC_VERSION < 40702) -# define ELPP_FINAL +#define ELPP_FINAL #else -# define ELPP_FINAL final +#define ELPP_FINAL final #endif // ELPP_COMPILER_INTEL || (ELPP_GCC_VERSION < 40702) #if defined(ELPP_EXPERIMENTAL_ASYNC) -# define ELPP_ASYNC_LOGGING 1 +#define ELPP_ASYNC_LOGGING 1 #else -# define ELPP_ASYNC_LOGGING 0 -#endif // defined(ELPP_EXPERIMENTAL_ASYNC) +#define ELPP_ASYNC_LOGGING 0 +#endif // defined(ELPP_EXPERIMENTAL_ASYNC) #if defined(ELPP_THREAD_SAFE) || ELPP_ASYNC_LOGGING -# define ELPP_THREADING_ENABLED 1 +#define ELPP_THREADING_ENABLED 1 #else -# define ELPP_THREADING_ENABLED 0 +#define ELPP_THREADING_ENABLED 0 #endif // defined(ELPP_THREAD_SAFE) || ELPP_ASYNC_LOGGING // Function macro ELPP_FUNC #undef ELPP_FUNC #if ELPP_COMPILER_MSVC // Visual C++ -# define ELPP_FUNC __FUNCSIG__ +#define ELPP_FUNC __FUNCSIG__ #elif ELPP_COMPILER_GCC // GCC -# define ELPP_FUNC __PRETTY_FUNCTION__ +#define ELPP_FUNC __PRETTY_FUNCTION__ #elif ELPP_COMPILER_INTEL // Intel C++ -# define ELPP_FUNC __PRETTY_FUNCTION__ +#define ELPP_FUNC __PRETTY_FUNCTION__ #elif ELPP_COMPILER_CLANG // Clang++ -# define ELPP_FUNC __PRETTY_FUNCTION__ +#define ELPP_FUNC __PRETTY_FUNCTION__ #else -# if defined(__func__) -# define ELPP_FUNC __func__ -# else -# define ELPP_FUNC "" -# endif // defined(__func__) +#if defined(__func__) +#define ELPP_FUNC __func__ +#else +#define ELPP_FUNC "" +#endif // defined(__func__) #endif // defined(_MSC_VER) #undef ELPP_VARIADIC_TEMPLATES_SUPPORTED // Keep following line commented until features are fixed #define ELPP_VARIADIC_TEMPLATES_SUPPORTED \ -(ELPP_COMPILER_GCC || ELPP_COMPILER_CLANG || ELPP_COMPILER_INTEL || (ELPP_COMPILER_MSVC && _MSC_VER >= 1800)) + (ELPP_COMPILER_GCC || ELPP_COMPILER_CLANG || ELPP_COMPILER_INTEL || (ELPP_COMPILER_MSVC && _MSC_VER >= 1800)) // Logging Enable/Disable macros #if defined(ELPP_DISABLE_LOGS) #define ELPP_LOGGING_ENABLED 0 @@ -305,165 +326,165 @@ ELPP_INTERNAL_DEBUGGING_OUT_INFO << ELPP_INTERNAL_DEBUGGING_MSG(internalInfoStre #define ELPP_LOGGING_ENABLED 1 #endif #if (!defined(ELPP_DISABLE_DEBUG_LOGS) && (ELPP_LOGGING_ENABLED)) -# define ELPP_DEBUG_LOG 1 +#define ELPP_DEBUG_LOG 1 #else -# define ELPP_DEBUG_LOG 0 +#define ELPP_DEBUG_LOG 0 #endif // (!defined(ELPP_DISABLE_DEBUG_LOGS) && (ELPP_LOGGING_ENABLED)) #if (!defined(ELPP_DISABLE_INFO_LOGS) && (ELPP_LOGGING_ENABLED)) -# define ELPP_INFO_LOG 1 +#define ELPP_INFO_LOG 1 #else -# define ELPP_INFO_LOG 0 +#define ELPP_INFO_LOG 0 #endif // (!defined(ELPP_DISABLE_INFO_LOGS) && (ELPP_LOGGING_ENABLED)) #if (!defined(ELPP_DISABLE_WARNING_LOGS) && (ELPP_LOGGING_ENABLED)) -# define ELPP_WARNING_LOG 1 +#define ELPP_WARNING_LOG 1 #else -# define ELPP_WARNING_LOG 0 +#define 
ELPP_WARNING_LOG 0 #endif // (!defined(ELPP_DISABLE_WARNING_LOGS) && (ELPP_LOGGING_ENABLED)) #if (!defined(ELPP_DISABLE_ERROR_LOGS) && (ELPP_LOGGING_ENABLED)) -# define ELPP_ERROR_LOG 1 +#define ELPP_ERROR_LOG 1 #else -# define ELPP_ERROR_LOG 0 +#define ELPP_ERROR_LOG 0 #endif // (!defined(ELPP_DISABLE_ERROR_LOGS) && (ELPP_LOGGING_ENABLED)) #if (!defined(ELPP_DISABLE_FATAL_LOGS) && (ELPP_LOGGING_ENABLED)) -# define ELPP_FATAL_LOG 1 +#define ELPP_FATAL_LOG 1 #else -# define ELPP_FATAL_LOG 0 +#define ELPP_FATAL_LOG 0 #endif // (!defined(ELPP_DISABLE_FATAL_LOGS) && (ELPP_LOGGING_ENABLED)) #if (!defined(ELPP_DISABLE_TRACE_LOGS) && (ELPP_LOGGING_ENABLED)) -# define ELPP_TRACE_LOG 1 +#define ELPP_TRACE_LOG 1 #else -# define ELPP_TRACE_LOG 0 +#define ELPP_TRACE_LOG 0 #endif // (!defined(ELPP_DISABLE_TRACE_LOGS) && (ELPP_LOGGING_ENABLED)) #if (!defined(ELPP_DISABLE_VERBOSE_LOGS) && (ELPP_LOGGING_ENABLED)) -# define ELPP_VERBOSE_LOG 1 +#define ELPP_VERBOSE_LOG 1 #else -# define ELPP_VERBOSE_LOG 0 +#define ELPP_VERBOSE_LOG 0 #endif // (!defined(ELPP_DISABLE_VERBOSE_LOGS) && (ELPP_LOGGING_ENABLED)) #if (!(ELPP_CXX0X || ELPP_CXX11)) -# error "C++0x (or higher) support not detected! (Is `-std=c++11' missing?)" +#error "C++0x (or higher) support not detected! (Is `-std=c++11' missing?)" #endif // (!(ELPP_CXX0X || ELPP_CXX11)) // Headers #if defined(ELPP_SYSLOG) -# include +#include #endif // defined(ELPP_SYSLOG) -#include -#include -#include #include -#include -#include #include +#include #include +#include +#include +#include +#include #if defined(ELPP_UNICODE) -# include -# if ELPP_OS_WINDOWS -# include -# endif // ELPP_OS_WINDOWS +#include +#if ELPP_OS_WINDOWS +#include +#endif // ELPP_OS_WINDOWS #endif // defined(ELPP_UNICODE) #if ELPP_STACKTRACE -# include -# include +#include +#include #endif // ELPP_STACKTRACE #if ELPP_OS_ANDROID -# include +#include #endif // ELPP_OS_ANDROID #if ELPP_OS_UNIX -# include -# include +#include +#include #elif ELPP_OS_WINDOWS -# include -# include -# if defined(WIN32_LEAN_AND_MEAN) -# if defined(ELPP_WINSOCK2) -# include -# else -# include -# endif // defined(ELPP_WINSOCK2) -# endif // defined(WIN32_LEAN_AND_MEAN) +#include +#include +#if defined(WIN32_LEAN_AND_MEAN) +#if defined(ELPP_WINSOCK2) +#include +#else +#include +#endif // defined(ELPP_WINSOCK2) +#endif // defined(WIN32_LEAN_AND_MEAN) #endif // ELPP_OS_UNIX -#include -#include -#include -#include -#include -#include #include #include +#include #include -#include +#include #include +#include +#include #include +#include +#include +#include #if ELPP_THREADING_ENABLED -# if ELPP_USE_STD_THREADING -# include -# include -# else -# if ELPP_OS_UNIX -# include -# endif // ELPP_OS_UNIX -# endif // ELPP_USE_STD_THREADING +#if ELPP_USE_STD_THREADING +#include +#include +#else +#if ELPP_OS_UNIX +#include +#endif // ELPP_OS_UNIX +#endif // ELPP_USE_STD_THREADING #endif // ELPP_THREADING_ENABLED #if ELPP_ASYNC_LOGGING -# if defined(ELPP_NO_SLEEP_FOR) -# include -# endif // defined(ELPP_NO_SLEEP_FOR) -# include -# include -# include +#if defined(ELPP_NO_SLEEP_FOR) +#include +#endif // defined(ELPP_NO_SLEEP_FOR) +#include +#include +#include #endif // ELPP_ASYNC_LOGGING #if defined(ELPP_STL_LOGGING) // For logging STL based templates -# include -# include -# include -# include -# include -# include -# if defined(ELPP_LOG_STD_ARRAY) -# include -# endif // defined(ELPP_LOG_STD_ARRAY) -# if defined(ELPP_LOG_UNORDERED_SET) -# include -# endif // defined(ELPP_UNORDERED_SET) +#include +#include +#include +#include +#include 
+#include +#if defined(ELPP_LOG_STD_ARRAY) +#include +#endif // defined(ELPP_LOG_STD_ARRAY) +#if defined(ELPP_LOG_UNORDERED_SET) +#include +#endif // defined(ELPP_UNORDERED_SET) #endif // defined(ELPP_STL_LOGGING) #if defined(ELPP_QT_LOGGING) // For logging Qt based classes & templates -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #endif // defined(ELPP_QT_LOGGING) #if defined(ELPP_BOOST_LOGGING) // For logging boost based classes & templates -# include -# include -# include -# include -# include -# include -# include -# include +#include +#include +#include +#include +#include +#include +#include +#include #endif // defined(ELPP_BOOST_LOGGING) #if defined(ELPP_WXWIDGETS_LOGGING) // For logging wxWidgets based classes & templates -# include +#include #endif // defined(ELPP_WXWIDGETS_LOGGING) #if defined(ELPP_UTC_DATETIME) -# define elpptime_r gmtime_r -# define elpptime_s gmtime_s -# define elpptime gmtime +#define elpptime_r gmtime_r +#define elpptime_s gmtime_s +#define elpptime gmtime #else -# define elpptime_r localtime_r -# define elpptime_s localtime_s -# define elpptime localtime +#define elpptime_r localtime_r +#define elpptime_s localtime_s +#define elpptime localtime #endif // defined(ELPP_UTC_DATETIME) // Forward declarations namespace el { @@ -472,7 +493,8 @@ class LogMessage; class PerformanceTrackingData; class Loggers; class Helpers; -template class Callback; +template +class Callback; class LogDispatchCallback; class PerformanceTrackingCallback; class LoggerRegistrationCallback; @@ -490,7 +512,7 @@ class DefaultLogDispatchCallback; #if ELPP_ASYNC_LOGGING class AsyncLogDispatchCallback; class AsyncDispatchWorker; -#endif // ELPP_ASYNC_LOGGING +#endif // ELPP_ASYNC_LOGGING class DefaultPerformanceTrackingCallback; } // namespace base } // namespace el @@ -504,26 +526,26 @@ namespace type { #undef ELPP_STRLEN #undef ELPP_COUT #if defined(ELPP_UNICODE) -# define ELPP_LITERAL(txt) L##txt -# define ELPP_STRLEN wcslen -# if defined ELPP_CUSTOM_COUT -# define ELPP_COUT ELPP_CUSTOM_COUT -# else -# define ELPP_COUT std::wcout -# endif // defined ELPP_CUSTOM_COUT +#define ELPP_LITERAL(txt) L##txt +#define ELPP_STRLEN wcslen +#if defined ELPP_CUSTOM_COUT +#define ELPP_COUT ELPP_CUSTOM_COUT +#else +#define ELPP_COUT std::wcout +#endif // defined ELPP_CUSTOM_COUT typedef wchar_t char_t; typedef std::wstring string_t; typedef std::wstringstream stringstream_t; typedef std::wfstream fstream_t; typedef std::wostream ostream_t; #else -# define ELPP_LITERAL(txt) txt -# define ELPP_STRLEN strlen -# if defined ELPP_CUSTOM_COUT -# define ELPP_COUT ELPP_CUSTOM_COUT -# else -# define ELPP_COUT std::cout -# endif // defined ELPP_CUSTOM_COUT +#define ELPP_LITERAL(txt) txt +#define ELPP_STRLEN strlen +#if defined ELPP_CUSTOM_COUT +#define ELPP_COUT ELPP_CUSTOM_COUT +#else +#define ELPP_COUT std::cout +#endif // defined ELPP_CUSTOM_COUT typedef char char_t; typedef std::string string_t; typedef std::stringstream stringstream_t; @@ -531,10 +553,10 @@ typedef std::fstream fstream_t; typedef std::ostream ostream_t; #endif // defined(ELPP_UNICODE) #if defined(ELPP_CUSTOM_COUT_LINE) -# define ELPP_COUT_LINE(logLine) ELPP_CUSTOM_COUT_LINE(logLine) +#define ELPP_COUT_LINE(logLine) ELPP_CUSTOM_COUT_LINE(logLine) #else -# define ELPP_COUT_LINE(logLine) logLine << std::flush -#endif // 
defined(ELPP_CUSTOM_COUT_LINE) +#define ELPP_COUT_LINE(logLine) logLine << std::flush +#endif // defined(ELPP_CUSTOM_COUT_LINE) typedef unsigned int EnumType; typedef unsigned short VerboseLevel; typedef unsigned long int LineNumber; @@ -549,10 +571,13 @@ typedef std::unique_ptr PerformanceTrackerPtr; /// @detail When using this class simply inherit it privately class NoCopy { protected: - NoCopy(void) {} + NoCopy(void) { + } + private: - NoCopy(const NoCopy&); - NoCopy& operator=(const NoCopy&); + NoCopy(const NoCopy&); + NoCopy& + operator=(const NoCopy&); }; /// @brief Internal helper class that makes all default constructors private. /// @@ -560,9 +585,10 @@ class NoCopy { /// When using this class simply inherit it privately class StaticClass { private: - StaticClass(void); - StaticClass(const StaticClass&); - StaticClass& operator=(const StaticClass&); + StaticClass(void); + StaticClass(const StaticClass&); + StaticClass& + operator=(const StaticClass&); }; } // namespace base /// @brief Represents enumeration for severity level used to determine level of logging @@ -570,281 +596,287 @@ class StaticClass { /// @detail With Easylogging++, developers may disable or enable any level regardless of /// what the severity is. Or they can choose to log using hierarchical logging flag enum class Level : base::type::EnumType { - /// @brief Generic level that represents all the levels. Useful when setting global configuration for all levels - Global = 1, - /// @brief Information that can be useful to back-trace certain events - mostly useful than debug logs. - Trace = 2, - /// @brief Informational events most useful for developers to debug application - Debug = 4, - /// @brief Severe error information that will presumably abort application - Fatal = 8, - /// @brief Information representing errors in application but application will keep running - Error = 16, - /// @brief Useful when application has potentially harmful situtaions - Warning = 32, - /// @brief Information that can be highly useful and vary with verbose logging level. - Verbose = 64, - /// @brief Mainly useful to represent current progress of application - Info = 128, - /// @brief Represents unknown level - Unknown = 1010 + /// @brief Generic level that represents all the levels. Useful when setting global configuration for all levels + Global = 1, + /// @brief Information that can be useful to back-trace certain events - mostly useful than debug logs. + Trace = 2, + /// @brief Informational events most useful for developers to debug application + Debug = 4, + /// @brief Severe error information that will presumably abort application + Fatal = 8, + /// @brief Information representing errors in application but application will keep running + Error = 16, + /// @brief Useful when application has potentially harmful situtaions + Warning = 32, + /// @brief Information that can be highly useful and vary with verbose logging level. 
+    Verbose = 64,
+    /// @brief Mainly useful to represent current progress of application
+    Info = 128,
+    /// @brief Represents unknown level
+    Unknown = 1010
 };
-} // namespace el
+}  // namespace el
 namespace std {
-template<> struct hash<el::Level> {
+template <>
+struct hash<el::Level> {
  public:
-  std::size_t operator()(const el::Level& l) const {
-    return hash<el::base::type::EnumType> {}(static_cast<el::base::type::EnumType>(l));
-  }
+    std::size_t
+    operator()(const el::Level& l) const {
+        return hash<el::base::type::EnumType>{}(static_cast<el::base::type::EnumType>(l));
+    }
 };
-}
+}  // namespace std
 namespace el {
 /// @brief Static class that contains helper functions for el::Level
 class LevelHelper : base::StaticClass {
  public:
-  /// @brief Represents minimum valid level. Useful when iterating through enum.
-  static const base::type::EnumType kMinValid = static_cast<base::type::EnumType>(Level::Trace);
-  /// @brief Represents maximum valid level. This is used internally and you should not need it.
-  static const base::type::EnumType kMaxValid = static_cast<base::type::EnumType>(Level::Info);
-  /// @brief Casts level to int, useful for iterating through enum.
-  static base::type::EnumType castToInt(Level level) {
-    return static_cast<base::type::EnumType>(level);
-  }
-  /// @brief Casts int(ushort) to level, useful for iterating through enum.
-  static Level castFromInt(base::type::EnumType l) {
-    return static_cast<Level>(l);
-  }
-  /// @brief Converts level to associated const char*
-  /// @return Upper case string based level.
-  static const char* convertToString(Level level);
-  /// @brief Converts from levelStr to Level
-  /// @param levelStr Upper case string based level.
-  /// Lower case is also valid but providing upper case is recommended.
-  static Level convertFromString(const char* levelStr);
-  /// @brief Applies specified function to each level starting from startIndex
-  /// @param startIndex initial value to start the iteration from. This is passed as pointer and
-  /// is left-shifted so this can be used inside function (fn) to represent current level.
-  /// @param fn function to apply with each level. This bool represent whether or not to stop iterating through levels.
-  static void forEachLevel(base::type::EnumType* startIndex, const std::function<bool(void)>& fn);
+    /// @brief Represents minimum valid level. Useful when iterating through enum.
+    static const base::type::EnumType kMinValid = static_cast<base::type::EnumType>(Level::Trace);
+    /// @brief Represents maximum valid level. This is used internally and you should not need it.
+    static const base::type::EnumType kMaxValid = static_cast<base::type::EnumType>(Level::Info);
+    /// @brief Casts level to int, useful for iterating through enum.
+    static base::type::EnumType
+    castToInt(Level level) {
+        return static_cast<base::type::EnumType>(level);
+    }
+    /// @brief Casts int(ushort) to level, useful for iterating through enum.
+    static Level
+    castFromInt(base::type::EnumType l) {
+        return static_cast<Level>(l);
+    }
+    /// @brief Converts level to associated const char*
+    /// @return Upper case string based level.
+    static const char*
+    convertToString(Level level);
+    /// @brief Converts from levelStr to Level
+    /// @param levelStr Upper case string based level.
+    /// Lower case is also valid but providing upper case is recommended.
+    static Level
+    convertFromString(const char* levelStr);
+    /// @brief Applies specified function to each level starting from startIndex
+    /// @param startIndex initial value to start the iteration from. This is passed as pointer and
+    /// is left-shifted so this can be used inside function (fn) to represent current level.
+    /// @param fn function to apply with each level. This bool represent whether or not to stop iterating through
+    /// levels.
+    static void
+    forEachLevel(base::type::EnumType* startIndex, const std::function<bool(void)>& fn);
 };
 /// @brief Represents enumeration of ConfigurationType used to configure or access certain aspect
 /// of logging
 enum class ConfigurationType : base::type::EnumType {
-  /// @brief Determines whether or not corresponding level and logger of logging is enabled
-  /// You may disable all logs by using el::Level::Global
-  Enabled = 1,
-  /// @brief Whether or not to write corresponding log to log file
-  ToFile = 2,
-  /// @brief Whether or not to write corresponding level and logger log to standard output.
-  /// By standard output meaning termnal, command prompt etc
-  ToStandardOutput = 4,
-  /// @brief Determines format of logging corresponding level and logger.
-  Format = 8,
-  /// @brief Determines log file (full path) to write logs to for correponding level and logger
-  Filename = 16,
-  /// @brief Specifies precision of the subsecond part. It should be within range (1-6).
-  SubsecondPrecision = 32,
-  /// @brief Alias of SubsecondPrecision (for backward compatibility)
-  MillisecondsWidth = SubsecondPrecision,
-  /// @brief Determines whether or not performance tracking is enabled.
-  ///
-  /// @detail This does not depend on logger or level. Performance tracking always uses 'performance' logger
-  PerformanceTracking = 64,
-  /// @brief Specifies log file max size.
-  ///
-  /// @detail If file size of corresponding log file (for corresponding level) is >= specified size, log file will
-  /// be truncated and re-initiated.
-  MaxLogFileSize = 128,
-  /// @brief Specifies number of log entries to hold until we flush pending log data
-  LogFlushThreshold = 256,
-  /// @brief Represents unknown configuration
-  Unknown = 1010
+    /// @brief Determines whether or not corresponding level and logger of logging is enabled
+    /// You may disable all logs by using el::Level::Global
+    Enabled = 1,
+    /// @brief Whether or not to write corresponding log to log file
+    ToFile = 2,
+    /// @brief Whether or not to write corresponding level and logger log to standard output.
+    /// By standard output meaning termnal, command prompt etc
+    ToStandardOutput = 4,
+    /// @brief Determines format of logging corresponding level and logger.
+    Format = 8,
+    /// @brief Determines log file (full path) to write logs to for correponding level and logger
+    Filename = 16,
+    /// @brief Specifies precision of the subsecond part. It should be within range (1-6).
+    SubsecondPrecision = 32,
+    /// @brief Alias of SubsecondPrecision (for backward compatibility)
+    MillisecondsWidth = SubsecondPrecision,
+    /// @brief Determines whether or not performance tracking is enabled.
+    ///
+    /// @detail This does not depend on logger or level. Performance tracking always uses 'performance' logger
+    PerformanceTracking = 64,
+    /// @brief Specifies log file max size.
+    ///
+    /// @detail If file size of corresponding log file (for corresponding level) is >= specified size, log file will
+    /// be truncated and re-initiated.
+    MaxLogFileSize = 128,
+    /// @brief Specifies number of log entries to hold until we flush pending log data
+    LogFlushThreshold = 256,
+    /// @brief Represents unknown configuration
+    Unknown = 1010
 };
 /// @brief Static class that contains helper functions for el::ConfigurationType
 class ConfigurationTypeHelper : base::StaticClass {
  public:
-  /// @brief Represents minimum valid configuration type. Useful when iterating through enum.
- static const base::type::EnumType kMinValid = static_cast(ConfigurationType::Enabled); - /// @brief Represents maximum valid configuration type. This is used internally and you should not need it. - static const base::type::EnumType kMaxValid = static_cast(ConfigurationType::MaxLogFileSize); - /// @brief Casts configuration type to int, useful for iterating through enum. - static base::type::EnumType castToInt(ConfigurationType configurationType) { - return static_cast(configurationType); - } - /// @brief Casts int(ushort) to configurationt type, useful for iterating through enum. - static ConfigurationType castFromInt(base::type::EnumType c) { - return static_cast(c); - } - /// @brief Converts configuration type to associated const char* - /// @returns Upper case string based configuration type. - static const char* convertToString(ConfigurationType configurationType); - /// @brief Converts from configStr to ConfigurationType - /// @param configStr Upper case string based configuration type. - /// Lower case is also valid but providing upper case is recommended. - static ConfigurationType convertFromString(const char* configStr); - /// @brief Applies specified function to each configuration type starting from startIndex - /// @param startIndex initial value to start the iteration from. This is passed by pointer and is left-shifted - /// so this can be used inside function (fn) to represent current configuration type. - /// @param fn function to apply with each configuration type. - /// This bool represent whether or not to stop iterating through configurations. - static inline void forEachConfigType(base::type::EnumType* startIndex, const std::function& fn); + /// @brief Represents minimum valid configuration type. Useful when iterating through enum. + static const base::type::EnumType kMinValid = static_cast(ConfigurationType::Enabled); + /// @brief Represents maximum valid configuration type. This is used internally and you should not need it. + static const base::type::EnumType kMaxValid = static_cast(ConfigurationType::MaxLogFileSize); + /// @brief Casts configuration type to int, useful for iterating through enum. + static base::type::EnumType + castToInt(ConfigurationType configurationType) { + return static_cast(configurationType); + } + /// @brief Casts int(ushort) to configurationt type, useful for iterating through enum. + static ConfigurationType + castFromInt(base::type::EnumType c) { + return static_cast(c); + } + /// @brief Converts configuration type to associated const char* + /// @returns Upper case string based configuration type. + static const char* + convertToString(ConfigurationType configurationType); + /// @brief Converts from configStr to ConfigurationType + /// @param configStr Upper case string based configuration type. + /// Lower case is also valid but providing upper case is recommended. + static ConfigurationType + convertFromString(const char* configStr); + /// @brief Applies specified function to each configuration type starting from startIndex + /// @param startIndex initial value to start the iteration from. This is passed by pointer and is left-shifted + /// so this can be used inside function (fn) to represent current configuration type. + /// @param fn function to apply with each configuration type. + /// This bool represent whether or not to stop iterating through configurations. + static inline void + forEachConfigType(base::type::EnumType* startIndex, const std::function& fn); }; /// @brief Flags used while writing logs. 
This flags are set by user enum class LoggingFlag : base::type::EnumType { - /// @brief Makes sure we have new line for each container log entry - NewLineForContainer = 1, - /// @brief Makes sure if -vmodule is used and does not specifies a module, then verbose - /// logging is allowed via that module. - AllowVerboseIfModuleNotSpecified = 2, - /// @brief When handling crashes by default, detailed crash reason will be logged as well - LogDetailedCrashReason = 4, - /// @brief Allows to disable application abortion when logged using FATAL level - DisableApplicationAbortOnFatalLog = 8, - /// @brief Flushes log with every log-entry (performance sensative) - Disabled by default - ImmediateFlush = 16, - /// @brief Enables strict file rolling - StrictLogFileSizeCheck = 32, - /// @brief Make terminal output colorful for supported terminals - ColoredTerminalOutput = 64, - /// @brief Supports use of multiple logging in same macro, e.g, CLOG(INFO, "default", "network") - MultiLoggerSupport = 128, - /// @brief Disables comparing performance tracker's checkpoints - DisablePerformanceTrackingCheckpointComparison = 256, - /// @brief Disable VModules - DisableVModules = 512, - /// @brief Disable VModules extensions - DisableVModulesExtensions = 1024, - /// @brief Enables hierarchical logging - HierarchicalLogging = 2048, - /// @brief Creates logger automatically when not available - CreateLoggerAutomatically = 4096, - /// @brief Adds spaces b/w logs that separated by left-shift operator - AutoSpacing = 8192, - /// @brief Preserves time format and does not convert it to sec, hour etc (performance tracking only) - FixedTimeFormat = 16384, - // @brief Ignore SIGINT or crash - IgnoreSigInt = 32768, + /// @brief Makes sure we have new line for each container log entry + NewLineForContainer = 1, + /// @brief Makes sure if -vmodule is used and does not specifies a module, then verbose + /// logging is allowed via that module. + AllowVerboseIfModuleNotSpecified = 2, + /// @brief When handling crashes by default, detailed crash reason will be logged as well + LogDetailedCrashReason = 4, + /// @brief Allows to disable application abortion when logged using FATAL level + DisableApplicationAbortOnFatalLog = 8, + /// @brief Flushes log with every log-entry (performance sensative) - Disabled by default + ImmediateFlush = 16, + /// @brief Enables strict file rolling + StrictLogFileSizeCheck = 32, + /// @brief Make terminal output colorful for supported terminals + ColoredTerminalOutput = 64, + /// @brief Supports use of multiple logging in same macro, e.g, CLOG(INFO, "default", "network") + MultiLoggerSupport = 128, + /// @brief Disables comparing performance tracker's checkpoints + DisablePerformanceTrackingCheckpointComparison = 256, + /// @brief Disable VModules + DisableVModules = 512, + /// @brief Disable VModules extensions + DisableVModulesExtensions = 1024, + /// @brief Enables hierarchical logging + HierarchicalLogging = 2048, + /// @brief Creates logger automatically when not available + CreateLoggerAutomatically = 4096, + /// @brief Adds spaces b/w logs that separated by left-shift operator + AutoSpacing = 8192, + /// @brief Preserves time format and does not convert it to sec, hour etc (performance tracking only) + FixedTimeFormat = 16384, + // @brief Ignore SIGINT or crash + IgnoreSigInt = 32768, }; namespace base { /// @brief Namespace containing constants used internally. 
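// Usage sketch for the two enums above, ConfigurationType and LoggingFlag,
// using the library's public helpers (assumes easylogging++.h is on the
// include path and default feature flags):
#include "easylogging++.h"
INITIALIZE_EASYLOGGINGPP

int main(int argc, char** argv) {
    START_EASYLOGGINGPP(argc, argv);
    el::Loggers::addFlag(el::LoggingFlag::ColoredTerminalOutput);  // a LoggingFlag bit
    el::Configurations conf;
    conf.setToDefault();
    conf.set(el::Level::Info, el::ConfigurationType::Format,       // a ConfigurationType
             "%datetime %level %msg");
    el::Loggers::reconfigureLogger("default", conf);
    LOG(INFO) << "configured via ConfigurationType::Format";
    return 0;
}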
namespace consts { -static const char kFormatSpecifierCharValue = 'v'; -static const char kFormatSpecifierChar = '%'; -static const unsigned int kMaxLogPerCounter = 100000; -static const unsigned int kMaxLogPerContainer = 100; -static const unsigned int kDefaultSubsecondPrecision = 3; +static const char kFormatSpecifierCharValue = 'v'; +static const char kFormatSpecifierChar = '%'; +static const unsigned int kMaxLogPerCounter = 100000; +static const unsigned int kMaxLogPerContainer = 100; +static const unsigned int kDefaultSubsecondPrecision = 3; #ifdef ELPP_DEFAULT_LOGGER -static const char* kDefaultLoggerId = ELPP_DEFAULT_LOGGER; +static const char* kDefaultLoggerId = ELPP_DEFAULT_LOGGER; #else -static const char* kDefaultLoggerId = "default"; +static const char* kDefaultLoggerId = "default"; #endif #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING) #ifdef ELPP_DEFAULT_PERFORMANCE_LOGGER -static const char* kPerformanceLoggerId = ELPP_DEFAULT_PERFORMANCE_LOGGER; +static const char* kPerformanceLoggerId = ELPP_DEFAULT_PERFORMANCE_LOGGER; #else -static const char* kPerformanceLoggerId = "performance"; -#endif // ELPP_DEFAULT_PERFORMANCE_LOGGER +static const char* kPerformanceLoggerId = "performance"; +#endif // ELPP_DEFAULT_PERFORMANCE_LOGGER #endif #if defined(ELPP_SYSLOG) -static const char* kSysLogLoggerId = "syslog"; +static const char* kSysLogLoggerId = "syslog"; #endif // defined(ELPP_SYSLOG) #if ELPP_OS_WINDOWS -static const char* kFilePathSeperator = "\\"; +static const char* kFilePathSeperator = "\\"; #else -static const char* kFilePathSeperator = "/"; +static const char* kFilePathSeperator = "/"; #endif // ELPP_OS_WINDOWS -static const std::size_t kSourceFilenameMaxLength = 100; -static const std::size_t kSourceLineMaxLength = 10; -static const Level kPerformanceTrackerDefaultLevel = Level::Info; +static const std::size_t kSourceFilenameMaxLength = 100; +static const std::size_t kSourceLineMaxLength = 10; +static const Level kPerformanceTrackerDefaultLevel = Level::Info; const struct { - double value; - const base::type::char_t* unit; -} kTimeFormats[] = { - { 1000.0f, ELPP_LITERAL("us") }, - { 1000.0f, ELPP_LITERAL("ms") }, - { 60.0f, ELPP_LITERAL("seconds") }, - { 60.0f, ELPP_LITERAL("minutes") }, - { 24.0f, ELPP_LITERAL("hours") }, - { 7.0f, ELPP_LITERAL("days") } -}; -static const int kTimeFormatsCount = sizeof(kTimeFormats) / sizeof(kTimeFormats[0]); + double value; + const base::type::char_t* unit; +} kTimeFormats[] = {{1000.0f, ELPP_LITERAL("us")}, {1000.0f, ELPP_LITERAL("ms")}, {60.0f, ELPP_LITERAL("seconds")}, + {60.0f, ELPP_LITERAL("minutes")}, {24.0f, ELPP_LITERAL("hours")}, {7.0f, ELPP_LITERAL("days")}}; +static const int kTimeFormatsCount = sizeof(kTimeFormats) / sizeof(kTimeFormats[0]); const struct { - int numb; - const char* name; - const char* brief; - const char* detail; + int numb; + const char* name; + const char* brief; + const char* detail; } kCrashSignals[] = { - // NOTE: Do not re-order, if you do please check CrashHandler(bool) constructor and CrashHandler::setHandler(..) - { - SIGABRT, "SIGABRT", "Abnormal termination", - "Program was abnormally terminated." - }, - { - SIGFPE, "SIGFPE", "Erroneous arithmetic operation", - "Arithemetic operation issue such as division by zero or operation resulting in overflow." - }, - { - SIGILL, "SIGILL", "Illegal instruction", - "Generally due to a corruption in the code or to an attempt to execute data." 
- }, - { - SIGSEGV, "SIGSEGV", "Invalid access to memory", - "Program is trying to read an invalid (unallocated, deleted or corrupted) or inaccessible memory." - }, - { - SIGINT, "SIGINT", "Interactive attention signal", - "Interruption generated (generally) by user or operating system." - }, + // NOTE: Do not re-order, if you do please check CrashHandler(bool) constructor and CrashHandler::setHandler(..) + {SIGABRT, "SIGABRT", "Abnormal termination", "Program was abnormally terminated."}, + {SIGFPE, "SIGFPE", "Erroneous arithmetic operation", + "Arithemetic operation issue such as division by zero or operation resulting in overflow."}, + {SIGILL, "SIGILL", "Illegal instruction", + "Generally due to a corruption in the code or to an attempt to execute data."}, + {SIGSEGV, "SIGSEGV", "Invalid access to memory", + "Program is trying to read an invalid (unallocated, deleted or corrupted) or inaccessible memory."}, + {SIGINT, "SIGINT", "Interactive attention signal", + "Interruption generated (generally) by user or operating system."}, }; -static const int kCrashSignalsCount = sizeof(kCrashSignals) / sizeof(kCrashSignals[0]); +static const int kCrashSignalsCount = sizeof(kCrashSignals) / sizeof(kCrashSignals[0]); } // namespace consts } // namespace base typedef std::function PreRollOutCallback; namespace base { -static inline void defaultPreRollOutCallback(const char*, std::size_t, Level level) {} +static inline void +defaultPreRollOutCallback(const char*, std::size_t, Level level) { +} /// @brief Enum to represent timestamp unit enum class TimestampUnit : base::type::EnumType { - Microsecond = 0, Millisecond = 1, Second = 2, Minute = 3, Hour = 4, Day = 5 + Microsecond = 0, + Millisecond = 1, + Second = 2, + Minute = 3, + Hour = 4, + Day = 5 }; /// @brief Format flags used to determine specifiers that are active for performance improvements. 
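// Standalone sketch of the divisor-table reduction that kTimeFormats (above)
// is built for: keep dividing until the next unit's threshold is not reached.
// The table values mirror the constants in this hunk; the loop is a simplified
// stand-in for the real formatTime logic.
#include <cstdio>

int main() {
    // entry i: {divisor needed to leave unit i, label of unit i}
    const struct { double divisor; const char* unit; } table[] = {
        {1000.0, "us"}, {1000.0, "ms"}, {60.0, "seconds"},
        {60.0, "minutes"}, {24.0, "hours"}, {7.0, "days"},
    };
    const int count = sizeof(table) / sizeof(table[0]);
    double t = 45500000.0;  // a duration in microseconds
    int idx = 0;
    while (idx < count - 1 && t >= table[idx].divisor) {
        t /= table[idx].divisor;
        ++idx;
    }
    std::printf("%.2f %s\n", t, table[idx].unit);  // prints "45.50 seconds"
    return 0;
}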
enum class FormatFlags : base::type::EnumType { - DateTime = 1 << 1, - LoggerId = 1 << 2, - File = 1 << 3, - Line = 1 << 4, - Location = 1 << 5, - Function = 1 << 6, - User = 1 << 7, - Host = 1 << 8, - LogMessage = 1 << 9, - VerboseLevel = 1 << 10, - AppName = 1 << 11, - ThreadId = 1 << 12, - Level = 1 << 13, - FileBase = 1 << 14, - LevelShort = 1 << 15 + DateTime = 1 << 1, + LoggerId = 1 << 2, + File = 1 << 3, + Line = 1 << 4, + Location = 1 << 5, + Function = 1 << 6, + User = 1 << 7, + Host = 1 << 8, + LogMessage = 1 << 9, + VerboseLevel = 1 << 10, + AppName = 1 << 11, + ThreadId = 1 << 12, + Level = 1 << 13, + FileBase = 1 << 14, + LevelShort = 1 << 15 }; /// @brief A subsecond precision class containing actual width and offset of the subsecond part class SubsecondPrecision { public: - SubsecondPrecision(void) { - init(base::consts::kDefaultSubsecondPrecision); - } - explicit SubsecondPrecision(int width) { - init(width); - } - bool operator==(const SubsecondPrecision& ssPrec) { - return m_width == ssPrec.m_width && m_offset == ssPrec.m_offset; - } - int m_width; - unsigned int m_offset; + SubsecondPrecision(void) { + init(base::consts::kDefaultSubsecondPrecision); + } + explicit SubsecondPrecision(int width) { + init(width); + } + bool + operator==(const SubsecondPrecision& ssPrec) { + return m_width == ssPrec.m_width && m_offset == ssPrec.m_offset; + } + int m_width; + unsigned int m_offset; + private: - void init(int width); + void + init(int width); }; /// @brief Type alias of SubsecondPrecision typedef SubsecondPrecision MillisecondsWidth; @@ -852,146 +884,162 @@ typedef SubsecondPrecision MillisecondsWidth; namespace utils { /// @brief Deletes memory safely and points to null template -static -typename std::enable_if::value, void>::type +static typename std::enable_if::value, void>::type safeDelete(T*& pointer) { - if (pointer == nullptr) - return; - delete pointer; - pointer = nullptr; + if (pointer == nullptr) + return; + delete pointer; + pointer = nullptr; } -/// @brief Bitwise operations for C++11 strong enum class. This casts e into Flag_T and returns value after bitwise operation -/// Use these function as
<pre>flag = bitwise::Or<MyEnum>(MyEnum::val1, flag);</pre>
+/// @brief Bitwise operations for C++11 strong enum class. This casts e into Flag_T and returns value after bitwise +/// operation. Use these functions as
<pre>flag = bitwise::Or<MyEnum>(MyEnum::val1, flag);</pre>
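// Self-contained sketch of the strong-enum bitwise helpers declared just
// below, with a hasFlag check. MyFlag is a made-up enum for the demo; the
// helper bodies mirror the And/Not/Or definitions in this hunk.
#include <cassert>
typedef unsigned int EnumType;
enum class MyFlag : EnumType { A = 1, B = 2, C = 4 };

template <typename Enum>
static inline EnumType Or(Enum e, EnumType flag) {
    return flag | static_cast<EnumType>(e);
}
template <typename Enum>
static inline EnumType Not(Enum e, EnumType flag) {
    return flag & ~static_cast<EnumType>(e);
}
template <typename Enum>
static inline bool hasFlag(Enum e, EnumType flag) {
    return (flag & static_cast<EnumType>(e)) > 0x0;
}

int main() {
    EnumType flags = 0x0;
    flags = Or(MyFlag::A, flags);          // set A
    flags = Or(MyFlag::C, flags);          // set C
    assert(hasFlag(MyFlag::A, flags) && !hasFlag(MyFlag::B, flags));
    flags = Not(MyFlag::A, flags);         // clear A
    assert(!hasFlag(MyFlag::A, flags) && hasFlag(MyFlag::C, flags));
    return 0;
}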
namespace bitwise { template -static inline base::type::EnumType And(Enum e, base::type::EnumType flag) { - return static_cast(flag) & static_cast(e); +static inline base::type::EnumType +And(Enum e, base::type::EnumType flag) { + return static_cast(flag) & static_cast(e); } template -static inline base::type::EnumType Not(Enum e, base::type::EnumType flag) { - return static_cast(flag) & ~(static_cast(e)); +static inline base::type::EnumType +Not(Enum e, base::type::EnumType flag) { + return static_cast(flag) & ~(static_cast(e)); } template -static inline base::type::EnumType Or(Enum e, base::type::EnumType flag) { - return static_cast(flag) | static_cast(e); +static inline base::type::EnumType +Or(Enum e, base::type::EnumType flag) { + return static_cast(flag) | static_cast(e); } } // namespace bitwise template -static inline void addFlag(Enum e, base::type::EnumType* flag) { - *flag = base::utils::bitwise::Or(e, *flag); +static inline void +addFlag(Enum e, base::type::EnumType* flag) { + *flag = base::utils::bitwise::Or(e, *flag); } template -static inline void removeFlag(Enum e, base::type::EnumType* flag) { - *flag = base::utils::bitwise::Not(e, *flag); +static inline void +removeFlag(Enum e, base::type::EnumType* flag) { + *flag = base::utils::bitwise::Not(e, *flag); } template -static inline bool hasFlag(Enum e, base::type::EnumType flag) { - return base::utils::bitwise::And(e, flag) > 0x0; +static inline bool +hasFlag(Enum e, base::type::EnumType flag) { + return base::utils::bitwise::And(e, flag) > 0x0; } } // namespace utils namespace threading { #if ELPP_THREADING_ENABLED -# if !ELPP_USE_STD_THREADING +#if !ELPP_USE_STD_THREADING namespace internal { /// @brief A mutex wrapper for compiler that dont yet support std::recursive_mutex class Mutex : base::NoCopy { public: - Mutex(void) { -# if ELPP_OS_UNIX - pthread_mutexattr_t attr; - pthread_mutexattr_init(&attr); - pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); - pthread_mutex_init(&m_underlyingMutex, &attr); - pthread_mutexattr_destroy(&attr); -# elif ELPP_OS_WINDOWS - InitializeCriticalSection(&m_underlyingMutex); -# endif // ELPP_OS_UNIX - } + Mutex(void) { +#if ELPP_OS_UNIX + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&m_underlyingMutex, &attr); + pthread_mutexattr_destroy(&attr); +#elif ELPP_OS_WINDOWS + InitializeCriticalSection(&m_underlyingMutex); +#endif // ELPP_OS_UNIX + } - virtual ~Mutex(void) { -# if ELPP_OS_UNIX - pthread_mutex_destroy(&m_underlyingMutex); -# elif ELPP_OS_WINDOWS - DeleteCriticalSection(&m_underlyingMutex); -# endif // ELPP_OS_UNIX - } + virtual ~Mutex(void) { +#if ELPP_OS_UNIX + pthread_mutex_destroy(&m_underlyingMutex); +#elif ELPP_OS_WINDOWS + DeleteCriticalSection(&m_underlyingMutex); +#endif // ELPP_OS_UNIX + } - inline void lock(void) { -# if ELPP_OS_UNIX - pthread_mutex_lock(&m_underlyingMutex); -# elif ELPP_OS_WINDOWS - EnterCriticalSection(&m_underlyingMutex); -# endif // ELPP_OS_UNIX - } + inline void + lock(void) { +#if ELPP_OS_UNIX + pthread_mutex_lock(&m_underlyingMutex); +#elif ELPP_OS_WINDOWS + EnterCriticalSection(&m_underlyingMutex); +#endif // ELPP_OS_UNIX + } - inline bool try_lock(void) { -# if ELPP_OS_UNIX - return (pthread_mutex_trylock(&m_underlyingMutex) == 0); -# elif ELPP_OS_WINDOWS - return TryEnterCriticalSection(&m_underlyingMutex); -# endif // ELPP_OS_UNIX - } + inline bool + try_lock(void) { +#if ELPP_OS_UNIX + return (pthread_mutex_trylock(&m_underlyingMutex) 
== 0); +#elif ELPP_OS_WINDOWS + return TryEnterCriticalSection(&m_underlyingMutex); +#endif // ELPP_OS_UNIX + } - inline void unlock(void) { -# if ELPP_OS_UNIX - pthread_mutex_unlock(&m_underlyingMutex); -# elif ELPP_OS_WINDOWS - LeaveCriticalSection(&m_underlyingMutex); -# endif // ELPP_OS_UNIX - } + inline void + unlock(void) { +#if ELPP_OS_UNIX + pthread_mutex_unlock(&m_underlyingMutex); +#elif ELPP_OS_WINDOWS + LeaveCriticalSection(&m_underlyingMutex); +#endif // ELPP_OS_UNIX + } private: -# if ELPP_OS_UNIX - pthread_mutex_t m_underlyingMutex; -# elif ELPP_OS_WINDOWS - CRITICAL_SECTION m_underlyingMutex; -# endif // ELPP_OS_UNIX +#if ELPP_OS_UNIX + pthread_mutex_t m_underlyingMutex; +#elif ELPP_OS_WINDOWS + CRITICAL_SECTION m_underlyingMutex; +#endif // ELPP_OS_UNIX }; /// @brief Scoped lock for compiler that dont yet support std::lock_guard template class ScopedLock : base::NoCopy { public: - explicit ScopedLock(M& mutex) { - m_mutex = &mutex; - m_mutex->lock(); - } + explicit ScopedLock(M& mutex) { + m_mutex = &mutex; + m_mutex->lock(); + } + + virtual ~ScopedLock(void) { + m_mutex->unlock(); + } - virtual ~ScopedLock(void) { - m_mutex->unlock(); - } private: - M* m_mutex; - ScopedLock(void); + M* m_mutex; + ScopedLock(void); }; -} // namespace internal +} // namespace internal typedef base::threading::internal::Mutex Mutex; typedef base::threading::internal::ScopedLock ScopedLock; -# else +#else typedef std::recursive_mutex Mutex; typedef std::lock_guard ScopedLock; -# endif // !ELPP_USE_STD_THREADING +#endif // !ELPP_USE_STD_THREADING #else namespace internal { /// @brief Mutex wrapper used when multi-threading is disabled. class NoMutex : base::NoCopy { public: - NoMutex(void) {} - inline void lock(void) {} - inline bool try_lock(void) { - return true; - } - inline void unlock(void) {} + NoMutex(void) { + } + inline void + lock(void) { + } + inline bool + try_lock(void) { + return true; + } + inline void + unlock(void) { + } }; /// @brief Lock guard wrapper used when multi-threading is disabled. template class NoScopedLock : base::NoCopy { public: - explicit NoScopedLock(Mutex&) { - } - virtual ~NoScopedLock(void) { - } + explicit NoScopedLock(Mutex&) { + } + virtual ~NoScopedLock(void) { + } + private: - NoScopedLock(void); + NoScopedLock(void); }; } // namespace internal typedef base::threading::internal::NoMutex Mutex; @@ -1000,641 +1048,756 @@ typedef base::threading::internal::NoScopedLock ScopedLo /// @brief Base of thread safe class, this class is inheritable-only class ThreadSafe { public: - virtual inline void acquireLock(void) ELPP_FINAL { m_mutex.lock(); } - virtual inline void releaseLock(void) ELPP_FINAL { m_mutex.unlock(); } - virtual inline base::threading::Mutex& lock(void) ELPP_FINAL { return m_mutex; } + virtual inline void + acquireLock(void) ELPP_FINAL { + m_mutex.lock(); + } + virtual inline void + releaseLock(void) ELPP_FINAL { + m_mutex.unlock(); + } + virtual inline base::threading::Mutex& + lock(void) ELPP_FINAL { + return m_mutex; + } + protected: - ThreadSafe(void) {} - virtual ~ThreadSafe(void) {} + ThreadSafe(void) { + } + virtual ~ThreadSafe(void) { + } + private: - base::threading::Mutex m_mutex; + base::threading::Mutex m_mutex; }; #if ELPP_THREADING_ENABLED -# if !ELPP_USE_STD_THREADING +#if !ELPP_USE_STD_THREADING /// @brief Gets ID of currently running threading in windows systems. On unix, nothing is returned. 
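// Usage sketch for the Mutex/ScopedLock pair: under ELPP_USE_STD_THREADING the
// typedefs in this hunk resolve to std::recursive_mutex / std::lock_guard,
// which this standalone snippet uses directly. The pthread/CRITICAL_SECTION
// wrapper above is the fallback re-implementation of the same contract.
#include <cstdio>
#include <mutex>

static std::recursive_mutex g_mutex;

static void guardedWork() {
    std::lock_guard<std::recursive_mutex> lock(g_mutex);   // ScopedLock equivalent
    std::lock_guard<std::recursive_mutex> again(g_mutex);  // recursive: re-lock is OK
    std::printf("inside critical section\n");
}

int main() {
    guardedWork();
    return 0;
}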
-static std::string getCurrentThreadId(void) { - std::stringstream ss; -# if (ELPP_OS_WINDOWS) - ss << GetCurrentThreadId(); -# endif // (ELPP_OS_WINDOWS) - return ss.str(); +static std::string +getCurrentThreadId(void) { + std::stringstream ss; +#if (ELPP_OS_WINDOWS) + ss << GetCurrentThreadId(); +#endif // (ELPP_OS_WINDOWS) + return ss.str(); } -# else -/// @brief Gets ID of currently running threading using std::this_thread::get_id() -static std::string getCurrentThreadId(void) { - std::stringstream ss; - ss << std::this_thread::get_id(); - return ss.str(); -} -# endif // !ELPP_USE_STD_THREADING #else -static inline std::string getCurrentThreadId(void) { - return std::string(); +/// @brief Gets ID of currently running threading using std::this_thread::get_id() +static std::string +getCurrentThreadId(void) { + std::stringstream ss; + ss << std::this_thread::get_id(); + return ss.str(); +} +#endif // !ELPP_USE_STD_THREADING +#else +static inline std::string +getCurrentThreadId(void) { + return std::string(); } #endif // ELPP_THREADING_ENABLED } // namespace threading namespace utils { class File : base::StaticClass { public: - /// @brief Creates new out file stream for specified filename. - /// @return Pointer to newly created fstream or nullptr - static base::type::fstream_t* newFileStream(const std::string& filename); + /// @brief Creates new out file stream for specified filename. + /// @return Pointer to newly created fstream or nullptr + static base::type::fstream_t* + newFileStream(const std::string& filename); - /// @brief Gets size of file provided in stream - static std::size_t getSizeOfFile(base::type::fstream_t* fs); + /// @brief Gets size of file provided in stream + static std::size_t + getSizeOfFile(base::type::fstream_t* fs); - /// @brief Determines whether or not provided path exist in current file system - static bool pathExists(const char* path, bool considerFile = false); + /// @brief Determines whether or not provided path exist in current file system + static bool + pathExists(const char* path, bool considerFile = false); - /// @brief Creates specified path on file system - /// @param path Path to create. - static bool createPath(const std::string& path); - /// @brief Extracts path of filename with leading slash - static std::string extractPathFromFilename(const std::string& fullPath, - const char* seperator = base::consts::kFilePathSeperator); - /// @brief builds stripped filename and puts it in buff - static void buildStrippedFilename(const char* filename, char buff[], - std::size_t limit = base::consts::kSourceFilenameMaxLength); - /// @brief builds base filename and puts it in buff - static void buildBaseFilename(const std::string& fullPath, char buff[], - std::size_t limit = base::consts::kSourceFilenameMaxLength, - const char* seperator = base::consts::kFilePathSeperator); + /// @brief Creates specified path on file system + /// @param path Path to create. 
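// One plausible standalone reading of the pathExists contract documented
// above, using POSIX stat(). Both the stat-based check and the directory/file
// distinction are assumptions for this sketch; the real helper also carries a
// Windows branch.
#include <sys/stat.h>
#include <cstdio>

static bool pathExistsSketch(const char* path, bool considerFile = false) {
    struct stat st;
    if (stat(path, &st) != 0)
        return false;                        // nothing at this path
    return considerFile ? true : S_ISDIR(st.st_mode);
}

int main() {
    std::printf("/tmp as dir: %d\n", pathExistsSketch("/tmp"));
    std::printf("/tmp as any: %d\n", pathExistsSketch("/tmp", true));
    return 0;
}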
+ static bool + createPath(const std::string& path); + /// @brief Extracts path of filename with leading slash + static std::string + extractPathFromFilename(const std::string& fullPath, const char* seperator = base::consts::kFilePathSeperator); + /// @brief builds stripped filename and puts it in buff + static void + buildStrippedFilename(const char* filename, char buff[], + std::size_t limit = base::consts::kSourceFilenameMaxLength); + /// @brief builds base filename and puts it in buff + static void + buildBaseFilename(const std::string& fullPath, char buff[], + std::size_t limit = base::consts::kSourceFilenameMaxLength, + const char* seperator = base::consts::kFilePathSeperator); }; /// @brief String utilities helper class used internally. You should not use it. class Str : base::StaticClass { public: - /// @brief Checks if character is digit. Dont use libc implementation of it to prevent locale issues. - static inline bool isDigit(char c) { - return c >= '0' && c <= '9'; - } + /// @brief Checks if character is digit. Dont use libc implementation of it to prevent locale issues. + static inline bool + isDigit(char c) { + return c >= '0' && c <= '9'; + } - /// @brief Matches wildcards, '*' and '?' only supported. - static bool wildCardMatch(const char* str, const char* pattern); + /// @brief Matches wildcards, '*' and '?' only supported. + static bool + wildCardMatch(const char* str, const char* pattern); - static std::string& ltrim(std::string& str); - static std::string& rtrim(std::string& str); - static std::string& trim(std::string& str); + static std::string& + ltrim(std::string& str); + static std::string& + rtrim(std::string& str); + static std::string& + trim(std::string& str); - /// @brief Determines whether or not str starts with specified string - /// @param str String to check - /// @param start String to check against - /// @return Returns true if starts with specified string, false otherwise - static bool startsWith(const std::string& str, const std::string& start); + /// @brief Determines whether or not str starts with specified string + /// @param str String to check + /// @param start String to check against + /// @return Returns true if starts with specified string, false otherwise + static bool + startsWith(const std::string& str, const std::string& start); - /// @brief Determines whether or not str ends with specified string - /// @param str String to check - /// @param end String to check against - /// @return Returns true if ends with specified string, false otherwise - static bool endsWith(const std::string& str, const std::string& end); + /// @brief Determines whether or not str ends with specified string + /// @param str String to check + /// @param end String to check against + /// @return Returns true if ends with specified string, false otherwise + static bool + endsWith(const std::string& str, const std::string& end); - /// @brief Replaces all instances of replaceWhat with 'replaceWith'. Original variable is changed for performance. - /// @param [in,out] str String to replace from - /// @param replaceWhat Character to replace - /// @param replaceWith Character to replace with - /// @return Modified version of str - static std::string& replaceAll(std::string& str, char replaceWhat, char replaceWith); + /// @brief Replaces all instances of replaceWhat with 'replaceWith'. Original variable is changed for performance. 
+ /// @param [in,out] str String to replace from + /// @param replaceWhat Character to replace + /// @param replaceWith Character to replace with + /// @return Modified version of str + static std::string& + replaceAll(std::string& str, char replaceWhat, char replaceWith); - /// @brief Replaces all instances of 'replaceWhat' with 'replaceWith'. (String version) Replaces in place - /// @param str String to replace from - /// @param replaceWhat Character to replace - /// @param replaceWith Character to replace with - /// @return Modified (original) str - static std::string& replaceAll(std::string& str, const std::string& replaceWhat, - const std::string& replaceWith); + /// @brief Replaces all instances of 'replaceWhat' with 'replaceWith'. (String version) Replaces in place + /// @param str String to replace from + /// @param replaceWhat Character to replace + /// @param replaceWith Character to replace with + /// @return Modified (original) str + static std::string& + replaceAll(std::string& str, const std::string& replaceWhat, const std::string& replaceWith); - static void replaceFirstWithEscape(base::type::string_t& str, const base::type::string_t& replaceWhat, - const base::type::string_t& replaceWith); + static void + replaceFirstWithEscape(base::type::string_t& str, const base::type::string_t& replaceWhat, + const base::type::string_t& replaceWith); #if defined(ELPP_UNICODE) - static void replaceFirstWithEscape(base::type::string_t& str, const base::type::string_t& replaceWhat, - const std::string& replaceWith); + static void + replaceFirstWithEscape(base::type::string_t& str, const base::type::string_t& replaceWhat, + const std::string& replaceWith); #endif // defined(ELPP_UNICODE) - /// @brief Converts string to uppercase - /// @param str String to convert - /// @return Uppercase string - static std::string& toUpper(std::string& str); + /// @brief Converts string to uppercase + /// @param str String to convert + /// @return Uppercase string + static std::string& + toUpper(std::string& str); - /// @brief Compares cstring equality - uses strcmp - static bool cStringEq(const char* s1, const char* s2); + /// @brief Compares cstring equality - uses strcmp + static bool + cStringEq(const char* s1, const char* s2); - /// @brief Compares cstring equality (case-insensitive) - uses toupper(char) - /// Dont use strcasecmp because of CRT (VC++) - static bool cStringCaseEq(const char* s1, const char* s2); + /// @brief Compares cstring equality (case-insensitive) - uses toupper(char) + /// Dont use strcasecmp because of CRT (VC++) + static bool + cStringCaseEq(const char* s1, const char* s2); - /// @brief Returns true if c exist in str - static bool contains(const char* str, char c); + /// @brief Returns true if c exist in str + static bool + contains(const char* str, char c); - static char* convertAndAddToBuff(std::size_t n, int len, char* buf, const char* bufLim, bool zeroPadded = true); - static char* addToBuff(const char* str, char* buf, const char* bufLim); - static char* clearBuff(char buff[], std::size_t lim); + static char* + convertAndAddToBuff(std::size_t n, int len, char* buf, const char* bufLim, bool zeroPadded = true); + static char* + addToBuff(const char* str, char* buf, const char* bufLim); + static char* + clearBuff(char buff[], std::size_t lim); - /// @brief Converst wchar* to char* - /// NOTE: Need to free return value after use! - static char* wcharPtrToCharPtr(const wchar_t* line); + /// @brief Converst wchar* to char* + /// NOTE: Need to free return value after use! 
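// Standalone sketch of the '*'/'?'-only wildcard contract that Str's
// wildCardMatch (earlier in this class) documents. This recursive formulation
// is a demo stand-in; the library's own implementation is not reproduced here.
#include <cassert>

static bool wildCardMatchSketch(const char* str, const char* pattern) {
    if (*pattern == '\0') return *str == '\0';
    if (*pattern == '*')
        return wildCardMatchSketch(str, pattern + 1) ||            // '*' matches empty
               (*str != '\0' && wildCardMatchSketch(str + 1, pattern));  // or one more char
    if (*str == '\0') return false;
    if (*pattern == '?' || *pattern == *str)                       // '?' matches any one char
        return wildCardMatchSketch(str + 1, pattern + 1);
    return false;
}

int main() {
    assert(wildCardMatchSketch("logger.network", "logger.*"));
    assert(!wildCardMatchSketch("logger", "log?"));
    return 0;
}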
+ static char* + wcharPtrToCharPtr(const wchar_t* line); }; /// @brief Operating System helper static class used internally. You should not use it. class OS : base::StaticClass { public: #if ELPP_OS_WINDOWS - /// @brief Gets environment variables for Windows based OS. - /// We are not using getenv(const char*) because of CRT deprecation - /// @param varname Variable name to get environment variable value for - /// @return If variable exist the value of it otherwise nullptr - static const char* getWindowsEnvironmentVariable(const char* varname); + /// @brief Gets environment variables for Windows based OS. + /// We are not using getenv(const char*) because of CRT deprecation + /// @param varname Variable name to get environment variable value for + /// @return If variable exist the value of it otherwise nullptr + static const char* + getWindowsEnvironmentVariable(const char* varname); #endif // ELPP_OS_WINDOWS #if ELPP_OS_ANDROID - /// @brief Reads android property value - static std::string getProperty(const char* prop); + /// @brief Reads android property value + static std::string + getProperty(const char* prop); - /// @brief Reads android device name - static std::string getDeviceName(void); + /// @brief Reads android device name + static std::string + getDeviceName(void); #endif // ELPP_OS_ANDROID - /// @brief Runs command on terminal and returns the output. - /// - /// @detail This is applicable only on unix based systems, for all other OS, an empty string is returned. - /// @param command Bash command - /// @return Result of bash output or empty string if no result found. - static const std::string getBashOutput(const char* command); + /// @brief Runs command on terminal and returns the output. + /// + /// @detail This is applicable only on unix based systems, for all other OS, an empty string is returned. + /// @param command Bash command + /// @return Result of bash output or empty string if no result found. + static const std::string + getBashOutput(const char* command); - /// @brief Gets environment variable. This is cross-platform and CRT safe (for VC++) - /// @param variableName Environment variable name - /// @param defaultVal If no environment variable or value found the value to return by default - /// @param alternativeBashCommand If environment variable not found what would be alternative bash command - /// in order to look for value user is looking for. E.g, for 'user' alternative command will 'whoami' - static std::string getEnvironmentVariable(const char* variableName, const char* defaultVal, - const char* alternativeBashCommand = nullptr); - /// @brief Gets current username. - static std::string currentUser(void); + /// @brief Gets environment variable. This is cross-platform and CRT safe (for VC++) + /// @param variableName Environment variable name + /// @param defaultVal If no environment variable or value found the value to return by default + /// @param alternativeBashCommand If environment variable not found what would be alternative bash command + /// in order to look for value user is looking for. E.g, for 'user' alternative command will 'whoami' + static std::string + getEnvironmentVariable(const char* variableName, const char* defaultVal, + const char* alternativeBashCommand = nullptr); + /// @brief Gets current username. + static std::string + currentUser(void); - /// @brief Gets current host name or computer name. 
- /// - /// @detail For android systems this is device name with its manufacturer and model seperated by hyphen - static std::string currentHost(void); - /// @brief Whether or not terminal supports colors - static bool termSupportsColor(void); + /// @brief Gets current host name or computer name. + /// + /// @detail For android systems this is device name with its manufacturer and model seperated by hyphen + static std::string + currentHost(void); + /// @brief Whether or not terminal supports colors + static bool + termSupportsColor(void); }; /// @brief Contains utilities for cross-platform date/time. This class make use of el::base::utils::Str class DateTime : base::StaticClass { public: - /// @brief Cross platform gettimeofday for Windows and unix platform. This can be used to determine current microsecond. - /// - /// @detail For unix system it uses gettimeofday(timeval*, timezone*) and for Windows, a seperate implementation is provided - /// @param [in,out] tv Pointer that gets updated - static void gettimeofday(struct timeval* tv); + /// @brief Cross platform gettimeofday for Windows and unix platform. This can be used to determine current + /// microsecond. + /// + /// @detail For unix system it uses gettimeofday(timeval*, timezone*) and for Windows, a seperate implementation is + /// provided + /// @param [in,out] tv Pointer that gets updated + static void + gettimeofday(struct timeval* tv); - /// @brief Gets current date and time with a subsecond part. - /// @param format User provided date/time format - /// @param ssPrec A pointer to base::SubsecondPrecision from configuration (non-null) - /// @returns string based date time in specified format. - static std::string getDateTime(const char* format, const base::SubsecondPrecision* ssPrec); + /// @brief Gets current date and time with a subsecond part. + /// @param format User provided date/time format + /// @param ssPrec A pointer to base::SubsecondPrecision from configuration (non-null) + /// @returns string based date time in specified format. 
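// Sketch of the timeval subtraction behind the getTimeDifference helper
// declared just below, at microsecond granularity, using POSIX gettimeofday
// (an assumption for this sketch; Windows gets a separate implementation in
// the real helper).
#include <sys/time.h>
#include <cstdio>

static unsigned long long diffMicroseconds(const timeval& end, const timeval& start) {
    long long us = (static_cast<long long>(end.tv_sec) - start.tv_sec) * 1000000LL
                 + (end.tv_usec - start.tv_usec);
    return static_cast<unsigned long long>(us);  // assumes end >= start
}

int main() {
    timeval a, b;
    gettimeofday(&a, nullptr);
    gettimeofday(&b, nullptr);
    std::printf("elapsed: %llu us\n", diffMicroseconds(b, a));
    return 0;
}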
+ static std::string + getDateTime(const char* format, const base::SubsecondPrecision* ssPrec); - /// @brief Converts timeval (struct from ctime) to string using specified format and subsecond precision - static std::string timevalToString(struct timeval tval, const char* format, - const el::base::SubsecondPrecision* ssPrec); + /// @brief Converts timeval (struct from ctime) to string using specified format and subsecond precision + static std::string + timevalToString(struct timeval tval, const char* format, const el::base::SubsecondPrecision* ssPrec); - /// @brief Formats time to get unit accordingly, units like second if > 1000 or minutes if > 60000 etc - static base::type::string_t formatTime(unsigned long long time, base::TimestampUnit timestampUnit); + /// @brief Formats time to get unit accordingly, units like second if > 1000 or minutes if > 60000 etc + static base::type::string_t + formatTime(unsigned long long time, base::TimestampUnit timestampUnit); - /// @brief Gets time difference in milli/micro second depending on timestampUnit - static unsigned long long getTimeDifference(const struct timeval& endTime, const struct timeval& startTime, - base::TimestampUnit timestampUnit); + /// @brief Gets time difference in milli/micro second depending on timestampUnit + static unsigned long long + getTimeDifference(const struct timeval& endTime, const struct timeval& startTime, + base::TimestampUnit timestampUnit); + static struct ::tm* + buildTimeInfo(struct timeval* currTime, struct ::tm* timeInfo); - static struct ::tm* buildTimeInfo(struct timeval* currTime, struct ::tm* timeInfo); private: - static char* parseFormat(char* buf, std::size_t bufSz, const char* format, const struct tm* tInfo, - std::size_t msec, const base::SubsecondPrecision* ssPrec); + static char* + parseFormat(char* buf, std::size_t bufSz, const char* format, const struct tm* tInfo, std::size_t msec, + const base::SubsecondPrecision* ssPrec); }; /// @brief Command line arguments for application if specified using el::Helpers::setArgs(..) or START_EASYLOGGINGPP(..) class CommandLineArgs { public: - CommandLineArgs(void) { - setArgs(0, static_cast(nullptr)); - } - CommandLineArgs(int argc, const char** argv) { - setArgs(argc, argv); - } - CommandLineArgs(int argc, char** argv) { - setArgs(argc, argv); - } - virtual ~CommandLineArgs(void) {} - /// @brief Sets arguments and parses them - inline void setArgs(int argc, const char** argv) { - setArgs(argc, const_cast(argv)); - } - /// @brief Sets arguments and parses them - void setArgs(int argc, char** argv); - /// @brief Returns true if arguments contain paramKey with a value (seperated by '=') - bool hasParamWithValue(const char* paramKey) const; - /// @brief Returns value of arguments - /// @see hasParamWithValue(const char*) - const char* getParamValue(const char* paramKey) const; - /// @brief Return true if arguments has a param (not having a value) i,e without '=' - bool hasParam(const char* paramKey) const; - /// @brief Returns true if no params available. This exclude argv[0] - bool empty(void) const; - /// @brief Returns total number of arguments. 
This exclude argv[0] - std::size_t size(void) const; - friend base::type::ostream_t& operator<<(base::type::ostream_t& os, const CommandLineArgs& c); + CommandLineArgs(void) { + setArgs(0, static_cast(nullptr)); + } + CommandLineArgs(int argc, const char** argv) { + setArgs(argc, argv); + } + CommandLineArgs(int argc, char** argv) { + setArgs(argc, argv); + } + virtual ~CommandLineArgs(void) { + } + /// @brief Sets arguments and parses them + inline void + setArgs(int argc, const char** argv) { + setArgs(argc, const_cast(argv)); + } + /// @brief Sets arguments and parses them + void + setArgs(int argc, char** argv); + /// @brief Returns true if arguments contain paramKey with a value (seperated by '=') + bool + hasParamWithValue(const char* paramKey) const; + /// @brief Returns value of arguments + /// @see hasParamWithValue(const char*) + const char* + getParamValue(const char* paramKey) const; + /// @brief Return true if arguments has a param (not having a value) i,e without '=' + bool + hasParam(const char* paramKey) const; + /// @brief Returns true if no params available. This exclude argv[0] + bool + empty(void) const; + /// @brief Returns total number of arguments. This exclude argv[0] + std::size_t + size(void) const; + friend base::type::ostream_t& + operator<<(base::type::ostream_t& os, const CommandLineArgs& c); private: - int m_argc; - char** m_argv; - std::unordered_map m_paramsWithValue; - std::vector m_params; + int m_argc; + char** m_argv; + std::unordered_map m_paramsWithValue; + std::vector m_params; }; -/// @brief Abstract registry (aka repository) that provides basic interface for pointer repository specified by T_Ptr type. +/// @brief Abstract registry (aka repository) that provides basic interface for pointer repository specified by T_Ptr +/// type. /// -/// @detail Most of the functions are virtual final methods but anything implementing this abstract class should implement -/// unregisterAll() and deepCopy(const AbstractRegistry&) and write registerNew() method according to container -/// and few more methods; get() to find element, unregister() to unregister single entry. -/// Please note that this is thread-unsafe and should also implement thread-safety mechanisms in implementation. +/// @detail Most of the functions are virtual final methods but anything implementing this abstract class should +/// implement unregisterAll() and deepCopy(const AbstractRegistry&) and write registerNew() method +/// according to container and few more methods; get() to find element, unregister() to unregister single entry. Please +/// note that this is thread-unsafe and should also implement thread-safety mechanisms in implementation. 
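// Sketch of the "--key=value" vs. bare-param split that CommandLineArgs
// (above) performs when parsing argv, with argv[0] excluded as documented.
// Container names here are illustrative, not the class's private members.
#include <cstdio>
#include <cstring>
#include <string>
#include <unordered_map>
#include <vector>

int main(int argc, char** argv) {
    std::unordered_map<std::string, std::string> paramsWithValue;
    std::vector<std::string> params;
    for (int i = 1; i < argc; ++i) {                    // skip argv[0]
        const char* eq = std::strchr(argv[i], '=');
        if (eq != nullptr)
            paramsWithValue[std::string(argv[i], eq)] = std::string(eq + 1);
        else
            params.push_back(argv[i]);
    }
    for (const auto& kv : paramsWithValue)
        std::printf("%s -> %s\n", kv.first.c_str(), kv.second.c_str());
    return 0;
}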
template class AbstractRegistry : public base::threading::ThreadSafe { public: - typedef typename Container::iterator iterator; - typedef typename Container::const_iterator const_iterator; + typedef typename Container::iterator iterator; + typedef typename Container::const_iterator const_iterator; - /// @brief Default constructor - AbstractRegistry(void) {} + /// @brief Default constructor + AbstractRegistry(void) { + } - /// @brief Move constructor that is useful for base classes - AbstractRegistry(AbstractRegistry&& sr) { - if (this == &sr) { - return; + /// @brief Move constructor that is useful for base classes + AbstractRegistry(AbstractRegistry&& sr) { + if (this == &sr) { + return; + } + unregisterAll(); + m_list = std::move(sr.m_list); } - unregisterAll(); - m_list = std::move(sr.m_list); - } - bool operator==(const AbstractRegistry& other) { - if (size() != other.size()) { - return false; - } - for (std::size_t i = 0; i < m_list.size(); ++i) { - if (m_list.at(i) != other.m_list.at(i)) { - return false; - } - } - return true; - } - - bool operator!=(const AbstractRegistry& other) { - if (size() != other.size()) { - return true; - } - for (std::size_t i = 0; i < m_list.size(); ++i) { - if (m_list.at(i) != other.m_list.at(i)) { + bool + operator==(const AbstractRegistry& other) { + if (size() != other.size()) { + return false; + } + for (std::size_t i = 0; i < m_list.size(); ++i) { + if (m_list.at(i) != other.m_list.at(i)) { + return false; + } + } return true; - } } - return false; - } - /// @brief Assignment move operator - AbstractRegistry& operator=(AbstractRegistry&& sr) { - if (this == &sr) { - return *this; + bool + operator!=(const AbstractRegistry& other) { + if (size() != other.size()) { + return true; + } + for (std::size_t i = 0; i < m_list.size(); ++i) { + if (m_list.at(i) != other.m_list.at(i)) { + return true; + } + } + return false; } - unregisterAll(); - m_list = std::move(sr.m_list); - return *this; - } - virtual ~AbstractRegistry(void) { - } + /// @brief Assignment move operator + AbstractRegistry& + operator=(AbstractRegistry&& sr) { + if (this == &sr) { + return *this; + } + unregisterAll(); + m_list = std::move(sr.m_list); + return *this; + } - /// @return Iterator pointer from start of repository - virtual inline iterator begin(void) ELPP_FINAL { - return m_list.begin(); - } + virtual ~AbstractRegistry(void) { + } - /// @return Iterator pointer from end of repository - virtual inline iterator end(void) ELPP_FINAL { - return m_list.end(); - } + /// @return Iterator pointer from start of repository + virtual inline iterator + begin(void) ELPP_FINAL { + return m_list.begin(); + } + /// @return Iterator pointer from end of repository + virtual inline iterator + end(void) ELPP_FINAL { + return m_list.end(); + } - /// @return Constant iterator pointer from start of repository - virtual inline const_iterator cbegin(void) const ELPP_FINAL { - return m_list.cbegin(); - } + /// @return Constant iterator pointer from start of repository + virtual inline const_iterator + cbegin(void) const ELPP_FINAL { + return m_list.cbegin(); + } - /// @return End of repository - virtual inline const_iterator cend(void) const ELPP_FINAL { - return m_list.cend(); - } + /// @return End of repository + virtual inline const_iterator + cend(void) const ELPP_FINAL { + return m_list.cend(); + } - /// @return Whether or not repository is empty - virtual inline bool empty(void) const ELPP_FINAL { - return m_list.empty(); - } + /// @return Whether or not repository is empty + virtual inline bool + 
empty(void) const ELPP_FINAL { + return m_list.empty(); + } - /// @return Size of repository - virtual inline std::size_t size(void) const ELPP_FINAL { - return m_list.size(); - } + /// @return Size of repository + virtual inline std::size_t + size(void) const ELPP_FINAL { + return m_list.size(); + } - /// @brief Returns underlying container by reference - virtual inline Container& list(void) ELPP_FINAL { - return m_list; - } + /// @brief Returns underlying container by reference + virtual inline Container& + list(void) ELPP_FINAL { + return m_list; + } - /// @brief Returns underlying container by constant reference. - virtual inline const Container& list(void) const ELPP_FINAL { - return m_list; - } + /// @brief Returns underlying container by constant reference. + virtual inline const Container& + list(void) const ELPP_FINAL { + return m_list; + } - /// @brief Unregisters all the pointers from current repository. - virtual void unregisterAll(void) = 0; + /// @brief Unregisters all the pointers from current repository. + virtual void + unregisterAll(void) = 0; protected: - virtual void deepCopy(const AbstractRegistry&) = 0; - void reinitDeepCopy(const AbstractRegistry& sr) { - unregisterAll(); - deepCopy(sr); - } + virtual void + deepCopy(const AbstractRegistry&) = 0; + void + reinitDeepCopy(const AbstractRegistry& sr) { + unregisterAll(); + deepCopy(sr); + } private: - Container m_list; + Container m_list; }; /// @brief A pointer registry mechanism to manage memory and provide search functionalities. (non-predicate version) /// -/// @detail NOTE: This is thread-unsafe implementation (although it contains lock function, it does not use these functions) +/// @detail NOTE: This is thread-unsafe implementation (although it contains lock function, it does not use these +/// functions) /// of AbstractRegistry. Any implementation of this class should be /// explicitly (by using lock functions) template class Registry : public AbstractRegistry> { public: - typedef typename Registry::iterator iterator; - typedef typename Registry::const_iterator const_iterator; + typedef typename Registry::iterator iterator; + typedef typename Registry::const_iterator const_iterator; - Registry(void) {} - - /// @brief Copy constructor that is useful for base classes. Try to avoid this constructor, use move constructor. - Registry(const Registry& sr) : AbstractRegistry>() { - if (this == &sr) { - return; + Registry(void) { } - this->reinitDeepCopy(sr); - } - /// @brief Assignment operator that unregisters all the existing registeries and deeply copies each of repo element - /// @see unregisterAll() - /// @see deepCopy(const AbstractRegistry&) - Registry& operator=(const Registry& sr) { - if (this == &sr) { - return *this; + /// @brief Copy constructor that is useful for base classes. Try to avoid this constructor, use move constructor. 
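// Standalone sketch of the owning-map contract that Registry's registerNew /
// unregister / get (below in this class) describe: registerNew frees and
// replaces any entry under the same key, and get returns nullptr on a miss.
// Logger here is a stand-in struct, not the library's Logger class.
#include <cassert>
#include <string>
#include <unordered_map>

struct Logger { std::string id; };

class RegistrySketch {
 public:
    ~RegistrySketch() { for (auto& kv : m_map) delete kv.second; }
    void registerNew(const std::string& key, Logger* ptr) {
        unregister(key);                                  // replace-and-free semantics
        m_map.insert(std::make_pair(key, ptr));
    }
    void unregister(const std::string& key) {
        auto it = m_map.find(key);
        if (it != m_map.end()) { delete it->second; m_map.erase(it); }
    }
    Logger* get(const std::string& key) {
        auto it = m_map.find(key);
        return it == m_map.end() ? nullptr : it->second;
    }
 private:
    std::unordered_map<std::string, Logger*> m_map;
};

int main() {
    RegistrySketch r;
    r.registerNew("default", new Logger{"default"});
    assert(r.get("default") != nullptr);
    assert(r.get("missing") == nullptr);
    return 0;
}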
+ Registry(const Registry& sr) : AbstractRegistry>() { + if (this == &sr) { + return; + } + this->reinitDeepCopy(sr); } - this->reinitDeepCopy(sr); - return *this; - } - virtual ~Registry(void) { - unregisterAll(); - } + /// @brief Assignment operator that unregisters all the existing registeries and deeply copies each of repo element + /// @see unregisterAll() + /// @see deepCopy(const AbstractRegistry&) + Registry& + operator=(const Registry& sr) { + if (this == &sr) { + return *this; + } + this->reinitDeepCopy(sr); + return *this; + } + + virtual ~Registry(void) { + unregisterAll(); + } protected: - virtual void unregisterAll(void) ELPP_FINAL { - if (!this->empty()) { - for (auto&& curr : this->list()) { - base::utils::safeDelete(curr.second); - } - this->list().clear(); + virtual void + unregisterAll(void) ELPP_FINAL { + if (!this->empty()) { + for (auto&& curr : this->list()) { + base::utils::safeDelete(curr.second); + } + this->list().clear(); + } } - } -/// @brief Registers new registry to repository. - virtual void registerNew(const T_Key& uniqKey, T_Ptr* ptr) ELPP_FINAL { - unregister(uniqKey); - this->list().insert(std::make_pair(uniqKey, ptr)); - } - -/// @brief Unregisters single entry mapped to specified unique key - void unregister(const T_Key& uniqKey) { - T_Ptr* existing = get(uniqKey); - if (existing != nullptr) { - this->list().erase(uniqKey); - base::utils::safeDelete(existing); + /// @brief Registers new registry to repository. + virtual void + registerNew(const T_Key& uniqKey, T_Ptr* ptr) ELPP_FINAL { + unregister(uniqKey); + this->list().insert(std::make_pair(uniqKey, ptr)); } - } -/// @brief Gets pointer from repository. If none found, nullptr is returned. - T_Ptr* get(const T_Key& uniqKey) { - iterator it = this->list().find(uniqKey); - return it == this->list().end() - ? nullptr - : it->second; - } + /// @brief Unregisters single entry mapped to specified unique key + void + unregister(const T_Key& uniqKey) { + T_Ptr* existing = get(uniqKey); + if (existing != nullptr) { + this->list().erase(uniqKey); + base::utils::safeDelete(existing); + } + } + + /// @brief Gets pointer from repository. If none found, nullptr is returned. + T_Ptr* + get(const T_Key& uniqKey) { + iterator it = this->list().find(uniqKey); + return it == this->list().end() ? nullptr : it->second; + } private: - virtual void deepCopy(const AbstractRegistry>& sr) ELPP_FINAL { - for (const_iterator it = sr.cbegin(); it != sr.cend(); ++it) { - registerNew(it->first, new T_Ptr(*it->second)); + virtual void + deepCopy(const AbstractRegistry>& sr) ELPP_FINAL { + for (const_iterator it = sr.cbegin(); it != sr.cend(); ++it) { + registerNew(it->first, new T_Ptr(*it->second)); + } } - } }; /// @brief A pointer registry mechanism to manage memory and provide search functionalities. (predicate version) /// -/// @detail NOTE: This is thread-unsafe implementation of AbstractRegistry. Any implementation of this class -/// should be made thread-safe explicitly +/// @detail NOTE: This is thread-unsafe implementation of AbstractRegistry. 
Any implementation of this +/// class should be made thread-safe explicitly template class RegistryWithPred : public AbstractRegistry> { public: - typedef typename RegistryWithPred::iterator iterator; - typedef typename RegistryWithPred::const_iterator const_iterator; + typedef typename RegistryWithPred::iterator iterator; + typedef typename RegistryWithPred::const_iterator const_iterator; - RegistryWithPred(void) { - } - - virtual ~RegistryWithPred(void) { - unregisterAll(); - } - - /// @brief Copy constructor that is useful for base classes. Try to avoid this constructor, use move constructor. - RegistryWithPred(const RegistryWithPred& sr) : AbstractRegistry>() { - if (this == &sr) { - return; + RegistryWithPred(void) { } - this->reinitDeepCopy(sr); - } - /// @brief Assignment operator that unregisters all the existing registeries and deeply copies each of repo element - /// @see unregisterAll() - /// @see deepCopy(const AbstractRegistry&) - RegistryWithPred& operator=(const RegistryWithPred& sr) { - if (this == &sr) { - return *this; + virtual ~RegistryWithPred(void) { + unregisterAll(); } - this->reinitDeepCopy(sr); - return *this; - } - friend base::type::ostream_t& operator<<(base::type::ostream_t& os, const RegistryWithPred& sr) { - for (const_iterator it = sr.list().begin(); it != sr.list().end(); ++it) { - os << ELPP_LITERAL(" ") << **it << ELPP_LITERAL("\n"); + /// @brief Copy constructor that is useful for base classes. Try to avoid this constructor, use move constructor. + RegistryWithPred(const RegistryWithPred& sr) : AbstractRegistry>() { + if (this == &sr) { + return; + } + this->reinitDeepCopy(sr); + } + + /// @brief Assignment operator that unregisters all the existing registeries and deeply copies each of repo element + /// @see unregisterAll() + /// @see deepCopy(const AbstractRegistry&) + RegistryWithPred& + operator=(const RegistryWithPred& sr) { + if (this == &sr) { + return *this; + } + this->reinitDeepCopy(sr); + return *this; + } + + friend base::type::ostream_t& + operator<<(base::type::ostream_t& os, const RegistryWithPred& sr) { + for (const_iterator it = sr.list().begin(); it != sr.list().end(); ++it) { + os << ELPP_LITERAL(" ") << **it << ELPP_LITERAL("\n"); + } + return os; } - return os; - } protected: - virtual void unregisterAll(void) ELPP_FINAL { - if (!this->empty()) { - for (auto&& curr : this->list()) { - base::utils::safeDelete(curr); - } - this->list().clear(); - } - } - - virtual void unregister(T_Ptr*& ptr) ELPP_FINAL { - if (ptr) { - iterator iter = this->begin(); - for (; iter != this->end(); ++iter) { - if (ptr == *iter) { - break; + virtual void + unregisterAll(void) ELPP_FINAL { + if (!this->empty()) { + for (auto&& curr : this->list()) { + base::utils::safeDelete(curr); + } + this->list().clear(); } - } - if (iter != this->end() && *iter != nullptr) { - this->list().erase(iter); - base::utils::safeDelete(*iter); - } } - } - virtual inline void registerNew(T_Ptr* ptr) ELPP_FINAL { - this->list().push_back(ptr); - } - -/// @brief Gets pointer from repository with speicifed arguments. Arguments are passed to predicate -/// in order to validate pointer. 
- template - T_Ptr* get(const T& arg1, const T2 arg2) { - iterator iter = std::find_if(this->list().begin(), this->list().end(), Pred(arg1, arg2)); - if (iter != this->list().end() && *iter != nullptr) { - return *iter; + virtual void + unregister(T_Ptr*& ptr) ELPP_FINAL { + if (ptr) { + iterator iter = this->begin(); + for (; iter != this->end(); ++iter) { + if (ptr == *iter) { + break; + } + } + if (iter != this->end() && *iter != nullptr) { + this->list().erase(iter); + base::utils::safeDelete(*iter); + } + } + } + + virtual inline void + registerNew(T_Ptr* ptr) ELPP_FINAL { + this->list().push_back(ptr); + } + + /// @brief Gets pointer from repository with speicifed arguments. Arguments are passed to predicate + /// in order to validate pointer. + template + T_Ptr* + get(const T& arg1, const T2 arg2) { + iterator iter = std::find_if(this->list().begin(), this->list().end(), Pred(arg1, arg2)); + if (iter != this->list().end() && *iter != nullptr) { + return *iter; + } + return nullptr; } - return nullptr; - } private: - virtual void deepCopy(const AbstractRegistry>& sr) { - for (const_iterator it = sr.list().begin(); it != sr.list().end(); ++it) { - registerNew(new T_Ptr(**it)); + virtual void + deepCopy(const AbstractRegistry>& sr) { + for (const_iterator it = sr.list().begin(); it != sr.list().end(); ++it) { + registerNew(new T_Ptr(**it)); + } } - } }; class Utils { public: - template - static bool installCallback(const std::string& id, std::unordered_map* mapT) { - if (mapT->find(id) == mapT->end()) { - mapT->insert(std::make_pair(id, TPtr(new T()))); - return true; + template + static bool + installCallback(const std::string& id, std::unordered_map* mapT) { + if (mapT->find(id) == mapT->end()) { + mapT->insert(std::make_pair(id, TPtr(new T()))); + return true; + } + return false; } - return false; - } - template - static void uninstallCallback(const std::string& id, std::unordered_map* mapT) { - if (mapT->find(id) != mapT->end()) { - mapT->erase(id); + template + static void + uninstallCallback(const std::string& id, std::unordered_map* mapT) { + if (mapT->find(id) != mapT->end()) { + mapT->erase(id); + } } - } - template - static T* callback(const std::string& id, std::unordered_map* mapT) { - typename std::unordered_map::iterator iter = mapT->find(id); - if (iter != mapT->end()) { - return static_cast(iter->second.get()); + template + static T* + callback(const std::string& id, std::unordered_map* mapT) { + typename std::unordered_map::iterator iter = mapT->find(id); + if (iter != mapT->end()) { + return static_cast(iter->second.get()); + } + return nullptr; } - return nullptr; - } }; } // namespace utils -} // namespace base +} // namespace base /// @brief Base of Easylogging++ friendly class /// /// @detail After inheriting this class publicly, implement pure-virtual function `void log(std::ostream&) const` class Loggable { public: - virtual ~Loggable(void) {} - virtual void log(el::base::type::ostream_t&) const = 0; + virtual ~Loggable(void) { + } + virtual void + log(el::base::type::ostream_t&) const = 0; + private: - friend inline el::base::type::ostream_t& operator<<(el::base::type::ostream_t& os, const Loggable& loggable) { - loggable.log(os); - return os; - } + friend inline el::base::type::ostream_t& + operator<<(el::base::type::ostream_t& os, const Loggable& loggable) { + loggable.log(os); + return os; + } }; namespace base { /// @brief Represents log format containing flags and date format. 
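(Sketch of the Loggable contract above with a hypothetical Point type; once log() is overridden, the friend operator<< streams the object into any log line:)

    class Point : public el::Loggable {
     public:
      Point(int x, int y) : x_(x), y_(y) {}
      virtual void log(el::base::type::ostream_t& os) const {
        os << "(" << x_ << ", " << y_ << ")";
      }
     private:
      int x_;
      int y_;
    };

    // Usage: LOG(INFO) << "origin at " << Point(0, 0);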
This is used internally to start initial log class LogFormat : public Loggable { public: - LogFormat(void); - LogFormat(Level level, const base::type::string_t& format); - LogFormat(const LogFormat& logFormat); - LogFormat(LogFormat&& logFormat); - LogFormat& operator=(const LogFormat& logFormat); - virtual ~LogFormat(void) {} - bool operator==(const LogFormat& other); + LogFormat(void); + LogFormat(Level level, const base::type::string_t& format); + LogFormat(const LogFormat& logFormat); + LogFormat(LogFormat&& logFormat); + LogFormat& + operator=(const LogFormat& logFormat); + virtual ~LogFormat(void) { + } + bool + operator==(const LogFormat& other); - /// @brief Updates format to be used while logging. - /// @param userFormat User provided format - void parseFromFormat(const base::type::string_t& userFormat); + /// @brief Updates format to be used while logging. + /// @param userFormat User provided format + void + parseFromFormat(const base::type::string_t& userFormat); - inline Level level(void) const { - return m_level; - } + inline Level + level(void) const { + return m_level; + } - inline const base::type::string_t& userFormat(void) const { - return m_userFormat; - } + inline const base::type::string_t& + userFormat(void) const { + return m_userFormat; + } - inline const base::type::string_t& format(void) const { - return m_format; - } + inline const base::type::string_t& + format(void) const { + return m_format; + } - inline const std::string& dateTimeFormat(void) const { - return m_dateTimeFormat; - } + inline const std::string& + dateTimeFormat(void) const { + return m_dateTimeFormat; + } - inline base::type::EnumType flags(void) const { - return m_flags; - } + inline base::type::EnumType + flags(void) const { + return m_flags; + } - inline bool hasFlag(base::FormatFlags flag) const { - return base::utils::hasFlag(flag, m_flags); - } + inline bool + hasFlag(base::FormatFlags flag) const { + return base::utils::hasFlag(flag, m_flags); + } - virtual void log(el::base::type::ostream_t& os) const { - os << m_format; - } + virtual void + log(el::base::type::ostream_t& os) const { + os << m_format; + } protected: - /// @brief Updates date time format if available in currFormat. - /// @param index Index where %datetime, %date or %time was found - /// @param [in,out] currFormat current format that is being used to format - virtual void updateDateFormat(std::size_t index, base::type::string_t& currFormat) ELPP_FINAL; + /// @brief Updates date time format if available in currFormat. + /// @param index Index where %datetime, %date or %time was found + /// @param [in,out] currFormat current format that is being used to format + virtual void + updateDateFormat(std::size_t index, base::type::string_t& currFormat) ELPP_FINAL; - /// @brief Updates %level from format. This is so that we dont have to do it at log-writing-time. It uses m_format and m_level - virtual void updateFormatSpec(void) ELPP_FINAL; + /// @brief Updates %level from format. This is so that we dont have to do it at log-writing-time. 
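(For orientation, such a format string normally reaches parseFromFormat() through the Format configuration; specifier names here follow the Easylogging++ documentation:)

    el::Configurations conf;
    conf.setToDefault();
    // %datetime accepts an optional sub-format; %g is documented as the subsecond part.
    conf.set(el::Level::Debug, el::ConfigurationType::Format,
             "%datetime{%H:%m:%s.%g} %level [%logger] %msg");
    el::Loggers::reconfigureLogger("default", conf);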
It uses m_format + /// and m_level + virtual void + updateFormatSpec(void) ELPP_FINAL; - inline void addFlag(base::FormatFlags flag) { - base::utils::addFlag(flag, &m_flags); - } + inline void + addFlag(base::FormatFlags flag) { + base::utils::addFlag(flag, &m_flags); + } private: - Level m_level; - base::type::string_t m_userFormat; - base::type::string_t m_format; - std::string m_dateTimeFormat; - base::type::EnumType m_flags; - std::string m_currentUser; - std::string m_currentHost; - friend class el::Logger; // To resolve loggerId format specifier easily + Level m_level; + base::type::string_t m_userFormat; + base::type::string_t m_format; + std::string m_dateTimeFormat; + base::type::EnumType m_flags; + std::string m_currentUser; + std::string m_currentHost; + friend class el::Logger; // To resolve loggerId format specifier easily }; } // namespace base /// @brief Resolving function for format specifier @@ -1644,26 +1807,30 @@ typedef std::function FormatSpecifierValueResolv /// @see FormatSpecifierValueResolver class CustomFormatSpecifier { public: - CustomFormatSpecifier(const char* formatSpecifier, const FormatSpecifierValueResolver& resolver) : - m_formatSpecifier(formatSpecifier), m_resolver(resolver) {} - inline const char* formatSpecifier(void) const { - return m_formatSpecifier; - } - inline const FormatSpecifierValueResolver& resolver(void) const { - return m_resolver; - } - inline bool operator==(const char* formatSpecifier) { - return strcmp(m_formatSpecifier, formatSpecifier) == 0; - } + CustomFormatSpecifier(const char* formatSpecifier, const FormatSpecifierValueResolver& resolver) + : m_formatSpecifier(formatSpecifier), m_resolver(resolver) { + } + inline const char* + formatSpecifier(void) const { + return m_formatSpecifier; + } + inline const FormatSpecifierValueResolver& + resolver(void) const { + return m_resolver; + } + inline bool + operator==(const char* formatSpecifier) { + return strcmp(m_formatSpecifier, formatSpecifier) == 0; + } private: - const char* m_formatSpecifier; - FormatSpecifierValueResolver m_resolver; + const char* m_formatSpecifier; + FormatSpecifierValueResolver m_resolver; }; /// @brief Represents single configuration that has representing level, configuration type and a string based value. /// -/// @detail String based value means any value either its boolean, integer or string itself, it will be embedded inside quotes -/// and will be parsed later. +/// @detail String based value means any value either its boolean, integer or string itself, it will be embedded inside +/// quotes and will be parsed later. 
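(Back on CustomFormatSpecifier above: a token/resolver pair is registered through el::Helpers; a sketch assuming the resolver returns std::string from a LogMessage pointer, matching the FormatSpecifierValueResolver typedef:)

    // Hypothetical %ip specifier.
    std::string resolveIp(const el::LogMessage*) {
        return "127.0.0.1";
    }

    el::Helpers::installCustomFormatSpecifier(el::CustomFormatSpecifier("%ip", resolveIp));
    // "%ip" may now appear in any Format configuration value.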
/// /// Consider some examples below: /// * el::Configuration confEnabledInfo(el::Level::Info, el::ConfigurationType::Enabled, "true"); @@ -1671,55 +1838,63 @@ class CustomFormatSpecifier { /// * el::Configuration confFilenameInfo(el::Level::Info, el::ConfigurationType::Filename, "/var/log/my.log"); class Configuration : public Loggable { public: - Configuration(const Configuration& c); - Configuration& operator=(const Configuration& c); + Configuration(const Configuration& c); + Configuration& + operator=(const Configuration& c); - virtual ~Configuration(void) { - } + virtual ~Configuration(void) { + } - /// @brief Full constructor used to sets value of configuration - Configuration(Level level, ConfigurationType configurationType, const std::string& value); + /// @brief Full constructor used to sets value of configuration + Configuration(Level level, ConfigurationType configurationType, const std::string& value); - /// @brief Gets level of current configuration - inline Level level(void) const { - return m_level; - } + /// @brief Gets level of current configuration + inline Level + level(void) const { + return m_level; + } - /// @brief Gets configuration type of current configuration - inline ConfigurationType configurationType(void) const { - return m_configurationType; - } + /// @brief Gets configuration type of current configuration + inline ConfigurationType + configurationType(void) const { + return m_configurationType; + } - /// @brief Gets string based configuration value - inline const std::string& value(void) const { - return m_value; - } + /// @brief Gets string based configuration value + inline const std::string& + value(void) const { + return m_value; + } - /// @brief Set string based configuration value - /// @param value Value to set. Values have to be std::string; For boolean values use "true", "false", for any integral values - /// use them in quotes. They will be parsed when configuring - inline void setValue(const std::string& value) { - m_value = value; - } + /// @brief Set string based configuration value + /// @param value Value to set. Values have to be std::string; For boolean values use "true", "false", for any + /// integral values + /// use them in quotes. They will be parsed when configuring + inline void + setValue(const std::string& value) { + m_value = value; + } - virtual void log(el::base::type::ostream_t& os) const; + virtual void + log(el::base::type::ostream_t& os) const; - /// @brief Used to find configuration from configuration (pointers) repository. Avoid using it. - class Predicate { - public: - Predicate(Level level, ConfigurationType configurationType); + /// @brief Used to find configuration from configuration (pointers) repository. Avoid using it. + class Predicate { + public: + Predicate(Level level, ConfigurationType configurationType); - bool operator()(const Configuration* conf) const; + bool + operator()(const Configuration* conf) const; - private: - Level m_level; - ConfigurationType m_configurationType; - }; + private: + Level m_level; + ConfigurationType m_configurationType; + }; private: - Level m_level; - ConfigurationType m_configurationType; - std::string m_value; + Level m_level; + ConfigurationType m_configurationType; + std::string m_value; }; /// @brief Thread-safe Configuration repository @@ -1727,167 +1902,195 @@ class Configuration : public Loggable { /// @detail This repository represents configurations for all the levels and configuration type mapped to a value. 
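A typical end-to-end use of this repository, per the project README, is to parse a file and hand the result to one or all loggers:

    el::Configurations conf("myapp.conf");            // hypothetical path; remaining types defaulted
    el::Loggers::reconfigureLogger("default", conf);  // apply to a single logger
    el::Loggers::reconfigureAllLoggers(conf);         // or to every registered logger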
class Configurations : public base::utils::RegistryWithPred { public: - /// @brief Default constructor with empty repository - Configurations(void); + /// @brief Default constructor with empty repository + Configurations(void); - /// @brief Constructor used to set configurations using configuration file. - /// @param configurationFile Full path to configuration file - /// @param useDefaultsForRemaining Lets you set the remaining configurations to default. - /// @param base If provided, this configuration will be based off existing repository that this argument is pointing to. - /// @see parseFromFile(const std::string&, Configurations* base) - /// @see setRemainingToDefault() - Configurations(const std::string& configurationFile, bool useDefaultsForRemaining = true, - Configurations* base = nullptr); + /// @brief Constructor used to set configurations using configuration file. + /// @param configurationFile Full path to configuration file + /// @param useDefaultsForRemaining Lets you set the remaining configurations to default. + /// @param base If provided, this configuration will be based off existing repository that this argument is pointing + /// to. + /// @see parseFromFile(const std::string&, Configurations* base) + /// @see setRemainingToDefault() + Configurations(const std::string& configurationFile, bool useDefaultsForRemaining = true, + Configurations* base = nullptr); - virtual ~Configurations(void) { - } + virtual ~Configurations(void) { + } - /// @brief Parses configuration from file. - /// @param configurationFile Full path to configuration file - /// @param base Configurations to base new configuration repository off. This value is used when you want to use - /// existing Configurations to base all the values and then set rest of configuration via configuration file. - /// @return True if successfully parsed, false otherwise. You may define 'ELPP_DEBUG_ASSERT_FAILURE' to make sure you - /// do not proceed without successful parse. - bool parseFromFile(const std::string& configurationFile, Configurations* base = nullptr); - - /// @brief Parse configurations from configuration string. - /// - /// @detail This configuration string has same syntax as configuration file contents. Make sure all the necessary - /// new line characters are provided. - /// @param base Configurations to base new configuration repository off. This value is used when you want to use - /// existing Configurations to base all the values and then set rest of configuration via configuration text. - /// @return True if successfully parsed, false otherwise. You may define 'ELPP_DEBUG_ASSERT_FAILURE' to make sure you - /// do not proceed without successful parse. - bool parseFromText(const std::string& configurationsString, Configurations* base = nullptr); - - /// @brief Sets configuration based-off an existing configurations. - /// @param base Pointer to existing configurations. - void setFromBase(Configurations* base); - - /// @brief Determines whether or not specified configuration type exists in the repository. - /// - /// @detail Returns as soon as first level is found. - /// @param configurationType Type of configuration to check existence for. - bool hasConfiguration(ConfigurationType configurationType); - - /// @brief Determines whether or not specified configuration type exists for specified level - /// @param level Level to check - /// @param configurationType Type of configuration to check existence for. 
- bool hasConfiguration(Level level, ConfigurationType configurationType); - - /// @brief Sets value of configuration for specified level. - /// - /// @detail Any existing configuration for specified level will be replaced. Also note that configuration types - /// ConfigurationType::SubsecondPrecision and ConfigurationType::PerformanceTracking will be ignored if not set for - /// Level::Global because these configurations are not dependant on level. - /// @param level Level to set configuration for (el::Level). - /// @param configurationType Type of configuration (el::ConfigurationType) - /// @param value A string based value. Regardless of what the data type of configuration is, it will always be string - /// from users' point of view. This is then parsed later to be used internally. - /// @see Configuration::setValue(const std::string& value) - /// @see el::Level - /// @see el::ConfigurationType - void set(Level level, ConfigurationType configurationType, const std::string& value); - - /// @brief Sets single configuration based on other single configuration. - /// @see set(Level level, ConfigurationType configurationType, const std::string& value) - void set(Configuration* conf); - - inline Configuration* get(Level level, ConfigurationType configurationType) { - base::threading::ScopedLock scopedLock(lock()); - return RegistryWithPred::get(level, configurationType); - } - - /// @brief Sets configuration for all levels. - /// @param configurationType Type of configuration - /// @param value String based value - /// @see Configurations::set(Level level, ConfigurationType configurationType, const std::string& value) - inline void setGlobally(ConfigurationType configurationType, const std::string& value) { - setGlobally(configurationType, value, false); - } - - /// @brief Clears repository so that all the configurations are unset - inline void clear(void) { - base::threading::ScopedLock scopedLock(lock()); - unregisterAll(); - } - - /// @brief Gets configuration file used in parsing this configurations. - /// - /// @detail If this repository was set manually or by text this returns empty string. - inline const std::string& configurationFile(void) const { - return m_configurationFile; - } - - /// @brief Sets configurations to "factory based" configurations. - void setToDefault(void); - - /// @brief Lets you set the remaining configurations to default. - /// - /// @detail By remaining, it means that the level/type a configuration does not exist for. - /// This function is useful when you want to minimize chances of failures, e.g, if you have a configuration file that sets - /// configuration for all the configurations except for Enabled or not, we use this so that ENABLED is set to default i.e, - /// true. If you dont do this explicitly (either by calling this function or by using second param in Constructor - /// and try to access a value, an error is thrown - void setRemainingToDefault(void); - - /// @brief Parser used internally to parse configurations from file or text. - /// - /// @detail This class makes use of base::utils::Str. - /// You should not need this unless you are working on some tool for Easylogging++ - class Parser : base::StaticClass { - public: /// @brief Parses configuration from file. /// @param configurationFile Full path to configuration file - /// @param sender Sender configurations pointer. Usually 'this' is used from calling class /// @param base Configurations to base new configuration repository off. 
This value is used when you want to use /// existing Configurations to base all the values and then set rest of configuration via configuration file. - /// @return True if successfully parsed, false otherwise. You may define '_STOP_ON_FIRSTELPP_ASSERTION' to make sure you + /// @return True if successfully parsed, false otherwise. You may define 'ELPP_DEBUG_ASSERT_FAILURE' to make sure + /// you /// do not proceed without successful parse. - static bool parseFromFile(const std::string& configurationFile, Configurations* sender, - Configurations* base = nullptr); + bool + parseFromFile(const std::string& configurationFile, Configurations* base = nullptr); /// @brief Parse configurations from configuration string. /// /// @detail This configuration string has same syntax as configuration file contents. Make sure all the necessary - /// new line characters are provided. You may define '_STOP_ON_FIRSTELPP_ASSERTION' to make sure you - /// do not proceed without successful parse (This is recommended) - /// @param configurationsString the configuration in plain text format - /// @param sender Sender configurations pointer. Usually 'this' is used from calling class + /// new line characters are provided. /// @param base Configurations to base new configuration repository off. This value is used when you want to use /// existing Configurations to base all the values and then set rest of configuration via configuration text. - /// @return True if successfully parsed, false otherwise. - static bool parseFromText(const std::string& configurationsString, Configurations* sender, - Configurations* base = nullptr); + /// @return True if successfully parsed, false otherwise. You may define 'ELPP_DEBUG_ASSERT_FAILURE' to make sure + /// you + /// do not proceed without successful parse. + bool + parseFromText(const std::string& configurationsString, Configurations* base = nullptr); - private: - friend class el::Loggers; - static void ignoreComments(std::string* line); - static bool isLevel(const std::string& line); - static bool isComment(const std::string& line); - static inline bool isConfig(const std::string& line); - static bool parseLine(std::string* line, std::string* currConfigStr, std::string* currLevelStr, Level* currLevel, - Configurations* conf); - }; + /// @brief Sets configuration based-off an existing configurations. + /// @param base Pointer to existing configurations. + void + setFromBase(Configurations* base); + + /// @brief Determines whether or not specified configuration type exists in the repository. + /// + /// @detail Returns as soon as first level is found. + /// @param configurationType Type of configuration to check existence for. + bool + hasConfiguration(ConfigurationType configurationType); + + /// @brief Determines whether or not specified configuration type exists for specified level + /// @param level Level to check + /// @param configurationType Type of configuration to check existence for. + bool + hasConfiguration(Level level, ConfigurationType configurationType); + + /// @brief Sets value of configuration for specified level. + /// + /// @detail Any existing configuration for specified level will be replaced. Also note that configuration types + /// ConfigurationType::SubsecondPrecision and ConfigurationType::PerformanceTracking will be ignored if not set for + /// Level::Global because these configurations are not dependant on level. + /// @param level Level to set configuration for (el::Level). 
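(Both set() overloads take string values only; booleans and integers are passed in quotes and parsed while configuring, e.g. via getULong() in TypedConfigurations below. A short sketch:)

    el::Configurations conf;
    conf.setToDefault();
    conf.setGlobally(el::ConfigurationType::ToStandardOutput, "false");
    conf.setGlobally(el::ConfigurationType::MaxLogFileSize, "2097152");  // bytes, in quotes
    conf.set(el::Level::Debug, el::ConfigurationType::Enabled, "true");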
+ /// @param configurationType Type of configuration (el::ConfigurationType) + /// @param value A string based value. Regardless of what the data type of configuration is, it will always be + /// string from users' point of view. This is then parsed later to be used internally. + /// @see Configuration::setValue(const std::string& value) + /// @see el::Level + /// @see el::ConfigurationType + void + set(Level level, ConfigurationType configurationType, const std::string& value); + + /// @brief Sets single configuration based on other single configuration. + /// @see set(Level level, ConfigurationType configurationType, const std::string& value) + void + set(Configuration* conf); + + inline Configuration* + get(Level level, ConfigurationType configurationType) { + base::threading::ScopedLock scopedLock(lock()); + return RegistryWithPred::get(level, configurationType); + } + + /// @brief Sets configuration for all levels. + /// @param configurationType Type of configuration + /// @param value String based value + /// @see Configurations::set(Level level, ConfigurationType configurationType, const std::string& value) + inline void + setGlobally(ConfigurationType configurationType, const std::string& value) { + setGlobally(configurationType, value, false); + } + + /// @brief Clears repository so that all the configurations are unset + inline void + clear(void) { + base::threading::ScopedLock scopedLock(lock()); + unregisterAll(); + } + + /// @brief Gets configuration file used in parsing this configurations. + /// + /// @detail If this repository was set manually or by text this returns empty string. + inline const std::string& + configurationFile(void) const { + return m_configurationFile; + } + + /// @brief Sets configurations to "factory based" configurations. + void + setToDefault(void); + + /// @brief Lets you set the remaining configurations to default. + /// + /// @detail By remaining, it means that the level/type a configuration does not exist for. + /// This function is useful when you want to minimize chances of failures, e.g, if you have a configuration file + /// that sets configuration for all the configurations except for Enabled or not, we use this so that ENABLED is set + /// to default i.e, true. If you dont do this explicitly (either by calling this function or by using second param + /// in Constructor and try to access a value, an error is thrown + void + setRemainingToDefault(void); + + /// @brief Parser used internally to parse configurations from file or text. + /// + /// @detail This class makes use of base::utils::Str. + /// You should not need this unless you are working on some tool for Easylogging++ + class Parser : base::StaticClass { + public: + /// @brief Parses configuration from file. + /// @param configurationFile Full path to configuration file + /// @param sender Sender configurations pointer. Usually 'this' is used from calling class + /// @param base Configurations to base new configuration repository off. This value is used when you want to use + /// existing Configurations to base all the values and then set rest of configuration via configuration + /// file. + /// @return True if successfully parsed, false otherwise. You may define '_STOP_ON_FIRSTELPP_ASSERTION' to make + /// sure you + /// do not proceed without successful parse. + static bool + parseFromFile(const std::string& configurationFile, Configurations* sender, Configurations* base = nullptr); + + /// @brief Parse configurations from configuration string. 
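(For reference, the text accepted here follows the same level-sectioned syntax as a configuration file; a representative snippet, with hypothetical values:)

    * GLOBAL:
        FORMAT              = "%datetime %level %msg"
        FILENAME            = "logs/myapp.log"
        ENABLED             = true
    * DEBUG:
        TO_STANDARD_OUTPUT  = false

Feeding this text, newlines included, to parseFromText() yields the same repository as parseFromFile() on an equivalent file.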
+ /// + /// @detail This configuration string has same syntax as configuration file contents. Make sure all the + /// necessary new line characters are provided. You may define '_STOP_ON_FIRSTELPP_ASSERTION' to make sure you + /// do not proceed without successful parse (This is recommended) + /// @param configurationsString the configuration in plain text format + /// @param sender Sender configurations pointer. Usually 'this' is used from calling class + /// @param base Configurations to base new configuration repository off. This value is used when you want to use + /// existing Configurations to base all the values and then set rest of configuration via configuration + /// text. + /// @return True if successfully parsed, false otherwise. + static bool + parseFromText(const std::string& configurationsString, Configurations* sender, Configurations* base = nullptr); + + private: + friend class el::Loggers; + static void + ignoreComments(std::string* line); + static bool + isLevel(const std::string& line); + static bool + isComment(const std::string& line); + static inline bool + isConfig(const std::string& line); + static bool + parseLine(std::string* line, std::string* currConfigStr, std::string* currLevelStr, Level* currLevel, + Configurations* conf); + }; private: - std::string m_configurationFile; - bool m_isFromFile; - friend class el::Loggers; + std::string m_configurationFile; + bool m_isFromFile; + friend class el::Loggers; - /// @brief Unsafely sets configuration if does not already exist - void unsafeSetIfNotExist(Level level, ConfigurationType configurationType, const std::string& value); + /// @brief Unsafely sets configuration if does not already exist + void + unsafeSetIfNotExist(Level level, ConfigurationType configurationType, const std::string& value); - /// @brief Thread unsafe set - void unsafeSet(Level level, ConfigurationType configurationType, const std::string& value); + /// @brief Thread unsafe set + void + unsafeSet(Level level, ConfigurationType configurationType, const std::string& value); - /// @brief Sets configurations for all levels including Level::Global if includeGlobalLevel is true - /// @see Configurations::setGlobally(ConfigurationType configurationType, const std::string& value) - void setGlobally(ConfigurationType configurationType, const std::string& value, bool includeGlobalLevel); + /// @brief Sets configurations for all levels including Level::Global if includeGlobalLevel is true + /// @see Configurations::setGlobally(ConfigurationType configurationType, const std::string& value) + void + setGlobally(ConfigurationType configurationType, const std::string& value, bool includeGlobalLevel); - /// @brief Sets configurations (Unsafely) for all levels including Level::Global if includeGlobalLevel is true - /// @see Configurations::setGlobally(ConfigurationType configurationType, const std::string& value) - void unsafeSetGlobally(ConfigurationType configurationType, const std::string& value, bool includeGlobalLevel); + /// @brief Sets configurations (Unsafely) for all levels including Level::Global if includeGlobalLevel is true + /// @see Configurations::setGlobally(ConfigurationType configurationType, const std::string& value) + void + unsafeSetGlobally(ConfigurationType configurationType, const std::string& value, bool includeGlobalLevel); }; namespace base { @@ -1895,314 +2098,362 @@ typedef std::shared_ptr FileStreamPtr; typedef std::unordered_map LogStreamsReferenceMap; /// @brief Configurations with data types. 
/// -/// @detail el::Configurations have string based values. This is whats used internally in order to read correct configurations. -/// This is to perform faster while writing logs using correct configurations. +/// @detail el::Configurations have string based values. This is whats used internally in order to read correct +/// configurations. This is to perform faster while writing logs using correct configurations. /// /// This is thread safe and final class containing non-virtual destructor (means nothing should inherit this class) class TypedConfigurations : public base::threading::ThreadSafe { public: - /// @brief Constructor to initialize (construct) the object off el::Configurations - /// @param configurations Configurations pointer/reference to base this typed configurations off. - /// @param logStreamsReference Use ELPP->registeredLoggers()->logStreamsReference() - TypedConfigurations(Configurations* configurations, base::LogStreamsReferenceMap* logStreamsReference); + /// @brief Constructor to initialize (construct) the object off el::Configurations + /// @param configurations Configurations pointer/reference to base this typed configurations off. + /// @param logStreamsReference Use ELPP->registeredLoggers()->logStreamsReference() + TypedConfigurations(Configurations* configurations, base::LogStreamsReferenceMap* logStreamsReference); - TypedConfigurations(const TypedConfigurations& other); + TypedConfigurations(const TypedConfigurations& other); - virtual ~TypedConfigurations(void) { - } + virtual ~TypedConfigurations(void) { + } - const Configurations* configurations(void) const { - return m_configurations; - } + const Configurations* + configurations(void) const { + return m_configurations; + } - bool enabled(Level level); - bool toFile(Level level); - const std::string& filename(Level level); - bool toStandardOutput(Level level); - const base::LogFormat& logFormat(Level level); - const base::SubsecondPrecision& subsecondPrecision(Level level = Level::Global); - const base::MillisecondsWidth& millisecondsWidth(Level level = Level::Global); - bool performanceTracking(Level level = Level::Global); - base::type::fstream_t* fileStream(Level level); - std::size_t maxLogFileSize(Level level); - std::size_t logFlushThreshold(Level level); + bool + enabled(Level level); + bool + toFile(Level level); + const std::string& + filename(Level level); + bool + toStandardOutput(Level level); + const base::LogFormat& + logFormat(Level level); + const base::SubsecondPrecision& + subsecondPrecision(Level level = Level::Global); + const base::MillisecondsWidth& + millisecondsWidth(Level level = Level::Global); + bool + performanceTracking(Level level = Level::Global); + base::type::fstream_t* + fileStream(Level level); + std::size_t + maxLogFileSize(Level level); + std::size_t + logFlushThreshold(Level level); private: - Configurations* m_configurations; - std::unordered_map m_enabledMap; - std::unordered_map m_toFileMap; - std::unordered_map m_filenameMap; - std::unordered_map m_toStandardOutputMap; - std::unordered_map m_logFormatMap; - std::unordered_map m_subsecondPrecisionMap; - std::unordered_map m_performanceTrackingMap; - std::unordered_map m_fileStreamMap; - std::unordered_map m_maxLogFileSizeMap; - std::unordered_map m_logFlushThresholdMap; - base::LogStreamsReferenceMap* m_logStreamsReference; + Configurations* m_configurations; + std::unordered_map m_enabledMap; + std::unordered_map m_toFileMap; + std::unordered_map m_filenameMap; + std::unordered_map m_toStandardOutputMap; + 
std::unordered_map m_logFormatMap; + std::unordered_map m_subsecondPrecisionMap; + std::unordered_map m_performanceTrackingMap; + std::unordered_map m_fileStreamMap; + std::unordered_map m_maxLogFileSizeMap; + std::unordered_map m_logFlushThresholdMap; + base::LogStreamsReferenceMap* m_logStreamsReference; - friend class el::Helpers; - friend class el::base::MessageBuilder; - friend class el::base::Writer; - friend class el::base::DefaultLogDispatchCallback; - friend class el::base::LogDispatcher; + friend class el::Helpers; + friend class el::base::MessageBuilder; + friend class el::base::Writer; + friend class el::base::DefaultLogDispatchCallback; + friend class el::base::LogDispatcher; - template - inline Conf_T getConfigByVal(Level level, const std::unordered_map* confMap, const char* confName) { - base::threading::ScopedLock scopedLock(lock()); - return unsafeGetConfigByVal(level, confMap, confName); // This is not unsafe anymore - mutex locked in scope - } - - template - inline Conf_T& getConfigByRef(Level level, std::unordered_map* confMap, const char* confName) { - base::threading::ScopedLock scopedLock(lock()); - return unsafeGetConfigByRef(level, confMap, confName); // This is not unsafe anymore - mutex locked in scope - } - - template - Conf_T unsafeGetConfigByVal(Level level, const std::unordered_map* confMap, const char* confName) { - ELPP_UNUSED(confName); - typename std::unordered_map::const_iterator it = confMap->find(level); - if (it == confMap->end()) { - try { - return confMap->at(Level::Global); - } catch (...) { - ELPP_INTERNAL_ERROR("Unable to get configuration [" << confName << "] for level [" - << LevelHelper::convertToString(level) << "]" - << std::endl << "Please ensure you have properly configured logger.", false); - return Conf_T(); - } + template + inline Conf_T + getConfigByVal(Level level, const std::unordered_map* confMap, const char* confName) { + base::threading::ScopedLock scopedLock(lock()); + return unsafeGetConfigByVal(level, confMap, confName); // This is not unsafe anymore - mutex locked in scope } - return it->second; - } - template - Conf_T& unsafeGetConfigByRef(Level level, std::unordered_map* confMap, const char* confName) { - ELPP_UNUSED(confName); - typename std::unordered_map::iterator it = confMap->find(level); - if (it == confMap->end()) { - try { - return confMap->at(Level::Global); - } catch (...) { - ELPP_INTERNAL_ERROR("Unable to get configuration [" << confName << "] for level [" - << LevelHelper::convertToString(level) << "]" - << std::endl << "Please ensure you have properly configured logger.", false); - } + template + inline Conf_T& + getConfigByRef(Level level, std::unordered_map* confMap, const char* confName) { + base::threading::ScopedLock scopedLock(lock()); + return unsafeGetConfigByRef(level, confMap, confName); // This is not unsafe anymore - mutex locked in scope } - return it->second; - } - template - void setValue(Level level, const Conf_T& value, std::unordered_map* confMap, - bool includeGlobalLevel = true) { - // If map is empty and we are allowed to add into generic level (Level::Global), do it! - if (confMap->empty() && includeGlobalLevel) { - confMap->insert(std::make_pair(Level::Global, value)); - return; + template + Conf_T + unsafeGetConfigByVal(Level level, const std::unordered_map* confMap, const char* confName) { + ELPP_UNUSED(confName); + typename std::unordered_map::const_iterator it = confMap->find(level); + if (it == confMap->end()) { + try { + return confMap->at(Level::Global); + } catch (...) 
{ + ELPP_INTERNAL_ERROR("Unable to get configuration [" + << confName << "] for level [" << LevelHelper::convertToString(level) << "]" + << std::endl + << "Please ensure you have properly configured logger.", + false); + return Conf_T(); + } + } + return it->second; } - // If same value exist in generic level already, dont add it to explicit level - typename std::unordered_map::iterator it = confMap->find(Level::Global); - if (it != confMap->end() && it->second == value) { - return; - } - // Now make sure we dont double up values if we really need to add it to explicit level - it = confMap->find(level); - if (it == confMap->end()) { - // Value not found for level, add new - confMap->insert(std::make_pair(level, value)); - } else { - // Value found, just update value - confMap->at(level) = value; - } - } - void build(Configurations* configurations); - unsigned long getULong(std::string confVal); - std::string resolveFilename(const std::string& filename); - void insertFile(Level level, const std::string& fullFilename); - bool unsafeValidateFileRolling(Level level, const PreRollOutCallback& preRollOutCallback); + template + Conf_T& + unsafeGetConfigByRef(Level level, std::unordered_map* confMap, const char* confName) { + ELPP_UNUSED(confName); + typename std::unordered_map::iterator it = confMap->find(level); + if (it == confMap->end()) { + try { + return confMap->at(Level::Global); + } catch (...) { + ELPP_INTERNAL_ERROR("Unable to get configuration [" + << confName << "] for level [" << LevelHelper::convertToString(level) << "]" + << std::endl + << "Please ensure you have properly configured logger.", + false); + } + } + return it->second; + } - inline bool validateFileRolling(Level level, const PreRollOutCallback& preRollOutCallback) { - base::threading::ScopedLock scopedLock(lock()); - return unsafeValidateFileRolling(level, preRollOutCallback); - } + template + void + setValue(Level level, const Conf_T& value, std::unordered_map* confMap, + bool includeGlobalLevel = true) { + // If map is empty and we are allowed to add into generic level (Level::Global), do it! 
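        // In other words: Level::Global acts as the catch-all default slot, and an
        // explicit per-level entry is stored only when its value differs from the
        // global one; that is why unsafeGetConfigByVal()/unsafeGetConfigByRef() above
        // fall back to confMap->at(Level::Global) for levels never set explicitly.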
+ if (confMap->empty() && includeGlobalLevel) { + confMap->insert(std::make_pair(Level::Global, value)); + return; + } + // If same value exist in generic level already, dont add it to explicit level + typename std::unordered_map::iterator it = confMap->find(Level::Global); + if (it != confMap->end() && it->second == value) { + return; + } + // Now make sure we dont double up values if we really need to add it to explicit level + it = confMap->find(level); + if (it == confMap->end()) { + // Value not found for level, add new + confMap->insert(std::make_pair(level, value)); + } else { + // Value found, just update value + confMap->at(level) = value; + } + } + + void + build(Configurations* configurations); + unsigned long + getULong(std::string confVal); + std::string + resolveFilename(const std::string& filename); + void + insertFile(Level level, const std::string& fullFilename); + bool + unsafeValidateFileRolling(Level level, const PreRollOutCallback& preRollOutCallback); + + inline bool + validateFileRolling(Level level, const PreRollOutCallback& preRollOutCallback) { + base::threading::ScopedLock scopedLock(lock()); + return unsafeValidateFileRolling(level, preRollOutCallback); + } }; /// @brief Class that keeps record of current line hit for occasional logging class HitCounter { public: - HitCounter(void) : - m_filename(""), - m_lineNumber(0), - m_hitCounts(0) { - } - - HitCounter(const char* filename, base::type::LineNumber lineNumber) : - m_filename(filename), - m_lineNumber(lineNumber), - m_hitCounts(0) { - } - - HitCounter(const HitCounter& hitCounter) : - m_filename(hitCounter.m_filename), - m_lineNumber(hitCounter.m_lineNumber), - m_hitCounts(hitCounter.m_hitCounts) { - } - - HitCounter& operator=(const HitCounter& hitCounter) { - if (&hitCounter != this) { - m_filename = hitCounter.m_filename; - m_lineNumber = hitCounter.m_lineNumber; - m_hitCounts = hitCounter.m_hitCounts; - } - return *this; - } - - virtual ~HitCounter(void) { - } - - /// @brief Resets location of current hit counter - inline void resetLocation(const char* filename, base::type::LineNumber lineNumber) { - m_filename = filename; - m_lineNumber = lineNumber; - } - - /// @brief Validates hit counts and resets it if necessary - inline void validateHitCounts(std::size_t n) { - if (m_hitCounts >= base::consts::kMaxLogPerCounter) { - m_hitCounts = (n >= 1 ? 
base::consts::kMaxLogPerCounter % n : 0); - } - ++m_hitCounts; - } - - inline const char* filename(void) const { - return m_filename; - } - - inline base::type::LineNumber lineNumber(void) const { - return m_lineNumber; - } - - inline std::size_t hitCounts(void) const { - return m_hitCounts; - } - - inline void increment(void) { - ++m_hitCounts; - } - - class Predicate { - public: - Predicate(const char* filename, base::type::LineNumber lineNumber) - : m_filename(filename), - m_lineNumber(lineNumber) { - } - inline bool operator()(const HitCounter* counter) { - return ((counter != nullptr) && - (strcmp(counter->m_filename, m_filename) == 0) && - (counter->m_lineNumber == m_lineNumber)); + HitCounter(void) : m_filename(""), m_lineNumber(0), m_hitCounts(0) { } - private: - const char* m_filename; - base::type::LineNumber m_lineNumber; - }; + HitCounter(const char* filename, base::type::LineNumber lineNumber) + : m_filename(filename), m_lineNumber(lineNumber), m_hitCounts(0) { + } + + HitCounter(const HitCounter& hitCounter) + : m_filename(hitCounter.m_filename), + m_lineNumber(hitCounter.m_lineNumber), + m_hitCounts(hitCounter.m_hitCounts) { + } + + HitCounter& + operator=(const HitCounter& hitCounter) { + if (&hitCounter != this) { + m_filename = hitCounter.m_filename; + m_lineNumber = hitCounter.m_lineNumber; + m_hitCounts = hitCounter.m_hitCounts; + } + return *this; + } + + virtual ~HitCounter(void) { + } + + /// @brief Resets location of current hit counter + inline void + resetLocation(const char* filename, base::type::LineNumber lineNumber) { + m_filename = filename; + m_lineNumber = lineNumber; + } + + /// @brief Validates hit counts and resets it if necessary + inline void + validateHitCounts(std::size_t n) { + if (m_hitCounts >= base::consts::kMaxLogPerCounter) { + m_hitCounts = (n >= 1 ? base::consts::kMaxLogPerCounter % n : 0); + } + ++m_hitCounts; + } + + inline const char* + filename(void) const { + return m_filename; + } + + inline base::type::LineNumber + lineNumber(void) const { + return m_lineNumber; + } + + inline std::size_t + hitCounts(void) const { + return m_hitCounts; + } + + inline void + increment(void) { + ++m_hitCounts; + } + + class Predicate { + public: + Predicate(const char* filename, base::type::LineNumber lineNumber) + : m_filename(filename), m_lineNumber(lineNumber) { + } + inline bool + operator()(const HitCounter* counter) { + return ((counter != nullptr) && (strcmp(counter->m_filename, m_filename) == 0) && + (counter->m_lineNumber == m_lineNumber)); + } + + private: + const char* m_filename; + base::type::LineNumber m_lineNumber; + }; private: - const char* m_filename; - base::type::LineNumber m_lineNumber; - std::size_t m_hitCounts; + const char* m_filename; + base::type::LineNumber m_lineNumber; + std::size_t m_hitCounts; }; /// @brief Repository for hit counters used across the application class RegisteredHitCounters : public base::utils::RegistryWithPred { public: - /// @brief Validates counter for every N, i.e, registers new if does not exist otherwise updates original one - /// @return True if validation resulted in triggering hit. Meaning logs should be written everytime true is returned - bool validateEveryN(const char* filename, base::type::LineNumber lineNumber, std::size_t n); + /// @brief Validates counter for every N, i.e, registers new if does not exist otherwise updates original one + /// @return True if validation resulted in triggering hit. 
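(These validators back the occasional-logging macros defined later in this header; typical call sites:)

    LOG_EVERY_N(100, INFO) << "logged on every 100th hit";       // validateEveryN
    LOG_AFTER_N(2, WARNING) << "logged after the first 2 hits";  // validateAfterN
    LOG_N_TIMES(3, ERROR) << "logged for the first 3 hits only"; // validateNTimes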
Meaning logs should be written everytime true is returned + bool + validateEveryN(const char* filename, base::type::LineNumber lineNumber, std::size_t n); - /// @brief Validates counter for hits >= N, i.e, registers new if does not exist otherwise updates original one - /// @return True if validation resulted in triggering hit. Meaning logs should be written everytime true is returned - bool validateAfterN(const char* filename, base::type::LineNumber lineNumber, std::size_t n); + /// @brief Validates counter for hits >= N, i.e, registers new if does not exist otherwise updates original one + /// @return True if validation resulted in triggering hit. Meaning logs should be written everytime true is returned + bool + validateAfterN(const char* filename, base::type::LineNumber lineNumber, std::size_t n); - /// @brief Validates counter for hits are <= n, i.e, registers new if does not exist otherwise updates original one - /// @return True if validation resulted in triggering hit. Meaning logs should be written everytime true is returned - bool validateNTimes(const char* filename, base::type::LineNumber lineNumber, std::size_t n); + /// @brief Validates counter for hits are <= n, i.e, registers new if does not exist otherwise updates original one + /// @return True if validation resulted in triggering hit. Meaning logs should be written everytime true is returned + bool + validateNTimes(const char* filename, base::type::LineNumber lineNumber, std::size_t n); - /// @brief Gets hit counter registered at specified position - inline const base::HitCounter* getCounter(const char* filename, base::type::LineNumber lineNumber) { - base::threading::ScopedLock scopedLock(lock()); - return get(filename, lineNumber); - } + /// @brief Gets hit counter registered at specified position + inline const base::HitCounter* + getCounter(const char* filename, base::type::LineNumber lineNumber) { + base::threading::ScopedLock scopedLock(lock()); + return get(filename, lineNumber); + } }; /// @brief Action to be taken for dispatching -enum class DispatchAction : base::type::EnumType { - None = 1, NormalLog = 2, SysLog = 4 -}; +enum class DispatchAction : base::type::EnumType { None = 1, NormalLog = 2, SysLog = 4 }; } // namespace base template class Callback : protected base::threading::ThreadSafe { public: - Callback(void) : m_enabled(true) {} - inline bool enabled(void) const { - return m_enabled; - } - inline void setEnabled(bool enabled) { - base::threading::ScopedLock scopedLock(lock()); - m_enabled = enabled; - } + Callback(void) : m_enabled(true) { + } + inline bool + enabled(void) const { + return m_enabled; + } + inline void + setEnabled(bool enabled) { + base::threading::ScopedLock scopedLock(lock()); + m_enabled = enabled; + } + protected: - virtual void handle(const T* handlePtr) = 0; + virtual void + handle(const T* handlePtr) = 0; + private: - bool m_enabled; + bool m_enabled; }; class LogDispatchData { public: - LogDispatchData() : m_logMessage(nullptr), m_dispatchAction(base::DispatchAction::None) {} - inline const LogMessage* logMessage(void) const { - return m_logMessage; - } - inline base::DispatchAction dispatchAction(void) const { - return m_dispatchAction; - } - inline void setLogMessage(LogMessage* logMessage) { - m_logMessage = logMessage; - } - inline void setDispatchAction(base::DispatchAction dispatchAction) { - m_dispatchAction = dispatchAction; - } - private: - LogMessage* m_logMessage; - base::DispatchAction m_dispatchAction; - friend class base::LogDispatcher; + LogDispatchData() : 
m_logMessage(nullptr), m_dispatchAction(base::DispatchAction::None) { + } + inline const LogMessage* + logMessage(void) const { + return m_logMessage; + } + inline base::DispatchAction + dispatchAction(void) const { + return m_dispatchAction; + } + inline void + setLogMessage(LogMessage* logMessage) { + m_logMessage = logMessage; + } + inline void + setDispatchAction(base::DispatchAction dispatchAction) { + m_dispatchAction = dispatchAction; + } + private: + LogMessage* m_logMessage; + base::DispatchAction m_dispatchAction; + friend class base::LogDispatcher; }; class LogDispatchCallback : public Callback { protected: - virtual void handle(const LogDispatchData* data); - base::threading::Mutex& fileHandle(const LogDispatchData* data); + virtual void + handle(const LogDispatchData* data); + base::threading::Mutex& + fileHandle(const LogDispatchData* data); + private: - friend class base::LogDispatcher; - std::unordered_map> m_fileLocks; - base::threading::Mutex m_fileLocksMapLock; + friend class base::LogDispatcher; + std::unordered_map> m_fileLocks; + base::threading::Mutex m_fileLocksMapLock; }; class PerformanceTrackingCallback : public Callback { private: - friend class base::PerformanceTracker; + friend class base::PerformanceTracker; }; class LoggerRegistrationCallback : public Callback { private: - friend class base::RegisteredLoggers; + friend class base::RegisteredLoggers; }; class LogBuilder : base::NoCopy { public: - LogBuilder() : m_termSupportsColor(base::utils::OS::termSupportsColor()) {} - virtual ~LogBuilder(void) { - ELPP_INTERNAL_INFO(3, "Destroying log builder...") - } - virtual base::type::string_t build(const LogMessage* logMessage, bool appendNewLine) const = 0; - void convertToColoredOutput(base::type::string_t* logLine, Level level); + LogBuilder() : m_termSupportsColor(base::utils::OS::termSupportsColor()) { + } + virtual ~LogBuilder(void) { + ELPP_INTERNAL_INFO(3, "Destroying log builder...") + } + virtual base::type::string_t + build(const LogMessage* logMessage, bool appendNewLine) const = 0; + void + convertToColoredOutput(base::type::string_t* logLine, Level level); + private: - bool m_termSupportsColor; - friend class el::base::DefaultLogDispatchCallback; + bool m_termSupportsColor; + friend class el::base::DefaultLogDispatchCallback; }; typedef std::shared_ptr LogBuilderPtr; /// @brief Represents a logger holding ID and configurations we need to write logs @@ -2210,1691 +2461,1989 @@ typedef std::shared_ptr LogBuilderPtr; /// @detail This class does not write logs itself instead its used by writer to read configuations from. 
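User code normally obtains a Logger through the registry rather than constructing one; a short sketch (host and port are hypothetical variables):

    el::Logger* netLog = el::Loggers::getLogger("network");  // registers on first use
    netLog->info("connected to %v:%v", host, port);          // %v is the value placeholder
    netLog->flush();  // sync log files for all levels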
class Logger : public base::threading::ThreadSafe, public Loggable { public: - Logger(const std::string& id, base::LogStreamsReferenceMap* logStreamsReference); - Logger(const std::string& id, const Configurations& configurations, base::LogStreamsReferenceMap* logStreamsReference); - Logger(const Logger& logger); - Logger& operator=(const Logger& logger); + Logger(const std::string& id, base::LogStreamsReferenceMap* logStreamsReference); + Logger(const std::string& id, const Configurations& configurations, + base::LogStreamsReferenceMap* logStreamsReference); + Logger(const Logger& logger); + Logger& + operator=(const Logger& logger); - virtual ~Logger(void) { - base::utils::safeDelete(m_typedConfigurations); - } + virtual ~Logger(void) { + base::utils::safeDelete(m_typedConfigurations); + } - virtual inline void log(el::base::type::ostream_t& os) const { - os << m_id.c_str(); - } + virtual inline void + log(el::base::type::ostream_t& os) const { + os << m_id.c_str(); + } - /// @brief Configures the logger using specified configurations. - void configure(const Configurations& configurations); + /// @brief Configures the logger using specified configurations. + void + configure(const Configurations& configurations); - /// @brief Reconfigures logger using existing configurations - void reconfigure(void); + /// @brief Reconfigures logger using existing configurations + void + reconfigure(void); - inline const std::string& id(void) const { - return m_id; - } + inline const std::string& + id(void) const { + return m_id; + } - inline const std::string& parentApplicationName(void) const { - return m_parentApplicationName; - } + inline const std::string& + parentApplicationName(void) const { + return m_parentApplicationName; + } - inline void setParentApplicationName(const std::string& parentApplicationName) { - m_parentApplicationName = parentApplicationName; - } + inline void + setParentApplicationName(const std::string& parentApplicationName) { + m_parentApplicationName = parentApplicationName; + } - inline Configurations* configurations(void) { - return &m_configurations; - } + inline Configurations* + configurations(void) { + return &m_configurations; + } - inline base::TypedConfigurations* typedConfigurations(void) { - return m_typedConfigurations; - } + inline base::TypedConfigurations* + typedConfigurations(void) { + return m_typedConfigurations; + } - static bool isValidId(const std::string& id); + static bool + isValidId(const std::string& id); - /// @brief Flushes logger to sync all log files for all levels - void flush(void); + /// @brief Flushes logger to sync all log files for all levels + void + flush(void); - void flush(Level level, base::type::fstream_t* fs); + void + flush(Level level, base::type::fstream_t* fs); - inline bool isFlushNeeded(Level level) { - return ++m_unflushedCount.find(level)->second >= m_typedConfigurations->logFlushThreshold(level); - } + inline bool + isFlushNeeded(Level level) { + return ++m_unflushedCount.find(level)->second >= m_typedConfigurations->logFlushThreshold(level); + } - inline LogBuilder* logBuilder(void) const { - return m_logBuilder.get(); - } + inline LogBuilder* + logBuilder(void) const { + return m_logBuilder.get(); + } - inline void setLogBuilder(const LogBuilderPtr& logBuilder) { - m_logBuilder = logBuilder; - } + inline void + setLogBuilder(const LogBuilderPtr& logBuilder) { + m_logBuilder = logBuilder; + } - inline bool enabled(Level level) const { - return m_typedConfigurations->enabled(level); - } + inline bool + enabled(Level level) 
const { + return m_typedConfigurations->enabled(level); + } #if ELPP_VARIADIC_TEMPLATES_SUPPORTED -# define LOGGER_LEVEL_WRITERS_SIGNATURES(FUNCTION_NAME)\ -template \ -inline void FUNCTION_NAME(const char*, const T&, const Args&...);\ -template \ -inline void FUNCTION_NAME(const T&); +#define LOGGER_LEVEL_WRITERS_SIGNATURES(FUNCTION_NAME) \ + template \ + inline void FUNCTION_NAME(const char*, const T&, const Args&...); \ + template \ + inline void FUNCTION_NAME(const T&); - template - inline void verbose(int, const char*, const T&, const Args&...); + template + inline void + verbose(int, const char*, const T&, const Args&...); - template - inline void verbose(int, const T&); + template + inline void + verbose(int, const T&); - LOGGER_LEVEL_WRITERS_SIGNATURES(info) - LOGGER_LEVEL_WRITERS_SIGNATURES(debug) - LOGGER_LEVEL_WRITERS_SIGNATURES(warn) - LOGGER_LEVEL_WRITERS_SIGNATURES(error) - LOGGER_LEVEL_WRITERS_SIGNATURES(fatal) - LOGGER_LEVEL_WRITERS_SIGNATURES(trace) -# undef LOGGER_LEVEL_WRITERS_SIGNATURES -#endif // ELPP_VARIADIC_TEMPLATES_SUPPORTED + LOGGER_LEVEL_WRITERS_SIGNATURES(info) + LOGGER_LEVEL_WRITERS_SIGNATURES(debug) + LOGGER_LEVEL_WRITERS_SIGNATURES(warn) + LOGGER_LEVEL_WRITERS_SIGNATURES(error) + LOGGER_LEVEL_WRITERS_SIGNATURES(fatal) + LOGGER_LEVEL_WRITERS_SIGNATURES(trace) +#undef LOGGER_LEVEL_WRITERS_SIGNATURES +#endif // ELPP_VARIADIC_TEMPLATES_SUPPORTED private: - std::string m_id; - base::TypedConfigurations* m_typedConfigurations; - base::type::stringstream_t m_stream; - std::string m_parentApplicationName; - bool m_isConfigured; - Configurations m_configurations; - std::unordered_map m_unflushedCount; - base::LogStreamsReferenceMap* m_logStreamsReference; - LogBuilderPtr m_logBuilder; + std::string m_id; + base::TypedConfigurations* m_typedConfigurations; + base::type::stringstream_t m_stream; + std::string m_parentApplicationName; + bool m_isConfigured; + Configurations m_configurations; + std::unordered_map m_unflushedCount; + base::LogStreamsReferenceMap* m_logStreamsReference; + LogBuilderPtr m_logBuilder; - friend class el::LogMessage; - friend class el::Loggers; - friend class el::Helpers; - friend class el::base::RegisteredLoggers; - friend class el::base::DefaultLogDispatchCallback; - friend class el::base::MessageBuilder; - friend class el::base::Writer; - friend class el::base::PErrorWriter; - friend class el::base::Storage; - friend class el::base::PerformanceTracker; - friend class el::base::LogDispatcher; + friend class el::LogMessage; + friend class el::Loggers; + friend class el::Helpers; + friend class el::base::RegisteredLoggers; + friend class el::base::DefaultLogDispatchCallback; + friend class el::base::MessageBuilder; + friend class el::base::Writer; + friend class el::base::PErrorWriter; + friend class el::base::Storage; + friend class el::base::PerformanceTracker; + friend class el::base::LogDispatcher; - Logger(void); + Logger(void); #if ELPP_VARIADIC_TEMPLATES_SUPPORTED - template - void log_(Level, int, const char*, const T&, const Args&...); + template + void + log_(Level, int, const char*, const T&, const Args&...); - template - inline void log_(Level, int, const T&); + template + inline void + log_(Level, int, const T&); - template - void log(Level, const char*, const T&, const Args&...); + template + void + log(Level, const char*, const T&, const Args&...); - template - inline void log(Level, const T&); -#endif // ELPP_VARIADIC_TEMPLATES_SUPPORTED + template + inline void + log(Level, const T&); +#endif // 

-  void initUnflushedCount(void);
+    void
+    initUnflushedCount(void);

-  inline base::type::stringstream_t& stream(void) {
-    return m_stream;
-  }
+    inline base::type::stringstream_t&
+    stream(void) {
+        return m_stream;
+    }

-  void resolveLoggerFormatSpec(void) const;
+    void
+    resolveLoggerFormatSpec(void) const;
 };
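In practice the Logger above is obtained and (re)configured through easylogging++'s public helpers rather than constructed directly; configure() is what ultimately runs when a logger is reconfigured. A minimal usage sketch, assuming the stock easylogging++ entry points (INITIALIZE_EASYLOGGINGPP, el::Loggers, el::Configurations, CLOG) and a logger id "network" chosen here purely for illustration:

    #include "easylogging++.h"
    INITIALIZE_EASYLOGGINGPP

    int main() {
        el::Logger* logger = el::Loggers::getLogger("network");  // registers the logger on first use
        el::Configurations conf;
        conf.setToDefault();
        conf.setGlobally(el::ConfigurationType::Format, "%datetime %level [%logger] %msg");
        el::Loggers::reconfigureLogger("network", conf);  // ends up in Logger::configure()
        CLOG(INFO, "network") << "connected";  // routed through Writer -> LogDispatcher
        return 0;
    }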
 namespace base {
 /// @brief Loggers repository
 class RegisteredLoggers : public base::utils::Registry<Logger, std::string> {
  public:
-  explicit RegisteredLoggers(const LogBuilderPtr& defaultLogBuilder);
+    explicit RegisteredLoggers(const LogBuilderPtr& defaultLogBuilder);

-  virtual ~RegisteredLoggers(void) {
-    unsafeFlushAll();
-  }
+    virtual ~RegisteredLoggers(void) {
+        unsafeFlushAll();
+    }

-  inline void setDefaultConfigurations(const Configurations& configurations) {
-    base::threading::ScopedLock scopedLock(lock());
-    m_defaultConfigurations.setFromBase(const_cast<Configurations*>(&configurations));
-  }
+    inline void
+    setDefaultConfigurations(const Configurations& configurations) {
+        base::threading::ScopedLock scopedLock(lock());
+        m_defaultConfigurations.setFromBase(const_cast<Configurations*>(&configurations));
+    }

-  inline Configurations* defaultConfigurations(void) {
-    return &m_defaultConfigurations;
-  }
+    inline Configurations*
+    defaultConfigurations(void) {
+        return &m_defaultConfigurations;
+    }

-  Logger* get(const std::string& id, bool forceCreation = true);
+    Logger*
+    get(const std::string& id, bool forceCreation = true);

-  template <typename T>
-  inline bool installLoggerRegistrationCallback(const std::string& id) {
-    return base::utils::Utils::installCallback<T, base::type::LoggerRegistrationCallbackPtr>(id,
-           &m_loggerRegistrationCallbacks);
-  }
+    template <typename T>
+    inline bool
+    installLoggerRegistrationCallback(const std::string& id) {
+        return base::utils::Utils::installCallback<T, base::type::LoggerRegistrationCallbackPtr>(
+            id, &m_loggerRegistrationCallbacks);
+    }

-  template <typename T>
-  inline void uninstallLoggerRegistrationCallback(const std::string& id) {
-    base::utils::Utils::uninstallCallback<T, base::type::LoggerRegistrationCallbackPtr>(id, &m_loggerRegistrationCallbacks);
-  }
+    template <typename T>
+    inline void
+    uninstallLoggerRegistrationCallback(const std::string& id) {
+        base::utils::Utils::uninstallCallback<T, base::type::LoggerRegistrationCallbackPtr>(
+            id, &m_loggerRegistrationCallbacks);
+    }

-  template <typename T>
-  inline T* loggerRegistrationCallback(const std::string& id) {
-    return base::utils::Utils::callback<T, base::type::LoggerRegistrationCallbackPtr>(id, &m_loggerRegistrationCallbacks);
-  }
+    template <typename T>
+    inline T*
+    loggerRegistrationCallback(const std::string& id) {
+        return base::utils::Utils::callback<T, base::type::LoggerRegistrationCallbackPtr>(
+            id, &m_loggerRegistrationCallbacks);
+    }

-  bool remove(const std::string& id);
+    bool
+    remove(const std::string& id);

-  inline bool has(const std::string& id) {
-    return get(id, false) != nullptr;
-  }
+    inline bool
+    has(const std::string& id) {
+        return get(id, false) != nullptr;
+    }

-  inline void unregister(Logger*& logger) {
-    base::threading::ScopedLock scopedLock(lock());
-    base::utils::Registry<Logger, std::string>::unregister(logger->id());
-  }
+    inline void
+    unregister(Logger*& logger) {
+        base::threading::ScopedLock scopedLock(lock());
+        base::utils::Registry<Logger, std::string>::unregister(logger->id());
+    }

-  inline base::LogStreamsReferenceMap* logStreamsReference(void) {
-    return &m_logStreamsReference;
-  }
+    inline base::LogStreamsReferenceMap*
+    logStreamsReference(void) {
+        return &m_logStreamsReference;
+    }

-  inline void flushAll(void) {
-    base::threading::ScopedLock scopedLock(lock());
-    unsafeFlushAll();
-  }
+    inline void
+    flushAll(void) {
+        base::threading::ScopedLock scopedLock(lock());
+        unsafeFlushAll();
+    }

-  inline void setDefaultLogBuilder(LogBuilderPtr& logBuilderPtr) {
-    base::threading::ScopedLock scopedLock(lock());
-    m_defaultLogBuilder = logBuilderPtr;
-  }
+    inline void
+    setDefaultLogBuilder(LogBuilderPtr& logBuilderPtr) {
+        base::threading::ScopedLock scopedLock(lock());
+        m_defaultLogBuilder = logBuilderPtr;
+    }

  private:
-  LogBuilderPtr m_defaultLogBuilder;
-  Configurations m_defaultConfigurations;
-  base::LogStreamsReferenceMap m_logStreamsReference;
-  std::unordered_map<std::string, base::type::LoggerRegistrationCallbackPtr> m_loggerRegistrationCallbacks;
-  friend class el::base::Storage;
+    LogBuilderPtr m_defaultLogBuilder;
+    Configurations m_defaultConfigurations;
+    base::LogStreamsReferenceMap m_logStreamsReference;
+    std::unordered_map<std::string, base::type::LoggerRegistrationCallbackPtr> m_loggerRegistrationCallbacks;
+    friend class el::base::Storage;

-  void unsafeFlushAll(void);
+    void
+    unsafeFlushAll(void);
 };

 /// @brief Represents registries for verbose logging
 class VRegistry : base::NoCopy, public base::threading::ThreadSafe {
  public:
-  explicit VRegistry(base::type::VerboseLevel level, base::type::EnumType* pFlags);
+    explicit VRegistry(base::type::VerboseLevel level, base::type::EnumType* pFlags);

-  /// @brief Sets verbose level. Accepted range is 0-9
-  void setLevel(base::type::VerboseLevel level);
+    /// @brief Sets verbose level. Accepted range is 0-9
+    void
+    setLevel(base::type::VerboseLevel level);

-  inline base::type::VerboseLevel level(void) const {
-    return m_level;
-  }
+    inline base::type::VerboseLevel
+    level(void) const {
+        return m_level;
+    }

-  inline void clearModules(void) {
-    base::threading::ScopedLock scopedLock(lock());
-    m_modules.clear();
-  }
+    inline void
+    clearModules(void) {
+        base::threading::ScopedLock scopedLock(lock());
+        m_modules.clear();
+    }

-  void setModules(const char* modules);
+    void
+    setModules(const char* modules);

-  bool allowed(base::type::VerboseLevel vlevel, const char* file);
+    bool
+    allowed(base::type::VerboseLevel vlevel, const char* file);

-  inline const std::unordered_map<std::string, base::type::VerboseLevel>& modules(void) const {
-    return m_modules;
-  }
+    inline const std::unordered_map<std::string, base::type::VerboseLevel>&
+    modules(void) const {
+        return m_modules;
+    }

-  void setFromArgs(const base::utils::CommandLineArgs* commandLineArgs);
+    void
+    setFromArgs(const base::utils::CommandLineArgs* commandLineArgs);

-  /// @brief Whether or not vModules enabled
-  inline bool vModulesEnabled(void) {
-    return !base::utils::hasFlag(LoggingFlag::DisableVModules, *m_pFlags);
-  }
+    /// @brief Whether or not vModules enabled
+    inline bool
+    vModulesEnabled(void) {
+        return !base::utils::hasFlag(LoggingFlag::DisableVModules, *m_pFlags);
+    }

  private:
-  base::type::VerboseLevel m_level;
-  base::type::EnumType* m_pFlags;
-  std::unordered_map<std::string, base::type::VerboseLevel> m_modules;
+    base::type::VerboseLevel m_level;
+    base::type::EnumType* m_pFlags;
+    std::unordered_map<std::string, base::type::VerboseLevel> m_modules;
 };
 }  // namespace base
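VRegistry backs the verbose-logging path: setFromArgs() picks up --v=N and --vmodule=mod*=N from the command line, and allowed() gates every verbose call site against that state. A short sketch, assuming the standard START_EASYLOGGINGPP and VLOG macros:

    #include "easylogging++.h"
    INITIALIZE_EASYLOGGINGPP

    int main(int argc, char** argv) {
        START_EASYLOGGINGPP(argc, argv);  // feeds argv into VRegistry::setFromArgs()
        VLOG(2) << "emitted only when started with --v=2 or higher";
        return 0;
    }

Starting the binary with --vmodule=connections*=3 instead enables level-3 verbosity only for matching source files, which is exactly the check VRegistry::allowed() performs per call site.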
 class LogMessage {
  public:
-  LogMessage(Level level, const std::string& file, base::type::LineNumber line, const std::string& func,
-             base::type::VerboseLevel verboseLevel, Logger* logger) :
-    m_level(level), m_file(file), m_line(line), m_func(func),
-    m_verboseLevel(verboseLevel), m_logger(logger), m_message(logger->stream().str()) {
-  }
-  inline Level level(void) const {
-    return m_level;
-  }
-  inline const std::string& file(void) const {
-    return m_file;
-  }
-  inline base::type::LineNumber line(void) const {
-    return m_line;
-  }
-  inline const std::string& func(void) const {
-    return m_func;
-  }
-  inline base::type::VerboseLevel verboseLevel(void) const {
-    return m_verboseLevel;
-  }
-  inline Logger* logger(void) const {
-    return m_logger;
-  }
-  inline const base::type::string_t& message(void) const {
-    return m_message;
-  }
+    LogMessage(Level level, const std::string& file, base::type::LineNumber line, const std::string& func,
+               base::type::VerboseLevel verboseLevel, Logger* logger)
+        : m_level(level),
+          m_file(file),
+          m_line(line),
+          m_func(func),
+          m_verboseLevel(verboseLevel),
+          m_logger(logger),
+          m_message(logger->stream().str()) {
+    }
+    inline Level
+    level(void) const {
+        return m_level;
+    }
+    inline const std::string&
+    file(void) const {
+        return m_file;
+    }
+    inline base::type::LineNumber
+    line(void) const {
+        return m_line;
+    }
+    inline const std::string&
+    func(void) const {
+        return m_func;
+    }
+    inline base::type::VerboseLevel
+    verboseLevel(void) const {
+        return m_verboseLevel;
+    }
+    inline Logger*
+    logger(void) const {
+        return m_logger;
+    }
+    inline const base::type::string_t&
+    message(void) const {
+        return m_message;
+    }
+
  private:
-  Level m_level;
-  std::string m_file;
-  base::type::LineNumber m_line;
-  std::string m_func;
-  base::type::VerboseLevel m_verboseLevel;
-  Logger* m_logger;
-  base::type::string_t m_message;
+    Level m_level;
+    std::string m_file;
+    base::type::LineNumber m_line;
+    std::string m_func;
+    base::type::VerboseLevel m_verboseLevel;
+    Logger* m_logger;
+    base::type::string_t m_message;
 };

 namespace base {
 #if ELPP_ASYNC_LOGGING
 class AsyncLogItem {
  public:
-  explicit AsyncLogItem(const LogMessage& logMessage, const LogDispatchData& data, const base::type::string_t& logLine)
-    : m_logMessage(logMessage), m_dispatchData(data), m_logLine(logLine) {}
-  virtual ~AsyncLogItem() {}
-  inline LogMessage* logMessage(void) {
-    return &m_logMessage;
-  }
-  inline LogDispatchData* data(void) {
-    return &m_dispatchData;
-  }
-  inline base::type::string_t logLine(void) {
-    return m_logLine;
-  }
+    explicit AsyncLogItem(const LogMessage& logMessage, const LogDispatchData& data,
+                          const base::type::string_t& logLine)
+        : m_logMessage(logMessage), m_dispatchData(data), m_logLine(logLine) {
+    }
+    virtual ~AsyncLogItem() {
+    }
+    inline LogMessage*
+    logMessage(void) {
+        return &m_logMessage;
+    }
+    inline LogDispatchData*
+    data(void) {
+        return &m_dispatchData;
+    }
+    inline base::type::string_t
+    logLine(void) {
+        return m_logLine;
+    }
+
  private:
-  LogMessage m_logMessage;
-  LogDispatchData m_dispatchData;
-  base::type::string_t m_logLine;
+    LogMessage m_logMessage;
+    LogDispatchData m_dispatchData;
+    base::type::string_t m_logLine;
 };

 class AsyncLogQueue : public base::threading::ThreadSafe {
  public:
-  virtual ~AsyncLogQueue() {
-    ELPP_INTERNAL_INFO(6, "~AsyncLogQueue");
-  }
+    virtual ~AsyncLogQueue() {
+        ELPP_INTERNAL_INFO(6, "~AsyncLogQueue");
+    }

-  inline AsyncLogItem next(void) {
-    base::threading::ScopedLock scopedLock(lock());
-    AsyncLogItem result = m_queue.front();
-    m_queue.pop();
-    return result;
-  }
+    inline AsyncLogItem
+    next(void) {
+        base::threading::ScopedLock scopedLock(lock());
+        AsyncLogItem result = m_queue.front();
+        m_queue.pop();
+        return result;
+    }
+
+    inline void
+    push(const AsyncLogItem& item) {
+        base::threading::ScopedLock scopedLock(lock());
+        m_queue.push(item);
+    }
+    inline void
+    pop(void) {
+        base::threading::ScopedLock scopedLock(lock());
+        m_queue.pop();
+    }
+    inline AsyncLogItem
+    front(void) {
+        base::threading::ScopedLock scopedLock(lock());
+        return m_queue.front();
+    }
+    inline bool
+    empty(void) {
+        base::threading::ScopedLock scopedLock(lock());
+        return m_queue.empty();
+    }

-  inline void push(const AsyncLogItem& item) {
-    base::threading::ScopedLock scopedLock(lock());
-    m_queue.push(item);
-  }
-  inline void pop(void) {
-    base::threading::ScopedLock scopedLock(lock());
-    m_queue.pop();
-  }
-  inline AsyncLogItem front(void) {
-    base::threading::ScopedLock scopedLock(lock());
-    return m_queue.front();
-  }
-  inline bool empty(void) {
-    base::threading::ScopedLock scopedLock(lock());
-    return m_queue.empty();
-  }
+
  private:
-  std::queue<AsyncLogItem> m_queue;
+    std::queue<AsyncLogItem> m_queue;
 };

 class IWorker {
  public:
-  virtual ~IWorker() {}
-  virtual void start() = 0;
+    virtual ~IWorker() {
+    }
+    virtual void
+    start() = 0;
 };
-#endif // ELPP_ASYNC_LOGGING
+#endif  // ELPP_ASYNC_LOGGING

 /// @brief Easylogging++ management storage
 class Storage : base::NoCopy, public base::threading::ThreadSafe {
  public:
 #if ELPP_ASYNC_LOGGING
-  Storage(const LogBuilderPtr& defaultLogBuilder, base::IWorker* asyncDispatchWorker);
+    Storage(const LogBuilderPtr& defaultLogBuilder, base::IWorker* asyncDispatchWorker);
 #else
-  explicit Storage(const LogBuilderPtr& defaultLogBuilder);
+    explicit Storage(const LogBuilderPtr& defaultLogBuilder);
 #endif  // ELPP_ASYNC_LOGGING

-  virtual ~Storage(void);
+    virtual ~Storage(void);

-  inline bool validateEveryNCounter(const char* filename, base::type::LineNumber lineNumber, std::size_t occasion) {
-    return hitCounters()->validateEveryN(filename, lineNumber, occasion);
-  }
+    inline bool
+    validateEveryNCounter(const char* filename, base::type::LineNumber lineNumber, std::size_t occasion) {
+        return hitCounters()->validateEveryN(filename, lineNumber, occasion);
+    }

-  inline bool validateAfterNCounter(const char* filename, base::type::LineNumber lineNumber, std::size_t n) {
-    return hitCounters()->validateAfterN(filename, lineNumber, n);
-  }
+    inline bool
+    validateAfterNCounter(const char* filename, base::type::LineNumber lineNumber, std::size_t n) {
+        return hitCounters()->validateAfterN(filename, lineNumber, n);
+    }

-  inline bool validateNTimesCounter(const char* filename, base::type::LineNumber lineNumber, std::size_t n) {
-    return hitCounters()->validateNTimes(filename, lineNumber, n);
-  }
+    inline bool
+    validateNTimesCounter(const char* filename, base::type::LineNumber lineNumber, std::size_t n) {
+        return hitCounters()->validateNTimes(filename, lineNumber, n);
+    }

-  inline base::RegisteredHitCounters* hitCounters(void) const {
-    return m_registeredHitCounters;
-  }
+    inline base::RegisteredHitCounters*
+    hitCounters(void) const {
+        return m_registeredHitCounters;
+    }

-  inline base::RegisteredLoggers* registeredLoggers(void) const {
-    return m_registeredLoggers;
-  }
+    inline base::RegisteredLoggers*
+    registeredLoggers(void) const {
+        return m_registeredLoggers;
+    }

-  inline base::VRegistry* vRegistry(void) const {
-    return m_vRegistry;
-  }
+    inline base::VRegistry*
+    vRegistry(void) const {
+        return m_vRegistry;
+    }

 #if ELPP_ASYNC_LOGGING
-  inline base::AsyncLogQueue* asyncLogQueue(void) const {
-    return m_asyncLogQueue;
-  }
+    inline base::AsyncLogQueue*
+    asyncLogQueue(void) const {
+        return m_asyncLogQueue;
+    }
 #endif  // ELPP_ASYNC_LOGGING

-  inline const base::utils::CommandLineArgs* commandLineArgs(void) const {
-    return &m_commandLineArgs;
-  }
+    inline const base::utils::CommandLineArgs*
+    commandLineArgs(void) const {
+        return &m_commandLineArgs;
+    }

-  inline void addFlag(LoggingFlag flag) {
-    base::utils::addFlag(flag, &m_flags);
-  }
+    inline void
+    addFlag(LoggingFlag flag) {
+        base::utils::addFlag(flag, &m_flags);
+    }

-  inline void removeFlag(LoggingFlag flag) {
-    base::utils::removeFlag(flag, &m_flags);
-  }
+    inline void
+    removeFlag(LoggingFlag flag) {
+        base::utils::removeFlag(flag, &m_flags);
+    }

-  inline bool hasFlag(LoggingFlag flag) const {
-    return base::utils::hasFlag(flag, m_flags);
-  }
+    inline bool
+    hasFlag(LoggingFlag flag) const {
+        return base::utils::hasFlag(flag, m_flags);
+    }

-  inline base::type::EnumType flags(void) const {
-    return m_flags;
-  }
+    inline base::type::EnumType
+    flags(void) const {
+        return m_flags;
+    }

-  inline void setFlags(base::type::EnumType flags) {
-    m_flags = flags;
-  }
+    inline void
+    setFlags(base::type::EnumType flags) {
+        m_flags = flags;
+    }

-  inline void setPreRollOutCallback(const PreRollOutCallback& callback) {
-    m_preRollOutCallback = callback;
-  }
+    inline void
+    setPreRollOutCallback(const PreRollOutCallback& callback) {
+        m_preRollOutCallback = callback;
+    }

-  inline void unsetPreRollOutCallback(void) {
-    m_preRollOutCallback = base::defaultPreRollOutCallback;
-  }
+    inline void
+    unsetPreRollOutCallback(void) {
+        m_preRollOutCallback = base::defaultPreRollOutCallback;
+    }

-  inline PreRollOutCallback& preRollOutCallback(void) {
-    return m_preRollOutCallback;
-  }
+    inline PreRollOutCallback&
+    preRollOutCallback(void) {
+        return m_preRollOutCallback;
+    }

-  bool hasCustomFormatSpecifier(const char* formatSpecifier);
-  void installCustomFormatSpecifier(const CustomFormatSpecifier& customFormatSpecifier);
-  bool uninstallCustomFormatSpecifier(const char* formatSpecifier);
+    bool
+    hasCustomFormatSpecifier(const char* formatSpecifier);
+    void
+    installCustomFormatSpecifier(const CustomFormatSpecifier& customFormatSpecifier);
+    bool
+    uninstallCustomFormatSpecifier(const char* formatSpecifier);

-  const std::vector<CustomFormatSpecifier>* customFormatSpecifiers(void) const {
-    return &m_customFormatSpecifiers;
-  }
+    const std::vector<CustomFormatSpecifier>*
+    customFormatSpecifiers(void) const {
+        return &m_customFormatSpecifiers;
+    }

-  base::threading::Mutex& customFormatSpecifiersLock() {
-    return m_customFormatSpecifiersLock;
-  }
+    base::threading::Mutex&
+    customFormatSpecifiersLock() {
+        return m_customFormatSpecifiersLock;
+    }

-  inline void setLoggingLevel(Level level) {
-    m_loggingLevel = level;
-  }
+    inline void
+    setLoggingLevel(Level level) {
+        m_loggingLevel = level;
+    }

-  template <typename T>
-  inline bool installLogDispatchCallback(const std::string& id) {
-    return base::utils::Utils::installCallback<T, base::type::LogDispatchCallbackPtr>(id, &m_logDispatchCallbacks);
-  }
+    template <typename T>
+    inline bool
+    installLogDispatchCallback(const std::string& id) {
+        return base::utils::Utils::installCallback<T, base::type::LogDispatchCallbackPtr>(id, &m_logDispatchCallbacks);
+    }

-  template <typename T>
-  inline void uninstallLogDispatchCallback(const std::string& id) {
-    base::utils::Utils::uninstallCallback<T, base::type::LogDispatchCallbackPtr>(id, &m_logDispatchCallbacks);
-  }
-  template <typename T>
-  inline T* logDispatchCallback(const std::string& id) {
-    return base::utils::Utils::callback<T, base::type::LogDispatchCallbackPtr>(id, &m_logDispatchCallbacks);
-  }
+    template <typename T>
+    inline void
+    uninstallLogDispatchCallback(const std::string& id) {
+        base::utils::Utils::uninstallCallback<T, base::type::LogDispatchCallbackPtr>(id, &m_logDispatchCallbacks);
+    }
+    template <typename T>
+    inline T*
+    logDispatchCallback(const std::string& id) {
+        return base::utils::Utils::callback<T, base::type::LogDispatchCallbackPtr>(id, &m_logDispatchCallbacks);
+    }

 #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
-  template <typename T>
-  inline bool installPerformanceTrackingCallback(const std::string& id) {
-    return base::utils::Utils::installCallback<T, base::type::PerformanceTrackingCallbackPtr>(id,
-           &m_performanceTrackingCallbacks);
-  }
-
-  template <typename T>
-  inline void uninstallPerformanceTrackingCallback(const std::string& id) {
-    base::utils::Utils::uninstallCallback<T, base::type::PerformanceTrackingCallbackPtr>(id,
-        &m_performanceTrackingCallbacks);
-  }
-
-  template <typename T>
-  inline T* performanceTrackingCallback(const std::string& id) {
-    return base::utils::Utils::callback<T, base::type::PerformanceTrackingCallbackPtr>(id, &m_performanceTrackingCallbacks);
-  }
-#endif  // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
-
-  /// @brief Sets thread name for current thread. Requires std::thread
-  inline void setThreadName(const std::string& name) {
-    if (name.empty()) return;
-    base::threading::ScopedLock scopedLock(m_threadNamesLock);
-    m_threadNames[base::threading::getCurrentThreadId()] = name;
-  }
-
-  inline std::string getThreadName(const std::string& threadId) {
-    base::threading::ScopedLock scopedLock(m_threadNamesLock);
-    std::unordered_map<std::string, std::string>::const_iterator it = m_threadNames.find(threadId);
-    if (it == m_threadNames.end()) {
-      return threadId;
-    }
-    return it->second;
-  }
+    template <typename T>
+    inline bool
+    installPerformanceTrackingCallback(const std::string& id) {
+        return base::utils::Utils::installCallback<T, base::type::PerformanceTrackingCallbackPtr>(
+            id, &m_performanceTrackingCallbacks);
+    }
+
+    template <typename T>
+    inline void
+    uninstallPerformanceTrackingCallback(const std::string& id) {
+        base::utils::Utils::uninstallCallback<T, base::type::PerformanceTrackingCallbackPtr>(
+            id, &m_performanceTrackingCallbacks);
+    }
+
+    template <typename T>
+    inline T*
+    performanceTrackingCallback(const std::string& id) {
+        return base::utils::Utils::callback<T, base::type::PerformanceTrackingCallbackPtr>(
+            id, &m_performanceTrackingCallbacks);
+    }
+#endif  // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
+
+    /// @brief Sets thread name for current thread. Requires std::thread
+    inline void
+    setThreadName(const std::string& name) {
+        if (name.empty())
+            return;
+        base::threading::ScopedLock scopedLock(m_threadNamesLock);
+        m_threadNames[base::threading::getCurrentThreadId()] = name;
+    }
+
+    inline std::string
+    getThreadName(const std::string& threadId) {
+        base::threading::ScopedLock scopedLock(m_threadNamesLock);
+        std::unordered_map<std::string, std::string>::const_iterator it = m_threadNames.find(threadId);
+        if (it == m_threadNames.end()) {
+            return threadId;
+        }
+        return it->second;
+    }
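setThreadName()/getThreadName() are what the %thread format specifier resolves against; threads without a registered name fall back to their raw id. A hedged sketch, assuming the el::Helpers::setThreadName wrapper that forwards to the Storage method above:

    #include <thread>
    #include "easylogging++.h"
    INITIALIZE_EASYLOGGINGPP

    void worker() {
        el::Helpers::setThreadName("ingest-worker");  // keyed by getCurrentThreadId()
        LOG(INFO) << "working";  // %thread in the format now renders "ingest-worker"
    }

    int main() {
        std::thread t(worker);
        t.join();
        return 0;
    }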
+
  private:
-  base::RegisteredHitCounters* m_registeredHitCounters;
-  base::RegisteredLoggers* m_registeredLoggers;
-  base::type::EnumType m_flags;
-  base::VRegistry* m_vRegistry;
+    base::RegisteredHitCounters* m_registeredHitCounters;
+    base::RegisteredLoggers* m_registeredLoggers;
+    base::type::EnumType m_flags;
+    base::VRegistry* m_vRegistry;
 #if ELPP_ASYNC_LOGGING
-  base::AsyncLogQueue* m_asyncLogQueue;
-  base::IWorker* m_asyncDispatchWorker;
+    base::AsyncLogQueue* m_asyncLogQueue;
+    base::IWorker* m_asyncDispatchWorker;
 #endif  // ELPP_ASYNC_LOGGING
-  base::utils::CommandLineArgs m_commandLineArgs;
-  PreRollOutCallback m_preRollOutCallback;
-  std::unordered_map<std::string, base::type::LogDispatchCallbackPtr> m_logDispatchCallbacks;
-  std::unordered_map<std::string, base::type::PerformanceTrackingCallbackPtr> m_performanceTrackingCallbacks;
-  std::unordered_map<std::string, std::string> m_threadNames;
-  std::vector<CustomFormatSpecifier> m_customFormatSpecifiers;
-  base::threading::Mutex m_customFormatSpecifiersLock;
-  base::threading::Mutex m_threadNamesLock;
-  Level m_loggingLevel;
+    base::utils::CommandLineArgs m_commandLineArgs;
+    PreRollOutCallback m_preRollOutCallback;
+    std::unordered_map<std::string, base::type::LogDispatchCallbackPtr> m_logDispatchCallbacks;
+    std::unordered_map<std::string, base::type::PerformanceTrackingCallbackPtr> m_performanceTrackingCallbacks;
+    std::unordered_map<std::string, std::string> m_threadNames;
+    std::vector<CustomFormatSpecifier> m_customFormatSpecifiers;
+    base::threading::Mutex m_customFormatSpecifiersLock;
+    base::threading::Mutex m_threadNamesLock;
+    Level m_loggingLevel;

-  friend class el::Helpers;
-  friend class el::base::DefaultLogDispatchCallback;
-  friend class el::LogBuilder;
-  friend class el::base::MessageBuilder;
-  friend class el::base::Writer;
-  friend class el::base::PerformanceTracker;
-  friend class el::base::LogDispatcher;
+    friend class el::Helpers;
+    friend class el::base::DefaultLogDispatchCallback;
+    friend class el::LogBuilder;
+    friend class el::base::MessageBuilder;
+    friend class el::base::Writer;
+    friend class el::base::PerformanceTracker;
+    friend class el::base::LogDispatcher;

-  void setApplicationArguments(int argc, char** argv);
+    void
+    setApplicationArguments(int argc, char** argv);

-  inline void setApplicationArguments(int argc, const char** argv) {
-    setApplicationArguments(argc, const_cast<char**>(argv));
-  }
+    inline void
+    setApplicationArguments(int argc, const char** argv) {
+        setApplicationArguments(argc, const_cast<char**>(argv));
+    }
 };
 extern ELPP_EXPORT base::type::StoragePointer elStorage;
 #define ELPP el::base::elStorage

 class DefaultLogDispatchCallback : public LogDispatchCallback {
  protected:
-  void handle(const LogDispatchData* data);
+    void
+    handle(const LogDispatchData* data);
+
  private:
-  const LogDispatchData* m_data;
-  void dispatch(base::type::string_t&& logLine);
+    const LogDispatchData* m_data;
+    void
+    dispatch(base::type::string_t&& logLine);
 };

 #if ELPP_ASYNC_LOGGING
 class AsyncLogDispatchCallback : public LogDispatchCallback {
  protected:
-  void handle(const LogDispatchData* data);
+    void
+    handle(const LogDispatchData* data);
 };

 class AsyncDispatchWorker : public base::IWorker, public base::threading::ThreadSafe {
  public:
-  AsyncDispatchWorker();
-  virtual ~AsyncDispatchWorker();
+    AsyncDispatchWorker();
+    virtual ~AsyncDispatchWorker();

-  bool clean(void);
-  void emptyQueue(void);
-  virtual void start(void);
-  void handle(AsyncLogItem* logItem);
-  void run(void);
+    bool
+    clean(void);
+    void
+    emptyQueue(void);
+    virtual void
+    start(void);
+    void
+    handle(AsyncLogItem* logItem);
+    void
+    run(void);

-  void setContinueRunning(bool value) {
-    base::threading::ScopedLock scopedLock(m_continueRunningLock);
-    m_continueRunning = value;
-  }
+    void
+    setContinueRunning(bool value) {
+        base::threading::ScopedLock scopedLock(m_continueRunningLock);
+        m_continueRunning = value;
+    }
+
+    bool
+    continueRunning(void) const {
+        return m_continueRunning;
+    }

-  bool continueRunning(void) const {
-    return m_continueRunning;
-  }
+
  private:
-  std::condition_variable cv;
-  bool m_continueRunning;
-  base::threading::Mutex m_continueRunningLock;
+    std::condition_variable cv;
+    bool m_continueRunning;
+    base::threading::Mutex m_continueRunningLock;
 };
 #endif  // ELPP_ASYNC_LOGGING
 }  // namespace base

 namespace base {
 class DefaultLogBuilder : public LogBuilder {
  public:
-  base::type::string_t build(const LogMessage* logMessage, bool appendNewLine) const;
+    base::type::string_t
+    build(const LogMessage* logMessage, bool appendNewLine) const;
 };

 /// @brief Dispatches log messages
 class LogDispatcher : base::NoCopy {
  public:
-  LogDispatcher(bool proceed, LogMessage* logMessage, base::DispatchAction dispatchAction) :
-    m_proceed(proceed),
-    m_logMessage(logMessage),
-    m_dispatchAction(std::move(dispatchAction)) {
-  }
+    LogDispatcher(bool proceed, LogMessage* logMessage, base::DispatchAction dispatchAction)
+        : m_proceed(proceed), m_logMessage(logMessage), m_dispatchAction(std::move(dispatchAction)) {
+    }

-  void dispatch(void);
+    void
+    dispatch(void);

  private:
-  bool m_proceed;
-  LogMessage* m_logMessage;
-  base::DispatchAction m_dispatchAction;
+    bool m_proceed;
+    LogMessage* m_logMessage;
+    base::DispatchAction m_dispatchAction;
 };
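DefaultLogDispatchCallback above is just the built-in LogDispatchCallback; user callbacks hook into the same dispatch path. A sketch of a custom callback, assuming the el::Helpers::installLogDispatchCallback<T>(id) installer from the public API (class and id names here are illustrative):

    class CountingCallback : public el::LogDispatchCallback {
     public:
        std::size_t count = 0;

     protected:
        void handle(const el::LogDispatchData* data) override {
            // data->logMessage() exposes the level, file, line and built message
            ++count;
        }
    };

    // During start-up:
    //   el::Helpers::installLogDispatchCallback<CountingCallback>("CountingCallback");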
 #if defined(ELPP_STL_LOGGING)
 /// @brief Workarounds to write some STL logs
 ///
-/// @detail There is workaround needed to loop through some stl containers. In order to do that, we need iterable containers
-/// of same type and provide iterator interface and pass it on to writeIterator().
-/// Remember, this is passed by value in constructor so that we dont change original containers.
-/// This operation is as expensive as Big-O(std::min(class_.size(), base::consts::kMaxLogPerContainer))
+/// @detail There is workaround needed to loop through some stl containers. In order to do that, we need iterable
+/// containers of same type and provide iterator interface and pass it on to writeIterator(). Remember, this is passed
+/// by value in constructor so that we dont change original containers. This operation is as expensive as
+/// Big-O(std::min(class_.size(), base::consts::kMaxLogPerContainer))
 namespace workarounds {
 /// @brief Abstract IterableContainer template that provides interface for iterable classes of type T
 template <typename T, typename Container>
 class IterableContainer {
  public:
-  typedef typename Container::iterator iterator;
-  typedef typename Container::const_iterator const_iterator;
-  IterableContainer(void) {}
-  virtual ~IterableContainer(void) {}
-  iterator begin(void) {
-    return getContainer().begin();
-  }
-  iterator end(void) {
-    return getContainer().end();
-  }
+    typedef typename Container::iterator iterator;
+    typedef typename Container::const_iterator const_iterator;
+    IterableContainer(void) {
+    }
+    virtual ~IterableContainer(void) {
+    }
+    iterator
+    begin(void) {
+        return getContainer().begin();
+    }
+    iterator
+    end(void) {
+        return getContainer().end();
+    }
+
  private:
-  virtual Container& getContainer(void) = 0;
+    virtual Container&
+    getContainer(void) = 0;
 };

 /// @brief Implements IterableContainer and provides iterable std::priority_queue class
-template <typename T, typename Container = std::vector<T>, typename Comparator = std::less<typename Container::value_type>>
+template <typename T, typename Container = std::vector<T>,
+          typename Comparator = std::less<typename Container::value_type>>
 class IterablePriorityQueue : public IterableContainer<T, Container>,
-  public std::priority_queue<T, Container, Comparator> {
+                              public std::priority_queue<T, Container, Comparator> {
  public:
-  IterablePriorityQueue(std::priority_queue<T, Container, Comparator> queue_) {
-    std::size_t count_ = 0;
-    while (++count_ < base::consts::kMaxLogPerContainer && !queue_.empty()) {
-      this->push(queue_.top());
-      queue_.pop();
-    }
-  }
+    IterablePriorityQueue(std::priority_queue<T, Container, Comparator> queue_) {
+        std::size_t count_ = 0;
+        while (++count_ < base::consts::kMaxLogPerContainer && !queue_.empty()) {
+            this->push(queue_.top());
+            queue_.pop();
+        }
+    }
+
  private:
-  inline Container& getContainer(void) {
-    return this->c;
-  }
+    inline Container&
+    getContainer(void) {
+        return this->c;
+    }
 };

 /// @brief Implements IterableContainer and provides iterable std::queue class
-template <typename T, typename Container = std::deque<T>>
+template <typename T, typename Container = std::deque<T> >
 class IterableQueue : public IterableContainer<T, Container>, public std::queue<T, Container> {
  public:
-  IterableQueue(std::queue<T, Container> queue_) {
-    std::size_t count_ = 0;
-    while (++count_ < base::consts::kMaxLogPerContainer && !queue_.empty()) {
-      this->push(queue_.front());
-      queue_.pop();
-    }
-  }
+    IterableQueue(std::queue<T, Container> queue_) {
+        std::size_t count_ = 0;
+        while (++count_ < base::consts::kMaxLogPerContainer && !queue_.empty()) {
+            this->push(queue_.front());
+            queue_.pop();
+        }
+    }
+
  private:
-  inline Container& getContainer(void) {
-    return this->c;
-  }
+    inline Container&
+    getContainer(void) {
+        return this->c;
+    }
 };

 /// @brief Implements IterableContainer and provides iterable std::stack class
-template <typename T, typename Container = std::deque<T>>
+template <typename T, typename Container = std::deque<T> >
 class IterableStack : public IterableContainer<T, Container>, public std::stack<T, Container> {
  public:
-  IterableStack(std::stack<T, Container> stack_) {
-    std::size_t count_ = 0;
-    while (++count_ < base::consts::kMaxLogPerContainer && !stack_.empty()) {
-      this->push(stack_.top());
-      stack_.pop();
-    }
-  }
+    IterableStack(std::stack<T, Container> stack_) {
+        std::size_t count_ = 0;
+        while (++count_ < base::consts::kMaxLogPerContainer && !stack_.empty()) {
+            this->push(stack_.top());
+            stack_.pop();
+        }
+    }
+
  private:
-  inline Container& getContainer(void) {
-    return this->c;
-  }
+    inline Container&
+    getContainer(void) {
+        return this->c;
+    }
 };
 }  // namespace workarounds
 #endif  // defined(ELPP_STL_LOGGING)

 // Log message builder
 class MessageBuilder {
  public:
-  MessageBuilder(void) : m_logger(nullptr), m_containerLogSeperator(ELPP_LITERAL("")) {}
-  void initialize(Logger* logger);
+    MessageBuilder(void) : m_logger(nullptr), m_containerLogSeperator(ELPP_LITERAL("")) {
+    }
+    void
+    initialize(Logger* logger);

-# define ELPP_SIMPLE_LOG(LOG_TYPE)\
-MessageBuilder& operator<<(LOG_TYPE msg) {\
-m_logger->stream() << msg;\
-if (ELPP->hasFlag(LoggingFlag::AutoSpacing)) {\
-m_logger->stream() << " ";\
-}\
-return *this;\
-}
+#define ELPP_SIMPLE_LOG(LOG_TYPE) \
+    MessageBuilder& operator<<(LOG_TYPE msg) { \
+        m_logger->stream() << msg; \
+        if (ELPP->hasFlag(LoggingFlag::AutoSpacing)) { \
+            m_logger->stream() << " "; \
+        } \
+        return *this; \
+    }

-  inline MessageBuilder& operator<<(const std::string& msg) {
-    return operator<<(msg.c_str());
-  }
-  ELPP_SIMPLE_LOG(char)
-  ELPP_SIMPLE_LOG(bool)
-  ELPP_SIMPLE_LOG(signed short)
-  ELPP_SIMPLE_LOG(unsigned short)
-  ELPP_SIMPLE_LOG(signed int)
-  ELPP_SIMPLE_LOG(unsigned int)
-  ELPP_SIMPLE_LOG(signed long)
-  ELPP_SIMPLE_LOG(unsigned long)
-  ELPP_SIMPLE_LOG(float)
-  ELPP_SIMPLE_LOG(double)
-  ELPP_SIMPLE_LOG(char*)
-  ELPP_SIMPLE_LOG(const char*)
-  ELPP_SIMPLE_LOG(const void*)
-  ELPP_SIMPLE_LOG(long double)
-  inline MessageBuilder& operator<<(const std::wstring& msg) {
-    return operator<<(msg.c_str());
-  }
-  MessageBuilder& operator<<(const wchar_t* msg);
-  // ostream manipulators
-  inline MessageBuilder& operator<<(std::ostream& (*OStreamMani)(std::ostream&)) {
-    m_logger->stream() << OStreamMani;
-    return *this;
-  }
-#define ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(temp) \
-template <typename T> \
-inline MessageBuilder& operator<<(const temp<T>& template_inst) { \
-return writeIterator(template_inst.begin(), template_inst.end(), template_inst.size()); \
-}
-#define ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(temp) \
-template <typename T1, typename T2> \
-inline MessageBuilder& operator<<(const temp<T1, T2>& template_inst) { \
-return writeIterator(template_inst.begin(), template_inst.end(), template_inst.size()); \
-}
-#define ELPP_ITERATOR_CONTAINER_LOG_THREE_ARG(temp) \
-template <typename T1, typename T2, typename T3> \
-inline MessageBuilder& operator<<(const temp<T1, T2, T3>& template_inst) { \
-return writeIterator(template_inst.begin(), template_inst.end(), template_inst.size()); \
-}
-#define ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(temp) \
-template <typename T1, typename T2, typename T3, typename T4> \
-inline MessageBuilder& operator<<(const temp<T1, T2, T3, T4>& template_inst) { \
-return writeIterator(template_inst.begin(), template_inst.end(), template_inst.size()); \
-}
-#define ELPP_ITERATOR_CONTAINER_LOG_FIVE_ARG(temp) \
-template <typename T1, typename T2, typename T3, typename T4, typename T5> \
-inline MessageBuilder& operator<<(const temp<T1, T2, T3, T4, T5>& template_inst) { \
-return writeIterator(template_inst.begin(), template_inst.end(), template_inst.size()); \
-}
+    inline MessageBuilder&
+    operator<<(const std::string& msg) {
+        return operator<<(msg.c_str());
+    }
+    ELPP_SIMPLE_LOG(char)
+    ELPP_SIMPLE_LOG(bool)
+    ELPP_SIMPLE_LOG(signed short)
+    ELPP_SIMPLE_LOG(unsigned short)
+    ELPP_SIMPLE_LOG(signed int)
+    ELPP_SIMPLE_LOG(unsigned int)
+    ELPP_SIMPLE_LOG(signed long)
+    ELPP_SIMPLE_LOG(unsigned long)
+    ELPP_SIMPLE_LOG(float)
+    ELPP_SIMPLE_LOG(double)
+    ELPP_SIMPLE_LOG(char*)
+    ELPP_SIMPLE_LOG(const char*)
+    ELPP_SIMPLE_LOG(const void*)
+    ELPP_SIMPLE_LOG(long double)
+    inline MessageBuilder&
+    operator<<(const std::wstring& msg) {
+        return operator<<(msg.c_str());
+    }
+    MessageBuilder&
+    operator<<(const wchar_t* msg);
+    // ostream manipulators
+    inline MessageBuilder&
+    operator<<(std::ostream& (*OStreamMani)(std::ostream&)) {
+        m_logger->stream() << OStreamMani;
+        return *this;
+    }
+#define ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(temp) \
+    template <typename T> \
+    inline MessageBuilder& operator<<(const temp<T>& template_inst) { \
+        return writeIterator(template_inst.begin(), template_inst.end(), template_inst.size()); \
+    }
+#define ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(temp) \
+    template <typename T1, typename T2> \
+    inline MessageBuilder& operator<<(const temp<T1, T2>& template_inst) { \
+        return writeIterator(template_inst.begin(), template_inst.end(), template_inst.size()); \
+    }
+#define ELPP_ITERATOR_CONTAINER_LOG_THREE_ARG(temp) \
+    template <typename T1, typename T2, typename T3> \
+    inline MessageBuilder& operator<<(const temp<T1, T2, T3>& template_inst) { \
+        return writeIterator(template_inst.begin(), template_inst.end(), template_inst.size()); \
+    }
+#define ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(temp) \
+    template <typename T1, typename T2, typename T3, typename T4> \
+    inline MessageBuilder& operator<<(const temp<T1, T2, T3, T4>& template_inst) { \
+        return writeIterator(template_inst.begin(), template_inst.end(), template_inst.size()); \
+    }
+#define ELPP_ITERATOR_CONTAINER_LOG_FIVE_ARG(temp) \
+    template <typename T1, typename T2, typename T3, typename T4, typename T5> \
+    inline MessageBuilder& operator<<(const temp<T1, T2, T3, T4, T5>& template_inst) { \
+        return writeIterator(template_inst.begin(), template_inst.end(), template_inst.size()); \
+    }

 #if defined(ELPP_STL_LOGGING)
-  ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(std::vector)
-  ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(std::list)
-  ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(std::deque)
-  ELPP_ITERATOR_CONTAINER_LOG_THREE_ARG(std::set)
-  ELPP_ITERATOR_CONTAINER_LOG_THREE_ARG(std::multiset)
-  ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(std::map)
-  ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(std::multimap)
-  template <class T, class Container>
-  inline MessageBuilder& operator<<(const std::queue<T, Container>& queue_) {
-    base::workarounds::IterableQueue<T, Container> iterableQueue_ =
-      static_cast<base::workarounds::IterableQueue<T, Container> >(queue_);
-    return writeIterator(iterableQueue_.begin(), iterableQueue_.end(), iterableQueue_.size());
-  }
-  template <class T, class Container>
-  inline MessageBuilder& operator<<(const std::stack<T, Container>& stack_) {
-    base::workarounds::IterableStack<T, Container> iterableStack_ =
-      static_cast<base::workarounds::IterableStack<T, Container> >(stack_);
-    return writeIterator(iterableStack_.begin(), iterableStack_.end(), iterableStack_.size());
-  }
-  template <class T, class Container, class Comparator>
-  inline MessageBuilder& operator<<(const std::priority_queue<T, Container, Comparator>& priorityQueue_) {
-    base::workarounds::IterablePriorityQueue<T, Container, Comparator> iterablePriorityQueue_ =
-      static_cast<base::workarounds::IterablePriorityQueue<T, Container, Comparator> >(priorityQueue_);
-    return writeIterator(iterablePriorityQueue_.begin(), iterablePriorityQueue_.end(), iterablePriorityQueue_.size());
-  }
-  template <class First, class Second>
-  MessageBuilder& operator<<(const std::pair<First, Second>& pair_) {
-    m_logger->stream() << ELPP_LITERAL("(");
-    operator << (static_cast<First>(pair_.first));
-    m_logger->stream() << ELPP_LITERAL(", ");
-    operator << (static_cast<Second>(pair_.second));
-    m_logger->stream() << ELPP_LITERAL(")");
-    return *this;
-  }
-  template <std::size_t Size>
-  MessageBuilder& operator<<(const std::bitset<Size>& bitset_) {
-    m_logger->stream() << ELPP_LITERAL("[");
-    operator << (bitset_.to_string());
-    m_logger->stream() << ELPP_LITERAL("]");
-    return *this;
-  }
-# if defined(ELPP_LOG_STD_ARRAY)
-  template <class T, std::size_t Size>
-  inline MessageBuilder& operator<<(const std::array<T, Size>& array) {
-    return writeIterator(array.begin(), array.end(), array.size());
-  }
-# endif  // defined(ELPP_LOG_STD_ARRAY)
-# if defined(ELPP_LOG_UNORDERED_MAP)
-  ELPP_ITERATOR_CONTAINER_LOG_FIVE_ARG(std::unordered_map)
-  ELPP_ITERATOR_CONTAINER_LOG_FIVE_ARG(std::unordered_multimap)
-# endif  // defined(ELPP_LOG_UNORDERED_MAP)
-# if defined(ELPP_LOG_UNORDERED_SET)
-  ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(std::unordered_set)
-  ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(std::unordered_multiset)
-# endif  // defined(ELPP_LOG_UNORDERED_SET)
+    ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(std::vector)
+    ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(std::list)
+    ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(std::deque)
+    ELPP_ITERATOR_CONTAINER_LOG_THREE_ARG(std::set)
+    ELPP_ITERATOR_CONTAINER_LOG_THREE_ARG(std::multiset)
+    ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(std::map)
+    ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(std::multimap)
+    template <class T, class Container>
+    inline MessageBuilder&
+    operator<<(const std::queue<T, Container>& queue_) {
+        base::workarounds::IterableQueue<T, Container> iterableQueue_ =
+            static_cast<base::workarounds::IterableQueue<T, Container>>(queue_);
+        return writeIterator(iterableQueue_.begin(), iterableQueue_.end(), iterableQueue_.size());
+    }
+    template <class T, class Container>
+    inline MessageBuilder&
+    operator<<(const std::stack<T, Container>& stack_) {
+        base::workarounds::IterableStack<T, Container> iterableStack_ =
+            static_cast<base::workarounds::IterableStack<T, Container>>(stack_);
+        return writeIterator(iterableStack_.begin(), iterableStack_.end(), iterableStack_.size());
+    }
+    template <class T, class Container, class Comparator>
+    inline MessageBuilder&
+    operator<<(const std::priority_queue<T, Container, Comparator>& priorityQueue_) {
+        base::workarounds::IterablePriorityQueue<T, Container, Comparator> iterablePriorityQueue_ =
+            static_cast<base::workarounds::IterablePriorityQueue<T, Container, Comparator>>(priorityQueue_);
+        return writeIterator(iterablePriorityQueue_.begin(), iterablePriorityQueue_.end(),
+                             iterablePriorityQueue_.size());
+    }
+    template <class First, class Second>
+    MessageBuilder&
+    operator<<(const std::pair<First, Second>& pair_) {
+        m_logger->stream() << ELPP_LITERAL("(");
+        operator<<(static_cast<First>(pair_.first));
+        m_logger->stream() << ELPP_LITERAL(", ");
+        operator<<(static_cast<Second>(pair_.second));
+        m_logger->stream() << ELPP_LITERAL(")");
+        return *this;
+    }
+    template <std::size_t Size>
+    MessageBuilder&
+    operator<<(const std::bitset<Size>& bitset_) {
+        m_logger->stream() << ELPP_LITERAL("[");
+        operator<<(bitset_.to_string());
+        m_logger->stream() << ELPP_LITERAL("]");
+        return *this;
+    }
+#if defined(ELPP_LOG_STD_ARRAY)
+    template <class T, std::size_t Size>
+    inline MessageBuilder&
+    operator<<(const std::array<T, Size>& array) {
+        return writeIterator(array.begin(), array.end(), array.size());
+    }
+#endif  // defined(ELPP_LOG_STD_ARRAY)
+#if defined(ELPP_LOG_UNORDERED_MAP)
+    ELPP_ITERATOR_CONTAINER_LOG_FIVE_ARG(std::unordered_map)
+    ELPP_ITERATOR_CONTAINER_LOG_FIVE_ARG(std::unordered_multimap)
+#endif  // defined(ELPP_LOG_UNORDERED_MAP)
+#if defined(ELPP_LOG_UNORDERED_SET)
+    ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(std::unordered_set)
+    ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(std::unordered_multiset)
+#endif  // defined(ELPP_LOG_UNORDERED_SET)
 #endif  // defined(ELPP_STL_LOGGING)
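With ELPP_STL_LOGGING defined, the overloads above let STL containers be streamed directly; output is truncated at base::consts::kMaxLogPerContainer elements, with a trailing "..." marker. A sketch (expected output shown approximately):

    // build with -DELPP_STL_LOGGING
    #include <map>
    #include <vector>
    #include "easylogging++.h"
    INITIALIZE_EASYLOGGINGPP

    int main() {
        std::vector<int> v{1, 2, 3};
        std::map<std::string, int> m{{"a", 1}, {"b", 2}};
        LOG(INFO) << v;  // ... [1, 2, 3]
        LOG(INFO) << m;  // ... [(a, 1), (b, 2)]
        return 0;
    }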
 #if defined(ELPP_QT_LOGGING)
-  inline MessageBuilder& operator<<(const QString& msg) {
-# if defined(ELPP_UNICODE)
-    m_logger->stream() << msg.toStdWString();
-# else
-    m_logger->stream() << msg.toStdString();
-# endif  // defined(ELPP_UNICODE)
-    return *this;
-  }
-  inline MessageBuilder& operator<<(const QByteArray& msg) {
-    return operator << (QString(msg));
-  }
-  inline MessageBuilder& operator<<(const QStringRef& msg) {
-    return operator<<(msg.toString());
-  }
-  inline MessageBuilder& operator<<(qint64 msg) {
-# if defined(ELPP_UNICODE)
-    m_logger->stream() << QString::number(msg).toStdWString();
-# else
-    m_logger->stream() << QString::number(msg).toStdString();
-# endif  // defined(ELPP_UNICODE)
-    return *this;
-  }
-  inline MessageBuilder& operator<<(quint64 msg) {
-# if defined(ELPP_UNICODE)
-    m_logger->stream() << QString::number(msg).toStdWString();
-# else
-    m_logger->stream() << QString::number(msg).toStdString();
-# endif  // defined(ELPP_UNICODE)
-    return *this;
-  }
-  inline MessageBuilder& operator<<(QChar msg) {
-    m_logger->stream() << msg.toLatin1();
-    return *this;
-  }
-  inline MessageBuilder& operator<<(const QLatin1String& msg) {
-    m_logger->stream() << msg.latin1();
-    return *this;
-  }
-  ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QList)
-  ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QVector)
-  ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QQueue)
-  ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QSet)
-  ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QLinkedList)
-  ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QStack)
-  template <typename First, typename Second>
-  MessageBuilder& operator<<(const QPair<First, Second>& pair_) {
-    m_logger->stream() << ELPP_LITERAL("(");
-    operator << (static_cast<First>(pair_.first));
-    m_logger->stream() << ELPP_LITERAL(", ");
-    operator << (static_cast<Second>(pair_.second));
-    m_logger->stream() << ELPP_LITERAL(")");
-    return *this;
-  }
-  template <typename K, typename V>
-  MessageBuilder& operator<<(const QMap<K, V>& map_) {
-    m_logger->stream() << ELPP_LITERAL("[");
-    QList<K> keys = map_.keys();
-    typename QList<K>::const_iterator begin = keys.begin();
-    typename QList<K>::const_iterator end = keys.end();
-    int max_ = static_cast<int>(base::consts::kMaxLogPerContainer);  // to prevent warning
-    for (int index_ = 0; begin != end && index_ < max_; ++index_, ++begin) {
-      m_logger->stream() << ELPP_LITERAL("(");
-      operator << (static_cast<K>(*begin));
-      m_logger->stream() << ELPP_LITERAL(", ");
-      operator << (static_cast<V>(map_.value(*begin)));
-      m_logger->stream() << ELPP_LITERAL(")");
-      m_logger->stream() << ((index_ < keys.size() -1) ? m_containerLogSeperator : ELPP_LITERAL(""));
-    }
-    if (begin != end) {
-      m_logger->stream() << ELPP_LITERAL("...");
-    }
-    m_logger->stream() << ELPP_LITERAL("]");
-    return *this;
-  }
-  template <typename K, typename V>
-  inline MessageBuilder& operator<<(const QMultiMap<K, V>& map_) {
-    operator << (static_cast<QMap<K, V>>(map_));
-    return *this;
-  }
-  template <typename K, typename V>
-  MessageBuilder& operator<<(const QHash<K, V>& hash_) {
-    m_logger->stream() << ELPP_LITERAL("[");
-    QList<K> keys = hash_.keys();
-    typename QList<K>::const_iterator begin = keys.begin();
-    typename QList<K>::const_iterator end = keys.end();
-    int max_ = static_cast<int>(base::consts::kMaxLogPerContainer);  // prevent type warning
-    for (int index_ = 0; begin != end && index_ < max_; ++index_, ++begin) {
-      m_logger->stream() << ELPP_LITERAL("(");
-      operator << (static_cast<K>(*begin));
-      m_logger->stream() << ELPP_LITERAL(", ");
-      operator << (static_cast<V>(hash_.value(*begin)));
-      m_logger->stream() << ELPP_LITERAL(")");
-      m_logger->stream() << ((index_ < keys.size() -1) ? m_containerLogSeperator : ELPP_LITERAL(""));
-    }
-    if (begin != end) {
-      m_logger->stream() << ELPP_LITERAL("...");
-    }
-    m_logger->stream() << ELPP_LITERAL("]");
-    return *this;
-  }
+    inline MessageBuilder&
+    operator<<(const QString& msg) {
+#if defined(ELPP_UNICODE)
+        m_logger->stream() << msg.toStdWString();
+#else
+        m_logger->stream() << msg.toStdString();
+#endif  // defined(ELPP_UNICODE)
+        return *this;
+    }
+    inline MessageBuilder&
+    operator<<(const QByteArray& msg) {
+        return operator<<(QString(msg));
+    }
+    inline MessageBuilder&
+    operator<<(const QStringRef& msg) {
+        return operator<<(msg.toString());
+    }
+    inline MessageBuilder&
+    operator<<(qint64 msg) {
+#if defined(ELPP_UNICODE)
+        m_logger->stream() << QString::number(msg).toStdWString();
+#else
+        m_logger->stream() << QString::number(msg).toStdString();
+#endif  // defined(ELPP_UNICODE)
+        return *this;
+    }
+    inline MessageBuilder&
+    operator<<(quint64 msg) {
+#if defined(ELPP_UNICODE)
+        m_logger->stream() << QString::number(msg).toStdWString();
+#else
+        m_logger->stream() << QString::number(msg).toStdString();
+#endif  // defined(ELPP_UNICODE)
+        return *this;
+    }
+    inline MessageBuilder&
+    operator<<(QChar msg) {
+        m_logger->stream() << msg.toLatin1();
+        return *this;
+    }
+    inline MessageBuilder&
+    operator<<(const QLatin1String& msg) {
+        m_logger->stream() << msg.latin1();
+        return *this;
+    }
+    ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QList)
+    ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QVector)
+    ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QQueue)
+    ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QSet)
+    ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QLinkedList)
+    ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(QStack)
+    template <typename First, typename Second>
+    MessageBuilder&
+    operator<<(const QPair<First, Second>& pair_) {
+        m_logger->stream() << ELPP_LITERAL("(");
+        operator<<(static_cast<First>(pair_.first));
+        m_logger->stream() << ELPP_LITERAL(", ");
+        operator<<(static_cast<Second>(pair_.second));
+        m_logger->stream() << ELPP_LITERAL(")");
+        return *this;
+    }
+    template <typename K, typename V>
+    MessageBuilder&
+    operator<<(const QMap<K, V>& map_) {
+        m_logger->stream() << ELPP_LITERAL("[");
+        QList<K> keys = map_.keys();
+        typename QList<K>::const_iterator begin = keys.begin();
+        typename QList<K>::const_iterator end = keys.end();
+        int max_ = static_cast<int>(base::consts::kMaxLogPerContainer);  // to prevent warning
+        for (int index_ = 0; begin != end && index_ < max_; ++index_, ++begin) {
+            m_logger->stream() << ELPP_LITERAL("(");
+            operator<<(static_cast<K>(*begin));
+            m_logger->stream() << ELPP_LITERAL(", ");
+            operator<<(static_cast<V>(map_.value(*begin)));
+            m_logger->stream() << ELPP_LITERAL(")");
+            m_logger->stream() << ((index_ < keys.size() - 1) ? m_containerLogSeperator : ELPP_LITERAL(""));
+        }
+        if (begin != end) {
+            m_logger->stream() << ELPP_LITERAL("...");
+        }
+        m_logger->stream() << ELPP_LITERAL("]");
+        return *this;
+    }
+    template <typename K, typename V>
+    inline MessageBuilder&
+    operator<<(const QMultiMap<K, V>& map_) {
+        operator<<(static_cast<QMap<K, V>>(map_));
+        return *this;
+    }
+    template <typename K, typename V>
+    MessageBuilder&
+    operator<<(const QHash<K, V>& hash_) {
+        m_logger->stream() << ELPP_LITERAL("[");
+        QList<K> keys = hash_.keys();
+        typename QList<K>::const_iterator begin = keys.begin();
+        typename QList<K>::const_iterator end = keys.end();
+        int max_ = static_cast<int>(base::consts::kMaxLogPerContainer);  // prevent type warning
+        for (int index_ = 0; begin != end && index_ < max_; ++index_, ++begin) {
+            m_logger->stream() << ELPP_LITERAL("(");
+            operator<<(static_cast<K>(*begin));
+            m_logger->stream() << ELPP_LITERAL(", ");
+            operator<<(static_cast<V>(hash_.value(*begin)));
+            m_logger->stream() << ELPP_LITERAL(")");
+            m_logger->stream() << ((index_ < keys.size() - 1) ? m_containerLogSeperator : ELPP_LITERAL(""));
+        }
+        if (begin != end) {
+            m_logger->stream() << ELPP_LITERAL("...");
+        }
+        m_logger->stream() << ELPP_LITERAL("]");
+        return *this;
+    }
+    template <typename K, typename V>
+    inline MessageBuilder&
+    operator<<(const QMultiHash<K, V>& multiHash_) {
+        operator<<(static_cast<QHash<K, V>>(multiHash_));
+        return *this;
+    }
 #endif  // defined(ELPP_QT_LOGGING)
 #if defined(ELPP_BOOST_LOGGING)
-  ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(boost::container::vector)
-  ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(boost::container::stable_vector)
-  ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(boost::container::list)
-  ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(boost::container::deque)
-  ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(boost::container::map)
-  ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(boost::container::flat_map)
-  ELPP_ITERATOR_CONTAINER_LOG_THREE_ARG(boost::container::set)
-  ELPP_ITERATOR_CONTAINER_LOG_THREE_ARG(boost::container::flat_set)
+    ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(boost::container::vector)
+    ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(boost::container::stable_vector)
+    ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(boost::container::list)
+    ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG(boost::container::deque)
+    ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(boost::container::map)
+    ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG(boost::container::flat_map)
+    ELPP_ITERATOR_CONTAINER_LOG_THREE_ARG(boost::container::set)
+    ELPP_ITERATOR_CONTAINER_LOG_THREE_ARG(boost::container::flat_set)
 #endif  // defined(ELPP_BOOST_LOGGING)

-  /// @brief Macro used internally that can be used externally to make containers easylogging++ friendly
-  ///
-  /// @detail This macro expands to write an ostream& operator<< for container. This container is expected to
-  /// have begin() and end() methods that return respective iterators
-  /// @param ContainerType Type of container e.g, MyList from WX_DECLARE_LIST(int, MyList); in wxwidgets
-  /// @param SizeMethod Method used to get size of container.
-  /// @param ElementInstance Instance of element to be fed out. Insance name is "elem". See WXELPP_ENABLED macro
-  /// for an example usage
-#define MAKE_CONTAINERELPP_FRIENDLY(ContainerType, SizeMethod, ElementInstance) \
-el::base::type::ostream_t& operator<<(el::base::type::ostream_t& ss, const ContainerType& container) {\
-const el::base::type::char_t* sep = ELPP->hasFlag(el::LoggingFlag::NewLineForContainer) ? \
-ELPP_LITERAL("\n    ") : ELPP_LITERAL(", ");\
-ContainerType::const_iterator elem = container.begin();\
-ContainerType::const_iterator endElem = container.end();\
-std::size_t size_ = container.SizeMethod; \
-ss << ELPP_LITERAL("[");\
-for (std::size_t i = 0; elem != endElem && i < el::base::consts::kMaxLogPerContainer; ++i, ++elem) { \
-ss << ElementInstance;\
-ss << ((i < size_ - 1) ? sep : ELPP_LITERAL(""));\
-}\
-if (elem != endElem) {\
-ss << ELPP_LITERAL("...");\
-}\
-ss << ELPP_LITERAL("]");\
-return ss;\
-}
+    /// @brief Macro used internally that can be used externally to make containers easylogging++ friendly
+    ///
+    /// @detail This macro expands to write an ostream& operator<< for container. This container is expected to
+    /// have begin() and end() methods that return respective iterators
+    /// @param ContainerType Type of container e.g, MyList from WX_DECLARE_LIST(int, MyList); in wxwidgets
+    /// @param SizeMethod Method used to get size of container.
+    /// @param ElementInstance Instance of element to be fed out. Insance name is "elem". See WXELPP_ENABLED macro
+    /// for an example usage
+#define MAKE_CONTAINERELPP_FRIENDLY(ContainerType, SizeMethod, ElementInstance) \
+    el::base::type::ostream_t& operator<<(el::base::type::ostream_t& ss, const ContainerType& container) { \
+        const el::base::type::char_t* sep = \
+            ELPP->hasFlag(el::LoggingFlag::NewLineForContainer) ? ELPP_LITERAL("\n    ") : ELPP_LITERAL(", "); \
+        ContainerType::const_iterator elem = container.begin(); \
+        ContainerType::const_iterator endElem = container.end(); \
+        std::size_t size_ = container.SizeMethod; \
+        ss << ELPP_LITERAL("["); \
+        for (std::size_t i = 0; elem != endElem && i < el::base::consts::kMaxLogPerContainer; ++i, ++elem) { \
+            ss << ElementInstance; \
+            ss << ((i < size_ - 1) ? sep : ELPP_LITERAL("")); \
+        } \
+        if (elem != endElem) { \
+            ss << ELPP_LITERAL("..."); \
+        } \
+        ss << ELPP_LITERAL("]"); \
+        return ss; \
+    }
 #if defined(ELPP_WXWIDGETS_LOGGING)
-  ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(wxVector)
-# define ELPP_WX_PTR_ENABLED(ContainerType) MAKE_CONTAINERELPP_FRIENDLY(ContainerType, size(), *(*elem))
-# define ELPP_WX_ENABLED(ContainerType) MAKE_CONTAINERELPP_FRIENDLY(ContainerType, size(), (*elem))
-# define ELPP_WX_HASH_MAP_ENABLED(ContainerType) MAKE_CONTAINERELPP_FRIENDLY(ContainerType, size(), \
+    ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG(wxVector)
+#define ELPP_WX_PTR_ENABLED(ContainerType) MAKE_CONTAINERELPP_FRIENDLY(ContainerType, size(), *(*elem))
+#define ELPP_WX_ENABLED(ContainerType) MAKE_CONTAINERELPP_FRIENDLY(ContainerType, size(), (*elem))
+#define ELPP_WX_HASH_MAP_ENABLED(ContainerType) MAKE_CONTAINERELPP_FRIENDLY(ContainerType, size(), \
 ELPP_LITERAL("(") << elem->first << ELPP_LITERAL(", ") << elem->second << ELPP_LITERAL(")")
 #else
-# define ELPP_WX_PTR_ENABLED(ContainerType)
-# define ELPP_WX_ENABLED(ContainerType)
-# define ELPP_WX_HASH_MAP_ENABLED(ContainerType)
+#define ELPP_WX_PTR_ENABLED(ContainerType)
+#define ELPP_WX_ENABLED(ContainerType)
+#define ELPP_WX_HASH_MAP_ENABLED(ContainerType)
 #endif  // defined(ELPP_WXWIDGETS_LOGGING)
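As the doc comment above says, MAKE_CONTAINERELPP_FRIENDLY is usable outside the library for any container exposing const_iterator, begin()/end() and a size method. A sketch with a hypothetical MyIntList typedef:

    #include <list>
    #include "easylogging++.h"
    INITIALIZE_EASYLOGGINGPP

    typedef std::list<int> MyIntList;
    // Expands to an el::base::type::ostream_t& operator<< for MyIntList;
    // "elem" is the iterator name the macro injects, so *elem is the int.
    MAKE_CONTAINERELPP_FRIENDLY(MyIntList, size(), *elem)

    int main() {
        MyIntList l{10, 20, 30};
        LOG(INFO) << l;  // prints [10, 20, 30], capped at kMaxLogPerContainer elements
        return 0;
    }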
-  // Other classes
-  template <class Class>
-  ELPP_SIMPLE_LOG(const Class&)
+    // Other classes
+    template <class Class>
+    ELPP_SIMPLE_LOG(const Class&)
 #undef ELPP_SIMPLE_LOG
 #undef ELPP_ITERATOR_CONTAINER_LOG_ONE_ARG
 #undef ELPP_ITERATOR_CONTAINER_LOG_TWO_ARG
 #undef ELPP_ITERATOR_CONTAINER_LOG_THREE_ARG
 #undef ELPP_ITERATOR_CONTAINER_LOG_FOUR_ARG
 #undef ELPP_ITERATOR_CONTAINER_LOG_FIVE_ARG

- private:
-  Logger* m_logger;
-  const base::type::char_t* m_containerLogSeperator;
+   private : Logger* m_logger;
+    const base::type::char_t* m_containerLogSeperator;

-  template <typename Iterator>
-  MessageBuilder& writeIterator(Iterator begin_, Iterator end_, std::size_t size_) {
-    m_logger->stream() << ELPP_LITERAL("[");
-    for (std::size_t i = 0; begin_ != end_ && i < base::consts::kMaxLogPerContainer; ++i, ++begin_) {
-      operator << (*begin_);
-      m_logger->stream() << ((i < size_ - 1) ? m_containerLogSeperator : ELPP_LITERAL(""));
-    }
-    if (begin_ != end_) {
-      m_logger->stream() << ELPP_LITERAL("...");
-    }
-    m_logger->stream() << ELPP_LITERAL("]");
-    if (ELPP->hasFlag(LoggingFlag::AutoSpacing)) {
-      m_logger->stream() << " ";
-    }
-    return *this;
-  }
+    template <typename Iterator>
+    MessageBuilder&
+    writeIterator(Iterator begin_, Iterator end_, std::size_t size_) {
+        m_logger->stream() << ELPP_LITERAL("[");
+        for (std::size_t i = 0; begin_ != end_ && i < base::consts::kMaxLogPerContainer; ++i, ++begin_) {
+            operator<<(*begin_);
+            m_logger->stream() << ((i < size_ - 1) ? m_containerLogSeperator : ELPP_LITERAL(""));
+        }
+        if (begin_ != end_) {
+            m_logger->stream() << ELPP_LITERAL("...");
+        }
+        m_logger->stream() << ELPP_LITERAL("]");
+        if (ELPP->hasFlag(LoggingFlag::AutoSpacing)) {
+            m_logger->stream() << " ";
+        }
+        return *this;
+    }
 };

 /// @brief Writes nothing - Used when certain log is disabled
 class NullWriter : base::NoCopy {
  public:
-  NullWriter(void) {}
+    NullWriter(void) {
+    }

-  // Null manipulator
-  inline NullWriter& operator<<(std::ostream& (*)(std::ostream&)) {
-    return *this;
-  }
+    // Null manipulator
+    inline NullWriter&
+    operator<<(std::ostream& (*)(std::ostream&)) {
+        return *this;
+    }

-  template <typename T>
-  inline NullWriter& operator<<(const T&) {
-    return *this;
-  }
+    template <typename T>
+    inline NullWriter&
+    operator<<(const T&) {
+        return *this;
+    }

-  inline operator bool() {
-    return true;
-  }
+    inline operator bool() {
+        return true;
+    }
 };

 /// @brief Main entry point of each logging
 class Writer : base::NoCopy {
  public:
-  Writer(Level level, const char* file, base::type::LineNumber line,
-         const char* func, base::DispatchAction dispatchAction = base::DispatchAction::NormalLog,
-         base::type::VerboseLevel verboseLevel = 0) :
-    m_msg(nullptr), m_level(level), m_file(file), m_line(line), m_func(func), m_verboseLevel(verboseLevel),
-    m_logger(nullptr), m_proceed(false), m_dispatchAction(dispatchAction) {
-  }
-
-  Writer(LogMessage* msg, base::DispatchAction dispatchAction = base::DispatchAction::NormalLog) :
-    m_msg(msg), m_level(msg != nullptr ? msg->level() : Level::Unknown),
-    m_line(0), m_logger(nullptr), m_proceed(false), m_dispatchAction(dispatchAction) {
-  }
-
-  virtual ~Writer(void) {
-    processDispatch();
-  }
-
-  template <typename T>
-  inline Writer& operator<<(const T& log) {
-#if ELPP_LOGGING_ENABLED
-    if (m_proceed) {
-      m_messageBuilder << log;
-    }
-#endif  // ELPP_LOGGING_ENABLED
-    return *this;
-  }
-
-  inline Writer& operator<<(std::ostream& (*log)(std::ostream&)) {
-#if ELPP_LOGGING_ENABLED
-    if (m_proceed) {
-      m_messageBuilder << log;
-    }
-#endif  // ELPP_LOGGING_ENABLED
-    return *this;
-  }
-
-  inline operator bool() {
-    return true;
-  }
-
-  Writer& construct(Logger* logger, bool needLock = true);
-  Writer& construct(int count, const char* loggerIds, ...);
+    Writer(Level level, const char* file, base::type::LineNumber line, const char* func,
+           base::DispatchAction dispatchAction = base::DispatchAction::NormalLog,
+           base::type::VerboseLevel verboseLevel = 0)
+        : m_msg(nullptr),
+          m_level(level),
+          m_file(file),
+          m_line(line),
+          m_func(func),
+          m_verboseLevel(verboseLevel),
+          m_logger(nullptr),
+          m_proceed(false),
+          m_dispatchAction(dispatchAction) {
+    }
+
+    Writer(LogMessage* msg, base::DispatchAction dispatchAction = base::DispatchAction::NormalLog)
+        : m_msg(msg),
+          m_level(msg != nullptr ? msg->level() : Level::Unknown),
+          m_line(0),
+          m_logger(nullptr),
+          m_proceed(false),
+          m_dispatchAction(dispatchAction) {
+    }
+
+    virtual ~Writer(void) {
+        processDispatch();
+    }
+
+    template <typename T>
+    inline Writer&
+    operator<<(const T& log) {
+#if ELPP_LOGGING_ENABLED
+        if (m_proceed) {
+            m_messageBuilder << log;
+        }
+#endif  // ELPP_LOGGING_ENABLED
+        return *this;
+    }
+
+    inline Writer&
+    operator<<(std::ostream& (*log)(std::ostream&)) {
+#if ELPP_LOGGING_ENABLED
+        if (m_proceed) {
+            m_messageBuilder << log;
+        }
+#endif  // ELPP_LOGGING_ENABLED
+        return *this;
+    }
+
+    inline operator bool() {
+        return true;
+    }
+
+    Writer&
+    construct(Logger* logger, bool needLock = true);
+    Writer&
+    construct(int count, const char* loggerIds, ...);

  protected:
-  LogMessage* m_msg;
-  Level m_level;
-  const char* m_file;
-  const base::type::LineNumber m_line;
-  const char* m_func;
-  base::type::VerboseLevel m_verboseLevel;
-  Logger* m_logger;
-  bool m_proceed;
-  base::MessageBuilder m_messageBuilder;
-  base::DispatchAction m_dispatchAction;
-  std::vector<std::string> m_loggerIds;
-  friend class el::Helpers;
+    LogMessage* m_msg;
+    Level m_level;
+    const char* m_file;
+    const base::type::LineNumber m_line;
+    const char* m_func;
+    base::type::VerboseLevel m_verboseLevel;
+    Logger* m_logger;
+    bool m_proceed;
+    base::MessageBuilder m_messageBuilder;
+    base::DispatchAction m_dispatchAction;
+    std::vector<std::string> m_loggerIds;
+    friend class el::Helpers;

-  void initializeLogger(const std::string& loggerId, bool lookup = true, bool needLock = true);
-  void processDispatch();
-  void triggerDispatch(void);
+    void
+    initializeLogger(const std::string& loggerId, bool lookup = true, bool needLock = true);
+    void
+    processDispatch();
+    void
+    triggerDispatch(void);
 };

 class PErrorWriter : public base::Writer {
  public:
-  PErrorWriter(Level level, const char* file, base::type::LineNumber line,
-               const char* func, base::DispatchAction dispatchAction = base::DispatchAction::NormalLog,
-               base::type::VerboseLevel verboseLevel = 0) :
-    base::Writer(level, file, line, func, dispatchAction, verboseLevel) {
-  }
+    PErrorWriter(Level level, const char* file, base::type::LineNumber line, const char* func,
+                 base::DispatchAction dispatchAction = base::DispatchAction::NormalLog,
+                 base::type::VerboseLevel verboseLevel = 0)
+        : base::Writer(level, file, line, func, dispatchAction, verboseLevel) {
+    }

-  virtual ~PErrorWriter(void);
+    virtual ~PErrorWriter(void);
 };
 }  // namespace base

 // Logging from Logger class. Why this is here? Because we have Storage and Writer class available
 #if ELPP_VARIADIC_TEMPLATES_SUPPORTED
 template <typename T, typename... Args>
-void Logger::log_(Level level, int vlevel, const char* s, const T& value, const Args&... args) {
-  base::MessageBuilder b;
-  b.initialize(this);
-  while (*s) {
-    if (*s == base::consts::kFormatSpecifierChar) {
-      if (*(s + 1) == base::consts::kFormatSpecifierChar) {
-        ++s;
-      } else {
-        if (*(s + 1) == base::consts::kFormatSpecifierCharValue) {
-          ++s;
-          b << value;
-          log_(level, vlevel, ++s, args...);
-          return;
-        }
-      }
-    }
-    b << *s++;
-  }
-  ELPP_INTERNAL_ERROR("Too many arguments provided. Unable to handle. Please provide more format specifiers", false);
-}
args) { + base::MessageBuilder b; + b.initialize(this); + while (*s) { + if (*s == base::consts::kFormatSpecifierChar) { + if (*(s + 1) == base::consts::kFormatSpecifierChar) { + ++s; + } else { + if (*(s + 1) == base::consts::kFormatSpecifierCharValue) { + ++s; + b << value; + log_(level, vlevel, ++s, args...); + return; + } + } } - } + b << *s++; } - b << *s++; - } - ELPP_INTERNAL_ERROR("Too many arguments provided. Unable to handle. Please provide more format specifiers", false); + ELPP_INTERNAL_ERROR("Too many arguments provided. Unable to handle. Please provide more format specifiers", false); } template -void Logger::log_(Level level, int vlevel, const T& log) { - if (level == Level::Verbose) { - if (ELPP->vRegistry()->allowed(vlevel, __FILE__)) { - base::Writer(Level::Verbose, "FILE", 0, "FUNCTION", - base::DispatchAction::NormalLog, vlevel).construct(this, false) << log; +void +Logger::log_(Level level, int vlevel, const T& log) { + if (level == Level::Verbose) { + if (ELPP->vRegistry()->allowed(vlevel, __FILE__)) { + base::Writer(Level::Verbose, "FILE", 0, "FUNCTION", base::DispatchAction::NormalLog, vlevel) + .construct(this, false) + << log; + } else { + stream().str(ELPP_LITERAL("")); + releaseLock(); + } } else { - stream().str(ELPP_LITERAL("")); - releaseLock(); + base::Writer(level, "FILE", 0, "FUNCTION").construct(this, false) << log; } - } else { - base::Writer(level, "FILE", 0, "FUNCTION").construct(this, false) << log; - } } template -inline void Logger::log(Level level, const char* s, const T& value, const Args&... args) { - acquireLock(); // released in Writer! - log_(level, 0, s, value, args...); +inline void +Logger::log(Level level, const char* s, const T& value, const Args&... args) { + acquireLock(); // released in Writer! + log_(level, 0, s, value, args...); } template -inline void Logger::log(Level level, const T& log) { - acquireLock(); // released in Writer! - log_(level, 0, log); +inline void +Logger::log(Level level, const T& log) { + acquireLock(); // released in Writer! + log_(level, 0, log); } -# if ELPP_VERBOSE_LOG +#if ELPP_VERBOSE_LOG template -inline void Logger::verbose(int vlevel, const char* s, const T& value, const Args&... args) { - acquireLock(); // released in Writer! - log_(el::Level::Verbose, vlevel, s, value, args...); +inline void +Logger::verbose(int vlevel, const char* s, const T& value, const Args&... args) { + acquireLock(); // released in Writer! + log_(el::Level::Verbose, vlevel, s, value, args...); } template -inline void Logger::verbose(int vlevel, const T& log) { - acquireLock(); // released in Writer! - log_(el::Level::Verbose, vlevel, log); +inline void +Logger::verbose(int vlevel, const T& log) { + acquireLock(); // released in Writer! + log_(el::Level::Verbose, vlevel, log); } -# else -template -inline void Logger::verbose(int, const char*, const T&, const Args&...) { - return; -} -template -inline void Logger::verbose(int, const T&) { - return; -} -# endif // ELPP_VERBOSE_LOG -# define LOGGER_LEVEL_WRITERS(FUNCTION_NAME, LOG_LEVEL)\ -template \ -inline void Logger::FUNCTION_NAME(const char* s, const T& value, const Args&... args) {\ -log(LOG_LEVEL, s, value, args...);\ -}\ -template \ -inline void Logger::FUNCTION_NAME(const T& value) {\ -log(LOG_LEVEL, value);\ -} -# define LOGGER_LEVEL_WRITERS_DISABLED(FUNCTION_NAME, LOG_LEVEL)\ -template \ -inline void Logger::FUNCTION_NAME(const char*, const T&, const Args&...) 
{\ -return;\ -}\ -template \ -inline void Logger::FUNCTION_NAME(const T&) {\ -return;\ -} - -# if ELPP_INFO_LOG -LOGGER_LEVEL_WRITERS(info, Level::Info) -# else -LOGGER_LEVEL_WRITERS_DISABLED(info, Level::Info) -# endif // ELPP_INFO_LOG -# if ELPP_DEBUG_LOG -LOGGER_LEVEL_WRITERS(debug, Level::Debug) -# else -LOGGER_LEVEL_WRITERS_DISABLED(debug, Level::Debug) -# endif // ELPP_DEBUG_LOG -# if ELPP_WARNING_LOG -LOGGER_LEVEL_WRITERS(warn, Level::Warning) -# else -LOGGER_LEVEL_WRITERS_DISABLED(warn, Level::Warning) -# endif // ELPP_WARNING_LOG -# if ELPP_ERROR_LOG -LOGGER_LEVEL_WRITERS(error, Level::Error) -# else -LOGGER_LEVEL_WRITERS_DISABLED(error, Level::Error) -# endif // ELPP_ERROR_LOG -# if ELPP_FATAL_LOG -LOGGER_LEVEL_WRITERS(fatal, Level::Fatal) -# else -LOGGER_LEVEL_WRITERS_DISABLED(fatal, Level::Fatal) -# endif // ELPP_FATAL_LOG -# if ELPP_TRACE_LOG -LOGGER_LEVEL_WRITERS(trace, Level::Trace) -# else -LOGGER_LEVEL_WRITERS_DISABLED(trace, Level::Trace) -# endif // ELPP_TRACE_LOG -# undef LOGGER_LEVEL_WRITERS -# undef LOGGER_LEVEL_WRITERS_DISABLED -#endif // ELPP_VARIADIC_TEMPLATES_SUPPORTED -#if ELPP_COMPILER_MSVC -# define ELPP_VARIADIC_FUNC_MSVC(variadicFunction, variadicArgs) variadicFunction variadicArgs -# define ELPP_VARIADIC_FUNC_MSVC_RUN(variadicFunction, ...) ELPP_VARIADIC_FUNC_MSVC(variadicFunction, (__VA_ARGS__)) -# define el_getVALength(...) ELPP_VARIADIC_FUNC_MSVC_RUN(el_resolveVALength, 0, ## __VA_ARGS__,\ -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) #else -# if ELPP_COMPILER_CLANG -# define el_getVALength(...) el_resolveVALength(0, __VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) -# else -# define el_getVALength(...) el_resolveVALength(0, ## __VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) -# endif // ELPP_COMPILER_CLANG -#endif // ELPP_COMPILER_MSVC +template +inline void +Logger::verbose(int, const char*, const T&, const Args&...) { + return; +} +template +inline void +Logger::verbose(int, const T&) { + return; +} +#endif // ELPP_VERBOSE_LOG +#define LOGGER_LEVEL_WRITERS(FUNCTION_NAME, LOG_LEVEL) \ + template \ + inline void Logger::FUNCTION_NAME(const char* s, const T& value, const Args&... args) { \ + log(LOG_LEVEL, s, value, args...); \ + } \ + template \ + inline void Logger::FUNCTION_NAME(const T& value) { \ + log(LOG_LEVEL, value); \ + } +#define LOGGER_LEVEL_WRITERS_DISABLED(FUNCTION_NAME, LOG_LEVEL) \ + template \ + inline void Logger::FUNCTION_NAME(const char*, const T&, const Args&...) 
{ \ + return; \ + } \ + template \ + inline void Logger::FUNCTION_NAME(const T&) { \ + return; \ + } + +#if ELPP_INFO_LOG +LOGGER_LEVEL_WRITERS(info, Level::Info) +#else +LOGGER_LEVEL_WRITERS_DISABLED(info, Level::Info) +#endif // ELPP_INFO_LOG +#if ELPP_DEBUG_LOG +LOGGER_LEVEL_WRITERS(debug, Level::Debug) +#else +LOGGER_LEVEL_WRITERS_DISABLED(debug, Level::Debug) +#endif // ELPP_DEBUG_LOG +#if ELPP_WARNING_LOG +LOGGER_LEVEL_WRITERS(warn, Level::Warning) +#else +LOGGER_LEVEL_WRITERS_DISABLED(warn, Level::Warning) +#endif // ELPP_WARNING_LOG +#if ELPP_ERROR_LOG +LOGGER_LEVEL_WRITERS(error, Level::Error) +#else +LOGGER_LEVEL_WRITERS_DISABLED(error, Level::Error) +#endif // ELPP_ERROR_LOG +#if ELPP_FATAL_LOG +LOGGER_LEVEL_WRITERS(fatal, Level::Fatal) +#else +LOGGER_LEVEL_WRITERS_DISABLED(fatal, Level::Fatal) +#endif // ELPP_FATAL_LOG +#if ELPP_TRACE_LOG +LOGGER_LEVEL_WRITERS(trace, Level::Trace) +#else +LOGGER_LEVEL_WRITERS_DISABLED(trace, Level::Trace) +#endif // ELPP_TRACE_LOG +#undef LOGGER_LEVEL_WRITERS +#undef LOGGER_LEVEL_WRITERS_DISABLED +#endif // ELPP_VARIADIC_TEMPLATES_SUPPORTED +#if ELPP_COMPILER_MSVC +#define ELPP_VARIADIC_FUNC_MSVC(variadicFunction, variadicArgs) variadicFunction variadicArgs +#define ELPP_VARIADIC_FUNC_MSVC_RUN(variadicFunction, ...) ELPP_VARIADIC_FUNC_MSVC(variadicFunction, (__VA_ARGS__)) +#define el_getVALength(...) \ + ELPP_VARIADIC_FUNC_MSVC_RUN(el_resolveVALength, 0, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#else +#if ELPP_COMPILER_CLANG +#define el_getVALength(...) el_resolveVALength(0, __VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#else +#define el_getVALength(...) el_resolveVALength(0, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#endif // ELPP_COMPILER_CLANG +#endif // ELPP_COMPILER_MSVC #define el_resolveVALength(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N #define ELPP_WRITE_LOG(writer, level, dispatchAction, ...) \ -writer(level, __FILE__, __LINE__, ELPP_FUNC, dispatchAction).construct(el_getVALength(__VA_ARGS__), __VA_ARGS__) -#define ELPP_WRITE_LOG_IF(writer, condition, level, dispatchAction, ...) if (condition) \ -writer(level, __FILE__, __LINE__, ELPP_FUNC, dispatchAction).construct(el_getVALength(__VA_ARGS__), __VA_ARGS__) + writer(level, __FILE__, __LINE__, ELPP_FUNC, dispatchAction).construct(el_getVALength(__VA_ARGS__), __VA_ARGS__) +#define ELPP_WRITE_LOG_IF(writer, condition, level, dispatchAction, ...) \ + if (condition) \ + writer(level, __FILE__, __LINE__, ELPP_FUNC, dispatchAction).construct(el_getVALength(__VA_ARGS__), __VA_ARGS__) #define ELPP_WRITE_LOG_EVERY_N(writer, occasion, level, dispatchAction, ...) \ -ELPP->validateEveryNCounter(__FILE__, __LINE__, occasion) && \ -writer(level, __FILE__, __LINE__, ELPP_FUNC, dispatchAction).construct(el_getVALength(__VA_ARGS__), __VA_ARGS__) -#define ELPP_WRITE_LOG_AFTER_N(writer, n, level, dispatchAction, ...) \ -ELPP->validateAfterNCounter(__FILE__, __LINE__, n) && \ -writer(level, __FILE__, __LINE__, ELPP_FUNC, dispatchAction).construct(el_getVALength(__VA_ARGS__), __VA_ARGS__) -#define ELPP_WRITE_LOG_N_TIMES(writer, n, level, dispatchAction, ...) 
\ -ELPP->validateNTimesCounter(__FILE__, __LINE__, n) && \ -writer(level, __FILE__, __LINE__, ELPP_FUNC, dispatchAction).construct(el_getVALength(__VA_ARGS__), __VA_ARGS__) + ELPP->validateEveryNCounter(__FILE__, __LINE__, occasion) && \ + writer(level, __FILE__, __LINE__, ELPP_FUNC, dispatchAction) \ + .construct(el_getVALength(__VA_ARGS__), __VA_ARGS__) +#define ELPP_WRITE_LOG_AFTER_N(writer, n, level, dispatchAction, ...) \ + ELPP->validateAfterNCounter(__FILE__, __LINE__, n) && writer(level, __FILE__, __LINE__, ELPP_FUNC, dispatchAction) \ + .construct(el_getVALength(__VA_ARGS__), __VA_ARGS__) +#define ELPP_WRITE_LOG_N_TIMES(writer, n, level, dispatchAction, ...) \ + ELPP->validateNTimesCounter(__FILE__, __LINE__, n) && writer(level, __FILE__, __LINE__, ELPP_FUNC, dispatchAction) \ + .construct(el_getVALength(__VA_ARGS__), __VA_ARGS__) #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING) class PerformanceTrackingData { public: - enum class DataType : base::type::EnumType { - Checkpoint = 1, Complete = 2 - }; - // Do not use constructor, will run into multiple definition error, use init(PerformanceTracker*) - explicit PerformanceTrackingData(DataType dataType) : m_performanceTracker(nullptr), - m_dataType(dataType), m_firstCheckpoint(false), m_file(""), m_line(0), m_func("") {} - inline const std::string* blockName(void) const; - inline const struct timeval* startTime(void) const; - inline const struct timeval* endTime(void) const; - inline const struct timeval* lastCheckpointTime(void) const; - inline const base::PerformanceTracker* performanceTracker(void) const { - return m_performanceTracker; - } - inline PerformanceTrackingData::DataType dataType(void) const { - return m_dataType; - } - inline bool firstCheckpoint(void) const { - return m_firstCheckpoint; - } - inline std::string checkpointId(void) const { - return m_checkpointId; - } - inline const char* file(void) const { - return m_file; - } - inline base::type::LineNumber line(void) const { - return m_line; - } - inline const char* func(void) const { - return m_func; - } - inline const base::type::string_t* formattedTimeTaken() const { - return &m_formattedTimeTaken; - } - inline const std::string& loggerId(void) const; - private: - base::PerformanceTracker* m_performanceTracker; - base::type::string_t m_formattedTimeTaken; - PerformanceTrackingData::DataType m_dataType; - bool m_firstCheckpoint; - std::string m_checkpointId; - const char* m_file; - base::type::LineNumber m_line; - const char* m_func; - inline void init(base::PerformanceTracker* performanceTracker, bool firstCheckpoint = false) { - m_performanceTracker = performanceTracker; - m_firstCheckpoint = firstCheckpoint; - } + enum class DataType : base::type::EnumType { Checkpoint = 1, Complete = 2 }; + // Do not use constructor, will run into multiple definition error, use init(PerformanceTracker*) + explicit PerformanceTrackingData(DataType dataType) + : m_performanceTracker(nullptr), + m_dataType(dataType), + m_firstCheckpoint(false), + m_file(""), + m_line(0), + m_func("") { + } + inline const std::string* + blockName(void) const; + inline const struct timeval* + startTime(void) const; + inline const struct timeval* + endTime(void) const; + inline const struct timeval* + lastCheckpointTime(void) const; + inline const base::PerformanceTracker* + performanceTracker(void) const { + return m_performanceTracker; + } + inline PerformanceTrackingData::DataType + dataType(void) const { + return m_dataType; + } + inline bool + firstCheckpoint(void) 
const { + return m_firstCheckpoint; + } + inline std::string + checkpointId(void) const { + return m_checkpointId; + } + inline const char* + file(void) const { + return m_file; + } + inline base::type::LineNumber + line(void) const { + return m_line; + } + inline const char* + func(void) const { + return m_func; + } + inline const base::type::string_t* + formattedTimeTaken() const { + return &m_formattedTimeTaken; + } + inline const std::string& + loggerId(void) const; - friend class el::base::PerformanceTracker; + private: + base::PerformanceTracker* m_performanceTracker; + base::type::string_t m_formattedTimeTaken; + PerformanceTrackingData::DataType m_dataType; + bool m_firstCheckpoint; + std::string m_checkpointId; + const char* m_file; + base::type::LineNumber m_line; + const char* m_func; + inline void + init(base::PerformanceTracker* performanceTracker, bool firstCheckpoint = false) { + m_performanceTracker = performanceTracker; + m_firstCheckpoint = firstCheckpoint; + } + + friend class el::base::PerformanceTracker; }; namespace base { /// @brief Represents performanceTracker block of code that conditionally adds performance status to log /// either when goes outside the scope of when checkpoint() is called class PerformanceTracker : public base::threading::ThreadSafe, public Loggable { public: - PerformanceTracker(const std::string& blockName, - base::TimestampUnit timestampUnit = base::TimestampUnit::Millisecond, - const std::string& loggerId = std::string(el::base::consts::kPerformanceLoggerId), - bool scopedLog = true, Level level = base::consts::kPerformanceTrackerDefaultLevel); - /// @brief Copy constructor - PerformanceTracker(const PerformanceTracker& t) : - m_blockName(t.m_blockName), m_timestampUnit(t.m_timestampUnit), m_loggerId(t.m_loggerId), m_scopedLog(t.m_scopedLog), - m_level(t.m_level), m_hasChecked(t.m_hasChecked), m_lastCheckpointId(t.m_lastCheckpointId), m_enabled(t.m_enabled), - m_startTime(t.m_startTime), m_endTime(t.m_endTime), m_lastCheckpointTime(t.m_lastCheckpointTime) { - } - virtual ~PerformanceTracker(void); - /// @brief A checkpoint for current performanceTracker block. - void checkpoint(const std::string& id = std::string(), const char* file = __FILE__, - base::type::LineNumber line = __LINE__, - const char* func = ""); - inline Level level(void) const { - return m_level; - } + PerformanceTracker(const std::string& blockName, + base::TimestampUnit timestampUnit = base::TimestampUnit::Millisecond, + const std::string& loggerId = std::string(el::base::consts::kPerformanceLoggerId), + bool scopedLog = true, Level level = base::consts::kPerformanceTrackerDefaultLevel); + /// @brief Copy constructor + PerformanceTracker(const PerformanceTracker& t) + : m_blockName(t.m_blockName), + m_timestampUnit(t.m_timestampUnit), + m_loggerId(t.m_loggerId), + m_scopedLog(t.m_scopedLog), + m_level(t.m_level), + m_hasChecked(t.m_hasChecked), + m_lastCheckpointId(t.m_lastCheckpointId), + m_enabled(t.m_enabled), + m_startTime(t.m_startTime), + m_endTime(t.m_endTime), + m_lastCheckpointTime(t.m_lastCheckpointTime) { + } + virtual ~PerformanceTracker(void); + /// @brief A checkpoint for current performanceTracker block. 
+ void + checkpoint(const std::string& id = std::string(), const char* file = __FILE__, + base::type::LineNumber line = __LINE__, const char* func = ""); + inline Level + level(void) const { + return m_level; + } + private: - std::string m_blockName; - base::TimestampUnit m_timestampUnit; - std::string m_loggerId; - bool m_scopedLog; - Level m_level; - bool m_hasChecked; - std::string m_lastCheckpointId; - bool m_enabled; - struct timeval m_startTime, m_endTime, m_lastCheckpointTime; + std::string m_blockName; + base::TimestampUnit m_timestampUnit; + std::string m_loggerId; + bool m_scopedLog; + Level m_level; + bool m_hasChecked; + std::string m_lastCheckpointId; + bool m_enabled; + struct timeval m_startTime, m_endTime, m_lastCheckpointTime; - PerformanceTracker(void); + PerformanceTracker(void); - friend class el::PerformanceTrackingData; - friend class base::DefaultPerformanceTrackingCallback; + friend class el::PerformanceTrackingData; + friend class base::DefaultPerformanceTrackingCallback; - const inline base::type::string_t getFormattedTimeTaken() const { - return getFormattedTimeTaken(m_startTime); - } + const inline base::type::string_t + getFormattedTimeTaken() const { + return getFormattedTimeTaken(m_startTime); + } - const base::type::string_t getFormattedTimeTaken(struct timeval startTime) const; + const base::type::string_t + getFormattedTimeTaken(struct timeval startTime) const; - virtual inline void log(el::base::type::ostream_t& os) const { - os << getFormattedTimeTaken(); - } + virtual inline void + log(el::base::type::ostream_t& os) const { + os << getFormattedTimeTaken(); + } }; class DefaultPerformanceTrackingCallback : public PerformanceTrackingCallback { protected: - void handle(const PerformanceTrackingData* data) { - m_data = data; - base::type::stringstream_t ss; - if (m_data->dataType() == PerformanceTrackingData::DataType::Complete) { - ss << ELPP_LITERAL("Executed [") << m_data->blockName()->c_str() << ELPP_LITERAL("] in [") << - *m_data->formattedTimeTaken() << ELPP_LITERAL("]"); - } else { - ss << ELPP_LITERAL("Performance checkpoint"); - if (!m_data->checkpointId().empty()) { - ss << ELPP_LITERAL(" [") << m_data->checkpointId().c_str() << ELPP_LITERAL("]"); - } - ss << ELPP_LITERAL(" for block [") << m_data->blockName()->c_str() << ELPP_LITERAL("] : [") << - *m_data->performanceTracker(); - if (!ELPP->hasFlag(LoggingFlag::DisablePerformanceTrackingCheckpointComparison) - && m_data->performanceTracker()->m_hasChecked) { - ss << ELPP_LITERAL(" ([") << *m_data->formattedTimeTaken() << ELPP_LITERAL("] from "); - if (m_data->performanceTracker()->m_lastCheckpointId.empty()) { - ss << ELPP_LITERAL("last checkpoint"); + void + handle(const PerformanceTrackingData* data) { + m_data = data; + base::type::stringstream_t ss; + if (m_data->dataType() == PerformanceTrackingData::DataType::Complete) { + ss << ELPP_LITERAL("Executed [") << m_data->blockName()->c_str() << ELPP_LITERAL("] in [") + << *m_data->formattedTimeTaken() << ELPP_LITERAL("]"); } else { - ss << ELPP_LITERAL("checkpoint '") << m_data->performanceTracker()->m_lastCheckpointId.c_str() << ELPP_LITERAL("'"); + ss << ELPP_LITERAL("Performance checkpoint"); + if (!m_data->checkpointId().empty()) { + ss << ELPP_LITERAL(" [") << m_data->checkpointId().c_str() << ELPP_LITERAL("]"); + } + ss << ELPP_LITERAL(" for block [") << m_data->blockName()->c_str() << ELPP_LITERAL("] : [") + << *m_data->performanceTracker(); + if (!ELPP->hasFlag(LoggingFlag::DisablePerformanceTrackingCheckpointComparison) && + 
m_data->performanceTracker()->m_hasChecked) { + ss << ELPP_LITERAL(" ([") << *m_data->formattedTimeTaken() << ELPP_LITERAL("] from "); + if (m_data->performanceTracker()->m_lastCheckpointId.empty()) { + ss << ELPP_LITERAL("last checkpoint"); + } else { + ss << ELPP_LITERAL("checkpoint '") << m_data->performanceTracker()->m_lastCheckpointId.c_str() + << ELPP_LITERAL("'"); + } + ss << ELPP_LITERAL(")]"); + } else { + ss << ELPP_LITERAL("]"); + } } - ss << ELPP_LITERAL(")]"); - } else { - ss << ELPP_LITERAL("]"); - } + el::base::Writer(m_data->performanceTracker()->level(), m_data->file(), m_data->line(), m_data->func()) + .construct(1, m_data->loggerId().c_str()) + << ss.str(); } - el::base::Writer(m_data->performanceTracker()->level(), m_data->file(), m_data->line(), m_data->func()).construct(1, - m_data->loggerId().c_str()) << ss.str(); - } + private: - const PerformanceTrackingData* m_data; + const PerformanceTrackingData* m_data; }; } // namespace base -inline const std::string* PerformanceTrackingData::blockName() const { - return const_cast(&m_performanceTracker->m_blockName); +inline const std::string* +PerformanceTrackingData::blockName() const { + return const_cast(&m_performanceTracker->m_blockName); } -inline const struct timeval* PerformanceTrackingData::startTime() const { - return const_cast(&m_performanceTracker->m_startTime); +inline const struct timeval* +PerformanceTrackingData::startTime() const { + return const_cast(&m_performanceTracker->m_startTime); } -inline const struct timeval* PerformanceTrackingData::endTime() const { - return const_cast(&m_performanceTracker->m_endTime); +inline const struct timeval* +PerformanceTrackingData::endTime() const { + return const_cast(&m_performanceTracker->m_endTime); } -inline const struct timeval* PerformanceTrackingData::lastCheckpointTime() const { - return const_cast(&m_performanceTracker->m_lastCheckpointTime); +inline const struct timeval* +PerformanceTrackingData::lastCheckpointTime() const { + return const_cast(&m_performanceTracker->m_lastCheckpointTime); } -inline const std::string& PerformanceTrackingData::loggerId(void) const { - return m_performanceTracker->m_loggerId; +inline const std::string& +PerformanceTrackingData::loggerId(void) const { + return m_performanceTracker->m_loggerId; } -#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING) +#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING) namespace base { /// @brief Contains some internal debugging tools like crash handler and stack tracer namespace debug { #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) class StackTrace : base::NoCopy { public: - static const unsigned int kMaxStack = 64; - static const unsigned int kStackStart = 2; // We want to skip c'tor and StackTrace::generateNew() - class StackTraceEntry { - public: - StackTraceEntry(std::size_t index, const std::string& loc, const std::string& demang, const std::string& hex, - const std::string& addr); - StackTraceEntry(std::size_t index, const std::string& loc) : - m_index(index), - m_location(loc) { + static const unsigned int kMaxStack = 64; + static const unsigned int kStackStart = 2; // We want to skip c'tor and StackTrace::generateNew() + class StackTraceEntry { + public: + StackTraceEntry(std::size_t index, const std::string& loc, const std::string& demang, const std::string& hex, + const std::string& addr); + StackTraceEntry(std::size_t index, const std::string& loc) : m_index(index), m_location(loc) { + } + std::size_t 
m_index; + std::string m_location; + std::string m_demangled; + std::string m_hex; + std::string m_addr; + friend std::ostream& + operator<<(std::ostream& ss, const StackTraceEntry& si); + + private: + StackTraceEntry(void); + }; + + StackTrace(void) { + generateNew(); } - std::size_t m_index; - std::string m_location; - std::string m_demangled; - std::string m_hex; - std::string m_addr; - friend std::ostream& operator<<(std::ostream& ss, const StackTraceEntry& si); - private: - StackTraceEntry(void); - }; + virtual ~StackTrace(void) { + } - StackTrace(void) { - generateNew(); - } + inline std::vector& + getLatestStack(void) { + return m_stack; + } - virtual ~StackTrace(void) { - } - - inline std::vector& getLatestStack(void) { - return m_stack; - } - - friend std::ostream& operator<<(std::ostream& os, const StackTrace& st); + friend std::ostream& + operator<<(std::ostream& os, const StackTrace& st); private: - std::vector m_stack; + std::vector m_stack; - void generateNew(void); + void + generateNew(void); }; /// @brief Handles unexpected crashes class CrashHandler : base::NoCopy { public: - typedef void (*Handler)(int); + typedef void (*Handler)(int); - explicit CrashHandler(bool useDefault); - explicit CrashHandler(const Handler& cHandler) { - setHandler(cHandler); - } - void setHandler(const Handler& cHandler); + explicit CrashHandler(bool useDefault); + explicit CrashHandler(const Handler& cHandler) { + setHandler(cHandler); + } + void + setHandler(const Handler& cHandler); private: - Handler m_handler; + Handler m_handler; }; #else class CrashHandler { public: - explicit CrashHandler(bool) {} + explicit CrashHandler(bool) { + } }; -#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) +#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) } // namespace debug } // namespace base extern base::debug::CrashHandler elCrashHandler; -#define MAKE_LOGGABLE(ClassType, ClassInstance, OutputStreamInstance) \ -el::base::type::ostream_t& operator<<(el::base::type::ostream_t& OutputStreamInstance, const ClassType& ClassInstance) +#define MAKE_LOGGABLE(ClassType, ClassInstance, OutputStreamInstance) \ + el::base::type::ostream_t& operator<<(el::base::type::ostream_t& OutputStreamInstance, \ + const ClassType& ClassInstance) /// @brief Initializes syslog with process ID, options and facility. 
calls closelog() on d'tor class SysLogInitializer { public: - SysLogInitializer(const char* processIdent, int options = 0, int facility = 0) { + SysLogInitializer(const char* processIdent, int options = 0, int facility = 0) { #if defined(ELPP_SYSLOG) - openlog(processIdent, options, facility); + openlog(processIdent, options, facility); #else - ELPP_UNUSED(processIdent); - ELPP_UNUSED(options); - ELPP_UNUSED(facility); + ELPP_UNUSED(processIdent); + ELPP_UNUSED(options); + ELPP_UNUSED(facility); #endif // defined(ELPP_SYSLOG) - } - virtual ~SysLogInitializer(void) { + } + virtual ~SysLogInitializer(void) { #if defined(ELPP_SYSLOG) - closelog(); + closelog(); #endif // defined(ELPP_SYSLOG) - } + } }; #define ELPP_INITIALIZE_SYSLOG(id, opt, fac) el::SysLogInitializer elSyslogInit(id, opt, fac) /// @brief Static helpers for developers class Helpers : base::StaticClass { public: - /// @brief Shares logging repository (base::Storage) - static inline void setStorage(base::type::StoragePointer storage) { - ELPP = storage; - } - /// @return Main storage repository - static inline base::type::StoragePointer storage() { - return ELPP; - } - /// @brief Sets application arguments and figures out whats active for logging and whats not. - static inline void setArgs(int argc, char** argv) { - ELPP->setApplicationArguments(argc, argv); - } - /// @copydoc setArgs(int argc, char** argv) - static inline void setArgs(int argc, const char** argv) { - ELPP->setApplicationArguments(argc, const_cast(argv)); - } - /// @brief Sets thread name for current thread. Requires std::thread - static inline void setThreadName(const std::string& name) { - ELPP->setThreadName(name); - } - static inline std::string getThreadName() { - return ELPP->getThreadName(base::threading::getCurrentThreadId()); - } -#if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) - /// @brief Overrides default crash handler and installs custom handler. - /// @param crashHandler A functor with no return type that takes single int argument. 
- /// Handler is a typedef with specification: void (*Handler)(int)
- static inline void setCrashHandler(const el::base::debug::CrashHandler::Handler& crashHandler) {
- el::elCrashHandler.setHandler(crashHandler);
- }
- /// @brief Abort due to crash with signal in parameter
- /// @param sig Crash signal
- static void crashAbort(int sig, const char* sourceFile = "", unsigned int long line = 0);
- /// @brief Logs reason of crash as per sig
- /// @param sig Crash signal
- /// @param stackTraceIfAvailable Includes stack trace if available
- /// @param level Logging level
- /// @param logger Logger to use for logging
- static void logCrashReason(int sig, bool stackTraceIfAvailable = false,
- Level level = Level::Fatal, const char* logger = base::consts::kDefaultLoggerId);
-#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG)
- /// @brief Installs pre rollout callback, this callback is triggered when log file is about to be rolled out
- /// (can be useful for backing up)
- static inline void installPreRollOutCallback(const PreRollOutCallback& callback) {
- ELPP->setPreRollOutCallback(callback);
- }
- /// @brief Uninstalls pre rollout callback
- static inline void uninstallPreRollOutCallback(void) {
- ELPP->unsetPreRollOutCallback();
- }
- /// @brief Installs post log dispatch callback, this callback is triggered when log is dispatched
- template
- static inline bool installLogDispatchCallback(const std::string& id) {
- return ELPP->installLogDispatchCallback(id);
- }
- /// @brief Uninstalls log dispatch callback
- template
- static inline void uninstallLogDispatchCallback(const std::string& id) {
- ELPP->uninstallLogDispatchCallback(id);
- }
- template
- static inline T* logDispatchCallback(const std::string& id) {
- return ELPP->logDispatchCallback(id);
- }
-#if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
- /// @brief Installs post performance tracking callback, this callback is triggered when performance tracking is finished
- template
- static inline bool installPerformanceTrackingCallback(const std::string& id) {
- return ELPP->installPerformanceTrackingCallback(id);
- }
- /// @brief Uninstalls post performance tracking handler
- template
- static inline void uninstallPerformanceTrackingCallback(const std::string& id) {
- ELPP->uninstallPerformanceTrackingCallback(id);
- }
- template
- static inline T* performanceTrackingCallback(const std::string& id) {
- return ELPP->performanceTrackingCallback(id);
- }
-#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
- /// @brief Converts template to std::string - useful for loggable classes to log containers within log(std::ostream&) const
- template
- static std::string convertTemplateToStdString(const T& templ) {
- el::Logger* logger =
- ELPP->registeredLoggers()->get(el::base::consts::kDefaultLoggerId);
- if (logger == nullptr) {
- return std::string();
+ /// @brief Shares logging repository (base::Storage)
+ static inline void
+ setStorage(base::type::StoragePointer storage) {
+ ELPP = storage;
 }
+ /// @return Main storage repository
+ static inline base::type::StoragePointer
+ storage() {
+ return ELPP;
+ }
+ /// @brief Sets application arguments and figures out what's active for logging and what's not.
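+ /// A minimal call-site sketch (illustrative; assumes the standard
+ /// --v/--vmodule style verbose-level arguments parsed from argv):
+ ///
+ ///   int main(int argc, char** argv) {
+ ///     el::Helpers::setArgs(argc, argv);  // e.g. "./app --v=2" enables VLOG(2)
+ ///     LOG(INFO) << "application started";
+ ///   }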
+ static inline void + setArgs(int argc, char** argv) { + ELPP->setApplicationArguments(argc, argv); + } + /// @copydoc setArgs(int argc, char** argv) + static inline void + setArgs(int argc, const char** argv) { + ELPP->setApplicationArguments(argc, const_cast(argv)); + } + /// @brief Sets thread name for current thread. Requires std::thread + static inline void + setThreadName(const std::string& name) { + ELPP->setThreadName(name); + } + static inline std::string + getThreadName() { + return ELPP->getThreadName(base::threading::getCurrentThreadId()); + } +#if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) + /// @brief Overrides default crash handler and installs custom handler. + /// @param crashHandler A functor with no return type that takes single int argument. + /// Handler is a typedef with specification: void (*Handler)(int) + static inline void + setCrashHandler(const el::base::debug::CrashHandler::Handler& crashHandler) { + el::elCrashHandler.setHandler(crashHandler); + } + /// @brief Abort due to crash with signal in parameter + /// @param sig Crash signal + static void + crashAbort(int sig, const char* sourceFile = "", unsigned int long line = 0); + /// @brief Logs reason of crash as per sig + /// @param sig Crash signal + /// @param stackTraceIfAvailable Includes stack trace if available + /// @param level Logging level + /// @param logger Logger to use for logging + static void + logCrashReason(int sig, bool stackTraceIfAvailable = false, Level level = Level::Fatal, + const char* logger = base::consts::kDefaultLoggerId); +#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) + /// @brief Installs pre rollout callback, this callback is triggered when log file is about to be rolled out + /// (can be useful for backing up) + static inline void + installPreRollOutCallback(const PreRollOutCallback& callback) { + ELPP->setPreRollOutCallback(callback); + } + /// @brief Uninstalls pre rollout callback + static inline void + uninstallPreRollOutCallback(void) { + ELPP->unsetPreRollOutCallback(); + } + /// @brief Installs post log dispatch callback, this callback is triggered when log is dispatched + template + static inline bool + installLogDispatchCallback(const std::string& id) { + return ELPP->installLogDispatchCallback(id); + } + /// @brief Uninstalls log dispatch callback + template + static inline void + uninstallLogDispatchCallback(const std::string& id) { + ELPP->uninstallLogDispatchCallback(id); + } + template + static inline T* + logDispatchCallback(const std::string& id) { + return ELPP->logDispatchCallback(id); + } +#if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING) + /// @brief Installs post performance tracking callback, this callback is triggered when performance tracking is + /// finished + template + static inline bool + installPerformanceTrackingCallback(const std::string& id) { + return ELPP->installPerformanceTrackingCallback(id); + } + /// @brief Uninstalls post performance tracking handler + template + static inline void + uninstallPerformanceTrackingCallback(const std::string& id) { + ELPP->uninstallPerformanceTrackingCallback(id); + } + template + static inline T* + performanceTrackingCallback(const std::string& id) { + return ELPP->performanceTrackingCallback(id); + } +#endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING) + /// @brief Converts template to std::string - useful for loggable classes to log containers within + /// log(std::ostream&) const + template + static 
std::string + convertTemplateToStdString(const T& templ) { + el::Logger* logger = ELPP->registeredLoggers()->get(el::base::consts::kDefaultLoggerId); + if (logger == nullptr) { + return std::string(); + } + base::MessageBuilder b; + b.initialize(logger); + logger->acquireLock(); + b << templ; #if defined(ELPP_UNICODE) - std::string s = std::string(logger->stream().str().begin(), logger->stream().str().end()); + std::string s = std::string(logger->stream().str().begin(), logger->stream().str().end()); #else - std::string s = logger->stream().str(); + std::string s = logger->stream().str(); #endif // defined(ELPP_UNICODE) - logger->stream().str(ELPP_LITERAL("")); - logger->releaseLock(); - return s; - } - /// @brief Returns command line arguments (pointer) provided to easylogging++ - static inline const el::base::utils::CommandLineArgs* commandLineArgs(void) { - return ELPP->commandLineArgs(); - } - /// @brief Reserve space for custom format specifiers for performance - /// @see std::vector::reserve - static inline void reserveCustomFormatSpecifiers(std::size_t size) { - ELPP->m_customFormatSpecifiers.reserve(size); - } - /// @brief Installs user defined format specifier and handler - static inline void installCustomFormatSpecifier(const CustomFormatSpecifier& customFormatSpecifier) { - ELPP->installCustomFormatSpecifier(customFormatSpecifier); - } - /// @brief Uninstalls user defined format specifier and handler - static inline bool uninstallCustomFormatSpecifier(const char* formatSpecifier) { - return ELPP->uninstallCustomFormatSpecifier(formatSpecifier); - } - /// @brief Returns true if custom format specifier is installed - static inline bool hasCustomFormatSpecifier(const char* formatSpecifier) { - return ELPP->hasCustomFormatSpecifier(formatSpecifier); - } - static inline void validateFileRolling(Logger* logger, Level level) { - if (ELPP == nullptr || logger == nullptr) return; - logger->m_typedConfigurations->validateFileRolling(level, ELPP->preRollOutCallback()); - } + logger->stream().str(ELPP_LITERAL("")); + logger->releaseLock(); + return s; + } + /// @brief Returns command line arguments (pointer) provided to easylogging++ + static inline const el::base::utils::CommandLineArgs* + commandLineArgs(void) { + return ELPP->commandLineArgs(); + } + /// @brief Reserve space for custom format specifiers for performance + /// @see std::vector::reserve + static inline void + reserveCustomFormatSpecifiers(std::size_t size) { + ELPP->m_customFormatSpecifiers.reserve(size); + } + /// @brief Installs user defined format specifier and handler + static inline void + installCustomFormatSpecifier(const CustomFormatSpecifier& customFormatSpecifier) { + ELPP->installCustomFormatSpecifier(customFormatSpecifier); + } + /// @brief Uninstalls user defined format specifier and handler + static inline bool + uninstallCustomFormatSpecifier(const char* formatSpecifier) { + return ELPP->uninstallCustomFormatSpecifier(formatSpecifier); + } + /// @brief Returns true if custom format specifier is installed + static inline bool + hasCustomFormatSpecifier(const char* formatSpecifier) { + return ELPP->hasCustomFormatSpecifier(formatSpecifier); + } + static inline void + validateFileRolling(Logger* logger, Level level) { + if (ELPP == nullptr || logger == nullptr) + return; + logger->m_typedConfigurations->validateFileRolling(level, ELPP->preRollOutCallback()); + } }; /// @brief Static helpers to deal with loggers and their configurations class Loggers : base::StaticClass { public: - /// @brief Gets existing or 
registers new logger - static Logger* getLogger(const std::string& identity, bool registerIfNotAvailable = true); - /// @brief Changes default log builder for future loggers - static void setDefaultLogBuilder(el::LogBuilderPtr& logBuilderPtr); - /// @brief Installs logger registration callback, this callback is triggered when new logger is registered - template - static inline bool installLoggerRegistrationCallback(const std::string& id) { - return ELPP->registeredLoggers()->installLoggerRegistrationCallback(id); - } - /// @brief Uninstalls log dispatch callback - template - static inline void uninstallLoggerRegistrationCallback(const std::string& id) { - ELPP->registeredLoggers()->uninstallLoggerRegistrationCallback(id); - } - template - static inline T* loggerRegistrationCallback(const std::string& id) { - return ELPP->registeredLoggers()->loggerRegistrationCallback(id); - } - /// @brief Unregisters logger - use it only when you know what you are doing, you may unregister - /// loggers initialized / used by third-party libs. - static bool unregisterLogger(const std::string& identity); - /// @brief Whether or not logger with id is registered - static bool hasLogger(const std::string& identity); - /// @brief Reconfigures specified logger with new configurations - static Logger* reconfigureLogger(Logger* logger, const Configurations& configurations); - /// @brief Reconfigures logger with new configurations after looking it up using identity - static Logger* reconfigureLogger(const std::string& identity, const Configurations& configurations); - /// @brief Reconfigures logger's single configuration - static Logger* reconfigureLogger(const std::string& identity, ConfigurationType configurationType, - const std::string& value); - /// @brief Reconfigures all the existing loggers with new configurations - static void reconfigureAllLoggers(const Configurations& configurations); - /// @brief Reconfigures single configuration for all the loggers - static inline void reconfigureAllLoggers(ConfigurationType configurationType, const std::string& value) { - reconfigureAllLoggers(Level::Global, configurationType, value); - } - /// @brief Reconfigures single configuration for all the loggers for specified level - static void reconfigureAllLoggers(Level level, ConfigurationType configurationType, - const std::string& value); - /// @brief Sets default configurations. This configuration is used for future (and conditionally for existing) loggers - static void setDefaultConfigurations(const Configurations& configurations, - bool reconfigureExistingLoggers = false); - /// @brief Returns current default - static const Configurations* defaultConfigurations(void); - /// @brief Returns log stream reference pointer if needed by user - static const base::LogStreamsReferenceMap* logStreamsReference(void); - /// @brief Default typed configuration based on existing defaultConf - static base::TypedConfigurations defaultTypedConfigurations(void); - /// @brief Populates all logger IDs in current repository. - /// @param [out] targetList List of fill up. - static std::vector* populateAllLoggerIds(std::vector* targetList); - /// @brief Sets configurations from global configuration file. - static void configureFromGlobal(const char* globalConfigurationFilePath); - /// @brief Configures loggers using command line arg. Ensure you have already set command line args, - /// @return False if invalid argument or argument with no value provided, true if attempted to configure logger. 
- /// If true is returned that does not mean it has been configured successfully, it only means that it
- /// has attempeted to configure logger using configuration file provided in argument
- static bool configureFromArg(const char* argKey);
- /// @brief Flushes all loggers for all levels - Be careful if you dont know how many loggers are registered
- static void flushAll(void);
- /// @brief Adds logging flag used internally.
- static inline void addFlag(LoggingFlag flag) {
- ELPP->addFlag(flag);
- }
- /// @brief Removes logging flag used internally.
- static inline void removeFlag(LoggingFlag flag) {
- ELPP->removeFlag(flag);
- }
- /// @brief Determines whether or not certain flag is active
- static inline bool hasFlag(LoggingFlag flag) {
- return ELPP->hasFlag(flag);
- }
- /// @brief Adds flag and removes it when scope goes out
- class ScopedAddFlag {
- public:
- ScopedAddFlag(LoggingFlag flag) : m_flag(flag) {
- Loggers::addFlag(m_flag);
+ /// @brief Gets existing or registers new logger
+ static Logger*
+ getLogger(const std::string& identity, bool registerIfNotAvailable = true);
+ /// @brief Changes default log builder for future loggers
+ static void
+ setDefaultLogBuilder(el::LogBuilderPtr& logBuilderPtr);
+ /// @brief Installs logger registration callback, this callback is triggered when new logger is registered
+ template
+ static inline bool
+ installLoggerRegistrationCallback(const std::string& id) {
+ return ELPP->registeredLoggers()->installLoggerRegistrationCallback(id);
 }
- ~ScopedAddFlag(void) {
- Loggers::removeFlag(m_flag);
+ /// @brief Uninstalls logger registration callback
+ template
+ static inline void
+ uninstallLoggerRegistrationCallback(const std::string& id) {
+ ELPP->registeredLoggers()->uninstallLoggerRegistrationCallback(id);
 }
- private:
- LoggingFlag m_flag;
- };
- /// @brief Removes flag and add it when scope goes out
- class ScopedRemoveFlag {
- public:
- ScopedRemoveFlag(LoggingFlag flag) : m_flag(flag) {
- Loggers::removeFlag(m_flag);
+ template
+ static inline T*
+ loggerRegistrationCallback(const std::string& id) {
+ return ELPP->registeredLoggers()->loggerRegistrationCallback(id);
 }
- ~ScopedRemoveFlag(void) {
- Loggers::addFlag(m_flag);
+ /// @brief Unregisters logger - use it only when you know what you are doing, you may unregister
+ /// loggers initialized / used by third-party libs.
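+ /// A sketch of safe usage (the "scratch" logger id is hypothetical):
+ ///
+ ///   el::Logger* scratch = el::Loggers::getLogger("scratch");  // registers if absent
+ ///   scratch->info("temporary diagnostics");
+ ///   el::Loggers::unregisterLogger("scratch");  // safe: we registered it ourselves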
+ static bool
+ unregisterLogger(const std::string& identity);
+ /// @brief Whether or not logger with id is registered
+ static bool
+ hasLogger(const std::string& identity);
+ /// @brief Reconfigures specified logger with new configurations
+ static Logger*
+ reconfigureLogger(Logger* logger, const Configurations& configurations);
+ /// @brief Reconfigures logger with new configurations after looking it up using identity
+ static Logger*
+ reconfigureLogger(const std::string& identity, const Configurations& configurations);
+ /// @brief Reconfigures logger's single configuration
+ static Logger*
+ reconfigureLogger(const std::string& identity, ConfigurationType configurationType, const std::string& value);
+ /// @brief Reconfigures all the existing loggers with new configurations
+ static void
+ reconfigureAllLoggers(const Configurations& configurations);
+ /// @brief Reconfigures single configuration for all the loggers
+ static inline void
+ reconfigureAllLoggers(ConfigurationType configurationType, const std::string& value) {
+ reconfigureAllLoggers(Level::Global, configurationType, value);
 }
+ /// @brief Reconfigures single configuration for all the loggers for specified level
+ static void
+ reconfigureAllLoggers(Level level, ConfigurationType configurationType, const std::string& value);
+ /// @brief Sets default configurations. This configuration is used for future (and conditionally for existing)
+ /// loggers
+ static void
+ setDefaultConfigurations(const Configurations& configurations, bool reconfigureExistingLoggers = false);
+ /// @brief Returns current default
+ static const Configurations*
+ defaultConfigurations(void);
+ /// @brief Returns log stream reference pointer if needed by user
+ static const base::LogStreamsReferenceMap*
+ logStreamsReference(void);
+ /// @brief Default typed configuration based on existing defaultConf
+ static base::TypedConfigurations
+ defaultTypedConfigurations(void);
+ /// @brief Populates all logger IDs in current repository.
+ /// @param [out] targetList List to fill up.
+ static std::vector*
+ populateAllLoggerIds(std::vector* targetList);
+ /// @brief Sets configurations from global configuration file.
+ static void
+ configureFromGlobal(const char* globalConfigurationFilePath);
+ /// @brief Configures loggers using command line arg. Ensure you have already set command line args.
+ /// @return False if invalid argument or argument with no value provided, true if attempted to configure logger.
+ /// If true is returned that does not mean it has been configured successfully, it only means that it
+ /// has attempted to configure logger using configuration file provided in argument
+ static bool
+ configureFromArg(const char* argKey);
+ /// @brief Flushes all loggers for all levels - Be careful if you don't know how many loggers are registered
+ static void
+ flushAll(void);
+ /// @brief Adds logging flag used internally.
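+ /// A sketch of typical flag handling (flag choices are illustrative;
+ /// ScopedAddFlag below removes the flag again on scope exit):
+ ///
+ ///   el::Loggers::addFlag(el::LoggingFlag::ColoredTerminalOutput);
+ ///   {
+ ///     el::Loggers::ScopedAddFlag spacing(el::LoggingFlag::AutoSpacing);
+ ///     LOG(INFO) << "a" << "b";  // logged as "a b" while AutoSpacing is active
+ ///   }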
+ static inline void
+ addFlag(LoggingFlag flag) {
+ ELPP->addFlag(flag);
+ }
+ /// @brief Removes logging flag used internally.
+ static inline void
+ removeFlag(LoggingFlag flag) {
+ ELPP->removeFlag(flag);
+ }
+ /// @brief Determines whether or not certain flag is active
+ static inline bool
+ hasFlag(LoggingFlag flag) {
+ return ELPP->hasFlag(flag);
+ }
+ /// @brief Adds flag and removes it when scope goes out
+ class ScopedAddFlag {
+ public:
+ ScopedAddFlag(LoggingFlag flag) : m_flag(flag) {
+ Loggers::addFlag(m_flag);
+ }
+ ~ScopedAddFlag(void) {
+ Loggers::removeFlag(m_flag);
+ }
+
+ private:
+ LoggingFlag m_flag;
+ };
+ /// @brief Removes flag and adds it back when scope goes out
+ class ScopedRemoveFlag {
+ public:
+ ScopedRemoveFlag(LoggingFlag flag) : m_flag(flag) {
+ Loggers::removeFlag(m_flag);
+ }
+ ~ScopedRemoveFlag(void) {
+ Loggers::addFlag(m_flag);
+ }
+
+ private:
+ LoggingFlag m_flag;
+ };
+ /// @brief Sets hierarchy for logging. Requires the HierarchicalLogging logging flag to be enabled
+ static void
+ setLoggingLevel(Level level) {
+ ELPP->setLoggingLevel(level);
+ }
+ /// @brief Sets verbose level on the fly
+ static void
+ setVerboseLevel(base::type::VerboseLevel level);
+ /// @brief Gets current verbose level
+ static base::type::VerboseLevel
+ verboseLevel(void);
+ /// @brief Sets vmodules as specified (on the fly)
+ static void
+ setVModules(const char* modules);
+ /// @brief Clears vmodules
+ static void
+ clearVModules(void);
 };
 class VersionInfo : base::StaticClass {
 public:
- /// @brief Current version number
- static const std::string version(void);
+ /// @brief Current version number
+ static const std::string
+ version(void);
- /// @brief Release date of current version
- static const std::string releaseDate(void);
+ /// @brief Release date of current version
+ static const std::string
+ releaseDate(void);
 };
 } // namespace el
 #undef VLOG_IS_ON
@@ -3907,9 +4456,9 @@ class VersionInfo : base::StaticClass {
 #undef TIMED_FUNC_IF
 #undef ELPP_MIN_UNIT
 #if defined(ELPP_PERFORMANCE_MICROSECONDS)
-# define ELPP_MIN_UNIT el::base::TimestampUnit::Microsecond
+#define ELPP_MIN_UNIT el::base::TimestampUnit::Microsecond
 #else
-# define ELPP_MIN_UNIT el::base::TimestampUnit::Millisecond
+#define ELPP_MIN_UNIT el::base::TimestampUnit::Millisecond
 #endif // (defined(ELPP_PERFORMANCE_MICROSECONDS))
 /// @brief Performance tracked scope. Performance gets written when goes out of scope using
 /// 'performance' logger.
@@ -3918,18 +4467,24 @@ class VersionInfo : base::StaticClass {
 /// @see el::base::PerformanceTracker
 /// @see el::base::PerformanceTracker::checkpoint
 // Note: Do not surround this definition with null macro because of obj instance
-#define TIMED_SCOPE_IF(obj, blockname, condition) el::base::type::PerformanceTrackerPtr obj( condition ? \
- new el::base::PerformanceTracker(blockname, ELPP_MIN_UNIT) : nullptr )
+#define TIMED_SCOPE_IF(obj, blockname, condition) \
+ el::base::type::PerformanceTrackerPtr obj(condition ? 
new el::base::PerformanceTracker(blockname, ELPP_MIN_UNIT) \
+ : nullptr)
 #define TIMED_SCOPE(obj, blockname) TIMED_SCOPE_IF(obj, blockname, true)
-#define TIMED_BLOCK(obj, blockName) for (struct { int i; el::base::type::PerformanceTrackerPtr timer; } obj = { 0, \
- el::base::type::PerformanceTrackerPtr(new el::base::PerformanceTracker(blockName, ELPP_MIN_UNIT)) }; obj.i < 1; ++obj.i)
+#define TIMED_BLOCK(obj, blockName) \
+ for (struct { \
+ int i; \
+ el::base::type::PerformanceTrackerPtr timer; \
+ } obj = {0, \
+ el::base::type::PerformanceTrackerPtr(new el::base::PerformanceTracker(blockName, ELPP_MIN_UNIT))}; \
+ obj.i < 1; ++obj.i)
 /// @brief Performance tracked function. Performance gets written when goes out of scope using
 /// 'performance' logger.
 ///
 /// @detail Please note in order to check the performance at a certain time you can use obj->checkpoint();
 /// @see el::base::PerformanceTracker
 /// @see el::base::PerformanceTracker::checkpoint
-#define TIMED_FUNC_IF(obj,condition) TIMED_SCOPE_IF(obj, ELPP_FUNC, condition)
+#define TIMED_FUNC_IF(obj, condition) TIMED_SCOPE_IF(obj, ELPP_FUNC, condition)
 #define TIMED_FUNC(obj) TIMED_SCOPE(obj, ELPP_FUNC)
 #undef PERFORMANCE_CHECKPOINT
 #undef PERFORMANCE_CHECKPOINT_WITH_ID
@@ -3987,212 +4542,216 @@ class VersionInfo : base::StaticClass {
 #undef CVERBOSE_N_TIMES
 // Normal logs
 #if ELPP_INFO_LOG
-# define CINFO(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Info, dispatchAction, __VA_ARGS__)
+#define CINFO(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Info, dispatchAction, __VA_ARGS__)
 #else
-# define CINFO(writer, dispatchAction, ...) el::base::NullWriter()
+#define CINFO(writer, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_INFO_LOG
 #if ELPP_WARNING_LOG
-# define CWARNING(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Warning, dispatchAction, __VA_ARGS__)
+#define CWARNING(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Warning, dispatchAction, __VA_ARGS__)
 #else
-# define CWARNING(writer, dispatchAction, ...) el::base::NullWriter()
+#define CWARNING(writer, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_WARNING_LOG
 #if ELPP_DEBUG_LOG
-# define CDEBUG(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Debug, dispatchAction, __VA_ARGS__)
+#define CDEBUG(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Debug, dispatchAction, __VA_ARGS__)
 #else
-# define CDEBUG(writer, dispatchAction, ...) el::base::NullWriter()
+#define CDEBUG(writer, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_DEBUG_LOG
 #if ELPP_ERROR_LOG
-# define CERROR(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Error, dispatchAction, __VA_ARGS__)
+#define CERROR(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Error, dispatchAction, __VA_ARGS__)
 #else
-# define CERROR(writer, dispatchAction, ...) el::base::NullWriter()
+#define CERROR(writer, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_ERROR_LOG
 #if ELPP_FATAL_LOG
-# define CFATAL(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Fatal, dispatchAction, __VA_ARGS__)
+#define CFATAL(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Fatal, dispatchAction, __VA_ARGS__)
 #else
-# define CFATAL(writer, dispatchAction, ...) el::base::NullWriter()
+#define CFATAL(writer, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_FATAL_LOG
 #if ELPP_TRACE_LOG
-# define CTRACE(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Trace, dispatchAction, __VA_ARGS__)
+#define CTRACE(writer, dispatchAction, ...) ELPP_WRITE_LOG(writer, el::Level::Trace, dispatchAction, __VA_ARGS__)
 #else
-# define CTRACE(writer, dispatchAction, ...) el::base::NullWriter()
+#define CTRACE(writer, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_TRACE_LOG
 #if ELPP_VERBOSE_LOG
-# define CVERBOSE(writer, vlevel, dispatchAction, ...) if (VLOG_IS_ON(vlevel)) writer(\
-el::Level::Verbose, __FILE__, __LINE__, ELPP_FUNC, dispatchAction, vlevel).construct(el_getVALength(__VA_ARGS__), __VA_ARGS__)
+#define CVERBOSE(writer, vlevel, dispatchAction, ...) \
+ if (VLOG_IS_ON(vlevel)) \
+ writer(el::Level::Verbose, __FILE__, __LINE__, ELPP_FUNC, dispatchAction, vlevel) \
+ .construct(el_getVALength(__VA_ARGS__), __VA_ARGS__)
 #else
-# define CVERBOSE(writer, vlevel, dispatchAction, ...) el::base::NullWriter()
+#define CVERBOSE(writer, vlevel, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_VERBOSE_LOG
 // Conditional logs
 #if ELPP_INFO_LOG
-# define CINFO_IF(writer, condition_, dispatchAction, ...) \
-ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Info, dispatchAction, __VA_ARGS__)
+#define CINFO_IF(writer, condition_, dispatchAction, ...) \
+ ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Info, dispatchAction, __VA_ARGS__)
 #else
-# define CINFO_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
+#define CINFO_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_INFO_LOG
 #if ELPP_WARNING_LOG
-# define CWARNING_IF(writer, condition_, dispatchAction, ...)\
-ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Warning, dispatchAction, __VA_ARGS__)
+#define CWARNING_IF(writer, condition_, dispatchAction, ...) \
+ ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Warning, dispatchAction, __VA_ARGS__)
 #else
-# define CWARNING_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
+#define CWARNING_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_WARNING_LOG
 #if ELPP_DEBUG_LOG
-# define CDEBUG_IF(writer, condition_, dispatchAction, ...)\
-ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Debug, dispatchAction, __VA_ARGS__)
+#define CDEBUG_IF(writer, condition_, dispatchAction, ...) \
+ ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Debug, dispatchAction, __VA_ARGS__)
 #else
-# define CDEBUG_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
+#define CDEBUG_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_DEBUG_LOG
 #if ELPP_ERROR_LOG
-# define CERROR_IF(writer, condition_, dispatchAction, ...)\
-ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Error, dispatchAction, __VA_ARGS__)
+#define CERROR_IF(writer, condition_, dispatchAction, ...) \
+ ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Error, dispatchAction, __VA_ARGS__)
 #else
-# define CERROR_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
+#define CERROR_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_ERROR_LOG
 #if ELPP_FATAL_LOG
-# define CFATAL_IF(writer, condition_, dispatchAction, ...)\
-ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Fatal, dispatchAction, __VA_ARGS__)
+#define CFATAL_IF(writer, condition_, dispatchAction, ...) \
+ ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Fatal, dispatchAction, __VA_ARGS__)
 #else
-# define CFATAL_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
+#define CFATAL_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_FATAL_LOG
 #if ELPP_TRACE_LOG
-# define CTRACE_IF(writer, condition_, dispatchAction, ...)\
-ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Trace, dispatchAction, __VA_ARGS__)
+#define CTRACE_IF(writer, condition_, dispatchAction, ...) \
+ ELPP_WRITE_LOG_IF(writer, (condition_), el::Level::Trace, dispatchAction, __VA_ARGS__)
 #else
-# define CTRACE_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
+#define CTRACE_IF(writer, condition_, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_TRACE_LOG
 #if ELPP_VERBOSE_LOG
-# define CVERBOSE_IF(writer, condition_, vlevel, dispatchAction, ...) if (VLOG_IS_ON(vlevel) && (condition_)) writer( \
-el::Level::Verbose, __FILE__, __LINE__, ELPP_FUNC, dispatchAction, vlevel).construct(el_getVALength(__VA_ARGS__), __VA_ARGS__)
+#define CVERBOSE_IF(writer, condition_, vlevel, dispatchAction, ...) \
+ if (VLOG_IS_ON(vlevel) && (condition_)) \
+ writer(el::Level::Verbose, __FILE__, __LINE__, ELPP_FUNC, dispatchAction, vlevel) \
+ .construct(el_getVALength(__VA_ARGS__), __VA_ARGS__)
 #else
-# define CVERBOSE_IF(writer, condition_, vlevel, dispatchAction, ...) el::base::NullWriter()
+#define CVERBOSE_IF(writer, condition_, vlevel, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_VERBOSE_LOG
 // Occasional logs
 #if ELPP_INFO_LOG
-# define CINFO_EVERY_N(writer, occasion, dispatchAction, ...)\
-ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Info, dispatchAction, __VA_ARGS__)
+#define CINFO_EVERY_N(writer, occasion, dispatchAction, ...) \
+ ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Info, dispatchAction, __VA_ARGS__)
 #else
-# define CINFO_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
+#define CINFO_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_INFO_LOG
 #if ELPP_WARNING_LOG
-# define CWARNING_EVERY_N(writer, occasion, dispatchAction, ...)\
-ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Warning, dispatchAction, __VA_ARGS__)
+#define CWARNING_EVERY_N(writer, occasion, dispatchAction, ...) \
+ ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Warning, dispatchAction, __VA_ARGS__)
 #else
-# define CWARNING_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
+#define CWARNING_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_WARNING_LOG
 #if ELPP_DEBUG_LOG
-# define CDEBUG_EVERY_N(writer, occasion, dispatchAction, ...)\
-ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Debug, dispatchAction, __VA_ARGS__)
+#define CDEBUG_EVERY_N(writer, occasion, dispatchAction, ...) \
+ ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Debug, dispatchAction, __VA_ARGS__)
 #else
-# define CDEBUG_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
+#define CDEBUG_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_DEBUG_LOG
 #if ELPP_ERROR_LOG
-# define CERROR_EVERY_N(writer, occasion, dispatchAction, ...)\
-ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Error, dispatchAction, __VA_ARGS__)
+#define CERROR_EVERY_N(writer, occasion, dispatchAction, ...) \
+ ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Error, dispatchAction, __VA_ARGS__)
 #else
-# define CERROR_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
+#define CERROR_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_ERROR_LOG
 #if ELPP_FATAL_LOG
-# define CFATAL_EVERY_N(writer, occasion, dispatchAction, ...)\
-ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Fatal, dispatchAction, __VA_ARGS__)
+#define CFATAL_EVERY_N(writer, occasion, dispatchAction, ...) \
+ ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Fatal, dispatchAction, __VA_ARGS__)
 #else
-# define CFATAL_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
+#define CFATAL_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_FATAL_LOG
 #if ELPP_TRACE_LOG
-# define CTRACE_EVERY_N(writer, occasion, dispatchAction, ...)\
-ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Trace, dispatchAction, __VA_ARGS__)
+#define CTRACE_EVERY_N(writer, occasion, dispatchAction, ...) \
+ ELPP_WRITE_LOG_EVERY_N(writer, occasion, el::Level::Trace, dispatchAction, __VA_ARGS__)
 #else
-# define CTRACE_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
+#define CTRACE_EVERY_N(writer, occasion, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_TRACE_LOG
 #if ELPP_VERBOSE_LOG
-# define CVERBOSE_EVERY_N(writer, occasion, vlevel, dispatchAction, ...)\
-CVERBOSE_IF(writer, ELPP->validateEveryNCounter(__FILE__, __LINE__, occasion), vlevel, dispatchAction, __VA_ARGS__)
+#define CVERBOSE_EVERY_N(writer, occasion, vlevel, dispatchAction, ...) \
+ CVERBOSE_IF(writer, ELPP->validateEveryNCounter(__FILE__, __LINE__, occasion), vlevel, dispatchAction, __VA_ARGS__)
 #else
-# define CVERBOSE_EVERY_N(writer, occasion, vlevel, dispatchAction, ...) el::base::NullWriter()
+#define CVERBOSE_EVERY_N(writer, occasion, vlevel, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_VERBOSE_LOG
 // After N logs
 #if ELPP_INFO_LOG
-# define CINFO_AFTER_N(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Info, dispatchAction, __VA_ARGS__)
+#define CINFO_AFTER_N(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Info, dispatchAction, __VA_ARGS__)
 #else
-# define CINFO_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CINFO_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_INFO_LOG
 #if ELPP_WARNING_LOG
-# define CWARNING_AFTER_N(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Warning, dispatchAction, __VA_ARGS__)
+#define CWARNING_AFTER_N(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Warning, dispatchAction, __VA_ARGS__)
 #else
-# define CWARNING_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CWARNING_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_WARNING_LOG
 #if ELPP_DEBUG_LOG
-# define CDEBUG_AFTER_N(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Debug, dispatchAction, __VA_ARGS__)
+#define CDEBUG_AFTER_N(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Debug, dispatchAction, __VA_ARGS__)
 #else
-# define CDEBUG_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CDEBUG_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_DEBUG_LOG
 #if ELPP_ERROR_LOG
-# define CERROR_AFTER_N(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Error, dispatchAction, __VA_ARGS__)
+#define CERROR_AFTER_N(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Error, dispatchAction, __VA_ARGS__)
 #else
-# define CERROR_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CERROR_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_ERROR_LOG
 #if ELPP_FATAL_LOG
-# define CFATAL_AFTER_N(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Fatal, dispatchAction, __VA_ARGS__)
+#define CFATAL_AFTER_N(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Fatal, dispatchAction, __VA_ARGS__)
 #else
-# define CFATAL_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CFATAL_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_FATAL_LOG
 #if ELPP_TRACE_LOG
-# define CTRACE_AFTER_N(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Trace, dispatchAction, __VA_ARGS__)
+#define CTRACE_AFTER_N(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_AFTER_N(writer, n, el::Level::Trace, dispatchAction, __VA_ARGS__)
 #else
-# define CTRACE_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CTRACE_AFTER_N(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_TRACE_LOG
 #if ELPP_VERBOSE_LOG
-# define CVERBOSE_AFTER_N(writer, n, vlevel, dispatchAction, ...)\
-CVERBOSE_IF(writer, ELPP->validateAfterNCounter(__FILE__, __LINE__, n), vlevel, dispatchAction, __VA_ARGS__)
+#define CVERBOSE_AFTER_N(writer, n, vlevel, dispatchAction, ...) \
+ CVERBOSE_IF(writer, ELPP->validateAfterNCounter(__FILE__, __LINE__, n), vlevel, dispatchAction, __VA_ARGS__)
 #else
-# define CVERBOSE_AFTER_N(writer, n, vlevel, dispatchAction, ...) el::base::NullWriter()
+#define CVERBOSE_AFTER_N(writer, n, vlevel, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_VERBOSE_LOG
 // N Times logs
 #if ELPP_INFO_LOG
-# define CINFO_N_TIMES(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Info, dispatchAction, __VA_ARGS__)
+#define CINFO_N_TIMES(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Info, dispatchAction, __VA_ARGS__)
 #else
-# define CINFO_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CINFO_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_INFO_LOG
 #if ELPP_WARNING_LOG
-# define CWARNING_N_TIMES(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Warning, dispatchAction, __VA_ARGS__)
+#define CWARNING_N_TIMES(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Warning, dispatchAction, __VA_ARGS__)
 #else
-# define CWARNING_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CWARNING_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_WARNING_LOG
 #if ELPP_DEBUG_LOG
-# define CDEBUG_N_TIMES(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Debug, dispatchAction, __VA_ARGS__)
+#define CDEBUG_N_TIMES(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Debug, dispatchAction, __VA_ARGS__)
 #else
-# define CDEBUG_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CDEBUG_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_DEBUG_LOG
 #if ELPP_ERROR_LOG
-# define CERROR_N_TIMES(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Error, dispatchAction, __VA_ARGS__)
+#define CERROR_N_TIMES(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Error, dispatchAction, __VA_ARGS__)
 #else
-# define CERROR_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CERROR_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_ERROR_LOG
 #if ELPP_FATAL_LOG
-# define CFATAL_N_TIMES(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Fatal, dispatchAction, __VA_ARGS__)
+#define CFATAL_N_TIMES(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Fatal, dispatchAction, __VA_ARGS__)
 #else
-# define CFATAL_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CFATAL_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_FATAL_LOG
 #if ELPP_TRACE_LOG
-# define CTRACE_N_TIMES(writer, n, dispatchAction, ...)\
-ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Trace, dispatchAction, __VA_ARGS__)
+#define CTRACE_N_TIMES(writer, n, dispatchAction, ...) \
+ ELPP_WRITE_LOG_N_TIMES(writer, n, el::Level::Trace, dispatchAction, __VA_ARGS__)
 #else
-# define CTRACE_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
+#define CTRACE_N_TIMES(writer, n, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_TRACE_LOG
 #if ELPP_VERBOSE_LOG
-# define CVERBOSE_N_TIMES(writer, n, vlevel, dispatchAction, ...)\
-CVERBOSE_IF(writer, ELPP->validateNTimesCounter(__FILE__, __LINE__, n), vlevel, dispatchAction, __VA_ARGS__)
+#define CVERBOSE_N_TIMES(writer, n, vlevel, dispatchAction, ...) \
+ CVERBOSE_IF(writer, ELPP->validateNTimesCounter(__FILE__, __LINE__, n), vlevel, dispatchAction, __VA_ARGS__)
 #else
-# define CVERBOSE_N_TIMES(writer, n, vlevel, dispatchAction, ...) el::base::NullWriter()
+#define CVERBOSE_N_TIMES(writer, n, vlevel, dispatchAction, ...) el::base::NullWriter()
 #endif // ELPP_VERBOSE_LOG
 //
 // Custom Loggers - Requires (level, dispatchAction, loggerId/s)
 //
@@ -4211,27 +4770,26 @@ CVERBOSE_IF(writer, ELPP->validateNTimesCounter(__FILE__, __LINE__, n), vlevel,
 #undef CLOG_N_TIMES
 #undef CVLOG_N_TIMES
 // Normal logs
-#define CLOG(LEVEL, ...)\
-C##LEVEL(el::base::Writer, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define CLOG(LEVEL, ...) C##LEVEL(el::base::Writer, el::base::DispatchAction::NormalLog, __VA_ARGS__)
 #define CVLOG(vlevel, ...) CVERBOSE(el::base::Writer, vlevel, el::base::DispatchAction::NormalLog, __VA_ARGS__)
 // Conditional logs
-#define CLOG_IF(condition, LEVEL, ...)\
-C##LEVEL##_IF(el::base::Writer, condition, el::base::DispatchAction::NormalLog, __VA_ARGS__)
-#define CVLOG_IF(condition, vlevel, ...)\
-CVERBOSE_IF(el::base::Writer, condition, vlevel, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define CLOG_IF(condition, LEVEL, ...) \
+ C##LEVEL##_IF(el::base::Writer, condition, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define CVLOG_IF(condition, vlevel, ...) \
+ CVERBOSE_IF(el::base::Writer, condition, vlevel, el::base::DispatchAction::NormalLog, __VA_ARGS__)
 // Hit counts based logs
-#define CLOG_EVERY_N(n, LEVEL, ...)\
-C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAction::NormalLog, __VA_ARGS__)
-#define CVLOG_EVERY_N(n, vlevel, ...)\
-CVERBOSE_EVERY_N(el::base::Writer, n, vlevel, el::base::DispatchAction::NormalLog, __VA_ARGS__)
-#define CLOG_AFTER_N(n, LEVEL, ...)\
-C##LEVEL##_AFTER_N(el::base::Writer, n, el::base::DispatchAction::NormalLog, __VA_ARGS__)
-#define CVLOG_AFTER_N(n, vlevel, ...)\
-CVERBOSE_AFTER_N(el::base::Writer, n, vlevel, el::base::DispatchAction::NormalLog, __VA_ARGS__)
-#define CLOG_N_TIMES(n, LEVEL, ...)\
-C##LEVEL##_N_TIMES(el::base::Writer, n, el::base::DispatchAction::NormalLog, __VA_ARGS__)
-#define CVLOG_N_TIMES(n, vlevel, ...)\
-CVERBOSE_N_TIMES(el::base::Writer, n, vlevel, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define CLOG_EVERY_N(n, LEVEL, ...) \
+ C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define CVLOG_EVERY_N(n, vlevel, ...) \
+ CVERBOSE_EVERY_N(el::base::Writer, n, vlevel, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define CLOG_AFTER_N(n, LEVEL, ...) \
+ C##LEVEL##_AFTER_N(el::base::Writer, n, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define CVLOG_AFTER_N(n, vlevel, ...) \
+ CVERBOSE_AFTER_N(el::base::Writer, n, vlevel, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define CLOG_N_TIMES(n, LEVEL, ...) \
+ C##LEVEL##_N_TIMES(el::base::Writer, n, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define CVLOG_N_TIMES(n, vlevel, ...) \
+ CVERBOSE_N_TIMES(el::base::Writer, n, vlevel, el::base::DispatchAction::NormalLog, __VA_ARGS__)
 //
 // Default Loggers macro using CLOG(), CLOG_VERBOSE() and CVLOG() macros
 //
@@ -4248,9 +4806,9 @@ CVERBOSE_N_TIMES(el::base::Writer, n, vlevel, el::base::DispatchAction::NormalLo
 #undef VLOG_N_TIMES
 #undef ELPP_CURR_FILE_LOGGER_ID
 #if defined(ELPP_DEFAULT_LOGGER)
-# define ELPP_CURR_FILE_LOGGER_ID ELPP_DEFAULT_LOGGER
+#define ELPP_CURR_FILE_LOGGER_ID ELPP_DEFAULT_LOGGER
 #else
-# define ELPP_CURR_FILE_LOGGER_ID el::base::consts::kDefaultLoggerId
+#define ELPP_CURR_FILE_LOGGER_ID el::base::consts::kDefaultLoggerId
 #endif
 #undef ELPP_TRACE
 #define ELPP_TRACE CLOG(TRACE, ELPP_CURR_FILE_LOGGER_ID)
@@ -4276,14 +4834,15 @@ CVERBOSE_N_TIMES(el::base::Writer, n, vlevel, el::base::DispatchAction::NormalLo
 #undef DCPLOG_IF
 #undef DPLOG
 #undef DPLOG_IF
-#define CPLOG(LEVEL, ...)\
-C##LEVEL(el::base::PErrorWriter, el::base::DispatchAction::NormalLog, __VA_ARGS__)
-#define CPLOG_IF(condition, LEVEL, ...)\
-C##LEVEL##_IF(el::base::PErrorWriter, condition, el::base::DispatchAction::NormalLog, __VA_ARGS__)
-#define DCPLOG(LEVEL, ...)\
-if (ELPP_DEBUG_LOG) C##LEVEL(el::base::PErrorWriter, el::base::DispatchAction::NormalLog, __VA_ARGS__)
-#define DCPLOG_IF(condition, LEVEL, ...)\
-C##LEVEL##_IF(el::base::PErrorWriter, (ELPP_DEBUG_LOG) && (condition), el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define CPLOG(LEVEL, ...) C##LEVEL(el::base::PErrorWriter, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define CPLOG_IF(condition, LEVEL, ...) \
+ C##LEVEL##_IF(el::base::PErrorWriter, condition, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define DCPLOG(LEVEL, ...) \
+ if (ELPP_DEBUG_LOG) \
+ C##LEVEL(el::base::PErrorWriter, el::base::DispatchAction::NormalLog, __VA_ARGS__)
+#define DCPLOG_IF(condition, LEVEL, ...) \
+ C##LEVEL##_IF(el::base::PErrorWriter, (ELPP_DEBUG_LOG) && (condition), el::base::DispatchAction::NormalLog, \
+ __VA_ARGS__)
 #define PLOG(LEVEL) CPLOG(LEVEL, ELPP_CURR_FILE_LOGGER_ID)
 #define PLOG_IF(condition, LEVEL) CPLOG_IF(condition, LEVEL, ELPP_CURR_FILE_LOGGER_ID)
 #define DPLOG(LEVEL) DCPLOG(LEVEL, ELPP_CURR_FILE_LOGGER_ID)
@@ -4310,53 +4869,60 @@ C##LEVEL##_IF(el::base::PErrorWriter, (ELPP_DEBUG_LOG) && (condition), el::base:
 #undef DSYSLOG_AFTER_N
 #undef DSYSLOG_N_TIMES
 #if defined(ELPP_SYSLOG)
-# define CSYSLOG(LEVEL, ...)\
-C##LEVEL(el::base::Writer, el::base::DispatchAction::SysLog, __VA_ARGS__)
-# define CSYSLOG_IF(condition, LEVEL, ...)\
-C##LEVEL##_IF(el::base::Writer, condition, el::base::DispatchAction::SysLog, __VA_ARGS__)
-# define CSYSLOG_EVERY_N(n, LEVEL, ...) C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
-# define CSYSLOG_AFTER_N(n, LEVEL, ...) C##LEVEL##_AFTER_N(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
-# define CSYSLOG_N_TIMES(n, LEVEL, ...) C##LEVEL##_N_TIMES(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
-# define SYSLOG(LEVEL) CSYSLOG(LEVEL, el::base::consts::kSysLogLoggerId)
-# define SYSLOG_IF(condition, LEVEL) CSYSLOG_IF(condition, LEVEL, el::base::consts::kSysLogLoggerId)
-# define SYSLOG_EVERY_N(n, LEVEL) CSYSLOG_EVERY_N(n, LEVEL, el::base::consts::kSysLogLoggerId)
-# define SYSLOG_AFTER_N(n, LEVEL) CSYSLOG_AFTER_N(n, LEVEL, el::base::consts::kSysLogLoggerId)
-# define SYSLOG_N_TIMES(n, LEVEL) CSYSLOG_N_TIMES(n, LEVEL, el::base::consts::kSysLogLoggerId)
-# define DCSYSLOG(LEVEL, ...) if (ELPP_DEBUG_LOG) C##LEVEL(el::base::Writer, el::base::DispatchAction::SysLog, __VA_ARGS__)
-# define DCSYSLOG_IF(condition, LEVEL, ...)\
-C##LEVEL##_IF(el::base::Writer, (ELPP_DEBUG_LOG) && (condition), el::base::DispatchAction::SysLog, __VA_ARGS__)
-# define DCSYSLOG_EVERY_N(n, LEVEL, ...)\
-if (ELPP_DEBUG_LOG) C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
-# define DCSYSLOG_AFTER_N(n, LEVEL, ...)\
-if (ELPP_DEBUG_LOG) C##LEVEL##_AFTER_N(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
-# define DCSYSLOG_N_TIMES(n, LEVEL, ...)\
-if (ELPP_DEBUG_LOG) C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
-# define DSYSLOG(LEVEL) DCSYSLOG(LEVEL, el::base::consts::kSysLogLoggerId)
-# define DSYSLOG_IF(condition, LEVEL) DCSYSLOG_IF(condition, LEVEL, el::base::consts::kSysLogLoggerId)
-# define DSYSLOG_EVERY_N(n, LEVEL) DCSYSLOG_EVERY_N(n, LEVEL, el::base::consts::kSysLogLoggerId)
-# define DSYSLOG_AFTER_N(n, LEVEL) DCSYSLOG_AFTER_N(n, LEVEL, el::base::consts::kSysLogLoggerId)
-# define DSYSLOG_N_TIMES(n, LEVEL) DCSYSLOG_N_TIMES(n, LEVEL, el::base::consts::kSysLogLoggerId)
+#define CSYSLOG(LEVEL, ...) C##LEVEL(el::base::Writer, el::base::DispatchAction::SysLog, __VA_ARGS__)
+#define CSYSLOG_IF(condition, LEVEL, ...) \
+ C##LEVEL##_IF(el::base::Writer, condition, el::base::DispatchAction::SysLog, __VA_ARGS__)
+#define CSYSLOG_EVERY_N(n, LEVEL, ...) \
+ C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
+#define CSYSLOG_AFTER_N(n, LEVEL, ...) \
+ C##LEVEL##_AFTER_N(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
+#define CSYSLOG_N_TIMES(n, LEVEL, ...) \
+ C##LEVEL##_N_TIMES(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
+#define SYSLOG(LEVEL) CSYSLOG(LEVEL, el::base::consts::kSysLogLoggerId)
+#define SYSLOG_IF(condition, LEVEL) CSYSLOG_IF(condition, LEVEL, el::base::consts::kSysLogLoggerId)
+#define SYSLOG_EVERY_N(n, LEVEL) CSYSLOG_EVERY_N(n, LEVEL, el::base::consts::kSysLogLoggerId)
+#define SYSLOG_AFTER_N(n, LEVEL) CSYSLOG_AFTER_N(n, LEVEL, el::base::consts::kSysLogLoggerId)
+#define SYSLOG_N_TIMES(n, LEVEL) CSYSLOG_N_TIMES(n, LEVEL, el::base::consts::kSysLogLoggerId)
+#define DCSYSLOG(LEVEL, ...) \
+ if (ELPP_DEBUG_LOG) \
+ C##LEVEL(el::base::Writer, el::base::DispatchAction::SysLog, __VA_ARGS__)
+#define DCSYSLOG_IF(condition, LEVEL, ...) \
+ C##LEVEL##_IF(el::base::Writer, (ELPP_DEBUG_LOG) && (condition), el::base::DispatchAction::SysLog, __VA_ARGS__)
+#define DCSYSLOG_EVERY_N(n, LEVEL, ...) \
+ if (ELPP_DEBUG_LOG) \
+ C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
+#define DCSYSLOG_AFTER_N(n, LEVEL, ...) \
+ if (ELPP_DEBUG_LOG) \
+ C##LEVEL##_AFTER_N(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
+#define DCSYSLOG_N_TIMES(n, LEVEL, ...) \
+ if (ELPP_DEBUG_LOG) \
+ C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAction::SysLog, __VA_ARGS__)
+#define DSYSLOG(LEVEL) DCSYSLOG(LEVEL, el::base::consts::kSysLogLoggerId)
+#define DSYSLOG_IF(condition, LEVEL) DCSYSLOG_IF(condition, LEVEL, el::base::consts::kSysLogLoggerId)
+#define DSYSLOG_EVERY_N(n, LEVEL) DCSYSLOG_EVERY_N(n, LEVEL, el::base::consts::kSysLogLoggerId)
+#define DSYSLOG_AFTER_N(n, LEVEL) DCSYSLOG_AFTER_N(n, LEVEL, el::base::consts::kSysLogLoggerId)
+#define DSYSLOG_N_TIMES(n, LEVEL) DCSYSLOG_N_TIMES(n, LEVEL, el::base::consts::kSysLogLoggerId)
 #else
-# define CSYSLOG(LEVEL, ...) el::base::NullWriter()
-# define CSYSLOG_IF(condition, LEVEL, ...) el::base::NullWriter()
-# define CSYSLOG_EVERY_N(n, LEVEL, ...) el::base::NullWriter()
-# define CSYSLOG_AFTER_N(n, LEVEL, ...) el::base::NullWriter()
-# define CSYSLOG_N_TIMES(n, LEVEL, ...) el::base::NullWriter()
-# define SYSLOG(LEVEL) el::base::NullWriter()
-# define SYSLOG_IF(condition, LEVEL) el::base::NullWriter()
-# define SYSLOG_EVERY_N(n, LEVEL) el::base::NullWriter()
-# define SYSLOG_AFTER_N(n, LEVEL) el::base::NullWriter()
-# define SYSLOG_N_TIMES(n, LEVEL) el::base::NullWriter()
-# define DCSYSLOG(LEVEL, ...) el::base::NullWriter()
-# define DCSYSLOG_IF(condition, LEVEL, ...) el::base::NullWriter()
-# define DCSYSLOG_EVERY_N(n, LEVEL, ...) el::base::NullWriter()
-# define DCSYSLOG_AFTER_N(n, LEVEL, ...) el::base::NullWriter()
-# define DCSYSLOG_N_TIMES(n, LEVEL, ...) el::base::NullWriter()
-# define DSYSLOG(LEVEL) el::base::NullWriter()
-# define DSYSLOG_IF(condition, LEVEL) el::base::NullWriter()
-# define DSYSLOG_EVERY_N(n, LEVEL) el::base::NullWriter()
-# define DSYSLOG_AFTER_N(n, LEVEL) el::base::NullWriter()
-# define DSYSLOG_N_TIMES(n, LEVEL) el::base::NullWriter()
+#define CSYSLOG(LEVEL, ...) el::base::NullWriter()
+#define CSYSLOG_IF(condition, LEVEL, ...) el::base::NullWriter()
+#define CSYSLOG_EVERY_N(n, LEVEL, ...) el::base::NullWriter()
+#define CSYSLOG_AFTER_N(n, LEVEL, ...) el::base::NullWriter()
+#define CSYSLOG_N_TIMES(n, LEVEL, ...) el::base::NullWriter()
+#define SYSLOG(LEVEL) el::base::NullWriter()
+#define SYSLOG_IF(condition, LEVEL) el::base::NullWriter()
+#define SYSLOG_EVERY_N(n, LEVEL) el::base::NullWriter()
+#define SYSLOG_AFTER_N(n, LEVEL) el::base::NullWriter()
+#define SYSLOG_N_TIMES(n, LEVEL) el::base::NullWriter()
+#define DCSYSLOG(LEVEL, ...) el::base::NullWriter()
+#define DCSYSLOG_IF(condition, LEVEL, ...) el::base::NullWriter()
+#define DCSYSLOG_EVERY_N(n, LEVEL, ...) el::base::NullWriter()
+#define DCSYSLOG_AFTER_N(n, LEVEL, ...) el::base::NullWriter()
+#define DCSYSLOG_N_TIMES(n, LEVEL, ...) el::base::NullWriter()
+#define DSYSLOG(LEVEL) el::base::NullWriter()
+#define DSYSLOG_IF(condition, LEVEL) el::base::NullWriter()
+#define DSYSLOG_EVERY_N(n, LEVEL) el::base::NullWriter()
+#define DSYSLOG_AFTER_N(n, LEVEL) el::base::NullWriter()
+#define DSYSLOG_N_TIMES(n, LEVEL) el::base::NullWriter()
 #endif // defined(ELPP_SYSLOG)
 //
 // Custom Debug Only Loggers - Requires (level, loggerId/s)
 //
@@ -4373,19 +4939,41 @@ if (ELPP_DEBUG_LOG) C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAc
 #undef DCLOG_N_TIMES
 #undef DCVLOG_N_TIMES
 // Normal logs
-#define DCLOG(LEVEL, ...) if (ELPP_DEBUG_LOG) CLOG(LEVEL, __VA_ARGS__)
-#define DCLOG_VERBOSE(vlevel, ...) if (ELPP_DEBUG_LOG) CLOG_VERBOSE(vlevel, __VA_ARGS__)
-#define DCVLOG(vlevel, ...) if (ELPP_DEBUG_LOG) CVLOG(vlevel, __VA_ARGS__)
+#define DCLOG(LEVEL, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CLOG(LEVEL, __VA_ARGS__)
+#define DCLOG_VERBOSE(vlevel, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CLOG_VERBOSE(vlevel, __VA_ARGS__)
+#define DCVLOG(vlevel, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CVLOG(vlevel, __VA_ARGS__)
 // Conditional logs
-#define DCLOG_IF(condition, LEVEL, ...) if (ELPP_DEBUG_LOG) CLOG_IF(condition, LEVEL, __VA_ARGS__)
-#define DCVLOG_IF(condition, vlevel, ...) if (ELPP_DEBUG_LOG) CVLOG_IF(condition, vlevel, __VA_ARGS__)
+#define DCLOG_IF(condition, LEVEL, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CLOG_IF(condition, LEVEL, __VA_ARGS__)
+#define DCVLOG_IF(condition, vlevel, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CVLOG_IF(condition, vlevel, __VA_ARGS__)
 // Hit counts based logs
-#define DCLOG_EVERY_N(n, LEVEL, ...) if (ELPP_DEBUG_LOG) CLOG_EVERY_N(n, LEVEL, __VA_ARGS__)
-#define DCVLOG_EVERY_N(n, vlevel, ...) if (ELPP_DEBUG_LOG) CVLOG_EVERY_N(n, vlevel, __VA_ARGS__)
-#define DCLOG_AFTER_N(n, LEVEL, ...) if (ELPP_DEBUG_LOG) CLOG_AFTER_N(n, LEVEL, __VA_ARGS__)
-#define DCVLOG_AFTER_N(n, vlevel, ...) if (ELPP_DEBUG_LOG) CVLOG_AFTER_N(n, vlevel, __VA_ARGS__)
-#define DCLOG_N_TIMES(n, LEVEL, ...) if (ELPP_DEBUG_LOG) CLOG_N_TIMES(n, LEVEL, __VA_ARGS__)
-#define DCVLOG_N_TIMES(n, vlevel, ...) if (ELPP_DEBUG_LOG) CVLOG_N_TIMES(n, vlevel, __VA_ARGS__)
+#define DCLOG_EVERY_N(n, LEVEL, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CLOG_EVERY_N(n, LEVEL, __VA_ARGS__)
+#define DCVLOG_EVERY_N(n, vlevel, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CVLOG_EVERY_N(n, vlevel, __VA_ARGS__)
+#define DCLOG_AFTER_N(n, LEVEL, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CLOG_AFTER_N(n, LEVEL, __VA_ARGS__)
+#define DCVLOG_AFTER_N(n, vlevel, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CVLOG_AFTER_N(n, vlevel, __VA_ARGS__)
+#define DCLOG_N_TIMES(n, LEVEL, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CLOG_N_TIMES(n, LEVEL, __VA_ARGS__)
+#define DCVLOG_N_TIMES(n, vlevel, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CVLOG_N_TIMES(n, vlevel, __VA_ARGS__)
 //
 // Default Debug Only Loggers macro using CLOG(), CLOG_VERBOSE() and CVLOG() macros
 //
@@ -4414,7 +5002,7 @@ if (ELPP_DEBUG_LOG) C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAc
 #define DVLOG_AFTER_N(n, vlevel) DCVLOG_AFTER_N(n, vlevel, ELPP_CURR_FILE_LOGGER_ID)
 #define DLOG_N_TIMES(n, LEVEL) DCLOG_N_TIMES(n, LEVEL, ELPP_CURR_FILE_LOGGER_ID)
 #define DVLOG_N_TIMES(n, vlevel) DCVLOG_N_TIMES(n, vlevel, ELPP_CURR_FILE_LOGGER_ID)
-#endif // defined(ELPP_NO_DEBUG_MACROS)
+#endif // defined(ELPP_NO_DEBUG_MACROS)
 #if !defined(ELPP_NO_CHECK_MACROS)
 // Check macros
 #undef CCHECK
@@ -4460,14 +5048,18 @@ if (ELPP_DEBUG_LOG) C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAc
 #define CHECK_GE(a, b) CCHECK_GE(a, b, ELPP_CURR_FILE_LOGGER_ID)
 #define CHECK_BOUNDS(val, min, max) CCHECK_BOUNDS(val, min, max, ELPP_CURR_FILE_LOGGER_ID)
 #define CCHECK_NOTNULL(ptr, ...) CCHECK((ptr) != nullptr, __VA_ARGS__)
-#define CCHECK_STREQ(str1, str2, ...) CLOG_IF(!el::base::utils::Str::cStringEq(str1, str2), FATAL, __VA_ARGS__) \
-<< "Check failed: [" << #str1 << " == " << #str2 << "] "
-#define CCHECK_STRNE(str1, str2, ...) CLOG_IF(el::base::utils::Str::cStringEq(str1, str2), FATAL, __VA_ARGS__) \
-<< "Check failed: [" << #str1 << " != " << #str2 << "] "
-#define CCHECK_STRCASEEQ(str1, str2, ...) CLOG_IF(!el::base::utils::Str::cStringCaseEq(str1, str2), FATAL, __VA_ARGS__) \
-<< "Check failed: [" << #str1 << " == " << #str2 << "] "
-#define CCHECK_STRCASENE(str1, str2, ...) CLOG_IF(el::base::utils::Str::cStringCaseEq(str1, str2), FATAL, __VA_ARGS__) \
-<< "Check failed: [" << #str1 << " != " << #str2 << "] "
+#define CCHECK_STREQ(str1, str2, ...) \
+ CLOG_IF(!el::base::utils::Str::cStringEq(str1, str2), FATAL, __VA_ARGS__) \
+ << "Check failed: [" << #str1 << " == " << #str2 << "] "
+#define CCHECK_STRNE(str1, str2, ...) \
+ CLOG_IF(el::base::utils::Str::cStringEq(str1, str2), FATAL, __VA_ARGS__) \
+ << "Check failed: [" << #str1 << " != " << #str2 << "] "
+#define CCHECK_STRCASEEQ(str1, str2, ...) \
+ CLOG_IF(!el::base::utils::Str::cStringCaseEq(str1, str2), FATAL, __VA_ARGS__) \
+ << "Check failed: [" << #str1 << " == " << #str2 << "] "
+#define CCHECK_STRCASENE(str1, str2, ...) \
+ CLOG_IF(el::base::utils::Str::cStringCaseEq(str1, str2), FATAL, __VA_ARGS__) \
+ << "Check failed: [" << #str1 << " != " << #str2 << "] "
 #define CHECK_NOTNULL(ptr) CCHECK_NOTNULL((ptr), ELPP_CURR_FILE_LOGGER_ID)
 #define CHECK_STREQ(str1, str2) CCHECK_STREQ(str1, str2, ELPP_CURR_FILE_LOGGER_ID)
 #define CHECK_STRNE(str1, str2) CCHECK_STRNE(str1, str2, ELPP_CURR_FILE_LOGGER_ID)
@@ -4497,20 +5089,48 @@ if (ELPP_DEBUG_LOG) C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAc
 #undef DCHECK_STRCASEEQ
 #undef DCHECK_STRCASENE
 #undef DPCHECK
-#define DCCHECK(condition, ...) if (ELPP_DEBUG_LOG) CCHECK(condition, __VA_ARGS__)
-#define DCCHECK_EQ(a, b, ...) if (ELPP_DEBUG_LOG) CCHECK_EQ(a, b, __VA_ARGS__)
-#define DCCHECK_NE(a, b, ...) if (ELPP_DEBUG_LOG) CCHECK_NE(a, b, __VA_ARGS__)
-#define DCCHECK_LT(a, b, ...) if (ELPP_DEBUG_LOG) CCHECK_LT(a, b, __VA_ARGS__)
-#define DCCHECK_GT(a, b, ...) if (ELPP_DEBUG_LOG) CCHECK_GT(a, b, __VA_ARGS__)
-#define DCCHECK_LE(a, b, ...) if (ELPP_DEBUG_LOG) CCHECK_LE(a, b, __VA_ARGS__)
-#define DCCHECK_GE(a, b, ...) if (ELPP_DEBUG_LOG) CCHECK_GE(a, b, __VA_ARGS__)
-#define DCCHECK_BOUNDS(val, min, max, ...) if (ELPP_DEBUG_LOG) CCHECK_BOUNDS(val, min, max, __VA_ARGS__)
-#define DCCHECK_NOTNULL(ptr, ...) if (ELPP_DEBUG_LOG) CCHECK_NOTNULL((ptr), __VA_ARGS__)
-#define DCCHECK_STREQ(str1, str2, ...) if (ELPP_DEBUG_LOG) CCHECK_STREQ(str1, str2, __VA_ARGS__)
-#define DCCHECK_STRNE(str1, str2, ...) if (ELPP_DEBUG_LOG) CCHECK_STRNE(str1, str2, __VA_ARGS__)
-#define DCCHECK_STRCASEEQ(str1, str2, ...) if (ELPP_DEBUG_LOG) CCHECK_STRCASEEQ(str1, str2, __VA_ARGS__)
-#define DCCHECK_STRCASENE(str1, str2, ...) if (ELPP_DEBUG_LOG) CCHECK_STRCASENE(str1, str2, __VA_ARGS__)
-#define DCPCHECK(condition, ...) if (ELPP_DEBUG_LOG) CPCHECK(condition, __VA_ARGS__)
+#define DCCHECK(condition, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK(condition, __VA_ARGS__)
+#define DCCHECK_EQ(a, b, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_EQ(a, b, __VA_ARGS__)
+#define DCCHECK_NE(a, b, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_NE(a, b, __VA_ARGS__)
+#define DCCHECK_LT(a, b, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_LT(a, b, __VA_ARGS__)
+#define DCCHECK_GT(a, b, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_GT(a, b, __VA_ARGS__)
+#define DCCHECK_LE(a, b, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_LE(a, b, __VA_ARGS__)
+#define DCCHECK_GE(a, b, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_GE(a, b, __VA_ARGS__)
+#define DCCHECK_BOUNDS(val, min, max, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_BOUNDS(val, min, max, __VA_ARGS__)
+#define DCCHECK_NOTNULL(ptr, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_NOTNULL((ptr), __VA_ARGS__)
+#define DCCHECK_STREQ(str1, str2, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_STREQ(str1, str2, __VA_ARGS__)
+#define DCCHECK_STRNE(str1, str2, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_STRNE(str1, str2, __VA_ARGS__)
+#define DCCHECK_STRCASEEQ(str1, str2, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_STRCASEEQ(str1, str2, __VA_ARGS__)
+#define DCCHECK_STRCASENE(str1, str2, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CCHECK_STRCASENE(str1, str2, __VA_ARGS__)
+#define DCPCHECK(condition, ...) \
+ if (ELPP_DEBUG_LOG) \
+ CPCHECK(condition, __VA_ARGS__)
 #define DCHECK(condition) DCCHECK(condition, ELPP_CURR_FILE_LOGGER_ID)
 #define DCHECK_EQ(a, b) DCCHECK_EQ(a, b, ELPP_CURR_FILE_LOGGER_ID)
 #define DCHECK_NE(a, b) DCCHECK_NE(a, b, ELPP_CURR_FILE_LOGGER_ID)
@@ -4525,45 +5145,49 @@ if (ELPP_DEBUG_LOG) C##LEVEL##_EVERY_N(el::base::Writer, n, el::base::DispatchAc
 #define DCHECK_STRCASEEQ(str1, str2) DCCHECK_STRCASEEQ(str1, str2, ELPP_CURR_FILE_LOGGER_ID)
 #define DCHECK_STRCASENE(str1, str2) DCCHECK_STRCASENE(str1, str2, ELPP_CURR_FILE_LOGGER_ID)
 #define DPCHECK(condition) DCPCHECK(condition, ELPP_CURR_FILE_LOGGER_ID)
-#endif // defined(ELPP_NO_CHECK_MACROS)
+#endif // defined(ELPP_NO_CHECK_MACROS)
 #if defined(ELPP_DISABLE_DEFAULT_CRASH_HANDLING)
-# define ELPP_USE_DEF_CRASH_HANDLER false
+#define ELPP_USE_DEF_CRASH_HANDLER false
 #else
-# define ELPP_USE_DEF_CRASH_HANDLER true
+#define ELPP_USE_DEF_CRASH_HANDLER true
 #endif // defined(ELPP_DISABLE_DEFAULT_CRASH_HANDLING)
 #define ELPP_CRASH_HANDLER_INIT
-#define ELPP_INIT_EASYLOGGINGPP(val) \
-namespace el { \
-namespace base { \
-el::base::type::StoragePointer elStorage(val); \
-} \
-el::base::debug::CrashHandler elCrashHandler(ELPP_USE_DEF_CRASH_HANDLER); \
-}
+#define ELPP_INIT_EASYLOGGINGPP(val) \
+ namespace el { \
+ namespace base { \
+ el::base::type::StoragePointer elStorage(val); \
+ } \
+ el::base::debug::CrashHandler elCrashHandler(ELPP_USE_DEF_CRASH_HANDLER); \
+ }
 #if ELPP_ASYNC_LOGGING
-# define INITIALIZE_EASYLOGGINGPP ELPP_INIT_EASYLOGGINGPP(new el::base::Storage(el::LogBuilderPtr(new el::base::DefaultLogBuilder()),\
-new el::base::AsyncDispatchWorker()))
+#define INITIALIZE_EASYLOGGINGPP \
+ ELPP_INIT_EASYLOGGINGPP(new el::base::Storage(el::LogBuilderPtr(new el::base::DefaultLogBuilder()), \
+ new el::base::AsyncDispatchWorker()))
 #else
-# define INITIALIZE_EASYLOGGINGPP ELPP_INIT_EASYLOGGINGPP(new el::base::Storage(el::LogBuilderPtr(new el::base::DefaultLogBuilder())))
+#define INITIALIZE_EASYLOGGINGPP \
+ ELPP_INIT_EASYLOGGINGPP(new el::base::Storage(el::LogBuilderPtr(new el::base::DefaultLogBuilder())))
 #endif // ELPP_ASYNC_LOGGING
-#define INITIALIZE_NULL_EASYLOGGINGPP \
-namespace el {\
-namespace base {\
-el::base::type::StoragePointer elStorage;\
-}\
-el::base::debug::CrashHandler elCrashHandler(ELPP_USE_DEF_CRASH_HANDLER);\
-}
-#define SHARE_EASYLOGGINGPP(initializedStorage)\
-namespace el {\
-namespace base {\
-el::base::type::StoragePointer elStorage(initializedStorage);\
-}\
-el::base::debug::CrashHandler elCrashHandler(ELPP_USE_DEF_CRASH_HANDLER);\
-}
+#define INITIALIZE_NULL_EASYLOGGINGPP \
+ namespace el { \
+ namespace base { \
+ el::base::type::StoragePointer elStorage; \
+ } \
+ el::base::debug::CrashHandler elCrashHandler(ELPP_USE_DEF_CRASH_HANDLER); \
+ }
+#define SHARE_EASYLOGGINGPP(initializedStorage) \
+ namespace el { \
+ namespace base { \
+ el::base::type::StoragePointer elStorage(initializedStorage); \
+ } \
+ el::base::debug::CrashHandler elCrashHandler(ELPP_USE_DEF_CRASH_HANDLER); \
+ }
 #if defined(ELPP_UNICODE)
-# define START_EASYLOGGINGPP(argc, argv) el::Helpers::setArgs(argc, argv); std::locale::global(std::locale(""))
+#define START_EASYLOGGINGPP(argc, argv) \
+ el::Helpers::setArgs(argc, argv); \
+ std::locale::global(std::locale(""))
#else
-# define START_EASYLOGGINGPP(argc, argv) el::Helpers::setArgs(argc, argv)
+#define START_EASYLOGGINGPP(argc, argv) el::Helpers::setArgs(argc, argv)
 #endif // defined(ELPP_UNICODE)
-#endif // EASYLOGGINGPP_H
+#endif // EASYLOGGINGPP_H
diff --git a/core/src/external/nlohmann/json.hpp b/core/src/external/nlohmann/json.hpp
index 2a32a82963..4acdcd3aea 100644
--- a/core/src/external/nlohmann/json.hpp
+++ b/core/src/external/nlohmann/json.hpp
@@ -34,60 +34,53 @@ SOFTWARE.
 #define NLOHMANN_JSON_VERSION_MINOR 7
 #define NLOHMANN_JSON_VERSION_PATCH 0
-#include <algorithm> // all_of, find, for_each
-#include <cassert> // assert
-#include <ciso646> // and, not, or
-#include <cstddef> // nullptr_t, ptrdiff_t, size_t
-#include <functional> // hash, less
-#include <initializer_list> // initializer_list
-#include <iosfwd> // istream, ostream
-#include <iterator> // random_access_iterator_tag
-#include <memory> // unique_ptr
-#include <numeric> // accumulate
-#include <string> // string, stoi, to_string
-#include <utility> // declval, forward, move, pair, swap
-#include <vector> // vector
+#include <algorithm> // all_of, find, for_each
+#include <cassert> // assert
+#include <ciso646> // and, not, or
+#include <cstddef> // nullptr_t, ptrdiff_t, size_t
+#include <functional> // hash, less
+#include <initializer_list> // initializer_list
+#include <iosfwd> // istream, ostream
+#include <iterator> // random_access_iterator_tag
+#include <memory> // unique_ptr
+#include <numeric> // accumulate
+#include <string> // string, stoi, to_string
+#include <utility> // declval, forward, move, pair, swap
+#include <vector> // vector
 // #include <nlohmann/adl_serializer.hpp>
-
 #include <utility>
 // #include <nlohmann/detail/conversions/from_json.hpp>
-
-#include <algorithm> // transform
-#include <array> // array
-#include <ciso646> // and, not
-#include <forward_list> // forward_list
-#include <iterator> // inserter, front_inserter, end
-#include <map> // map
-#include <string> // string
-#include <tuple> // tuple, make_tuple
-#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
-#include <unordered_map> // unordered_map
-#include <utility> // pair, declval
-#include <valarray> // valarray
+#include <algorithm> // transform
+#include <array> // array
+#include <ciso646> // and, not
+#include <forward_list> // forward_list
+#include <iterator> // inserter, front_inserter, end
+#include <map> // map
+#include <string> // string
+#include <tuple> // tuple, make_tuple
+#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
+#include <unordered_map> // unordered_map
+#include <utility> // pair, declval
+#include <valarray> // valarray
 // #include <nlohmann/detail/exceptions.hpp>
-
-#include <exception> // exception
-#include <stdexcept> // runtime_error
-#include <string> // to_string
+#include <exception> // exception
+#include <stdexcept> // runtime_error
+#include <string> // to_string
 // #include <nlohmann/detail/input/position_t.hpp>
+#include <cstddef> // size_t
-#include <cstddef> // size_t
-
-namespace nlohmann
-{
-namespace detail
-{
+namespace nlohmann {
+namespace detail {
 /// struct to capture the start position of the current token
-struct position_t
-{
+struct position_t {
 /// the total number of characters read
 std::size_t chars_read_total = 0;
 /// the number of characters read in the current line
@@ -96,19 +89,17 @@ struct position_t
 std::size_t lines_read = 0;
 /// conversion to size_t to preserve SAX interface
- constexpr operator size_t() const
- {
+ constexpr operator size_t() const {
 return chars_read_total;
 }
 };
-} // namespace detail
-} // namespace nlohmann
+} // namespace detail
+} // namespace nlohmann
 // #include <nlohmann/detail/macro_scope.hpp>
-
-#include <utility> // pair
 // #include <nlohmann/thirdparty/hedley/hedley.hpp>
 /* Hedley - https://nemequ.github.io/hedley
 * Created by Evan Nemerson <evan@nemerson.com>
 * ...
 */
 #if !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < 9)
 #if defined(JSON_HEDLEY_VERSION)
- #undef JSON_HEDLEY_VERSION
+#undef JSON_HEDLEY_VERSION
 #endif
 #define JSON_HEDLEY_VERSION 9
 #if defined(JSON_HEDLEY_STRINGIFY_EX)
- #undef JSON_HEDLEY_STRINGIFY_EX
+#undef JSON_HEDLEY_STRINGIFY_EX
 #endif
 #define JSON_HEDLEY_STRINGIFY_EX(x) #x
 #if defined(JSON_HEDLEY_STRINGIFY)
- #undef JSON_HEDLEY_STRINGIFY
+#undef JSON_HEDLEY_STRINGIFY
 #endif
 #define JSON_HEDLEY_STRINGIFY(x) JSON_HEDLEY_STRINGIFY_EX(x)
 #if defined(JSON_HEDLEY_CONCAT_EX)
- #undef JSON_HEDLEY_CONCAT_EX
+#undef JSON_HEDLEY_CONCAT_EX
 #endif
-#define JSON_HEDLEY_CONCAT_EX(a,b) a##b
+#define JSON_HEDLEY_CONCAT_EX(a, b) a##b
 #if defined(JSON_HEDLEY_CONCAT)
- #undef JSON_HEDLEY_CONCAT
+#undef JSON_HEDLEY_CONCAT
 #endif
-#define JSON_HEDLEY_CONCAT(a,b) JSON_HEDLEY_CONCAT_EX(a,b)
+#define JSON_HEDLEY_CONCAT(a, b) JSON_HEDLEY_CONCAT_EX(a, b)
 #if defined(JSON_HEDLEY_VERSION_ENCODE)
- #undef JSON_HEDLEY_VERSION_ENCODE
+#undef JSON_HEDLEY_VERSION_ENCODE
 #endif
-#define JSON_HEDLEY_VERSION_ENCODE(major,minor,revision) (((major) * 1000000) + ((minor) * 1000) + (revision))
+#define JSON_HEDLEY_VERSION_ENCODE(major, minor, revision) (((major)*1000000) + ((minor)*1000) + (revision))
 #if defined(JSON_HEDLEY_VERSION_DECODE_MAJOR)
- #undef JSON_HEDLEY_VERSION_DECODE_MAJOR
+#undef JSON_HEDLEY_VERSION_DECODE_MAJOR
 #endif
 #define JSON_HEDLEY_VERSION_DECODE_MAJOR(version) ((version) / 1000000)
 #if defined(JSON_HEDLEY_VERSION_DECODE_MINOR)
- #undef JSON_HEDLEY_VERSION_DECODE_MINOR
+#undef JSON_HEDLEY_VERSION_DECODE_MINOR
 #endif
 #define JSON_HEDLEY_VERSION_DECODE_MINOR(version) (((version) % 1000000) / 1000)
 #if defined(JSON_HEDLEY_VERSION_DECODE_REVISION)
- #undef JSON_HEDLEY_VERSION_DECODE_REVISION
+#undef JSON_HEDLEY_VERSION_DECODE_REVISION
 #endif
 #define JSON_HEDLEY_VERSION_DECODE_REVISION(version) ((version) % 1000)
 #if defined(JSON_HEDLEY_GNUC_VERSION)
- #undef JSON_HEDLEY_GNUC_VERSION
+#undef JSON_HEDLEY_GNUC_VERSION
 #endif
 #if defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__)
- #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__)
+#define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__)
 #elif defined(__GNUC__)
- #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0)
+#define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0)
 #endif
 #if defined(JSON_HEDLEY_GNUC_VERSION_CHECK)
- #undef JSON_HEDLEY_GNUC_VERSION_CHECK
+#undef JSON_HEDLEY_GNUC_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_GNUC_VERSION)
- #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GNUC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_GNUC_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_GNUC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_GNUC_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_MSVC_VERSION)
- #undef JSON_HEDLEY_MSVC_VERSION
+#undef JSON_HEDLEY_MSVC_VERSION
 #endif
 #if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 140000000)
- #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, (_MSC_FULL_VER % 100000) / 100)
+#define JSON_HEDLEY_MSVC_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, \
+ (_MSC_FULL_VER % 100000) / 100)
 #elif defined(_MSC_FULL_VER)
- #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10)
+#define JSON_HEDLEY_MSVC_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10)
 #elif defined(_MSC_VER)
- #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0)
+#define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0)
 #endif
 #if defined(JSON_HEDLEY_MSVC_VERSION_CHECK)
- #undef JSON_HEDLEY_MSVC_VERSION_CHECK
+#undef JSON_HEDLEY_MSVC_VERSION_CHECK
 #endif
 #if !defined(_MSC_VER)
- #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_MSVC_VERSION_CHECK(major, minor, patch) (0)
 #elif defined(_MSC_VER) && (_MSC_VER >= 1400)
- #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch)))
+#define JSON_HEDLEY_MSVC_VERSION_CHECK(major, minor, patch) \
+ (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch)))
 #elif defined(_MSC_VER) && (_MSC_VER >= 1200)
- #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch)))
+#define JSON_HEDLEY_MSVC_VERSION_CHECK(major, minor, patch) \
+ (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch)))
 #else
- #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_VER >= ((major * 100) + (minor)))
+#define JSON_HEDLEY_MSVC_VERSION_CHECK(major, minor, patch) (_MSC_VER >= ((major * 100) + (minor)))
 #endif
 #if defined(JSON_HEDLEY_INTEL_VERSION)
- #undef JSON_HEDLEY_INTEL_VERSION
+#undef JSON_HEDLEY_INTEL_VERSION
 #endif
 #if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE)
- #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE)
+#define JSON_HEDLEY_INTEL_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE)
 #elif defined(__INTEL_COMPILER)
- #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0)
+#define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0)
 #endif
 #if defined(JSON_HEDLEY_INTEL_VERSION_CHECK)
- #undef JSON_HEDLEY_INTEL_VERSION_CHECK
+#undef JSON_HEDLEY_INTEL_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_INTEL_VERSION)
- #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_INTEL_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_INTEL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_INTEL_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_PGI_VERSION)
- #undef JSON_HEDLEY_PGI_VERSION
+#undef JSON_HEDLEY_PGI_VERSION
 #endif
 #if defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__) && defined(__PGIC_PATCHLEVEL__)
- #define JSON_HEDLEY_PGI_VERSION JSON_HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__)
+#define JSON_HEDLEY_PGI_VERSION JSON_HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__)
 #endif
 #if defined(JSON_HEDLEY_PGI_VERSION_CHECK)
- #undef JSON_HEDLEY_PGI_VERSION_CHECK
+#undef JSON_HEDLEY_PGI_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_PGI_VERSION)
- #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PGI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_PGI_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_PGI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_PGI_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_SUNPRO_VERSION)
- #undef JSON_HEDLEY_SUNPRO_VERSION
+#undef JSON_HEDLEY_SUNPRO_VERSION
 #endif
 #if defined(__SUNPRO_C) && (__SUNPRO_C > 0x1000)
- #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10)
+#define JSON_HEDLEY_SUNPRO_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), \
+ (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10)
 #elif defined(__SUNPRO_C)
- #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C) & 0xf)
+#define JSON_HEDLEY_SUNPRO_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C)&0xf)
 #elif defined(__SUNPRO_CC) && (__SUNPRO_CC > 0x1000)
- #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), (__SUNPRO_CC & 0xf) * 10)
+#define JSON_HEDLEY_SUNPRO_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), \
+ (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), \
+ (__SUNPRO_CC & 0xf) * 10)
 #elif defined(__SUNPRO_CC)
- #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC) & 0xf)
+#define JSON_HEDLEY_SUNPRO_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC)&0xf)
 #endif
 #if defined(JSON_HEDLEY_SUNPRO_VERSION_CHECK)
- #undef JSON_HEDLEY_SUNPRO_VERSION_CHECK
+#undef JSON_HEDLEY_SUNPRO_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_SUNPRO_VERSION)
- #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_SUNPRO_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_SUNPRO_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION)
- #undef JSON_HEDLEY_EMSCRIPTEN_VERSION
+#undef JSON_HEDLEY_EMSCRIPTEN_VERSION
 #endif
 #if defined(__EMSCRIPTEN__)
- #define JSON_HEDLEY_EMSCRIPTEN_VERSION JSON_HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__)
+#define JSON_HEDLEY_EMSCRIPTEN_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__)
 #endif
 #if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK)
- #undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK
+#undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION)
- #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_EMSCRIPTEN_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_EMSCRIPTEN_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_ARM_VERSION)
- #undef JSON_HEDLEY_ARM_VERSION
+#undef JSON_HEDLEY_ARM_VERSION
 #endif
 #if defined(__CC_ARM) && defined(__ARMCOMPILER_VERSION)
- #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, (__ARMCOMPILER_VERSION % 10000) / 100)
+#define JSON_HEDLEY_ARM_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, \
+ (__ARMCOMPILER_VERSION % 10000) / 100)
 #elif defined(__CC_ARM) && defined(__ARMCC_VERSION)
- #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, (__ARMCC_VERSION % 10000) / 100)
+#define JSON_HEDLEY_ARM_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, \
+ (__ARMCC_VERSION % 10000) / 100)
 #endif
 #if defined(JSON_HEDLEY_ARM_VERSION_CHECK)
- #undef JSON_HEDLEY_ARM_VERSION_CHECK
+#undef JSON_HEDLEY_ARM_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_ARM_VERSION)
- #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_ARM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_ARM_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_ARM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_ARM_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_IBM_VERSION)
- #undef JSON_HEDLEY_IBM_VERSION
+#undef JSON_HEDLEY_IBM_VERSION
 #endif
 #if defined(__ibmxl__)
- #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, __ibmxl_modification__)
+#define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, __ibmxl_modification__)
 #elif defined(__xlC__) && defined(__xlC_ver__)
- #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff)
+#define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff)
 #elif defined(__xlC__)
- #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0)
+#define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0)
 #endif
 #if defined(JSON_HEDLEY_IBM_VERSION_CHECK)
- #undef JSON_HEDLEY_IBM_VERSION_CHECK
+#undef JSON_HEDLEY_IBM_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_IBM_VERSION)
- #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IBM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_IBM_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_IBM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_IBM_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_TI_VERSION)
- #undef JSON_HEDLEY_TI_VERSION
+#undef JSON_HEDLEY_TI_VERSION
 #endif
 #if defined(__TI_COMPILER_VERSION__)
- #define JSON_HEDLEY_TI_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#define JSON_HEDLEY_TI_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, \
+ (__TI_COMPILER_VERSION__ % 1000))
 #endif
 #if defined(JSON_HEDLEY_TI_VERSION_CHECK)
- #undef JSON_HEDLEY_TI_VERSION_CHECK
+#undef JSON_HEDLEY_TI_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_TI_VERSION)
- #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_TI_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_TI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_TI_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_CRAY_VERSION)
- #undef JSON_HEDLEY_CRAY_VERSION
+#undef JSON_HEDLEY_CRAY_VERSION
 #endif
 #if defined(_CRAYC)
- #if defined(_RELEASE_PATCHLEVEL)
- #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL)
- #else
- #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0)
- #endif
+#if defined(_RELEASE_PATCHLEVEL)
+#define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL)
+#else
+#define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0)
+#endif
 #endif
 #if defined(JSON_HEDLEY_CRAY_VERSION_CHECK)
- #undef JSON_HEDLEY_CRAY_VERSION_CHECK
+#undef JSON_HEDLEY_CRAY_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_CRAY_VERSION)
- #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_CRAY_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_CRAY_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_CRAY_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_CRAY_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_IAR_VERSION)
- #undef JSON_HEDLEY_IAR_VERSION
+#undef JSON_HEDLEY_IAR_VERSION
 #endif
 #if defined(__IAR_SYSTEMS_ICC__)
- #if __VER__ > 1000
- #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000))
- #else
- #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE(VER / 100, __VER__ % 100, 0)
- #endif
+#if __VER__ > 1000
+#define JSON_HEDLEY_IAR_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000))
+#else
+#define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE(VER / 100, __VER__ % 100, 0)
+#endif
 #endif
 #if defined(JSON_HEDLEY_IAR_VERSION_CHECK)
- #undef JSON_HEDLEY_IAR_VERSION_CHECK
+#undef JSON_HEDLEY_IAR_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_IAR_VERSION)
- #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IAR_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_IAR_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_IAR_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_IAR_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_TINYC_VERSION)
- #undef JSON_HEDLEY_TINYC_VERSION
+#undef JSON_HEDLEY_TINYC_VERSION
 #endif
 #if defined(__TINYC__)
- #define JSON_HEDLEY_TINYC_VERSION JSON_HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100)
+#define JSON_HEDLEY_TINYC_VERSION JSON_HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100)
 #endif
 #if defined(JSON_HEDLEY_TINYC_VERSION_CHECK)
- #undef JSON_HEDLEY_TINYC_VERSION_CHECK
+#undef JSON_HEDLEY_TINYC_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_TINYC_VERSION)
- #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TINYC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_TINYC_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_TINYC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_TINYC_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_DMC_VERSION)
- #undef JSON_HEDLEY_DMC_VERSION
+#undef JSON_HEDLEY_DMC_VERSION
 #endif
 #if defined(__DMC__)
- #define JSON_HEDLEY_DMC_VERSION JSON_HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf)
+#define JSON_HEDLEY_DMC_VERSION JSON_HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf)
 #endif
 #if defined(JSON_HEDLEY_DMC_VERSION_CHECK)
- #undef JSON_HEDLEY_DMC_VERSION_CHECK
+#undef JSON_HEDLEY_DMC_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_DMC_VERSION)
- #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_DMC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_DMC_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_DMC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_DMC_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_COMPCERT_VERSION)
- #undef JSON_HEDLEY_COMPCERT_VERSION
+#undef JSON_HEDLEY_COMPCERT_VERSION
 #endif
 #if defined(__COMPCERT_VERSION__)
- #define JSON_HEDLEY_COMPCERT_VERSION JSON_HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, __COMPCERT_VERSION__ % 100)
+#define JSON_HEDLEY_COMPCERT_VERSION \
+ JSON_HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, \
+ __COMPCERT_VERSION__ % 100)
 #endif
 #if defined(JSON_HEDLEY_COMPCERT_VERSION_CHECK)
- #undef JSON_HEDLEY_COMPCERT_VERSION_CHECK
+#undef JSON_HEDLEY_COMPCERT_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_COMPCERT_VERSION)
- #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_COMPCERT_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_COMPCERT_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_PELLES_VERSION)
- #undef JSON_HEDLEY_PELLES_VERSION
+#undef JSON_HEDLEY_PELLES_VERSION
 #endif
 #if defined(__POCC__)
- #define JSON_HEDLEY_PELLES_VERSION JSON_HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0)
+#define JSON_HEDLEY_PELLES_VERSION JSON_HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0)
 #endif
 #if defined(JSON_HEDLEY_PELLES_VERSION_CHECK)
- #undef JSON_HEDLEY_PELLES_VERSION_CHECK
+#undef JSON_HEDLEY_PELLES_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_PELLES_VERSION)
- #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PELLES_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_PELLES_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_PELLES_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_PELLES_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_GCC_VERSION)
- #undef JSON_HEDLEY_GCC_VERSION
+#undef JSON_HEDLEY_GCC_VERSION
 #endif
-#if \
- defined(JSON_HEDLEY_GNUC_VERSION) && \
- !defined(__clang__) && \
- !defined(JSON_HEDLEY_INTEL_VERSION) && \
- !defined(JSON_HEDLEY_PGI_VERSION) && \
- !defined(JSON_HEDLEY_ARM_VERSION) && \
- !defined(JSON_HEDLEY_TI_VERSION) && \
+#if defined(JSON_HEDLEY_GNUC_VERSION) && !defined(__clang__) && !defined(JSON_HEDLEY_INTEL_VERSION) && \
+ !defined(JSON_HEDLEY_PGI_VERSION) && !defined(JSON_HEDLEY_ARM_VERSION) && !defined(JSON_HEDLEY_TI_VERSION) && \
 !defined(__COMPCERT__)
- #define JSON_HEDLEY_GCC_VERSION JSON_HEDLEY_GNUC_VERSION
+#define JSON_HEDLEY_GCC_VERSION JSON_HEDLEY_GNUC_VERSION
 #endif
 #if defined(JSON_HEDLEY_GCC_VERSION_CHECK)
- #undef JSON_HEDLEY_GCC_VERSION_CHECK
+#undef JSON_HEDLEY_GCC_VERSION_CHECK
 #endif
 #if defined(JSON_HEDLEY_GCC_VERSION)
- #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#define JSON_HEDLEY_GCC_VERSION_CHECK(major, minor, patch) \
+ (JSON_HEDLEY_GCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
 #else
- #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_GCC_VERSION_CHECK(major, minor, patch) (0)
 #endif
 #if defined(JSON_HEDLEY_HAS_ATTRIBUTE)
- #undef JSON_HEDLEY_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_HAS_ATTRIBUTE
 #endif
 #if defined(__has_attribute)
- #define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute)
+#define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute)
 #else
- #define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) (0)
+#define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) (0)
 #endif
 #if defined(JSON_HEDLEY_GNUC_HAS_ATTRIBUTE)
- #undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE
 #endif
 #if defined(__has_attribute)
- #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) __has_attribute(attribute)
+#define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute, major, minor, patch) __has_attribute(attribute)
 #else
- #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute, major, minor, patch) \
+ JSON_HEDLEY_GNUC_VERSION_CHECK(major, minor, patch)
 #endif
 #if defined(JSON_HEDLEY_GCC_HAS_ATTRIBUTE)
- #undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE
 #endif
 #if defined(__has_attribute)
- #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) __has_attribute(attribute)
+#define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute, major, minor, patch) __has_attribute(attribute)
 #else
- #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute, major, minor, patch) JSON_HEDLEY_GCC_VERSION_CHECK(major, minor, patch)
 #endif
 #if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE)
- #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE
+#undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE
 #endif
 #if defined(__has_cpp_attribute) && defined(__cplusplus)
- #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute)
+#define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute)
 #else
- #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0)
+#define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0)
 #endif
 #if defined(JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE)
- #undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE
+#undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE
 #endif
 #if defined(__has_cpp_attribute) && defined(__cplusplus)
- #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute)
+#define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute, major, minor, patch) __has_cpp_attribute(attribute)
 #else
- #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#define 
JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute, major, minor, patch) \ + JSON_HEDLEY_GNUC_VERSION_CHECK(major, minor, patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE) - #undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE +#undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE #endif #if defined(__has_cpp_attribute) && defined(__cplusplus) - #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) +#define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute, major, minor, patch) __has_cpp_attribute(attribute) #else - #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute, major, minor, patch) \ + JSON_HEDLEY_GCC_VERSION_CHECK(major, minor, patch) #endif #if defined(JSON_HEDLEY_HAS_BUILTIN) - #undef JSON_HEDLEY_HAS_BUILTIN +#undef JSON_HEDLEY_HAS_BUILTIN #endif #if defined(__has_builtin) - #define JSON_HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin) +#define JSON_HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin) #else - #define JSON_HEDLEY_HAS_BUILTIN(builtin) (0) +#define JSON_HEDLEY_HAS_BUILTIN(builtin) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_BUILTIN) - #undef JSON_HEDLEY_GNUC_HAS_BUILTIN +#undef JSON_HEDLEY_GNUC_HAS_BUILTIN #endif #if defined(__has_builtin) - #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) +#define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin, major, minor, patch) __has_builtin(builtin) #else - #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin, major, minor, patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major, minor, patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_BUILTIN) - #undef JSON_HEDLEY_GCC_HAS_BUILTIN +#undef JSON_HEDLEY_GCC_HAS_BUILTIN #endif #if defined(__has_builtin) - #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) +#define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin, major, minor, patch) __has_builtin(builtin) #else - #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin, major, minor, patch) JSON_HEDLEY_GCC_VERSION_CHECK(major, minor, patch) #endif #if defined(JSON_HEDLEY_HAS_FEATURE) - #undef JSON_HEDLEY_HAS_FEATURE +#undef JSON_HEDLEY_HAS_FEATURE #endif #if defined(__has_feature) - #define JSON_HEDLEY_HAS_FEATURE(feature) __has_feature(feature) +#define JSON_HEDLEY_HAS_FEATURE(feature) __has_feature(feature) #else - #define JSON_HEDLEY_HAS_FEATURE(feature) (0) +#define JSON_HEDLEY_HAS_FEATURE(feature) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_FEATURE) - #undef JSON_HEDLEY_GNUC_HAS_FEATURE +#undef JSON_HEDLEY_GNUC_HAS_FEATURE #endif #if defined(__has_feature) - #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) +#define JSON_HEDLEY_GNUC_HAS_FEATURE(feature, major, minor, patch) __has_feature(feature) #else - #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#define JSON_HEDLEY_GNUC_HAS_FEATURE(feature, major, minor, patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major, minor, patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_FEATURE) - #undef JSON_HEDLEY_GCC_HAS_FEATURE +#undef JSON_HEDLEY_GCC_HAS_FEATURE #endif #if defined(__has_feature) - #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) +#define 
JSON_HEDLEY_GCC_HAS_FEATURE(feature, major, minor, patch) __has_feature(feature) #else - #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#define JSON_HEDLEY_GCC_HAS_FEATURE(feature, major, minor, patch) JSON_HEDLEY_GCC_VERSION_CHECK(major, minor, patch) #endif #if defined(JSON_HEDLEY_HAS_EXTENSION) - #undef JSON_HEDLEY_HAS_EXTENSION +#undef JSON_HEDLEY_HAS_EXTENSION #endif #if defined(__has_extension) - #define JSON_HEDLEY_HAS_EXTENSION(extension) __has_extension(extension) +#define JSON_HEDLEY_HAS_EXTENSION(extension) __has_extension(extension) #else - #define JSON_HEDLEY_HAS_EXTENSION(extension) (0) +#define JSON_HEDLEY_HAS_EXTENSION(extension) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_EXTENSION) - #undef JSON_HEDLEY_GNUC_HAS_EXTENSION +#undef JSON_HEDLEY_GNUC_HAS_EXTENSION #endif #if defined(__has_extension) - #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) +#define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension, major, minor, patch) __has_extension(extension) #else - #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension, major, minor, patch) \ + JSON_HEDLEY_GNUC_VERSION_CHECK(major, minor, patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_EXTENSION) - #undef JSON_HEDLEY_GCC_HAS_EXTENSION +#undef JSON_HEDLEY_GCC_HAS_EXTENSION #endif #if defined(__has_extension) - #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) +#define JSON_HEDLEY_GCC_HAS_EXTENSION(extension, major, minor, patch) __has_extension(extension) #else - #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#define JSON_HEDLEY_GCC_HAS_EXTENSION(extension, major, minor, patch) JSON_HEDLEY_GCC_VERSION_CHECK(major, minor, patch) #endif #if defined(JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE) - #undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE +#undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE #endif #if defined(__has_declspec_attribute) - #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute) +#define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute) #else - #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0) +#define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE) - #undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE +#undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE #endif #if defined(__has_declspec_attribute) - #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) +#define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute, major, minor, patch) __has_declspec_attribute(attribute) #else - #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute, major, minor, patch) \ + JSON_HEDLEY_GNUC_VERSION_CHECK(major, minor, patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE) - #undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE +#undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE #endif #if defined(__has_declspec_attribute) - #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) +#define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute, major, minor, patch) 
__has_declspec_attribute(attribute) #else - #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute, major, minor, patch) \ + JSON_HEDLEY_GCC_VERSION_CHECK(major, minor, patch) #endif #if defined(JSON_HEDLEY_HAS_WARNING) - #undef JSON_HEDLEY_HAS_WARNING +#undef JSON_HEDLEY_HAS_WARNING #endif #if defined(__has_warning) - #define JSON_HEDLEY_HAS_WARNING(warning) __has_warning(warning) +#define JSON_HEDLEY_HAS_WARNING(warning) __has_warning(warning) #else - #define JSON_HEDLEY_HAS_WARNING(warning) (0) +#define JSON_HEDLEY_HAS_WARNING(warning) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_WARNING) - #undef JSON_HEDLEY_GNUC_HAS_WARNING +#undef JSON_HEDLEY_GNUC_HAS_WARNING #endif #if defined(__has_warning) - #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) +#define JSON_HEDLEY_GNUC_HAS_WARNING(warning, major, minor, patch) __has_warning(warning) #else - #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#define JSON_HEDLEY_GNUC_HAS_WARNING(warning, major, minor, patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major, minor, patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_WARNING) - #undef JSON_HEDLEY_GCC_HAS_WARNING +#undef JSON_HEDLEY_GCC_HAS_WARNING #endif #if defined(__has_warning) - #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) +#define JSON_HEDLEY_GCC_HAS_WARNING(warning, major, minor, patch) __has_warning(warning) #else - #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#define JSON_HEDLEY_GCC_HAS_WARNING(warning, major, minor, patch) JSON_HEDLEY_GCC_VERSION_CHECK(major, minor, patch) #endif -#if \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ - defined(__clang__) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(6,0,0) || \ - JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) || \ - JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,17) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) && defined(__C99_PRAGMA_OPERATOR)) - #define JSON_HEDLEY_PRAGMA(value) _Pragma(#value) -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) - #define JSON_HEDLEY_PRAGMA(value) __pragma(value) +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || defined(__clang__) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3, 0, 0) || JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8, 0, 0) || JSON_HEDLEY_PGI_VERSION_CHECK(18, 4, 0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || JSON_HEDLEY_TI_VERSION_CHECK(6, 0, 0) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(5, 0, 0) || JSON_HEDLEY_TINYC_VERSION_CHECK(0, 9, 17) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(8, 0, 0) || \ + (JSON_HEDLEY_IBM_VERSION_CHECK(10, 1, 0) && defined(__C99_PRAGMA_OPERATOR)) +#define JSON_HEDLEY_PRAGMA(value) _Pragma(#value) +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15, 0, 0) +#define JSON_HEDLEY_PRAGMA(value) __pragma(value) #else - #define JSON_HEDLEY_PRAGMA(value) +#define JSON_HEDLEY_PRAGMA(value) #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_PUSH) - #undef JSON_HEDLEY_DIAGNOSTIC_PUSH +#undef JSON_HEDLEY_DIAGNOSTIC_PUSH #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_POP) - #undef 
JSON_HEDLEY_DIAGNOSTIC_POP +#undef JSON_HEDLEY_DIAGNOSTIC_POP #endif #if defined(__clang__) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push)) - #define JSON_HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop)) -#elif JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("push") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("pop") -#elif JSON_HEDLEY_TI_VERSION_CHECK(8,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop") -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") +#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") +#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") +#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4, 6, 0) +#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") +#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15, 0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push)) +#define JSON_HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop)) +#elif JSON_HEDLEY_ARM_VERSION_CHECK(5, 6, 0) +#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("push") +#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("pop") +#elif JSON_HEDLEY_TI_VERSION_CHECK(8, 1, 0) +#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push") +#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop") +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2, 90, 0) +#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") +#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") #else - #define JSON_HEDLEY_DIAGNOSTIC_PUSH - #define JSON_HEDLEY_DIAGNOSTIC_POP +#define JSON_HEDLEY_DIAGNOSTIC_PUSH +#define JSON_HEDLEY_DIAGNOSTIC_POP #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED +#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED #endif #if JSON_HEDLEY_HAS_WARNING("-Wdeprecated-declarations") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)") -#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:4996)) -#elif JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 
1291,1718") -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && !defined(__cplusplus) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)") -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && defined(__cplusplus) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress=Pe1444,Pe1215") -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)") +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)") +#elif JSON_HEDLEY_PGI_VERSION_CHECK(17, 10, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4, 3, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15, 0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable : 4996)) +#elif JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1291,1718") +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 13, 0) && !defined(__cplusplus) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)") +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 13, 0) && defined(__cplusplus) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8, 0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress=Pe1444,Pe1215") +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2, 90, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)") #else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS +#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS #endif #if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)") -#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"") -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:4068)) -#elif JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161") +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13, 
0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)") +#elif JSON_HEDLEY_PGI_VERSION_CHECK(17, 10, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4, 3, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"") +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15, 0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable : 4068)) +#elif JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8, 0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161") #else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL +#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL #endif #if JSON_HEDLEY_HAS_WARNING("-Wcast-qual") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(3, 0, 0) +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"") #else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL +#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL #endif #if defined(JSON_HEDLEY_DEPRECATED) - #undef JSON_HEDLEY_DEPRECATED +#undef JSON_HEDLEY_DEPRECATED #endif #if defined(JSON_HEDLEY_DEPRECATED_FOR) - #undef JSON_HEDLEY_DEPRECATED_FOR +#undef JSON_HEDLEY_DEPRECATED_FOR #endif #if defined(__cplusplus) && (__cplusplus >= 201402L) - #define JSON_HEDLEY_DEPRECATED(since) [[deprecated("Since " #since)]] - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) [[deprecated("Since " #since "; use " #replacement)]] -#elif \ - JSON_HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(8,3,0) - #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since))) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__("Since " #since "; use " #replacement))) -#elif \ - JSON_HEDLEY_HAS_ATTRIBUTE(deprecated) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_TI_VERSION_CHECK(7,3,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) - #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__)) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__)) -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) - #define JSON_HEDLEY_DEPRECATED(since) 
__declspec(deprecated("Since " # since)) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement)) -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ - JSON_HEDLEY_PELLES_VERSION_CHECK(6,50,0) - #define JSON_HEDLEY_DEPRECATED(since) _declspec(deprecated) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated) -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DEPRECATED(since) _Pragma("deprecated") - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) _Pragma("deprecated") +#define JSON_HEDLEY_DEPRECATED(since) [[deprecated("Since " #since)]] +#define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) [[deprecated("Since " #since "; use " #replacement)]] +#elif JSON_HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) || JSON_HEDLEY_GCC_VERSION_CHECK(4, 5, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_ARM_VERSION_CHECK(5, 6, 0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 13, 0) || JSON_HEDLEY_PGI_VERSION_CHECK(17, 10, 0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(8, 3, 0) +#define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since))) +#define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) \ + __attribute__((__deprecated__("Since " #since "; use " #replacement))) +#elif JSON_HEDLEY_HAS_ATTRIBUTE(deprecated) || JSON_HEDLEY_GCC_VERSION_CHECK(3, 1, 0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) || \ + (JSON_HEDLEY_TI_VERSION_CHECK(7, 3, 0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) +#define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__)) +#define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__)) +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(14, 0, 0) +#define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated("Since " #since)) +#define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement)) +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13, 10, 0) || JSON_HEDLEY_PELLES_VERSION_CHECK(6, 50, 0) +#define JSON_HEDLEY_DEPRECATED(since) _declspec(deprecated) +#define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated) +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8, 0, 0) +#define JSON_HEDLEY_DEPRECATED(since) _Pragma("deprecated") +#define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) _Pragma("deprecated") #else - #define JSON_HEDLEY_DEPRECATED(since) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) +#define JSON_HEDLEY_DEPRECATED(since) +#define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) #endif #if defined(JSON_HEDLEY_UNAVAILABLE) - #undef JSON_HEDLEY_UNAVAILABLE +#undef JSON_HEDLEY_UNAVAILABLE #endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(warning) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_UNAVAILABLE(available_since) __attribute__((__warning__("Not available until " #available_since))) +#if JSON_HEDLEY_HAS_ATTRIBUTE(warning) || JSON_HEDLEY_GCC_VERSION_CHECK(4, 3, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) +#define JSON_HEDLEY_UNAVAILABLE(available_since) __attribute__((__warning__("Not available until " #available_since))) #else - #define JSON_HEDLEY_UNAVAILABLE(available_since) +#define JSON_HEDLEY_UNAVAILABLE(available_since) #endif #if defined(JSON_HEDLEY_WARN_UNUSED_RESULT) - #undef JSON_HEDLEY_WARN_UNUSED_RESULT +#undef JSON_HEDLEY_WARN_UNUSED_RESULT #endif #if defined(__cplusplus) && (__cplusplus >= 201703L) - #define 
JSON_HEDLEY_WARN_UNUSED_RESULT [[nodiscard]] -#elif \ - JSON_HEDLEY_HAS_ATTRIBUTE(warn_unused_result) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_TI_VERSION_CHECK(7,3,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) - #define JSON_HEDLEY_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) +#define JSON_HEDLEY_WARN_UNUSED_RESULT [[nodiscard]] +#elif JSON_HEDLEY_HAS_ATTRIBUTE(warn_unused_result) || JSON_HEDLEY_GCC_VERSION_CHECK(3, 4, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) || \ + (JSON_HEDLEY_TI_VERSION_CHECK(7, 3, 0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 15, 0) && defined(__cplusplus)) || JSON_HEDLEY_PGI_VERSION_CHECK(17, 10, 0) +#define JSON_HEDLEY_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) #elif defined(_Check_return_) /* SAL */ - #define JSON_HEDLEY_WARN_UNUSED_RESULT _Check_return_ +#define JSON_HEDLEY_WARN_UNUSED_RESULT _Check_return_ #else - #define JSON_HEDLEY_WARN_UNUSED_RESULT +#define JSON_HEDLEY_WARN_UNUSED_RESULT #endif #if defined(JSON_HEDLEY_SENTINEL) - #undef JSON_HEDLEY_SENTINEL +#undef JSON_HEDLEY_SENTINEL #endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(sentinel) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) - #define JSON_HEDLEY_SENTINEL(position) __attribute__((__sentinel__(position))) +#if JSON_HEDLEY_HAS_ATTRIBUTE(sentinel) || JSON_HEDLEY_GCC_VERSION_CHECK(4, 0, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_ARM_VERSION_CHECK(5, 4, 0) +#define JSON_HEDLEY_SENTINEL(position) __attribute__((__sentinel__(position))) #else - #define JSON_HEDLEY_SENTINEL(position) +#define JSON_HEDLEY_SENTINEL(position) #endif #if defined(JSON_HEDLEY_NO_RETURN) - #undef JSON_HEDLEY_NO_RETURN +#undef JSON_HEDLEY_NO_RETURN #endif -#if JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_NO_RETURN __noreturn -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) +#if JSON_HEDLEY_IAR_VERSION_CHECK(8, 0, 0) +#define JSON_HEDLEY_NO_RETURN __noreturn +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) +#define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) #elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L - #define JSON_HEDLEY_NO_RETURN _Noreturn +#define JSON_HEDLEY_NO_RETURN _Noreturn #elif defined(__cplusplus) && (__cplusplus >= 201103L) - #define JSON_HEDLEY_NO_RETURN [[noreturn]] -#elif \ - JSON_HEDLEY_HAS_ATTRIBUTE(noreturn) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,2,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(18,0,0) || \ - (JSON_HEDLEY_TI_VERSION_CHECK(17,3,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) - #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) - #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) -#elif JSON_HEDLEY_TI_VERSION_CHECK(6,0,0) && defined(__cplusplus) - #define JSON_HEDLEY_NO_RETURN _Pragma("FUNC_NEVER_RETURNS;") -#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) - #define JSON_HEDLEY_NO_RETURN __attribute((noreturn)) -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) - 
#define JSON_HEDLEY_NO_RETURN __declspec(noreturn) +#define JSON_HEDLEY_NO_RETURN [[noreturn]] +#elif JSON_HEDLEY_HAS_ATTRIBUTE(noreturn) || JSON_HEDLEY_GCC_VERSION_CHECK(3, 2, 0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 11, 0) || JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10, 1, 0) || JSON_HEDLEY_TI_VERSION_CHECK(18, 0, 0) || \ + (JSON_HEDLEY_TI_VERSION_CHECK(17, 3, 0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) +#define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13, 10, 0) +#define JSON_HEDLEY_NO_RETURN __declspec(noreturn) +#elif JSON_HEDLEY_TI_VERSION_CHECK(6, 0, 0) && defined(__cplusplus) +#define JSON_HEDLEY_NO_RETURN _Pragma("FUNC_NEVER_RETURNS;") +#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3, 2, 0) +#define JSON_HEDLEY_NO_RETURN __attribute((noreturn)) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9, 0, 0) +#define JSON_HEDLEY_NO_RETURN __declspec(noreturn) #else - #define JSON_HEDLEY_NO_RETURN +#define JSON_HEDLEY_NO_RETURN #endif #if defined(JSON_HEDLEY_UNREACHABLE) - #undef JSON_HEDLEY_UNREACHABLE +#undef JSON_HEDLEY_UNREACHABLE #endif #if defined(JSON_HEDLEY_UNREACHABLE_RETURN) - #undef JSON_HEDLEY_UNREACHABLE_RETURN +#undef JSON_HEDLEY_UNREACHABLE_RETURN #endif -#if \ - (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && (!defined(JSON_HEDLEY_ARM_VERSION))) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(13,1,5) - #define JSON_HEDLEY_UNREACHABLE() __builtin_unreachable() -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) - #define JSON_HEDLEY_UNREACHABLE() __assume(0) -#elif JSON_HEDLEY_TI_VERSION_CHECK(6,0,0) - #if defined(__cplusplus) - #define JSON_HEDLEY_UNREACHABLE() std::_nassert(0) - #else - #define JSON_HEDLEY_UNREACHABLE() _nassert(0) - #endif - #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return value -#elif defined(EXIT_FAILURE) - #define JSON_HEDLEY_UNREACHABLE() abort() +#if (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && (!defined(JSON_HEDLEY_ARM_VERSION))) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4, 5, 0) || JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13, 1, 5) +#define JSON_HEDLEY_UNREACHABLE() __builtin_unreachable() +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13, 10, 0) +#define JSON_HEDLEY_UNREACHABLE() __assume(0) +#elif JSON_HEDLEY_TI_VERSION_CHECK(6, 0, 0) +#if defined(__cplusplus) +#define JSON_HEDLEY_UNREACHABLE() std::_nassert(0) #else - #define JSON_HEDLEY_UNREACHABLE() - #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return value +#define JSON_HEDLEY_UNREACHABLE() _nassert(0) +#endif +#define JSON_HEDLEY_UNREACHABLE_RETURN(value) return value +#elif defined(EXIT_FAILURE) +#define JSON_HEDLEY_UNREACHABLE() abort() +#else +#define JSON_HEDLEY_UNREACHABLE() +#define JSON_HEDLEY_UNREACHABLE_RETURN(value) return value #endif #if !defined(JSON_HEDLEY_UNREACHABLE_RETURN) - #define JSON_HEDLEY_UNREACHABLE_RETURN(value) JSON_HEDLEY_UNREACHABLE() +#define JSON_HEDLEY_UNREACHABLE_RETURN(value) JSON_HEDLEY_UNREACHABLE() #endif #if defined(JSON_HEDLEY_ASSUME) - #undef JSON_HEDLEY_ASSUME +#undef JSON_HEDLEY_ASSUME #endif -#if \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_ASSUME(expr) __assume(expr) +#if JSON_HEDLEY_MSVC_VERSION_CHECK(13, 10, 0) || JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) +#define JSON_HEDLEY_ASSUME(expr) __assume(expr) #elif JSON_HEDLEY_HAS_BUILTIN(__builtin_assume) - #define JSON_HEDLEY_ASSUME(expr) 
__builtin_assume(expr) -#elif JSON_HEDLEY_TI_VERSION_CHECK(6,0,0) - #if defined(__cplusplus) - #define JSON_HEDLEY_ASSUME(expr) std::_nassert(expr) - #else - #define JSON_HEDLEY_ASSUME(expr) _nassert(expr) - #endif -#elif \ - (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && !defined(JSON_HEDLEY_ARM_VERSION)) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(13,1,5) - #define JSON_HEDLEY_ASSUME(expr) ((void) ((expr) ? 1 : (__builtin_unreachable(), 1))) +#define JSON_HEDLEY_ASSUME(expr) __builtin_assume(expr) +#elif JSON_HEDLEY_TI_VERSION_CHECK(6, 0, 0) +#if defined(__cplusplus) +#define JSON_HEDLEY_ASSUME(expr) std::_nassert(expr) #else - #define JSON_HEDLEY_ASSUME(expr) ((void) (expr)) +#define JSON_HEDLEY_ASSUME(expr) _nassert(expr) +#endif +#elif (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && !defined(JSON_HEDLEY_ARM_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4, 5, 0) || JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13, 1, 5) +#define JSON_HEDLEY_ASSUME(expr) ((void)((expr) ? 1 : (__builtin_unreachable(), 1))) +#else +#define JSON_HEDLEY_ASSUME(expr) ((void)(expr)) #endif - JSON_HEDLEY_DIAGNOSTIC_PUSH -#if \ - JSON_HEDLEY_HAS_WARNING("-Wvariadic-macros") || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) - #if defined(__clang__) - #pragma clang diagnostic ignored "-Wvariadic-macros" - #elif defined(JSON_HEDLEY_GCC_VERSION) - #pragma GCC diagnostic ignored "-Wvariadic-macros" - #endif +#if JSON_HEDLEY_HAS_WARNING("-Wvariadic-macros") || JSON_HEDLEY_GCC_VERSION_CHECK(4, 0, 0) +#if defined(__clang__) +#pragma clang diagnostic ignored "-Wvariadic-macros" +#elif defined(JSON_HEDLEY_GCC_VERSION) +#pragma GCC diagnostic ignored "-Wvariadic-macros" +#endif #endif #if defined(JSON_HEDLEY_NON_NULL) - #undef JSON_HEDLEY_NON_NULL +#undef JSON_HEDLEY_NON_NULL #endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(nonnull) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) - #define JSON_HEDLEY_NON_NULL(...) __attribute__((__nonnull__(__VA_ARGS__))) +#if JSON_HEDLEY_HAS_ATTRIBUTE(nonnull) || JSON_HEDLEY_GCC_VERSION_CHECK(3, 3, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) +#define JSON_HEDLEY_NON_NULL(...) __attribute__((__nonnull__(__VA_ARGS__))) #else - #define JSON_HEDLEY_NON_NULL(...) +#define JSON_HEDLEY_NON_NULL(...) 
#endif JSON_HEDLEY_DIAGNOSTIC_POP #if defined(JSON_HEDLEY_PRINTF_FORMAT) - #undef JSON_HEDLEY_PRINTF_FORMAT +#undef JSON_HEDLEY_PRINTF_FORMAT #endif -#if defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && !defined(__USE_MINGW_ANSI_STDIO) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(ms_printf, string_idx, first_to_check))) -#elif defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && defined(__USE_MINGW_ANSI_STDIO) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(gnu_printf, string_idx, first_to_check))) -#elif \ - JSON_HEDLEY_HAS_ATTRIBUTE(format) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_TI_VERSION_CHECK(7,3,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(__printf__, string_idx, first_to_check))) -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(6,0,0) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __declspec(vaformat(printf,string_idx,first_to_check)) +#if defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format, 4, 4, 0) && !defined(__USE_MINGW_ANSI_STDIO) +#define JSON_HEDLEY_PRINTF_FORMAT(string_idx, first_to_check) \ + __attribute__((__format__(ms_printf, string_idx, first_to_check))) +#elif defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format, 4, 4, 0) && defined(__USE_MINGW_ANSI_STDIO) +#define JSON_HEDLEY_PRINTF_FORMAT(string_idx, first_to_check) \ + __attribute__((__format__(gnu_printf, string_idx, first_to_check))) +#elif JSON_HEDLEY_HAS_ATTRIBUTE(format) || JSON_HEDLEY_GCC_VERSION_CHECK(3, 1, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_ARM_VERSION_CHECK(5, 6, 0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10, 1, 0) || JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) || \ + (JSON_HEDLEY_TI_VERSION_CHECK(7, 3, 0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) +#define JSON_HEDLEY_PRINTF_FORMAT(string_idx, first_to_check) \ + __attribute__((__format__(__printf__, string_idx, first_to_check))) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(6, 0, 0) +#define JSON_HEDLEY_PRINTF_FORMAT(string_idx, first_to_check) __declspec(vaformat(printf, string_idx, first_to_check)) #else - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) +#define JSON_HEDLEY_PRINTF_FORMAT(string_idx, first_to_check) #endif #if defined(JSON_HEDLEY_CONSTEXPR) - #undef JSON_HEDLEY_CONSTEXPR +#undef JSON_HEDLEY_CONSTEXPR #endif #if defined(__cplusplus) - #if __cplusplus >= 201103L - #define JSON_HEDLEY_CONSTEXPR constexpr - #endif +#if __cplusplus >= 201103L +#define JSON_HEDLEY_CONSTEXPR constexpr +#endif #endif #if !defined(JSON_HEDLEY_CONSTEXPR) - #define JSON_HEDLEY_CONSTEXPR +#define JSON_HEDLEY_CONSTEXPR #endif #if defined(JSON_HEDLEY_PREDICT) - #undef JSON_HEDLEY_PREDICT +#undef JSON_HEDLEY_PREDICT #endif #if defined(JSON_HEDLEY_LIKELY) - #undef JSON_HEDLEY_LIKELY +#undef JSON_HEDLEY_LIKELY #endif #if defined(JSON_HEDLEY_UNLIKELY) - #undef JSON_HEDLEY_UNLIKELY +#undef JSON_HEDLEY_UNLIKELY #endif #if defined(JSON_HEDLEY_UNPREDICTABLE) - #undef JSON_HEDLEY_UNPREDICTABLE +#undef JSON_HEDLEY_UNPREDICTABLE #endif #if JSON_HEDLEY_HAS_BUILTIN(__builtin_unpredictable) - #define JSON_HEDLEY_UNPREDICTABLE(expr) __builtin_unpredictable(!!(expr)) +#define JSON_HEDLEY_UNPREDICTABLE(expr) 
__builtin_unpredictable(!!(expr)) #endif -#if \ - JSON_HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(9,0,0) -# define JSON_HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability(expr, value, probability) -# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) __builtin_expect_with_probability(!!(expr), 1, probability) -# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) __builtin_expect_with_probability(!!(expr), 0, probability) -# define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1) -# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0) +#if JSON_HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) || JSON_HEDLEY_GCC_VERSION_CHECK(9, 0, 0) +#define JSON_HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability(expr, value, probability) +#define JSON_HEDLEY_PREDICT_TRUE(expr, probability) __builtin_expect_with_probability(!!(expr), 1, probability) +#define JSON_HEDLEY_PREDICT_FALSE(expr, probability) __builtin_expect_with_probability(!!(expr), 0, probability) +#define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1) +#define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0) #if !defined(JSON_HEDLEY_BUILTIN_UNPREDICTABLE) - #define JSON_HEDLEY_BUILTIN_UNPREDICTABLE(expr) __builtin_expect_with_probability(!!(expr), 1, 0.5) +#define JSON_HEDLEY_BUILTIN_UNPREDICTABLE(expr) __builtin_expect_with_probability(!!(expr), 1, 0.5) #endif -#elif \ - JSON_HEDLEY_HAS_BUILTIN(__builtin_expect) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(6,1,0) || \ - JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,27) -# define JSON_HEDLEY_PREDICT(expr, expected, probability) \ - (((probability) >= 0.9) ? __builtin_expect(!!(expr), (expected)) : (((void) (expected)), !!(expr))) -# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) \ - (__extension__ ({ \ - JSON_HEDLEY_CONSTEXPR double hedley_probability_ = (probability); \ - ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 1) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 0) : !!(expr))); \ +#elif JSON_HEDLEY_HAS_BUILTIN(__builtin_expect) || JSON_HEDLEY_GCC_VERSION_CHECK(3, 0, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 15, 0) && defined(__cplusplus)) || JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10, 1, 0) || JSON_HEDLEY_TI_VERSION_CHECK(6, 1, 0) || \ + JSON_HEDLEY_TINYC_VERSION_CHECK(0, 9, 27) +#define JSON_HEDLEY_PREDICT(expr, expected, probability) \ + (((probability) >= 0.9) ? __builtin_expect(!!(expr), (expected)) : (((void)(expected)), !!(expr))) +#define JSON_HEDLEY_PREDICT_TRUE(expr, probability) \ + (__extension__({ \ + JSON_HEDLEY_CONSTEXPR double hedley_probability_ = (probability); \ + ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 1) \ + : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 0) : !!(expr))); \ })) -# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) \ - (__extension__ ({ \ - JSON_HEDLEY_CONSTEXPR double hedley_probability_ = (probability); \ - ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 0) : ((hedley_probability_ <= 0.1) ? 
__builtin_expect(!!(expr), 1) : !!(expr))); \ +#define JSON_HEDLEY_PREDICT_FALSE(expr, probability) \ + (__extension__({ \ + JSON_HEDLEY_CONSTEXPR double hedley_probability_ = (probability); \ + ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 0) \ + : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 1) : !!(expr))); \ })) -# define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1) -# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0) +#define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1) +#define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0) #else -# define JSON_HEDLEY_PREDICT(expr, expected, probability) (((void) (expected)), !!(expr)) -# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) (!!(expr)) -# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) (!!(expr)) -# define JSON_HEDLEY_LIKELY(expr) (!!(expr)) -# define JSON_HEDLEY_UNLIKELY(expr) (!!(expr)) +#define JSON_HEDLEY_PREDICT(expr, expected, probability) (((void)(expected)), !!(expr)) +#define JSON_HEDLEY_PREDICT_TRUE(expr, probability) (!!(expr)) +#define JSON_HEDLEY_PREDICT_FALSE(expr, probability) (!!(expr)) +#define JSON_HEDLEY_LIKELY(expr) (!!(expr)) +#define JSON_HEDLEY_UNLIKELY(expr) (!!(expr)) #endif #if !defined(JSON_HEDLEY_UNPREDICTABLE) - #define JSON_HEDLEY_UNPREDICTABLE(expr) JSON_HEDLEY_PREDICT(expr, 1, 0.5) +#define JSON_HEDLEY_UNPREDICTABLE(expr) JSON_HEDLEY_PREDICT(expr, 1, 0.5) #endif #if defined(JSON_HEDLEY_MALLOC) - #undef JSON_HEDLEY_MALLOC +#undef JSON_HEDLEY_MALLOC #endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(malloc) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_TI_VERSION_CHECK(7,3,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) - #define JSON_HEDLEY_MALLOC __attribute__((__malloc__)) +#if JSON_HEDLEY_HAS_ATTRIBUTE(malloc) || JSON_HEDLEY_GCC_VERSION_CHECK(3, 1, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 11, 0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || JSON_HEDLEY_IBM_VERSION_CHECK(12, 1, 0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) || \ + (JSON_HEDLEY_TI_VERSION_CHECK(7, 3, 0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) +#define JSON_HEDLEY_MALLOC __attribute__((__malloc__)) #elif JSON_HEDLEY_MSVC_VERSION_CHECK(14, 0, 0) - #define JSON_HEDLEY_MALLOC __declspec(restrict) +#define JSON_HEDLEY_MALLOC __declspec(restrict) #else - #define JSON_HEDLEY_MALLOC +#define JSON_HEDLEY_MALLOC #endif #if defined(JSON_HEDLEY_PURE) - #undef JSON_HEDLEY_PURE +#undef JSON_HEDLEY_PURE #endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(pure) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(2,96,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_TI_VERSION_CHECK(7,3,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) - #define JSON_HEDLEY_PURE __attribute__((__pure__)) -#elif JSON_HEDLEY_TI_VERSION_CHECK(6,0,0) && defined(__cplusplus) - #define JSON_HEDLEY_PURE _Pragma("FUNC_IS_PURE;") +#if JSON_HEDLEY_HAS_ATTRIBUTE(pure) || JSON_HEDLEY_GCC_VERSION_CHECK(2, 96, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 11, 0) || \ + 
JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || JSON_HEDLEY_IBM_VERSION_CHECK(10, 1, 0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) || \ + (JSON_HEDLEY_TI_VERSION_CHECK(7, 3, 0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17, 10, 0) +#define JSON_HEDLEY_PURE __attribute__((__pure__)) +#elif JSON_HEDLEY_TI_VERSION_CHECK(6, 0, 0) && defined(__cplusplus) +#define JSON_HEDLEY_PURE _Pragma("FUNC_IS_PURE;") #else - #define JSON_HEDLEY_PURE +#define JSON_HEDLEY_PURE #endif #if defined(JSON_HEDLEY_CONST) - #undef JSON_HEDLEY_CONST +#undef JSON_HEDLEY_CONST #endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(const) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(2,5,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_TI_VERSION_CHECK(7,3,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) - #define JSON_HEDLEY_CONST __attribute__((__const__)) +#if JSON_HEDLEY_HAS_ATTRIBUTE(const) || JSON_HEDLEY_GCC_VERSION_CHECK(2, 5, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 11, 0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || JSON_HEDLEY_IBM_VERSION_CHECK(10, 1, 0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) || \ + (JSON_HEDLEY_TI_VERSION_CHECK(7, 3, 0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17, 10, 0) +#define JSON_HEDLEY_CONST __attribute__((__const__)) #else - #define JSON_HEDLEY_CONST JSON_HEDLEY_PURE +#define JSON_HEDLEY_CONST JSON_HEDLEY_PURE #endif #if defined(JSON_HEDLEY_RESTRICT) - #undef JSON_HEDLEY_RESTRICT +#undef JSON_HEDLEY_RESTRICT #endif #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus) - #define JSON_HEDLEY_RESTRICT restrict -#elif \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ - defined(__clang__) - #define JSON_HEDLEY_RESTRICT __restrict -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,3,0) && !defined(__cplusplus) - #define JSON_HEDLEY_RESTRICT _Restrict +#define JSON_HEDLEY_RESTRICT restrict +#elif JSON_HEDLEY_GCC_VERSION_CHECK(3, 1, 0) || JSON_HEDLEY_MSVC_VERSION_CHECK(14, 0, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10, 1, 0) || JSON_HEDLEY_PGI_VERSION_CHECK(17, 10, 0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) || (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 14, 0) && defined(__cplusplus)) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8, 0, 0) || defined(__clang__) +#define JSON_HEDLEY_RESTRICT __restrict +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 3, 0) && !defined(__cplusplus) +#define JSON_HEDLEY_RESTRICT _Restrict #else - #define JSON_HEDLEY_RESTRICT +#define JSON_HEDLEY_RESTRICT #endif #if defined(JSON_HEDLEY_INLINE) - #undef JSON_HEDLEY_INLINE +#undef JSON_HEDLEY_INLINE #endif -#if \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ - (defined(__cplusplus) && (__cplusplus >= 199711L)) - #define JSON_HEDLEY_INLINE inline -#elif \ - 
defined(JSON_HEDLEY_GCC_VERSION) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(6,2,0) - #define JSON_HEDLEY_INLINE __inline__ -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_INLINE __inline +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || (defined(__cplusplus) && (__cplusplus >= 199711L)) +#define JSON_HEDLEY_INLINE inline +#elif defined(JSON_HEDLEY_GCC_VERSION) || JSON_HEDLEY_ARM_VERSION_CHECK(6, 2, 0) +#define JSON_HEDLEY_INLINE __inline__ +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(12, 0, 0) || JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) +#define JSON_HEDLEY_INLINE __inline #else - #define JSON_HEDLEY_INLINE +#define JSON_HEDLEY_INLINE #endif #if defined(JSON_HEDLEY_ALWAYS_INLINE) - #undef JSON_HEDLEY_ALWAYS_INLINE +#undef JSON_HEDLEY_ALWAYS_INLINE #endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(always_inline) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_TI_VERSION_CHECK(7,3,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) - #define JSON_HEDLEY_ALWAYS_INLINE __attribute__((__always_inline__)) JSON_HEDLEY_INLINE -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) - #define JSON_HEDLEY_ALWAYS_INLINE __forceinline -#elif JSON_HEDLEY_TI_VERSION_CHECK(7,0,0) && defined(__cplusplus) - #define JSON_HEDLEY_ALWAYS_INLINE _Pragma("FUNC_ALWAYS_INLINE;") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_ALWAYS_INLINE _Pragma("inline=forced") +#if JSON_HEDLEY_HAS_ATTRIBUTE(always_inline) || JSON_HEDLEY_GCC_VERSION_CHECK(4, 0, 0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 11, 0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || JSON_HEDLEY_IBM_VERSION_CHECK(10, 1, 0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) || \ + (JSON_HEDLEY_TI_VERSION_CHECK(7, 3, 0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) +#define JSON_HEDLEY_ALWAYS_INLINE __attribute__((__always_inline__)) JSON_HEDLEY_INLINE +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(12, 0, 0) +#define JSON_HEDLEY_ALWAYS_INLINE __forceinline +#elif JSON_HEDLEY_TI_VERSION_CHECK(7, 0, 0) && defined(__cplusplus) +#define JSON_HEDLEY_ALWAYS_INLINE _Pragma("FUNC_ALWAYS_INLINE;") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8, 0, 0) +#define JSON_HEDLEY_ALWAYS_INLINE _Pragma("inline=forced") #else - #define JSON_HEDLEY_ALWAYS_INLINE JSON_HEDLEY_INLINE +#define JSON_HEDLEY_ALWAYS_INLINE JSON_HEDLEY_INLINE #endif #if defined(JSON_HEDLEY_NEVER_INLINE) - #undef JSON_HEDLEY_NEVER_INLINE +#undef JSON_HEDLEY_NEVER_INLINE #endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(noinline) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_TI_VERSION_CHECK(7,3,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) - #define JSON_HEDLEY_NEVER_INLINE __attribute__((__noinline__)) -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) - #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) -#elif JSON_HEDLEY_PGI_VERSION_CHECK(10,2,0) - #define JSON_HEDLEY_NEVER_INLINE _Pragma("noinline") -#elif JSON_HEDLEY_TI_VERSION_CHECK(6,0,0) && 
 #if defined(JSON_HEDLEY_NEVER_INLINE)
-    #undef JSON_HEDLEY_NEVER_INLINE
+#undef JSON_HEDLEY_NEVER_INLINE
 #endif
-#if \
-    JSON_HEDLEY_HAS_ATTRIBUTE(noinline) || \
-    JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \
-    JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
-    JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
-    JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
-    JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
-    JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) || \
-    (JSON_HEDLEY_TI_VERSION_CHECK(7,3,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__))
-    #define JSON_HEDLEY_NEVER_INLINE __attribute__((__noinline__))
-#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0)
-    #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline)
-#elif JSON_HEDLEY_PGI_VERSION_CHECK(10,2,0)
-    #define JSON_HEDLEY_NEVER_INLINE _Pragma("noinline")
-#elif JSON_HEDLEY_TI_VERSION_CHECK(6,0,0) && defined(__cplusplus)
-    #define JSON_HEDLEY_NEVER_INLINE _Pragma("FUNC_CANNOT_INLINE;")
-#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
-    #define JSON_HEDLEY_NEVER_INLINE _Pragma("inline=never")
-#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0)
-    #define JSON_HEDLEY_NEVER_INLINE __attribute((noinline))
-#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0)
-    #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline)
+#if JSON_HEDLEY_HAS_ATTRIBUTE(noinline) || JSON_HEDLEY_GCC_VERSION_CHECK(4, 0, 0) || \
+    JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 11, 0) || \
+    JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || JSON_HEDLEY_IBM_VERSION_CHECK(10, 1, 0) || \
+    JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) || \
+    (JSON_HEDLEY_TI_VERSION_CHECK(7, 3, 0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__))
+#define JSON_HEDLEY_NEVER_INLINE __attribute__((__noinline__))
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13, 10, 0)
+#define JSON_HEDLEY_NEVER_INLINE __declspec(noinline)
+#elif JSON_HEDLEY_PGI_VERSION_CHECK(10, 2, 0)
+#define JSON_HEDLEY_NEVER_INLINE _Pragma("noinline")
+#elif JSON_HEDLEY_TI_VERSION_CHECK(6, 0, 0) && defined(__cplusplus)
+#define JSON_HEDLEY_NEVER_INLINE _Pragma("FUNC_CANNOT_INLINE;")
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8, 0, 0)
+#define JSON_HEDLEY_NEVER_INLINE _Pragma("inline=never")
+#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3, 2, 0)
+#define JSON_HEDLEY_NEVER_INLINE __attribute((noinline))
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9, 0, 0)
+#define JSON_HEDLEY_NEVER_INLINE __declspec(noinline)
 #else
-    #define JSON_HEDLEY_NEVER_INLINE
+#define JSON_HEDLEY_NEVER_INLINE
 #endif

 #if defined(JSON_HEDLEY_PRIVATE)
-    #undef JSON_HEDLEY_PRIVATE
+#undef JSON_HEDLEY_PRIVATE
 #endif
 #if defined(JSON_HEDLEY_PUBLIC)
-    #undef JSON_HEDLEY_PUBLIC
+#undef JSON_HEDLEY_PUBLIC
 #endif
 #if defined(JSON_HEDLEY_IMPORT)
-    #undef JSON_HEDLEY_IMPORT
+#undef JSON_HEDLEY_IMPORT
 #endif
 #if defined(_WIN32) || defined(__CYGWIN__)
-    #define JSON_HEDLEY_PRIVATE
-    #define JSON_HEDLEY_PUBLIC __declspec(dllexport)
-    #define JSON_HEDLEY_IMPORT __declspec(dllimport)
+#define JSON_HEDLEY_PRIVATE
+#define JSON_HEDLEY_PUBLIC __declspec(dllexport)
+#define JSON_HEDLEY_IMPORT __declspec(dllimport)
 #else
-    #if \
-        JSON_HEDLEY_HAS_ATTRIBUTE(visibility) || \
-        JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
-        JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
-        JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
-        JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
-        JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
-        JSON_HEDLEY_TI_VERSION_CHECK(8,0,0) || \
-        (JSON_HEDLEY_TI_VERSION_CHECK(7,3,0) && defined(__TI_EABI__) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__))
-        #define JSON_HEDLEY_PRIVATE __attribute__((__visibility__("hidden")))
-        #define JSON_HEDLEY_PUBLIC __attribute__((__visibility__("default")))
-    #else
-        #define JSON_HEDLEY_PRIVATE
-        #define JSON_HEDLEY_PUBLIC
-    #endif
-    #define JSON_HEDLEY_IMPORT extern
+#if JSON_HEDLEY_HAS_ATTRIBUTE(visibility) || JSON_HEDLEY_GCC_VERSION_CHECK(3, 3, 0) || \
+    JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 11, 0) || JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || \
+    JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || JSON_HEDLEY_IBM_VERSION_CHECK(13, 1, 0) || \
+    JSON_HEDLEY_TI_VERSION_CHECK(8, 0, 0) || \
+    (JSON_HEDLEY_TI_VERSION_CHECK(7, 3, 0) && defined(__TI_EABI__) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__))
+#define JSON_HEDLEY_PRIVATE __attribute__((__visibility__("hidden")))
+#define JSON_HEDLEY_PUBLIC __attribute__((__visibility__("default")))
+#else
+#define JSON_HEDLEY_PRIVATE
+#define JSON_HEDLEY_PUBLIC
+#endif
+#define JSON_HEDLEY_IMPORT extern
 #endif
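// A minimal usage sketch (symbol names hypothetical): how a shared library would
// mark its ABI surface with the macros above.
JSON_HEDLEY_PUBLIC int milvus_api_version(void);   // exported: dllexport / visibility("default")
JSON_HEDLEY_PRIVATE void cache_cleanup(void);      // hidden: visibility("hidden"), not exported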
 #if defined(JSON_HEDLEY_NO_THROW)
-    #undef JSON_HEDLEY_NO_THROW
+#undef JSON_HEDLEY_NO_THROW
 #endif
-#if \
-    JSON_HEDLEY_HAS_ATTRIBUTE(nothrow) || \
-    JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
-    JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
-    #define JSON_HEDLEY_NO_THROW __attribute__((__nothrow__))
-#elif \
-    JSON_HEDLEY_MSVC_VERSION_CHECK(13,1,0) || \
-    JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0)
-    #define JSON_HEDLEY_NO_THROW __declspec(nothrow)
+#if JSON_HEDLEY_HAS_ATTRIBUTE(nothrow) || JSON_HEDLEY_GCC_VERSION_CHECK(3, 3, 0) || \
+    JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0)
+#define JSON_HEDLEY_NO_THROW __attribute__((__nothrow__))
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13, 1, 0) || JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0)
+#define JSON_HEDLEY_NO_THROW __declspec(nothrow)
 #else
-    #define JSON_HEDLEY_NO_THROW
+#define JSON_HEDLEY_NO_THROW
 #endif

 #if defined(JSON_HEDLEY_FALL_THROUGH)
-    #undef JSON_HEDLEY_FALL_THROUGH
+#undef JSON_HEDLEY_FALL_THROUGH
 #endif
-#if \
-    defined(__cplusplus) && \
-    (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) && \
+#if defined(__cplusplus) && (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 15, 0)) && \
     !defined(JSON_HEDLEY_PGI_VERSION)
-    #if \
-        (__cplusplus >= 201703L) || \
-        ((__cplusplus >= 201103L) && JSON_HEDLEY_HAS_CPP_ATTRIBUTE(fallthrough))
-        #define JSON_HEDLEY_FALL_THROUGH [[fallthrough]]
-    #elif (__cplusplus >= 201103L) && JSON_HEDLEY_HAS_CPP_ATTRIBUTE(clang::fallthrough)
-        #define JSON_HEDLEY_FALL_THROUGH [[clang::fallthrough]]
-    #elif (__cplusplus >= 201103L) && JSON_HEDLEY_GCC_VERSION_CHECK(7,0,0)
-        #define JSON_HEDLEY_FALL_THROUGH [[gnu::fallthrough]]
-    #endif
+#if (__cplusplus >= 201703L) || ((__cplusplus >= 201103L) && JSON_HEDLEY_HAS_CPP_ATTRIBUTE(fallthrough))
+#define JSON_HEDLEY_FALL_THROUGH [[fallthrough]]
+#elif (__cplusplus >= 201103L) && JSON_HEDLEY_HAS_CPP_ATTRIBUTE(clang::fallthrough)
+#define JSON_HEDLEY_FALL_THROUGH [[clang::fallthrough]]
+#elif (__cplusplus >= 201103L) && JSON_HEDLEY_GCC_VERSION_CHECK(7, 0, 0)
+#define JSON_HEDLEY_FALL_THROUGH [[gnu::fallthrough]]
+#endif
 #endif
 #if !defined(JSON_HEDLEY_FALL_THROUGH)
-    #if JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(fallthrough,7,0,0) && !defined(JSON_HEDLEY_PGI_VERSION)
-        #define JSON_HEDLEY_FALL_THROUGH __attribute__((__fallthrough__))
-    #elif defined(__fallthrough) /* SAL */
-        #define JSON_HEDLEY_FALL_THROUGH __fallthrough
-    #else
-        #define JSON_HEDLEY_FALL_THROUGH
-    #endif
+#if JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(fallthrough, 7, 0, 0) && !defined(JSON_HEDLEY_PGI_VERSION)
+#define JSON_HEDLEY_FALL_THROUGH __attribute__((__fallthrough__))
+#elif defined(__fallthrough) /* SAL */
+#define JSON_HEDLEY_FALL_THROUGH __fallthrough
+#else
+#define JSON_HEDLEY_FALL_THROUGH
+#endif
 #endif

 #if defined(JSON_HEDLEY_RETURNS_NON_NULL)
-    #undef JSON_HEDLEY_RETURNS_NON_NULL
+#undef JSON_HEDLEY_RETURNS_NON_NULL
 #endif
-#if \
-    JSON_HEDLEY_HAS_ATTRIBUTE(returns_nonnull) || \
-    JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0)
-    #define JSON_HEDLEY_RETURNS_NON_NULL __attribute__((__returns_nonnull__))
+#if JSON_HEDLEY_HAS_ATTRIBUTE(returns_nonnull) || JSON_HEDLEY_GCC_VERSION_CHECK(4, 9, 0)
+#define JSON_HEDLEY_RETURNS_NON_NULL __attribute__((__returns_nonnull__))
 #elif defined(_Ret_notnull_) /* SAL */
-    #define JSON_HEDLEY_RETURNS_NON_NULL _Ret_notnull_
+#define JSON_HEDLEY_RETURNS_NON_NULL _Ret_notnull_
 #else
-    #define JSON_HEDLEY_RETURNS_NON_NULL
+#define JSON_HEDLEY_RETURNS_NON_NULL
 #endif
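// A minimal usage sketch: documenting an intentional switch fall-through so that
// -Wimplicit-fallthrough style warnings stay quiet on every supported compiler.
static int flags_for(char c) {
    int flags = 0;
    switch (c) {
        case 'a':
            flags |= 1;
            JSON_HEDLEY_FALL_THROUGH;  // expands to [[fallthrough]] where available
        case 'b':
            flags |= 2;
            break;
        default:
            break;
    }
    return flags;
}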
 #if defined(JSON_HEDLEY_ARRAY_PARAM)
-    #undef JSON_HEDLEY_ARRAY_PARAM
+#undef JSON_HEDLEY_ARRAY_PARAM
 #endif
-#if \
-    defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
-    !defined(__STDC_NO_VLA__) && \
-    !defined(__cplusplus) && \
-    !defined(JSON_HEDLEY_PGI_VERSION) && \
-    !defined(JSON_HEDLEY_TINYC_VERSION)
-    #define JSON_HEDLEY_ARRAY_PARAM(name) (name)
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && !defined(__STDC_NO_VLA__) && \
+    !defined(__cplusplus) && !defined(JSON_HEDLEY_PGI_VERSION) && !defined(JSON_HEDLEY_TINYC_VERSION)
+#define JSON_HEDLEY_ARRAY_PARAM(name) (name)
 #else
-    #define JSON_HEDLEY_ARRAY_PARAM(name)
+#define JSON_HEDLEY_ARRAY_PARAM(name)
 #endif

 #if defined(JSON_HEDLEY_IS_CONSTANT)
-    #undef JSON_HEDLEY_IS_CONSTANT
+#undef JSON_HEDLEY_IS_CONSTANT
 #endif
 #if defined(JSON_HEDLEY_REQUIRE_CONSTEXPR)
-    #undef JSON_HEDLEY_REQUIRE_CONSTEXPR
+#undef JSON_HEDLEY_REQUIRE_CONSTEXPR
 #endif
 /* Note the double-underscore. For internal use only; no API
  * guarantees! */
 #if defined(JSON_HEDLEY__IS_CONSTEXPR)
-    #undef JSON_HEDLEY__IS_CONSTEXPR
+#undef JSON_HEDLEY__IS_CONSTEXPR
 #endif
-#if \
-    JSON_HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \
-    JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
-    JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
-    JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,19) || \
-    JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
-    JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
-    JSON_HEDLEY_TI_VERSION_CHECK(6,1,0) || \
-    JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) || \
-    JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0)
-    #define JSON_HEDLEY_IS_CONSTANT(expr) __builtin_constant_p(expr)
+#if JSON_HEDLEY_HAS_BUILTIN(__builtin_constant_p) || JSON_HEDLEY_GCC_VERSION_CHECK(3, 4, 0) || \
+    JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_TINYC_VERSION_CHECK(0, 9, 19) || \
+    JSON_HEDLEY_ARM_VERSION_CHECK(4, 1, 0) || JSON_HEDLEY_IBM_VERSION_CHECK(13, 1, 0) || \
+    JSON_HEDLEY_TI_VERSION_CHECK(6, 1, 0) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5, 10, 0) || \
+    JSON_HEDLEY_CRAY_VERSION_CHECK(8, 1, 0)
+#define JSON_HEDLEY_IS_CONSTANT(expr) __builtin_constant_p(expr)
 #endif
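// A minimal usage sketch: where __builtin_constant_p is available, the macro
// reports whether an expression folds to a compile-time constant.
static int constant_demo(int x) {
    int a = JSON_HEDLEY_IS_CONSTANT(7 * 6);  // typically 1: folds at compile time
    int b = JSON_HEDLEY_IS_CONSTANT(x);      // typically 0: x is a run-time value
    return a + b;
}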
 #if !defined(__cplusplus)
-#  if \
-    JSON_HEDLEY_HAS_BUILTIN(__builtin_types_compatible_p) || \
-    JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
-    JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
-    JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
-    JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \
-    JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \
-    JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,24)
+#if JSON_HEDLEY_HAS_BUILTIN(__builtin_types_compatible_p) || JSON_HEDLEY_GCC_VERSION_CHECK(3, 4, 0) || \
+    JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || JSON_HEDLEY_IBM_VERSION_CHECK(13, 1, 0) || \
+    JSON_HEDLEY_CRAY_VERSION_CHECK(8, 1, 0) || JSON_HEDLEY_ARM_VERSION_CHECK(5, 4, 0) || \
+    JSON_HEDLEY_TINYC_VERSION_CHECK(0, 9, 24)
 #if defined(__INTPTR_TYPE__)
-    #define JSON_HEDLEY__IS_CONSTEXPR(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0)), int*)
+#define JSON_HEDLEY__IS_CONSTEXPR(expr) \
+    __builtin_types_compatible_p(__typeof__((1 ? (void*)((__INTPTR_TYPE__)((expr)*0)) : (int*)0)), int*)
 #else
-    #include <stdint.h>
-    #define JSON_HEDLEY__IS_CONSTEXPR(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0)), int*)
+#include <stdint.h>
+#define JSON_HEDLEY__IS_CONSTEXPR(expr) \
+    __builtin_types_compatible_p(__typeof__((1 ? (void*)((intptr_t)((expr)*0)) : (int*)0)), int*)
 #endif
-#  elif \
-    (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(JSON_HEDLEY_SUNPRO_VERSION) && !defined(JSON_HEDLEY_PGI_VERSION)) || \
-    JSON_HEDLEY_HAS_EXTENSION(c_generic_selections) || \
-    JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \
-    JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \
-    JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \
-    JSON_HEDLEY_ARM_VERSION_CHECK(5,3,0)
+#elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(JSON_HEDLEY_SUNPRO_VERSION) && \
+    !defined(JSON_HEDLEY_PGI_VERSION)) || \
+    JSON_HEDLEY_HAS_EXTENSION(c_generic_selections) || JSON_HEDLEY_GCC_VERSION_CHECK(4, 9, 0) || \
+    JSON_HEDLEY_INTEL_VERSION_CHECK(17, 0, 0) || JSON_HEDLEY_IBM_VERSION_CHECK(12, 1, 0) || \
+    JSON_HEDLEY_ARM_VERSION_CHECK(5, 3, 0)
 #if defined(__INTPTR_TYPE__)
-    #define JSON_HEDLEY__IS_CONSTEXPR(expr) _Generic((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0)
+#define JSON_HEDLEY__IS_CONSTEXPR(expr) \
+    _Generic((1 ? (void*)((__INTPTR_TYPE__)((expr)*0)) : (int*)0), int* : 1, void* : 0)
 #else
-    #include <stdint.h>
-    #define JSON_HEDLEY__IS_CONSTEXPR(expr) _Generic((1 ? (void*) ((intptr_t) * 0) : (int*) 0), int*: 1, void*: 0)
+#include <stdint.h>
+#define JSON_HEDLEY__IS_CONSTEXPR(expr) _Generic((1 ? (void*)((intptr_t)*0) : (int*)0), int* : 1, void* : 0)
+#endif
+#elif defined(JSON_HEDLEY_GCC_VERSION) || defined(JSON_HEDLEY_INTEL_VERSION) || defined(JSON_HEDLEY_TINYC_VERSION) || \
+    defined(JSON_HEDLEY_TI_VERSION) || defined(__clang__)
+#define JSON_HEDLEY__IS_CONSTEXPR(expr) \
+    (sizeof(void) != sizeof(*(1 ? ((void*)((expr)*0L)) : ((struct { char v[sizeof(void) * 2]; }*)1))))
 #endif
-#  elif \
-    defined(JSON_HEDLEY_GCC_VERSION) || \
-    defined(JSON_HEDLEY_INTEL_VERSION) || \
-    defined(JSON_HEDLEY_TINYC_VERSION) || \
-    defined(JSON_HEDLEY_TI_VERSION) || \
-    defined(__clang__)
-#    define JSON_HEDLEY__IS_CONSTEXPR(expr) ( \
-        sizeof(void) != \
-        sizeof(*( \
-                  1 ? \
-                  ((void*) ((expr) * 0L) ) : \
-((struct { char v[sizeof(void) * 2]; } *) 1) \
-                ) \
-              ) \
-    )
-#  endif
 #endif
 #if defined(JSON_HEDLEY__IS_CONSTEXPR)
-    #if !defined(JSON_HEDLEY_IS_CONSTANT)
-        #define JSON_HEDLEY_IS_CONSTANT(expr) JSON_HEDLEY__IS_CONSTEXPR(expr)
-    #endif
-    #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (JSON_HEDLEY__IS_CONSTEXPR(expr) ? (expr) : (-1))
+#if !defined(JSON_HEDLEY_IS_CONSTANT)
+#define JSON_HEDLEY_IS_CONSTANT(expr) JSON_HEDLEY__IS_CONSTEXPR(expr)
+#endif
+#define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (JSON_HEDLEY__IS_CONSTEXPR(expr) ? (expr) : (-1))
 #else
-    #if !defined(JSON_HEDLEY_IS_CONSTANT)
-        #define JSON_HEDLEY_IS_CONSTANT(expr) (0)
-    #endif
-    #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (expr)
+#if !defined(JSON_HEDLEY_IS_CONSTANT)
+#define JSON_HEDLEY_IS_CONSTANT(expr) (0)
+#endif
+#define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (expr)
 #endif

 #if defined(JSON_HEDLEY_BEGIN_C_DECLS)
-    #undef JSON_HEDLEY_BEGIN_C_DECLS
+#undef JSON_HEDLEY_BEGIN_C_DECLS
 #endif
 #if defined(JSON_HEDLEY_END_C_DECLS)
-    #undef JSON_HEDLEY_END_C_DECLS
+#undef JSON_HEDLEY_END_C_DECLS
 #endif
 #if defined(JSON_HEDLEY_C_DECL)
-    #undef JSON_HEDLEY_C_DECL
+#undef JSON_HEDLEY_C_DECL
 #endif
 #if defined(__cplusplus)
-    #define JSON_HEDLEY_BEGIN_C_DECLS extern "C" {
-    #define JSON_HEDLEY_END_C_DECLS }
-    #define JSON_HEDLEY_C_DECL extern "C"
+#define JSON_HEDLEY_BEGIN_C_DECLS extern "C" {
+#define JSON_HEDLEY_END_C_DECLS }
+#define JSON_HEDLEY_C_DECL extern "C"
 #else
-    #define JSON_HEDLEY_BEGIN_C_DECLS
-    #define JSON_HEDLEY_END_C_DECLS
-    #define JSON_HEDLEY_C_DECL
+#define JSON_HEDLEY_BEGIN_C_DECLS
+#define JSON_HEDLEY_END_C_DECLS
+#define JSON_HEDLEY_C_DECL
 #endif

 #if defined(JSON_HEDLEY_STATIC_ASSERT)
-    #undef JSON_HEDLEY_STATIC_ASSERT
+#undef JSON_HEDLEY_STATIC_ASSERT
 #endif
-#if \
-    !defined(__cplusplus) && ( \
-        (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \
-        JSON_HEDLEY_HAS_FEATURE(c_static_assert) || \
-        JSON_HEDLEY_GCC_VERSION_CHECK(6,0,0) || \
-        JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
-        defined(_Static_assert) \
-    )
-#  define JSON_HEDLEY_STATIC_ASSERT(expr, message) _Static_assert(expr, message)
-#elif \
-    (defined(__cplusplus) && (__cplusplus >= 201703L)) || \
-    JSON_HEDLEY_MSVC_VERSION_CHECK(16,0,0) || \
-    (defined(__cplusplus) && JSON_HEDLEY_TI_VERSION_CHECK(8,3,0))
-#  define JSON_HEDLEY_STATIC_ASSERT(expr, message) static_assert(expr, message)
+#if !defined(__cplusplus) && \
+    ((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || JSON_HEDLEY_HAS_FEATURE(c_static_assert) || \
+     JSON_HEDLEY_GCC_VERSION_CHECK(6, 0, 0) || JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0) || defined(_Static_assert))
+#define JSON_HEDLEY_STATIC_ASSERT(expr, message) _Static_assert(expr, message)
+#elif (defined(__cplusplus) && (__cplusplus >= 201703L)) || JSON_HEDLEY_MSVC_VERSION_CHECK(16, 0, 0) || \
+    (defined(__cplusplus) && JSON_HEDLEY_TI_VERSION_CHECK(8, 3, 0))
+#define JSON_HEDLEY_STATIC_ASSERT(expr, message) static_assert(expr, message)
 #elif defined(__cplusplus) && (__cplusplus >= 201103L)
-#  define JSON_HEDLEY_STATIC_ASSERT(expr, message) static_assert(expr)
+#define JSON_HEDLEY_STATIC_ASSERT(expr, message) static_assert(expr)
 #else
-#  define JSON_HEDLEY_STATIC_ASSERT(expr, message)
+#define JSON_HEDLEY_STATIC_ASSERT(expr, message)
 #endif
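// A minimal usage sketch: one spelling that becomes _Static_assert in C11,
// static_assert in C++11/17, and a no-op on compilers that support neither.
JSON_HEDLEY_STATIC_ASSERT(sizeof(int) >= 2, "int must be at least 16 bits wide");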
 #if defined(JSON_HEDLEY_CONST_CAST)
-    #undef JSON_HEDLEY_CONST_CAST
+#undef JSON_HEDLEY_CONST_CAST
 #endif
 #if defined(__cplusplus)
-#  define JSON_HEDLEY_CONST_CAST(T, expr) (const_cast<T>(expr))
-#elif \
-    JSON_HEDLEY_HAS_WARNING("-Wcast-qual") || \
-    JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) || \
-    JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
-#  define JSON_HEDLEY_CONST_CAST(T, expr) (__extension__ ({ \
-        JSON_HEDLEY_DIAGNOSTIC_PUSH \
-        JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
-        ((T) (expr)); \
-        JSON_HEDLEY_DIAGNOSTIC_POP \
+#define JSON_HEDLEY_CONST_CAST(T, expr) (const_cast<T>(expr))
+#elif JSON_HEDLEY_HAS_WARNING("-Wcast-qual") || JSON_HEDLEY_GCC_VERSION_CHECK(4, 6, 0) || \
+    JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0)
+#define JSON_HEDLEY_CONST_CAST(T, expr) \
+    (__extension__({ \
+        JSON_HEDLEY_DIAGNOSTIC_PUSH \
+        JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL((T)(expr)); \
+        JSON_HEDLEY_DIAGNOSTIC_POP \
     }))
 #else
-#  define JSON_HEDLEY_CONST_CAST(T, expr) ((T) (expr))
+#define JSON_HEDLEY_CONST_CAST(T, expr) ((T)(expr))
 #endif

 #if defined(JSON_HEDLEY_REINTERPRET_CAST)
-    #undef JSON_HEDLEY_REINTERPRET_CAST
+#undef JSON_HEDLEY_REINTERPRET_CAST
 #endif
 #if defined(__cplusplus)
-    #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast<T>(expr))
+#define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast<T>(expr))
 #else
-    #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (*((T*) &(expr)))
+#define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (*((T*)&(expr)))
 #endif

 #if defined(JSON_HEDLEY_STATIC_CAST)
-    #undef JSON_HEDLEY_STATIC_CAST
+#undef JSON_HEDLEY_STATIC_CAST
 #endif
 #if defined(__cplusplus)
-    #define JSON_HEDLEY_STATIC_CAST(T, expr) (static_cast<T>(expr))
+#define JSON_HEDLEY_STATIC_CAST(T, expr) (static_cast<T>(expr))
 #else
-    #define JSON_HEDLEY_STATIC_CAST(T, expr) ((T) (expr))
+#define JSON_HEDLEY_STATIC_CAST(T, expr) ((T)(expr))
 #endif

 #if defined(JSON_HEDLEY_CPP_CAST)
-    #undef JSON_HEDLEY_CPP_CAST
+#undef JSON_HEDLEY_CPP_CAST
 #endif
 #if defined(__cplusplus)
-    #define JSON_HEDLEY_CPP_CAST(T, expr) static_cast<T>(expr)
+#define JSON_HEDLEY_CPP_CAST(T, expr) static_cast<T>(expr)
 #else
-    #define JSON_HEDLEY_CPP_CAST(T, expr) (expr)
+#define JSON_HEDLEY_CPP_CAST(T, expr) (expr)
 #endif

 #if defined(JSON_HEDLEY_MESSAGE)
-    #undef JSON_HEDLEY_MESSAGE
+#undef JSON_HEDLEY_MESSAGE
 #endif
 #if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas")
-#  define JSON_HEDLEY_MESSAGE(msg) \
-    JSON_HEDLEY_DIAGNOSTIC_PUSH \
+#define JSON_HEDLEY_MESSAGE(msg) \
+    JSON_HEDLEY_DIAGNOSTIC_PUSH \
     JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \
-    JSON_HEDLEY_PRAGMA(message msg) \
+    JSON_HEDLEY_PRAGMA(message msg) \
     JSON_HEDLEY_DIAGNOSTIC_POP
-#elif \
-    JSON_HEDLEY_GCC_VERSION_CHECK(4,4,0) || \
-    JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
-#  define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message msg)
-#elif JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0)
-#  define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(_CRI message msg)
-#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
-#  define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg))
-#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,0,0)
-#  define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg))
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(4, 4, 0) || JSON_HEDLEY_INTEL_VERSION_CHECK(13, 0, 0)
+#define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message msg)
+#elif JSON_HEDLEY_CRAY_VERSION_CHECK(5, 0, 0)
+#define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(_CRI message msg)
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8, 0, 0)
+#define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg))
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2, 0, 0)
+#define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg))
 #else
-#  define JSON_HEDLEY_MESSAGE(msg)
+#define JSON_HEDLEY_MESSAGE(msg)
 #endif
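// A minimal usage sketch: the cast macros pick the checked C++ cast when compiled
// as C++ and fall back to a plain cast in C.
static void cast_demo(void) {
    const char* label = "milvus";
    char* writable = JSON_HEDLEY_CONST_CAST(char*, label);  // const_cast<char*>(label) in C++
    int truncated = JSON_HEDLEY_STATIC_CAST(int, 3.9);      // static_cast<int>(3.9) in C++, (int)3.9 in C
    (void)writable;
    (void)truncated;
}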
 #if defined(JSON_HEDLEY_WARNING)
-    #undef JSON_HEDLEY_WARNING
+#undef JSON_HEDLEY_WARNING
 #endif
 #if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas")
-#  define JSON_HEDLEY_WARNING(msg) \
-    JSON_HEDLEY_DIAGNOSTIC_PUSH \
+#define JSON_HEDLEY_WARNING(msg) \
+    JSON_HEDLEY_DIAGNOSTIC_PUSH \
     JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \
-    JSON_HEDLEY_PRAGMA(clang warning msg) \
+    JSON_HEDLEY_PRAGMA(clang warning msg) \
     JSON_HEDLEY_DIAGNOSTIC_POP
-#elif \
-    JSON_HEDLEY_GCC_VERSION_CHECK(4,8,0) || \
-    JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0)
-#  define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(GCC warning msg)
-#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
-#  define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(message(msg))
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(4, 8, 0) || JSON_HEDLEY_PGI_VERSION_CHECK(18, 4, 0)
+#define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(GCC warning msg)
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15, 0, 0)
+#define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(message(msg))
 #else
-#  define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_MESSAGE(msg)
+#define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_MESSAGE(msg)
 #endif

 #if defined(JSON_HEDLEY_REQUIRE_MSG)
-    #undef JSON_HEDLEY_REQUIRE_MSG
+#undef JSON_HEDLEY_REQUIRE_MSG
 #endif
 #if JSON_HEDLEY_HAS_ATTRIBUTE(diagnose_if)
-#  if JSON_HEDLEY_HAS_WARNING("-Wgcc-compat")
-#    define JSON_HEDLEY_REQUIRE_MSG(expr, msg) \
-    JSON_HEDLEY_DIAGNOSTIC_PUSH \
-    _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \
-    __attribute__((__diagnose_if__(!(expr), msg, "error"))) \
-    JSON_HEDLEY_DIAGNOSTIC_POP
-#  else
-#    define JSON_HEDLEY_REQUIRE_MSG(expr, msg) __attribute__((__diagnose_if__(!(expr), msg, "error")))
-#  endif
+#if JSON_HEDLEY_HAS_WARNING("-Wgcc-compat")
+#define JSON_HEDLEY_REQUIRE_MSG(expr, msg) \
+    JSON_HEDLEY_DIAGNOSTIC_PUSH \
+    _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") __attribute__((__diagnose_if__(!(expr), msg, "error"))) \
+    JSON_HEDLEY_DIAGNOSTIC_POP
 #else
-#  define JSON_HEDLEY_REQUIRE_MSG(expr, msg)
+#define JSON_HEDLEY_REQUIRE_MSG(expr, msg) __attribute__((__diagnose_if__(!(expr), msg, "error")))
+#endif
+#else
+#define JSON_HEDLEY_REQUIRE_MSG(expr, msg)
 #endif

 #if defined(JSON_HEDLEY_REQUIRE)
-    #undef JSON_HEDLEY_REQUIRE
+#undef JSON_HEDLEY_REQUIRE
 #endif
 #define JSON_HEDLEY_REQUIRE(expr) JSON_HEDLEY_REQUIRE_MSG(expr, #expr)

 #if defined(JSON_HEDLEY_FLAGS)
-    #undef JSON_HEDLEY_FLAGS
+#undef JSON_HEDLEY_FLAGS
 #endif
 #if JSON_HEDLEY_HAS_ATTRIBUTE(flag_enum)
-    #define JSON_HEDLEY_FLAGS __attribute__((__flag_enum__))
+#define JSON_HEDLEY_FLAGS __attribute__((__flag_enum__))
 #endif

 #if defined(JSON_HEDLEY_FLAGS_CAST)
-    #undef JSON_HEDLEY_FLAGS_CAST
+#undef JSON_HEDLEY_FLAGS_CAST
 #endif
-#if JSON_HEDLEY_INTEL_VERSION_CHECK(19,0,0)
-#  define JSON_HEDLEY_FLAGS_CAST(T, expr) (__extension__ ({ \
-    JSON_HEDLEY_DIAGNOSTIC_PUSH \
-        _Pragma("warning(disable:188)") \
-        ((T) (expr)); \
-        JSON_HEDLEY_DIAGNOSTIC_POP \
+#if JSON_HEDLEY_INTEL_VERSION_CHECK(19, 0, 0)
+#define JSON_HEDLEY_FLAGS_CAST(T, expr) \
+    (__extension__({ \
+        JSON_HEDLEY_DIAGNOSTIC_PUSH \
+        _Pragma("warning(disable:188)")((T)(expr)); \
+        JSON_HEDLEY_DIAGNOSTIC_POP \
     }))
 #else
-#  define JSON_HEDLEY_FLAGS_CAST(T, expr) JSON_HEDLEY_STATIC_CAST(T, expr)
+#define JSON_HEDLEY_FLAGS_CAST(T, expr) JSON_HEDLEY_STATIC_CAST(T, expr)
 #endif
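// A minimal usage sketch (effective on clang, where diagnose_if exists): the
// attribute sits after the parameter list so the precondition can name parameters.
static int head_bytes(int n) JSON_HEDLEY_REQUIRE_MSG(n > 0, "n must be positive");
// A call such as head_bytes(0) is then rejected at compile time; on other
// compilers the macro expands to nothing and the declaration is unchanged.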
 /* Remaining macros are deprecated. */

 #if defined(JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK)
-    #undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK
+#undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK
 #endif
 #if defined(__clang__)
-    #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) (0)
+#define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major, minor, patch) (0)
 #else
-    #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major, minor, patch) JSON_HEDLEY_GCC_VERSION_CHECK(major, minor, patch)
 #endif

 #if defined(JSON_HEDLEY_CLANG_HAS_ATTRIBUTE)
-    #undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE
 #endif
 #define JSON_HEDLEY_CLANG_HAS_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_ATTRIBUTE(attribute)

 #if defined(JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE)
-    #undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE
+#undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE
 #endif
 #define JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute)

 #if defined(JSON_HEDLEY_CLANG_HAS_BUILTIN)
-    #undef JSON_HEDLEY_CLANG_HAS_BUILTIN
+#undef JSON_HEDLEY_CLANG_HAS_BUILTIN
 #endif
 #define JSON_HEDLEY_CLANG_HAS_BUILTIN(builtin) JSON_HEDLEY_HAS_BUILTIN(builtin)

 #if defined(JSON_HEDLEY_CLANG_HAS_FEATURE)
-    #undef JSON_HEDLEY_CLANG_HAS_FEATURE
+#undef JSON_HEDLEY_CLANG_HAS_FEATURE
 #endif
 #define JSON_HEDLEY_CLANG_HAS_FEATURE(feature) JSON_HEDLEY_HAS_FEATURE(feature)

 #if defined(JSON_HEDLEY_CLANG_HAS_EXTENSION)
-    #undef JSON_HEDLEY_CLANG_HAS_EXTENSION
+#undef JSON_HEDLEY_CLANG_HAS_EXTENSION
 #endif
 #define JSON_HEDLEY_CLANG_HAS_EXTENSION(extension) JSON_HEDLEY_HAS_EXTENSION(extension)

 #if defined(JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE)
-    #undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE
+#undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE
 #endif
 #define JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute)

 #if defined(JSON_HEDLEY_CLANG_HAS_WARNING)
-    #undef JSON_HEDLEY_CLANG_HAS_WARNING
+#undef JSON_HEDLEY_CLANG_HAS_WARNING
 #endif
 #define JSON_HEDLEY_CLANG_HAS_WARNING(warning) JSON_HEDLEY_HAS_WARNING(warning)

 #endif /* !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < X) */
-

 // This file contains all internal macro definitions
 // You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them

 // exclude unsupported compilers
 #if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK)
-    #if defined(__clang__)
-        #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400
-            #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers"
-        #endif
-    #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER))
-        #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800
-            #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers"
-        #endif
-    #endif
+#if defined(__clang__)
+#if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400
+#error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers"
+#endif
+#elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER))
+#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800
+#error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers"
+#endif
+#endif
 #endif
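// Worked example of the version encoding used by the check above (the helper
// macro is hypothetical): GCC 4.8.0 encodes as 4 * 10000 + 8 * 100 + 0 = 40800,
// the minimum value the check accepts.
#define ENCODE_GCC_VERSION(major, minor, patch) ((major) * 10000 + (minor) * 100 + (patch))
JSON_HEDLEY_STATIC_ASSERT(ENCODE_GCC_VERSION(4, 8, 0) == 40800, "version encoding sanity check");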
 // C++ language standard detection
-#if (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464
-    #define JSON_HAS_CPP_17
-    #define JSON_HAS_CPP_14
+#if (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1)  // fix for issue #464
+#define JSON_HAS_CPP_17
+#define JSON_HAS_CPP_14
 #elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1)
-    #define JSON_HAS_CPP_14
+#define JSON_HAS_CPP_14
 #endif

 // disable float-equal warnings on GCC/clang
 #if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
-    #pragma GCC diagnostic push
-    #pragma GCC diagnostic ignored "-Wfloat-equal"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
 #endif

 // disable documentation warnings on clang
 #if defined(__clang__)
-    #pragma GCC diagnostic push
-    #pragma GCC diagnostic ignored "-Wdocumentation"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdocumentation"
 #endif

 // allow to disable exceptions
 #if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION)
-    #define JSON_THROW(exception) throw exception
-    #define JSON_TRY try
-    #define JSON_CATCH(exception) catch(exception)
-    #define JSON_INTERNAL_CATCH(exception) catch(exception)
+#define JSON_THROW(exception) throw exception
+#define JSON_TRY try
+#define JSON_CATCH(exception) catch (exception)
+#define JSON_INTERNAL_CATCH(exception) catch (exception)
 #else
-    #include <cstdlib>
-    #define JSON_THROW(exception) std::abort()
-    #define JSON_TRY if(true)
-    #define JSON_CATCH(exception) if(false)
-    #define JSON_INTERNAL_CATCH(exception) if(false)
+#include <cstdlib>
+#define JSON_THROW(exception) std::abort()
+#define JSON_TRY if (true)
+#define JSON_CATCH(exception) if (false)
+#define JSON_INTERNAL_CATCH(exception) if (false)
 #endif

 // override exception macros
 #if defined(JSON_THROW_USER)
-    #undef JSON_THROW
-    #define JSON_THROW JSON_THROW_USER
+#undef JSON_THROW
+#define JSON_THROW JSON_THROW_USER
 #endif
 #if defined(JSON_TRY_USER)
-    #undef JSON_TRY
-    #define JSON_TRY JSON_TRY_USER
+#undef JSON_TRY
+#define JSON_TRY JSON_TRY_USER
 #endif
 #if defined(JSON_CATCH_USER)
-    #undef JSON_CATCH
-    #define JSON_CATCH JSON_CATCH_USER
-    #undef JSON_INTERNAL_CATCH
-    #define JSON_INTERNAL_CATCH JSON_CATCH_USER
+#undef JSON_CATCH
+#define JSON_CATCH JSON_CATCH_USER
+#undef JSON_INTERNAL_CATCH
+#define JSON_INTERNAL_CATCH JSON_CATCH_USER
 #endif
 #if defined(JSON_INTERNAL_CATCH_USER)
-    #undef JSON_INTERNAL_CATCH
-    #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER
+#undef JSON_INTERNAL_CATCH
+#define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER
 #endif
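// A minimal usage sketch (assumes <stdexcept> is included): the same code compiles
// with exceptions on or off. With -fno-exceptions and JSON_NOEXCEPTION defined,
// JSON_TRY becomes if (true), JSON_CATCH becomes if (false), and JSON_THROW aborts.
static void exception_demo() {
    JSON_TRY {
        JSON_THROW(std::runtime_error("boom"));  // std::abort() when exceptions are disabled
    }
    JSON_CATCH(const std::exception&) {
        // unreachable under JSON_NOEXCEPTION: this block is guarded by if (false)
    }
}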
 /*!
@@ -1692,53 +1594,41 @@ JSON_HEDLEY_DIAGNOSTIC_POP
 @def NLOHMANN_JSON_SERIALIZE_ENUM
 @since version 3.4.0
 */
-#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \
-    template<typename BasicJsonType> \
-    inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \
-    { \
-        static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
-        static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
-        auto it = std::find_if(std::begin(m), std::end(m), \
-                               [e](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
-        { \
-            return ej_pair.first == e; \
-        }); \
-        j = ((it != std::end(m)) ? it : std::begin(m))->second; \
-    } \
-    template<typename BasicJsonType> \
-    inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \
-    { \
-        static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
-        static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
-        auto it = std::find_if(std::begin(m), std::end(m), \
-                               [j](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
-        { \
-            return ej_pair.second == j; \
-        }); \
-        e = ((it != std::end(m)) ? it : std::begin(m))->first; \
+#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \
+    template <typename BasicJsonType> \
+    inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) { \
+        static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
+        static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
+        auto it = std::find_if( \
+            std::begin(m), std::end(m), \
+            [e](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool { return ej_pair.first == e; }); \
+        j = ((it != std::end(m)) ? it : std::begin(m))->second; \
+    } \
+    template <typename BasicJsonType> \
+    inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) { \
+        static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
+        static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
+        auto it = std::find_if( \
+            std::begin(m), std::end(m), \
+            [j](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool { return ej_pair.second == j; }); \
+        e = ((it != std::end(m)) ? it : std::begin(m))->first; \
     }

 // Ugly macros to avoid uglier copy-paste when specializing basic_json. They
 // may be removed in the future once the class is split.
-#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \
-    template<template<typename, typename, typename...> class ObjectType, \
-             template<typename, typename...> class ArrayType, \
-             class StringType, class BooleanType, class NumberIntegerType, \
-             class NumberUnsignedType, class NumberFloatType, \
-             template<typename> class AllocatorType, \
-             template<typename, typename = void> class JSONSerializer>
+#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \
+    template
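// Usage sketch for NLOHMANN_JSON_SERIALIZE_ENUM above, adapted from the library's
// documentation: declare the enum/JSON pairs once, and to_json/from_json are generated.
enum TaskState { TS_STOPPED, TS_RUNNING, TS_COMPLETED, TS_INVALID = -1 };

NLOHMANN_JSON_SERIALIZE_ENUM(TaskState, {
    {TS_INVALID, nullptr},
    {TS_STOPPED, "stopped"},
    {TS_RUNNING, "running"},
    {TS_COMPLETED, "completed"},
})

// json j = TS_STOPPED;          // serializes to "stopped"
// auto s = j.get<TaskState>();  // parses back; unmatched values map to the first pair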