// Mirror of https://gitee.com/milvus-io/milvus.git (synced 2025-12-31)
/*******************************************************************************
 * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
 * Unauthorized copying of this file, via any medium is strictly prohibited.
 * Proprietary and confidential.
 ******************************************************************************/
|
|
#pragma once
|
|
|
|
#include <vector>
|
|
#include <queue>
|
|
#include <memory>
|
|
#include <thread>
|
|
#include <mutex>
|
|
#include <condition_variable>
|
|
#include <future>
|
|
#include <functional>
|
|
#include <stdexcept>
|
|
|
|
|
|
#define MAX_THREADS_NUM 32
|
|
|
|
namespace zilliz {
|
|
namespace sql {
|
|
namespace storage {
|
|
|
|
/// Fixed-size worker pool with a bounded task queue.
///
/// Tasks submitted through enqueue() are executed by one of the worker
/// threads; the destructor drains the queue and joins all workers.
class ThreadPool {
 public:
    /// Launch @p threads workers; enqueue() blocks once @p queue_size
    /// tasks are already waiting.
    ThreadPool(size_t threads, size_t queue_size = 1000);

    /// Submit a callable with its arguments; returns a future holding
    /// the callable's result. Throws std::runtime_error if the pool has
    /// already been stopped.
    template<class F, class... Args>
    auto enqueue(F &&f, Args &&... args)
        -> std::future<typename std::result_of<F(Args...)>::type>;

    /// Drains pending tasks and joins every worker thread.
    ~ThreadPool();

 private:
    // need to keep track of threads so we can join them
    std::vector<std::thread> workers;

    // the task queue (bounded by max_queue_size)
    std::queue<std::function<void()>> tasks;

    size_t max_queue_size;

    // synchronization: one condition variable serves both
    // "task available" (workers) and "queue slot free" (producers)
    std::mutex queue_mutex;
    std::condition_variable condition;
    bool stop;  // set once, under queue_mutex, in the destructor
};
|
|
|
|
|
|
// the constructor just launches some amount of workers
|
|
inline ThreadPool::ThreadPool(size_t threads, size_t queue_size)
|
|
: max_queue_size(queue_size), stop(false) {
|
|
for (size_t i = 0; i < threads; ++i)
|
|
workers.emplace_back(
|
|
[this] {
|
|
for (;;) {
|
|
std::function<void()> task;
|
|
|
|
{
|
|
std::unique_lock<std::mutex> lock(this->queue_mutex);
|
|
this->condition.wait(lock,
|
|
[this] { return this->stop || !this->tasks.empty(); });
|
|
if (this->stop && this->tasks.empty())
|
|
return;
|
|
task = std::move(this->tasks.front());
|
|
this->tasks.pop();
|
|
}
|
|
this->condition.notify_all();
|
|
|
|
task();
|
|
}
|
|
}
|
|
);
|
|
}
|
|
|
|
// add new work item to the pool
|
|
template<class F, class... Args>
|
|
auto ThreadPool::enqueue(F &&f, Args &&... args)
|
|
-> std::future<typename std::result_of<F(Args...)>::type> {
|
|
using return_type = typename std::result_of<F(Args...)>::type;
|
|
|
|
auto task = std::make_shared<std::packaged_task<return_type()> >(
|
|
std::bind(std::forward<F>(f), std::forward<Args>(args)...)
|
|
);
|
|
|
|
std::future<return_type> res = task->get_future();
|
|
{
|
|
std::unique_lock<std::mutex> lock(queue_mutex);
|
|
this->condition.wait(lock,
|
|
[this] { return this->tasks.size() < max_queue_size; });
|
|
// don't allow enqueueing after stopping the pool
|
|
if (stop)
|
|
throw std::runtime_error("enqueue on stopped ThreadPool");
|
|
|
|
tasks.emplace([task]() { (*task)(); });
|
|
}
|
|
condition.notify_all();
|
|
return res;
|
|
}
|
|
|
|
// the destructor joins all threads
|
|
inline ThreadPool::~ThreadPool() {
|
|
{
|
|
std::unique_lock<std::mutex> lock(queue_mutex);
|
|
stop = true;
|
|
}
|
|
condition.notify_all();
|
|
for (std::thread &worker: workers)
|
|
worker.join();
|
|
}
|
|
|
|
}
|
|
}
|
|
}
|
|
|