我正試圖在C++中實現一個 future 式的調用機制。雖然這只是一段(寫得有點倉促的)測試代碼,但我打算在我正在開發的語言的運行時中使用類似的機制來實現透明並行:把執行從一個線程轉移到另一個線程,以完成任務並行和遠程調用。
我已經儘量精簡了代碼,但它仍然相當長:
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <future>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>
#include <type_traits>
#include <utility>
#include <vector>
using namespace std;
using namespace std::chrono;
//------------------------------------------------------------------------------
// Simple locked printer
static std::recursive_mutex print_lock;
inline void print_() {
return;
};
template<typename T, typename... Args>
inline void print_(T t, Args... args) {
print_lock.lock();
std::cout << t;
print_(args...);
print_lock.unlock();
};
//------------------------------------------------------------------------------
// A lazily/eagerly executed task whose result can be claimed by whichever
// thread calls operator() first.
//
// Synchronization scheme: `lock` doubles as an "is executing" flag.
// - try_lock succeeds  -> nobody is running the task; run it (or it is done).
// - try_lock fails     -> someone else is running it; block on lock() until
//                         they finish, then read the outcome.
// NOTE(review): `failed` and `result` are read *after* unlocking at `end:`;
// this looks benign for the two-party handoff used here, but confirm it is
// safe if more than two threads can call operator() concurrently.
template<typename R>
class PooledTask {
public:
    // Defined after TaskPool below: stores the callable and enqueues itself.
    explicit PooledTask(function<R()>);
    // Possibly execute the task and return the value
    R &operator()() {
        // If we can get the lock, we're not executing
        if(lock.try_lock()) {
            // We may already have executed it
            if(done)
                goto end;
            // Otherwise, execute it now
            try {
                result = move(task());
            } catch(...) {
                // If an exception is thrown, save it for later
                eptr = current_exception();
                failed = true;
            };
            done = true;
            goto end;
        } else {
            // Wait until the task is completed
            lock.lock();
            end: {
                lock.unlock();
                // Maybe we got an exception!
                if(failed)
                    rethrow_exception(eptr);
                // Otherwise, just return the result
                return result;
            };
        };
    };
private:
    exception_ptr eptr;     // exception captured from the task body, if any
    function<R()> task;     // the deferred computation
    bool done;              // set once the task has executed
    bool failed;            // true when eptr holds a captured exception
    mutex lock;             // held for the duration of execution (see above)
    R result;               // valid only when done && !failed
};
extern class TaskPool pool;
// Fixed-size pool of worker threads that run queued task wrappers.
//
// Fixes over the original: workers sleep on a condition variable instead of
// busy-spinning on the queue lock; the destructor drains the queue, joins and
// deletes every worker (the original leaked the threads and never stopped
// them); the worker count is clamped so a single-core machine still gets one
// worker instead of underflowing hardware_concurrency() - 1.
class TaskPool {
public:
    // One worker per hardware thread minus one (reserved for main),
    // but always at least one worker.
    TaskPool() noexcept: TaskPool(worker_count()) {}
    TaskPool(const TaskPool &) = delete;
    TaskPool(TaskPool &&) = delete;
    // Enqueue a task; exactly one sleeping worker is woken to run it.
    template<typename T>
    void push(PooledTask<T> *task) noexcept {
        {
            std::lock_guard<std::mutex> guard(lock);
            builders.push([=] {
                try {
                    (*task)();
                } catch(...) {
                    // Ignore it here! The task will save it. :)
                }
            });
        }
        ready.notify_one();
    }
    ~TaskPool() {
        // Signal shutdown, let the workers drain the queue, then join them.
        {
            std::lock_guard<std::mutex> guard(lock);
            stopping = true;
        }
        ready.notify_all();
        while(!threads.empty()) {
            std::thread *worker = threads.front();
            threads.pop();
            worker->join();
            delete worker;
        }
    }
private:
    std::queue<std::thread *> threads;
    std::queue<std::function<void()>> builders;
    std::mutex lock;
    std::condition_variable ready;  // signaled on push and on shutdown
    bool stopping = false;          // guarded by `lock`
    // hardware_concurrency() may return 0; never return 0 workers.
    static unsigned worker_count() noexcept {
        const unsigned n = std::thread::hardware_concurrency();
        return n > 1 ? n - 1 : 1;
    }
    explicit TaskPool(unsigned N) noexcept {
        while(N-- > 0)
            threads.push(new std::thread([this] {
                // Run tasks until pop_task() reports shutdown.
                while(pop_task())
                    ;
            }));
    }
    // Block until a task is available or the pool is stopping.
    // Returns false only when stopping and the queue is fully drained.
    bool pop_task() noexcept {
        std::unique_lock<std::mutex> guard(lock);
        ready.wait(guard, [this] { return stopping || !builders.empty(); });
        if(builders.empty())
            return false;
        auto task = std::move(builders.front());
        builders.pop();
        guard.unlock();     // never hold the queue lock while running a task
        task();
        return true;
    }
} pool;
// Store the callable and immediately schedule this task on the global pool,
// so a worker thread may start it before the caller ever invokes operator().
template<typename R>
PooledTask<R>::PooledTask(function<R()> fun):
    task(move(fun)),    // sink parameter: move the std::function, don't copy it
    done(false),
    failed(false)
{
    pool.push(this);
}
// Schedule `fun(args...)` on the global pool and return a handle whose
// operator() blocks until the result is ready (or rethrows the task's
// exception).
//
// Ownership: the CALLER owns the returned pointer and must delete it after
// use — returning std::unique_ptr/std::shared_ptr would be safer.
// Not noexcept: `new` can throw bad_alloc, and the original `noexcept`
// would have turned that into std::terminate.
template<typename F, typename... Args>
auto byfuture(F fun, Args&&... args) ->
    PooledTask<decltype(fun(args...))> *
{
    using R = decltype(fun(args...));
    return new PooledTask<R>(bind(fun, forward<Args>(args)...));
}
//------------------------------------------------------------------------------
#include <map>
// Map the calling thread to a small sequential id (1, 2, 3, ...).
// Ids are handed out in first-call order, so whichever thread calls first
// (main, in this program) gets id 1. Thread-safe via an internal mutex.
static int myid() noexcept {
    static unsigned N = 0;
    static std::map<std::thread::id, unsigned> ids;
    static std::mutex lock;
    std::lock_guard<std::mutex> guard(lock);
    // One lookup instead of three: operator[] default-inserts 0,
    // which we treat as "no id assigned yet".
    auto &id = ids[std::this_thread::get_id()];
    if(id == 0)
        id = ++N;
    return id;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// The fibonacci test implementation: compute fib(x) by forking both
// sub-problems onto the pool, then joining on their results.
int future_fib(int x, int parent) {
    // Base cases are resolved inline; nothing worth parallelizing.
    if(x < 3)
        return 1;
    print_("future_fib(", x, ")", " on thread ", myid(),
           ", asked by thread ", parent, "\n");
    // Fork: both halves are queued on the pool (and may start immediately).
    auto left  = byfuture(future_fib, x - 1, myid());
    auto right = byfuture(future_fib, x - 2, myid());
    // Join: calling a task blocks until its value is ready
    // (or executes it inline if no worker has claimed it yet).
    const int sum = (*left)() + (*right)();
    delete left;
    delete right;
    return sum;
}
//------------------------------------------------------------------------------
int main() {
// Force main thread to get id 1
myid();
// Get task
auto f = byfuture(future_fib, 8, myid());
// Make sure it starts on the task pool
this_thread::sleep_for(seconds(1));
// Blocks
(*f)();
// Simply wait to be sure all threads are clean
this_thread::sleep_for(seconds(2));
//
return EXIT_SUCCESS;
};
此程序的輸出如下(我的機器是四核,所以線程池中有3個線程):
future_fib(8) on thread 2, asked by thread 1
future_fib(7) on thread 3, asked by thread 2
future_fib(6) on thread 4, asked by thread 2
future_fib(6) on thread 3, asked by thread 3
future_fib(5) on thread 4, asked by thread 4
future_fib(5) on thread 3, asked by thread 3
future_fib(4) on thread 4, asked by thread 4
future_fib(4) on thread 3, asked by thread 3
future_fib(3) on thread 4, asked by thread 4
future_fib(3) on thread 3, asked by thread 3
future_fib(3) on thread 4, asked by thread 4
future_fib(3) on thread 3, asked by thread 3
future_fib(4) on thread 4, asked by thread 4
future_fib(4) on thread 3, asked by thread 3
future_fib(3) on thread 4, asked by thread 4
future_fib(3) on thread 3, asked by thread 3
future_fib(5) on thread 3, asked by thread 3
future_fib(4) on thread 3, asked by thread 3
future_fib(3) on thread 3, asked by thread 3
future_fib(3) on thread 3, asked by thread 3
此實現本身比普通的斐波那契函數還要慢。
問題出在這裏:當池中的某個線程運行 fib(8)
時,它會創建兩個將在其他線程上運行的任務;但當執行到 auto res = (*f1)() + (*f2)();
時,這兩個任務都已經在運行了,於是它只能阻塞在 f1
上(f1 正在線程3上運行)。
爲了提高速度,我需要的是:線程2在等待 f1
時不要阻塞閒置,而是去接手線程3隊列中尚未開始的任務,讓線程3儘快空出來接受新任務——這樣就不會有線程在計算過程中睡眠。
這篇文章 http://bartoszmilewski.com/2011/10/10/async-tasks-in-c11-not-quite-there-yet/ 指出確實需要做我想做的這件事,但沒有說明具體該怎麼實現。
我的疑問是:我怎麼可能做到這一點?
有沒有其他的選擇做我想要的?
[Threading Building Blocks(TBB)library](https://www.threadingbuildingblocks.org/)怎麼樣?它提供了帶有線程池的併發任務系統。 – yohjp
看看C++ 1z的'.then()'方案嗎? 'return pooled_fib(x-2).then([x](auto && r1){auto r2 = pooled_fib(x-1); return r1.get()+ r2.get();});'或者somesuch。 – Yakk