Merge branch 'master' into sockets2

This commit is contained in:
Lewis Baker 2018-08-20 21:07:17 -07:00 committed by GitHub
commit e23897adc4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
65 changed files with 3742 additions and 810 deletions

View File

@ -19,8 +19,8 @@ addons:
- llvm-toolchain-trusty
packages:
- python2.7
- clang-6.0
- lld-6.0
- clang-7
- lld-7
- ninja-build
cache:
@ -37,12 +37,12 @@ env:
- CMAKE_VERSION="3.9.1"
matrix:
include:
- env: RELEASE=debug ARCH=x64 CLANG_VERSION=6.0
- env: RELEASE=optimised ARCH=x64 CLANG_VERSION=6.0
- env: RELEASE=debug ARCH=x64 CLANG_VERSION=7
- env: RELEASE=optimised ARCH=x64 CLANG_VERSION=7
allow_failures:
# Clang 6.0~svn320382 has a bug that causes optimised builds to fail.
# See https://bugs.llvm.org/show_bug.cgi?id=34897
- env: RELEASE=optimised ARCH=x64 CLANG_VERSION=6.0
- env: RELEASE=optimised ARCH=x64 CLANG_VERSION=7
before_install:
- export CC="$CC-$CLANG_VERSION"

519
README.md
View File

@ -28,10 +28,19 @@ These include:
* `cancellation_source`
* `cancellation_registration`
* Schedulers and I/O
* `static_thread_pool`
* `io_service`
* `io_work_scope`
* `file`, `readable_file`, `writable_file`
* `read_only_file`, `write_only_file`, `read_write_file`
* Metafunctions
* `is_awaitable<T>`
* `awaitable_traits<T>`
* Concepts
* `Awaitable<T>`
* `Awaiter<T>`
* `Scheduler`
* `DelayedScheduler`
This library is an experimental library that is exploring the space of high-performance,
scalable asynchronous programming abstractions that can be built on top of the C++ coroutines
@ -123,8 +132,8 @@ namespace cppcoro
// If the task is not yet ready then the awaiting coroutine will be
// suspended until the task completes. If the task is_ready() then
// this operation will return the result synchronously without suspending.
<unspecified> operator co_await() const & noexcept;
<unspecified> operator co_await() const && noexcept;
Awaiter<T&> operator co_await() const & noexcept;
Awaiter<T&&> operator co_await() const && noexcept;
// Returns an awaitable that can be co_await'ed to suspend the current
// coroutine until the task completes.
@ -135,21 +144,21 @@ namespace cppcoro
//
// This can be useful if you want to synchronise with the task without
// the possibility of it throwing an exception.
<unspecified> when_ready() const noexcept;
Awaitable<void> when_ready() const noexcept;
};
template<typename T>
void swap(task<T>& a, task<T>& b);
// Apply func() to the result of the task, returning a new task that
// yields the result of 'func(co_await task)'.
template<typename FUNC, typename T>
task<std::invoke_result_t<FUNC, T&&>> fmap(FUNC func, task<T> task);
// Call func() after task completes, returning a task containing the
// result of func().
template<typename FUNC>
task<std::invoke_result_t<FUNC>> fmap(FUNC func, task<void> task);
// Creates a task that yields the result of co_await'ing the specified awaitable.
//
// This can be used as a form of type-erasure of the concrete awaitable, allowing
// different awaitables that return the same await-result type to be stored in
// the same task<RESULT> type.
template<
typename AWAITABLE,
typename RESULT = typename awaitable_traits<AWAITABLE>::await_result_t>
task<RESULT> make_task(AWAITABLE awaitable);
}
```
@ -177,11 +186,10 @@ execution of the coroutine.
If the task has already run to completion then awaiting it again will obtain
the already-computed result without suspending the awaiting coroutine.
If the `task` value is destroyed before it is awaited then the coroutine
If the `task` object is destroyed before it is awaited then the coroutine
never executes and the destructor simply destructs the captured parameters
and frees any memory used by the coroutine frame.
## `shared_task<T>`
The `shared_task<T>` class is a coroutine type that yields a single value
@ -195,7 +203,7 @@ the result of the task to be created. It also allows multiple coroutines to
concurrently await the result.
The task will start executing on the thread that first `co_await`s the task.
Subsequent awaiters will either be suspended and queued for resumption
Subsequent awaiters will either be suspended and be queued for resumption
when the task completes or will continue synchronously if the task has
already run to completion.
@ -236,7 +244,7 @@ namespace cppcoro
// is void in which case the expression has type 'void').
// If the task completed with an unhandled exception then the
// exception will be rethrown by the co_await expression.
<unspecified> operator co_await() const noexcept;
Awaiter<T&> operator co_await() const noexcept;
// Returns an operation that when awaited will suspend the
// calling coroutine until the task completes and the result
@ -245,7 +253,7 @@ namespace cppcoro
// The result is not returned from the co_await expression.
// This can be used to synchronise with the task without the
// possibility of the co_await expression throwing an exception.
<unspecified> when_ready() const noexcept;
Awaiter<void> when_ready() const noexcept;
};
@ -257,31 +265,25 @@ namespace cppcoro
template<typename T>
void swap(shared_task<T>& a, shared_task<T>& b) noexcept;
// Wrap a task in a shared_task to allow multiple coroutines to concurrently
// await the result.
template<typename T>
shared_task<T> make_shared_task(task<T> task);
// Apply func() to the result of the task, returning a new task that
// yields the result of 'func(co_await task)'.
template<typename FUNC, typename T>
task<std::invoke_result_t<FUNC, T&&>> fmap(FUNC func, shared_task<T> task);
// Call func() after task completes, returning a task containing the
// result of func().
template<typename FUNC>
task<std::invoke_result_t<FUNC>> fmap(FUNC func, shared_task<void> task);
// Wrap an awaitable value in a shared_task to allow multiple coroutines
// to concurrently await the result.
template<
typename AWAITABLE,
typename RESULT = typename awaitable_traits<AWAITABLE>::await_result_t>
shared_task<RESULT> make_shared_task(AWAITABLE awaitable);
}
```
All const-methods on `shared_task<T>` are safe to call concurrently with other const-methods on the same instance from multiple threads.
It is not safe to call non-const methods of `shared_task<T>` concurrently with any other method on the same instance of a `shared_task<T>`.
All const-methods on `shared_task<T>` are safe to call concurrently with other
const-methods on the same instance from multiple threads. It is not safe to call
non-const methods of `shared_task<T>` concurrently with any other method on the
same instance of a `shared_task<T>`.
### Comparison to `task<T>`
The `shared_task<T>` class is similar to `task<T>` in that the task does
not start execution immediately upon the coroutine function being called. The task
only starts executing when it is first awaited.
not start execution immediately upon the coroutine function being called.
The task only starts executing when it is first awaited.
It differs from `task<T>` in that the resulting task object can be copied,
allowing multiple task objects to reference the same asynchronous result.
@ -295,10 +297,12 @@ a reference count and support multiple awaiters.
## `generator<T>`
A `generator` represents a coroutine type that produces a sequence of values of type, `T`, where values are produced lazily and synchronously.
A `generator` represents a coroutine type that produces a sequence of values of type, `T`,
where values are produced lazily and synchronously.
The coroutine body is able to yield values of type `T` using the `co_yield` keyword.
Note, however, that the coroutine body is not able to use the `co_await` keyword; values must be produced synchronously.
Note, however, that the coroutine body is not able to use the `co_await` keyword;
values must be produced synchronously.
For example:
```c++
@ -328,11 +332,14 @@ When a coroutine function returning a `generator<T>` is called the coroutine is
Execution of the coroutine enters the coroutine body when the `generator<T>::begin()` method is called and continues until
either the first `co_yield` statement is reached or the coroutine runs to completion.
If the returned iterator is not equal to the `end()` iterator then dereferencing the iterator will return a reference to the value passed to the `co_yield` statement.
If the returned iterator is not equal to the `end()` iterator then dereferencing the iterator will
return a reference to the value passed to the `co_yield` statement.
Calling `operator++()` on the iterator will resume execution of the coroutine and continue until either the next `co_yield` point is reached or the coroutine runs to completion().
Calling `operator++()` on the iterator will resume execution of the coroutine and continue until
either the next `co_yield` point is reached or the coroutine runs to completion.
Any unhandled exceptions thrown by the coroutine will propagate out of the `begin()` or `operator++()` calls to the caller.
Any unhandled exceptions thrown by the coroutine will propagate out of the `begin()` or
`operator++()` calls to the caller.
API Summary:
```c++
@ -496,7 +503,7 @@ namespace cppcoro
// will subsequently become equal to the end() iterator.
// If the coroutine completes with an unhandled exception then
// that exception will be rethrown from the co_await expression.
<unspecified> operator++() noexcept;
Awaitable<iterator&> operator++() noexcept;
// Dereference the iterator.
pointer operator->() const noexcept;
@ -524,7 +531,7 @@ namespace cppcoro
//
// This method is not valid to be called once the coroutine has
// run to completion.
<unspecified> begin() noexcept;
Awaitable<iterator> begin() noexcept;
iterator end() noexcept;
};
@ -570,7 +577,7 @@ namespace cppcoro
bool is_set() const noexcept;
void set();
void reset() noexcept;
<unspecified> operator co_await() const noexcept;
Awaiter<void> operator co_await() const noexcept;
};
}
```
@ -636,7 +643,7 @@ namespace cppcoro
// continues without suspending.
// The event is automatically reset back to the 'not set' state
// before resuming the coroutine.
Awaitable<void> operator co_await() const noexcept;
Awaiter<void> operator co_await() const noexcept;
};
}
@ -807,7 +814,7 @@ namespace cppcoro
async_manual_reset_event& operator=(async_manual_reset_event&&) = delete;
// Wait until the event becomes set.
<unspecified> operator co_await() const noexcept;
async_manual_reset_event_operation operator co_await() const noexcept;
bool is_set() const noexcept;
@ -1077,6 +1084,109 @@ cppcoro::task<> cancellable_timer_wait(cppcoro::cancellation_token token)
}
```
## `static_thread_pool`
The `static_thread_pool` class provides an abstraction that lets you schedule work
on a fixed-size pool of threads.
This class implements the **Scheduler** concept (see below).
You can enqueue work to the thread-pool by executing `co_await threadPool.schedule()`.
This operation will suspend the current coroutine, enqueue it for execution on the
thread-pool and the thread pool will then resume the coroutine when a thread in the
thread-pool is next free to run the coroutine. **This operation is guaranteed not
to throw and, in the common case, will not allocate any memory**.
This class makes use of a work-stealing algorithm to load-balance work across multiple
threads. Work enqueued to the thread-pool from a thread-pool thread will be scheduled
for execution on the same thread in a LIFO queue. Work enqueued to the thread-pool from
a remote thread will be enqueued to a global FIFO queue. When a worker thread runs out
of work from its local queue it first tries to dequeue work from the global queue. If
that queue is empty then it next tries to steal work from the back of the queues of
the other worker threads.
API Summary:
```c++
namespace cppcoro
{
class static_thread_pool
{
public:
// Initialise the thread-pool with a number of threads equal to
// std::thread::hardware_concurrency().
static_thread_pool();
// Initialise the thread pool with the specified number of threads.
explicit static_thread_pool(std::uint32_t threadCount);
std::uint32_t thread_count() const noexcept;
class schedule_operation
{
public:
schedule_operation(static_thread_pool* tp) noexcept;
bool await_ready() noexcept;
bool await_suspend(std::experimental::coroutine_handle<> h) noexcept;
bool await_resume() noexcept;
private:
// unspecified
};
// Return an operation that can be awaited by a coroutine.
//
//
[[nodiscard]]
schedule_operation schedule() noexcept;
private:
// Unspecified
};
}
```
Example usage: Simple
```c++
cppcoro::task<std::string> do_something_on_threadpool(cppcoro::static_thread_pool& tp)
{
// First schedule the coroutine onto the threadpool.
co_await tp.schedule();
// When it resumes, this coroutine is now running on the threadpool.
do_something();
}
```
Example usage: Doing things in parallel - using `schedule_on()` operator with `static_thread_pool`.
```c++
cppcoro::task<double> dot_product(static_thread_pool& tp, double a[], double b[], size_t count)
{
if (count > 1000)
{
// Subdivide the work recursively into two equal tasks
// The first half is scheduled to the thread pool so it can run concurrently
// with the second half which continues on this thread.
size_t halfCount = count / 2;
auto [first, second] = co_await when_all(
schedule_on(tp, dot_product(tp, a, b, halfCount)),
dot_product(tp, a + halfCount, b + halfCount, count - halfCount));
co_return first + second;
}
else
{
double sum = 0.0;
for (size_t i = 0; i < count; ++i)
{
sum += a[i] * b[i];
}
co_return sum;
}
}
```
## `io_service`
The `io_service` class provides an abstraction for processing I/O completion events
@ -1444,13 +1554,14 @@ All `open()` functions throw `std::system_error` on failure.
## `sync_wait()`
The `sync_wait()`function can be used to synchronously wait until the specified `task`
or `shared_task` completes.
The `sync_wait()`function can be used to synchronously wait until the specified `awaitable`
completes.
If the task has not yet started execution then it will be started on the current thread.
The specified awaitable will be `co_await`ed on the current thread inside a newly created coroutine.
The `sync_wait()` call will block until the task completes and will return the task's result
or rethrow the task's exception if the task completed with an unhandled exception.
The `sync_wait()` call will block until the operation completes and will return the result of
the `co_await` expression or rethrow the exception if the `co_await` expression completed with
an unhandled exception.
The `sync_wait()` function is mostly useful for starting a top-level task from within `main()`
and waiting until the task finishes, in practise it is the only way to start the first/top-level
@ -1461,8 +1572,9 @@ API Summary:
// <cppcoro/sync_wait.hpp>
namespace cppcoro
{
template<typename TASKS>
decltype(auto) sync_wait(TASK&& task);
template<typename AWAITABLE>
auto sync_wait(AWAITABLE&& awaitable)
-> typename awaitable_traits<AWAITABLE&&>::await_result_t;
}
```
@ -1478,8 +1590,8 @@ void example_task()
auto task = makeTask();
// start the lazy task and wait until it completes
sync_wait(task) == "foo";
sync_wait(makeTask()) == "foo";
sync_wait(task); // -> "foo"
sync_wait(makeTask()); // -> "foo"
}
void example_shared_task()
@ -1498,48 +1610,68 @@ void example_shared_task()
## `when_all_ready()`
The `when_all_ready()` function can be used to create a new `task` that will
complete when all of the specified input tasks have completed.
The `when_all_ready()` function can be used to create a new awaitable that completes when
all of the input awaitables complete.
Input tasks can either be `task<T>` or `shared_task<T>`.
Input tasks can be any type of awaitable.
When the returned `task` is `co_await`ed it will start executing each of the input
tasks in turn on the awaiting thread in the order they are passed to the `when_all_ready()`
When the returned awaitable is `co_await`ed it will `co_await` each of the input awaitables
in turn on the awaiting thread in the order they are passed to the `when_all_ready()`
function. If these operations do not complete synchronously then they will execute concurrently.
Once all of the input tasks have run to completion the returned `task` will complete
and resume the awaiting coroutine. The awaiting coroutine will be resumed on the thread
of the input task that is last to complete.
Once all of the `co_await` expressions on input awaitables have run to completion the
returned awaitable will complete and resume the awaiting coroutine. The awaiting coroutine
will be resumed on the thread of the input awaitable that is last to complete.
The returned `task` is guaranteed not to throw an exception when `co_await`ed,
even if some of the input tasks fail with an unhandled exception.
The returned awaitable is guaranteed not to throw an exception when `co_await`ed,
even if some of the input awaitables fail with an unhandled exception.
Note, however, that the `when_all_ready()` call itself may throw `std::bad_alloc` if it
was unable to allocate memory for the returned `task`'s coroutine frame.
was unable to allocate memory for the coroutine frames required to await each of the
input awaitables. It may also throw an exception if any of the input awaitable objects
throw from their copy/move constructors.
The input tasks are returned back to the awaiting coroutine upon completion.
This allows the caller to execute the coroutines concurrently and synchronise their
completion while still retaining the ability to subsequently inspect the results of
each of the input tasks for success/failure.
The result of `co_await`ing the returned awaitable is a `std::tuple` or `std::vector`
of `when_all_task<RESULT>` objects. These objects allow you to obtain the result (or exception)
of each input awaitable separately by calling the `when_all_task<RESULT>::result()`
method of the corresponding output task.
This allows the caller to concurrently await multiple awaitables and synchronise on
their completion while still retaining the ability to subsequently inspect the results of
each of the `co_await` operations for success/failure.
This differs from `when_all()` in a similar way that `co_await`ing `task<T>::when_ready()`
differs from `co_await'ing the `task<T>` directly.
This differs from `when_all()` where the failure of any individual `co_await` operation
causes the overall operation to fail with an exception. This means you cannot determine
which of the component `co_await` operations failed and also prevents you from obtaining
the results of the other `co_await` operations.
API summary:
```c++
// <cppcoro/when_all_ready.hpp>
namespace cppcoro
{
template<typename... TASKS>
task<std::tuple<TASKS...>> when_all_ready(TASKS... tasks);
// Concurrently await multiple awaitables.
//
// Returns an awaitable object that, when co_await'ed, will co_await each of the input
// awaitable objects and will resume the awaiting coroutine only when all of the
// component co_await operations complete.
//
// Result of co_await'ing the returned awaitable is a std::tuple of detail::when_all_task<T>,
// one for each input awaitable and where T is the result-type of the co_await expression
// on the corresponding awaitable.
//
// AWAITABLES must be awaitable types and must be movable (if passed as rvalue) or copyable
// (if passed as lvalue). The co_await expression will be executed on an rvalue of the
// copied awaitable.
template<typename... AWAITABLES>
auto when_all_ready(AWAITABLES&&... awaitables)
-> Awaitable<std::tuple<detail::when_all_task<typename awaitable_traits<AWAITABLES>::await_result_t>...>>;
template<typename T>
task<std::vector<task<T>> when_all_ready(
std::vector<task<T>> tasks);
template<typename T>
task<std::vector<shared_task<T>> when_all_ready(
std::vector<shared_task<T>> tasks);
// Concurrently await each awaitable in a vector of input awaitables.
template<
typename AWAITABLE,
typename RESULT = typename awaitable_traits<AWAITABLE>::await_result_t>
auto when_all_ready(std::vector<AWAITABLE> awaitables)
-> Awaitable<std::vector<detail::when_all_task<RESULT>>>;
}
```
@ -1556,10 +1688,10 @@ task<> example1()
get_record(456),
get_record(789));
// Unpack the result of each task (this will complete immediately)
std::string& record1 = co_await task1;
std::string& record2 = co_await task2;
std::string& record3 = co_await task3;
// Unpack the result of each task
std::string& record1 = task1.result();
std::string& record2 = task2.result();
std::string& record3 = task3.result();
// Use records....
}
@ -1574,15 +1706,15 @@ task<> example2()
}
// Execute all tasks concurrently.
// Returns the input vector of tasks.
tasks = co_await when_all_ready(std::move(tasks));
std::vector<detail::when_all_task<std::string>> resultTasks =
co_await when_all_ready(std::move(tasks));
// Unpack and handle each result individually once they're all complete.
for (int i = 0; i < 1000; ++i)
{
try
{
std::string& record = co_await tasks[i];
std::string& record = tasks[i].result();
std::cout << i << " = " << record << std::endl;
}
catch (const std::exception& ex)
@ -1595,24 +1727,26 @@ task<> example2()
## `when_all()`
The `when_all()` function can be used to create a new `task` that will complete
when all of the input tasks have completed, and will return an aggregate of all of the
individual results.
The `when_all()` function can be used to create a new Awaitable that when `co_await`ed
will `co_await` each of the input awaitables concurrently and return an aggregate of
their individual results.
When the returned `task` is awaited, it will start execution of all of the input
tasks on the current thread. Once the first task suspends, the second task will be started,
and so on. The tasks execute concurrently until they have all run to completion.
When the returned awaitable is awaited, it will `co_await` each of the input awaitables
on the current thread. Once the first awaitable suspends, the second task will be started,
and so on. The operations execute concurrently until they have all run to completion.
Once all input tasks have run to completion, an aggregate of the results is constructed
from each individual task result. If an exception is thrown by any of the input tasks
or if the construction of the aggregate result throws an exception then the exception
will propagate out of the `co_await` of the returned `task`.
Once all component `co_await` operations have run to completion, an aggregate of the
results is constructed from each individual result. If an exception is thrown by any
of the input tasks or if the construction of the aggregate result throws an exception
then the exception will propagate out of the `co_await` of the returned awaitable.
If multiple tasks fail with an exception then one of the exceptions will propagate out
of the `when_all()` task and the other exceptions will be silently ignored. It is not
specified which task's exception will be chosen. If it is important to know which task(s)
failed then you should use `when_all_ready()` instead and `co_await` the result of each
task individually.
If multiple `co_await` operations fail with an exception then one of the exceptions
will propagate out of the `co_await when_all()` expression and the other exceptions will be silently
ignored. It is not specified which operation's exception will be chosen.
If it is important to know which component `co_await` operation failed or to retain
the ability to obtain results of other operations even if some of them fail then
you should use `when_all_ready()` instead.
API Summary:
```c++
@ -1620,24 +1754,32 @@ API Summary:
namespace cppcoro
{
// Variadic version.
template<typename... TASKS>
task<std::tuple<typename TASKS::value_type...>> when_all(TASKS... tasks);
//
// Note that if the result of `co_await awaitable` yields a void-type
// for some awaitables then the corresponding component for that awaitable
// in the tuple will be an empty struct of type detail::void_value.
template<typename... AWAITABLES>
auto when_all(AWAITABLES&&... awaitables)
-> Awaitable<std::tuple<typename awaitable_traits<AWAITABLES>::await_result_t...>>;
// Overloads for vector of value-returning tasks
template<typename T>
task<std::vector<T>> when_all(std::vector<task<T>> tasks);
template<typename T>
task<std::vector<T>> when_all(std::vector<shared_task<T>> tasks);
// Overload for vector<Awaitable<void>>.
template<
typename AWAITABLE,
typename RESULT = typename awaitable_traits<AWAITABLE>::await_result_t,
std::enable_if_t<std::is_void_v<RESULT>, int> = 0>
auto when_all(std::vector<AWAITABLE> awaitables)
-> Awaitable<void>;
// Overloads for vector of reference-returning tasks
template<typename T>
task<std::vector<std::reference_wrapper<T>>> when_all(std::vector<task<T&>> tasks);
template<typename T>
task<std::vector<std::reference_wrapper<T>>> when_all(std::vector<shared_task<T&>> tasks);
// Overloads for vector of void-returning tasks
task<> when_all(std::vector<task<>> tasks);
task<> when_all(std::vector<shared_task<>> tasks);
// Overload for vector<Awaitable<NonVoid>> that yield a value when awaited.
template<
typename AWAITABLE,
typename RESULT = typename awaitable_traits<AWAITABLE>::await_result_t,
std::enable_if_t<!std::is_void_v<RESULT>, int> = 0>
auto when_all(std::vector<AWAITABLE> awaitables)
-> Awaitable<std::vector<std::conditional_t<
std::is_lvalue_reference_v<RESULT>,
std::reference_wrapper<std::remove_reference_t<RESULT>>,
std::remove_reference_t<RESULT>>>>;
}
```
@ -1684,10 +1826,11 @@ The `fmap()` function can be used to apply a callable function to the value(s) c
a container-type, returning a new container-type of the results of applying the function the
contained value(s).
The `fmap()` function can apply a function to values of type `task<T>`, `shared_task<T>`, `generator<T>`,
`recursive_generator<T>` and `async_generator<T>`.
The `fmap()` function can apply a function to values of type `generator<T>`, `recursive_generator<T>`
and `async_generator<T>` as well as any value that supports the `Awaitable` concept (eg. `task<T>`).
Each of these types provides an overload for `fmap()` that takes two arguments; a function to apply and the container value.
Each of these types provides an overload for `fmap()` that takes two arguments; a function to apply
and the container value.
See documentation for each type for the supported `fmap()` overloads.
For example, the `fmap()` function can be used to apply a function to the eventual result of
@ -1739,6 +1882,21 @@ namespace cppcoro
template<typename T, typename FUNC>
decltype(auto) operator|(T&& value, const fmap_transform<FUNC>& transform);
// Generic overload for all awaitable types.
//
// Returns an awaitable that when co_awaited, co_awaits the specified awaitable
// and applies the specified func to the result of the 'co_await awaitable'
// expression as if by 'std::invoke(func, co_await awaitable)'.
//
// If the type of 'co_await awaitable' expression is 'void' then co_awaiting the
// returned awaitable is equivalent to 'co_await awaitable, func()'.
template<
typename FUNC,
typename AWAITABLE,
std::enable_if_t<is_awaitable_v<AWAITABLE>, int> = 0>
auto fmap(FUNC&& func, AWAITABLE&& awaitable)
-> Awaitable<std::invoke_result_t<FUNC, typename awaitable_traits<AWAITABLE>::await_result_t>>;
}
```
@ -1747,16 +1905,18 @@ lookup (ADL) so it should generally be called without the `cppcoro::` prefix.
## `resume_on()`
The `resume_on()` function can be used to control the execution context that a `task`,
`shared_task` or `async_generator` should resume its awaiting coroutine on.
The `resume_on()` function can be used to control the execution context that an awaitable
will resume the awaiting coroutine on when awaited. When applied to an `async_generator`
it controls which execution context the `co_await g.begin()` and `co_await ++it` operations
resume the awaiting coroutines on.
Normally, the awaiter of a `task` or `async_generator` will resume execution on whatever
thread the `task` completed on. In some cases this may not be the thread that you want
to continue executing on. In these cases you can use the `resume_on()` function to create
a new task or generator that will resume execution on a thread associated with a specified
scheduler.
Normally, the awaiting coroutine of an awaitable (eg. a `task`) or `async_generator` will
resume execution on whatever thread the operation completed on. In some cases this may not
be the thread that you want to continue executing on. In these cases you can use the
`resume_on()` function to create a new awaitable or generator that will resume execution
on a thread associated with a specified scheduler.
The `resume_on()` function can be used either as a normal function returning a new task/generator.
The `resume_on()` function can be used either as a normal function returning a new awaitable/generator.
Or it can be used in a pipeline-syntax.
Example:
@ -1795,11 +1955,9 @@ API Summary:
// <cppcoro/resume_on.hpp>
namespace cppcoro
{
template<typename SCHEDULER, typename T>
task<T> resume_on(SCHEDULER& scheduler, task<T> t);
template<typename SCHEDULER, typename T>
task<T> resume_on(SCHEDULER& scheduler, shared_task<T> t);
template<typename SCHEDULER, typename AWAITABLE>
auto resume_on(SCHEDULER& scheduler, AWAITABLE awaitable)
-> Awaitable<typename awaitable_traits<AWAITABLE>::await_result_t>;
template<typename SCHEDULER, typename T>
async_generator<T> resume_on(SCHEDULER& scheduler, async_generator<T> source);
@ -1828,13 +1986,14 @@ namespace cppcoro
## `schedule_on()`
The `schedule_on()` function can be used to change the execution context that a given
`task` or `async_generator` starts executing on.
awaitable or `async_generator` starts executing on.
When applied to an `async_generator` it also affects which execution context it resumes
on after `co_yield` statement.
Note that the `schedule_on` transform does not specify the thread that the `task` or `async_generator`
will complete or yield results on, that is up to the implementing coroutine.
Note that the `schedule_on` transform does not specify the thread that the awaitable or
`async_generator` will complete or yield results on, that is up to the implementation of
the awaitable or generator.
See the `resume_on()` operator for a transform that controls the thread the operation completes on.
@ -1854,7 +2013,7 @@ task<> example()
```
API Summary:
```
```c++
// <cppcoro/schedule_on.hpp>
namespace cppcoro
{
@ -1862,8 +2021,9 @@ namespace cppcoro
// ensures that 't' is co_await'ed on a thread associated with
// the specified scheduler. Resulting task will complete on
// whatever thread 't' would normally complete on.
template<typename SCHEDULER, typename T>
task<T> schedule_on(SCHEDULER& scheduler, task<T> t);
template<typename SCHEDULER, typename AWAITABLE>
auto schedule_on(SCHEDULER& scheduler, AWAITABLE awaitable)
-> Awaitable<typename awaitable_traits<AWAITABLE>::await_result_t>;
// Return a generator that yields the same sequence of results as
// 'source' but that ensures that execution of the coroutine starts
@ -1887,8 +2047,81 @@ namespace cppcoro
}
```
# Metafunctions
## `awaitable_traits<T>`
This template metafunction can be used to determine what the resulting type of a `co_await` expression
will be if applied to an expression of type `T`.
Note that this assumes the value of type `T` is being awaited in a context where it is unaffected by
any `await_transform` applied by the coroutine's promise object. The results may differ if a value
of type `T` is awaited in such a context.
The `awaitable_traits<T>` template metafunction does not define the `awaiter_t` or `await_result_t`
nested typedefs if the type, `T`, is not awaitable. This allows its use in SFINAE contexts to disable
overloads when `T` is not awaitable.
API Summary:
```c++
// <cppcoro/awaitable_traits.hpp>
namespace cppcoro
{
template<typename T>
struct awaitable_traits
{
// The type that results from applying `operator co_await()` to a value
// of type T, if T supports an `operator co_await()`, otherwise is type `T&&`.
typename awaiter_t = <unspecified>;
// The type of the result of co_await'ing a value of type T.
using await_result_t = <unspecified>;
};
}
```
## `is_awaitable<T>`
The `is_awaitable<T>` template metafunction allows you to query whether or not a given
type can be `co_await`ed from within a coroutine.
API Summary:
```c++
// <cppcoro/is_awaitable.hpp>
namespace cppcoro
{
template<typename T>
struct is_awaitable : std::bool_constant<...>
{};
template<typename T>
constexpr bool is_awaitable_v = is_awaitable<T>::value;
}
```
# Concepts
## `Awaitable<T>` concept
An `Awaitable<T>` is a concept that indicates that a type can be `co_await`ed in a coroutine context
that has no `await_transform` overloads and that the result of the `co_await` expression has type, `T`.
For example, the type `task<T>` implements the concept `Awaitable<T&&>` whereas the type `task<T>&`
implements the concept `Awaitable<T&>`.
## `Awaiter<T>` concept
An `Awaiter<T>` is a concept that indicates a type contains the `await_ready`, `await_suspend` and
`await_resume` methods required to implement the protocol for suspending/resuming an awaiting
coroutine.
A type that satisfies `Awaiter<T>` must have, for an instance of the type, `awaiter`:
- `awaiter.await_ready()` -> `bool`
- `awaiter.await_suspend(std::experimental::coroutine_handle<void>{})` -> `void` or `bool` or `std::experimental::coroutine_handle<P>` for some `P`.
- `awaiter.await_resume()` -> `T`
Any type that implements the `Awaiter<T>` concept also implements the `Awaitable<T>` concept.
## `Scheduler` concept
A `Scheduler` is a concept that allows scheduling execution of coroutines within some execution context.
@ -1896,7 +2129,7 @@ A `Scheduler` is a concept that allows scheduling execution of coroutines within
```c++
concept Scheduler
{
<awaitable-type> schedule();
Awaitable<void> schedule();
}
```
@ -1929,10 +2162,10 @@ the scheduler's execution context after a specified duration of time has elapsed
concept DelayedScheduler : Scheduler
{
template<typename REP, typename RATIO>
<awaitable-type> schedule_after(std::chrono::duration<REP, RATIO> delay);
Awaitable<void> schedule_after(std::chrono::duration<REP, RATIO> delay);
template<typename REP, typename RATIO>
<awaitable-type> schedule_after(
Awaitable<void> schedule_after(
std::chrono::duration<REP, RATIO> delay,
cppcoro::cancellation_token cancellationToken);
}

View File

@ -1,9 +1,8 @@
version: 1.0.{build}
image:
- Visual Studio 2017
#- Visual Studio 2017 Preview
- Visual Studio 2017 Preview
platform:
- x64
@ -13,6 +12,13 @@ configuration:
- debug
- optimised
matrix:
# Allow failures under MSVC x86 optimised since there are some known compiler
# bugs causing failures here.
allow_failures:
- platform: x86
configuration: optimised
clone_script:
- ps: git clone -q $("--branch=" + $env:APPVEYOR_REPO_BRANCH) $("https://github.com/" + $env:APPVEYOR_REPO_NAME + ".git") $env:APPVEYOR_BUILD_FOLDER
- ps: if (!$env:APPVEYOR_PULL_REQUEST_NUMBER) {$("git checkout -qf " + $env:APPVEYOR_REPO_COMMIT)}

View File

@ -117,6 +117,8 @@ if cake.system.isWindows() or cake.system.isCygwin():
# Enable C++17 features like std::optional<>
compiler.addCppFlag('/std:c++latest')
compiler.addDefine('_SILENCE_CXX17_RESULT_OF_DEPRECATION_WARNING')
compiler.addProgramFlag('/nodefaultlib')
compiler.addModuleFlag('/nodefaultlib')
@ -137,7 +139,7 @@ if cake.system.isWindows() or cake.system.isCygwin():
compiler.runtimeLibraries = 'debug-dll'
compiler.addLibrary('msvcrtd')
compiler.addLibrary('msvcprtd')
compiler.addLibrary('msvcurtd')
compiler.addLibrary('vcruntimed')
compiler.addLibrary('ucrtd')
compiler.addLibrary('oldnames')
@ -159,7 +161,7 @@ if cake.system.isWindows() or cake.system.isCygwin():
compiler.runtimeLibraries = 'release-dll'
compiler.addLibrary('msvcrt')
compiler.addLibrary('msvcprt')
compiler.addLibrary('msvcurt')
compiler.addLibrary('vcruntime')
compiler.addLibrary('ucrt')
compiler.addLibrary('oldnames')

View File

@ -117,8 +117,8 @@ namespace cppcoro
// State transition diagram
// VNRCA - value_not_ready_consumer_active
// VNRCS - value_not_ready_consumer_suspended
// VRPA - value_ready_consumer_active
// VRPS - value_ready_consumer_suspended
// VRPA - value_ready_producer_active
// VRPS - value_ready_producer_suspended
//
// A +--- VNRCA --[C]--> VNRCS yield_value()
// | | | A | A | .

View File

@ -0,0 +1,27 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_AWAITABLE_TRAITS_HPP_INCLUDED
#define CPPCORO_AWAITABLE_TRAITS_HPP_INCLUDED
#include <cppcoro/detail/get_awaiter.hpp>
#include <type_traits>
namespace cppcoro
{
// Primary template: intentionally empty for non-awaitable T so that uses
// of awaitable_traits<T>::await_result_t SFINAE-out instead of hard-erroring.
template<typename T, typename = void>
struct awaitable_traits
{};
// Enabled only when detail::get_awaiter() is well-formed for T, i.e. T is
// awaitable in a context without any await_transform.
template<typename T>
struct awaitable_traits<T, std::void_t<decltype(cppcoro::detail::get_awaiter(std::declval<T>()))>>
{
// The awaiter obtained by applying operator co_await() to a value of
// type T (or the value itself if it is already an awaiter).
using awaiter_t = decltype(cppcoro::detail::get_awaiter(std::declval<T>()));
// The result type of the expression 'co_await std::declval<T>()'.
using await_result_t = decltype(std::declval<awaiter_t>().await_resume());
};
}
#endif

View File

@ -36,6 +36,22 @@
# define CPPCORO_ASSUME(X)
#endif
// CPPCORO_NOINLINE: ask the compiler not to inline the annotated function.
#if CPPCORO_COMPILER_MSVC
# define CPPCORO_NOINLINE __declspec(noinline)
#elif CPPCORO_COMPILER_CLANG || CPPCORO_COMPILER_GCC
# define CPPCORO_NOINLINE __attribute__((noinline))
#else
# define CPPCORO_NOINLINE
#endif
// CPPCORO_FORCE_INLINE: strongly request inlining where the compiler
// supports it; plain 'inline' otherwise.
// NOTE(review): Clang's always_inline attribute is conventionally paired
// with the 'inline' keyword - confirm this expands as intended at use sites.
#if CPPCORO_COMPILER_MSVC
# define CPPCORO_FORCE_INLINE __forceinline
#elif CPPCORO_COMPILER_CLANG
# define CPPCORO_FORCE_INLINE __attribute__((always_inline))
#else
# define CPPCORO_FORCE_INLINE inline
#endif
/////////////////////////////////////////////////////////////////////////////
// OS Detection

View File

@ -0,0 +1,22 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_DETAIL_ANY_HPP_INCLUDED
#define CPPCORO_DETAIL_ANY_HPP_INCLUDED
namespace cppcoro
{
namespace detail
{
// Helper type that can be cast-to from any type.
struct any
{
template<typename T>
any(T&&) noexcept
{}
};
}
}
#endif

View File

@ -1,61 +0,0 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_DETAIL_CONTINUATION_HPP_INCLUDED
#define CPPCORO_DETAIL_CONTINUATION_HPP_INCLUDED
#include <experimental/coroutine>
namespace cppcoro
{
namespace detail
{
class continuation
{
public:
using callback_t = void(void*);
continuation() noexcept
: m_callback(nullptr)
, m_state(nullptr)
{}
explicit continuation(std::experimental::coroutine_handle<> awaiter) noexcept
: m_callback(nullptr)
, m_state(awaiter.address())
{}
explicit continuation(callback_t* callback, void* state) noexcept
: m_callback(callback)
, m_state(state)
{}
explicit operator bool() const noexcept
{
return m_callback != nullptr || m_state != nullptr;
}
void resume() noexcept
{
if (m_callback == nullptr)
{
std::experimental::coroutine_handle<>::from_address(m_state).resume();
}
else
{
m_callback(m_state);
}
}
private:
callback_t* m_callback;
void* m_state;
};
}
}
#endif

View File

@ -0,0 +1,49 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_DETAIL_GET_AWAITER_HPP_INCLUDED
#define CPPCORO_DETAIL_GET_AWAITER_HPP_INCLUDED
#include <cppcoro/detail/is_awaiter.hpp>
#include <cppcoro/detail/any.hpp>
namespace cppcoro
{
namespace detail
{
template<typename T>
auto get_awaiter_impl(T&& value, int)
noexcept(noexcept(static_cast<T&&>(value).operator co_await()))
-> decltype(static_cast<T&&>(value).operator co_await())
{
return static_cast<T&&>(value).operator co_await();
}
template<typename T>
auto get_awaiter_impl(T&& value, long)
noexcept(noexcept(operator co_await(static_cast<T&&>(value))))
-> decltype(operator co_await(static_cast<T&&>(value)))
{
return operator co_await(static_cast<T&&>(value));
}
template<
typename T,
std::enable_if_t<cppcoro::detail::is_awaiter<T&&>::value, int> = 0>
T&& get_awaiter_impl(T&& value, cppcoro::detail::any) noexcept
{
return static_cast<T&&>(value);
}
template<typename T>
auto get_awaiter(T&& value)
noexcept(noexcept(detail::get_awaiter_impl(static_cast<T&&>(value), 123)))
-> decltype(detail::get_awaiter_impl(static_cast<T&&>(value), 123))
{
return detail::get_awaiter_impl(static_cast<T&&>(value), 123);
}
}
}
#endif

View File

@ -0,0 +1,55 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_DETAIL_IS_AWAITER_HPP_INCLUDED
#define CPPCORO_DETAIL_IS_AWAITER_HPP_INCLUDED
#include <type_traits>
#include <experimental/coroutine>
namespace cppcoro
{
namespace detail
{
template<typename T>
struct is_coroutine_handle
: std::false_type
{};
template<typename PROMISE>
struct is_coroutine_handle<std::experimental::coroutine_handle<PROMISE>>
: std::true_type
{};
// NOTE: We're accepting a return value of coroutine_handle<P> here
// which is an extension supported by Clang which is not yet part of
// the C++ coroutines TS.
template<typename T>
struct is_valid_await_suspend_return_value : std::disjunction<
std::is_void<T>,
std::is_same<T, bool>,
is_coroutine_handle<T>>
{};
template<typename T, typename = std::void_t<>>
struct is_awaiter : std::false_type {};
// NOTE: We're testing whether await_suspend() will be callable using an
// arbitrary coroutine_handle here by checking if it supports being passed
// a coroutine_handle<void>. This may result in a false-result for some
// types which are only awaitable within a certain context.
template<typename T>
struct is_awaiter<T, std::void_t<
decltype(std::declval<T>().await_ready()),
decltype(std::declval<T>().await_suspend(std::declval<std::experimental::coroutine_handle<>>())),
decltype(std::declval<T>().await_resume())>> :
std::conjunction<
std::is_constructible<bool, decltype(std::declval<T>().await_ready())>,
detail::is_valid_await_suspend_return_value<
decltype(std::declval<T>().await_suspend(std::declval<std::experimental::coroutine_handle<>>()))>>
{};
}
}
#endif

View File

@ -0,0 +1,29 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_DETAIL_REMOVE_RVALUE_REFERENCE_HPP_INCLUDED
#define CPPCORO_DETAIL_REMOVE_RVALUE_REFERENCE_HPP_INCLUDED
namespace cppcoro
{
namespace detail
{
template<typename T>
struct remove_rvalue_reference
{
using type = T;
};
template<typename T>
struct remove_rvalue_reference<T&&>
{
using type = T;
};
template<typename T>
using remove_rvalue_reference_t = typename remove_rvalue_reference<T>::type;
}
}
#endif

View File

@ -0,0 +1,298 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_DETAIL_SYNC_WAIT_TASK_HPP_INCLUDED
#define CPPCORO_DETAIL_SYNC_WAIT_TASK_HPP_INCLUDED
#include <cppcoro/config.hpp>
#include <cppcoro/awaitable_traits.hpp>
#include <cppcoro/detail/lightweight_manual_reset_event.hpp>
#include <experimental/coroutine>
#include <cassert>
namespace cppcoro
{
namespace detail
{
template<typename RESULT>
class sync_wait_task;
// Promise type for sync_wait_task<RESULT>.
//
// The coroutine starts suspended and is manually resumed via start(),
// which records the event to signal when the coroutine completes. The
// result is captured by address at the co_yield point rather than being
// copied; final_suspend() keeps the coroutine frame alive so that the
// referenced value remains valid until the waiter has consumed it.
template<typename RESULT>
class sync_wait_task_promise final
{
using coroutine_handle_t = std::experimental::coroutine_handle<sync_wait_task_promise<RESULT>>;
public:
using reference = RESULT&&;
sync_wait_task_promise() noexcept
{}
// Begin executing the coroutine; 'event' is set once the coroutine
// reaches its final-suspend point (see completion_notifier below).
void start(detail::lightweight_manual_reset_event& event)
{
m_event = &event;
coroutine_handle_t::from_promise(*this).resume();
}
auto get_return_object() noexcept
{
return coroutine_handle_t::from_promise(*this);
}
// Suspend initially so the caller controls when execution starts.
std::experimental::suspend_always initial_suspend() noexcept
{
return{};
}
auto final_suspend() noexcept
{
// Awaiter that signals the waiting thread's event at final-suspend.
// Suspending here (rather than running to completion) keeps the
// coroutine frame - and thus the yielded result - alive.
class completion_notifier
{
public:
bool await_ready() const noexcept { return false; }
void await_suspend(coroutine_handle_t coroutine) const noexcept
{
coroutine.promise().m_event->set();
}
void await_resume() noexcept {}
};
return completion_notifier{};
}
#if CPPCORO_COMPILER_MSVC
// HACK: This is needed to work around a bug in MSVC 2017.7/2017.8.
// See comment in make_sync_wait_task below.
template<typename Awaitable>
Awaitable&& await_transform(Awaitable&& awaitable)
{
return static_cast<Awaitable&&>(awaitable);
}
// Tag type awaited inside make_sync_wait_task to obtain a reference
// to this promise object from within the coroutine body.
struct get_promise_t {};
static constexpr get_promise_t get_promise = {};
auto await_transform(get_promise_t)
{
class awaiter
{
public:
awaiter(sync_wait_task_promise* promise) noexcept : m_promise(promise) {}
bool await_ready() noexcept {
return true;
}
void await_suspend(std::experimental::coroutine_handle<>) noexcept {}
sync_wait_task_promise& await_resume() noexcept
{
return *m_promise;
}
private:
sync_wait_task_promise* m_promise;
};
return awaiter{ this };
}
#endif
// Capture the address of the yielded value, then behave like
// final_suspend() so the value outlives the co_yield expression.
auto yield_value(reference result) noexcept
{
m_result = std::addressof(result);
return final_suspend();
}
void return_void() noexcept
{
// The coroutine should have either yielded a value or thrown
// an exception in which case it should have bypassed return_void().
assert(false);
}
void unhandled_exception()
{
m_exception = std::current_exception();
}
// Returns the yielded result, or rethrows any captured exception.
reference result()
{
if (m_exception)
{
std::rethrow_exception(m_exception);
}
return static_cast<reference>(*m_result);
}
private:
detail::lightweight_manual_reset_event* m_event;
std::remove_reference_t<RESULT>* m_result;
std::exception_ptr m_exception;
};
// Specialisation of the sync_wait_task promise for void results:
// no value needs to be captured, only a possible exception.
template<>
class sync_wait_task_promise<void>
{
using coroutine_handle_t = std::experimental::coroutine_handle<sync_wait_task_promise<void>>;
public:
sync_wait_task_promise() noexcept
{}
// Begin executing the coroutine; 'event' is set when it completes.
void start(detail::lightweight_manual_reset_event& event)
{
m_event = &event;
coroutine_handle_t::from_promise(*this).resume();
}
auto get_return_object() noexcept
{
return coroutine_handle_t::from_promise(*this);
}
std::experimental::suspend_always initial_suspend() noexcept
{
return{};
}
auto final_suspend() noexcept
{
// Signals the waiting thread's event at the final-suspend point.
class completion_notifier
{
public:
bool await_ready() const noexcept { return false; }
void await_suspend(coroutine_handle_t coroutine) const noexcept
{
coroutine.promise().m_event->set();
}
void await_resume() noexcept {}
};
return completion_notifier{};
}
void return_void() {}
void unhandled_exception()
{
m_exception = std::current_exception();
}
// Rethrows any exception captured while running the coroutine.
void result()
{
if (m_exception)
{
std::rethrow_exception(m_exception);
}
}
private:
detail::lightweight_manual_reset_event* m_event;
std::exception_ptr m_exception;
};
// Move-only RAII owner of the sync-wait coroutine frame. Used internally
// to run an awaitable to completion and retrieve its result on the
// calling thread.
template<typename RESULT>
class sync_wait_task final
{
public:
using promise_type = sync_wait_task_promise<RESULT>;
using coroutine_handle_t = std::experimental::coroutine_handle<promise_type>;
sync_wait_task(coroutine_handle_t coroutine) noexcept
: m_coroutine(coroutine)
{}
sync_wait_task(sync_wait_task&& other) noexcept
: m_coroutine(std::exchange(other.m_coroutine, coroutine_handle_t{}))
{}
~sync_wait_task()
{
if (m_coroutine) m_coroutine.destroy();
}
sync_wait_task(const sync_wait_task&) = delete;
sync_wait_task& operator=(const sync_wait_task&) = delete;
// Start the coroutine; 'event' is signalled when it completes.
void start(lightweight_manual_reset_event& event) noexcept
{
m_coroutine.promise().start(event);
}
// Result (or rethrown exception) of the awaited operation.
decltype(auto) result()
{
return m_coroutine.promise().result();
}
private:
coroutine_handle_t m_coroutine;
};
#if CPPCORO_COMPILER_MSVC
// HACK: Work around bug in MSVC where passing a parameter by universal reference
// results in an error when passed a move-only type, complaining that the copy-constructor
// has been deleted. The parameter should be passed by reference and the compiler should
// not be calling the copy-constructor for the argument.
template<
typename AWAITABLE,
typename RESULT = typename cppcoro::awaitable_traits<AWAITABLE&&>::await_result_t,
std::enable_if_t<!std::is_void_v<RESULT>, int> = 0>
sync_wait_task<RESULT> make_sync_wait_task(AWAITABLE& awaitable)
{
// HACK: Workaround another bug in MSVC where the expression 'co_yield co_await x' seems
// to completely ignore the co_yield and never calls promise.yield_value().
// The coroutine seems to be resuming the 'co_await' after the 'co_yield'
// rather than before the 'co_yield'.
// This bug is present in VS 2017.7 and VS 2017.8.
auto& promise = co_await sync_wait_task_promise<RESULT>::get_promise;
co_await promise.yield_value(co_await std::forward<AWAITABLE>(awaitable));
//co_yield co_await std::forward<AWAITABLE>(awaitable);
}
// void-result overload: nothing to yield, just await completion.
template<
typename AWAITABLE,
typename RESULT = typename cppcoro::awaitable_traits<AWAITABLE&&>::await_result_t,
std::enable_if_t<std::is_void_v<RESULT>, int> = 0>
sync_wait_task<void> make_sync_wait_task(AWAITABLE& awaitable)
{
co_await static_cast<AWAITABLE&&>(awaitable);
}
#else
// Creates a coroutine that awaits 'awaitable' and yields its result
// by reference into the sync_wait_task promise.
template<
typename AWAITABLE,
typename RESULT = typename cppcoro::awaitable_traits<AWAITABLE&&>::await_result_t,
std::enable_if_t<!std::is_void_v<RESULT>, int> = 0>
sync_wait_task<RESULT> make_sync_wait_task(AWAITABLE&& awaitable)
{
co_yield co_await std::forward<AWAITABLE>(awaitable);
}
// void-result overload: nothing to yield, just await completion.
template<
typename AWAITABLE,
typename RESULT = typename cppcoro::awaitable_traits<AWAITABLE&&>::await_result_t,
std::enable_if_t<std::is_void_v<RESULT>, int> = 0>
sync_wait_task<void> make_sync_wait_task(AWAITABLE&& awaitable)
{
co_await std::forward<AWAITABLE>(awaitable);
}
#endif
}
}
#endif

View File

@ -0,0 +1,16 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_DETAIL_VOID_VALUE_HPP_INCLUDED
#define CPPCORO_DETAIL_VOID_VALUE_HPP_INCLUDED
namespace cppcoro
{
namespace detail
{
struct void_value {};
}
}
#endif

View File

@ -1,59 +0,0 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_DETAIL_WHEN_ALL_AWAITABLE_HPP_INCLUDED
#define CPPCORO_DETAIL_WHEN_ALL_AWAITABLE_HPP_INCLUDED
#include <cppcoro/detail/continuation.hpp>
#include <atomic>
namespace cppcoro
{
namespace detail
{
class when_all_awaitable
{
public:
when_all_awaitable(std::size_t count) noexcept
: m_refCount(count + 1)
{}
detail::continuation get_continuation() noexcept
{
return detail::continuation{ &when_all_awaitable::resumer_callback, this };
}
bool await_ready() noexcept
{
return m_refCount.load(std::memory_order_acquire) == 1;
}
bool await_suspend(std::experimental::coroutine_handle<> awaiter) noexcept
{
m_awaiter = awaiter;
return m_refCount.fetch_sub(1, std::memory_order_acq_rel) > 1;
}
void await_resume() noexcept {}
private:
static void resumer_callback(void* state) noexcept
{
auto* that = static_cast<when_all_awaitable*>(state);
if (that->m_refCount.fetch_sub(1, std::memory_order_acq_rel) == 1)
{
that->m_awaiter.resume();
}
}
std::atomic<std::size_t> m_refCount;
std::experimental::coroutine_handle<> m_awaiter;
};
}
}
#endif

View File

@ -0,0 +1,55 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_DETAIL_WHEN_ALL_COUNTER_HPP_INCLUDED
#define CPPCORO_DETAIL_WHEN_ALL_COUNTER_HPP_INCLUDED
#include <experimental/coroutine>
#include <atomic>
#include <cstdint>
namespace cppcoro
{
namespace detail
{
class when_all_counter
{
public:
when_all_counter(std::size_t count) noexcept
: m_count(count + 1)
, m_awaitingCoroutine(nullptr)
{}
bool is_ready() const noexcept
{
// We consider this complete if we're asking whether it's ready
// after a coroutine has already been registered.
return static_cast<bool>(m_awaitingCoroutine);
}
bool try_await(std::experimental::coroutine_handle<> awaitingCoroutine) noexcept
{
m_awaitingCoroutine = awaitingCoroutine;
return m_count.fetch_sub(1, std::memory_order_acq_rel) > 1;
}
void notify_awaitable_completed() noexcept
{
if (m_count.fetch_sub(1, std::memory_order_acq_rel) == 1)
{
m_awaitingCoroutine.resume();
}
}
protected:
std::atomic<std::size_t> m_count;
std::experimental::coroutine_handle<> m_awaitingCoroutine;
};
}
}
#endif

View File

@ -0,0 +1,258 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_DETAIL_WHEN_ALL_READY_AWAITABLE_HPP_INCLUDED
#define CPPCORO_DETAIL_WHEN_ALL_READY_AWAITABLE_HPP_INCLUDED
#include <cppcoro/detail/when_all_counter.hpp>
#include <experimental/coroutine>
#include <tuple>
namespace cppcoro
{
namespace detail
{
template<typename TASK_CONTAINER>
class when_all_ready_awaitable;
// Specialisation for an empty set of tasks: always ready, and awaiting
// it yields an empty tuple without ever suspending.
template<>
class when_all_ready_awaitable<std::tuple<>>
{
public:
constexpr when_all_ready_awaitable() noexcept {}
explicit constexpr when_all_ready_awaitable(std::tuple<>) noexcept {}
constexpr bool await_ready() const noexcept { return true; }
void await_suspend(std::experimental::coroutine_handle<>) noexcept {}
std::tuple<> await_resume() const noexcept { return {}; }
};
// Awaitable over a fixed set of when_all_task objects held in a
// std::tuple. The tasks are started lazily when the awaitable is first
// awaited and the awaiter is resumed once all have completed. Awaiting
// an lvalue yields a reference to the tuple of tasks; awaiting an
// rvalue moves the tuple out.
template<typename... TASKS>
class when_all_ready_awaitable<std::tuple<TASKS...>>
{
public:
explicit when_all_ready_awaitable(TASKS&&... tasks)
noexcept(std::conjunction_v<std::is_nothrow_move_constructible<TASKS>...>)
: m_counter(sizeof...(TASKS))
, m_tasks(std::move(tasks)...)
{}
explicit when_all_ready_awaitable(std::tuple<TASKS...>&& tasks)
noexcept(std::is_nothrow_move_constructible_v<std::tuple<TASKS...>>)
: m_counter(sizeof...(TASKS))
, m_tasks(std::move(tasks))
{}
// NOTE: the counter is re-initialised rather than moved; moving is
// only meaningful before the awaitable has been awaited.
when_all_ready_awaitable(when_all_ready_awaitable&& other) noexcept
: m_counter(sizeof...(TASKS))
, m_tasks(std::move(other.m_tasks))
{}
auto operator co_await() & noexcept
{
struct awaiter
{
awaiter(when_all_ready_awaitable& awaitable) noexcept
: m_awaitable(awaitable)
{}
bool await_ready() const noexcept
{
return m_awaitable.is_ready();
}
bool await_suspend(std::experimental::coroutine_handle<> awaitingCoroutine) noexcept
{
return m_awaitable.try_await(awaitingCoroutine);
}
std::tuple<TASKS...>& await_resume() noexcept
{
return m_awaitable.m_tasks;
}
private:
when_all_ready_awaitable& m_awaitable;
};
return awaiter{ *this };
}
auto operator co_await() && noexcept
{
struct awaiter
{
awaiter(when_all_ready_awaitable& awaitable) noexcept
: m_awaitable(awaitable)
{}
bool await_ready() const noexcept
{
return m_awaitable.is_ready();
}
bool await_suspend(std::experimental::coroutine_handle<> awaitingCoroutine) noexcept
{
return m_awaitable.try_await(awaitingCoroutine);
}
std::tuple<TASKS...>&& await_resume() noexcept
{
return std::move(m_awaitable.m_tasks);
}
private:
when_all_ready_awaitable& m_awaitable;
};
return awaiter{ *this };
}
private:
bool is_ready() const noexcept
{
return m_counter.is_ready();
}
// Lazily starts all tasks on first await, then registers the
// awaiting coroutine with the counter.
bool try_await(std::experimental::coroutine_handle<> awaitingCoroutine) noexcept
{
start_tasks(std::make_integer_sequence<std::size_t, sizeof...(TASKS)>{});
return m_counter.try_await(awaitingCoroutine);
}
template<std::size_t... INDICES>
void start_tasks(std::integer_sequence<std::size_t, INDICES...>) noexcept
{
// Pack expansion via initializer_list guarantees left-to-right
// evaluation of the start() calls.
(void)std::initializer_list<int>{
(std::get<INDICES>(m_tasks).start(m_counter), 0)...
};
}
when_all_counter m_counter;
std::tuple<TASKS...> m_tasks;
};
// Awaitable over a runtime-sized container of when_all_task objects
// (e.g. a std::vector). Tasks are started lazily when first awaited.
template<typename TASK_CONTAINER>
class when_all_ready_awaitable
{
public:
explicit when_all_ready_awaitable(TASK_CONTAINER&& tasks) noexcept
: m_counter(tasks.size())
, m_tasks(std::forward<TASK_CONTAINER>(tasks))
{}
// NOTE: the counter is re-initialised from the task count; moving is
// only meaningful before the awaitable has been awaited.
when_all_ready_awaitable(when_all_ready_awaitable&& other)
noexcept(std::is_nothrow_move_constructible_v<TASK_CONTAINER>)
: m_counter(other.m_tasks.size())
, m_tasks(std::move(other.m_tasks))
{}
when_all_ready_awaitable(const when_all_ready_awaitable&) = delete;
when_all_ready_awaitable& operator=(const when_all_ready_awaitable&) = delete;
// Awaiting an lvalue yields a reference to the container of tasks.
auto operator co_await() & noexcept
{
class awaiter
{
public:
awaiter(when_all_ready_awaitable& awaitable)
: m_awaitable(awaitable)
{}
bool await_ready() const noexcept
{
return m_awaitable.is_ready();
}
bool await_suspend(std::experimental::coroutine_handle<> awaitingCoroutine) noexcept
{
return m_awaitable.try_await(awaitingCoroutine);
}
TASK_CONTAINER& await_resume() noexcept
{
return m_awaitable.m_tasks;
}
private:
when_all_ready_awaitable& m_awaitable;
};
return awaiter{ *this };
}
// Awaiting an rvalue moves the container of tasks out.
auto operator co_await() && noexcept
{
class awaiter
{
public:
awaiter(when_all_ready_awaitable& awaitable)
: m_awaitable(awaitable)
{}
bool await_ready() const noexcept
{
return m_awaitable.is_ready();
}
bool await_suspend(std::experimental::coroutine_handle<> awaitingCoroutine) noexcept
{
return m_awaitable.try_await(awaitingCoroutine);
}
TASK_CONTAINER&& await_resume() noexcept
{
return std::move(m_awaitable.m_tasks);
}
private:
when_all_ready_awaitable& m_awaitable;
};
return awaiter{ *this };
}
private:
bool is_ready() const noexcept
{
return m_counter.is_ready();
}
// Starts every task, then registers the awaiting coroutine.
bool try_await(std::experimental::coroutine_handle<> awaitingCoroutine) noexcept
{
for (auto&& task : m_tasks)
{
task.start(m_counter);
}
return m_counter.try_await(awaitingCoroutine);
}
when_all_counter m_counter;
TASK_CONTAINER m_tasks;
};
}
}
#endif

View File

@ -0,0 +1,356 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_DETAIL_WHEN_ALL_TASK_HPP_INCLUDED
#define CPPCORO_DETAIL_WHEN_ALL_TASK_HPP_INCLUDED
#include <cppcoro/awaitable_traits.hpp>
#include <cppcoro/detail/when_all_counter.hpp>
#include <cppcoro/detail/void_value.hpp>
#include <experimental/coroutine>
#include <cassert>
namespace cppcoro
{
namespace detail
{
template<typename TASK_CONTAINER>
class when_all_ready_awaitable;
template<typename RESULT>
class when_all_task;
// Promise type for when_all_task<RESULT>.
//
// The coroutine starts suspended; start() records the shared counter and
// resumes it. The awaited result is captured by address at the co_yield
// point and the coroutine remains suspended at final_suspend() so that
// the value stays alive until consumed via result().
template<typename RESULT>
class when_all_task_promise final
{
public:
using coroutine_handle_t = std::experimental::coroutine_handle<when_all_task_promise<RESULT>>;
when_all_task_promise() noexcept
{}
auto get_return_object() noexcept
{
return coroutine_handle_t::from_promise(*this);
}
std::experimental::suspend_always initial_suspend() noexcept
{
return{};
}
auto final_suspend() noexcept
{
// Notifies the shared when_all counter of completion while keeping
// the coroutine frame (and thus the captured result) alive.
class completion_notifier
{
public:
bool await_ready() const noexcept { return false; }
void await_suspend(coroutine_handle_t coro) const noexcept
{
coro.promise().m_counter->notify_awaitable_completed();
}
void await_resume() const noexcept {}
};
return completion_notifier{};
}
void unhandled_exception() noexcept
{
m_exception = std::current_exception();
}
void return_void() noexcept
{
// We should have either suspended at co_yield point or
// an exception was thrown before running off the end of
// the coroutine.
assert(false);
}
#if CPPCORO_COMPILER_MSVC
// HACK: This is needed to work around a bug in MSVC 2017.7/2017.8.
// See comment in make_when_all_task below.
template<typename Awaitable>
Awaitable&& await_transform(Awaitable&& awaitable)
{
return static_cast<Awaitable&&>(awaitable);
}
// Tag type awaited inside make_when_all_task to obtain a reference
// to this promise object from within the coroutine body.
struct get_promise_t {};
static constexpr get_promise_t get_promise = {};
auto await_transform(get_promise_t)
{
class awaiter
{
public:
awaiter(when_all_task_promise* promise) noexcept : m_promise(promise) {}
bool await_ready() noexcept {
return true;
}
void await_suspend(std::experimental::coroutine_handle<>) noexcept {}
when_all_task_promise& await_resume() noexcept
{
return *m_promise;
}
private:
when_all_task_promise* m_promise;
};
return awaiter{ this };
}
#endif
// Captures the yielded value by address, then suspends at the
// final-suspend point (see final_suspend above).
auto yield_value(RESULT&& result) noexcept
{
m_result = std::addressof(result);
return final_suspend();
}
// Records the shared counter and starts executing the coroutine.
void start(when_all_counter& counter) noexcept
{
m_counter = &counter;
coroutine_handle_t::from_promise(*this).resume();
}
RESULT& result() &
{
rethrow_if_exception();
return *m_result;
}
RESULT&& result() &&
{
rethrow_if_exception();
return std::forward<RESULT>(*m_result);
}
private:
void rethrow_if_exception()
{
if (m_exception)
{
std::rethrow_exception(m_exception);
}
}
when_all_counter* m_counter;
std::exception_ptr m_exception;
std::add_pointer_t<RESULT> m_result;
};
// Specialisation for void results: only completion and any exception
// need to be tracked.
template<>
class when_all_task_promise<void> final
{
public:
using coroutine_handle_t = std::experimental::coroutine_handle<when_all_task_promise<void>>;
when_all_task_promise() noexcept
{}
auto get_return_object() noexcept
{
return coroutine_handle_t::from_promise(*this);
}
std::experimental::suspend_always initial_suspend() noexcept
{
return{};
}
auto final_suspend() noexcept
{
// Notifies the shared when_all counter of completion.
class completion_notifier
{
public:
bool await_ready() const noexcept { return false; }
void await_suspend(coroutine_handle_t coro) const noexcept
{
coro.promise().m_counter->notify_awaitable_completed();
}
void await_resume() const noexcept {}
};
return completion_notifier{};
}
void unhandled_exception() noexcept
{
m_exception = std::current_exception();
}
void return_void() noexcept
{
}
// Records the shared counter and starts executing the coroutine.
void start(when_all_counter& counter) noexcept
{
m_counter = &counter;
coroutine_handle_t::from_promise(*this).resume();
}
// Rethrows any exception captured while running the coroutine.
void result()
{
if (m_exception)
{
std::rethrow_exception(m_exception);
}
}
private:
when_all_counter* m_counter;
std::exception_ptr m_exception;
};
// Move-only handle to a single task within a when_all() operation.
// Owns the coroutine frame and exposes the task's result once the
// overall when_all operation has completed.
template<typename RESULT>
class when_all_task final
{
public:
using promise_type = when_all_task_promise<RESULT>;
using coroutine_handle_t = typename promise_type::coroutine_handle_t;
when_all_task(coroutine_handle_t coroutine) noexcept
: m_coroutine(coroutine)
{}
when_all_task(when_all_task&& other) noexcept
: m_coroutine(std::exchange(other.m_coroutine, coroutine_handle_t{}))
{}
~when_all_task()
{
if (m_coroutine) m_coroutine.destroy();
}
when_all_task(const when_all_task&) = delete;
when_all_task& operator=(const when_all_task&) = delete;
decltype(auto) result() &
{
return m_coroutine.promise().result();
}
decltype(auto) result() &&
{
return std::move(m_coroutine.promise()).result();
}
// Like result(), but maps a void result to detail::void_value so the
// caller can treat all results uniformly (e.g. when building tuples).
decltype(auto) non_void_result() &
{
if constexpr (std::is_void_v<decltype(this->result())>)
{
this->result();
return void_value{};
}
else
{
return this->result();
}
}
decltype(auto) non_void_result() &&
{
if constexpr (std::is_void_v<decltype(this->result())>)
{
std::move(*this).result();
return void_value{};
}
else
{
return std::move(*this).result();
}
}
private:
template<typename TASK_CONTAINER>
friend class when_all_ready_awaitable;
// Invoked by when_all_ready_awaitable when the overall operation
// is first awaited.
void start(when_all_counter& counter) noexcept
{
m_coroutine.promise().start(counter);
}
coroutine_handle_t m_coroutine;
};
// Adapts an arbitrary awaitable (taken by value, non-void result) into
// a when_all_task<RESULT> so it can participate in when_all_ready().
template<
	typename AWAITABLE,
	typename RESULT = typename cppcoro::awaitable_traits<AWAITABLE&&>::await_result_t,
	std::enable_if_t<!std::is_void_v<RESULT>, int> = 0>
when_all_task<RESULT> make_when_all_task(AWAITABLE awaitable)
{
#if CPPCORO_COMPILER_MSVC
	// HACK: Workaround another bug in MSVC where the expression 'co_yield co_await x' seems
	// to completely ignore the co_yield and never calls promise.yield_value().
	// The coroutine seems to be resuming the 'co_await' after the 'co_yield'
	// rather than before the 'co_yield'.
	// This bug is present in VS 2017.7 and VS 2017.8.
	auto& promise = co_await when_all_task_promise<RESULT>::get_promise;
	co_await promise.yield_value(co_await std::forward<AWAITABLE>(awaitable));
#else
	// Yield (rather than return) the result so the promise can hold a
	// reference to the value while it lives in the coroutine frame.
	co_yield co_await static_cast<AWAITABLE&&>(awaitable);
#endif
}
// Overload for awaitables whose result type is void: simply awaits the
// awaitable; the resulting when_all_task<void> carries no value.
template<
	typename AWAITABLE,
	typename RESULT = typename cppcoro::awaitable_traits<AWAITABLE&&>::await_result_t,
	std::enable_if_t<std::is_void_v<RESULT>, int> = 0>
when_all_task<void> make_when_all_task(AWAITABLE awaitable)
{
	// 'awaitable' is a by-value parameter, so awaiting it as an rvalue
	// is safe and lets move-aware awaitables avoid copies.
	co_await std::move(awaitable);
}
// Overload for awaitables passed via std::reference_wrapper: awaits the
// referenced awaitable as an lvalue without copying or moving it.
// The caller must ensure the referenced awaitable outlives the task.
template<
	typename AWAITABLE,
	typename RESULT = typename cppcoro::awaitable_traits<AWAITABLE&>::await_result_t,
	std::enable_if_t<!std::is_void_v<RESULT>, int> = 0>
when_all_task<RESULT> make_when_all_task(std::reference_wrapper<AWAITABLE> awaitable)
{
#if CPPCORO_COMPILER_MSVC
	// HACK: Workaround another bug in MSVC where the expression 'co_yield co_await x' seems
	// to completely ignore the co_yield and never calls promise.yield_value().
	// The coroutine seems to be resuming the 'co_await' after the 'co_yield'
	// rather than before the 'co_yield'.
	// This bug is present in VS 2017.7 and VS 2017.8.
	auto& promise = co_await when_all_task_promise<RESULT>::get_promise;
	co_await promise.yield_value(co_await awaitable.get());
#else
	co_yield co_await awaitable.get();
#endif
}
// Reference-wrapper overload for void-result awaitables: awaits the
// referenced awaitable in place; the task carries no value.
template<
	typename AWAITABLE,
	typename RESULT = typename cppcoro::awaitable_traits<AWAITABLE&>::await_result_t,
	std::enable_if_t<std::is_void_v<RESULT>, int> = 0>
when_all_task<void> make_when_all_task(std::reference_wrapper<AWAITABLE> awaitable)
{
	// Await through the reference as an lvalue; the referenced
	// awaitable is neither copied nor moved.
	auto& inner = awaitable.get();
	co_await inner;
}
}
}
#endif

View File

@ -172,9 +172,9 @@ namespace cppcoro
: win32_overlapped_operation_base(std::move(other))
, m_state(other.m_state.load(std::memory_order_relaxed))
, m_cancellationToken(std::move(other.m_cancellationToken))
, m_errorCode(other.m_errorCode)
, m_numberOfBytesTransferred(other.m_numberOfBytesTransferred)
{
assert(m_errorCode == other.m_errorCode);
assert(m_numberOfBytesTransferred == other.m_numberOfBytesTransferred);
}
public:

View File

@ -5,11 +5,119 @@
#ifndef CPPCORO_FMAP_HPP_INCLUDED
#define CPPCORO_FMAP_HPP_INCLUDED
#include <cppcoro/awaitable_traits.hpp>
#include <cppcoro/is_awaitable.hpp>
#include <utility>
#include <type_traits>
#include <functional>
namespace cppcoro
{
namespace detail
{
// Awaiter that wraps the awaiter of AWAITABLE and applies 'func' to the
// value produced by its await_resume(). Holds the function by (possibly
// rvalue) reference and the wrapped awaiter by value; the fmap_awaitable
// that created this awaiter must outlive it.
template<typename FUNC, typename AWAITABLE>
class fmap_awaiter
{
	using awaiter_t = typename awaitable_traits<AWAITABLE&&>::awaiter_t;

public:

	fmap_awaiter(FUNC&& func, AWAITABLE&& awaitable)
		noexcept(
			std::is_nothrow_move_constructible_v<awaiter_t> &&
			noexcept(detail::get_awaiter(static_cast<AWAITABLE&&>(awaitable))))
		: m_func(static_cast<FUNC&&>(func))
		, m_awaiter(detail::get_awaiter(static_cast<AWAITABLE&&>(awaitable)))
	{}

	// await_ready()/await_suspend() delegate to the wrapped awaiter,
	// invoking it as an rvalue (awaiter_t&&) to preserve the value
	// category the wrapped awaiter expects.
	decltype(auto) await_ready()
		noexcept(noexcept(static_cast<awaiter_t&&>(m_awaiter).await_ready()))
	{
		return static_cast<awaiter_t&&>(m_awaiter).await_ready();
	}

	template<typename PROMISE>
	decltype(auto) await_suspend(std::experimental::coroutine_handle<PROMISE> coro)
		noexcept(noexcept(static_cast<awaiter_t&&>(m_awaiter).await_suspend(std::move(coro))))
	{
		return static_cast<awaiter_t&&>(m_awaiter).await_suspend(std::move(coro));
	}

	// await_resume() overload for a void-producing wrapped awaiter:
	// the awaited operation is completed first, then 'func' is invoked
	// with no arguments.
	template<
		typename AWAIT_RESULT = decltype(std::declval<awaiter_t>().await_resume()),
		std::enable_if_t<std::is_void_v<AWAIT_RESULT>, int> = 0>
	decltype(auto) await_resume()
		noexcept(noexcept(std::invoke(static_cast<FUNC&&>(m_func))))
	{
		static_cast<awaiter_t&&>(m_awaiter).await_resume();
		return std::invoke(static_cast<FUNC&&>(m_func));
	}

	// Non-void overload: the awaited result is forwarded directly into
	// 'func' so no extra copy of the value is made.
	template<
		typename AWAIT_RESULT = decltype(std::declval<awaiter_t>().await_resume()),
		std::enable_if_t<!std::is_void_v<AWAIT_RESULT>, int> = 0>
	decltype(auto) await_resume()
		noexcept(noexcept(std::invoke(static_cast<FUNC&&>(m_func), static_cast<awaiter_t&&>(m_awaiter).await_resume())))
	{
		return std::invoke(
			static_cast<FUNC&&>(m_func),
			static_cast<awaiter_t&&>(m_awaiter).await_resume());
	}

private:

	// Reference to the function stored in the owning fmap_awaitable.
	FUNC&& m_func;
	awaiter_t m_awaiter;

};
// Lazily-evaluated awaitable produced by fmap(): stores the function and
// the source awaitable by value. When awaited it yields an fmap_awaiter
// that applies the function to the awaited result. The ref-qualified
// operator co_await overloads forward the stored members with the same
// value category as *this.
template<typename FUNC, typename AWAITABLE>
class fmap_awaitable
{
	// Members are stored by value; callers must decay before instantiating.
	static_assert(!std::is_lvalue_reference_v<FUNC>);
	static_assert(!std::is_lvalue_reference_v<AWAITABLE>);
public:

	template<
		typename FUNC_ARG,
		typename AWAITABLE_ARG,
		std::enable_if_t<
			std::is_constructible_v<FUNC, FUNC_ARG&&> &&
			std::is_constructible_v<AWAITABLE, AWAITABLE_ARG&&>, int> = 0>
	explicit fmap_awaitable(FUNC_ARG&& func, AWAITABLE_ARG&& awaitable)
		noexcept(
			std::is_nothrow_constructible_v<FUNC, FUNC_ARG&&> &&
			std::is_nothrow_constructible_v<AWAITABLE, AWAITABLE_ARG&&>)
		: m_func(static_cast<FUNC_ARG&&>(func))
		, m_awaitable(static_cast<AWAITABLE_ARG&&>(awaitable))
	{}

	// const-lvalue await: members are passed as const references.
	auto operator co_await() const &
	{
		return fmap_awaiter<const FUNC&, const AWAITABLE&>(m_func, m_awaitable);
	}

	// lvalue await: members are passed as mutable references.
	auto operator co_await() &
	{
		return fmap_awaiter<FUNC&, AWAITABLE&>(m_func, m_awaitable);
	}

	// rvalue await: members are moved into the awaiter.
	auto operator co_await() &&
	{
		return fmap_awaiter<FUNC&&, AWAITABLE&&>(
			static_cast<FUNC&&>(m_func),
			static_cast<AWAITABLE&&>(m_awaitable));
	}

private:

	FUNC m_func;
	AWAITABLE m_awaitable;

};
}
template<typename FUNC>
struct fmap_transform
{
@ -21,6 +129,19 @@ namespace cppcoro
FUNC func;
};
// Creates a lazily-evaluated awaitable that, when awaited, first awaits
// 'awaitable' and then applies 'func' to the result.
template<
	typename FUNC,
	typename AWAITABLE,
	std::enable_if_t<cppcoro::is_awaitable_v<AWAITABLE>, int> = 0>
auto fmap(FUNC&& func, AWAITABLE&& awaitable)
{
	// Decay both arguments so fmap_awaitable stores them by value.
	using func_t = std::remove_cv_t<std::remove_reference_t<FUNC>>;
	using awaitable_t = std::remove_cv_t<std::remove_reference_t<AWAITABLE>>;
	return detail::fmap_awaitable<func_t, awaitable_t>(
		std::forward<FUNC>(func),
		std::forward<AWAITABLE>(awaitable));
}
template<typename FUNC>
auto fmap(FUNC&& func)
{

View File

@ -8,6 +8,7 @@
#include <experimental/coroutine>
#include <type_traits>
#include <utility>
#include <exception>
namespace cppcoro
{
@ -49,11 +50,13 @@ namespace cppcoro
void unhandled_exception()
{
std::rethrow_exception(std::current_exception());
m_value = nullptr;
m_exception = std::current_exception();
}
void return_void()
{
m_value = nullptr;
}
reference_type value() const noexcept
@ -65,9 +68,18 @@ namespace cppcoro
template<typename U>
std::experimental::suspend_never await_transform(U&& value) = delete;
void rethrow_if_exception()
{
if (m_exception)
{
std::rethrow_exception(m_exception);
}
}
private:
pointer_type m_value;
std::exception_ptr m_exception;
};
@ -108,7 +120,7 @@ namespace cppcoro
m_coroutine.resume();
if (m_coroutine.done())
{
m_coroutine = nullptr;
std::exchange(m_coroutine, {}).promise().rethrow_if_exception();
}
return *this;
@ -179,6 +191,8 @@ namespace cppcoro
{
return iterator{ m_coroutine };
}
m_coroutine.promise().rethrow_if_exception();
}
return iterator{ nullptr };

View File

@ -0,0 +1,26 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_IS_AWAITABLE_HPP_INCLUDED
#define CPPCORO_IS_AWAITABLE_HPP_INCLUDED
#include <cppcoro/detail/get_awaiter.hpp>
#include <type_traits>
namespace cppcoro
{
// Trait detecting whether T can be used as the operand of a co_await
// expression: true exactly when detail::get_awaiter() is well-formed
// for a value of type T (i.e. T is itself an awaiter or provides an
// operator co_await). Uses the std::void_t detection idiom.
template<typename T, typename = std::void_t<>>
struct is_awaitable : std::false_type {};

template<typename T>
struct is_awaitable<T, std::void_t<decltype(cppcoro::detail::get_awaiter(std::declval<T>()))>>
	: std::true_type
{};

// Convenience variable template for is_awaitable<T>::value.
template<typename T>
constexpr bool is_awaitable_v = is_awaitable<T>::value;
}
#endif

View File

@ -6,8 +6,12 @@
#define CPPCORO_RESUME_ON_HPP_INCLUDED
#include <cppcoro/task.hpp>
#include <cppcoro/shared_task.hpp>
#include <cppcoro/async_generator.hpp>
#include <cppcoro/awaitable_traits.hpp>
#include <cppcoro/detail/get_awaiter.hpp>
#include <exception>
#include <type_traits>
namespace cppcoro
{
@ -33,20 +37,81 @@ namespace cppcoro
return resume_on(transform.scheduler, std::forward<T>(value));
}
template<typename SCHEDULER, typename T>
task<T> resume_on(SCHEDULER& scheduler, task<T> task)
template<
typename SCHEDULER,
typename AWAITABLE,
typename AWAIT_RESULT = detail::remove_rvalue_reference_t<typename awaitable_traits<AWAITABLE>::await_result_t>,
std::enable_if_t<!std::is_void_v<AWAIT_RESULT>, int> = 0>
auto resume_on(SCHEDULER& scheduler, AWAITABLE awaitable)
-> task<AWAIT_RESULT>
{
co_await task.when_ready();
co_await scheduler.schedule();
co_return co_await std::move(task);
bool rescheduled = false;
std::exception_ptr ex;
try
{
// We manually get the awaiter here so that we can keep
// it alive across the call to `scheduler.schedule()`
// just in case the result is a reference to a value
// in the awaiter that would otherwise be a temporary
// and destructed before the value could be returned.
auto&& awaiter = detail::get_awaiter(static_cast<AWAITABLE&&>(awaitable));
auto&& result = co_await static_cast<decltype(awaiter)>(awaiter);
// Flag as rescheduled before scheduling in case it is the
// schedule() operation that throws an exception as we don't
// want to attempt to schedule twice if scheduling fails.
rescheduled = true;
co_await scheduler.schedule();
co_return static_cast<decltype(result)>(result);
}
catch (...)
{
ex = std::current_exception();
}
// We still want to resume on the scheduler even in the presence
// of an exception.
if (!rescheduled)
{
co_await scheduler.schedule();
}
std::rethrow_exception(ex);
}
template<typename SCHEDULER, typename T>
task<T> resume_on(SCHEDULER& scheduler, shared_task<T> task)
template<
typename SCHEDULER,
typename AWAITABLE,
typename AWAIT_RESULT = detail::remove_rvalue_reference_t<typename awaitable_traits<AWAITABLE>::await_result_t>,
std::enable_if_t<std::is_void_v<AWAIT_RESULT>, int> = 0>
auto resume_on(SCHEDULER& scheduler, AWAITABLE awaitable)
-> task<>
{
co_await task.when_ready();
std::exception_ptr ex;
try
{
co_await static_cast<AWAITABLE&&>(awaitable);
}
catch (...)
{
ex = std::current_exception();
}
// NOTE: We're assuming that `schedule()` operation is noexcept
// here. If it were to throw what would we do if 'ex' was non-null?
// Presumably we'd treat it the same as throwing an exception while
// unwinding and call std::terminate()?
co_await scheduler.schedule();
co_return co_await std::move(task);
if (ex)
{
std::rethrow_exception(ex);
}
}
template<typename SCHEDULER, typename T>

View File

@ -8,6 +8,9 @@
#include <cppcoro/task.hpp>
#include <cppcoro/shared_task.hpp>
#include <cppcoro/async_generator.hpp>
#include <cppcoro/awaitable_traits.hpp>
#include <cppcoro/detail/remove_rvalue_reference.hpp>
namespace cppcoro
{
@ -33,11 +36,12 @@ namespace cppcoro
return schedule_on(transform.scheduler, std::forward<T>(value));
}
template<typename T, typename SCHEDULER>
task<T> schedule_on(SCHEDULER& scheduler, task<T> task)
template<typename SCHEDULER, typename AWAITABLE>
auto schedule_on(SCHEDULER& scheduler, AWAITABLE awaitable)
-> task<detail::remove_rvalue_reference_t<typename awaitable_traits<AWAITABLE>::await_result_t>>
{
co_await scheduler.schedule();
co_return co_await std::move(task);
co_return co_await std::move(awaitable);
}
template<typename T, typename SCHEDULER>

View File

@ -6,10 +6,11 @@
#define CPPCORO_SHARED_LAZY_TASK_HPP_INCLUDED
#include <cppcoro/config.hpp>
#include <cppcoro/awaitable_traits.hpp>
#include <cppcoro/broken_promise.hpp>
#include <cppcoro/task.hpp>
#include <cppcoro/detail/continuation.hpp>
#include <cppcoro/detail/remove_rvalue_reference.hpp>
#include <atomic>
#include <exception>
@ -27,77 +28,63 @@ namespace cppcoro
{
struct shared_task_waiter
{
continuation m_continuation;
std::experimental::coroutine_handle<> m_continuation;
shared_task_waiter* m_next;
};
class shared_task_promise_base
{
friend struct final_awaiter;
struct final_awaiter
{
bool await_ready() const noexcept { return false; }
template<typename PROMISE>
void await_suspend(std::experimental::coroutine_handle<PROMISE> h) noexcept
{
shared_task_promise_base& promise = h.promise();
// Exchange operation needs to be 'release' so that subsequent awaiters have
// visibility of the result. Also needs to be 'acquire' so we have visibility
// of writes to the waiters list.
void* const valueReadyValue = &promise;
void* waiters = promise.m_waiters.exchange(valueReadyValue, std::memory_order_acq_rel);
if (waiters != nullptr)
{
shared_task_waiter* waiter = static_cast<shared_task_waiter*>(waiters);
while (waiter->m_next != nullptr)
{
// Read the m_next pointer before resuming the coroutine
// since resuming the coroutine may destroy the shared_task_waiter value.
auto* next = waiter->m_next;
waiter->m_continuation.resume();
waiter = next;
}
// Resume last waiter in tail position to allow it to potentially
// be compiled as a tail-call.
waiter->m_continuation.resume();
}
}
void await_resume() noexcept {}
};
public:
shared_task_promise_base() noexcept
: m_waiters(&this->m_waiters)
, m_refCount(2)
, m_refCount(1)
, m_exception(nullptr)
{}
auto initial_suspend() noexcept
{
return std::experimental::suspend_always{};
}
auto final_suspend() noexcept
{
struct awaitable
{
shared_task_promise_base& m_promise;
awaitable(shared_task_promise_base& promise) noexcept
: m_promise(promise)
{}
bool await_ready() const noexcept
{
return m_promise.m_refCount.load(std::memory_order_acquire) == 1;
}
bool await_suspend(std::experimental::coroutine_handle<>) noexcept
{
return m_promise.m_refCount.fetch_sub(1, std::memory_order_acq_rel) > 1;
}
void await_resume() noexcept
{}
};
// Exchange operation needs to be 'release' so that subsequent awaiters have
// visibility of the result. Also needs to be 'acquire' so we have visibility
// of writes to the waiters list.
void* const valueReadyValue = this;
void* waiters = m_waiters.exchange(valueReadyValue, std::memory_order_acq_rel);
if (waiters != nullptr)
{
shared_task_waiter* waiter = static_cast<shared_task_waiter*>(waiters);
do
{
// Read the m_next pointer before resuming the coroutine
// since resuming the coroutine may destroy the shared_task_waiter value.
auto* next = waiter->m_next;
waiter->m_continuation.resume();
waiter = next;
} while (waiter != nullptr);
}
return awaitable{ *this };
}
std::experimental::suspend_always initial_suspend() noexcept { return {}; }
final_awaiter final_suspend() noexcept { return {}; }
void unhandled_exception() noexcept
{
// No point capturing exception if no more references to the task.
if (m_refCount.load(std::memory_order_relaxed) > 1)
{
m_exception = std::current_exception();
}
m_exception = std::current_exception();
}
bool is_ready() const noexcept
@ -119,7 +106,7 @@ namespace cppcoro
/// call destroy() on the coroutine handle.
bool try_detach() noexcept
{
return m_refCount.fetch_sub(1, std::memory_order_acq_rel) > 1;
return m_refCount.fetch_sub(1, std::memory_order_acq_rel) != 1;
}
/// Try to enqueue a waiter to the list of waiters.
@ -156,7 +143,10 @@ namespace cppcoro
// Start the coroutine if not already started.
void* oldWaiters = m_waiters.load(std::memory_order_acquire);
if (oldWaiters == notStartedValue &&
m_waiters.compare_exchange_strong(oldWaiters, startedNoWaitersValue, std::memory_order_relaxed))
m_waiters.compare_exchange_strong(
oldWaiters,
startedNoWaitersValue,
std::memory_order_relaxed))
{
// Start the task executing.
coroutine.resume();
@ -328,7 +318,7 @@ namespace cppcoro
bool await_suspend(std::experimental::coroutine_handle<> awaiter) noexcept
{
m_waiter.m_continuation = detail::continuation{ awaiter };
m_waiter.m_continuation = awaiter;
return m_coroutine.promise().try_await(&m_waiter, m_coroutine);
}
};
@ -446,37 +436,6 @@ namespace cppcoro
return awaitable{ m_coroutine };
}
auto get_starter() const noexcept
{
struct starter
{
public:
explicit starter(std::experimental::coroutine_handle<promise_type> coroutine)
: m_coroutine(coroutine)
{}
void start(detail::continuation c) noexcept
{
m_waiter.m_continuation = c;
if (!m_coroutine ||
m_coroutine.promise().is_ready() ||
!m_coroutine.promise().try_await(&m_waiter, m_coroutine))
{
// Task completed synchronously, resume immediately.
c.resume();
}
}
private:
std::experimental::coroutine_handle<promise_type> m_coroutine;
detail::shared_task_waiter m_waiter;
};
return starter{ m_coroutine };
}
private:
template<typename U>
@ -541,36 +500,11 @@ namespace cppcoro
}
}
template<typename T>
shared_task<T> make_shared_task(task<T> t)
template<typename AWAITABLE>
auto make_shared_task(AWAITABLE awaitable)
-> shared_task<detail::remove_rvalue_reference_t<typename awaitable_traits<AWAITABLE>::await_result_t>>
{
co_return co_await std::move(t);
}
#if defined(_MSC_VER) && _MSC_FULL_VER <= 191025019 || CPPCORO_COMPILER_CLANG
// HACK: Workaround for broken MSVC that doesn't execute <expr> in 'co_return <expr>;'.
inline shared_task<void> make_shared_task(task<void> t)
{
co_await t;
}
#endif
// Note: We yield a task<> when applying fmap() operator to a shared_task<> since
// it's not necessarily the case that because the source task was shared that the
// result will be used in a shared context. So we choose to return a task<> which
// generally has less overhead than a shared_task<>.
template<typename FUNC, typename T>
task<std::result_of_t<FUNC&&(T&)>> fmap(FUNC func, shared_task<T> task)
{
co_return std::invoke(std::move(func), co_await std::move(task));
}
template<typename FUNC>
task<std::result_of_t<FUNC&&()>> fmap(FUNC func, shared_task<void> task)
{
co_await task;
co_return std::invoke(std::move(func));
co_return co_await static_cast<AWAITABLE&&>(awaitable);
}
}

View File

@ -0,0 +1,116 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_STATIC_THREAD_POOL_HPP_INCLUDED
#define CPPCORO_STATIC_THREAD_POOL_HPP_INCLUDED
#include <atomic>
#include <cstdint>
#include <memory>
#include <thread>
#include <vector>
#include <mutex>
#include <experimental/coroutine>
namespace cppcoro
{
/// A fixed-size pool of worker threads onto which coroutines can be
/// scheduled via 'co_await pool.schedule()'. Work distribution uses a
/// global queue plus per-thread state that supports stealing work from
/// other threads (see try_steal_from_other_thread()).
class static_thread_pool
{
public:
	/// Initialise to a number of threads equal to the number of cores
	/// on the current machine.
	static_thread_pool();

	/// Construct a thread pool with the specified number of threads.
	///
	/// \param threadCount
	/// The number of threads in the pool that will be used to execute work.
	explicit static_thread_pool(std::uint32_t threadCount);

	~static_thread_pool();

	/// Awaitable returned by schedule(): suspends the awaiting coroutine
	/// and enqueues it for resumption on one of the pool's threads.
	class schedule_operation
	{
	public:

		schedule_operation(static_thread_pool* tp) noexcept : m_threadPool(tp) {}

		// Always suspend so the coroutine resumes on a pool thread.
		bool await_ready() noexcept { return false; }
		void await_suspend(std::experimental::coroutine_handle<> awaitingCoroutine) noexcept;
		void await_resume() noexcept {}

	private:

		friend class static_thread_pool;

		static_thread_pool* m_threadPool;
		std::experimental::coroutine_handle<> m_awaitingCoroutine;
		// Intrusive link used to chain queued operations.
		schedule_operation* m_next;
	};

	/// Number of worker threads in the pool.
	std::uint32_t thread_count() const noexcept { return m_threadCount; }

	/// Returns an awaitable that reschedules the awaiting coroutine onto
	/// one of the pool's worker threads.
	[[nodiscard]]
	schedule_operation schedule() noexcept { return schedule_operation{ this }; }

private:

	friend class schedule_operation;

	// Entry point executed by each worker thread.
	void run_worker_thread(std::uint32_t threadIndex) noexcept;

	void shutdown();

	void schedule_impl(schedule_operation* operation) noexcept;

	// Enqueue from a thread that is not one of the pool's workers.
	void remote_enqueue(schedule_operation* operation) noexcept;

	bool has_any_queued_work_for(std::uint32_t threadIndex) noexcept;
	// Cheaper, possibly-stale variant of the check above.
	bool approx_has_any_queued_work_for(std::uint32_t threadIndex) const noexcept;

	bool is_shutdown_requested() const noexcept;

	void notify_intent_to_sleep(std::uint32_t threadIndex) noexcept;
	void try_clear_intent_to_sleep(std::uint32_t threadIndex) noexcept;

	schedule_operation* try_global_dequeue() noexcept;

	/// Try to steal a task from another thread.
	///
	/// \return
	/// A pointer to the operation that was stolen if one could be stolen
	/// from another thread. Otherwise returns nullptr if none of the other
	/// threads had any tasks that could be stolen.
	schedule_operation* try_steal_from_other_thread(std::uint32_t thisThreadIndex) noexcept;

	void wake_one_thread() noexcept;

	// Per-worker-thread state; defined in the implementation file.
	class thread_state;

	static thread_local thread_state* s_currentState;
	static thread_local static_thread_pool* s_currentThreadPool;

	const std::uint32_t m_threadCount;
	const std::unique_ptr<thread_state[]> m_threadStates;

	std::vector<std::thread> m_threads;

	std::atomic<bool> m_stopRequested;

	std::mutex m_globalQueueMutex;

	// Linked list of pending schedule_operations shared by all threads.
	std::atomic<schedule_operation*> m_globalQueueHead;

	//alignas(std::hardware_destructive_interference_size)
	std::atomic<schedule_operation*> m_globalQueueTail;

	//alignas(std::hardware_destructive_interference_size)
	std::atomic<std::uint32_t> m_sleepingThreadCount;
};
}
#endif

View File

@ -6,32 +6,31 @@
#define CPPCORO_SYNC_WAIT_HPP_INCLUDED
#include <cppcoro/detail/lightweight_manual_reset_event.hpp>
#include <cppcoro/detail/continuation.hpp>
#include <cppcoro/detail/sync_wait_task.hpp>
#include <cppcoro/awaitable_traits.hpp>
#include <cstdint>
#include <atomic>
namespace cppcoro
{
template<typename TASK>
decltype(auto) sync_wait(TASK&& task)
template<typename AWAITABLE>
auto sync_wait(AWAITABLE&& awaitable)
-> typename cppcoro::awaitable_traits<AWAITABLE&&>::await_result_t
{
if (!task.is_ready())
{
detail::lightweight_manual_reset_event event;
auto callback = [](void* state)
{
static_cast<detail::lightweight_manual_reset_event*>(state)->set();
};
auto starter = task.get_starter();
starter.start(cppcoro::detail::continuation{ callback, &event });
event.wait();
}
return std::forward<TASK>(task).operator co_await().await_resume();
#if CPPCORO_COMPILER_MSVC
// HACK: Need to explicitly specify template argument to make_sync_wait_task
// here to work around a bug in MSVC when passing parameters by universal
// reference to a coroutine which causes the compiler to think it needs to
// 'move' parameters passed by rvalue reference.
auto task = detail::make_sync_wait_task<AWAITABLE>(awaitable);
#else
auto task = detail::make_sync_wait_task(std::forward<AWAITABLE>(awaitable));
#endif
detail::lightweight_manual_reset_event event;
task.start(event);
event.wait();
return task.result();
}
}

View File

@ -6,10 +6,10 @@
#define CPPCORO_TASK_HPP_INCLUDED
#include <cppcoro/config.hpp>
#include <cppcoro/awaitable_traits.hpp>
#include <cppcoro/broken_promise.hpp>
#include <cppcoro/fmap.hpp>
#include <cppcoro/detail/continuation.hpp>
#include <cppcoro/detail/remove_rvalue_reference.hpp>
#include <atomic>
#include <exception>
@ -74,9 +74,9 @@ namespace cppcoro
m_exception = std::current_exception();
}
bool try_set_continuation(continuation c)
bool try_set_continuation(std::experimental::coroutine_handle<> continuation)
{
m_continuation = c;
m_continuation = continuation;
return !m_state.exchange(true, std::memory_order_acq_rel);
}
@ -102,7 +102,7 @@ namespace cppcoro
private:
continuation m_continuation;
std::experimental::coroutine_handle<> m_continuation;
std::exception_ptr m_exception;
// Initially false. Set to true when either a continuation is registered
@ -144,7 +144,18 @@ namespace cppcoro
return *reinterpret_cast<T*>(&m_valueStorage);
}
T&& result() &&
// HACK: Need to have co_await of task<int> return prvalue rather than
// rvalue-reference to work around an issue with MSVC where returning
// rvalue reference of a fundamental type from await_resume() will
// cause the value to be copied to a temporary. This breaks the
// sync_wait() implementation.
// See https://github.com/lewissbaker/cppcoro/issues/40#issuecomment-326864107
using rvalue_type = std::conditional_t<
std::is_arithmetic_v<T> || std::is_pointer_v<T>,
T,
T&&>;
rvalue_type result() &&
{
rethrow_if_unhandled_exception();
return std::move(*reinterpret_cast<T*>(&m_valueStorage));
@ -246,7 +257,7 @@ namespace cppcoro
return !m_coroutine || m_coroutine.done();
}
bool await_suspend(std::experimental::coroutine_handle<> awaiter) noexcept
bool await_suspend(std::experimental::coroutine_handle<> awaitingCoroutine) noexcept
{
// NOTE: We are using the bool-returning version of await_suspend() here
// to work around a potential stack-overflow issue if a coroutine
@ -265,7 +276,7 @@ namespace cppcoro
// as this will provide ability to suspend the awaiting coroutine and
// resume another coroutine with a guaranteed tail-call to resume().
m_coroutine.resume();
return m_coroutine.promise().try_set_continuation(detail::continuation{ awaiter });
return m_coroutine.promise().try_set_continuation(awaitingCoroutine);
}
};
@ -378,39 +389,6 @@ namespace cppcoro
return awaitable{ m_coroutine };
}
// Internal helper method for when_all() implementation.
auto get_starter() const noexcept
{
class starter
{
public:
starter(std::experimental::coroutine_handle<promise_type> coroutine) noexcept
: m_coroutine(coroutine)
{}
void start(detail::continuation continuation) noexcept
{
if (m_coroutine && !m_coroutine.done())
{
m_coroutine.resume();
if (m_coroutine.promise().try_set_continuation(continuation))
{
return;
}
}
continuation.resume();
}
private:
std::experimental::coroutine_handle<promise_type> m_coroutine;
};
return starter{ m_coroutine };
}
private:
std::experimental::coroutine_handle<promise_type> m_coroutine;
@ -437,29 +415,11 @@ namespace cppcoro
}
}
// fmap() overloads for task<T>
template<typename FUNC, typename T>
task<std::result_of_t<FUNC&&(T&&)>> fmap(FUNC func, task<T> t)
template<typename AWAITABLE>
auto make_task(AWAITABLE awaitable)
-> task<detail::remove_rvalue_reference_t<typename awaitable_traits<AWAITABLE>::await_result_t>>
{
static_assert(
!std::is_reference_v<FUNC>,
"Passing by reference to task<T> coroutine is unsafe. "
"Use std::ref or std::cref to explicitly pass by reference.");
co_return std::invoke(std::move(func), co_await std::move(t));
}
template<typename FUNC>
task<std::result_of_t<FUNC&&()>> fmap(FUNC func, task<> t)
{
static_assert(
!std::is_reference_v<FUNC>,
"Passing by reference to task<T> coroutine is unsafe. "
"Use std::ref or std::cref to explicitly pass by reference.");
co_await t;
co_return std::invoke(std::move(func));
co_return co_await static_cast<AWAITABLE&&>(awaitable);
}
}

View File

@ -5,164 +5,86 @@
#ifndef CPPCORO_WHEN_ALL_HPP_INCLUDED
#define CPPCORO_WHEN_ALL_HPP_INCLUDED
#include <cppcoro/task.hpp>
#include <cppcoro/shared_task.hpp>
#include <cppcoro/when_all_ready.hpp>
#include <cppcoro/awaitable_traits.hpp>
#include <cppcoro/is_awaitable.hpp>
#include <cppcoro/fmap.hpp>
#include <cppcoro/detail/unwrap_reference.hpp>
#include <cppcoro/detail/when_all_awaitable.hpp>
#include <tuple>
#include <functional>
#include <utility>
#include <vector>
#include <type_traits>
#include <cassert>
namespace cppcoro
{
//////////
// Variadic when_all()
namespace detail
template<
typename... AWAITABLES,
std::enable_if_t<
std::conjunction_v<is_awaitable<detail::unwrap_reference_t<std::remove_reference_t<AWAITABLES>>>...>,
int> = 0>
[[nodiscard]] auto when_all(AWAITABLES&&... awaitables)
{
template<typename T>
T&& move_if_not_reference_wrapper(T& value)
return fmap([](auto&& taskTuple)
{
return std::move(value);
}
template<typename T>
T& move_if_not_reference_wrapper(std::reference_wrapper<T>& value)
{
return value.get();
}
}
inline task<std::tuple<>> when_all()
{
co_return std::tuple<>{};
}
template<typename TASK>
task<std::tuple<typename detail::unwrap_reference_t<TASK>::value_type>> when_all(TASK task)
{
// Specialisation for one task parameter that avoids use of atomics as no synchronisation
// is required.
co_return std::tuple<typename detail::unwrap_reference_t<TASK>::value_type>{ co_await std::move(task) };
}
template<typename... TASKS>
[[nodiscard]]
task<std::tuple<typename detail::unwrap_reference_t<TASKS>::value_type...>> when_all(TASKS... tasks)
{
detail::when_all_awaitable awaitable{ sizeof...(TASKS) };
const std::initializer_list<int> dummy = {
(std::ref(tasks).get().get_starter().start(awaitable.get_continuation()), 0)...,
(co_await awaitable, 0)
};
co_return std::tuple<typename detail::unwrap_reference_t<TASKS>::value_type...>{
co_await detail::move_if_not_reference_wrapper(tasks)...
};
return std::apply([](auto&&... tasks) {
return std::make_tuple(static_cast<decltype(tasks)>(tasks).non_void_result()...);
}, static_cast<decltype(taskTuple)>(taskTuple));
}, when_all_ready(std::forward<AWAITABLES>(awaitables)...));
}
//////////
// when_all() with vector of task
// when_all() with vector of awaitable
template<
typename AWAITABLE,
typename RESULT = typename awaitable_traits<detail::unwrap_reference_t<AWAITABLE>>::await_result_t,
std::enable_if_t<std::is_void_v<RESULT>, int> = 0>
[[nodiscard]]
inline task<> when_all(std::vector<task<>> tasks)
auto when_all(std::vector<AWAITABLE> awaitables)
{
tasks = co_await when_all_ready(std::move(tasks));
// Now await each task so that any exceptions are rethrown.
for (auto& t : tasks)
{
co_await std::move(t);
}
return fmap([](auto&& taskVector) {
for (auto& task : taskVector)
{
task.result();
}
}, when_all_ready(std::move(awaitables)));
}
template<typename T>
template<
typename AWAITABLE,
typename RESULT = typename awaitable_traits<detail::unwrap_reference_t<AWAITABLE>>::await_result_t,
std::enable_if_t<!std::is_void_v<RESULT>, int> = 0>
[[nodiscard]]
task<std::vector<T>> when_all(std::vector<task<T>> tasks)
auto when_all(std::vector<AWAITABLE> awaitables)
{
tasks = co_await when_all_ready(std::move(tasks));
using result_t = std::conditional_t<
std::is_lvalue_reference_v<RESULT>,
std::reference_wrapper<std::remove_reference_t<RESULT>>,
std::remove_reference_t<RESULT>>;
std::vector<T> results;
results.reserve(tasks.size());
for (auto& t : tasks)
{
results.emplace_back(co_await std::move(t));
}
co_return std::move(results);
}
template<typename T>
[[nodiscard]]
task<std::vector<std::reference_wrapper<T>>> when_all(std::vector<task<T&>> tasks)
{
tasks = co_await when_all_ready(std::move(tasks));
std::vector<std::reference_wrapper<T>> results;
results.reserve(tasks.size());
for (auto& t : tasks)
{
results.emplace_back(co_await std::move(t));
}
co_return std::move(results);
}
//////////
// when_all() with vector of shared_task
[[nodiscard]]
inline task<> when_all(std::vector<shared_task<>> tasks)
{
tasks = co_await when_all_ready(std::move(tasks));
// Now await each task so that any exceptions are rethrown.
for (auto& t : tasks)
{
co_await std::move(t);
}
}
template<typename T>
[[nodiscard]]
task<std::vector<T>> when_all(std::vector<shared_task<T>> tasks)
{
tasks = co_await when_all_ready(std::move(tasks));
std::vector<T> results;
results.reserve(tasks.size());
for (auto& t : tasks)
{
results.emplace_back(co_await std::move(t));
}
co_return std::move(results);
}
template<typename T>
[[nodiscard]]
task<std::vector<std::reference_wrapper<T>>> when_all(std::vector<shared_task<T&>> tasks)
{
tasks = co_await when_all_ready(std::move(tasks));
std::vector<std::reference_wrapper<T>> results;
results.reserve(tasks.size());
for (auto& t : tasks)
{
results.emplace_back(co_await std::move(t));
}
co_return std::move(results);
return fmap([](auto&& taskVector) {
std::vector<result_t> results;
results.reserve(taskVector.size());
for (auto& task : taskVector)
{
if constexpr (std::is_rvalue_reference_v<decltype(taskVector)>)
{
results.emplace_back(std::move(task).result());
}
else
{
results.emplace_back(task.result());
}
}
return results;
}, when_all_ready(std::move(awaitables)));
}
}

View File

@ -5,101 +5,51 @@
#ifndef CPPCORO_WHEN_ALL_READY_HPP_INCLUDED
#define CPPCORO_WHEN_ALL_READY_HPP_INCLUDED
#include <cppcoro/task.hpp>
#include <cppcoro/shared_task.hpp>
#include <cppcoro/config.hpp>
#include <cppcoro/awaitable_traits.hpp>
#include <cppcoro/is_awaitable.hpp>
#include <cppcoro/detail/when_all_awaitable.hpp>
#include <cppcoro/detail/when_all_ready_awaitable.hpp>
#include <cppcoro/detail/when_all_task.hpp>
#include <cppcoro/detail/unwrap_reference.hpp>
#include <tuple>
#include <functional>
#include <utility>
#include <vector>
#include <type_traits>
namespace cppcoro
{
template<
typename... AWAITABLES,
std::enable_if_t<std::conjunction_v<
is_awaitable<detail::unwrap_reference_t<std::remove_reference_t<AWAITABLES>>>...>, int> = 0>
[[nodiscard]]
inline task<std::tuple<>> when_all_ready()
CPPCORO_FORCE_INLINE auto when_all_ready(AWAITABLES&&... awaitables)
{
co_return std::tuple<>{};
return detail::when_all_ready_awaitable<std::tuple<detail::when_all_task<
typename awaitable_traits<detail::unwrap_reference_t<std::remove_reference_t<AWAITABLES>>>::await_result_t>...>>(
std::make_tuple(detail::make_when_all_task(std::forward<AWAITABLES>(awaitables))...));
}
template<typename TASK>
[[nodiscard]]
task<std::tuple<TASK>> when_all_ready(TASK task)
// TODO: Generalise this from vector<AWAITABLE> to arbitrary sequence of awaitable.
template<
typename AWAITABLE,
typename RESULT = typename awaitable_traits<detail::unwrap_reference_t<AWAITABLE>>::await_result_t>
[[nodiscard]] auto when_all_ready(std::vector<AWAITABLE> awaitables)
{
// Slightly more efficient implementation for single task case that avoids
// using atomics that are otherwise required to coordinate completion of
// multiple tasks in general version below.
co_await std::ref(task).get().when_ready();
co_return std::tuple<TASK>{ std::move(task) };
}
std::vector<detail::when_all_task<RESULT>> tasks;
template<typename... TASKS>
[[nodiscard]]
task<std::tuple<TASKS...>> when_all_ready(TASKS... tasks)
{
detail::when_all_awaitable awaitable{ sizeof...(TASKS) };
tasks.reserve(awaitables.size());
// Use std::initializer_list trick here to force sequential ordering
// of evaluation of the arguments so that tasks are deterministically
// started in the order they are passed-in and the 'co_await' is
// evaluated last but before all of the temporary 'starter' objects
// are destructed.
const std::initializer_list<int> dummy = {
(std::ref(tasks).get().get_starter().start(awaitable.get_continuation()), 0)...,
(co_await awaitable, 0)
};
co_return std::tuple<TASKS...>{ std::move(tasks)... };
}
template<typename T>
[[nodiscard]]
task<std::vector<task<T>>> when_all_ready(std::vector<task<T>> tasks)
{
if (!tasks.empty())
for (auto& awaitable : awaitables)
{
detail::when_all_awaitable awaitable{ tasks.size() };
for (auto& t : tasks)
{
// NOTE: We are relying on the fact that the 'starter' type returned by get_starter()
// is not required to live until the task completes.
t.get_starter().start(awaitable.get_continuation());
}
co_await awaitable;
tasks.emplace_back(detail::make_when_all_task(std::move(awaitable)));
}
co_return std::move(tasks);
}
template<typename T>
[[nodiscard]]
task<std::vector<shared_task<T>>> when_all_ready(std::vector<shared_task<T>> tasks)
{
if (!tasks.empty())
{
detail::when_all_awaitable awaitable{ tasks.size() };
using starter_t = decltype(std::declval<shared_task<T>>().get_starter());
std::vector<starter_t> starters;
// Reserve the desired number of elements to ensure elements aren't moved as we
// add elements to the vector in the loop below as that would leave dangling
// pointers registered as continuations for the tasks.
starters.reserve(tasks.size());
for (auto& t : tasks)
{
starters.emplace_back(t.get_starter()).start(awaitable.get_continuation());
}
co_await awaitable;
}
co_return std::move(tasks);
return detail::when_all_ready_awaitable<std::vector<detail::when_all_task<RESULT>>>(
std::move(tasks));
}
}

97
lib/auto_reset_event.cpp Normal file
View File

@ -0,0 +1,97 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#include "auto_reset_event.hpp"
#if CPPCORO_OS_WINNT
# define WIN32_LEAN_AND_MEAN
# include <Windows.h>
# include <system_error>
#endif
namespace cppcoro
{
#if CPPCORO_OS_WINNT

	// Win32 implementation: wraps a kernel auto-reset event object.
	// The second CreateEventW argument (bManualReset = FALSE) selects
	// auto-reset semantics: a successful wait atomically resets the event.
	auto_reset_event::auto_reset_event(bool initiallySet)
		: m_event(::CreateEventW(NULL, FALSE, initiallySet ? TRUE : FALSE, NULL))
	{
		// CreateEventW reports failure with a NULL handle (not INVALID_HANDLE_VALUE).
		if (m_event.handle() == NULL)
		{
			DWORD errorCode = ::GetLastError();
			throw std::system_error
			{
				static_cast<int>(errorCode),
				std::system_category(),
				"auto_reset_event: CreateEvent failed"
			};
		}
	}

	auto_reset_event::~auto_reset_event()
	{
		// m_event is a safe_handle which closes the kernel handle itself (RAII).
	}

	// Signal the event, releasing at most one waiting thread.
	void auto_reset_event::set()
	{
		BOOL ok = ::SetEvent(m_event.handle());
		if (!ok)
		{
			DWORD errorCode = ::GetLastError();
			throw std::system_error
			{
				static_cast<int>(errorCode),
				std::system_category(),
				"auto_reset_event: SetEvent failed"
			};
		}
	}

	// Block until the event is signalled; consumes the signal on wake-up.
	void auto_reset_event::wait()
	{
		DWORD result = ::WaitForSingleObjectEx(m_event.handle(), INFINITE, FALSE);
		if (result != WAIT_OBJECT_0)
		{
			DWORD errorCode = ::GetLastError();
			throw std::system_error
			{
				static_cast<int>(errorCode),
				std::system_category(),
				"auto_reset_event: WaitForSingleObjectEx failed"
			};
		}
	}

#else

	// Portable implementation: mutex + condition variable + boolean flag.

	auto_reset_event::auto_reset_event(bool initiallySet)
		: m_isSet(initiallySet)
	{}

	auto_reset_event::~auto_reset_event()
	{}

	void auto_reset_event::set()
	{
		std::unique_lock lock{ m_mutex };
		if (!m_isSet)
		{
			m_isSet = true;
			// Auto-reset releases at most one waiter, so notify_one() suffices.
			m_cv.notify_one();
		}
	}

	void auto_reset_event::wait()
	{
		std::unique_lock lock{ m_mutex };
		// Loop to guard against spurious wake-ups.
		while (!m_isSet)
		{
			m_cv.wait(lock);
		}
		// Consume the signal (auto-reset semantics).
		m_isSet = false;
	}

#endif
}

44
lib/auto_reset_event.hpp Normal file
View File

@ -0,0 +1,44 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_AUTO_RESET_EVENT_HPP_INCLUDED
#define CPPCORO_AUTO_RESET_EVENT_HPP_INCLUDED
#include <cppcoro/config.hpp>
#if CPPCORO_OS_WINNT
# include <cppcoro/detail/win32.hpp>
#else
# include <mutex>
# include <condition_variable>
#endif
namespace cppcoro
{
	/// A simple thread-synchronisation primitive with auto-reset semantics:
	/// set() releases at most one thread blocked in wait(), and a successful
	/// wait() atomically returns the event to the non-signalled state.
	///
	/// Backed by a Win32 kernel event on Windows and by a
	/// mutex/condition-variable pair elsewhere.
	class auto_reset_event
	{
	public:

		/// \param initiallySet
		/// If true then the first call to wait() returns immediately.
		auto_reset_event(bool initiallySet = false);

		~auto_reset_event();

		/// Signal the event, waking at most one waiter.
		void set();

		/// Block until the event is signalled, then consume the signal.
		void wait();

	private:

#if CPPCORO_OS_WINNT
		cppcoro::detail::win32::safe_handle m_event;
#else
		std::mutex m_mutex;
		std::condition_variable m_cv;
		bool m_isSet;
#endif

	};
}
#endif

View File

@ -8,6 +8,8 @@ import cake.path
from cake.tools import compiler, script, env, project, variant
includes = cake.path.join(env.expand('${CPPCORO}'), 'include', 'cppcoro', [
'awaitable_traits.hpp',
'is_awaitable.hpp',
'async_auto_reset_event.hpp',
'async_manual_reset_event.hpp',
'async_generator.hpp',
@ -45,6 +47,7 @@ includes = cake.path.join(env.expand('${CPPCORO}'), 'include', 'cppcoro', [
'read_write_file.hpp',
'file_read_operation.hpp',
'file_write_operation.hpp',
'static_thread_pool.hpp',
])
netIncludes = cake.path.join(env.expand('${CPPCORO}'), 'include', 'cppcoro', 'net', [
@ -58,8 +61,14 @@ netIncludes = cake.path.join(env.expand('${CPPCORO}'), 'include', 'cppcoro', 'ne
])
detailIncludes = cake.path.join(env.expand('${CPPCORO}'), 'include', 'cppcoro', 'detail', [
'continuation.hpp',
'when_all_awaitable.hpp',
'void_value.hpp',
'when_all_ready_awaitable.hpp',
'when_all_counter.hpp',
'when_all_task.hpp',
'get_awaiter.hpp',
'is_awaiter.hpp',
'any.hpp',
'sync_wait_task.hpp',
'unwrap_reference.hpp',
'lightweight_manual_reset_event.hpp',
])
@ -67,6 +76,9 @@ detailIncludes = cake.path.join(env.expand('${CPPCORO}'), 'include', 'cppcoro',
privateHeaders = script.cwd([
'cancellation_state.hpp',
'socket_helpers.hpp',
'auto_reset_event.hpp',
'spin_wait.hpp',
'spin_mutex.hpp',
])
sources = script.cwd([
@ -84,6 +96,10 @@ sources = script.cwd([
'ipv4_endpoint.cpp',
'ipv6_address.cpp',
'ipv6_endpoint.cpp',
'static_thread_pool.cpp',
'auto_reset_event.cpp',
'spin_wait.cpp',
'spin_mutex.cpp',
])
extras = script.cwd([

View File

@ -10,7 +10,9 @@
#include <cassert>
#if CPPCORO_OS_WINNT
# define WIN32_LEAN_AND_MEAN
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <Windows.h>
#endif

View File

@ -6,7 +6,9 @@
#include <cppcoro/file_read_operation.hpp>
#if CPPCORO_OS_WINNT
# define WIN32_LEAN_AND_MEAN
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <Windows.h>
bool cppcoro::file_read_operation_impl::try_start(

View File

@ -6,7 +6,9 @@
#include <cppcoro/file_write_operation.hpp>
#if CPPCORO_OS_WINNT
# define WIN32_LEAN_AND_MEAN
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <Windows.h>
bool cppcoro::file_write_operation_impl::try_start(

View File

@ -12,7 +12,12 @@
#include <thread>
#if CPPCORO_OS_WINNT
# define NOMINMAX
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# ifndef NOMINMAX
# define NOMINMAX
# endif
# include <WinSock2.h>
# include <WS2tcpip.h>
# include <MSWSock.h>

View File

@ -8,7 +8,9 @@
#include <system_error>
#if CPPCORO_OS_WINNT
# define WIN32_LEAN_AND_MEAN
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <Windows.h>
# if CPPCORO_OS_WINNT >= 0x0602

View File

@ -6,7 +6,9 @@
#include <cppcoro\read_only_file.hpp>
#if CPPCORO_OS_WINNT
# define WIN32_LEAN_AND_MEAN
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <Windows.h>
cppcoro::read_only_file cppcoro::read_only_file::open(

View File

@ -6,7 +6,9 @@
#include <cppcoro\read_write_file.hpp>
#if CPPCORO_OS_WINNT
# define WIN32_LEAN_AND_MEAN
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <Windows.h>
cppcoro::read_write_file cppcoro::read_write_file::open(

37
lib/spin_mutex.cpp Normal file
View File

@ -0,0 +1,37 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#include "spin_mutex.hpp"
#include "spin_wait.hpp"
namespace cppcoro
{
spin_mutex::spin_mutex() noexcept
: m_isLocked(false)
{
}
bool spin_mutex::try_lock() noexcept
{
return !m_isLocked.exchange(true, std::memory_order_acquire);
}
void spin_mutex::lock() noexcept
{
spin_wait wait;
while (!try_lock())
{
while (m_isLocked.load(std::memory_order_relaxed))
{
wait.spin_one();
}
}
}
void spin_mutex::unlock() noexcept
{
m_isLocked.store(false, std::memory_order_release);
}
}

47
lib/spin_mutex.hpp Normal file
View File

@ -0,0 +1,47 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_SPIN_MUTEX_HPP_INCLUDED
#define CPPCORO_SPIN_MUTEX_HPP_INCLUDED
#include <atomic>
namespace cppcoro
{
	/// A busy-wait mutual-exclusion lock built on a std::atomic<bool>.
	///
	/// Intended for protecting very short critical sections: lock() never
	/// blocks in the OS, it spins (with back-off) until the lock is free.
	/// Not recursive and not fair.
	class spin_mutex
	{
	public:

		/// Initialise the mutex to the unlocked state.
		spin_mutex() noexcept;

		/// Attempt to lock the mutex without blocking
		///
		/// \return
		/// true if the lock was acquired, false if the lock was already held
		/// and could not be immediately acquired.
		bool try_lock() noexcept;

		/// Block the current thread until the lock is acquired.
		///
		/// This will busy-wait until it acquires the lock.
		///
		/// This has 'acquire' memory semantics and synchronises
		/// with prior calls to unlock().
		void lock() noexcept;

		/// Release the lock.
		///
		/// This has 'release' memory semantics and synchronises with
		/// lock() and try_lock().
		void unlock() noexcept;

	private:

		// true while some thread holds the lock.
		std::atomic<bool> m_isLocked;

	};
}
#endif

101
lib/spin_wait.cpp Normal file
View File

@ -0,0 +1,101 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#include "spin_wait.hpp"
#include <cppcoro/config.hpp>
#include <thread>
#if CPPCORO_OS_WINNT
# define WIN32_LEAN_AND_MEAN
# include <Windows.h>
#endif
namespace
{
namespace local
{
constexpr std::uint32_t yield_threshold = 10;
}
}
namespace cppcoro
{
	spin_wait::spin_wait() noexcept
	{
		reset();
	}

	/// Returns true if the busy-spin budget is exhausted and the next call
	/// to spin_one() will yield the CPU instead of busy-spinning.
	bool spin_wait::next_spin_will_yield() const noexcept
	{
		return m_count >= local::yield_threshold;
	}

	/// Restart the back-off sequence.
	void spin_wait::reset() noexcept
	{
		// On a single-core machine busy-waiting can never let another thread
		// make progress, so start straight at the yielding phase.
		static const std::uint32_t initialCount =
			std::thread::hardware_concurrency() > 1 ? 0 : local::yield_threshold;
		m_count = initialCount;
	}

	/// Perform one back-off step: busy-spin for the first few calls, then
	/// progressively yield/sleep on subsequent calls.
	void spin_wait::spin_one() noexcept
	{
#if CPPCORO_OS_WINNT
		// Spin strategy taken from .NET System.SpinWait class.
		// I assume the Microsoft guys knew what they're doing.
		//
		// BUGFIX: these two branches were previously swapped. The busy-spin
		// ran once next_spin_will_yield() became true (with loopCount of
		// 2u << m_count growing unboundedly), while the yield path computed
		// (m_count - yield_threshold) before the threshold was reached,
		// underflowing the unsigned subtraction. The comments always
		// described the intended behaviour, restored here: busy-spin while
		// under the threshold, yield afterwards.
		if (!next_spin_will_yield())
		{
			// CPU-level pause
			// Allow other hyper-threads to run while we busy-wait.

			// Make each busy-spin exponentially longer
			const std::uint32_t loopCount = 2u << m_count;
			for (std::uint32_t i = 0; i < loopCount; ++i)
			{
				::YieldProcessor();
				::YieldProcessor();
			}
		}
		else
		{
			// We've already spun a number of iterations.
			//
			const auto yieldCount = m_count - local::yield_threshold;
			if (yieldCount % 20 == 19)
			{
				// Yield remainder of time slice to another thread and
				// don't schedule this thread for a little while.
				::SleepEx(1, FALSE);
			}
			else if (yieldCount % 5 == 4)
			{
				// Yield remainder of time slice to another thread
				// that is ready to run (possibly from another processor?).
				::SleepEx(0, FALSE);
			}
			else
			{
				// Yield to another thread that is ready to run on the
				// current processor.
				::SwitchToThread();
			}
		}
#else
		if (next_spin_will_yield())
		{
			std::this_thread::yield();
		}
#endif

		++m_count;
		if (m_count == 0)
		{
			// Don't wrap around to zero as this would go back to
			// busy-waiting.
			m_count = local::yield_threshold;
		}
	}
}

31
lib/spin_wait.hpp Normal file
View File

@ -0,0 +1,31 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#ifndef CPPCORO_SPIN_WAIT_HPP_INCLUDED
#define CPPCORO_SPIN_WAIT_HPP_INCLUDED
#include <cstdint>
namespace cppcoro
{
	/// Helper implementing an adaptive back-off strategy for busy-wait
	/// loops: early calls to spin_one() busy-spin, later calls yield the
	/// CPU to other threads.
	class spin_wait
	{
	public:

		spin_wait() noexcept;

		/// Returns true if the next spin_one() call will yield the CPU
		/// rather than busy-spin.
		bool next_spin_will_yield() const noexcept;

		/// Perform one iteration of the back-off strategy.
		void spin_one() noexcept;

		/// Reset the back-off state to its initial value.
		void reset() noexcept;

	private:

		// Number of spin_one() calls since the last reset().
		std::uint32_t m_count;

	};
}
#endif

753
lib/static_thread_pool.cpp Normal file
View File

@ -0,0 +1,753 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#include <cppcoro/static_thread_pool.hpp>
#include "auto_reset_event.hpp"
#include "spin_mutex.hpp"
#include "spin_wait.hpp"
#include <cassert>
#include <mutex>
#include <chrono>
namespace
{
	namespace local
	{
		// Keep each thread's local queue under 1MB
		constexpr std::size_t max_local_queue_size = 1024 * 1024 / sizeof(void*);

		// Initial capacity of each worker's local ring-buffer queue; grown
		// on demand up to max_local_queue_size. Must be a power of two as
		// (size - 1) is used as an index mask.
		constexpr std::size_t initial_local_queue_size = 256;
	}
}
namespace cppcoro
{
	// Per-thread pointers identifying the worker-state/pool of the calling
	// thread; nullptr on threads that are not pool workers. Consulted by
	// schedule_impl() to enqueue onto the caller's local queue when possible.
	thread_local static_thread_pool::thread_state* static_thread_pool::s_currentState = nullptr;
	thread_local static_thread_pool* static_thread_pool::s_currentThreadPool = nullptr;
	// Per-worker-thread state: a growable ring-buffer work-stealing queue
	// (the owning thread pushes/pops at the head; thieves take from the
	// tail under m_remoteMutex) plus the bookkeeping needed to put the
	// worker to sleep and wake it up again.
	class static_thread_pool::thread_state
	{
	public:

		explicit thread_state()
			: m_localQueue(
				std::make_unique<std::atomic<schedule_operation*>[]>(
					local::initial_local_queue_size))
			, m_mask(local::initial_local_queue_size - 1)
			, m_head(0)
			, m_tail(0)
			, m_isSleeping(false)
		{
		}

		// Wake this thread if it has signalled an intent to sleep.
		// Returns true if this call claimed the wake-up (i.e. it was the one
		// to clear the sleeping flag), false if the thread wasn't sleeping
		// or some other thread claimed the wake-up first.
		bool try_wake_up()
		{
			if (m_isSleeping.load(std::memory_order_seq_cst))
			{
				if (m_isSleeping.exchange(false, std::memory_order_seq_cst))
				{
					try
					{
						m_wakeUpEvent.set();
					}
					catch (...)
					{
						// TODO: What do we do here?
					}

					return true;
				}
			}

			return false;
		}

		// Mark this thread as about to go to sleep; threads that enqueue
		// work after observing this flag are responsible for waking it.
		void notify_intent_to_sleep() noexcept
		{
			m_isSleeping.store(true, std::memory_order_relaxed);
		}

		// Block until another thread calls try_wake_up() on this state.
		void sleep_until_woken() noexcept
		{
			try
			{
				m_wakeUpEvent.wait();
			}
			catch (...)
			{
				// If waiting on the event failed, degrade to polling with a
				// short sleep rather than spinning flat-out.
				using namespace std::chrono_literals;
				std::this_thread::sleep_for(1ms);
			}
		}

		// Cheap, racy emptiness check using relaxed loads; may be stale.
		bool approx_has_any_queued_work() const noexcept
		{
			return difference(
				m_head.load(std::memory_order_relaxed),
				m_tail.load(std::memory_order_relaxed)) > 0;
		}

		// Accurate emptiness check; takes the remote mutex so it
		// synchronises with concurrent steals.
		bool has_any_queued_work() noexcept
		{
			std::scoped_lock lock{ m_remoteMutex };
			auto tail = m_tail.load(std::memory_order_relaxed);
			auto head = m_head.load(std::memory_order_seq_cst);
			return difference(head, tail) > 0;
		}

		// Try to enqueue 'operation' onto this thread's local queue, growing
		// the ring buffer (up to max_local_queue_size) if it is full.
		// Returns false if the item could not be enqueued locally, in which
		// case the caller falls back to the pool's global queue.
		// Must only be called from the owning worker thread.
		bool try_local_enqueue(schedule_operation*& operation) noexcept
		{
			// Head is only ever written-to by the current thread so we
			// are safe to use relaxed memory order when reading it.
			auto head = m_head.load(std::memory_order_relaxed);

			// It is possible this method may be running concurrently with
			// try_remote_steal() which may have just speculatively incremented m_tail
			// trying to steal the last item in the queue but has not yet read the
			// queue item. So we need to make sure we don't write to the last available
			// space (at slot m_tail - 1) as this may still contain a pointer to an
			// operation that has not yet been executed.
			//
			// Note that it's ok to read stale values from m_tail since new values
			// won't ever decrease the number of available slots by more than 1.
			// Reading a stale value can just mean that sometimes the queue appears
			// empty when it may actually have slots free.
			//
			// Here m_mask is equal to buffersize - 1 so we can only write to a slot
			// if the number of items consumed in the queue (head - tail) is less than
			// the mask.
			auto tail = m_tail.load(std::memory_order_relaxed);
			if (difference(head, tail) < static_cast<offset_t>(m_mask))
			{
				// There is space left in the local buffer.
				m_localQueue[head & m_mask].store(operation, std::memory_order_relaxed);
				m_head.store(head + 1, std::memory_order_seq_cst);
				return true;
			}

			if (m_mask == local::max_local_queue_size)
			{
				// No space in the buffer and we don't want to grow
				// it any further.
				return false;
			}

			// Allocate the new buffer before taking out the lock so that
			// we ensure we hold the lock for as short a time as possible.
			const size_t newSize = (m_mask + 1) * 2;
			std::unique_ptr<std::atomic<schedule_operation*>[]> newLocalQueue{
				new (std::nothrow) std::atomic<schedule_operation*>[newSize]
			};
			if (!newLocalQueue)
			{
				// Unable to allocate more memory.
				return false;
			}

			if (!m_remoteMutex.try_lock())
			{
				// Don't wait to acquire the lock if we can't get it immediately.
				// Fail and let it be enqueued to the global queue.
				// TODO: Should we have a per-thread overflow queue instead?
				return false;
			}

			std::scoped_lock lock{ std::adopt_lock, m_remoteMutex };

			// We can now re-read tail, guaranteed that we are not seeing a stale version.
			tail = m_tail.load(std::memory_order_relaxed);

			// Copy the existing operations.
			const size_t newMask = newSize - 1;
			for (size_t i = tail; i != head; ++i)
			{
				newLocalQueue[i & newMask].store(
					m_localQueue[i & m_mask].load(std::memory_order_relaxed),
					std::memory_order_relaxed);
			}

			// Finally, write the new operation to the queue.
			newLocalQueue[head & newMask].store(operation, std::memory_order_relaxed);

			m_head.store(head + 1, std::memory_order_relaxed);

			m_localQueue = std::move(newLocalQueue);
			m_mask = newMask;

			return true;
		}

		// Pop the most-recently enqueued item (LIFO end) from the local
		// queue. Must only be called from the owning worker thread.
		// Returns nullptr if the queue is empty or the last item was lost
		// to a concurrent steal.
		schedule_operation* try_local_pop() noexcept
		{
			// Cheap, approximate, no memory-barrier check for emptiness
			auto head = m_head.load(std::memory_order_relaxed);
			auto tail = m_tail.load(std::memory_order_relaxed);
			if (difference(head, tail) <= 0)
			{
				// Empty
				return nullptr;
			}

			// 3 classes of interleaving of try_local_pop() and try_remote_steal()
			// - local pop completes before remote steal (easy)
			// - remote steal completes before local pop (easy)
			// - both are executed concurrently, both see each other's writes (harder)

			// Speculatively try to acquire the head item of the work queue by
			// decrementing the head cursor. This may race with a concurrent call
			// to try_remote_steal() that is also trying to speculatively increment
			// the tail cursor to steal from the other end of the queue. In the case
			// that they both try to dequeue the last/only item in the queue then we
			// need to fall back to locking to decide who wins
			auto newHead = head - 1;
			m_head.store(newHead, std::memory_order_seq_cst);

			tail = m_tail.load(std::memory_order_seq_cst);

			if (difference(newHead, tail) < 0)
			{
				// There was a race to get the last item.
				// We don't know whether the remote steal saw our write
				// and decided to back off or not, so we acquire the mutex
				// so that we wait until the remote steal has completed so
				// we can see what decision it made.
				std::lock_guard lock{ m_remoteMutex };

				// Use relaxed since the lock guarantees visibility of the writes
				// that the remote steal thread performed.
				tail = m_tail.load(std::memory_order_relaxed);

				if (difference(newHead, tail) < 0)
				{
					// The other thread didn't see our write and stole the last item.
					// We need to restore the head back to its old value.
					// We hold the mutex so can just use relaxed memory order for this.
					m_head.store(head, std::memory_order_relaxed);
					return nullptr;
				}
			}

			// We successfully acquired an item from the queue.
			return m_localQueue[newHead & m_mask].load(std::memory_order_relaxed);
		}

		// Steal the least-recently enqueued item (FIFO end) from this
		// thread's queue on behalf of another worker.
		//
		// If 'lockUnavailable' is null this call blocks until the remote
		// mutex is acquired; otherwise it fails fast and sets
		// *lockUnavailable when the mutex is contended.
		schedule_operation* try_steal(bool* lockUnavailable = nullptr) noexcept
		{
			if (lockUnavailable == nullptr)
			{
				m_remoteMutex.lock();
			}
			else if (!m_remoteMutex.try_lock())
			{
				*lockUnavailable = true;
				return nullptr;
			}

			std::scoped_lock lock{ std::adopt_lock, m_remoteMutex };

			auto tail = m_tail.load(std::memory_order_relaxed);
			auto head = m_head.load(std::memory_order_seq_cst);
			if (difference(head, tail) <= 0)
			{
				return nullptr;
			}

			// It looks like there are items in the queue.
			// We'll speculatively try to steal one by incrementing
			// the tail cursor. As this may be running concurrently
			// with try_local_pop() which is also speculatively trying
			// to remove an item from the other end of the queue we
			// need to re-read the 'head' cursor afterwards to see
			// if there was a potential race to dequeue the last item.
			// Use seq_cst memory order both here and in try_local_pop()
			// to ensure that either we will see their write to head or
			// they will see our write to tail or we will both see each
			// other's writes.
			m_tail.store(tail + 1, std::memory_order_seq_cst);
			head = m_head.load(std::memory_order_seq_cst);

			if (difference(head, tail) > 0)
			{
				// There was still an item in the queue after incrementing tail.
				// We managed to steal an item from the bottom of the stack.
				return m_localQueue[tail & m_mask].load(std::memory_order_relaxed);
			}
			else
			{
				// Otherwise we failed to steal the last item.
				// Restore the old tail position.
				m_tail.store(tail, std::memory_order_seq_cst);
				return nullptr;
			}
		}

	private:

		using offset_t = std::make_signed_t<std::size_t>;

		// Signed distance between two ring-buffer cursors; remains correct
		// even after the unsigned cursors wrap around.
		static constexpr offset_t difference(size_t a, size_t b)
		{
			return static_cast<offset_t>(a - b);
		}

		// Ring buffer of pending operations; capacity is m_mask + 1
		// (always a power of two).
		std::unique_ptr<std::atomic<schedule_operation*>[]> m_localQueue;
		std::size_t m_mask;

#if CPPCORO_COMPILER_MSVC
# pragma warning(push)
# pragma warning(disable : 4324) // structure was padded due to alignment
#endif

		// NOTE: alignas(std::hardware_destructive_interference_size) is
		// commented out below — presumably pending compiler support; the
		// intent is to keep these hot cursors on separate cache lines.

		//alignas(std::hardware_destructive_interference_size)
		std::atomic<std::size_t> m_head;

		//alignas(std::hardware_destructive_interference_size)
		std::atomic<std::size_t> m_tail;

		//alignas(std::hardware_destructive_interference_size)
		std::atomic<bool> m_isSleeping;
		spin_mutex m_remoteMutex;

#if CPPCORO_COMPILER_MSVC
# pragma warning(pop)
#endif

		auto_reset_event m_wakeUpEvent;

	};
	// Awaiting a schedule_operation suspends the coroutine and enqueues it
	// for resumption on one of the pool's worker threads.
	void static_thread_pool::schedule_operation::await_suspend(
		std::experimental::coroutine_handle<> awaitingCoroutine) noexcept
	{
		// Record the handle before publishing the operation: once
		// schedule_impl() enqueues it, another thread may dequeue and
		// resume it immediately.
		m_awaitingCoroutine = awaitingCoroutine;
		m_threadPool->schedule_impl(this);
	}
	// Default-construct with one worker thread per hardware thread.
	static_thread_pool::static_thread_pool()
		: static_thread_pool(std::thread::hardware_concurrency())
	{
	}
	// Construct a pool with the requested number of worker threads
	// (at least one; hardware_concurrency() may report 0).
	//
	// If starting any worker throws, the workers already started are shut
	// down and joined before the exception is re-thrown.
	static_thread_pool::static_thread_pool(std::uint32_t threadCount)
		: m_threadCount(threadCount > 0 ? threadCount : 1)
		, m_threadStates(std::make_unique<thread_state[]>(m_threadCount))
		, m_stopRequested(false)
		, m_globalQueueHead(nullptr)
		, m_globalQueueTail(nullptr)
		, m_sleepingThreadCount(0)
	{
		m_threads.reserve(threadCount);
		try
		{
			for (std::uint32_t i = 0; i < m_threadCount; ++i)
			{
				m_threads.emplace_back([this, i] { this->run_worker_thread(i); });
			}
		}
		catch (...)
		{
			try
			{
				shutdown();
			}
			catch (...)
			{
				// We're already unwinding; there is no reasonable way to
				// recover if shutdown itself fails.
				std::terminate();
			}

			throw;
		}
	}
	// Requires that all scheduled work has already completed — see the
	// precondition documented on shutdown().
	static_thread_pool::~static_thread_pool()
	{
		shutdown();
	}
	// Main loop of each worker thread: drain the local queue, then the
	// global queue and other workers' queues; when idle, spin briefly and
	// finally go to sleep until new work arrives or shutdown is requested.
	void static_thread_pool::run_worker_thread(std::uint32_t threadIndex) noexcept
	{
		auto& localState = m_threadStates[threadIndex];
		s_currentState = &localState;
		s_currentThreadPool = this;

		auto tryGetRemote = [&]()
		{
			// Try to get some new work first from the global queue
			// then if that queue is empty then try to steal from
			// the local queues of other worker threads.
			// We try to get new work from the global queue first
			// before stealing as stealing from other threads has
			// the side-effect of those threads running out of work
			// sooner and then having to steal work which increases
			// contention.
			auto* op = try_global_dequeue();
			if (op == nullptr)
			{
				op = try_steal_from_other_thread(threadIndex);
			}
			return op;
		};

		while (true)
		{
			// Process operations from the local queue.
			schedule_operation* op;

			while (true)
			{
				op = localState.try_local_pop();
				if (op == nullptr)
				{
					op = tryGetRemote();
					if (op == nullptr)
					{
						break;
					}
				}

				op->m_awaitingCoroutine.resume();
			}

			// No more operations in the local queue or remote queue.
			//
			// We spin for a little while waiting for new items
			// to be enqueued. This avoids the expensive operation
			// of putting the thread to sleep and waking it up again
			// in the case that an external thread is queueing new work

			cppcoro::spin_wait spinWait;
			while (true)
			{
				for (int i = 0; i < 30; ++i)
				{
					if (is_shutdown_requested())
					{
						return;
					}

					spinWait.spin_one();
					if (approx_has_any_queued_work_for(threadIndex))
					{
						op = tryGetRemote();
						if (op != nullptr)
						{
							// Return to normal processing to execute this work,
							// since it might queue more work to the local
							// queue which we should process first.
							goto normal_processing;
						}
					}
				}

				// We didn't find any work after spinning for a while, let's
				// put ourselves to sleep and wait to be woken up.

				// First, let other threads know we're going to sleep.
				notify_intent_to_sleep(threadIndex);

				// As notifying the other threads that we're sleeping may have
				// raced with other threads enqueueing more work, we need to
				// re-check whether there is any more work to be done so that
				// we don't get into a situation where we go to sleep and another
				// thread has enqueued some work and doesn't know to wake us up.
				if (has_any_queued_work_for(threadIndex))
				{
					op = tryGetRemote();
					if (op != nullptr)
					{
						// Try to clear the intent to sleep so that some other thread
						// that subsequently enqueues some work won't mistakenly try
						// to wake this thread up when we are already running as there
						// might have been some other thread that it could have woken
						// up instead which could have resulted in increased parallelism.
						//
						// However, it's possible that some other thread may have already
						// tried to wake us up, in which case the auto_reset_event used to
						// wake up this thread may already be in the 'set' state. Leaving
						// it in this state won't really hurt. It'll just mean we might get
						// a spurious wake-up next time we try to go to sleep.
						try_clear_intent_to_sleep(threadIndex);
						goto normal_processing;
					}
				}

				if (is_shutdown_requested())
				{
					return;
				}

				localState.sleep_until_woken();
			}

		normal_processing:
			assert(op != nullptr);
			op->m_awaitingCoroutine.resume();
		}
	}
	// Request all worker threads to stop and join them.
	//
	// Precondition: no work remains queued anywhere in the pool; it is the
	// application's responsibility to wait for completion first.
	void static_thread_pool::shutdown()
	{
		m_stopRequested.store(true, std::memory_order_relaxed);
		for (std::uint32_t i = 0; i < m_threads.size(); ++i)
		{
			auto& threadState = m_threadStates[i];

			// We should not be shutting down the thread pool if there is any
			// outstanding work in the queue. It is up to the application to
			// ensure all enqueued work has completed first.
			assert(!threadState.has_any_queued_work());

			// Wake any sleeping worker so it can observe the stop flag.
			threadState.try_wake_up();
		}

		for (auto& t : m_threads)
		{
			t.join();
		}
	}
	// Enqueue an operation for execution: prefer the calling worker
	// thread's local queue (cheap, contention-free), falling back to the
	// global queue when called from outside the pool or when the local
	// enqueue fails; then make sure some worker is awake to process it.
	void static_thread_pool::schedule_impl(schedule_operation* operation) noexcept
	{
		if (s_currentThreadPool != this ||
			!s_currentState->try_local_enqueue(operation))
		{
			remote_enqueue(operation);
		}

		wake_one_thread();
	}
	// Lock-free push onto the global queue, maintained as an intrusive
	// singly-linked LIFO list anchored at m_globalQueueTail;
	// try_global_dequeue() later drains and reverses it to hand items out
	// in FIFO order.
	void static_thread_pool::remote_enqueue(schedule_operation* operation) noexcept
	{
		auto* tail = m_globalQueueTail.load(std::memory_order_relaxed);
		do
		{
			operation->m_next = tail;
		} while (!m_globalQueueTail.compare_exchange_weak(
			tail,
			operation,
			std::memory_order_seq_cst,
			std::memory_order_relaxed));
	}
	// Accurate (more expensive) check for queued work anywhere in the pool
	// other than in thread 'threadIndex''s own local queue. Uses seq_cst
	// loads and the per-thread locks so that it synchronises with
	// notify_intent_to_sleep() — see the sleep protocol in run_worker_thread().
	bool static_thread_pool::has_any_queued_work_for(std::uint32_t threadIndex) noexcept
	{
		if (m_globalQueueTail.load(std::memory_order_seq_cst) != nullptr)
		{
			return true;
		}
		if (m_globalQueueHead.load(std::memory_order_seq_cst) != nullptr)
		{
			return true;
		}

		for (std::uint32_t i = 0; i < m_threadCount; ++i)
		{
			if (i == threadIndex) continue;
			if (m_threadStates[i].has_any_queued_work())
			{
				return true;
			}
		}

		return false;
	}
	bool static_thread_pool::approx_has_any_queued_work_for(std::uint32_t threadIndex) const noexcept
	{
		// Cheap, approximate, read-only implementation that checks whether any work has
		// been queued in the system somewhere. We try to avoid writes here so that we
		// don't bounce cache-lines around between threads/cores unnecessarily when
		// multiple threads are all spinning waiting for work.
		//
		// Results may be stale; callers treat 'true' only as a hint to
		// attempt an actual dequeue.
		if (m_globalQueueTail.load(std::memory_order_relaxed) != nullptr)
		{
			return true;
		}
		if (m_globalQueueHead.load(std::memory_order_relaxed) != nullptr)
		{
			return true;
		}

		for (std::uint32_t i = 0; i < m_threadCount; ++i)
		{
			if (i == threadIndex) continue;
			if (m_threadStates[i].approx_has_any_queued_work())
			{
				return true;
			}
		}

		return false;
	}
	bool static_thread_pool::is_shutdown_requested() const noexcept
	{
		// Relaxed is sufficient here: workers poll this flag periodically
		// and shutdown() wakes sleeping workers after setting it.
		return m_stopRequested.load(std::memory_order_relaxed);
	}
	// Publish the fact that worker 'threadIndex' is about to go to sleep so
	// that producers know a wake-up will be required.
	void static_thread_pool::notify_intent_to_sleep(std::uint32_t threadIndex) noexcept
	{
		// First mark the thread as asleep
		m_threadStates[threadIndex].notify_intent_to_sleep();

		// Then publish the fact that a thread is asleep by incrementing the count
		// of threads that are asleep.
		m_sleepingThreadCount.fetch_add(1, std::memory_order_seq_cst);
	}
	// Undo a prior notify_intent_to_sleep() on 'threadIndex' after the
	// worker found work on its own. If another thread already claimed the
	// wake-up, forward it to some other sleeping thread instead.
	void static_thread_pool::try_clear_intent_to_sleep(std::uint32_t threadIndex) noexcept
	{
		// First try to claim that we are waking up one of the threads.
		std::uint32_t oldSleepingCount = m_sleepingThreadCount.load(std::memory_order_relaxed);
		do
		{
			if (oldSleepingCount == 0)
			{
				// No more sleeping threads.
				// Someone must have woken us up.
				return;
			}
		} while (!m_sleepingThreadCount.compare_exchange_weak(
			oldSleepingCount,
			oldSleepingCount - 1,
			std::memory_order_acquire,
			std::memory_order_relaxed));

		// Then preferentially try to wake up our thread.
		// If some other thread has already requested that this thread wake up
		// then we will wake up another thread - the one that should have been woken
		// up by the thread that woke this thread up.
		if (!m_threadStates[threadIndex].try_wake_up())
		{
			for (std::uint32_t i = 0; i < m_threadCount; ++i)
			{
				if (i == threadIndex) continue;
				if (m_threadStates[i].try_wake_up())
				{
					return;
				}
			}
		}
	}
	// Pop one operation from the global queue (FIFO order).
	// Consumers are serialised by m_globalQueueMutex; producers push
	// lock-free onto m_globalQueueTail (see remote_enqueue()).
	static_thread_pool::schedule_operation*
	static_thread_pool::try_global_dequeue() noexcept
	{
		std::scoped_lock lock{ m_globalQueueMutex };

		auto* head = m_globalQueueHead.load(std::memory_order_relaxed);
		if (head == nullptr)
		{
			// Use seq-cst memory order so that when we check for an item in the
			// global queue after signalling an intent to sleep that either we
			// will see their enqueue or they will see our signal to sleep and
			// wake us up.
			if (m_globalQueueTail.load(std::memory_order_seq_cst) == nullptr)
			{
				return nullptr;
			}

			// Acquire the entire set of queued operations in a single operation.
			auto* tail = m_globalQueueTail.exchange(nullptr, std::memory_order_acquire);
			if (tail == nullptr)
			{
				return nullptr;
			}

			// Reverse the list (producers pushed LIFO; we hand out FIFO).
			do
			{
				auto* next = std::exchange(tail->m_next, head);
				head = std::exchange(tail, next);
			} while (tail != nullptr);
		}

		m_globalQueueHead = head->m_next;

		return head;
	}
static_thread_pool::schedule_operation*
static_thread_pool::try_steal_from_other_thread(std::uint32_t thisThreadIndex) noexcept
{
// Try first with non-blocking steal attempts.
bool anyLocksUnavailable = false;
for (std::uint32_t otherThreadIndex = 0; otherThreadIndex < m_threadCount; ++otherThreadIndex)
{
if (otherThreadIndex == thisThreadIndex) continue;
auto& otherThreadState = m_threadStates[otherThreadIndex];
auto* op = otherThreadState.try_steal(&anyLocksUnavailable);
if (op != nullptr)
{
return op;
}
}
if (anyLocksUnavailable)
{
// We didn't check all of the other threads for work to steal yet.
// Try again, this time waiting to acquire the locks.
for (std::uint32_t otherThreadIndex = 0; otherThreadIndex < m_threadCount; ++otherThreadIndex)
{
if (otherThreadIndex == thisThreadIndex) continue;
auto& otherThreadState = m_threadStates[otherThreadIndex];
auto* op = otherThreadState.try_steal();
if (op != nullptr)
{
return op;
}
}
}
return nullptr;
}
	// Ensure at least one sleeping worker is woken to process newly
	// enqueued work; a no-op if no workers are asleep.
	void static_thread_pool::wake_one_thread() noexcept
	{
		// First try to claim responsibility for waking up one thread.
		// This first read must be seq_cst to ensure that either we have
		// visibility of another thread going to sleep or they have
		// visibility of our prior enqueue of an item.
		std::uint32_t oldSleepingCount = m_sleepingThreadCount.load(std::memory_order_seq_cst);
		do
		{
			if (oldSleepingCount == 0)
			{
				// No sleeping threads.
				// Someone must have woken us up.
				return;
			}
		} while (!m_sleepingThreadCount.compare_exchange_weak(
			oldSleepingCount,
			oldSleepingCount - 1,
			std::memory_order_acquire,
			std::memory_order_relaxed));

		// Now that we have claimed responsibility for waking a thread up
		// we need to find a sleeping thread and wake it up. We should be
		// guaranteed of finding a thread to wake-up here, but not necessarily
		// in a single pass due to threads potentially waking themselves up
		// in try_clear_intent_to_sleep().
		while (true)
		{
			for (std::uint32_t i = 0; i < m_threadCount; ++i)
			{
				if (m_threadStates[i].try_wake_up())
				{
					return;
				}
			}
		}
	}
}

View File

@ -5,7 +5,9 @@
#include <cppcoro/detail/win32.hpp>
#define WIN32_LEAN_AND_MEAN
#ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
#endif
#include <Windows.h>
void cppcoro::detail::win32::safe_handle::close() noexcept

View File

@ -8,7 +8,9 @@
#include <system_error>
#if CPPCORO_OS_WINNT
# define WIN32_LEAN_AND_MEAN
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <Windows.h>
void cppcoro::writable_file::set_size(

View File

@ -6,7 +6,9 @@
#include <cppcoro\write_only_file.hpp>
#if CPPCORO_OS_WINNT
# define WIN32_LEAN_AND_MEAN
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <Windows.h>
cppcoro::write_only_file cppcoro::write_only_file::open(

View File

@ -21,6 +21,7 @@
#include <cassert>
#include <vector>
#include <ostream>
#include "doctest/doctest.h"
TEST_SUITE_BEGIN("async_auto_reset_event");

View File

@ -9,6 +9,7 @@
#include <cppcoro/sync_wait.hpp>
#include <cppcoro/when_all.hpp>
#include <ostream>
#include "doctest/doctest.h"
TEST_SUITE_BEGIN("async_generator");

View File

@ -9,6 +9,7 @@
#include <cppcoro/when_all_ready.hpp>
#include <cppcoro/sync_wait.hpp>
#include <ostream>
#include "doctest/doctest.h"
TEST_SUITE_BEGIN("async_mutex");
@ -26,6 +27,7 @@ TEST_CASE("try_lock")
CHECK(mutex.try_lock());
}
#if 0
TEST_CASE("multiple lockers")
{
int value = 0;
@ -83,5 +85,6 @@ TEST_CASE("multiple lockers")
CHECK(value == 4);
}
#endif
TEST_SUITE_END();

View File

@ -39,6 +39,7 @@ sources = script.cwd([
'ipv4_endpoint_tests.cpp',
'ipv6_address_tests.cpp',
'ipv6_endpoint_tests.cpp',
'static_thread_pool_tests.cpp',
])
if variant.platform == 'windows':

View File

@ -10,6 +10,7 @@
#include <thread>
#include <ostream>
#include "doctest/doctest.h"
TEST_SUITE_BEGIN("cancellation_token tests");

View File

@ -418,7 +418,8 @@ extern "C" __declspec(dllimport) void __stdcall DebugBreak();
#ifdef _LIBCPP_VERSION
// not forward declaring ostream for libc++ because I had some problems (inline namespaces vs c++98)
// so the <iosfwd> header is used - also it is very light and doesn't drag a ton of stuff
#include <iosfwd>
//#include <iosfwd>
#include <ostream>
#else // _LIBCPP_VERSION
#ifndef DOCTEST_CONFIG_USE_IOSFWD
namespace std

View File

@ -19,6 +19,7 @@
#include "io_service_fixture.hpp"
#include <ostream>
#include "doctest/doctest.h"
TEST_SUITE_BEGIN("file");

View File

@ -25,7 +25,10 @@ public:
m_ioThreads.reserve(threadCount);
try
{
m_ioThreads.emplace_back([this] { m_ioService.process_events(); });
for (std::uint32_t i = 0; i < threadCount; ++i)
{
m_ioThreads.emplace_back([this] { m_ioService.process_events(); });
}
}
catch (...)
{

View File

@ -17,6 +17,7 @@
#include <thread>
#include <vector>
#include <ostream>
#include "doctest/doctest.h"
TEST_SUITE_BEGIN("io_service");

View File

@ -10,6 +10,7 @@
#include <chrono>
#include <algorithm>
#include <ostream>
#include "doctest/doctest.h"
TEST_SUITE_BEGIN("recursive_generator");

View File

@ -13,6 +13,7 @@
#include "io_service_fixture.hpp"
#include <ostream>
#include "doctest/doctest.h"
TEST_SUITE_BEGIN("schedule/resume_on");
@ -36,7 +37,17 @@ TEST_CASE_FIXTURE(io_service_fixture, "schedule_on task<> function")
co_await schedule_on(io_service(), start());
CHECK(std::this_thread::get_id() == ioThreadId);
// TODO: Uncomment this check once the implementation of task<T>
// guarantees that the continuation will resume on the same thread
// that the task completed on. Currently it's possible to resume on
// the thread that launched the task if it completes on another thread
// before the current thread could attach the continuation after it
// suspended. See cppcoro issue #79.
//
// The long-term solution here is to use the symmetric-transfer capability
// to avoid the use of atomics and races, but we're still waiting for MSVC to
// implement this (doesn't seem to be implemented as of VS 2017.8 Preview 5)
//CHECK(std::this_thread::get_id() == ioThreadId);
}());
}
@ -111,6 +122,8 @@ TEST_CASE_FIXTURE(io_service_fixture, "resume_on task<> function")
co_await resume_on(io_service(), start());
// NOTE: This check could potentially spuriously fail with the current
// implementation of task<T>. See cppcoro issue #79.
CHECK(std::this_thread::get_id() != mainThreadId);
}());
}

View File

@ -8,6 +8,7 @@
#include <cppcoro/sync_wait.hpp>
#include <cppcoro/when_all_ready.hpp>
#include <cppcoro/single_consumer_event.hpp>
#include <cppcoro/fmap.hpp>
#include "counted.hpp"
@ -203,7 +204,7 @@ TEST_CASE("shared_task<void> fmap operator")
cppcoro::sync_wait(cppcoro::when_all_ready(
[&]() -> cppcoro::task<>
{
cppcoro::task<std::string> numericStringTask =
auto numericStringTask =
setNumber()
| cppcoro::fmap([&]() { return std::to_string(value); });
@ -231,7 +232,7 @@ TEST_CASE("shared_task<T> fmap operator")
cppcoro::sync_wait(cppcoro::when_all_ready(
[&]() -> cppcoro::task<>
{
cppcoro::task<std::string> numericStringTask =
auto numericStringTask =
getNumber()
| cppcoro::fmap([](int x) { return std::to_string(x); });

View File

@ -0,0 +1,290 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Lewis Baker
// Licenced under MIT license. See LICENSE.txt for details.
///////////////////////////////////////////////////////////////////////////////
#include <cppcoro/static_thread_pool.hpp>
#include <cppcoro/task.hpp>
#include <cppcoro/sync_wait.hpp>
#include <cppcoro/when_all.hpp>
#include <vector>
#include <thread>
#include <cassert>
#include <chrono>
#include <iostream>
#include <numeric>
#include "doctest/doctest.h"
TEST_SUITE_BEGIN("static_thread_pool");
TEST_CASE("construct/destruct")
{
	// A default-constructed pool should create one worker per hardware thread.
	cppcoro::static_thread_pool pool;
	CHECK(pool.thread_count() == std::thread::hardware_concurrency());
}
TEST_CASE("construct/destruct to specific thread count")
{
	// An explicitly requested thread count should be honoured exactly.
	cppcoro::static_thread_pool pool{ 5 };
	CHECK(pool.thread_count() == 5);
}
TEST_CASE("run one task")
{
	// schedule() must resume the awaiting coroutine on a pool thread,
	// never on the thread that initiated the sync_wait().
	cppcoro::static_thread_pool pool{ 2 };

	const auto callerThreadId = std::this_thread::get_id();

	auto runOnPool = [&]() -> cppcoro::task<void>
	{
		co_await pool.schedule();
		if (callerThreadId == std::this_thread::get_id())
		{
			FAIL("schedule() did not switch threads");
		}
	};

	cppcoro::sync_wait(runOnPool());
}
TEST_CASE("launch many tasks remotely")
{
	// Launch a batch of trivial tasks and wait for all of them to
	// complete on the pool.
	cppcoro::static_thread_pool pool;

	auto spawn = [&]() -> cppcoro::task<>
	{
		co_await pool.schedule();
	};

	std::vector<cppcoro::task<>> pending;
	for (int i = 0; i < 100; ++i)
	{
		pending.push_back(spawn());
	}

	cppcoro::sync_wait(cppcoro::when_all(std::move(pending)));
}
// Computes the sum of x*x for x in the half-open range [start, end),
// recursively splitting large ranges into two halves that are summed
// concurrently on the given thread-pool.
cppcoro::task<std::uint64_t> sum_of_squares(
	std::uint32_t start,
	std::uint32_t end,
	cppcoro::static_thread_pool& tp)
{
	co_await tp.schedule();

	const std::uint32_t rangeSize = end - start;
	if (rangeSize <= 1000)
	{
		// Range is small enough: compute directly on this thread.
		std::uint64_t total = 0;
		for (std::uint64_t x = start; x < end; ++x)
		{
			total += x * x;
		}
		co_return total;
	}

	// Fork: sum the two halves of the range concurrently and join.
	const std::uint32_t mid = start + rangeSize / 2;
	auto [lowerSum, upperSum] = co_await cppcoro::when_all(
		sum_of_squares(start, mid, tp),
		sum_of_squares(mid, end, tp));
	co_return lowerSum + upperSum;
}
TEST_CASE("launch sub-task with many sub-tasks")
{
	// Recursively fan out ~1M sub-tasks via sum_of_squares() and verify
	// the parallel result against a single-threaded reference computation,
	// reporting timings for both.
	using namespace std::chrono_literals;

	constexpr std::uint64_t limit = 1'000'000'000;

	cppcoro::static_thread_pool tp;

	// Wait for the thread-pool thread to start up.
	std::this_thread::sleep_for(1ms);

	const auto parallelStart = std::chrono::high_resolution_clock::now();
	const auto result = cppcoro::sync_wait(sum_of_squares(0, limit, tp));
	const auto parallelEnd = std::chrono::high_resolution_clock::now();

	// Single-threaded reference computation of the same sum.
	std::uint64_t expected = 0;
	for (std::uint64_t i = 0; i < limit; ++i)
	{
		expected += i * i;
	}
	const auto serialEnd = std::chrono::high_resolution_clock::now();

	auto toNs = [](auto duration)
	{
		return std::chrono::duration_cast<std::chrono::nanoseconds>(duration).count();
	};

	std::cout
		<< "multi-threaded version took " << toNs(parallelEnd - parallelStart) << "ns\n"
		<< "single-threaded version took " << toNs(serialEnd - parallelEnd) << "ns" << std::endl;

	CHECK(result == expected);
}
// Awaitable implementing a fork/join rendezvous: pieces of work are
// "forked" with begin_work() and "joined" with end_work(), and the
// coroutine that co_awaits this object resumes only once every forked
// piece has called end_work(). The count starts at 1 so the awaiting
// coroutine itself holds a reference until it suspends (or observes
// that all work already completed).
struct fork_join_operation
{
// Outstanding work items, plus 1 for the awaiting coroutine's own
// reference (dropped in await_suspend()).
std::atomic<std::size_t> m_count;
// Continuation to resume once the count reaches zero.
std::experimental::coroutine_handle<> m_coro;
fork_join_operation() : m_count(1) {}
// Registers one new piece of outstanding work. Relaxed is sufficient:
// the increment happens-before the matching end_work() by program order
// on the forking thread.
void begin_work() noexcept
{
m_count.fetch_add(1, std::memory_order_relaxed);
}
// Completes one piece of work. The caller that drops the final
// reference (previous value 1) resumes the awaiting coroutine;
// acq_rel publishes each work item's side effects to the continuation.
void end_work() noexcept
{
if (m_count.fetch_sub(1, std::memory_order_acq_rel) == 1)
{
m_coro.resume();
}
}
// Ready without suspending if only the awaiter's own reference remains,
// i.e. all forked work has already finished.
bool await_ready() noexcept { return m_count.load(std::memory_order_acquire) == 1; }
// Stores the continuation and drops the awaiter's reference.
// Returns false (don't suspend) if ours was the last reference,
// meaning every end_work() already ran.
bool await_suspend(std::experimental::coroutine_handle<> coro) noexcept
{
m_coro = coro;
return m_count.fetch_sub(1, std::memory_order_acq_rel) != 1;
}
void await_resume() noexcept {};
};
// Applies 'func' to every element of 'range', scheduling work onto
// 'scheduler' so that invocations for different elements can execute
// concurrently. Resumes the caller only after every invocation of
// 'func' has completed (joined via fork_join_operation).
//
// Implementation trick: each work_operation's await_suspend() reschedules
// the *continuation of the loop* onto the scheduler and then runs the
// current element's work inline on the current thread, so the loop keeps
// forking new work while earlier elements are still being processed.
template<typename FUNC, typename RANGE, typename SCHEDULER>
cppcoro::task<void> for_each_async(SCHEDULER& scheduler, RANGE& range, FUNC func)
{
using reference_type = decltype(*range.begin());
// TODO: Use awaiter_t here instead. This currently assumes that
// result of scheduler.schedule() doesn't have an operator co_await().
using schedule_operation = decltype(scheduler.schedule());
// One-shot awaiter for a single element: forks the element's work and
// hands the loop's continuation back to the scheduler.
struct work_operation
{
fork_join_operation& m_forkJoin;
FUNC& m_func;
reference_type m_value;
schedule_operation m_scheduleOp;
work_operation(fork_join_operation& forkJoin, SCHEDULER& scheduler, FUNC& func, reference_type&& value)
: m_forkJoin(forkJoin)
, m_func(func)
, m_value(static_cast<reference_type&&>(value))
, m_scheduleOp(scheduler.schedule())
{
}
bool await_ready() noexcept { return false; }
CPPCORO_NOINLINE
void await_suspend(std::experimental::coroutine_handle<> coro) noexcept
{
// Copy members into locals up front: once the continuation is handed
// to the scheduler below, the coroutine frame (and this temporary
// awaiter living in it) may be resumed and destroyed concurrently,
// so member accesses after that point would race.
fork_join_operation& forkJoin = m_forkJoin;
FUNC& func = m_func;
reference_type value = static_cast<reference_type&&>(m_value);
// Requires the schedule operation's await_suspend() to be the
// unconditionally-suspending (void-returning) flavour.
static_assert(std::is_same_v<decltype(m_scheduleOp.await_suspend(coro)), void>);
forkJoin.begin_work();
// Schedule the next iteration of the loop to run
m_scheduleOp.await_suspend(coro);
// Run this element's work inline, then join it.
func(static_cast<reference_type&&>(value));
forkJoin.end_work();
}
void await_resume() noexcept {}
};
// Hop onto the scheduler before starting the fork loop.
co_await scheduler.schedule();
fork_join_operation forkJoin;
for (auto&& x : range)
{
co_await work_operation{
forkJoin,
scheduler,
func,
static_cast<decltype(x)>(x)
};
}
// Wait for all forked work to call end_work().
co_await forkJoin;
}
// Returns the number of Collatz steps (n -> n/2 if even, n -> 3n+1 if odd)
// required to reduce 'number' to 1. Returns 0 for inputs <= 1.
std::uint64_t collatz_distance(std::uint64_t number)
{
	std::uint64_t steps = 0;
	for (std::uint64_t n = number; n > 1; ++steps)
	{
		n = (n % 2 == 0) ? n / 2 : 3 * n + 1;
	}
	return steps;
}
// Exercises for_each_async() over a million elements, checking every
// result against a serial recomputation, and compares wall-clock time
// against a plain single-threaded loop doing the same transform.
TEST_CASE("for_each_async")
{
cppcoro::static_thread_pool tp;
{
// values[i] == i + 1 after iota, so the expected transformed value
// at index i is collatz_distance(i + 1).
std::vector<std::uint64_t> values(1'000'000);
std::iota(values.begin(), values.end(), 1);
cppcoro::sync_wait([&]() -> cppcoro::task<>
{
auto start = std::chrono::high_resolution_clock::now();
// Transform every element in place, potentially in parallel.
co_await for_each_async(tp, values, [](std::uint64_t& value)
{
value = collatz_distance(value);
});
auto end = std::chrono::high_resolution_clock::now();
std::cout << "for_each_async of " << values.size()
<< " took " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count()
<< "us" << std::endl;
// Verify every element was transformed exactly once and correctly.
for (std::size_t i = 0; i < 1'000'000; ++i)
{
CHECK(values[i] == collatz_distance(i + 1));
}
}());
}
{
// Single-threaded baseline over a fresh copy of the same input,
// timed for comparison with the parallel run above.
std::vector<std::uint64_t> values(1'000'000);
std::iota(values.begin(), values.end(), 1);
auto start = std::chrono::high_resolution_clock::now();
for (auto&& x : values)
{
x = collatz_distance(x);
}
auto end = std::chrono::high_resolution_clock::now();
std::cout << "single-threaded for loop of " << values.size()
<< " took " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count()
<< "us" << std::endl;
}
}
TEST_SUITE_END();

View File

@ -7,6 +7,7 @@
#include <cppcoro/single_consumer_event.hpp>
#include <cppcoro/sync_wait.hpp>
#include <cppcoro/when_all_ready.hpp>
#include <cppcoro/fmap.hpp>
#include "counted.hpp"
@ -203,8 +204,6 @@ TEST_CASE("task<void> fmap pipe operator")
auto t = f() | fmap([] { return 123; });
CHECK(!t.is_ready());
cppcoro::sync_wait(cppcoro::when_all_ready(
[&]() -> cppcoro::task<>
{
@ -215,8 +214,6 @@ TEST_CASE("task<void> fmap pipe operator")
event.set();
co_return;
}()));
CHECK(t.is_ready());
}
TEST_CASE("task<int> fmap pipe operator")
@ -224,6 +221,7 @@ TEST_CASE("task<int> fmap pipe operator")
using cppcoro::task;
using cppcoro::fmap;
using cppcoro::sync_wait;
using cppcoro::make_task;
auto one = [&]() -> task<int>
{
@ -232,8 +230,8 @@ TEST_CASE("task<int> fmap pipe operator")
SUBCASE("r-value fmap / r-value lambda")
{
task<int> t = one() | fmap([delta = 1](auto i) { return i + delta; });
CHECK(!t.is_ready());
auto t = one()
| fmap([delta = 1](auto i) { return i + delta; });
CHECK(sync_wait(t) == 2);
}
@ -241,20 +239,17 @@ TEST_CASE("task<int> fmap pipe operator")
{
using namespace std::string_literals;
task<std::string> t;
auto t = [&]
{
auto f = [prefix = "pfx"s](int x)
{
return prefix + std::to_string(x);
};
// Want to make sure that the resulting task has taken
// Want to make sure that the resulting awaitable has taken
// a copy of the lambda passed to fmap().
t = one() | fmap(f);
}
CHECK(!t.is_ready());
return one() | fmap(f);
}();
CHECK(sync_wait(t) == "pfx1");
}
@ -263,20 +258,17 @@ TEST_CASE("task<int> fmap pipe operator")
{
using namespace std::string_literals;
task<std::string> t;
auto t = [&]
{
auto addprefix = fmap([prefix = "a really really long prefix that prevents small string optimisation"s](int x)
{
return prefix + std::to_string(x);
});
// Want to make sure that the resulting task has taken
// Want to make sure that the resulting awaitable has taken
// a copy of the lambda passed to fmap().
t = one() | addprefix;
}
CHECK(!t.is_ready());
return one() | addprefix;
}();
CHECK(sync_wait(t) == "a really really long prefix that prevents small string optimisation1");
}
@ -297,7 +289,7 @@ TEST_CASE("task<int> fmap pipe operator")
// Want to make sure that the resulting task has taken
// a copy of the lambda passed to fmap().
t = one() | addprefix;
t = make_task(one() | addprefix);
}
CHECK(!t.is_ready());
@ -331,8 +323,6 @@ TEST_CASE("chained fmap pipe operations")
auto t = asyncString("base"s) | prepend("pre_"s) | append("_post"s);
CHECK(!t.is_ready());
CHECK(sync_wait(t) == "pre_base_post");
}

View File

@ -16,6 +16,7 @@
#include <string>
#include <vector>
#include <ostream>
#include "doctest/doctest.h"
TEST_SUITE_BEGIN("when_all_ready");
@ -42,51 +43,55 @@ TEST_CASE("when_all_ready() with one task")
};
cppcoro::async_manual_reset_event event;
auto whenAllTask = cppcoro::when_all_ready(f(event));
auto whenAllAwaitable = cppcoro::when_all_ready(f(event));
CHECK(!started);
bool finished = false;
cppcoro::sync_wait(cppcoro::when_all_ready(
[&]() -> cppcoro::task<>
{
auto&[t] = co_await whenAllTask;
CHECK(t.is_ready());
auto&[t] = co_await whenAllAwaitable;
finished = true;
t.result();
}(),
[&]() -> cppcoro::task<>
{
CHECK(started);
CHECK(!finished);
event.set();
CHECK(whenAllTask.is_ready());
CHECK(finished);
co_return;
}()));
}
TEST_CASE("when_all_ready() with multiple task")
{
auto makeTask = [&](bool& started, cppcoro::async_manual_reset_event& event) -> cppcoro::task<>
auto makeTask = [&](bool& started, cppcoro::async_manual_reset_event& event, int result) -> cppcoro::task<int>
{
started = true;
co_await event;
co_return result;
};
cppcoro::async_manual_reset_event event1;
cppcoro::async_manual_reset_event event2;
bool started1 = false;
bool started2 = false;
auto whenAllTask = cppcoro::when_all_ready(
makeTask(started1, event1),
makeTask(started2, event2));
auto whenAllAwaitable = cppcoro::when_all_ready(
makeTask(started1, event1, 1),
makeTask(started2, event2, 2));
CHECK(!started1);
CHECK(!started2);
bool whenAllTaskFinished = false;
bool whenAllAwaitableFinished = false;
cppcoro::sync_wait(cppcoro::when_all_ready(
[&]() -> cppcoro::task<>
{
auto[t1, t2] = co_await std::move(whenAllTask);
whenAllTaskFinished = true;
CHECK(t1.is_ready());
CHECK(t2.is_ready());
auto[t1, t2] = co_await std::move(whenAllAwaitable);
whenAllAwaitableFinished = true;
CHECK(t1.result() == 1);
CHECK(t2.result() == 2);
}(),
[&]() -> cppcoro::task<>
{
@ -95,11 +100,11 @@ TEST_CASE("when_all_ready() with multiple task")
event2.set();
CHECK(!whenAllTaskFinished);
CHECK(!whenAllAwaitableFinished);
event1.set();
CHECK(whenAllTaskFinished);
CHECK(whenAllAwaitableFinished);
co_return;
}()));
@ -118,11 +123,8 @@ TEST_CASE("when_all_ready() with all task types")
{
auto [r0, r1] = co_await std::move(allTask);
CHECK(r0.is_ready());
CHECK(r1.is_ready());
CHECK(co_await r0 == 1);
CHECK(co_await r1 == 2);
CHECK(r0.result() == 1);
CHECK(r1.result() == 2);
}(),
[&]() -> cppcoro::task<>
{
@ -131,37 +133,6 @@ TEST_CASE("when_all_ready() with all task types")
}()));
}
TEST_CASE("when_all_ready() with all task types passed by ref")
{
cppcoro::async_manual_reset_event event;
auto t0 = when_event_set_return<cppcoro::task>(event, 1);
auto t1 = when_event_set_return<cppcoro::shared_task>(event, 2);
auto allTask = cppcoro::when_all_ready(
std::ref(t0),
std::ref(t1));
cppcoro::sync_wait(cppcoro::when_all_ready(
[&]() -> cppcoro::task<>
{
auto[u0, u1] = co_await allTask;
// Address of reference should be same as address of original task.
CHECK(&u0.get() == &t0);
CHECK(&u1.get() == &t1);
CHECK(co_await t0 == 1);
CHECK(co_await t1 == 2);
}(),
[&]() -> cppcoro::task<>
{
event.set();
co_return;
}()));
CHECK(allTask.is_ready());
}
TEST_CASE("when_all_ready() with std::vector<task<T>>")
{
cppcoro::async_manual_reset_event event;
@ -182,8 +153,7 @@ TEST_CASE("when_all_ready() with std::vector<task<T>>")
tasks.emplace_back(makeTask());
}
cppcoro::task<std::vector<cppcoro::task<>>> allTask =
cppcoro::when_all_ready(std::move(tasks));
auto allTask = cppcoro::when_all_ready(std::move(tasks));
// Shouldn't have started any tasks yet.
CHECK(startedCount == 0u);
@ -192,11 +162,11 @@ TEST_CASE("when_all_ready() with std::vector<task<T>>")
[&]() -> cppcoro::task<>
{
auto resultTasks = co_await std::move(allTask);
CHECK(resultTasks .size() == 10u);
CHECK(resultTasks.size() == 10u);
for (auto& t : resultTasks)
{
CHECK(t.is_ready());
CHECK_NOTHROW(t.result());
}
}(),
[&]() -> cppcoro::task<>
@ -232,8 +202,7 @@ TEST_CASE("when_all_ready() with std::vector<shared_task<T>>")
tasks.emplace_back(makeTask());
}
cppcoro::task<std::vector<cppcoro::shared_task<>>> allTask =
cppcoro::when_all_ready(std::move(tasks));
auto allTask = cppcoro::when_all_ready(std::move(tasks));
// Shouldn't have started any tasks yet.
CHECK(startedCount == 0u);
@ -246,7 +215,7 @@ TEST_CASE("when_all_ready() with std::vector<shared_task<T>>")
for (auto& t : resultTasks)
{
CHECK(t.is_ready());
CHECK_NOTHROW(t.result());
}
}(),
[&]() -> cppcoro::task<>
@ -283,8 +252,8 @@ TEST_CASE("when_all_ready() doesn't rethrow exceptions")
auto[t0, t1] = co_await cppcoro::when_all_ready(makeTask(true), makeTask(false));
// You can obtain the exceptions by re-awaiting the returned tasks.
CHECK_THROWS_AS((void)co_await t0, const std::exception&);
CHECK(co_await t1 == 123);
CHECK_THROWS_AS(t0.result(), const std::exception&);
CHECK(t1.result() == 123);
}
catch (...)
{

View File

@ -5,10 +5,13 @@
#include <cppcoro/when_all.hpp>
#include <cppcoro/task.hpp>
#include <cppcoro/shared_task.hpp>
#include <cppcoro/config.hpp>
#include <cppcoro/async_manual_reset_event.hpp>
#include <cppcoro/async_mutex.hpp>
#include <cppcoro/fmap.hpp>
#include <cppcoro/shared_task.hpp>
#include <cppcoro/sync_wait.hpp>
#include <cppcoro/task.hpp>
#include "counted.hpp"
@ -16,6 +19,7 @@
#include <string>
#include <vector>
#include <ostream>
#include "doctest/doctest.h"
TEST_SUITE_BEGIN("when_all");
@ -68,6 +72,32 @@ TEST_CASE("when_all() with one arg")
}()));
}
TEST_CASE("when_all() with awaitables")
{
cppcoro::sync_wait([]() -> cppcoro::task<>
{
auto makeTask = [](int x) -> cppcoro::task<int>
{
co_return x;
};
cppcoro::async_manual_reset_event event;
event.set();
cppcoro::async_mutex mutex;
auto[eventResult, mutexLock, number] = co_await cppcoro::when_all(
std::ref(event),
mutex.scoped_lock_async(),
makeTask(123) | cppcoro::fmap([](int x) { return x + 1; }));
(void)eventResult;
(void)mutexLock;
CHECK(number == 124);
CHECK(!mutex.try_lock());
}());
}
TEST_CASE("when_all() with all task types")
{
counted::reset_counts();
@ -117,7 +147,7 @@ TEST_CASE("when_all() throws if any task throws")
// This could either throw X or Y exception.
// The exact exception that is thrown is not defined if multiple tasks throw an exception.
// TODO: Consider throwing some kind of aggregate_exception that collects all of the exceptions together.
co_await cppcoro::when_all(makeTask(0), makeTask(1), makeTask(2));
(void)co_await cppcoro::when_all(makeTask(0), makeTask(1), makeTask(2));
}
catch (const X&)
{
@ -128,6 +158,45 @@ TEST_CASE("when_all() throws if any task throws")
}());
}
TEST_CASE("when_all() with task<void>")
{
int voidTaskCount = 0;
auto makeVoidTask = [&]() -> cppcoro::task<>
{
++voidTaskCount;
co_return;
};
auto makeIntTask = [](int x) -> cppcoro::task<int>
{
co_return x;
};
// Single void task in when_all()
auto[x] = cppcoro::sync_wait(cppcoro::when_all(makeVoidTask()));
(void)x;
CHECK(voidTaskCount == 1);
// Multiple void tasks in when_all()
auto[a, b] = cppcoro::sync_wait(cppcoro::when_all(
makeVoidTask(),
makeVoidTask()));
(void)a;
(void)b;
CHECK(voidTaskCount == 3);
// Mixing void and non-void tasks in when_all()
auto[v1, i, v2] = cppcoro::sync_wait(cppcoro::when_all(
makeVoidTask(),
makeIntTask(123),
makeVoidTask()));
(void)v1;
(void)v2;
CHECK(voidTaskCount == 5);
CHECK(i == 123);
}
TEST_CASE("when_all() with vector<task<>>")
{
int startedCount = 0;
@ -245,7 +314,7 @@ namespace
auto whenAllTask = cppcoro::when_all(std::move(tasks));
auto& values = co_await whenAllTask;
auto values = co_await whenAllTask;
REQUIRE(values.size() == 2);
CHECK(values[0] == 1);
CHECK(values[1] == 2);
@ -264,12 +333,26 @@ namespace
}
}
TEST_CASE("when_all() with vector<task<T>>")
#if defined(CPPCORO_RELEASE_OPTIMISED)
constexpr bool isOptimised = true;
#else
constexpr bool isOptimised = false;
#endif
// Disable test on MSVC x86 optimised due to bad codegen bug in
// `co_await whenAllTask` expression under MSVC 15.7 (Preview 2) and earlier.
TEST_CASE("when_all() with vector<task<T>>"
* doctest::skip(CPPCORO_COMPILER_MSVC && CPPCORO_COMPILER_MSVC <= 191426316 && CPPCORO_CPU_X86 && isOptimised))
{
check_when_all_vector_of_task_value<cppcoro::task>();
}
TEST_CASE("when_all() with vector<shared_task<T>>")
// Disable test on MSVC x64 optimised due to bad codegen bug in
// 'co_await whenAllTask' expression.
// Issue reported to MS on 19/11/2017.
TEST_CASE("when_all() with vector<shared_task<T>>"
* doctest::skip(CPPCORO_COMPILER_MSVC && CPPCORO_COMPILER_MSVC <= 191225805 &&
isOptimised && CPPCORO_CPU_X64))
{
check_when_all_vector_of_task_value<cppcoro::shared_task>();
}
@ -302,7 +385,7 @@ namespace
auto whenAllTask = cppcoro::when_all(std::move(tasks));
std::vector<std::reference_wrapper<int>>& values = co_await whenAllTask;
std::vector<std::reference_wrapper<int>> values = co_await whenAllTask;
REQUIRE(values.size() == 2);
CHECK(&values[0].get() == &value1);
CHECK(&values[1].get() == &value2);
@ -321,12 +404,22 @@ namespace
}
}
TEST_CASE("when_all() with vector<task<T&>>")
// Disable test on MSVC x64 optimised due to bad codegen bug in
// 'co_await whenAllTask' expression.
// Issue reported to MS on 19/11/2017.
TEST_CASE("when_all() with vector<task<T&>>"
* doctest::skip(CPPCORO_COMPILER_MSVC && CPPCORO_COMPILER_MSVC <= 191225805 &&
isOptimised && CPPCORO_CPU_X64))
{
check_when_all_vector_of_task_reference<cppcoro::task>();
}
TEST_CASE("when_all() with vector<shared_task<T&>>")
// Disable test on MSVC x64 optimised due to bad codegen bug in
// 'co_await whenAllTask' expression.
// Issue reported to MS on 19/11/2017.
TEST_CASE("when_all() with vector<shared_task<T&>>"
* doctest::skip(CPPCORO_COMPILER_MSVC && CPPCORO_COMPILER_MSVC <= 191225805 &&
isOptimised && CPPCORO_CPU_X64))
{
check_when_all_vector_of_task_reference<cppcoro::shared_task>();
}

@ -1 +1 @@
Subproject commit db43f336b95d367163b967491f4e22d252ae59b4
Subproject commit bd492ab55e69011542397b07239cc99e030133d6