coroutine: break down into smaller sub-modules

Andrei Alexeyev 2022-12-22 18:30:59 +01:00
parent 8c804397dd
commit da67472719
16 changed files with 1211 additions and 1034 deletions

@@ -9,528 +9,10 @@
#pragma once
#include "taisei.h"
-#include "entity.h"
-#include "util/debug.h"
-#include "dynarray.h"
-#include <koishi.h>
+// TODO: Remove this file and adjust includes.
+#include "coroutine/coroutine.h"
+#include "coroutine/cotask.h"
+#include "coroutine/coevent.h"
+#include "coroutine/cosched.h"
+#include "coroutine/taskdsl.h"
-// #define CO_TASK_DEBUG
-typedef struct CoTask CoTask;
-typedef struct CoSched CoSched;
typedef void *(*CoTaskFunc)(void *arg, size_t argsize);
typedef enum CoStatus {
CO_STATUS_SUSPENDED = KOISHI_SUSPENDED,
CO_STATUS_RUNNING = KOISHI_RUNNING,
CO_STATUS_IDLE = KOISHI_IDLE,
CO_STATUS_DEAD = KOISHI_DEAD,
} CoStatus;
typedef enum CoEventStatus {
CO_EVENT_PENDING,
CO_EVENT_SIGNALED,
CO_EVENT_CANCELED,
} CoEventStatus;
typedef struct BoxedTask {
alignas(alignof(void*)) uintptr_t ptr;
uint32_t unique_id;
} BoxedTask;
typedef struct CoEvent {
// ListAnchor subscribers;
// FIXME: Is there a better way than a dynamic array?
// An intrusive linked list just isn't robust enough.
DYNAMIC_ARRAY(BoxedTask) subscribers;
uint32_t unique_id;
uint32_t num_signaled;
} CoEvent;
typedef struct CoEventSnapshot {
uint32_t unique_id;
uint32_t num_signaled;
} CoEventSnapshot;
#define COEVENTS_ARRAY(...) \
union { \
CoEvent _first_event_; \
struct { CoEvent __VA_ARGS__; }; \
}
typedef COEVENTS_ARRAY(
finished
) CoTaskEvents;
typedef LIST_ANCHOR(CoTask) CoTaskList;
struct CoSched {
CoTaskList tasks, pending_tasks;
};
typedef struct CoWaitResult {
int frames;
CoEventStatus event_status;
} CoWaitResult;
#ifdef CO_TASK_DEBUG
typedef struct CoTaskDebugInfo {
const char *label;
DebugInfo debug_info;
} CoTaskDebugInfo;
#define COTASK_DEBUG_INFO(label) ((CoTaskDebugInfo) { (label), _DEBUG_INFO_INITIALIZER_ })
#else
typedef char CoTaskDebugInfo;
#define COTASK_DEBUG_INFO(label) (0)
#endif
void coroutines_init(void);
void coroutines_shutdown(void);
void coroutines_draw_stats(void);
void cotask_free(CoTask *task);
bool cotask_cancel(CoTask *task);
void *cotask_resume(CoTask *task, void *arg);
void *cotask_yield(void *arg);
int cotask_wait(int delay);
CoWaitResult cotask_wait_event(CoEvent *evt);
CoWaitResult cotask_wait_event_or_die(CoEvent *evt);
CoWaitResult cotask_wait_event_once(CoEvent *evt);
int cotask_wait_subtasks(void);
CoStatus cotask_status(CoTask *task);
CoTask *cotask_active(void);
EntityInterface *cotask_bind_to_entity(CoTask *task, EntityInterface *ent) attr_returns_nonnull;
CoTaskEvents *cotask_get_events(CoTask *task);
void *cotask_malloc(CoTask *task, size_t size) attr_returns_allocated attr_malloc attr_alloc_size(2);
EntityInterface *cotask_host_entity(CoTask *task, size_t ent_size, EntityType ent_type) attr_nonnull_all attr_returns_allocated;
void cotask_host_events(CoTask *task, uint num_events, CoEvent events[num_events]) attr_nonnull_all;
CoSched *cotask_get_sched(CoTask *task);
BoxedTask cotask_box(CoTask *task);
CoTask *cotask_unbox(BoxedTask box);
void coevent_init(CoEvent *evt);
void coevent_signal(CoEvent *evt);
void coevent_signal_once(CoEvent *evt);
void coevent_cancel(CoEvent *evt);
CoEventSnapshot coevent_snapshot(const CoEvent *evt);
CoEventStatus coevent_poll(const CoEvent *evt, const CoEventSnapshot *snap);
void _coevent_array_action(uint num, CoEvent *events, void (*func)(CoEvent*));
#define COEVENT_ARRAY_ACTION(func, array) (_coevent_array_action(sizeof(array)/sizeof(CoEvent), &((array)._first_event_), func))
#define COEVENT_INIT_ARRAY(array) COEVENT_ARRAY_ACTION(coevent_init, array)
#define COEVENT_CANCEL_ARRAY(array) COEVENT_ARRAY_ACTION(coevent_cancel, array)
void cosched_init(CoSched *sched);
CoTask *_cosched_new_task(CoSched *sched, CoTaskFunc func, void *arg, size_t arg_size, bool is_subtask, CoTaskDebugInfo debug); // creates and runs the task, schedules it for resume on cosched_run_tasks if it's still alive
#define cosched_new_task(sched, func, arg, arg_size, debug_label) \
_cosched_new_task(sched, func, arg, arg_size, false, COTASK_DEBUG_INFO(debug_label))
#define cosched_new_subtask(sched, func, arg, arg_size, debug_label) \
_cosched_new_task(sched, func, arg, arg_size, true, COTASK_DEBUG_INFO(debug_label))
uint cosched_run_tasks(CoSched *sched); // returns number of tasks ran
void cosched_finish(CoSched *sched);
#define TASK_ARGS_TYPE(name) COARGS_##name
#define TASK_ARGSDELAY_NAME(name) COARGSDELAY_##name
#define TASK_ARGSDELAY(name) struct TASK_ARGSDELAY_NAME(name)
#define TASK_ARGSCOND_NAME(name) COARGSCOND_##name
#define TASK_ARGSCOND(name) struct TASK_ARGSCOND_NAME(name)
#define TASK_IFACE_NAME(iface, suffix) COTASKIFACE_##iface##_##suffix
#define TASK_IFACE_ARGS_TYPE(iface) TASK_IFACE_NAME(iface, ARGS)
#define TASK_IFACE_ARGS_SIZED_PTR_TYPE(iface) TASK_IFACE_NAME(iface, ARGS_SPTR)
#define TASK_INDIRECT_TYPE(iface) TASK_IFACE_NAME(iface, HANDLE)
#define TASK_IFACE_SARGS(iface, ...) \
((TASK_IFACE_ARGS_SIZED_PTR_TYPE(iface)) { \
.size = sizeof(TASK_IFACE_ARGS_TYPE(iface)), \
.ptr = (&(TASK_IFACE_ARGS_TYPE(iface)) { __VA_ARGS__ }) \
})
#define DEFINE_TASK_INTERFACE(iface, argstruct) \
typedef TASK_ARGS_STRUCT(argstruct) TASK_IFACE_ARGS_TYPE(iface); \
typedef struct { \
TASK_IFACE_ARGS_TYPE(iface) *ptr; \
size_t size; \
} TASK_IFACE_ARGS_SIZED_PTR_TYPE(iface); \
typedef struct { \
CoTaskFunc _cotask_##iface##_thunk; \
} TASK_INDIRECT_TYPE(iface) /* require semicolon */
#define DEFINE_TASK_INTERFACE_WITH_BASE(iface, ibase, argstruct) \
typedef struct { \
TASK_IFACE_ARGS_TYPE(ibase) base; \
TASK_ARGS_STRUCT(argstruct); \
} TASK_IFACE_ARGS_TYPE(iface); \
typedef struct { \
union { \
TASK_IFACE_ARGS_SIZED_PTR_TYPE(ibase) base; \
struct { \
TASK_IFACE_ARGS_TYPE(iface) *ptr; \
size_t size; \
}; \
}; \
} TASK_IFACE_ARGS_SIZED_PTR_TYPE(iface); \
typedef struct { \
union { \
TASK_INDIRECT_TYPE(ibase) base; \
CoTaskFunc _cotask_##iface##_thunk; \
CoTaskFunc _cotask_##ibase##_thunk; \
}; \
} TASK_INDIRECT_TYPE(iface) /* require semicolon */\
#define TASK_INDIRECT_TYPE_ALIAS(task) TASK_IFACE_NAME(task, HANDLEALIAS)
#define ARGS (*_cotask_args)
#define NO_ARGS attr_deprecated("Use { } instead of NO_ARGS, or omit it entirely") { }
// NOTE: the nested anonymous struct hack allows us to support both of these syntaxes:
// INVOKE_TASK(foo, ENT_BOX(bar));
// INVOKE_TASK(foo, { ENT_BOX(bar) });
#define TASK_ARGS_STRUCT(argstruct) struct { struct argstruct; }
#define TASK_COMMON_PRIVATE_DECLARATIONS(name) \
/* user-defined task body */ \
static void COTASK_##name(TASK_ARGS_TYPE(name) *_cotask_args) /* require semicolon */
#define TASK_COMMON_DECLARATIONS(name, argstype, handletype, linkage) \
/* produce warning if the task is never used */ \
linkage char COTASK_UNUSED_CHECK_##name; \
/* type of indirect handle to a compatible task */ \
typedef handletype TASK_INDIRECT_TYPE_ALIAS(name); \
/* user-defined type of args struct */ \
typedef argstype TASK_ARGS_TYPE(name); \
/* type of internal args struct for INVOKE_TASK_DELAYED */ \
struct TASK_ARGSDELAY_NAME(name) { \
int delay; \
/* NOTE: this must be last for interface inheritance to work! */ \
TASK_ARGS_TYPE(name) real_args; \
}; \
/* type of internal args struct for INVOKE_TASK_WHEN */ \
struct TASK_ARGSCOND_NAME(name) { \
CoEvent *event; \
bool unconditional; \
/* NOTE: this must be last for interface inheritance to work! */ \
TASK_ARGS_TYPE(name) real_args; \
}; \
/* task entry point for INVOKE_TASK */ \
attr_unused linkage void *COTASKTHUNK_##name(void *arg, size_t arg_size); \
/* task entry point for INVOKE_TASK_DELAYED */ \
attr_unused linkage void *COTASKTHUNKDELAY_##name(void *arg, size_t arg_size); \
/* task entry point for INVOKE_TASK_WHEN and INVOKE_TASK_AFTER */ \
attr_unused linkage void *COTASKTHUNKCOND_##name(void *arg, size_t arg_size) /* require semicolon */ \
#define TASK_COMMON_THUNK_DEFINITIONS(name, linkage) \
/* task entry point for INVOKE_TASK */ \
attr_unused linkage void *COTASKTHUNK_##name(void *arg, size_t arg_size) { \
/* copy args to our coroutine stack so that they're valid after caller returns */ \
TASK_ARGS_TYPE(name) args_copy = { }; \
assume(sizeof(args_copy) >= arg_size); \
memcpy(&args_copy, arg, arg_size); \
/* call body */ \
COTASK_##name(&args_copy); \
/* exit coroutine */ \
return NULL; \
} \
/* task entry point for INVOKE_TASK_DELAYED */ \
attr_unused linkage void *COTASKTHUNKDELAY_##name(void *arg, size_t arg_size) { \
/* copy args to our coroutine stack so that they're valid after caller returns */ \
TASK_ARGSDELAY(name) args_copy = { }; \
assume(sizeof(args_copy) >= arg_size); \
memcpy(&args_copy, arg, arg_size); \
/* if delay is negative, bail out early */ \
if(args_copy.delay < 0) return NULL; \
/* wait out the delay */ \
WAIT(args_copy.delay); \
/* call body */ \
COTASK_##name(&args_copy.real_args); \
/* exit coroutine */ \
return NULL; \
} \
/* task entry point for INVOKE_TASK_WHEN and INVOKE_TASK_AFTER */ \
attr_unused linkage void *COTASKTHUNKCOND_##name(void *arg, size_t arg_size) { \
/* copy args to our coroutine stack so that they're valid after caller returns */ \
TASK_ARGSCOND(name) args_copy = { }; \
assume(sizeof(args_copy) >= arg_size); \
memcpy(&args_copy, arg, arg_size); \
/* wait for event, and if it wasn't canceled (or if we want to run unconditionally)... */ \
if(WAIT_EVENT(args_copy.event).event_status == CO_EVENT_SIGNALED || args_copy.unconditional) { \
/* call body */ \
COTASK_##name(&args_copy.real_args); \
} \
/* exit coroutine */ \
return NULL; \
}
#define TASK_COMMON_BEGIN_BODY_DEFINITION(name, linkage) \
linkage void COTASK_##name(TASK_ARGS_TYPE(name) *_cotask_args)
#define DECLARE_TASK_EXPLICIT(name, argstype, handletype, linkage) \
TASK_COMMON_DECLARATIONS(name, argstype, handletype, linkage) /* require semicolon */
#define DEFINE_TASK_EXPLICIT(name, linkage) \
TASK_COMMON_PRIVATE_DECLARATIONS(name); \
TASK_COMMON_THUNK_DEFINITIONS(name, linkage) \
/* begin task body definition */ \
TASK_COMMON_BEGIN_BODY_DEFINITION(name, linkage)
/* declare a task with static linkage (needs to be defined later) */
#define DECLARE_TASK(name, ...) \
MACROHAX_OVERLOAD_HASARGS(DECLARE_TASK_, __VA_ARGS__)(name, ##__VA_ARGS__)
#define DECLARE_TASK_1(name, ...) \
DECLARE_TASK_EXPLICIT(name, TASK_ARGS_STRUCT(__VA_ARGS__), void, static) /* require semicolon */
#define DECLARE_TASK_0(name) DECLARE_TASK_1(name, { })
/* declare a task with static linkage that conforms to a common interface (needs to be defined later) */
#define DECLARE_TASK_WITH_INTERFACE(name, iface) \
DECLARE_TASK_EXPLICIT(name, TASK_IFACE_ARGS_TYPE(iface), TASK_INDIRECT_TYPE(iface), static) /* require semicolon */
/* define a task with static linkage (needs to be declared first) */
#define DEFINE_TASK(name) \
DEFINE_TASK_EXPLICIT(name, static)
/* declare and define a task with static linkage */
#define TASK(name, ...) \
DECLARE_TASK(name, ##__VA_ARGS__); \
DEFINE_TASK(name)
/* declare and define a task with static linkage that conforms to a common interface */
#define TASK_WITH_INTERFACE(name, iface) \
DECLARE_TASK_WITH_INTERFACE(name, iface); \
DEFINE_TASK(name)
/* declare a task with extern linkage (needs to be defined later) */
#define DECLARE_EXTERN_TASK(name, ...)\
MACROHAX_OVERLOAD_HASARGS(DECLARE_EXTERN_TASK_, __VA_ARGS__)(name, ##__VA_ARGS__)
#define DECLARE_EXTERN_TASK_1(name, ...) \
DECLARE_TASK_EXPLICIT(name, TASK_ARGS_STRUCT(__VA_ARGS__), void, extern) /* require semicolon */
#define DECLARE_EXTERN_TASK_0(name) \
DECLARE_EXTERN_TASK_1(name, { })
/* declare a task with extern linkage that conforms to a common interface (needs to be defined later) */
#define DECLARE_EXTERN_TASK_WITH_INTERFACE(name, iface) \
DECLARE_TASK_EXPLICIT(name, TASK_IFACE_ARGS_TYPE(iface), TASK_INDIRECT_TYPE(iface), extern) /* require semicolon */
/* define a task with extern linkage (needs to be declared first) */
#define DEFINE_EXTERN_TASK(name) \
char COTASK_UNUSED_CHECK_##name; \
DEFINE_TASK_EXPLICIT(name, extern)
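/*
 * Editor's sketch of a minimal task definition using the macros above. The
 * task name "example_fader" and its argument are hypothetical, not part of
 * this codebase.
 */
TASK(example_fader, { int duration; }) {
	// ARGS is the task's argument struct (see the ARGS macro above)
	for(int i = 0; i < ARGS.duration; ++i) {
		YIELD;  // suspend; the scheduler resumes us next frame
	}
}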
/*
* INVOKE_TASK(task_name, args...)
* INVOKE_SUBTASK(task_name, args...)
*
* This is the most basic way to start an asynchronous task. Control is transferred
* to the new task immediately when this is called, and returns to the call site
* when the task yields or terminates.
*
* Args are optional. They are treated simply as an initializer for the task's
* args struct, so it's possible to use designated initializer syntax to emulate
* "keyword arguments", etc.
*
* INVOKE_SUBTASK is identical to INVOKE_TASK, except the spawned task will attach
* to the currently executing task, becoming its "sub-task" or "slave". When a
* task finishes executing, all of its sub-tasks are also terminated recursively.
*
* Other INVOKE_ macros with a _SUBTASK version behave analogously.
*/
#define INVOKE_TASK(_task, ...) \
_internal_INVOKE_TASK(THIS_SCHED, cosched_new_task, _task, ##__VA_ARGS__)
#define INVOKE_SUBTASK(_task, ...) \
_internal_INVOKE_TASK(THIS_SCHED, cosched_new_subtask, _task, ##__VA_ARGS__)
#define SCHED_INVOKE_TASK(_sched, _task, ...) \
_internal_INVOKE_TASK(_sched, cosched_new_task, _task, ##__VA_ARGS__)
#define _internal_INVOKE_TASK(sched, task_constructor, name, ...) ( \
(void)COTASK_UNUSED_CHECK_##name, \
task_constructor( \
sched, \
COTASKTHUNK_##name, \
(&(TASK_ARGS_TYPE(name)) { __VA_ARGS__ }), \
sizeof(TASK_ARGS_TYPE(name)), \
#name \
) \
)
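/*
 * Usage sketch (hypothetical names): spawns the "example_fader" task sketched
 * earlier. It must itself run inside a task, since THIS_SCHED queries the
 * active task's scheduler.
 */
TASK(example_spawner, { int count; }) {
	for(int i = 0; i < ARGS.count; ++i) {
		// designated initializers emulate keyword arguments
		INVOKE_TASK(example_fader, .duration = 60 + 10 * i);
		// a subtask is terminated together with example_spawner
		INVOKE_SUBTASK(example_fader, { .duration = 120 });
		WAIT(30);
	}
}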
/*
* INVOKE_TASK_DELAYED(delay, task_name, args...)
* INVOKE_SUBTASK_DELAYED(delay, task_name, args...)
*
* Like INVOKE_TASK, but the task will yield <delay> times before executing the
* actual task body.
*
* If <delay> is negative, the task will not be invoked. The arguments are still
* evaluated, however. (Caveat: in the current implementation, a task is spawned
* either way; it just aborts early without executing the body if the delay is
* negative, so there's some overhead).
*/
#define INVOKE_TASK_DELAYED(_delay, _task, ...) \
_internal_INVOKE_TASK_DELAYED(THIS_SCHED, cosched_new_task, _delay, _task, ##__VA_ARGS__)
#define INVOKE_SUBTASK_DELAYED(_delay, _task, ...) \
_internal_INVOKE_TASK_DELAYED(THIS_SCHED, cosched_new_subtask, _delay, _task, ##__VA_ARGS__)
#define SCHED_INVOKE_TASK_DELAYED(_sched, _delay, _task, ...) \
_internal_INVOKE_TASK_DELAYED(_sched, cosched_new_task, _delay, _task, ##__VA_ARGS__)
#define _internal_INVOKE_TASK_DELAYED(sched, task_constructor, _delay, name, ...) ( \
(void)COTASK_UNUSED_CHECK_##name, \
task_constructor( \
sched, \
COTASKTHUNKDELAY_##name, \
(&(TASK_ARGSDELAY(name)) { \
.real_args = { __VA_ARGS__ }, \
.delay = (_delay) \
}), \
sizeof(TASK_ARGSDELAY(name)), \
#name \
) \
)
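/*
 * Sketch (hypothetical names): the first call starts example_fader's body
 * after 120 frames; the second still spawns a task, but it aborts before the
 * body runs because the delay is negative.
 */
TASK(example_delayed_spawner) {
	INVOKE_TASK_DELAYED(120, example_fader, { .duration = 60 });
	INVOKE_TASK_DELAYED(-1, example_fader, { .duration = 60 });
}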
/*
* INVOKE_TASK_WHEN(event, task_name, args...)
* INVOKE_SUBTASK_WHEN(event, task_name, args...)
*
* INVOKE_TASK_AFTER(event, task_name, args...)
* INVOKE_SUBTASK_AFTER(event, task_name, args...)
*
* Both INVOKE_TASK_WHEN and INVOKE_TASK_AFTER spawn a task that waits for an
* event to occur. The difference is that _WHEN aborts the task if the event has
* been canceled, but _AFTER proceeds to execute it unconditionally.
*
* <event> is a pointer to a CoEvent struct.
*/
#define INVOKE_TASK_WHEN(_event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(THIS_SCHED, cosched_new_task, false, _event, _task, ##__VA_ARGS__)
#define INVOKE_SUBTASK_WHEN(_event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(THIS_SCHED, cosched_new_subtask, false, _event, _task, ##__VA_ARGS__)
#define SCHED_INVOKE_TASK_WHEN(_sched, _event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(_sched, cosched_new_task, false, _event, _task, ##__VA_ARGS__)
#define INVOKE_TASK_AFTER(_event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(THIS_SCHED, cosched_new_task, true, _event, _task, ## __VA_ARGS__)
#define INVOKE_SUBTASK_AFTER(_event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(THIS_SCHED, cosched_new_subtask, true, _event, _task, ## __VA_ARGS__)
#define SCHED_INVOKE_TASK_AFTER(_sched, _event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(_sched, cosched_new_task, true, _event, _task, ##__VA_ARGS__)
#define _internal_INVOKE_TASK_ON_EVENT(sched, task_constructor, is_unconditional, _event, name, ...) ( \
(void)COTASK_UNUSED_CHECK_##name, \
task_constructor( \
sched, \
COTASKTHUNKCOND_##name, \
(&(TASK_ARGSCOND(name)) { \
.real_args = { __VA_ARGS__ }, \
.event = (_event), \
.unconditional = is_unconditional \
}), \
sizeof(TASK_ARGSCOND(name)), \
#name \
) \
)
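/*
 * Sketch (all names hypothetical): "boss_defeated" is assumed to be a CoEvent
 * owned by some entity or stage state. _WHEN skips the task body if the event
 * gets canceled; _AFTER runs it either way.
 */
TASK(example_on_defeat, { CoEvent *boss_defeated; }) {
	INVOKE_TASK_WHEN(ARGS.boss_defeated, example_fader, { .duration = 60 });
	INVOKE_TASK_AFTER(ARGS.boss_defeated, example_fader, { .duration = 30 });
}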
/*
* CANCEL_TASK_WHEN(event, boxed_task)
* CANCEL_TASK_AFTER(event, boxed_task)
*
* Invokes an auxiliary task that will wait for an event, and then cancel another
* running task. The difference between WHEN and AFTER is the same as in
* INVOKE_TASK_WHEN/INVOKE_TASK_AFTER -- this is a simple wrapper around those.
*
* <event> is a pointer to a CoEvent struct.
* <boxed_task> is a BoxedTask struct; use cotask_box to obtain one from a pointer.
* You can also use the THIS_TASK macro to refer to the currently running task.
*/
#define CANCEL_TASK_WHEN(_event, _task) INVOKE_TASK_WHEN(_event, _cancel_task_helper, _task)
#define CANCEL_TASK_AFTER(_event, _task) INVOKE_TASK_AFTER(_event, _cancel_task_helper, _task)
DECLARE_EXTERN_TASK(_cancel_task_helper, { BoxedTask task; });
#define CANCEL_TASK(boxed_task) cotask_cancel(cotask_unbox(boxed_task))
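/*
 * Sketch (hypothetical names): a looping task that arranges its own
 * cancellation once the given event fires, using THIS_TASK as the boxed
 * handle of the currently running task.
 */
TASK(example_barrage, { CoEvent *stop_event; }) {
	CANCEL_TASK_WHEN(ARGS.stop_event, THIS_TASK);
	for(;;) {
		// ...do periodic work here...
		WAIT(10);
	}
}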
#define TASK_INDIRECT(iface, task) ( \
(void)COTASK_UNUSED_CHECK_##task, \
(TASK_INDIRECT_TYPE_ALIAS(task)) { ._cotask_##iface##_thunk = COTASKTHUNK_##task } \
)
#define TASK_INDIRECT_INIT(iface, task) \
{ ._cotask_##iface##_thunk = COTASKTHUNK_##task } \
#define INVOKE_TASK_INDIRECT_(sched, task_constructor, iface, taskhandle, ...) ( \
task_constructor( \
sched, \
taskhandle._cotask_##iface##_thunk, \
(&(TASK_IFACE_ARGS_TYPE(iface)) { __VA_ARGS__ }), \
sizeof(TASK_IFACE_ARGS_TYPE(iface)), \
"<indirect:"#iface">" \
) \
)
#define SCHED_INVOKE_TASK_INDIRECT(_sched, _iface, _handle, ...) \
INVOKE_TASK_INDIRECT_(_sched, cosched_new_task, _iface, _handle, ##__VA_ARGS__)
#define INVOKE_TASK_INDIRECT(_iface, _handle, ...) \
INVOKE_TASK_INDIRECT_(THIS_SCHED, cosched_new_task, _iface, _handle, ##__VA_ARGS__)
#define INVOKE_SUBTASK_INDIRECT(_iface, _handle, ...) \
INVOKE_TASK_INDIRECT_(THIS_SCHED, cosched_new_subtask, _iface, _handle, ##__VA_ARGS__)
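/*
 * Sketch of the interface mechanism (all names hypothetical): a task that
 * conforms to an interface can be stored behind an opaque handle and invoked
 * later without naming the concrete task.
 */
DEFINE_TASK_INTERFACE(example_attack, { int power; });

TASK_WITH_INTERFACE(example_laser_attack, example_attack) {
	WAIT(ARGS.power);  // interface args are available through ARGS
}

TASK(example_attack_driver) {
	TASK_INDIRECT_TYPE(example_attack) handle =
		TASK_INDIRECT(example_attack, example_laser_attack);
	INVOKE_TASK_INDIRECT(example_attack, handle, .power = 30);
}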
#define THIS_TASK cotask_box(cotask_active())
#define TASK_EVENTS(task) cotask_get_events(cotask_unbox(task))
#define TASK_MALLOC(size) cotask_malloc(cotask_active(), size)
#define THIS_SCHED cotask_get_sched(cotask_active())
#define TASK_HOST_ENT(ent_struct_type) \
ENT_CAST(cotask_host_entity(cotask_active(), sizeof(ent_struct_type), ENT_TYPE_ID(ent_struct_type)), ent_struct_type)
#define TASK_HOST_EVENTS(events_array) \
cotask_host_events(cotask_active(), sizeof(events_array)/sizeof(CoEvent), &((events_array)._first_event_))
#define YIELD cotask_yield(NULL)
#define WAIT(delay) cotask_wait(delay)
#define WAIT_EVENT(e) cotask_wait_event(e)
#define WAIT_EVENT_OR_DIE(e) cotask_wait_event_or_die(e)
#define WAIT_EVENT_ONCE(e) cotask_wait_event_once(e)
#define STALL cotask_wait(INT_MAX)
#define AWAIT_SUBTASKS cotask_wait_subtasks()
#define NOT_NULL_OR_DIE(expr) ({ \
__auto_type _not_null_ptr = (expr); \
if(_not_null_ptr == NULL) { \
cotask_cancel(NOT_NULL(cotask_active())); \
UNREACHABLE; \
} \
NOT_NULL(_not_null_ptr); \
})
// first arg of the generated function needs to be the ent, because ENT_UNBOXED_DISPATCH_FUNCTION dispatches on first arg.
#define _cotask_emit_bindfunc(typename, ...) \
INLINE typename *_cotask_bind_to_entity_##typename(typename *ent, CoTask *task) { \
return ENT_CAST((cotask_bind_to_entity)( \
task, \
ent ? UNION_CAST(typename*, EntityInterface*, ent) : NULL), \
typename \
); \
}
ENTITIES(_cotask_emit_bindfunc,)
#undef _cotask_emit_bindfunc
INLINE EntityInterface *_cotask_bind_to_entity_Entity(EntityInterface *ent, CoTask *task) {
return (cotask_bind_to_entity)(task, ent);
}
#define cotask_bind_to_entity(task, ent) \
ENT_UNBOXED_DISPATCH_FUNCTION(_cotask_bind_to_entity_, ent, task)
#define TASK_BIND(ent_or_box) cotask_bind_to_entity(cotask_active(), ENT_UNBOX_OR_PASSTHROUGH(ent_or_box))
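/*
 * Sketch (hedged): TASK_BIND accepts a boxed entity or a raw entity pointer
 * (ENT_UNBOX_OR_PASSTHROUGH comes from entity.h). Once bound, the task is
 * force-canceled when the scheduler tries to resume it after the entity is
 * gone. BoxedEntity here is the generic box type; a concrete per-entity box
 * works the same way.
 */
TASK(example_bound, { BoxedEntity e; }) {
	EntityInterface *ent = TASK_BIND(ARGS.e);
	(void)ent;
	for(;;) {
		YIELD;  // never resumed again once the bound entity despawns
	}
}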

src/coroutine/coevent.c (new file)

@@ -0,0 +1,140 @@
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#include "taisei.h"
#include "internal.h"
void coevent_init(CoEvent *evt) {
static uint32_t g_uid;
uint32_t uid = ++g_uid;
EVT_DEBUG("Init event %p (uid = %u)", (void*)evt, uid);
*evt = (CoEvent) { .unique_id = uid };
assert(g_uid != 0);
}
CoEventSnapshot coevent_snapshot(const CoEvent *evt) {
return (CoEventSnapshot) {
.unique_id = evt->unique_id,
.num_signaled = evt->num_signaled,
};
}
CoEventStatus coevent_poll(const CoEvent *evt, const CoEventSnapshot *snap) {
#if 0
EVT_DEBUG("[%p]", (void*)evt);
EVT_DEBUG("evt->unique_id == %u", evt->unique_id);
EVT_DEBUG("snap->unique_id == %u", snap->unique_id);
EVT_DEBUG("evt->num_signaled == %u", evt->num_signaled);
EVT_DEBUG("snap->num_signaled == %u", snap->num_signaled);
#endif
if(
evt->unique_id != snap->unique_id ||
evt->num_signaled < snap->num_signaled ||
evt->unique_id == 0
) {
EVT_DEBUG("[%p / %u] Event was canceled", (void*)evt, evt->unique_id);
return CO_EVENT_CANCELED;
}
if(evt->num_signaled > snap->num_signaled) {
EVT_DEBUG("[%p / %u] Event was signaled", (void*)evt, evt->unique_id);
return CO_EVENT_SIGNALED;
}
// EVT_DEBUG("Event hasn't changed; waiting...");
return CO_EVENT_PENDING;
}
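/*
 * Editor's sketch of the snapshot/poll protocol these two functions implement
 * (simplified; the real waiter lives in cotask.c and also registers itself as
 * a subscriber so that signaling the event wakes it up):
 */
attr_unused static CoEventStatus example_wait_for_event(CoEvent *evt) {
	CoEventSnapshot snap = coevent_snapshot(evt);
	for(;;) {
		CoEventStatus status = coevent_poll(evt, &snap);
		if(status != CO_EVENT_PENDING) {
			return status;  // CO_EVENT_SIGNALED or CO_EVENT_CANCELED
		}
		cotask_yield(NULL);  // check again after the task is resumed
	}
}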
static bool subscribers_array_predicate(const void *pelem, void *userdata) {
return cotask_unbox_notnull(*(const BoxedTask*)pelem);
}
void coevent_cleanup_subscribers(CoEvent *evt) {
if(evt->subscribers.num_elements == 0) {
return;
}
attr_unused uint prev_num_subs = evt->subscribers.num_elements;
dynarray_filter(&evt->subscribers, subscribers_array_predicate, NULL);
attr_unused uint new_num_subs = evt->subscribers.num_elements;
EVT_DEBUG("Event %p num subscribers %u -> %u", (void*)evt, prev_num_subs, new_num_subs);
}
void coevent_add_subscriber(CoEvent *evt, CoTask *task) {
EVT_DEBUG("Event %p (num=%u; capacity=%u)", (void*)evt, evt->subscribers.num_elements, evt->subscribers.capacity);
EVT_DEBUG("Subscriber: %s", task->debug_label);
*dynarray_append_with_min_capacity(&evt->subscribers, 4) = cotask_box(task);
}
static void coevent_wake_subscribers(CoEvent *evt, uint num_subs, BoxedTask subs[num_subs]) {
for(int i = 0; i < num_subs; ++i) {
CoTask *task = cotask_unbox_notnull(subs[i]);
if(task && cotask_status(task) != CO_STATUS_DEAD) {
EVT_DEBUG("Resume CoEvent{%p} subscriber %s", (void*)evt, task->debug_label);
cotask_resume(task, NULL);
}
}
}
void coevent_signal(CoEvent *evt) {
if(UNLIKELY(evt->unique_id == 0)) {
return;
}
++evt->num_signaled;
EVT_DEBUG("Signal event %p (uid = %u; num_signaled = %u)", (void*)evt, evt->unique_id, evt->num_signaled);
assert(evt->num_signaled != 0);
if(evt->subscribers.num_elements) {
BoxedTask subs_snapshot[evt->subscribers.num_elements];
memcpy(subs_snapshot, evt->subscribers.data, sizeof(subs_snapshot));
evt->subscribers.num_elements = 0;
coevent_wake_subscribers(evt, ARRAY_SIZE(subs_snapshot), subs_snapshot);
}
}
void coevent_signal_once(CoEvent *evt) {
if(!evt->num_signaled) {
coevent_signal(evt);
}
}
void coevent_cancel(CoEvent *evt) {
TASK_DEBUG_EVENT(ev);
if(evt->unique_id == 0) {
EVT_DEBUG("[%lu] Event %p already canceled", ev, (void*)evt);
return;
}
EVT_DEBUG("[%lu] BEGIN Cancel event %p (uid = %u; num_signaled = %u)", ev, (void*)evt, evt->unique_id, evt->num_signaled);
EVT_DEBUG("[%lu] SUBS = %p", ev, (void*)evt->subscribers.data);
evt->unique_id = 0;
if(evt->subscribers.num_elements) {
BoxedTask subs_snapshot[evt->subscribers.num_elements];
memcpy(subs_snapshot, evt->subscribers.data, sizeof(subs_snapshot));
dynarray_free_data(&evt->subscribers);
coevent_wake_subscribers(evt, ARRAY_SIZE(subs_snapshot), subs_snapshot);
// CAUTION: no modifying evt after this point, it may be invalidated
} else {
dynarray_free_data(&evt->subscribers);
}
EVT_DEBUG("[%lu] END Cancel event %p", ev, (void*)evt);
}
void _coevent_array_action(uint num, CoEvent *events, void (*func)(CoEvent*)) {
for(uint i = 0; i < num; ++i) {
func(events + i);
}
}

src/coroutine/coevent.h (new file)

@@ -0,0 +1,55 @@
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#pragma once
#include "taisei.h"
#include "dynarray.h"
#include "cotask.h"
typedef enum CoEventStatus {
CO_EVENT_PENDING,
CO_EVENT_SIGNALED,
CO_EVENT_CANCELED,
} CoEventStatus;
typedef struct CoEvent {
DYNAMIC_ARRAY(BoxedTask) subscribers;
uint32_t unique_id;
uint32_t num_signaled;
} CoEvent;
typedef struct CoEventSnapshot {
uint32_t unique_id;
uint32_t num_signaled;
} CoEventSnapshot;
#define COEVENTS_ARRAY(...) \
union { \
CoEvent _first_event_; \
struct { CoEvent __VA_ARGS__; }; \
}
typedef COEVENTS_ARRAY(
finished
) CoTaskEvents;
void coevent_init(CoEvent *evt);
void coevent_signal(CoEvent *evt);
void coevent_signal_once(CoEvent *evt);
void coevent_cancel(CoEvent *evt);
CoEventSnapshot coevent_snapshot(const CoEvent *evt);
CoEventStatus coevent_poll(const CoEvent *evt, const CoEventSnapshot *snap);
void _coevent_array_action(uint num, CoEvent *events, void (*func)(CoEvent*));
#define COEVENT_ARRAY_ACTION(func, array) \
(_coevent_array_action(sizeof(array)/sizeof(CoEvent), &((array)._first_event_), func))
#define COEVENT_INIT_ARRAY(array) COEVENT_ARRAY_ACTION(coevent_init, array)
#define COEVENT_CANCEL_ARRAY(array) COEVENT_ARRAY_ACTION(coevent_cancel, array)
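/*
 * Sketch (hypothetical event names): a custom event array uses the same union
 * layout as CoTaskEvents above, so the whole set can be initialized, signaled,
 * and canceled through the array helpers.
 */
typedef COEVENTS_ARRAY(
	spawned,
	defeated
) ExampleBossEvents;

static void example_boss_events_usage(ExampleBossEvents *events) {
	COEVENT_INIT_ARRAY(*events);
	coevent_signal(&events->defeated);  // wakes tasks waiting on this event
	COEVENT_CANCEL_ARRAY(*events);      // pending waiters see CO_EVENT_CANCELED
}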

@@ -0,0 +1,22 @@
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#pragma once
#include "taisei.h"
// #define EVT_DEBUG
#ifdef EVT_DEBUG
#undef EVT_DEBUG
#define EVT_DEBUG(...) log_debug(__VA_ARGS__)
#else
#define EVT_DEBUG(...) ((void)0)
#endif
void coevent_cleanup_subscribers(CoEvent *evt);
void coevent_add_subscriber(CoEvent *evt, CoTask *task);

src/coroutine/coroutine.c (new file)

@@ -0,0 +1,68 @@
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#include "taisei.h"
#include "internal.h"
#include "util.h"
void coroutines_init(void) {
cotask_global_init();
}
void coroutines_shutdown(void) {
cotask_global_shutdown();
}
#ifdef CO_TASK_STATS
#include "video.h"
#include "resource/font.h"
#endif
void coroutines_draw_stats(void) {
#ifdef CO_TASK_STATS
if(STAT_VAL(num_tasks_in_use) == 0 && STAT_VAL(num_switches_this_frame) == 0) {
return;
}
static char buf[128];
TextParams tp = {
.pos = { SCREEN_W },
.color = RGB(1, 1, 1),
.shader_ptr = res_shader("text_default"),
.font_ptr = res_font("monotiny"),
.align = ALIGN_RIGHT,
};
float ls = font_get_lineskip(tp.font_ptr);
tp.pos.y += ls;
#ifdef CO_TASK_STATS_STACK
snprintf(buf, sizeof(buf), "Peak stack: %zukb Tasks: %4zu / %4zu ",
STAT_VAL(peak_stack_usage) / 1024,
STAT_VAL(num_tasks_in_use),
STAT_VAL(num_tasks_allocated)
);
#else
snprintf(buf, sizeof(buf), "Tasks: %4zu / %4zu ",
STAT_VAL(num_tasks_in_use),
STAT_VAL(num_tasks_allocated)
);
#endif
text_draw(buf, &tp);
tp.pos.y += ls;
snprintf(buf, sizeof(buf), "Switches/frame: %4zu ", STAT_VAL(num_switches_this_frame));
text_draw(buf, &tp);
STAT_VAL_SET(num_switches_this_frame, 0);
#endif
}

src/coroutine/coroutine.h (new file)

@@ -0,0 +1,14 @@
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#pragma once
#include "taisei.h"
void coroutines_init(void);
void coroutines_shutdown(void);
void coroutines_draw_stats(void);

src/coroutine/cosched.c (new file)

@@ -0,0 +1,138 @@
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#include "taisei.h"
#include "internal.h"
void cosched_init(CoSched *sched) {
memset(sched, 0, sizeof(*sched));
}
CoTask *_cosched_new_task(CoSched *sched, CoTaskFunc func, void *arg, size_t arg_size, bool is_subtask, CoTaskDebugInfo debug) {
assume(sched != NULL);
CoTask *task = cotask_new_internal(cotask_entry);
#ifdef CO_TASK_DEBUG
snprintf(task->debug_label, sizeof(task->debug_label), "#%i <%p> %s (%s:%i:%s)", task->unique_id, (void*)task, debug.label, debug.debug_info.file, debug.debug_info.line, debug.debug_info.func);
#endif
CoTaskInitData init_data = { 0 };
init_data.task = task;
init_data.sched = sched;
init_data.func = func;
init_data.func_arg = arg;
init_data.func_arg_size = arg_size;
if(is_subtask) {
init_data.master_task_data = cotask_get_data(cotask_active());
assert(init_data.master_task_data != NULL);
}
alist_append(&sched->pending_tasks, task);
cotask_resume_internal(task, &init_data);
assert(cotask_status(task) == CO_STATUS_SUSPENDED || cotask_status(task) == CO_STATUS_DEAD);
return task;
}
uint cosched_run_tasks(CoSched *sched) {
alist_merge_tail(&sched->tasks, &sched->pending_tasks);
uint ran = 0;
TASK_DEBUG("---------------------------------------------------------------");
for(CoTask *t = sched->tasks.first, *next; t; t = next) {
next = t->next;
if(cotask_status(t) == CO_STATUS_DEAD) {
TASK_DEBUG("<!> %s", t->debug_label);
alist_unlink(&sched->tasks, t);
cotask_free(t);
} else {
TASK_DEBUG(">>> %s", t->debug_label);
assert(cotask_status(t) == CO_STATUS_SUSPENDED);
cotask_resume(t, NULL);
++ran;
}
}
TASK_DEBUG("---------------------------------------------------------------");
return ran;
}
typedef ht_ptr2int_t events_hashset;
static uint gather_blocking_events(CoTaskList *tasks, events_hashset *events) {
uint n = 0;
for(CoTask *t = tasks->first; t; t = t->next) {
if(!t->data) {
continue;
}
CoTaskData *tdata = t->data;
if(tdata->wait.wait_type != COTASK_WAIT_EVENT) {
continue;
}
CoEvent *e = tdata->wait.event.pevent;
if(e->unique_id != tdata->wait.event.snapshot.unique_id) {
// event not valid? (probably should not happen)
continue;
}
ht_set(events, e, e->unique_id);
++n;
}
return n;
}
static void cancel_blocking_events(CoSched *sched) {
events_hashset events;
ht_create(&events);
gather_blocking_events(&sched->tasks, &events);
gather_blocking_events(&sched->pending_tasks, &events);
ht_ptr2int_iter_t iter;
ht_iter_begin(&events, &iter);
for(;iter.has_data; ht_iter_next(&iter)) {
CoEvent *e = iter.key;
if(e->unique_id == iter.value) {
// NOTE: wakes subscribers, which may cancel/invalidate other events before we do.
// This is why we snapshot unique_id.
// We assume that the memory backing *e is safe to access, however.
coevent_cancel(e);
}
}
ht_destroy(&events);
}
static void finish_task_list(CoTaskList *tasks) {
for(CoTask *t; (t = alist_pop(tasks));) {
cotask_force_finish(t);
}
}
void cosched_finish(CoSched *sched) {
// First cancel all events that have any tasks waiting on them.
// This will wake those tasks, so they can do any necessary cleanup.
cancel_blocking_events(sched);
finish_task_list(&sched->tasks);
finish_task_list(&sched->pending_tasks);
assert(!sched->tasks.first);
assert(!sched->pending_tasks.first);
memset(sched, 0, sizeof(*sched));
}

src/coroutine/cosched.h (new file)

@@ -0,0 +1,27 @@
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#pragma once
#include "taisei.h"
#include "cotask.h"
typedef struct CoSched CoSched;
struct CoSched {
CoTaskList tasks, pending_tasks;
};
void cosched_init(CoSched *sched);
CoTask *_cosched_new_task(CoSched *sched, CoTaskFunc func, void *arg, size_t arg_size, bool is_subtask, CoTaskDebugInfo debug); // creates and runs the task, schedules it for resume on cosched_run_tasks if it's still alive
#define cosched_new_task(sched, func, arg, arg_size, debug_label) \
_cosched_new_task(sched, func, arg, arg_size, false, COTASK_DEBUG_INFO(debug_label))
#define cosched_new_subtask(sched, func, arg, arg_size, debug_label) \
_cosched_new_task(sched, func, arg, arg_size, true, COTASK_DEBUG_INFO(debug_label))
uint cosched_run_tasks(CoSched *sched); // returns number of tasks ran
void cosched_finish(CoSched *sched);
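/*
 * Editor's sketch of the expected lifecycle (the real call sites are in the
 * game loop; "example_task" is hypothetical and assumed to be declared with
 * the task DSL from taskdsl.h, with coroutines_init() called at startup):
 */
DECLARE_EXTERN_TASK(example_task, { int duration; });

static void example_sched_usage(void) {
	CoSched sched;
	cosched_init(&sched);
	SCHED_INVOKE_TASK(&sched, example_task, { .duration = 60 });
	while(cosched_run_tasks(&sched) > 0) {
		// one iteration corresponds to one game frame
	}
	cosched_finish(&sched);  // cancels blocking events, then finishes all tasks
}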

@@ -4,165 +4,21 @@
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#include "taisei.h"
-#include "coroutine.h"
-#include "util.h"
+#include "internal.h"
-// TODO refactor this intro a few smaller files under coroutine/
#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#else
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)0)
#endif
#ifdef __EMSCRIPTEN__
#define CO_STACK_SIZE (64 * 1024)
#else
#define CO_STACK_SIZE (256 * 1024)
#endif
// #define EVT_DEBUG
#ifdef DEBUG
#define CO_TASK_STATS
#endif
#ifdef CO_TASK_DEBUG
#define TASK_DEBUG(...) log_debug(__VA_ARGS__)
#else
#define TASK_DEBUG(...) ((void)0)
#endif
#ifdef EVT_DEBUG
#undef EVT_DEBUG
#define EVT_DEBUG(...) log_debug(__VA_ARGS__)
#else
#define EVT_DEBUG(...) ((void)0)
#endif
enum {
COTASK_WAIT_NONE,
COTASK_WAIT_DELAY,
COTASK_WAIT_EVENT,
COTASK_WAIT_SUBTASKS,
};
#define MEM_AREA_SIZE (1 << 12)
#define MEM_ALLOC_ALIGNMENT alignof(max_align_t)
#define MEM_ALIGN_SIZE(x) (x + (MEM_ALLOC_ALIGNMENT - 1)) & ~(MEM_ALLOC_ALIGNMENT - 1)
typedef struct CoTaskData CoTaskData;
struct CoTask {
LIST_INTERFACE(CoTask);
koishi_coroutine_t ko;
// Pointer to a control structure on the coroutine's stack
CoTaskData *data;
uint32_t unique_id;
#ifdef CO_TASK_DEBUG
char debug_label[256];
#endif
};
typedef struct CoTaskHeapMemChunk {
struct CoTaskHeapMemChunk *next;
alignas(MEM_ALLOC_ALIGNMENT) char data[];
} CoTaskHeapMemChunk;
struct CoTaskData {
LIST_INTERFACE(CoTaskData);
CoTask *task;
CoSched *sched;
CoTaskData *master; // AKA supertask
LIST_ANCHOR(CoTaskData) slaves; // AKA subtasks
BoxedEntity bound_ent;
CoTaskEvents events;
bool finalizing;
struct {
CoWaitResult result;
union {
struct {
int remaining;
} delay;
struct {
CoEvent *pevent;
CoEventSnapshot snapshot;
} event;
};
uint wait_type;
} wait;
struct {
EntityInterface *ent;
CoEvent *events;
uint num_events;
} hosted;
struct {
CoTaskHeapMemChunk *onheap_alloc_head;
char *onstack_alloc_head;
alignas(MEM_ALLOC_ALIGNMENT) char onstack_alloc_area[MEM_AREA_SIZE];
} mem;
};
typedef struct CoTaskInitData {
CoTask *task;
CoSched *sched;
CoTaskFunc func;
void *func_arg;
size_t func_arg_size;
CoTaskData *master_task_data;
} CoTaskInitData;
-static LIST_ANCHOR(CoTask) task_pool;
+static CoTaskList task_pool;
static koishi_coroutine_t *co_main;
-#ifdef CO_TASK_STATS
-static struct {
-size_t num_tasks_allocated;
-size_t num_tasks_in_use;
-size_t num_switches_this_frame;
-size_t peak_stack_usage;
-} cotask_stats;
-#define STAT_VAL(name) (cotask_stats.name)
-#define STAT_VAL_SET(name, value) ((cotask_stats.name) = (value))
-// enable stack usage tracking (loose)
-#ifndef _WIN32
-// NOTE: disabled by default because of heavy performance overhead under ASan
-// #define CO_TASK_STATS_STACK
-#endif
-#else // CO_TASK_STATS
-#define STAT_VAL(name) ((void)0)
-#define STAT_VAL_SET(name, value) ((void)0)
-#endif // CO_TASK_STATS
-#define STAT_VAL_ADD(name, value) STAT_VAL_SET(name, STAT_VAL(name) + (value))
-#ifdef CO_TASK_DEBUG
-static size_t debug_event_id;
-#define TASK_DEBUG_EVENT(ev) uint64_t ev = debug_event_id++
-#else
-#define TASK_DEBUG_EVENT(ev) ((void)0)
-#endif
+#ifdef CO_TASK_DEBUG
+size_t _cotask_debug_event_id;
+#endif
+#ifdef CO_TASK_STATS
+CoTaskStats cotask_stats;
+#endif
#ifdef CO_TASK_STATS_STACK
@@ -294,6 +150,18 @@ static void estimate_stack_usage(CoTask *task) {
}
#endif // CO_TASK_STATS_STACK
void cotask_global_init(void) {
co_main = koishi_active();
}
void cotask_global_shutdown(void) {
for(CoTask *task; (task = alist_pop(&task_pool));) {
koishi_deinit(&task->ko);
free(task);
}
}
attr_nonnull_all attr_returns_nonnull
INLINE CoTask *cotask_from_koishi_coroutine(koishi_coroutine_t *co) {
return CASTPTR_ASSUME_ALIGNED((char*)co - offsetof(CoTask, ko), CoTask);
@@ -316,7 +184,7 @@ CoTask *cotask_unbox(BoxedTask box) {
return NULL;
}
-static CoTask *cotask_unbox_notnull(BoxedTask box) {
+CoTask *cotask_unbox_notnull(BoxedTask box) {
CoTask *task = NOT_NULL((void*)box.ptr);
if(task->unique_id == box.unique_id) {
@@ -326,7 +194,7 @@ static CoTask *cotask_unbox_notnull(BoxedTask box) {
return NULL;
}
-static CoTask *cotask_new_internal(koishi_entrypoint_t entry_point) {
+CoTask *cotask_new_internal(koishi_entrypoint_t entry_point) {
CoTask *task;
STAT_VAL_ADD(num_tasks_in_use, 1);
@@ -362,7 +230,7 @@ static CoTask *cotask_new_internal(koishi_entrypoint_t entry_point) {
return task;
}
-static void *cotask_resume_internal(CoTask *task, void *arg) {
+void *cotask_resume_internal(CoTask *task, void *arg) {
TASK_DEBUG_EVENT(ev);
TASK_DEBUG("[%zu] Resuming task %s", ev, task->debug_label);
STAT_VAL_ADD(num_switches_this_frame, 1);
@@ -371,14 +239,6 @@ static void *cotask_resume_internal(CoTask *task, void *arg) {
return arg;
}
attr_returns_nonnull attr_nonnull_all
INLINE CoTaskData *get_task_data(CoTask *task) {
CoTaskData *data = task->data;
assume(data != NULL);
assume(data->task == task);
return data;
}
static void cancel_task_events(CoTaskData *task_data) {
// HACK: This allows an entity-bound task to wait for its own "finished"
// event. Can be useful to do some cleanup without spawning a separate task
@@ -390,10 +250,8 @@ static void cancel_task_events(CoTaskData *task_data) {
COEVENT_CANCEL_ARRAY(task_data->events);
}
-static void coevent_cleanup_subscribers(CoEvent *evt);
static bool cotask_finalize(CoTask *task) {
-CoTaskData *task_data = get_task_data(task);
+CoTaskData *task_data = cotask_get_data(task);
TASK_DEBUG_EVENT(ev);
@@ -504,7 +362,7 @@ static void cotask_entry_setup(CoTask *task, CoTaskData *data, CoTaskInitData *i
COEVENT_INIT_ARRAY(data->events);
}
-static void *cotask_entry(void *varg) {
+void *cotask_entry(void *varg) {
CoTaskData data = { 0 };
CoTaskInitData *init_data = varg;
CoTask *task = init_data->task;
@@ -568,7 +426,7 @@ static void cotask_unsafe_cancel(CoTask *task) {
TASK_DEBUG("[%zu] End canceling task %s", ev, task->debug_label);
}
-static void *cotask_cancel_in_safe_context(void *a);
+static void *cotask_cancel_in_safe_context(void *arg) {
+CoTask *victim = arg;
+cotask_unsafe_cancel(victim);
+return NULL;
+}
static void cotask_force_cancel(CoTask *task) {
koishi_coroutine_t *ctx = koishi_active();
@@ -603,12 +465,6 @@ static void cotask_force_cancel(CoTask *task) {
cotask_free(cancel_task);
}
static void *cotask_cancel_in_safe_context(void *arg) {
CoTask *victim = arg;
cotask_unsafe_cancel(victim);
return NULL;
}
bool cotask_cancel(CoTask *task) {
if(!task || cotask_status(task) == CO_STATUS_DEAD) {
return false;
}
@@ -619,52 +475,18 @@ bool cotask_cancel(CoTask *task) {
}
static void *cotask_force_resume(CoTask *task, void *arg) {
-attr_unused CoTaskData *task_data = get_task_data(task);
+attr_unused CoTaskData *task_data = cotask_get_data(task);
assert(task_data->wait.wait_type == COTASK_WAIT_NONE);
assert(!task_data->bound_ent.ent || ENT_UNBOX(task_data->bound_ent));
return cotask_resume_internal(task, arg);
}
static void *cotask_wake_and_resume(CoTask *task, void *arg) {
-CoTaskData *task_data = get_task_data(task);
+CoTaskData *task_data = cotask_get_data(task);
task_data->wait.wait_type = COTASK_WAIT_NONE;
return cotask_force_resume(task, arg);
}
CoEventSnapshot coevent_snapshot(const CoEvent *evt) {
return (CoEventSnapshot) {
.unique_id = evt->unique_id,
.num_signaled = evt->num_signaled,
};
}
CoEventStatus coevent_poll(const CoEvent *evt, const CoEventSnapshot *snap) {
#if 0
EVT_DEBUG("[%p]", (void*)evt);
EVT_DEBUG("evt->unique_id == %u", evt->unique_id);
EVT_DEBUG("snap->unique_id == %u", snap->unique_id);
EVT_DEBUG("evt->num_signaled == %u", evt->num_signaled);
EVT_DEBUG("snap->num_signaled == %u", snap->num_signaled);
#endif
if(
evt->unique_id != snap->unique_id ||
evt->num_signaled < snap->num_signaled ||
evt->unique_id == 0
) {
EVT_DEBUG("[%p / %u] Event was canceled", (void*)evt, evt->unique_id);
return CO_EVENT_CANCELED;
}
if(evt->num_signaled > snap->num_signaled) {
EVT_DEBUG("[%p / %u] Event was signaled", (void*)evt, evt->unique_id);
return CO_EVENT_SIGNALED;
}
// EVT_DEBUG("Event hasn't changed; waiting...");
return CO_EVENT_PENDING;
}
static bool cotask_do_wait(CoTaskData *task_data) {
switch(task_data->wait.wait_type) {
case COTASK_WAIT_NONE: {
@@ -706,7 +528,7 @@ static bool cotask_do_wait(CoTaskData *task_data) {
}
void *cotask_resume(CoTask *task, void *arg) {
-CoTaskData *task_data = get_task_data(task);
+CoTaskData *task_data = cotask_get_data(task);
if(task_data->bound_ent.ent && !ENT_UNBOX(task_data->bound_ent)) {
cotask_force_cancel(task);
@@ -742,7 +564,7 @@ static inline CoWaitResult cotask_wait_init(CoTaskData *task_data, char wait_typ
int cotask_wait(int delay) {
CoTask *task = cotask_active();
-CoTaskData *task_data = get_task_data(task);
+CoTaskData *task_data = cotask_get_data(task);
assert(task_data->wait.wait_type == COTASK_WAIT_NONE);
if(delay == 1) {
@@ -762,7 +584,7 @@ int cotask_wait(int delay) {
int cotask_wait_subtasks(void) {
CoTask *task = cotask_active();
-CoTaskData *task_data = get_task_data(task);
+CoTaskData *task_data = cotask_get_data(task);
assert(task_data->wait.wait_type == COTASK_WAIT_NONE);
cotask_wait_init(task_data, COTASK_WAIT_SUBTASKS);
@@ -801,12 +623,12 @@ static void *_cotask_malloc(CoTaskData *task_data, size_t size, bool allow_heap_
}
void *cotask_malloc(CoTask *task, size_t size) {
-CoTaskData *task_data = get_task_data(task);
+CoTaskData *task_data = cotask_get_data(task);
return _cotask_malloc(task_data, size, true);
}
EntityInterface *cotask_host_entity(CoTask *task, size_t ent_size, EntityType ent_type) {
-CoTaskData *task_data = get_task_data(task);
+CoTaskData *task_data = cotask_get_data(task);
assume(task_data->hosted.ent == NULL);
EntityInterface *ent = _cotask_malloc(task_data, ent_size, false);
ent_register(ent, ent_type);
@@ -815,7 +637,7 @@ EntityInterface *cotask_host_entity(CoTask *task, size_t ent_size, EntityType en
}
void cotask_host_events(CoTask *task, uint num_events, CoEvent events[num_events]) {
-CoTaskData *task_data = get_task_data(task);
+CoTaskData *task_data = cotask_get_data(task);
assume(task_data->hosted.events == NULL);
assume(task_data->hosted.num_events == 0);
assume(num_events > 0);
@@ -824,31 +646,9 @@ void cotask_host_events(CoTask *task, uint num_events, CoEvent events[num_events
_coevent_array_action(num_events, events, coevent_init);
}
static bool subscribers_array_predicate(const void *pelem, void *userdata) {
return cotask_unbox_notnull(*(const BoxedTask*)pelem);
}
static void coevent_cleanup_subscribers(CoEvent *evt) {
if(evt->subscribers.num_elements == 0) {
return;
}
attr_unused uint prev_num_subs = evt->subscribers.num_elements;
dynarray_filter(&evt->subscribers, subscribers_array_predicate, NULL);
attr_unused uint new_num_subs = evt->subscribers.num_elements;
EVT_DEBUG("Event %p num subscribers %u -> %u", (void*)evt, prev_num_subs, new_num_subs);
}
static void coevent_add_subscriber(CoEvent *evt, CoTask *task) {
EVT_DEBUG("Event %p (num=%u; capacity=%u)", (void*)evt, evt->subscribers.num_elements, evt->subscribers.capacity);
EVT_DEBUG("Subscriber: %s", task->debug_label);
*dynarray_append_with_min_capacity(&evt->subscribers, 4) = cotask_box(task);
}
static CoWaitResult cotask_wait_event_internal(CoEvent *evt) {
CoTask *task = cotask_active();
-CoTaskData *task_data = get_task_data(task);
+CoTaskData *task_data = cotask_get_data(task);
coevent_add_subscriber(evt, task);
@@ -899,11 +699,11 @@ CoStatus cotask_status(CoTask *task) {
}
CoSched *cotask_get_sched(CoTask *task) {
-return get_task_data(task)->sched;
+return cotask_get_data(task)->sched;
}
EntityInterface *(cotask_bind_to_entity)(CoTask *task, EntityInterface *ent) {
-CoTaskData *task_data = get_task_data(task);
+CoTaskData *task_data = cotask_get_data(task);
assert(task_data->bound_ent.ent == 0);
if(ent == NULL) {
@@ -916,141 +716,11 @@ EntityInterface *(cotask_bind_to_entity)(CoTask *task, EntityInterface *ent) {
}
CoTaskEvents *cotask_get_events(CoTask *task) {
-CoTaskData *task_data = get_task_data(task);
+CoTaskData *task_data = cotask_get_data(task);
return &task_data->events;
}
-void coevent_init(CoEvent *evt) {
+void cotask_force_finish(CoTask *task) {
static uint32_t g_uid;
uint32_t uid = ++g_uid;
EVT_DEBUG("Init event %p (uid = %u)", (void*)evt, uid);
*evt = (CoEvent) { .unique_id = uid };
assert(g_uid != 0);
}
static void coevent_wake_subscribers(CoEvent *evt, uint num_subs, BoxedTask subs[num_subs]) {
for(int i = 0; i < num_subs; ++i) {
CoTask *task = cotask_unbox_notnull(subs[i]);
if(task && cotask_status(task) != CO_STATUS_DEAD) {
EVT_DEBUG("Resume CoEvent{%p} subscriber %s", (void*)evt, task->debug_label);
cotask_resume(task, NULL);
}
}
}
void coevent_signal(CoEvent *evt) {
if(UNLIKELY(evt->unique_id == 0)) {
return;
}
++evt->num_signaled;
EVT_DEBUG("Signal event %p (uid = %u; num_signaled = %u)", (void*)evt, evt->unique_id, evt->num_signaled);
assert(evt->num_signaled != 0);
if(evt->subscribers.num_elements) {
BoxedTask subs_snapshot[evt->subscribers.num_elements];
memcpy(subs_snapshot, evt->subscribers.data, sizeof(subs_snapshot));
evt->subscribers.num_elements = 0;
coevent_wake_subscribers(evt, ARRAY_SIZE(subs_snapshot), subs_snapshot);
}
}
void coevent_signal_once(CoEvent *evt) {
if(!evt->num_signaled) {
coevent_signal(evt);
}
}
void coevent_cancel(CoEvent *evt) {
TASK_DEBUG_EVENT(ev);
if(evt->unique_id == 0) {
EVT_DEBUG("[%lu] Event %p already canceled", ev, (void*)evt);
return;
}
EVT_DEBUG("[%lu] BEGIN Cancel event %p (uid = %u; num_signaled = %u)", ev, (void*)evt, evt->unique_id, evt->num_signaled);
EVT_DEBUG("[%lu] SUBS = %p", ev, (void*)evt->subscribers.data);
evt->unique_id = 0;
if(evt->subscribers.num_elements) {
BoxedTask subs_snapshot[evt->subscribers.num_elements];
memcpy(subs_snapshot, evt->subscribers.data, sizeof(subs_snapshot));
dynarray_free_data(&evt->subscribers);
coevent_wake_subscribers(evt, ARRAY_SIZE(subs_snapshot), subs_snapshot);
// CAUTION: no modifying evt after this point, it may be invalidated
} else {
dynarray_free_data(&evt->subscribers);
}
EVT_DEBUG("[%lu] END Cancel event %p", ev, (void*)evt);
}
void _coevent_array_action(uint num, CoEvent *events, void (*func)(CoEvent*)) {
for(uint i = 0; i < num; ++i) {
func(events + i);
}
}
void cosched_init(CoSched *sched) {
memset(sched, 0, sizeof(*sched));
}
CoTask *_cosched_new_task(CoSched *sched, CoTaskFunc func, void *arg, size_t arg_size, bool is_subtask, CoTaskDebugInfo debug) {
assume(sched != NULL);
CoTask *task = cotask_new_internal(cotask_entry);
#ifdef CO_TASK_DEBUG
snprintf(task->debug_label, sizeof(task->debug_label), "#%i <%p> %s (%s:%i:%s)", task->unique_id, (void*)task, debug.label, debug.debug_info.file, debug.debug_info.line, debug.debug_info.func);
#endif
CoTaskInitData init_data = { 0 };
init_data.task = task;
init_data.sched = sched;
init_data.func = func;
init_data.func_arg = arg;
init_data.func_arg_size = arg_size;
if(is_subtask) {
init_data.master_task_data = get_task_data(cotask_active());
assert(init_data.master_task_data != NULL);
}
alist_append(&sched->pending_tasks, task);
cotask_resume_internal(task, &init_data);
assert(cotask_status(task) == CO_STATUS_SUSPENDED || cotask_status(task) == CO_STATUS_DEAD);
return task;
}
uint cosched_run_tasks(CoSched *sched) {
alist_merge_tail(&sched->tasks, &sched->pending_tasks);
uint ran = 0;
TASK_DEBUG("---------------------------------------------------------------");
for(CoTask *t = sched->tasks.first, *next; t; t = next) {
next = t->next;
if(cotask_status(t) == CO_STATUS_DEAD) {
TASK_DEBUG("<!> %s", t->debug_label);
alist_unlink(&sched->tasks, t);
cotask_free(t);
} else {
TASK_DEBUG(">>> %s", t->debug_label);
assert(cotask_status(t) == CO_STATUS_SUSPENDED);
cotask_resume(t, NULL);
++ran;
}
}
TASK_DEBUG("---------------------------------------------------------------");
return ran;
}
static void force_finish_task(CoTask *task) {
TASK_DEBUG("Finishing task %s", task->debug_label); TASK_DEBUG("Finishing task %s", task->debug_label);
if(task->data) { if(task->data) {
@ -1062,140 +732,3 @@ static void force_finish_task(CoTask *task) {
cotask_free(task); cotask_free(task);
} }
typedef ht_ptr2int_t events_hashset;
static uint gather_blocking_events(CoTaskList *tasks, events_hashset *events) {
uint n = 0;
for(CoTask *t = tasks->first; t; t = t->next) {
if(!t->data) {
continue;
}
CoTaskData *tdata = t->data;
if(tdata->wait.wait_type != COTASK_WAIT_EVENT) {
continue;
}
CoEvent *e = tdata->wait.event.pevent;
if(e->unique_id != tdata->wait.event.snapshot.unique_id) {
// event not valid? (probably should not happen)
continue;
}
ht_set(events, e, e->unique_id);
++n;
}
return n;
}
static void cancel_blocking_events(CoSched *sched) {
events_hashset events;
ht_create(&events);
gather_blocking_events(&sched->tasks, &events);
gather_blocking_events(&sched->pending_tasks, &events);
ht_ptr2int_iter_t iter;
ht_iter_begin(&events, &iter);
for(;iter.has_data; ht_iter_next(&iter)) {
CoEvent *e = iter.key;
if(e->unique_id == iter.value) {
// NOTE: wakes subscribers, which may cancel/invalidate other events before we do.
// This is why we snapshot unique_id.
// We assume that the memory backing *e is safe to access, however.
coevent_cancel(e);
}
}
ht_destroy(&events);
}
static void finish_task_list(CoTaskList *tasks) {
for(CoTask *t; (t = alist_pop(tasks));) {
force_finish_task(t);
}
}
void cosched_finish(CoSched *sched) {
// First cancel all events that have any tasks waiting on them.
// This will wake those tasks, so they can do any necessary cleanup.
cancel_blocking_events(sched);
finish_task_list(&sched->tasks);
finish_task_list(&sched->pending_tasks);
assert(!sched->tasks.first);
assert(!sched->pending_tasks.first);
memset(sched, 0, sizeof(*sched));
}
void coroutines_init(void) {
co_main = koishi_active();
}
void coroutines_shutdown(void) {
for(CoTask *task; (task = alist_pop(&task_pool));) {
koishi_deinit(&task->ko);
free(task);
}
}
#ifdef CO_TASK_STATS
#include "video.h"
#include "resource/font.h"
#endif
void coroutines_draw_stats(void) {
#ifdef CO_TASK_STATS
if(STAT_VAL(num_tasks_in_use) == 0 && STAT_VAL(num_switches_this_frame) == 0) {
return;
}
static char buf[128];
TextParams tp = {
.pos = { SCREEN_W },
.color = RGB(1, 1, 1),
.shader_ptr = res_shader("text_default"),
.font_ptr = res_font("monotiny"),
.align = ALIGN_RIGHT,
};
float ls = font_get_lineskip(tp.font_ptr);
tp.pos.y += ls;
#ifdef CO_TASK_STATS_STACK
snprintf(buf, sizeof(buf), "Peak stack: %zukb Tasks: %4zu / %4zu ",
STAT_VAL(peak_stack_usage) / 1024,
STAT_VAL(num_tasks_in_use),
STAT_VAL(num_tasks_allocated)
);
#else
snprintf(buf, sizeof(buf), "Tasks: %4zu / %4zu ",
STAT_VAL(num_tasks_in_use),
STAT_VAL(num_tasks_allocated)
);
#endif
text_draw(buf, &tp);
tp.pos.y += ls;
snprintf(buf, sizeof(buf), "Switches/frame: %4zu ", STAT_VAL(num_switches_this_frame));
text_draw(buf, &tp);
STAT_VAL_SET(num_switches_this_frame, 0);
#endif
}
DEFINE_EXTERN_TASK(_cancel_task_helper) {
CoTask *task = cotask_unbox(ARGS.task);
if(task) {
cotask_cancel(task);
}
}

src/coroutine/cotask.h (new file)

@@ -0,0 +1,76 @@
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#pragma once
#include "taisei.h"
#include <koishi.h>
#include "list.h"
#include "entity.h"
#include "util/debug.h"
// #define CO_TASK_DEBUG
typedef struct CoTask CoTask;
typedef LIST_ANCHOR(CoTask) CoTaskList;
typedef void *(*CoTaskFunc)(void *arg, size_t argsize);
typedef enum CoStatus {
CO_STATUS_SUSPENDED = KOISHI_SUSPENDED,
CO_STATUS_RUNNING = KOISHI_RUNNING,
CO_STATUS_IDLE = KOISHI_IDLE,
CO_STATUS_DEAD = KOISHI_DEAD,
} CoStatus;
typedef struct BoxedTask {
alignas(alignof(void*)) uintptr_t ptr;
uint32_t unique_id;
} BoxedTask;
#include "coevent.h"
typedef struct CoWaitResult {
int frames;
CoEventStatus event_status;
} CoWaitResult;
#ifdef CO_TASK_DEBUG
typedef struct CoTaskDebugInfo {
const char *label;
DebugInfo debug_info;
} CoTaskDebugInfo;
#define COTASK_DEBUG_INFO(label) ((CoTaskDebugInfo) { (label), _DEBUG_INFO_INITIALIZER_ })
#else
typedef char CoTaskDebugInfo;
#define COTASK_DEBUG_INFO(label) (0)
#endif
typedef struct CoSched CoSched;
void cotask_free(CoTask *task);
bool cotask_cancel(CoTask *task);
void *cotask_resume(CoTask *task, void *arg);
void *cotask_yield(void *arg);
int cotask_wait(int delay);
CoWaitResult cotask_wait_event(CoEvent *evt);
CoWaitResult cotask_wait_event_or_die(CoEvent *evt);
CoWaitResult cotask_wait_event_once(CoEvent *evt);
int cotask_wait_subtasks(void);
CoStatus cotask_status(CoTask *task);
CoTask *cotask_active(void);
EntityInterface *cotask_bind_to_entity(CoTask *task, EntityInterface *ent) attr_returns_nonnull;
CoTaskEvents *cotask_get_events(CoTask *task);
void *cotask_malloc(CoTask *task, size_t size) attr_returns_allocated attr_malloc attr_alloc_size(2);
EntityInterface *cotask_host_entity(CoTask *task, size_t ent_size, EntityType ent_type) attr_nonnull_all attr_returns_allocated;
void cotask_host_events(CoTask *task, uint num_events, CoEvent events[num_events]) attr_nonnull_all;
CoSched *cotask_get_sched(CoTask *task);
BoxedTask cotask_box(CoTask *task);
CoTask *cotask_unbox(BoxedTask box);
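/*
 * Usage sketch: a BoxedTask is a weak reference that stays safe to keep after
 * the task has died; cotask_unbox() then returns NULL instead of a dangling
 * pointer (the unique_id field is used to detect this).
 *
 *     BoxedTask b = cotask_box(cotask_active());
 *     // ...later, possibly after the task has finished...
 *     CoTask *t = cotask_unbox(b);
 *     if(t) {
 *         cotask_cancel(t);
 *     }
 */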

164
src/coroutine/cotask_internal.h Normal file
View file

@ -0,0 +1,164 @@
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#pragma once
#include "taisei.h"
#ifdef __EMSCRIPTEN__
#define CO_STACK_SIZE (64 * 1024)
#else
#define CO_STACK_SIZE (256 * 1024)
#endif
#ifdef CO_TASK_DEBUG
#define TASK_DEBUG(...) log_debug(__VA_ARGS__)
extern size_t _cotask_debug_event_id;
#define TASK_DEBUG_EVENT(ev) uint64_t ev = _cotask_debug_event_id++
#else
#define TASK_DEBUG(...) ((void)0)
#define TASK_DEBUG_EVENT(ev) ((void)0)
#endif
#ifdef DEBUG
#define CO_TASK_STATS
#endif
#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#else
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)0)
#endif
#define MEM_AREA_SIZE (1 << 12)
#define MEM_ALLOC_ALIGNMENT alignof(max_align_t)
#define MEM_ALIGN_SIZE(x) (((x) + (MEM_ALLOC_ALIGNMENT - 1)) & ~(MEM_ALLOC_ALIGNMENT - 1))
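/*
 * Example: with MEM_ALLOC_ALIGNMENT == 16 (a typical value of alignof(max_align_t)),
 * MEM_ALIGN_SIZE rounds a size up to the next multiple of the alignment:
 *     MEM_ALIGN_SIZE(1)  -> 16
 *     MEM_ALIGN_SIZE(16) -> 16
 *     MEM_ALIGN_SIZE(17) -> 32
 */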
enum {
COTASK_WAIT_NONE,
COTASK_WAIT_DELAY,
COTASK_WAIT_EVENT,
COTASK_WAIT_SUBTASKS,
};
typedef struct CoTaskData CoTaskData;
struct CoTask {
LIST_INTERFACE(CoTask);
koishi_coroutine_t ko;
// Pointer to a control structure on the coroutine's stack
CoTaskData *data;
uint32_t unique_id;
#ifdef CO_TASK_DEBUG
char debug_label[256];
#endif
};
#ifdef CO_TASK_STATS
typedef struct CoTaskStats {
size_t num_tasks_allocated;
size_t num_tasks_in_use;
size_t num_switches_this_frame;
size_t peak_stack_usage;
} CoTaskStats;
extern CoTaskStats cotask_stats;
#define STAT_VAL(name) (cotask_stats.name)
#define STAT_VAL_SET(name, value) ((cotask_stats.name) = (value))
// enable stack usage tracking (loose)
#ifndef _WIN32
// NOTE: disabled by default because of heavy performance overhead under ASan
// #define CO_TASK_STATS_STACK
#endif
#else // CO_TASK_STATS
#define STAT_VAL(name) ((void)0)
#define STAT_VAL_SET(name, value) ((void)0)
#endif // CO_TASK_STATS
#define STAT_VAL_ADD(name, value) STAT_VAL_SET(name, STAT_VAL(name) + (value))
typedef struct CoTaskHeapMemChunk {
struct CoTaskHeapMemChunk *next;
alignas(MEM_ALLOC_ALIGNMENT) char data[];
} CoTaskHeapMemChunk;
struct CoTaskData {
LIST_INTERFACE(CoTaskData);
CoTask *task;
CoSched *sched;
CoTaskData *master; // AKA supertask
LIST_ANCHOR(CoTaskData) slaves; // AKA subtasks
BoxedEntity bound_ent;
CoTaskEvents events;
bool finalizing;
struct {
CoWaitResult result;
union {
struct {
int remaining;
} delay;
struct {
CoEvent *pevent;
CoEventSnapshot snapshot;
} event;
};
uint wait_type;
} wait;
struct {
EntityInterface *ent;
CoEvent *events;
uint num_events;
} hosted;
struct {
CoTaskHeapMemChunk *onheap_alloc_head;
char *onstack_alloc_head;
alignas(MEM_ALLOC_ALIGNMENT) char onstack_alloc_area[MEM_AREA_SIZE];
} mem;
};
typedef struct CoTaskInitData {
CoTask *task;
CoSched *sched;
CoTaskFunc func;
void *func_arg;
size_t func_arg_size;
CoTaskData *master_task_data;
} CoTaskInitData;
void cotask_global_init(void);
void cotask_global_shutdown(void);
CoTask *cotask_new_internal(koishi_entrypoint_t entry_point);
void *cotask_resume_internal(CoTask *task, void *arg);
CoTask *cotask_unbox_notnull(BoxedTask box);
void cotask_force_finish(CoTask *task);
void *cotask_entry(void *varg);
attr_returns_nonnull attr_nonnull_all
INLINE CoTaskData *cotask_get_data(CoTask *task) {
CoTaskData *data = task->data;
assume(data != NULL);
assume(data->task == task);
return data;
}

21
src/coroutine/internal.h Normal file
View file

@ -0,0 +1,21 @@
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#pragma once
#include "taisei.h"
#include "log.h"
#include "util.h"
#include "coroutine.h"
#include "cotask.h"
#include "coevent.h"
#include "cosched.h"
#include "cotask_internal.h"
#include "coevent_internal.h"

8
src/coroutine/meson.build Normal file
View file

@ -0,0 +1,8 @@
coroutine_src = files(
'coevent.c',
'coroutine.c',
'cosched.c',
'cotask.c',
'taskdsl.c',
)

10
src/coroutine/taskdsl.c Normal file
View file

@ -0,0 +1,10 @@
#include "taskdsl.h"
DEFINE_EXTERN_TASK(_cancel_task_helper) {
CoTask *task = cotask_unbox(ARGS.task);
if(task) {
cotask_cancel(task);
}
}

418
src/coroutine/taskdsl.h Normal file
View file

@ -0,0 +1,418 @@
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#pragma once
#include "taisei.h"
#include "entity.h"
#include "cotask.h"
#include "coevent.h"
#include "cosched.h"
#define TASK_ARGS_TYPE(name) COARGS_##name
#define TASK_ARGSDELAY_NAME(name) COARGSDELAY_##name
#define TASK_ARGSDELAY(name) struct TASK_ARGSDELAY_NAME(name)
#define TASK_ARGSCOND_NAME(name) COARGSCOND_##name
#define TASK_ARGSCOND(name) struct TASK_ARGSCOND_NAME(name)
#define TASK_IFACE_NAME(iface, suffix) COTASKIFACE_##iface##_##suffix
#define TASK_IFACE_ARGS_TYPE(iface) TASK_IFACE_NAME(iface, ARGS)
#define TASK_IFACE_ARGS_SIZED_PTR_TYPE(iface) TASK_IFACE_NAME(iface, ARGS_SPTR)
#define TASK_INDIRECT_TYPE(iface) TASK_IFACE_NAME(iface, HANDLE)
#define TASK_IFACE_SARGS(iface, ...) \
((TASK_IFACE_ARGS_SIZED_PTR_TYPE(iface)) { \
.size = sizeof(TASK_IFACE_ARGS_TYPE(iface)), \
.ptr = (&(TASK_IFACE_ARGS_TYPE(iface)) { __VA_ARGS__ }) \
})
#define DEFINE_TASK_INTERFACE(iface, argstruct) \
typedef TASK_ARGS_STRUCT(argstruct) TASK_IFACE_ARGS_TYPE(iface); \
typedef struct { \
TASK_IFACE_ARGS_TYPE(iface) *ptr; \
size_t size; \
} TASK_IFACE_ARGS_SIZED_PTR_TYPE(iface); \
typedef struct { \
CoTaskFunc _cotask_##iface##_thunk; \
} TASK_INDIRECT_TYPE(iface) /* require semicolon */
#define DEFINE_TASK_INTERFACE_WITH_BASE(iface, ibase, argstruct) \
typedef struct { \
TASK_IFACE_ARGS_TYPE(ibase) base; \
TASK_ARGS_STRUCT(argstruct); \
} TASK_IFACE_ARGS_TYPE(iface); \
typedef struct { \
union { \
TASK_IFACE_ARGS_SIZED_PTR_TYPE(ibase) base; \
struct { \
TASK_IFACE_ARGS_TYPE(iface) *ptr; \
size_t size; \
}; \
}; \
} TASK_IFACE_ARGS_SIZED_PTR_TYPE(iface); \
typedef struct { \
union { \
TASK_INDIRECT_TYPE(ibase) base; \
CoTaskFunc _cotask_##iface##_thunk; \
CoTaskFunc _cotask_##ibase##_thunk; \
}; \
} TASK_INDIRECT_TYPE(iface) /* require semicolon */
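/*
 * Usage sketch (hypothetical interface names):
 *
 *     DEFINE_TASK_INTERFACE(ShootPattern, { BoxedEntity owner; int power; });
 *     DEFINE_TASK_INTERFACE_WITH_BASE(TimedShootPattern, ShootPattern, { int duration; });
 *
 * Tasks conforming to ShootPattern can then be stored and invoked through
 * TASK_INDIRECT_TYPE(ShootPattern) handles; see TASK_INDIRECT further below.
 */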
#define TASK_INDIRECT_TYPE_ALIAS(task) TASK_IFACE_NAME(task, HANDLEALIAS)
#define ARGS (*_cotask_args)
#define NO_ARGS attr_deprecated("Use { } instead of NO_ARGS, or omit it entirely") { }
// NOTE: the nested anonymous struct hack allows us to support both of these syntaxes:
// INVOKE_TASK(foo, ENT_BOX(bar));
// INVOKE_TASK(foo, { ENT_BOX(bar) });
#define TASK_ARGS_STRUCT(argstruct) struct { struct argstruct; }
#define TASK_COMMON_PRIVATE_DECLARATIONS(name) \
/* user-defined task body */ \
static void COTASK_##name(TASK_ARGS_TYPE(name) *_cotask_args) /* require semicolon */
#define TASK_COMMON_DECLARATIONS(name, argstype, handletype, linkage) \
/* produce warning if the task is never used */ \
linkage char COTASK_UNUSED_CHECK_##name; \
/* type of indirect handle to a compatible task */ \
typedef handletype TASK_INDIRECT_TYPE_ALIAS(name); \
/* user-defined type of args struct */ \
typedef argstype TASK_ARGS_TYPE(name); \
/* type of internal args struct for INVOKE_TASK_DELAYED */ \
struct TASK_ARGSDELAY_NAME(name) { \
int delay; \
/* NOTE: this must be last for interface inheritance to work! */ \
TASK_ARGS_TYPE(name) real_args; \
}; \
/* type of internal args struct for INVOKE_TASK_WHEN */ \
struct TASK_ARGSCOND_NAME(name) { \
CoEvent *event; \
bool unconditional; \
/* NOTE: this must be last for interface inheritance to work! */ \
TASK_ARGS_TYPE(name) real_args; \
}; \
/* task entry point for INVOKE_TASK */ \
attr_unused linkage void *COTASKTHUNK_##name(void *arg, size_t arg_size); \
/* task entry point for INVOKE_TASK_DELAYED */ \
attr_unused linkage void *COTASKTHUNKDELAY_##name(void *arg, size_t arg_size); \
/* task entry point for INVOKE_TASK_WHEN and INVOKE_TASK_AFTER */ \
attr_unused linkage void *COTASKTHUNKCOND_##name(void *arg, size_t arg_size) /* require semicolon */
#define TASK_COMMON_THUNK_DEFINITIONS(name, linkage) \
/* task entry point for INVOKE_TASK */ \
attr_unused linkage void *COTASKTHUNK_##name(void *arg, size_t arg_size) { \
/* copy args to our coroutine stack so that they're valid after caller returns */ \
TASK_ARGS_TYPE(name) args_copy = { }; \
assume(sizeof(args_copy) >= arg_size); \
memcpy(&args_copy, arg, arg_size); \
/* call body */ \
COTASK_##name(&args_copy); \
/* exit coroutine */ \
return NULL; \
} \
/* task entry point for INVOKE_TASK_DELAYED */ \
attr_unused linkage void *COTASKTHUNKDELAY_##name(void *arg, size_t arg_size) { \
/* copy args to our coroutine stack so that they're valid after caller returns */ \
TASK_ARGSDELAY(name) args_copy = { }; \
assume(sizeof(args_copy) >= arg_size); \
memcpy(&args_copy, arg, arg_size); \
/* if delay is negative, bail out early */ \
if(args_copy.delay < 0) return NULL; \
/* wait out the delay */ \
WAIT(args_copy.delay); \
/* call body */ \
COTASK_##name(&args_copy.real_args); \
/* exit coroutine */ \
return NULL; \
} \
/* task entry point for INVOKE_TASK_WHEN and INVOKE_TASK_AFTER */ \
attr_unused linkage void *COTASKTHUNKCOND_##name(void *arg, size_t arg_size) { \
/* copy args to our coroutine stack so that they're valid after caller returns */ \
TASK_ARGSCOND(name) args_copy = { }; \
assume(sizeof(args_copy) >= arg_size); \
memcpy(&args_copy, arg, arg_size); \
/* wait for event, and if it wasn't canceled (or if we want to run unconditionally)... */ \
if(WAIT_EVENT(args_copy.event).event_status == CO_EVENT_SIGNALED || args_copy.unconditional) { \
/* call body */ \
COTASK_##name(&args_copy.real_args); \
} \
/* exit coroutine */ \
return NULL; \
}
#define TASK_COMMON_BEGIN_BODY_DEFINITION(name, linkage) \
linkage void COTASK_##name(TASK_ARGS_TYPE(name) *_cotask_args)
#define DECLARE_TASK_EXPLICIT(name, argstype, handletype, linkage) \
TASK_COMMON_DECLARATIONS(name, argstype, handletype, linkage) /* require semicolon */
#define DEFINE_TASK_EXPLICIT(name, linkage) \
TASK_COMMON_PRIVATE_DECLARATIONS(name); \
TASK_COMMON_THUNK_DEFINITIONS(name, linkage) \
/* begin task body definition */ \
TASK_COMMON_BEGIN_BODY_DEFINITION(name, linkage)
/* declare a task with static linkage (needs to be defined later) */
#define DECLARE_TASK(name, ...) \
MACROHAX_OVERLOAD_HASARGS(DECLARE_TASK_, __VA_ARGS__)(name, ##__VA_ARGS__)
#define DECLARE_TASK_1(name, ...) \
DECLARE_TASK_EXPLICIT(name, TASK_ARGS_STRUCT(__VA_ARGS__), void, static) /* require semicolon */
#define DECLARE_TASK_0(name) DECLARE_TASK_1(name, { })
/* declare a task with static linkage that conforms to a common interface (needs to be defined later) */
#define DECLARE_TASK_WITH_INTERFACE(name, iface) \
DECLARE_TASK_EXPLICIT(name, TASK_IFACE_ARGS_TYPE(iface), TASK_INDIRECT_TYPE(iface), static) /* require semicolon */
/* define a task with static linkage (needs to be declared first) */
#define DEFINE_TASK(name) \
DEFINE_TASK_EXPLICIT(name, static)
/* declare and define a task with static linkage */
#define TASK(name, ...) \
DECLARE_TASK(name, ##__VA_ARGS__); \
DEFINE_TASK(name)
/* declare and define a task with static linkage that conforms to a common interface */
#define TASK_WITH_INTERFACE(name, iface) \
DECLARE_TASK_WITH_INTERFACE(name, iface); \
DEFINE_TASK(name)
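/*
 * Usage sketch (hypothetical task names; ShootPattern as in the interface
 * sketch above):
 *
 *     TASK(fade_out, { BoxedEntity ent; int duration; }) {
 *         // task body; ARGS.ent and ARGS.duration are available here
 *     }
 *
 *     TASK_WITH_INTERFACE(spiral_shot, ShootPattern) {
 *         // args struct is dictated by the ShootPattern interface
 *     }
 */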
/* declare a task with extern linkage (needs to be defined later) */
#define DECLARE_EXTERN_TASK(name, ...)\
MACROHAX_OVERLOAD_HASARGS(DECLARE_EXTERN_TASK_, __VA_ARGS__)(name, ##__VA_ARGS__)
#define DECLARE_EXTERN_TASK_1(name, ...) \
DECLARE_TASK_EXPLICIT(name, TASK_ARGS_STRUCT(__VA_ARGS__), void, extern) /* require semicolon */
#define DECLARE_EXTERN_TASK_0(name) \
DECLARE_EXTERN_TASK_1(name, { })
/* declare a task with extern linkage that conforms to a common interface (needs to be defined later) */
#define DECLARE_EXTERN_TASK_WITH_INTERFACE(name, iface) \
DECLARE_TASK_EXPLICIT(name, TASK_IFACE_ARGS_TYPE(iface), TASK_INDIRECT_TYPE(iface), extern) /* require semicolon */
/* define a task with extern linkage (needs to be declared first) */
#define DEFINE_EXTERN_TASK(name) \
char COTASK_UNUSED_CHECK_##name; \
DEFINE_TASK_EXPLICIT(name, extern)
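/*
 * Usage sketch: extern tasks are declared in a header and defined in exactly
 * one source file (names here are hypothetical).
 *
 *     // common_fade.h
 *     DECLARE_EXTERN_TASK(common_fade_out, { BoxedEntity ent; int duration; });
 *
 *     // common_fade.c
 *     DEFINE_EXTERN_TASK(common_fade_out) {
 *         // task body; use ARGS.ent, ARGS.duration
 *     }
 */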
/*
* INVOKE_TASK(task_name, args...)
* INVOKE_SUBTASK(task_name, args...)
*
* This is the most basic way to start an asynchronous task. Control is transferred
* to the new task immediately when this is called, and returns to the call site
* when the task yields or terminates.
*
* Args are optional. They are treated simply as an initializer for the task's
* args struct, so it's possible to use designated initializer syntax to emulate
* "keyword arguments", etc.
*
* INVOKE_SUBTASK is identical to INVOKE_TASK, except the spawned task will attach
* to the currently executing task, becoming its "sub-task" or "slave". When a
* task finishes executing, all of its sub-tasks are also terminated recursively.
*
* Other INVOKE_ macros with a _SUBTASK version behave analogously.
*/
#define INVOKE_TASK(_task, ...) \
_internal_INVOKE_TASK(THIS_SCHED, cosched_new_task, _task, ##__VA_ARGS__)
#define INVOKE_SUBTASK(_task, ...) \
_internal_INVOKE_TASK(THIS_SCHED, cosched_new_subtask, _task, ##__VA_ARGS__)
#define SCHED_INVOKE_TASK(_sched, _task, ...) \
_internal_INVOKE_TASK(_sched, cosched_new_task, _task, ##__VA_ARGS__)
#define _internal_INVOKE_TASK(sched, task_constructor, name, ...) ( \
(void)COTASK_UNUSED_CHECK_##name, \
task_constructor( \
sched, \
COTASKTHUNK_##name, \
(&(TASK_ARGS_TYPE(name)) { __VA_ARGS__ }), \
sizeof(TASK_ARGS_TYPE(name)), \
#name \
) \
)
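/*
 * Usage sketch (hypothetical task and entity names):
 *
 *     INVOKE_TASK(fade_out, { .ent = ENT_BOX(e), .duration = 60 });
 *     INVOKE_SUBTASK(fade_out, .ent = ENT_BOX(e), .duration = 60);
 *
 * Both forms initialize the task's args struct; designated initializers act as
 * "keyword arguments". The subtask variant is terminated automatically when
 * the invoking task finishes.
 */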
/*
* INVOKE_TASK_DELAYED(delay, task_name, args...)
* INVOKE_SUBTASK_DELAYED(delay, task_name, args...)
*
* Like INVOKE_TASK, but the task will yield <delay> times before executing the
* actual task body.
*
* If <delay> is negative, the task will not be invoked. The arguments are still
* evaluated, however. (Caveat: in the current implementation, a task is spawned
* either way; it just aborts early without executing the body if the delay is
* negative, so there's some overhead).
*/
#define INVOKE_TASK_DELAYED(_delay, _task, ...) \
_internal_INVOKE_TASK_DELAYED(THIS_SCHED, cosched_new_task, _delay, _task, ##__VA_ARGS__)
#define INVOKE_SUBTASK_DELAYED(_delay, _task, ...) \
_internal_INVOKE_TASK_DELAYED(THIS_SCHED, cosched_new_subtask, _delay, _task, ##__VA_ARGS__)
#define SCHED_INVOKE_TASK_DELAYED(_sched, _delay, _task, ...) \
_internal_INVOKE_TASK_DELAYED(_sched, cosched_new_task, _delay, _task, ##__VA_ARGS__)
#define _internal_INVOKE_TASK_DELAYED(sched, task_constructor, _delay, name, ...) ( \
(void)COTASK_UNUSED_CHECK_##name, \
task_constructor( \
sched, \
COTASKTHUNKDELAY_##name, \
(&(TASK_ARGSDELAY(name)) { \
.real_args = { __VA_ARGS__ }, \
.delay = (_delay) \
}), \
sizeof(TASK_ARGSDELAY(name)), \
#name \
) \
)
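/*
 * Usage sketch (hypothetical names): start fade_out after 120 frames.
 *
 *     INVOKE_TASK_DELAYED(120, fade_out, .ent = ENT_BOX(e), .duration = 60);
 */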
/*
* INVOKE_TASK_WHEN(event, task_name, args...)
* INVOKE_SUBTASK_WHEN(event, task_name, args...)
*
* INVOKE_TASK_AFTER(event, task_name, args...)
* INVOKE_SUBTASK_AFTER(event, task_name, args...)
*
* Both INVOKE_TASK_WHEN and INVOKE_TASK_AFTER spawn a task that waits for an
* event to occur. The difference is that _WHEN aborts the task if the event has
* been canceled, but _AFTER proceeds to execute it unconditionally.
*
* <event> is a pointer to a CoEvent struct.
*/
#define INVOKE_TASK_WHEN(_event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(THIS_SCHED, cosched_new_task, false, _event, _task, ##__VA_ARGS__)
#define INVOKE_SUBTASK_WHEN(_event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(THIS_SCHED, cosched_new_subtask, false, _event, _task, ##__VA_ARGS__)
#define SCHED_INVOKE_TASK_WHEN(_sched, _event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(_sched, cosched_new_task, false, _event, _task, ##__VA_ARGS__)
#define INVOKE_TASK_AFTER(_event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(THIS_SCHED, cosched_new_task, true, _event, _task, ## __VA_ARGS__)
#define INVOKE_SUBTASK_AFTER(_event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(THIS_SCHED, cosched_new_subtask, true, _event, _task, ## __VA_ARGS__)
#define SCHED_INVOKE_TASK_AFTER(_sched, _event, _task, ...) \
_internal_INVOKE_TASK_ON_EVENT(_sched, cosched_new_task, true, _event, _task, ##__VA_ARGS__)
#define _internal_INVOKE_TASK_ON_EVENT(sched, task_constructor, is_unconditional, _event, name, ...) ( \
(void)COTASK_UNUSED_CHECK_##name, \
task_constructor( \
sched, \
COTASKTHUNKCOND_##name, \
(&(TASK_ARGSCOND(name)) { \
.real_args = { __VA_ARGS__ }, \
.event = (_event), \
.unconditional = is_unconditional \
}), \
sizeof(TASK_ARGSCOND(name)), \
#name \
) \
)
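/*
 * Usage sketch (hypothetical event and task names):
 *
 *     // runs only if the event is signaled; skipped if it gets canceled
 *     INVOKE_TASK_WHEN(&boss->events.defeated, drop_items, { ENT_BOX(boss) });
 *
 *     // runs once the event is either signaled or canceled
 *     INVOKE_TASK_AFTER(&stage->events.cleared, play_victory_jingle);
 */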
/*
* CANCEL_TASK_WHEN(event, boxed_task)
* CANCEL_TASK_AFTER(event, boxed_task)
*
* Invokes an auxiliary task that will wait for an event, and then cancel another
* running task. The difference between WHEN and AFTER is the same as in
* INVOKE_TASK_WHEN/INVOKE_TASK_AFTER -- this is a simple wrapper around those.
*
* <event> is a pointer to a CoEvent struct.
* <boxed_task> is a BoxedTask struct; use cotask_box to obtain one from a pointer.
* You can also use the THIS_TASK macro to refer to the currently running task.
*/
#define CANCEL_TASK_WHEN(_event, _task) INVOKE_TASK_WHEN(_event, _cancel_task_helper, _task)
#define CANCEL_TASK_AFTER(_event, _task) INVOKE_TASK_AFTER(_event, _cancel_task_helper, _task)
DECLARE_EXTERN_TASK(_cancel_task_helper, { BoxedTask task; });
#define CANCEL_TASK(boxed_task) cotask_cancel(cotask_unbox(boxed_task))
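/*
 * Usage sketch: cancel the currently running task once a (hypothetical) event
 * fires. THIS_TASK is defined below; the helper task simply waits for the
 * event and then calls cotask_cancel() on the boxed task.
 *
 *     CANCEL_TASK_WHEN(&boss->events.defeated, THIS_TASK);
 */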
#define TASK_INDIRECT(iface, task) ( \
(void)COTASK_UNUSED_CHECK_##task, \
(TASK_INDIRECT_TYPE_ALIAS(task)) { ._cotask_##iface##_thunk = COTASKTHUNK_##task } \
)
#define TASK_INDIRECT_INIT(iface, task) \
{ ._cotask_##iface##_thunk = COTASKTHUNK_##task }
#define INVOKE_TASK_INDIRECT_(sched, task_constructor, iface, taskhandle, ...) ( \
task_constructor( \
sched, \
taskhandle._cotask_##iface##_thunk, \
(&(TASK_IFACE_ARGS_TYPE(iface)) { __VA_ARGS__ }), \
sizeof(TASK_IFACE_ARGS_TYPE(iface)), \
"<indirect:"#iface">" \
) \
)
#define SCHED_INVOKE_TASK_INDIRECT(_sched, _iface, _handle, ...) \
INVOKE_TASK_INDIRECT_(_sched, cosched_new_task, _iface, _handle, ##__VA_ARGS__)
#define INVOKE_TASK_INDIRECT(_iface, _handle, ...) \
INVOKE_TASK_INDIRECT_(THIS_SCHED, cosched_new_task, _iface, _handle, ##__VA_ARGS__)
#define INVOKE_SUBTASK_INDIRECT(_iface, _handle, ...) \
INVOKE_TASK_INDIRECT_(THIS_SCHED, cosched_new_subtask, _iface, _handle, ##__VA_ARGS__)
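/*
 * Usage sketch (hypothetical names, continuing the ShootPattern examples from
 * above): store a handle to any task conforming to an interface, then invoke
 * it later without knowing which concrete task it refers to.
 *
 *     TASK_INDIRECT_TYPE(ShootPattern) h = TASK_INDIRECT(ShootPattern, spiral_shot);
 *     INVOKE_TASK_INDIRECT(ShootPattern, h, { .owner = ENT_BOX(e), .power = 3 });
 */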
#define THIS_TASK cotask_box(cotask_active())
#define TASK_EVENTS(task) cotask_get_events(cotask_unbox(task))
#define TASK_MALLOC(size) cotask_malloc(cotask_active(), size)
#define THIS_SCHED cotask_get_sched(cotask_active())
#define TASK_HOST_ENT(ent_struct_type) \
ENT_CAST(cotask_host_entity(cotask_active(), sizeof(ent_struct_type), ENT_TYPE_ID(ent_struct_type)), ent_struct_type)
#define TASK_HOST_EVENTS(events_array) \
cotask_host_events(cotask_active(), sizeof(events_array)/sizeof(CoEvent), &((events_array)._first_event_))
#define YIELD cotask_yield(NULL)
#define WAIT(delay) cotask_wait(delay)
#define WAIT_EVENT(e) cotask_wait_event(e)
#define WAIT_EVENT_OR_DIE(e) cotask_wait_event_or_die(e)
#define WAIT_EVENT_ONCE(e) cotask_wait_event_once(e)
#define STALL cotask_wait(INT_MAX)
#define AWAIT_SUBTASKS cotask_wait_subtasks()
#define NOT_NULL_OR_DIE(expr) ({ \
__auto_type _not_null_ptr = (expr); \
if(_not_null_ptr == NULL) { \
cotask_cancel(NOT_NULL(cotask_active())); \
UNREACHABLE; \
} \
NOT_NULL(_not_null_ptr); \
})
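/*
 * Usage sketch (hypothetical task) combining the wait primitives above:
 *
 *     TASK(example_timeline, { CoEvent *start; }) {
 *         WAIT(60);                       // idle for 60 frames
 *         WAIT_EVENT_OR_DIE(ARGS.start);  // cancel self unless the event is signaled
 *         for(int i = 0; i < 10; ++i) {
 *             // one step of work per frame
 *             YIELD;
 *         }
 *         AWAIT_SUBTASKS;                 // block until all subtasks terminate
 *     }
 *
 * NOT_NULL_OR_DIE(expr) is useful for lookups that must succeed: if expr is
 * NULL, the current task cancels itself instead of crashing.
 */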
// first arg of the generated function needs to be the ent, because ENT_UNBOXED_DISPATCH_FUNCTION dispatches on first arg.
#define _cotask_emit_bindfunc(typename, ...) \
INLINE typename *_cotask_bind_to_entity_##typename(typename *ent, CoTask *task) { \
return ENT_CAST((cotask_bind_to_entity)( \
task, \
ent ? UNION_CAST(typename*, EntityInterface*, ent) : NULL), \
typename \
); \
}
ENTITIES(_cotask_emit_bindfunc,)
#undef _cotask_emit_bindfunc
INLINE EntityInterface *_cotask_bind_to_entity_Entity(EntityInterface *ent, CoTask *task) {
return (cotask_bind_to_entity)(task, ent);
}
#define cotask_bind_to_entity(task, ent) \
ENT_UNBOXED_DISPATCH_FUNCTION(_cotask_bind_to_entity_, ent, task)
#define TASK_BIND(ent_or_box) cotask_bind_to_entity(cotask_active(), ENT_UNBOX_OR_PASSTHROUGH(ent_or_box))
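/*
 * Usage sketch (hypothetical names): TASK_BIND ties the task's lifetime to an
 * entity and returns the unboxed, correctly typed pointer; the task is
 * finished automatically once the entity despawns.
 *
 *     TASK(orbit, { BoxedProjectile p; }) {
 *         Projectile *p = TASK_BIND(ARGS.p);
 *         for(;;) {
 *             // animate p here; this loop ends when p despawns
 *             YIELD;
 *         }
 *     }
 */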

src/meson.build
View file

@ -60,7 +60,6 @@ taisei_src = files(
'color.c',
'common_tasks.c',
'config.c',
- 'coroutine.c',
'credits.c',
'dialog.c',
'difficulty.c',
@ -124,6 +123,7 @@ if host_machine.system() == 'nx'
endif
subdir('audio')
+ subdir('coroutine')
subdir('cutscenes')
subdir('dialog')
subdir('eventloop')
@ -142,6 +142,7 @@ subdir('vfs')
taisei_src += [
audio_src,
+ coroutine_src,
cutscenes_src,
dialog_src,
eventloop_src,