Diffstat (limited to 'thread_sync.c')
-rw-r--r--  thread_sync.c  1880
1 file changed, 912 insertions, 968 deletions
diff --git a/thread_sync.c b/thread_sync.c
index 6778e3baa9..e3916c97cb 100644
--- a/thread_sync.c
+++ b/thread_sync.c
@@ -1,66 +1,115 @@
/* included by thread.c */
+#include "ccan/list/list.h"
+#include "builtin.h"
-VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
-VALUE rb_eClosedQueueError;
+static VALUE rb_cMutex, rb_eClosedQueueError;
/* Mutex */
-
typedef struct rb_mutex_struct {
- rb_nativethread_lock_t lock;
- rb_nativethread_cond_t cond;
- struct rb_thread_struct volatile *th;
+ rb_serial_t ec_serial;
+ rb_thread_t *th; // even if the fiber is collected, we might need access to the thread in mutex_free
struct rb_mutex_struct *next_mutex;
- int cond_waiting;
- int allow_trap;
+ struct ccan_list_head waitq; /* protected by GVL */
} rb_mutex_t;
+/* sync_waiter is always on-stack */
+struct sync_waiter {
+ VALUE self;
+ rb_thread_t *th;
+ rb_fiber_t *fiber;
+ struct ccan_list_node node;
+};
+
+static inline rb_fiber_t*
+nonblocking_fiber(rb_fiber_t *fiber)
+{
+ if (rb_fiberptr_blocking(fiber)) {
+ return NULL;
+ }
+
+ return fiber;
+}
+
+struct queue_sleep_arg {
+ VALUE self;
+ VALUE timeout;
+ rb_hrtime_t end;
+};
+
+#define MUTEX_ALLOW_TRAP FL_USER1
+
+static void
+sync_wakeup(struct ccan_list_head *head, long max)
+{
+ RUBY_DEBUG_LOG("max:%ld", max);
+
+ struct sync_waiter *cur = 0, *next;
+
+ ccan_list_for_each_safe(head, cur, next, node) {
+ ccan_list_del_init(&cur->node);
+
+ if (cur->th->status != THREAD_KILLED) {
+ if (cur->th->scheduler != Qnil && cur->fiber) {
+ rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
+ }
+ else {
+ RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(cur->th));
+ rb_threadptr_interrupt(cur->th);
+ cur->th->status = THREAD_RUNNABLE;
+ }
+
+ if (--max == 0) return;
+ }
+ }
+}
+
+static void
+wakeup_one(struct ccan_list_head *head)
+{
+ sync_wakeup(head, 1);
+}
+
+static void
+wakeup_all(struct ccan_list_head *head)
+{
+ sync_wakeup(head, LONG_MAX);
+}
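
Every `struct sync_waiter` above lives on the stack of the thread that blocks: the waiter links itself into the wait queue, sleeps, and is unlinked before its stack frame unwinds, so no heap allocation or GC marking is needed. A minimal sketch of that idiom, assuming the CCAN intrusive-list API this file includes; `block_current_thread` and `wake_thread` are hypothetical stand-ins for `native_sleep` and `rb_threadptr_interrupt`:

/* Sketch only: on-stack intrusive waiter, in the style of sync_wakeup(). */
struct waiter {
    struct ccan_list_node node;           /* intrusive link; no heap allocation */
    int woken;
};

void block_current_thread(void);          /* hypothetical: cf. native_sleep() */
void wake_thread(struct waiter *w);       /* hypothetical: cf. rb_threadptr_interrupt() */

static void
wait_on(struct ccan_list_head *waitq)
{
    struct waiter w = { .woken = 0 };     /* lives in this stack frame */
    ccan_list_add_tail(waitq, &w.node);   /* publish while holding the GVL */
    while (!w.woken) {
        block_current_thread();           /* wakeups may be spurious */
    }
    /* the waker already unlinked us with ccan_list_del_init() */
}

static void
wake_one(struct ccan_list_head *waitq)
{
    struct waiter *cur = 0, *next;
    ccan_list_for_each_safe(waitq, cur, next, node) {
        ccan_list_del_init(&cur->node);   /* unlink before waking */
        cur->woken = 1;
        wake_thread(cur);
        return;                           /* wake at most one waiter */
    }
}
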
+
+#if defined(HAVE_WORKING_FORK)
static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
-static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
+#endif
+static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_serial_t ec_serial);
-/*
- * Document-class: Mutex
- *
- * Mutex implements a simple semaphore that can be used to coordinate access to
- * shared data from multiple concurrent threads.
- *
- * Example:
- *
- * require 'thread'
- * semaphore = Mutex.new
- *
- * a = Thread.new {
- * semaphore.synchronize {
- * # access shared resource
- * }
- * }
- *
- * b = Thread.new {
- * semaphore.synchronize {
- * # access shared resource
- * }
- * }
- *
- */
+static size_t
+rb_mutex_num_waiting(rb_mutex_t *mutex)
+{
+ struct sync_waiter *w = 0;
+ size_t n = 0;
+
+ ccan_list_for_each(&mutex->waitq, w, node) {
+ n++;
+ }
-#define GetMutexPtr(obj, tobj) \
- TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
+ return n;
+}
-#define mutex_mark NULL
+rb_thread_t* rb_fiber_threadptr(const rb_fiber_t *fiber);
+
+static bool
+mutex_locked_p(rb_mutex_t *mutex)
+{
+ return mutex->ec_serial != 0;
+}
static void
mutex_free(void *ptr)
{
- if (ptr) {
- rb_mutex_t *mutex = ptr;
- if (mutex->th) {
- /* rb_warn("free locked mutex"); */
- const char *err = rb_mutex_unlock_th(mutex, mutex->th);
- if (err) rb_bug("%s", err);
- }
- native_mutex_destroy(&mutex->lock);
- native_cond_destroy(&mutex->cond);
+ rb_mutex_t *mutex = ptr;
+ if (mutex_locked_p(mutex)) {
+ const char *err = rb_mutex_unlock_th(mutex, mutex->th, 0);
+ if (err) rb_bug("%s", err);
}
ruby_xfree(ptr);
}
@@ -68,24 +117,29 @@ mutex_free(void *ptr)
static size_t
mutex_memsize(const void *ptr)
{
- return ptr ? sizeof(rb_mutex_t) : 0;
+ return sizeof(rb_mutex_t);
}
static const rb_data_type_t mutex_data_type = {
"mutex",
- {mutex_mark, mutex_free, mutex_memsize,},
+ {NULL, mutex_free, mutex_memsize,},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
+static rb_mutex_t *
+mutex_ptr(VALUE obj)
+{
+ rb_mutex_t *mutex;
+
+ TypedData_Get_Struct(obj, rb_mutex_t, &mutex_data_type, mutex);
+
+ return mutex;
+}
+
VALUE
rb_obj_is_mutex(VALUE obj)
{
- if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
- return Qtrue;
- }
- else {
- return Qfalse;
- }
+ return RBOOL(rb_typeddata_is_kind_of(obj, &mutex_data_type));
}
static VALUE
@@ -95,21 +149,9 @@ mutex_alloc(VALUE klass)
rb_mutex_t *mutex;
obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
- native_mutex_initialize(&mutex->lock);
- native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
- return obj;
-}
-/*
- * call-seq:
- * Mutex.new -> mutex
- *
- * Creates a new Mutex
- */
-static VALUE
-mutex_initialize(VALUE self)
-{
- return self;
+ ccan_list_head_init(&mutex->waitq);
+ return obj;
}
VALUE
@@ -118,244 +160,347 @@ rb_mutex_new(void)
return mutex_alloc(rb_cMutex);
}
-/*
- * call-seq:
- * mutex.locked? -> true or false
- *
- * Returns +true+ if this lock is currently held by some thread.
- */
VALUE
rb_mutex_locked_p(VALUE self)
{
- rb_mutex_t *mutex;
- GetMutexPtr(self, mutex);
- return mutex->th ? Qtrue : Qfalse;
+ rb_mutex_t *mutex = mutex_ptr(self);
+
+ return RBOOL(mutex_locked_p(mutex));
}
static void
-mutex_locked(rb_thread_t *th, VALUE self)
+thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex)
{
- rb_mutex_t *mutex;
- GetMutexPtr(self, mutex);
-
- if (th->keeping_mutexes) {
- mutex->next_mutex = th->keeping_mutexes;
+ RUBY_ASSERT(!mutex->next_mutex);
+ if (thread->keeping_mutexes) {
+ mutex->next_mutex = thread->keeping_mutexes;
}
- th->keeping_mutexes = mutex;
+
+ thread->keeping_mutexes = mutex;
}
-/*
- * call-seq:
- * mutex.try_lock -> true or false
- *
- * Attempts to obtain the lock and returns immediately. Returns +true+ if the
- * lock was granted.
- */
-VALUE
-rb_mutex_trylock(VALUE self)
+static void
+thread_mutex_remove(rb_thread_t *thread, rb_mutex_t *mutex)
{
- rb_mutex_t *mutex;
- VALUE locked = Qfalse;
- GetMutexPtr(self, mutex);
+ rb_mutex_t **keeping_mutexes = &thread->keeping_mutexes;
- native_mutex_lock(&mutex->lock);
- if (mutex->th == 0) {
- rb_thread_t *th = GET_THREAD();
- mutex->th = th;
- locked = Qtrue;
-
- mutex_locked(th, self);
+ while (*keeping_mutexes && *keeping_mutexes != mutex) {
+ // Move to the next mutex in the list:
+ keeping_mutexes = &(*keeping_mutexes)->next_mutex;
}
- native_mutex_unlock(&mutex->lock);
-
- return locked;
-}
-
-static int
-lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
-{
- int interrupted = 0;
- int err = 0;
-
- mutex->cond_waiting++;
- for (;;) {
- if (!mutex->th) {
- mutex->th = th;
- break;
- }
- if (RUBY_VM_INTERRUPTED(th)) {
- interrupted = 1;
- break;
- }
- if (err == ETIMEDOUT) {
- interrupted = 2;
- break;
- }
-
- if (timeout_ms) {
- struct timespec timeout_rel;
- struct timespec timeout;
-
- timeout_rel.tv_sec = 0;
- timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
- timeout = native_cond_timeout(&mutex->cond, timeout_rel);
- err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
- }
- else {
- native_cond_wait(&mutex->cond, &mutex->lock);
- err = 0;
- }
+
+ if (*keeping_mutexes) {
+ *keeping_mutexes = mutex->next_mutex;
+ mutex->next_mutex = NULL;
}
- mutex->cond_waiting--;
+}
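
The removal above walks a `rb_mutex_t **` rather than tracking a previous node, so unlinking the head and unlinking an interior element share one code path. A standalone, runnable sketch of the same pointer-to-pointer idiom in plain C:

/* Standalone sketch of the pointer-to-pointer removal used by
 * thread_mutex_remove(): walking a `node **` lets the head and interior
 * cases share one unlink path. */
#include <stdio.h>

struct node { int value; struct node *next; };

static void
remove_node(struct node **head, struct node *target)
{
    struct node **indirect = head;
    while (*indirect && *indirect != target) {
        indirect = &(*indirect)->next;   /* advance to the next link field */
    }
    if (*indirect) {
        *indirect = target->next;        /* splice out, head or interior alike */
        target->next = NULL;
    }
}

int
main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct node *head = &a;
    remove_node(&head, &b);
    for (struct node *n = head; n; n = n->next) printf("%d\n", n->value); /* 1 3 */
    return 0;
}
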
- return interrupted;
+static void
+mutex_set_owner(rb_mutex_t *mutex, rb_thread_t *th, rb_serial_t ec_serial)
+{
+ mutex->th = th;
+ mutex->ec_serial = ec_serial;
}
static void
-lock_interrupt(void *ptr)
+mutex_locked(rb_mutex_t *mutex, rb_thread_t *th, rb_serial_t ec_serial)
{
- rb_mutex_t *mutex = (rb_mutex_t *)ptr;
- native_mutex_lock(&mutex->lock);
- if (mutex->cond_waiting > 0)
- native_cond_broadcast(&mutex->cond);
- native_mutex_unlock(&mutex->lock);
+ mutex_set_owner(mutex, th, ec_serial);
+ thread_mutex_insert(th, mutex);
}
-/*
- * At maximum, only one thread can use cond_timedwait and watch deadlock
- * periodically. Multiple polling thread (i.e. concurrent deadlock check)
- * introduces new race conditions. [Bug #6278] [ruby-core:44275]
- */
-static const rb_thread_t *patrol_thread = NULL;
+static inline bool
+do_mutex_trylock(rb_mutex_t *mutex, rb_thread_t *th, rb_serial_t ec_serial)
+{
+ if (mutex->ec_serial == 0) {
+ RUBY_DEBUG_LOG("%p ok", mutex);
+
+ mutex_locked(mutex, th, ec_serial);
+ return true;
+ }
+ else {
+ RUBY_DEBUG_LOG("%p ng", mutex);
+ return false;
+ }
+}
+
+static VALUE
+rb_mut_trylock(rb_execution_context_t *ec, VALUE self)
+{
+ return RBOOL(do_mutex_trylock(mutex_ptr(self), ec->thread_ptr, rb_ec_serial(ec)));
+}
-/*
- * call-seq:
- * mutex.lock -> self
- *
- * Attempts to grab the lock and waits if it isn't available.
- * Raises +ThreadError+ if +mutex+ was locked by the current thread.
- */
VALUE
-rb_mutex_lock(VALUE self)
+rb_mutex_trylock(VALUE self)
+{
+ return rb_mut_trylock(GET_EC(), self);
+}
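
For C extensions, the exported wrappers keep their historical signatures. A minimal usage sketch relying only on the public API (`rb_mutex_new`, `rb_mutex_trylock`, `rb_mutex_unlock`); the surrounding function is illustrative:

#include <ruby.h>

/* Sketch: attempt a non-blocking lock from extension code. */
static VALUE
try_critical_section(VALUE self)
{
    VALUE mutex = rb_mutex_new();
    if (RTEST(rb_mutex_trylock(mutex))) {
        /* ... touch shared state ... */
        rb_mutex_unlock(mutex);
        return Qtrue;
    }
    return Qfalse;   /* lock was contended; caller may retry or block */
}
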
+
+static VALUE
+mutex_owned_p(rb_serial_t ec_serial, rb_mutex_t *mutex)
+{
+ return RBOOL(mutex->ec_serial == ec_serial);
+}
+
+static VALUE
+call_rb_fiber_scheduler_block(VALUE mutex)
{
- rb_thread_t *th = GET_THREAD();
+ return rb_fiber_scheduler_block(rb_fiber_scheduler_current(), mutex, Qnil);
+}
+
+static VALUE
+delete_from_waitq(VALUE value)
+{
+ struct sync_waiter *sync_waiter = (void *)value;
+ ccan_list_del(&sync_waiter->node);
+
+ return Qnil;
+}
+
+static inline rb_atomic_t threadptr_get_interrupts(rb_thread_t *th);
+
+struct mutex_args {
+ VALUE self;
rb_mutex_t *mutex;
- GetMutexPtr(self, mutex);
+ rb_execution_context_t *ec;
+};
+
+static inline void
+mutex_args_init(struct mutex_args *args, VALUE mutex)
+{
+ args->self = mutex;
+ args->mutex = mutex_ptr(mutex);
+ args->ec = GET_EC();
+}
+
+static VALUE
+do_mutex_lock(struct mutex_args *args, int interruptible_p)
+{
+ VALUE self = args->self;
+ rb_execution_context_t *ec = args->ec;
+ rb_thread_t *th = ec->thread_ptr;
+ rb_fiber_t *fiber = ec->fiber_ptr;
+ rb_serial_t ec_serial = rb_ec_serial(ec);
+ rb_mutex_t *mutex = args->mutex;
+ rb_atomic_t saved_ints = 0;
/* When running trap handler */
- if (!mutex->allow_trap && th->interrupt_mask & TRAP_INTERRUPT_MASK) {
- rb_raise(rb_eThreadError, "can't be called from trap context");
+ if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
+ th->ec->interrupt_mask & TRAP_INTERRUPT_MASK) {
+ rb_raise(rb_eThreadError, "can't be called from trap context");
}
- if (rb_mutex_trylock(self) == Qfalse) {
- if (mutex->th == th) {
- rb_raise(rb_eThreadError, "deadlock; recursive locking");
- }
-
- while (mutex->th != th) {
- int interrupted;
- enum rb_thread_status prev_status = th->status;
- volatile int timeout_ms = 0;
- struct rb_unblock_callback oldubf;
-
- set_unblock_function(th, lock_interrupt, mutex, &oldubf, FALSE);
- th->status = THREAD_STOPPED_FOREVER;
- th->locking_mutex = self;
-
- native_mutex_lock(&mutex->lock);
- th->vm->sleeper++;
- /*
- * Carefully! while some contended threads are in lock_func(),
- * vm->sleepr is unstable value. we have to avoid both deadlock
- * and busy loop.
- */
- if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
- !patrol_thread) {
- timeout_ms = 100;
- patrol_thread = th;
- }
-
- GVL_UNLOCK_BEGIN();
- interrupted = lock_func(th, mutex, (int)timeout_ms);
- native_mutex_unlock(&mutex->lock);
- GVL_UNLOCK_END();
-
- if (patrol_thread == th)
- patrol_thread = NULL;
-
- reset_unblock_function(th, &oldubf);
-
- th->locking_mutex = Qfalse;
- if (mutex->th && interrupted == 2) {
- rb_check_deadlock(th->vm);
- }
- if (th->status == THREAD_STOPPED_FOREVER) {
- th->status = prev_status;
- }
- th->vm->sleeper--;
-
- if (mutex->th == th) mutex_locked(th, self);
-
- if (interrupted) {
- RUBY_VM_CHECK_INTS_BLOCKING(th);
- }
- }
+ if (!do_mutex_trylock(mutex, th, ec_serial)) {
+ if (mutex->ec_serial == ec_serial) {
+ rb_raise(rb_eThreadError, "deadlock; recursive locking");
+ }
+
+ while (mutex->ec_serial != ec_serial) {
+ VM_ASSERT(mutex->ec_serial != 0);
+
+ VALUE scheduler = rb_fiber_scheduler_current();
+ if (scheduler != Qnil) {
+ struct sync_waiter sync_waiter = {
+ .self = self,
+ .th = th,
+ .fiber = nonblocking_fiber(fiber)
+ };
+
+ ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
+
+ rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)&sync_waiter);
+
+ if (!mutex->ec_serial) {
+ mutex_set_owner(mutex, th, ec_serial);
+ }
+ }
+ else {
+ if (!th->vm->thread_ignore_deadlock && mutex->th == th) {
+ rb_raise(rb_eThreadError, "deadlock; lock already owned by another fiber belonging to the same thread");
+ }
+
+ struct sync_waiter sync_waiter = {
+ .self = self,
+ .th = th,
+ .fiber = nonblocking_fiber(fiber),
+ };
+
+ RUBY_DEBUG_LOG("%p wait", mutex);
+
+ // similar code to `sleep_forever`, but
+ // sleep_forever(SLEEP_DEADLOCKABLE) raises an exception.
+ // An ensure clause like the following is needed, but `rb_ensure` is a bit slow.
+ //
+ // begin
+ // sleep_forever(th, SLEEP_DEADLOCKABLE);
+ // ensure
+ // ccan_list_del(&sync_waiter.node);
+ // end
+ enum rb_thread_status prev_status = th->status;
+ th->status = THREAD_STOPPED_FOREVER;
+ rb_ractor_sleeper_threads_inc(th->ractor);
+ rb_check_deadlock(th->ractor);
+
+ RUBY_ASSERT(!th->locking_mutex);
+ th->locking_mutex = self;
+
+ ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
+ {
+ native_sleep(th, NULL);
+ }
+ ccan_list_del(&sync_waiter.node);
+
+ // unlocked by another thread while sleeping
+ if (!mutex->ec_serial) {
+ mutex_set_owner(mutex, th, ec_serial);
+ }
+
+ rb_ractor_sleeper_threads_dec(th->ractor);
+ th->status = prev_status;
+ th->locking_mutex = Qfalse;
+
+ RUBY_DEBUG_LOG("%p wakeup", mutex);
+ }
+
+ if (interruptible_p) {
+ /* release mutex before checking for interrupts...as interrupt checking
+ * code might call rb_raise() */
+ if (mutex->ec_serial == ec_serial) {
+ mutex->th = NULL;
+ mutex->ec_serial = 0;
+ }
+ RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may release mutex */
+ if (!mutex->ec_serial) {
+ mutex_set_owner(mutex, th, ec_serial);
+ }
+ }
+ else {
+ // clear interrupt information
+ if (RUBY_VM_INTERRUPTED(th->ec)) {
+ // reset interrupts
+ if (saved_ints == 0) {
+ saved_ints = threadptr_get_interrupts(th);
+ }
+ else {
+ // ignore additional interrupts
+ threadptr_get_interrupts(th);
+ }
+ }
+ }
+ }
+
+ if (saved_ints) th->ec->interrupt_flag = saved_ints;
+ if (mutex->ec_serial == ec_serial) mutex_locked(mutex, th, ec_serial);
}
+
+ RUBY_DEBUG_LOG("%p locked", mutex);
+
+ // assertion
+ if (mutex_owned_p(ec_serial, mutex) == Qfalse) rb_bug("do_mutex_lock: mutex is not owned.");
+
return self;
}
-/*
- * call-seq:
- * mutex.owned? -> true or false
- *
- * Returns +true+ if this lock is currently held by current thread.
- */
-VALUE
-rb_mutex_owned_p(VALUE self)
+static VALUE
+mutex_lock_uninterruptible(VALUE self)
{
- VALUE owned = Qfalse;
- rb_thread_t *th = GET_THREAD();
- rb_mutex_t *mutex;
+ struct mutex_args args;
+ mutex_args_init(&args, self);
+ return do_mutex_lock(&args, 0);
+}
- GetMutexPtr(self, mutex);
+static VALUE
+rb_mut_lock(rb_execution_context_t *ec, VALUE self)
+{
+ struct mutex_args args = {
+ .self = self,
+ .mutex = mutex_ptr(self),
+ .ec = ec,
+ };
+ return do_mutex_lock(&args, 1);
+}
- if (mutex->th == th)
- owned = Qtrue;
+VALUE
+rb_mutex_lock(VALUE self)
+{
+ struct mutex_args args;
+ mutex_args_init(&args, self);
+ return do_mutex_lock(&args, 1);
+}
- return owned;
+static VALUE
+rb_mut_owned_p(rb_execution_context_t *ec, VALUE self)
+{
+ return mutex_owned_p(rb_ec_serial(ec), mutex_ptr(self));
}
-static const char *
-rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
+VALUE
+rb_mutex_owned_p(VALUE self)
{
- const char *err = NULL;
+ return rb_mut_owned_p(GET_EC(), self);
+}
- native_mutex_lock(&mutex->lock);
+static const char *
+rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_serial_t ec_serial)
+{
+ RUBY_DEBUG_LOG("%p", mutex);
- if (mutex->th == 0) {
- err = "Attempt to unlock a mutex which is not locked";
+ if (mutex->ec_serial == 0) {
+ return "Attempt to unlock a mutex which is not locked";
}
- else if (mutex->th != th) {
- err = "Attempt to unlock a mutex which is locked by another thread";
+ else if (ec_serial && mutex->ec_serial != ec_serial) {
+ return "Attempt to unlock a mutex which is locked by another thread/fiber";
}
- else {
- mutex->th = 0;
- if (mutex->cond_waiting > 0)
- native_cond_signal(&mutex->cond);
+
+ struct sync_waiter *cur = 0, *next;
+
+ mutex->ec_serial = 0;
+ thread_mutex_remove(th, mutex);
+
+ ccan_list_for_each_safe(&mutex->waitq, cur, next, node) {
+ ccan_list_del_init(&cur->node);
+
+ if (cur->th->scheduler != Qnil && cur->fiber) {
+ rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
+ return NULL;
+ }
+ else {
+ switch (cur->th->status) {
+ case THREAD_RUNNABLE: /* from someone else calling Thread#run */
+ case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
+ RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(cur->th));
+ rb_threadptr_interrupt(cur->th);
+ return NULL;
+ case THREAD_STOPPED: /* probably impossible */
+ rb_bug("unexpected THREAD_STOPPED");
+ case THREAD_KILLED:
+ /* not sure about this, possible in exit GC? */
+ rb_bug("unexpected THREAD_KILLED");
+ continue;
+ }
+ }
}
- native_mutex_unlock(&mutex->lock);
+ // We did not find any threads to wake up, so we can just return with no error:
+ return NULL;
+}
- if (!err) {
- rb_mutex_t *volatile *th_mutex = &th->keeping_mutexes;
- while (*th_mutex != mutex) {
- th_mutex = &(*th_mutex)->next_mutex;
- }
- *th_mutex = mutex->next_mutex;
- mutex->next_mutex = NULL;
- }
+static void
+do_mutex_unlock(struct mutex_args *args)
+{
+ const char *err;
+ rb_mutex_t *mutex = args->mutex;
+ rb_thread_t *th = rb_ec_thread_ptr(args->ec);
+
+ err = rb_mutex_unlock_th(mutex, th, rb_ec_serial(args->ec));
+ if (err) rb_raise(rb_eThreadError, "%s", err);
+}
- return err;
+static VALUE
+do_mutex_unlock_safe(VALUE args)
+{
+ do_mutex_unlock((struct mutex_args *)args);
+ return Qnil;
}
/*
@@ -368,36 +513,41 @@ rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
VALUE
rb_mutex_unlock(VALUE self)
{
- const char *err;
- rb_mutex_t *mutex;
- GetMutexPtr(self, mutex);
-
- err = rb_mutex_unlock_th(mutex, GET_THREAD());
- if (err) rb_raise(rb_eThreadError, "%s", err);
+ struct mutex_args args;
+ mutex_args_init(&args, self);
+ do_mutex_unlock(&args);
+ return self;
+}
+static VALUE
+rb_mut_unlock(rb_execution_context_t *ec, VALUE self)
+{
+ struct mutex_args args = {
+ .self = self,
+ .mutex = mutex_ptr(self),
+ .ec = ec,
+ };
+ do_mutex_unlock(&args);
return self;
}
+#if defined(HAVE_WORKING_FORK)
static void
rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
{
- if (th->keeping_mutexes) {
- rb_mutex_abandon_all(th->keeping_mutexes);
- }
+ rb_mutex_abandon_all(th->keeping_mutexes);
th->keeping_mutexes = NULL;
}
static void
rb_mutex_abandon_locking_mutex(rb_thread_t *th)
{
- rb_mutex_t *mutex;
-
- if (!th->locking_mutex) return;
+ if (th->locking_mutex) {
+ rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
- GetMutexPtr(th->locking_mutex, mutex);
- if (mutex->th == th)
- rb_mutex_abandon_all(mutex);
- th->locking_mutex = Qfalse;
+ ccan_list_head_init(&mutex->waitq);
+ th->locking_mutex = Qfalse;
+ }
}
static void
@@ -406,761 +556,649 @@ rb_mutex_abandon_all(rb_mutex_t *mutexes)
rb_mutex_t *mutex;
while (mutexes) {
- mutex = mutexes;
- mutexes = mutex->next_mutex;
- mutex->th = 0;
- mutex->next_mutex = 0;
+ mutex = mutexes;
+ mutexes = mutex->next_mutex;
+ mutex->ec_serial = 0;
+ mutex->next_mutex = 0;
+ ccan_list_head_init(&mutex->waitq);
}
}
+#endif
-static VALUE
-rb_mutex_sleep_forever(VALUE time)
-{
- sleep_forever(GET_THREAD(), 1, 0); /* permit spurious check */
- return Qnil;
-}
+struct rb_mutex_sleep_arguments {
+ VALUE self;
+ VALUE timeout;
+};
static VALUE
-rb_mutex_wait_for(VALUE time)
+mutex_sleep_begin(VALUE _arguments)
{
- struct timeval *t = (struct timeval *)time;
- sleep_timeval(GET_THREAD(), *t, 0); /* permit spurious check */
- return Qnil;
-}
+ struct rb_mutex_sleep_arguments *arguments = (struct rb_mutex_sleep_arguments *)_arguments;
+ VALUE timeout = arguments->timeout;
+ VALUE woken = Qtrue;
-VALUE
-rb_mutex_sleep(VALUE self, VALUE timeout)
-{
- time_t beg, end;
- struct timeval t;
-
- if (!NIL_P(timeout)) {
- t = rb_time_interval(timeout);
- }
- rb_mutex_unlock(self);
- beg = time(0);
- if (NIL_P(timeout)) {
- rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
+ VALUE scheduler = rb_fiber_scheduler_current();
+ if (scheduler != Qnil) {
+ rb_fiber_scheduler_kernel_sleep(scheduler, timeout);
}
else {
- rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
+ if (NIL_P(timeout)) {
+ rb_thread_sleep_deadly_allow_spurious_wakeup(arguments->self, Qnil, 0);
+ }
+ else {
+ struct timeval timeout_value = rb_time_interval(timeout);
+ rb_hrtime_t relative_timeout = rb_timeval2hrtime(&timeout_value);
+ /* permit spurious check */
+ woken = RBOOL(sleep_hrtime(GET_THREAD(), relative_timeout, 0));
+ }
}
- end = time(0) - beg;
- return INT2FIX(end);
+
+ return woken;
}
-/*
- * call-seq:
- * mutex.sleep(timeout = nil) -> number
- *
- * Releases the lock and sleeps +timeout+ seconds if it is given and
- * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
- * the current thread.
- *
- * When the thread is next woken up, it will attempt to reacquire
- * the lock.
- *
- * Note that this method can wakeup without explicit Thread#wakeup call.
- * For example, receiving signal and so on.
- */
static VALUE
-mutex_sleep(int argc, VALUE *argv, VALUE self)
+rb_mut_sleep(rb_execution_context_t *ec, VALUE self, VALUE timeout)
{
- VALUE timeout;
+ if (!NIL_P(timeout)) {
+ // Validate the argument:
+ rb_time_interval(timeout);
+ }
+
+ rb_mut_unlock(ec, self);
+ time_t beg = time(0);
- rb_scan_args(argc, argv, "01", &timeout);
- return rb_mutex_sleep(self, timeout);
+ struct rb_mutex_sleep_arguments arguments = {
+ .self = self,
+ .timeout = timeout,
+ };
+
+ VALUE woken = rb_ec_ensure(ec, mutex_sleep_begin, (VALUE)&arguments, mutex_lock_uninterruptible, self);
+
+ RUBY_VM_CHECK_INTS_BLOCKING(ec);
+ if (!woken) return Qnil;
+ time_t end = time(0) - beg;
+ return TIMET2NUM(end);
}
-/*
- * call-seq:
- * mutex.synchronize { ... } -> result of the block
- *
- * Obtains a lock, runs the block, and releases the lock when the block
- * completes. See the example under +Mutex+.
- */
+VALUE
+rb_mutex_sleep(VALUE self, VALUE timeout)
+{
+ return rb_mut_sleep(GET_EC(), self, timeout);
+}
VALUE
-rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
+rb_mutex_synchronize(VALUE self, VALUE (*func)(VALUE arg), VALUE arg)
{
- rb_mutex_lock(mutex);
- return rb_ensure(func, arg, rb_mutex_unlock, mutex);
+ struct mutex_args args;
+ mutex_args_init(&args, self);
+ do_mutex_lock(&args, 1);
+ return rb_ec_ensure(args.ec, func, arg, do_mutex_unlock_safe, (VALUE)&args);
}
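
`rb_mutex_synchronize` gives C code the same lock/ensure/unlock shape as Mutex#synchronize: the callback runs with the lock held, and the ensure path releases the lock even if the callback raises. An illustrative extension-side sketch (the callback and counter are hypothetical):

#include <ruby.h>

/* Sketch: run update_counter(arg) with the mutex held; the lock is
 * released on both normal return and raised exceptions. */
static VALUE
update_counter(VALUE arg)
{
    long *counter = (long *)arg;
    (*counter)++;
    return Qnil;
}

static void
bump_under_lock(VALUE mutex, long *counter)
{
    rb_mutex_synchronize(mutex, update_counter, (VALUE)counter);
}
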
-/*
- * call-seq:
- * mutex.synchronize { ... } -> result of the block
- *
- * Obtains a lock, runs the block, and releases the lock when the block
- * completes. See the example under +Mutex+.
- */
static VALUE
-rb_mutex_synchronize_m(VALUE self, VALUE args)
+do_ec_yield(VALUE _ec)
{
- if (!rb_block_given_p()) {
- rb_raise(rb_eThreadError, "must be called with a block");
- }
+ return rb_ec_yield((rb_execution_context_t *)_ec, Qundef);
+}
- return rb_mutex_synchronize(self, rb_yield, Qundef);
+VALUE
+rb_mut_synchronize(rb_execution_context_t *ec, VALUE self)
+{
+ struct mutex_args args = {
+ .self = self,
+ .mutex = mutex_ptr(self),
+ .ec = ec,
+ };
+ do_mutex_lock(&args, 1);
+ return rb_ec_ensure(args.ec, do_ec_yield, (VALUE)ec, do_mutex_unlock_safe, (VALUE)&args);
}
-void rb_mutex_allow_trap(VALUE self, int val)
+void
+rb_mutex_allow_trap(VALUE self, int val)
{
- rb_mutex_t *m;
- GetMutexPtr(self, m);
+ Check_TypedStruct(self, &mutex_data_type);
- m->allow_trap = val;
+ if (val)
+ FL_SET_RAW(self, MUTEX_ALLOW_TRAP);
+ else
+ FL_UNSET_RAW(self, MUTEX_ALLOW_TRAP);
}
/* Queue */
-enum {
- QUEUE_QUE,
- QUEUE_WAITERS,
- SZQUEUE_WAITERS,
- SZQUEUE_MAX,
- END_QUEUE
+struct rb_queue {
+ struct ccan_list_head waitq;
+ rb_serial_t fork_gen;
+ long capa;
+ long len;
+ long offset;
+ VALUE *buffer;
+ int num_waiting;
};
-#define QUEUE_CLOSED FL_USER5
-#define QUEUE_CLOSE_EXCEPTION FL_USER6
+#define szqueue_waitq(sq) &sq->q.waitq
+#define szqueue_pushq(sq) &sq->pushq
-#define GET_QUEUE_QUE(q) get_array((q), QUEUE_QUE)
-#define GET_QUEUE_WAITERS(q) get_array((q), QUEUE_WAITERS)
-#define GET_SZQUEUE_WAITERS(q) get_array((q), SZQUEUE_WAITERS)
-#define GET_SZQUEUE_MAX(q) RSTRUCT_GET((q), SZQUEUE_MAX)
-#define GET_SZQUEUE_ULONGMAX(q) NUM2ULONG(GET_SZQUEUE_MAX(q))
+struct rb_szqueue {
+ struct rb_queue q;
+ int num_waiting_push;
+ struct ccan_list_head pushq;
+ long max;
+};
-static VALUE
-ary_buf_new(void)
+static void
+queue_mark_and_move(void *ptr)
{
- return rb_ary_tmp_new(1);
+ struct rb_queue *q = ptr;
+ /* no need to mark threads in waitq, they are on stack */
+ for (long index = 0; index < q->len; index++) {
+ rb_gc_mark_and_move(&q->buffer[((q->offset + index) % q->capa)]);
+ }
}
-static VALUE
-get_array(VALUE obj, int idx)
+static void
+queue_free(void *ptr)
{
- VALUE ary = RSTRUCT_GET(obj, idx);
- if (!RB_TYPE_P(ary, T_ARRAY)) {
- rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
+ struct rb_queue *q = ptr;
+ if (q->buffer) {
+ ruby_sized_xfree(q->buffer, q->capa * sizeof(VALUE));
}
- return ary;
}
-static void
-wakeup_first_thread(VALUE list)
+static size_t
+queue_memsize(const void *ptr)
{
- VALUE thread;
+ const struct rb_queue *q = ptr;
+ return sizeof(struct rb_queue) + (q->capa * sizeof(VALUE));
+}
- while (!NIL_P(thread = rb_ary_shift(list))) {
- if (RTEST(rb_thread_wakeup_alive(thread))) break;
- }
+static const rb_data_type_t queue_data_type = {
+ .wrap_struct_name = "Thread::Queue",
+ .function = {
+ .dmark = queue_mark_and_move,
+ .dfree = queue_free,
+ .dsize = queue_memsize,
+ .dcompact = queue_mark_and_move,
+ },
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
+};
+
+static VALUE
+queue_alloc(VALUE klass)
+{
+ VALUE obj;
+ struct rb_queue *q;
+
+ obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
+ ccan_list_head_init(&q->waitq);
+ return obj;
}
-static void
-wakeup_all_threads(VALUE list)
+static inline bool
+queue_fork_check(struct rb_queue *q)
{
- VALUE thread;
- long i;
+ rb_serial_t fork_gen = GET_VM()->fork_gen;
- for (i=0; i<RARRAY_LEN(list); i++) {
- thread = RARRAY_AREF(list, i);
- rb_thread_wakeup_alive(thread);
+ if (RB_LIKELY(q->fork_gen == fork_gen)) {
+ return false;
}
- rb_ary_clear(list);
+ /* forked children can't reach into parent thread stacks */
+ q->fork_gen = fork_gen;
+ ccan_list_head_init(&q->waitq);
+ q->num_waiting = 0;
+ return true;
}
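
The fork-generation check exists because a forked child inherits the queue but not the parent's threads, whose on-stack waiters would be dangling pointers in the child. A standalone sketch of the technique, with `current_fork_gen` standing in for `GET_VM()->fork_gen`:

/* Sketch of the fork-generation check in queue_fork_check(): a struct
 * caches the process-wide generation, and a mismatch after fork() means
 * any queued waiters belong to parent threads that do not exist in this
 * child, so the wait state must be reset. */
#include <stdbool.h>
#include <stdint.h>

static uint64_t current_fork_gen;   /* bumped once in the child after fork() */

struct waitable { uint64_t fork_gen; int num_waiting; };

static bool
fork_check(struct waitable *w)
{
    if (w->fork_gen == current_fork_gen) return false;  /* same process image */
    w->fork_gen = current_fork_gen;  /* adopt the child's generation */
    w->num_waiting = 0;              /* parent-stack waiters are unreachable */
    return true;
}
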
-static unsigned long
-queue_length(VALUE self)
+static inline struct rb_queue *
+raw_queue_ptr(VALUE obj)
{
- VALUE que = GET_QUEUE_QUE(self);
- return RARRAY_LEN(que);
+ struct rb_queue *q;
+
+ TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
+ queue_fork_check(q);
+
+ return q;
}
-static unsigned long
-queue_num_waiting(VALUE self)
+static inline void
+check_queue(VALUE obj, struct rb_queue *q)
{
- VALUE waiters = GET_QUEUE_WAITERS(self);
- return RARRAY_LEN(waiters);
+ if (RB_UNLIKELY(q->buffer == NULL)) {
+ rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
+ }
}
-static unsigned long
-szqueue_num_waiting_producer(VALUE self)
+static inline struct rb_queue *
+queue_ptr(VALUE obj)
{
- VALUE waiters = GET_SZQUEUE_WAITERS(self);
- return RARRAY_LEN(waiters);
+ struct rb_queue *q = raw_queue_ptr(obj);
+ check_queue(obj, q);
+ return q;
}
-static int
-queue_closed_p(VALUE self)
+#define QUEUE_CLOSED FL_USER5
+
+static rb_hrtime_t
+queue_timeout2hrtime(VALUE timeout)
{
- return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
+ if (NIL_P(timeout)) {
+ return (rb_hrtime_t)0;
+ }
+ rb_hrtime_t rel = 0;
+ if (FIXNUM_P(timeout)) {
+ rel = rb_sec2hrtime(NUM2TIMET(timeout));
+ }
+ else {
+ double2hrtime(&rel, rb_num2dbl(timeout));
+ }
+ return rb_hrtime_add(rel, rb_hrtime_now());
}
static void
-raise_closed_queue_error(VALUE self)
+szqueue_mark_and_move(void *ptr)
{
- rb_raise(rb_eClosedQueueError, "queue closed");
+ struct rb_szqueue *sq = ptr;
+
+ queue_mark_and_move(&sq->q);
}
-static VALUE
-queue_closed_result(VALUE self)
+static void
+szqueue_free(void *ptr)
{
- assert(queue_length(self) == 0);
-
- if (FL_TEST(self, QUEUE_CLOSE_EXCEPTION)) {
- raise_closed_queue_error(self);
- }
- return Qnil;
+ struct rb_szqueue *sq = ptr;
+ queue_free(&sq->q);
}
+static size_t
+szqueue_memsize(const void *ptr)
+{
+ const struct rb_szqueue *sq = ptr;
+ return sizeof(struct rb_szqueue) + (sq->q.capa * sizeof(VALUE));
+}
+
+static const rb_data_type_t szqueue_data_type = {
+ .wrap_struct_name = "Thread::SizedQueue",
+ .function = {
+ .dmark = szqueue_mark_and_move,
+ .dfree = szqueue_free,
+ .dsize = szqueue_memsize,
+ .dcompact = szqueue_mark_and_move,
+ },
+ .parent = &queue_data_type,
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
+};
+
static VALUE
-queue_do_close(VALUE self, int argc, VALUE *argv, int is_szq)
+szqueue_alloc(VALUE klass)
{
- VALUE exception = Qfalse;
-
- if (!queue_closed_p(self)) {
- rb_scan_args(argc, argv, "01", &exception);
- FL_SET(self, QUEUE_CLOSED);
-
- if (RTEST(exception)) {
- FL_SET(self, QUEUE_CLOSE_EXCEPTION);
- }
+ struct rb_szqueue *sq;
+ VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
+ &szqueue_data_type, sq);
+ ccan_list_head_init(szqueue_waitq(sq));
+ ccan_list_head_init(szqueue_pushq(sq));
+ return obj;
+}
- if (queue_num_waiting(self) > 0) {
- VALUE waiters = GET_QUEUE_WAITERS(self);
- wakeup_all_threads(waiters);
- }
+static inline struct rb_szqueue *
+raw_szqueue_ptr(VALUE obj)
+{
+ struct rb_szqueue *sq;
- if (is_szq && szqueue_num_waiting_producer(self) > 0) {
- VALUE waiters = GET_SZQUEUE_WAITERS(self);
- wakeup_all_threads(waiters);
- }
+ TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
+ if (RB_UNLIKELY(queue_fork_check(&sq->q))) {
+ ccan_list_head_init(szqueue_pushq(sq));
+ sq->num_waiting_push = 0;
}
- return self;
+ return sq;
}
-/*
- * Document-class: Queue
- *
- * This class provides a way to synchronize communication between threads.
- *
- * Example:
- *
- * require 'thread'
- * queue = Queue.new
- *
- * producer = Thread.new do
- * 5.times do |i|
- * sleep rand(i) # simulate expense
- * queue << i
- * puts "#{i} produced"
- * end
- * end
- *
- * consumer = Thread.new do
- * 5.times do |i|
- * value = queue.pop
- * sleep rand(i/2) # simulate expense
- * puts "consumed #{value}"
- * end
- * end
- *
- */
-
-/*
- * Document-method: Queue::new
- *
- * Creates a new queue instance.
- */
-
-static VALUE
-rb_queue_initialize(VALUE self)
+static inline struct rb_szqueue *
+szqueue_ptr(VALUE obj)
{
- RSTRUCT_SET(self, QUEUE_QUE, ary_buf_new());
- RSTRUCT_SET(self, QUEUE_WAITERS, ary_buf_new());
- return self;
+ struct rb_szqueue *sq = raw_szqueue_ptr(obj);
+ check_queue(obj, &sq->q);
+ return sq;
}
-static VALUE
-queue_do_push(VALUE self, VALUE obj)
+static inline bool
+queue_closed_p(VALUE self)
{
- if (queue_closed_p(self)) {
- raise_closed_queue_error(self);
- }
- rb_ary_push(GET_QUEUE_QUE(self), obj);
- wakeup_first_thread(GET_QUEUE_WAITERS(self));
- return self;
+ return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
}
/*
- * Document-method: Queue#close
- * call-seq:
- * close(exception=false)
- *
- * Closes the queue. A closed queue cannot be re-opened.
- *
- * After the call to close completes, the following are true:
- *
- * - +closed?+ will return true
- *
- * - +close+ will be ignored.
- *
- * - calling enq/push/<< will raise ClosedQueueError('queue closed')
- *
- * - when +empty?+ is false, calling deq/pop/shift will return an object
- * from the queue as usual.
- *
- * - when +empty?+ is true, deq(non_block=false) will not suspend and
- * will either return nil. If +exception+ parameter is true, raise ClosedQueueError error.
- * deq(non_block=true) will ignore the parameter and raise a ThreadError('queue empty').
- *
- * ClosedQueueError is inherited from StopIteration, so that you can break loop block.
- *
- * Example:
+ * Document-class: ClosedQueueError
*
- * q = Queue.new
- * Thread.new{
- * while e = q.deq # wait for nil to break loop
- * # ...
- * end
- * }
- * q.close # equals to q.close(false)
- *
- * q = Queue.new
- * Thread.new{
- * loop{
- * e = q.deq; ... # break with ClosedQueueError
- * }
- * }
- * q.close(true)
+ * The exception class which will be raised when pushing into a closed
+ * Queue. See Thread::Queue#close and Thread::SizedQueue#close.
*/
-static VALUE
-rb_queue_close(int argc, VALUE *argv, VALUE self)
+NORETURN(static void raise_closed_queue_error(VALUE self));
+
+static void
+raise_closed_queue_error(VALUE self)
{
- return queue_do_close(self, argc, argv, FALSE);
+ rb_raise(rb_eClosedQueueError, "queue closed");
}
-/*
- * Document-method: Queue#closed?
- * call-seq: closed?
- *
- * Returns +true+ if the queue is closed.
- */
-
static VALUE
-rb_queue_closed_p(VALUE self)
+queue_closed_result(VALUE self, struct rb_queue *q)
{
- return queue_closed_p(self) ? Qtrue : Qfalse;
+ RUBY_ASSERT(q->len == 0);
+ return Qnil;
}
-/*
- * Document-method: Queue#push
- * call-seq:
- * push(object)
- * enq(object)
- * <<(object)
- *
- * Pushes the given +object+ to the queue.
- */
+#define QUEUE_INITIAL_CAPA 8
-static VALUE
-rb_queue_push(VALUE self, VALUE obj)
+static inline void
+ring_buffer_init(struct rb_queue *q, long initial_capa)
{
- return queue_do_push(self, obj);
+ q->buffer = ALLOC_N(VALUE, initial_capa);
+ q->capa = initial_capa;
}
-struct waiting_delete {
- VALUE waiting;
- VALUE th;
-};
-
-static VALUE
-queue_delete_from_waiting(struct waiting_delete *p)
+static inline void
+ring_buffer_expand(struct rb_queue *q)
{
- rb_ary_delete(p->waiting, p->th);
- return Qnil;
+ RUBY_ASSERT(q->capa > 0);
+ VALUE *new_buffer = ALLOC_N(VALUE, q->capa * 2);
+ MEMCPY(new_buffer, q->buffer + q->offset, VALUE, q->capa - q->offset);
+ MEMCPY(new_buffer + (q->capa - q->offset), q->buffer, VALUE, q->offset);
+ VALUE *old_buffer = q->buffer;
+ q->buffer = new_buffer;
+ q->offset = 0;
+ ruby_sized_xfree(old_buffer, q->capa * sizeof(VALUE));
+ q->capa *= 2;
}
-static VALUE
-queue_sleep(VALUE arg)
+static void
+ring_buffer_push(VALUE self, struct rb_queue *q, VALUE obj)
{
- rb_thread_sleep_deadly();
- return Qnil;
+ if (RB_UNLIKELY(q->len >= q->capa)) {
+ ring_buffer_expand(q);
+ }
+ RUBY_ASSERT(q->capa > q->len);
+ long index = (q->offset + q->len) % q->capa;
+ q->len++;
+ RB_OBJ_WRITE(self, &q->buffer[index], obj);
}
static VALUE
-queue_do_pop(VALUE self, int should_block)
-{
- struct waiting_delete args;
- args.waiting = GET_QUEUE_WAITERS(self);
- args.th = rb_thread_current();
-
- while (queue_length(self) == 0) {
- if (!should_block) {
- rb_raise(rb_eThreadError, "queue empty");
- }
- else if (queue_closed_p(self)) {
- return queue_closed_result(self);
- }
- else {
- assert(queue_length(self) == 0);
- assert(queue_closed_p(self) == 0);
-
- rb_ary_push(args.waiting, args.th);
- rb_ensure(queue_sleep, (VALUE)0, queue_delete_from_waiting, (VALUE)&args);
- }
+ring_buffer_shift(struct rb_queue *q)
+{
+ if (!q->len) {
+ return Qnil;
}
- return rb_ary_shift(GET_QUEUE_QUE(self));
+ VALUE obj = q->buffer[q->offset];
+ q->len--;
+ if (q->len == 0) {
+ q->offset = 0;
+ }
+ else {
+ q->offset = (q->offset + 1) % q->capa;
+ }
+ return obj;
}
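
The two functions above implement a growable ring buffer: `offset` marks the logical head and indexing is modulo `capa`, so push and shift are O(1) and expansion relinearizes the contents. A standalone, runnable sketch of the index arithmetic with a fixed capacity (growth and overflow checks omitted):

/* Runnable sketch of the wraparound arithmetic in ring_buffer_push()
 * and ring_buffer_shift(). */
#include <stdio.h>

#define CAPA 4
static int buffer[CAPA];
static long offset, len;

static void push(int v) { buffer[(offset + len++) % CAPA] = v; }

static int
shift(void)
{
    int v = buffer[offset];
    len--;
    offset = len ? (offset + 1) % CAPA : 0;  /* reset when empty, as above */
    return v;
}

int
main(void)
{
    for (int i = 1; i <= 4; i++) push(i);
    printf("%d %d\n", shift(), shift());     /* 1 2 */
    push(5); push(6);                        /* wraps around the array end */
    while (len) printf("%d ", shift());      /* 3 4 5 6 */
    return 0;
}
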
-static int
-queue_pop_should_block(int argc, const VALUE *argv)
+static VALUE
+queue_initialize(rb_execution_context_t *ec, VALUE self, VALUE initial)
{
- int should_block = 1;
- rb_check_arity(argc, 0, 1);
- if (argc > 0) {
- should_block = !RTEST(argv[0]);
+ struct rb_queue *q = raw_queue_ptr(self);
+ ccan_list_head_init(&q->waitq);
+ if (NIL_P(initial)) {
+ ring_buffer_init(q, QUEUE_INITIAL_CAPA);
}
- return should_block;
+ else {
+ initial = rb_to_array(initial);
+ long len = RARRAY_LEN(initial);
+ long initial_capa = QUEUE_INITIAL_CAPA;
+ while (initial_capa < len) {
+ initial_capa *= 2;
+ }
+ ring_buffer_init(q, initial_capa);
+ MEMCPY(q->buffer, RARRAY_CONST_PTR(initial), VALUE, len);
+ q->len = len;
+ }
+ return self;
}
-/*
- * Document-method: Queue#pop
- * call-seq:
- * pop(non_block=false)
- * deq(non_block=false)
- * shift(non_block=false)
- *
- * Retrieves data from the queue.
- *
- * If the queue is empty, the calling thread is suspended until data is pushed
- * onto the queue. If +non_block+ is true, the thread isn't suspended, and an
- * exception is raised.
- */
-
static VALUE
-rb_queue_pop(int argc, VALUE *argv, VALUE self)
+queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
{
- int should_block = queue_pop_should_block(argc, argv);
- return queue_do_pop(self, should_block);
+ check_queue(self, q);
+ if (queue_closed_p(self)) {
+ raise_closed_queue_error(self);
+ }
+ ring_buffer_push(self, q, obj);
+ wakeup_one(&q->waitq);
+ return self;
}
-/*
- * Document-method: Queue#empty?
- * call-seq: empty?
- *
- * Returns +true+ if the queue is empty.
- */
-
static VALUE
-rb_queue_empty_p(VALUE self)
+queue_sleep(VALUE _args)
{
- return queue_length(self) == 0 ? Qtrue : Qfalse;
+ struct queue_sleep_arg *args = (struct queue_sleep_arg *)_args;
+ rb_thread_sleep_deadly_allow_spurious_wakeup(args->self, args->timeout, args->end);
+ return Qnil;
}
-/*
- * Document-method: Queue#clear
- *
- * Removes all objects from the queue.
- */
+struct queue_waiter {
+ struct sync_waiter w;
+ union {
+ struct rb_queue *q;
+ struct rb_szqueue *sq;
+ } as;
+};
static VALUE
-rb_queue_clear(VALUE self)
+queue_sleep_done(VALUE p)
{
- rb_ary_clear(GET_QUEUE_QUE(self));
- return self;
-}
+ struct queue_waiter *qw = (struct queue_waiter *)p;
-/*
- * Document-method: Queue#length
- * call-seq:
- * length
- * size
- *
- * Returns the length of the queue.
- */
+ ccan_list_del(&qw->w.node);
+ qw->as.q->num_waiting--;
-static VALUE
-rb_queue_length(VALUE self)
-{
- unsigned long len = queue_length(self);
- return ULONG2NUM(len);
+ return Qfalse;
}
-/*
- * Document-method: Queue#num_waiting
- *
- * Returns the number of threads waiting on the queue.
- */
-
static VALUE
-rb_queue_num_waiting(VALUE self)
+szqueue_sleep_done(VALUE p)
{
- unsigned long len = queue_num_waiting(self);
- return ULONG2NUM(len);
-}
+ struct queue_waiter *qw = (struct queue_waiter *)p;
-/*
- * Document-class: SizedQueue
- *
- * This class represents queues of specified size capacity. The push operation
- * may be blocked if the capacity is full.
- *
- * See Queue for an example of how a SizedQueue works.
- */
+ ccan_list_del(&qw->w.node);
+ qw->as.sq->num_waiting_push--;
-/*
- * Document-method: SizedQueue::new
- * call-seq: new(max)
- *
- * Creates a fixed-length queue with a maximum size of +max+.
- */
+ return Qfalse;
+}
-static VALUE
-rb_szqueue_initialize(VALUE self, VALUE vmax)
+static inline VALUE
+queue_do_pop(rb_execution_context_t *ec, VALUE self, struct rb_queue *q, VALUE non_block, VALUE timeout)
{
- long max;
+ if (q->len == 0) {
+ if (RTEST(non_block)) {
+ rb_raise(rb_eThreadError, "queue empty");
+ }
- max = NUM2LONG(vmax);
- if (max <= 0) {
- rb_raise(rb_eArgError, "queue size must be positive");
+ if (RTEST(rb_equal(INT2FIX(0), timeout))) {
+ return Qnil;
+ }
}
- RSTRUCT_SET(self, QUEUE_QUE, ary_buf_new());
- RSTRUCT_SET(self, QUEUE_WAITERS, ary_buf_new());
- RSTRUCT_SET(self, SZQUEUE_WAITERS, ary_buf_new());
- RSTRUCT_SET(self, SZQUEUE_MAX, vmax);
+ rb_hrtime_t end = queue_timeout2hrtime(timeout);
+ while (q->len == 0) {
+ if (queue_closed_p(self)) {
+ return queue_closed_result(self, q);
+ }
+ else {
+ RUBY_ASSERT(q->len == 0);
+ RUBY_ASSERT(queue_closed_p(self) == 0);
+
+ struct queue_waiter queue_waiter = {
+ .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
+ .as = {.q = q}
+ };
+
+ struct ccan_list_head *waitq = &q->waitq;
+
+ ccan_list_add_tail(waitq, &queue_waiter.w.node);
+ queue_waiter.as.q->num_waiting++;
+
+ struct queue_sleep_arg queue_sleep_arg = {
+ .self = self,
+ .timeout = timeout,
+ .end = end
+ };
+
+ rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, queue_sleep_done, (VALUE)&queue_waiter);
+ if (!NIL_P(timeout) && (rb_hrtime_now() >= end))
+ break;
+ }
+ }
- return self;
+ return ring_buffer_shift(q);
}
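
Note how the deadline is computed once before the loop and re-tested after every wakeup, since the sleeps here are allowed to wake spuriously. A standalone sketch of that pattern using `clock_gettime`; `queue_empty` and `sleep_until` are hypothetical stand-ins for the queue predicate and the sleep above:

/* Sketch of the absolute-deadline pattern in queue_do_pop(): convert the
 * timeout to a monotonic deadline once, then after each (possibly
 * spurious) wakeup re-test both the predicate and the clock. */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t
monotonic_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

extern bool queue_empty(void);               /* hypothetical predicate */
extern void sleep_until(uint64_t deadline);  /* hypothetical sleep */

static bool
wait_for_data(uint64_t timeout_ns)
{
    uint64_t end = monotonic_ns() + timeout_ns;   /* computed exactly once */
    while (queue_empty()) {
        sleep_until(end);
        if (monotonic_ns() >= end) return false;  /* timed out */
    }
    return true;                                  /* data is available */
}
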
-/*
- * Document-method: SizedQueue#close
- * call-seq:
- * close(exception=false)
- *
- * Similar to Queue#close.
- *
- * The difference is behavior with waiting enqueuing threads.
- *
- * If there are waiting enqueuing threads, they are interrupted by
- * raising ClosedQueueError('queue closed').
- */
static VALUE
-rb_szqueue_close(int argc, VALUE *argv, VALUE self)
+rb_queue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
{
- return queue_do_close(self, argc, argv, TRUE);
+ return queue_do_pop(ec, self, queue_ptr(self), non_block, timeout);
}
-/*
- * Document-method: SizedQueue#max
- *
- * Returns the maximum size of the queue.
- */
-
-static VALUE
-rb_szqueue_max_get(VALUE self)
+static void
+queue_clear(struct rb_queue *q)
{
- return GET_SZQUEUE_MAX(self);
+ q->len = 0;
+ q->offset = 0;
}
-/*
- * Document-method: SizedQueue#max=
- * call-seq: max=(number)
- *
- * Sets the maximum size of the queue to the given +number+.
- */
-
static VALUE
-rb_szqueue_max_set(VALUE self, VALUE vmax)
+szqueue_initialize(rb_execution_context_t *ec, VALUE self, VALUE vmax)
{
- long max = NUM2LONG(vmax), diff = 0;
- VALUE t;
+ long max = NUM2LONG(vmax);
+ struct rb_szqueue *sq = raw_szqueue_ptr(self);
if (max <= 0) {
- rb_raise(rb_eArgError, "queue size must be positive");
- }
- if ((unsigned long)max > GET_SZQUEUE_ULONGMAX(self)) {
- diff = max - GET_SZQUEUE_ULONGMAX(self);
+ rb_raise(rb_eArgError, "queue size must be positive");
}
- RSTRUCT_SET(self, SZQUEUE_MAX, vmax);
- while (diff-- > 0 && !NIL_P(t = rb_ary_shift(GET_SZQUEUE_WAITERS(self)))) {
- rb_thread_wakeup_alive(t);
- }
- return vmax;
+ ring_buffer_init(&sq->q, QUEUE_INITIAL_CAPA);
+ ccan_list_head_init(szqueue_waitq(sq));
+ ccan_list_head_init(szqueue_pushq(sq));
+ sq->max = max;
+
+ return self;
}
-static int
-szqueue_push_should_block(int argc, const VALUE *argv)
+static VALUE
+rb_szqueue_push(rb_execution_context_t *ec, VALUE self, VALUE object, VALUE non_block, VALUE timeout)
{
- int should_block = 1;
- rb_check_arity(argc, 1, 2);
- if (argc > 1) {
- should_block = !RTEST(argv[1]);
- }
- return should_block;
-}
+ struct rb_szqueue *sq = szqueue_ptr(self);
-/*
- * Document-method: SizedQueue#push
- * call-seq:
- * push(object, non_block=false)
- * enq(object, non_block=false)
- * <<(object)
- *
- * Pushes +object+ to the queue.
- *
- * If there is no space left in the queue, waits until space becomes
- * available, unless +non_block+ is true. If +non_block+ is true, the
- * thread isn't suspended, and an exception is raised.
- */
+ if (sq->q.len >= sq->max) {
+ if (RTEST(non_block)) {
+ rb_raise(rb_eThreadError, "queue full");
+ }
-static VALUE
-rb_szqueue_push(int argc, VALUE *argv, VALUE self)
-{
- struct waiting_delete args;
- int should_block = szqueue_push_should_block(argc, argv);
- args.waiting = GET_SZQUEUE_WAITERS(self);
- args.th = rb_thread_current();
-
- while (queue_length(self) >= GET_SZQUEUE_ULONGMAX(self)) {
- if (!should_block) {
- rb_raise(rb_eThreadError, "queue full");
- }
- else if (queue_closed_p(self)) {
- goto closed;
- }
- else {
- rb_ary_push(args.waiting, args.th);
- rb_ensure((VALUE (*)())rb_thread_sleep_deadly, (VALUE)0, queue_delete_from_waiting, (VALUE)&args);
- }
+ if (RTEST(rb_equal(INT2FIX(0), timeout))) {
+ return Qnil;
+ }
}
- if (queue_closed_p(self)) {
- closed:
- raise_closed_queue_error(self);
+ rb_hrtime_t end = queue_timeout2hrtime(timeout);
+ while (sq->q.len >= sq->max) {
+ if (queue_closed_p(self)) {
+ raise_closed_queue_error(self);
+ }
+ else {
+ struct queue_waiter queue_waiter = {
+ .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
+ .as = {.sq = sq}
+ };
+
+ struct ccan_list_head *pushq = szqueue_pushq(sq);
+
+ ccan_list_add_tail(pushq, &queue_waiter.w.node);
+ sq->num_waiting_push++;
+
+ struct queue_sleep_arg queue_sleep_arg = {
+ .self = self,
+ .timeout = timeout,
+ .end = end
+ };
+ rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, szqueue_sleep_done, (VALUE)&queue_waiter);
+ if (!NIL_P(timeout) && rb_hrtime_now() >= end) {
+ return Qnil;
+ }
+ }
}
- return queue_do_push(self, argv[0]);
+ return queue_do_push(self, &sq->q, object);
}
static VALUE
-szqueue_do_pop(VALUE self, int should_block)
+rb_szqueue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
{
- VALUE retval = queue_do_pop(self, should_block);
+ struct rb_szqueue *sq = szqueue_ptr(self);
+ VALUE retval = queue_do_pop(ec, self, &sq->q, non_block, timeout);
- if (queue_length(self) < GET_SZQUEUE_ULONGMAX(self)) {
- wakeup_first_thread(GET_SZQUEUE_WAITERS(self));
+ if (sq->q.len < sq->max) {
+ wakeup_one(szqueue_pushq(sq));
}
return retval;
}
-/*
- * Document-method: SizedQueue#pop
- * call-seq:
- * pop(non_block=false)
- * deq(non_block=false)
- * shift(non_block=false)
- *
- * Retrieves data from the queue.
- *
- * If the queue is empty, the calling thread is suspended until data is pushed
- * onto the queue. If +non_block+ is true, the thread isn't suspended, and an
- * exception is raised.
- */
-
-static VALUE
-rb_szqueue_pop(int argc, VALUE *argv, VALUE self)
-{
- int should_block = queue_pop_should_block(argc, argv);
- return szqueue_do_pop(self, should_block);
-}
-
-/*
- * Document-method: Queue#clear
- *
- * Removes all objects from the queue.
- */
+/* ConditionVariable */
+struct rb_condvar {
+ struct ccan_list_head waitq;
+ rb_serial_t fork_gen;
+};
-static VALUE
-rb_szqueue_clear(VALUE self)
+static size_t
+condvar_memsize(const void *ptr)
{
- rb_ary_clear(GET_QUEUE_QUE(self));
- wakeup_all_threads(GET_SZQUEUE_WAITERS(self));
- return self;
+ return sizeof(struct rb_condvar);
}
-/*
- * Document-method: SizedQueue#num_waiting
- *
- * Returns the number of threads waiting on the queue.
- */
+static const rb_data_type_t cv_data_type = {
+ "condvar",
+ {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,},
+ 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
+};
-static VALUE
-rb_szqueue_num_waiting(VALUE self)
+static struct rb_condvar *
+condvar_ptr(VALUE self)
{
- long len = queue_num_waiting(self) + szqueue_num_waiting_producer(self);
- return ULONG2NUM(len);
-}
-
-/* ConditionalVariable */
+ struct rb_condvar *cv;
+ rb_serial_t fork_gen = GET_VM()->fork_gen;
-enum {
- CONDVAR_WAITERS,
- END_CONDVAR
-};
+ TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);
-#define GET_CONDVAR_WAITERS(cv) get_array((cv), CONDVAR_WAITERS)
-
-/*
- * Document-class: ConditionVariable
- *
- * ConditionVariable objects augment class Mutex. Using condition variables,
- * it is possible to suspend while in the middle of a critical section until a
- * resource becomes available.
- *
- * Example:
- *
- * require 'thread'
- *
- * mutex = Mutex.new
- * resource = ConditionVariable.new
- *
- * a = Thread.new {
- * mutex.synchronize {
- * # Thread 'a' now needs the resource
- * resource.wait(mutex)
- * # 'a' can now have the resource
- * }
- * }
- *
- * b = Thread.new {
- * mutex.synchronize {
- * # Thread 'b' has finished using the resource
- * resource.signal
- * }
- * }
- */
+ /* forked children can't reach into parent thread stacks */
+ if (cv->fork_gen != fork_gen) {
+ cv->fork_gen = fork_gen;
+ ccan_list_head_init(&cv->waitq);
+ }
-/*
- * Document-method: ConditionVariable::new
- *
- * Creates a new condition variable instance.
- */
+ return cv;
+}
static VALUE
-rb_condvar_initialize(VALUE self)
+condvar_alloc(VALUE klass)
{
- RSTRUCT_SET(self, CONDVAR_WAITERS, ary_buf_new());
- return self;
+ struct rb_condvar *cv;
+ VALUE obj;
+
+ obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
+ ccan_list_head_init(&cv->waitq);
+
+ return obj;
}
struct sleep_call {
+ rb_execution_context_t *ec;
VALUE mutex;
VALUE timeout;
};
@@ -1171,167 +1209,73 @@ static VALUE
do_sleep(VALUE args)
{
struct sleep_call *p = (struct sleep_call *)args;
- return rb_funcall2(p->mutex, id_sleep, 1, &p->timeout);
+ if (CLASS_OF(p->mutex) == rb_cMutex) {
+ return rb_mut_sleep(p->ec, p->mutex, p->timeout);
+ }
+ else {
+ return rb_funcallv(p->mutex, id_sleep, 1, &p->timeout);
+ }
}
static VALUE
-delete_current_thread(VALUE ary)
+rb_condvar_wait(rb_execution_context_t *ec, VALUE self, VALUE mutex, VALUE timeout)
{
- return rb_ary_delete(ary, rb_thread_current());
-}
+ struct rb_condvar *cv = condvar_ptr(self);
+ struct sleep_call args = {
+ .ec = ec,
+ .mutex = mutex,
+ .timeout = timeout,
+ };
-/*
- * Document-method: ConditionVariable#wait
- * call-seq: wait(mutex, timeout=nil)
- *
- * Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
- *
- * If +timeout+ is given, this method returns after +timeout+ seconds passed,
- * even if no other thread doesn't signal.
- */
-
-static VALUE
-rb_condvar_wait(int argc, VALUE *argv, VALUE self)
-{
- VALUE waiters = GET_CONDVAR_WAITERS(self);
- VALUE mutex, timeout;
- struct sleep_call args;
+ struct sync_waiter sync_waiter = {
+ .self = mutex,
+ .th = ec->thread_ptr,
+ .fiber = nonblocking_fiber(ec->fiber_ptr)
+ };
- rb_scan_args(argc, argv, "11", &mutex, &timeout);
-
- args.mutex = mutex;
- args.timeout = timeout;
- rb_ary_push(waiters, rb_thread_current());
- rb_ensure(do_sleep, (VALUE)&args, delete_current_thread, waiters);
-
- return self;
+ ccan_list_add_tail(&cv->waitq, &sync_waiter.node);
+ return rb_ec_ensure(ec, do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&sync_waiter);
}
-/*
- * Document-method: ConditionVariable#signal
- *
- * Wakes up the first thread in line waiting for this lock.
- */
-
static VALUE
-rb_condvar_signal(VALUE self)
+rb_condvar_signal(rb_execution_context_t *ec, VALUE self)
{
- wakeup_first_thread(GET_CONDVAR_WAITERS(self));
+ struct rb_condvar *cv = condvar_ptr(self);
+ wakeup_one(&cv->waitq);
return self;
}
-/*
- * Document-method: ConditionVariable#broadcast
- *
- * Wakes up all threads waiting for this lock.
- */
-
static VALUE
-rb_condvar_broadcast(VALUE self)
+rb_condvar_broadcast(rb_execution_context_t *ec, VALUE self)
{
- wakeup_all_threads(GET_CONDVAR_WAITERS(self));
+ struct rb_condvar *cv = condvar_ptr(self);
+ wakeup_all(&cv->waitq);
return self;
}
-/* :nodoc: */
-static VALUE
-undumpable(VALUE obj)
-{
- rb_raise(rb_eTypeError, "can't dump %"PRIsVALUE, rb_obj_class(obj));
- UNREACHABLE;
-}
-
static void
Init_thread_sync(void)
{
-#if 0
- rb_cConditionVariable = rb_define_class("ConditionVariable", rb_cObject); /* teach rdoc ConditionVariable */
- rb_cQueue = rb_define_class("Queue", rb_cObject); /* teach rdoc Queue */
- rb_cSizedQueue = rb_define_class("SizedQueue", rb_cObject); /* teach rdoc SizedQueue */
-#endif
-
/* Mutex */
- rb_cMutex = rb_define_class_under(rb_cThread, "Mutex", rb_cObject);
+ rb_cMutex = rb_define_class_id_under(rb_cThread, rb_intern("Mutex"), rb_cObject);
rb_define_alloc_func(rb_cMutex, mutex_alloc);
- rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
- rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
- rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
- rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
- rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
- rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
- rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize_m, 0);
- rb_define_method(rb_cMutex, "owned?", rb_mutex_owned_p, 0);
/* Queue */
- rb_cQueue = rb_struct_define_without_accessor_under(
- rb_cThread,
- "Queue", rb_cObject, rb_struct_alloc_noinit,
- "que", "waiters", NULL);
+ VALUE rb_cQueue = rb_define_class_id_under_no_pin(rb_cThread, rb_intern("Queue"), rb_cObject);
+ rb_define_alloc_func(rb_cQueue, queue_alloc);
rb_eClosedQueueError = rb_define_class("ClosedQueueError", rb_eStopIteration);
- rb_define_method(rb_cQueue, "initialize", rb_queue_initialize, 0);
- rb_undef_method(rb_cQueue, "initialize_copy");
- rb_define_method(rb_cQueue, "marshal_dump", undumpable, 0);
- rb_define_method(rb_cQueue, "close", rb_queue_close, -1);
- rb_define_method(rb_cQueue, "closed?", rb_queue_closed_p, 0);
- rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
- rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
- rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
- rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
- rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
- rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);
-
- rb_define_alias(rb_cQueue, "enq", "push"); /* Alias for #push. */
- rb_define_alias(rb_cQueue, "<<", "push"); /* Alias for #push. */
- rb_define_alias(rb_cQueue, "deq", "pop"); /* Alias for #pop. */
- rb_define_alias(rb_cQueue, "shift", "pop"); /* Alias for #pop. */
- rb_define_alias(rb_cQueue, "size", "length"); /* Alias for #length. */
-
- rb_cSizedQueue = rb_struct_define_without_accessor_under(
- rb_cThread,
- "SizedQueue", rb_cQueue, rb_struct_alloc_noinit,
- "que", "waiters", "queue_waiters", "size", NULL);
-
- rb_define_method(rb_cSizedQueue, "initialize", rb_szqueue_initialize, 1);
- rb_define_method(rb_cSizedQueue, "close", rb_szqueue_close, -1);
- rb_define_method(rb_cSizedQueue, "max", rb_szqueue_max_get, 0);
- rb_define_method(rb_cSizedQueue, "max=", rb_szqueue_max_set, 1);
- rb_define_method(rb_cSizedQueue, "push", rb_szqueue_push, -1);
- rb_define_method(rb_cSizedQueue, "pop", rb_szqueue_pop, -1);
- rb_define_method(rb_cSizedQueue, "clear", rb_szqueue_clear, 0);
- rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);
-
- rb_define_alias(rb_cSizedQueue, "enq", "push"); /* Alias for #push. */
- rb_define_alias(rb_cSizedQueue, "<<", "push"); /* Alias for #push. */
- rb_define_alias(rb_cSizedQueue, "deq", "pop"); /* Alias for #pop. */
- rb_define_alias(rb_cSizedQueue, "shift", "pop"); /* Alias for #pop. */
+ VALUE rb_cSizedQueue = rb_define_class_id_under_no_pin(rb_cThread, rb_intern("SizedQueue"), rb_cQueue);
+ rb_define_alloc_func(rb_cSizedQueue, szqueue_alloc);
/* CVar */
- rb_cConditionVariable = rb_struct_define_without_accessor_under(
- rb_cThread,
- "ConditionVariable", rb_cObject, rb_struct_alloc_noinit,
- "waiters", NULL);
+ VALUE rb_cConditionVariable = rb_define_class_id_under_no_pin(rb_cThread, rb_intern("ConditionVariable"), rb_cObject);
+ rb_define_alloc_func(rb_cConditionVariable, condvar_alloc);
id_sleep = rb_intern("sleep");
- rb_define_method(rb_cConditionVariable, "initialize", rb_condvar_initialize, 0);
- rb_undef_method(rb_cConditionVariable, "initialize_copy");
- rb_define_method(rb_cConditionVariable, "marshal_dump", undumpable, 0);
- rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, -1);
- rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
- rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);
-
-#define ALIAS_GLOBAL_CONST(name) do { \
- ID id = rb_intern_const(#name); \
- if (!rb_const_defined_at(rb_cObject, id)) { \
- rb_const_set(rb_cObject, id, rb_c##name); \
- } \
- } while (0)
-
- ALIAS_GLOBAL_CONST(Mutex);
- ALIAS_GLOBAL_CONST(Queue);
- ALIAS_GLOBAL_CONST(SizedQueue);
- ALIAS_GLOBAL_CONST(ConditionVariable);
rb_provide("thread.rb");
}
+
+#include "thread_sync.rbinc"