Diffstat (limited to 'thread_sync.c')
-rw-r--r-- | thread_sync.c | 573
1 file changed, 307 insertions, 266 deletions
diff --git a/thread_sync.c b/thread_sync.c
index 98d95ff009..ae69cb4a6e 100644
--- a/thread_sync.c
+++ b/thread_sync.c
@@ -1,5 +1,6 @@
 /* included by thread.c */
 #include "ccan/list/list.h"
+#include "builtin.h"
 
 static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
 static VALUE rb_eClosedQueueError;
@@ -8,7 +9,7 @@ static VALUE rb_eClosedQueueError;
 typedef struct rb_mutex_struct {
     rb_fiber_t *fiber;
     struct rb_mutex_struct *next_mutex;
-    struct list_head waitq; /* protected by GVL */
+    struct ccan_list_head waitq; /* protected by GVL */
 } rb_mutex_t;
 
 /* sync_waiter is always on-stack */
@@ -16,25 +17,43 @@ struct sync_waiter {
     VALUE self;
     rb_thread_t *th;
     rb_fiber_t *fiber;
-    struct list_node node;
+    struct ccan_list_node node;
+};
+
+static inline rb_fiber_t*
+nonblocking_fiber(rb_fiber_t *fiber)
+{
+    if (rb_fiberptr_blocking(fiber)) {
+        return NULL;
+    }
+
+    return fiber;
+}
+
+struct queue_sleep_arg {
+    VALUE self;
+    VALUE timeout;
+    rb_hrtime_t end;
 };
 
 #define MUTEX_ALLOW_TRAP FL_USER1
 
 static void
-sync_wakeup(struct list_head *head, long max)
+sync_wakeup(struct ccan_list_head *head, long max)
 {
+    RUBY_DEBUG_LOG("max:%ld", max);
+
     struct sync_waiter *cur = 0, *next;
 
-    list_for_each_safe(head, cur, next, node) {
-        list_del_init(&cur->node);
+    ccan_list_for_each_safe(head, cur, next, node) {
+        ccan_list_del_init(&cur->node);
 
         if (cur->th->status != THREAD_KILLED) {
-
-            if (cur->th->scheduler != Qnil && rb_fiberptr_blocking(cur->fiber) == 0) {
+            if (cur->th->scheduler != Qnil && cur->fiber) {
                 rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
             }
             else {
+                RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(cur->th));
                 rb_threadptr_interrupt(cur->th);
                 cur->th->status = THREAD_RUNNABLE;
             }
@@ -45,13 +64,13 @@ sync_wakeup(struct list_head *head, long max)
 }
 
 static void
-wakeup_one(struct list_head *head)
+wakeup_one(struct ccan_list_head *head)
 {
     sync_wakeup(head, 1);
 }
 
 static void
-wakeup_all(struct list_head *head)
+wakeup_all(struct ccan_list_head *head)
 {
     sync_wakeup(head, LONG_MAX);
 }
@@ -95,8 +114,8 @@ rb_mutex_num_waiting(rb_mutex_t *mutex)
     struct sync_waiter *w = 0;
     size_t n = 0;
 
-    list_for_each(&mutex->waitq, w, node) {
-        n++;
+    ccan_list_for_each(&mutex->waitq, w, node) {
+        n++;
     }
 
     return n;
@@ -109,9 +128,9 @@
 {
     rb_mutex_t *mutex = ptr;
     if (mutex->fiber) {
-        /* rb_warn("free locked mutex"); */
-        const char *err = rb_mutex_unlock_th(mutex, rb_fiber_threadptr(mutex->fiber), mutex->fiber);
-        if (err) rb_bug("%s", err);
+        /* rb_warn("free locked mutex"); */
+        const char *err = rb_mutex_unlock_th(mutex, rb_fiber_threadptr(mutex->fiber), mutex->fiber);
+        if (err) rb_bug("%s", err);
     }
     ruby_xfree(ptr);
 }
@@ -152,7 +171,7 @@ mutex_alloc(VALUE klass)
     obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
 
-    list_head_init(&mutex->waitq);
+    ccan_list_head_init(&mutex->waitq);
     return obj;
 }
 
@@ -235,24 +254,21 @@ rb_mutex_trylock(VALUE self)
     rb_mutex_t *mutex = mutex_ptr(self);
 
     if (mutex->fiber == 0) {
-        rb_fiber_t *fiber = GET_EC()->fiber_ptr;
-        rb_thread_t *th = GET_THREAD();
-        mutex->fiber = fiber;
+        RUBY_DEBUG_LOG("%p ok", mutex);
 
-        mutex_locked(th, self);
-        return Qtrue;
-    }
+        rb_fiber_t *fiber = GET_EC()->fiber_ptr;
+        rb_thread_t *th = GET_THREAD();
+        mutex->fiber = fiber;
 
-    return Qfalse;
+        mutex_locked(th, self);
+        return Qtrue;
+    }
+    else {
+        RUBY_DEBUG_LOG("%p ng", mutex);
+        return Qfalse;
+    }
 }
 
-/*
- * At maximum, only one thread can use cond_timedwait and watch deadlock
- * periodically. Multiple polling thread (i.e. concurrent deadlock check)
- * introduces new race conditions. [Bug #6278] [ruby-core:44275]
- */
-static const rb_thread_t *patrol_thread = NULL;
-
 static VALUE
 mutex_owned_p(rb_fiber_t *fiber, rb_mutex_t *mutex)
 {
@@ -269,11 +285,13 @@ static VALUE
 delete_from_waitq(VALUE value)
 {
     struct sync_waiter *sync_waiter = (void *)value;
-    list_del(&sync_waiter->node);
+    ccan_list_del(&sync_waiter->node);
 
     return Qnil;
 }
 
+static inline rb_atomic_t threadptr_get_interrupts(rb_thread_t *th);
+
 static VALUE
 do_mutex_lock(VALUE self, int interruptible_p)
 {
@@ -281,11 +299,12 @@ do_mutex_lock(VALUE self, int interruptible_p)
     rb_thread_t *th = ec->thread_ptr;
     rb_fiber_t *fiber = ec->fiber_ptr;
     rb_mutex_t *mutex = mutex_ptr(self);
+    rb_atomic_t saved_ints = 0;
 
     /* When running trap handler */
     if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
-        th->ec->interrupt_mask & TRAP_INTERRUPT_MASK) {
-        rb_raise(rb_eThreadError, "can't be called from trap context");
+        th->ec->interrupt_mask & TRAP_INTERRUPT_MASK) {
+        rb_raise(rb_eThreadError, "can't be called from trap context");
     }
 
     if (rb_mutex_trylock(self) == Qfalse) {
@@ -294,15 +313,17 @@
         }
 
         while (mutex->fiber != fiber) {
+            VM_ASSERT(mutex->fiber != NULL);
+
             VALUE scheduler = rb_fiber_scheduler_current();
             if (scheduler != Qnil) {
                 struct sync_waiter sync_waiter = {
                     .self = self,
                     .th = th,
-                    .fiber = fiber
+                    .fiber = nonblocking_fiber(fiber)
                 };
 
-                list_add_tail(&mutex->waitq, &sync_waiter.node);
+                ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
 
                 rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)&sync_waiter);
@@ -311,51 +332,51 @@
                 }
             }
             else {
-                enum rb_thread_status prev_status = th->status;
-                rb_hrtime_t *timeout = 0;
-                rb_hrtime_t rel = rb_msec2hrtime(100);
-
-                th->status = THREAD_STOPPED_FOREVER;
-                th->locking_mutex = self;
-                rb_ractor_sleeper_threads_inc(th->ractor);
-                /*
-                 * Carefully! while some contended threads are in native_sleep(),
-                 * ractor->sleeper is unstable value. we have to avoid both deadlock
-                 * and busy loop.
-                 */
-                if ((rb_ractor_living_thread_num(th->ractor) == rb_ractor_sleeper_thread_num(th->ractor)) &&
-                    !patrol_thread) {
-                    timeout = &rel;
-                    patrol_thread = th;
+                if (!th->vm->thread_ignore_deadlock && rb_fiber_threadptr(mutex->fiber) == th) {
+                    rb_raise(rb_eThreadError, "deadlock; lock already owned by another fiber belonging to the same thread");
                 }
 
                 struct sync_waiter sync_waiter = {
                     .self = self,
                     .th = th,
-                    .fiber = fiber
+                    .fiber = nonblocking_fiber(fiber),
                 };
 
-                list_add_tail(&mutex->waitq, &sync_waiter.node);
+                RUBY_DEBUG_LOG("%p wait", mutex);
+
+                // similar code with `sleep_forever`, but
+                // sleep_forever(SLEEP_DEADLOCKABLE) raises an exception.
+                // Ensure clause is needed like but `rb_ensure` a bit slow.
+                //
+                // begin
+                //   sleep_forever(th, SLEEP_DEADLOCKABLE);
+                // ensure
+                //   ccan_list_del(&sync_waiter.node);
+                // end
+                enum rb_thread_status prev_status = th->status;
+                th->status = THREAD_STOPPED_FOREVER;
+                rb_ractor_sleeper_threads_inc(th->ractor);
+                rb_check_deadlock(th->ractor);
 
-                native_sleep(th, timeout); /* release GVL */
+                th->locking_mutex = self;
 
-                list_del(&sync_waiter.node);
+                ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
+                {
+                    native_sleep(th, NULL);
+                }
+                ccan_list_del(&sync_waiter.node);
 
+                // unlocked by another thread while sleeping
                 if (!mutex->fiber) {
                     mutex->fiber = fiber;
                 }
 
-                if (patrol_thread == th)
-                    patrol_thread = NULL;
-
-                th->locking_mutex = Qfalse;
-                if (mutex->fiber && timeout && !RUBY_VM_INTERRUPTED(th->ec)) {
-                    rb_check_deadlock(th->ractor);
-                }
-                if (th->status == THREAD_STOPPED_FOREVER) {
-                    th->status = prev_status;
-                }
                 rb_ractor_sleeper_threads_dec(th->ractor);
+                th->status = prev_status;
+                th->locking_mutex = Qfalse;
+                th->locking_mutex = Qfalse;
+
+                RUBY_DEBUG_LOG("%p wakeup", mutex);
             }
 
             if (interruptible_p) {
@@ -367,11 +388,27 @@ do_mutex_lock(VALUE self, int interruptible_p)
                     mutex->fiber = fiber;
                 }
             }
+            else {
+                // clear interrupt information
+                if (RUBY_VM_INTERRUPTED(th->ec)) {
+                    // reset interrupts
+                    if (saved_ints == 0) {
+                        saved_ints = threadptr_get_interrupts(th);
+                    }
+                    else {
+                        // ignore additional interrupts
+                        threadptr_get_interrupts(th);
+                    }
+                }
+            }
         }
 
+        if (saved_ints) th->ec->interrupt_flag = saved_ints;
         if (mutex->fiber == fiber) mutex_locked(th, self);
     }
 
+    RUBY_DEBUG_LOG("%p locked", mutex);
+
+    // assertion
     if (mutex_owned_p(fiber, mutex) == Qfalse) rb_bug("do_mutex_lock: mutex is not owned.");
@@ -415,46 +452,46 @@ rb_mutex_owned_p(VALUE self)
 static const char *
 rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
 {
-    const char *err = NULL;
+    RUBY_DEBUG_LOG("%p", mutex);
 
     if (mutex->fiber == 0) {
-        err = "Attempt to unlock a mutex which is not locked";
+        return "Attempt to unlock a mutex which is not locked";
     }
     else if (mutex->fiber != fiber) {
-        err = "Attempt to unlock a mutex which is locked by another thread/fiber";
+        return "Attempt to unlock a mutex which is locked by another thread/fiber";
     }
-    else {
-        struct sync_waiter *cur = 0, *next;
 
-        mutex->fiber = 0;
-        list_for_each_safe(&mutex->waitq, cur, next, node) {
-            list_del_init(&cur->node);
+    struct sync_waiter *cur = 0, *next;
 
-            if (cur->th->scheduler != Qnil && rb_fiberptr_blocking(cur->fiber) == 0) {
-                rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
-                goto found;
-            }
-            else {
-                switch (cur->th->status) {
-                  case THREAD_RUNNABLE: /* from someone else calling Thread#run */
-                  case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
-                    rb_threadptr_interrupt(cur->th);
-                    goto found;
-                  case THREAD_STOPPED: /* probably impossible */
-                    rb_bug("unexpected THREAD_STOPPED");
-                  case THREAD_KILLED:
-                    /* not sure about this, possible in exit GC? */
-                    rb_bug("unexpected THREAD_KILLED");
-                    continue;
-                }
+    mutex->fiber = 0;
+    thread_mutex_remove(th, mutex);
+
+    ccan_list_for_each_safe(&mutex->waitq, cur, next, node) {
+        ccan_list_del_init(&cur->node);
+
+        if (cur->th->scheduler != Qnil && cur->fiber) {
+            rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
+            return NULL;
+        }
+        else {
+            switch (cur->th->status) {
+              case THREAD_RUNNABLE: /* from someone else calling Thread#run */
+              case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
+                RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(cur->th));
+                rb_threadptr_interrupt(cur->th);
+                return NULL;
+              case THREAD_STOPPED: /* probably impossible */
+                rb_bug("unexpected THREAD_STOPPED");
+              case THREAD_KILLED:
+                /* not sure about this, possible in exit GC? */
+                rb_bug("unexpected THREAD_KILLED");
+                continue;
             }
         }
-
-      found:
-        thread_mutex_remove(th, mutex);
     }
 
-    return err;
+    // We did not find any threads to wake up, so we can just return with no error:
+    return NULL;
 }
 
 /*
@@ -491,7 +528,7 @@ rb_mutex_abandon_locking_mutex(rb_thread_t *th)
     if (th->locking_mutex) {
         rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
 
-        list_head_init(&mutex->waitq);
+        ccan_list_head_init(&mutex->waitq);
         th->locking_mutex = Qfalse;
     }
 }
@@ -502,11 +539,11 @@ rb_mutex_abandon_all(rb_mutex_t *mutexes)
     rb_mutex_t *mutex;
 
     while (mutexes) {
-        mutex = mutexes;
-        mutexes = mutex->next_mutex;
-        mutex->fiber = 0;
-        mutex->next_mutex = 0;
-        list_head_init(&mutex->waitq);
+        mutex = mutexes;
+        mutexes = mutex->next_mutex;
+        mutex->fiber = 0;
+        mutex->next_mutex = 0;
+        ccan_list_head_init(&mutex->waitq);
     }
 }
 #endif
@@ -514,7 +551,7 @@ rb_mutex_abandon_all(rb_mutex_t *mutexes)
 static VALUE
 rb_mutex_sleep_forever(VALUE self)
 {
-    rb_thread_sleep_deadly_allow_spurious_wakeup(self);
+    rb_thread_sleep_deadly_allow_spurious_wakeup(self, Qnil, 0);
     return Qnil;
 }
 
@@ -611,40 +648,45 @@ static VALUE
 rb_mutex_synchronize_m(VALUE self)
 {
     if (!rb_block_given_p()) {
-        rb_raise(rb_eThreadError, "must be called with a block");
+        rb_raise(rb_eThreadError, "must be called with a block");
     }
 
     return rb_mutex_synchronize(self, rb_yield, Qundef);
 }
 
-void rb_mutex_allow_trap(VALUE self, int val)
+void
+rb_mutex_allow_trap(VALUE self, int val)
 {
     Check_TypedStruct(self, &mutex_data_type);
 
     if (val)
-        FL_SET_RAW(self, MUTEX_ALLOW_TRAP);
+        FL_SET_RAW(self, MUTEX_ALLOW_TRAP);
     else
-        FL_UNSET_RAW(self, MUTEX_ALLOW_TRAP);
+        FL_UNSET_RAW(self, MUTEX_ALLOW_TRAP);
 }
 
 /* Queue */
 
 #define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
-PACKED_STRUCT_UNALIGNED(struct rb_queue {
-    struct list_head waitq;
+#define queue_list(q) UNALIGNED_MEMBER_PTR(q, que)
+RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_BEGIN()
+struct rb_queue {
+    struct ccan_list_head waitq;
     rb_serial_t fork_gen;
     const VALUE que;
     int num_waiting;
-});
+} RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_END();
 
 #define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
+#define szqueue_list(sq) UNALIGNED_MEMBER_PTR(sq, q.que)
 #define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
-PACKED_STRUCT_UNALIGNED(struct rb_szqueue {
+RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_BEGIN()
+struct rb_szqueue {
     struct rb_queue q;
     int num_waiting_push;
-    struct list_head pushq;
+    struct ccan_list_head pushq;
     long max;
-});
+} RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_END();
 
 static void
 queue_mark(void *ptr)
 {
@@ -674,7 +716,7 @@ queue_alloc(VALUE klass)
     struct rb_queue *q;
 
     obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
-    list_head_init(queue_waitq(q));
+    ccan_list_head_init(queue_waitq(q));
     return obj;
 }
 
@@ -688,7 +730,7 @@ queue_fork_check(struct rb_queue *q)
     }
     /* forked children can't reach into parent thread stacks */
     q->fork_gen = fork_gen;
-    list_head_init(queue_waitq(q));
+    ccan_list_head_init(queue_waitq(q));
     q->num_waiting = 0;
     return 1;
 }
@@ -706,6 +748,22 @@ queue_ptr(VALUE obj)
 
 #define QUEUE_CLOSED FL_USER5
 
+static rb_hrtime_t
+queue_timeout2hrtime(VALUE timeout)
+{
+    if (NIL_P(timeout)) {
+        return (rb_hrtime_t)0;
+    }
+    rb_hrtime_t rel = 0;
+    if (FIXNUM_P(timeout)) {
+        rel = rb_sec2hrtime(NUM2TIMET(timeout));
+    }
+    else {
+        double2hrtime(&rel, rb_num2dbl(timeout));
+    }
+    return rb_hrtime_add(rel, rb_hrtime_now());
+}
+
 static void
 szqueue_mark(void *ptr)
 {
@@ -731,9 +789,9 @@ szqueue_alloc(VALUE klass)
 {
     struct rb_szqueue *sq;
     VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
-                                      &szqueue_data_type, sq);
-    list_head_init(szqueue_waitq(sq));
-    list_head_init(szqueue_pushq(sq));
+                                      &szqueue_data_type, sq);
+    ccan_list_head_init(szqueue_waitq(sq));
+    ccan_list_head_init(szqueue_pushq(sq));
     return obj;
 }
 
@@ -744,7 +802,7 @@ szqueue_ptr(VALUE obj)
 
     TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
     if (queue_fork_check(&sq->q)) {
-        list_head_init(szqueue_pushq(sq));
+        ccan_list_head_init(szqueue_pushq(sq));
         sq->num_waiting_push = 0;
     }
 
@@ -754,14 +812,14 @@ szqueue_ptr(VALUE obj)
 static VALUE
 ary_buf_new(void)
 {
-    return rb_ary_tmp_new(1);
+    return rb_ary_hidden_new(1);
 }
 
 static VALUE
 check_array(VALUE obj, VALUE ary)
 {
     if (!RB_TYPE_P(ary, T_ARRAY)) {
-        rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
+        rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
     }
     return ary;
 }
@@ -796,7 +854,7 @@ raise_closed_queue_error(VALUE self)
 static VALUE
 queue_closed_result(VALUE self, struct rb_queue *q)
 {
-    assert(queue_length(self, q) == 0);
+    RUBY_ASSERT(queue_length(self, q) == 0);
     return Qnil;
 }
 
@@ -808,8 +866,8 @@ queue_closed_result(VALUE self, struct rb_queue *q)
  * information must be exchanged safely between multiple threads. The
  * Thread::Queue class implements all the required locking semantics.
  *
- * The class implements FIFO type of queue. In a FIFO queue, the first
- * tasks added are the first retrieved.
+ * The class implements FIFO (first in, first out) type of queue.
+ * In a FIFO queue, the first tasks added are the first retrieved.
 *
 * Example:
 *
@@ -817,17 +875,17 @@ queue_closed_result(VALUE self, struct rb_queue *q)
 *
 *    producer = Thread.new do
 *      5.times do |i|
- *        sleep rand(i) # simulate expense
- *        queue << i
- *        puts "#{i} produced"
+ *        sleep rand(i) # simulate expense
+ *        queue << i
+ *        puts "#{i} produced"
 *      end
 *    end
 *
 *    consumer = Thread.new do
 *      5.times do |i|
- *        value = queue.pop
- *        sleep rand(i/2) # simulate expense
- *        puts "consumed #{value}"
+ *        value = queue.pop
+ *        sleep rand(i/2) # simulate expense
+ *        puts "consumed #{value}"
 *      end
 *    end
 *
@@ -838,14 +896,26 @@ queue_closed_result(VALUE self, struct rb_queue *q)
 /*
  * Document-method: Queue::new
  *
- * Creates a new queue instance, optionally using the contents of an Enumerable
+ * call-seq:
+ *   Thread::Queue.new -> empty_queue
+ *   Thread::Queue.new(enumerable) -> queue
+ *
+ * Creates a new queue instance, optionally using the contents of an +enumerable+
 * for its initial state.
 *
- * Example:
+ * Example:
 *
 *    q = Thread::Queue.new
- *    q = Thread::Queue.new([a, b, c])
- *    q = Thread::Queue.new(items)
+ *    #=> #<Thread::Queue:0x00007ff7501110d0>
+ *    q.empty?
+ *    #=> true
+ *
+ *    q = Thread::Queue.new([1, 2, 3])
+ *    #=> #<Thread::Queue:0x00007ff7500ec500>
+ *    q.empty?
+ *    #=> false
+ *    q.pop
+ *    #=> 1
 */
 
 static VALUE
@@ -856,8 +926,8 @@ rb_queue_initialize(int argc, VALUE *argv, VALUE self)
     if ((argc = rb_scan_args(argc, argv, "01", &initial)) == 1) {
         initial = rb_to_array(initial);
     }
-    RB_OBJ_WRITE(self, &q->que, ary_buf_new());
-    list_head_init(queue_waitq(q));
+    RB_OBJ_WRITE(self, queue_list(q), ary_buf_new());
+    ccan_list_head_init(queue_waitq(q));
     if (argc == 1) {
         rb_ary_concat(q->que, initial);
     }
@@ -868,7 +938,7 @@ static VALUE
 queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
 {
     if (queue_closed_p(self)) {
-        raise_closed_queue_error(self);
+        raise_closed_queue_error(self);
     }
     rb_ary_push(check_array(self, q->que), obj);
     wakeup_one(queue_waitq(q));
@@ -914,9 +984,9 @@ rb_queue_close(VALUE self)
     struct rb_queue *q = queue_ptr(self);
 
     if (!queue_closed_p(self)) {
-        FL_SET(self, QUEUE_CLOSED);
+        FL_SET(self, QUEUE_CLOSED);
 
-        wakeup_all(queue_waitq(q));
+        wakeup_all(queue_waitq(q));
     }
 
     return self;
@@ -952,17 +1022,18 @@ rb_queue_push(VALUE self, VALUE obj)
 }
 
 static VALUE
-queue_sleep(VALUE self)
+queue_sleep(VALUE _args)
 {
-    rb_thread_sleep_deadly_allow_spurious_wakeup(self);
+    struct queue_sleep_arg *args = (struct queue_sleep_arg *)_args;
+    rb_thread_sleep_deadly_allow_spurious_wakeup(args->self, args->timeout, args->end);
     return Qnil;
 }
 
 struct queue_waiter {
     struct sync_waiter w;
     union {
-        struct rb_queue *q;
-        struct rb_szqueue *sq;
+        struct rb_queue *q;
+        struct rb_szqueue *sq;
     } as;
 };
 
@@ -971,7 +1042,7 @@ queue_sleep_done(VALUE p)
 {
     struct queue_waiter *qw = (struct queue_waiter *)p;
 
-    list_del(&qw->w.node);
+    ccan_list_del(&qw->w.node);
     qw->as.q->num_waiting--;
 
     return Qfalse;
@@ -982,75 +1053,66 @@ szqueue_sleep_done(VALUE p)
 {
     struct queue_waiter *qw = (struct queue_waiter *)p;
 
-    list_del(&qw->w.node);
+    ccan_list_del(&qw->w.node);
     qw->as.sq->num_waiting_push--;
 
     return Qfalse;
 }
 
 static VALUE
-queue_do_pop(VALUE self, struct rb_queue *q, int should_block)
+queue_do_pop(VALUE self, struct rb_queue *q, int should_block, VALUE timeout)
 {
     check_array(self, q->que);
-
-    while (RARRAY_LEN(q->que) == 0) {
+    if (RARRAY_LEN(q->que) == 0) {
         if (!should_block) {
             rb_raise(rb_eThreadError, "queue empty");
         }
-        else if (queue_closed_p(self)) {
+
+        if (RTEST(rb_equal(INT2FIX(0), timeout))) {
+            return Qnil;
+        }
+    }
+
+    rb_hrtime_t end = queue_timeout2hrtime(timeout);
+    while (RARRAY_LEN(q->que) == 0) {
+        if (queue_closed_p(self)) {
             return queue_closed_result(self, q);
         }
         else {
             rb_execution_context_t *ec = GET_EC();
 
-            assert(RARRAY_LEN(q->que) == 0);
-            assert(queue_closed_p(self) == 0);
+            RUBY_ASSERT(RARRAY_LEN(q->que) == 0);
+            RUBY_ASSERT(queue_closed_p(self) == 0);
 
             struct queue_waiter queue_waiter = {
-                .w = {.self = self, .th = ec->thread_ptr, .fiber = ec->fiber_ptr},
+                .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
                 .as = {.q = q}
             };
 
-            list_add_tail(queue_waitq(queue_waiter.as.q), &queue_waiter.w.node);
+            struct ccan_list_head *waitq = queue_waitq(q);
+
+            ccan_list_add_tail(waitq, &queue_waiter.w.node);
             queue_waiter.as.q->num_waiting++;
 
-            rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)&queue_waiter);
+            struct queue_sleep_arg queue_sleep_arg = {
+                .self = self,
+                .timeout = timeout,
+                .end = end
+            };
+
+            rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, queue_sleep_done, (VALUE)&queue_waiter);
+            if (!NIL_P(timeout) && (rb_hrtime_now() >= end))
+                break;
         }
     }
 
     return rb_ary_shift(q->que);
 }
 
-static int
-queue_pop_should_block(int argc, const VALUE *argv)
-{
-    int should_block = 1;
-    rb_check_arity(argc, 0, 1);
-    if (argc > 0) {
-        should_block = !RTEST(argv[0]);
-    }
-    return should_block;
-}
-
-/*
- * Document-method: Thread::Queue#pop
- * call-seq:
- *   pop(non_block=false)
- *   deq(non_block=false)
- *   shift(non_block=false)
- *
- * Retrieves data from the queue.
- *
- * If the queue is empty, the calling thread is suspended until data is pushed
- * onto the queue. If +non_block+ is true, the thread isn't suspended, and
- * +ThreadError+ is raised.
- */
-
 static VALUE
-rb_queue_pop(int argc, VALUE *argv, VALUE self)
+rb_queue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
 {
-    int should_block = queue_pop_should_block(argc, argv);
-    return queue_do_pop(self, queue_ptr(self), should_block);
+    return queue_do_pop(self, queue_ptr(self), !RTEST(non_block), timeout);
 }
 
 /*
@@ -1096,6 +1158,22 @@ rb_queue_length(VALUE self)
     return LONG2NUM(queue_length(self, queue_ptr(self)));
 }
 
+NORETURN(static VALUE rb_queue_freeze(VALUE self));
+/*
+ * call-seq:
+ *   freeze
+ *
+ * The queue can't be frozen, so this method raises an exception:
+ *   Thread::Queue.new.freeze # Raises TypeError (cannot freeze #<Thread::Queue:0x...>)
+ *
+ */
+static VALUE
+rb_queue_freeze(VALUE self)
+{
+    rb_raise(rb_eTypeError, "cannot freeze " "%+"PRIsVALUE, self);
+    UNREACHABLE_RETURN(self);
+}
+
 /*
  * Document-method: Thread::Queue#num_waiting
  *
@@ -1134,12 +1212,12 @@ rb_szqueue_initialize(VALUE self, VALUE vmax)
 
     max = NUM2LONG(vmax);
     if (max <= 0) {
-        rb_raise(rb_eArgError, "queue size must be positive");
+        rb_raise(rb_eArgError, "queue size must be positive");
     }
 
-    RB_OBJ_WRITE(self, &sq->q.que, ary_buf_new());
-    list_head_init(szqueue_waitq(sq));
-    list_head_init(szqueue_pushq(sq));
+    RB_OBJ_WRITE(self, szqueue_list(sq), ary_buf_new());
+    ccan_list_head_init(szqueue_waitq(sq));
+    ccan_list_head_init(szqueue_pushq(sq));
     sq->max = max;
 
     return self;
@@ -1161,11 +1239,11 @@ static VALUE
 rb_szqueue_close(VALUE self)
 {
     if (!queue_closed_p(self)) {
-        struct rb_szqueue *sq = szqueue_ptr(self);
+        struct rb_szqueue *sq = szqueue_ptr(self);
 
-        FL_SET(self, QUEUE_CLOSED);
-        wakeup_all(szqueue_waitq(sq));
-        wakeup_all(szqueue_pushq(sq));
+        FL_SET(self, QUEUE_CLOSED);
+        wakeup_all(szqueue_waitq(sq));
+        wakeup_all(szqueue_pushq(sq));
     }
     return self;
 }
@@ -1197,109 +1275,79 @@ rb_szqueue_max_set(VALUE self, VALUE vmax)
     struct rb_szqueue *sq = szqueue_ptr(self);
 
     if (max <= 0) {
-        rb_raise(rb_eArgError, "queue size must be positive");
+        rb_raise(rb_eArgError, "queue size must be positive");
     }
     if (max > sq->max) {
-        diff = max - sq->max;
+        diff = max - sq->max;
     }
     sq->max = max;
     sync_wakeup(szqueue_pushq(sq), diff);
     return vmax;
 }
 
-static int
-szqueue_push_should_block(int argc, const VALUE *argv)
-{
-    int should_block = 1;
-    rb_check_arity(argc, 1, 2);
-    if (argc > 1) {
-        should_block = !RTEST(argv[1]);
-    }
-    return should_block;
-}
-
-/*
- * Document-method: Thread::SizedQueue#push
- * call-seq:
- *   push(object, non_block=false)
- *   enq(object, non_block=false)
- *   <<(object)
- *
- * Pushes +object+ to the queue.
- *
- * If there is no space left in the queue, waits until space becomes
- * available, unless +non_block+ is true. If +non_block+ is true, the
- * thread isn't suspended, and +ThreadError+ is raised.
- */
-
 static VALUE
-rb_szqueue_push(int argc, VALUE *argv, VALUE self)
+rb_szqueue_push(rb_execution_context_t *ec, VALUE self, VALUE object, VALUE non_block, VALUE timeout)
 {
     struct rb_szqueue *sq = szqueue_ptr(self);
-    int should_block = szqueue_push_should_block(argc, argv);
 
-    while (queue_length(self, &sq->q) >= sq->max) {
-        if (!should_block) {
+    if (queue_length(self, &sq->q) >= sq->max) {
+        if (RTEST(non_block)) {
             rb_raise(rb_eThreadError, "queue full");
         }
-        else if (queue_closed_p(self)) {
-            break;
+
+        if (RTEST(rb_equal(INT2FIX(0), timeout))) {
+            return Qnil;
+        }
+    }
+
+    rb_hrtime_t end = queue_timeout2hrtime(timeout);
+    while (queue_length(self, &sq->q) >= sq->max) {
+        if (queue_closed_p(self)) {
+            raise_closed_queue_error(self);
        }
        else {
            rb_execution_context_t *ec = GET_EC();
            struct queue_waiter queue_waiter = {
-                .w = {.self = self, .th = ec->thread_ptr, .fiber = ec->fiber_ptr},
+                .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
                .as = {.sq = sq}
            };
 
-            struct list_head *pushq = szqueue_pushq(sq);
+            struct ccan_list_head *pushq = szqueue_pushq(sq);
 
-            list_add_tail(pushq, &queue_waiter.w.node);
+            ccan_list_add_tail(pushq, &queue_waiter.w.node);
            sq->num_waiting_push++;
 
-            rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)&queue_waiter);
+            struct queue_sleep_arg queue_sleep_arg = {
+                .self = self,
+                .timeout = timeout,
+                .end = end
+            };
+            rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, szqueue_sleep_done, (VALUE)&queue_waiter);
+            if (!NIL_P(timeout) && rb_hrtime_now() >= end) {
+                return Qnil;
+            }
        }
    }
 
-    if (queue_closed_p(self)) {
-        raise_closed_queue_error(self);
-    }
-
-    return queue_do_push(self, &sq->q, argv[0]);
+    return queue_do_push(self, &sq->q, object);
 }
 
 static VALUE
-szqueue_do_pop(VALUE self, int should_block)
+szqueue_do_pop(VALUE self, int should_block, VALUE timeout)
 {
     struct rb_szqueue *sq = szqueue_ptr(self);
-    VALUE retval = queue_do_pop(self, &sq->q, should_block);
+    VALUE retval = queue_do_pop(self, &sq->q, should_block, timeout);
 
     if (queue_length(self, &sq->q) < sq->max) {
-        wakeup_one(szqueue_pushq(sq));
+        wakeup_one(szqueue_pushq(sq));
     }
 
     return retval;
 }
-
-/*
- * Document-method: Thread::SizedQueue#pop
- * call-seq:
- *   pop(non_block=false)
- *   deq(non_block=false)
- *   shift(non_block=false)
- *
- * Retrieves data from the queue.
- *
- * If the queue is empty, the calling thread is suspended until data is pushed
- * onto the queue. If +non_block+ is true, the thread isn't suspended, and
- * +ThreadError+ is raised.
- */
-
 static VALUE
-rb_szqueue_pop(int argc, VALUE *argv, VALUE self)
+rb_szqueue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
 {
-    int should_block = queue_pop_should_block(argc, argv);
-    return szqueue_do_pop(self, should_block);
+    return szqueue_do_pop(self, !RTEST(non_block), timeout);
 }
 
 /*
@@ -1367,7 +1415,7 @@ rb_szqueue_empty_p(VALUE self)
 /* ConditionalVariable */
 
 struct rb_condvar {
-    struct list_head waitq;
+    struct ccan_list_head waitq;
     rb_serial_t fork_gen;
 };
 
@@ -1422,7 +1470,7 @@ condvar_ptr(VALUE self)
     /* forked children can't reach into parent thread stacks */
     if (cv->fork_gen != fork_gen) {
         cv->fork_gen = fork_gen;
-        list_head_init(&cv->waitq);
+        ccan_list_head_init(&cv->waitq);
     }
 
     return cv;
@@ -1435,7 +1483,7 @@ condvar_alloc(VALUE klass)
     VALUE obj;
 
     obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
-    list_head_init(&cv->waitq);
+    ccan_list_head_init(&cv->waitq);
 
     return obj;
 }
@@ -1450,7 +1498,7 @@ static VALUE
 rb_condvar_initialize(VALUE self)
 {
     struct rb_condvar *cv = condvar_ptr(self);
-    list_head_init(&cv->waitq);
+    ccan_list_head_init(&cv->waitq);
     return self;
 }
 
@@ -1493,10 +1541,10 @@ rb_condvar_wait(int argc, VALUE *argv, VALUE self)
     struct sync_waiter sync_waiter = {
         .self = args.mutex,
         .th = ec->thread_ptr,
-        .fiber = ec->fiber_ptr
+        .fiber = nonblocking_fiber(ec->fiber_ptr)
     };
 
-    list_add_tail(&cv->waitq, &sync_waiter.node);
+    ccan_list_add_tail(&cv->waitq, &sync_waiter.node);
 
     return rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&sync_waiter);
 }
@@ -1583,16 +1631,14 @@ Init_thread_sync(void)
     rb_define_method(rb_cQueue, "close", rb_queue_close, 0);
     rb_define_method(rb_cQueue, "closed?", rb_queue_closed_p, 0);
     rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
-    rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
     rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
     rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
     rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
     rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);
+    rb_define_method(rb_cQueue, "freeze", rb_queue_freeze, 0);
 
     rb_define_alias(rb_cQueue, "enq", "push");
     rb_define_alias(rb_cQueue, "<<", "push");
-    rb_define_alias(rb_cQueue, "deq", "pop");
-    rb_define_alias(rb_cQueue, "shift", "pop");
     rb_define_alias(rb_cQueue, "size", "length");
 
     DEFINE_CLASS(SizedQueue, Queue);
@@ -1602,17 +1648,10 @@ Init_thread_sync(void)
     rb_define_method(rb_cSizedQueue, "close", rb_szqueue_close, 0);
    rb_define_method(rb_cSizedQueue, "max", rb_szqueue_max_get, 0);
    rb_define_method(rb_cSizedQueue, "max=", rb_szqueue_max_set, 1);
-    rb_define_method(rb_cSizedQueue, "push", rb_szqueue_push, -1);
-    rb_define_method(rb_cSizedQueue, "pop", rb_szqueue_pop, -1);
    rb_define_method(rb_cSizedQueue, "empty?", rb_szqueue_empty_p, 0);
    rb_define_method(rb_cSizedQueue, "clear", rb_szqueue_clear, 0);
    rb_define_method(rb_cSizedQueue, "length", rb_szqueue_length, 0);
    rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);
-
-    rb_define_alias(rb_cSizedQueue, "enq", "push");
-    rb_define_alias(rb_cSizedQueue, "<<", "push");
-    rb_define_alias(rb_cSizedQueue, "deq", "pop");
-    rb_define_alias(rb_cSizedQueue, "shift", "pop");
    rb_define_alias(rb_cSizedQueue, "size", "length");
 
    /* CVar */
@@ -1630,3 +1669,5 @@ Init_thread_sync(void)
 
     rb_provide("thread.rb");
 }
+
+#include "thread_sync.rbinc"
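The user-visible change carried by this diff is that Queue#pop, SizedQueue#push, and SizedQueue#pop become builtin methods (hence the new thread_sync.rbinc include) and gain an optional timeout: queue_do_pop and rb_szqueue_push above return nil once the deadline computed by queue_timeout2hrtime passes, a timeout of 0 never sleeps, and a nil timeout keeps the old blocking behaviour. A minimal usage sketch, assuming the Ruby-level wrappers in the accompanying thread_sync.rb (not part of this diff) expose the new argument as the timeout: keyword, as they do in Ruby 3.2:

    q = Thread::Queue.new
    q.pop(timeout: 0.5)              # waits up to 0.5 s, then returns nil if nothing was pushed
    q.pop(timeout: 0)                # queue is empty: returns nil immediately, never sleeps

    sq = Thread::SizedQueue.new(1)
    sq.push(:first)
    sq.push(:second, timeout: 0.1)   # queue is full: waits up to 0.1 s, then returns nil

The diff also makes Queue#freeze raise TypeError, and replaces the patrol_thread polling in do_mutex_lock with a direct per-fiber check that raises "deadlock; lock already owned by another fiber belonging to the same thread".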