| author    | Luke Gruber <luke.gruber@shopify.com>    | 2025-12-04 16:51:11 -0500 |
|-----------|------------------------------------------|---------------------------|
| committer | GitHub <noreply@github.com>              | 2025-12-04 16:51:11 -0500 |
| commit    | 8d8159e7d87e4fd1594ce2fad3d2653e47fb1026 |                           |
| tree      | c11cf0064acddc8b0b3f3f4332565e3e649dee1e |                           |
| parent    | 7d9558f99b5bb8279c138860670b56735a28a3e4 |                           |
Fix thread scheduler issue with thread_sched_wait_events (#15392)
Fix a race between the timer thread dequeuing a waiting thread and that
thread skipping its sleep because it had already been dequeued. We now use
`th->event_serial`, which is protected by `thread_sched_lock`. When a thread
is put on the timer thread's waiting list, its current event serial is saved
on the waiting item. The timer thread checks that the saved serial still
matches the thread's current serial before calling `thread_sched_to_ready`.
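
For intuition, here is a minimal standalone sketch of the serial handshake.
The names (`waiter_begin_wait`, `waker_wakeup`, etc.) are hypothetical and
this is not the actual scheduler code: the waiter takes a fresh serial under
the lock before each wait, the waker captures the serial at enqueue time and
re-checks it under the same lock, so a stale wakeup aimed at an earlier wait
becomes a no-op.

```c
// Hedged sketch of the serial (generation counter) pattern; all names
// are hypothetical, not Ruby's internals.
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct waiter {
    pthread_mutex_t lock;   /* stands in for thread_sched_lock */
    pthread_cond_t  cond;
    uint32_t        event_serial;
    bool            sleeping;
};

/* Waiter side: take a fresh serial for this wait before going to sleep. */
static uint32_t
waiter_begin_wait(struct waiter *w)
{
    pthread_mutex_lock(&w->lock);
    uint32_t serial = ++w->event_serial; /* overflow is fine; only equality matters */
    w->sleeping = true;
    pthread_mutex_unlock(&w->lock);
    return serial;
}

static void
waiter_sleep(struct waiter *w)
{
    pthread_mutex_lock(&w->lock);
    while (w->sleeping) {
        pthread_cond_wait(&w->cond, &w->lock);
    }
    pthread_mutex_unlock(&w->lock);
}

/* Waker side: wake only if the serial captured at enqueue time is still current. */
static void
waker_wakeup(struct waiter *w, uint32_t saved_serial)
{
    pthread_mutex_lock(&w->lock);
    if (w->event_serial == saved_serial && w->sleeping) {
        w->sleeping = false;
        pthread_cond_signal(&w->cond);
    }
    /* else: stale wakeup aimed at an earlier wait; drop it */
    pthread_mutex_unlock(&w->lock);
}

int
main(void)
{
    struct waiter w = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
    };

    uint32_t s1 = waiter_begin_wait(&w);
    waker_wakeup(&w, s1);               /* serial matches: wakes */
    waiter_sleep(&w);                   /* returns immediately */

    uint32_t s2 = waiter_begin_wait(&w);
    waker_wakeup(&w, s1);               /* stale serial: dropped */
    printf("sleeping after stale wakeup: %d\n", (int)w.sleeping); /* prints 1 */
    waker_wakeup(&w, s2);               /* current serial: wakes */
    waiter_sleep(&w);
    return 0;
}
```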
The following script (taken from a test in `test_thread.rb`) used to crash
on scheduler debug assertions. It would likely crash in non-debug builds as well.
```ruby
def assert_nil(val)
  if val != nil
    raise "Expected #{val} to be nil"
  end
end

def assert_equal(expected, actual)
  if expected != actual
    raise "Expected #{expected} to be #{actual}"
  end
end

def test_join2
  ok = false
  t1 = Thread.new { ok = true; sleep }
  Thread.pass until ok
  Thread.pass until t1.stop?

  t2 = Thread.new do
    Thread.pass while ok
    t1.join(0.01)
  end
  t3 = Thread.new do
    ok = false
    t1.join
  end

  assert_nil(t2.value)
  t1.wakeup
  assert_equal(t1, t3.value)
ensure
  t1&.kill&.join
  t2&.kill&.join
  t3&.kill&.join
end

rs = 30.times.map do
  Ractor.new do
    test_join2
  end
end
rs.each(&:join)
```
| file                | lines changed |
|---------------------|---------------|
| thread_pthread.c    | 31            |
| thread_pthread.h    | 1             |
| thread_pthread_mn.c | 34            |
| vm_core.h           | 1             |

4 files changed, 42 insertions(+), 25 deletions(-)
```diff
diff --git a/thread_pthread.c b/thread_pthread.c
index 2eaa407f10..93b32e55c0 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -1122,8 +1122,10 @@ thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t
 {
     if (!RUBY_VM_INTERRUPTED(th->ec)) {
         bool can_direct_transfer = !th_has_dedicated_nt(th);
+        th->status = THREAD_STOPPED_FOREVER;
         thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
         thread_sched_wait_running_turn(sched, th, can_direct_transfer);
+        th->status = THREAD_RUNNABLE;
     }
     else {
         RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
@@ -1149,6 +1151,7 @@ thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
         bool can_direct_transfer = !th_has_dedicated_nt(th);
         thread_sched_to_ready_common(sched, th, false, can_direct_transfer);
         thread_sched_wait_running_turn(sched, th, can_direct_transfer);
+        th->status = THREAD_RUNNABLE;
     }
     else {
         VM_ASSERT(sched->readyq_cnt == 0);
@@ -1338,7 +1341,7 @@ void rb_ractor_lock_self(rb_ractor_t *r);
 void rb_ractor_unlock_self(rb_ractor_t *r);
 
 // The current thread for a ractor is put to "sleep" (descheduled in the STOPPED_FOREVER state) waiting for
-// a ractor action to wake it up. See docs for `ractor_sched_sleep_with_cleanup` for more info.
+// a ractor action to wake it up.
 void
 rb_ractor_sched_wait(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf, void *ubf_arg)
 {
@@ -2868,7 +2871,7 @@ static struct {
 
 static void timer_thread_check_timeslice(rb_vm_t *vm);
 static int timer_thread_set_timeout(rb_vm_t *vm);
-static void timer_thread_wakeup_thread(rb_thread_t *th);
+static void timer_thread_wakeup_thread(rb_thread_t *th, uint32_t event_serial);
 
 #include "thread_pthread_mn.c"
@@ -2970,7 +2973,7 @@ timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
 }
 
 static rb_thread_t *
-timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
+timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now, uint32_t *event_serial)
 {
     struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
@@ -2987,32 +2990,31 @@ timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
         w->flags = thread_sched_waiting_none;
         w->data.result = 0;
 
-        return thread_sched_waiting_thread(w);
+        rb_thread_t *th = thread_sched_waiting_thread(w);
+        *event_serial = w->data.event_serial;
+        return th;
     }
 
     return NULL;
 }
 
 static void
-timer_thread_wakeup_thread_locked(struct rb_thread_sched *sched, rb_thread_t *th)
+timer_thread_wakeup_thread_locked(struct rb_thread_sched *sched, rb_thread_t *th, uint32_t event_serial)
 {
-    if (sched->running != th) {
+    if (sched->running != th && th->event_serial == event_serial) {
         thread_sched_to_ready_common(sched, th, true, false);
     }
-    else {
-        // will be release the execution right
-    }
 }
 
 static void
-timer_thread_wakeup_thread(rb_thread_t *th)
+timer_thread_wakeup_thread(rb_thread_t *th, uint32_t event_serial)
 {
     RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
 
     struct rb_thread_sched *sched = TH_SCHED(th);
 
     thread_sched_lock(sched, th);
     {
-        timer_thread_wakeup_thread_locked(sched, th);
+        timer_thread_wakeup_thread_locked(sched, th, event_serial);
     }
     thread_sched_unlock(sched, th);
 }
@@ -3022,11 +3024,14 @@ timer_thread_check_timeout(rb_vm_t *vm)
 {
     rb_hrtime_t now = rb_hrtime_now();
    rb_thread_t *th;
+    uint32_t event_serial;
 
     rb_native_mutex_lock(&timer_th.waiting_lock);
     {
-        while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
-            timer_thread_wakeup_thread(th);
+        while ((th = timer_thread_deq_wakeup(vm, now, &event_serial)) != NULL) {
+            rb_native_mutex_unlock(&timer_th.waiting_lock);
+            timer_thread_wakeup_thread(th, event_serial);
+            rb_native_mutex_lock(&timer_th.waiting_lock);
         }
     }
     rb_native_mutex_unlock(&timer_th.waiting_lock);
diff --git a/thread_pthread.h b/thread_pthread.h
index d635948c4b..f579e53781 100644
--- a/thread_pthread.h
+++ b/thread_pthread.h
@@ -39,6 +39,7 @@ struct rb_thread_sched_waiting {
 #else
         uint64_t timeout;
 #endif
+        uint32_t event_serial;
         int fd; // -1 for timeout only
         int result;
     } data;
diff --git a/thread_pthread_mn.c b/thread_pthread_mn.c
index 8100fd534e..2211d4d106 100644
--- a/thread_pthread_mn.c
+++ b/thread_pthread_mn.c
@@ -3,7 +3,7 @@
 #if USE_MN_THREADS
 
 static void timer_thread_unregister_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags);
-static void timer_thread_wakeup_thread_locked(struct rb_thread_sched *sched, rb_thread_t *th);
+static void timer_thread_wakeup_thread_locked(struct rb_thread_sched *sched, rb_thread_t *th, uint32_t event_serial);
 
 static bool
 timer_thread_cancel_waiting(rb_thread_t *th)
@@ -15,9 +15,7 @@ timer_thread_cancel_waiting(rb_thread_t *th)
         if (th->sched.waiting_reason.flags) {
             canceled = true;
             ccan_list_del_init(&th->sched.waiting_reason.node);
-            if (th->sched.waiting_reason.flags & (thread_sched_waiting_io_read | thread_sched_waiting_io_write)) {
-                timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags);
-            }
+            timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags);
             th->sched.waiting_reason.flags = thread_sched_waiting_none;
         }
     }
@@ -57,7 +55,7 @@ ubf_event_waiting(void *ptr)
     thread_sched_unlock(sched, th);
 }
 
-static bool timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel);
+static bool timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel, uint32_t event_serial);
 
 // return true if timed out
 static bool
@@ -67,13 +65,15 @@ thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd
     volatile bool timedout = false, need_cancel = false;
 
+    uint32_t event_serial = ++th->event_serial; // overflow is okay
+
     if (ubf_set(th, ubf_event_waiting, (void *)th)) {
         return false;
     }
 
     thread_sched_lock(sched, th);
     {
-        if (timer_thread_register_waiting(th, fd, events, rel)) {
+        if (timer_thread_register_waiting(th, fd, events, rel, event_serial)) {
             RUBY_DEBUG_LOG("wait fd:%d", fd);
 
             RB_VM_SAVE_MACHINE_CONTEXT(th);
@@ -81,9 +81,11 @@ thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd
             RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
 
             if (th->sched.waiting_reason.flags == thread_sched_waiting_none) {
-                // already awaken
+                th->event_serial++;
+                // timer thread has dequeued us already, but it won't try to wake us because we bumped our serial
             }
             else if (RUBY_VM_INTERRUPTED(th->ec)) {
+                th->event_serial++; // make sure timer thread doesn't try to wake us
                 need_cancel = true;
             }
             else {
@@ -111,7 +113,8 @@ thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd
     }
     thread_sched_unlock(sched, th);
 
-    ubf_clear(th); // TODO: maybe it is already NULL?
+    // if ubf triggered between sched unlock and ubf clear, sched->running == th here
+    ubf_clear(th);
 
     VM_ASSERT(sched->running == th);
@@ -680,7 +683,7 @@ kqueue_already_registered(int fd)
 }
 
 // return false if the fd is not waitable or not need to wait.
 static bool
-timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel)
+timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel, uint32_t event_serial)
 {
     RUBY_DEBUG_LOG("th:%u fd:%d flag:%d rel:%lu", rb_th_serial(th), fd, flags, rel ? (unsigned long)*rel : 0);
@@ -807,6 +810,7 @@ timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waitin
             th->sched.waiting_reason.data.timeout = abs;
             th->sched.waiting_reason.data.fd = fd;
             th->sched.waiting_reason.data.result = 0;
+            th->sched.waiting_reason.data.event_serial = event_serial;
         }
 
         if (abs == 0) { // no timeout
@@ -855,6 +859,10 @@ timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waitin
 static void
 timer_thread_unregister_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags)
 {
+    if (!(th->sched.waiting_reason.flags & (thread_sched_waiting_io_read | thread_sched_waiting_io_write))) {
+        return;
+    }
+
     RUBY_DEBUG_LOG("th:%u fd:%d", rb_th_serial(th), fd);
 #if HAVE_SYS_EVENT_H
     kqueue_unregister_waiting(fd, flags);
@@ -885,7 +893,7 @@ timer_thread_setup_mn(void)
 #endif
     RUBY_DEBUG_LOG("comm_fds:%d/%d", timer_th.comm_fds[0], timer_th.comm_fds[1]);
 
-    timer_thread_register_waiting(NULL, timer_th.comm_fds[0], thread_sched_waiting_io_read | thread_sched_waiting_io_force, NULL);
+    timer_thread_register_waiting(NULL, timer_th.comm_fds[0], thread_sched_waiting_io_read | thread_sched_waiting_io_force, NULL, 0);
 }
 
 static int
@@ -986,8 +994,9 @@ timer_thread_polling(rb_vm_t *vm)
                     th->sched.waiting_reason.flags = thread_sched_waiting_none;
                     th->sched.waiting_reason.data.fd = -1;
                     th->sched.waiting_reason.data.result = filter;
+                    uint32_t event_serial = th->sched.waiting_reason.data.event_serial;
 
-                    timer_thread_wakeup_thread_locked(sched, th);
+                    timer_thread_wakeup_thread_locked(sched, th, event_serial);
                 }
                 else {
                     // already released
@@ -1031,8 +1040,9 @@ timer_thread_polling(rb_vm_t *vm)
                     th->sched.waiting_reason.flags = thread_sched_waiting_none;
                     th->sched.waiting_reason.data.fd = -1;
                     th->sched.waiting_reason.data.result = (int)events;
+                    uint32_t event_serial = th->sched.waiting_reason.data.event_serial;
 
-                    timer_thread_wakeup_thread_locked(sched, th);
+                    timer_thread_wakeup_thread_locked(sched, th, event_serial);
                 }
                 else {
                     // already released
diff --git a/vm_core.h b/vm_core.h
--- a/vm_core.h
+++ b/vm_core.h
@@ -1127,6 +1127,7 @@ typedef struct rb_thread_struct {
     struct rb_thread_sched_item sched;
     bool mn_schedulable;
     rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
+    uint32_t event_serial;
 
     VALUE last_status; /* $? */
```
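
One detail worth calling out in `timer_thread_check_timeout` above: the timer
thread now releases `timer_th.waiting_lock` before waking each dequeued thread
(waking takes the per-scheduler `thread_sched_lock`), then retakes it to keep
draining, presumably so the two locks are never held together in this path.
Below is a hedged standalone sketch of that drop-and-retake pattern; the names
(`list_lock`, `drain_expired`, etc.) are hypothetical, not Ruby's actual code.

```c
// Sketch of the drop-and-retake locking pattern (hypothetical names).
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct waiter_node {
    struct waiter_node *next;
    pthread_mutex_t *sched_lock; /* per-waiter scheduler lock */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter_node *expired_head;

/* Pop one expired waiter; caller must hold list_lock. */
static struct waiter_node *
pop_expired(void)
{
    struct waiter_node *n = expired_head;
    if (n) expired_head = n->next;
    return n;
}

/* Waking takes the waiter's scheduler lock (the serial check would live here). */
static void
wake_one(struct waiter_node *n)
{
    pthread_mutex_lock(n->sched_lock);
    printf("woke a waiter\n");
    pthread_mutex_unlock(n->sched_lock);
}

static void
drain_expired(void)
{
    struct waiter_node *n;

    pthread_mutex_lock(&list_lock);
    while ((n = pop_expired()) != NULL) {
        /* drop list_lock so it is never held together with a sched lock */
        pthread_mutex_unlock(&list_lock);
        wake_one(n);
        pthread_mutex_lock(&list_lock);
    }
    pthread_mutex_unlock(&list_lock);
}

int
main(void)
{
    pthread_mutex_t sched_a = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_t sched_b = PTHREAD_MUTEX_INITIALIZER;
    struct waiter_node b = { NULL, &sched_b };
    struct waiter_node a = { &b, &sched_a };

    expired_head = &a;
    drain_expired(); /* wakes both without nesting the two locks */
    return 0;
}
```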
