author    | KJ Tsanaktsidis <kj@kjtsanaktsidis.id.au> | 2024-01-28 17:15:43 +1100
committer | KJ Tsanaktsidis <kj@kjtsanaktsidis.id.au> | 2024-02-06 22:23:42 +1100
commit    | 719db18b50a996f3df4f76854f7dc9d69f875582 (patch)
tree      | 0cd4da42473e9ca04e998b495013a75e37925b3e
parent    | 19f615521d92f9837099173abb831cfcd2ff284c (diff)
notify ASAN about M:N threading stack switches
In a similar way to how we do it with fibers in cont.c, we need to call
__sanitizer_start_switch_fiber and __sanitizer_finish_switch_fiber around
the call to coroutine_transfer so that ASAN can save and restore the fake
stack pointer.
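
For context, the ASAN fiber-switch API works in pairs: before leaving a stack you
call __sanitizer_start_switch_fiber with a slot in which to save the current fake
stack and the bounds of the stack being switched to, and once you arrive on the
other side you call __sanitizer_finish_switch_fiber with the saved handle. The
following is only a minimal sketch of that pattern, assuming a hypothetical
`struct coro` and `coroutine_transfer`; it is not Ruby's actual
`struct coroutine_context` (the real change is in the diff below), and the
transfer primitive itself is assumed to be provided elsewhere.

```c
#include <stddef.h>
#include <sanitizer/common_interface_defs.h>

/* Hypothetical coroutine handle; Ruby's real struct coroutine_context carries
 * the same three fields used by the ASAN annotations. */
struct coro {
    void  *fake_stack;   /* ASAN's fake-stack handle, saved across switches */
    void  *stack_base;   /* base of this coroutine's machine stack */
    size_t stack_size;   /* size of this coroutine's machine stack */
};

/* Assumed to be provided by the coroutine implementation (assembly, in Ruby's
 * case); returns the context we were resumed from. */
struct coro *coroutine_transfer(struct coro *from, struct coro *to);

static void
switch_with_asan(struct coro *from, struct coro *to)
{
    /* Leaving `from`'s stack: save its fake stack and announce the stack we
     * are about to start running on. */
    __sanitizer_start_switch_fiber(&from->fake_stack, to->stack_base, to->stack_size);

    struct coro *returning_from = coroutine_transfer(from, to);

    /* Back on `from`'s stack: restore its fake stack and record which stack
     * control just came from. */
    __sanitizer_finish_switch_fiber(from->fake_stack,
                                    (const void **)&returning_from->stack_base,
                                    &returning_from->stack_size);
}
```

In real code (as in the diff below) these calls are typically compiled in only
when the interpreter is built with ASAN enabled.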
When an M:N thread is exiting, we pass `to_dead` to the new
coroutine_transfer0 function, which then passes NULL as the fake-stack save
pointer. This signals to ASAN that the fake stack can be freed (otherwise it
would be leaked).
[Bug #20220]
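
The dying-coroutine case works the same way: passing NULL as the save slot tells
ASAN that the outgoing coroutine's fake stack will never be needed again and may
be destroyed. Reusing the hypothetical declarations from the sketch above, a
rough illustration:

```c
/* Reuses the hypothetical `struct coro` and `coroutine_transfer` declared in
 * the previous sketch. Call this only when `from` is finished for good. */
static void
switch_away_and_die(struct coro *from, struct coro *to)
{
    /* NULL instead of &from->fake_stack: ASAN may free `from`'s fake stack
     * rather than keep it alive for a resume that will never happen. */
    __sanitizer_start_switch_fiber(NULL, to->stack_base, to->stack_size);
    coroutine_transfer(from, to);
    /* Unreachable: nothing may transfer back into a dead coroutine, so there
     * is no matching __sanitizer_finish_switch_fiber on this side. */
}
```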
-rw-r--r-- | thread_pthread.c    | 34
-rw-r--r-- | thread_pthread_mn.c |  9

2 files changed, 36 insertions, 7 deletions
diff --git a/thread_pthread.c b/thread_pthread.c
index 332e50f6e1..580df826da 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -333,6 +333,8 @@ static void timer_thread_wakeup(void);
 static void timer_thread_wakeup_locked(rb_vm_t *vm);
 static void timer_thread_wakeup_force(void);
 static void thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th);
+static void coroutine_transfer0(struct coroutine_context *transfer_from,
+                                struct coroutine_context *transfer_to, bool to_dead);
 
 #define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)
 
@@ -892,7 +894,7 @@ thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, b
             thread_sched_set_lock_owner(sched, NULL);
             {
                 rb_ractor_set_current_ec(th->ractor, NULL);
-                coroutine_transfer(th->sched.context, nt->nt_context);
+                coroutine_transfer0(th->sched.context, nt->nt_context, false);
             }
             thread_sched_set_lock_owner(sched, th);
         }
@@ -1151,7 +1153,28 @@ rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
 }
 
 static void
-thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt)
+coroutine_transfer0(struct coroutine_context *transfer_from, struct coroutine_context *transfer_to, bool to_dead)
+{
+#ifdef RUBY_ASAN_ENABLED
+    void **fake_stack = to_dead ? NULL : &transfer_from->fake_stack;
+    __sanitizer_start_switch_fiber(fake_stack, transfer_to->stack_base, transfer_to->stack_size);
+#endif
+
+    RBIMPL_ATTR_MAYBE_UNUSED()
+    struct coroutine_context *returning_from = coroutine_transfer(transfer_from, transfer_to);
+
+    /* if to_dead was passed, the caller is promising that this coroutine is finished and it should
+     * never be resumed! */
+    VM_ASSERT(!to_dead);
+#ifdef RUBY_ASAN_ENABLED
+    __sanitizer_finish_switch_fiber(transfer_from->fake_stack,
+                                    (const void**)&returning_from->stack_base, &returning_from->stack_size);
+#endif
+
+}
+
+static void
+thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt, bool to_dead)
 {
     VM_ASSERT(!nt->dedicated);
     VM_ASSERT(next_th->nt == NULL);
@@ -1160,7 +1183,8 @@ thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_t
 
     ruby_thread_set_native(next_th);
     native_thread_assign(nt, next_th);
-    coroutine_transfer(current_cont, next_th->sched.context);
+
+    coroutine_transfer0(current_cont, next_th->sched.context, to_dead);
 }
 
 static void
@@ -1169,7 +1193,7 @@ thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th)
 {
     struct rb_native_thread *nt = cth->nt;
     native_thread_assign(NULL, cth);
     RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
-    thread_sched_switch0(cth->sched.context, next_th, nt);
+    thread_sched_switch0(cth->sched.context, next_th, nt, cth->status == THREAD_KILLED);
 }
 
 #if VM_CHECK_MODE > 0
@@ -2268,7 +2292,7 @@ nt_start(void *ptr)
 
             if (next_th && next_th->nt == NULL) {
                 RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
-                thread_sched_switch0(nt->nt_context, next_th, nt);
+                thread_sched_switch0(nt->nt_context, next_th, nt, false);
             }
             else {
                 RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
diff --git a/thread_pthread_mn.c b/thread_pthread_mn.c
index f44c8b18a2..503815fa1c 100644
--- a/thread_pthread_mn.c
+++ b/thread_pthread_mn.c
@@ -413,6 +413,11 @@ native_thread_check_and_create_shared(rb_vm_t *vm)
 static COROUTINE
 co_start(struct coroutine_context *from, struct coroutine_context *self)
 {
+#ifdef RUBY_ASAN_ENABLED
+    __sanitizer_finish_switch_fiber(self->fake_stack,
+                                    (const void**)&from->stack_base, &from->stack_size);
+#endif
+
     rb_thread_t *th = (rb_thread_t *)self->argument;
     struct rb_thread_sched *sched = TH_SCHED(th);
     VM_ASSERT(th->nt != NULL);
@@ -447,13 +452,13 @@ co_start(struct coroutine_context *from, struct coroutine_context *self)
     if (!has_ready_ractor && next_th && !next_th->nt) {
         // switch to the next thread
         thread_sched_set_lock_owner(sched, NULL);
-        thread_sched_switch0(th->sched.context, next_th, nt);
+        thread_sched_switch0(th->sched.context, next_th, nt, true);
         th->sched.finished = true;
     }
     else {
         // switch to the next Ractor
         th->sched.finished = true;
-        coroutine_transfer(self, nt->nt_context);
+        coroutine_transfer0(self, nt->nt_context, true);
     }
 
     rb_bug("unreachable");
 }