| author | Jean Boussier <jean.boussier@gmail.com> | 2026-02-01 12:17:51 +0100 |
|---|---|---|
| committer | Jean Boussier <jean.boussier@gmail.com> | 2026-02-01 15:32:28 +0100 |
| commit | 32f2596984c30a767b5a0b0d2d890a5653e17960 | |
| tree | 05eee48fa0ceafbe6d91be21951d8c55e552e8d9 | |
| parent | 1f58302e9f4810c0eb60c90b7933a30b5a0d05ff | |
thread_pthread.c: Use ruby_sized_xfree
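
This commit switches several `ruby_xfree()` calls over to sized frees: `ruby_sized_xfree(void *ptr, size_t size)` takes the allocation size from the caller, so the GC's malloc bookkeeping can be adjusted without having to recover the size on its own. The `SIZED_FREE`/`SIZED_FREE_N` macros used in the hunks below are not defined anywhere in this diff; the following is only a hypothetical sketch of definitions that would be consistent with their call sites:

```c
/* Hypothetical sketch -- these definitions are NOT part of this commit.
 * They merely show shapes that would match the call sites below, with
 * ruby_sized_xfree() being Ruby's sized counterpart to ruby_xfree(). */
#define SIZED_FREE(ptr)      ruby_sized_xfree((ptr), sizeof(*(ptr)))
#define SIZED_FREE_N(ptr, n) ruby_sized_xfree((ptr), sizeof((ptr)[0]) * (n))
```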
| -rw-r--r-- | thread_none.c | 2 |
|---|---|---|
| -rw-r--r-- | thread_pthread.c | 17 |
| -rw-r--r-- | thread_pthread.h | 1 |
| -rw-r--r-- | thread_pthread_mn.c | 1 |
| -rw-r--r-- | thread_win32.c | 2 |
| -rw-r--r-- | vm.c | 2 |
| -rw-r--r-- | vm_core.h | 2 |
7 files changed, 15 insertions, 12 deletions
```diff
diff --git a/thread_none.c b/thread_none.c
index e6616c0585..1f7492fda8 100644
--- a/thread_none.c
+++ b/thread_none.c
@@ -336,7 +336,7 @@ rb_thread_prevent_fork(void *(*func)(void *), void *data)
 }
 
 void
-rb_thread_malloc_stack_set(rb_thread_t *th, void *stack)
+rb_thread_malloc_stack_set(rb_thread_t *th, void *stack, size_t stack_size)
 {
     // no-op
 }
diff --git a/thread_pthread.c b/thread_pthread.c
index 542690eca0..5b461efec8 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -1822,8 +1822,8 @@ native_thread_destroy_atfork(struct rb_native_thread *nt)
          */
         RB_ALTSTACK_FREE(nt->altstack);
 
-        ruby_xfree(nt->nt_context);
-        ruby_xfree(nt);
+        SIZED_FREE(nt->nt_context);
+        SIZED_FREE(nt);
     }
 }
 
@@ -2201,7 +2201,7 @@ native_thread_create_dedicated(rb_thread_t *th)
     th->sched.malloc_stack = true;
     rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);
     th->sched.context_stack = vm_stack;
-
+    th->sched.context_stack_size = vm_stack_word_size;
 
     int err = native_thread_create0(th->nt);
     if (!err) {
@@ -2339,7 +2339,7 @@ rb_threadptr_sched_free(rb_thread_t *th)
 #if USE_MN_THREADS
     if (th->sched.malloc_stack) {
         // has dedicated
-        ruby_xfree(th->sched.context_stack);
+        SIZED_FREE_N((VALUE *)th->sched.context_stack, th->sched.context_stack_size);
         native_thread_destroy(th->nt);
     }
     else {
@@ -2347,11 +2347,11 @@
         // TODO: how to free nt and nt->altstack?
     }
 
-    ruby_xfree(th->sched.context);
+    SIZED_FREE(th->sched.context);
     th->sched.context = NULL;
     // VM_ASSERT(th->sched.context == NULL);
 #else
-    ruby_xfree(th->sched.context_stack);
+    SIZED_FREE_N((VALUE *)th->sched.context_stack, th->sched.context_stack_size);
     native_thread_destroy(th->nt);
 #endif
 
@@ -3447,7 +3447,7 @@ rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
     }
 
     if (success) {
-        ruby_xfree(hook);
+        SIZED_FREE(hook);
     }
     return success;
 }
@@ -3489,10 +3489,11 @@ rb_thread_lock_native_thread(void)
 }
 
 void
-rb_thread_malloc_stack_set(rb_thread_t *th, void *stack)
+rb_thread_malloc_stack_set(rb_thread_t *th, void *stack, size_t stack_size)
 {
     th->sched.malloc_stack = true;
     th->sched.context_stack = stack;
+    th->sched.context_stack_size = stack_size;
 }
 
 #endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
diff --git a/thread_pthread.h b/thread_pthread.h
index 992e9fb080..cd93182480 100644
--- a/thread_pthread.h
+++ b/thread_pthread.h
@@ -75,6 +75,7 @@ struct rb_thread_sched_item {
     bool finished;
     bool malloc_stack;
     void *context_stack;
+    size_t context_stack_size;
 
     struct coroutine_context *context;
 };
diff --git a/thread_pthread_mn.c b/thread_pthread_mn.c
index 72a5d8fce2..69e81e5fbc 100644
--- a/thread_pthread_mn.c
+++ b/thread_pthread_mn.c
@@ -522,6 +522,7 @@ native_thread_create_shared(rb_thread_t *th)
     th->ec->machine.stack_start = (void *)((uintptr_t)machine_stack + machine_stack_size);
     th->ec->machine.stack_maxsize = machine_stack_size; // TODO
     th->sched.context_stack = machine_stack;
+    th->sched.context_stack_size = machine_stack_size;
 
     th->sched.context = ruby_xmalloc(sizeof(struct coroutine_context));
     coroutine_initialize(th->sched.context, co_start, machine_stack, machine_stack_size);
diff --git a/thread_win32.c b/thread_win32.c
index 3fc7639248..5de79751f9 100644
--- a/thread_win32.c
+++ b/thread_win32.c
@@ -1021,7 +1021,7 @@ rb_thread_prevent_fork(void *(*func)(void *), void *data)
 }
 
 void
-rb_thread_malloc_stack_set(rb_thread_t *th, void *stack)
+rb_thread_malloc_stack_set(rb_thread_t *th, void *stack, size_t stack_size)
 {
     // no-op
 }
diff --git a/vm.c b/vm.c
--- a/vm.c
+++ b/vm.c
@@ -3927,7 +3927,7 @@ th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
         size_t size = vm->default_params.thread_vm_stack_size / sizeof(VALUE);
         VALUE *stack = ALLOC_N(VALUE, size);
         rb_ec_initialize_vm_stack(th->ec, stack, size);
-        rb_thread_malloc_stack_set(th, stack);
+        rb_thread_malloc_stack_set(th, stack, size);
     }
     else {
         VM_ASSERT(th->ec->cfp == NULL);
diff --git a/vm_core.h b/vm_core.h
--- a/vm_core.h
+++ b/vm_core.h
@@ -1999,7 +1999,7 @@ VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
 int rb_vm_get_sourceline(const rb_control_frame_t *);
 void rb_vm_stack_to_heap(rb_execution_context_t *ec);
 void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
-void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack);
+void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack, size_t stack_size);
 rb_thread_t * ruby_thread_from_native(void);
 int ruby_thread_set_native(rb_thread_t *th);
 int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
```
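
The recurring pattern in this diff is that the stack is allocated in one function (`native_thread_create_dedicated`, `native_thread_create_shared`, `th_init`) but freed in another (`rb_threadptr_sched_free`), so the new `context_stack_size` field carries the size from the allocation site to the free site. Below is a minimal self-contained sketch of that pattern, using illustrative names rather than Ruby's own:

```c
#include <stdlib.h>

/* Illustrative stand-in for an allocator that keeps a running byte
 * count, similar in spirit to the accounting ruby_sized_xfree() serves. */
static size_t total_bytes;

static void *tracked_alloc(size_t size)
{
    total_bytes += size;
    return malloc(size);
}

static void tracked_sized_free(void *ptr, size_t size)
{
    total_bytes -= size;  /* exact decrement; no size lookup needed */
    free(ptr);
}

/* The pattern from the diff: the creation path records the size next
 * to the pointer so a separate teardown path can free it "sized". */
struct sched_item {
    void *context_stack;
    size_t context_stack_size;
};

static void create_stack(struct sched_item *s, size_t size)
{
    s->context_stack = tracked_alloc(size);
    s->context_stack_size = size;
}

static void free_stack(struct sched_item *s)
{
    tracked_sized_free(s->context_stack, s->context_stack_size);
    s->context_stack = NULL;
}

int main(void)
{
    struct sched_item s;
    create_stack(&s, 4096);
    free_stack(&s);
    return (int)total_bytes;  /* 0 when the accounting balances */
}
```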
