author     Jean Boussier <jean.boussier@gmail.com>   2026-01-31 11:02:17 +0100
committer  Jean Boussier <jean.boussier@gmail.com>   2026-01-31 17:54:33 +0100
commit     9cc71cb9093caa7ee471def8aa8f0de596e366cf (patch)
tree       500dac8f1cc2d635405134f40a590c2c87444ee4
parent     0b4b30af9e09be12960ad1e9aff89b195b7c3734 (diff)
Use static memory instead of mimalloc for the VM, main ractor, main thread.
Also allocate fibers with ruby_xmalloc.
-rw-r--r--  cont.c      | 10
-rw-r--r--  ractor.c    |  9
-rw-r--r--  vm.c        | 21
-rw-r--r--  vm_core.h   |  5
-rw-r--r--  vm_trace.c  | 34
5 files changed, 22 insertions, 57 deletions
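
The pattern underlying the whole commit, as a minimal sketch in plain C. The names below (vm_like, the_vm, vm_bootstrap, vm_teardown) are illustrative placeholders, not the real Ruby identifiers: a boot-time singleton moves from an early mimalloc heap allocation into static storage, and the teardown path must then skip freeing the static instance.

#include <stdbool.h>
#include <stdlib.h>

struct vm_like {
    bool initialized;
    /* ... */
};

static struct vm_like the_vm;            /* zero-initialized by the C runtime */

struct vm_like *
vm_bootstrap(void)
{
    struct vm_like *vm = &the_vm;        /* was: calloc(1, sizeof(*vm)) */
    vm->initialized = true;
    return vm;
}

void
vm_teardown(struct vm_like *vm)
{
    if (vm != &the_vm) {                 /* only heap-allocated instances are freed */
        free(vm);
    }
}

In the diff below, the same intent appears as the if (!r->main_ractor) and if (!th->main_thread) guards around ruby_xfree() in ractor_free() and thread_free().
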
diff --git a/cont.c b/cont.c
index 638d4852d8..a23b551d83 100644
--- a/cont.c
+++ b/cont.c
@@ -215,7 +215,6 @@ typedef struct rb_context_struct {
enum context_type type;
int argc;
int kw_splat;
- bool root;
VALUE self;
VALUE value;
@@ -1103,11 +1102,11 @@ cont_free(void *ptr)
VM_ASSERT(cont->jit_cont != NULL);
jit_cont_free(cont->jit_cont);
/* free rb_cont_t or rb_fiber_t */
- if (cont->root) {
- ruby_mimfree(ptr);
+ if (cont->type == CONTINUATION_CONTEXT) {
+ SIZED_FREE(cont);
}
else {
- ruby_xfree(ptr);
+ SIZED_FREE((rb_fiber_t *)cont);
}
RUBY_FREE_LEAVE("cont");
}
@@ -2575,13 +2574,12 @@ rb_fiber_start(rb_fiber_t *fiber)
void
rb_threadptr_root_fiber_setup(rb_thread_t *th)
{
- rb_fiber_t *fiber = ruby_mimcalloc(1, sizeof(rb_fiber_t));
+ rb_fiber_t *fiber = ZALLOC(rb_fiber_t);
if (!fiber) {
rb_bug("%s", strerror(errno)); /* ... is it possible to call rb_bug here? */
}
fiber->cont.type = FIBER_CONTEXT;
- fiber->cont.root = true;
fiber->cont.saved_ec.fiber_ptr = fiber;
fiber->cont.saved_ec.serial = next_ec_serial(th->ractor);
fiber->cont.saved_ec.thread_ptr = th;
diff --git a/ractor.c b/ractor.c
index da1db8d803..5897ed5430 100644
--- a/ractor.c
+++ b/ractor.c
@@ -298,10 +298,7 @@ ractor_free(void *ptr)
}
ractor_sync_free(r);
- if (r->main_ractor) {
- ruby_mimfree(r);
- }
- else {
+ if (!r->main_ractor) {
ruby_xfree(r);
}
}
@@ -469,10 +466,12 @@ ractor_alloc(VALUE klass)
return rv;
}
+static rb_ractor_t _main_ractor;
+
rb_ractor_t *
rb_ractor_main_alloc(void)
{
- rb_ractor_t *r = ruby_mimcalloc(1, sizeof(rb_ractor_t));
+ rb_ractor_t *r = &_main_ractor;
if (r == NULL) {
fprintf(stderr, "[FATAL] failed to allocate memory for main ractor\n");
exit(EXIT_FAILURE);
diff --git a/vm.c b/vm.c
index 05d2025a12..264bdfa1f2 100644
--- a/vm.c
+++ b/vm.c
@@ -3392,8 +3392,6 @@ ruby_vm_destruct(rb_vm_t *vm)
st_free_table(vm->static_ext_inits);
- rb_vm_postponed_job_free();
-
rb_id_table_free(vm->constant_cache);
set_free_table(vm->unused_block_warning_table);
@@ -3433,14 +3431,11 @@ ruby_vm_destruct(rb_vm_t *vm)
rb_objspace_free_objects(objspace);
rb_free_generic_fields_tbl_();
rb_free_default_rand_key();
-
- ruby_mimfree(th);
}
rb_objspace_free(objspace);
}
rb_native_mutex_destroy(&vm->workqueue_lock);
/* after freeing objspace, you *can't* use ruby_xfree() */
- ruby_mimfree(vm);
ruby_current_vm_ptr = NULL;
if (rb_free_at_exit) {
@@ -3785,7 +3780,7 @@ thread_mark(void *ptr)
rb_gc_mark(th->top_wrapper);
if (th->root_fiber) rb_fiber_mark_self(th->root_fiber);
- RUBY_ASSERT(th->ec == rb_fiberptr_get_ec(th->ec->fiber_ptr));
+ RUBY_ASSERT(th->ec == NULL || th->ec == rb_fiberptr_get_ec(th->ec->fiber_ptr));
rb_gc_mark(th->last_status);
rb_gc_mark(th->locking_mutex);
rb_gc_mark(th->name);
@@ -3822,10 +3817,7 @@ thread_free(void *ptr)
else {
// ruby_xfree(th->nt);
// TODO: MN system collect nt, but without MN system it should be freed here.
- if (th->main_thread) {
- ruby_mimfree(th);
- }
- else {
+ if (!th->main_thread) {
ruby_xfree(th);
}
}
@@ -4576,12 +4568,15 @@ rb_vm_set_progname(VALUE filename)
extern const struct st_hash_type rb_fstring_hash_type;
+static rb_vm_t _vm;
+static rb_thread_t _main_thread = { .main_thread = 1 };
+
void
Init_BareVM(void)
{
/* VM bootstrap: phase 1 */
- rb_vm_t *vm = ruby_mimcalloc(1, sizeof(*vm));
- rb_thread_t *th = ruby_mimcalloc(1, sizeof(*th));
+ rb_vm_t *vm = &_vm;
+ rb_thread_t *th = &_main_thread;
if (!vm || !th) {
fputs("[FATAL] failed to allocate memory\n", stderr);
exit(EXIT_FAILURE);
@@ -4590,7 +4585,6 @@ Init_BareVM(void)
// setup the VM
vm_init2(vm);
- rb_vm_postponed_job_queue_init(vm);
ruby_current_vm_ptr = vm;
rb_objspace_alloc();
vm->negative_cme_table = rb_id_table_create(16);
@@ -4600,7 +4594,6 @@ Init_BareVM(void)
vm->global_hooks.type = hook_list_type_global;
// setup main thread
- th->main_thread = 1;
th->nt = ZALLOC(struct rb_native_thread);
th->vm = vm;
th->ractor = vm->ractor.main_ractor = rb_ractor_main_alloc();
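
A small related detail in the vm.c hunks above: because the main thread is now a static object, its main_thread flag can be set with a designated initializer at definition time, which is why the runtime assignment th->main_thread = 1 is removed. A sketch with illustrative names:

#include <stdbool.h>

struct thread_like {
    bool main_thread;
};

/* The designated initializer replaces the removed runtime assignment;
 * all other fields are zero-initialized, as static storage always is. */
static struct thread_like main_thread_storage = { .main_thread = true };
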
diff --git a/vm_core.h b/vm_core.h
index 8240a3b4f5..55ec08a6e2 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -787,9 +787,6 @@ typedef struct rb_vm_struct {
/* hook (for internal events: NEWOBJ, FREEOBJ, GC events, etc.) */
rb_hook_list_t global_hooks;
- /* postponed_job (async-signal-safe, and thread-safe) */
- struct rb_postponed_job_queue *postponed_job_queue;
-
int src_encoding_index;
/* workqueue (thread-safe, NOT async-signal-safe) */
@@ -2387,9 +2384,7 @@ rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *
void rb_vm_trap_exit(rb_vm_t *vm);
void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
-void rb_vm_postponed_job_free(void); /* vm_trace.c */
size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
-void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
RUBY_SYMBOL_EXPORT_BEGIN
diff --git a/vm_trace.c b/vm_trace.c
index 11cf7f55b5..273faa5f88 100644
--- a/vm_trace.c
+++ b/vm_trace.c
@@ -1864,16 +1864,7 @@ typedef struct rb_postponed_job_queue {
rb_atomic_t triggered_bitset;
} rb_postponed_job_queues_t;
-void
-rb_vm_postponed_job_queue_init(rb_vm_t *vm)
-{
- /* use mimmalloc; postponed job registration is a dependency of objspace, so this gets
- * called _VERY_ early inside Init_BareVM */
- rb_postponed_job_queues_t *pjq = ruby_mimmalloc(sizeof(rb_postponed_job_queues_t));
- pjq->triggered_bitset = 0;
- memset(pjq->table, 0, sizeof(pjq->table));
- vm->postponed_job_queue = pjq;
-}
+static rb_postponed_job_queues_t postponed_job_queue;
static rb_execution_context_t *
get_valid_ec(rb_vm_t *vm)
@@ -1886,25 +1877,15 @@ get_valid_ec(rb_vm_t *vm)
void
rb_vm_postponed_job_atfork(void)
{
- rb_vm_t *vm = GET_VM();
- rb_postponed_job_queues_t *pjq = vm->postponed_job_queue;
+ rb_postponed_job_queues_t *pjq = &postponed_job_queue;
/* make sure we set the interrupt flag on _this_ thread if we carried any pjobs over
* from the other side of the fork */
if (pjq->triggered_bitset) {
- RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(get_valid_ec(vm));
+ RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(get_valid_ec(GET_VM()));
}
}
-/* Frees the memory managed by the postponed job infrastructure at shutdown */
-void
-rb_vm_postponed_job_free(void)
-{
- rb_vm_t *vm = GET_VM();
- ruby_mimfree(vm->postponed_job_queue);
- vm->postponed_job_queue = NULL;
-}
-
// Used for VM memsize reporting. Returns the total size of the postponed job
// queue infrastructure.
size_t
@@ -1926,7 +1907,7 @@ rb_postponed_job_preregister(unsigned int flags, rb_postponed_job_func_t func, v
* of concurrent calls to both _preregister and _register functions on the same
* func, however, the data may get mixed up between them. */
- rb_postponed_job_queues_t *pjq = GET_VM()->postponed_job_queue;
+ rb_postponed_job_queues_t *pjq = &postponed_job_queue;
for (unsigned int i = 0; i < PJOB_TABLE_SIZE; i++) {
/* Try and set this slot to equal `func` */
rb_postponed_job_func_t existing_func = (rb_postponed_job_func_t)(uintptr_t)RUBY_ATOMIC_PTR_CAS(pjq->table[i].func, NULL, (void *)(uintptr_t)func);
@@ -1951,11 +1932,10 @@ rb_postponed_job_preregister(unsigned int flags, rb_postponed_job_func_t func, v
void
rb_postponed_job_trigger(rb_postponed_job_handle_t h)
{
- rb_vm_t *vm = GET_VM();
- rb_postponed_job_queues_t *pjq = vm->postponed_job_queue;
+ rb_postponed_job_queues_t *pjq = &postponed_job_queue;
RUBY_ATOMIC_OR(pjq->triggered_bitset, (((rb_atomic_t)1UL) << h));
- RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(get_valid_ec(vm));
+ RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(get_valid_ec(GET_VM()));
}
@@ -1988,7 +1968,7 @@ rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func,
void
rb_postponed_job_flush(rb_vm_t *vm)
{
- rb_postponed_job_queues_t *pjq = GET_VM()->postponed_job_queue;
+ rb_postponed_job_queues_t *pjq = &postponed_job_queue;
rb_execution_context_t *ec = GET_EC();
const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK | TRAP_INTERRUPT_MASK;
volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
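
The vm_trace.c side follows from the same idea, sketched below with illustrative names (the real PJOB_TABLE_SIZE is defined earlier in vm_trace.c, and atomic_uint here stands in for Ruby's rb_atomic_t): static storage is zero-initialized before any code runs, so the queue needs neither the early rb_vm_postponed_job_queue_init() call in Init_BareVM nor the matching rb_vm_postponed_job_free() at shutdown, and it is reachable without going through GET_VM().

#include <stdatomic.h>

#define PJOB_TABLE_SIZE 32               /* illustrative; not the real value */

struct pjob_slot {
    void (*func)(void *);
    void *data;
};

/* Before: heap-allocated very early, zeroed by hand, freed at shutdown.
 * After: file-scope static storage, zeroed by the C runtime, never freed. */
static struct {
    struct pjob_slot table[PJOB_TABLE_SIZE];
    atomic_uint triggered_bitset;
} postponed_job_queue_sketch;

One visible consequence in the atfork and trigger hunks is that GET_VM() is now only needed to find a valid execution context for the interrupt flag, not to locate the queue itself.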