| author | John Hawthorn <john@hawthorn.email> | 2026-02-23 18:40:29 -0800 |
|---|---|---|
| committer | Takashi Kokubun <takashikkbn@gmail.com> | 2026-03-05 11:08:14 -0800 |
| commit | 33e5d3894fcddeb16518fdd0512fda097e7039fe | |
| tree | 81da27185931bb839aa8f7392574adf568424713 | |
| parent | a9b84adbd2f30eaefdf2a60341468c00e4bc090c | |
Map M:N thread stack chunks initially as PROT_NONE
Previously we initially mapped the full 512MB chunk as
PROT_READ|PROT_WRITE and then set a guard page to PROT_NONE the first
time a new thread stack was needed. Usually that's okay: we don't touch
that memory until it is needed, so it doesn't count towards RSS.
However, on Linux, even with vm.overcommit_memory=0 (the default), a
system with <512MB of RAM+swap (like a tiny cloud VM) would fail the
mmap and raise:

    Thread#initialize': can't create Thread: Cannot allocate memory (ThreadError)
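
To make the failure concrete, here is a minimal standalone sketch (an
illustration, not part of the patch) contrasting the two mapping modes;
the 512MB size mirrors MSTACK_CHUNK_SIZE, and whether the writable
mapping actually fails depends on the machine's RAM+swap:

```c
// Illustration only: a writable anonymous mapping is charged against
// the overcommit heuristic at mmap() time, while a PROT_NONE mapping
// only reserves address space. On a box with <512MB of RAM+swap the
// first call can fail with ENOMEM even though nothing is touched.
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

#define CHUNK_SIZE (512 * 1024 * 1024)

int
main(void)
{
    void *rw = mmap(NULL, CHUNK_SIZE, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (rw == MAP_FAILED) {
        // This is the failure mode described above.
        printf("PROT_READ|PROT_WRITE: %s\n", strerror(errno));
    }
    else {
        munmap(rw, CHUNK_SIZE);
    }

    void *none = mmap(NULL, CHUNK_SIZE, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (none == MAP_FAILED) {
        printf("PROT_NONE: %s\n", strerror(errno));
    }
    else {
        printf("PROT_NONE mapping of 512MB succeeded\n");
        munmap(none, CHUNK_SIZE);
    }
    return 0;
}
```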
This changes the chunk to be mapped initially with PROT_NONE; then,
instead of protecting the guard page, we enable the machine and VM
stack pages using mprotect. This ensures we don't commit stack memory
until it is first used, and as a side benefit any stray pointer into
unused stack should segfault.
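
The shape of that enable step, as a simplified sketch of what the patch
does (the constants and the enable_stack_pages helper are placeholders
here, not the real MSTACK_* names):

```c
// Sketch: stack_start points into the PROT_NONE chunk, laid out as
// [ VM stack | guard page | machine stack ]. Only the two stack
// regions are made accessible; the guard page is left untouched.
#include <sys/mman.h>

#define STACK_PAGE_SIZE 4096                    /* placeholder */
#define VM_STACK_SIZE   (128 * STACK_PAGE_SIZE) /* placeholder */
#define M_STACK_SIZE    (128 * STACK_PAGE_SIZE) /* placeholder */

static int
enable_stack_pages(char *stack_start)
{
    if (mprotect(stack_start, VM_STACK_SIZE, PROT_READ | PROT_WRITE) != 0) {
        return -1;
    }
    /* the guard page at stack_start + VM_STACK_SIZE stays PROT_NONE */
    if (mprotect(stack_start + VM_STACK_SIZE + STACK_PAGE_SIZE, M_STACK_SIZE,
                 PROT_READ | PROT_WRITE) != 0) {
        return -1;
    }
    return 0;
}
```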
When a stack is freed/reused there is no change from the previous
behaviour: we just use madvise and leave the same regions in place.
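
The free path is not part of this diff; as a rough sketch of what "use
madvise" could look like (the flag choice is an assumption, not taken
from the patch):

```c
// Sketch only: madvise() lets the kernel reclaim the physical pages
// while the mapping and its PROT_* bits stay in place, so a reused
// stack needs no further mprotect() calls.
#include <sys/mman.h>

static void
release_stack_pages(void *addr, size_t len)
{
#if defined(MADV_FREE)
    madvise(addr, len, MADV_FREE);      /* reclaim lazily, under pressure */
#else
    madvise(addr, len, MADV_DONTNEED);  /* reclaim immediately */
#endif
}
```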
[Bug #21944]
| -rw-r--r-- | thread_pthread_mn.c | 39 |
1 file changed, 22 insertions(+), 17 deletions(-)
```diff
diff --git a/thread_pthread_mn.c b/thread_pthread_mn.c
index 5c21f212e4..569def6c0c 100644
--- a/thread_pthread_mn.c
+++ b/thread_pthread_mn.c
@@ -194,7 +194,7 @@ nt_alloc_thread_stack_chunk(void)
     mmap_flags |= MAP_STACK;
 #endif
 
-    const char *m = (void *)mmap(NULL, MSTACK_CHUNK_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
+    const char *m = (void *)mmap(NULL, MSTACK_CHUNK_SIZE, PROT_NONE, mmap_flags, -1, 0);
     if (m == MAP_FAILED) {
         return NULL;
     }
@@ -213,6 +213,12 @@ nt_alloc_thread_stack_chunk(void)
 
     VM_ASSERT(stack_count <= UINT16_MAX);
 
+    // Enable read/write for the header pages
+    if (mprotect((void *)m, (size_t)header_page_cnt * MSTACK_PAGE_SIZE, PROT_READ | PROT_WRITE) != 0) {
+        munmap((void *)m, MSTACK_CHUNK_SIZE);
+        return NULL;
+    }
+
     struct nt_stack_chunk_header *ch = (struct nt_stack_chunk_header *)m;
 
     ch->start_page = header_page_cnt;
@@ -241,7 +247,7 @@ nt_stack_chunk_get_msf(const rb_vm_t *vm, const char *mstack)
     return (struct nt_machine_stack_footer *)&mstack[msz - sizeof(struct nt_machine_stack_footer)];
 }
 
-static void *
+static void
 nt_stack_chunk_get_stack(const rb_vm_t *vm, struct nt_stack_chunk_header *ch, size_t idx, void **vm_stack, void **machine_stack)
 {
     // TODO: only support stack going down
@@ -266,8 +272,6 @@ nt_stack_chunk_get_stack(const rb_vm_t *vm, struct nt_stack_chunk_header *ch, si
 
     *vm_stack = (void *)vstack;
     *machine_stack = (void *)mstack;
-
-    return (void *)guard_page;
 }
 
 RBIMPL_ATTR_MAYBE_UNUSED()
@@ -291,17 +295,6 @@ nt_stack_chunk_dump(void)
 }
 
 static int
-nt_guard_page(const char *p, size_t len)
-{
-    if (mprotect((void *)p, len, PROT_NONE) != -1) {
-        return 0;
-    }
-    else {
-        return errno;
-    }
-}
-
-static int
 nt_alloc_stack(rb_vm_t *vm, void **vm_stack, void **machine_stack)
 {
     int err = 0;
@@ -319,8 +312,20 @@ nt_alloc_stack(rb_vm_t *vm, void **vm_stack, void **machine_stack)
                 RUBY_DEBUG_LOG("uninitialized_stack_count:%d", ch->uninitialized_stack_count);
 
                 size_t idx = ch->stack_count - ch->uninitialized_stack_count--;
-                void *guard_page = nt_stack_chunk_get_stack(vm, ch, idx, vm_stack, machine_stack);
-                err = nt_guard_page(guard_page, MSTACK_PAGE_SIZE);
+
+                // The chunk was mapped PROT_NONE; enable the VM stack and
+                // machine stack pages, leaving the guard page as PROT_NONE.
+                char *stack_start = nt_stack_chunk_get_stack_start(ch, idx);
+                size_t vm_stack_size = vm->default_params.thread_vm_stack_size;
+                size_t mstack_size = nt_thread_stack_size() - vm_stack_size - MSTACK_PAGE_SIZE;
+
+                if (mprotect(stack_start, vm_stack_size, PROT_READ | PROT_WRITE) != 0 ||
+                    mprotect(stack_start + vm_stack_size + MSTACK_PAGE_SIZE, mstack_size, PROT_READ | PROT_WRITE) != 0) {
+                    err = errno;
+                }
+                else {
+                    nt_stack_chunk_get_stack(vm, ch, idx, vm_stack, machine_stack);
+                }
             }
             else {
                 nt_free_stack_chunks = ch->prev_free_chunk;
```
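
To see the committed-on-first-touch behaviour the patch relies on, a
small standalone check (illustration only; the 64MB size is arbitrary)
that prints VmRSS at each step:

```c
// Illustration: RSS grows when pages are first touched, not when they
// are mapped or mprotect()ed to PROT_READ|PROT_WRITE.
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void
print_vmrss(const char *label)
{
    char line[256];
    FILE *f = fopen("/proc/self/status", "r");
    if (f == NULL) return;
    while (fgets(line, sizeof(line), f)) {
        if (strncmp(line, "VmRSS:", 6) == 0) {
            printf("%-24s %s", label, line);
            break;
        }
    }
    fclose(f);
}

int
main(void)
{
    size_t len = 64 * 1024 * 1024;  /* arbitrary test size */
    char *p = mmap(NULL, len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return 1;

    print_vmrss("after mmap(PROT_NONE):");
    mprotect(p, len, PROT_READ | PROT_WRITE);
    print_vmrss("after mprotect(RW):");
    memset(p, 1, len);              /* first touch commits the pages */
    print_vmrss("after first touch:");

    munmap(p, len);
    return 0;
}
```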
