path: root/cont.c
Diffstat (limited to 'cont.c')
-rw-r--r--  cont.c  796
1 files changed, 474 insertions, 322 deletions
diff --git a/cont.c b/cont.c
index a1cb1c8ba0..8af093a316 100644
--- a/cont.c
+++ b/cont.c
@@ -26,15 +26,16 @@ extern int madvise(caddr_t, size_t, int);
#include COROUTINE_H
#include "eval_intern.h"
-#include "gc.h"
#include "internal.h"
#include "internal/cont.h"
+#include "internal/thread.h"
#include "internal/error.h"
+#include "internal/eval.h"
+#include "internal/gc.h"
#include "internal/proc.h"
#include "internal/sanitizers.h"
#include "internal/warnings.h"
#include "ruby/fiber/scheduler.h"
-#include "mjit.h"
#include "yjit.h"
#include "vm_core.h"
#include "vm_sync.h"
@@ -47,7 +48,8 @@ static const int DEBUG = 0;
#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
static long pagesize;
-static const rb_data_type_t cont_data_type, fiber_data_type;
+static const rb_data_type_t rb_cont_data_type;
+static const rb_data_type_t rb_fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;
@@ -70,8 +72,6 @@ static VALUE rb_cFiberPool;
#define FIBER_POOL_ALLOCATION_FREE
#endif
-#define jit_cont_enabled (mjit_enabled || rb_yjit_enabled_p())
-
enum context_type {
CONTINUATION_CONTEXT = 0,
FIBER_CONTEXT = 1
@@ -178,7 +178,7 @@ struct fiber_pool {
// A singly-linked list of allocations which contain 1 or more stacks each.
struct fiber_pool_allocation * allocations;
- // Provides O(1) stack "allocation":
+ // Free list that provides O(1) stack "allocation".
struct fiber_pool_vacancy * vacancies;
// The size of the stack allocations (excluding any guard page).
@@ -190,13 +190,15 @@ struct fiber_pool {
// The initial number of stacks to allocate.
size_t initial_count;
- // Whether to madvise(free) the stack or not:
+ // Whether to madvise(free) the stack or not.
+ // If this value is set to 1, the stack will be madvise(free)ed
+ // (or equivalent), where possible, when it is returned to the pool.
int free_stacks;
// The number of stacks that have been used in this pool.
size_t used;
- // The amount to allocate for the vm_stack:
+ // The amount to allocate for the vm_stack.
size_t vm_stack_size;
};
@@ -225,22 +227,21 @@ typedef struct rb_context_struct {
} machine;
rb_execution_context_t saved_ec;
rb_jmpbuf_t jmpbuf;
- rb_ensure_entry_t *ensure_array;
struct rb_jit_cont *jit_cont; // Continuation contexts for JITs
} rb_context_t;
-
/*
* Fiber status:
- * [Fiber.new] ------> FIBER_CREATED
- * | [Fiber#resume]
- * v
- * +--> FIBER_RESUMED ----+
- * [Fiber#resume] | | [Fiber.yield] |
- * | v |
- * +-- FIBER_SUSPENDED | [Terminate]
- * |
- * FIBER_TERMINATED <-+
+ * [Fiber.new] ------> FIBER_CREATED ----> [Fiber#kill] --> |
+ * | [Fiber#resume] |
+ * v |
+ * +--> FIBER_RESUMED ----> [return] ------> |
+ * [Fiber#resume] | | [Fiber.yield/transfer] |
+ * [Fiber#transfer] | v |
+ * +--- FIBER_SUSPENDED --> [Fiber#kill] --> |
+ * |
+ * |
+ * FIBER_TERMINATED <-------------------+
*/
enum fiber_status {
FIBER_CREATED,
@@ -266,12 +267,25 @@ struct rb_fiber_struct {
unsigned int yielding : 1;
unsigned int blocking : 1;
+ unsigned int killed : 1;
+
struct coroutine_context context;
struct fiber_pool_stack stack;
};
static struct fiber_pool shared_fiber_pool = {NULL, NULL, 0, 0, 0, 0};
+void
+rb_free_shared_fiber_pool(void)
+{
+ struct fiber_pool_allocation *allocations = shared_fiber_pool.allocations;
+ while (allocations) {
+ struct fiber_pool_allocation *next = allocations->next;
+ xfree(allocations);
+ allocations = next;
+ }
+}
+
static ID fiber_initialize_keywords[3] = {0};
/*
@@ -467,18 +481,20 @@ fiber_pool_allocate_memory(size_t * count, size_t stride)
}
#else
errno = 0;
- void * base = mmap(NULL, (*count)*stride, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
+ size_t mmap_size = (*count)*stride;
+ void * base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
if (base == MAP_FAILED) {
// If the allocation fails, count = count / 2, and try again.
*count = (*count) >> 1;
}
else {
+ ruby_annotate_mmap(base, mmap_size, "Ruby:fiber_pool_allocate_memory");
#if defined(MADV_FREE_REUSE)
// On Mac MADV_FREE_REUSE is necessary for the task_info api
// to keep the accounting as accurate as possible; when a page is marked as reusable
// the call may not take effect on the first attempt, so we retry if necessary.
- while (madvise(base, (*count)*stride, MADV_FREE_REUSE) == -1 && errno == EAGAIN);
+ while (madvise(base, mmap_size, MADV_FREE_REUSE) == -1 && errno == EAGAIN);
#endif
return base;
}
@@ -495,80 +511,87 @@ fiber_pool_allocate_memory(size_t * count, size_t stride)
static struct fiber_pool_allocation *
fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
{
- STACK_GROW_DIR_DETECTION;
+ struct fiber_pool_allocation * allocation;
+ RB_VM_LOCK_ENTER();
+ {
+ STACK_GROW_DIR_DETECTION;
- size_t size = fiber_pool->size;
- size_t stride = size + RB_PAGE_SIZE;
+ size_t size = fiber_pool->size;
+ size_t stride = size + RB_PAGE_SIZE;
- // Allocate the memory required for the stacks:
- void * base = fiber_pool_allocate_memory(&count, stride);
+ // Allocate the memory required for the stacks:
+ void * base = fiber_pool_allocate_memory(&count, stride);
- if (base == NULL) {
- rb_raise(rb_eFiberError, "can't alloc machine stack to fiber (%"PRIuSIZE" x %"PRIuSIZE" bytes): %s", count, size, ERRNOMSG);
- }
+ if (base == NULL) {
+ rb_raise(rb_eFiberError, "can't alloc machine stack to fiber (%"PRIuSIZE" x %"PRIuSIZE" bytes): %s", count, size, ERRNOMSG);
+ }
- struct fiber_pool_vacancy * vacancies = fiber_pool->vacancies;
- struct fiber_pool_allocation * allocation = RB_ALLOC(struct fiber_pool_allocation);
+ struct fiber_pool_vacancy * vacancies = fiber_pool->vacancies;
+ allocation = RB_ALLOC(struct fiber_pool_allocation);
- // Initialize fiber pool allocation:
- allocation->base = base;
- allocation->size = size;
- allocation->stride = stride;
- allocation->count = count;
+ // Initialize fiber pool allocation:
+ allocation->base = base;
+ allocation->size = size;
+ allocation->stride = stride;
+ allocation->count = count;
#ifdef FIBER_POOL_ALLOCATION_FREE
- allocation->used = 0;
+ allocation->used = 0;
#endif
- allocation->pool = fiber_pool;
+ allocation->pool = fiber_pool;
- if (DEBUG) {
- fprintf(stderr, "fiber_pool_expand(%"PRIuSIZE"): %p, %"PRIuSIZE"/%"PRIuSIZE" x [%"PRIuSIZE":%"PRIuSIZE"]\n",
- count, (void*)fiber_pool, fiber_pool->used, fiber_pool->count, size, fiber_pool->vm_stack_size);
- }
-
- // Iterate over all stacks, initializing the vacancy list:
- for (size_t i = 0; i < count; i += 1) {
- void * base = (char*)allocation->base + (stride * i);
- void * page = (char*)base + STACK_DIR_UPPER(size, 0);
+ if (DEBUG) {
+ fprintf(stderr, "fiber_pool_expand(%"PRIuSIZE"): %p, %"PRIuSIZE"/%"PRIuSIZE" x [%"PRIuSIZE":%"PRIuSIZE"]\n",
+ count, (void*)fiber_pool, fiber_pool->used, fiber_pool->count, size, fiber_pool->vm_stack_size);
+ }
+ // Iterate over all stacks, initializing the vacancy list:
+ for (size_t i = 0; i < count; i += 1) {
+ void * base = (char*)allocation->base + (stride * i);
+ void * page = (char*)base + STACK_DIR_UPPER(size, 0);
#if defined(_WIN32)
- DWORD old_protect;
+ DWORD old_protect;
- if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
- VirtualFree(allocation->base, 0, MEM_RELEASE);
- rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
- }
+ if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
+ VirtualFree(allocation->base, 0, MEM_RELEASE);
+ rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
+ }
+#elif defined(__wasi__)
+ // wasi-libc's mprotect emulation doesn't support PROT_NONE.
+ (void)page;
#else
- if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
- munmap(allocation->base, count*stride);
- rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
- }
+ if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
+ munmap(allocation->base, count*stride);
+ rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
+ }
#endif
- vacancies = fiber_pool_vacancy_initialize(
- fiber_pool, vacancies,
- (char*)base + STACK_DIR_UPPER(0, RB_PAGE_SIZE),
- size
- );
+ vacancies = fiber_pool_vacancy_initialize(
+ fiber_pool, vacancies,
+ (char*)base + STACK_DIR_UPPER(0, RB_PAGE_SIZE),
+ size
+ );
#ifdef FIBER_POOL_ALLOCATION_FREE
- vacancies->stack.allocation = allocation;
+ vacancies->stack.allocation = allocation;
#endif
- }
+ }
- // Insert the allocation into the head of the pool:
- allocation->next = fiber_pool->allocations;
+ // Insert the allocation into the head of the pool:
+ allocation->next = fiber_pool->allocations;
#ifdef FIBER_POOL_ALLOCATION_FREE
- if (allocation->next) {
- allocation->next->previous = allocation;
- }
+ if (allocation->next) {
+ allocation->next->previous = allocation;
+ }
- allocation->previous = NULL;
+ allocation->previous = NULL;
#endif
- fiber_pool->allocations = allocation;
- fiber_pool->vacancies = vacancies;
- fiber_pool->count += count;
+ fiber_pool->allocations = allocation;
+ fiber_pool->vacancies = vacancies;
+ fiber_pool->count += count;
+ }
+ RB_VM_LOCK_LEAVE();
return allocation;
}
@@ -642,41 +665,46 @@ fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
static struct fiber_pool_stack
fiber_pool_stack_acquire(struct fiber_pool * fiber_pool)
{
- struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pop(fiber_pool);
+ struct fiber_pool_vacancy * vacancy;
+ RB_VM_LOCK_ENTER();
+ {
+ vacancy = fiber_pool_vacancy_pop(fiber_pool);
- if (DEBUG) fprintf(stderr, "fiber_pool_stack_acquire: %p used=%"PRIuSIZE"\n", (void*)fiber_pool->vacancies, fiber_pool->used);
+ if (DEBUG) fprintf(stderr, "fiber_pool_stack_acquire: %p used=%"PRIuSIZE"\n", (void*)fiber_pool->vacancies, fiber_pool->used);
- if (!vacancy) {
- const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE;
- const size_t minimum = fiber_pool->initial_count;
+ if (!vacancy) {
+ const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE;
+ const size_t minimum = fiber_pool->initial_count;
- size_t count = fiber_pool->count;
- if (count > maximum) count = maximum;
- if (count < minimum) count = minimum;
+ size_t count = fiber_pool->count;
+ if (count > maximum) count = maximum;
+ if (count < minimum) count = minimum;
- fiber_pool_expand(fiber_pool, count);
+ fiber_pool_expand(fiber_pool, count);
- // The free list should now contain some stacks:
- VM_ASSERT(fiber_pool->vacancies);
+ // The free list should now contain some stacks:
+ VM_ASSERT(fiber_pool->vacancies);
- vacancy = fiber_pool_vacancy_pop(fiber_pool);
- }
+ vacancy = fiber_pool_vacancy_pop(fiber_pool);
+ }
- VM_ASSERT(vacancy);
- VM_ASSERT(vacancy->stack.base);
+ VM_ASSERT(vacancy);
+ VM_ASSERT(vacancy->stack.base);
#if defined(COROUTINE_SANITIZE_ADDRESS)
- __asan_unpoison_memory_region(fiber_pool_stack_poison_base(&vacancy->stack), fiber_pool_stack_poison_size(&vacancy->stack));
+ __asan_unpoison_memory_region(fiber_pool_stack_poison_base(&vacancy->stack), fiber_pool_stack_poison_size(&vacancy->stack));
#endif
- // Take the top item from the free list:
- fiber_pool->used += 1;
+ // Take the top item from the free list:
+ fiber_pool->used += 1;
#ifdef FIBER_POOL_ALLOCATION_FREE
- vacancy->stack.allocation->used += 1;
+ vacancy->stack.allocation->used += 1;
#endif
- fiber_pool_stack_reset(&vacancy->stack);
+ fiber_pool_stack_reset(&vacancy->stack);
+ }
+ RB_VM_LOCK_LEAVE();
return vacancy->stack;
}
@@ -692,7 +720,9 @@ fiber_pool_stack_free(struct fiber_pool_stack * stack)
// If this is not true, the vacancy information will almost certainly be destroyed:
VM_ASSERT(size <= (stack->size - RB_PAGE_SIZE));
- if (DEBUG) fprintf(stderr, "fiber_pool_stack_free: %p+%"PRIuSIZE" [base=%p, size=%"PRIuSIZE"]\n", base, size, stack->base, stack->size);
+ int advice = stack->pool->free_stacks >> 1;
+
+ if (DEBUG) fprintf(stderr, "fiber_pool_stack_free: %p+%"PRIuSIZE" [base=%p, size=%"PRIuSIZE"] advice=%d\n", base, size, stack->base, stack->size, advice);
// The pages being used by the stack can be returned back to the system.
// That doesn't change the page mapping, but it does allow the system to
@@ -706,24 +736,29 @@ fiber_pool_stack_free(struct fiber_pool_stack * stack)
#ifdef __wasi__
// WebAssembly doesn't support madvise, so we just don't do anything.
#elif VM_CHECK_MODE > 0 && defined(MADV_DONTNEED)
+ if (!advice) advice = MADV_DONTNEED;
// This immediately discards the pages and the memory is reset to zero.
- madvise(base, size, MADV_DONTNEED);
+ madvise(base, size, advice);
#elif defined(MADV_FREE_REUSABLE)
+ if (!advice) advice = MADV_FREE_REUSABLE;
// Darwin / macOS / iOS.
// Acknowledge the kernel down to the task info api we make this
// page reusable for future use.
- // As for MADV_FREE_REUSE below we ensure in the rare occasions the task was not
+ // As for MADV_FREE_REUSABLE below we ensure in the rare occasions the task was not
// completed at the time of the call to re-iterate.
- while (madvise(base, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN);
+ while (madvise(base, size, advice) == -1 && errno == EAGAIN);
#elif defined(MADV_FREE)
+ if (!advice) advice = MADV_FREE;
// Recent Linux.
- madvise(base, size, MADV_FREE);
+ madvise(base, size, advice);
#elif defined(MADV_DONTNEED)
+ if (!advice) advice = MADV_DONTNEED;
// Old Linux.
- madvise(base, size, MADV_DONTNEED);
+ madvise(base, size, advice);
#elif defined(POSIX_MADV_DONTNEED)
+ if (!advice) advice = POSIX_MADV_DONTNEED;
// Solaris?
- posix_madvise(base, size, POSIX_MADV_DONTNEED);
+ posix_madvise(base, size, advice);
#elif defined(_WIN32)
VirtualAlloc(base, size, MEM_RESET, PAGE_READWRITE);
// Not available in all versions of Windows.
@@ -780,6 +815,9 @@ static inline void
ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
{
rb_execution_context_t *ec = &fiber->cont.saved_ec;
+#ifdef RUBY_ASAN_ENABLED
+ ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();
+#endif
rb_ractor_set_current_ec(th->ractor, th->ec = ec);
// ruby_current_execution_context_ptr = th->ec = ec;
@@ -802,6 +840,10 @@ fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fiber)
VM_ASSERT(th->ec->fiber_ptr == fiber);
}
+#ifndef COROUTINE_DECL
+# define COROUTINE_DECL COROUTINE
+#endif
+NORETURN(static COROUTINE_DECL fiber_entry(struct coroutine_context * from, struct coroutine_context * to));
static COROUTINE
fiber_entry(struct coroutine_context * from, struct coroutine_context * to)
{
@@ -879,6 +921,17 @@ fiber_stack_release(rb_fiber_t * fiber)
rb_ec_clear_vm_stack(ec);
}
+static void
+fiber_stack_release_locked(rb_fiber_t *fiber)
+{
+ if (!ruby_vm_during_cleanup) {
+ // We can't try to acquire the VM lock here because MMTK calls free in its own native thread which has no ec.
+ // This assertion will fail on MMTK but we currently don't have CI for debug releases of MMTK, so we can assert for now.
+ ASSERT_vm_locking_with_barrier();
+ }
+ fiber_stack_release(fiber);
+}
+
static const char *
fiber_status_name(enum fiber_status s)
{
@@ -900,7 +953,9 @@ fiber_verify(const rb_fiber_t *fiber)
switch (fiber->status) {
case FIBER_RESUMED:
- VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
+ if (fiber->cont.saved_ec.thread_ptr->self == 0) {
+ VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
+ }
break;
case FIBER_SUSPENDED:
VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
@@ -930,7 +985,7 @@ cont_ptr(VALUE obj)
{
rb_context_t *cont;
- TypedData_Get_Struct(obj, rb_context_t, &cont_data_type, cont);
+ TypedData_Get_Struct(obj, rb_context_t, &rb_cont_data_type, cont);
return cont;
}
@@ -940,7 +995,7 @@ fiber_ptr(VALUE obj)
{
rb_fiber_t *fiber;
- TypedData_Get_Struct(obj, rb_fiber_t, &fiber_data_type, fiber);
+ TypedData_Get_Struct(obj, rb_fiber_t, &rb_fiber_data_type, fiber);
if (!fiber) rb_raise(rb_eFiberError, "uninitialized fiber");
return fiber;
@@ -1007,13 +1062,8 @@ cont_mark(void *ptr)
cont->machine.stack + cont->machine.stack_size);
}
else {
- /* fiber */
- const rb_fiber_t *fiber = (rb_fiber_t*)cont;
-
- if (!FIBER_TERMINATED_P(fiber)) {
- rb_gc_mark_locations(cont->machine.stack,
- cont->machine.stack + cont->machine.stack_size);
- }
+ /* fiber machine context is marked as part of rb_execution_context_mark, no need to
+ * do anything here. */
}
}
@@ -1039,21 +1089,18 @@ cont_free(void *ptr)
if (cont->type == CONTINUATION_CONTEXT) {
ruby_xfree(cont->saved_ec.vm_stack);
- ruby_xfree(cont->ensure_array);
RUBY_FREE_UNLESS_NULL(cont->machine.stack);
}
else {
rb_fiber_t *fiber = (rb_fiber_t*)cont;
coroutine_destroy(&fiber->context);
- fiber_stack_release(fiber);
+ fiber_stack_release_locked(fiber);
}
RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);
- if (jit_cont_enabled) {
- VM_ASSERT(cont->jit_cont != NULL);
- jit_cont_free(cont->jit_cont);
- }
+ VM_ASSERT(cont->jit_cont != NULL);
+ jit_cont_free(cont->jit_cont);
/* free rb_cont_t or rb_fiber_t */
ruby_xfree(ptr);
RUBY_FREE_LEAVE("cont");
@@ -1096,12 +1143,7 @@ rb_fiber_update_self(rb_fiber_t *fiber)
void
rb_fiber_mark_self(const rb_fiber_t *fiber)
{
- if (fiber->cont.self) {
- rb_gc_mark_movable(fiber->cont.self);
- }
- else {
- rb_execution_context_mark(&fiber->cont.saved_ec);
- }
+ rb_gc_mark_movable(fiber->cont.self);
}
static void
@@ -1167,7 +1209,7 @@ fiber_memsize(const void *ptr)
VALUE
rb_obj_is_fiber(VALUE obj)
{
- return RBOOL(rb_typeddata_is_kind_of(obj, &fiber_data_type));
+ return RBOOL(rb_typeddata_is_kind_of(obj, &rb_fiber_data_type));
}
static void
@@ -1198,9 +1240,23 @@ cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
}
-static const rb_data_type_t cont_data_type = {
+static void
+cont_handle_weak_references(void *ptr)
+{
+ rb_context_t *cont = ptr;
+
+ if (!cont) return;
+
+ if (!rb_gc_handle_weak_references_alive_p(cont->saved_ec.gen_fields_cache.obj) ||
+ !rb_gc_handle_weak_references_alive_p(cont->saved_ec.gen_fields_cache.fields_obj)) {
+ cont->saved_ec.gen_fields_cache.obj = Qundef;
+ cont->saved_ec.gen_fields_cache.fields_obj = Qundef;
+ }
+}
+
+static const rb_data_type_t rb_cont_data_type = {
"continuation",
- {cont_mark, cont_free, cont_memsize, cont_compact},
+ {cont_mark, cont_free, cont_memsize, cont_compact, cont_handle_weak_references},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
@@ -1255,6 +1311,8 @@ jit_cont_new(rb_execution_context_t *ec)
static void
jit_cont_free(struct rb_jit_cont *cont)
{
+ if (!cont) return;
+
rb_native_mutex_lock(&jit_cont_lock);
if (cont == first_jit_cont) {
first_jit_cont = cont->next;
@@ -1280,26 +1338,42 @@ rb_jit_cont_each_iseq(rb_iseq_callback callback, void *data)
if (cont->ec->vm_stack == NULL)
continue;
- const rb_control_frame_t *cfp;
- for (cfp = RUBY_VM_END_CONTROL_FRAME(cont->ec) - 1; ; cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
- const rb_iseq_t *iseq;
- if (cfp->pc && (iseq = cfp->iseq) != NULL && imemo_type((VALUE)iseq) == imemo_iseq) {
- callback(iseq, data);
+ const rb_control_frame_t *cfp = cont->ec->cfp;
+ while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(cont->ec, cfp)) {
+ if (cfp->pc && cfp->iseq && imemo_type((VALUE)cfp->iseq) == imemo_iseq) {
+ callback(cfp->iseq, data);
}
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ }
+ }
+}
- if (cfp == cont->ec->cfp)
- break; // reached the most recent cfp
+#if USE_YJIT
+// Update the jit_return of all CFPs to leave_exit unless it's leave_exception or not set.
+// This prevents jit_exec_exception from jumping to the caller after invalidation.
+void
+rb_yjit_cancel_jit_return(void *leave_exit, void *leave_exception)
+{
+ struct rb_jit_cont *cont;
+ for (cont = first_jit_cont; cont != NULL; cont = cont->next) {
+ if (cont->ec->vm_stack == NULL)
+ continue;
+
+ const rb_control_frame_t *cfp = cont->ec->cfp;
+ while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(cont->ec, cfp)) {
+ if (cfp->jit_return && cfp->jit_return != leave_exception) {
+ ((rb_control_frame_t *)cfp)->jit_return = leave_exit;
+ }
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}
}
}
+#endif
// Finish working with jit_cont.
void
rb_jit_cont_finish(void)
{
- if (!jit_cont_enabled)
- return;
-
struct rb_jit_cont *cont, *next;
for (cont = first_jit_cont; cont != NULL; cont = next) {
next = cont->next;
@@ -1312,9 +1386,8 @@ static void
cont_init_jit_cont(rb_context_t *cont)
{
VM_ASSERT(cont->jit_cont == NULL);
- if (jit_cont_enabled) {
- cont->jit_cont = jit_cont_new(&(cont->saved_ec));
- }
+ // We always allocate this since YJIT may be enabled later
+ cont->jit_cont = jit_cont_new(&(cont->saved_ec));
}
struct rb_execution_context_struct *
@@ -1343,7 +1416,8 @@ cont_new(VALUE klass)
rb_thread_t *th = GET_THREAD();
THREAD_MUST_BE_RUNNING(th);
- contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
+ contval = TypedData_Make_Struct(klass, rb_context_t, &rb_cont_data_type, cont);
+ rb_gc_declare_weak_references(contval);
cont->self = contval;
cont_init(cont, th);
return cont;
@@ -1361,15 +1435,11 @@ rb_fiberptr_blocking(struct rb_fiber_struct *fiber)
return fiber->blocking;
}
-// Start working with jit_cont.
+// Initialize the jit_cont_lock
void
rb_jit_cont_init(void)
{
- if (!jit_cont_enabled)
- return;
-
rb_native_mutex_initialize(&jit_cont_lock);
- cont_init_jit_cont(&GET_EC()->fiber_ptr->cont);
}
#if 0
@@ -1433,22 +1503,6 @@ cont_capture(volatile int *volatile stat)
VM_ASSERT(cont->saved_ec.cfp != NULL);
cont_save_machine_stack(th, cont);
- /* backup ensure_list to array for search in another context */
- {
- rb_ensure_list_t *p;
- int size = 0;
- rb_ensure_entry_t *entry;
- for (p=th->ec->ensure_list; p; p=p->next)
- size++;
- entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
- for (p=th->ec->ensure_list; p; p=p->next) {
- if (!p->entry.marker)
- p->entry.marker = rb_ary_hidden_new(0); /* dummy object */
- *entry++ = p->entry;
- }
- entry->marker = 0;
- }
-
if (ruby_setjmp(cont->jmpbuf)) {
VALUE value;
@@ -1491,6 +1545,51 @@ cont_restore_thread(rb_context_t *cont)
rb_raise(rb_eRuntimeError, "can't call across trace_func");
}
+#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
+ if (th->ec->tag != sec->tag) {
+ /* find the lowest common ancestor tag of the current EC and the saved EC */
+
+ struct rb_vm_tag *lowest_common_ancestor = NULL;
+ size_t num_tags = 0;
+ size_t num_saved_tags = 0;
+ for (struct rb_vm_tag *tag = th->ec->tag; tag != NULL; tag = tag->prev) {
+ ++num_tags;
+ }
+ for (struct rb_vm_tag *tag = sec->tag; tag != NULL; tag = tag->prev) {
+ ++num_saved_tags;
+ }
+
+ size_t min_tags = num_tags <= num_saved_tags ? num_tags : num_saved_tags;
+
+ struct rb_vm_tag *tag = th->ec->tag;
+ while (num_tags > min_tags) {
+ tag = tag->prev;
+ --num_tags;
+ }
+
+ struct rb_vm_tag *saved_tag = sec->tag;
+ while (num_saved_tags > min_tags) {
+ saved_tag = saved_tag->prev;
+ --num_saved_tags;
+ }
+
+ while (min_tags > 0) {
+ if (tag == saved_tag) {
+ lowest_common_ancestor = tag;
+ break;
+ }
+ tag = tag->prev;
+ saved_tag = saved_tag->prev;
+ --min_tags;
+ }
+
+ /* free all the jump buffers between the current EC's tag and the lowest common ancestor tag */
+ for (struct rb_vm_tag *tag = th->ec->tag; tag != lowest_common_ancestor; tag = tag->prev) {
+ rb_vm_tag_jmpbuf_deinit(&tag->buf);
+ }
+ }
+#endif
+
/* copy vm stack */
#ifdef CAPTURE_JUST_VALID_VM_STACK
MEMCPY(th->ec->vm_stack,
@@ -1509,7 +1608,6 @@ cont_restore_thread(rb_context_t *cont)
th->ec->tag = sec->tag;
th->ec->root_lep = sec->root_lep;
th->ec->root_svar = sec->root_svar;
- th->ec->ensure_list = sec->ensure_list;
th->ec->errinfo = sec->errinfo;
VM_ASSERT(th->ec->vm_stack != NULL);
@@ -1541,11 +1639,10 @@ fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber)
}
}
- /* exchange machine_stack_start between old_fiber and new_fiber */
+ /* these values are used in rb_gc_mark_machine_context to mark the fiber's stack. */
old_fiber->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;
+ old_fiber->cont.saved_ec.machine.stack_end = FIBER_TERMINATED_P(old_fiber) ? NULL : th->ec->machine.stack_end;
- /* old_fiber->machine.stack_end should be NULL */
- old_fiber->cont.saved_ec.machine.stack_end = NULL;
// if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] -> %p[%p]\n", (void*)old_fiber, old_fiber->stack.base, (void*)new_fiber, new_fiber->stack.base);
@@ -1579,9 +1676,9 @@ cont_restore_1(rb_context_t *cont)
cont_restore_thread(cont);
/* restore machine stack */
-#if defined(_M_AMD64) && !defined(__MINGW64__)
+#if (defined(_M_AMD64) && !defined(__MINGW64__)) || defined(_M_ARM64)
{
- /* workaround for x64 SEH */
+ /* workaround for x64 and arm64 SEH on Windows */
jmp_buf buf;
setjmp(buf);
_JUMP_BUFFER *bp = (void*)&cont->jmpbuf;
@@ -1748,6 +1845,13 @@ rb_callcc(VALUE self)
return rb_yield(val);
}
}
+#ifdef RUBY_ASAN_ENABLED
+/* callcc can't possibly work with ASAN; see bug #20273. Also this function
+ * definition below avoids a "defined and not used" warning. */
+MAYBE_UNUSED(static void notusing_callcc(void)) { rb_callcc(Qnil); }
+# define rb_callcc rb_f_notimplement
+#endif
+
static VALUE
make_passing_arg(int argc, const VALUE *argv)
@@ -1766,80 +1870,6 @@ make_passing_arg(int argc, const VALUE *argv)
typedef VALUE e_proc(VALUE);
-/* CAUTION!! : Currently, error in rollback_func is not supported */
-/* same as rb_protect if set rollback_func to NULL */
-void
-ruby_register_rollback_func_for_ensure(e_proc *ensure_func, e_proc *rollback_func)
-{
- st_table **table_p = &GET_VM()->ensure_rollback_table;
- if (UNLIKELY(*table_p == NULL)) {
- *table_p = st_init_numtable();
- }
- st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
-}
-
-static inline e_proc *
-lookup_rollback_func(e_proc *ensure_func)
-{
- st_table *table = GET_VM()->ensure_rollback_table;
- st_data_t val;
- if (table && st_lookup(table, (st_data_t)ensure_func, &val))
- return (e_proc *) val;
- return (e_proc *) Qundef;
-}
-
-
-static inline void
-rollback_ensure_stack(VALUE self,rb_ensure_list_t *current,rb_ensure_entry_t *target)
-{
- rb_ensure_list_t *p;
- rb_ensure_entry_t *entry;
- size_t i, j;
- size_t cur_size;
- size_t target_size;
- size_t base_point;
- e_proc *func;
-
- cur_size = 0;
- for (p=current; p; p=p->next)
- cur_size++;
- target_size = 0;
- for (entry=target; entry->marker; entry++)
- target_size++;
-
- /* search common stack point */
- p = current;
- base_point = cur_size;
- while (base_point) {
- if (target_size >= base_point &&
- p->entry.marker == target[target_size - base_point].marker)
- break;
- base_point --;
- p = p->next;
- }
-
- /* rollback function check */
- for (i=0; i < target_size - base_point; i++) {
- if (!lookup_rollback_func(target[i].e_proc)) {
- rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
- }
- }
- /* pop ensure stack */
- while (cur_size > base_point) {
- /* escape from ensure block */
- (*current->entry.e_proc)(current->entry.data2);
- current = current->next;
- cur_size--;
- }
- /* push ensure stack */
- for (j = 0; j < i; j++) {
- func = lookup_rollback_func(target[i - j - 1].e_proc);
- if (!UNDEF_P((VALUE)func)) {
- (*func)(target[i - j - 1].data2);
- }
- }
-}
-
NORETURN(static VALUE rb_cont_call(int argc, VALUE *argv, VALUE contval));
/*
@@ -1871,7 +1901,6 @@ rb_cont_call(int argc, VALUE *argv, VALUE contval)
rb_raise(rb_eRuntimeError, "continuation called across fiber");
}
}
- rollback_ensure_stack(contval, th->ec->ensure_list, cont->ensure_array);
cont->argc = argc;
cont->value = make_passing_arg(argc, argv);
@@ -1948,7 +1977,7 @@ rb_cont_call(int argc, VALUE *argv, VALUE contval)
* == Non-blocking Fibers
*
* The concept of <em>non-blocking fiber</em> was introduced in Ruby 3.0.
- * A non-blocking fiber, when reaching a operation that would normally block
+ * A non-blocking fiber, when reaching an operation that would normally block
* the fiber (like <code>sleep</code>, or wait for another process or I/O)
* will yield control to other fibers and allow the <em>scheduler</em> to
* handle blocking and waking up (resuming) this fiber when it can proceed.
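A hedged Ruby-level sketch of the scheduler hand-off described above, assuming a third-party fiber scheduler such as the async gem's Async::Scheduler is installed:

    require "async"

    Fiber.set_scheduler(Async::Scheduler.new)

    Fiber.schedule do
      sleep 1   # with a scheduler set, this yields to other fibers instead of blocking the thread
    end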
@@ -1967,16 +1996,38 @@ rb_cont_call(int argc, VALUE *argv, VALUE contval)
*
*/
-static const rb_data_type_t fiber_data_type = {
+static void
+fiber_handle_weak_references(void *ptr)
+{
+ rb_fiber_t *fiber = ptr;
+
+ if (!fiber) return;
+
+ if (!rb_gc_handle_weak_references_alive_p(fiber->cont.saved_ec.gen_fields_cache.obj) ||
+ !rb_gc_handle_weak_references_alive_p(fiber->cont.saved_ec.gen_fields_cache.fields_obj)) {
+ fiber->cont.saved_ec.gen_fields_cache.obj = Qundef;
+ fiber->cont.saved_ec.gen_fields_cache.fields_obj = Qundef;
+ }
+}
+
+static const rb_data_type_t rb_fiber_data_type = {
"fiber",
- {fiber_mark, fiber_free, fiber_memsize, fiber_compact,},
+ {fiber_mark, fiber_free, fiber_memsize, fiber_compact, fiber_handle_weak_references},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
static VALUE
fiber_alloc(VALUE klass)
{
- return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
+ VALUE obj = TypedData_Wrap_Struct(klass, &rb_fiber_data_type, 0);
+ rb_gc_declare_weak_references(obj);
+ return obj;
+}
+
+static rb_serial_t
+next_ec_serial(rb_ractor_t *cr)
+{
+ return cr->next_ec_serial++;
}
static rb_fiber_t*
@@ -1994,9 +2045,11 @@ fiber_t_alloc(VALUE fiber_value, unsigned int blocking)
fiber->cont.self = fiber_value;
fiber->cont.type = FIBER_CONTEXT;
fiber->blocking = blocking;
+ fiber->killed = 0;
cont_init(&fiber->cont, th);
fiber->cont.saved_ec.fiber_ptr = fiber;
+ fiber->cont.saved_ec.serial = next_ec_serial(th->ractor);
rb_ec_clear_vm_stack(&fiber->cont.saved_ec);
fiber->prev = NULL;
@@ -2010,32 +2063,10 @@ fiber_t_alloc(VALUE fiber_value, unsigned int blocking)
return fiber;
}
-static rb_fiber_t *
-root_fiber_alloc(rb_thread_t *th)
-{
- VALUE fiber_value = fiber_alloc(rb_cFiber);
- rb_fiber_t *fiber = th->ec->fiber_ptr;
-
- VM_ASSERT(DATA_PTR(fiber_value) == NULL);
- VM_ASSERT(fiber->cont.type == FIBER_CONTEXT);
- VM_ASSERT(FIBER_RESUMED_P(fiber));
-
- th->root_fiber = fiber;
- DATA_PTR(fiber_value) = fiber;
- fiber->cont.self = fiber_value;
-
- coroutine_initialize_main(&fiber->context);
-
- return fiber;
-}
-
static inline rb_fiber_t*
fiber_current(void)
{
rb_execution_context_t *ec = GET_EC();
- if (ec->fiber_ptr->cont.self == 0) {
- root_fiber_alloc(rb_ec_thread_ptr(ec));
- }
return ec->fiber_ptr;
}
@@ -2059,10 +2090,10 @@ fiber_storage_set(struct rb_fiber_struct *fiber, VALUE storage)
}
static inline VALUE
-fiber_storage_get(rb_fiber_t *fiber)
+fiber_storage_get(rb_fiber_t *fiber, int allocate)
{
VALUE storage = fiber->cont.saved_ec.storage;
- if (storage == Qnil) {
+ if (storage == Qnil && allocate) {
storage = rb_hash_new();
fiber_storage_set(fiber, storage);
}
@@ -2089,13 +2120,21 @@ static VALUE
rb_fiber_storage_get(VALUE self)
{
storage_access_must_be_from_same_fiber(self);
- return rb_obj_dup(fiber_storage_get(fiber_ptr(self)));
+
+ VALUE storage = fiber_storage_get(fiber_ptr(self), FALSE);
+
+ if (storage == Qnil) {
+ return Qnil;
+ }
+ else {
+ return rb_obj_dup(storage);
+ }
}
static int
fiber_storage_validate_each(VALUE key, VALUE value, VALUE _argument)
{
- rb_check_id(&key);
+ Check_Type(key, T_SYMBOL);
return ST_CONTINUE;
}
@@ -2160,18 +2199,16 @@ rb_fiber_storage_set(VALUE self, VALUE value)
* Returns the value of the fiber storage variable identified by +key+.
*
* The +key+ must be a symbol, and the value is set by Fiber#[]= or
- * Fiber#store.
+ * Fiber#storage.
*
* See also Fiber::[]=.
*/
static VALUE
rb_fiber_storage_aref(VALUE class, VALUE key)
{
- ID id = rb_check_id(&key);
- if (!id) return Qnil;
-
- VALUE storage = fiber_storage_get(fiber_current());
+ key = rb_to_symbol(key);
+ VALUE storage = fiber_storage_get(fiber_current(), FALSE);
if (storage == Qnil) return Qnil;
return rb_hash_aref(storage, key);
@@ -2190,12 +2227,17 @@ rb_fiber_storage_aref(VALUE class, VALUE key)
static VALUE
rb_fiber_storage_aset(VALUE class, VALUE key, VALUE value)
{
- ID id = rb_check_id(&key);
- if (!id) return Qnil;
+ key = rb_to_symbol(key);
- VALUE storage = fiber_storage_get(fiber_current());
+ VALUE storage = fiber_storage_get(fiber_current(), value != Qnil);
+ if (storage == Qnil) return Qnil;
- return rb_hash_aset(storage, key, value);
+ if (value == Qnil) {
+ return rb_hash_delete(storage, key);
+ }
+ else {
+ return rb_hash_aset(storage, key, value);
+ }
}
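Taken together with Fiber::[] above, the storage accessors behave roughly like this at the Ruby level (an illustrative sketch based on the code in this hunk):

    Fiber[:request_id] = 42     # keys are normalized to Symbols (rb_to_symbol)
    Fiber[:request_id]          # => 42
    Fiber[:missing]             # => nil, without allocating a storage hash
    Fiber[:request_id] = nil    # assigning nil deletes the key instead of storing nil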
static VALUE
@@ -2339,7 +2381,7 @@ rb_fiber_initialize(int argc, VALUE* argv, VALUE self)
VALUE
rb_fiber_new_storage(rb_block_call_func_t func, VALUE obj, VALUE storage)
{
- return fiber_initialize(fiber_alloc(rb_cFiber), rb_proc_new(func, obj), rb_fiber_pool_default(Qnil), 1, storage);
+ return fiber_initialize(fiber_alloc(rb_cFiber), rb_proc_new(func, obj), rb_fiber_pool_default(Qnil), 0, storage);
}
VALUE
@@ -2473,7 +2515,6 @@ rb_fiber_start(rb_fiber_t *fiber)
rb_proc_t *proc;
enum ruby_tag_type state;
- int need_interrupt = TRUE;
VM_ASSERT(th->ec == GET_EC());
VM_ASSERT(FIBER_RESUMED_P(fiber));
@@ -2499,6 +2540,7 @@ rb_fiber_start(rb_fiber_t *fiber)
}
EC_POP_TAG();
+ int need_interrupt = TRUE;
VALUE err = Qfalse;
if (state) {
err = th->ec->errinfo;
@@ -2507,13 +2549,16 @@ rb_fiber_start(rb_fiber_t *fiber)
if (state == TAG_RAISE) {
// noop...
}
+ else if (state == TAG_FATAL && err == RUBY_FATAL_FIBER_KILLED) {
+ need_interrupt = FALSE;
+ err = Qfalse;
+ }
else if (state == TAG_FATAL) {
rb_threadptr_pending_interrupt_enque(th, err);
}
else {
err = rb_vm_make_jump_tag_but_local_jump(state, err);
}
- need_interrupt = TRUE;
}
rb_fiber_terminate(fiber, need_interrupt, err);
@@ -2523,37 +2568,48 @@ rb_fiber_start(rb_fiber_t *fiber)
void
rb_threadptr_root_fiber_setup(rb_thread_t *th)
{
- rb_fiber_t *fiber = ruby_mimmalloc(sizeof(rb_fiber_t));
+ rb_fiber_t *fiber = ruby_mimcalloc(1, sizeof(rb_fiber_t));
if (!fiber) {
rb_bug("%s", strerror(errno)); /* ... is it possible to call rb_bug here? */
}
- MEMZERO(fiber, rb_fiber_t, 1);
+
fiber->cont.type = FIBER_CONTEXT;
fiber->cont.saved_ec.fiber_ptr = fiber;
+ fiber->cont.saved_ec.serial = next_ec_serial(th->ractor);
fiber->cont.saved_ec.thread_ptr = th;
fiber->blocking = 1;
+ fiber->killed = 0;
fiber_status_set(fiber, FIBER_RESUMED); /* skip CREATED */
+
+ coroutine_initialize_main(&fiber->context);
+
th->ec = &fiber->cont.saved_ec;
- // When rb_threadptr_root_fiber_setup is called for the first time, mjit_enabled and
- // rb_yjit_enabled_p() are still false. So this does nothing and rb_jit_cont_init() that is
- // called later will take care of it. However, you still have to call cont_init_jit_cont()
- // here for other Ractors, which are not initialized by rb_jit_cont_init().
+
cont_init_jit_cont(&fiber->cont);
}
void
+rb_root_fiber_obj_setup(rb_thread_t *th)
+{
+ rb_fiber_t *fiber = th->ec->fiber_ptr;
+ VALUE fiber_value = fiber_alloc(rb_cFiber);
+ DATA_PTR(fiber_value) = fiber;
+ fiber->cont.self = fiber_value;
+}
+
+void
rb_threadptr_root_fiber_release(rb_thread_t *th)
{
if (th->root_fiber) {
/* ignore. A root fiber object will free th->ec */
}
else {
- rb_execution_context_t *ec = GET_EC();
+ rb_execution_context_t *ec = rb_current_execution_context(false);
VM_ASSERT(th->ec->fiber_ptr->cont.type == FIBER_CONTEXT);
VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);
- if (th->ec == ec) {
+ if (ec && th->ec == ec) {
rb_ractor_set_current_ec(th->ractor, NULL);
}
fiber_free(th->ec->fiber_ptr);
@@ -2611,15 +2667,7 @@ rb_fiber_current(void)
static inline void
fiber_store(rb_fiber_t *next_fiber, rb_thread_t *th)
{
- rb_fiber_t *fiber;
-
- if (th->ec->fiber_ptr != NULL) {
- fiber = th->ec->fiber_ptr;
- }
- else {
- /* create root fiber */
- fiber = root_fiber_alloc(th);
- }
+ rb_fiber_t *fiber = th->ec->fiber_ptr;
if (FIBER_CREATED_P(next_fiber)) {
fiber_prepare_stack(next_fiber);
@@ -2634,6 +2682,19 @@ fiber_store(rb_fiber_t *next_fiber, rb_thread_t *th)
fiber_setcontext(next_fiber, fiber);
}
+static void
+fiber_check_killed(rb_fiber_t *fiber)
+{
+ VM_ASSERT(fiber == fiber_current());
+
+ if (fiber->killed) {
+ rb_thread_t *thread = fiber->cont.saved_ec.thread_ptr;
+
+ thread->ec->errinfo = RUBY_FATAL_FIBER_KILLED;
+ EC_JUMP_TAG(thread->ec, RUBY_TAG_FATAL);
+ }
+}
+
static inline VALUE
fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat, rb_fiber_t *resuming_fiber, bool yielding)
{
@@ -2642,7 +2703,9 @@ fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat, rb_fi
rb_thread_t *th = GET_THREAD();
/* make sure the root_fiber object is available */
- if (th->root_fiber == NULL) root_fiber_alloc(th);
+ if (th->root_fiber == NULL) {
+ th->root_fiber = th->ec->fiber_ptr;
+ }
if (th->ec->fiber_ptr == fiber) {
/* ignore fiber context switch
@@ -2708,7 +2771,9 @@ fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat, rb_fi
// We cannot free the stack until the pthread is joined:
#ifndef COROUTINE_PTHREAD_CONTEXT
if (resuming_fiber && FIBER_TERMINATED_P(fiber)) {
- fiber_stack_release(fiber);
+ RB_VM_LOCKING() {
+ fiber_stack_release(fiber);
+ }
}
#endif
@@ -2722,7 +2787,14 @@ fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat, rb_fi
current_fiber = th->ec->fiber_ptr;
value = current_fiber->cont.value;
- if (current_fiber->cont.argc == -1) rb_exc_raise(value);
+
+ fiber_check_killed(current_fiber);
+
+ if (current_fiber->cont.argc == -1) {
+ // Fiber#raise will trigger this path.
+ rb_exc_raise(value);
+ }
+
return value;
}
@@ -2758,6 +2830,8 @@ fiber_blocking_yield(VALUE fiber_value)
rb_fiber_t *fiber = fiber_ptr(fiber_value);
rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;
+ VM_ASSERT(fiber->blocking == 0);
+
// fiber->blocking is `unsigned int : 1`, so we use it as a boolean:
fiber->blocking = 1;
@@ -2799,7 +2873,8 @@ rb_fiber_blocking(VALUE class)
// If we are already blocking, this is essentially a no-op:
if (fiber->blocking) {
return rb_yield(fiber_value);
- } else {
+ }
+ else {
return rb_ensure(fiber_blocking_yield, fiber_value, fiber_blocking_ensure, fiber_value);
}
}
@@ -2838,6 +2913,7 @@ void
rb_fiber_close(rb_fiber_t *fiber)
{
fiber_status_set(fiber, FIBER_TERMINATED);
+ rb_ec_close(&fiber->cont.saved_ec);
}
static void
@@ -3159,12 +3235,13 @@ rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
}
static VALUE
-fiber_raise(rb_fiber_t *fiber, int argc, const VALUE *argv)
+fiber_raise(rb_fiber_t *fiber, VALUE exception)
{
- VALUE exception = rb_make_exception(argc, argv);
-
- if (fiber->resuming_fiber) {
- rb_raise(rb_eFiberError, "attempt to raise a resuming fiber");
+ if (fiber == fiber_current()) {
+ rb_exc_raise(exception);
+ }
+ else if (fiber->resuming_fiber) {
+ return fiber_raise(fiber->resuming_fiber, exception);
}
else if (FIBER_SUSPENDED_P(fiber) && !fiber->yielding) {
return fiber_transfer_kw(fiber, -1, &exception, RB_NO_KEYWORDS);
@@ -3175,31 +3252,46 @@ fiber_raise(rb_fiber_t *fiber, int argc, const VALUE *argv)
}
VALUE
-rb_fiber_raise(VALUE fiber, int argc, const VALUE *argv)
+rb_fiber_raise(VALUE fiber, int argc, VALUE *argv)
{
- return fiber_raise(fiber_ptr(fiber), argc, argv);
+ VALUE exception = rb_exception_setup(argc, argv);
+
+ return fiber_raise(fiber_ptr(fiber), exception);
}
/*
* call-seq:
- * fiber.raise -> obj
- * fiber.raise(string) -> obj
- * fiber.raise(exception [, string [, array]]) -> obj
+ * raise(exception, message = exception.to_s, backtrace = nil, cause: $!)
+ * raise(message = nil, cause: $!)
*
* Raises an exception in the fiber at the point at which the last
- * +Fiber.yield+ was called. If the fiber has not been started or has
+ * +Fiber.yield+ was called.
+ *
+ * f = Fiber.new {
+ * puts "Before the yield"
+ * Fiber.yield 1 # -- exception will be raised here
+ * puts "After the yield"
+ * }
+ *
+ * p f.resume
+ * f.raise "Gotcha"
+ *
+ * Output
+ *
+ *     Before the yield
+ * 1
+ * t.rb:8:in 'Fiber.yield': Gotcha (RuntimeError)
+ * from t.rb:8:in 'block in <main>'
+ *
+ * If the fiber has not been started or has
* already run to completion, raises +FiberError+. If the fiber is
* yielding, it is resumed. If it is transferring, it is transferred into.
* But if it is resuming, raises +FiberError+.
*
- * With no arguments, raises a +RuntimeError+. With a single +String+
- * argument, raises a +RuntimeError+ with the string as a message. Otherwise,
- * the first parameter should be the name of an +Exception+ class (or an
- * object that returns an +Exception+ object when sent an +exception+
- * message). The optional second parameter sets the message associated with
- * the exception, and the third parameter is an array of callback information.
- * Exceptions are caught by the +rescue+ clause of <code>begin...end</code>
- * blocks.
+ * Raises +FiberError+ if called on a Fiber belonging to another +Thread+.
+ *
+ * See Kernel#raise for more information on arguments.
+ *
*/
static VALUE
rb_fiber_m_raise(int argc, VALUE *argv, VALUE self)
@@ -3209,6 +3301,46 @@ rb_fiber_m_raise(int argc, VALUE *argv, VALUE self)
/*
* call-seq:
+ * fiber.kill -> nil
+ *
+ * Terminates the fiber by raising an uncatchable exception.
+ * It only terminates the given fiber and no other fiber, returning +nil+ to
+ * another fiber if that fiber was calling #resume or #transfer.
+ *
+ * <tt>Fiber#kill</tt> only interrupts another fiber when it is in Fiber.yield.
+ * If called on the current fiber then it raises that exception at the <tt>Fiber#kill</tt> call site.
+ *
+ * If the fiber has not been started, it transitions directly to the terminated state.
+ *
+ * If the fiber is already terminated, does nothing.
+ *
+ * Raises FiberError if called on a fiber belonging to another thread.
+ */
+static VALUE
+rb_fiber_m_kill(VALUE self)
+{
+ rb_fiber_t *fiber = fiber_ptr(self);
+
+ if (fiber->killed) return Qfalse;
+ fiber->killed = 1;
+
+ if (fiber->status == FIBER_CREATED) {
+ fiber->status = FIBER_TERMINATED;
+ }
+ else if (fiber->status != FIBER_TERMINATED) {
+ if (fiber_current() == fiber) {
+ fiber_check_killed(fiber);
+ }
+ else {
+ fiber_raise(fiber_ptr(self), Qnil);
+ }
+ }
+
+ return self;
+}
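A usage sketch of the semantics documented above; note that ensure blocks in the killed fiber still run:

    fiber = Fiber.new do
      while true
        Fiber.yield
      end
    ensure
      puts "Exiting fiber"
    end

    fiber.resume    # suspends inside Fiber.yield
    fiber.kill      # prints "Exiting fiber", then control returns here
    fiber.alive?    # => false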
+
+/*
+ * call-seq:
* Fiber.current -> fiber
*
* Returns the current fiber. If you are not running in the context of
@@ -3254,6 +3386,8 @@ rb_fiber_atfork(rb_thread_t *th)
th->root_fiber = th->ec->fiber_ptr;
}
th->root_fiber->prev = 0;
+ th->root_fiber->blocking = 1;
+ th->blocking = 1;
}
}
#endif
@@ -3365,6 +3499,15 @@ Init_Cont(void)
const char *fiber_shared_fiber_pool_free_stacks = getenv("RUBY_SHARED_FIBER_POOL_FREE_STACKS");
if (fiber_shared_fiber_pool_free_stacks) {
shared_fiber_pool.free_stacks = atoi(fiber_shared_fiber_pool_free_stacks);
+
+ if (shared_fiber_pool.free_stacks < 0) {
+ rb_warn("Setting RUBY_SHARED_FIBER_POOL_FREE_STACKS to a negative value is not allowed.");
+ shared_fiber_pool.free_stacks = 0;
+ }
+
+ if (shared_fiber_pool.free_stacks > 1) {
+ rb_warn("Setting RUBY_SHARED_FIBER_POOL_FREE_STACKS to a value greater than 1 is operating system specific, and may cause crashes.");
+ }
}
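Given the advice = free_stacks >> 1 computation in fiber_pool_stack_free above, the variable encodes an optional madvise advice in its upper bits; a rough Ruby-level sketch of the decoding (the value 4 for MADV_DONTNEED is Linux-specific and purely illustrative):

    value  = Integer(ENV.fetch("RUBY_SHARED_FIBER_POOL_FREE_STACKS", "0"))
    advice = value >> 1    # 0 selects the platform default (MADV_FREE, MADV_DONTNEED, ...)
    # value 1 enables freeing with the default advice; value 9 would request advice 4,
    # which is MADV_DONTNEED on Linux -- hence the "operating system specific" warning above.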
rb_cFiber = rb_define_class("Fiber", rb_cObject);
@@ -3382,6 +3525,7 @@ Init_Cont(void)
rb_define_method(rb_cFiber, "storage=", rb_fiber_storage_set, 1);
rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
rb_define_method(rb_cFiber, "raise", rb_fiber_m_raise, -1);
+ rb_define_method(rb_cFiber, "kill", rb_fiber_m_kill, 0);
rb_define_method(rb_cFiber, "backtrace", rb_fiber_backtrace, -1);
rb_define_method(rb_cFiber, "backtrace_locations", rb_fiber_backtrace_locations, -1);
rb_define_method(rb_cFiber, "to_s", fiber_to_s, 0);
@@ -3396,7 +3540,15 @@ Init_Cont(void)
rb_define_singleton_method(rb_cFiber, "schedule", rb_fiber_s_schedule, -1);
+ rb_thread_t *current_thread = rb_current_thread();
+ RUBY_ASSERT(CLASS_OF(current_thread->ec->fiber_ptr->cont.self) == 0);
+ *(VALUE *)&((struct RBasic *)current_thread->ec->fiber_ptr->cont.self)->klass = rb_cFiber;
+
#ifdef RB_EXPERIMENTAL_FIBER_POOL
+ /*
+ * Document-class: Fiber::Pool
+ * :nodoc: experimental
+ */
rb_cFiberPool = rb_define_class_under(rb_cFiber, "Pool", rb_cObject);
rb_define_alloc_func(rb_cFiberPool, fiber_pool_alloc);
rb_define_method(rb_cFiberPool, "initialize", rb_fiber_pool_initialize, -1);