-rw-r--r--  mjit.c        | 14
-rw-r--r--  mjit_worker.c |  5
-rw-r--r--  thread.c      |  3
-rw-r--r--  vm_core.h     |  7
-rw-r--r--  vm_trace.c    | 58
5 files changed, 67 insertions(+), 20 deletions(-)
diff --git a/mjit.c b/mjit.c
index 46223af492..1f9140b7a8 100644
--- a/mjit.c
+++ b/mjit.c
@@ -106,20 +106,6 @@ mjit_gc_finish_hook(void)
CRITICAL_SECTION_FINISH(4, "mjit_gc_finish_hook");
}
-/* Wrap critical section to prevent [Bug #15316] */
-void
-mjit_postponed_job_register_start_hook(void)
-{
- CRITICAL_SECTION_START(4, "mjit_postponed_job_register_start_hook");
-}
-
-/* Unwrap critical section of mjit_postponed_job_register_start_hook() */
-void
-mjit_postponed_job_register_finish_hook(void)
-{
- CRITICAL_SECTION_FINISH(4, "mjit_postponed_job_register_finish_hook");
-}
-
/* Iseqs can be garbage collected. This function should be called when that
happens. It removes the iseq from the unit. */
void
diff --git a/mjit_worker.c b/mjit_worker.c
index 6a909714de..90aaf9d9a9 100644
--- a/mjit_worker.c
+++ b/mjit_worker.c
@@ -1133,6 +1133,9 @@ static mjit_copy_job_t mjit_copy_job;
static void mjit_copy_job_handler(void *data);
+/* vm_trace.c */
+int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data);
+
/* We're lazily copying cache values from the main thread because these cache
values could differ between enqueue time and dequeue time.
Return TRUE if copy succeeds. */
@@ -1148,7 +1151,7 @@ copy_cache_from_main_thread(mjit_copy_job_t *job)
return job->finish_p;
}
- if (!rb_postponed_job_register(0, mjit_copy_job_handler, (void *)job))
+ if (!rb_workqueue_register(0, mjit_copy_job_handler, (void *)job))
return FALSE;
CRITICAL_SECTION_START(3, "in MJIT copy job wait");
/* checking `stop_worker_p` too because `RUBY_VM_CHECK_INTS(ec)` may not
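
The hunk above swaps rb_postponed_job_register() for the new rb_workqueue_register() at the point where the MJIT worker (a native, non-Ruby thread) asks the main thread to copy inline-cache values. For readers unfamiliar with that handoff, here is a minimal sketch of the enqueue-and-wait pattern it relies on; the struct, field, and function names below are illustrative stand-ins, not the actual mjit_copy_job machinery.

    /* Illustration only: a non-Ruby thread queues work for the main Ruby
     * thread and blocks until the callback has run there. Assumes the
     * internal thread_native API (ruby/thread_native.h). */
    struct copy_request {
        rb_nativethread_lock_t lock;
        rb_nativethread_cond_t cond;
        int finish_p;                 /* set by the callback on the main thread */
    };

    static void
    copy_done(void *data)             /* runs on the main thread at flush time */
    {
        struct copy_request *req = data;
        rb_nativethread_lock_lock(&req->lock);
        req->finish_p = TRUE;
        rb_native_cond_signal(&req->cond);
        rb_nativethread_lock_unlock(&req->lock);
    }

    static int
    request_copy(struct copy_request *req)  /* called on the MJIT worker thread */
    {
        if (!rb_workqueue_register(0, copy_done, req))
            return FALSE;             /* malloc failed; caller must cope */
        rb_nativethread_lock_lock(&req->lock);
        while (!req->finish_p)
            rb_native_cond_wait(&req->cond, &req->lock);
        rb_nativethread_lock_unlock(&req->lock);
        return TRUE;
    }

The real copy_cache_from_main_thread() additionally checks stop_worker_p, as the context line above hints, so the worker can bail out during VM shutdown instead of waiting forever.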
diff --git a/thread.c b/thread.c
index d02b560ac1..d76f05aa41 100644
--- a/thread.c
+++ b/thread.c
@@ -419,6 +419,7 @@ rb_vm_gvl_destroy(rb_vm_t *vm)
if (0) {
/* may be held by running threads */
rb_native_mutex_destroy(&vm->waitpid_lock);
+ rb_native_mutex_destroy(&vm->workqueue_lock);
}
}
@@ -4422,6 +4423,7 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r
/* may be held by MJIT threads in parent */
rb_native_mutex_initialize(&vm->waitpid_lock);
+ rb_native_mutex_initialize(&vm->workqueue_lock);
/* may be held by any thread in parent */
rb_native_mutex_initialize(&th->interrupt_lock);
@@ -5183,6 +5185,7 @@ Init_Thread(void)
gvl_init(th->vm);
gvl_acquire(th->vm, th);
rb_native_mutex_initialize(&th->vm->waitpid_lock);
+ rb_native_mutex_initialize(&th->vm->workqueue_lock);
rb_native_mutex_initialize(&th->interrupt_lock);
th->pending_interrupt_queue = rb_ary_tmp_new(0);
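
thread.c now treats workqueue_lock exactly like waitpid_lock: it is (conditionally) destroyed in rb_vm_gvl_destroy(), re-initialized in the child after fork, and initialized at VM boot. The fork case is the interesting one: a mutex held by an MJIT thread in the parent would be inherited in a locked state by the child, so the child simply re-initializes it. A hedged sketch of the same idiom outside the VM, using plain pthreads:

    /* Illustration only: reset a mutex in the child after fork(), mirroring
     * what rb_thread_atfork_internal() does for vm->workqueue_lock. */
    #include <pthread.h>

    static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    atfork_child(void)
    {
        /* the parent's lock state is meaningless here; start from scratch */
        pthread_mutex_init(&work_lock, NULL);
    }

    static void
    install_fork_handlers(void)
    {
        pthread_atfork(NULL, NULL, atfork_child);
    }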
diff --git a/vm_core.h b/vm_core.h
index c557562e17..fd47aed804 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -638,12 +638,16 @@ typedef struct rb_vm_struct {
/* relation table of ensure - rollback for callcc */
struct st_table *ensure_rollback_table;
- /* postponed_job */
+ /* postponed_job (async-signal-safe, NOT thread-safe) */
struct rb_postponed_job_struct *postponed_job_buffer;
int postponed_job_index;
int src_encoding_index;
+ /* workqueue (thread-safe, NOT async-signal-safe) */
+ struct list_head workqueue; /* <=> rb_workqueue_job.jnode */
+ rb_nativethread_lock_t workqueue_lock;
+
VALUE verbose, debug, orig_progname, progname;
VALUE coverages;
int coverage_mode;
@@ -1628,6 +1632,7 @@ rb_vm_living_threads_init(rb_vm_t *vm)
{
list_head_init(&vm->waiting_fds);
list_head_init(&vm->waiting_pids);
+ list_head_init(&vm->workqueue);
list_head_init(&vm->waiting_grps);
list_head_init(&vm->living_threads);
vm->living_thread_num = 0;
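
The comments added to rb_vm_struct spell out the division of labor: postponed_job_buffer is a fixed-size array driven by atomic operations, so it is async-signal-safe but not safe to use concurrently from other threads; the new workqueue is a malloc-backed list guarded by workqueue_lock, so it is safe from any native thread but must never be touched from a signal handler. A hedged sketch of how a caller picks between the two entry points (the callback names are hypothetical):

    /* Illustration only: choosing the registration API that matches the
     * constraints documented above. */
    static void profile_sample(void *data);   /* deferred until it is safe */
    static void worker_request(void *data);

    static void
    on_sigprof(int sig)
    {
        /* signal handler: async-signal-safe path -- no locks, no malloc */
        rb_postponed_job_register_one(0, profile_sample, NULL);
    }

    static int
    from_native_thread(void *req)
    {
        /* non-Ruby thread: thread-safe path -- may lock and may malloc */
        return rb_workqueue_register(0, worker_request, req);
    }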
diff --git a/vm_trace.c b/vm_trace.c
index d6b60372b3..23bba86d29 100644
--- a/vm_trace.c
+++ b/vm_trace.c
@@ -1752,12 +1752,18 @@ typedef struct rb_postponed_job_struct {
#define MAX_POSTPONED_JOB 1000
#define MAX_POSTPONED_JOB_SPECIAL_ADDITION 24
+struct rb_workqueue_job {
+ struct list_node jnode; /* <=> vm->workqueue */
+ rb_postponed_job_t job;
+};
+
void
Init_vm_postponed_job(void)
{
rb_vm_t *vm = GET_VM();
vm->postponed_job_buffer = ALLOC_N(rb_postponed_job_t, MAX_POSTPONED_JOB);
vm->postponed_job_index = 0;
+ /* workqueue is initialized when VM locks are initialized */
}
enum postponed_job_register_result {
@@ -1766,7 +1772,7 @@ enum postponed_job_register_result {
PJRR_INTERRUPTED = 2
};
-/* Async-signal-safe, thread-safe against MJIT worker thread */
+/* Async-signal-safe */
static enum postponed_job_register_result
postponed_job_register(rb_execution_context_t *ec, rb_vm_t *vm,
unsigned int flags, rb_postponed_job_func_t func, void *data, int max, int expected_index)
@@ -1774,13 +1780,11 @@ postponed_job_register(rb_execution_context_t *ec, rb_vm_t *vm,
rb_postponed_job_t *pjob;
if (expected_index >= max) return PJRR_FULL; /* failed */
- if (mjit_enabled) mjit_postponed_job_register_start_hook();
if (ATOMIC_CAS(vm->postponed_job_index, expected_index, expected_index+1) == expected_index) {
pjob = &vm->postponed_job_buffer[expected_index];
}
else {
- if (mjit_enabled) mjit_postponed_job_register_finish_hook();
return PJRR_INTERRUPTED;
}
@@ -1789,7 +1793,6 @@ postponed_job_register(rb_execution_context_t *ec, rb_vm_t *vm,
pjob->data = data;
RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec);
- if (mjit_enabled) mjit_postponed_job_register_finish_hook();
return PJRR_SUCCESS;
}
@@ -1842,6 +1845,29 @@ rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func,
}
}
+/*
+ * Thread-safe; may be called from non-Ruby (native) threads.
+ * Returns FALSE on failure (ENOMEM), TRUE otherwise.
+ */
+int
+rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
+{
+ struct rb_workqueue_job *wq_job = malloc(sizeof(*wq_job));
+ rb_vm_t *vm = GET_VM();
+
+ if (!wq_job) return FALSE;
+ wq_job->job.func = func;
+ wq_job->job.data = data;
+
+ rb_nativethread_lock_lock(&vm->workqueue_lock);
+ list_add_tail(&vm->workqueue, &wq_job->jnode);
+ rb_nativethread_lock_unlock(&vm->workqueue_lock);
+
+ RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
+
+ return TRUE;
+}
+
void
rb_postponed_job_flush(rb_vm_t *vm)
{
@@ -1849,6 +1875,13 @@ rb_postponed_job_flush(rb_vm_t *vm)
const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK|TRAP_INTERRUPT_MASK;
volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
VALUE volatile saved_errno = ec->errinfo;
+ struct list_head tmp;
+
+ list_head_init(&tmp);
+
+ rb_nativethread_lock_lock(&vm->workqueue_lock);
+ list_append_list(&tmp, &vm->workqueue);
+ rb_nativethread_lock_unlock(&vm->workqueue_lock);
ec->errinfo = Qnil;
/* mask POSTPONED_JOB dispatch */
@@ -1857,16 +1890,33 @@ rb_postponed_job_flush(rb_vm_t *vm)
EC_PUSH_TAG(ec);
if (EC_EXEC_TAG() == TAG_NONE) {
int index;
+ struct rb_workqueue_job *wq_job;
+
while ((index = vm->postponed_job_index) > 0) {
if (ATOMIC_CAS(vm->postponed_job_index, index, index-1) == index) {
rb_postponed_job_t *pjob = &vm->postponed_job_buffer[index-1];
(*pjob->func)(pjob->data);
}
}
+ while ((wq_job = list_pop(&tmp, struct rb_workqueue_job, jnode))) {
+ rb_postponed_job_t pjob = wq_job->job;
+
+ free(wq_job);
+ (pjob.func)(pjob.data);
+ }
}
EC_POP_TAG();
}
/* restore POSTPONED_JOB mask */
ec->interrupt_mask &= ~(saved_mask ^ block_mask);
ec->errinfo = saved_errno;
+
+ /* don't leak memory if a job threw an exception */
+ if (!list_empty(&tmp)) {
+ rb_nativethread_lock_lock(&vm->workqueue_lock);
+ list_prepend_list(&vm->workqueue, &tmp);
+ rb_nativethread_lock_unlock(&vm->workqueue_lock);
+
+ RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
+ }
}
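
rb_postponed_job_flush() drains the new queue with a classic splice pattern: grab the whole list under workqueue_lock in one short critical section, run the jobs with the lock released, and, if a job raises and longjmps out of the EC_EXEC_TAG block, prepend whatever is left back onto vm->workqueue so those jobs are neither lost nor leaked. A stripped-down sketch of the same pattern with placeholder names (not the real VM fields):

    /* Illustration only: splice-and-drain a mutex-protected job list.
     * Assumes ccan/list (list_head_init, list_append_list, list_pop,
     * list_prepend_list), the thread_native lock API, and <stdlib.h>. */
    struct job {
        struct list_node jnode;
        void (*func)(void *);
        void *data;
    };

    static void
    drain_jobs(struct list_head *queue, rb_nativethread_lock_t *lock)
    {
        struct list_head tmp;
        struct job *j;

        list_head_init(&tmp);

        /* take everything queued so far; later jobs land on the now-empty queue */
        rb_nativethread_lock_lock(lock);
        list_append_list(&tmp, queue);
        rb_nativethread_lock_unlock(lock);

        /* run without the lock; free each node before invoking its callback */
        while ((j = list_pop(&tmp, struct job, jnode))) {
            void (*func)(void *) = j->func;
            void *data = j->data;
            free(j);
            func(data);
        }

        /* if a callback can escape via longjmp, the caller must re-prepend
         * whatever remains on tmp, as the diff above does after EC_POP_TAG() */
    }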