path: root/mjit.c
author     nagachika <nagachika@ruby-lang.org>    2021-06-03 20:46:53 +0900
committer  nagachika <nagachika@ruby-lang.org>    2021-06-03 20:46:53 +0900
commit     2dd18df4a35a4b2dd0cf2dec7759898246fc6935 (patch)
tree       9ec562b07900603960bc76f0ab7c669b9b281dba /mjit.c
parent     9680ee97e0b3e87c0fc9a65c01de1ee50a1a178b (diff)
merge revision(s) 86c262541ad07528842d76dab4b9b34bd888d5f4, 7e14762159643b4415e094f9d2a90afaf7994588: [Backport #17935]

Fix a race condition around mjit_recompile

This fixes SEGVs like https://github.com/ruby/ruby/runs/2715166621?check_suite_focus=true.
When mjit_recompile is called while mjit_compile is compiling the exact same iseq
(and after it called mjit_capture_cc_entries), iseq->body->jit_unit is re-created
and its cc_entries becomes NULL. Then, when it tries to look up cc_entries through
iseq->body->jit_unit, it fails.
---
 mjit.c        | 21 +++++++++++++--------
 mjit_worker.c |  4 ++++
 2 files changed, 17 insertions(+), 8 deletions(-)

Do not doubly hold an MJIT lock

This is a follow-up of 86c262541ad07528842d76dab4b9b34bd888d5f4.
CRITICAL_SECTION_START/FINISH are not needed when it's called from an MJIT worker.
Also, ZALLOC needs to be calloc because ZALLOC may trigger GC, which an MJIT worker
must not do.
---
 mjit.c        | 23 ++++++++++++++---------
 mjit_worker.c |  4 ++--
 2 files changed, 16 insertions(+), 11 deletions(-)
Diffstat (limited to 'mjit.c')
-rw-r--r--  mjit.c  42
1 file changed, 26 insertions(+), 16 deletions(-)
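Before the diff, a minimal standalone C sketch of the pattern the second patch ("Do not doubly hold an MJIT lock") introduces: a queue-insert helper that takes a worker_p flag so it only acquires the lock when its caller does not already hold it, and that allocates with plain calloc() because the worker must not trigger GC. All names here (enqueue_unit, worker_requeue, queue_lock, struct unit) are hypothetical stand-ins, not Ruby's actual MJIT structures; CRITICAL_SECTION_START/FINISH is approximated with a pthread mutex.

/* Sketch only: illustrates (1) guarding lock/unlock with worker_p so a worker
 * that already holds the lock does not take it twice, and (2) allocating with
 * calloc() instead of a GC-aware allocator inside the worker path. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct unit {
    int id;
    struct unit *next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct unit *queue_head;

/* Mirrors the worker_p parameter added to mjit_add_iseq_to_process:
 * when called from the worker, the caller already holds queue_lock,
 * so taking it again here would deadlock (or need a recursive mutex). */
static void
enqueue_unit(int id, bool worker_p)
{
    /* calloc, not a GC-triggering allocator: the worker must not run GC. */
    struct unit *u = calloc(1, sizeof(*u));
    if (u == NULL)
        return;
    u->id = id;

    if (!worker_p)
        pthread_mutex_lock(&queue_lock);

    u->next = queue_head;
    queue_head = u;

    if (!worker_p)
        pthread_mutex_unlock(&queue_lock);
}

/* Worker-side caller: holds the lock across a larger critical section
 * and passes worker_p = true so enqueue_unit does not lock again. */
static void
worker_requeue(int id)
{
    pthread_mutex_lock(&queue_lock);
    enqueue_unit(id, true);
    pthread_mutex_unlock(&queue_lock);
}

int
main(void)
{
    enqueue_unit(1, false);  /* non-worker path: locks internally */
    worker_requeue(2);       /* worker path: lock held by the caller */
    for (struct unit *u = queue_head; u; u = u->next)
        printf("unit %d\n", u->id);
    return 0;
}

In the actual patch below, this corresponds to the worker_p parameter added to mjit_add_iseq_to_process and the switch from ZALLOC to calloc in create_unit.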
diff --git a/mjit.c b/mjit.c
index a152fcb557..db0efb76a3 100644
--- a/mjit.c
+++ b/mjit.c
@@ -230,13 +230,13 @@ finish_conts(void)
}
}
-// Create unit for `iseq`.
+// Create unit for `iseq`. This function may be called from an MJIT worker.
static void
create_unit(const rb_iseq_t *iseq)
{
struct rb_mjit_unit *unit;
- unit = ZALLOC(struct rb_mjit_unit);
+ unit = calloc(1, sizeof(struct rb_mjit_unit));
if (unit == NULL)
return;
@@ -245,8 +245,9 @@ create_unit(const rb_iseq_t *iseq)
iseq->body->jit_unit = unit;
}
+// This is called from an MJIT worker when worker_p is true.
static void
-mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_info *compile_info)
+mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_info *compile_info, bool worker_p)
{
if (!mjit_enabled || pch_status == PCH_FAILED)
return;
@@ -260,14 +261,18 @@ mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_inf
if (compile_info != NULL)
iseq->body->jit_unit->compile_info = *compile_info;
- CRITICAL_SECTION_START(3, "in add_iseq_to_process");
+ if (!worker_p) {
+ CRITICAL_SECTION_START(3, "in add_iseq_to_process");
+ }
add_to_list(iseq->body->jit_unit, &unit_queue);
if (active_units.length >= mjit_opts.max_cache_size) {
unload_requests++;
}
- verbose(3, "Sending wakeup signal to workers in mjit_add_iseq_to_process");
- rb_native_cond_broadcast(&mjit_worker_wakeup);
- CRITICAL_SECTION_FINISH(3, "in add_iseq_to_process");
+ if (!worker_p) {
+ verbose(3, "Sending wakeup signal to workers in mjit_add_iseq_to_process");
+ rb_native_cond_broadcast(&mjit_worker_wakeup);
+ CRITICAL_SECTION_FINISH(3, "in add_iseq_to_process");
+ }
}
// Add ISEQ to be JITed in parallel with the current thread.
@@ -275,7 +280,7 @@ mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_inf
void
rb_mjit_add_iseq_to_process(const rb_iseq_t *iseq)
{
- mjit_add_iseq_to_process(iseq, NULL);
+ mjit_add_iseq_to_process(iseq, NULL, false);
}
// For this timeout seconds, --jit-wait will wait for JIT compilation finish.
@@ -334,17 +339,22 @@ mjit_recompile(const rb_iseq_t *iseq)
RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(iseq->body->location.first_lineno));
assert(iseq->body->jit_unit != NULL);
- // Lazily move active_units to stale_units to avoid race conditions around active_units with compaction
- CRITICAL_SECTION_START(3, "in rb_mjit_recompile_iseq");
- iseq->body->jit_unit->stale_p = true;
- pending_stale_p = true;
- CRITICAL_SECTION_FINISH(3, "in rb_mjit_recompile_iseq");
-
- iseq->body->jit_func = (mjit_func_t)NOT_ADDED_JIT_ISEQ_FUNC;
- mjit_add_iseq_to_process(iseq, &iseq->body->jit_unit->compile_info);
if (UNLIKELY(mjit_opts.wait)) {
+ remove_from_list(iseq->body->jit_unit, &active_units);
+ add_to_list(iseq->body->jit_unit, &stale_units);
+ mjit_add_iseq_to_process(iseq, &iseq->body->jit_unit->compile_info, false);
mjit_wait(iseq->body);
}
+ else {
+ // Lazily move active_units to stale_units to avoid race conditions around active_units with compaction.
+ // Also, it's lazily moved to unit_queue as well because otherwise it won't be added to stale_units properly.
+ // It's good to avoid a race condition between mjit_add_iseq_to_process and mjit_compile around jit_unit as well.
+ CRITICAL_SECTION_START(3, "in rb_mjit_recompile_iseq");
+ iseq->body->jit_unit->stale_p = true;
+ iseq->body->jit_func = (mjit_func_t)NOT_ADDED_JIT_ISEQ_FUNC;
+ pending_stale_p = true;
+ CRITICAL_SECTION_FINISH(3, "in rb_mjit_recompile_iseq");
+ }
}
// Recompile iseq, disabling send optimization
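A second standalone sketch, again with hypothetical names rather than the real MJIT data structures, of the lazy invalidation pattern used in mjit_recompile above: the requesting thread only sets a per-unit stale_p flag and a global pending_stale_p flag under the lock, and the worker later sweeps flagged units out of the active set while it already holds the lock, so only one side ever rewrites the list and the compaction race described in the commit message cannot occur.

/* Sketch only: requester marks, worker sweeps; both sides use the same lock,
 * but only the worker mutates the "active" set. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NUNITS 4

struct unit {
    int id;
    bool stale_p;   /* set by the requester, consumed by the worker */
    bool active_p;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct unit units[NUNITS];
static bool pending_stale_p;

/* Requester side (cf. rb_mjit_recompile_iseq): mark only, do not touch lists. */
static void
request_recompile(struct unit *u)
{
    pthread_mutex_lock(&lock);
    u->stale_p = true;
    pending_stale_p = true;
    pthread_mutex_unlock(&lock);
}

/* Worker side: sweep flagged units in one place, under the same lock. */
static void
worker_sweep_stale(void)
{
    pthread_mutex_lock(&lock);
    if (pending_stale_p) {
        pending_stale_p = false;
        for (int i = 0; i < NUNITS; i++) {
            if (units[i].stale_p) {
                units[i].stale_p = false;
                units[i].active_p = false;  /* "move" out of the active set */
                printf("unit %d marked for recompilation\n", units[i].id);
            }
        }
    }
    pthread_mutex_unlock(&lock);
}

int
main(void)
{
    for (int i = 0; i < NUNITS; i++) {
        units[i].id = i;
        units[i].active_p = true;
    }
    request_recompile(&units[2]);
    worker_sweep_stale();
    return 0;
}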