summaryrefslogtreecommitdiff
path: root/mjit_worker.c
diff options
context:
space:
mode:
authorTakashi Kokubun <takashikkbn@gmail.com>2020-12-06 20:23:19 -0800
committerTakashi Kokubun <takashikkbn@gmail.com>2020-12-06 20:23:32 -0800
commit556a7285080c1344c75bb93a333c9bfc5d631c61 (patch)
treecc84b882239c81588d951287c104d94877792663 /mjit_worker.c
parent4b4af40f5577263ae48813fed1593ba81c89c7e7 (diff)
Lock active_units references on compaction
Traversing active_units without the lock might race with mjit_recompile, which mutates the list.
Diffstat (limited to 'mjit_worker.c')
-rw-r--r--mjit_worker.c5
1 file changed, 5 insertions, 0 deletions
diff --git a/mjit_worker.c b/mjit_worker.c
index 725a922..56921da 100644
--- a/mjit_worker.c
+++ b/mjit_worker.c
@@ -936,7 +936,9 @@ compile_compact_jit_code(char* c_file)
// TODO: Consider using a more granular lock after we implement inlining across
// compacted functions (not done yet).
bool success = true;
+ CRITICAL_SECTION_START(3, "before active_units list_for_each");
list_for_each(&active_units.head, child_unit, unode) {
+ CRITICAL_SECTION_FINISH(3, "after active_units list_for_each");
char funcname[MAXPATHLEN];
sprint_funcname(funcname, child_unit);
@@ -950,7 +952,10 @@ compile_compact_jit_code(char* c_file)
if (!iseq_label) iseq_label = sep = "";
fprintf(f, "\n/* %s%s%s:%ld */\n", iseq_label, sep, iseq_path, iseq_lineno);
success &= mjit_compile(f, child_unit->iseq, funcname, child_unit->id);
+
+ CRITICAL_SECTION_START(3, "before active_units list_for_each");
}
+ CRITICAL_SECTION_FINISH(3, "after active_units list_for_each");
// release blocking mjit_gc_start_hook
CRITICAL_SECTION_START(3, "after mjit_compile to wakeup client for GC");