path: root/yjit
author    Alan Wu <XrXr@users.noreply.github.com>  2022-12-15 15:10:14 -0500
committer GitHub <noreply@github.com>              2022-12-15 15:10:14 -0500
commit    5fa608ed79645464bf80fa318d89745159301471 (patch)
tree      725a7c17fef47c78af9bb60008b1d6a6ed6e0c8f /yjit
parent    53db8fb450a06d6eea8c7452a006b17bd9354c67 (diff)
YJIT: Fix code GC freeing stubs with a trampoline (#6937)
Stubs we generate for invalidation don't necessarily co-locate with the code that jumps to the stub. Since we rely on co-location to keep stubs alive (they live in the outlined code block), it used to be possible for code GC inside branch_stub_hit() to free the stub that is its direct caller, leaving us to return into freed code afterwards.

Stubs used to look like:

```
mov arg0, branch_ptr
mov arg1, target_idx
mov arg2, ec
call branch_stub_hit
jmp return_reg
```

Since the call and the jump after the call are the same for all stubs, we can extract them and use a single static trampoline for them. That makes branch_stub_hit() always return to static code. Stubs now look like:

```
mov arg0, branch_ptr
mov arg1, target_idx
jmp trampoline
```

Where the trampoline is:

```
mov arg2, ec
call branch_stub_hit
jmp return_reg
```

Code GC can now free stubs without problems, since we always return to the trampoline, which is generated once at boot and lives forever. This might also save a small amount of memory by factoring out the static part of every stub, though the effect is probably minor.

[Bug #19234]

Co-authored-by: Takashi Kokubun <takashikkbn@gmail.com>
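To make the control-flow argument concrete outside of generated machine code, here is a small self-contained Rust sketch of the fixed shape. It is an illustration only, not YJIT code: `branch_stub_hit`, `Ec`, and `jump` are simplified stand-ins, and real stubs are machine code rather than closures.

```rust
type Ec = usize;

// Stand-in for YJIT's branch_stub_hit(): compiles the branch target and
// returns the address where execution should resume. In YJIT, this call
// may also trigger code GC, which can free stub code.
fn branch_stub_hit(branch_ptr: usize, target_idx: u64, _ec: Ec) -> usize {
    branch_ptr + target_idx as usize
}

// Stand-in for the final `jmp return_reg`.
fn jump(addr: usize) {
    println!("resuming at {:#x}", addr);
}

// The fix: the call and the jump after it live in one static function.
// Execution past branch_stub_hit() continues here, in code generated
// once at boot, so code GC is free to reclaim any stub.
fn trampoline(branch_ptr: usize, target_idx: u64, ec: Ec) {
    let ret = branch_stub_hit(branch_ptr, target_idx, ec);
    jump(ret);
}

fn main() {
    // A stub now only sets up its unique arguments (the two `mov`s)
    // and jumps to the shared trampoline.
    let stub = move |ec: Ec| trampoline(0x1000, 2, ec);
    stub(0);
}
```

Before the fix, the equivalent of `trampoline`'s body was inlined into every stub, so branch_stub_hit() could return into the very stub that code GC had just freed.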
Notes: Merged-By: maximecb <maximecb@ruby-lang.org>
Diffstat (limited to 'yjit')
-rw-r--r--  yjit/src/codegen.rs  10
-rw-r--r--  yjit/src/core.rs     49
2 files changed, 46 insertions(+), 13 deletions(-)
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index cec5c4671b..abd9eae2de 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -7147,6 +7147,9 @@ pub struct CodegenGlobals {
     // Filled by gen_code_for_exit_from_stub().
     stub_exit_code: CodePtr,
 
+    // For servicing branch stubs
+    branch_stub_hit_trampoline: CodePtr,
+
     // Code for full logic of returning from C method and exiting to the interpreter
     outline_full_cfunc_return_pos: CodePtr,
@@ -7241,6 +7244,8 @@ impl CodegenGlobals {
         let stub_exit_code = gen_code_for_exit_from_stub(&mut ocb);
 
+        let branch_stub_hit_trampoline = gen_branch_stub_hit_trampoline(&mut ocb);
+
         // Generate full exit code for C func
         let cfunc_exit_code = gen_full_cfunc_return(&mut ocb);
@@ -7257,6 +7262,7 @@
             leave_exit_code,
             stub_exit_code: stub_exit_code,
             outline_full_cfunc_return_pos: cfunc_exit_code,
+            branch_stub_hit_trampoline,
             global_inval_patches: Vec::new(),
             inline_frozen_bytes: 0,
             method_codegen_table: HashMap::new(),
@@ -7391,6 +7397,10 @@
         CodegenGlobals::get_instance().outline_full_cfunc_return_pos
     }
 
+    pub fn get_branch_stub_hit_trampoline() -> CodePtr {
+        CodegenGlobals::get_instance().branch_stub_hit_trampoline
+    }
+
     pub fn look_up_codegen_method(method_serial: usize) -> Option<MethodGenFn> {
         let table = &CodegenGlobals::get_instance().method_codegen_table;
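For orientation, the singleton shape this diff hooks into looks roughly like the sketch below. This is a simplified model, not YJIT's actual code: the real `CodegenGlobals` has many more fields, `CodePtr` is a real pointer into generated code rather than the placeholder used here, and YJIT's singleton mechanics differ from the `OnceLock` chosen for this standalone example.

```rust
use std::sync::OnceLock;

// Placeholder for YJIT's CodePtr (a pointer into generated code).
#[derive(Clone, Copy, Debug)]
struct CodePtr(usize);

struct CodegenGlobals {
    branch_stub_hit_trampoline: CodePtr,
}

// Model of the process-wide singleton; initialized once at boot.
static CODEGEN_GLOBALS: OnceLock<CodegenGlobals> = OnceLock::new();

impl CodegenGlobals {
    fn init(trampoline: CodePtr) {
        CODEGEN_GLOBALS
            .set(CodegenGlobals { branch_stub_hit_trampoline: trampoline })
            .ok()
            .expect("CodegenGlobals initialized twice");
    }

    fn get_instance() -> &'static CodegenGlobals {
        CODEGEN_GLOBALS.get().expect("CodegenGlobals not initialized")
    }

    // Mirrors the getter added in this commit.
    pub fn get_branch_stub_hit_trampoline() -> CodePtr {
        CodegenGlobals::get_instance().branch_stub_hit_trampoline
    }
}

fn main() {
    // Boot: pretend gen_branch_stub_hit_trampoline() returned this address.
    CodegenGlobals::init(CodePtr(0x7f00_0000));
    // Later, stub emission fetches the shared trampoline:
    println!("{:?}", CodegenGlobals::get_branch_stub_hit_trampoline());
}
```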
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index c87bfe8245..abe0797257 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -1682,7 +1682,6 @@ fn make_branch_entry(block: &BlockRef, gen_fn: BranchGenFn) -> BranchRef {
     return branchref;
 }
 
-
 c_callable! {
     /// Generated code calls this function with the SysV calling convention.
     /// See [set_branch_target].
@@ -1910,19 +1909,14 @@ fn set_branch_target(
     let mut asm = Assembler::new();
     asm.comment("branch stub hit");
 
-    // Call branch_stub_hit(branch_ptr, target_idx, ec)
-    let jump_addr = asm.ccall(
-        branch_stub_hit as *mut u8,
-        vec![
-            Opnd::const_ptr(branch_ptr as *const u8),
-            Opnd::UImm(target_idx as u64),
-            EC,
-        ]
-    );
+    // Set up the arguments unique to this stub for:
+    // branch_stub_hit(branch_ptr, target_idx, ec)
+    asm.mov(C_ARG_OPNDS[0], Opnd::const_ptr(branch_ptr as *const u8));
+    asm.mov(C_ARG_OPNDS[1], target_idx.into());
 
-    // Jump to the address returned by the
-    // branch_stub_hit call
-    asm.jmp_opnd(jump_addr);
+    // Jump to the trampoline to call branch_stub_hit().
+    // Not really a side exit; we just don't need a padded jump here.
+    asm.jmp(CodegenGlobals::get_branch_stub_hit_trampoline().as_side_exit());
 
     asm.compile(ocb);
@@ -1939,6 +1933,35 @@
     }
 }
 
+pub fn gen_branch_stub_hit_trampoline(ocb: &mut OutlinedCb) -> CodePtr {
+    let ocb = ocb.unwrap();
+    let code_ptr = ocb.get_write_ptr();
+    let mut asm = Assembler::new();
+
+    // For `branch_stub_hit(branch_ptr, target_idx, ec)`,
+    // `branch_ptr` and `target_idx` are different for each stub,
+    // but the call and what follows are the same. This trampoline
+    // is the unchanging part.
+    // Since this trampoline is static, it allows code GC inside
+    // branch_stub_hit() to free stubs without problems.
+    asm.comment("branch_stub_hit() trampoline");
+    let jump_addr = asm.ccall(
+        branch_stub_hit as *mut u8,
+        vec![
+            C_ARG_OPNDS[0],
+            C_ARG_OPNDS[1],
+            EC,
+        ]
+    );
+
+    // Jump to the address returned by the branch_stub_hit() call
+    asm.jmp_opnd(jump_addr);
+
+    asm.compile(ocb);
+
+    code_ptr
+}
+
 impl Assembler
 {
     // Mark the start position of a patchable branch in the machine code
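The memory-lifetime side of the argument can be stated the same way. The sketch below is again illustrative rather than YJIT code: stub code is modeled as heap bytes that code GC may free, and the trampoline as `'static` data that code GC never touches.

```rust
// Illustrative model only: `CodePool` stands in for the outlined code
// block that holds stubs; the trampoline has static lifetime.
struct CodePool {
    stubs: Vec<Vec<u8>>,
}

impl CodePool {
    // Model of code GC freeing every stub. This is only sound because,
    // after this commit, no thread can be about to return into a stub:
    // execution past branch_stub_hit() continues in the trampoline.
    fn code_gc(&mut self) {
        self.stubs.clear();
    }
}

fn main() {
    // The trampoline is generated once at boot and lives forever.
    static TRAMPOLINE: [u8; 3] = [0x0f, 0x0b, 0xc3]; // placeholder bytes
    let mut pool = CodePool { stubs: vec![vec![0; 16], vec![0; 16]] };

    pool.code_gc();

    assert!(pool.stubs.is_empty());  // the stubs are gone...
    assert_eq!(TRAMPOLINE.len(), 3); // ...the trampoline is not
    println!("code GC freed stubs; trampoline still valid");
}
```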