summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMaxime Chevalier-Boisvert <maximechevalierb@gmail.com>2022-07-20 14:50:00 -0400
committerTakashi Kokubun <takashikkbn@gmail.com>2022-08-29 08:47:02 -0700
commitf833d75bee13ecb485db1591898cb871b24a2991 (patch)
tree42a7084ceb61a2be4b1d5e2e86b4e843cf985da9
parentf5f58d82834cf84fe15dd1b28754923404a9fc75 (diff)
Refactor YJIT branches to use PosMarker (https://github.com/Shopify/ruby/pull/333)
* Refactor defer_compilation to use PosMarker * Port gen_direct_jump() to use PosMarker * Port gen_branch, branchunless * Port over gen_jump() * Port over branchif and branchnil * Fix use of record_boundary_patch_point in jump_to_next_insn
-rw-r--r--yjit/src/backend/arm64/mod.rs3
-rw-r--r--yjit/src/backend/ir.rs2
-rw-r--r--yjit/src/backend/x86_64/mod.rs8
-rw-r--r--yjit/src/codegen.rs104
-rw-r--r--yjit/src/core.rs96
5 files changed, 94 insertions, 119 deletions
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index c5ddbea7c1..35026a520b 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -637,6 +637,9 @@ impl Assembler
Op::Je => {
emit_conditional_jump::<{Condition::EQ}>(cb, insn.target.unwrap());
},
+ Op::Jne => {
+ emit_conditional_jump::<{Condition::NE}>(cb, insn.target.unwrap());
+ },
Op::Jbe => {
emit_conditional_jump::<{Condition::LS}>(cb, insn.target.unwrap());
},
diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
index 8d58da88f2..c55a8f609b 100644
--- a/yjit/src/backend/ir.rs
+++ b/yjit/src/backend/ir.rs
@@ -102,6 +102,7 @@ pub enum Op
// Low-level conditional jump instructions
Jbe,
Je,
+ Jne,
Jz,
Jnz,
Jo,
@@ -883,6 +884,7 @@ macro_rules! def_push_2_opnd_no_out {
def_push_1_opnd_no_out!(jmp_opnd, Op::JmpOpnd);
def_push_jcc!(jmp, Op::Jmp);
def_push_jcc!(je, Op::Je);
+def_push_jcc!(jne, Op::Jne);
def_push_jcc!(jbe, Op::Jbe);
def_push_jcc!(jz, Op::Jz);
def_push_jcc!(jnz, Op::Jnz);
diff --git a/yjit/src/backend/x86_64/mod.rs b/yjit/src/backend/x86_64/mod.rs
index 2fb7e39346..2efe920ddf 100644
--- a/yjit/src/backend/x86_64/mod.rs
+++ b/yjit/src/backend/x86_64/mod.rs
@@ -367,6 +367,14 @@ impl Assembler
}
}
+ Op::Jne => {
+ match insn.target.unwrap() {
+ Target::CodePtr(code_ptr) => jne_ptr(cb, code_ptr),
+ Target::Label(label_idx) => jne_label(cb, label_idx),
+ _ => unreachable!()
+ }
+ }
+
Op::Jbe => {
match insn.target.unwrap() {
Target::CodePtr(code_ptr) => jbe_ptr(cb, code_ptr),
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index d2f483c79d..0b906970f7 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -37,7 +37,6 @@ enum CodegenStatus {
KeepCompiling,
CantCompile,
EndBlock,
- DeferCompilation,
}
/// Code generation function signature
@@ -698,17 +697,10 @@ fn jump_to_next_insn(
// We are at the end of the current instruction. Record the boundary.
if jit.record_boundary_patch_point {
- todo!();
-
- /*
- let next_insn = unsafe { jit.pc.offset(insn_len(jit.opcode).try_into().unwrap()) };
- let exit_pos = gen_exit(next_insn, &reset_depth, ocb.unwrap());
- record_global_inval_patch(cb, exit_pos);
+ let exit_pc = unsafe { jit.pc.offset(insn_len(jit.opcode).try_into().unwrap()) };
+ let exit_pos = gen_outlined_exit(exit_pc, &reset_depth, ocb);
+ record_global_inval_patch(asm, exit_pos);
jit.record_boundary_patch_point = false;
- */
-
-
-
}
// Generate the jump instruction
@@ -752,9 +744,6 @@ pub fn gen_single_block(
// Create a backend assembler instance
let mut asm = Assembler::new();
- // Codegen status for the last instruction compiled
- let mut status = CantCompile;
-
// For each instruction to compile
// NOTE: could rewrite this loop with a std::iter::Iterator
while insn_idx < iseq_size {
@@ -792,7 +781,7 @@ pub fn gen_single_block(
}
// Lookup the codegen function for this instruction
- status = CantCompile;
+ let mut status = CantCompile;
if let Some(gen_fn) = get_gen_fn(VALUE(opcode)) {
// :count-placement:
// Count bytecode instructions that execute in generated code.
@@ -835,11 +824,6 @@ pub fn gen_single_block(
break;
}
- // If we are deferring compilation for this instruction
- if status == DeferCompilation {
- break;
- }
-
// For now, reset the chain depth after each instruction as only the
// first instruction in the block can concern itself with the depth.
ctx.reset_chain_depth();
@@ -870,24 +854,9 @@ pub fn gen_single_block(
block.set_end_idx(insn_idx);
}
- // If we are deferring compilation for the current instruction
- if status == DeferCompilation {
- defer_compilation(&jit.block, insn_idx, &ctx, cb, ocb);
-
- // Mark the end position of the block
- let mut block = jit.block.borrow_mut();
- block.set_end_addr(cb.get_write_ptr());
- }
-
-
-
// We currently can't handle cases where the request is for a block that
// doesn't go to the next instruction.
- //assert!(!jit.record_boundary_patch_point);
-
-
-
-
+ assert!(!jit.record_boundary_patch_point);
// If code for the block doesn't fit, fail
if cb.has_dropped_bytes() || ocb.unwrap().has_dropped_bytes() {
@@ -1140,7 +1109,8 @@ fn gen_opt_plus(
ocb: &mut OutlinedCb,
) -> CodegenStatus {
if !jit_at_current_insn(jit) {
- return DeferCompilation;
+ defer_compilation(jit, ctx, asm, ocb);
+ return EndBlock;
}
let comptime_a = jit_peek_at_stack(jit, ctx, 1);
@@ -3208,9 +3178,10 @@ fn gen_opt_case_dispatch(
KeepCompiling // continue with the next instruction
}
+*/
fn gen_branchif_branch(
- cb: &mut CodeBlock,
+ asm: &mut Assembler,
target0: CodePtr,
target1: Option<CodePtr>,
shape: BranchShape,
@@ -3218,14 +3189,14 @@ fn gen_branchif_branch(
assert!(target1 != None);
match shape {
BranchShape::Next0 => {
- jz_ptr(cb, target1.unwrap());
+ asm.jz(target1.unwrap().into());
}
BranchShape::Next1 => {
- jnz_ptr(cb, target0);
+ asm.jnz(target0.into());
}
BranchShape::Default => {
- jnz_ptr(cb, target0);
- jmp_ptr(cb, target1.unwrap());
+ asm.jnz(target0.into());
+ asm.jmp(target1.unwrap().into());
}
}
}
@@ -3233,7 +3204,7 @@ fn gen_branchif_branch(
fn gen_branchif(
jit: &mut JITState,
ctx: &mut Context,
- cb: &mut CodeBlock,
+ asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
let jump_offset = jit_get_arg(jit, 0).as_i32();
@@ -3241,14 +3212,14 @@ fn gen_branchif(
// Check for interrupts, but only on backward branches that may create loops
if jump_offset < 0 {
let side_exit = get_side_exit(jit, ocb, ctx);
- gen_check_ints(cb, side_exit);
+ gen_check_ints(asm, side_exit);
}
// Test if any bit (outside of the Qnil bit) is on
// RUBY_Qfalse /* ...0000 0000 */
// RUBY_Qnil /* ...0000 1000 */
let val_opnd = ctx.stack_pop(1);
- test(cb, val_opnd, imm_opnd(!Qnil.as_i64()));
+ asm.test(val_opnd, Opnd::Imm(!Qnil.as_i64()));
// Get the branch target instruction offsets
let next_idx = jit_next_insn_idx(jit);
@@ -3266,7 +3237,7 @@ fn gen_branchif(
gen_branch(
jit,
ctx,
- cb,
+ asm,
ocb,
jump_block,
ctx,
@@ -3277,7 +3248,6 @@ fn gen_branchif(
EndBlock
}
-*/
fn gen_branchunless_branch(
asm: &mut Assembler,
@@ -3328,18 +3298,11 @@ fn gen_branchunless(
idx: jump_idx.try_into().unwrap(),
};
-
-
-
- // TODO: port gen_branch logic
- todo!("complete branchunless implementation");
-
- /*
// Generate the branch instructions
gen_branch(
jit,
ctx,
- cb,
+ asm,
ocb,
jump_block,
ctx,
@@ -3349,24 +3312,20 @@ fn gen_branchunless(
);
EndBlock
- */
-
-
}
-/*
fn gen_branchnil_branch(
- cb: &mut CodeBlock,
+ asm: &mut Assembler,
target0: CodePtr,
target1: Option<CodePtr>,
shape: BranchShape,
) {
match shape {
- BranchShape::Next0 => jne_ptr(cb, target1.unwrap()),
- BranchShape::Next1 => je_ptr(cb, target0),
+ BranchShape::Next0 => asm.jne(target1.unwrap().into()),
+ BranchShape::Next1 => asm.je(target0.into()),
BranchShape::Default => {
- je_ptr(cb, target0);
- jmp_ptr(cb, target1.unwrap());
+ asm.je(target0.into());
+ asm.jmp(target1.unwrap().into());
}
}
}
@@ -3374,7 +3333,7 @@ fn gen_branchnil_branch(
fn gen_branchnil(
jit: &mut JITState,
ctx: &mut Context,
- cb: &mut CodeBlock,
+ asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
let jump_offset = jit_get_arg(jit, 0).as_i32();
@@ -3382,13 +3341,13 @@ fn gen_branchnil(
// Check for interrupts, but only on backward branches that may create loops
if jump_offset < 0 {
let side_exit = get_side_exit(jit, ocb, ctx);
- gen_check_ints(cb, side_exit);
+ gen_check_ints(asm, side_exit);
}
// Test if the value is Qnil
// RUBY_Qnil /* ...0000 1000 */
let val_opnd = ctx.stack_pop(1);
- cmp(cb, val_opnd, uimm_opnd(Qnil.into()));
+ asm.cmp(val_opnd, Opnd::UImm(Qnil.into()));
// Get the branch target instruction offsets
let next_idx = jit_next_insn_idx(jit) as i32;
@@ -3406,7 +3365,7 @@ fn gen_branchnil(
gen_branch(
jit,
ctx,
- cb,
+ asm,
ocb,
jump_block,
ctx,
@@ -3421,7 +3380,7 @@ fn gen_branchnil(
fn gen_jump(
jit: &mut JITState,
ctx: &mut Context,
- cb: &mut CodeBlock,
+ asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
let jump_offset = jit_get_arg(jit, 0).as_i32();
@@ -3429,7 +3388,7 @@ fn gen_jump(
// Check for interrupts, but only on backward branches that may create loops
if jump_offset < 0 {
let side_exit = get_side_exit(jit, ocb, ctx);
- gen_check_ints(cb, side_exit);
+ gen_check_ints(asm, side_exit);
}
// Get the branch target instruction offsets
@@ -3440,11 +3399,12 @@ fn gen_jump(
};
// Generate the jump instruction
- gen_direct_jump(jit, ctx, jump_block, cb);
+ gen_direct_jump(jit, ctx, jump_block, asm);
EndBlock
}
+/*
/// Guard that self or a stack operand has the same class as `known_klass`, using
/// `sample_instance` to speculate about the shape of the runtime value.
/// FIXNUM and on-heap integers are treated as if they have distinct classes, and
@@ -6067,11 +6027,11 @@ fn get_gen_fn(opcode: VALUE) -> Option<InsnGenFn> {
YARVINSN_opt_invokebuiltin_delegate => Some(gen_opt_invokebuiltin_delegate),
YARVINSN_opt_invokebuiltin_delegate_leave => Some(gen_opt_invokebuiltin_delegate),
YARVINSN_opt_case_dispatch => Some(gen_opt_case_dispatch),
+ */
YARVINSN_branchif => Some(gen_branchif),
YARVINSN_branchunless => Some(gen_branchunless),
YARVINSN_branchnil => Some(gen_branchnil),
YARVINSN_jump => Some(gen_jump),
- */
//YARVINSN_getblockparamproxy => Some(gen_getblockparamproxy),
//YARVINSN_getblockparam => Some(gen_getblockparam),
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index 63c373b70a..1afa5c537a 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -1805,10 +1805,39 @@ fn get_branch_target(
}
}
+impl Assembler
+{
+ // Mark the start position of a patchable branch in the machine code
+ fn mark_branch_start(&mut self, branchref: &BranchRef)
+ {
+ // We need to create our own branch rc object
+ // so that we can move the closure below
+ let branchref = branchref.clone();
+
+ self.pos_marker(Box::new(move |code_ptr| {
+ let mut branch = branchref.borrow_mut();
+ branch.start_addr = Some(code_ptr);
+ }));
+ }
+
+ // Mark the end position of a patchable branch in the machine code
+ fn mark_branch_end(&mut self, branchref: &BranchRef)
+ {
+ // We need to create our own branch rc object
+ // so that we can move the closure below
+ let branchref = branchref.clone();
+
+ self.pos_marker(Box::new(move |code_ptr| {
+ let mut branch = branchref.borrow_mut();
+ branch.end_addr = Some(code_ptr);
+ }));
+ }
+}
+
pub fn gen_branch(
jit: &JITState,
src_ctx: &Context,
- cb: &mut CodeBlock,
+ asm: &mut Assembler,
ocb: &mut OutlinedCb,
target0: BlockId,
ctx0: &Context,
@@ -1842,8 +1871,9 @@ pub fn gen_branch(
};
// Call the branch generation function
- branch.start_addr = Some(cb.get_write_ptr());
- regenerate_branch(cb, &mut branch);
+ asm.mark_branch_start(&branchref);
+ gen_fn(asm, branch.dst_addrs[0].unwrap(), branch.dst_addrs[1], BranchShape::Default);
+ asm.mark_branch_end(&branchref);
}
fn gen_jump_branch(
@@ -1880,55 +1910,27 @@ pub fn gen_direct_jump(jit: &JITState, ctx: &Context, target0: BlockId, asm: &mu
branch.blocks[0] = Some(blockref.clone());
branch.shape = BranchShape::Default;
-
-
- todo!("convert gen_direct_jump to using new asm");
-
-
- // TODO: could we use regenerate_branch logic here?
-
- /*
// Call the branch generation function
- branch.start_addr = Some(cb.get_write_ptr());
- gen_jump_branch(cb, branch.dst_addrs[0].unwrap(), None, BranchShape::Default);
- branch.end_addr = Some(cb.get_write_ptr());
- */
-
-
-
-
- asm.pos_marker(Box::new(move |code_ptr| {
- let mut branch = branchref.borrow_mut();
- branch.start_addr = Some(code_ptr);
- }));
-
-
-
-
-
-
-
-
-
+ asm.mark_branch_start(&branchref);
+ gen_jump_branch(asm, branch.dst_addrs[0].unwrap(), None, BranchShape::Default);
+ asm.mark_branch_end(&branchref);
} else {
// This None target address signals gen_block_series() to compile the
// target block right after this one (fallthrough).
branch.dst_addrs[0] = None;
branch.shape = BranchShape::Next0;
- todo!();
-
- //branch.start_addr = Some(cb.get_write_ptr());
- //branch.end_addr = Some(cb.get_write_ptr());
+ // The branch is effectively empty (a noop)
+ asm.mark_branch_start(&branchref);
+ asm.mark_branch_end(&branchref);
}
}
/// Create a stub to force the code up to this point to be executed
pub fn defer_compilation(
- block: &BlockRef,
- insn_idx: u32,
+ jit: &JITState,
cur_ctx: &Context,
- cb: &mut CodeBlock,
+ asm: &mut Assembler,
ocb: &mut OutlinedCb,
) {
if cur_ctx.chain_depth != 0 {
@@ -1942,23 +1944,23 @@ pub fn defer_compilation(
}
next_ctx.chain_depth += 1;
- let branch_rc = make_branch_entry(block, cur_ctx, gen_jump_branch);
+ let block_rc = jit.get_block();
+ let branch_rc = make_branch_entry(&jit.get_block(), cur_ctx, gen_jump_branch);
let mut branch = branch_rc.borrow_mut();
+ let block = block_rc.borrow();
let blockid = BlockId {
- iseq: block.borrow().blockid.iseq,
- idx: insn_idx,
+ iseq: block.blockid.iseq,
+ idx: jit.get_insn_idx(),
};
branch.target_ctxs[0] = next_ctx;
branch.targets[0] = Some(blockid);
branch.dst_addrs[0] = get_branch_target(blockid, &next_ctx, &branch_rc, 0, ocb);
// Call the branch generation function
- branch.start_addr = Some(cb.get_write_ptr());
- let mut asm = Assembler::new();
- gen_jump_branch(&mut asm, branch.dst_addrs[0].unwrap(), None, BranchShape::Default);
- asm.compile(cb);
- branch.end_addr = Some(cb.get_write_ptr());
+ asm.mark_branch_start(&branch_rc);
+ gen_jump_branch(asm, branch.dst_addrs[0].unwrap(), None, BranchShape::Default);
+ asm.mark_branch_end(&branch_rc);
}
// Remove all references to a block then free it.