author    Kevin Newton <kddnewton@gmail.com>    2022-08-31 15:44:26 -0400
committer GitHub <noreply@github.com>           2022-08-31 15:44:26 -0400
commit    be55b77cc75fe36b484a3feb6ad4178630d73242 (patch)
tree      d7f456fa5829ad1130bd4cd864f25d70c25e80cd /yjit
parent    32a059151507876de804adbfbf4926937333e091 (diff)
Better b.cond usage on AArch64 (#6305)
* Better b.cond usage on AArch64

  When we're lowering a conditional jump, we previously had a bit of a
  complicated setup where we could emit a conditional jump to skip over
  a jump that was the next instruction, and then write out the
  destination and use a branch register. Now instead we use the b.cond
  instruction if our offset fits (not common, but not unused either),
  and if it doesn't we write out an inverse condition to jump past
  loading the destination and branching directly.

* Added an inverse fn for Condition (#443)

  Prevents the need to pass two params and potentially reduces errors.

Co-authored-by: Jimmy Miller <jimmyhmiller@jimmys-mbp.lan>
Co-authored-by: Maxime Chevalier-Boisvert <maximechevalierb@gmail.com>
Co-authored-by: Jimmy Miller <jimmyhmiller@jimmys-mbp.lan>
Notes:
    Merged-By: maximecb <maximecb@ruby-lang.org>
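For a sense of the control flow this commit settles on, here is a minimal
sketch of the lowering described above. It mirrors the helpers the diff
below touches (bcond, Condition::inverse, emit_load_size, emit_load_value,
br); it is an illustration distilled from the diff, not a verbatim excerpt:

    // Sketch: lower a conditional jump to dst_addr at the current write position.
    fn lower_conditional_jump(cb: &mut CodeBlock, cond: u8, dst_addr: i64) {
        let offset = dst_addr - cb.get_write_ptr().into_i64();

        if bcond_offset_fits_bits(offset) {
            // Near target: a single b.cond with a PC-relative immediate.
            bcond(cb, cond, A64Opnd::new_imm(offset));
        } else {
            // Far target: invert the condition so the fall-through path
            // skips the load-address + branch-register sequence.
            let load_insns: i64 = emit_load_size(dst_addr as u64).into();
            bcond(cb, Condition::inverse(cond), A64Opnd::new_imm((load_insns + 2) * 4));
            emit_load_value(cb, Assembler::SCRATCH0, dst_addr as u64);
            br(cb, Assembler::SCRATCH0);
        }
    }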
Diffstat (limited to 'yjit')
-rw-r--r--  yjit/src/asm/arm64/arg/condition.rs     |  32
-rw-r--r--  yjit/src/asm/arm64/inst/branch_cond.rs  |  12
-rw-r--r--  yjit/src/asm/arm64/mod.rs               |   9
-rw-r--r--  yjit/src/backend/arm64/mod.rs           | 122
4 files changed, 100 insertions, 75 deletions
diff --git a/yjit/src/asm/arm64/arg/condition.rs b/yjit/src/asm/arm64/arg/condition.rs
index e791e4b078..bb9ce570c3 100644
--- a/yjit/src/asm/arm64/arg/condition.rs
+++ b/yjit/src/asm/arm64/arg/condition.rs
@@ -19,4 +19,34 @@ impl Condition {
pub const GT: u8 = 0b1100; // greater than (signed)
pub const LE: u8 = 0b1101; // less than or equal to (signed)
pub const AL: u8 = 0b1110; // always
-}
+
+ pub const fn inverse(condition: u8) -> u8 {
+ match condition {
+ Condition::EQ => Condition::NE,
+ Condition::NE => Condition::EQ,
+
+ Condition::CS => Condition::CC,
+ Condition::CC => Condition::CS,
+
+ Condition::MI => Condition::PL,
+ Condition::PL => Condition::MI,
+
+ Condition::VS => Condition::VC,
+ Condition::VC => Condition::VS,
+
+ Condition::HI => Condition::LS,
+ Condition::LS => Condition::HI,
+
+ Condition::LT => Condition::GE,
+ Condition::GE => Condition::LT,
+
+ Condition::GT => Condition::LE,
+ Condition::LE => Condition::GT,
+
+ Condition::AL => Condition::AL,
+
+ _ => panic!("Unknown condition")
+
+ }
+ }
+}
\ No newline at end of file
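A quick illustration of how the new helper pairs conditions (a hypothetical
usage example, not part of the commit):

    // Each condition maps to the condition that fires exactly when it doesn't.
    assert_eq!(Condition::inverse(Condition::EQ), Condition::NE);
    assert_eq!(Condition::inverse(Condition::HI), Condition::LS);
    // AL ("always") maps to itself, since it has no negation in A64.
    assert_eq!(Condition::inverse(Condition::AL), Condition::AL);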
diff --git a/yjit/src/asm/arm64/inst/branch_cond.rs b/yjit/src/asm/arm64/inst/branch_cond.rs
index a6bc79dffe..c489bacef0 100644
--- a/yjit/src/asm/arm64/inst/branch_cond.rs
+++ b/yjit/src/asm/arm64/inst/branch_cond.rs
@@ -20,8 +20,8 @@ pub struct BranchCond {
impl BranchCond {
/// B.cond
/// https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/B-cond--Branch-conditionally-
- pub fn bcond(cond: u8, byte_offset: i32) -> Self {
- Self { cond, imm19: byte_offset >> 2 }
+ pub fn bcond(cond: u8, imm19: i32) -> Self {
+ Self { cond, imm19 }
}
}
@@ -53,25 +53,25 @@ mod tests {
#[test]
fn test_b_eq() {
- let result: u32 = BranchCond::bcond(Condition::EQ, 128).into();
+ let result: u32 = BranchCond::bcond(Condition::EQ, 32).into();
assert_eq!(0x54000400, result);
}
#[test]
fn test_b_vs() {
- let result: u32 = BranchCond::bcond(Condition::VS, 128).into();
+ let result: u32 = BranchCond::bcond(Condition::VS, 32).into();
assert_eq!(0x54000406, result);
}
#[test]
fn test_b_eq_max() {
- let result: u32 = BranchCond::bcond(Condition::EQ, (1 << 20) - 4).into();
+ let result: u32 = BranchCond::bcond(Condition::EQ, (1 << 18) - 1).into();
assert_eq!(0x547fffe0, result);
}
#[test]
fn test_b_eq_min() {
- let result: u32 = BranchCond::bcond(Condition::EQ, -(1 << 20)).into();
+ let result: u32 = BranchCond::bcond(Condition::EQ, -(1 << 18)).into();
assert_eq!(0x54800000, result);
}
}
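The updated tests reflect that bcond now takes the offset in instructions
(imm19) rather than in bytes, so the old byte offset of 128 becomes 32. The
expected word can be checked by hand (a sketch, assuming the standard A64
B.cond layout with imm19 at bits 5..23 and cond at bits 0..3):

    // B.cond for EQ with a 32-instruction (128-byte) forward offset.
    let cond: u32 = 0b0000;                  // Condition::EQ
    let imm19: u32 = 32 & ((1 << 19) - 1);   // offset in instructions
    let insn = (0b0101_0100u32 << 24) | (imm19 << 5) | cond;
    assert_eq!(insn, 0x54000400);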
diff --git a/yjit/src/asm/arm64/mod.rs b/yjit/src/asm/arm64/mod.rs
index a6aa8ffcbb..b73b3125e2 100644
--- a/yjit/src/asm/arm64/mod.rs
+++ b/yjit/src/asm/arm64/mod.rs
@@ -203,9 +203,10 @@ pub fn b(cb: &mut CodeBlock, imm26: A64Opnd) {
cb.write_bytes(&bytes);
}
-/// Whether or not the offset between two instructions fits into the b.cond
-/// instruction. If it doesn't, then we have to load the value into a register
-/// first, then use the b.cond instruction to skip past a direct jump.
+/// Whether or not the offset in number of instructions between two instructions
+/// fits into the b.cond instruction. If it doesn't, then we have to load the
+/// value into a register first, then use the b.cond instruction to skip past a
+/// direct jump.
pub const fn bcond_offset_fits_bits(offset: i64) -> bool {
imm_fits_bits(offset, 21) && (offset & 0b11 == 0)
}
@@ -216,7 +217,7 @@ pub fn bcond(cb: &mut CodeBlock, cond: u8, byte_offset: A64Opnd) {
A64Opnd::Imm(imm) => {
assert!(bcond_offset_fits_bits(imm), "The immediate operand must be 21 bits or less and be aligned to a 2-bit boundary.");
- BranchCond::bcond(cond, imm as i32).into()
+ BranchCond::bcond(cond, (imm / 4) as i32).into()
},
_ => panic!("Invalid operand combination to bcond instruction."),
};
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index 69524be611..4f07bf8062 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -565,64 +565,42 @@ impl Assembler
fn emit_conditional_jump<const CONDITION: u8>(cb: &mut CodeBlock, target: Target) {
match target {
Target::CodePtr(dst_ptr) => {
- let dst_addr = dst_ptr.into_u64();
- //let src_addr = cb.get_write_ptr().into_i64() + 4;
- //let offset = dst_addr - src_addr;
-
- // If the condition is met, then we'll skip past the
- // next instruction, put the address in a register, and
- // jump to it.
- bcond(cb, CONDITION, A64Opnd::new_imm(8));
-
- // If we get to this instruction, then the condition
- // wasn't met, in which case we'll jump past the
- // next instruction that perform the direct jump.
-
- b(cb, A64Opnd::new_imm(2i64 + emit_load_size(dst_addr) as i64));
- let num_insns = emit_load_value(cb, Assembler::SCRATCH0, dst_addr);
- br(cb, Assembler::SCRATCH0);
- for _ in num_insns..4 {
- nop(cb);
- }
+ let dst_addr = dst_ptr.into_i64();
+ let src_addr = cb.get_write_ptr().into_i64();
+ let offset = dst_addr - src_addr;
- /*
- // If the jump offset fits into the conditional jump as an
- // immediate value and it's properly aligned, then we can
- // use the b.cond instruction directly. Otherwise, we need
- // to load the address into a register and use the branch
- // register instruction.
- if bcond_offset_fits_bits(offset) {
- bcond(cb, CONDITION, A64Opnd::new_imm(dst_addr - src_addr));
+ let num_insns = if bcond_offset_fits_bits(offset) {
+ // If the jump offset fits into the conditional jump as
+ // an immediate value and it's properly aligned, then we
+ // can use the b.cond instruction directly.
+ bcond(cb, CONDITION, A64Opnd::new_imm(offset));
+
+ // Here we're going to return 1 because we've only
+ // written out 1 instruction.
+ 1
} else {
- // If the condition is met, then we'll skip past the
- // next instruction, put the address in a register, and
- // jump to it.
- bcond(cb, CONDITION, A64Opnd::new_imm(8));
-
- // If the offset fits into a direct jump, then we'll use
- // that and the number of instructions will be shorter.
- // Otherwise we'll use the branch register instruction.
- if b_offset_fits_bits(offset) {
- // If we get to this instruction, then the condition
- // wasn't met, in which case we'll jump past the
- // next instruction that performs the direct jump.
- b(cb, A64Opnd::new_imm(1));
-
- // Here we'll perform the direct jump to the target.
- let offset = dst_addr - cb.get_write_ptr().into_i64() + 4;
- b(cb, A64Opnd::new_imm(offset / 4));
- } else {
- // If we get to this instruction, then the condition
- // wasn't met, in which case we'll jump past the
- // next instruction that perform the direct jump.
- let value = dst_addr as u64;
-
- b(cb, A64Opnd::new_imm(emit_load_size(value).into()));
- emit_load_value(cb, Assembler::SCRATCH0, value);
- br(cb, Assembler::SCRATCH0);
- }
- }
- */
+ // Otherwise, we need to load the address into a
+ // register and use the branch register instruction.
+ let dst_addr = dst_ptr.into_u64();
+ let load_insns: i64 = emit_load_size(dst_addr).into();
+
+ // We're going to write out the inverse condition so
+ // that if it doesn't match it will skip over the
+ // instructions used for branching.
+ bcond(cb, Condition::inverse(CONDITION), A64Opnd::new_imm((load_insns + 2) * 4));
+ emit_load_value(cb, Assembler::SCRATCH0, dst_addr);
+ br(cb, Assembler::SCRATCH0);
+
+ // Here we'll return the number of instructions that it
+ // took to write out the destination address + 1 for the
+ // b.cond and 1 for the br.
+ load_insns + 2
+ };
+
+ // We need to make sure we have at least 6 instructions for
+ // every kind of jump for invalidation purposes, so we're
+ // going to write out padding nop instructions here.
+ for _ in num_insns..6 { nop(cb); }
},
Target::Label(label_idx) => {
// Here we're going to save enough space for ourselves and
@@ -904,10 +882,10 @@ impl Assembler
_ => unreachable!()
};
},
- Insn::Je(target) => {
+ Insn::Je(target) | Insn::Jz(target) => {
emit_conditional_jump::<{Condition::EQ}>(cb, *target);
},
- Insn::Jne(target) => {
+ Insn::Jne(target) | Insn::Jnz(target) => {
emit_conditional_jump::<{Condition::NE}>(cb, *target);
},
Insn::Jl(target) => {
@@ -916,12 +894,6 @@ impl Assembler
Insn::Jbe(target) => {
emit_conditional_jump::<{Condition::LS}>(cb, *target);
},
- Insn::Jz(target) => {
- emit_conditional_jump::<{Condition::EQ}>(cb, *target);
- },
- Insn::Jnz(target) => {
- emit_conditional_jump::<{Condition::NE}>(cb, *target);
- },
Insn::Jo(target) => {
emit_conditional_jump::<{Condition::VS}>(cb, *target);
},
@@ -1054,6 +1026,28 @@ mod tests {
}
#[test]
+ fn test_emit_je_fits_into_bcond() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let offset = 80;
+ let target: CodePtr = ((cb.get_write_ptr().into_u64() + offset) as *mut u8).into();
+
+ asm.je(Target::CodePtr(target));
+ asm.compile_with_num_regs(&mut cb, 0);
+ }
+
+ #[test]
+ fn test_emit_je_does_not_fit_into_bcond() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let offset = 1 << 21;
+ let target: CodePtr = ((cb.get_write_ptr().into_u64() + offset) as *mut u8).into();
+
+ asm.je(Target::CodePtr(target));
+ asm.compile_with_num_regs(&mut cb, 0);
+ }
+
+ #[test]
fn test_emit_lea_label() {
let (mut asm, mut cb) = setup_asm();