author     Kevin Newton <kddnewton@gmail.com>       2022-08-05 16:52:23 -0400
committer  Takashi Kokubun <takashikkbn@gmail.com>  2022-08-29 08:47:08 -0700
commit     8278d722907dc134e9a3436d5542d7dc168d8925 (patch)
tree       418029a7e2786cbd827dba8348e2a3fcb17bdb97
parent     8fffff536db7d603c6caef80d11c0926d59b1001 (diff)

Left and right shift for IR (https://github.com/Shopify/ruby/pull/374)

* Left and right shift for IR
* Update yjit/src/backend/x86_64/mod.rs

Co-authored-by: Alan Wu <XrXr@users.noreply.github.com>
Co-authored-by: Maxime Chevalier-Boisvert <maximechevalierb@gmail.com>
-rw-r--r--  yjit/src/asm/arm64/inst/sbfm.rs  33
-rw-r--r--  yjit/src/asm/arm64/mod.rs        21
-rw-r--r--  yjit/src/backend/arm64/mod.rs    36
-rw-r--r--  yjit/src/backend/ir.rs           12
-rw-r--r--  yjit/src/backend/x86_64/mod.rs   43

5 files changed, 143 insertions, 2 deletions
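
At a high level, the commit adds three shift instructions to the backend IR and lowers each to the corresponding native instruction. The mapping below uses the instruction names that appear in the diffs; note that the arm64 lsl and lsr emitters evidently already existed, since only asr is added in this change:

    IR op        x86_64   arm64
    LShift   ->  shl      lsl   (logical shift left)
    RShift   ->  sar      asr   (arithmetic shift right, sign-extending)
    URShift  ->  shr      lsr   (logical shift right, zero-filling)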
diff --git a/yjit/src/asm/arm64/inst/sbfm.rs b/yjit/src/asm/arm64/inst/sbfm.rs
index 4fbb567ed0..6f69e58043 100644
--- a/yjit/src/asm/arm64/inst/sbfm.rs
+++ b/yjit/src/asm/arm64/inst/sbfm.rs
@@ -31,6 +31,18 @@ pub struct SBFM {
}
impl SBFM {
+ /// ASR
+ /// https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/ASR--immediate---Arithmetic-Shift-Right--immediate---an-alias-of-SBFM-?lang=en
+ pub fn asr(rd: u8, rn: u8, shift: u8, num_bits: u8) -> Self {
+ let (imms, n) = if num_bits == 64 {
+ (0b111111, true)
+ } else {
+ (0b011111, false)
+ };
+
+ Self { rd, rn, immr: shift, imms, n, sf: num_bits.into() }
+ }
+
/// SXTW
/// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/SXTW--Sign-Extend-Word--an-alias-of-SBFM-?lang=en
pub fn sxtw(rd: u8, rn: u8) -> Self {
@@ -44,13 +56,16 @@ const FAMILY: u32 = 0b1001;
impl From<SBFM> for u32 {
/// Convert an instruction into a 32-bit value.
fn from(inst: SBFM) -> Self {
+ let immr = (inst.immr as u32) & ((1 << 6) - 1);
+ let imms = (inst.imms as u32) & ((1 << 6) - 1);
+
0
| ((inst.sf as u32) << 31)
| (FAMILY << 25)
| (1 << 24)
| ((inst.n as u32) << 22)
- | ((inst.immr as u32) << 16)
- | ((inst.imms as u32) << 10)
+ | (immr << 16)
+ | (imms << 10)
| ((inst.rn as u32) << 5)
| inst.rd as u32
}
@@ -69,6 +84,20 @@ mod tests {
use super::*;
#[test]
+ fn test_asr_32_bits() {
+ let inst = SBFM::asr(0, 1, 2, 32);
+ let result: u32 = inst.into();
+ assert_eq!(0x13027c20, result);
+ }
+
+ #[test]
+ fn test_asr_64_bits() {
+ let inst = SBFM::asr(10, 11, 5, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0x9345fd6a, result);
+ }
+
+ #[test]
fn test_sxtw() {
let inst = SBFM::sxtw(0, 1);
let result: u32 = inst.into();
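
As a sanity check on the layout above, the 32-bit test value can be reproduced by hand. Here is a standalone sketch (not part of the diff); the field values follow the asr constructor and the From<SBFM> impl shown above:

    fn main() {
        // SBFM::asr(rd = 0, rn = 1, shift = 2, num_bits = 32)
        let (rd, rn, shift) = (0u32, 1u32, 2u32);
        let sf = 0u32;                 // 0 => 32-bit operands
        let n = 0u32;                  // cleared for 32-bit ASR
        let family = 0b1001u32;        // the FAMILY constant from above
        let immr = shift & 0b111111;   // 6-bit field, masked as in From<SBFM>
        let imms = 0b011111u32;        // fixed at 31 for 32-bit ASR

        let inst = (sf << 31) | (family << 25) | (1 << 24) | (n << 22)
            | (immr << 16) | (imms << 10) | (rn << 5) | rd;
        assert_eq!(inst, 0x13027c20);  // same value as test_asr_32_bits
    }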
diff --git a/yjit/src/asm/arm64/mod.rs b/yjit/src/asm/arm64/mod.rs
index d114f64a22..68be36c256 100644
--- a/yjit/src/asm/arm64/mod.rs
+++ b/yjit/src/asm/arm64/mod.rs
@@ -166,6 +166,22 @@ pub fn ands(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
cb.write_bytes(&bytes);
}
+/// ASR - arithmetic shift right rn by shift, put the result in rd, don't update
+/// flags
+pub fn asr(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, shift: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, shift) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(shift)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+ assert!(uimm_fits_bits(shift, 6), "The shift operand must be 6 bits or less.");
+
+ SBFM::asr(rd.reg_no, rn.reg_no, shift.try_into().unwrap(), rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to asr instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
/// Whether or not the offset between two instructions fits into the branch with
/// or without link instruction. If it doesn't, then we have to load the value
/// into a register first.
@@ -904,6 +920,11 @@ mod tests {
}
#[test]
+ fn test_asr() {
+ check_bytes("b4fe4a93", |cb| asr(cb, X20, X21, A64Opnd::new_uimm(10)));
+ }
+
+ #[test]
fn test_bcond() {
check_bytes("01200054", |cb| bcond(cb, Condition::NE, A64Opnd::new_imm(0x400)));
}
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index 2cddf55756..2e8c2068af 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -576,6 +576,15 @@ impl Assembler
Op::Not => {
mvn(cb, insn.out.into(), insn.opnds[0].into());
},
+ Op::RShift => {
+ asr(cb, insn.out.into(), insn.opnds[0].into(), insn.opnds[1].into());
+ },
+ Op::URShift => {
+ lsr(cb, insn.out.into(), insn.opnds[0].into(), insn.opnds[1].into());
+ },
+ Op::LShift => {
+ lsl(cb, insn.out.into(), insn.opnds[0].into(), insn.opnds[1].into());
+ },
Op::Store => {
// This order may be surprising but it is correct. The way
// the Arm64 assembler works, the register that is going to
@@ -902,6 +911,33 @@ mod tests {
}
#[test]
+ fn test_emit_lshift() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.lshift(Opnd::Reg(X0_REG), Opnd::UImm(5));
+ asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+ }
+
+ #[test]
+ fn test_emit_rshift() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.rshift(Opnd::Reg(X0_REG), Opnd::UImm(5));
+ asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+ }
+
+ #[test]
+ fn test_emit_urshift() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.urshift(Opnd::Reg(X0_REG), Opnd::UImm(5));
+ asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+ }
+
+ #[test]
fn test_emit_test() {
let (mut asm, mut cb) = setup_asm();
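
Note that the arm64 lowering can write straight to insn.out: A64 shifts take a separate destination register, so the source operand survives the instruction. That is why this backend needs no extra operand splitting, unlike the x86_64 changes below. A brief contrast, with the assembly shown in comments (a sketch, not captured output):

    // arm64, three-operand form: x1 is preserved
    //     asr  x0, x1, #5
    // x86_64, two-operand form: rax is shifted in place and clobbered
    //     sar  rax, 5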
diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
index a23b27dda2..5eee61b228 100644
--- a/yjit/src/backend/ir.rs
+++ b/yjit/src/backend/ir.rs
@@ -66,6 +66,15 @@ pub enum Op
// instruction.
Not,
+ /// Shift a value right by a certain amount (signed).
+ RShift,
+
+ /// Shift a value right by a certain amount (unsigned).
+ URShift,
+
+ /// Shift a value left by a certain amount.
+ LShift,
+
//
// Low-level instructions
//
@@ -912,6 +921,9 @@ def_push_2_opnd!(sub, Op::Sub);
def_push_2_opnd!(and, Op::And);
def_push_2_opnd!(or, Op::Or);
def_push_1_opnd!(not, Op::Not);
+def_push_2_opnd!(lshift, Op::LShift);
+def_push_2_opnd!(rshift, Op::RShift);
+def_push_2_opnd!(urshift, Op::URShift);
def_push_1_opnd_no_out!(cpush, Op::CPush);
def_push_0_opnd!(cpop, Op::CPop);
def_push_1_opnd_no_out!(cpop_into, Op::CPopInto);
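
The def_push_2_opnd! macro is defined earlier in ir.rs and is not shown in this diff; each invocation generates an Assembler helper of roughly this shape (a sketch only; the exact push_insn signature is assumed from its use in the x86_64 split pass below):

    // Roughly what def_push_2_opnd!(lshift, Op::LShift) expands to.
    impl Assembler {
        #[must_use]
        pub fn lshift(&mut self, opnd0: Opnd, opnd1: Opnd) -> Opnd {
            self.push_insn(Op::LShift, vec![opnd0, opnd1], None, None, None)
        }
    }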
diff --git a/yjit/src/backend/x86_64/mod.rs b/yjit/src/backend/x86_64/mod.rs
index 8d45230e91..4ba849b239 100644
--- a/yjit/src/backend/x86_64/mod.rs
+++ b/yjit/src/backend/x86_64/mod.rs
@@ -164,6 +164,37 @@ impl Assembler
asm.push_insn(op, vec![opnd0, opnd1], target, text, pos_marker);
},
+ // These instructions modify their input operand in-place, so we
+ // may need to load the input value to preserve it
+ Op::LShift | Op::RShift | Op::URShift => {
+ let (opnd0, opnd1) = match (opnds[0], opnds[1]) {
+ // Instruction output whose live range spans beyond this instruction
+ (Opnd::InsnOut { .. }, _) => {
+ let idx = match original_opnds[0] {
+ Opnd::InsnOut { idx, .. } => {
+ idx
+ },
+ _ => unreachable!()
+ };
+
+ // Our input must be from a previous instruction!
+ assert!(idx < index);
+
+ if live_ranges[idx] > index {
+ (asm.load(opnds[0]), opnds[1])
+ } else {
+ (opnds[0], opnds[1])
+ }
+ },
+ // We have to load memory operands to avoid corrupting them
+ (Opnd::Mem(_) | Opnd::Reg(_), _) => {
+ (asm.load(opnds[0]), opnds[1])
+ },
+ _ => (opnds[0], opnds[1])
+ };
+
+ asm.push_insn(op, vec![opnd0, opnd1], target, text, pos_marker);
+ },
Op::CSelZ | Op::CSelNZ | Op::CSelE | Op::CSelNE |
Op::CSelL | Op::CSelLE | Op::CSelG | Op::CSelGE => {
let new_opnds = opnds.into_iter().map(|opnd| {
@@ -293,6 +324,18 @@ impl Assembler
not(cb, insn.opnds[0].into())
},
+ Op::LShift => {
+ shl(cb, insn.opnds[0].into(), insn.opnds[1].into())
+ },
+
+ Op::RShift => {
+ sar(cb, insn.opnds[0].into(), insn.opnds[1].into())
+ },
+
+ Op::URShift => {
+ shr(cb, insn.opnds[0].into(), insn.opnds[1].into())
+ },
+
Op::Store => mov(cb, insn.opnds[0].into(), insn.opnds[1].into()),
// This assumes only load instructions can contain references to GC'd Value operands
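
To make the split pass above concrete: shl, sar, and shr overwrite their first operand, so when that value's live range extends past the shift, the pass inserts a Load and shifts a copy instead. A hypothetical sequence (a sketch; SP stands in for any base-register operand):

    // 'val' is read again after the shift, so live_ranges[idx] > index
    // holds for the LShift. The split copies 'val' into a fresh register;
    // otherwise shl would clobber it in place on x86_64.
    let val = asm.load(Opnd::mem(64, SP, 0));
    let shifted = asm.lshift(val, Opnd::UImm(1)); // operates on a copy of 'val'
    asm.store(Opnd::mem(64, SP, 8), shifted);
    asm.store(Opnd::mem(64, SP, 0), val);         // 'val' must still be intact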