summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKevin Newton <kddnewton@gmail.com>2022-07-22 14:01:21 -0400
committerTakashi Kokubun <takashikkbn@gmail.com>2022-08-29 08:47:04 -0700
commitf593b2c6db622de6f973e4e847e959855c341a25 (patch)
tree5cd24ab64680b03ca36fb85bc3afc1ce22e0d8d6
parent96303342e417cb2e5980d3e3f0909d32bf004431 (diff)
Fixes for AArch64 (https://github.com/Shopify/ruby/pull/338)
* Better splitting for Op::Add, Op::Sub, and Op::Cmp
* Split stores if the displacement is too large
* Use a shifted immediate argument
* Split all places where shifted immediates are used
* Add more tests to the cirrus workflow
-rw-r--r--.cirrus.yml3
-rw-r--r--yjit/src/asm/arm64/arg/mod.rs2
-rw-r--r--yjit/src/asm/arm64/arg/shifted_imm.rs75
-rw-r--r--yjit/src/asm/arm64/inst/data_imm.rs80
-rw-r--r--yjit/src/asm/arm64/mod.rs40
-rw-r--r--yjit/src/backend/arm64/mod.rs109
6 files changed, 200 insertions, 109 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index d71ac851e3..74ab6bd979 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -140,6 +140,7 @@ yjit_task:
bootstraptest/test_flow.rb \
bootstraptest/test_fork.rb \
bootstraptest/test_gc.rb \
+ bootstraptest/test_io.rb \
bootstraptest/test_jump.rb \
bootstraptest/test_literal_suffix.rb \
bootstraptest/test_load.rb \
@@ -147,7 +148,9 @@ yjit_task:
bootstraptest/test_massign.rb \
bootstraptest/test_method.rb \
bootstraptest/test_objectspace.rb \
+ bootstraptest/test_proc.rb \
bootstraptest/test_string.rb \
bootstraptest/test_struct.rb \
bootstraptest/test_yjit_new_backend.rb
+ bootstraptest/test_yjit_rust_port.rb
# full_build_script: make -j
diff --git a/yjit/src/asm/arm64/arg/mod.rs b/yjit/src/asm/arm64/arg/mod.rs
index bb779ab6df..30f3cc3dfe 100644
--- a/yjit/src/asm/arm64/arg/mod.rs
+++ b/yjit/src/asm/arm64/arg/mod.rs
@@ -4,9 +4,11 @@
mod bitmask_imm;
mod condition;
mod sf;
+mod shifted_imm;
mod sys_reg;
pub use bitmask_imm::BitmaskImmediate;
pub use condition::Condition;
pub use sf::Sf;
+pub use shifted_imm::ShiftedImmediate;
pub use sys_reg::SystemRegister;
diff --git a/yjit/src/asm/arm64/arg/shifted_imm.rs b/yjit/src/asm/arm64/arg/shifted_imm.rs
new file mode 100644
index 0000000000..5d1eeaf26d
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/shifted_imm.rs
@@ -0,0 +1,75 @@
+/// How much to shift the immediate by.
+pub enum Shift {
+ LSL0 = 0b0, // no shift
+ LSL12 = 0b1 // logical shift left by 12 bits
+}
+
+/// Some instructions accept a 12-bit immediate that has an optional shift
+/// attached to it. This allows encoding larger values than just fit into 12
+/// bits. We attempt to encode those here. If the values are too large we have
+/// to bail out.
+pub struct ShiftedImmediate {
+ shift: Shift,
+ value: u16
+}
+
+impl TryFrom<u64> for ShiftedImmediate {
+ type Error = ();
+
+ /// Attempt to convert a u64 into a ShiftedImmediate.
+ fn try_from(value: u64) -> Result<Self, Self::Error> {
+ let mut current = value;
+ if current < 2_u64.pow(12) {
+ return Ok(ShiftedImmediate { shift: Shift::LSL0, value: current as u16 });
+ }
+
+ if (current & (2_u64.pow(12) - 1) == 0) && ((current >> 12) < 2_u64.pow(12)) {
+ return Ok(ShiftedImmediate { shift: Shift::LSL12, value: (current >> 12) as u16 });
+ }
+
+ Err(())
+ }
+}
+
+impl From<ShiftedImmediate> for u32 {
+ /// Encode a shifted immediate into a 32-bit value.
+ fn from(imm: ShiftedImmediate) -> Self {
+ 0
+ | (((imm.shift as u32) & 1) << 12)
+ | (imm.value as u32)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_no_shift() {
+ let value = 256;
+ let result = ShiftedImmediate::try_from(value);
+
+ assert!(matches!(result, Ok(ShiftedImmediate { shift: Shift::LSL0, value })));
+ }
+
+ #[test]
+ fn test_maximum_no_shift() {
+ let value = (1 << 12) - 1;
+ let result = ShiftedImmediate::try_from(value);
+
+ assert!(matches!(result, Ok(ShiftedImmediate { shift: Shift::LSL0, value })));
+ }
+
+ #[test]
+ fn test_with_shift() {
+ let result = ShiftedImmediate::try_from(256 << 12);
+
+ assert!(matches!(result, Ok(ShiftedImmediate { shift: Shift::LSL12, value: 256 })));
+ }
+
+ #[test]
+ fn test_unencodable() {
+ let result = ShiftedImmediate::try_from((256 << 12) + 1);
+ assert!(matches!(result, Err(())));
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/data_imm.rs b/yjit/src/asm/arm64/inst/data_imm.rs
index 19e2bfa199..b474b00a52 100644
--- a/yjit/src/asm/arm64/inst/data_imm.rs
+++ b/yjit/src/asm/arm64/inst/data_imm.rs
@@ -1,4 +1,4 @@
-use super::super::arg::Sf;
+use super::super::arg::{Sf, ShiftedImmediate};
/// The operation being performed by this instruction.
enum Op {
@@ -12,12 +12,6 @@ enum S {
UpdateFlags = 0b1
}
-/// How much to shift the immediate by.
-enum Shift {
- LSL0 = 0b0, // no shift
- LSL12 = 0b1 // logical shift left by 12 bits
-}
-
/// The struct that represents an A64 data processing -- immediate instruction
/// that can be encoded.
///
@@ -35,11 +29,8 @@ pub struct DataImm {
/// The register number of the first operand register.
rn: u8,
- /// The value of the immediate.
- imm12: u16,
-
/// How much to shift the immediate by.
- shift: Shift,
+ imm: ShiftedImmediate,
/// Whether or not to update the flags when this instruction is performed.
s: S,
@@ -54,64 +45,32 @@ pub struct DataImm {
impl DataImm {
/// ADD (immediate)
/// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/ADD--immediate---Add--immediate--?lang=en
- pub fn add(rd: u8, rn: u8, imm12: u16, num_bits: u8) -> Self {
- Self {
- rd,
- rn,
- imm12,
- shift: Shift::LSL0,
- s: S::LeaveFlags,
- op: Op::Add,
- sf: num_bits.into()
- }
+ pub fn add(rd: u8, rn: u8, imm: ShiftedImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, s: S::LeaveFlags, op: Op::Add, sf: num_bits.into() }
}
/// ADDS (immediate, set flags)
/// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/ADDS--immediate---Add--immediate---setting-flags-?lang=en
- pub fn adds(rd: u8, rn: u8, imm12: u16, num_bits: u8) -> Self {
- Self {
- rd,
- rn,
- imm12,
- shift: Shift::LSL0,
- s: S::UpdateFlags,
- op: Op::Add,
- sf: num_bits.into()
- }
+ pub fn adds(rd: u8, rn: u8, imm: ShiftedImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, s: S::UpdateFlags, op: Op::Add, sf: num_bits.into() }
}
/// CMP (immediate)
/// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/CMP--immediate---Compare--immediate---an-alias-of-SUBS--immediate--?lang=en
- pub fn cmp(rn: u8, imm12: u16, num_bits: u8) -> Self {
- Self::subs(31, rn, imm12, num_bits)
+ pub fn cmp(rn: u8, imm: ShiftedImmediate, num_bits: u8) -> Self {
+ Self::subs(31, rn, imm, num_bits)
}
/// SUB (immediate)
/// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/SUB--immediate---Subtract--immediate--?lang=en
- pub fn sub(rd: u8, rn: u8, imm12: u16, num_bits: u8) -> Self {
- Self {
- rd,
- rn,
- imm12,
- shift: Shift::LSL0,
- s: S::LeaveFlags,
- op: Op::Sub,
- sf: num_bits.into()
- }
+ pub fn sub(rd: u8, rn: u8, imm: ShiftedImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, s: S::LeaveFlags, op: Op::Sub, sf: num_bits.into() }
}
/// SUBS (immediate, set flags)
/// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/SUBS--immediate---Subtract--immediate---setting-flags-?lang=en
- pub fn subs(rd: u8, rn: u8, imm12: u16, num_bits: u8) -> Self {
- Self {
- rd,
- rn,
- imm12,
- shift: Shift::LSL0,
- s: S::UpdateFlags,
- op: Op::Sub,
- sf: num_bits.into()
- }
+ pub fn subs(rd: u8, rn: u8, imm: ShiftedImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, s: S::UpdateFlags, op: Op::Sub, sf: num_bits.into() }
}
}
@@ -121,7 +80,7 @@ const FAMILY: u32 = 0b1000;
impl From<DataImm> for u32 {
/// Convert an instruction into a 32-bit value.
fn from(inst: DataImm) -> Self {
- let imm12 = (inst.imm12 as u32) & ((1 << 12) - 1);
+ let imm: u32 = inst.imm.into();
0
| ((inst.sf as u32) << 31)
@@ -129,8 +88,7 @@ impl From<DataImm> for u32 {
| ((inst.s as u32) << 29)
| (FAMILY << 25)
| (1 << 24)
- | ((inst.shift as u32) << 22)
- | (imm12 << 10)
+ | (imm << 10)
| ((inst.rn as u32) << 5)
| inst.rd as u32
}
@@ -150,35 +108,35 @@ mod tests {
#[test]
fn test_add() {
- let inst = DataImm::add(0, 1, 7, 64);
+ let inst = DataImm::add(0, 1, 7.try_into().unwrap(), 64);
let result: u32 = inst.into();
assert_eq!(0x91001c20, result);
}
#[test]
fn test_adds() {
- let inst = DataImm::adds(0, 1, 7, 64);
+ let inst = DataImm::adds(0, 1, 7.try_into().unwrap(), 64);
let result: u32 = inst.into();
assert_eq!(0xb1001c20, result);
}
#[test]
fn test_cmp() {
- let inst = DataImm::cmp(0, 7, 64);
+ let inst = DataImm::cmp(0, 7.try_into().unwrap(), 64);
let result: u32 = inst.into();
assert_eq!(0xf1001c1f, result);
}
#[test]
fn test_sub() {
- let inst = DataImm::sub(0, 1, 7, 64);
+ let inst = DataImm::sub(0, 1, 7.try_into().unwrap(), 64);
let result: u32 = inst.into();
assert_eq!(0xd1001c20, result);
}
#[test]
fn test_subs() {
- let inst = DataImm::subs(0, 1, 7, 64);
+ let inst = DataImm::subs(0, 1, 7.try_into().unwrap(), 64);
let result: u32 = inst.into();
assert_eq!(0xf1001c20, result);
}
diff --git a/yjit/src/asm/arm64/mod.rs b/yjit/src/asm/arm64/mod.rs
index 8b59d6c354..0eba37ee15 100644
--- a/yjit/src/asm/arm64/mod.rs
+++ b/yjit/src/asm/arm64/mod.rs
@@ -41,18 +41,16 @@ pub fn add(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
},
(A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(uimm12)) => {
assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(uimm_fits_bits(uimm12, 12), "The immediate operand must be 12 bits or less.");
- DataImm::add(rd.reg_no, rn.reg_no, uimm12 as u16, rd.num_bits).into()
+ DataImm::add(rd.reg_no, rn.reg_no, uimm12.try_into().unwrap(), rd.num_bits).into()
},
(A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Imm(imm12)) => {
assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(imm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
if imm12 < 0 {
- DataImm::sub(rd.reg_no, rn.reg_no, -imm12 as u16, rd.num_bits).into()
+ DataImm::sub(rd.reg_no, rn.reg_no, (-imm12 as u64).try_into().unwrap(), rd.num_bits).into()
} else {
- DataImm::add(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
+ DataImm::add(rd.reg_no, rn.reg_no, (imm12 as u64).try_into().unwrap(), rd.num_bits).into()
}
},
_ => panic!("Invalid operand combination to add instruction."),
@@ -74,18 +72,16 @@ pub fn adds(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
},
(A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
- DataImm::adds(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
+ DataImm::adds(rd.reg_no, rn.reg_no, imm12.try_into().unwrap(), rd.num_bits).into()
},
(A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Imm(imm12)) => {
assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(imm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
if imm12 < 0 {
- DataImm::subs(rd.reg_no, rn.reg_no, -imm12 as u16, rd.num_bits).into()
+ DataImm::subs(rd.reg_no, rn.reg_no, (-imm12 as u64).try_into().unwrap(), rd.num_bits).into()
} else {
- DataImm::adds(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
+ DataImm::adds(rd.reg_no, rn.reg_no, (imm12 as u64).try_into().unwrap(), rd.num_bits).into()
}
},
_ => panic!("Invalid operand combination to adds instruction."),
@@ -272,9 +268,7 @@ pub fn cmp(cb: &mut CodeBlock, rn: A64Opnd, rm: A64Opnd) {
DataReg::cmp(rn.reg_no, rm.reg_no, rn.num_bits).into()
},
(A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
- assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
-
- DataImm::cmp(rn.reg_no, imm12 as u16, rn.num_bits).into()
+ DataImm::cmp(rn.reg_no, imm12.try_into().unwrap(), rn.num_bits).into()
},
_ => panic!("Invalid operand combination to cmp instruction."),
};
@@ -477,12 +471,12 @@ pub fn mov(cb: &mut CodeBlock, rd: A64Opnd, rm: A64Opnd) {
(A64Opnd::Reg(A64Reg { reg_no: 31, num_bits: 64 }), A64Opnd::Reg(rm)) => {
assert!(rm.num_bits == 64, "Expected rm to be 64 bits");
- DataImm::add(31, rm.reg_no, 0, 64).into()
+ DataImm::add(31, rm.reg_no, 0.try_into().unwrap(), 64).into()
},
(A64Opnd::Reg(rd), A64Opnd::Reg(A64Reg { reg_no: 31, num_bits: 64 })) => {
assert!(rd.num_bits == 64, "Expected rd to be 64 bits");
- DataImm::add(rd.reg_no, 31, 0, 64).into()
+ DataImm::add(rd.reg_no, 31, 0.try_into().unwrap(), 64).into()
},
(A64Opnd::Reg(rd), A64Opnd::Reg(rm)) => {
assert!(rd.num_bits == rm.num_bits, "Expected registers to be the same size");
@@ -713,18 +707,16 @@ pub fn sub(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
},
(A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(uimm12)) => {
assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(uimm_fits_bits(uimm12, 12), "The immediate operand must be 12 bits or less.");
- DataImm::sub(rd.reg_no, rn.reg_no, uimm12 as u16, rd.num_bits).into()
+ DataImm::sub(rd.reg_no, rn.reg_no, uimm12.try_into().unwrap(), rd.num_bits).into()
},
(A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Imm(imm12)) => {
assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(imm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
if imm12 < 0 {
- DataImm::add(rd.reg_no, rn.reg_no, -imm12 as u16, rd.num_bits).into()
+ DataImm::add(rd.reg_no, rn.reg_no, (-imm12 as u64).try_into().unwrap(), rd.num_bits).into()
} else {
- DataImm::sub(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
+ DataImm::sub(rd.reg_no, rn.reg_no, (imm12 as u64).try_into().unwrap(), rd.num_bits).into()
}
},
_ => panic!("Invalid operand combination to sub instruction."),
@@ -746,18 +738,16 @@ pub fn subs(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
},
(A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(uimm12)) => {
assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(uimm_fits_bits(uimm12, 12), "The immediate operand must be 12 bits or less.");
- DataImm::subs(rd.reg_no, rn.reg_no, uimm12 as u16, rd.num_bits).into()
+ DataImm::subs(rd.reg_no, rn.reg_no, uimm12.try_into().unwrap(), rd.num_bits).into()
},
(A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Imm(imm12)) => {
assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(imm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
if imm12 < 0 {
- DataImm::adds(rd.reg_no, rn.reg_no, -imm12 as u16, rd.num_bits).into()
+ DataImm::adds(rd.reg_no, rn.reg_no, (-imm12 as u64).try_into().unwrap(), rd.num_bits).into()
} else {
- DataImm::subs(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
+ DataImm::subs(rd.reg_no, rn.reg_no, (imm12 as u64).try_into().unwrap(), rd.num_bits).into()
}
},
_ => panic!("Invalid operand combination to subs instruction."),
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index d3db3877dc..57943ce58f 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -81,7 +81,10 @@ impl Assembler
/// have no memory operands.
fn arm64_split(mut self) -> Assembler
{
- fn load_bitmask_immediate(asm: &mut Assembler, opnd: Opnd) -> Opnd {
+ /// Operands that take the place of bitmask immediates must follow a
+ /// certain encoding. In this function we ensure that those operands
+ /// do follow that encoding, and if they don't then we load them first.
+ fn split_bitmask_immediate(asm: &mut Assembler, opnd: Opnd) -> Opnd {
match opnd {
Opnd::Reg(_) | Opnd::InsnOut { .. } => opnd,
Opnd::Mem(_) => asm.load(opnd),
@@ -105,6 +108,41 @@ impl Assembler
}
}
+ /// Operands that take the place of a shifted immediate must fit within
+ /// a certain size. If they don't then we need to load them first.
+ fn split_shifted_immediate(asm: &mut Assembler, opnd: Opnd) -> Opnd {
+ match opnd {
+ Opnd::Reg(_) | Opnd::InsnOut { .. } => opnd,
+ Opnd::Mem(_) | Opnd::Imm(_) => asm.load(opnd),
+ Opnd::UImm(uimm) => {
+ if ShiftedImmediate::try_from(uimm).is_ok() {
+ opnd
+ } else {
+ asm.load(opnd)
+ }
+ },
+ Opnd::None | Opnd::Value(_) => unreachable!()
+ }
+ }
+
+ /// When you're storing a register into a memory location, the
+ /// displacement from the base register of the memory location must fit
+ /// into 9 bits. If it doesn't, then we need to load that memory address
+ /// into a register first.
+ fn split_store(asm: &mut Assembler, opnd: Opnd) -> Opnd {
+ match opnd {
+ Opnd::Mem(mem) => {
+ if imm_fits_bits(mem.disp.into(), 9) {
+ opnd
+ } else {
+ let base = asm.lea(opnd);
+ Opnd::mem(64, base, 0)
+ }
+ },
+ _ => unreachable!("Can only store memory addresses.")
+ }
+ }
+
self.forward_pass(|asm, index, op, opnds, target, text, pos_marker| {
// Load all Value operands into registers that aren't already a part
// of Load instructions.
@@ -120,22 +158,20 @@ impl Assembler
};
match op {
- Op::Add | Op::Sub => {
- // Check if one of the operands is a register. If it is,
- // then we'll make that the first operand.
+ Op::Add => {
match (opnds[0], opnds[1]) {
- (Opnd::Mem(_), Opnd::Mem(_)) => {
- let opnd0 = asm.load(opnds[0]);
- let opnd1 = asm.load(opnds[1]);
- asm.push_insn(op, vec![opnd0, opnd1], target, text, pos_marker);
+ (Opnd::Reg(_) | Opnd::InsnOut { .. }, Opnd::Reg(_) | Opnd::InsnOut { .. }) => {
+ asm.add(opnds[0], opnds[1]);
},
- (mem_opnd @ Opnd::Mem(_), other_opnd) |
- (other_opnd, mem_opnd @ Opnd::Mem(_)) => {
- let opnd0 = asm.load(mem_opnd);
- asm.push_insn(op, vec![opnd0, other_opnd], target, text, pos_marker);
+ (reg_opnd @ (Opnd::Reg(_) | Opnd::InsnOut { .. }), other_opnd) |
+ (other_opnd, reg_opnd @ (Opnd::Reg(_) | Opnd::InsnOut { .. })) => {
+ let opnd1 = split_shifted_immediate(asm, other_opnd);
+ asm.add(reg_opnd, opnd1);
},
_ => {
- asm.push_insn(op, opnds, target, text, pos_marker);
+ let opnd0 = asm.load(opnds[0]);
+ let opnd1 = split_shifted_immediate(asm, opnds[1]);
+ asm.add(opnd0, opnd1);
}
}
},
@@ -146,12 +182,12 @@ impl Assembler
},
(reg_opnd @ Opnd::Reg(_), other_opnd) |
(other_opnd, reg_opnd @ Opnd::Reg(_)) => {
- let opnd1 = load_bitmask_immediate(asm, other_opnd);
+ let opnd1 = split_bitmask_immediate(asm, other_opnd);
asm.and(reg_opnd, opnd1);
},
_ => {
let opnd0 = asm.load(opnds[0]);
- let opnd1 = load_bitmask_immediate(asm, opnds[1]);
+ let opnd1 = split_bitmask_immediate(asm, opnds[1]);
asm.and(opnd0, opnd1);
}
}
@@ -173,6 +209,16 @@ impl Assembler
// just performs the call.
asm.ccall(target.unwrap().unwrap_fun_ptr(), vec![]);
},
+ Op::Cmp => {
+ let opnd0 = match opnds[0] {
+ Opnd::Reg(_) | Opnd::InsnOut { .. } => opnds[0],
+ _ => asm.load(opnds[0])
+ };
+
+ let opnd1 = split_shifted_immediate(asm, opnds[1]);
+
+ asm.push_insn(op, vec![opnd0, opnd1], target, text, pos_marker);
+ },
Op::CRet => {
if opnds[0] != Opnd::Reg(C_RET_REG) {
let value = asm.load(opnds[0]);
@@ -238,15 +284,20 @@ impl Assembler
// register or an immediate that can be encoded as a
// bitmask immediate. Otherwise, we'll need to split the
// move into multiple instructions.
- _ => load_bitmask_immediate(asm, opnds[1])
+ _ => split_bitmask_immediate(asm, opnds[1])
};
// If we're attempting to load into a memory operand, then
// we'll switch over to the store instruction. Otherwise
// we'll use the normal mov instruction.
match opnds[0] {
- Opnd::Mem(_) => asm.store(opnds[0], value),
- _ => asm.mov(opnds[0], value)
+ Opnd::Mem(_) => {
+ let opnd0 = split_store(asm, opnds[0]);
+ asm.store(opnd0, value);
+ },
+ _ => {
+ asm.mov(opnds[0], value);
+ }
};
},
Op::Not => {
@@ -260,6 +311,11 @@ impl Assembler
asm.not(opnd0);
},
Op::Store => {
+ // The displacement for the STUR instruction can't be more
+ // than 9 bits long. If it's longer, we need to load the
+ // memory address into a register first.
+ let opnd0 = split_store(asm, opnds[0]);
+
// The value being stored must be in a register, so if it's
// not already one we'll load it first.
let opnd1 = match opnds[1] {
@@ -267,7 +323,17 @@ impl Assembler
_ => asm.load(opnds[1])
};
- asm.store(opnds[0], opnd1);
+ asm.store(opnd0, opnd1);
+ },
+ Op::Sub => {
+ let opnd0 = match opnds[0] {
+ Opnd::Reg(_) | Opnd::InsnOut { .. } => opnds[0],
+ _ => asm.load(opnds[0])
+ };
+
+ let opnd1 = split_shifted_immediate(asm, opnds[1]);
+
+ asm.push_insn(op, vec![opnd0, opnd1], target, text, pos_marker);
},
Op::Test => {
// The value being tested must be in a register, so if it's
@@ -281,7 +347,7 @@ impl Assembler
// unsigned immediate that can be encoded as a bitmask
// immediate. If it's not one of those, we'll need to load
// it first.
- let opnd1 = load_bitmask_immediate(asm, opnds[1]);
+ let opnd1 = split_bitmask_immediate(asm, opnds[1]);
asm.test(opnd0, opnd1);
},
@@ -454,18 +520,15 @@ impl Assembler
cb.add_comment(&insn.text.as_ref().unwrap());
}
},
-
Op::Label => {
cb.write_label(insn.target.unwrap().unwrap_label_idx());
},
-
// Report back the current position in the generated code
Op::PosMarker => {
let pos = cb.get_write_ptr();
let pos_marker_fn = insn.pos_marker.as_ref().unwrap();
pos_marker_fn(pos);
}
-
Op::BakeString => {
let str = insn.text.as_ref().unwrap();
for byte in str.as_bytes() {