| field | value | date |
|---|---|---|
| author | Kevin Newton <kddnewton@gmail.com> | 2022-07-15 16:14:55 -0400 |
| committer | Takashi Kokubun <takashikkbn@gmail.com> | 2022-08-29 08:47:01 -0700 |
| commit | 0da253e72cc80c1dbf8517f5217b59a64ec0f44e (patch) | |
| tree | 7eb3cb61dbb2b4b23c8e4da81a6d7bbe40f35fb5 | |
| parent | bf7277b518d5ab634ee708f54fbb8735a8eafdbc (diff) | |
Port print_int to the new backend (https://github.com/Shopify/ruby/pull/321)
* Port print_int to the new backend
* Tests for print_int and print_str
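
With this change, the debugging helper is driven through the backend-agnostic `Assembler` rather than an x86-specific `CodeBlock`/`X86Opnd` pair. Below is a minimal usage sketch mirroring the `test_print_int` test added in `yjit/src/utils.rs`; the module paths for `Assembler` and `Opnd` are assumed from `yjit/src/backend/ir.rs` in this commit's tree.

```rust
// Minimal sketch, assuming print_int is in scope from crate::utils and that
// Assembler/Opnd live in crate::backend::ir as in this commit's tree.
use crate::asm::CodeBlock;
use crate::backend::ir::{Assembler, Opnd};
use crate::utils::print_int;

fn emit_debug_print() {
    let mut asm = Assembler::new();
    let mut cb = CodeBlock::new_dummy(1024);

    // Queue IR that will print the immediate 42 when the generated code runs.
    print_int(&mut asm, Opnd::Imm(42));

    // Lower the platform-independent IR to machine code (x86_64 or arm64).
    asm.compile(&mut cb);
}
```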
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | yjit/src/asm/arm64/inst/load.rs | 42 |
| -rw-r--r-- | yjit/src/asm/arm64/inst/mod.rs | 2 |
| -rw-r--r-- | yjit/src/asm/arm64/inst/sbfm.rs | 77 |
| -rw-r--r-- | yjit/src/asm/arm64/mod.rs | 40 |
| -rw-r--r-- | yjit/src/asm/arm64/opnd.rs | 64 |
| -rw-r--r-- | yjit/src/backend/arm64/mod.rs | 28 |
| -rw-r--r-- | yjit/src/backend/ir.rs | 5 |
| -rw-r--r-- | yjit/src/backend/x86_64/mod.rs | 4 |
| -rw-r--r-- | yjit/src/utils.rs | 110 |
9 files changed, 284 insertions, 88 deletions
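
The bulk of the diff adds an `Op::LoadSExt` IR instruction so that `print_int` can sign-extend sub-64-bit operands before handing them to a C callback: it lowers to `movsx` on x86_64 and to `sxtw`/`ldursw` on arm64. The snippet below is a plain-Rust illustration of why sign extension (rather than zero extension) is needed here; it is not part of the patch itself.

```rust
// Why Op::LoadSExt matters: a 32-bit value holding -1 must stay -1 when it is
// widened to the 64-bit argument of print_int_fn(val: i64).
fn sign_extension_demo() {
    let raw: u32 = 0xFFFF_FFFF;             // the 32-bit bit pattern of -1

    let zero_extended = raw as u64;         // plain widening: 4294967295
    let sign_extended = raw as i32 as i64;  // what MOVSX / SXTW / LDURSW do: -1

    assert_eq!(zero_extended, 4_294_967_295);
    assert_eq!(sign_extended, -1);
}
```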
diff --git a/yjit/src/asm/arm64/inst/load.rs b/yjit/src/asm/arm64/inst/load.rs
index 727dad52f7..b64a6a96ac 100644
--- a/yjit/src/asm/arm64/inst/load.rs
+++ b/yjit/src/asm/arm64/inst/load.rs
@@ -4,6 +4,12 @@ enum Size {
     Size64 = 0b11,
 }

+/// The operation to perform for this instruction.
+enum Opc {
+    LDUR = 0b01,
+    LDURSW = 0b10
+}
+
 /// A convenience function so that we can convert the number of bits of an
 /// register operand directly into an Sf enum variant.
 impl From<u8> for Size {
@@ -22,8 +28,8 @@ impl From<u8> for Size {
 /// LDUR
 /// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
 /// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
-/// | 1 1 1 0 0 0 0 1 0 0 0 |
-/// | size. imm9.......................... rn.............. rt.............. |
+/// | 1 1 1 0 0 0 0 0 0 |
+/// | size. opc.. imm9.......................... rn.............. rt.............. |
 /// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
 ///
 pub struct Load {
@@ -36,6 +42,9 @@ pub struct Load {
     /// The optional signed immediate byte offset from the base register.
     imm9: i16,

+    /// The operation to perform for this instruction.
+    opc: Opc,
+
     /// The size of the operands being operated on.
     size: Size
 }
@@ -44,12 +53,13 @@ impl Load {
     /// LDUR (load register, unscaled)
     /// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDUR--Load-Register--unscaled--?lang=en
     pub fn ldur(rt: u8, rn: u8, imm9: i16, num_bits: u8) -> Self {
-        Self {
-            rt,
-            rn,
-            imm9,
-            size: num_bits.into()
-        }
+        Self { rt, rn, imm9, opc: Opc::LDUR, size: num_bits.into() }
+    }
+
+    /// LDURSW (load register, unscaled, signed)
+    /// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDURSW--Load-Register-Signed-Word--unscaled--?lang=en
+    pub fn ldursw(rt: u8, rn: u8, imm9: i16) -> Self {
+        Self { rt, rn, imm9, opc: Opc::LDURSW, size: Size::Size32 }
     }
 }

@@ -65,7 +75,7 @@ impl From<Load> for u32 {
             | ((inst.size as u32) << 30)
             | (0b11 << 28)
             | (FAMILY << 25)
-            | (1 << 22)
+            | ((inst.opc as u32) << 22)
             | (imm9 << 12)
             | ((inst.rn as u32) << 5)
             | (inst.rt as u32)
@@ -97,4 +107,18 @@ mod tests {
         let result: u32 = inst.into();
         assert_eq!(0xf847b020, result);
     }
+
+    #[test]
+    fn test_ldursw() {
+        let inst = Load::ldursw(0, 1, 0);
+        let result: u32 = inst.into();
+        assert_eq!(0xb8800020, result);
+    }
+
+    #[test]
+    fn test_ldursw_with_imm() {
+        let inst = Load::ldursw(0, 1, 123);
+        let result: u32 = inst.into();
+        assert_eq!(0xb887b020, result);
+    }
 }
diff --git a/yjit/src/asm/arm64/inst/mod.rs b/yjit/src/asm/arm64/inst/mod.rs
index 752ee64aa3..5d4d252d93 100644
--- a/yjit/src/asm/arm64/inst/mod.rs
+++ b/yjit/src/asm/arm64/inst/mod.rs
@@ -15,6 +15,7 @@ mod logical_reg;
 mod mov;
 mod nop;
 mod pc_rel;
+mod sbfm;
 mod shift_imm;
 mod store;
 mod sys_reg;
@@ -33,6 +34,7 @@ pub use logical_reg::LogicalReg;
 pub use mov::Mov;
 pub use nop::Nop;
 pub use pc_rel::PCRelative;
+pub use sbfm::SBFM;
 pub use shift_imm::ShiftImm;
 pub use store::Store;
 pub use sys_reg::SysReg;
diff --git a/yjit/src/asm/arm64/inst/sbfm.rs b/yjit/src/asm/arm64/inst/sbfm.rs
new file mode 100644
index 0000000000..4fbb567ed0
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/sbfm.rs
@@ -0,0 +1,77 @@
+use super::super::arg::Sf;
+
+/// The struct that represents an A64 signed bitfield move instruction that can
+/// be encoded.
+///
+/// SBFM
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 0 0 1 0 0 1 1 0 |
+/// | sf N immr............... imms............... rn.............. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct SBFM {
+    /// The number for the general-purpose register to load the value into.
+    rd: u8,
+
+    /// The number for the general-purpose register to copy from.
+    rn: u8,
+
+    /// The leftmost bit number to be moved from the source.
+    imms: u8,
+
+    // The right rotate amount.
+    immr: u8,
+
+    /// Whether or not this is a 64-bit operation.
+    n: bool,
+
+    /// The size of this operation.
+    sf: Sf
+}
+
+impl SBFM {
+    /// SXTW
+    /// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/SXTW--Sign-Extend-Word--an-alias-of-SBFM-?lang=en
+    pub fn sxtw(rd: u8, rn: u8) -> Self {
+        Self { rd, rn, immr: 0, imms: 31, n: true, sf: Sf::Sf64 }
+    }
+}
+
+/// https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Data-Processing----Immediate?lang=en#bitfield
+const FAMILY: u32 = 0b1001;
+
+impl From<SBFM> for u32 {
+    /// Convert an instruction into a 32-bit value.
+    fn from(inst: SBFM) -> Self {
+        0
+        | ((inst.sf as u32) << 31)
+        | (FAMILY << 25)
+        | (1 << 24)
+        | ((inst.n as u32) << 22)
+        | ((inst.immr as u32) << 16)
+        | ((inst.imms as u32) << 10)
+        | ((inst.rn as u32) << 5)
+        | inst.rd as u32
+    }
+}
+
+impl From<SBFM> for [u8; 4] {
+    /// Convert an instruction into a 4 byte array.
+    fn from(inst: SBFM) -> [u8; 4] {
+        let result: u32 = inst.into();
+        result.to_le_bytes()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_sxtw() {
+        let inst = SBFM::sxtw(0, 1);
+        let result: u32 = inst.into();
+        assert_eq!(0x93407c20, result);
+    }
+}
diff --git a/yjit/src/asm/arm64/mod.rs b/yjit/src/asm/arm64/mod.rs
index ca69b33d9e..6eebccaa61 100644
--- a/yjit/src/asm/arm64/mod.rs
+++ b/yjit/src/asm/arm64/mod.rs
@@ -321,6 +321,21 @@ pub fn ldur(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
     cb.write_bytes(&bytes);
 }

+/// LDURSW - load a 32-bit memory address into a register and sign-extend it
+pub fn ldursw(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+    let bytes: [u8; 4] = match (rt, rn) {
+        (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+            assert!(rt.num_bits == rn.num_bits, "Expected registers to be the same size");
+            assert!(imm_fits_bits(rn.disp.into(), 9), "Expected displacement to be 9 bits or less");
+
+            Load::ldursw(rt.reg_no, rn.base_reg_no, rn.disp as i16).into()
+        },
+        _ => panic!("Invalid operand combination to ldursw instruction.")
+    };
+
+    cb.write_bytes(&bytes);
+}
+
 /// LSL - logical shift left a register by an immediate
 pub fn lsl(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, shift: A64Opnd) {
     let bytes: [u8; 4] = match (rd, rn, shift) {
@@ -558,6 +573,21 @@ pub fn subs(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
     cb.write_bytes(&bytes);
 }

+/// SXTW - sign extend a 32-bit register into a 64-bit register
+pub fn sxtw(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd) {
+    let bytes: [u8; 4] = match (rd, rn) {
+        (A64Opnd::Reg(rd), A64Opnd::Reg(rn)) => {
+            assert_eq!(rd.num_bits, 64, "rd must be 64-bits wide.");
+            assert_eq!(rn.num_bits, 32, "rn must be 32-bits wide.");
+
+            SBFM::sxtw(rd.reg_no, rn.reg_no).into()
+        },
+        _ => panic!("Invalid operand combination to sxtw instruction."),
+    };
+
+    cb.write_bytes(&bytes);
+}
+
 /// RET - unconditionally return to a location in a register, defaults to X30
 pub fn ret(cb: &mut CodeBlock, rn: A64Opnd) {
     let bytes: [u8; 4] = match rn {
@@ -751,6 +781,11 @@ mod tests {
     }

     #[test]
+    fn test_ldursw() {
+        check_bytes("6ab187b8", |cb| ldursw(cb, X10, A64Opnd::new_mem(64, X11, 123)));
+    }
+
+    #[test]
     fn test_lsl() {
         check_bytes("6ac572d3", |cb| lsl(cb, X10, X11, A64Opnd::new_uimm(14)));
     }
@@ -871,6 +906,11 @@ mod tests {
     }

     #[test]
+    fn test_sxtw() {
+        check_bytes("6a7d4093", |cb| sxtw(cb, X10, W11));
+    }
+
+    #[test]
     fn test_tst_register() {
         check_bytes("1f0001ea", |cb| tst(cb, X0, X1));
     }
diff --git a/yjit/src/asm/arm64/opnd.rs b/yjit/src/asm/arm64/opnd.rs
index e1f95979a9..a10e289455 100644
--- a/yjit/src/asm/arm64/opnd.rs
+++ b/yjit/src/asm/arm64/opnd.rs
@@ -146,38 +146,38 @@ pub const X30: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 30 });
 pub const X31: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 31 });

 // 32-bit registers
-pub const W0: A64Reg = A64Reg { num_bits: 32, reg_no: 0 };
-pub const W1: A64Reg = A64Reg { num_bits: 32, reg_no: 1 };
-pub const W2: A64Reg = A64Reg { num_bits: 32, reg_no: 2 };
-pub const W3: A64Reg = A64Reg { num_bits: 32, reg_no: 3 };
-pub const W4: A64Reg = A64Reg { num_bits: 32, reg_no: 4 };
-pub const W5: A64Reg = A64Reg { num_bits: 32, reg_no: 5 };
-pub const W6: A64Reg = A64Reg { num_bits: 32, reg_no: 6 };
-pub const W7: A64Reg = A64Reg { num_bits: 32, reg_no: 7 };
-pub const W8: A64Reg = A64Reg { num_bits: 32, reg_no: 8 };
-pub const W9: A64Reg = A64Reg { num_bits: 32, reg_no: 9 };
-pub const W10: A64Reg = A64Reg { num_bits: 32, reg_no: 10 };
-pub const W11: A64Reg = A64Reg { num_bits: 32, reg_no: 11 };
-pub const W12: A64Reg = A64Reg { num_bits: 32, reg_no: 12 };
-pub const W13: A64Reg = A64Reg { num_bits: 32, reg_no: 13 };
-pub const W14: A64Reg = A64Reg { num_bits: 32, reg_no: 14 };
-pub const W15: A64Reg = A64Reg { num_bits: 32, reg_no: 15 };
-pub const W16: A64Reg = A64Reg { num_bits: 32, reg_no: 16 };
-pub const W17: A64Reg = A64Reg { num_bits: 32, reg_no: 17 };
-pub const W18: A64Reg = A64Reg { num_bits: 32, reg_no: 18 };
-pub const W19: A64Reg = A64Reg { num_bits: 32, reg_no: 19 };
-pub const W20: A64Reg = A64Reg { num_bits: 32, reg_no: 20 };
-pub const W21: A64Reg = A64Reg { num_bits: 32, reg_no: 21 };
-pub const W22: A64Reg = A64Reg { num_bits: 32, reg_no: 22 };
-pub const W23: A64Reg = A64Reg { num_bits: 32, reg_no: 23 };
-pub const W24: A64Reg = A64Reg { num_bits: 32, reg_no: 24 };
-pub const W25: A64Reg = A64Reg { num_bits: 32, reg_no: 25 };
-pub const W26: A64Reg = A64Reg { num_bits: 32, reg_no: 26 };
-pub const W27: A64Reg = A64Reg { num_bits: 32, reg_no: 27 };
-pub const W28: A64Reg = A64Reg { num_bits: 32, reg_no: 28 };
-pub const W29: A64Reg = A64Reg { num_bits: 32, reg_no: 29 };
-pub const W30: A64Reg = A64Reg { num_bits: 32, reg_no: 30 };
-pub const W31: A64Reg = A64Reg { num_bits: 32, reg_no: 31 };
+pub const W0: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 0 });
+pub const W1: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 1 });
+pub const W2: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 2 });
+pub const W3: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 3 });
+pub const W4: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 4 });
+pub const W5: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 5 });
+pub const W6: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 6 });
+pub const W7: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 7 });
+pub const W8: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 8 });
+pub const W9: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 9 });
+pub const W10: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 10 });
+pub const W11: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 11 });
+pub const W12: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 12 });
+pub const W13: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 13 });
+pub const W14: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 14 });
+pub const W15: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 15 });
+pub const W16: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 16 });
+pub const W17: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 17 });
+pub const W18: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 18 });
+pub const W19: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 19 });
+pub const W20: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 20 });
+pub const W21: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 21 });
+pub const W22: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 22 });
+pub const W23: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 23 });
+pub const W24: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 24 });
+pub const W25: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 25 });
+pub const W26: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 26 });
+pub const W27: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 27 });
+pub const W28: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 28 });
+pub const W29: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 29 });
+pub const W30: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 30 });
+pub const W31: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 31 });

 // C argument registers
 pub const C_ARG_REGS: [A64Opnd; 4] = [X0, X1, X2, X3];
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index 9539b907ea..8b5576f7be 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -158,6 +158,22 @@ impl Assembler
                     asm.jmp_opnd(opnds[0]);
                 }
             },
+            Op::LoadSExt => {
+                match opnds[0] {
+                    // We only want to sign extend if the operand is a
+                    // register, instruction output, or memory address that
+                    // is 32 bits. Otherwise we'll just load the value
+                    // directly since there's no need to sign extend.
+                    Opnd::Reg(Reg { num_bits: 32, .. }) |
+                    Opnd::InsnOut { num_bits: 32, .. } |
+                    Opnd::Mem(Mem { num_bits: 32, .. }) => {
+                        asm.load_sext(opnds[0]);
+                    },
+                    _ => {
+                        asm.load(opnds[0]);
+                    }
+                };
+            },
             Op::Mov => {
                 // The value that is being moved must be either a register
                 // or an immediate that can be encoded as a bitmask
@@ -448,6 +464,18 @@ impl Assembler
                        }
                    };
                },
+                Op::LoadSExt => {
+                    match insn.opnds[0] {
+                        Opnd::Reg(Reg { num_bits: 32, .. }) |
+                        Opnd::InsnOut { num_bits: 32, .. } => {
+                            sxtw(cb, insn.out.into(), insn.opnds[0].into());
+                        },
+                        Opnd::Mem(Mem { num_bits: 32, .. }) => {
+                            ldursw(cb, insn.out.into(), insn.opnds[0].into());
+                        },
+                        _ => unreachable!()
+                    };
+                },
                Op::Mov => {
                    mov(cb, insn.opnds[0].into(), insn.opnds[1].into());
                },
diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
index 7e763b1ae9..e42a0c50b4 100644
--- a/yjit/src/backend/ir.rs
+++ b/yjit/src/backend/ir.rs
@@ -68,6 +68,10 @@ pub enum Op
     // A low-level instruction that loads a value into a register.
     Load,

+    // A low-level instruction that loads a value into a register and
+    // sign-extends it to a 64-bit value.
+    LoadSExt,
+
     // Low-level instruction to store a value to memory.
     Store,

@@ -865,6 +869,7 @@ def_push_0_opnd_no_out!(cpush_all, Op::CPushAll);
 def_push_0_opnd_no_out!(cpop_all, Op::CPopAll);
 def_push_1_opnd_no_out!(cret, Op::CRet);
 def_push_1_opnd!(load, Op::Load);
+def_push_1_opnd!(load_sext, Op::LoadSExt);
 def_push_1_opnd!(lea, Op::Lea);
 def_push_2_opnd_no_out!(store, Op::Store);
 def_push_2_opnd_no_out!(mov, Op::Mov);
diff --git a/yjit/src/backend/x86_64/mod.rs b/yjit/src/backend/x86_64/mod.rs
index 0001d45977..31a907b55e 100644
--- a/yjit/src/backend/x86_64/mod.rs
+++ b/yjit/src/backend/x86_64/mod.rs
@@ -253,6 +253,10 @@ impl Assembler
                }
            },

+            Op::LoadSExt => {
+                movsx(cb, insn.out.into(), insn.opnds[0].into())
+            },
+
            Op::Mov => mov(cb, insn.opnds[0].into(), insn.opnds[1].into()),

            // Load effective address
diff --git a/yjit/src/utils.rs b/yjit/src/utils.rs
index 98af604193..5f42ba1fdb 100644
--- a/yjit/src/utils.rs
+++ b/yjit/src/utils.rs
@@ -105,36 +105,6 @@ pub fn iseq_get_location(iseq: IseqPtr) -> String {
     s
 }

-#[cfg(test)]
-mod tests {
-    #[test]
-    fn min_max_preserved_after_cast_to_usize() {
-        use crate::utils::IntoUsize;
-
-        let min: usize = u64::MIN.as_usize();
-        assert_eq!(min, u64::MIN.try_into().unwrap());
-        let max: usize = u64::MAX.as_usize();
-        assert_eq!(max, u64::MAX.try_into().unwrap());
-
-        let min: usize = u32::MIN.as_usize();
-        assert_eq!(min, u32::MIN.try_into().unwrap());
-        let max: usize = u32::MAX.as_usize();
-        assert_eq!(max, u32::MAX.try_into().unwrap());
-    }
-
-    #[test]
-    fn test_offset_of() {
-        #[repr(C)]
-        struct Foo {
-            a: u8,
-            b: u64,
-        }
-
-        assert_eq!(0, offset_of!(Foo, a), "C99 6.7.2.1p13 says no padding at the front");
-        assert_eq!(8, offset_of!(Foo, b), "ABI dependent, but should hold");
-    }
-}
-
 // TODO: we may want to move this function into yjit.c, maybe add a convenient Rust-side wrapper
 /*
 // For debugging. Print the bytecode for an iseq.
@@ -163,36 +133,31 @@ macro_rules! c_callable {
 }
 pub(crate) use c_callable;

-/*
-pub fn print_int(cb: &mut CodeBlock, opnd: X86Opnd) {
+pub fn print_int(asm: &mut Assembler, opnd: Opnd) {
     c_callable!{
         fn print_int_fn(val: i64) {
             println!("{}", val);
         }
     }

-    push_regs(cb);
+    asm.cpush_all();

-    match opnd {
-        X86Opnd::Mem(_) | X86Opnd::Reg(_) => {
+    let argument = match opnd {
+        Opnd::Mem(_) | Opnd::Reg(_) | Opnd::InsnOut { .. } => {
             // Sign-extend the value if necessary
-            if opnd.num_bits() < 64 {
-                movsx(cb, C_ARG_REGS[0], opnd);
+            if opnd.rm_num_bits() < 64 {
+                asm.load_sext(opnd)
             } else {
-                mov(cb, C_ARG_REGS[0], opnd);
+                opnd
             }
-        }
-        X86Opnd::Imm(_) | X86Opnd::UImm(_) => {
-            mov(cb, C_ARG_REGS[0], opnd);
-        }
+        },
+        Opnd::Imm(_) | Opnd::UImm(_) => opnd,
         _ => unreachable!(),
-    }
+    };

-    mov(cb, RAX, const_ptr_opnd(print_int_fn as *const u8));
-    call(cb, RAX);
-    pop_regs(cb);
+    asm.ccall(print_int_fn as *const u8, vec![argument]);
+    asm.cpop_all();
 }
-*/

 /// Generate code to print a pointer
 pub fn print_ptr(asm: &mut Assembler, opnd: Opnd) {
@@ -251,3 +216,54 @@ pub fn print_str(asm: &mut Assembler, str: &str) {

     asm.cpop_all();
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::asm::CodeBlock;
+
+    #[test]
+    fn min_max_preserved_after_cast_to_usize() {
+        use crate::utils::IntoUsize;
+
+        let min: usize = u64::MIN.as_usize();
+        assert_eq!(min, u64::MIN.try_into().unwrap());
+        let max: usize = u64::MAX.as_usize();
+        assert_eq!(max, u64::MAX.try_into().unwrap());
+
+        let min: usize = u32::MIN.as_usize();
+        assert_eq!(min, u32::MIN.try_into().unwrap());
+        let max: usize = u32::MAX.as_usize();
+        assert_eq!(max, u32::MAX.try_into().unwrap());
+    }
+
+    #[test]
+    fn test_offset_of() {
+        #[repr(C)]
+        struct Foo {
+            a: u8,
+            b: u64,
+        }
+
+        assert_eq!(0, offset_of!(Foo, a), "C99 6.7.2.1p13 says no padding at the front");
+        assert_eq!(8, offset_of!(Foo, b), "ABI dependent, but should hold");
+    }
+
+    #[test]
+    fn test_print_int() {
+        let mut asm = Assembler::new();
+        let mut cb = CodeBlock::new_dummy(1024);
+
+        print_int(&mut asm, Opnd::Imm(42));
+        asm.compile(&mut cb);
+    }
+
+    #[test]
+    fn test_print_str() {
+        let mut asm = Assembler::new();
+        let mut cb = CodeBlock::new_dummy(1024);
+
+        print_str(&mut asm, "Hello, world!");
+        asm.compile(&mut cb);
+    }
+}
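
As a cross-check on the new `sbfm.rs` encoder, the bit fields above can be composed by hand to reproduce the value asserted in `test_sxtw`. A standalone sketch follows; the `Sf::Sf64 = 1` encoding is an assumption, since `arg.rs` is not touched by this diff.

```rust
// Recomputes the SXTW encoding from the fields used by SBFM::sxtw(0, 1) in the
// diff above. Assumption: Sf::Sf64 encodes as 1 (not shown in this patch).
fn encode_sxtw(rd: u32, rn: u32) -> u32 {
    let sf: u32 = 1;          // Sf::Sf64 (assumed encoding)
    let family: u32 = 0b1001; // data-processing (immediate), bitfield group
    let n: u32 = 1;           // 64-bit variant
    let immr: u32 = 0;        // right rotate amount for SXTW
    let imms: u32 = 31;       // leftmost source bit for SXTW

    (sf << 31) | (family << 25) | (1 << 24) | (n << 22)
        | (immr << 16) | (imms << 10) | (rn << 5) | rd
}

fn main() {
    // Matches the word expected by test_sxtw for `sxtw x0, w1`.
    assert_eq!(encode_sxtw(0, 1), 0x93407c20);
}
```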
