author    Aaron Patterson <tenderlove@ruby-lang.org>  2025-11-25 16:51:54 -0800
committer Aaron Patterson <aaron.patterson@gmail.com> 2025-12-03 13:37:40 -0800
commit    d7e55f84f2bd62d302b29513d4c4dc8ae9aef96f (patch)
tree      57028be0e4406fcd5420b1b509d5d4952f4d26b5
parent    2b23b05bf2c0f30f2c4ee9bb3030fa58f2cba3a6 (diff)
ZJIT: Use the custom iterator
This commit uses the custom instruction iterator in the arm64 / x86_64 instruction splitting passes. Once we introduce basic blocks to LIR, the custom iterator will ensure that instructions are added in the correct place.
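The iterator type itself is not part of this diff. Judging from the call sites below (`self.instruction_iterator()`, `iterator.next(asm)`, `iterator.peek()`), a minimal sketch of the shape they imply could look like the following. The field names, the `peek` return type, and the currently unused `asm` parameter are assumptions, not the committed implementation:

struct InsnIter {
    insns: std::vec::IntoIter<Insn>,
    index: usize,
}

impl InsnIter {
    /// Yield the next (index, instruction) pair. Threading the output
    /// assembler through `next` is what will let a basic-block-aware
    /// version decide where split instructions get appended.
    fn next(&mut self, _asm: &mut Assembler) -> Option<(usize, Insn)> {
        let insn = self.insns.next()?;
        let index = self.index;
        self.index += 1;
        Some((index, insn))
    }

    /// Look at the upcoming instruction (and its index) without consuming
    /// it, as `merge_three_reg_mov` does below.
    fn peek(&self) -> Option<(usize, &Insn)> {
        Some((self.index, self.insns.as_slice().first()?))
    }
}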
-rw-r--r--zjit/src/backend/arm64/mod.rs25
-rw-r--r--zjit/src/backend/x86_64/mod.rs21
2 files changed, 24 insertions, 22 deletions
diff --git a/zjit/src/backend/arm64/mod.rs b/zjit/src/backend/arm64/mod.rs
index 013fd31583..78fb69b0b0 100644
--- a/zjit/src/backend/arm64/mod.rs
+++ b/zjit/src/backend/arm64/mod.rs
@@ -391,10 +391,10 @@ impl Assembler {
let mut asm_local = Assembler::new_with_asm(&self);
let live_ranges: Vec<LiveRange> = take(&mut self.live_ranges);
- let mut iterator = self.insns.into_iter().enumerate().peekable();
+ let mut iterator = self.instruction_iterator();
let asm = &mut asm_local;
- while let Some((index, mut insn)) = iterator.next() {
+ while let Some((index, mut insn)) = iterator.next(asm) {
// Here we're going to map the operands of the instruction to load
// any Opnd::Value operands into registers if they are heap objects
// such that only the Op::Load instruction needs to handle that
@@ -428,13 +428,13 @@ impl Assembler {
*right = split_shifted_immediate(asm, other_opnd);
// Now `right` is either a register or an immediate, both can try to
// merge with a subsequent mov.
- merge_three_reg_mov(&live_ranges, &mut iterator, left, left, out);
+ merge_three_reg_mov(&live_ranges, &mut iterator, asm, left, left, out);
asm.push_insn(insn);
}
_ => {
*left = split_load_operand(asm, *left);
*right = split_shifted_immediate(asm, *right);
- merge_three_reg_mov(&live_ranges, &mut iterator, left, right, out);
+ merge_three_reg_mov(&live_ranges, &mut iterator, asm, left, right, out);
asm.push_insn(insn);
}
}
@@ -444,7 +444,7 @@ impl Assembler {
*right = split_shifted_immediate(asm, *right);
// Now `right` is either a register or an immediate,
// both can try to merge with a subsequent mov.
- merge_three_reg_mov(&live_ranges, &mut iterator, left, left, out);
+ merge_three_reg_mov(&live_ranges, &mut iterator, asm, left, left, out);
asm.push_insn(insn);
}
Insn::And { left, right, out } |
@@ -454,7 +454,7 @@ impl Assembler {
*left = opnd0;
*right = opnd1;
- merge_three_reg_mov(&live_ranges, &mut iterator, left, right, out);
+ merge_three_reg_mov(&live_ranges, &mut iterator, asm, left, right, out);
asm.push_insn(insn);
}
@@ -567,7 +567,7 @@ impl Assembler {
if matches!(out, Opnd::VReg { .. }) && *out == *src && live_ranges[out.vreg_idx()].end() == index + 1 => {
*out = Opnd::Reg(*reg);
asm.push_insn(insn);
- iterator.next(); // Pop merged Insn::Mov
+ iterator.next(asm); // Pop merged Insn::Mov
}
_ => {
asm.push_insn(insn);
@@ -694,7 +694,7 @@ impl Assembler {
/// VRegs, most splits should happen in [`Self::arm64_split`]. However, some instructions
/// need to be split with registers after `alloc_regs`, e.g. for `compile_exits`, so this
/// splits them and uses scratch registers for it.
- fn arm64_scratch_split(self) -> Assembler {
+ fn arm64_scratch_split(mut self) -> Assembler {
/// If opnd is Opnd::Mem with a too large disp, make the disp smaller using lea.
fn split_large_disp(asm: &mut Assembler, opnd: Opnd, scratch_opnd: Opnd) -> Opnd {
match opnd {
@@ -753,9 +753,9 @@ impl Assembler {
let mut asm_local = Assembler::new_with_asm(&self);
let asm = &mut asm_local;
asm.accept_scratch_reg = true;
- let mut iterator = self.insns.into_iter().enumerate().peekable();
+ let iterator = &mut self.instruction_iterator();
- while let Some((_, mut insn)) = iterator.next() {
+ while let Some((_, mut insn)) = iterator.next(asm) {
match &mut insn {
Insn::Add { left, right, out } |
Insn::Sub { left, right, out } |
@@ -1660,7 +1660,8 @@ impl Assembler {
/// If a, b, and c are all registers.
fn merge_three_reg_mov(
live_ranges: &[LiveRange],
- iterator: &mut std::iter::Peekable<impl Iterator<Item = (usize, Insn)>>,
+ iterator: &mut InsnIter,
+ asm: &mut Assembler,
left: &Opnd,
right: &Opnd,
out: &mut Opnd,
@@ -1671,7 +1672,7 @@ fn merge_three_reg_mov(
= (left, right, iterator.peek()) {
if out == src && live_ranges[out.vreg_idx()].end() == *mov_idx && matches!(*dest, Opnd::Reg(_) | Opnd::VReg{..}) {
*out = *dest;
- iterator.next(); // Pop merged Insn::Mov
+ iterator.next(asm); // Pop merged Insn::Mov
}
}
}
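The peephole that `merge_three_reg_mov` implements folds the move that follows a three-operand op into the op's destination, provided the temporary dies at the move. Here is a self-contained toy version of that rewrite, using simplified stand-ins for ZJIT's `Opnd` and `Insn` types (illustrative only, not the committed code):

/// Simplified stand-ins for ZJIT's operand and instruction types.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Opnd { VReg(usize), Reg(u8) }

#[derive(Clone, Copy, Debug)]
enum Insn {
    Add { left: Opnd, right: Opnd, out: Opnd },
    Mov { dest: Opnd, src: Opnd },
}

/// Fold `out = Add(l, r); Mov(dest, out)` into `dest = Add(l, r)` when
/// `out` is last used by the Mov. `live_end(op)` returns the index of the
/// last instruction using `op`, mirroring `live_ranges[...].end()`.
fn merge_three_reg_mov_toy(insns: &[Insn], live_end: impl Fn(Opnd) -> usize) -> Vec<Insn> {
    let mut result = Vec::new();
    let mut i = 0;
    while i < insns.len() {
        match (insns[i], insns.get(i + 1)) {
            (Insn::Add { left, right, out }, Some(&Insn::Mov { dest, src }))
                if out == src && live_end(out) == i + 1 =>
            {
                // Write straight into the Mov's destination and skip the
                // Mov, like `iterator.next(asm)` popping the merged insn.
                result.push(Insn::Add { left, right, out: dest });
                i += 2;
            }
            (insn, _) => {
                result.push(insn);
                i += 1;
            }
        }
    }
    result
}

For example, `[Add { left: VReg(1), right: VReg(2), out: VReg(3) }, Mov { dest: Reg(0), src: VReg(3) }]` with `live_end(VReg(3)) == 1` collapses to the single instruction `Add { left: VReg(1), right: VReg(2), out: Reg(0) }`.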
diff --git a/zjit/src/backend/x86_64/mod.rs b/zjit/src/backend/x86_64/mod.rs
index a9bd57f368..5e975e1bd0 100644
--- a/zjit/src/backend/x86_64/mod.rs
+++ b/zjit/src/backend/x86_64/mod.rs
@@ -138,11 +138,12 @@ impl Assembler {
/// Split IR instructions for the x86 platform
fn x86_split(mut self) -> Assembler
{
- let mut asm = Assembler::new_with_asm(&self);
+ let mut asm_local = Assembler::new_with_asm(&self);
+ let asm = &mut asm_local;
let live_ranges: Vec<LiveRange> = take(&mut self.live_ranges);
- let mut iterator = self.insns.into_iter().enumerate().peekable();
+ let mut iterator = self.instruction_iterator();
- while let Some((index, mut insn)) = iterator.next() {
+ while let Some((index, mut insn)) = iterator.next(asm) {
let is_load = matches!(insn, Insn::Load { .. } | Insn::LoadInto { .. });
let mut opnd_iter = insn.opnd_iter_mut();
@@ -187,13 +188,13 @@ impl Assembler {
if out == src && left == dest && live_ranges[out.vreg_idx()].end() == index + 1 && uimm_num_bits(*value) <= 32 => {
*out = *dest;
asm.push_insn(insn);
- iterator.next(); // Pop merged Insn::Mov
+ iterator.next(asm); // Pop merged Insn::Mov
}
(Opnd::Reg(_), Opnd::Reg(_), Some(Insn::Mov { dest, src }))
if out == src && live_ranges[out.vreg_idx()].end() == index + 1 && *dest == *left => {
*out = *dest;
asm.push_insn(insn);
- iterator.next(); // Pop merged Insn::Mov
+ iterator.next(asm); // Pop merged Insn::Mov
}
_ => {
match (*left, *right) {
@@ -373,7 +374,7 @@ impl Assembler {
(Insn::Lea { opnd, out }, Some(Insn::Mov { dest: Opnd::Reg(reg), src }))
if matches!(out, Opnd::VReg { .. }) && out == src && live_ranges[out.vreg_idx()].end() == index + 1 => {
asm.push_insn(Insn::Lea { opnd: *opnd, out: Opnd::Reg(*reg) });
- iterator.next(); // Pop merged Insn::Mov
+ iterator.next(asm); // Pop merged Insn::Mov
}
_ => asm.push_insn(insn),
}
@@ -384,14 +385,14 @@ impl Assembler {
}
}
- asm
+ asm_local
}
/// Split instructions using scratch registers. To maximize the use of the register pool
/// for VRegs, most splits should happen in [`Self::x86_split`]. However, some instructions
/// need to be split with registers after `alloc_regs`, e.g. for `compile_exits`, so
/// this splits them and uses scratch registers for it.
- pub fn x86_scratch_split(self) -> Assembler {
+ pub fn x86_scratch_split(mut self) -> Assembler {
/// For some instructions, we want to be able to lower a 64-bit operand
/// without requiring more registers to be available in the register
/// allocator. So we just use the SCRATCH0_OPND register temporarily to hold
@@ -470,9 +471,9 @@ impl Assembler {
let mut asm_local = Assembler::new_with_asm(&self);
let asm = &mut asm_local;
asm.accept_scratch_reg = true;
- let mut iterator = self.insns.into_iter().enumerate().peekable();
+ let mut iterator = self.instruction_iterator();
- while let Some((_, mut insn)) = iterator.next() {
+ while let Some((_, mut insn)) = iterator.next(asm) {
match &mut insn {
Insn::Add { left, right, out } |
Insn::Sub { left, right, out } |
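One related detail: both scratch-split entry points change their receiver from `self` to `mut self`. The old code moved the instruction list out with `self.insns.into_iter()`, and `instruction_iterator()` presumably needs a mutable borrow to do the same. A plausible constructor under that assumption (the real signature is not shown in this diff):

use std::mem::take;

impl Assembler {
    /// Hypothetical constructor matching the `InsnIter` sketch above: move
    /// the instruction list out of the assembler (hence the `mut self`
    /// receivers) and wrap it in the custom iterator.
    fn instruction_iterator(&mut self) -> InsnIter {
        InsnIter {
            insns: take(&mut self.insns).into_iter(),
            index: 0,
        }
    }
}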