author     Takashi Kokubun <takashikkbn@gmail.com>  2025-10-28 02:32:33 -0700
committer  Takashi Kokubun <takashikkbn@gmail.com>  2025-10-28 09:25:30 -0700
commit     cc051ef0e56b37c9bd29cabd4e930a170a832bcf (patch)
tree       aa984fd2ff46e63bbe1105f310749d38eadba266
parent     b463c1a90450f16072301cd226d32841c1f200e8 (diff)
ZJIT: Simplify Assembler constructors
-rw-r--r--  zjit/src/backend/arm64/mod.rs    9
-rw-r--r--  zjit/src/backend/lir.rs         38
-rw-r--r--  zjit/src/backend/x86_64/mod.rs   9
3 files changed, 34 insertions, 22 deletions
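
The hunks below replace the general-purpose constructor `new_with_label_names(label_names, num_vregs, accept_scratch_reg)` with three narrower ones: `new()`, `new_with_accept_scratch_reg()`, and `new_with_asm()`. As a reading aid, here is a condensed, self-contained Rust sketch of the constructor set this commit converges on; the use of `String` for instructions, the capacity value, and the `main` function are simplified stand-ins for the real definitions in zjit/src/backend/lir.rs.

// Simplified stand-ins: the real Assembler stores LIR instructions (Insn) and
// defines ASSEMBLER_INSNS_CAPACITY elsewhere in lir.rs.
const ASSEMBLER_INSNS_CAPACITY: usize = 256; // illustrative value

#[derive(Clone, Copy)]
struct LiveRange { start: Option<usize>, end: Option<usize> }

struct Assembler {
    insns: Vec<String>, // placeholder for Vec<Insn>
    live_ranges: Vec<LiveRange>,
    label_names: Vec<String>,
    accept_scratch_reg: bool,
    leaf_ccall_stack_size: Option<usize>,
}

impl Assembler {
    /// Create an Assembler with defaults.
    fn new() -> Self {
        Self {
            insns: Vec::with_capacity(ASSEMBLER_INSNS_CAPACITY),
            live_ranges: Vec::with_capacity(ASSEMBLER_INSNS_CAPACITY),
            label_names: Vec::default(),
            accept_scratch_reg: false,
            leaf_ccall_stack_size: None,
        }
    }

    /// Like `new()`, but with the scratch-register flag chosen by the caller.
    fn new_with_accept_scratch_reg(accept_scratch_reg: bool) -> Self {
        Self { accept_scratch_reg, ..Self::new() }
    }

    /// Carry another Assembler's label names, scratch-register flag, and VReg
    /// count into a fresh Assembler that starts with no instructions.
    fn new_with_asm(old_asm: &Assembler) -> Self {
        let mut asm = Self {
            label_names: old_asm.label_names.clone(),
            accept_scratch_reg: old_asm.accept_scratch_reg,
            ..Self::new()
        };
        // Pre-size live_ranges so VReg indices from old_asm stay valid.
        asm.live_ranges.resize(old_asm.live_ranges.len(), LiveRange { start: None, end: None });
        asm
    }
}

fn main() {
    let first = Assembler::new();
    let next = Assembler::new_with_asm(&first);
    assert_eq!(next.live_ranges.len(), first.live_ranges.len());
}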
diff --git a/zjit/src/backend/arm64/mod.rs b/zjit/src/backend/arm64/mod.rs
index a6a5fc5958..5760cadfc3 100644
--- a/zjit/src/backend/arm64/mod.rs
+++ b/zjit/src/backend/arm64/mod.rs
@@ -207,7 +207,7 @@ impl Assembler {
/// Return an Assembler with scratch registers disabled in the backend, and a scratch register.
pub fn new_with_scratch_reg() -> (Self, Opnd) {
- (Self::new_with_label_names(Vec::default(), 0, true), SCRATCH_OPND)
+ (Self::new_with_accept_scratch_reg(true), SCRATCH_OPND)
}
/// Return true if opnd contains a scratch reg
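
The calling convention this hunk preserves: the caller gets back both the Assembler and the operand it is allowed to clobber. Below is a minimal sketch under simplified assumptions; `Opnd`, the register number, and the struct body are placeholders, and only the constructor shape comes from the diff.

// Simplified placeholders for the real backend types.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Opnd { Reg(u8) }

const SCRATCH_OPND: Opnd = Opnd::Reg(11); // register number is illustrative

struct Assembler { accept_scratch_reg: bool }

impl Assembler {
    fn new_with_accept_scratch_reg(accept_scratch_reg: bool) -> Self {
        Self { accept_scratch_reg }
    }

    /// Return an Assembler with the backend's own scratch-register use
    /// disabled, plus a scratch register the caller may clobber.
    fn new_with_scratch_reg() -> (Self, Opnd) {
        (Self::new_with_accept_scratch_reg(true), SCRATCH_OPND)
    }
}

fn main() {
    let (asm, scratch) = Assembler::new_with_scratch_reg();
    assert!(asm.accept_scratch_reg);
    assert_eq!(scratch, SCRATCH_OPND); // free for the caller's temporaries
}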
@@ -386,9 +386,9 @@ impl Assembler {
}
}
+ let mut asm_local = Assembler::new_with_asm(&self);
let live_ranges: Vec<LiveRange> = take(&mut self.live_ranges);
let mut iterator = self.insns.into_iter().enumerate().peekable();
- let mut asm_local = Assembler::new_with_label_names(take(&mut self.label_names), live_ranges.len(), self.accept_scratch_reg);
let asm = &mut asm_local;
while let Some((index, mut insn)) = iterator.next() {
@@ -691,9 +691,10 @@ impl Assembler {
/// VRegs, most splits should happen in [`Self::arm64_split`]. However, some instructions
/// need to be split with registers after `alloc_regs`, e.g. for `compile_side_exits`, so this
/// splits them and uses scratch registers for it.
- fn arm64_split_with_scratch_reg(mut self) -> Assembler {
+ fn arm64_split_with_scratch_reg(self) -> Assembler {
+ let mut asm = Assembler::new_with_asm(&self);
+ asm.accept_scratch_reg = true;
let iterator = self.insns.into_iter().enumerate().peekable();
- let mut asm = Assembler::new_with_label_names(take(&mut self.label_names), self.live_ranges.len(), true);
for (_, mut insn) in iterator {
match &mut insn {
diff --git a/zjit/src/backend/lir.rs b/zjit/src/backend/lir.rs
index 6efb3e1259..55151d0605 100644
--- a/zjit/src/backend/lir.rs
+++ b/zjit/src/backend/lir.rs
@@ -1182,26 +1182,36 @@ pub struct Assembler {
impl Assembler
{
- /// Create an Assembler
+ /// Create an Assembler with defaults
pub fn new() -> Self {
- Self::new_with_label_names(Vec::default(), 0, false)
- }
-
- /// Create an Assembler with parameters that are populated by another Assembler instance.
- /// This API is used for copying an Assembler for the next compiler pass.
- pub fn new_with_label_names(label_names: Vec<String>, num_vregs: usize, accept_scratch_reg: bool) -> Self {
- let mut live_ranges = Vec::with_capacity(ASSEMBLER_INSNS_CAPACITY);
- live_ranges.resize(num_vregs, LiveRange { start: None, end: None });
-
Self {
insns: Vec::with_capacity(ASSEMBLER_INSNS_CAPACITY),
- live_ranges,
- label_names,
- accept_scratch_reg,
+ live_ranges: Vec::with_capacity(ASSEMBLER_INSNS_CAPACITY),
+ label_names: Vec::default(),
+ accept_scratch_reg: false,
leaf_ccall_stack_size: None,
}
}
+ /// Create an Assembler that allows the use of scratch registers.
+ /// This should be called only through [`Self::new_with_scratch_reg`].
+ pub(super) fn new_with_accept_scratch_reg(accept_scratch_reg: bool) -> Self {
+ Self { accept_scratch_reg, ..Self::new() }
+ }
+
+ /// Create an Assembler with the parameters of another Assembler and no instructions.
+ /// Compiler passes use this API to build the next Assembler and insert new instructions into it.
+ pub(super) fn new_with_asm(old_asm: &Assembler) -> Self {
+ let mut asm = Self {
+ label_names: old_asm.label_names.clone(),
+ accept_scratch_reg: old_asm.accept_scratch_reg,
+ ..Self::new()
+ };
+ // Bump the initial VReg index so that VRegs from the old Assembler remain usable
+ asm.live_ranges.resize(old_asm.live_ranges.len(), LiveRange { start: None, end: None });
+ asm
+ }
+
pub fn expect_leaf_ccall(&mut self, stack_size: usize) {
self.leaf_ccall_stack_size = Some(stack_size);
}
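
The `live_ranges.resize` in `new_with_asm` carries the information the old `num_vregs` argument used to pass explicitly: VReg indices double as indices into `live_ranges`, so pre-sizing the next pass's table keeps VRegs minted by the previous Assembler in range while new VRegs get fresh, non-overlapping indices. A self-contained sketch of that mechanism follows; the `new_vreg` helper is hypothetical and stands in for however the real LIR hands out VReg indices.

#[derive(Clone, Copy)]
struct LiveRange { start: Option<usize>, end: Option<usize> }

struct Assembler {
    live_ranges: Vec<LiveRange>,
    label_names: Vec<String>,
    accept_scratch_reg: bool,
}

impl Assembler {
    fn new() -> Self {
        Self { live_ranges: Vec::new(), label_names: Vec::new(), accept_scratch_reg: false }
    }

    fn new_with_asm(old_asm: &Assembler) -> Self {
        let mut asm = Self {
            label_names: old_asm.label_names.clone(),
            accept_scratch_reg: old_asm.accept_scratch_reg,
            ..Self::new()
        };
        // Pre-size the table so indices minted by old_asm stay in range.
        asm.live_ranges.resize(old_asm.live_ranges.len(), LiveRange { start: None, end: None });
        asm
    }

    /// Hypothetical helper: each VReg is identified by its index in live_ranges.
    fn new_vreg(&mut self) -> usize {
        self.live_ranges.push(LiveRange { start: None, end: None });
        self.live_ranges.len() - 1
    }
}

fn main() {
    let mut pass1 = Assembler::new();
    let v0 = pass1.new_vreg();
    let v1 = pass1.new_vreg();

    // Next compiler pass: VRegs 0 and 1 remain valid indices, and the next
    // VReg starts after them instead of colliding at 0.
    let mut pass2 = Assembler::new_with_asm(&pass1);
    let v2 = pass2.new_vreg();
    assert_eq!((v0, v1, v2), (0, 1, 2));
}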
@@ -1357,9 +1367,9 @@ impl Assembler
let mut saved_regs: Vec<(Reg, usize)> = vec![];
// live_ranges is indexed by original `index` given by the iterator.
+ let mut asm = Assembler::new_with_asm(&self);
let live_ranges: Vec<LiveRange> = take(&mut self.live_ranges);
let mut iterator = self.insns.into_iter().enumerate().peekable();
- let mut asm = Assembler::new_with_label_names(take(&mut self.label_names), live_ranges.len(), self.accept_scratch_reg);
while let Some((index, mut insn)) = iterator.next() {
let before_ccall = match (&insn, iterator.peek().map(|(_, insn)| insn)) {
diff --git a/zjit/src/backend/x86_64/mod.rs b/zjit/src/backend/x86_64/mod.rs
index 4fdbfe3f97..e7e2f796f1 100644
--- a/zjit/src/backend/x86_64/mod.rs
+++ b/zjit/src/backend/x86_64/mod.rs
@@ -102,7 +102,7 @@ const SCRATCH_OPND: Opnd = Opnd::Reg(R11_REG);
impl Assembler {
/// Return an Assembler with scratch registers disabled in the backend, and a scratch register.
pub fn new_with_scratch_reg() -> (Self, Opnd) {
- (Self::new_with_label_names(Vec::default(), 0, true), SCRATCH_OPND)
+ (Self::new_with_accept_scratch_reg(true), SCRATCH_OPND)
}
/// Return true if opnd contains a scratch reg
@@ -137,9 +137,9 @@ impl Assembler {
/// Split IR instructions for the x86 platform
fn x86_split(mut self) -> Assembler
{
+ let mut asm = Assembler::new_with_asm(&self);
let live_ranges: Vec<LiveRange> = take(&mut self.live_ranges);
let mut iterator = self.insns.into_iter().enumerate().peekable();
- let mut asm = Assembler::new_with_label_names(take(&mut self.label_names), live_ranges.len(), self.accept_scratch_reg);
while let Some((index, mut insn)) = iterator.next() {
let is_load = matches!(insn, Insn::Load { .. } | Insn::LoadInto { .. });
@@ -390,7 +390,7 @@ impl Assembler {
/// for VRegs, most splits should happen in [`Self::x86_split`]. However, some instructions
/// need to be split with registers after `alloc_regs`, e.g. for `compile_side_exits`, so
/// this splits them and uses scratch registers for it.
- pub fn x86_split_with_scratch_reg(mut self) -> Assembler {
+ pub fn x86_split_with_scratch_reg(self) -> Assembler {
/// For some instructions, we want to be able to lower a 64-bit operand
/// without requiring more registers to be available in the register
/// allocator. So we just use the SCRATCH_OPND register temporarily to hold
@@ -419,8 +419,9 @@ impl Assembler {
}
}
+ let mut asm = Assembler::new_with_asm(&self);
+ asm.accept_scratch_reg = true;
let mut iterator = self.insns.into_iter().enumerate().peekable();
- let mut asm = Assembler::new_with_label_names(take(&mut self.label_names), self.live_ranges.len(), true);
while let Some((_, mut insn)) = iterator.next() {
match &mut insn {
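
Taken together, both backends' `*_split_with_scratch_reg` passes now share one shape: derive the next Assembler from the old one with `new_with_asm`, enable scratch registers on it, then move the instructions across, rewriting the ones that need a scratch temporary. The following is a schematic, self-contained sketch of that flow; `Insn`, the rewrite step, and the `main` function are placeholders, while the constructor and flag handling mirror the hunks above.

#[derive(Clone, Copy)]
struct LiveRange { start: Option<usize>, end: Option<usize> }

enum Insn { Add, Ret } // placeholder instruction set

struct Assembler {
    insns: Vec<Insn>,
    live_ranges: Vec<LiveRange>,
    label_names: Vec<String>,
    accept_scratch_reg: bool,
}

impl Assembler {
    fn new_with_asm(old_asm: &Assembler) -> Self {
        let mut asm = Self {
            insns: Vec::new(),
            live_ranges: Vec::new(),
            label_names: old_asm.label_names.clone(),
            accept_scratch_reg: old_asm.accept_scratch_reg,
        };
        asm.live_ranges.resize(old_asm.live_ranges.len(), LiveRange { start: None, end: None });
        asm
    }

    /// Schematic of the arm64/x86_64 *_split_with_scratch_reg passes after this commit.
    fn split_with_scratch_reg(self) -> Assembler {
        let mut asm = Assembler::new_with_asm(&self);
        asm.accept_scratch_reg = true; // this pass may hand out the scratch register
        for (_index, insn) in self.insns.into_iter().enumerate() {
            // A real pass would match on the instruction here and rewrite operands
            // that need a temporary, e.g. routing a memory-to-memory move through
            // the scratch register, before pushing it onto the new Assembler.
            asm.insns.push(insn);
        }
        asm
    }
}

fn main() {
    let asm = Assembler {
        insns: vec![Insn::Add, Insn::Ret],
        live_ranges: Vec::new(),
        label_names: Vec::new(),
        accept_scratch_reg: false,
    };
    let split = asm.split_with_scratch_reg();
    assert!(split.accept_scratch_reg);
    assert_eq!(split.insns.len(), 2);
}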