-rw-r--r--  .github/workflows/zjit-macos.yml   |  2
-rw-r--r--  .github/workflows/zjit-ubuntu.yml  |  2
-rw-r--r--  test/ruby/test_zjit.rb             | 12
-rw-r--r--  zjit/src/backend/arm64/mod.rs      |  2
-rw-r--r--  zjit/src/backend/lir.rs            | 28
-rw-r--r--  zjit/src/backend/x86_64/mod.rs     |  2
6 files changed, 30 insertions, 18 deletions
diff --git a/.github/workflows/zjit-macos.yml b/.github/workflows/zjit-macos.yml
index 7060d6a252..5260c3ecb1 100644
--- a/.github/workflows/zjit-macos.yml
+++ b/.github/workflows/zjit-macos.yml
@@ -128,6 +128,7 @@ jobs:
../src/bootstraptest/test_massign.rb \
../src/bootstraptest/test_method.rb \
../src/bootstraptest/test_objectspace.rb \
+ ../src/bootstraptest/test_ractor.rb \
../src/bootstraptest/test_string.rb \
../src/bootstraptest/test_struct.rb \
../src/bootstraptest/test_syntax.rb \
@@ -138,7 +139,6 @@ jobs:
# ../src/bootstraptest/test_eval.rb \
# ../src/bootstraptest/test_insns.rb \
# ../src/bootstraptest/test_proc.rb \
- # ../src/bootstraptest/test_ractor.rb \
# ../src/bootstraptest/test_yjit.rb \
if: ${{ matrix.test_task == 'btest' }}
diff --git a/.github/workflows/zjit-ubuntu.yml b/.github/workflows/zjit-ubuntu.yml
index 443c9c71df..a6a502057e 100644
--- a/.github/workflows/zjit-ubuntu.yml
+++ b/.github/workflows/zjit-ubuntu.yml
@@ -150,6 +150,7 @@ jobs:
../src/bootstraptest/test_massign.rb \
../src/bootstraptest/test_method.rb \
../src/bootstraptest/test_objectspace.rb \
+ ../src/bootstraptest/test_ractor.rb \
../src/bootstraptest/test_string.rb \
../src/bootstraptest/test_struct.rb \
../src/bootstraptest/test_syntax.rb \
@@ -160,7 +161,6 @@ jobs:
# ../src/bootstraptest/test_eval.rb \
# ../src/bootstraptest/test_insns.rb \
# ../src/bootstraptest/test_proc.rb \
- # ../src/bootstraptest/test_ractor.rb \
# ../src/bootstraptest/test_yjit.rb \
if: ${{ matrix.test_task == 'btest' }}
diff --git a/test/ruby/test_zjit.rb b/test/ruby/test_zjit.rb
index 0c73e6b456..7da5d96d35 100644
--- a/test/ruby/test_zjit.rb
+++ b/test/ruby/test_zjit.rb
@@ -713,8 +713,7 @@ class TestZJIT < Test::Unit::TestCase
end
def test_spilled_method_args
- omit 'CCall with spilled arguments is not implemented yet'
- assert_compiles '55', %q{
+ assert_runs '55', %q{
def foo(n1, n2, n3, n4, n5, n6, n7, n8, n9, n10)
n1 + n2 + n3 + n4 + n5 + n6 + n7 + n8 + n9 + n10
end
@@ -906,10 +905,17 @@ class TestZJIT < Test::Unit::TestCase
# Assert that every method call in `test_script` can be compiled by ZJIT
# at a given call_threshold
def assert_compiles(expected, test_script, insns: [], **opts)
+ assert_runs(expected, test_script, insns:, assert_compiles: true, **opts)
+ end
+
+ # Assert that `test_script` runs successfully with ZJIT enabled.
+ # Unlike `assert_compiles`, `assert_runs(assert_compiles: false)`
+ # allows ZJIT to skip compiling methods.
+ def assert_runs(expected, test_script, insns: [], assert_compiles: false, **opts)
pipe_fd = 3
script = <<~RUBY
- ret_val = (_test_proc = -> { RubyVM::ZJIT.assert_compiles; #{test_script.lstrip} }).call
+ ret_val = (_test_proc = -> { #{('RubyVM::ZJIT.assert_compiles; ' if assert_compiles)}#{test_script.lstrip} }).call
result = {
ret_val:,
#{ unless insns.empty?
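The new assert_runs helper keeps the existing assert_compiles behaviour (by forwarding with assert_compiles: true) while letting tests exercise code that ZJIT may decline to compile. Below is a minimal sketch of how a test might call it, modeled on test_spilled_method_args above; the test name is hypothetical and the snippet assumes it sits inside TestZJIT, where assert_runs is defined.

    # Hypothetical test using the new helper: the script must print 55,
    # but ZJIT is allowed to skip compiling foo if it hits an unsupported case.
    def test_spills_without_forcing_compilation
      assert_runs '55', %q{
        def foo(n1, n2, n3, n4, n5, n6, n7, n8, n9, n10)
          n1 + n2 + n3 + n4 + n5 + n6 + n7 + n8 + n9 + n10
        end
        foo(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
      }
    end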
diff --git a/zjit/src/backend/arm64/mod.rs b/zjit/src/backend/arm64/mod.rs
index 3c18a57dd0..d44c482fe9 100644
--- a/zjit/src/backend/arm64/mod.rs
+++ b/zjit/src/backend/arm64/mod.rs
@@ -1297,7 +1297,7 @@ impl Assembler
/// Optimize and compile the stored instructions
pub fn compile_with_regs(self, cb: &mut CodeBlock, regs: Vec<Reg>) -> Option<(CodePtr, Vec<u32>)> {
let asm = self.arm64_split();
- let mut asm = asm.alloc_regs(regs);
+ let mut asm = asm.alloc_regs(regs)?;
asm.compile_side_exits()?;
// Create label instances in the code block
diff --git a/zjit/src/backend/lir.rs b/zjit/src/backend/lir.rs
index 4cc093ed5e..9ad36dcb44 100644
--- a/zjit/src/backend/lir.rs
+++ b/zjit/src/backend/lir.rs
@@ -3,12 +3,11 @@ use std::fmt;
use std::mem::take;
use crate::codegen::local_size_and_idx_to_ep_offset;
use crate::cruby::{Qundef, RUBY_OFFSET_CFP_PC, RUBY_OFFSET_CFP_SP, SIZEOF_VALUE_I32};
+use crate::options::{debug, get_option};
use crate::{cruby::VALUE};
use crate::backend::current::*;
use crate::virtualmem::CodePtr;
use crate::asm::{CodeBlock, Label};
-#[cfg(feature = "disasm")]
-use crate::options::*;
pub const EC: Opnd = _EC;
pub const CFP: Opnd = _CFP;
@@ -1519,7 +1518,7 @@ impl Assembler
/// Sets the out field on the various instructions that require allocated
/// registers because their output is used as the operand on a subsequent
/// instruction. This is our implementation of the linear scan algorithm.
- pub(super) fn alloc_regs(mut self, regs: Vec<Reg>) -> Assembler {
+ pub(super) fn alloc_regs(mut self, regs: Vec<Reg>) -> Option<Assembler> {
// Dump live registers for register spill debugging.
fn dump_live_regs(insns: Vec<Insn>, live_ranges: Vec<LiveRange>, num_regs: usize, spill_index: usize) {
// Convert live_ranges to live_regs: the number of live registers at each index
@@ -1566,8 +1565,12 @@ impl Assembler
// If C_RET_REG is in use, move it to another register.
// This must happen before last-use registers are deallocated.
if let Some(vreg_idx) = pool.vreg_for(&C_RET_REG) {
- let new_reg = pool.alloc_reg(vreg_idx)
- .expect("spilling VReg is not implemented yet, can't evacuate C_RET_REG on CCall"); // TODO: support spilling VReg
+ let new_reg = if let Some(new_reg) = pool.alloc_reg(vreg_idx) {
+ new_reg
+ } else {
+ debug!("spilling VReg is not implemented yet, can't evacuate C_RET_REG on CCall");
+ return None;
+ };
asm.mov(Opnd::Reg(new_reg), C_RET_OPND);
pool.dealloc_reg(&C_RET_REG);
reg_mapping[vreg_idx] = Some(new_reg);
@@ -1660,13 +1663,16 @@ impl Assembler
_ => match pool.alloc_reg(vreg_idx.unwrap()) {
Some(reg) => Some(reg),
None => {
- let mut insns = asm.insns;
- insns.push(insn);
- while let Some((_, insn)) = iterator.next() {
+ if get_option!(debug) {
+ let mut insns = asm.insns;
insns.push(insn);
+ while let Some((_, insn)) = iterator.next() {
+ insns.push(insn);
+ }
+ dump_live_regs(insns, live_ranges, regs.len(), index);
}
- dump_live_regs(insns, live_ranges, regs.len(), index);
- unreachable!("Register spill not supported");
+ debug!("Register spill not supported");
+ return None;
}
}
};
@@ -1737,7 +1743,7 @@ impl Assembler
}
assert!(pool.is_empty(), "Expected all registers to be returned to the pool");
- asm
+ Some(asm)
}
/// Compile the instructions down to machine code.
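With this change, alloc_regs returns Option<Assembler> and reports an unsupported register spill with debug! and None instead of panicking, so compile_with_regs can abandon compilation cleanly. The following is a minimal, self-contained sketch of that bail-out pattern; Reg, RegPool, and assign_regs are illustrative stand-ins, not ZJIT's actual types.

    // Sketch of returning None when the register pool runs dry,
    // instead of hitting unreachable!() as the old code did.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Reg(u8);

    struct RegPool {
        free: Vec<Reg>,
    }

    impl RegPool {
        fn alloc_reg(&mut self) -> Option<Reg> {
            self.free.pop()
        }
    }

    /// Assign a physical register to each virtual register, bailing out
    /// (rather than panicking) when a spill would be required.
    fn assign_regs(pool: &mut RegPool, vregs: usize) -> Option<Vec<Reg>> {
        let mut mapping = Vec::with_capacity(vregs);
        for _ in 0..vregs {
            match pool.alloc_reg() {
                Some(reg) => mapping.push(reg),
                None => {
                    // In ZJIT this is where debug!("Register spill not supported") fires.
                    return None;
                }
            }
        }
        Some(mapping)
    }

    fn main() {
        let mut pool = RegPool { free: vec![Reg(0), Reg(1)] };
        // Three virtual registers but only two physical ones: allocation bails out.
        assert_eq!(assign_regs(&mut pool, 3), None);
    }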
diff --git a/zjit/src/backend/x86_64/mod.rs b/zjit/src/backend/x86_64/mod.rs
index 793a096365..80fd7c714c 100644
--- a/zjit/src/backend/x86_64/mod.rs
+++ b/zjit/src/backend/x86_64/mod.rs
@@ -836,7 +836,7 @@ impl Assembler
/// Optimize and compile the stored instructions
pub fn compile_with_regs(self, cb: &mut CodeBlock, regs: Vec<Reg>) -> Option<(CodePtr, Vec<u32>)> {
let asm = self.x86_split();
- let mut asm = asm.alloc_regs(regs);
+ let mut asm = asm.alloc_regs(regs)?;
asm.compile_side_exits()?;
// Create label instances in the code block
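On the caller side, compile_with_regs already returned Option<(CodePtr, Vec<u32>)>, so the new alloc_regs(regs)? simply folds register-allocation failure into the existing None path. Here is a rough sketch of how a caller might consume that Option, assuming the fallback is to leave the method to the interpreter; gen_function and the stub below are assumptions for illustration, not ZJIT's API.

    // Illustrative only: consuming the Option returned by a compile entry point.
    struct CodePtr(usize);

    fn compile_with_regs_stub() -> Option<(CodePtr, Vec<u32>)> {
        // Stand-in for Assembler::compile_with_regs; returns None when
        // register allocation bails out (e.g. an unsupported spill).
        None
    }

    fn gen_function() -> Option<CodePtr> {
        // `?` propagates the bail-out so the caller can skip this method
        // instead of aborting the process.
        let (start_ptr, _gc_offsets) = compile_with_regs_stub()?;
        Some(start_ptr)
    }

    fn main() {
        match gen_function() {
            Some(ptr) => println!("compiled at {:#x}", ptr.0),
            None => println!("compilation skipped; falling back to the interpreter"),
        }
    }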