-rw-r--r--  yjit/src/backend/arm64/mod.rs   |  65
-rw-r--r--  yjit/src/backend/ir.rs          |  21
-rw-r--r--  yjit/src/backend/x86_64/mod.rs  |  47
-rw-r--r--  yjit/src/codegen.rs             | 118
-rw-r--r--  yjit/src/core.rs                |  86
-rw-r--r--  yjit/src/invariants.rs          |  10
-rw-r--r--  yjit/src/utils.rs               |   4
7 files changed, 175 insertions, 176 deletions
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index 23bcb18a06..93c441e228 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -188,6 +188,12 @@ fn emit_load_value(cb: &mut CodeBlock, rd: A64Opnd, value: u64) -> usize {
/// These are caller-saved registers.
pub static TEMP_REGS: [Reg; 5] = [X1_REG, X9_REG, X10_REG, X14_REG, X15_REG];
+#[derive(Debug, PartialEq)]
+enum EmitError {
+ RetryOnNextPage,
+ OutOfMemory,
+}
+
impl Assembler
{
// Special scratch registers for intermediate processing.
@@ -693,7 +699,7 @@ impl Assembler
/// Emit platform-specific machine code
/// Returns a list of GC offsets. Can return failure to signal caller to retry.
- fn arm64_emit(&mut self, cb: &mut CodeBlock, ocb: &mut Option<&mut OutlinedCb>) -> Result<Vec<u32>, ()> {
+ fn arm64_emit(&mut self, cb: &mut CodeBlock, ocb: &mut Option<&mut OutlinedCb>) -> Result<Vec<u32>, EmitError> {
/// Determine how many instructions it will take to represent moving
/// this value into a register. Note that the return value of this
/// function must correspond to how many instructions are used to
@@ -806,12 +812,13 @@ impl Assembler
target: Target,
asm: &mut Assembler,
ocb: &mut Option<&mut OutlinedCb>,
- ) -> Target {
+ ) -> Result<Target, EmitError> {
if let Target::SideExit { counter, context } = target {
- let side_exit = asm.get_side_exit(&context.unwrap(), Some(counter), ocb.as_mut().unwrap());
- Target::SideExitPtr(side_exit)
+ let side_exit = asm.get_side_exit(&context.unwrap(), Some(counter), ocb.as_mut().unwrap())
+ .ok_or(EmitError::OutOfMemory)?;
+ Ok(Target::SideExitPtr(side_exit))
} else {
- target
+ Ok(target)
}
}
@@ -1092,7 +1099,7 @@ impl Assembler
br(cb, opnd.into());
},
Insn::Jmp(target) => {
- match compile_side_exit(*target, self, ocb) {
+ match compile_side_exit(*target, self, ocb)? {
Target::CodePtr(dst_ptr) => {
emit_jmp_ptr(cb, dst_ptr, true);
},
@@ -1116,25 +1123,25 @@ impl Assembler
};
},
Insn::Je(target) | Insn::Jz(target) => {
- emit_conditional_jump::<{Condition::EQ}>(cb, compile_side_exit(*target, self, ocb));
+ emit_conditional_jump::<{Condition::EQ}>(cb, compile_side_exit(*target, self, ocb)?);
},
Insn::Jne(target) | Insn::Jnz(target) | Insn::JoMul(target) => {
- emit_conditional_jump::<{Condition::NE}>(cb, compile_side_exit(*target, self, ocb));
+ emit_conditional_jump::<{Condition::NE}>(cb, compile_side_exit(*target, self, ocb)?);
},
Insn::Jl(target) => {
- emit_conditional_jump::<{Condition::LT}>(cb, compile_side_exit(*target, self, ocb));
+ emit_conditional_jump::<{Condition::LT}>(cb, compile_side_exit(*target, self, ocb)?);
},
Insn::Jg(target) => {
- emit_conditional_jump::<{Condition::GT}>(cb, compile_side_exit(*target, self, ocb));
+ emit_conditional_jump::<{Condition::GT}>(cb, compile_side_exit(*target, self, ocb)?);
},
Insn::Jbe(target) => {
- emit_conditional_jump::<{Condition::LS}>(cb, compile_side_exit(*target, self, ocb));
+ emit_conditional_jump::<{Condition::LS}>(cb, compile_side_exit(*target, self, ocb)?);
},
Insn::Jb(target) => {
- emit_conditional_jump::<{Condition::CC}>(cb, compile_side_exit(*target, self, ocb));
+ emit_conditional_jump::<{Condition::CC}>(cb, compile_side_exit(*target, self, ocb)?);
},
Insn::Jo(target) => {
- emit_conditional_jump::<{Condition::VS}>(cb, compile_side_exit(*target, self, ocb));
+ emit_conditional_jump::<{Condition::VS}>(cb, compile_side_exit(*target, self, ocb)?);
},
Insn::IncrCounter { mem, value } => {
let label = cb.new_label("incr_counter_loop".to_string());
@@ -1192,7 +1199,7 @@ impl Assembler
// We don't want label references to cross page boundaries. Signal caller for
// retry.
if !self.label_names.is_empty() {
- return Err(());
+ return Err(EmitError::RetryOnNextPage);
}
} else {
insn_idx += 1;
@@ -1204,7 +1211,7 @@ impl Assembler
}
/// Optimize and compile the stored instructions
- pub fn compile_with_regs(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>, regs: Vec<Reg>) -> Vec<u32> {
+ pub fn compile_with_regs(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>, regs: Vec<Reg>) -> Option<(CodePtr, Vec<u32>)> {
let asm = self.arm64_split();
let mut asm = asm.alloc_regs(regs);
@@ -1217,19 +1224,25 @@ impl Assembler
let start_ptr = cb.get_write_ptr();
let starting_label_state = cb.get_label_state();
let mut ocb = ocb; // for &mut
- let gc_offsets = asm.arm64_emit(cb, &mut ocb)
- .unwrap_or_else(|_err| {
+ let emit_result = match asm.arm64_emit(cb, &mut ocb) {
+ Err(EmitError::RetryOnNextPage) => {
// we want to lower jumps to labels to b.cond instructions, which have a 1 MiB
// range limit. We can easily exceed the limit in case the jump straddles two pages.
// In this case, we retry with a fresh page.
cb.set_label_state(starting_label_state);
cb.next_page(start_ptr, emit_jmp_ptr_with_invalidation);
- asm.arm64_emit(cb, &mut ocb).expect("should not fail when writing to a fresh code page")
- });
+ let result = asm.arm64_emit(cb, &mut ocb);
+ assert_ne!(
+ Err(EmitError::RetryOnNextPage),
+ result,
+ "should not fail when writing to a fresh code page"
+ );
+ result
+ }
+ result => result
+ };
- if cb.has_dropped_bytes() {
- cb.clear_labels();
- } else {
+ if let (Ok(gc_offsets), false) = (emit_result, cb.has_dropped_bytes()) {
cb.link_labels();
// Invalidate icache for newly written out region so we don't run stale code.
@@ -1241,9 +1254,13 @@ impl Assembler
unsafe { rb_yjit_icache_invalidate(start as _, end as _) };
}
});
- }
- gc_offsets
+ Some((start_ptr, gc_offsets))
+ } else {
+ cb.clear_labels();
+
+ None
+ }
}
}
diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
index 5de65f8ecb..960397ac7c 100644
--- a/yjit/src/backend/ir.rs
+++ b/yjit/src/backend/ir.rs
@@ -1093,11 +1093,11 @@ impl Assembler
}
/// Get a cached side exit, wrapping a counter if specified
- pub fn get_side_exit(&mut self, side_exit_context: &SideExitContext, counter: Option<Counter>, ocb: &mut OutlinedCb) -> CodePtr {
+ pub fn get_side_exit(&mut self, side_exit_context: &SideExitContext, counter: Option<Counter>, ocb: &mut OutlinedCb) -> Option<CodePtr> {
// Get a cached side exit
let side_exit = match self.side_exits.get(&side_exit_context) {
None => {
- let exit_code = gen_outlined_exit(side_exit_context.pc, &side_exit_context.get_ctx(), ocb);
+ let exit_code = gen_outlined_exit(side_exit_context.pc, &side_exit_context.get_ctx(), ocb)?;
self.side_exits.insert(side_exit_context.clone(), exit_code);
exit_code
}
@@ -1509,16 +1509,16 @@ impl Assembler
asm
}
- /// Compile the instructions down to machine code
- /// NOTE: should compile return a list of block labels to enable
- /// compiling multiple blocks at a time?
- pub fn compile(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>) -> Vec<u32>
+ /// Compile the instructions down to machine code.
+ /// Can fail due to lack of code memory and inopportune code placement, among other reasons.
+ #[must_use]
+ pub fn compile(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>) -> Option<(CodePtr, Vec<u32>)>
{
#[cfg(feature = "disasm")]
let start_addr = cb.get_write_ptr();
let alloc_regs = Self::get_alloc_regs();
- let gc_offsets = self.compile_with_regs(cb, ocb, alloc_regs);
+ let ret = self.compile_with_regs(cb, ocb, alloc_regs);
#[cfg(feature = "disasm")]
if let Some(dump_disasm) = get_option_ref!(dump_disasm) {
@@ -1526,15 +1526,16 @@ impl Assembler
let end_addr = cb.get_write_ptr();
dump_disasm_addr_range(cb, start_addr, end_addr, dump_disasm)
}
- gc_offsets
+ ret
}
/// Compile with a limited number of registers. Used only for unit tests.
- pub fn compile_with_num_regs(self, cb: &mut CodeBlock, num_regs: usize) -> Vec<u32>
+ #[cfg(test)]
+ pub fn compile_with_num_regs(self, cb: &mut CodeBlock, num_regs: usize) -> (CodePtr, Vec<u32>)
{
let mut alloc_regs = Self::get_alloc_regs();
let alloc_regs = alloc_regs.drain(0..num_regs).collect();
- self.compile_with_regs(cb, None, alloc_regs)
+ self.compile_with_regs(cb, None, alloc_regs).unwrap()
}
/// Consume the assembler by creating a new draining iterator.
diff --git a/yjit/src/backend/x86_64/mod.rs b/yjit/src/backend/x86_64/mod.rs
index fe5f821372..a188b91c07 100644
--- a/yjit/src/backend/x86_64/mod.rs
+++ b/yjit/src/backend/x86_64/mod.rs
@@ -6,7 +6,7 @@ use std::mem::take;
use crate::asm::*;
use crate::asm::x86_64::*;
-use crate::codegen::{JITState};
+use crate::codegen::{JITState, CodePtr};
use crate::core::Context;
use crate::cruby::*;
use crate::backend::ir::*;
@@ -386,7 +386,7 @@ impl Assembler
}
/// Emit platform-specific machine code
- pub fn x86_emit(&mut self, cb: &mut CodeBlock, ocb: &mut Option<&mut OutlinedCb>) -> Vec<u32>
+ pub fn x86_emit(&mut self, cb: &mut CodeBlock, ocb: &mut Option<&mut OutlinedCb>) -> Option<Vec<u32>>
{
/// For some instructions, we want to be able to lower a 64-bit operand
/// without requiring more registers to be available in the register
@@ -421,12 +421,12 @@ impl Assembler
target: Target,
asm: &mut Assembler,
ocb: &mut Option<&mut OutlinedCb>,
- ) -> Target {
+ ) -> Option<Target> {
if let Target::SideExit { counter, context } = target {
let side_exit = asm.get_side_exit(&context.unwrap(), Some(counter), ocb.as_mut().unwrap());
- Target::SideExitPtr(side_exit)
+ Some(Target::SideExitPtr(side_exit?))
} else {
- target
+ Some(target)
}
}
@@ -682,7 +682,7 @@ impl Assembler
// Conditional jump to a label
Insn::Jmp(target) => {
- match compile_side_exit(*target, self, ocb) {
+ match compile_side_exit(*target, self, ocb)? {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jmp_ptr(cb, code_ptr),
Target::Label(label_idx) => jmp_label(cb, label_idx),
Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
@@ -690,7 +690,7 @@ impl Assembler
}
Insn::Je(target) => {
- match compile_side_exit(*target, self, ocb) {
+ match compile_side_exit(*target, self, ocb)? {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => je_ptr(cb, code_ptr),
Target::Label(label_idx) => je_label(cb, label_idx),
Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
@@ -698,7 +698,7 @@ impl Assembler
}
Insn::Jne(target) => {
- match compile_side_exit(*target, self, ocb) {
+ match compile_side_exit(*target, self, ocb)? {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jne_ptr(cb, code_ptr),
Target::Label(label_idx) => jne_label(cb, label_idx),
Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
@@ -706,7 +706,7 @@ impl Assembler
}
Insn::Jl(target) => {
- match compile_side_exit(*target, self, ocb) {
+ match compile_side_exit(*target, self, ocb)? {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jl_ptr(cb, code_ptr),
Target::Label(label_idx) => jl_label(cb, label_idx),
Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
@@ -714,7 +714,7 @@ impl Assembler
},
Insn::Jg(target) => {
- match compile_side_exit(*target, self, ocb) {
+ match compile_side_exit(*target, self, ocb)? {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jg_ptr(cb, code_ptr),
Target::Label(label_idx) => jg_label(cb, label_idx),
Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
@@ -722,7 +722,7 @@ impl Assembler
},
Insn::Jbe(target) => {
- match compile_side_exit(*target, self, ocb) {
+ match compile_side_exit(*target, self, ocb)? {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jbe_ptr(cb, code_ptr),
Target::Label(label_idx) => jbe_label(cb, label_idx),
Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
@@ -730,7 +730,7 @@ impl Assembler
},
Insn::Jb(target) => {
- match compile_side_exit(*target, self, ocb) {
+ match compile_side_exit(*target, self, ocb)? {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jb_ptr(cb, code_ptr),
Target::Label(label_idx) => jb_label(cb, label_idx),
Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
@@ -738,7 +738,7 @@ impl Assembler
},
Insn::Jz(target) => {
- match compile_side_exit(*target, self, ocb) {
+ match compile_side_exit(*target, self, ocb)? {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jz_ptr(cb, code_ptr),
Target::Label(label_idx) => jz_label(cb, label_idx),
Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
@@ -746,7 +746,7 @@ impl Assembler
}
Insn::Jnz(target) => {
- match compile_side_exit(*target, self, ocb) {
+ match compile_side_exit(*target, self, ocb)? {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jnz_ptr(cb, code_ptr),
Target::Label(label_idx) => jnz_label(cb, label_idx),
Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
@@ -755,7 +755,7 @@ impl Assembler
Insn::Jo(target) |
Insn::JoMul(target) => {
- match compile_side_exit(*target, self, ocb) {
+ match compile_side_exit(*target, self, ocb)? {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jo_ptr(cb, code_ptr),
Target::Label(label_idx) => jo_label(cb, label_idx),
Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
@@ -815,11 +815,11 @@ impl Assembler
}
}
- gc_offsets
+ Some(gc_offsets)
}
/// Optimize and compile the stored instructions
- pub fn compile_with_regs(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>, regs: Vec<Reg>) -> Vec<u32> {
+ pub fn compile_with_regs(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>, regs: Vec<Reg>) -> Option<(CodePtr, Vec<u32>)> {
let asm = self.x86_split();
let mut asm = asm.alloc_regs(regs);
@@ -830,15 +830,18 @@ impl Assembler
}
let mut ocb = ocb; // for &mut
+ let start_ptr = cb.get_write_ptr();
let gc_offsets = asm.x86_emit(cb, &mut ocb);
- if cb.has_dropped_bytes() {
- cb.clear_labels();
- } else {
+ if let (Some(gc_offsets), false) = (gc_offsets, cb.has_dropped_bytes()) {
cb.link_labels();
- }
- gc_offsets
+ Some((start_ptr, gc_offsets))
+ } else {
+ cb.clear_labels();
+
+ None
+ }
}
}
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index 880338f88d..39dc5f999a 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -219,18 +219,22 @@ impl JITState {
}
}
- pub fn assume_method_lookup_stable(&mut self, asm: &mut Assembler, ocb: &mut OutlinedCb, cme: CmePtr) {
- jit_ensure_block_entry_exit(self, asm, ocb);
+ pub fn assume_method_lookup_stable(&mut self, asm: &mut Assembler, ocb: &mut OutlinedCb, cme: CmePtr) -> Option<()> {
+ jit_ensure_block_entry_exit(self, asm, ocb)?;
self.method_lookup_assumptions.push(cme);
+
+ Some(())
}
fn get_cfp(&self) -> *mut rb_control_frame_struct {
unsafe { get_ec_cfp(self.ec) }
}
- pub fn assume_stable_constant_names(&mut self, asm: &mut Assembler, ocb: &mut OutlinedCb, id: *const ID) {
- jit_ensure_block_entry_exit(self, asm, ocb);
+ pub fn assume_stable_constant_names(&mut self, asm: &mut Assembler, ocb: &mut OutlinedCb, id: *const ID) -> Option<()> {
+ jit_ensure_block_entry_exit(self, asm, ocb)?;
self.stable_constant_names_assumption = Some(id);
+
+ Some(())
}
pub fn queue_outgoing_branch(&mut self, branch: PendingBranchRef) {
@@ -454,9 +458,8 @@ fn verify_ctx(jit: &JITState, ctx: &Context) {
// to the interpreter when it cannot service a stub by generating new code.
// Before coming here, branch_stub_hit() takes care of fully reconstructing
// interpreter state.
-fn gen_stub_exit(ocb: &mut OutlinedCb) -> CodePtr {
+fn gen_stub_exit(ocb: &mut OutlinedCb) -> Option<CodePtr> {
let ocb = ocb.unwrap();
- let code_ptr = ocb.get_write_ptr();
let mut asm = Assembler::new();
gen_counter_incr(&mut asm, Counter::exit_from_branch_stub);
@@ -470,9 +473,7 @@ fn gen_stub_exit(ocb: &mut OutlinedCb) -> CodePtr {
asm.cret(Qundef.into());
- asm.compile(ocb, None);
-
- code_ptr
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
}
/// Generate an exit to return to the interpreter
@@ -546,34 +547,28 @@ fn gen_exit(exit_pc: *mut VALUE, asm: &mut Assembler) {
/// moment, so there is one unique side exit for each context. Note that
/// it's incorrect to jump to the side exit after any ctx stack push operations
/// since they change the logic required for reconstructing interpreter state.
-pub fn gen_outlined_exit(exit_pc: *mut VALUE, ctx: &Context, ocb: &mut OutlinedCb) -> CodePtr {
+pub fn gen_outlined_exit(exit_pc: *mut VALUE, ctx: &Context, ocb: &mut OutlinedCb) -> Option<CodePtr> {
let mut cb = ocb.unwrap();
- let exit_code = cb.get_write_ptr();
let mut asm = Assembler::new();
asm.ctx = ctx.clone();
asm.set_reg_temps(ctx.get_reg_temps());
gen_exit(exit_pc, &mut asm);
- asm.compile(&mut cb, None);
-
- exit_code
+ asm.compile(&mut cb, None).map(|(code_ptr, _)| code_ptr)
}
/// Get a side exit. Increment a counter in it if --yjit-stats is enabled.
-pub fn gen_counted_exit(side_exit: CodePtr, ocb: &mut OutlinedCb, counter: Option<Counter>) -> CodePtr {
+pub fn gen_counted_exit(side_exit: CodePtr, ocb: &mut OutlinedCb, counter: Option<Counter>) -> Option<CodePtr> {
// The counter is only incremented when stats are enabled
if !get_option!(gen_stats) {
- return side_exit;
+ return Some(side_exit);
}
let counter = match counter {
Some(counter) => counter,
- None => return side_exit,
+ None => return Some(side_exit),
};
- let ocb = ocb.unwrap();
- let code_ptr = ocb.get_write_ptr();
-
let mut asm = Assembler::new();
// Load the pointer into a register
@@ -586,16 +581,17 @@ pub fn gen_counted_exit(side_exit: CodePtr, ocb: &mut OutlinedCb, counter: Optio
// Jump to the existing side exit
asm.jmp(Target::CodePtr(side_exit));
- asm.compile(ocb, None);
- code_ptr
+ let ocb = ocb.unwrap();
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
}
// Ensure that there is an exit for the start of the block being compiled.
// Block invalidation uses this exit.
-pub fn jit_ensure_block_entry_exit(jit: &mut JITState, asm: &mut Assembler, ocb: &mut OutlinedCb) {
+#[must_use]
+pub fn jit_ensure_block_entry_exit(jit: &mut JITState, asm: &mut Assembler, ocb: &mut OutlinedCb) -> Option<()> {
if jit.block_entry_exit.is_some() {
- return;
+ return Some(());
}
let block_starting_context = &jit.get_starting_ctx();
@@ -605,17 +601,18 @@ pub fn jit_ensure_block_entry_exit(jit: &mut JITState, asm: &mut Assembler, ocb:
// Generate the exit with the cache in Assembler.
let side_exit_context = SideExitContext::new(jit.pc, block_starting_context.clone());
let entry_exit = asm.get_side_exit(&side_exit_context, None, ocb);
- jit.block_entry_exit = Some(entry_exit);
+ jit.block_entry_exit = Some(entry_exit?);
} else {
let block_entry_pc = unsafe { rb_iseq_pc_at_idx(jit.iseq, jit.starting_insn_idx.into()) };
- jit.block_entry_exit = Some(gen_outlined_exit(block_entry_pc, block_starting_context, ocb));
+ jit.block_entry_exit = Some(gen_outlined_exit(block_entry_pc, block_starting_context, ocb)?);
}
+
+ Some(())
}
// Landing code for when c_return tracing is enabled. See full_cfunc_return().
-fn gen_full_cfunc_return(ocb: &mut OutlinedCb) -> CodePtr {
+fn gen_full_cfunc_return(ocb: &mut OutlinedCb) -> Option<CodePtr> {
let ocb = ocb.unwrap();
- let code_ptr = ocb.get_write_ptr();
let mut asm = Assembler::new();
// This chunk of code expects REG_EC to be filled properly and
@@ -639,16 +636,13 @@ fn gen_full_cfunc_return(ocb: &mut OutlinedCb) -> CodePtr {
asm.cret(Qundef.into());
- asm.compile(ocb, None);
-
- return code_ptr;
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
}
/// Generate a continuation for leave that exits to the interpreter at REG_CFP->pc.
/// This is used by gen_leave() and gen_entry_prologue()
-fn gen_leave_exit(ocb: &mut OutlinedCb) -> CodePtr {
+fn gen_leave_exit(ocb: &mut OutlinedCb) -> Option<CodePtr> {
let ocb = ocb.unwrap();
- let code_ptr = ocb.get_write_ptr();
let mut asm = Assembler::new();
// gen_leave() fully reconstructs interpreter state and leaves the
@@ -667,18 +661,15 @@ fn gen_leave_exit(ocb: &mut OutlinedCb) -> CodePtr {
asm.cret(ret_opnd);
- asm.compile(ocb, None);
-
- return code_ptr;
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
}
// Increment SP and transfer the execution to the interpreter after jit_exec_exception().
// On jit_exec_exception(), you need to return Qundef to keep executing caller non-FINISH
// frames on the interpreter. You also need to increment SP to push the return value to
// the caller's stack, which is different from gen_stub_exit().
-fn gen_leave_exception(ocb: &mut OutlinedCb) -> CodePtr {
+fn gen_leave_exception(ocb: &mut OutlinedCb) -> Option<CodePtr> {
let ocb = ocb.unwrap();
- let code_ptr = ocb.get_write_ptr();
let mut asm = Assembler::new();
// gen_leave() leaves the return value in C_RET_OPND before coming here.
@@ -704,9 +695,7 @@ fn gen_leave_exception(ocb: &mut OutlinedCb) -> CodePtr {
// Execute vm_exec_core
asm.cret(Qundef.into());
- asm.compile(ocb, None);
-
- return code_ptr;
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
}
// Generate a runtime guard that ensures the PC is at the expected
@@ -807,7 +796,7 @@ pub fn gen_entry_prologue(
None
};
- asm.compile(cb, Some(ocb));
+ asm.compile(cb, Some(ocb))?;
if cb.has_dropped_bytes() {
None
@@ -851,7 +840,7 @@ fn jump_to_next_insn(
jit: &mut JITState,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
-) {
+) -> Option<()> {
// Reset the depth since in current usages we only ever jump to
// chain_depth > 0 from the same instruction.
let mut reset_depth = asm.ctx.clone();
@@ -866,12 +855,13 @@ fn jump_to_next_insn(
if jit.record_boundary_patch_point {
let exit_pc = unsafe { jit.pc.offset(insn_len(jit.opcode).try_into().unwrap()) };
let exit_pos = gen_outlined_exit(exit_pc, &reset_depth, ocb);
- record_global_inval_patch(asm, exit_pos);
+ record_global_inval_patch(asm, exit_pos?);
jit.record_boundary_patch_point = false;
}
// Generate the jump instruction
gen_direct_jump(jit, &reset_depth, jump_block, asm);
+ Some(())
}
// Compile a sequence of bytecode instructions for a given basic block version.
@@ -981,7 +971,7 @@ pub fn gen_single_block(
// If previous instruction requested to record the boundary
if jit.record_boundary_patch_point {
// Generate an exit to this instruction and record it
- let exit_pos = gen_outlined_exit(jit.pc, &asm.ctx, ocb);
+ let exit_pos = gen_outlined_exit(jit.pc, &asm.ctx, ocb).ok_or(())?;
record_global_inval_patch(&mut asm, exit_pos);
jit.record_boundary_patch_point = false;
}
@@ -1062,7 +1052,7 @@ pub fn gen_single_block(
}
// Compile code into the code block
- let gc_offsets = asm.compile(cb, Some(ocb));
+ let (_, gc_offsets) = asm.compile(cb, Some(ocb)).ok_or(())?;
let end_addr = cb.get_write_ptr();
// Flush perf symbols after asm.compile() writes addresses
@@ -8196,7 +8186,7 @@ fn gen_opt_getconstant_path(
// Make sure there is an exit for this block as the interpreter might want
// to invalidate this block from yjit_constant_ic_update().
- jit_ensure_block_entry_exit(jit, asm, ocb);
+ jit_ensure_block_entry_exit(jit, asm, ocb)?;
// See vm_ic_hit_p(). The same conditions are checked in yjit_constant_ic_update().
// If a cache is not filled, fallback to the general C call.
@@ -8784,16 +8774,16 @@ impl CodegenGlobals {
let mut ocb = OutlinedCb::wrap(CodeBlock::new_dummy(mem_size / 2));
let ocb_start_addr = ocb.unwrap().get_write_ptr();
- let leave_exit_code = gen_leave_exit(&mut ocb);
- let leave_exception_code = gen_leave_exception(&mut ocb);
+ let leave_exit_code = gen_leave_exit(&mut ocb).unwrap();
+ let leave_exception_code = gen_leave_exception(&mut ocb).unwrap();
- let stub_exit_code = gen_stub_exit(&mut ocb);
+ let stub_exit_code = gen_stub_exit(&mut ocb).unwrap();
- let branch_stub_hit_trampoline = gen_branch_stub_hit_trampoline(&mut ocb);
- let entry_stub_hit_trampoline = gen_entry_stub_hit_trampoline(&mut ocb);
+ let branch_stub_hit_trampoline = gen_branch_stub_hit_trampoline(&mut ocb).unwrap();
+ let entry_stub_hit_trampoline = gen_entry_stub_hit_trampoline(&mut ocb).unwrap();
// Generate full exit code for C func
- let cfunc_exit_code = gen_full_cfunc_return(&mut ocb);
+ let cfunc_exit_code = gen_full_cfunc_return(&mut ocb).unwrap();
let ocb_end_addr = ocb.unwrap().get_write_ptr();
let ocb_pages = ocb.unwrap().addrs_to_pages(ocb_start_addr, ocb_end_addr);
@@ -9009,7 +8999,7 @@ mod tests {
fn test_gen_exit() {
let (_, _ctx, mut asm, mut cb, _) = setup_codegen();
gen_exit(0 as *mut VALUE, &mut asm);
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
assert!(cb.get_write_pos() > 0);
}
@@ -9032,7 +9022,7 @@ mod tests {
fn test_gen_nop() {
let (mut jit, context, mut asm, mut cb, mut ocb) = setup_codegen();
let status = gen_nop(&mut jit, &mut asm, &mut ocb);
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
assert_eq!(status, Some(KeepCompiling));
assert_eq!(context.diff(&Context::default()), TypeDiff::Compatible(0));
@@ -9064,7 +9054,7 @@ mod tests {
assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(0)));
assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(1)));
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
assert!(cb.get_write_pos() > 0); // Write some movs
}
@@ -9088,7 +9078,7 @@ mod tests {
assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(0)));
// TODO: this is writing zero bytes on x86. Why?
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
assert!(cb.get_write_pos() > 0); // Write some movs
}
@@ -9117,7 +9107,7 @@ mod tests {
assert_eq!(status, Some(KeepCompiling));
assert_eq!(tmp_type_top, Type::Nil);
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
assert!(cb.get_write_pos() > 0);
}
@@ -9136,7 +9126,7 @@ mod tests {
assert_eq!(status, Some(KeepCompiling));
assert_eq!(tmp_type_top, Type::True);
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
assert!(cb.get_write_pos() > 0);
}
@@ -9156,7 +9146,7 @@ mod tests {
assert_eq!(status, Some(KeepCompiling));
assert_eq!(tmp_type_top, Type::Fixnum);
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
assert!(cb.get_write_pos() > 0);
}
@@ -9179,7 +9169,7 @@ mod tests {
let status = gen_putself(&mut jit, &mut asm, &mut ocb);
assert_eq!(status, Some(KeepCompiling));
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
assert!(cb.get_write_pos() > 0);
}
@@ -9202,7 +9192,7 @@ mod tests {
assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(1)));
assert_eq!(Type::CString, asm.ctx.get_opnd_type(StackOpnd(0)));
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
assert!(cb.get_write_pos() > 0);
}
@@ -9224,7 +9214,7 @@ mod tests {
assert_eq!(Type::CString, asm.ctx.get_opnd_type(StackOpnd(1)));
assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(0)));
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
assert!(cb.get_write_pos() > 0); // Write some movs
}
@@ -9245,7 +9235,7 @@ mod tests {
assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(0)));
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
assert!(cb.get_write_pos() == 0); // No instructions written
}
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index 8fb1a6a6ba..b75b809d26 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -2274,7 +2274,7 @@ pub fn regenerate_entry(cb: &mut CodeBlock, entryref: &EntryRef, next_entry: Cod
let old_dropped_bytes = cb.has_dropped_bytes();
cb.set_write_ptr(unsafe { entryref.as_ref() }.start_addr);
cb.set_dropped_bytes(false);
- asm.compile(cb, None);
+ asm.compile(cb, None).expect("can rewrite existing code");
// Rewind write_pos to the original one
assert_eq!(cb.get_write_ptr(), unsafe { entryref.as_ref() }.end_addr);
@@ -2298,19 +2298,35 @@ c_callable! {
/// Generated code calls this function with the SysV calling convention.
/// See [gen_call_entry_stub_hit].
fn entry_stub_hit(entry_ptr: *const c_void, ec: EcPtr) -> *const u8 {
- with_vm_lock(src_loc!(), || {
- match with_compile_time(|| { entry_stub_hit_body(entry_ptr, ec) }) {
- Some(addr) => addr,
- // Failed to service the stub by generating a new block so now we
- // need to exit to the interpreter at the stubbed location.
- None => return CodegenGlobals::get_stub_exit_code().raw_ptr(),
- }
+ with_compile_time(|| {
+ with_vm_lock(src_loc!(), || {
+ let cb = CodegenGlobals::get_inline_cb();
+ let ocb = CodegenGlobals::get_outlined_cb();
+
+ let addr = entry_stub_hit_body(entry_ptr, ec, cb, ocb)
+ .unwrap_or_else(|| {
+ // Trigger code GC (e.g. no space).
+ // This entry point will be recompiled later.
+ cb.code_gc(ocb);
+ CodegenGlobals::get_stub_exit_code().raw_ptr()
+ });
+
+ cb.mark_all_executable();
+ ocb.unwrap().mark_all_executable();
+
+ addr
+ })
})
}
}
/// Called by the generated code when an entry stub is executed
-fn entry_stub_hit_body(entry_ptr: *const c_void, ec: EcPtr) -> Option<*const u8> {
+fn entry_stub_hit_body(
+ entry_ptr: *const c_void,
+ ec: EcPtr,
+ cb: &mut CodeBlock,
+ ocb: &mut OutlinedCb
+) -> Option<*const u8> {
// Get ISEQ and insn_idx from the current ec->cfp
let cfp = unsafe { get_ec_cfp(ec) };
let iseq = unsafe { get_cfp_iseq(cfp) };
@@ -2319,14 +2335,11 @@ fn entry_stub_hit_body(entry_ptr: *const c_void, ec: EcPtr) -> Option<*const u8>
u8::try_from(get_cfp_sp(cfp).offset_from(get_cfp_bp(cfp))).ok()?
};
- let cb = CodegenGlobals::get_inline_cb();
- let ocb = CodegenGlobals::get_outlined_cb();
-
// Compile a new entry guard as a next entry
let next_entry = cb.get_write_ptr();
let mut asm = Assembler::new();
let pending_entry = gen_entry_chain_guard(&mut asm, ocb, iseq, insn_idx)?;
- asm.compile(cb, Some(ocb));
+ asm.compile(cb, Some(ocb))?;
// Find or compile a block version
let blockid = BlockId { iseq, idx: insn_idx };
@@ -2337,7 +2350,7 @@ fn entry_stub_hit_body(entry_ptr: *const c_void, ec: EcPtr) -> Option<*const u8>
Some(blockref) => {
let mut asm = Assembler::new();
asm.jmp(unsafe { blockref.as_ref() }.start_addr.into());
- asm.compile(cb, Some(ocb));
+ asm.compile(cb, Some(ocb))?;
Some(blockref)
}
// If this block hasn't yet been compiled, generate blocks after the entry guard.
@@ -2353,14 +2366,8 @@ fn entry_stub_hit_body(entry_ptr: *const c_void, ec: EcPtr) -> Option<*const u8>
// Write an entry to the heap and push it to the ISEQ
let pending_entry = Rc::try_unwrap(pending_entry).ok().expect("PendingEntry should be unique");
get_or_create_iseq_payload(iseq).entries.push(pending_entry.into_entry());
- } else { // No space
- // Trigger code GC. This entry point will be recompiled later.
- cb.code_gc(ocb);
}
- cb.mark_all_executable();
- ocb.unwrap().mark_all_executable();
-
// Let the stub jump to the block
blockref.map(|block| unsafe { block.as_ref() }.start_addr.raw_ptr())
}
@@ -2368,7 +2375,6 @@ fn entry_stub_hit_body(entry_ptr: *const c_void, ec: EcPtr) -> Option<*const u8>
/// Generate a stub that calls entry_stub_hit
pub fn gen_entry_stub(entry_address: usize, ocb: &mut OutlinedCb) -> Option<CodePtr> {
let ocb = ocb.unwrap();
- let stub_addr = ocb.get_write_ptr();
let mut asm = Assembler::new();
asm_comment!(asm, "entry stub hit");
@@ -2379,20 +2385,13 @@ pub fn gen_entry_stub(entry_address: usize, ocb: &mut OutlinedCb) -> Option<Code
// Not really a side exit, just don't need a padded jump here.
asm.jmp(CodegenGlobals::get_entry_stub_hit_trampoline().as_side_exit());
- asm.compile(ocb, None);
-
- if ocb.has_dropped_bytes() {
- return None; // No space
- } else {
- return Some(stub_addr);
- }
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
}
/// A trampoline used by gen_entry_stub. entry_stub_hit may issue Code GC, so
/// it's useful for Code GC to call entry_stub_hit from a globally shared code.
-pub fn gen_entry_stub_hit_trampoline(ocb: &mut OutlinedCb) -> CodePtr {
+pub fn gen_entry_stub_hit_trampoline(ocb: &mut OutlinedCb) -> Option<CodePtr> {
let ocb = ocb.unwrap();
- let code_ptr = ocb.get_write_ptr();
let mut asm = Assembler::new();
// See gen_entry_guard for how it's used.
@@ -2402,9 +2401,7 @@ pub fn gen_entry_stub_hit_trampoline(ocb: &mut OutlinedCb) -> CodePtr {
// Jump to the address returned by the entry_stub_hit() call
asm.jmp_opnd(jump_addr);
- asm.compile(ocb, None);
-
- code_ptr
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
}
/// Generate code for a branch, possibly rewriting and changing the size of it
@@ -2431,7 +2428,7 @@ fn regenerate_branch(cb: &mut CodeBlock, branch: &Branch) {
let old_dropped_bytes = cb.has_dropped_bytes();
cb.set_write_ptr(branch.start_addr);
cb.set_dropped_bytes(false);
- asm.compile(cb, None);
+ asm.compile(cb, None).expect("can rewrite existing code");
let new_end_addr = cb.get_write_ptr();
branch.end_addr.set(new_end_addr);
@@ -2669,9 +2666,6 @@ fn gen_branch_stub(
) -> Option<CodePtr> {
let ocb = ocb.unwrap();
- // Generate an outlined stub that will call branch_stub_hit()
- let stub_addr = ocb.get_write_ptr();
-
let mut asm = Assembler::new();
asm.ctx = ctx.clone();
asm.set_reg_temps(ctx.reg_temps);
@@ -2705,19 +2699,11 @@ fn gen_branch_stub(
// Not really a side exit, just don't need a padded jump here.
asm.jmp(CodegenGlobals::get_branch_stub_hit_trampoline().as_side_exit());
- asm.compile(ocb, None);
-
- if ocb.has_dropped_bytes() {
- // No space
- None
- } else {
- Some(stub_addr)
- }
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
}
-pub fn gen_branch_stub_hit_trampoline(ocb: &mut OutlinedCb) -> CodePtr {
+pub fn gen_branch_stub_hit_trampoline(ocb: &mut OutlinedCb) -> Option<CodePtr> {
let ocb = ocb.unwrap();
- let code_ptr = ocb.get_write_ptr();
let mut asm = Assembler::new();
// For `branch_stub_hit(branch_ptr, target_idx, ec)`,
@@ -2750,9 +2736,7 @@ pub fn gen_branch_stub_hit_trampoline(ocb: &mut OutlinedCb) -> CodePtr {
// return register so we get something else for the return value.
let _ = asm.live_reg_opnd(stub_hit_ret);
- asm.compile(ocb, None);
-
- code_ptr
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
}
/// Return registers to be pushed and popped on branch_stub_hit.
@@ -3093,7 +3077,7 @@ pub fn invalidate_block_version(blockref: &BlockRef) {
let mut asm = Assembler::new();
asm.jmp(block_entry_exit.as_side_exit());
cb.set_dropped_bytes(false);
- asm.compile(&mut cb, Some(ocb));
+ asm.compile(&mut cb, Some(ocb)).expect("can rewrite existing code");
assert!(
cb.get_write_ptr() <= block_end,
diff --git a/yjit/src/invariants.rs b/yjit/src/invariants.rs
index 17a7152d8b..26c15b692e 100644
--- a/yjit/src/invariants.rs
+++ b/yjit/src/invariants.rs
@@ -89,7 +89,9 @@ pub fn assume_bop_not_redefined(
bop: ruby_basic_operators,
) -> bool {
if unsafe { BASIC_OP_UNREDEFINED_P(bop, klass) } {
- jit_ensure_block_entry_exit(jit, asm, ocb);
+ if jit_ensure_block_entry_exit(jit, asm, ocb).is_none() {
+ return false;
+ }
jit.bop_assumptions.push((klass, bop));
return true;
@@ -153,7 +155,9 @@ pub fn assume_single_ractor_mode(jit: &mut JITState, asm: &mut Assembler, ocb: &
if unsafe { rb_yjit_multi_ractor_p() } {
false
} else {
- jit_ensure_block_entry_exit(jit, asm, ocb);
+ if jit_ensure_block_entry_exit(jit, asm, ocb).is_none() {
+ return false;
+ }
jit.block_assumes_single_ractor = true;
true
@@ -527,7 +531,7 @@ pub extern "C" fn rb_yjit_tracing_invalidate_all() {
cb.set_write_ptr(patch.inline_patch_pos);
cb.set_dropped_bytes(false);
- asm.compile(cb, None);
+ asm.compile(cb, None).expect("can rewrite existing code");
last_patch_end = cb.get_write_ptr().raw_ptr();
}
cb.set_pos(old_pos);
diff --git a/yjit/src/utils.rs b/yjit/src/utils.rs
index a883e959a0..58415e279f 100644
--- a/yjit/src/utils.rs
+++ b/yjit/src/utils.rs
@@ -263,7 +263,7 @@ mod tests {
let mut cb = CodeBlock::new_dummy(1024);
print_int(&mut asm, Opnd::Imm(42));
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
}
#[test]
@@ -272,6 +272,6 @@ mod tests {
let mut cb = CodeBlock::new_dummy(1024);
print_str(&mut asm, "Hello, world!");
- asm.compile(&mut cb, None);
+ asm.compile(&mut cb, None).unwrap();
}
}
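
Not part of the patch: the sketch below illustrates the calling convention this change establishes, where Assembler::compile returns Option<(CodePtr, Vec<u32>)> and callers either propagate the None (as gen_single_block does with .ok_or(())?) or recover explicitly (as entry_stub_hit does by triggering code GC). The CodeBlock, CodePtr, and Assembler definitions here are simplified stand-ins for illustration only, not the real YJIT types.

// Minimal stand-ins; the real CodePtr, CodeBlock, and Assembler live in
// yjit/src/asm and yjit/src/backend and carry much more state.
#[derive(Clone, Copy, Debug)]
struct CodePtr(usize);

struct CodeBlock {
    write_pos: usize,
    capacity: usize,
}

struct Assembler;

impl Assembler {
    /// Contract sketched by the patch: `None` means no code could be
    /// committed (e.g. the code block is out of memory); `Some` carries the
    /// start address of the emitted code and its GC offsets.
    fn compile(&self, cb: &mut CodeBlock) -> Option<(CodePtr, Vec<u32>)> {
        let start = CodePtr(cb.write_pos);
        let code_size = 16; // pretend every compile emits 16 bytes
        if cb.write_pos + code_size > cb.capacity {
            return None; // signal out-of-memory to the caller
        }
        cb.write_pos += code_size;
        Some((start, Vec::new()))
    }
}

/// Callers propagate the failure instead of unwrapping, the same way
/// gen_single_block turns `None` into its own `Err(())` in the diff above.
fn gen_block(cb: &mut CodeBlock) -> Result<CodePtr, ()> {
    let asm = Assembler;
    let (start_ptr, _gc_offsets) = asm.compile(cb).ok_or(())?;
    Ok(start_ptr)
}

fn main() {
    let mut cb = CodeBlock { write_pos: 0, capacity: 32 };
    let first = gen_block(&mut cb).expect("first block fits");
    println!("first block starts at {:?}", first);
    assert!(gen_block(&mut cb).is_ok());
    // A third block no longer fits, so the failure surfaces as Err(()) rather than a panic.
    assert!(gen_block(&mut cb).is_err());
}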