Diffstat (limited to 'yjit/src/core.rs')
-rw-r--r--  yjit/src/core.rs | 42 +++++++++++++++++++++++++++++++++++-------
1 file changed, 35 insertions(+), 7 deletions(-)
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index fb7d52cc5d..7c8532a0c3 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -53,11 +53,11 @@ pub enum Type {
ImmSymbol,
TString, // An object with the T_STRING flag set, possibly an rb_cString
- CString, // An un-subclassed string of type rb_cString (can have instance vars in some cases)
+ CString, // An object that at one point had its class field equal to rb_cString (creating a singleton class changes it)
TArray, // An object with the T_ARRAY flag set, possibly an rb_cArray
- CArray, // An un-subclassed array of type rb_cArray (can have instance vars in some cases)
+ CArray, // An object that at one point had its class field equal to rb_cArray (creating a singleton class changes it)
THash, // An object with the T_HASH flag set, possibly an rb_cHash
- CHash, // An un-subclassed hash of type rb_cHash (can have instance vars in some cases)
+ CHash, // An object that at one point had its class field equal to rb_cHash (creating a singleton class changes it)
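+ // In Ruby terms: `def obj.m; end` gives obj a singleton class, so the
+ // object no longer qualifies as CString/CArray/CHash.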
BlockParamProxy, // A special sentinel value indicating the block parameter should be read from
// the current surrounding cfp
@@ -1000,7 +1000,7 @@ impl fmt::Debug for MutableBranchList {
// SAFETY: the derived Clone for boxed slices does not mutate this Cell
let branches = unsafe { self.0.ref_unchecked().clone() };
- formatter.debug_list().entries(branches.into_iter()).finish()
+ formatter.debug_list().entries(branches.iter()).finish()
}
}
@@ -1138,8 +1138,12 @@ pub fn for_each_off_stack_iseq_payload<F: FnMut(&mut IseqPayload)>(mut callback:
/// Free the per-iseq payload
#[no_mangle]
-pub extern "C" fn rb_yjit_iseq_free(payload: *mut c_void) {
+pub extern "C" fn rb_yjit_iseq_free(iseq: IseqPtr) {
+ // Free invariants for the ISEQ
+ iseq_free_invariants(iseq);
+
let payload = {
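+ // With the ISEQ pointer passed in, look the payload up here ourselves.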
+ let payload = unsafe { rb_iseq_get_yjit_payload(iseq) };
if payload.is_null() {
// Nothing to free.
return;
@@ -1266,7 +1270,8 @@ pub extern "C" fn rb_yjit_iseq_mark(payload: *mut c_void) {
/// GC callback for updating GC objects in the per-iseq payload.
/// This is a mirror of [rb_yjit_iseq_mark].
#[no_mangle]
-pub extern "C" fn rb_yjit_iseq_update_references(payload: *mut c_void) {
+pub extern "C" fn rb_yjit_iseq_update_references(iseq: IseqPtr) {
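+ // Fetch the payload from the ISEQ, mirroring rb_yjit_iseq_free above.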
+ let payload = unsafe { rb_iseq_get_yjit_payload(iseq) };
let payload = if payload.is_null() {
// Nothing to update.
return;
@@ -1657,6 +1662,9 @@ impl JITState {
for klass in self.no_singleton_class_assumptions {
track_no_singleton_class_assumption(blockref, klass);
}
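+ // Record the assumption that this ISEQ's environment pointer does not
+ // escape the stack (e.g. into a Binding object).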
+ if self.no_ep_escape {
+ track_no_ep_escape_assumption(blockref, self.iseq);
+ }
blockref
}
@@ -1798,6 +1806,13 @@ impl Context {
return Opnd::mem(64, SP, offset);
}
+ /// Get an operand for the adjusted environment pointer address using SP register.
+ /// This is valid only when a Binding object hasn't been created for the frame.
+ pub fn ep_opnd(&self, offset: i32) -> Opnd {
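+ // SP points one past the top temp, and EP sits one slot below the
+ // bottom of the temps, hence stack_size + 1 slots below SP.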
+ let ep_offset = self.get_stack_size() as i32 + 1;
+ self.sp_opnd(-ep_offset + offset)
+ }
+
/// Stop using a register for a given stack temp.
/// This allows us to reuse the register for a value that we know is dead
/// and will no longer be used (e.g. popped stack temp).
@@ -2643,6 +2658,12 @@ fn regenerate_branch(cb: &mut CodeBlock, branch: &Branch) {
branch.get_target_address(1).map(|addr| Target::CodePtr(addr)),
);
+ // If the entire block is the branch and the block could be invalidated,
+ // we need to pad to ensure there is room for invalidation patching.
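+ // (Invalidation patches a jump to the block's entry exit at its start address.)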
+ if branch.start_addr == block.start_addr && branch_terminates_block && block.entry_exit.is_some() {
+ asm.pad_inval_patch();
+ }
+
// Rewrite the branch
let old_write_pos = cb.get_write_pos();
let old_dropped_bytes = cb.has_dropped_bytes();
@@ -3124,6 +3145,12 @@ pub fn defer_compilation(
// Likely a stub due to the increased chain depth
let target0_address = branch.set_target(0, blockid, &next_ctx, ocb);
+ // Pad the block if it has the potential to be invalidated. This must be
+ // done before gen_fn() in case the jump is overwritten by a fallthrough.
+ if jit.block_entry_exit.is_some() {
+ asm.pad_inval_patch();
+ }
+
// Call the branch generation function
asm_comment!(asm, "defer_compilation");
asm.mark_branch_start(&branch);
@@ -3307,9 +3334,10 @@ pub fn invalidate_block_version(blockref: &BlockRef) {
assert!(
cb.get_write_ptr() <= block_end,
- "invalidation wrote past end of block (code_size: {:?}, new_size: {})",
+ "invalidation wrote past end of block (code_size: {:?}, new_size: {}, start_addr: {:?})",
block.code_size(),
cb.get_write_ptr().as_offset() - block_start.as_offset(),
+ block.start_addr.raw_ptr(cb),
);
cb.set_write_ptr(cur_pos);
cb.set_dropped_bytes(cur_dropped_bytes);