Diffstat (limited to 'yjit/src/invariants.rs')
-rw-r--r-- | yjit/src/invariants.rs | 489
1 file changed, 263 insertions, 226 deletions
diff --git a/yjit/src/invariants.rs b/yjit/src/invariants.rs
index c31f0ccedc..e460293440 100644
--- a/yjit/src/invariants.rs
+++ b/yjit/src/invariants.rs
@@ -2,23 +2,23 @@
 //! generated code if and when these assumptions are invalidated.
 
 use crate::asm::OutlinedCb;
+use crate::backend::ir::Assembler;
 use crate::codegen::*;
 use crate::core::*;
 use crate::cruby::*;
-use crate::options::*;
 use crate::stats::*;
 use crate::utils::IntoUsize;
 use crate::yjit::yjit_enabled_p;
 use std::collections::{HashMap, HashSet};
-use std::mem;
 use std::os::raw::c_void;
+use std::mem;
 
 // Invariants to track:
 // assume_bop_not_redefined(jit, INTEGER_REDEFINED_OP_FLAG, BOP_PLUS)
 // assume_method_lookup_stable(comptime_recv_klass, cme, jit);
-// assume_single_ractor_mode(jit)
-// assume_stable_global_constant_state(jit);
+// assume_single_ractor_mode()
+// track_stable_constant_names_assumption()
 
 /// Used to track all of the various block references that contain assumptions
 /// about the state of the virtual machine.
@@ -26,11 +26,6 @@ pub struct Invariants {
     /// Tracks block assumptions about callable method entry validity.
     cme_validity: HashMap<*const rb_callable_method_entry_t, HashSet<BlockRef>>,
 
-    /// Tracks block assumptions about method lookup. Maps a class to a table of
-    /// method ID points to a set of blocks. While a block `b` is in the table,
-    /// b->callee_cme == rb_callable_method_entry(klass, mid).
-    method_lookup: HashMap<VALUE, HashMap<ID, HashSet<BlockRef>>>,
-
     /// A map from a class and its associated basic operator to a set of blocks
     /// that are assuming that that operator is not redefined. This is used for
     /// quick access to all of the blocks that are making this assumption when
@@ -58,6 +53,12 @@ pub struct Invariants {
     /// A map from a block to a set of IDs that it is assuming have not been
     /// redefined.
     block_constant_states: HashMap<BlockRef, HashSet<ID>>,
+
+    /// A map from a class to a set of blocks that assume objects of the class
+    /// will have no singleton class. When the set is empty, it means that
+    /// there has been a singleton class for the class after boot, so you cannot
+    /// assume no singleton class going forward.
+    no_singleton_classes: HashMap<VALUE, HashSet<BlockRef>>,
 }
 
 /// Private singleton instance of the invariants global struct.
@@ -69,12 +70,12 @@ impl Invariants {
         unsafe {
             INVARIANTS = Some(Invariants {
                 cme_validity: HashMap::new(),
-                method_lookup: HashMap::new(),
                 basic_operator_blocks: HashMap::new(),
                 block_basic_operators: HashMap::new(),
                 single_ractor: HashSet::new(),
                 constant_state_blocks: HashMap::new(),
                 block_constant_states: HashMap::new(),
+                no_singleton_classes: HashMap::new(),
             });
         }
     }
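The new `no_singleton_classes` table doubles as a sentinel: a missing key means the assumption is still available, a non-empty set lists the blocks relying on it, and an empty set records that a singleton class has already been seen, so the assumption is permanently off. A minimal, self-contained sketch of that scheme (the `BlockId` and string keys here are stand-ins, not YJIT's real types):

```rust
use std::collections::{HashMap, HashSet};

type BlockId = usize; // stand-in for YJIT's BlockRef

#[derive(Default)]
struct NoSingletonTable {
    map: HashMap<&'static str, HashSet<BlockId>>,
}

impl NoSingletonTable {
    /// Record that `block` assumes objects of `klass` have no singleton class.
    fn track(&mut self, klass: &'static str, block: BlockId) {
        self.map.entry(klass).or_default().insert(block);
    }

    /// Mirrors has_singleton_class_of(): true only when the key is present
    /// with an empty set, i.e. a singleton class has been seen since boot.
    fn has_singleton_class_of(&self, klass: &'static str) -> bool {
        self.map.get(klass).map_or(false, |blocks| blocks.is_empty())
    }

    /// A singleton class appeared: drain the dependent blocks for invalidation,
    /// leaving an empty set behind as the "seen it" marker.
    fn on_singleton_class(&mut self, klass: &'static str) -> Vec<BlockId> {
        self.map.entry(klass).or_default().drain().collect()
    }
}

fn main() {
    let mut t = NoSingletonTable::default();
    t.track("Array", 1);
    assert!(!t.has_singleton_class_of("Array"));
    assert_eq!(t.on_singleton_class("Array"), vec![1]); // these blocks get invalidated
    assert!(t.has_singleton_class_of("Array")); // the empty set persists
}
```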
@@ -85,29 +86,21 @@ impl Invariants {
     }
 }
 
-/// A public function that can be called from within the code generation
-/// functions to ensure that the block being generated is invalidated when the
-/// basic operator is redefined.
+/// Mark the pending block as assuming that certain basic operators (e.g. Integer#==)
+/// have not been redefined.
+#[must_use]
 pub fn assume_bop_not_redefined(
     jit: &mut JITState,
+    asm: &mut Assembler,
     ocb: &mut OutlinedCb,
     klass: RedefinitionFlag,
     bop: ruby_basic_operators,
 ) -> bool {
     if unsafe { BASIC_OP_UNREDEFINED_P(bop, klass) } {
-        jit_ensure_block_entry_exit(jit, ocb);
-
-        let invariants = Invariants::get_instance();
-        invariants
-            .basic_operator_blocks
-            .entry((klass, bop))
-            .or_default()
-            .insert(jit.get_block());
-        invariants
-            .block_basic_operators
-            .entry(jit.get_block())
-            .or_default()
-            .insert((klass, bop));
+        if jit_ensure_block_entry_exit(jit, asm, ocb).is_none() {
+            return false;
+        }
+        jit.bop_assumptions.push((klass, bop));
 
         return true;
     } else {
@@ -115,115 +108,126 @@ pub fn assume_bop_not_redefined(
     }
 }
 
-// Remember that a block assumes that
-// `rb_callable_method_entry(receiver_klass, cme->called_id) == cme` and that
-// `cme` is valid.
-// When either of these assumptions becomes invalid, rb_yjit_method_lookup_change() or
-// rb_yjit_cme_invalidate() invalidates the block.
-//
-// @raise NoMemoryError
-pub fn assume_method_lookup_stable(
-    jit: &mut JITState,
-    ocb: &mut OutlinedCb,
-    receiver_klass: VALUE,
+/// Track that a block is only valid when a certain basic operator has not been redefined
+/// since the block's inception.
+pub fn track_bop_assumption(uninit_block: BlockRef, bop: (RedefinitionFlag, ruby_basic_operators)) {
+    let invariants = Invariants::get_instance();
+    invariants
+        .basic_operator_blocks
+        .entry(bop)
+        .or_default()
+        .insert(uninit_block);
+    invariants
+        .block_basic_operators
+        .entry(uninit_block)
+        .or_default()
+        .insert(bop);
+}
+
+/// Track that a block will assume that `cme` is valid (false == METHOD_ENTRY_INVALIDATED(cme)).
+/// [rb_yjit_cme_invalidate] invalidates the block when `cme` is invalidated.
+pub fn track_method_lookup_stability_assumption(
+    uninit_block: BlockRef,
     callee_cme: *const rb_callable_method_entry_t,
 ) {
-    // RUBY_ASSERT(rb_callable_method_entry(receiver_klass, cme->called_id) == cme);
-    // RUBY_ASSERT_ALWAYS(RB_TYPE_P(receiver_klass, T_CLASS) || RB_TYPE_P(receiver_klass, T_ICLASS));
-    // RUBY_ASSERT_ALWAYS(!rb_objspace_garbage_object_p(receiver_klass));
-
-    jit_ensure_block_entry_exit(jit, ocb);
-
-    let block = jit.get_block();
-    block
-        .borrow_mut()
-        .add_cme_dependency(receiver_klass, callee_cme);
-
     Invariants::get_instance()
         .cme_validity
         .entry(callee_cme)
         .or_default()
-        .insert(block.clone());
+        .insert(uninit_block);
+}
 
-    let mid = unsafe { (*callee_cme).called_id };
+/// Track that a block will assume that `klass` objects will have no singleton class.
+pub fn track_no_singleton_class_assumption(uninit_block: BlockRef, klass: VALUE) {
     Invariants::get_instance()
-        .method_lookup
-        .entry(receiver_klass)
+        .no_singleton_classes
+        .entry(klass)
         .or_default()
-        .entry(mid)
-        .or_default()
-        .insert(block);
+        .insert(uninit_block);
+}
+
+/// Returns true if we've seen a singleton class of a given class since boot.
+pub fn has_singleton_class_of(klass: VALUE) -> bool {
+    Invariants::get_instance()
+        .no_singleton_classes
        .get(&klass)
        .map_or(false, |blocks| blocks.is_empty())
+}
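`track_bop_assumption` keeps two mirrored maps so that both directions stay cheap: a redefined operator can find every dependent block, and a freed block can find every table entry pointing back at it. A sketch of the same two-way bookkeeping with simplified stand-in types (not the real YJIT definitions):

```rust
use std::collections::{HashMap, HashSet};

type Bop = (u32, u32); // stand-in for (RedefinitionFlag, ruby_basic_operators)
type Block = usize;    // stand-in for BlockRef

#[derive(Default)]
struct BopTables {
    bop_to_blocks: HashMap<Bop, HashSet<Block>>,
    block_to_bops: HashMap<Block, HashSet<Bop>>,
}

impl BopTables {
    /// Record the assumption in both directions.
    fn track(&mut self, block: Block, bop: Bop) {
        self.bop_to_blocks.entry(bop).or_default().insert(block);
        self.block_to_bops.entry(block).or_default().insert(bop);
    }

    /// Redefinition path: drain every block that assumed this bop.
    fn on_redefined(&mut self, bop: Bop) -> Vec<Block> {
        self.bop_to_blocks.remove(&bop).into_iter().flatten().collect()
    }

    /// Block-free path: drop the block from every forward entry it touched.
    fn free_block(&mut self, block: Block) {
        for bop in self.block_to_bops.remove(&block).unwrap_or_default() {
            if let Some(blocks) = self.bop_to_blocks.get_mut(&bop) {
                blocks.remove(&block);
                if blocks.is_empty() {
                    self.bop_to_blocks.remove(&bop);
                }
            }
        }
    }
}

fn main() {
    let mut tables = BopTables::default();
    tables.track(7, (1, 2));
    tables.free_block(7);
    assert!(tables.on_redefined((1, 2)).is_empty());
}
```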
+
+// Checks rb_method_basic_definition_p and registers the current block for invalidation if method
+// lookup changes.
+// A "basic method" is one defined during VM boot, so we can use this to check assumptions based on
+// default behavior.
+pub fn assume_method_basic_definition(
+    jit: &mut JITState,
+    asm: &mut Assembler,
+    ocb: &mut OutlinedCb,
+    klass: VALUE,
+    mid: ID
+) -> bool {
+    if unsafe { rb_method_basic_definition_p(klass, mid) } != 0 {
+        let cme = unsafe { rb_callable_method_entry(klass, mid) };
+        jit.assume_method_lookup_stable(asm, ocb, cme);
+        true
+    } else {
+        false
+    }
 }
 
 /// Tracks that a block is assuming it is operating in single-ractor mode.
 #[must_use]
-pub fn assume_single_ractor_mode(jit: &mut JITState, ocb: &mut OutlinedCb) -> bool {
+pub fn assume_single_ractor_mode(jit: &mut JITState, asm: &mut Assembler, ocb: &mut OutlinedCb) -> bool {
     if unsafe { rb_yjit_multi_ractor_p() } {
         false
     } else {
-        jit_ensure_block_entry_exit(jit, ocb);
-        Invariants::get_instance()
-            .single_ractor
-            .insert(jit.get_block());
+        if jit_ensure_block_entry_exit(jit, asm, ocb).is_none() {
+            return false;
+        }
+        jit.block_assumes_single_ractor = true;
+
         true
     }
 }
 
-/// Walk through the ISEQ to go from the current opt_getinlinecache to the
-/// subsequent opt_setinlinecache and find all of the name components that are
-/// associated with this constant (which correspond to the getconstant
-/// arguments).
-pub fn assume_stable_constant_names(jit: &mut JITState, ocb: &mut OutlinedCb) {
-    /// Tracks that a block is assuming that the name component of a constant
-    /// has not changed since the last call to this function.
-    unsafe extern "C" fn assume_stable_constant_name(
-        code: *mut VALUE,
-        insn: VALUE,
-        index: u64,
-        data: *mut c_void,
-    ) -> bool {
-        if insn.as_usize() == OP_OPT_SETINLINECACHE {
-            return false;
-        }
+/// Track that the block will assume single ractor mode.
+pub fn track_single_ractor_assumption(uninit_block: BlockRef) {
+    Invariants::get_instance()
+        .single_ractor
+        .insert(uninit_block);
+}
 
-        if insn.as_usize() == OP_GETCONSTANT {
-            let jit = &mut *(data as *mut JITState);
-
-            // The first operand to GETCONSTANT is always the ID associated with
-            // the constant lookup. We are grabbing this out in order to
-            // associate this block with the stability of this constant name.
-            let id = code.add(index.as_usize() + 1).read().as_u64() as ID;
-
-            let invariants = Invariants::get_instance();
-            invariants
-                .constant_state_blocks
-                .entry(id)
-                .or_default()
-                .insert(jit.get_block());
-            invariants
-                .block_constant_states
-                .entry(jit.get_block())
-                .or_default()
-                .insert(id);
+/// Track that a block will assume that the name components of a constant path expression
+/// have not changed since the block's full initialization.
+pub fn track_stable_constant_names_assumption(uninit_block: BlockRef, idlist: *const ID) {
+    fn assume_stable_constant_name(
+        uninit_block: BlockRef,
+        id: ID,
+    ) {
+        if id == ID!(NULL) {
+            // Used for :: prefix
+            return;
         }
 
-        true
+        let invariants = Invariants::get_instance();
+        invariants
+            .constant_state_blocks
+            .entry(id)
+            .or_default()
+            .insert(uninit_block);
+        invariants
+            .block_constant_states
+            .entry(uninit_block)
+            .or_default()
+            .insert(id);
     }
 
-    jit_ensure_block_entry_exit(jit, ocb);
-
-    unsafe {
-        let iseq = jit.get_iseq();
-        let encoded = get_iseq_body_iseq_encoded(iseq);
-        let start_index = jit.get_pc().offset_from(encoded);
-
-        rb_iseq_each(
-            iseq,
-            start_index.try_into().unwrap(),
-            Some(assume_stable_constant_name),
-            jit as *mut _ as *mut c_void,
-        );
-    };
+    for i in 0.. {
+        match unsafe { *idlist.offset(i) } {
+            0 => break, // End of NULL terminated list
+            id => assume_stable_constant_name(uninit_block, id),
+        }
+    }
 }
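`track_stable_constant_names_assumption` walks a NULL-terminated C array of IDs through a raw pointer, stopping at the 0 terminator. A self-contained sketch of the same traversal pattern over an assumed `u64` ID array:

```rust
// Walk a 0-terminated array the way track_stable_constant_names_assumption
// does, collecting each ID. `idlist` must point at a 0-terminated buffer.
unsafe fn collect_ids(idlist: *const u64) -> Vec<u64> {
    let mut ids = Vec::new();
    for i in 0.. {
        match *idlist.offset(i) {
            0 => break, // end of NULL-terminated list
            id => ids.push(id),
        }
    }
    ids
}

fn main() {
    let raw = [10u64, 20, 30, 0];
    let ids = unsafe { collect_ids(raw.as_ptr()) };
    assert_eq!(ids, vec![10, 20, 30]);
}
```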
 
 /// Called when a basic operator is redefined. Note that all the blocks assuming
@@ -270,31 +274,6 @@ pub extern "C" fn rb_yjit_cme_invalidate(callee_cme: *const rb_callable_method_e
     });
 }
 
-/// Callback for when rb_callable_method_entry(klass, mid) is going to change.
-/// Invalidate blocks that assume stable method lookup of `mid` in `klass` when this happens.
-/// This needs to be wrapped on the C side with RB_VM_LOCK_ENTER().
-#[no_mangle]
-pub extern "C" fn rb_yjit_method_lookup_change(klass: VALUE, mid: ID) {
-    // If YJIT isn't enabled, do nothing
-    if !yjit_enabled_p() {
-        return;
-    }
-
-    with_vm_lock(src_loc!(), || {
-        Invariants::get_instance()
-            .method_lookup
-            .entry(klass)
-            .and_modify(|deps| {
-                if let Some(deps) = deps.remove(&mid) {
-                    for block in &deps {
-                        invalidate_block_version(block);
-                        incr_counter!(invalidate_method_lookup);
-                    }
-                }
-            });
-    });
-}
-
 /// Callback for when Ruby is about to spawn a ractor. In that case we need to
 /// invalidate every block that is assuming single ractor mode.
 #[no_mangle]
@@ -325,32 +304,11 @@ pub extern "C" fn rb_yjit_constant_state_changed(id: ID) {
     }
 
     with_vm_lock(src_loc!(), || {
-        if get_option!(global_constant_state) {
-            // If the global-constant-state option is set, then we're going to
-            // invalidate every block that depends on any constant.
-
-            Invariants::get_instance()
-                .constant_state_blocks
-                .keys()
-                .for_each(|id| {
-                    if let Some(blocks) =
-                        Invariants::get_instance().constant_state_blocks.remove(&id)
-                    {
-                        for block in &blocks {
-                            invalidate_block_version(block);
-                            incr_counter!(invalidate_constant_state_bump);
-                        }
-                    }
-                });
-        } else {
-            // If the global-constant-state option is not set, then we're only going
-            // to invalidate the blocks that are associated with the given ID.
-
-            if let Some(blocks) = Invariants::get_instance().constant_state_blocks.remove(&id) {
-                for block in &blocks {
-                    invalidate_block_version(block);
-                    incr_counter!(invalidate_constant_state_bump);
-                }
+        // Invalidate the blocks that are associated with the given ID.
+        if let Some(blocks) = Invariants::get_instance().constant_state_blocks.remove(&id) {
+            for block in &blocks {
+                invalidate_block_version(block);
+                incr_counter!(invalidate_constant_state_bump);
             }
         }
     });
@@ -360,12 +318,17 @@ pub extern "C" fn rb_yjit_constant_state_changed(id: ID) {
 /// See `struct yjit_root_struct` in C.
 #[no_mangle]
 pub extern "C" fn rb_yjit_root_mark() {
+    // Call rb_gc_mark on exit location's raw_samples to
+    // wrap frames in a GC allocated object. This needs to be called
+    // at the same time as root mark.
+    YjitExitLocations::gc_mark_raw_samples();
+
     // Comment from C YJIT:
     //
     // Why not let the GC move the cme keys in this table?
     // Because this is basically a compare_by_identity Hash.
     // If a key moves, we would need to reinsert it into the table so it is rehashed.
-    // That is tricky to do, espcially as it could trigger allocation which could
+    // That is tricky to do, especially as it could trigger allocation which could
     // trigger GC. Not sure if it is okay to trigger GC while the GC is updating
     // references.
     //
@@ -380,41 +343,32 @@ pub extern "C" fn rb_yjit_root_mark() {
 
         unsafe { rb_gc_mark(cme) };
     }
-
-    // Mark class and iclass objects
-    for klass in invariants.method_lookup.keys() {
-        // TODO: This is a leak. Unused blocks linger in the table forever, preventing the
-        // callee class they speculate on from being collected.
-        // We could do a bespoke weak reference scheme on classes similar to
-        // the interpreter's call cache. See finalizer for T_CLASS and cc_table_free().
-
-        unsafe { rb_gc_mark(*klass) };
-    }
 }
 
 /// Remove all invariant assumptions made by the block by removing the block as
 /// a key in all of the relevant tables.
-pub fn block_assumptions_free(blockref: &BlockRef) {
+/// For safety, the block has to be initialized and the vm lock must be held.
+/// However, outgoing/incoming references to the block do _not_ need to be valid.
+pub fn block_assumptions_free(blockref: BlockRef) {
     let invariants = Invariants::get_instance();
 
     {
-        let block = blockref.borrow();
+        // SAFETY: caller ensures that this reference is valid
+        let block = unsafe { blockref.as_ref() };
 
         // For each method lookup dependency
         for dep in block.iter_cme_deps() {
             // Remove tracking for cme validity
-            if let Some(blockset) = invariants.cme_validity.get_mut(&dep.callee_cme) {
-                blockset.remove(blockref);
-            }
-
-            // Remove tracking for lookup stability
-            if let Some(id_to_block_set) = invariants.method_lookup.get_mut(&dep.receiver_klass) {
-                let mid = unsafe { (*dep.callee_cme).called_id };
-                if let Some(block_set) = id_to_block_set.get_mut(&mid) {
-                    block_set.remove(&blockref);
+            if let Some(blockset) = invariants.cme_validity.get_mut(&dep) {
+                blockset.remove(&blockref);
+                if blockset.is_empty() {
+                    invariants.cme_validity.remove(&dep);
                 }
             }
         }
+        if invariants.cme_validity.is_empty() {
+            invariants.cme_validity.shrink_to_fit();
+        }
     }
 
     // Remove tracking for basic operators that the given block assumes have
@@ -425,32 +379,68 @@ pub fn block_assumptions_free(blockref: &BlockRef) {
         for key in &bops {
            if let Some(blocks) = invariants.basic_operator_blocks.get_mut(key) {
                 blocks.remove(&blockref);
+                if blocks.is_empty() {
+                    invariants.basic_operator_blocks.remove(key);
+                }
             }
         }
     }
+    if invariants.block_basic_operators.is_empty() {
+        invariants.block_basic_operators.shrink_to_fit();
+    }
+    if invariants.basic_operator_blocks.is_empty() {
+        invariants.basic_operator_blocks.shrink_to_fit();
+    }
 
     // Remove tracking for blocks assuming single ractor mode
     invariants.single_ractor.remove(&blockref);
+    if invariants.single_ractor.is_empty() {
+        invariants.single_ractor.shrink_to_fit();
+    }
 
     // Remove tracking for constant state for a given ID.
     if let Some(ids) = invariants.block_constant_states.remove(&blockref) {
        for id in ids {
            if let Some(blocks) = invariants.constant_state_blocks.get_mut(&id) {
                blocks.remove(&blockref);
+                if blocks.is_empty() {
+                    invariants.constant_state_blocks.remove(&id);
+                }
            }
        }
    }
+    if invariants.block_constant_states.is_empty() {
+        invariants.block_constant_states.shrink_to_fit();
+    }
+    if invariants.constant_state_blocks.is_empty() {
+        invariants.constant_state_blocks.shrink_to_fit();
+    }
+
+    // Remove tracking for blocks assuming no singleton class
+    for (_, blocks) in invariants.no_singleton_classes.iter_mut() {
+        blocks.remove(&blockref);
+    }
 }
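`block_assumptions_free` now prunes entries whose sets become empty and calls `shrink_to_fit` once a table is fully drained, so mass invalidation returns table memory instead of leaving capacity pinned for the life of the process. A small sketch of the prune-then-shrink pattern with simplified key and value types:

```rust
use std::collections::{HashMap, HashSet};

/// Remove `block` from the set stored under `key`, dropping the entry once the
/// set is empty and releasing spare capacity once the whole map is drained.
fn prune(map: &mut HashMap<u64, HashSet<usize>>, key: u64, block: usize) {
    if let Some(blocks) = map.get_mut(&key) {
        blocks.remove(&block);
        if blocks.is_empty() {
            map.remove(&key);
        }
    }
    if map.is_empty() {
        map.shrink_to_fit(); // give the backing allocation back
    }
}

fn main() {
    let mut map: HashMap<u64, HashSet<usize>> = HashMap::new();
    map.entry(42).or_default().insert(7);
    prune(&mut map, 42, 7);
    assert!(map.is_empty());
}
```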
 
 /// Callback from the opt_setinlinecache instruction in the interpreter.
 /// Invalidate the block for the matching opt_getinlinecache so it could regenerate code
 /// using the new value in the constant cache.
 #[no_mangle]
-pub extern "C" fn rb_yjit_constant_ic_update(iseq: *const rb_iseq_t, ic: IC) {
+pub extern "C" fn rb_yjit_constant_ic_update(iseq: *const rb_iseq_t, ic: IC, insn_idx: std::os::raw::c_uint) {
     // If YJIT isn't enabled, do nothing
     if !yjit_enabled_p() {
         return;
     }
 
+    // Try to downcast the iseq index
+    let insn_idx: IseqIdx = if let Ok(idx) = insn_idx.try_into() {
+        idx
+    } else {
+        // The index is too large, YJIT can't possibly have code for it,
+        // so there is nothing to invalidate.
+        return;
+    };
+
     if !unsafe { (*(*ic).entry).ic_cref }.is_null() || unsafe { rb_yjit_multi_ractor_p() } {
         // We can't generate code in these situations, so no need to invalidate.
         // See gen_opt_getinlinecache.
@@ -459,34 +449,33 @@ pub extern "C" fn rb_yjit_constant_ic_update(iseq: *const rb_iseq_t, ic: IC) {
 
     with_vm_lock(src_loc!(), || {
         let code = unsafe { get_iseq_body_iseq_encoded(iseq) };
-        let get_insn_idx = unsafe { (*ic).get_insn_idx };
 
         // This should come from a running iseq, so direct threading translation
         // should have been done
         assert!(unsafe { FL_TEST(iseq.into(), VALUE(ISEQ_TRANSLATED)) } != VALUE(0));
-        assert!(get_insn_idx < unsafe { get_iseq_encoded_size(iseq) });
+        assert!(u32::from(insn_idx) < unsafe { get_iseq_encoded_size(iseq) });
 
-        // Ensure that the instruction the get_insn_idx is pointing to is in
-        // fact a opt_getinlinecache instruction.
+        // Ensure that the instruction the insn_idx is pointing to is in
+        // fact an opt_getconstant_path instruction.
         assert_eq!(
             unsafe {
-                let opcode_pc = code.add(get_insn_idx.as_usize());
+                let opcode_pc = code.add(insn_idx.as_usize());
                 let translated_opcode: VALUE = opcode_pc.read();
                 rb_vm_insn_decode(translated_opcode)
             },
-            OP_OPT_GETINLINECACHE.try_into().unwrap()
+            YARVINSN_opt_getconstant_path.try_into().unwrap()
         );
 
-        // Find the matching opt_getinlinecache and invalidate all the blocks there
-        // RUBY_ASSERT(insn_op_type(BIN(opt_getinlinecache), 1) == TS_IC);
-        let ic_pc = unsafe { code.add(get_insn_idx.as_usize() + 2) };
+        let ic_pc = unsafe { code.add(insn_idx.as_usize() + 1) };
         let ic_operand: IC = unsafe { ic_pc.read() }.as_mut_ptr();
 
         if ic == ic_operand {
             for block in take_version_list(BlockId {
                 iseq,
-                idx: get_insn_idx,
+                idx: insn_idx,
             }) {
                 invalidate_block_version(&block);
                 incr_counter!(invalidate_constant_ic_fill);
@@ -497,6 +486,35 @@ pub extern "C" fn rb_yjit_constant_ic_update(iseq: *const rb_iseq_t, ic: IC) {
     });
 }
 
+/// Invalidate blocks that assume objects of a given class will have no singleton class.
+#[no_mangle]
+pub extern "C" fn rb_yjit_invalidate_no_singleton_class(klass: VALUE) {
+    // Skip tracking singleton classes during boot. Such objects already have a singleton class
+    // before entering JIT code, so they get rejected when they're checked for the first time.
+    if unsafe { INVARIANTS.is_none() } {
+        return;
+    }
+
+    // We apply this optimization only to Array, Hash, and String for now.
+    if unsafe { [rb_cArray, rb_cHash, rb_cString].contains(&klass) } {
+        let no_singleton_classes = &mut Invariants::get_instance().no_singleton_classes;
+        match no_singleton_classes.get_mut(&klass) {
+            Some(blocks) => {
+                // Invalidate existing blocks and let has_singleton_class_of()
+                // return true when they are compiled again
+                for block in mem::take(blocks) {
+                    invalidate_block_version(&block);
+                    incr_counter!(invalidate_no_singleton_class);
+                }
+            }
+            None => {
+                // Let has_singleton_class_of() return true for this class
+                no_singleton_classes.insert(klass, HashSet::new());
+            }
+        }
+    }
+}
+
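`rb_yjit_constant_ic_update` narrows the interpreter's C `uint` index into YJIT's smaller index type up front, treating an out-of-range value as "no compiled code, nothing to invalidate". A sketch of that checked downcast, assuming a `u16`-sized `IseqIdx` as in YJIT's core:

```rust
type IseqIdx = u16; // assumed narrow index type, as in YJIT's core

/// Returns Some(idx) when the interpreter's index fits the JIT's index type,
/// None when it is too large for any compiled block to exist.
fn downcast_insn_idx(insn_idx: u32) -> Option<IseqIdx> {
    insn_idx.try_into().ok()
}

fn main() {
    assert_eq!(downcast_insn_idx(1_000), Some(1_000));
    assert_eq!(downcast_insn_idx(100_000), None); // > u16::MAX, nothing to invalidate
}
```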
 // Invalidate all generated code and patch C method return code to contain
 // logic for firing the c_return TracePoint event. Once rb_vm_barrier()
 // returns, all other ractors are pausing inside RB_VM_LOCK_ENTER(), which
@@ -523,59 +541,78 @@ pub extern "C" fn rb_yjit_tracing_invalidate_all() {
         return;
     }
 
-    use crate::asm::x86_64::jmp_ptr;
-
     // Stop other ractors since we are going to patch machine code.
     with_vm_lock(src_loc!(), || {
         // Make it so all live block versions are no longer valid branch targets
-        unsafe { rb_yjit_for_each_iseq(Some(invalidate_all_blocks_for_tracing)) };
-
-        extern "C" fn invalidate_all_blocks_for_tracing(iseq: IseqPtr) {
-            if let Some(payload) = unsafe { load_iseq_payload(iseq) } {
-                // C comment:
-                //   Leaking the blocks for now since we might have situations where
-                //   a different ractor is waiting for the VM lock in branch_stub_hit().
-                //   If we free the block that ractor can wake up with a dangling block.
-                //
-                // Deviation: since we ref count the blocks now, we might be deallocating and
-                //   not leak the block.
-                //
-                // Empty all blocks on the iseq so we don't compile new blocks that jump to the
-                // invalidated region.
+        let mut on_stack_iseqs = HashSet::new();
+        for_each_on_stack_iseq(|iseq| {
+            on_stack_iseqs.insert(iseq);
+        });
+        for_each_iseq(|iseq| {
+            if let Some(payload) = get_iseq_payload(iseq) {
                 let blocks = payload.take_all_blocks();
-                for blockref in blocks {
-                    block_assumptions_free(&blockref);
+
+                if on_stack_iseqs.contains(&iseq) {
+                    // This ISEQ is running, so we can't free blocks immediately
+                    for block in blocks {
+                        delayed_deallocation(block);
+                    }
+                    payload.dead_blocks.shrink_to_fit();
+                } else {
+                    // Safe to free dead blocks since the ISEQ isn't running
+                    // Since we're freeing _all_ blocks, we don't need to keep the graph well formed
+                    for block in blocks {
+                        unsafe { free_block(block, false) };
+                    }
+                    mem::take(&mut payload.dead_blocks)
+                        .into_iter()
+                        .for_each(|block| unsafe { free_block(block, false) });
                 }
             }
 
             // Reset output code entry point
             unsafe { rb_iseq_reset_jit_func(iseq) };
-        }
+        });
 
         let cb = CodegenGlobals::get_inline_cb();
 
+        // Prevent on-stack frames from jumping to the caller on jit_exec_exception
+        extern "C" {
+            fn rb_yjit_cancel_jit_return(leave_exit: *mut c_void, leave_exception: *mut c_void) -> VALUE;
+        }
+        unsafe {
+            rb_yjit_cancel_jit_return(
+                CodegenGlobals::get_leave_exit_code().raw_ptr(cb) as _,
+                CodegenGlobals::get_leave_exception_code().raw_ptr(cb) as _,
+            );
+        }
+
         // Apply patches
         let old_pos = cb.get_write_pos();
-        let patches = CodegenGlobals::take_global_inval_patches();
+        let old_dropped_bytes = cb.has_dropped_bytes();
+        let mut patches = CodegenGlobals::take_global_inval_patches();
+        patches.sort_by_cached_key(|patch| patch.inline_patch_pos.raw_ptr(cb));
+        let mut last_patch_end = std::ptr::null();
         for patch in &patches {
-            cb.set_write_ptr(patch.inline_patch_pos);
-            jmp_ptr(cb, patch.outlined_target_pos);
+            let patch_pos = patch.inline_patch_pos.raw_ptr(cb);
+            assert!(
+                last_patch_end <= patch_pos,
+                "patches should not overlap (last_patch_end: {last_patch_end:?}, patch_pos: {patch_pos:?})",
+            );
 
-            // FIXME: Can't easily check we actually wrote out the JMP at the moment.
-            // assert!(!cb.has_dropped_bytes(), "patches should have space and jump offsets should fit in JMP rel32");
+            cb.set_write_ptr(patch.inline_patch_pos);
+            cb.set_dropped_bytes(false);
+            cb.without_page_end_reserve(|cb| {
+                let mut asm = crate::backend::ir::Assembler::new();
+                asm.jmp(patch.outlined_target_pos.as_side_exit());
+                if asm.compile(cb, None).is_none() {
+                    panic!("Failed to apply patch at {:?}", patch.inline_patch_pos);
+                }
+            });
+            last_patch_end = cb.get_write_ptr().raw_ptr(cb);
         }
         cb.set_pos(old_pos);
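The patch loop sorts the global invalidation patches by their inline address and asserts that each patch starts at or after the end of the previous one, since overlapping writes would clobber a jump that was just emitted. A standalone sketch of that sort-and-check discipline over plain byte offsets (the `Patch` struct here is hypothetical, not the real patch type):

```rust
// Hypothetical stand-in for a global invalidation patch: each patch
// overwrites `len` bytes starting at `pos` in the inline code buffer.
struct Patch {
    pos: usize,
    len: usize,
}

fn apply_patches(patches: &mut Vec<Patch>) {
    // Sort by position so the overlap check below is a single forward scan.
    patches.sort_by_key(|p| p.pos);
    let mut last_end = 0;
    for patch in patches.iter() {
        assert!(
            last_end <= patch.pos,
            "patches should not overlap (last_end: {last_end}, pos: {})",
            patch.pos
        );
        // ... write the jump at patch.pos here ...
        last_end = patch.pos + patch.len;
    }
}

fn main() {
    let mut patches = vec![Patch { pos: 64, len: 5 }, Patch { pos: 0, len: 5 }];
    apply_patches(&mut patches); // sorted order: 0 then 64, no overlap
}
```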
-
-        // Freeze invalidated part of the codepage. We only want to wait for
-        // running instances of the code to exit from now on, so we shouldn't
-        // change the code. There could be other ractors sleeping in
-        // branch_stub_hit(), for example. We could harden this by changing memory
-        // protection on the frozen range.
-        assert!(
-            CodegenGlobals::get_inline_frozen_bytes() <= old_pos,
-            "frozen bytes should increase monotonically"
-        );
-        CodegenGlobals::set_inline_frozen_bytes(old_pos);
+        cb.set_dropped_bytes(old_dropped_bytes);
 
         CodegenGlobals::get_outlined_cb()
             .unwrap()