summaryrefslogtreecommitdiff
path: root/yjit/src/invariants.rs
diff options
context:
space:
mode:
authorAlan Wu <XrXr@users.noreply.github.com>2022-11-29 16:14:13 -0500
committerMaxime Chevalier-Boisvert <maximechevalierb@gmail.com>2022-11-30 12:23:50 -0500
commita0b0365e905e1ac51998ace7e6fc723406a2f157 (patch)
tree5523f45099f3a5e2c8cf3112f4acc4cf42d5defe /yjit/src/invariants.rs
parentb30248f74a8f6ce37a78f07597c7c5452ff50abd (diff)
YJIT: Deallocate `struct Block` to plug memory leaks
Previously we essentially never freed blocks even after invalidation. Their reference count never reached zero for a couple of reasons: 1. `Branch::block` formed a cycle with the block holding the branch 2. Strong count on a branch that has ever contained a stub never reached 0 because the increment from the `.clone()` call for `BranchRef::into_raw()` didn't have a matching decrement. It's not safe to immediately deallocate blocks during invalidation since `branch_stub_hit()` can end up running with a branch pointer from an invalidated branch. To plug the leaks, we wait until code GC or global invalidation and deallocate the blocks for iseqs that are definitely not running.
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/6833
Diffstat (limited to 'yjit/src/invariants.rs')
-rw-r--r--yjit/src/invariants.rs31
1 files changed, 19 insertions, 12 deletions
diff --git a/yjit/src/invariants.rs b/yjit/src/invariants.rs
index 5a44ce1bfc..6ad87b9d85 100644
--- a/yjit/src/invariants.rs
+++ b/yjit/src/invariants.rs
@@ -497,21 +497,28 @@ pub extern "C" fn rb_yjit_tracing_invalidate_all() {
// Stop other ractors since we are going to patch machine code.
with_vm_lock(src_loc!(), || {
// Make it so all live block versions are no longer valid branch targets
+ let mut on_stack_iseqs = HashSet::new();
+ for_each_on_stack_iseq(|iseq| {
+ on_stack_iseqs.insert(iseq);
+ });
for_each_iseq(|iseq| {
if let Some(payload) = get_iseq_payload(iseq) {
- // C comment:
- // Leaking the blocks for now since we might have situations where
- // a different ractor is waiting for the VM lock in branch_stub_hit().
- // If we free the block that ractor can wake up with a dangling block.
- //
- // Deviation: since we ref count the the blocks now, we might be deallocating and
- // not leak the block.
- //
- // Empty all blocks on the iseq so we don't compile new blocks that jump to the
- // invalidated region.
let blocks = payload.take_all_blocks();
- for blockref in blocks {
- block_assumptions_free(&blockref);
+
+ if on_stack_iseqs.contains(&iseq) {
+ // This ISEQ is running, so we can't free blocks immediately
+ for block in blocks {
+ delayed_deallocation(&block);
+ }
+ payload.dead_blocks.shrink_to_fit();
+ } else {
+ // Safe to free dead blocks since the ISEQ isn't running
+ for block in blocks {
+ free_block(&block);
+ }
+ mem::take(&mut payload.dead_blocks)
+ .iter()
+ .for_each(free_block);
}
}