author     Takashi Kokubun <takashikkbn@gmail.com>  2023-12-07 14:53:05 -0800
committer  GitHub <noreply@github.com>              2023-12-07 22:53:05 +0000
commit     ac5fd58700b5576bfdd89ffe06dd4302b6721e5f (patch)
tree       ecfcd1027b4fb3f0dea5153206322b95f054d0bc
parent     2755cb1b2fbc4a5f08ca56345b5945bd452da74e (diff)
YJIT: Fix on-stack ISEQ comparison for auto_compact (#9164)
-rw-r--r--  test/ruby/test_yjit.rb  25
-rw-r--r--  yjit/src/core.rs        19
2 files changed, 38 insertions(+), 6 deletions(-)
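The patch below reorders the two traversals in for_each_off_stack_iseq_payload(). Previously the on-stack ISEQ pointers were collected first and the heap walk (via rb_objspace_each_objects(), which runs GC) came second; with GC.auto_compact = true that GC could move ISEQs after the snapshot was taken, so the later pointer comparison missed the moved objects and code that was still on stack could be treated as off-stack. A minimal standalone sketch of that failure mode, using plain integers as stand-in addresses rather than real IseqPtr values (not CRuby code):

    use std::collections::HashSet;

    fn main() {
        // Snapshot of "on-stack" ISEQ pointers taken before the heap walk;
        // the ISEQ lived at 0x2000 at that point (hypothetical address).
        let stale_on_stack: HashSet<usize> = [0x2000].into_iter().collect();

        // The heap walk then runs GC; with auto_compact the object moves,
        // so the walk reports the new address 0x2800 instead.
        let heap_iseqs = vec![0x1000, 0x2800];

        for iseq in heap_iseqs {
            // 0x2800 is not in the stale set, so the still-running ISEQ
            // would be handed to the "off-stack" callback and its code freed.
            println!("{:#x} treated as off-stack: {}", iseq, !stale_on_stack.contains(&iseq));
        }
    }
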
diff --git a/test/ruby/test_yjit.rb b/test/ruby/test_yjit.rb
index e87e062948..de96a85ded 100644
--- a/test/ruby/test_yjit.rb
+++ b/test/ruby/test_yjit.rb
@@ -1102,6 +1102,31 @@ class TestYJIT < Test::Unit::TestCase
RUBY
end
+ def test_code_gc_with_auto_compact
+ assert_compiles((code_gc_helpers + <<~'RUBY'), exits: :any, result: :ok, mem_size: 1, code_gc: true)
+ # Test ISEQ moves in the middle of code GC
+ GC.auto_compact = true
+
+ fiber = Fiber.new {
+ # Loop to call the same basic block again after Fiber.yield
+ while true
+ Fiber.yield(nil.to_i)
+ end
+ }
+
+ return :not_paged1 unless add_pages(250) # use some pages
+ return :broken_resume1 if fiber.resume != 0 # leave an on-stack code as well
+
+ add_pages(2000) # use a whole lot of pages to run out of 1MiB
+ return :broken_resume2 if fiber.resume != 0 # on-stack code should be callable
+
+ code_gc_count = RubyVM::YJIT.runtime_stats[:code_gc_count]
+ return :"code_gc_#{code_gc_count}" if code_gc_count == 0
+
+ :ok
+ RUBY
+ end
+
def test_code_gc_partial_last_page
# call_threshold: 2 to avoid JIT-ing code_gc itself. If code_gc were JITed right before
# code_gc is called, the last page would be on stack.
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index 8e8d246a3a..f84286bd2f 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -1095,17 +1095,24 @@ pub fn for_each_on_stack_iseq_payload<F: FnMut(&IseqPayload)>(mut callback: F) {
/// Iterate over all NOT on-stack ISEQ payloads
pub fn for_each_off_stack_iseq_payload<F: FnMut(&mut IseqPayload)>(mut callback: F) {
- let mut on_stack_iseqs: Vec<IseqPtr> = vec![];
- for_each_on_stack_iseq(|iseq| {
- on_stack_iseqs.push(iseq);
- });
- for_each_iseq(|iseq| {
+ // Get all ISEQs on the heap. Note that rb_objspace_each_objects() runs GC first,
+ // which could move ISEQ pointers when GC.auto_compact = true.
+ // So for_each_on_stack_iseq() must be called after this, which doesn't run GC.
+ let mut iseqs: Vec<IseqPtr> = vec![];
+ for_each_iseq(|iseq| iseqs.push(iseq));
+
+ // Get all ISEQs that are on a CFP of existing ECs.
+ let mut on_stack_iseqs: HashSet<IseqPtr> = HashSet::new();
+ for_each_on_stack_iseq(|iseq| { on_stack_iseqs.insert(iseq); });
+
+ // Invoke the callback for iseqs - on_stack_iseqs
+ for iseq in iseqs {
if !on_stack_iseqs.contains(&iseq) {
if let Some(iseq_payload) = get_iseq_payload(iseq) {
callback(iseq_payload);
}
}
- })
+ }
}
/// Free the per-iseq payload
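
For reference, the corrected ordering amounts to a set difference computed only after the GC-triggering heap walk has finished. A standalone sketch of that pattern, again with integers standing in for IseqPtr values and println! in place of the payload callback:

    use std::collections::HashSet;

    fn main() {
        // Heap walk first (the pass that may run GC and move objects),
        // so every address below is already post-compaction.
        let all_iseqs: Vec<usize> = vec![0x1000, 0x2800, 0x3000];

        // On-stack snapshot taken afterwards, so it sees the same addresses.
        let on_stack_iseqs: HashSet<usize> = [0x2800].into_iter().collect();

        // Invoke the callback for iseqs - on_stack_iseqs, as in the patch.
        for iseq in all_iseqs {
            if !on_stack_iseqs.contains(&iseq) {
                println!("off-stack iseq: {:#x}", iseq);
            }
        }
    }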