author     Takashi Kokubun <takashikkbn@gmail.com>  2023-02-20 09:06:09 -0800
committer  GitHub <noreply@github.com>              2023-02-20 09:06:09 -0800
commit     ecd0cdaf820af789f355f1a18c31d6adfe8aad94 (patch)
tree       c8756096eec15a4debd1b777b35b7a89c9875436
parent     b326a5f3ddf97161039a15fbb49e25b512e6efc8 (diff)
YJIT: Fix assertion for partially mapped last pages (#7337)
Follows up [Bug #19400]
Notes: Merged-By: k0kubun <takashikkbn@gmail.com>
-rw-r--r--  test/ruby/test_yjit.rb  19
-rw-r--r--  yjit/src/asm/mod.rs      2
-rw-r--r--  yjit/src/virtualmem.rs  18
3 files changed, 33 insertions, 6 deletions
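
For context before the hunks: the change distinguishes the memory that is merely reserved for the code region (its virtual size) from the prefix that is actually mapped with physical memory. The sketch below uses illustrative names and sizes, not YJIT's real VirtualMemory type, to show why the last code page can end up only partially mapped when code pages are larger than memory pages.

// Minimal sketch (illustrative names, not YJIT's real VirtualMemory type) of the
// mapped-vs-virtual distinction this commit introduces.
struct RegionSketch {
    region_start: usize,        // base address of the reservation
    region_size_bytes: usize,   // total reserved (virtual) size
    mapped_region_bytes: usize, // prefix mapped with physical memory so far
}

impl RegionSketch {
    // End of what is actually mapped today (what mapped_end_ptr() reports).
    fn mapped_end(&self) -> usize {
        self.region_start + self.mapped_region_bytes
    }

    // End of the whole reservation (what virtual_end_ptr() reports).
    fn virtual_end(&self) -> usize {
        self.region_start + self.region_size_bytes
    }
}

fn main() {
    // Hypothetical sizes: a 16 KiB code page over 4 KiB memory pages. If only
    // 12 KiB of the last code page got mapped, that page is partially mapped.
    let r = RegionSketch {
        region_start: 0x1000,
        region_size_bytes: 16 * 1024,
        mapped_region_bytes: 12 * 1024,
    };
    assert!(r.mapped_end() < r.virtual_end()); // partially mapped last code page
}
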
diff --git a/test/ruby/test_yjit.rb b/test/ruby/test_yjit.rb
index ce51dd83f7..3ab04f6fe9 100644
--- a/test/ruby/test_yjit.rb
+++ b/test/ruby/test_yjit.rb
@@ -974,6 +974,25 @@ class TestYJIT < Test::Unit::TestCase
     RUBY
   end
 
+  def test_code_gc_partial_last_page
+    # call_threshold: 2 to avoid JIT-ing code_gc itself. If code_gc were JITed right before
+    # code_gc is called, the last page would be on stack.
+    assert_compiles(<<~'RUBY', exits: :any, result: :ok, call_threshold: 2)
+      # Leave a bunch of off-stack pages
+      i = 0
+      while i < 1000
+        eval("x = proc { 1.to_s }; x.call; x.call")
+        i += 1
+      end
+
+      # On Linux, memory page size != code page size. So the last code page could be partially
+      # mapped. This call tests that assertions and other things work fine under the situation.
+      RubyVM::YJIT.code_gc
+
+      :ok
+    RUBY
+  end
+
   def test_trace_script_compiled # not ISEQ_TRACE_EVENTS
     assert_compiles(<<~'RUBY', exits: :any, result: :ok)
       @eval_counter = 0
diff --git a/yjit/src/asm/mod.rs b/yjit/src/asm/mod.rs
index 1c43beae02..7f95270b78 100644
--- a/yjit/src/asm/mod.rs
+++ b/yjit/src/asm/mod.rs
@@ -423,7 +423,7 @@ impl CodeBlock {
     /// Convert an address range to memory page indexes against a num_pages()-sized array.
     pub fn addrs_to_pages(&self, start_addr: CodePtr, end_addr: CodePtr) -> Vec<usize> {
         let mem_start = self.mem_block.borrow().start_ptr().into_usize();
-        let mem_end = self.mem_block.borrow().end_ptr().into_usize();
+        let mem_end = self.mem_block.borrow().mapped_end_ptr().into_usize();
         assert!(mem_start <= start_addr.into_usize());
         assert!(start_addr.into_usize() <= end_addr.into_usize());
         assert!(end_addr.into_usize() <= mem_end);
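
This hunk only renames the accessor: the old end_ptr() already returned the mapped end (see the virtualmem.rs hunk below), so addrs_to_pages keeps bounds-checking against the mapped region under the clearer name mapped_end_ptr(). A hedged sketch of that kind of page-index conversion follows, with made-up names and simplified page math rather than the real CodeBlock code.

// Hedged sketch of an addrs_to_pages-style conversion (names and page math are
// illustrative, not the real CodeBlock implementation). The bounds checks mirror
// the hunk above, using the mapped end rather than the virtual end.
fn addrs_to_pages_sketch(
    mem_start: usize,
    mapped_end: usize,
    page_size: usize,
    start_addr: usize,
    end_addr: usize,
) -> Vec<usize> {
    assert!(mem_start <= start_addr);
    assert!(start_addr <= end_addr);
    assert!(end_addr <= mapped_end); // formerly end_ptr(), now mapped_end_ptr()

    // Collect the indexes of the code pages the [start_addr, end_addr) range touches.
    let start_page = (start_addr - mem_start) / page_size;
    let end_page = (end_addr - mem_start) / page_size;
    (start_page..end_page).collect()
}

fn main() {
    // Example: 0x1000-byte pages starting at 0x10000; a range spanning two pages.
    let pages = addrs_to_pages_sketch(0x10000, 0x20000, 0x1000, 0x10800, 0x12000);
    assert_eq!(pages, vec![0, 1]);
}
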
diff --git a/yjit/src/virtualmem.rs b/yjit/src/virtualmem.rs
index 33194b09a3..d6e5089dac 100644
--- a/yjit/src/virtualmem.rs
+++ b/yjit/src/virtualmem.rs
@@ -101,8 +101,12 @@ impl<A: Allocator> VirtualMemory<A> {
         CodePtr(self.region_start)
     }
 
-    pub fn end_ptr(&self) -> CodePtr {
-        CodePtr(NonNull::new(self.region_start.as_ptr().wrapping_add(self.mapped_region_bytes)).unwrap())
+    pub fn mapped_end_ptr(&self) -> CodePtr {
+        self.start_ptr().add_bytes(self.mapped_region_bytes)
+    }
+
+    pub fn virtual_end_ptr(&self) -> CodePtr {
+        self.start_ptr().add_bytes(self.region_size_bytes)
     }
 
     /// Size of the region in bytes that we have allocated physical memory for.
@@ -208,10 +212,14 @@ impl<A: Allocator> VirtualMemory<A> {
         assert_eq!(start_ptr.into_usize() % self.page_size_bytes, 0);
 
         // Bounds check the request. We should only free memory we manage.
-        let region_range = self.region_start.as_ptr() as *const u8..self.end_ptr().raw_ptr();
+        let mapped_region = self.start_ptr().raw_ptr()..self.mapped_end_ptr().raw_ptr();
+        let virtual_region = self.start_ptr().raw_ptr()..self.virtual_end_ptr().raw_ptr();
         let last_byte_to_free = start_ptr.add_bytes(size.saturating_sub(1).as_usize()).raw_ptr();
-        assert!(region_range.contains(&start_ptr.raw_ptr()));
-        assert!(region_range.contains(&last_byte_to_free));
+        assert!(mapped_region.contains(&start_ptr.raw_ptr()));
+        // On platforms where code page size != memory page size (e.g. Linux), we often need
+        // to free code pages that contain unmapped memory pages. When it happens on the last
+        // code page, it's more appropriate to check the last byte against the virtual region.
+        assert!(virtual_region.contains(&last_byte_to_free));
 
         self.allocator.mark_unused(start_ptr.0.as_ptr(), size);
     }
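
The comment in the last hunk carries the actual fix: the start of a freed range must lie in the mapped region, but its last byte may fall in the reserved-but-unmapped tail of the last code page, so it is checked against the virtual region instead. Below is a standalone sketch of that bounds check, using plain usize addresses and made-up parameter names in place of CodePtr and the real VirtualMemory API.

// Hedged sketch of the free_bytes() bounds check above (plain usize addresses
// instead of CodePtr, and illustrative parameter names): the start must be
// mapped, but the last byte may reach past the mapped end into the unmapped
// tail of the last code page, so it is checked against the virtual region.
fn check_free_bounds_sketch(
    start: usize,
    size: usize,
    region_start: usize,
    mapped_end: usize,
    virtual_end: usize,
) {
    let mapped_region = region_start..mapped_end;
    let virtual_region = region_start..virtual_end;
    let last_byte_to_free = start + size.saturating_sub(1);

    assert!(mapped_region.contains(&start));
    assert!(virtual_region.contains(&last_byte_to_free));
}

fn main() {
    // Example: freeing a whole 16 KiB code page of which only 12 KiB is mapped.
    let (region_start, mapped_end, virtual_end) = (0x1000, 0x1000 + 12 * 1024, 0x1000 + 16 * 1024);
    check_free_bounds_sketch(region_start, 16 * 1024, region_start, mapped_end, virtual_end);
}

With the old single region_range (which ended at the mapped end), the second assertion would have fired in exactly this situation, which is the failure the commit title refers to.
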