summaryrefslogtreecommitdiff
path: root/yjit
diff options
context:
space:
mode:
authorTakashi Kokubun <takashikkbn@gmail.com>2022-10-31 11:29:45 -0700
committerGitHub <noreply@github.com>2022-10-31 14:29:45 -0400
commit2b39640b0bbf7459b305d8a98bb01f197975b8d9 (patch)
tree3b605cd8f90c2c42cea97ee50f917c5d0f75d525 /yjit
parent5e6633fcf988e5874d0a9929bdf2cd496289e75d (diff)
YJIT: Add RubyVM::YJIT.code_gc (#6644)
* YJIT: Add RubyVM::YJIT.code_gc * Rename compiled_page_count to live_page_count
Notes
Notes: Merged-By: maximecb <maximecb@ruby-lang.org>
Diffstat (limited to 'yjit')
-rw-r--r--yjit/src/asm/mod.rs24
-rw-r--r--yjit/src/stats.rs4
-rw-r--r--yjit/src/yjit.rs12
3 files changed, 32 insertions, 8 deletions
diff --git a/yjit/src/asm/mod.rs b/yjit/src/asm/mod.rs
index b68520a767..7ac3625fbd 100644
--- a/yjit/src/asm/mod.rs
+++ b/yjit/src/asm/mod.rs
@@ -209,17 +209,25 @@ impl CodeBlock {
self.page_size
}
- /// Return the number of code pages that have been allocated by the VirtualMemory.
- pub fn num_pages(&self) -> usize {
+ /// Return the number of code pages that have been mapped by the VirtualMemory.
+ pub fn num_mapped_pages(&self) -> usize {
let mapped_region_size = self.mem_block.borrow().mapped_region_size();
// CodeBlock's page size != VirtualMem's page size on Linux,
// so mapped_region_size % self.page_size may not be 0
((mapped_region_size - 1) / self.page_size) + 1
}
+ /// Return the number of code pages that have been reserved by the VirtualMemory.
+ pub fn num_virtual_pages(&self) -> usize {
+ let virtual_region_size = self.mem_block.borrow().virtual_region_size();
+ // CodeBlock's page size != VirtualMem's page size on Linux,
+ // so virtual_region_size % self.page_size may not be 0
+ ((virtual_region_size - 1) / self.page_size) + 1
+ }
+
/// Return the number of code pages that have been freed and not used yet.
pub fn num_freed_pages(&self) -> usize {
- (0..self.num_pages()).filter(|&page_idx| self.has_freed_page(page_idx)).count()
+ (0..self.num_mapped_pages()).filter(|&page_idx| self.has_freed_page(page_idx)).count()
}
pub fn has_freed_page(&self, page_idx: usize) -> bool {
@@ -303,7 +311,7 @@ impl CodeBlock {
pub fn code_size(&self) -> usize {
let mut size = 0;
let current_page_idx = self.write_pos / self.page_size;
- for page_idx in 0..self.num_pages() {
+ for page_idx in 0..self.num_mapped_pages() {
if page_idx == current_page_idx {
// Count only actually used bytes for the current page.
size += (self.write_pos % self.page_size).saturating_sub(self.page_start());
@@ -546,7 +554,7 @@ impl CodeBlock {
}
// Check which pages are still in use
- let mut pages_in_use = vec![false; self.num_pages()];
+ let mut pages_in_use = vec![false; self.num_mapped_pages()];
// For each ISEQ, we currently assume that only code pages used by inline code
// are used by outlined code, so we mark only code pages used by inlined code.
for_each_on_stack_iseq_payload(|iseq_payload| {
@@ -560,10 +568,14 @@ impl CodeBlock {
}
// Let VirtualMem free the pages
- let freed_pages: Vec<usize> = pages_in_use.iter().enumerate()
+ let mut freed_pages: Vec<usize> = pages_in_use.iter().enumerate()
.filter(|&(_, &in_use)| !in_use).map(|(page, _)| page).collect();
self.free_pages(&freed_pages);
+ // Append virtual pages in case RubyVM::YJIT.code_gc is manually triggered.
+ let mut virtual_pages: Vec<usize> = (self.num_mapped_pages()..self.num_virtual_pages()).collect();
+ freed_pages.append(&mut virtual_pages);
+
// Invalidate everything to have more compact code after code GC.
// This currently patches every ISEQ, which works, but in the future,
// we could limit that to patch only on-stack ISEQs for optimizing code GC.
diff --git a/yjit/src/stats.rs b/yjit/src/stats.rs
index e851d4e4d1..e07b475a9f 100644
--- a/yjit/src/stats.rs
+++ b/yjit/src/stats.rs
@@ -381,8 +381,8 @@ fn rb_yjit_gen_stats_dict() -> VALUE {
// GCed code size
hash_aset_usize!(hash, "freed_code_size", freed_page_count * cb.page_size());
- // Compiled pages
- hash_aset_usize!(hash, "compiled_page_count", cb.num_pages() - freed_page_count);
+ // Live pages
+ hash_aset_usize!(hash, "live_page_count", cb.num_mapped_pages() - freed_page_count);
}
// If we're not generating stats, the hash is done
diff --git a/yjit/src/yjit.rs b/yjit/src/yjit.rs
index 5cd23f066f..4850dca7a8 100644
--- a/yjit/src/yjit.rs
+++ b/yjit/src/yjit.rs
@@ -79,6 +79,18 @@ pub extern "C" fn rb_yjit_iseq_gen_entry_point(iseq: IseqPtr, ec: EcPtr) -> *con
}
}
+/// Free and recompile all existing JIT code
+#[no_mangle]
+pub extern "C" fn rb_yjit_code_gc(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
+ if !yjit_enabled_p() {
+ return Qnil;
+ }
+
+ let cb = CodegenGlobals::get_inline_cb();
+ cb.code_gc();
+ Qnil
+}
+
/// Simulate a situation where we are out of executable memory
#[no_mangle]
pub extern "C" fn rb_yjit_simulate_oom_bang(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {