-rw-r--r--  doc/yjit/yjit.md        3
-rw-r--r--  yjit/src/asm/mod.rs     4
-rw-r--r--  yjit/src/codegen.rs    11
-rw-r--r--  yjit/src/options.rs    36
-rw-r--r--  yjit/src/stats.rs       7
-rw-r--r--  yjit/src/virtualmem.rs 22
6 files changed, 62 insertions(+), 21 deletions(-)
diff --git a/doc/yjit/yjit.md b/doc/yjit/yjit.md
index c63d31b64f..906a827d51 100644
--- a/doc/yjit/yjit.md
+++ b/doc/yjit/yjit.md
@@ -166,7 +166,8 @@ The machine code generated for a given method can be printed by adding `puts Rub
YJIT supports all command-line options supported by upstream CRuby, but also adds a few YJIT-specific options:
- `--yjit`: enable YJIT (disabled by default)
-- `--yjit-exec-mem-size=N`: size of the executable memory block to allocate, in MiB (default 48 MiB)
+- `--yjit-mem-size=N`: soft limit on YJIT memory usage in MiB (default: 128). Tries to limit `code_region_size + yjit_alloc_size`
+- `--yjit-exec-mem-size=N`: hard limit on executable memory block in MiB. Limits `code_region_size`
- `--yjit-call-threshold=N`: number of calls after which YJIT begins to compile a function.
It defaults to 30, and it's then increased to 120 when the number of ISEQs in the process reaches 40,000.
- `--yjit-cold-threshold=N`: number of global calls after which an ISEQ is considered cold and not
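
As a rough sketch of how the two limits documented in the hunk above are meant to interact: `--yjit-mem-size` bounds `code_region_size + yjit_alloc_size`, `--yjit-exec-mem-size` bounds `code_region_size` alone, and (as the codegen.rs hunk further down shows) the hard limit falls back to the soft limit when it is not given. The function below and its `main` are illustrative only, not part of the patch:

```rust
/// Illustrative predicate, not YJIT code. The parameter names mirror the
/// option names and stats keys used in this patch.
fn within_limits(
    code_region_size: usize,      // bytes of mapped executable memory
    yjit_alloc_size: usize,       // bytes YJIT has allocated on the Rust heap
    mem_size: usize,              // --yjit-mem-size, soft limit in bytes
    exec_mem_size: Option<usize>, // --yjit-exec-mem-size, hard limit in bytes
) -> bool {
    // Hard limit on executable memory; falls back to the soft limit if unset.
    let hard_limit = exec_mem_size.unwrap_or(mem_size);
    // Soft limit covers executable memory plus Rust heap usage.
    code_region_size <= hard_limit && code_region_size + yjit_alloc_size <= mem_size
}

fn main() {
    let mib = 1024 * 1024;
    // 100 MiB of code plus 20 MiB of metadata fits under the default 128 MiB...
    assert!(within_limits(100 * mib, 20 * mib, 128 * mib, None));
    // ...but 40 MiB of metadata pushes the total over the soft limit.
    assert!(!within_limits(100 * mib, 40 * mib, 128 * mib, None));
}
```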
diff --git a/yjit/src/asm/mod.rs b/yjit/src/asm/mod.rs
index a113a41c73..ed6feb3174 100644
--- a/yjit/src/asm/mod.rs
+++ b/yjit/src/asm/mod.rs
@@ -686,7 +686,7 @@ impl CodeBlock {
let alloc = TestingAllocator::new(mem_size);
let mem_start: *const u8 = alloc.mem_start();
- let virt_mem = VirtualMem::new(alloc, 1, NonNull::new(mem_start as *mut u8).unwrap(), mem_size);
+ let virt_mem = VirtualMem::new(alloc, 1, NonNull::new(mem_start as *mut u8).unwrap(), mem_size, 128 * 1024 * 1024);
Self::new(Rc::new(RefCell::new(virt_mem)), false, Rc::new(None), true)
}
@@ -704,7 +704,7 @@ impl CodeBlock {
let alloc = TestingAllocator::new(mem_size);
let mem_start: *const u8 = alloc.mem_start();
- let virt_mem = VirtualMem::new(alloc, 1, NonNull::new(mem_start as *mut u8).unwrap(), mem_size);
+ let virt_mem = VirtualMem::new(alloc, 1, NonNull::new(mem_start as *mut u8).unwrap(), mem_size, 128 * 1024 * 1024);
Self::new(Rc::new(RefCell::new(virt_mem)), false, Rc::new(Some(freed_pages)), true)
}
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index d3aac992aa..746a46903c 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -10504,11 +10504,11 @@ impl CodegenGlobals {
/// Initialize the codegen globals
pub fn init() {
// Executable memory and code page size in bytes
- let mem_size = get_option!(exec_mem_size);
+ let exec_mem_size = get_option!(exec_mem_size).unwrap_or(get_option!(mem_size));
#[cfg(not(test))]
let (mut cb, mut ocb) = {
- let virt_block: *mut u8 = unsafe { rb_yjit_reserve_addr_space(mem_size as u32) };
+ let virt_block: *mut u8 = unsafe { rb_yjit_reserve_addr_space(exec_mem_size as u32) };
// Memory protection syscalls need page-aligned addresses, so check it here. Assuming
// `virt_block` is page-aligned, `second_half` should be page-aligned as long as the
@@ -10530,7 +10530,8 @@ impl CodegenGlobals {
SystemAllocator {},
page_size,
NonNull::new(virt_block).unwrap(),
- mem_size,
+ exec_mem_size,
+ get_option!(mem_size),
);
let mem_block = Rc::new(RefCell::new(mem_block));
@@ -10546,9 +10547,9 @@ impl CodegenGlobals {
// In test mode we're not linking with the C code
// so we don't allocate executable memory
#[cfg(test)]
- let mut cb = CodeBlock::new_dummy(mem_size / 2);
+ let mut cb = CodeBlock::new_dummy(exec_mem_size / 2);
#[cfg(test)]
- let mut ocb = OutlinedCb::wrap(CodeBlock::new_dummy(mem_size / 2));
+ let mut ocb = OutlinedCb::wrap(CodeBlock::new_dummy(exec_mem_size / 2));
let ocb_start_addr = ocb.unwrap().get_write_ptr();
let leave_exit_code = gen_leave_exit(&mut ocb).unwrap();
diff --git a/yjit/src/options.rs b/yjit/src/options.rs
index 85d068595a..c91c365738 100644
--- a/yjit/src/options.rs
+++ b/yjit/src/options.rs
@@ -27,9 +27,14 @@ pub static mut rb_yjit_cold_threshold: u64 = 200_000;
#[derive(Debug)]
#[repr(C)]
pub struct Options {
- // Size of the executable memory block to allocate in bytes
- // Note that the command line argument is expressed in MiB and not bytes
- pub exec_mem_size: usize,
+ /// Soft limit of all memory used by YJIT in bytes
+ /// VirtualMem avoids allocating new pages if code_region_size + yjit_alloc_size
+ /// is larger than this threshold. Rust may still allocate memory beyond this limit.
+ pub mem_size: usize,
+
+ /// Hard limit of the executable memory block to allocate in bytes
+ /// Note that the command line argument is expressed in MiB and not bytes
+ pub exec_mem_size: Option<usize>,
// Disable the propagation of type information
pub no_type_prop: bool,
@@ -81,7 +86,8 @@ pub struct Options {
// Initialize the options to default values
pub static mut OPTIONS: Options = Options {
- exec_mem_size: 48 * 1024 * 1024,
+ mem_size: 128 * 1024 * 1024,
+ exec_mem_size: None,
no_type_prop: false,
max_versions: 4,
num_temp_regs: 5,
@@ -100,8 +106,10 @@ pub static mut OPTIONS: Options = Options {
};
/// YJIT option descriptions for `ruby --help`.
-static YJIT_OPTIONS: [(&str, &str); 9] = [
- ("--yjit-exec-mem-size=num", "Size of executable memory block in MiB (default: 48)."),
+/// Note that --help allows only 80 characters per line, including indentation. 80-character limit --> |
+pub const YJIT_OPTIONS: &'static [(&str, &str)] = &[
+ ("--yjit-mem-size=num", "Soft limit on YJIT memory usage in MiB (default: 128)."),
+ ("--yjit-exec-mem-size=num", "Hard limit on executable memory block in MiB."),
("--yjit-call-threshold=num", "Number of calls to trigger JIT."),
("--yjit-cold-threshold=num", "Global calls after which ISEQs not compiled (default: 200K)."),
("--yjit-stats", "Enable collecting YJIT statistics."),
@@ -183,6 +191,20 @@ pub fn parse_option(str_ptr: *const std::os::raw::c_char) -> Option<()> {
match (opt_name, opt_val) {
("", "") => (), // Simply --yjit
+ ("mem-size", _) => match opt_val.parse::<usize>() {
+ Ok(n) => {
+ if n == 0 || n > 2 * 1024 * 1024 {
+ return None
+ }
+
+ // Convert from MiB to bytes internally for convenience
+ unsafe { OPTIONS.mem_size = n * 1024 * 1024 }
+ }
+ Err(_) => {
+ return None;
+ }
+ },
+
("exec-mem-size", _) => match opt_val.parse::<usize>() {
Ok(n) => {
if n == 0 || n > 2 * 1024 * 1024 {
@@ -190,7 +212,7 @@ pub fn parse_option(str_ptr: *const std::os::raw::c_char) -> Option<()> {
}
// Convert from MiB to bytes internally for convenience
- unsafe { OPTIONS.exec_mem_size = n * 1024 * 1024 }
+ unsafe { OPTIONS.exec_mem_size = Some(n * 1024 * 1024) }
}
Err(_) => {
return None;
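
The new `mem-size` branch above mirrors the existing `exec-mem-size` parsing: the value is given in MiB, rejected when it is zero or above 2 * 1024 * 1024 MiB (2 TiB), and stored in bytes. A stand-alone restatement of that validation, for illustration only (`parse_mem_size_mib` is not a function in the patch):

```rust
/// Parse a size option given in MiB and return it in bytes, or None for
/// an invalid value, using the same bounds as the parsing above.
fn parse_mem_size_mib(opt_val: &str) -> Option<usize> {
    let n = opt_val.parse::<usize>().ok()?;
    // Reject 0 and anything above 2 * 1024 * 1024 MiB (2 TiB).
    if n == 0 || n > 2 * 1024 * 1024 {
        return None;
    }
    // Convert from MiB to bytes internally for convenience
    Some(n * 1024 * 1024)
}

fn main() {
    assert_eq!(parse_mem_size_mib("128"), Some(128 * 1024 * 1024));
    assert_eq!(parse_mem_size_mib("0"), None);
    assert_eq!(parse_mem_size_mib("not-a-number"), None);
}
```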
diff --git a/yjit/src/stats.rs b/yjit/src/stats.rs
index c49f8af553..a6dffe9103 100644
--- a/yjit/src/stats.rs
+++ b/yjit/src/stats.rs
@@ -56,6 +56,11 @@ unsafe impl GlobalAlloc for StatsAlloc {
}
}
+/// The number of bytes YJIT has allocated on the Rust heap.
+pub fn yjit_alloc_size() -> usize {
+ GLOBAL_ALLOCATOR.alloc_size.load(Ordering::SeqCst)
+}
+
/// Mapping of C function / ISEQ name to integer indices
/// This is accessed at compilation time only (protected by a lock)
static mut CFUNC_NAME_TO_IDX: Option<HashMap<String, usize>> = None;
@@ -770,7 +775,7 @@ fn rb_yjit_gen_stats_dict(key: VALUE) -> VALUE {
set_stat_usize!(hash, "code_region_size", cb.mapped_region_size());
// Rust global allocations in bytes
- set_stat_usize!(hash, "yjit_alloc_size", GLOBAL_ALLOCATOR.alloc_size.load(Ordering::SeqCst));
+ set_stat_usize!(hash, "yjit_alloc_size", yjit_alloc_size());
// How many bytes we are using to store context data
let context_data = CodegenGlobals::get_context_data();
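
`yjit_alloc_size()` simply reads the atomic byte counter that the `GLOBAL_ALLOCATOR` (the `StatsAlloc` shown above) maintains for Rust heap allocations. The sketch below shows the general shape of such a counting `GlobalAlloc` wrapper; it is illustrative and does not reproduce the real `StatsAlloc`:

```rust
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

/// Minimal byte-counting wrapper around the system allocator
/// (a sketch of the idea, not YJIT's StatsAlloc).
struct CountingAlloc {
    alloc_size: AtomicUsize,
}

unsafe impl GlobalAlloc for CountingAlloc {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Count the requested bytes, then forward to the system allocator.
        self.alloc_size.fetch_add(layout.size(), Ordering::SeqCst);
        System.alloc(layout)
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.alloc_size.fetch_sub(layout.size(), Ordering::SeqCst);
        System.dealloc(ptr, layout)
    }
}

#[global_allocator]
static COUNTER: CountingAlloc = CountingAlloc { alloc_size: AtomicUsize::new(0) };

/// Read the tracked heap usage, analogous to yjit_alloc_size().
fn tracked_alloc_size() -> usize {
    COUNTER.alloc_size.load(Ordering::SeqCst)
}

fn main() {
    let buf: Vec<u8> = Vec::with_capacity(4096); // goes through CountingAlloc
    println!("tracked Rust heap usage: {} bytes", tracked_alloc_size());
    drop(buf);
}
```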
diff --git a/yjit/src/virtualmem.rs b/yjit/src/virtualmem.rs
index f3c0ceefff..f56b0d8213 100644
--- a/yjit/src/virtualmem.rs
+++ b/yjit/src/virtualmem.rs
@@ -5,7 +5,7 @@
use std::ptr::NonNull;
-use crate::{utils::IntoUsize, backend::ir::Target};
+use crate::{backend::ir::Target, stats::yjit_alloc_size, utils::IntoUsize};
#[cfg(not(test))]
pub type VirtualMem = VirtualMemory<sys::SystemAllocator>;
@@ -26,9 +26,12 @@ pub struct VirtualMemory<A: Allocator> {
/// Location of the virtual memory region.
region_start: NonNull<u8>,
- /// Size of the region in bytes.
+ /// Size of this virtual memory region in bytes.
region_size_bytes: usize,
+ /// mapped_region_bytes + yjit_alloc_size may not increase beyond this limit.
+ memory_limit_bytes: usize,
+
/// Number of bytes per "page", memory protection permission can only be controlled at this
/// granularity.
page_size_bytes: usize,
@@ -106,13 +109,20 @@ use WriteError::*;
impl<A: Allocator> VirtualMemory<A> {
/// Bring a part of the address space under management.
- pub fn new(allocator: A, page_size: u32, virt_region_start: NonNull<u8>, size_bytes: usize) -> Self {
+ pub fn new(
+ allocator: A,
+ page_size: u32,
+ virt_region_start: NonNull<u8>,
+ region_size_bytes: usize,
+ memory_limit_bytes: usize,
+ ) -> Self {
assert_ne!(0, page_size);
let page_size_bytes = page_size.as_usize();
Self {
region_start: virt_region_start,
- region_size_bytes: size_bytes,
+ region_size_bytes,
+ memory_limit_bytes,
page_size_bytes,
mapped_region_bytes: 0,
current_write_page: None,
@@ -176,7 +186,8 @@ impl<A: Allocator> VirtualMemory<A> {
}
self.current_write_page = Some(page_addr);
- } else if (start..whole_region_end).contains(&raw) {
+ } else if (start..whole_region_end).contains(&raw) &&
+ (page_addr + page_size - start as usize) + yjit_alloc_size() < self.memory_limit_bytes {
// Writing to a brand new page
let mapped_region_end_addr = mapped_region_end as usize;
let alloc_size = page_addr - mapped_region_end_addr + page_size;
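
The condition added in the hunk above only lets a write map a brand-new page when the grown code region plus the current Rust heap usage stays under the soft limit. Restated as a stand-alone predicate for illustration (the function is not part of the patch; the names mirror the fields and stats it uses):

```rust
/// Would mapping the page containing `page_addr` keep code region size
/// plus Rust heap usage under the soft limit? (illustrative sketch)
fn can_map_new_page(
    region_start: usize,       // start address of the virtual memory region
    page_addr: usize,          // start address of the page being written to
    page_size: usize,          // memory-protection granularity in bytes
    yjit_alloc_size: usize,    // bytes currently allocated on the Rust heap
    memory_limit_bytes: usize, // --yjit-mem-size converted to bytes
) -> bool {
    // Size the mapped region would have once this page is included.
    let new_mapped_bytes = page_addr + page_size - region_start;
    new_mapped_bytes + yjit_alloc_size < memory_limit_bytes
}

fn main() {
    let page = 16 * 1024; // assume 16 KiB pages for the example
    let start = 0x1000_0000;
    let limit = 128 * 1024 * 1024;
    // Mapping the first page with modest heap usage is allowed...
    assert!(can_map_new_page(start, start, page, 1024, limit));
    // ...but not when heap usage alone already exhausts the soft limit.
    assert!(!can_map_new_page(start, start, page, limit, limit));
}
```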
@@ -368,6 +379,7 @@ pub mod tests {
PAGE_SIZE.try_into().unwrap(),
NonNull::new(mem_start as *mut u8).unwrap(),
mem_size,
+ 128 * 1024 * 1024,
)
}