-rw-r--r--  zjit.c                  18
-rw-r--r--  zjit/src/codegen.rs     51
-rw-r--r--  zjit/src/virtualmem.rs   2
3 files changed, 68 insertions, 3 deletions
diff --git a/zjit.c b/zjit.c
index 69b8fc6b67..d0034e7bf1 100644
--- a/zjit.c
+++ b/zjit.c
@@ -31,6 +31,24 @@
#include <errno.h>
+uint32_t
+rb_zjit_get_page_size(void)
+{
+#if defined(_SC_PAGESIZE)
+    long page_size = sysconf(_SC_PAGESIZE);
+    if (page_size <= 0) rb_bug("zjit: failed to get page size");
+
+    // 1 GiB limit. x86 CPUs with PDPE1GB can use pages this large, and anything bigger is
+    // unexpected. That said, our design assumes fine-grained control over memory protection,
+    // which requires small page sizes.
+    if (page_size > 0x40000000l) rb_bug("zjit page size too large");
+
+    return (uint32_t)page_size;
+#else
+#error "ZJIT supports POSIX only for now"
+#endif
+}
+
// Address space reservation. Memory pages are mapped on an as needed basis.
// See the Rust mm module for details.
uint8_t *
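
For context, here is a sketch of how the new C helper might be declared and sanity-checked on the Rust side. The extern declaration is an assumption based on the call site in the codegen.rs hunk below; it is not part of this commit.

// Hypothetical Rust-side binding for the helper added to zjit.c above.
extern "C" {
    /// Page size in bytes, as reported by sysconf(_SC_PAGESIZE).
    fn rb_zjit_get_page_size() -> u32;
}

fn checked_page_size() -> u32 {
    let page_size = unsafe { rb_zjit_get_page_size() };
    // Page sizes are powers of two; the alignment math in codegen.rs relies on this.
    assert!(page_size.is_power_of_two());
    // Mirror the C-side guard: anything above 1 GiB (0x40000000) is unexpected.
    assert!(page_size <= 0x4000_0000);
    page_size
}
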
diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs
index f54d8b2036..22028e1c94 100644
--- a/zjit/src/codegen.rs
+++ b/zjit/src/codegen.rs
@@ -1,7 +1,8 @@
use std::rc::Rc;
use std::cell::RefCell;
-
-use crate::virtualmem::VirtualMem;
+use crate::cruby::*;
+use crate::virtualmem::*;
+use crate::{utils::IntoUsize};
/// Block of memory into which instructions can be assembled
pub struct CodeBlock {
@@ -9,6 +10,13 @@ pub struct CodeBlock {
mem_block: Rc<RefCell<VirtualMem>>,
}
+impl CodeBlock {
+    /// Make a new CodeBlock
+    pub fn new(mem_block: Rc<RefCell<VirtualMem>>) -> Self {
+        Self { mem_block }
+    }
+}
+
/// Global state needed for code generation
pub struct ZJITState {
/// Inline code block (fast path)
@@ -17,3 +25,42 @@ pub struct ZJITState {
/// Private singleton instance of the codegen globals
static mut ZJIT_STATE: Option<ZJITState> = None;
+
+impl ZJITState {
+    /// Initialize the ZJIT globals
+    pub fn init() {
+        let exec_mem_size: usize = 64 * 1024 * 1024; // TODO: support the option
+
+        #[cfg(not(test))]
+        let cb = {
+            let virt_block: *mut u8 = unsafe { rb_zjit_reserve_addr_space(64 * 1024 * 1024) };
+
+            // Memory protection syscalls need page-aligned addresses, so check that here. Assuming
+            // `virt_block` is page-aligned, `second_half` is also page-aligned as long as the page
+            // size in bytes is a power of two no larger than 2¹⁹. This is because the user-requested
+            // size is half of mem_option × 2²⁰ bytes, since the option is given in MiB.
+            //
+            // In short, we don't support x86-64 2 MiB or 1 GiB pages. ARMv8 can use pages up to
+            // 64 KiB (2¹⁶ bytes), which should be fine. 4 KiB pages seem to be the most common anyway.
+            let page_size = unsafe { rb_zjit_get_page_size() };
+            assert_eq!(
+                virt_block as usize % page_size.as_usize(), 0,
+                "Start of virtual address block should be page-aligned",
+            );
+
+            use crate::virtualmem::*;
+            use std::ptr::NonNull;
+
+            let mem_block = VirtualMem::new(
+                crate::virtualmem::sys::SystemAllocator {},
+                page_size,
+                NonNull::new(virt_block).unwrap(),
+                exec_mem_size,
+                64 * 1024 * 1024, // TODO: support the option
+            );
+            let mem_block = Rc::new(RefCell::new(mem_block));
+
+            CodeBlock::new(mem_block.clone())
+        };
+    }
+}
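
The alignment comment above can be checked with a little arithmetic. This is a worked sketch of that argument, not code from the commit; `mem_option_mib` is a hypothetical name for the user-requested size in MiB.

// Half of a MiB-granular region is a multiple of 2^19 bytes, so it stays aligned
// to any power-of-two page size up to 2^19 (i.e. up to 512 KiB pages).
fn second_half_is_page_aligned(mem_option_mib: usize, page_size: usize) -> bool {
    assert!(page_size.is_power_of_two());
    let total_bytes = mem_option_mib << 20; // MiB to bytes (times 2^20)
    let second_half = total_bytes / 2;      // a multiple of 2^19
    second_half % page_size == 0            // true whenever page_size <= 2^19
}
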
diff --git a/zjit/src/virtualmem.rs b/zjit/src/virtualmem.rs
index cf75a4fd08..3ec2346c3c 100644
--- a/zjit/src/virtualmem.rs
+++ b/zjit/src/virtualmem.rs
@@ -270,7 +270,7 @@ impl<A: Allocator> CodePtrBase for VirtualMemory<A> {
/// Requires linking with CRuby to work
#[cfg(not(test))]
-mod sys {
+pub mod sys {
use crate::cruby::*;
/// Zero size! This just groups together syscalls that require linking with CRuby.
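
Making `sys` public is what lets codegen.rs refer to the CRuby-backed allocator directly. A minimal usage sketch, assuming the `VirtualMem::new` signature seen in the codegen.rs hunk above; the wrapper function and its parameters are illustrative, not part of the commit.

use std::cell::RefCell;
use std::ptr::NonNull;
use std::rc::Rc;

use crate::codegen::CodeBlock;
use crate::virtualmem::VirtualMem;

// Illustrative helper: wrap reserved address space in a CodeBlock.
#[cfg(not(test))]
fn build_code_block(virt_block: *mut u8, page_size: u32, exec_mem_size: usize) -> CodeBlock {
    let mem_block = VirtualMem::new(
        // Reachable from outside virtualmem.rs now that the module is `pub`.
        crate::virtualmem::sys::SystemAllocator {},
        page_size,
        NonNull::new(virt_block).expect("reserved block should be non-null"),
        exec_mem_size,
        exec_mem_size, // memory limit; the real code leaves a TODO to make this an option
    );
    CodeBlock::new(Rc::new(RefCell::new(mem_block)))
}
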