path: root/yjit_iface.c
author    Maxime Chevalier-Boisvert <maxime.chevalierboisvert@shopify.com>  2021-11-04 13:05:41 -0700
committer GitHub <noreply@github.com>  2021-11-04 16:05:41 -0400
commit    2421527d6e4737c371bca0cf7e694f8a2a0f923d (patch)
tree      90621ade7e47fab78ce7c885d55aed1c99fdc434 /yjit_iface.c
parent    85b4cf16e2cae0577633c1acb1dc7413d58fcb5a (diff)
YJIT code pages refactoring for code GC (#5073)
* New code page allocation logic
* Fix leaked globals
* Fix leaked symbols, yjit asm tests
* Make COUNTED_EXIT take a jit argument, so we can eliminate global ocb
* Remove extra whitespace
* Change block start_pos/end_pos to be pointers instead of uint32_t
* Change branch end_pos and start_pos to end_addr, start_addr
Notes: Merged-By: maximecb <maximecb@ruby-lang.org>
Diffstat (limited to 'yjit_iface.c')
-rw-r--r--  yjit_iface.c  141
1 file changed, 120 insertions, 21 deletions
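
For orientation: each code page begins with a pointer to its GC wrapper object, and the remaining space is split evenly between an inline code block (cb) and an outlined code block (ocb). A sketch of that layout, reconstructed from the diff below (the diagram itself is editorial; offsets assume an 8-byte VALUE):

/*
 * One 16 KiB YJIT code page (editorial sketch):
 *
 *   mem_block + 0      +--------------------------------+
 *                      | VALUE wrapper (GC handle)      |
 *   mem_block + 8      +--------------------------------+
 *                      | inline code block (cb)         |
 *   mem_block + 8196   +--------------------------------+
 *                      | outlined code block (ocb)      |
 *   mem_block + 16384  +--------------------------------+
 *
 *   halfsize = (16384 - 8) / 2 = 8188 bytes per block
 */
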
diff --git a/yjit_iface.c b/yjit_iface.c
index 9c52c4d2e3..82477c65b3 100644
--- a/yjit_iface.c
+++ b/yjit_iface.c
@@ -36,6 +36,12 @@ extern st_table *rb_encoded_insn_data;
struct rb_yjit_options rb_yjit_opts;
+// Size of code pages to allocate
+#define CODE_PAGE_SIZE (16 * 1024)
+
+// How many code pages to allocate at once
+#define PAGES_PER_ALLOC 512
+
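
CODE_PAGE_SIZE needs parentheses because it is used inside `%` and `~` expressions below: unparenthesized, `x % CODE_PAGE_SIZE` would parse as `(x % 16) * 1024`, so the alignment assertion later in this diff would pass for any 16-byte-aligned address. A standalone illustration of the pitfall (editorial sketch, not part of the commit):

#include <assert.h>
#include <stdint.h>

#define UNPARENTHESIZED 16 * 1024   // expands badly inside expressions
#define PARENTHESIZED   (16 * 1024) // safe

int main(void)
{
    intptr_t addr = 16; // 16-byte aligned, but NOT 16 KiB aligned
    // Parses as (addr % 16) * 1024 == 0, so it wrongly passes:
    assert(addr % UNPARENTHESIZED == 0);
    // The parenthesized form would correctly fail for this address:
    // assert(addr % PARENTHESIZED == 0); // aborts
    return 0;
}
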
static const rb_data_type_t yjit_block_type = {
"YJIT/Block",
{0, 0, 0, },
@@ -54,6 +60,7 @@ yjit_iseq_pc_at_idx(const rb_iseq_t *iseq, uint32_t insn_idx)
}
// For debugging. Print the disassembly of an iseq.
+RBIMPL_ATTR_MAYBE_UNUSED()
static void
yjit_print_iseq(const rb_iseq_t *iseq)
{
@@ -520,8 +527,7 @@ block_address(VALUE self)
{
block_t * block;
TypedData_Get_Struct(self, block_t, &yjit_block_type, block);
- uint8_t *code_addr = cb_get_ptr(cb, block->start_pos);
- return LONG2NUM((intptr_t)code_addr);
+ return LONG2NUM((intptr_t)block->start_addr);
}
/* Get the machine code for YJIT::Block as a binary string */
@@ -532,8 +538,8 @@ block_code(VALUE self)
TypedData_Get_Struct(self, block_t, &yjit_block_type, block);
return (VALUE)rb_str_new(
- (const char*)cb->mem_block + block->start_pos,
- block->end_pos - block->start_pos
+ (const char*)block->start_addr,
+ block->end_addr - block->start_addr
);
}
@@ -880,7 +886,7 @@ rb_yjit_iseq_mark(const struct rb_iseq_constant_body *body)
}
// Mark the machine code page this block lives on
- rb_gc_mark_movable(block->code_page);
+ //rb_gc_mark_movable(block->code_page);
}
}
}
@@ -926,7 +932,7 @@ rb_yjit_iseq_update_references(const struct rb_iseq_constant_body *body)
}
// Update the machine code page this block lives on
- block->code_page = rb_gc_location(block->code_page);
+ //block->code_page = rb_gc_location(block->code_page);
}
}
}
@@ -949,10 +955,39 @@ rb_yjit_iseq_free(const struct rb_iseq_constant_body *body)
rb_darray_free(body->yjit_blocks);
}
+// Struct representing a code page
+typedef struct code_page_struct
+{
+ // Chunk of executable memory
+ uint8_t* mem_block;
+
+ // Size of the executable memory chunk
+ uint32_t page_size;
+
+ // Inline code block
+ codeblock_t cb;
+
+ // Outlined code block
+ codeblock_t ocb;
+
+ // Next node in the free list (private)
+ struct code_page_struct* _next;
+
+} code_page_t;
+
+// Current code page we are writing machine code into
+static VALUE yjit_cur_code_page = Qfalse;
+
+// Head of the list of free code pages
+static code_page_t *code_page_freelist = NULL;
+
+// Free a code page, add it to the free list
static void
-yjit_code_page_free(void *code_page)
+yjit_code_page_free(void *voidp)
{
- free_code_page((code_page_t*)code_page);
+ code_page_t* code_page = (code_page_t*)voidp;
+ code_page->_next = code_page_freelist;
+ code_page_freelist = code_page;
}
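
yjit_code_page_free above no longer releases memory; it pushes the page onto an intrusive singly linked free list via the `_next` field, and rb_yjit_code_page_alloc later pops from the same list. The same push/pop pattern in a self-contained sketch (node_t and the driver are hypothetical stand-ins for code_page_t):

#include <stdio.h>
#include <stdlib.h>

// Hypothetical node type mirroring code_page_t's `_next` link
typedef struct node { struct node *_next; int id; } node_t;

static node_t *freelist = NULL;

// "Free" by pushing onto the list head, O(1)
static void node_free(node_t *n) { n->_next = freelist; freelist = n; }

// Allocate by popping the head, falling back to malloc when empty
static node_t *node_alloc(void)
{
    if (freelist) {
        node_t *n = freelist;
        freelist = n->_next;
        return n;
    }
    return malloc(sizeof(node_t));
}

int main(void)
{
    node_t *a = node_alloc();
    a->id = 1;
    node_free(a);
    node_t *b = node_alloc(); // reuses a's memory
    printf("%p %p\n", (void *)a, (void *)b);
    free(b);
    return 0;
}
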
// Custom type for interacting with the GC
@@ -963,19 +998,47 @@ static const rb_data_type_t yjit_code_page_type = {
};
// Allocate a code page and wrap it into a Ruby object owned by the GC
-VALUE rb_yjit_code_page_alloc(void)
+static VALUE
+rb_yjit_code_page_alloc(void)
{
- code_page_t *code_page = alloc_code_page();
- VALUE cp_obj = TypedData_Wrap_Struct(0, &yjit_code_page_type, code_page);
+ // If the free list is empty
+ if (!code_page_freelist) {
+ // Allocate many pages at once
+ uint8_t* code_chunk = alloc_exec_mem(PAGES_PER_ALLOC * CODE_PAGE_SIZE);
+
+ // Build the free list in reverse so pages are handed out in ascending address order
+ for (int i = PAGES_PER_ALLOC - 1; i >= 0; --i) {
+ code_page_t* code_page = malloc(sizeof(code_page_t));
+ code_page->mem_block = code_chunk + i * CODE_PAGE_SIZE;
+ assert ((intptr_t)code_page->mem_block % CODE_PAGE_SIZE == 0);
+ code_page->page_size = CODE_PAGE_SIZE;
+ code_page->_next = code_page_freelist;
+ code_page_freelist = code_page;
+ }
+ }
+
+ code_page_t* code_page = code_page_freelist;
+ code_page_freelist = code_page_freelist->_next;
+
+ // Create a Ruby wrapper struct for the code page object
+ VALUE wrapper = TypedData_Wrap_Struct(0, &yjit_code_page_type, code_page);
+
+ // Write a pointer to the wrapper object on the page
+ *((VALUE*)code_page->mem_block) = wrapper;
- // Write a pointer to the wrapper object at the beginning of the code page
- *((VALUE*)code_page->mem_block) = cp_obj;
+ // Initialize the code blocks
+ uint8_t* page_start = code_page->mem_block + sizeof(VALUE);
+ uint8_t* page_end = code_page->mem_block + CODE_PAGE_SIZE;
+ uint32_t halfsize = (uint32_t)(page_end - page_start) / 2;
+ cb_init(&code_page->cb, page_start, halfsize);
+ cb_init(&code_page->ocb, page_start + halfsize, halfsize);
- return cp_obj;
+ return wrapper;
}
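
The last two cb_init calls split the usable space after the wrapper slot evenly between the inline and outlined blocks. That arithmetic, shown standalone (the static region buffer stands in for the executable mapping and uintptr_t for VALUE):

#include <stdint.h>
#include <stdio.h>

#define CODE_PAGE_SIZE (16 * 1024)

int main(void)
{
    static uint8_t region[CODE_PAGE_SIZE]; // stand-in for exec memory
    uint8_t *mem_block  = region;
    uint8_t *page_start = mem_block + sizeof(uintptr_t); // skip wrapper slot
    uint8_t *page_end   = mem_block + CODE_PAGE_SIZE;
    uint32_t halfsize   = (uint32_t)(page_end - page_start) / 2;

    // cb occupies [8, 8 + 8188), ocb occupies [8 + 8188, 16384)
    printf("cb : [%5td, %5td)\n", page_start - mem_block,
           (page_start + halfsize) - mem_block);
    printf("ocb: [%5td, %5td)\n", (page_start + halfsize) - mem_block,
           page_end - mem_block);
    return 0;
}
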
// Unwrap the Ruby object representing a code page
-code_page_t *rb_yjit_code_page_unwrap(VALUE cp_obj)
+static code_page_t *
+rb_yjit_code_page_unwrap(VALUE cp_obj)
{
code_page_t * code_page;
TypedData_Get_Struct(cp_obj, code_page_t, &yjit_code_page_type, code_page);
@@ -983,21 +1046,23 @@ code_page_t *rb_yjit_code_page_unwrap(VALUE cp_obj)
}
// Get the code page wrapper object for a code pointer
-VALUE rb_yjit_code_page_from_ptr(uint8_t *code_ptr)
+static VALUE
+rb_yjit_code_page_from_ptr(uint8_t* code_ptr)
{
- VALUE *page_start = (VALUE*)((intptr_t)code_ptr & ~(CODE_PAGE_SIZE - 1));
+ VALUE* page_start = (VALUE*)((intptr_t)code_ptr & ~(CODE_PAGE_SIZE - 1));
VALUE wrapper = *page_start;
return wrapper;
}
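
Because pages are CODE_PAGE_SIZE-aligned (asserted at allocation time) and start with the wrapper VALUE, the function above recovers the page, and thus the wrapper, from any interior code pointer in O(1) by clearing the low bits. A self-contained illustration of the mask (the addresses are made up):

#include <stdint.h>
#include <stdio.h>

#define CODE_PAGE_SIZE (16 * 1024)

int main(void)
{
    // Pretend this is a code pointer somewhere inside an aligned page
    intptr_t page_base = 0x40004000;        // 16 KiB aligned
    intptr_t code_ptr  = page_base + 0x123; // points into the page

    // Clear the low log2(CODE_PAGE_SIZE) bits to recover the page start
    intptr_t recovered = code_ptr & ~((intptr_t)CODE_PAGE_SIZE - 1);

    printf("recovered page base: %#lx (match: %d)\n",
           (unsigned long)recovered, recovered == page_base);
    return 0;
}
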
// Get the inline code block corresponding to a code pointer
-void rb_yjit_get_cb(codeblock_t *cb, uint8_t *code_ptr)
+static void
+yjit_get_cb(codeblock_t* cb, uint8_t* code_ptr)
{
VALUE page_wrapper = rb_yjit_code_page_from_ptr(code_ptr);
code_page_t *code_page = rb_yjit_code_page_unwrap(page_wrapper);
// A pointer to the page wrapper object is written at the start of the code page
- uint8_t *mem_block = code_page->mem_block + sizeof(VALUE);
+ uint8_t* mem_block = code_page->mem_block + sizeof(VALUE);
uint32_t mem_size = (code_page->page_size/2) - sizeof(VALUE);
RUBY_ASSERT(mem_block);
@@ -1006,13 +1071,14 @@ void rb_yjit_get_cb(codeblock_t *cb, uint8_t *code_ptr)
}
// Get the outlined code block corresponding to a code pointer
-void rb_yjit_get_ocb(codeblock_t *cb, uint8_t *code_ptr)
+static void
+yjit_get_ocb(codeblock_t* cb, uint8_t* code_ptr)
{
VALUE page_wrapper = rb_yjit_code_page_from_ptr(code_ptr);
code_page_t *code_page = rb_yjit_code_page_unwrap(page_wrapper);
// A pointer to the page wrapper object is written at the start of the code page
- uint8_t *mem_block = code_page->mem_block + (code_page->page_size/2);
+ uint8_t* mem_block = code_page->mem_block + (code_page->page_size/2);
uint32_t mem_size = code_page->page_size/2;
RUBY_ASSERT(mem_block);
@@ -1020,6 +1086,39 @@ void rb_yjit_get_ocb(codeblock_t *cb, uint8_t *code_ptr)
cb_init(cb, mem_block, mem_size);
}
+// Get the current code page or allocate a new one
+static VALUE
+yjit_get_code_page(uint32_t cb_bytes_needed, uint32_t ocb_bytes_needed)
+{
+ // If this is the first code page
+ if (yjit_cur_code_page == Qfalse) {
+ yjit_cur_code_page = rb_yjit_code_page_alloc();
+ }
+
+ // Get the current code page
+ code_page_t *code_page = rb_yjit_code_page_unwrap(yjit_cur_code_page);
+
+ // Compute how many bytes are left in the code blocks
+ uint32_t cb_bytes_left = code_page->cb.mem_size - code_page->cb.write_pos;
+ uint32_t ocb_bytes_left = code_page->ocb.mem_size - code_page->ocb.write_pos;
+ RUBY_ASSERT_ALWAYS(cb_bytes_needed <= code_page->cb.mem_size);
+ RUBY_ASSERT_ALWAYS(ocb_bytes_needed <= code_page->ocb.mem_size);
+
+ // If there's enough space left in the current code page
+ if (cb_bytes_needed <= cb_bytes_left && ocb_bytes_needed <= ocb_bytes_left) {
+ return yjit_cur_code_page;
+ }
+
+ // Allocate a new code page
+ yjit_cur_code_page = rb_yjit_code_page_alloc();
+ code_page_t *new_code_page = rb_yjit_code_page_unwrap(yjit_cur_code_page);
+
+ // Jump to the new code page
+ jmp_ptr(&code_page->cb, new_code_page->cb.mem_block);
+
+ return yjit_cur_code_page;
+}
+
bool
rb_yjit_enabled_p(void)
{
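
yjit_get_code_page above chains pages: when either half of the current page cannot fit the request, it allocates a fresh page and emits an unconditional jump (jmp_ptr) from the old inline block into the new one, so generated code falls through across the page boundary. A schematic of just the capacity check (the struct and sizes are editorial stand-ins, not the real codeblock_t):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// Editorial stand-in for the codeblock_t bookkeeping fields
typedef struct { uint32_t write_pos, mem_size; } cb_sim_t;

// True when `needed` more bytes still fit in the block
static bool fits(const cb_sim_t *b, uint32_t needed)
{
    return needed <= b->mem_size - b->write_pos;
}

int main(void)
{
    cb_sim_t cb  = { .write_pos = 8000, .mem_size = 8188 };
    cb_sim_t ocb = { .write_pos =  100, .mem_size = 8188 };

    if (fits(&cb, 512) && fits(&ocb, 512)) {
        printf("keep writing into the current page\n");
    } else {
        // Real code: rb_yjit_code_page_alloc(), then jmp_ptr() into the
        // new page's inline block before switching over.
        printf("page full: allocate a new page and jump to it\n");
    }
    return 0;
}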