summaryrefslogtreecommitdiff
path: root/yjit_core.c
diff options
context:
space:
mode:
author: Aaron Patterson <tenderlove@ruby-lang.org> 2021-10-26 16:57:30 -0700
committer: Aaron Patterson <aaron.patterson@gmail.com> 2021-12-01 12:45:59 -0800
commit: 157095b3a44d8b0130a532a0b7be3f5ac197111c (patch)
tree: 362d1b19c520ebf270b92921671dc5b312b2307c /yjit_core.c
parent: 94ee88b38cf0a20666e3965f5c9c4d520cf02b22 (diff)
Mark JIT code as writeable / executable depending on the situation
Some platforms don't want memory to be marked as writeable and executable at the same time. When we write to the code block, we calculate the OS page that the buffer position maps to. Then we call `mprotect` to allow writes on that particular page. As an optimization, we cache the "last written" aligned page which allows us to amortize the cost of the `mprotect` call. In other words, sequential writes to the same page will only call `mprotect` on the page once. When we're done writing, we call `mprotect` on the entire JIT buffer. This means we don't need to keep track of which pages were marked as writeable, we let the OS take care of that. Co-authored-by: John Hawthorn <john@hawthorn.email>
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/5032
Diffstat (limited to 'yjit_core.c')
-rw-r--r-- yjit_core.c | 13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/yjit_core.c b/yjit_core.c
index f19b83c5ff..00905e7f24 100644
--- a/yjit_core.c
+++ b/yjit_core.c
@@ -833,12 +833,16 @@ gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_context_t
// The entry context makes no assumptions about types
blockid_t blockid = { iseq, insn_idx };
+ rb_vm_barrier();
// Write the interpreter entry prologue. Might be NULL when out of memory.
uint8_t *code_ptr = yjit_entry_prologue(cb, iseq);
// Try to generate code for the entry block
block_t *block = gen_block_version(blockid, &DEFAULT_CTX, ec);
+ cb_mark_all_executable(ocb);
+ cb_mark_all_executable(cb);
+
// If we couldn't generate any code
if (!block || block->end_idx == insn_idx) {
return NULL;
@@ -872,6 +876,8 @@ branch_stub_hit(branch_t *branch, const uint32_t target_idx, rb_execution_contex
dst_addr = branch->dst_addrs[target_idx];
}
else {
+ rb_vm_barrier();
+
// :stub-sp-flush:
// Generated code do stack operations without modifying cfp->sp, while the
// cfp->sp tells the GC what values on the stack to root. Generated code
@@ -952,6 +958,9 @@ branch_stub_hit(branch_t *branch, const uint32_t target_idx, rb_execution_contex
// frame. We do that in code_for_exit_from_stub.
dst_addr = code_for_exit_from_stub;
}
+
+ cb_mark_all_executable(ocb);
+ cb_mark_all_executable(cb);
}
const ptrdiff_t new_branch_size = branch_code_size(branch);
@@ -1201,6 +1210,7 @@ static void
invalidate_block_version(block_t *block)
{
ASSERT_vm_locking();
+
// TODO: want to assert that all other ractors are stopped here. Can't patch
// machine code that some other thread is running.
@@ -1324,6 +1334,9 @@ invalidate_block_version(block_t *block)
yjit_runtime_counters.invalidation_count++;
#endif
+ cb_mark_all_executable(ocb);
+ cb_mark_all_executable(cb);
+
// fprintf(stderr, "invalidation done\n");
}