-rw-r--r--  test/ruby/test_yjit.rb            6
-rw-r--r--  vm_exec.h                        10
-rw-r--r--  vm_insnhelper.c                  36
-rw-r--r--  yjit.c                           14
-rw-r--r--  yjit/bindgen/src/main.rs          2
-rw-r--r--  yjit/src/codegen.rs             115
-rw-r--r--  yjit/src/cruby_bindings.inc.rs    2
7 files changed, 178 insertions(+), 7 deletions(-)
diff --git a/test/ruby/test_yjit.rb b/test/ruby/test_yjit.rb
index 115c1902f0..6e8070199d 100644
--- a/test/ruby/test_yjit.rb
+++ b/test/ruby/test_yjit.rb
@@ -548,7 +548,7 @@ class TestYJIT < Test::Unit::TestCase
def test_getblockparamproxy
# Currently two side exits as OPTIMIZED_METHOD_TYPE_CALL is unimplemented
- assert_compiles(<<~'RUBY', insns: [:getblockparamproxy], exits: { opt_send_without_block: 2 })
+ assert_compiles(<<~'RUBY', insns: [:getblockparamproxy])
def foo &blk
p blk.call
p blk.call
@@ -607,7 +607,7 @@ class TestYJIT < Test::Unit::TestCase
def test_send_kwargs
# For now, this side-exits when calls include keyword args
- assert_compiles(<<~'RUBY', result: "2#a:1,b:2/A", exits: {opt_send_without_block: 1})
+ assert_compiles(<<~'RUBY', result: "2#a:1,b:2/A")
def internal_method(**kw)
"#{kw.size}##{kw.keys.map { |k| "#{k}:#{kw[k]}" }.join(",")}"
end
@@ -647,7 +647,7 @@ class TestYJIT < Test::Unit::TestCase
def test_send_kwargs_splat
# For now, this side-exits when calling with a splat
- assert_compiles(<<~'RUBY', result: "2#a:1,b:2/B", exits: {opt_send_without_block: 1})
+ assert_compiles(<<~'RUBY', result: "2#a:1,b:2/B")
def internal_method(**kw)
"#{kw.size}##{kw.keys.map { |k| "#{k}:#{kw[k]}" }.join(",")}"
end
diff --git a/vm_exec.h b/vm_exec.h
index dbfd4e9f44..41c4b74ffc 100644
--- a/vm_exec.h
+++ b/vm_exec.h
@@ -169,10 +169,20 @@ default: \
#define THROW_EXCEPTION(exc) return (VALUE)(exc)
#endif
+// Run the interpreter from the JIT
+#define VM_EXEC(ec, val) do { \
+ if (val == Qundef) { \
+ VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH); \
+ val = vm_exec(ec); \
+ } \
+} while (0)
+
+// Run the JIT from the interpreter
#define JIT_EXEC(ec, val) do { \
rb_jit_func_t func; \
if (val == Qundef && (func = jit_compile(ec))) { \
val = func(ec, ec->cfp); \
+ RESTORE_REGS(); /* fix cfp for tailcall */ \
if (ec->tag->state) THROW_EXCEPTION(val); \
} \
} while (0)
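The two macros are mirror images: JIT_EXEC lets the interpreter enter compiled code, and the new VM_EXEC lets compiled code hand a freshly pushed frame back to the interpreter. A minimal sketch with VM_EXEC expanded inline (illustrative; the real callers are the rb_vm_send family in vm_insnhelper.c below):

    // vm_sendish() returns Qundef when it pushed a new ISEQ frame
    // instead of completing the call (a C method finishes inline).
    VALUE val = vm_sendish(ec, ec->cfp, cd, bh, mexp_search_method);
    if (val == Qundef) {
        // Mark the pushed frame FINISH so vm_exec() returns as soon
        // as that frame pops, rather than continuing past the caller.
        VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
        val = vm_exec(ec);
    }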
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index d33fdb8fa7..c676399c59 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -5528,6 +5528,42 @@ vm_sendish(
return val;
}
+VALUE
+rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
+{
+ VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
+ VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
+ VM_EXEC(ec, val);
+ return val;
+}
+
+VALUE
+rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
+{
+ VALUE bh = VM_BLOCK_HANDLER_NONE;
+ VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
+ VM_EXEC(ec, val);
+ return val;
+}
+
+VALUE
+rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
+{
+ VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
+ VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
+ VM_EXEC(ec, val);
+ return val;
+}
+
+VALUE
+rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
+{
+ VALUE bh = VM_BLOCK_HANDLER_NONE;
+ VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
+ VM_EXEC(ec, val);
+ return val;
+}
+
/* object.c */
VALUE rb_nil_to_s(VALUE);
VALUE rb_true_to_s(VALUE);
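These four helpers mirror the corresponding instruction bodies in insns.def; the difference is what happens when vm_sendish() pushes a frame. For comparison, the interpreter's `send` is paraphrased below (macro spelling approximate):

    /* Paraphrased from insns.def:
     *
     *   VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
     *   val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
     *   JIT_EXEC(ec, val);
     *   if (val == Qundef) {
     *       RESTORE_REGS();
     *       NEXT_INSN();  // fall into the frame vm_sendish() pushed
     *   }
     *
     * A C helper called from JIT code cannot fall into the next
     * instruction, so rb_vm_send() runs the pushed frame to
     * completion with VM_EXEC() instead. */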
diff --git a/yjit.c b/yjit.c
index 4db46d59f3..50dcecae96 100644
--- a/yjit.c
+++ b/yjit.c
@@ -1122,6 +1122,20 @@ rb_yjit_assert_holding_vm_lock(void)
ASSERT_vm_locking();
}
+// The number of stack slots that vm_sendish() pops for send and invokesuper.
+size_t
+rb_yjit_sendish_sp_pops(const struct rb_callinfo *ci)
+{
+ return 1 - sp_inc_of_sendish(ci); // + 1 to ignore return value push
+}
+
+// The number of stack slots that vm_sendish() pops for invokeblock.
+size_t
+rb_yjit_invokeblock_sp_pops(const struct rb_callinfo *ci)
+{
+ return 1 - sp_inc_of_invokeblock(ci); // + 1 to ignore return value push
+}
+
// Primitives used by yjit.rb
VALUE rb_yjit_stats_enabled_p(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_trace_exit_locations_enabled_p(rb_execution_context_t *ec, VALUE self);
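The arithmetic behind these helpers: a send-ish instruction pops the receiver, argc positional arguments, and one extra slot when a block argument is passed, then pushes a single return value, so sp_inc is 1 minus the pop count. Negating it and adding 1 cancels the return-value push. A worked example, assuming the usual definition of sp_inc_of_sendish() from the generated insns_info.inc:

    // recv.foo(1, 2): argc = 2, no &block argument
    //   sp_inc  = 1 - (1 receiver + 2 args) = -2
    //   sp_pops = 1 - sp_inc                =  3
    // recv.foo(1, &blk): argc = 1, plus one blockarg slot
    //   sp_inc  = 1 - (1 + 1 + 1) = -2
    //   sp_pops = 1 - (-2)        =  3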
diff --git a/yjit/bindgen/src/main.rs b/yjit/bindgen/src/main.rs
index d00816b3d5..5bda8b471b 100644
--- a/yjit/bindgen/src/main.rs
+++ b/yjit/bindgen/src/main.rs
@@ -325,6 +325,8 @@ fn main() {
.allowlist_function("rb_yjit_icache_invalidate")
.allowlist_function("rb_optimized_call")
.allowlist_function("rb_yjit_assert_holding_vm_lock")
+ .allowlist_function("rb_yjit_sendish_sp_pops")
+ .allowlist_function("rb_yjit_invokeblock_sp_pops")
// from vm_sync.h
.allowlist_function("rb_vm_barrier")
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index ab436d5022..e6d65ef423 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -6428,6 +6428,38 @@ fn gen_struct_aset(
Some(EndBlock)
}
+// Generate code that calls a method with dynamic dispatch
+fn gen_send_dynamic<F: Fn(&mut Assembler) -> Opnd>(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ cd: *const rb_call_data,
+ sp_pops: usize,
+ vm_sendish: F,
+) -> Option<CodegenStatus> {
+ // Our frame handling is not compatible with tailcall
+ if unsafe { vm_ci_flag((*cd).ci) } & VM_CALL_TAILCALL != 0 {
+ return None;
+ }
+
+ // Save PC and SP to prepare for dynamic dispatch
+ jit_prepare_routine_call(jit, asm);
+
+ // Pop arguments and a receiver
+ asm.stack_pop(sp_pops);
+
+ // Dispatch a method
+ let ret = vm_sendish(asm);
+
+ // Push the return value
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, ret);
+
+ // Fix the interpreter SP deviated by vm_sendish
+ asm.mov(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP), SP);
+
+ Some(KeepCompiling)
+}
+
fn gen_send_general(
jit: &mut JITState,
asm: &mut Assembler,
@@ -6909,9 +6941,22 @@ fn gen_opt_send_without_block(
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> Option<CodegenStatus> {
+ // Generate specialized code if possible
let cd = jit.get_arg(0).as_ptr();
+ if let Some(status) = gen_send_general(jit, asm, ocb, cd, None) {
+ return Some(status);
+ }
- gen_send_general(jit, asm, ocb, cd, None)
+ // Otherwise, fall back to dynamic dispatch using the interpreter's implementation of send
+ gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
+ extern "C" {
+ fn rb_vm_opt_send_without_block(ec: EcPtr, cfp: CfpPtr, cd: VALUE) -> VALUE;
+ }
+ asm.ccall(
+ rb_vm_opt_send_without_block as *const u8,
+ vec![EC, CFP, (cd as usize).into()],
+ )
+ })
}
fn gen_send(
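At runtime, the code gen_send_dynamic() emits for this fallback behaves roughly like the C below. Note that asm.stack_pop()/stack_push() only adjust YJIT's virtual stack bookkeeping (they emit no instructions), and the final mov restores the cfp->sp that vm_sendish() moved. A sketch, not the emitted assembly; `saved_sp` is illustrative:

    VALUE *saved_sp = cfp->sp;  // written by jit_prepare_routine_call()
    VALUE ret = rb_vm_opt_send_without_block(ec, cfp, cd);
    // vm_sendish() popped the receiver and arguments, leaving cfp->sp
    // below saved_sp; YJIT tracks those pops virtually, so restore the
    // interpreter SP to the value the JIT's SP register still holds:
    cfp->sp = saved_sp;
    // ret occupies the slot YJIT virtually pushed for the return value.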
@@ -6919,9 +6964,24 @@ fn gen_send(
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> Option<CodegenStatus> {
+ // Generate specialized code if possible
let cd = jit.get_arg(0).as_ptr();
let block = jit.get_arg(1).as_optional_ptr();
- return gen_send_general(jit, asm, ocb, cd, block);
+ if let Some(status) = gen_send_general(jit, asm, ocb, cd, block) {
+ return Some(status);
+ }
+
+ // Otherwise, fall back to dynamic dispatch using the interpreter's implementation of send
+ let blockiseq = jit.get_arg(1).as_iseq();
+ gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
+ extern "C" {
+ fn rb_vm_send(ec: EcPtr, cfp: CfpPtr, cd: VALUE, blockiseq: IseqPtr) -> VALUE;
+ }
+ asm.ccall(
+ rb_vm_send as *const u8,
+ vec![EC, CFP, (cd as usize).into(), VALUE(blockiseq as usize).into()],
+ )
+ })
}
fn gen_invokeblock(
@@ -6929,13 +6989,36 @@ fn gen_invokeblock(
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> Option<CodegenStatus> {
+ // Generate specialized code if possible
+ let cd = jit.get_arg(0).as_ptr();
+ if let Some(status) = gen_invokeblock_specialized(jit, asm, ocb, cd) {
+ return Some(status);
+ }
+
+ // Otherwise, fall back to dynamic dispatch using the interpreter's implementation of invokeblock
+ gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_invokeblock_sp_pops((*cd).ci) }, |asm| {
+ extern "C" {
+ fn rb_vm_invokeblock(ec: EcPtr, cfp: CfpPtr, cd: VALUE) -> VALUE;
+ }
+ asm.ccall(
+ rb_vm_invokeblock as *const u8,
+ vec![EC, CFP, (cd as usize).into()],
+ )
+ })
+}
+
+fn gen_invokeblock_specialized(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
+ cd: *const rb_call_data,
+) -> Option<CodegenStatus> {
if !jit.at_current_insn() {
defer_compilation(jit, asm, ocb);
return Some(EndBlock);
}
// Get call info
- let cd = jit.get_arg(0).as_ptr();
let ci = unsafe { get_call_data_ci(cd) };
let argc: i32 = unsafe { vm_ci_argc(ci) }.try_into().unwrap();
let flags = unsafe { vm_ci_flag(ci) };
@@ -7065,7 +7148,31 @@ fn gen_invokesuper(
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> Option<CodegenStatus> {
- let cd: *const rb_call_data = jit.get_arg(0).as_ptr();
+ // Generate specialized code if possible
+ let cd = jit.get_arg(0).as_ptr();
+ if let Some(status) = gen_invokesuper_specialized(jit, asm, ocb, cd) {
+ return Some(status);
+ }
+
+ // Otherwise, fall back to dynamic dispatch using the interpreter's implementation of invokesuper
+ let blockiseq = jit.get_arg(1).as_iseq();
+ gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
+ extern "C" {
+ fn rb_vm_invokesuper(ec: EcPtr, cfp: CfpPtr, cd: VALUE, blockiseq: IseqPtr) -> VALUE;
+ }
+ asm.ccall(
+ rb_vm_invokesuper as *const u8,
+ vec![EC, CFP, (cd as usize).into(), VALUE(blockiseq as usize).into()],
+ )
+ })
+}
+
+fn gen_invokesuper_specialized(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
+ cd: *const rb_call_data,
+) -> Option<CodegenStatus> {
let block: Option<IseqPtr> = jit.get_arg(1).as_optional_ptr();
// Defer compilation so we can specialize on class of receiver
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index ed1384571f..506120f3f0 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -1336,4 +1336,6 @@ extern "C" {
line: ::std::os::raw::c_int,
);
pub fn rb_yjit_assert_holding_vm_lock();
+ pub fn rb_yjit_sendish_sp_pops(ci: *const rb_callinfo) -> usize;
+ pub fn rb_yjit_invokeblock_sp_pops(ci: *const rb_callinfo) -> usize;
}