| author | Takashi Kokubun <takashikkbn@gmail.com> | 2020-06-22 23:30:37 -0700 |
| --- | --- | --- |
| committer | Takashi Kokubun <takashikkbn@gmail.com> | 2020-06-23 00:09:54 -0700 |
| commit | 37a2e48d76efc047c140db984f816514ec5a1048 (patch) | |
| tree | 3339f31845f8c68ad926b9c2f9769915f3827c30 /mjit_compile.c | |
| parent | 6aa3aaac054619942762447bd60a5e26966305c2 (diff) | |
Avoid generating opt_send with cfunc cc with JIT
This applies only to opt_nil_p and opt_not.

While vm_method_cfunc_is is also used by opt_eq, many of opt_eq's fast paths don't call it, so whenever its call cache is populated it should generate opt_send, regardless of whether the cached method is a cfunc. opt_neq isn't relevant here either, because its operands are different.

So opt_nil_p and opt_not are the only variants that rely on vm_method_cfunc_is in the way this change targets.
```
$ benchmark-driver -v --rbenv 'before2 --jit::ruby --jit;before --jit;after --jit' benchmark/mjit_opt_cc_insns.yml --repeat-count=4
before2 --jit: ruby 2.8.0dev (2020-06-22T08:37:37Z master 3238641750) +JIT [x86_64-linux]
before --jit: ruby 2.8.0dev (2020-06-23T01:01:24Z master 9ce2066209) +JIT [x86_64-linux]
after --jit: ruby 2.8.0dev (2020-06-23T06:58:37Z master 17e9df3157) +JIT [x86_64-linux]
last_commit=Avoid generating opt_send with cfunc cc with JIT
Calculating -------------------------------------
                     before2 --jit   before --jit    after --jit
        mjit_nil?(1)       54.204M        75.536M        75.031M i/s -  40.000M times in 0.737947s 0.529548s 0.533110s
         mjit_not(1)       53.822M        70.921M        71.920M i/s -  40.000M times in 0.743195s 0.564007s 0.556171s
     mjit_eq(1, nil)        7.367M         6.496M         7.331M i/s -   8.000M times in 1.085882s 1.231470s 1.091327s
Comparison:
mjit_nil?(1)
before --jit: 75536059.3 i/s
after --jit: 75031409.4 i/s - 1.01x slower
before2 --jit: 54204431.6 i/s - 1.39x slower
mjit_not(1)
after --jit: 71920324.1 i/s
before --jit: 70921063.1 i/s - 1.01x slower
before2 --jit: 53821697.6 i/s - 1.34x slower
mjit_eq(1, nil)
before2 --jit: 7367280.0 i/s
after --jit: 7330527.4 i/s - 1.01x slower
before --jit: 6496302.8 i/s - 1.13x slower
```
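The numbers show opt_nil_p and opt_not keeping their speedup once MJIT stops emitting the send path for call sites whose cache holds the builtin cfunc. As a rough illustration of that decision, here is a minimal, self-contained sketch in C; every name in it (cache_entry, method_kind, jit_should_compile_send) is a hypothetical stand-in for this explanation, not Ruby's real internals.

```
/* Minimal model of the has_valid_method_type() / has_cache_for_send() split
 * described above. All types and names here are hypothetical stand-ins. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { METHOD_KIND_NONE, METHOD_KIND_ISEQ, METHOD_KIND_CFUNC } method_kind;

/* Stand-in for a call cache: which kind of method this call site has seen. */
typedef struct { method_kind cached; } cache_entry;

/* Mirrors has_valid_method_type(): the cache holds some callable entry. */
static bool cache_is_valid(const cache_entry *cc) {
    return cc->cached != METHOD_KIND_NONE;
}

/* Mirrors has_cache_for_send(): for insns whose interpreter fast path already
 * covers the cached cfunc (opt_nil_p, opt_not), a cfunc hit never falls back
 * to opt_send_without_block, so the JIT should not compile the send path. */
static bool jit_should_compile_send(const cache_entry *cc, bool fast_path_covers_cfunc) {
    return cache_is_valid(cc) &&
           (!fast_path_covers_cfunc || cc->cached != METHOD_KIND_CFUNC);
}

int main(void) {
    const cache_entry cfunc_cc = { METHOD_KIND_CFUNC };
    const cache_entry iseq_cc  = { METHOD_KIND_ISEQ };

    /* opt_nil_p / opt_not with the builtin cfunc cached: stay on the fast path. */
    printf("opt_nil_p, cfunc cc -> compile send? %d\n",
           jit_should_compile_send(&cfunc_cc, true));   /* 0 */
    /* nil? redefined as a Ruby (ISEQ) method would fall back to a send. */
    printf("opt_nil_p, iseq cc  -> compile send? %d\n",
           jit_should_compile_send(&iseq_cc, true));    /* 1 */
    /* opt_eq and other variants: any populated cache may reach the send fallback. */
    printf("opt_eq,    cfunc cc -> compile send? %d\n",
           jit_should_compile_send(&cfunc_cc, false));  /* 1 */
    return 0;
}
```

Under that reading, only call sites that can actually reach opt_send_without_block get the send path compiled, which is why the opt_nil_p and opt_not results stay on par with the previous commit while the redundant send code is no longer generated.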
Diffstat (limited to 'mjit_compile.c')
-rw-r--r-- | mjit_compile.c | 15 |
1 file changed, 12 insertions, 3 deletions
```
diff --git a/mjit_compile.c b/mjit_compile.c
index ed1cd1fd1c..b2e40828b6 100644
--- a/mjit_compile.c
+++ b/mjit_compile.c
@@ -17,6 +17,7 @@
 #include "internal.h"
 #include "internal/compile.h"
 #include "internal/hash.h"
+#include "internal/object.h"
 #include "internal/variable.h"
 #include "mjit.h"
 #include "vm_core.h"
@@ -97,9 +98,16 @@ captured_cc_entries(const struct compile_status *status)
 
 // Returns true if call cache is still not obsoleted and vm_cc_cme(cc)->def->type is available.
 static bool
-has_valid_method_type(CALL_CACHE cc, rb_method_type_t type)
+has_valid_method_type(CALL_CACHE cc)
 {
-    return vm_cc_cme(cc) != NULL && vm_cc_cme(cc)->def->type == type;
+    return vm_cc_cme(cc) != NULL;
+}
+
+// Returns true if MJIT thinks this cc's opt_* insn may fallback to opt_send_without_block.
+static bool
+has_cache_for_send(CALL_CACHE cc, bool cfunc_cached)
+{
+    return has_valid_method_type(cc) && (!cfunc_cached || vm_cc_cme(cc)->def->type != VM_METHOD_TYPE_CFUNC);
 }
 
 // Returns true if iseq can use fastpath for setup, otherwise NULL. This becomes true in the same condition
@@ -439,8 +447,9 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
             const struct rb_callcache *cc = captured_cc_entries(status)[call_data_index(cd, body)]; // use copy to avoid race condition
             const rb_iseq_t *child_iseq;
-            if (has_valid_method_type(cc, VM_METHOD_TYPE_ISEQ) &&
+            if (has_valid_method_type(cc) &&
                 !(vm_ci_flag(ci) & VM_CALL_TAILCALL) && // inlining only non-tailcall path
+                vm_cc_cme(cc)->def->type == VM_METHOD_TYPE_ISEQ &&
                 fastpath_applied_iseq_p(ci, cc, child_iseq = def_iseq_ptr(vm_cc_cme(cc)->def)) && // CC_SET_FASTPATH in vm_callee_setup_arg
                 inlinable_iseq_p(child_iseq->body)) {
```