path: root/vm_insnhelper.c
author    Koichi Sasada <ko1@atdot.net>  2021-01-21 03:33:59 +0900
committer Koichi Sasada <ko1@atdot.net>  2021-01-29 16:22:12 +0900
commit    1ecda213668644d656eb0d60654737482447dd92 (patch)
tree      2231cf25d2215ba1358c21c82e823fcae5203e34 /vm_insnhelper.c
parent    9241211538189a58b477bd55b539357617fd42ed (diff)
global call-cache cache table for rb_funcall*
The rb_funcall* functions (rb_funcall(), rb_funcallv(), ...) invoke a Ruby method on a given receiver. Ruby 2.7 introduced an inline method cache for them backed by static memory, but Ruby 3.0 reimplemented the method-cache data structures and that inline cache was removed. Without it, rb_funcall* searches for the method on every call. In most cases the per-Class Method Cache (pCMC) helps, but the pCMC requires VM-wide locking, which hurts performance under multi-Ractor execution, especially when all Ractors call methods through rb_funcall*.

This patch introduces a Global Call-Cache Cache Table (gccct) for rb_funcall*. The call-cache, introduced in Ruby 3.0, manages a method-cache entry atomically, so the gccct enables method caching without VM-wide locking. This resolves the performance issue on multi-Ractor execution. [Bug #17497]

Ruby-level method invocation does not use the gccct because it already has an inline method cache, and the table size is limited. rb_funcall* is not typically called frequently, so 1023 entries should be enough; we will revisit the table size if it proves insufficient.
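To make the mechanism concrete, here is a minimal sketch of the shape such a table can take: a fixed-size global array of pointers to immutable cache records, indexed by a hash of (receiver class, method ID). Only the 1023-entry size comes from the message above; the names (gccct_cc, gccct_index, gccct_method_search, resolve_method_slow), the hash, and the entry layout are illustrative, not the CRuby implementation, which stores GC-managed rb_callcache objects.

/* A minimal sketch of the gccct idea, NOT the CRuby implementation:
 * a fixed-size global array of pointers to immutable call-cache
 * records, indexed by a hash of (receiver class, method ID). */
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

#define GCCCT_SIZE 1023            /* entry count from the commit message */

typedef uintptr_t klass_t;         /* stand-in for the receiver's class */
typedef uintptr_t mid_t;           /* stand-in for a Ruby method ID */

struct gccct_cc {                  /* immutable once published */
    klass_t klass;                 /* class the entry was resolved for */
    mid_t   mid;                   /* method ID the entry caches */
    void   *cme;                   /* resolved method entry (opaque here) */
};

static _Atomic(const struct gccct_cc *) gccct[GCCCT_SIZE];

/* Hypothetical slow path: a full method search, no cache involved. */
extern void *resolve_method_slow(klass_t klass, mid_t mid);

static size_t
gccct_index(klass_t klass, mid_t mid)
{
    /* Any cheap mixing hash works; this one is illustrative. */
    return (size_t)((klass ^ (mid * 2654435761u)) % GCCCT_SIZE);
}

/* The hit path takes no lock: each slot is a single pointer to an
 * immutable record, so a racing reader sees either the old or the
 * new entry, never a torn one.  A miss, or a slot evicted by a
 * colliding (klass, mid) pair, just repeats the slow search. */
static void *
gccct_method_search(klass_t klass, mid_t mid)
{
    size_t idx = gccct_index(klass, mid);
    const struct gccct_cc *cc = atomic_load(&gccct[idx]);

    if (cc && cc->klass == klass && cc->mid == mid) {
        return cc->cme;                        /* lock-free cache hit */
    }

    void *cme = resolve_method_slow(klass, mid);
    struct gccct_cc *fresh = malloc(sizeof(*fresh));
    if (fresh) {                               /* evicted entries leak here;
                                                  the real ones are GC-managed */
        fresh->klass = klass;
        fresh->mid   = mid;
        fresh->cme   = cme;
        atomic_store(&gccct[idx], fresh);      /* publish; may evict a neighbor */
    }
    return cme;
}

The price of a fixed-size table is eviction: two hot (class, method) pairs that hash to the same slot keep displacing each other, which is why the message above leaves room to revisit the size.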
Notes:
Merged: https://github.com/ruby/ruby/pull/4129
Diffstat (limited to 'vm_insnhelper.c')
-rw-r--r-- vm_insnhelper.c | 11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index f385da46fa..a0e6a522d6 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -2360,6 +2360,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
{
const struct rb_callinfo *ci = calling->ci;
const struct rb_callcache *cc = calling->cc;
+ bool cacheable_ci = vm_ci_markable(ci);
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
if (LIKELY(rb_simple_iseq_p(iseq))) {
@@ -2373,7 +2374,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
VM_ASSERT(ci == calling->ci);
VM_ASSERT(cc == calling->cc);
- CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), vm_call_iseq_optimizable_p(ci, cc));
+ CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), cacheable_ci && vm_call_iseq_optimizable_p(ci, cc));
return 0;
}
else if (rb_iseq_only_optparam_p(iseq)) {
@@ -2393,12 +2394,12 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
+ cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
}
else {
CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
+ cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
}
/* initialize opt vars for self-references */
@@ -2426,7 +2427,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
- METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
+ cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
return 0;
}
@@ -2439,7 +2440,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
if (klocals[kw_param->num] == INT2FIX(0)) {
/* copy from default_values */
CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
- METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
+ cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
}
return 0;
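The hunks above all apply one pattern: each CC_SET_FASTPATH(cc, fn, enable) call, which installs a specialized handler into the call cache only when enable is true, now additionally requires vm_ci_markable(ci). Presumably this is because call-infos built on the C stack for rb_funcall* are not GC-markable, so a long-lived cached fast path must not capture them. A minimal sketch of the guard pattern, with hypothetical struct layouts and names:

#include <stdbool.h>

struct callinfo  { bool markable; /* ... */ };   /* cf. vm_ci_markable(ci) */
struct callcache { void (*call)(void); };        /* fast-path handler slot */

/* Mirrors the shape of CC_SET_FASTPATH(cc, fn, enable):
 * the handler is cached only when every precondition holds. */
static void
cc_set_fastpath(struct callcache *cc, void (*fn)(void), bool enable)
{
    if (enable) cc->call = fn;
}

static void
setup_call(struct callcache *cc, const struct callinfo *ci,
           void (*optimized_fn)(void), bool other_conditions_ok)
{
    bool cacheable_ci = ci->markable;  /* the check this patch adds */
    cc_set_fastpath(cc, optimized_fn, cacheable_ci && other_conditions_ok);
}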