author     Koichi Sasada <ko1@atdot.net>    2020-01-08 16:14:01 +0900
committer  Koichi Sasada <ko1@atdot.net>    2020-02-22 09:58:59 +0900
commit     b9007b6c548f91e88fd3f2ffa23de740431fa969 (patch)
tree       1746393d1c5f704e8dc7e0a458198264062273bf /insns.def
parent     f2286925f08406bc857f7b03ad6779a5d61443ae (diff)
Introduce disposable call-cache.
This patch contains several ideas:

(1) Disposable inline method cache (IMC) for race-free inline method caching
    * Make the call-cache (CC) an RVALUE (a GC-managed object) and allocate a new CC on every cache miss.
    * Because a published CC is never mutated, this allows race-free access from parallel processing elements, in the style of RCU.

(2) Introduce a per-class method cache (pCMC)
    * Unlike the fixed-size global method cache (GMC), the pCMC allows a flexible cache size.
    * Caching CCs reduces CC allocation and lets call sites with the same call-info (CI) share a CC's fast path.

(3) Invalidate an inline method cache by invalidating the corresponding method entries (MEs)
    * Instead of using class serials, an "invalidated" flag is set on the method entry itself to represent cache invalidation.
    * Compared with class serials, the impact of a method modification (add/overwrite/delete) is small: updating a class serial invalidates all method caches of the class and its subclasses, while this approach invalidates only the caches that point at the one affected ME.

See [Feature #16614] for more details.
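As a rough illustration of ideas (1) and (3) above — a minimal C sketch, not Ruby's actual internals; every name here (call_cache, method_entry, resolve_method, cache_lookup) is a placeholder — a cache entry is immutable once published, a miss allocates and publishes a fresh entry, and invalidation flips a flag on the method entry rather than bumping a class-wide serial:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    /* Hypothetical method entry: invalidation flips a flag on the entry
     * itself instead of bumping a class-wide serial, so only the caches
     * pointing at this one entry go stale (idea 3). */
    struct method_entry {
        atomic_bool invalidated;
    };

    /* Hypothetical call cache: immutable after publication, so a reader
     * on another thread always sees a consistent klass/me pair (idea 1). */
    struct call_cache {
        const void *klass;
        struct method_entry *me;
    };

    struct call_data {
        _Atomic(struct call_cache *) cc;   /* current cache, swapped on miss */
    };

    /* Stub standing in for real method resolution (idea 2 would consult a
     * per-class CC table here so call sites with the same CI share a CC). */
    static struct method_entry *resolve_method(const void *klass)
    {
        (void)klass;
        static struct method_entry me;
        return &me;
    }

    static struct method_entry *cache_lookup(struct call_data *cd, const void *klass)
    {
        struct call_cache *cc = atomic_load(&cd->cc);
        if (cc && cc->klass == klass && !atomic_load(&cc->me->invalidated)) {
            return cc->me;                 /* hit: race-free fast path */
        }
        /* Miss: never mutate the old entry in place; build a new one and
         * publish it. The old CC is left for the GC to reclaim (simply
         * leaked in this sketch; allocation errors also elided). */
        struct call_cache *fresh = malloc(sizeof(*fresh));
        fresh->klass = klass;
        fresh->me = resolve_method(klass);
        atomic_store(&cd->cc, fresh);      /* disposable: replace, don't patch */
        return fresh->me;
    }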
Notes: Merged: https://github.com/ruby/ruby/pull/2888
Diffstat (limited to 'insns.def')
-rw-r--r--  insns.def  13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/insns.def b/insns.def
index 2385f33f75..aab5cca065 100644
--- a/insns.def
+++ b/insns.def
@@ -827,7 +827,7 @@ opt_nil_p
(VALUE recv)
(VALUE val)
{
- val = vm_opt_nil_p(cd, recv);
+ val = vm_opt_nil_p(GET_ISEQ(), cd, recv);
if (val == Qundef) {
CALL_SIMPLE_METHOD();
@@ -903,8 +903,9 @@ invokeblock
// attr rb_snum_t sp_inc = sp_inc_of_invokeblock(cd->ci);
// attr rb_snum_t comptime_sp_inc = sp_inc_of_invokeblock(ci);
{
- if (UNLIKELY(cd->cc.call != vm_invokeblock_i)) {
- cd->cc.call = vm_invokeblock_i; // check before setting to avoid CoW
+ if (UNLIKELY(vm_cc_call(cd->cc) != vm_invokeblock_i)) {
+ const struct rb_callcache *cc = vm_cc_new(0, NULL, vm_invokeblock_i);
+ RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, cc);
}
VALUE bh = VM_BLOCK_HANDLER_NONE;
@@ -1167,7 +1168,7 @@ opt_eq
(VALUE recv, VALUE obj)
(VALUE val)
{
- val = opt_eq_func(recv, obj, cd);
+ val = opt_eq_func(GET_ISEQ(), recv, obj, cd);
if (val == Qundef) {
CALL_SIMPLE_METHOD();
@@ -1181,7 +1182,7 @@ opt_neq
(VALUE recv, VALUE obj)
(VALUE val)
{
- val = vm_opt_neq(cd, cd_eq, recv, obj);
+ val = vm_opt_neq(GET_ISEQ(), cd, cd_eq, recv, obj);
if (val == Qundef) {
CALL_SIMPLE_METHOD();
@@ -1431,7 +1432,7 @@ opt_not
(VALUE recv)
(VALUE val)
{
- val = vm_opt_not(cd, recv);
+ val = vm_opt_not(GET_ISEQ(), cd, recv);
if (val == Qundef) {
CALL_SIMPLE_METHOD();
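A hedged reading of the hunks above: the old invokeblock code patched cd->cc.call in place, while the new code allocates a fresh CC with vm_cc_new() and stores it via RB_OBJ_WRITE, which pairs the pointer write with a GC write barrier now that the CC is a GC-managed object referenced by the iseq; the GET_ISEQ() argument threaded through opt_nil_p, opt_eq, opt_neq, and opt_not plausibly exists so those helpers can perform the same barriered store on a miss. A generic sketch of the pattern (obj, gc_write_barrier, and obj_write are placeholders, not Ruby's API):

    struct obj { struct obj *ref; };

    /* Stub: a real collector would record the parent->child edge here so
     * a generational or incremental GC cannot miss the new reference. */
    static void gc_write_barrier(struct obj *parent, struct obj *child)
    {
        (void)parent;
        (void)child;
    }

    /* In the spirit of RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, cc): keep the
     * pointer store and the barrier together so they cannot drift apart. */
    static void obj_write(struct obj *parent, struct obj **slot, struct obj *child)
    {
        *slot = child;
        gc_write_barrier(parent, child);
    }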