path: root/vm_insnhelper.h
author     Koichi Sasada <ko1@atdot.net>  2020-01-08 16:14:01 +0900
committer  Koichi Sasada <ko1@atdot.net>  2020-02-22 09:58:59 +0900
commit     b9007b6c548f91e88fd3f2ffa23de740431fa969 (patch)
tree       1746393d1c5f704e8dc7e0a458198264062273bf /vm_insnhelper.h
parent     f2286925f08406bc857f7b03ad6779a5d61443ae (diff)
Introduce disposable call-cache.
This patch contains several ideas:

(1) Disposable inline method cache (IMC) for race-free inline method caches
    * Make the call-cache (CC) a RVALUE (a GC-managed object) and allocate
      a new CC on every cache miss.
    * This allows race-free access from parallel processing elements,
      in the style of RCU.

(2) Introduce a per-class method cache (pCMC)
    * Instead of the fixed-size global method cache (GMC), the pCMC allows
      a flexible cache size.
    * Caching CCs reduces CC allocation and allows sharing a CC's fast-path
      between call-sites that have the same call-info (CI).

(3) Invalidate an inline method cache by invalidating the corresponding
    method entries (MEs)
    * Instead of using class serials, we set an "invalidated" flag on the
      method entry itself to represent cache invalidation.
    * Compared with class serials, the impact of a method modification
      (add/overwrite/delete) is small: updating a class serial invalidates
      all method caches of the class and its sub-classes, whereas the
      proposed approach invalidates the caches of only the one affected ME.

See [Feature #16614] for more details.
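The core of idea (1) is that a CC is never mutated in place on a miss: a fresh, fully initialized record is allocated and published with a single pointer store, so a concurrent reader sees either the old cache or the new one, never a half-written entry. Below is a minimal, self-contained C11 sketch of that pattern; the names (call_site, call_cache, resolve_method) are hypothetical illustrations, not Ruby's actual implementation.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef int (*handler_fn)(void);

/* Immutable once published: all fields are filled in before the
 * pointer to this record becomes visible to other threads. */
struct call_cache {
    int class_key;        /* receiver class this cache entry is for */
    handler_fn call;      /* resolved fast-path handler             */
};

struct call_site {
    _Atomic(struct call_cache *) cc;   /* swapped wholesale on a miss */
};

static int the_method(void) { return 42; }

/* Stand-in for a real method lookup. */
static handler_fn
resolve_method(int class_key)
{
    (void)class_key;
    return the_method;
}

static int
call_through_site(struct call_site *site, int class_key)
{
    struct call_cache *cc = atomic_load(&site->cc);
    if (cc == NULL || cc->class_key != class_key) {
        /* Miss: build a fresh record, then publish it with one store.
         * The previous record is "disposed": with a GC (as in this
         * patch, where a CC is a RVALUE) it is reclaimed once no
         * reader can still hold a pointer to it. Error handling is
         * omitted for brevity. */
        struct call_cache *fresh = malloc(sizeof *fresh);
        fresh->class_key = class_key;
        fresh->call = resolve_method(class_key);
        atomic_store(&site->cc, fresh);
        cc = fresh;
    }
    return cc->call();   /* hit: one indirect call */
}

int
main(void)
{
    struct call_site site = { .cc = NULL };
    printf("%d\n", call_through_site(&site, 1));  /* miss, allocates */
    printf("%d\n", call_through_site(&site, 1));  /* hit             */
    return 0;
}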
Notes:
    Merged: https://github.com/ruby/ruby/pull/2888
Diffstat (limited to 'vm_insnhelper.h')
-rw-r--r--  vm_insnhelper.h | 15
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/vm_insnhelper.h b/vm_insnhelper.h
index e9337b82a8..07b38ea9d9 100644
--- a/vm_insnhelper.h
+++ b/vm_insnhelper.h
@@ -121,20 +121,13 @@ enum vm_regan_acttype {
*/
static inline void
-CC_SET_FASTPATH(CALL_CACHE cc, vm_call_handler func, bool enabled)
+CC_SET_FASTPATH(const struct rb_callcache *cc, vm_call_handler func, bool enabled)
{
if (LIKELY(enabled)) {
- cc->call = func;
+ vm_cc_call_set(cc, func);
}
}
-static inline void
-CC_SET_ME(CALL_CACHE cc, const rb_callable_method_entry_t *me)
-{
- cc->me = me;
- cc->method_serial = me ? me->def->method_serial : 0;
-}
-
#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])
/**********************************************************/
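In the hunk above, CC_SET_FASTPATH now receives the CC as a const pointer and funnels its one remaining write through vm_cc_call_set, while CC_SET_ME is deleted outright: under the disposable scheme a CC is bound to its method entry when it is allocated, so a live CC is never re-pointed at a different ME. A hedged sketch of that allocation-time binding (cc_new, struct method_entry, and struct callcache here are illustrative, not Ruby's API):

#include <stdlib.h>

typedef int (*handler_fn)(void);

struct method_entry { int invalidated; };  /* idea (3): a flag, not a serial */

struct callcache {
    const struct method_entry *cme;  /* bound once, at allocation */
    handler_fn call;                 /* the only field set later  */
};

/* Replaces the old CC_SET_ME-style mutation: the ME is a constructor
 * argument, so there is nothing to "set" on an existing CC. */
static struct callcache *
cc_new(const struct method_entry *cme)
{
    struct callcache *cc = malloc(sizeof *cc);
    cc->cme = cme;
    cc->call = NULL;   /* fast-path handler installed lazily */
    return cc;
}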
@@ -258,10 +251,10 @@ THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj)
/* If this returns true, an optimized function returned by `vm_call_iseq_setup_func`
can be used as a fastpath. */
static bool
-vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_call_cache *cc)
+vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED);
+ !(METHOD_ENTRY_VISI(vm_cc_cme(cc)) == METHOD_VISI_PROTECTED);
}
#endif /* RUBY_INSNHELPER_H */
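The second hunk makes the same move on the read side: the direct field access cc->me becomes the accessor vm_cc_cme(cc). Together with vm_cc_call_set this gives the CC a narrow, auditable interface: reads go through accessors, and the single sanctioned write through a const pointer is confined to one setter. A simplified stand-in for the idiom (these are not Ruby's actual definitions):

typedef int (*handler_fn)(void);

struct method_entry { int invalidated; };

struct callcache {
    const struct method_entry *cme;
    handler_fn call;
};

/* Read side: callers never touch cc->cme directly. */
static inline const struct method_entry *
cc_cme(const struct callcache *cc)
{
    return cc->cme;
}

/* Write side: the one place that writes through the const pointer,
 * so every mutation of a nominally immutable CC is easy to audit. */
static inline void
cc_call_set(const struct callcache *cc, handler_fn func)
{
    ((struct callcache *)cc)->call = func;
}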