path: root/iseq.c
author     Koichi Sasada <ko1@atdot.net>  2020-01-08 16:14:01 +0900
committer  Koichi Sasada <ko1@atdot.net>  2020-02-22 09:58:59 +0900
commit     b9007b6c548f91e88fd3f2ffa23de740431fa969 (patch)
tree       1746393d1c5f704e8dc7e0a458198264062273bf /iseq.c
parent     f2286925f08406bc857f7b03ad6779a5d61443ae (diff)
Introduce disposable call-cache.
This patch contains several ideas:

(1) Disposable inline method cache (IMC) for a race-free inline method cache
    * Make the call cache (CC) an RVALUE (a GC-managed object) and allocate a
      new CC on every cache miss.
    * This technique allows race-free access from parallel processing
      elements, similar to RCU.
(2) Introduce a per-class method cache (pCMC)
    * Instead of the fixed-size global method cache (GMC), the pCMC allows a
      flexible cache size.
    * Caching CCs reduces CC allocation and allows sharing a CC's fast path
      between call sites with the same call info (CI).
(3) Invalidate an inline method cache by invalidating the corresponding
    method entries (MEs)
    * Instead of using class serials, set an "invalidated" flag on the method
      entry itself to represent cache invalidation.
    * Compared with class serials, the impact of a method modification
      (add/overwrite/delete) is small: updating class serials invalidates all
      method caches of the class and its subclasses, while the proposed
      approach invalidates only the caches that reference the one modified ME.

See [Feature #16614] for more details.
Notes: Merged: https://github.com/ruby/ruby/pull/2888
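A minimal sketch of idea (1), the disposable call cache, may help. All names below (cache_entry, call_site, lookup_method, resolve_with_cache) are hypothetical stand-ins for CRuby's rb_callcache machinery, and malloc stands in for GC allocation. The point is only the publication discipline: an entry is never mutated after it becomes visible, so a concurrent reader sees either the old consistent entry or the new one, RCU-style.

#include <stdlib.h>

/* Hypothetical stand-ins for CRuby internals (illustration only). */
struct method_entry { int invalidated; /* idea (3): per-ME flag */ };
struct klass { int id; };

struct cache_entry {                 /* plays the role of rb_callcache */
    const struct klass *klass;       /* class the entry was resolved for */
    const struct method_entry *me;   /* resolved method entry */
};

struct call_site {                   /* plays the role of rb_call_data */
    const struct cache_entry *cc;    /* replaced wholesale on a miss */
    const char *mid;                 /* method name at this call site */
};

/* Placeholder for a full method lookup. */
static const struct method_entry *
lookup_method(const struct klass *k, const char *mid)
{
    (void)k; (void)mid;
    static const struct method_entry me = { 0 };
    return &me;
}

static const struct method_entry *
resolve_with_cache(struct call_site *site, const struct klass *recv_klass)
{
    const struct cache_entry *cc = site->cc;  /* one read of the shared slot */

    if (cc == NULL || cc->klass != recv_klass || cc->me->invalidated) {
        /* Miss: allocate a fresh entry instead of mutating the old one in
         * place, so a reader still holding the old pointer sees a consistent
         * (klass, me) pair. In CRuby the new entry is a GC-managed RVALUE
         * and the old one is collected once unreachable. */
        struct cache_entry *new_cc = malloc(sizeof(*new_cc));
        new_cc->klass = recv_klass;
        new_cc->me = lookup_method(recv_klass, site->mid);
        site->cc = new_cc;                    /* publish the new entry */
        cc = new_cc;
    }
    return cc->me;
}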
Diffstat (limited to 'iseq.c')
-rw-r--r--  iseq.c  17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/iseq.c b/iseq.c
index 867bbc0d63..c6c5c6e127 100644
--- a/iseq.c
+++ b/iseq.c
@@ -247,6 +247,7 @@ rb_iseq_update_references(rb_iseq_t *iseq)
             if (!SPECIAL_CONST_P(cds[i].ci)) {
                 cds[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)cds[i].ci);
             }
+            cds[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)cds[i].cc);
         }
     }
     if (FL_TEST(iseq, ISEQ_MARKABLE_ISEQ)) {
@@ -323,6 +324,11 @@ rb_iseq_mark(const rb_iseq_t *iseq)
         struct rb_call_data *cds = (struct rb_call_data *)body->call_data;
         for (unsigned int i=0; i<body->ci_size; i++) {
             rb_gc_mark_movable((VALUE)cds[i].ci);
+            const struct rb_callcache *cc = cds[i].cc;
+            if (cc && vm_cc_markable(cds[i].cc)) {
+                rb_gc_mark_movable((VALUE)cc);
+                // TODO: check enable
+            }
         }
     }
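The hunk above and the rb_iseq_update_references hunk before it form the pair that CRuby's compacting GC requires: every reference passed to rb_gc_mark_movable() during marking must be rewritten via rb_gc_location() in the corresponding update-references pass, because the compactor may have moved the object. A minimal sketch of that pairing for a hypothetical object with one VALUE field:

#include "ruby.h"

/* Hypothetical wrapper object holding one GC-managed reference. */
struct holder { VALUE ref; };

static void
holder_mark(void *ptr)
{
    struct holder *h = ptr;
    rb_gc_mark_movable(h->ref);      /* keep alive, but allow relocation */
}

static void
holder_compact(void *ptr)
{
    struct holder *h = ptr;
    h->ref = rb_gc_location(h->ref); /* fetch the possibly-new address */
}

For a TypedData object these two functions would be wired up as the dmark and dcompact callbacks of its rb_data_type_t.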
@@ -351,6 +357,14 @@ rb_iseq_mark(const rb_iseq_t *iseq)
                 }
             }
         }
+
+        if (body->jit_unit && body->jit_unit->cc_entries != NULL) {
+            // TODO: move to mjit.c?
+            for (unsigned int i=0; i<body->ci_size; i++) {
+                const struct rb_callcache *cc = body->jit_unit->cc_entries[i];
+                rb_gc_mark((VALUE)cc); // pindown
+            }
+        }
     }

     if (FL_TEST_RAW(iseq, ISEQ_NOT_LOADED_YET)) {
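Note the contrast with the earlier marking hunk: rb_gc_mark() both keeps an object alive and pins it, so the compactor will not move it, which is what the "pindown" comment refers to. Pinning is needed here because MJIT-generated machine code embeds raw rb_callcache pointers that the GC has no way to rewrite. A sketch of the two strategies side by side (only the two rb_gc_* calls are real API; the function and parameter names are hypothetical):

static void
mark_refs(VALUE movable_ref, VALUE pinned_ref)
{
    rb_gc_mark_movable(movable_ref); /* relocatable; the owner must fix the
                                        pointer up in update_references */
    rb_gc_mark(pinned_ref);          /* pinned in place; required when raw
                                        pointers (e.g. in JIT-compiled code)
                                        cannot be rewritten after a move */
}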
@@ -663,6 +677,9 @@ finish_iseq_build(rb_iseq_t *iseq)
         rb_exc_raise(err);
     }

+    RB_DEBUG_COUNTER_INC(iseq_num);
+    RB_DEBUG_COUNTER_ADD(iseq_cd_num, iseq->body->ci_size);
+
     rb_iseq_init_trace(iseq);
     return Qtrue;
 }
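The last hunk adds bookkeeping via CRuby's debug counters. RB_DEBUG_COUNTER_INC and RB_DEBUG_COUNTER_ADD come from debug_counter.h and compile to nothing unless the interpreter is built with USE_DEBUG_COUNTER enabled, in which case the totals are reported at process exit. A counter is declared once and then bumped at the site of interest; sketching the pattern for the two counters above (the declaration form is assumed to match debug_counter.h of this era):

/* In debug_counter.h: one declaration per counter (sketch). */
RB_DEBUG_COUNTER(iseq_num)     /* iseqs ever built */
RB_DEBUG_COUNTER(iseq_cd_num)  /* call-data entries across all iseqs */

/* At the site of interest (here, finish_iseq_build): */
RB_DEBUG_COUNTER_INC(iseq_num);
RB_DEBUG_COUNTER_ADD(iseq_cd_num, iseq->body->ci_size);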