summaryrefslogtreecommitdiff
path: root/vm_method.c
diff options
context:
space:
mode:
author: Alan Wu <XrXr@users.noreply.github.com> 2023-11-27 19:19:41 -0500
committer: Alan Wu <XrXr@users.noreply.github.com> 2023-11-28 13:03:04 -0500
commit: cd4207869ff831c41db3ec873b175369ffca080a (patch)
tree: e3a63a06f41f952077aa84d2e300cf68b24ca113 /vm_method.c
parent: 0704f40787d306b00db6bbdfee7e13ee172f600a (diff)
Fix cache incoherency for ME resolved through VM_METHOD_TYPE_REFINED
Previously, we didn't invalidate the method entry wrapped by VM_METHOD_TYPE_REFINED method entries which could cause calls to land in the wrong method like it did in the included test. Do the invalidation, and adjust rb_method_entry_clone() to accommodate this new invalidation vector. Fix: cfd7729ce7a31c8b6ec5dd0e99c67b2932de4732 See-also: e201b81f79828c30500947fe8c8ea3c515e3d112
Diffstat (limited to 'vm_method.c')
-rw-r--r--  vm_method.c  31
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/vm_method.c b/vm_method.c
index 245c58ac3e..6b05b38e4d 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -240,6 +240,13 @@ clear_method_cache_by_id_in_class(VALUE klass, ID mid)
vm_cme_invalidate((rb_callable_method_entry_t *)cme);
RB_DEBUG_COUNTER_INC(cc_invalidate_tree_cme);
+ // In case of refinement ME, also invalidate the wrapped ME that
+ // could be cached at some callsite and is unreachable from any
+ // RCLASS_CC_TBL.
+ if (cme->def->type == VM_METHOD_TYPE_REFINED && cme->def->body.refined.orig_me) {
+ vm_cme_invalidate((rb_callable_method_entry_t *)cme->def->body.refined.orig_me);
+ }
+
if (cme->def->iseq_overload) {
rb_callable_method_entry_t *monly_cme = (rb_callable_method_entry_t *)lookup_overloaded_cme(cme);
if (monly_cme) {
@@ -676,12 +683,36 @@ rb_method_entry_create(ID called_id, VALUE klass, rb_method_visibility_t visi, r
return me;
}
// Return a cloned ME that's not invalidated (MEs are disposable for caching).
//
// For a VM_METHOD_TYPE_REFINED entry, the wrapped (non-refined) ME is cloned
// as well, since the wrapped ME can now be invalidated independently (see the
// invalidation added in clear_method_cache_by_id_in_class in this commit) and
// a shared copy would let that invalidation leak into the clone.
const rb_method_entry_t *
rb_method_entry_clone(const rb_method_entry_t *src_me)
{
    rb_method_entry_t *me = rb_method_entry_alloc(src_me->called_id, src_me->owner, src_me->defined_class, src_me->def, METHOD_ENTRY_COMPLEMENTED(src_me));
    METHOD_ENTRY_FLAGS_COPY(me, src_me);

    // Also clone inner ME in case of refinement ME
    if (src_me->def &&
            src_me->def->type == VM_METHOD_TYPE_REFINED &&
            src_me->def->body.refined.orig_me) {
        const rb_method_entry_t *orig_me = src_me->def->body.refined.orig_me;
        // Refinement MEs are not expected to nest; the wrapped ME must itself
        // be a plain (non-refined) entry.
        VM_ASSERT(orig_me->def->type != VM_METHOD_TYPE_REFINED);

        rb_method_entry_t *orig_clone = rb_method_entry_alloc(orig_me->called_id,
                orig_me->owner, orig_me->defined_class, orig_me->def, METHOD_ENTRY_COMPLEMENTED(orig_me));
        METHOD_ENTRY_FLAGS_COPY(orig_clone, orig_me);

        // Clone definition, since writing a VALUE to a shared definition
        // can create reference edges we can't run WBs for.
        rb_method_definition_t *clone_def =
            rb_method_definition_create(VM_METHOD_TYPE_REFINED, src_me->called_id);

        // Point the cloned refined definition at the cloned inner ME;
        // rb_method_definition_set installs clone_def on `me` with the
        // necessary write barriers.
        rb_method_refined_t refined = {
            .owner = src_me->def->body.refined.owner,
            .orig_me = orig_clone,
        };
        rb_method_definition_set(me, clone_def, &refined);
    }
    return me;
}