summaryrefslogtreecommitdiff
path: root/vm_method.c
diff options
context:
space:
mode:
author: John Hawthorn <john@hawthorn.email> 2025-12-19 02:21:55 -0800
committer: John Hawthorn <john@hawthorn.email> 2025-12-19 12:06:29 -0800
commit: d9c0d4c71cd3500b2307c518b423ee58eeff9ae5 (patch)
tree: 26a0331398c0ed442c2d27015aa3177f31e64663 /vm_method.c
parent: 04e90fe200d736db0a32a794b8dc742fa0cb5441 (diff)
Don't copy invalidated CME in rb_vm_cc_table_dup
The cc_entries list associated with the invalidated CME can be deleted from the table during GC, so it isn't safe to copy (and we shouldn't copy it anyways, it's stale data).
Diffstat (limited to 'vm_method.c')
-rw-r--r-- vm_method.c | 12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/vm_method.c b/vm_method.c
index 2a6323e593..2b3ac74d57 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -149,10 +149,21 @@ vm_cc_table_dup_i(ID key, VALUE old_ccs_ptr, void *data)
{
VALUE new_table = (VALUE)data;
struct rb_class_cc_entries *old_ccs = (struct rb_class_cc_entries *)old_ccs_ptr;
+
+ if (METHOD_ENTRY_INVALIDATED(old_ccs->cme)) {
+ // Invalidated CME. This entry will be removed from the old table on
+ // the next GC mark, so it's unsafe (and undesirable) to copy
+ return ID_TABLE_CONTINUE;
+ }
+
size_t memsize = vm_ccs_alloc_size(old_ccs->capa);
struct rb_class_cc_entries *new_ccs = ruby_xcalloc(1, memsize);
rb_managed_id_table_insert(new_table, key, (VALUE)new_ccs);
+ // We hold the VM lock, so invalidation should not have happened between
+ // our earlier invalidation check and now.
+ VM_ASSERT(!METHOD_ENTRY_INVALIDATED(old_ccs->cme));
+
memcpy(new_ccs, old_ccs, memsize);
#if VM_CHECK_MODE > 0
@@ -169,6 +180,7 @@ vm_cc_table_dup_i(ID key, VALUE old_ccs_ptr, void *data)
VALUE
rb_vm_cc_table_dup(VALUE old_table)
{
+ ASSERT_vm_locking();
VALUE new_table = rb_vm_cc_table_create(rb_managed_id_table_size(old_table));
rb_managed_id_table_foreach(old_table, vm_cc_table_dup_i, (void *)new_table);
return new_table;