summaryrefslogtreecommitdiff
path: root/vm_method.c
diff options
context:
space:
mode:
authorJohn Hawthorn <john@hawthorn.email>2025-08-01 13:05:59 -0700
committerJohn Hawthorn <john@hawthorn.email>2025-08-01 13:45:29 -0700
commitfaa67506e51908e2b235fe68ca3dac8c3bfaf354 (patch)
tree20038a49d6f603c66387caf7b8065fe00791f792 /vm_method.c
parentbc789ca804c15eb33b9e682b081aa9bd24c5f7fb (diff)
Ensure CC entries always marked, add missing WB
Previously we were issuing write barriers for each cc, but were missing one for the cme. We also must ensure that GC cannot run after the values have been copied into the newly allocated array but before they are visible from the object.
Diffstat (limited to 'vm_method.c')
-rw-r--r--vm_method.c8
1 file changed, 5 insertions, 3 deletions
diff --git a/vm_method.c b/vm_method.c
index 874e25ed76..76b1c97d04 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -136,17 +136,19 @@ rb_vm_cc_table_create(size_t capa)
static enum rb_id_table_iterator_result
vm_cc_table_dup_i(ID key, VALUE old_ccs_ptr, void *data)
{
+ VALUE new_table = (VALUE)data;
struct rb_class_cc_entries *old_ccs = (struct rb_class_cc_entries *)old_ccs_ptr;
size_t memsize = vm_ccs_alloc_size(old_ccs->capa);
- struct rb_class_cc_entries *new_ccs = ruby_xmalloc(memsize);
+ struct rb_class_cc_entries *new_ccs = ruby_xcalloc(1, memsize);
+ rb_managed_id_table_insert(new_table, key, (VALUE)new_ccs);
+
memcpy(new_ccs, old_ccs, memsize);
#if VM_CHECK_MODE > 0
new_ccs->debug_sig = ~(VALUE)new_ccs;
#endif
- VALUE new_table = (VALUE)data;
- rb_managed_id_table_insert(new_table, key, (VALUE)new_ccs);
+ RB_OBJ_WRITTEN(new_table, Qundef, (VALUE)new_ccs->cme);
for (int index = 0; index < new_ccs->len; index++) {
RB_OBJ_WRITTEN(new_table, Qundef, new_ccs->entries[index].cc);
}