author     Jean Boussier <jean.boussier@gmail.com>  2025-07-30 12:44:39 +0200
committer  Jean Boussier <jean.boussier@gmail.com>  2025-08-01 10:42:04 +0200
commit     f2a7e48deadb9101d49c9b613abf5a83c9e1dd49 (patch)
tree       45a61c8a5dc4c2bca1b84d6a54c67a8abed658db /vm_method.c
parent     fc5e1541e4bb4b7995b6acc1ea6121b60fc64e7a (diff)
Make `RClass.cc_table` a managed object
For now this doesn't change anything, but now that the table is managed by the GC, it opens the door to using RCU in multi-ractor mode, hence allowing unsynchronized reads.
Diffstat (limited to 'vm_method.c')
-rw-r--r--  vm_method.c | 150
1 file changed, 135 insertions(+), 15 deletions(-)
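
Reading note (not part of the commit): the key move in the diff below is wrapping the per-class call-cache table in a TypedData object, so the GC rather than manual bookkeeping drives its marking, compaction, and freeing; that is what the `cc_table_type` definition and `rb_vm_cc_table_create` do. As a rough, self-contained illustration of that TypedData pattern — an invented `example_table` type, not the actual `vm_method.c` code, with names like `example_table_create` made up for this sketch — a GC-managed native table can look roughly like this:

#include <ruby.h>

/* Hypothetical table used only for this sketch; the real commit manages a
 * struct rb_id_table of rb_class_cc_entries instead. */
struct example_table {
    long len;
    VALUE *entries;
};

static void
example_table_mark(void *data)
{
    struct example_table *tbl = data;
    for (long i = 0; i < tbl->len; i++) {
        /* Movable marking lets GC compaction relocate the referenced objects. */
        rb_gc_mark_movable(tbl->entries[i]);
    }
}

static void
example_table_free(void *data)
{
    struct example_table *tbl = data;
    ruby_xfree(tbl->entries);
    ruby_xfree(tbl);
}

static size_t
example_table_memsize(const void *data)
{
    const struct example_table *tbl = data;
    return sizeof(*tbl) + sizeof(tbl->entries[0]) * tbl->len;
}

static void
example_table_compact(void *data)
{
    struct example_table *tbl = data;
    for (long i = 0; i < tbl->len; i++) {
        /* After objects have moved, rewrite stale references to their new locations. */
        tbl->entries[i] = rb_gc_location(tbl->entries[i]);
    }
}

static const rb_data_type_t example_table_type = {
    .wrap_struct_name = "example/table",
    .function = {
        .dmark = example_table_mark,
        .dfree = example_table_free,
        .dsize = example_table_memsize,
        .dcompact = example_table_compact,
    },
    /* The real cc_table additionally sets RUBY_TYPED_WB_PROTECTED and
     * RUBY_TYPED_EMBEDDABLE; omitted here since this sketch never writes
     * into the table after creation. */
    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE
example_table_create(long capa)
{
    struct example_table *tbl = ruby_xcalloc(1, sizeof(*tbl));
    tbl->entries = ruby_xcalloc(capa, sizeof(tbl->entries[0]));
    tbl->len = 0;
    /* Passing 0 as the class yields a hidden internal object, as the VM does
     * for its own tables; the GC now owns the table's lifetime. */
    return TypedData_Wrap_Struct(0, &example_table_type, tbl);
}

Once the table is an ordinary heap object like this, replacing it amounts to publishing a new VALUE and letting the GC reclaim the old one when nothing references it anymore, which is the property the commit message alludes to for RCU-style unsynchronized reads in multi-ractor mode.
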
diff --git a/vm_method.c b/vm_method.c
index 327fcbafdd..779e77b673 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -22,6 +22,126 @@ static inline rb_method_entry_t *lookup_method_table(VALUE klass, ID id);
 #define ruby_running (GET_VM()->running)
 /* int ruby_running = 0; */
+static void
+vm_ccs_free(struct rb_class_cc_entries *ccs)
+{
+    if (ccs->entries) {
+        ruby_xfree(ccs->entries);
+    }
+    ruby_xfree(ccs);
+}
+
+static enum rb_id_table_iterator_result
+mark_cc_entry_i(VALUE ccs_ptr, void *data)
+{
+    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+
+    VM_ASSERT(vm_ccs_p(ccs));
+
+    if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
+        vm_ccs_free(ccs);
+        return ID_TABLE_DELETE;
+    }
+    else {
+        rb_gc_mark_movable((VALUE)ccs->cme);
+
+        for (int i=0; i<ccs->len; i++) {
+            VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
+
+            rb_gc_mark_movable((VALUE)ccs->entries[i].cc);
+        }
+        return ID_TABLE_CONTINUE;
+    }
+}
+
+static void
+vm_cc_table_mark(void *data)
+{
+    struct rb_id_table *tbl = (struct rb_id_table *)data;
+    if (tbl) {
+        rb_id_table_foreach_values(tbl, mark_cc_entry_i, NULL);
+    }
+}
+
+static enum rb_id_table_iterator_result
+cc_table_free_i(VALUE ccs_ptr, void *data)
+{
+    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+    VM_ASSERT(vm_ccs_p(ccs));
+
+    vm_ccs_free(ccs);
+
+    return ID_TABLE_CONTINUE;
+}
+
+static void
+vm_cc_table_free(void *data)
+{
+    struct rb_id_table *tbl = (struct rb_id_table *)data;
+
+    rb_id_table_foreach_values(tbl, cc_table_free_i, NULL);
+    rb_managed_id_table_type.function.dfree(data);
+}
+
+static enum rb_id_table_iterator_result
+cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
+{
+    size_t *total_size = data_ptr;
+    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+    *total_size += sizeof(*ccs);
+    *total_size += sizeof(ccs->entries[0]) * ccs->capa;
+    return ID_TABLE_CONTINUE;
+}
+
+static size_t
+vm_cc_table_memsize(const void *data)
+{
+    size_t memsize = rb_managed_id_table_type.function.dsize(data);
+    struct rb_id_table *tbl = (struct rb_id_table *)data;
+    rb_id_table_foreach_values(tbl, cc_table_memsize_i, &memsize);
+    return memsize;
+}
+
+static enum rb_id_table_iterator_result
+compact_cc_entry_i(VALUE ccs_ptr, void *data)
+{
+    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+
+    ccs->cme = (const struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)ccs->cme);
+    VM_ASSERT(vm_ccs_p(ccs));
+
+    for (int i=0; i<ccs->len; i++) {
+        ccs->entries[i].cc = (const struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
+    }
+
+    return ID_TABLE_CONTINUE;
+}
+
+static void
+vm_cc_table_compact(void *data)
+{
+    struct rb_id_table *tbl = (struct rb_id_table *)data;
+    rb_id_table_foreach_values(tbl, compact_cc_entry_i, NULL);
+}
+
+static const rb_data_type_t cc_table_type = {
+    .wrap_struct_name = "VM/cc_table",
+    .function = {
+        .dmark = vm_cc_table_mark,
+        .dfree = vm_cc_table_free,
+        .dsize = vm_cc_table_memsize,
+        .dcompact = vm_cc_table_compact,
+    },
+    .parent = &rb_managed_id_table_type,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
+};
+
+VALUE
+rb_vm_cc_table_create(size_t capa)
+{
+    return rb_managed_id_table_create(&cc_table_type, capa);
+}
+
 static enum rb_id_table_iterator_result
 vm_ccs_dump_i(ID mid, VALUE val, void *data)
 {
@@ -39,18 +159,18 @@ vm_ccs_dump_i(ID mid, VALUE val, void *data)
 static void
 vm_ccs_dump(VALUE klass, ID target_mid)
 {
-    struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
     if (cc_tbl) {
         VALUE ccs;
         if (target_mid) {
-            if (rb_id_table_lookup(cc_tbl, target_mid, &ccs)) {
+            if (rb_managed_id_table_lookup(cc_tbl, target_mid, &ccs)) {
                 fprintf(stderr, " [CCTB] %p\n", (void *)cc_tbl);
                 vm_ccs_dump_i(target_mid, ccs, NULL);
             }
         }
         else {
             fprintf(stderr, " [CCTB] %p\n", (void *)cc_tbl);
-            rb_id_table_foreach(cc_tbl, vm_ccs_dump_i, (void *)target_mid);
+            rb_managed_id_table_foreach(cc_tbl, vm_ccs_dump_i, (void *)target_mid);
         }
     }
 }
@@ -169,15 +289,15 @@ static const rb_callable_method_entry_t *complemented_callable_method_entry(VALU
 static const rb_callable_method_entry_t *lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
 static void
-invalidate_method_cache_in_cc_table(struct rb_id_table *tbl, ID mid)
+invalidate_method_cache_in_cc_table(VALUE tbl, ID mid)
 {
     VALUE ccs_data;
-    if (tbl && rb_id_table_lookup(tbl, mid, &ccs_data)) {
+    if (tbl && rb_managed_id_table_lookup(tbl, mid, &ccs_data)) {
         struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
         rb_yjit_cme_invalidate((rb_callable_method_entry_t *)ccs->cme);
         if (NIL_P(ccs->cme->owner)) invalidate_negative_cache(mid);
         rb_vm_ccs_free(ccs);
-        rb_id_table_delete(tbl, mid);
+        rb_managed_id_table_delete(tbl, mid);
         RB_DEBUG_COUNTER_INC(cc_invalidate_leaf_ccs);
     }
 }
@@ -253,7 +373,7 @@ clear_method_cache_by_id_in_class(VALUE klass, ID mid)
             // check only current class
             // invalidate CCs
-            struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+            VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
             invalidate_method_cache_in_cc_table(cc_tbl, mid);
             if (RCLASS_CC_TBL_NOT_PRIME_P(klass, cc_tbl)) {
                 invalidate_method_cache_in_cc_table(RCLASS_PRIME_CC_TBL(klass), mid);
@@ -385,13 +505,13 @@ invalidate_ccs_in_iclass_cc_tbl(VALUE value, void *data)
 }
 void
-rb_invalidate_method_caches(struct rb_id_table *cm_tbl, struct rb_id_table *cc_tbl)
+rb_invalidate_method_caches(struct rb_id_table *cm_tbl, VALUE cc_tbl)
 {
     if (cm_tbl) {
         rb_id_table_foreach_values(cm_tbl, invalidate_method_entry_in_iclass_callable_m_tbl, NULL);
     }
     if (cc_tbl) {
-        rb_id_table_foreach_values(cc_tbl, invalidate_ccs_in_iclass_cc_tbl, NULL);
+        rb_managed_id_table_foreach_values(cc_tbl, invalidate_ccs_in_iclass_cc_tbl, NULL);
     }
 }
@@ -1559,10 +1679,10 @@ cached_callable_method_entry(VALUE klass, ID mid)
 {
     ASSERT_vm_locking();
-    struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
     VALUE ccs_data;
-    if (cc_tbl && rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
+    if (cc_tbl && rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) {
         struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
         VM_ASSERT(vm_ccs_p(ccs));
@@ -1573,7 +1693,7 @@ cached_callable_method_entry(VALUE klass, ID mid)
         }
         else {
             rb_vm_ccs_free(ccs);
-            rb_id_table_delete(cc_tbl, mid);
+            rb_managed_id_table_delete(cc_tbl, mid);
         }
     }
@@ -1587,15 +1707,15 @@ cache_callable_method_entry(VALUE klass, ID mid, const rb_callable_method_entry_
     ASSERT_vm_locking();
     VM_ASSERT(cme != NULL);
-    struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+    VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
     VALUE ccs_data;
     if (!cc_tbl) {
-        cc_tbl = rb_id_table_create(2);
+        cc_tbl = rb_vm_cc_table_create(2);
         RCLASS_WRITE_CC_TBL(klass, cc_tbl);
     }
-    if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
+    if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) {
 #if VM_CHECK_MODE > 0
         struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
         VM_ASSERT(ccs->cme == cme);