Diffstat (limited to 'vm_method.c')
-rw-r--r--  vm_method.c  667
1 file changed, 503 insertions(+), 164 deletions(-)
diff --git a/vm_method.c b/vm_method.c
index 62aae93234..dcf35527f7 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -23,6 +23,199 @@ static inline rb_method_entry_t *lookup_method_table(VALUE klass, ID id);
/* int ruby_running = 0; */
static enum rb_id_table_iterator_result
+mark_cc_entry_i(VALUE ccs_ptr, void *data)
+{
+ struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+
+ VM_ASSERT(vm_ccs_p(ccs));
+
+ if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
+ /* Before detaching the CCs from this class, we need to invalidate each cc,
+ * since we will no longer be marking the cme on its behalf.
+ */
+ for (int i = 0; i < ccs->len; i++) {
+ const struct rb_callcache *cc = ccs->entries[i].cc;
+ if (cc->klass == Qundef) continue; // already invalidated
+ VM_ASSERT(cc->klass == Qundef || vm_cc_check_cme(cc, ccs->cme));
+ VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
+ vm_cc_invalidate(cc);
+ }
+ ruby_xfree(ccs);
+ return ID_TABLE_DELETE;
+ }
+ else {
+ rb_gc_mark_movable((VALUE)ccs->cme);
+
+ for (int i = 0; i < ccs->len; i++) {
+ const struct rb_callcache *cc = ccs->entries[i].cc;
+ VM_ASSERT(cc->klass == Qundef || vm_cc_check_cme(cc, ccs->cme));
+
+ rb_gc_mark_movable((VALUE)cc);
+ }
+ return ID_TABLE_CONTINUE;
+ }
+}
+
+static void
+vm_cc_table_mark(void *data)
+{
+ struct rb_id_table *tbl = (struct rb_id_table *)data;
+ if (tbl) {
+ rb_id_table_foreach_values(tbl, mark_cc_entry_i, NULL);
+ }
+}
+
+static enum rb_id_table_iterator_result
+cc_table_free_i(VALUE ccs_ptr, void *data)
+{
+ struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+ VM_ASSERT(vm_ccs_p(ccs));
+
+ ruby_xfree(ccs);
+
+ return ID_TABLE_CONTINUE;
+}
+
+static void
+vm_cc_table_free(void *data)
+{
+ struct rb_id_table *tbl = (struct rb_id_table *)data;
+
+ rb_id_table_foreach_values(tbl, cc_table_free_i, NULL);
+ rb_managed_id_table_type.function.dfree(data);
+}
+
+static enum rb_id_table_iterator_result
+cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
+{
+ size_t *total_size = data_ptr;
+ struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+ *total_size += sizeof(*ccs);
+ *total_size += sizeof(ccs->entries[0]) * ccs->capa;
+ return ID_TABLE_CONTINUE;
+}
+
+static size_t
+vm_cc_table_memsize(const void *data)
+{
+ size_t memsize = rb_managed_id_table_type.function.dsize(data);
+ struct rb_id_table *tbl = (struct rb_id_table *)data;
+ rb_id_table_foreach_values(tbl, cc_table_memsize_i, &memsize);
+ return memsize;
+}
+
+static enum rb_id_table_iterator_result
+compact_cc_entry_i(VALUE ccs_ptr, void *data)
+{
+ struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+
+ ccs->cme = (const struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)ccs->cme);
+ VM_ASSERT(vm_ccs_p(ccs));
+
+ for (int i=0; i<ccs->len; i++) {
+ ccs->entries[i].cc = (const struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
+ }
+
+ return ID_TABLE_CONTINUE;
+}
+
+static void
+vm_cc_table_compact(void *data)
+{
+ struct rb_id_table *tbl = (struct rb_id_table *)data;
+ rb_id_table_foreach_values(tbl, compact_cc_entry_i, NULL);
+}
+
+static const rb_data_type_t cc_table_type = {
+ .wrap_struct_name = "VM/cc_table",
+ .function = {
+ .dmark = vm_cc_table_mark,
+ .dfree = vm_cc_table_free,
+ .dsize = vm_cc_table_memsize,
+ .dcompact = vm_cc_table_compact,
+ },
+ .parent = &rb_managed_id_table_type,
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
+};
+
+VALUE
+rb_vm_cc_table_create(size_t capa)
+{
+ return rb_managed_id_table_create(&cc_table_type, capa);
+}
+
+static enum rb_id_table_iterator_result
+vm_cc_table_dup_i(ID key, VALUE old_ccs_ptr, void *data)
+{
+ VALUE new_table = (VALUE)data;
+ struct rb_class_cc_entries *old_ccs = (struct rb_class_cc_entries *)old_ccs_ptr;
+
+ if (METHOD_ENTRY_INVALIDATED(old_ccs->cme)) {
+ // Invalidated CME. This entry will be removed from the old table on
+ // the next GC mark, so it's unsafe (and undesirable) to copy it.
+ return ID_TABLE_CONTINUE;
+ }
+
+ size_t memsize = vm_ccs_alloc_size(old_ccs->capa);
+ struct rb_class_cc_entries *new_ccs = ruby_xcalloc(1, memsize);
+ rb_managed_id_table_insert(new_table, key, (VALUE)new_ccs);
+
+ // We hold the VM lock, so invalidation should not have happened between
+ // our earlier invalidation check and now.
+ VM_ASSERT(!METHOD_ENTRY_INVALIDATED(old_ccs->cme));
+
+ memcpy(new_ccs, old_ccs, memsize);
+
+#if VM_CHECK_MODE > 0
+ new_ccs->debug_sig = ~(VALUE)new_ccs;
+#endif
+
+ RB_OBJ_WRITTEN(new_table, Qundef, (VALUE)new_ccs->cme);
+ for (int index = 0; index < new_ccs->len; index++) {
+ RB_OBJ_WRITTEN(new_table, Qundef, new_ccs->entries[index].cc);
+ }
+ return ID_TABLE_CONTINUE;
+}
+
+VALUE
+rb_vm_cc_table_dup(VALUE old_table)
+{
+ ASSERT_vm_locking();
+ VALUE new_table = rb_vm_cc_table_create(rb_managed_id_table_size(old_table));
+ rb_managed_id_table_foreach(old_table, vm_cc_table_dup_i, (void *)new_table);
+ return new_table;
+}
+
+static void
+vm_ccs_invalidate(struct rb_class_cc_entries *ccs)
+{
+ for (int i=0; i<ccs->len; i++) {
+ const struct rb_callcache *cc = ccs->entries[i].cc;
+ VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
+ vm_cc_invalidate(cc);
+ }
+}
+
+static void
+rb_vm_ccs_invalidate_and_free(struct rb_class_cc_entries *ccs)
+{
+ RB_DEBUG_COUNTER_INC(ccs_free);
+ vm_ccs_invalidate(ccs);
+ ruby_xfree(ccs);
+}
+
+void
+rb_vm_cc_table_delete(VALUE table, ID mid)
+{
+ VALUE ccs_obj;
+ if (rb_managed_id_table_lookup(table, mid, &ccs_obj)) {
+ struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_obj;
+ rb_managed_id_table_delete(table, mid);
+ rb_vm_ccs_invalidate_and_free(ccs);
+ }
+}
+
+static enum rb_id_table_iterator_result
vm_ccs_dump_i(ID mid, VALUE val, void *data)
{
const struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)val;
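The hunk above moves the per-class call-cache table behind a TypedData object (cc_table_type), so the GC drives marking, compaction, and freeing of each rb_class_cc_entries instead of manual bookkeeping; note that mark_cc_entry_i doubles as a sweep, using the mark pass to drop entries whose cme was invalidated (ID_TABLE_DELETE) rather than keeping them alive for another cycle. For readers unfamiliar with the idiom, here is a minimal sketch of a TypedData type with movable-mark and compaction callbacks; the `holder` struct and its names are hypothetical, only the rb_data_type_t API is real:

    #include <ruby.h>

    struct holder { VALUE cached; };  /* toy struct holding one GC reference */

    static void
    holder_mark(void *p)  /* let the GC see (and move) the reference */
    {
        rb_gc_mark_movable(((struct holder *)p)->cached);
    }

    static void
    holder_compact(void *p)  /* fix the pointer up after objects moved */
    {
        struct holder *h = p;
        h->cached = rb_gc_location(h->cached);
    }

    static size_t
    holder_memsize(const void *p)
    {
        return sizeof(struct holder);
    }

    static const rb_data_type_t holder_type = {
        .wrap_struct_name = "holder",
        .function = {
            .dmark = holder_mark,
            .dfree = RUBY_TYPED_DEFAULT_FREE,
            .dsize = holder_memsize,
            .dcompact = holder_compact,
        },
        .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
    };

    static VALUE
    holder_new(VALUE klass, VALUE cached)
    {
        struct holder *h;
        VALUE obj = TypedData_Make_Struct(klass, struct holder, &holder_type, h);
        RB_OBJ_WRITE(obj, &h->cached, cached);  /* write barrier: WB_PROTECTED */
        return obj;
    }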
@@ -39,18 +232,18 @@ vm_ccs_dump_i(ID mid, VALUE val, void *data)
static void
vm_ccs_dump(VALUE klass, ID target_mid)
{
- struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+ VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
if (cc_tbl) {
VALUE ccs;
if (target_mid) {
- if (rb_id_table_lookup(cc_tbl, target_mid, &ccs)) {
+ if (rb_managed_id_table_lookup(cc_tbl, target_mid, &ccs)) {
fprintf(stderr, " [CCTB] %p\n", (void *)cc_tbl);
vm_ccs_dump_i(target_mid, ccs, NULL);
}
}
else {
fprintf(stderr, " [CCTB] %p\n", (void *)cc_tbl);
- rb_id_table_foreach(cc_tbl, vm_ccs_dump_i, (void *)target_mid);
+ rb_managed_id_table_foreach(cc_tbl, vm_ccs_dump_i, (void *)target_mid);
}
}
}
@@ -122,6 +315,7 @@ vm_cme_invalidate(rb_callable_method_entry_t *cme)
RB_DEBUG_COUNTER_INC(cc_cme_invalidate);
rb_yjit_cme_invalidate(cme);
+ rb_zjit_cme_invalidate(cme);
}
static int
@@ -131,9 +325,6 @@ rb_clear_constant_cache_for_id_i(st_data_t ic, st_data_t arg)
return ST_CONTINUE;
}
-// Here for backward compat.
-void rb_clear_constant_cache(void) {}
-
void
rb_clear_constant_cache_for_id(ID id)
{
@@ -142,11 +333,12 @@ rb_clear_constant_cache_for_id(ID id)
if (rb_id_table_lookup(vm->constant_cache, id, &lookup_result)) {
set_table *ics = (set_table *)lookup_result;
- set_foreach(ics, rb_clear_constant_cache_for_id_i, (st_data_t) NULL);
+ set_table_foreach(ics, rb_clear_constant_cache_for_id_i, (st_data_t) NULL);
ruby_vm_constant_cache_invalidations += ics->num_entries;
}
rb_yjit_constant_state_changed(id);
+ rb_zjit_constant_state_changed(id);
}
static void
@@ -167,15 +359,16 @@ static const rb_callable_method_entry_t *complemented_callable_method_entry(VALU
static const rb_callable_method_entry_t *lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
static void
-invalidate_method_cache_in_cc_table(struct rb_id_table *tbl, ID mid)
+invalidate_method_cache_in_cc_table(VALUE tbl, ID mid)
{
VALUE ccs_data;
- if (tbl && rb_id_table_lookup(tbl, mid, &ccs_data)) {
+ if (tbl && rb_managed_id_table_lookup(tbl, mid, &ccs_data)) {
struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
rb_yjit_cme_invalidate((rb_callable_method_entry_t *)ccs->cme);
+ rb_zjit_cme_invalidate((rb_callable_method_entry_t *)ccs->cme);
if (NIL_P(ccs->cme->owner)) invalidate_negative_cache(mid);
- rb_vm_ccs_free(ccs);
- rb_id_table_delete(tbl, mid);
+ rb_vm_ccs_invalidate_and_free(ccs);
+ rb_managed_id_table_delete(tbl, mid);
RB_DEBUG_COUNTER_INC(cc_invalidate_leaf_ccs);
}
}
@@ -185,9 +378,8 @@ invalidate_callable_method_entry_in_callable_m_table(struct rb_id_table *tbl, ID
{
VALUE cme;
if (tbl && rb_id_table_lookup(tbl, mid, &cme)) {
- if (rb_yjit_enabled_p) {
- rb_yjit_cme_invalidate((rb_callable_method_entry_t *)cme);
- }
+ rb_yjit_cme_invalidate((rb_callable_method_entry_t *)cme);
+ rb_zjit_cme_invalidate((rb_callable_method_entry_t *)cme);
rb_id_table_delete(tbl, mid);
RB_DEBUG_COUNTER_INC(cc_invalidate_leaf_callable);
}
@@ -201,7 +393,7 @@ struct invalidate_callable_method_entry_foreach_arg {
};
static void
-invalidate_callable_method_entry_in_every_m_table_i(rb_classext_t *ext, bool is_prime, VALUE namespace, void *data)
+invalidate_callable_method_entry_in_every_m_table_i(rb_classext_t *ext, bool is_prime, VALUE box_value, void *data)
{
st_data_t me;
struct invalidate_callable_method_entry_foreach_arg *arg = (struct invalidate_callable_method_entry_foreach_arg *)data;
@@ -231,9 +423,8 @@ invalidate_complemented_method_entry_in_callable_m_table(struct rb_id_table *tbl
{
VALUE cme;
if (tbl && rb_id_table_lookup(tbl, mid, &cme)) {
- if (rb_yjit_enabled_p) {
- rb_yjit_cme_invalidate((rb_callable_method_entry_t *)cme);
- }
+ rb_yjit_cme_invalidate((rb_callable_method_entry_t *)cme);
+ rb_zjit_cme_invalidate((rb_callable_method_entry_t *)cme);
rb_id_table_delete(tbl, mid);
RB_DEBUG_COUNTER_INC(cc_invalidate_tree_callable);
}
@@ -245,91 +436,92 @@ clear_method_cache_by_id_in_class(VALUE klass, ID mid)
VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
if (rb_objspace_garbage_object_p(klass)) return;
- RB_VM_LOCK_ENTER();
- if (LIKELY(RCLASS_SUBCLASSES_FIRST(klass) == NULL)) {
- // no subclasses
- // check only current class
+ RB_VM_LOCKING() {
+ rb_vm_barrier();
- // invalidate CCs
- struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
- invalidate_method_cache_in_cc_table(cc_tbl, mid);
- if (RCLASS_CC_TBL_NOT_PRIME_P(klass, cc_tbl)) {
- invalidate_method_cache_in_cc_table(RCLASS_PRIME_CC_TBL(klass), mid);
- }
+ if (LIKELY(RCLASS_SUBCLASSES_FIRST(klass) == NULL)) {
+ // no subclasses
+ // check only current class
- // remove from callable_m_tbl, if exists
- struct rb_id_table *cm_tbl = RCLASS_WRITABLE_CALLABLE_M_TBL(klass);
- invalidate_callable_method_entry_in_callable_m_table(cm_tbl, mid);
- if (RCLASS_CALLABLE_M_TBL_NOT_PRIME_P(klass, cm_tbl)) {
- invalidate_callable_method_entry_in_callable_m_table(RCLASS_PRIME_CALLABLE_M_TBL(klass), mid);
- }
+ // invalidate CCs
+ VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+ invalidate_method_cache_in_cc_table(cc_tbl, mid);
+ if (RCLASS_CC_TBL_NOT_PRIME_P(klass, cc_tbl)) {
+ invalidate_method_cache_in_cc_table(RCLASS_PRIME_CC_TBL(klass), mid);
+ }
- RB_DEBUG_COUNTER_INC(cc_invalidate_leaf);
- }
- else {
- const rb_callable_method_entry_t *cme = complemented_callable_method_entry(klass, mid);
+ // remove from callable_m_tbl, if exists
+ struct rb_id_table *cm_tbl = RCLASS_WRITABLE_CALLABLE_M_TBL(klass);
+ invalidate_callable_method_entry_in_callable_m_table(cm_tbl, mid);
+ if (RCLASS_CALLABLE_M_TBL_NOT_PRIME_P(klass, cm_tbl)) {
+ invalidate_callable_method_entry_in_callable_m_table(RCLASS_PRIME_CALLABLE_M_TBL(klass), mid);
+ }
- if (cme) {
- // invalidate cme if found to invalidate the inline method cache.
- if (METHOD_ENTRY_CACHED(cme)) {
- if (METHOD_ENTRY_COMPLEMENTED(cme)) {
- // do nothing
- }
- else {
- // invalidate cc by invalidating cc->cme
- VALUE owner = cme->owner;
- VM_ASSERT_TYPE(owner, T_CLASS);
- VALUE klass_housing_cme;
- if (cme->def->type == VM_METHOD_TYPE_REFINED && !cme->def->body.refined.orig_me) {
- klass_housing_cme = owner;
+ RB_DEBUG_COUNTER_INC(cc_invalidate_leaf);
+ }
+ else {
+ const rb_callable_method_entry_t *cme = complemented_callable_method_entry(klass, mid);
+
+ if (cme) {
+ // invalidate cme if found to invalidate the inline method cache.
+ if (METHOD_ENTRY_CACHED(cme)) {
+ if (METHOD_ENTRY_COMPLEMENTED(cme)) {
+ // do nothing
}
else {
- klass_housing_cme = RCLASS_ORIGIN(owner);
+ // invalidate cc by invalidating cc->cme
+ VALUE owner = cme->owner;
+ VM_ASSERT_TYPE(owner, T_CLASS);
+ VALUE klass_housing_cme;
+ if (cme->def->type == VM_METHOD_TYPE_REFINED && !cme->def->body.refined.orig_me) {
+ klass_housing_cme = owner;
+ }
+ else {
+ klass_housing_cme = RCLASS_ORIGIN(owner);
+ }
+
+ // replace the cme that will be invalid in the all classexts
+ invalidate_callable_method_entry_in_every_m_table(klass_housing_cme, mid, cme);
}
- // replace the cme that will be invalid in the all classexts
- invalidate_callable_method_entry_in_every_m_table(klass_housing_cme, mid, cme);
- }
+ vm_cme_invalidate((rb_callable_method_entry_t *)cme);
+ RB_DEBUG_COUNTER_INC(cc_invalidate_tree_cme);
- vm_cme_invalidate((rb_callable_method_entry_t *)cme);
- RB_DEBUG_COUNTER_INC(cc_invalidate_tree_cme);
+ // In case of refinement ME, also invalidate the wrapped ME that
+ // could be cached at some callsite and is unreachable from any
+ // RCLASS_WRITABLE_CC_TBL.
+ if (cme->def->type == VM_METHOD_TYPE_REFINED && cme->def->body.refined.orig_me) {
+ vm_cme_invalidate((rb_callable_method_entry_t *)cme->def->body.refined.orig_me);
+ }
- // In case of refinement ME, also invalidate the wrapped ME that
- // could be cached at some callsite and is unreachable from any
- // RCLASS_WRITABLE_CC_TBL.
- if (cme->def->type == VM_METHOD_TYPE_REFINED && cme->def->body.refined.orig_me) {
- vm_cme_invalidate((rb_callable_method_entry_t *)cme->def->body.refined.orig_me);
+ if (cme->def->iseq_overload) {
+ rb_callable_method_entry_t *monly_cme = (rb_callable_method_entry_t *)lookup_overloaded_cme(cme);
+ if (monly_cme) {
+ vm_cme_invalidate(monly_cme);
+ }
+ }
}
- if (cme->def->iseq_overload) {
- rb_callable_method_entry_t *monly_cme = (rb_callable_method_entry_t *)lookup_overloaded_cme(cme);
- if (monly_cme) {
- vm_cme_invalidate(monly_cme);
+ // invalidate complement tbl
+ if (METHOD_ENTRY_COMPLEMENTED(cme)) {
+ VALUE defined_class = cme->defined_class;
+ struct rb_id_table *cm_tbl = RCLASS_WRITABLE_CALLABLE_M_TBL(defined_class);
+ invalidate_complemented_method_entry_in_callable_m_table(cm_tbl, mid);
+ if (RCLASS_CALLABLE_M_TBL_NOT_PRIME_P(defined_class, cm_tbl)) {
+ struct rb_id_table *prime_cm_table = RCLASS_PRIME_CALLABLE_M_TBL(defined_class);
+ invalidate_complemented_method_entry_in_callable_m_table(prime_cm_table, mid);
}
}
- }
- // invalidate complement tbl
- if (METHOD_ENTRY_COMPLEMENTED(cme)) {
- VALUE defined_class = cme->defined_class;
- struct rb_id_table *cm_tbl = RCLASS_WRITABLE_CALLABLE_M_TBL(defined_class);
- invalidate_complemented_method_entry_in_callable_m_table(cm_tbl, mid);
- if (RCLASS_CALLABLE_M_TBL_NOT_PRIME_P(defined_class, cm_tbl)) {
- struct rb_id_table *prime_cm_table = RCLASS_PRIME_CALLABLE_M_TBL(defined_class);
- invalidate_complemented_method_entry_in_callable_m_table(prime_cm_table, mid);
- }
+ RB_DEBUG_COUNTER_INC(cc_invalidate_tree);
+ }
+ else {
+ invalidate_negative_cache(mid);
}
-
- RB_DEBUG_COUNTER_INC(cc_invalidate_tree);
- }
- else {
- invalidate_negative_cache(mid);
}
- }
-
- rb_gccct_clear_table(Qnil);
- RB_VM_LOCK_LEAVE();
+ rb_gccct_clear_table(Qnil);
+ }
}
static void
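Besides the re-indentation noise, this hunk replaces the RB_VM_LOCK_ENTER()/RB_VM_LOCK_LEAVE() pair with the scoped RB_VM_LOCKING() block and adds an explicit rb_vm_barrier() so every ractor is quiesced before the caches are mutated. A hedged sketch of how such a scoped-lock block macro can be built in plain C (lock_acquire/lock_release are hypothetical; the real macro lives in Ruby's vm_sync.h):

    extern void lock_acquire(void *lock);  /* hypothetical */
    extern void lock_release(void *lock);  /* hypothetical */

    /* Runs the attached block exactly once with the lock held; the for
     * loop's final "increment" expression releases it on the way out.
     * An early `return` inside the block would skip the release, which
     * is why code using this style avoids returning from the body. */
    #define WITH_LOCK(l) \
        for (int once_ = (lock_acquire(l), 1); once_; \
             once_ = (lock_release(l), 0))

Usage: `WITH_LOCK(&some_lock) { mutate_caches(); }` reads like a statement and cannot forget the unlock.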
@@ -380,42 +572,45 @@ invalidate_ccs_in_iclass_cc_tbl(VALUE value, void *data)
{
struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)value;
vm_cme_invalidate((rb_callable_method_entry_t *)ccs->cme);
+ xfree(ccs);
return ID_TABLE_DELETE;
}
void
-rb_invalidate_method_caches(struct rb_id_table *cm_tbl, struct rb_id_table *cc_tbl)
+rb_invalidate_method_caches(struct rb_id_table *cm_tbl, VALUE cc_tbl)
{
if (cm_tbl) {
rb_id_table_foreach_values(cm_tbl, invalidate_method_entry_in_iclass_callable_m_tbl, NULL);
}
if (cc_tbl) {
- rb_id_table_foreach_values(cc_tbl, invalidate_ccs_in_iclass_cc_tbl, NULL);
+ rb_managed_id_table_foreach_values(cc_tbl, invalidate_ccs_in_iclass_cc_tbl, NULL);
}
}
static int
-invalidate_all_refinement_cc(void *vstart, void *vend, size_t stride, void *data)
-{
- VALUE v = (VALUE)vstart;
- for (; v != (VALUE)vend; v += stride) {
- void *ptr = rb_asan_poisoned_object_p(v);
- rb_asan_unpoison_object(v, false);
-
- if (RBASIC(v)->flags) { // liveness check
- if (imemo_type_p(v, imemo_callcache)) {
- const struct rb_callcache *cc = (const struct rb_callcache *)v;
- if (vm_cc_refinement_p(cc) && cc->klass) {
- vm_cc_invalidate(cc);
- }
- }
- }
+invalidate_cc_refinement(st_data_t key, st_data_t data)
+{
+ VALUE v = (VALUE)key;
+ void *ptr = rb_asan_poisoned_object_p(v);
+ rb_asan_unpoison_object(v, false);
+
+ if (rb_gc_pointer_to_heap_p(v) &&
+ !rb_objspace_garbage_object_p(v) &&
+ RBASIC(v)->flags) { // liveness check
+ const struct rb_callcache *cc = (const struct rb_callcache *)v;
- if (ptr) {
- rb_asan_poison_object(v);
+ VM_ASSERT(vm_cc_refinement_p(cc));
+
+ if (vm_cc_valid(cc)) {
+ vm_cc_invalidate(cc);
}
}
- return 0; // continue to iteration
+
+ if (ptr) {
+ rb_asan_poison_object(v);
+ }
+
+ return ST_CONTINUE;
}
static st_index_t
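The deleted invalidate_all_refinement_cc walked every object in the heap (via rb_objspace_each_objects) to find refinement call caches; the replacement iterates a dedicated registry, vm->cc_refinement_table, populated when refinement CCs are created (see rb_vm_insert_cc_refinement below). The cost shifts to one set insert per refinement callcache, making global invalidation proportional to the number of registered entries rather than to heap size. A minimal sketch of the registry idiom using the public st_table API (all names hypothetical):

    #include <ruby.h>
    #include <ruby/st.h>

    static st_table *registry;  /* keys: VALUEs to invalidate; values unused */

    static void
    registry_init(void)
    {
        registry = st_init_numtable();
    }

    static void
    registry_add(VALUE cc)
    {
        st_insert(registry, (st_data_t)cc, (st_data_t)0);
    }

    static int
    invalidate_one(st_data_t key, st_data_t val, st_data_t arg)
    {
        /* ... invalidate ((VALUE)key) here ... */
        return ST_CONTINUE;
    }

    static void
    invalidate_all(void)
    {
        st_foreach(registry, invalidate_one, (st_data_t)0);
        st_clear(registry);  /* entries are one-shot */
    }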
@@ -497,13 +692,12 @@ rb_vm_ci_lookup(ID mid, unsigned int flag, unsigned int argc, const struct rb_ca
((struct rb_callinfo_kwarg *)kwarg)->references++;
}
- struct rb_callinfo *new_ci = IMEMO_NEW(struct rb_callinfo, imemo_callinfo, (VALUE)kwarg);
+ struct rb_callinfo *new_ci = SHAREABLE_IMEMO_NEW(struct rb_callinfo, imemo_callinfo, (VALUE)kwarg);
new_ci->mid = mid;
new_ci->flag = flag;
new_ci->argc = argc;
- RB_VM_LOCK_ENTER();
- {
+ RB_VM_LOCKING() {
st_table *ci_table = vm->ci_table;
VM_ASSERT(ci_table);
@@ -511,7 +705,6 @@ rb_vm_ci_lookup(ID mid, unsigned int flag, unsigned int argc, const struct rb_ca
st_update(ci_table, (st_data_t)new_ci, ci_lookup_i, (st_data_t)&ci);
} while (ci == NULL);
}
- RB_VM_LOCK_LEAVE();
VM_ASSERT(ci);
@@ -530,16 +723,51 @@ rb_vm_ci_free(const struct rb_callinfo *ci)
}
void
+rb_vm_insert_cc_refinement(const struct rb_callcache *cc)
+{
+ st_data_t key = (st_data_t)cc;
+
+ rb_vm_t *vm = GET_VM();
+ RB_VM_LOCK_ENTER();
+ {
+ rb_set_insert(vm->cc_refinement_table, key);
+ }
+ RB_VM_LOCK_LEAVE();
+}
+
+void
+rb_vm_delete_cc_refinement(const struct rb_callcache *cc)
+{
+ ASSERT_vm_locking();
+
+ rb_vm_t *vm = GET_VM();
+ st_data_t key = (st_data_t)cc;
+
+ rb_set_table_delete(vm->cc_refinement_table, &key);
+}
+
+void
rb_clear_all_refinement_method_cache(void)
{
- rb_objspace_each_objects(invalidate_all_refinement_cc, NULL);
+ rb_vm_t *vm = GET_VM();
+
+ RB_VM_LOCK_ENTER();
+ {
+ rb_set_table_foreach(vm->cc_refinement_table, invalidate_cc_refinement, (st_data_t)NULL);
+ rb_set_table_clear(vm->cc_refinement_table);
+ rb_set_compact_table(vm->cc_refinement_table);
+ }
+ RB_VM_LOCK_LEAVE();
+
rb_yjit_invalidate_all_method_lookup_assumptions();
}
void
rb_method_table_insert(VALUE klass, struct rb_id_table *table, ID method_id, const rb_method_entry_t *me)
{
- rb_method_table_insert0(klass, table, method_id, me, RB_TYPE_P(klass, T_ICLASS) && !RICLASS_OWNS_M_TBL_P(klass));
+ RB_VM_LOCKING() {
+ rb_method_table_insert0(klass, table, method_id, me, RB_TYPE_P(klass, T_ICLASS) && !RICLASS_OWNS_M_TBL_P(klass));
+ }
}
void
@@ -607,7 +835,7 @@ rb_add_method_optimized(VALUE klass, ID mid, enum method_optimized_type opt_type
}
static void
-rb_method_definition_release(rb_method_definition_t *def)
+method_definition_release(rb_method_definition_t *def)
{
if (def != NULL) {
const unsigned int reference_count_was = RUBY_ATOMIC_FETCH_SUB(def->reference_count, 1);
@@ -617,9 +845,6 @@ rb_method_definition_release(rb_method_definition_t *def)
if (reference_count_was == 1) {
if (METHOD_DEBUG) fprintf(stderr, "-%p-%s:1->0 (remove)\n", (void *)def,
rb_id2name(def->original_id));
- if (def->type == VM_METHOD_TYPE_BMETHOD && def->body.bmethod.hooks) {
- xfree(def->body.bmethod.hooks);
- }
xfree(def);
}
else {
@@ -629,6 +854,12 @@ rb_method_definition_release(rb_method_definition_t *def)
}
}
+void
+rb_method_definition_release(rb_method_definition_t *def)
+{
+ method_definition_release(def);
+}
+
static void delete_overloaded_cme(const rb_callable_method_entry_t *cme);
void
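rb_method_definition_t objects are shared by many method entries and reference counted; method_definition_release decrements with RUBY_ATOMIC_FETCH_SUB and frees only when the count was previously 1, so exactly one releaser wins even when ractors race. The bare shape of that idiom in portable C11 (toy type, not the Ruby macros):

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct {
        atomic_uint refs;
        /* ... payload ... */
    } def_t;

    static void
    def_release(def_t *d)
    {
        /* fetch_sub returns the value BEFORE the decrement, so exactly
         * one thread observes 1 and performs the free. */
        if (atomic_fetch_sub_explicit(&d->refs, 1, memory_order_acq_rel) == 1) {
            free(d);
        }
    }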
@@ -642,7 +873,18 @@ rb_free_method_entry_vm_weak_references(const rb_method_entry_t *me)
void
rb_free_method_entry(const rb_method_entry_t *me)
{
- rb_method_definition_release(me->def);
+#if USE_ZJIT
+ if (METHOD_ENTRY_CACHED(me)) {
+ rb_zjit_cme_free((const rb_callable_method_entry_t *)me);
+ }
+#endif
+
+#if USE_YJIT
+ // YJIT's rb_yjit_root_mark() roots the CMEs tracked in `Invariants`,
+ // so there is nothing that needs to be removed from `Invariants` here.
+#endif
+
+ method_definition_release(me->def);
}
static inline rb_method_entry_t *search_method(VALUE klass, ID id, VALUE *defined_class_ptr);
@@ -709,6 +951,7 @@ setup_method_cfunc_struct(rb_method_cfunc_t *cfunc, VALUE (*func)(ANYARGS), int
cfunc->invoker = call_cfunc_invoker_func(argc);
}
+
static rb_method_definition_t *
method_definition_addref(rb_method_definition_t *def, bool complemented)
{
@@ -723,9 +966,15 @@ method_definition_addref(rb_method_definition_t *def, bool complemented)
}
void
+rb_method_definition_addref(rb_method_definition_t *def)
+{
+ method_definition_addref(def, false);
+}
+
+void
rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts)
{
- rb_method_definition_release(me->def);
+ method_definition_release(me->def);
*(rb_method_definition_t **)&me->def = method_definition_addref(def, METHOD_ENTRY_COMPLEMENTED(me));
if (!ruby_running) add_opt_method_entry(me);
@@ -741,7 +990,7 @@ rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *de
/* setup iseq first (before invoking GC) */
RB_OBJ_WRITE(me, &def->body.iseq.iseqptr, iseq);
- // Methods defined in `with_yjit` should be considered METHOD_ENTRY_BASIC
+ // Methods defined in `with_jit` should be considered METHOD_ENTRY_BASIC
if (rb_iseq_attr_p(iseq, BUILTIN_ATTR_C_TRACE)) {
METHOD_ENTRY_BASIC_SET((rb_method_entry_t *)me, TRUE);
}
@@ -779,7 +1028,9 @@ rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *de
if (cfp && (line = rb_vm_get_sourceline(cfp))) {
VALUE location = rb_ary_new3(2, rb_iseq_path(cfp->iseq), INT2FIX(line));
- RB_OBJ_WRITE(me, &def->body.attr.location, rb_ary_freeze(location));
+ rb_ary_freeze(location);
+ RB_OBJ_SET_SHAREABLE(location);
+ RB_OBJ_WRITE(me, &def->body.attr.location, location);
}
else {
VM_ASSERT(def->body.attr.location == 0);
@@ -788,7 +1039,7 @@ rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *de
}
case VM_METHOD_TYPE_BMETHOD:
RB_OBJ_WRITE(me, &def->body.bmethod.proc, (VALUE)opts);
- RB_OBJ_WRITE(me, &def->body.bmethod.defined_ractor, rb_ractor_self(GET_RACTOR()));
+ def->body.bmethod.defined_ractor_id = rb_ec_ractor_id(GET_EC());
return;
case VM_METHOD_TYPE_NOTIMPLEMENTED:
setup_method_cfunc_struct(UNALIGNED_MEMBER_PTR(def, body.cfunc), (VALUE(*)(ANYARGS))rb_f_notimplement_internal, -1);
@@ -828,9 +1079,6 @@ method_definition_reset(const rb_method_entry_t *me)
break;
case VM_METHOD_TYPE_BMETHOD:
RB_OBJ_WRITTEN(me, Qundef, def->body.bmethod.proc);
- RB_OBJ_WRITTEN(me, Qundef, def->body.bmethod.defined_ractor);
- /* give up to check all in a list */
- if (def->body.bmethod.hooks) rb_gc_writebarrier_remember((VALUE)me);
break;
case VM_METHOD_TYPE_REFINED:
RB_OBJ_WRITTEN(me, Qundef, def->body.refined.orig_me);
@@ -848,6 +1096,8 @@ method_definition_reset(const rb_method_entry_t *me)
}
}
+static rb_atomic_t method_serial = 1;
+
rb_method_definition_t *
rb_method_definition_create(rb_method_type_t type, ID mid)
{
@@ -855,9 +1105,8 @@ rb_method_definition_create(rb_method_type_t type, ID mid)
def = ZALLOC(rb_method_definition_t);
def->type = type;
def->original_id = mid;
- static uintptr_t method_serial = 1;
- def->method_serial = method_serial++;
- def->ns = rb_current_namespace();
+ def->method_serial = (uintptr_t)RUBY_ATOMIC_FETCH_ADD(method_serial, 1);
+ def->box = rb_current_box();
return def;
}
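In the same spirit, method_serial moves from a function-local `static uintptr_t` bumped with `++` (two ractors could mint the same serial) to a file-scope rb_atomic_t advanced with RUBY_ATOMIC_FETCH_ADD. The equivalent in portable C11, for illustration only:

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_uintptr_t serial = 1;

    static uintptr_t
    next_serial(void)
    {
        /* Returns the old value and increments in one atomic step, so no
         * two callers can ever observe the same serial. */
        return atomic_fetch_add_explicit(&serial, 1, memory_order_relaxed);
    }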
@@ -869,7 +1118,11 @@ rb_method_entry_alloc(ID called_id, VALUE owner, VALUE defined_class, rb_method_
// not negative cache
VM_ASSERT_TYPE2(defined_class, T_CLASS, T_ICLASS);
}
- rb_method_entry_t *me = IMEMO_NEW(rb_method_entry_t, imemo_ment, defined_class);
+ rb_method_entry_t *me = SHAREABLE_IMEMO_NEW(rb_method_entry_t, imemo_ment, defined_class);
+
+ // mark_and_move_method_entry pins itself when it is in the overloaded_cme table
+ rb_gc_register_pinning_obj((VALUE)me);
+
*((rb_method_definition_t **)&me->def) = def;
me->called_id = called_id;
me->owner = owner;
@@ -963,7 +1216,7 @@ rb_method_entry_complement_defined_class(const rb_method_entry_t *src_me, ID cal
void
rb_method_entry_copy(rb_method_entry_t *dst, const rb_method_entry_t *src)
{
- rb_method_definition_release(dst->def);
+ method_definition_release(dst->def);
*(rb_method_definition_t **)&dst->def = method_definition_addref(src->def, METHOD_ENTRY_COMPLEMENTED(src));
method_definition_reset(dst);
dst->called_id = src->called_id;
@@ -1049,6 +1302,7 @@ check_override_opt_method(VALUE klass, VALUE mid)
}
}
+static inline rb_method_entry_t* search_method0(VALUE klass, ID id, VALUE *defined_class_ptr, bool skip_refined);
/*
* klass->method_table[mid] = method_entry(defined_class, visi, def)
*
@@ -1089,7 +1343,12 @@ rb_method_entry_make(VALUE klass, ID mid, VALUE defined_class, rb_method_visibil
if (RB_TYPE_P(klass, T_MODULE) && FL_TEST(klass, RMODULE_IS_REFINEMENT)) {
VALUE refined_class = rb_refinement_module_get_refined_class(klass);
+ bool search_superclass = type == VM_METHOD_TYPE_ZSUPER && !lookup_method_table(refined_class, mid);
rb_add_refined_method_entry(refined_class, mid);
+ if (search_superclass) {
+ rb_method_entry_t *me = lookup_method_table(refined_class, mid);
+ me->def->body.refined.orig_me = search_method0(refined_class, mid, NULL, true);
+ }
}
if (type == VM_METHOD_TYPE_REFINED) {
rb_method_entry_t *old_me = lookup_method_table(RCLASS_ORIGIN(klass), mid);
@@ -1329,7 +1588,9 @@ method_added(VALUE klass, ID mid)
void
rb_add_method(VALUE klass, ID mid, rb_method_type_t type, void *opts, rb_method_visibility_t visi)
{
- rb_method_entry_make(klass, mid, klass, visi, type, NULL, mid, opts);
+ RB_VM_LOCKING() {
+ rb_method_entry_make(klass, mid, klass, visi, type, NULL, mid, opts);
+ }
if (type != VM_METHOD_TYPE_UNDEF && type != VM_METHOD_TYPE_REFINED) {
method_added(klass, mid);
@@ -1354,11 +1615,14 @@ static rb_method_entry_t *
method_entry_set(VALUE klass, ID mid, const rb_method_entry_t *me,
rb_method_visibility_t visi, VALUE defined_class)
{
- rb_method_entry_t *newme = rb_method_entry_make(klass, mid, defined_class, visi,
- me->def->type, me->def, 0, NULL);
- if (newme == me) {
- me->def->no_redef_warning = TRUE;
- METHOD_ENTRY_FLAGS_SET(newme, visi, FALSE);
+ rb_method_entry_t *newme;
+ RB_VM_LOCKING() {
+ newme = rb_method_entry_make(klass, mid, defined_class, visi,
+ me->def->type, me->def, 0, NULL);
+ if (newme == me) {
+ me->def->no_redef_warning = TRUE;
+ METHOD_ENTRY_FLAGS_SET(newme, visi, FALSE);
+ }
}
method_added(klass, mid);
@@ -1392,10 +1656,20 @@ rb_undef_alloc_func(VALUE klass)
rb_alloc_func_t
rb_get_alloc_func(VALUE klass)
{
- Check_Type(klass, T_CLASS);
+ RBIMPL_ASSERT_TYPE(klass, T_CLASS);
- for (; klass; klass = RCLASS_SUPER(klass)) {
- rb_alloc_func_t allocator = RCLASS_ALLOCATOR(klass);
+ rb_alloc_func_t allocator = RCLASS_ALLOCATOR(klass);
+ if (allocator == UNDEF_ALLOC_FUNC) return 0;
+ if (allocator) return allocator;
+
+ VALUE *superclasses = RCLASS_SUPERCLASSES(klass);
+ size_t depth = RCLASS_SUPERCLASS_DEPTH(klass);
+
+ for (size_t i = depth; i > 0; i--) {
+ klass = superclasses[i - 1];
+ RBIMPL_ASSERT_TYPE(klass, T_CLASS);
+
+ allocator = RCLASS_ALLOCATOR(klass);
if (allocator == UNDEF_ALLOC_FUNC) break;
if (allocator) return allocator;
}
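rb_get_alloc_func now consults the receiver's own allocator, then indexes the precomputed superclass array from the immediate superclass (index depth - 1) toward the root instead of pointer-chasing RCLASS_SUPER; the visit order and the UNDEF_ALLOC_FUNC short-circuit are unchanged. A toy version of the same walk (assumed layout: index 0 is the root of the hierarchy):

    #include <stddef.h>

    typedef void *(*alloc_fn)(void);

    #define UNDEF_ALLOC ((alloc_fn)-1)  /* sentinel: allocation explicitly undefined */

    static alloc_fn
    find_allocator(alloc_fn own, alloc_fn *supers, size_t depth)
    {
        if (own == UNDEF_ALLOC) return NULL;  /* undef on the class itself wins */
        if (own) return own;

        for (size_t i = depth; i > 0; i--) {  /* immediate superclass first */
            alloc_fn a = supers[i - 1];
            if (a == UNDEF_ALLOC) return NULL;
            if (a) return a;
        }
        return NULL;  /* no allocator anywhere in the chain */
    }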
@@ -1524,10 +1798,10 @@ cached_callable_method_entry(VALUE klass, ID mid)
{
ASSERT_vm_locking();
- struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+ VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
VALUE ccs_data;
- if (cc_tbl && rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
+ if (cc_tbl && rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) {
struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
VM_ASSERT(vm_ccs_p(ccs));
@@ -1537,8 +1811,10 @@ cached_callable_method_entry(VALUE klass, ID mid)
return ccs->cme;
}
else {
- rb_vm_ccs_free(ccs);
- rb_id_table_delete(cc_tbl, mid);
+ rb_vm_barrier();
+
+ rb_managed_id_table_delete(cc_tbl, mid);
+ rb_vm_ccs_invalidate_and_free(ccs);
}
}
@@ -1552,22 +1828,29 @@ cache_callable_method_entry(VALUE klass, ID mid, const rb_callable_method_entry_
ASSERT_vm_locking();
VM_ASSERT(cme != NULL);
- struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
+ VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass);
VALUE ccs_data;
if (!cc_tbl) {
- cc_tbl = rb_id_table_create(2);
+ cc_tbl = rb_vm_cc_table_create(2);
RCLASS_WRITE_CC_TBL(klass, cc_tbl);
}
- if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
+ if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) {
#if VM_CHECK_MODE > 0
struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
VM_ASSERT(ccs->cme == cme);
#endif
}
else {
- vm_ccs_create(klass, cc_tbl, mid, cme);
+ if (rb_multi_ractor_p()) {
+ VALUE new_cc_tbl = rb_vm_cc_table_dup(cc_tbl);
+ vm_ccs_create(klass, new_cc_tbl, mid, cme);
+ RB_OBJ_ATOMIC_WRITE(klass, &RCLASSEXT_CC_TBL(RCLASS_EXT_WRITABLE(klass)), new_cc_tbl);
+ }
+ else {
+ vm_ccs_create(klass, cc_tbl, mid, cme);
+ }
}
}
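Under multiple ractors, cache_callable_method_entry never mutates the shared cc table in place: it duplicates the table, inserts into the private copy, and publishes the copy with a single atomic pointer store (RB_OBJ_ATOMIC_WRITE), so the lock-free readers added in the next hunk always observe either the old table or a complete new one. A self-contained C11 toy of the copy-on-write publish step:

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { int vals[8]; } table_t;  /* toy fixed-size "table" */

    static _Atomic(table_t *) shared;

    static void
    cow_init(void)
    {
        atomic_store(&shared, calloc(1, sizeof(table_t)));
    }

    /* Writer (serialized by a lock elsewhere): mutate off to the side. */
    static void
    cow_insert(int idx, int val)
    {
        table_t *copy = malloc(sizeof(*copy));
        memcpy(copy, atomic_load(&shared), sizeof(*copy));
        copy->vals[idx] = val;        /* touch only the private copy  */
        atomic_store(&shared, copy);  /* publish in one atomic store  */
        /* NB: reclaiming the old table safely needs deferred freeing
         * (the VM leans on the GC for this); the toy just leaks it. */
    }

    /* Reader: lock-free, always sees a fully formed table. */
    static int
    cow_read(int idx)
    {
        return atomic_load(&shared)->vals[idx];
    }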
@@ -1596,8 +1879,26 @@ callable_method_entry_or_negative(VALUE klass, ID mid, VALUE *defined_class_ptr)
const rb_callable_method_entry_t *cme;
VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
- RB_VM_LOCK_ENTER();
- {
+
+ /* Fast path: lock-free read from cache */
+ VALUE cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
+ if (cc_tbl) {
+ VALUE ccs_data;
+ if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) {
+ struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
+ VM_ASSERT(vm_ccs_p(ccs));
+
+ if (LIKELY(!METHOD_ENTRY_INVALIDATED(ccs->cme))) {
+ VM_ASSERT(ccs->cme->called_id == mid);
+ if (defined_class_ptr != NULL) *defined_class_ptr = ccs->cme->defined_class;
+ RB_DEBUG_COUNTER_INC(ccs_found);
+ return ccs->cme;
+ }
+ }
+ }
+
+ /* Slow path: need to lock and potentially populate cache */
+ RB_VM_LOCKING() {
cme = cached_callable_method_entry(klass, mid);
if (cme) {
@@ -1618,7 +1919,6 @@ callable_method_entry_or_negative(VALUE klass, ID mid, VALUE *defined_class_ptr)
cache_callable_method_entry(klass, mid, cme);
}
}
- RB_VM_LOCK_LEAVE();
return cme;
}
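The fast path above is the lock-free half of a double-checked pattern: probe the atomically published table without the VM lock, and only on a miss (or an invalidated cme) fall back to RB_VM_LOCKING(), where cached_callable_method_entry repeats the lookup before doing the slow search. The canonical shape of double-checked lookup, sketched with C11 atomics and pthreads (compute() is a hypothetical slow path):

    #include <stdatomic.h>
    #include <pthread.h>

    static _Atomic(void *) cache;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    extern void *compute(void);  /* hypothetical, assumed defined elsewhere */

    static void *
    lookup(void)
    {
        void *v = atomic_load(&cache);  /* fast path: no lock taken */
        if (v) return v;

        pthread_mutex_lock(&lock);
        v = atomic_load(&cache);        /* recheck: another thread may
                                         * have filled it meanwhile   */
        if (!v) {
            v = compute();
            atomic_store(&cache, v);    /* publish for fast readers   */
        }
        pthread_mutex_unlock(&lock);
        return v;
    }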
@@ -1742,7 +2042,12 @@ resolve_refined_method(VALUE refinements, const rb_method_entry_t *me, VALUE *de
tmp_me = me->def->body.refined.orig_me;
if (tmp_me) {
- if (defined_class_ptr) *defined_class_ptr = tmp_me->defined_class;
+ if (!tmp_me->defined_class) {
+ VM_ASSERT_TYPE(tmp_me->owner, T_MODULE);
+ }
+ else if (defined_class_ptr) {
+ *defined_class_ptr = tmp_me->defined_class;
+ }
return tmp_me;
}
@@ -2575,21 +2880,53 @@ rb_mod_public(int argc, VALUE *argv, VALUE module)
* protected(method_name, method_name, ...) -> array
* protected(array) -> array
*
- * With no arguments, sets the default visibility for subsequently
- * defined methods to protected. With arguments, sets the named methods
- * to have protected visibility.
- * String arguments are converted to symbols.
- * An Array of Symbols and/or Strings is also accepted.
- * If a single argument is passed, it is returned.
- * If no argument is passed, nil is returned.
- * If multiple arguments are passed, the arguments are returned as an array.
+ * Sets the visibility of a section or of a list of method names as protected.
+ * Accepts no arguments, a splat of method names (symbols or strings) or an
+ * array of method names. Returns the arguments that it received.
+ *
+ * == Important difference from protected in other languages
+ *
+ * Protected methods in Ruby are different from other languages such as Java,
+ * where methods are marked as protected to give access to subclasses. In Ruby,
+ * subclasses <b>already have access to all methods defined in the parent
+ * class</b>, even private ones.
+ *
+ * Marking a method as protected allows <b>different objects of the same
+ * class</b> to call it.
+ *
+ * One use case is for comparison methods, such as <code>==</code>, if we want
+ * to expose a method for comparison between objects of the same class without
+ * making the method public to objects of other classes.
+ *
+ * == Performance considerations
+ *
+ * Protected methods are slower than others because they can't use inline
+ * caches.
+ *
+ * == Example
+ *
+ * class Account
+ * # Mark balance as protected, so that we can compare between accounts
+ * # without making it public.
+ * attr_reader :balance
+ * protected :balance
+ *
+ * def initialize(balance)
+ * @balance = balance
+ * end
+ *
+ * def >(other)
+ * # The invocation to `other.balance` is allowed because `other` is a
+ * # different object of the same class (Account).
+ * balance > other.balance
+ * end
+ * end
*
- * If a method has protected visibility, it is callable only where
- * <code>self</code> of the context is the same as the method.
- * (method definition or instance_eval). This behavior is different from
- * Java's protected method. Usually <code>private</code> should be used.
+ * account1 = Account.new(100)
+ * account2 = Account.new(50)
*
- * Note that a protected method is slow because it can't use inline cache.
+ * account1 > account2 # => true (works)
+ * account1.balance # => NoMethodError (fails because balance is not public)
*
* To show a private method on RDoc, use <code>:doc:</code> instead of this.
*/
@@ -2703,13 +3040,14 @@ rb_mod_ruby2_keywords(int argc, VALUE *argv, VALUE module)
switch (me->def->type) {
case VM_METHOD_TYPE_ISEQ:
if (ISEQ_BODY(me->def->body.iseq.iseqptr)->param.flags.has_rest &&
+ !ISEQ_BODY(me->def->body.iseq.iseqptr)->param.flags.has_post &&
!ISEQ_BODY(me->def->body.iseq.iseqptr)->param.flags.has_kw &&
!ISEQ_BODY(me->def->body.iseq.iseqptr)->param.flags.has_kwrest) {
ISEQ_BODY(me->def->body.iseq.iseqptr)->param.flags.ruby2_keywords = 1;
rb_clear_method_cache(module, name);
}
else {
- rb_warn("Skipping set of ruby2_keywords flag for %"PRIsVALUE" (method accepts keywords or method does not accept argument splat)", QUOTE_ID(name));
+ rb_warn("Skipping set of ruby2_keywords flag for %"PRIsVALUE" (method accepts keywords or post arguments or method does not accept argument splat)", QUOTE_ID(name));
}
break;
case VM_METHOD_TYPE_BMETHOD: {
@@ -2722,13 +3060,14 @@ rb_mod_ruby2_keywords(int argc, VALUE *argv, VALUE module)
const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(procval);
const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
if (ISEQ_BODY(iseq)->param.flags.has_rest &&
+ !ISEQ_BODY(iseq)->param.flags.has_post &&
!ISEQ_BODY(iseq)->param.flags.has_kw &&
!ISEQ_BODY(iseq)->param.flags.has_kwrest) {
ISEQ_BODY(iseq)->param.flags.ruby2_keywords = 1;
rb_clear_method_cache(module, name);
}
else {
- rb_warn("Skipping set of ruby2_keywords flag for %"PRIsVALUE" (method accepts keywords or method does not accept argument splat)", QUOTE_ID(name));
+ rb_warn("Skipping set of ruby2_keywords flag for %"PRIsVALUE" (method accepts keywords or post arguments or method does not accept argument splat)", QUOTE_ID(name));
}
break;
}