| author | U.Nakamura <usa@ruby-lang.org> | 2023-07-18 21:10:12 +0900 |
|---|---|---|
| committer | U.Nakamura <usa@ruby-lang.org> | 2023-07-18 21:10:12 +0900 |
| commit | 2698b68ae86df333ff8bc8c4655f8012ee619257 | |
| tree | acd4357baf2851bb02c205569d6f393bcfa43ea7 | |
| parent | 3eea678b069aeec7f3a97437be204f65d5193e00 | |
merge revision(s) 537183cd2ac0163851277b46a2f21ea5914c11c0: [Backport #19577]
Fix write barrier order for `klass` to `cme` edge
Previously, the following crashed with
`CFLAGS=-DRGENGC_CHECK_MODE=2 -DRUBY_DEBUG=1 -fno-inline`:
$ ./miniruby -e 'GC.stress = true; Marshal.dump({})'
It crashes with a write barrier (WB) miss assertion on an edge from the
`Hash` class object to a newly allocated negative method entry.
This is due to uses of vm_ccs_create() running the WB too early,
before the method entry is inserted into the cc table and thus before
the reference edge is established. The insertion can trigger GC and
promote the class object, so the WB must run after the insertion.
Move the insertion into vm_ccs_create() and run the WB after the
insertion.
Discovered on CI:
http://ci.rvm.jp/results/trunk-asserts@ruby-sp2-docker/4391770
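To see why the order matters, here is a deliberately tiny toy model of the hazard; every name in it is invented for illustration and none of it is CRuby API. The point it demonstrates: a generational collector only records an old-to-young edge if the write barrier runs while the parent is already old, so a barrier that fires before a GC-triggering insertion can be silently lost.

```c
#include <stdio.h>
#include <stdbool.h>

/* Toy model (not CRuby): a parent is "remembered" only when the write
 * barrier runs while the parent is already in the old generation. */
typedef struct { bool old; bool remembered; } obj_t;

static void write_barrier(obj_t *parent) {
    if (parent->old) parent->remembered = true;  /* record old->young edge */
}

static void insert_may_gc(obj_t *parent) {
    parent->old = true;  /* a GC during the insertion can promote the parent */
}

int main(void) {
    obj_t klass = { false, false };

    /* Broken order (pre-patch): barrier before the insertion. */
    write_barrier(&klass);  /* klass still young: nothing is recorded */
    insert_may_gc(&klass);  /* klass promoted; the edge is now unrecorded */
    printf("pre-patch order:  remembered=%d (WB miss)\n", klass.remembered);

    /* Fixed order (the patch): insert first, then run the barrier. */
    klass = (obj_t){ false, false };
    insert_may_gc(&klass);
    write_barrier(&klass);
    printf("post-patch order: remembered=%d\n", klass.remembered);
    return 0;
}
```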
---
 vm_eval.c       |  3 +--
 vm_insnhelper.c | 10 ++++++----
 vm_method.c     |  3 +--
 3 files changed, 8 insertions(+), 8 deletions(-)
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | version.h | 2 |
| -rw-r--r-- | vm_eval.c | 3 |
| -rw-r--r-- | vm_insnhelper.c | 10 |
| -rw-r--r-- | vm_method.c | 3 |

4 files changed, 9 insertions, 9 deletions
diff --git a/version.h b/version.h
--- a/version.h
+++ b/version.h
@@ -11,7 +11,7 @@
 # define RUBY_VERSION_MINOR RUBY_API_VERSION_MINOR
 #define RUBY_VERSION_TEENY 4
 #define RUBY_RELEASE_DATE RUBY_RELEASE_YEAR_STR"-"RUBY_RELEASE_MONTH_STR"-"RUBY_RELEASE_DAY_STR
-#define RUBY_PATCHLEVEL 231
+#define RUBY_PATCHLEVEL 232
 
 #define RUBY_RELEASE_YEAR 2023
 #define RUBY_RELEASE_MONTH 7
diff --git a/vm_eval.c b/vm_eval.c
--- a/vm_eval.c
+++ b/vm_eval.c
@@ -395,8 +395,7 @@ cc_new(VALUE klass, ID mid, int argc, const rb_callable_method_entry_t *cme)
         ccs = (struct rb_class_cc_entries *)ccs_data;
     }
     else {
-        ccs = vm_ccs_create(klass, cme);
-        rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
+        ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
     }
 
     for (int i=0; i<ccs->len; i++) {
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index e01d39de77..aff6baa340 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -1689,7 +1689,7 @@ static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg
 static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
 
 static struct rb_class_cc_entries *
-vm_ccs_create(VALUE klass, const rb_callable_method_entry_t *cme)
+vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
 {
     struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
 #if VM_CHECK_MODE > 0
@@ -1697,9 +1697,12 @@ vm_ccs_create(VALUE klass, const rb_callable_method_entry_t *cme)
 #endif
     ccs->capa = 0;
     ccs->len = 0;
-    RB_OBJ_WRITE(klass, &ccs->cme, cme);
+    ccs->cme = cme;
     METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
     ccs->entries = NULL;
+
+    rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
+    RB_OBJ_WRITTEN(klass, Qundef, cme);
 
     return ccs;
 }
@@ -1850,8 +1853,7 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
         }
         else {
             // TODO: required?
-            ccs = vm_ccs_create(klass, cme);
-            rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
+            ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
         }
     }
 
diff --git a/vm_method.c b/vm_method.c
index 94c3f978dc..7cebd2e3bc 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -1288,8 +1288,7 @@ cache_callable_method_entry(VALUE klass, ID mid, const rb_callable_method_entry_
             VM_ASSERT(ccs->cme == cme);
         }
         else {
-            ccs = vm_ccs_create(klass, cme);
-            rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
+            ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
         }
     }
 
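Stitched together from the two vm_insnhelper.c hunks, the post-patch vm_ccs_create() reads roughly as follows. One line falls between the hunks and is not shown in the diff; the debug-signature initialization below is an assumption filling that gap.

```c
static struct rb_class_cc_entries *
vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
{
    struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
#if VM_CHECK_MODE > 0
    ccs->debug_sig = ~(VALUE)ccs;  /* assumed: this line is elided between the hunks */
#endif
    ccs->capa = 0;
    ccs->len = 0;
    ccs->cme = cme;                /* plain store; the barrier is deferred */
    METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
    ccs->entries = NULL;

    /* The insertion may allocate and thus run GC, which can promote
     * `klass`; only once the klass->cme edge exists does the barrier run. */
    rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
    RB_OBJ_WRITTEN(klass, Qundef, cme);

    return ccs;
}
```

Note the switch from RB_OBJ_WRITE, which performs the store and the write barrier as one inseparable step, to a plain assignment plus RB_OBJ_WRITTEN, which only notifies the GC of an edge that has already been written. Decoupling the two is what allows the barrier to be sequenced after rb_id_table_insert().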
