summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    nagachika <nagachika@ruby-lang.org>  2023-07-17 09:50:10 +0900
committer nagachika <nagachika@ruby-lang.org>  2023-07-17 09:50:10 +0900
commit    98b4ac7287928c202c90e9de1ae02c0707ec68b8 (patch)
tree      fb5a437a80a00d76c8e3f2aa509ee8418406a86d
parent    cb8d656100659eaee44042ca680886c30892df04 (diff)
merge revision(s) 537183cd2ac0163851277b46a2f21ea5914c11c0: [Backport #19577]
Fix write barrier order for `klass` to `cme` edge Previously, the following crashes with `CFLAGS=-DRGENGC_CHECK_MODE=2 -DRUBY_DEBUG=1 -fno-inline`: $ ./miniruby -e 'GC.stress = true; Marshal.dump({})' It crashes with a write barrier (WB) miss assertion on an edge from the `Hash` class object to a newly allocated negative method entry. This is due to usages of vm_ccs_create() running the WB too early, before the method entry is inserted into the cc table, so before the reference edge is established. The insertion can trigger GC and promote the class object, so running the WB after the insertion is necessary. Move the insertion into vm_ccs_create() and run the WB after the insertion. Discovered on CI: http://ci.rvm.jp/results/trunk-asserts@ruby-sp2-docker/4391770 --- vm_eval.c | 3 +-- vm_insnhelper.c | 10 ++++++---- vm_method.c | 3 +-- 3 files changed, 8 insertions(+), 8 deletions(-)
-rw-r--r--  version.h        2
-rw-r--r--  vm_eval.c        3
-rw-r--r--  vm_insnhelper.c  10
-rw-r--r--  vm_method.c      3
4 files changed, 9 insertions, 9 deletions
diff --git a/version.h b/version.h
index 5473fd8585..cb7bb0fa25 100644
--- a/version.h
+++ b/version.h
@@ -11,7 +11,7 @@
# define RUBY_VERSION_MINOR RUBY_API_VERSION_MINOR
#define RUBY_VERSION_TEENY 2
#define RUBY_RELEASE_DATE RUBY_RELEASE_YEAR_STR"-"RUBY_RELEASE_MONTH_STR"-"RUBY_RELEASE_DAY_STR
-#define RUBY_PATCHLEVEL 80
+#define RUBY_PATCHLEVEL 81
#include "ruby/version.h"
#include "ruby/internal/abi.h"
diff --git a/vm_eval.c b/vm_eval.c
index 2e1a9b80a6..ed30bdb70b 100644
--- a/vm_eval.c
+++ b/vm_eval.c
@@ -396,8 +396,7 @@ cc_new(VALUE klass, ID mid, int argc, const rb_callable_method_entry_t *cme)
ccs = (struct rb_class_cc_entries *)ccs_data;
}
else {
- ccs = vm_ccs_create(klass, cme);
- rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
+ ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
}
for (int i=0; i<ccs->len; i++) {
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 6d0e66fdef..d3d44cb0a3 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -1892,7 +1892,7 @@ static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg
static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
static struct rb_class_cc_entries *
-vm_ccs_create(VALUE klass, const rb_callable_method_entry_t *cme)
+vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme)
{
struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
#if VM_CHECK_MODE > 0
@@ -1900,9 +1900,12 @@ vm_ccs_create(VALUE klass, const rb_callable_method_entry_t *cme)
#endif
ccs->capa = 0;
ccs->len = 0;
- RB_OBJ_WRITE(klass, &ccs->cme, cme);
+ ccs->cme = cme;
METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
ccs->entries = NULL;
+
+ rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
+ RB_OBJ_WRITTEN(klass, Qundef, cme);
return ccs;
}
@@ -2053,8 +2056,7 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
}
else {
// TODO: required?
- ccs = vm_ccs_create(klass, cme);
- rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
+ ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
}
}
diff --git a/vm_method.c b/vm_method.c
index 30241cc9cd..5f7264a53b 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -1330,8 +1330,7 @@ cache_callable_method_entry(VALUE klass, ID mid, const rb_callable_method_entry_
VM_ASSERT(ccs->cme == cme);
}
else {
- ccs = vm_ccs_create(klass, cme);
- rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
+ ccs = vm_ccs_create(klass, cc_tbl, mid, cme);
}
}