summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorYusuke Endoh <mame@ruby-lang.org>2021-10-29 01:58:15 +0900
committerNobuyoshi Nakada <nobu@ruby-lang.org>2021-10-29 10:57:46 +0900
commitc1228f833c5925c01b2aed1544d9c7fc2421e79b (patch)
treed9adab9bb139a36fa88407938c68a3d1bb6ec482
parent92ad520ad0372ad64ece6af6bbb6823e629c5d5b (diff)
vm_core.h: Avoid unaligned access to ic_serial on 32-bit machine
This caused a bus error on 32-bit Solaris
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/5049
-rw-r--r--tool/ruby_vm/views/_mjit_compile_getinlinecache.erb4
-rw-r--r--vm_core.h41
-rw-r--r--vm_insnhelper.c4
-rw-r--r--yjit_codegen.c2
4 files changed, 42 insertions, 9 deletions
diff --git a/tool/ruby_vm/views/_mjit_compile_getinlinecache.erb b/tool/ruby_vm/views/_mjit_compile_getinlinecache.erb
index 1acfdb7f0b..d4eb4977a4 100644
--- a/tool/ruby_vm/views/_mjit_compile_getinlinecache.erb
+++ b/tool/ruby_vm/views/_mjit_compile_getinlinecache.erb
@@ -13,9 +13,9 @@
% # compiler: Capture IC values, locking getinlinecache
struct iseq_inline_constant_cache_entry *ice = ic->entry;
- if (ice != NULL && ice->ic_serial && !status->compile_info->disable_const_cache) {
+ if (ice != NULL && GET_IC_SERIAL(ice) && !status->compile_info->disable_const_cache) {
% # JIT: Inline everything in IC, and cancel the slow path
- fprintf(f, " if (vm_inlined_ic_hit_p(0x%"PRIxVALUE", 0x%"PRIxVALUE", (const rb_cref_t *)0x%"PRIxVALUE", %"PRI_SERIALT_PREFIX"u, reg_cfp->ep)) {", ice->flags, ice->value, (VALUE)ice->ic_cref, ice->ic_serial);
+ fprintf(f, " if (vm_inlined_ic_hit_p(0x%"PRIxVALUE", 0x%"PRIxVALUE", (const rb_cref_t *)0x%"PRIxVALUE", %"PRI_SERIALT_PREFIX"u, reg_cfp->ep)) {", ice->flags, ice->value, (VALUE)ice->ic_cref, GET_IC_SERIAL(ice));
fprintf(f, " stack[%d] = 0x%"PRIxVALUE";\n", b->stack_size, ice->value);
fprintf(f, " goto label_%d;\n", pos + insn_len(insn) + (int)dst);
fprintf(f, " }");
diff --git a/vm_core.h b/vm_core.h
index a696a1af39..96437ff559 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -219,21 +219,54 @@ struct rb_control_frame_struct;
/* iseq data type */
typedef struct rb_compile_option_struct rb_compile_option_t;
+union ic_serial_entry {
+ rb_serial_t raw;
+ VALUE data[2];
+};
+
// imemo_constcache
struct iseq_inline_constant_cache_entry {
VALUE flags;
VALUE value; // v0
- rb_serial_t ic_serial; // v1
-#if (SIZEOF_SERIAL_T < 2 * SIZEOF_VOIDP)
- VALUE ic_padding; // v2
-#endif
+ union ic_serial_entry ic_serial; // v1, v2
const rb_cref_t *ic_cref; // v3
};
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
(offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
sizeof(const rb_cref_t *)) <= sizeof(struct RObject));
+#if SIZEOF_SERIAL_T <= SIZEOF_VALUE
+
+#define GET_IC_SERIAL(ice) (ice)->ic_serial.raw
+#define SET_IC_SERIAL(ice, v) (ice)->ic_serial.raw = (v)
+
+#else
+
+static inline rb_serial_t
+get_ic_serial(const struct iseq_inline_constant_cache_entry *ice)
+{
+ union ic_serial_entry tmp;
+ tmp.data[0] = ice->ic_serial.data[0];
+ tmp.data[1] = ice->ic_serial.data[1];
+ return tmp.raw;
+}
+
+#define GET_IC_SERIAL(ice) get_ic_serial(ice)
+
+static inline void
+set_ic_serial(struct iseq_inline_constant_cache_entry *ice, rb_serial_t v)
+{
+ union ic_serial_entry tmp;
+ tmp.raw = v;
+ ice->ic_serial.data[0] = tmp.data[0];
+ ice->ic_serial.data[1] = tmp.data[1];
+}
+
+#define SET_IC_SERIAL(ice, v) set_ic_serial((ice), (v))
+
+#endif
+
struct iseq_inline_constant_cache {
struct iseq_inline_constant_cache_entry *entry;
// For YJIT: the index to the opt_getinlinecache instruction in the same iseq.
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 048ed63aed..86bdfd2fb6 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -4781,7 +4781,7 @@ static bool
vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
{
VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
- return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, ice->ic_serial, reg_ep);
+ return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, GET_IC_SERIAL(ice), reg_ep);
}
// YJIT needs this function to never allocate and never raise
@@ -4798,7 +4798,7 @@ vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep)
struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)rb_imemo_new(imemo_constcache, 0, 0, 0, 0);
RB_OBJ_WRITE(ice, &ice->value, val);
ice->ic_cref = vm_get_const_key_cref(reg_ep);
- ice->ic_serial = GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count;
+ SET_IC_SERIAL(ice, GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count);
if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
ruby_vm_const_missing_count = 0;
RB_OBJ_WRITE(iseq, &ic->entry, ice);
diff --git a/yjit_codegen.c b/yjit_codegen.c
index b0d799c013..d438970cf2 100644
--- a/yjit_codegen.c
+++ b/yjit_codegen.c
@@ -4364,7 +4364,7 @@ gen_opt_getinlinecache(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
// See vm_ic_hit_p(). The same conditions are checked in yjit_constant_ic_update().
struct iseq_inline_constant_cache_entry *ice = ic->entry;
if (!ice || // cache not filled
- ice->ic_serial != ruby_vm_global_constant_state /* cache out of date */) {
+ GET_IC_SERIAL(ice) != ruby_vm_global_constant_state /* cache out of date */) {
// In these cases, leave a block that unconditionally side exits
// for the interpreter to invalidate.
return YJIT_CANT_COMPILE;