summaryrefslogtreecommitdiff
path: root/compile.c
diff options
context:
space:
mode:
authorKoichi Sasada <ko1@atdot.net>2021-01-04 18:08:25 +0900
committerNARUSE, Yui <naruse@airemix.jp>2021-01-13 17:06:16 +0900
commitb93e16dc0f45069d4a5fcce20d5c4437e151f0a8 (patch)
tree58e71dd4acd25b3deca641a6d5d2e25432dbc646 /compile.c
parent95aff214687a5e12c3eb57d056665741e734c188 (diff)
enable constant cache on ractors
The constant cache `IC` is accessed in a non-atomic manner and has thread-safety issues, so Ruby 3.0 disables the use of the constant cache on non-main ractors. This patch enables it by introducing `imemo_constcache` and allocating it on every re-fill of the constant cache, like `imemo_callcache`. [Bug #17510] Now `IC` has only one entry, `IC::entry`, which points to an `iseq_inline_constant_cache_entry` managed by a T_IMEMO object. `IC` is an atomic data structure, so `rb_mjit_before_vm_ic_update()` and `rb_mjit_after_vm_ic_update()` are not needed.
Diffstat (limited to 'compile.c')
-rw-r--r--compile.c14
1 file changed, 4 insertions, 10 deletions
diff --git a/compile.c b/compile.c
index 264c310012..c29d42e433 100644
--- a/compile.c
+++ b/compile.c
@@ -2359,11 +2359,8 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
}
break;
}
- case TS_ISE: /* inline storage entry */
- /* Treated as an IC, but may contain a markable VALUE */
- FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
- /* fall through */
case TS_IC: /* inline cache */
+ case TS_ISE: /* inline storage entry */
case TS_IVC: /* inline ivar cache */
{
unsigned int ic_index = FIX2UINT(operands[j]);
@@ -2375,8 +2372,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
ic_index, body->is_size);
}
generated_iseq[code_index + 1 + j] = (VALUE)ic;
-
- if (type == TS_IVC) FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
+ FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
break;
}
case TS_CALLDATA:
@@ -9440,14 +9436,13 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *const anchor,
}
break;
case TS_ISE:
- FL_SET((VALUE)iseq, ISEQ_MARKABLE_ISEQ);
- /* fall through */
case TS_IC:
case TS_IVC: /* inline ivar cache */
argv[j] = op;
if (NUM2UINT(op) >= iseq->body->is_size) {
iseq->body->is_size = NUM2INT(op) + 1;
}
+ FL_SET((VALUE)iseq, ISEQ_MARKABLE_ISEQ);
break;
case TS_CALLDATA:
argv[j] = iseq_build_callinfo_from_hash(iseq, op);
@@ -10425,14 +10420,13 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecod
break;
}
case TS_ISE:
- FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
- /* fall through */
case TS_IC:
case TS_IVC:
{
VALUE op = ibf_load_small_value(load, &reading_pos);
code[code_index] = (VALUE)&is_entries[op];
}
+ FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
break;
case TS_CALLDATA:
{