author     Koichi Sasada <ko1@atdot.net>    2021-01-04 18:08:25 +0900
committer  NARUSE, Yui <naruse@airemix.jp>  2021-01-13 17:06:16 +0900
commit     b93e16dc0f45069d4a5fcce20d5c4437e151f0a8
tree       58e71dd4acd25b3deca641a6d5d2e25432dbc646
parent     95aff214687a5e12c3eb57d056665741e734c188
enable constant cache on ractors
The constant cache `IC` is accessed in a non-atomic manner and has thread-safety issues, so Ruby 3.0 disabled the constant cache on non-main Ractors. This patch re-enables it by introducing `imemo_constcache` and allocating a fresh entry on every refill of the constant cache, like `imemo_callcache`. [Bug #17510]

Now `IC` has a single field, `IC::entry`, which points to an `iseq_inline_constant_cache_entry` managed as a T_IMEMO object. Since updating `IC` is a single atomic pointer store, `rb_mjit_before_vm_ic_update()` and `rb_mjit_after_vm_ic_update()` are no longer needed.
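In short, each constant-cache site shrinks to one pointer, and every refill publishes a freshly allocated, GC-managed entry, so a reader sees either the old entry or the new one, never a half-updated mix of fields. Below is a minimal standalone sketch of that shape, with simplified types and a hypothetical example_ic_update() standing in for vm_ic_update(); the real definitions are in the vm_core.h and vm_insnhelper.c hunks further down.

    #include <stdlib.h>

    typedef unsigned long long rb_serial_t;

    /* imemo_constcache: one entry, allocated fresh on every refill and not
     * mutated afterwards (in the VM it is a GC-managed T_IMEMO object) */
    struct iseq_inline_constant_cache_entry {
        rb_serial_t ic_serial;  /* global constant state at fill time */
        const void *ic_cref;    /* lexical scope the lookup depends on, or NULL */
        unsigned long value;    /* cached constant value (a VALUE in the VM) */
    };

    /* IC: now just a single pointer to the current entry */
    struct iseq_inline_constant_cache {
        struct iseq_inline_constant_cache_entry *entry;
    };

    /* Refill: build a complete entry first, then publish it with one store. */
    static void
    example_ic_update(struct iseq_inline_constant_cache *ic,
                      unsigned long val, rb_serial_t serial)
    {
        struct iseq_inline_constant_cache_entry *ice = malloc(sizeof(*ice));
        if (ice == NULL) return;
        ice->value = val;
        ice->ic_cref = NULL;
        ice->ic_serial = serial;
        ic->entry = ice;  /* single pointer store; in the VM, old entries are reclaimed by GC */
    }

    int
    main(void)
    {
        struct iseq_inline_constant_cache ic = { NULL };
        example_ic_update(&ic, 42, 1);
        return (ic.entry && ic.entry->value == 42) ? 0 : 1;
    }

A cache hit then only has to compare ice->ic_serial against the current global constant state (plus the CREF and shareability checks), as vm_ic_hit_p does in the vm_insnhelper.c hunk; a concurrent refill at worst hands a reader a stale but internally consistent entry, which that check rejects.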
-rw-r--r--  bootstraptest/test_ractor.rb                          12
-rw-r--r--  compile.c                                             14
-rw-r--r--  debug_counter.h                                        1
-rw-r--r--  ext/objspace/objspace.c                                1
-rw-r--r--  gc.c                                                  19
-rw-r--r--  insns.def                                              9
-rw-r--r--  internal/imemo.h                                       1
-rw-r--r--  iseq.c                                                11
-rw-r--r--  mjit.c                                                18
-rw-r--r--  mjit.h                                                 4
-rw-r--r--  test/ruby/test_gc.rb                                  22
-rw-r--r--  tool/ruby_vm/views/_mjit_compile_getinlinecache.erb   13
-rw-r--r--  vm_core.h                                             20
-rw-r--r--  vm_insnhelper.c                                       32
14 files changed, 106 insertions, 71 deletions
diff --git a/bootstraptest/test_ractor.rb b/bootstraptest/test_ractor.rb
index 843714a7bc..56add68403 100644
--- a/bootstraptest/test_ractor.rb
+++ b/bootstraptest/test_ractor.rb
@@ -967,6 +967,18 @@ assert_equal 'can not access non-shareable objects in constant C::CONST by non-m
end
}
+# Constant cache should care about non-shareable constants
+assert_equal "can not access non-shareable objects in constant Object::STR by non-main Ractor.", %q{
+ STR = "hello"
+ def str; STR; end
+ s = str() # fill const cache
+ begin
+ Ractor.new{ str() }.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
# Setting non-shareable objects into constants by other Ractors is not allowed
assert_equal 'can not set constants with non-shareable objects by non-main Ractors', %q{
class C
diff --git a/compile.c b/compile.c
index 264c310012..c29d42e433 100644
--- a/compile.c
+++ b/compile.c
@@ -2359,11 +2359,8 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
}
break;
}
- case TS_ISE: /* inline storage entry */
- /* Treated as an IC, but may contain a markable VALUE */
- FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
- /* fall through */
case TS_IC: /* inline cache */
+ case TS_ISE: /* inline storage entry */
case TS_IVC: /* inline ivar cache */
{
unsigned int ic_index = FIX2UINT(operands[j]);
@@ -2375,8 +2372,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
ic_index, body->is_size);
}
generated_iseq[code_index + 1 + j] = (VALUE)ic;
-
- if (type == TS_IVC) FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
+ FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
break;
}
case TS_CALLDATA:
@@ -9440,14 +9436,13 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *const anchor,
}
break;
case TS_ISE:
- FL_SET((VALUE)iseq, ISEQ_MARKABLE_ISEQ);
- /* fall through */
case TS_IC:
case TS_IVC: /* inline ivar cache */
argv[j] = op;
if (NUM2UINT(op) >= iseq->body->is_size) {
iseq->body->is_size = NUM2INT(op) + 1;
}
+ FL_SET((VALUE)iseq, ISEQ_MARKABLE_ISEQ);
break;
case TS_CALLDATA:
argv[j] = iseq_build_callinfo_from_hash(iseq, op);
@@ -10425,14 +10420,13 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecod
break;
}
case TS_ISE:
- FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
- /* fall through */
case TS_IC:
case TS_IVC:
{
VALUE op = ibf_load_small_value(load, &reading_pos);
code[code_index] = (VALUE)&is_entries[op];
}
+ FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
break;
case TS_CALLDATA:
{
diff --git a/debug_counter.h b/debug_counter.h
index 8acca2741d..6dc66ef988 100644
--- a/debug_counter.h
+++ b/debug_counter.h
@@ -309,6 +309,7 @@ RB_DEBUG_COUNTER(obj_imemo_memo)
RB_DEBUG_COUNTER(obj_imemo_parser_strterm)
RB_DEBUG_COUNTER(obj_imemo_callinfo)
RB_DEBUG_COUNTER(obj_imemo_callcache)
+RB_DEBUG_COUNTER(obj_imemo_constcache)
/* ar_table */
RB_DEBUG_COUNTER(artable_hint_hit)
diff --git a/ext/objspace/objspace.c b/ext/objspace/objspace.c
index 0930e10e92..7afdfc1f6b 100644
--- a/ext/objspace/objspace.c
+++ b/ext/objspace/objspace.c
@@ -650,6 +650,7 @@ count_imemo_objects(int argc, VALUE *argv, VALUE self)
imemo_type_ids[10] = rb_intern("imemo_parser_strterm");
imemo_type_ids[11] = rb_intern("imemo_callinfo");
imemo_type_ids[12] = rb_intern("imemo_callcache");
+ imemo_type_ids[13] = rb_intern("imemo_constcache");
}
each_object_with_flags(count_imemo_objects_i, (void *)hash);
diff --git a/gc.c b/gc.c
index f9ebf6a937..e363dc158a 100644
--- a/gc.c
+++ b/gc.c
@@ -2397,6 +2397,7 @@ rb_imemo_name(enum imemo_type type)
IMEMO_NAME(parser_strterm);
IMEMO_NAME(callinfo);
IMEMO_NAME(callcache);
+ IMEMO_NAME(constcache);
#undef IMEMO_NAME
}
return "unknown";
@@ -3102,9 +3103,9 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
case imemo_callcache:
RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
break;
- default:
- /* unreachable */
- break;
+ case imemo_constcache:
+ RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
+ break;
}
return 0;
@@ -6239,6 +6240,12 @@ gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
gc_mark(objspace, (VALUE)vm_cc_cme(cc));
}
return;
+ case imemo_constcache:
+ {
+ const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
+ gc_mark(objspace, ice->value);
+ }
+ return;
#if VM_CHECK_MODE > 0
default:
VM_UNREACHABLE(gc_mark_imemo);
@@ -8963,6 +8970,12 @@ gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
}
}
break;
+ case imemo_constcache:
+ {
+ const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
+ UPDATE_IF_MOVED(objspace, ice->value);
+ }
+ break;
case imemo_parser_strterm:
case imemo_tmpbuf:
case imemo_callinfo:
diff --git a/insns.def b/insns.def
index 5bfcf997ce..51e130786b 100644
--- a/insns.def
+++ b/insns.def
@@ -1023,9 +1023,10 @@ opt_getinlinecache
()
(VALUE val)
{
- if (vm_ic_hit_p(ic->ic_serial, ic->ic_cref, GET_EP())) {
- val = ic->value;
- JUMP(dst);
+ struct iseq_inline_constant_cache_entry *ice = ic->entry;
+ if (ice && vm_ic_hit_p(ice, GET_EP())) {
+ val = ice->value;
+ JUMP(dst);
}
else {
val = Qnil;
@@ -1039,7 +1040,7 @@ opt_setinlinecache
(VALUE val)
(VALUE val)
{
- vm_ic_update(ic, val, GET_EP());
+ vm_ic_update(GET_ISEQ(), ic, val, GET_EP());
}
/* run iseq only once */
diff --git a/internal/imemo.h b/internal/imemo.h
index d10f89cb86..a9e2136ac4 100644
--- a/internal/imemo.h
+++ b/internal/imemo.h
@@ -45,6 +45,7 @@ enum imemo_type {
imemo_parser_strterm = 10,
imemo_callinfo = 11,
imemo_callcache = 12,
+ imemo_constcache = 13,
};
/* CREF (Class REFerence) is defined in method.h */
diff --git a/iseq.c b/iseq.c
index 4a67ee19d8..096d456f11 100644
--- a/iseq.c
+++ b/iseq.c
@@ -182,6 +182,17 @@ iseq_extract_values(VALUE *code, size_t pos, iseq_value_itr_t * func, void *data
}
}
break;
+ case TS_IC:
+ {
+ IC ic = (IC)code[pos + op_no + 1];
+ if (ic->entry) {
+ VALUE nv = func(data, (VALUE)ic->entry);
+ if ((VALUE)ic->entry != nv) {
+ ic->entry = (void *)nv;
+ }
+ }
+ }
+ break;
case TS_IVC:
{
IVC ivc = (IVC)code[pos + op_no + 1];
diff --git a/mjit.c b/mjit.c
index d87961d7cc..4a14fa7ae1 100644
--- a/mjit.c
+++ b/mjit.c
@@ -82,24 +82,6 @@ mjit_gc_exit_hook(void)
CRITICAL_SECTION_FINISH(4, "mjit_gc_exit_hook");
}
-// Lock setinlinecache
-void
-rb_mjit_before_vm_ic_update(void)
-{
- if (!mjit_enabled)
- return;
- CRITICAL_SECTION_START(3, "before vm_ic_update");
-}
-
-// Unlock setinlinecache
-void
-rb_mjit_after_vm_ic_update(void)
-{
- if (!mjit_enabled)
- return;
- CRITICAL_SECTION_FINISH(3, "after vm_ic_update");
-}
-
// Deal with ISeq movement from compactor
void
mjit_update_references(const rb_iseq_t *iseq)
diff --git a/mjit.h b/mjit.h
index dd23cf7caa..34c47d79a5 100644
--- a/mjit.h
+++ b/mjit.h
@@ -83,8 +83,6 @@ RUBY_EXTERN bool mjit_call_p;
extern void rb_mjit_add_iseq_to_process(const rb_iseq_t *iseq);
extern VALUE rb_mjit_wait_call(rb_execution_context_t *ec, struct rb_iseq_constant_body *body);
extern struct rb_mjit_compile_info* rb_mjit_iseq_compile_info(const struct rb_iseq_constant_body *body);
-extern void rb_mjit_before_vm_ic_update(void);
-extern void rb_mjit_after_vm_ic_update(void);
extern void rb_mjit_recompile_send(const rb_iseq_t *iseq);
extern void rb_mjit_recompile_ivar(const rb_iseq_t *iseq);
extern void rb_mjit_recompile_exivar(const rb_iseq_t *iseq);
@@ -198,8 +196,6 @@ void mjit_finish(bool close_handle_p);
# else // USE_MJIT
-static inline void rb_mjit_before_vm_ic_update(void){}
-static inline void rb_mjit_after_vm_ic_update(void){}
static inline struct mjit_cont *mjit_cont_new(rb_execution_context_t *ec){return NULL;}
static inline void mjit_cont_free(struct mjit_cont *cont){}
static inline void mjit_gc_start_hook(void){}
diff --git a/test/ruby/test_gc.rb b/test/ruby/test_gc.rb
index 1823538f73..daa8b5c37a 100644
--- a/test/ruby/test_gc.rb
+++ b/test/ruby/test_gc.rb
@@ -92,19 +92,23 @@ class TestGc < Test::Unit::TestCase
assert_kind_of(Integer, res[:count])
stat, count = {}, {}
- GC.start
- GC.stat(stat)
- ObjectSpace.count_objects(count)
- # repeat same methods invocation for cache object creation.
- GC.stat(stat)
- ObjectSpace.count_objects(count)
+ 2.times{ # to ignore const cache imemo creation
+ GC.start
+ GC.stat(stat)
+ ObjectSpace.count_objects(count)
+ # repeat same methods invocation for cache object creation.
+ GC.stat(stat)
+ ObjectSpace.count_objects(count)
+ }
assert_equal(count[:TOTAL]-count[:FREE], stat[:heap_live_slots])
assert_equal(count[:FREE], stat[:heap_free_slots])
# measure again without GC.start
- 1000.times{ "a" + "b" }
- GC.stat(stat)
- ObjectSpace.count_objects(count)
+ 2.times{ # to ignore const cache imemo creation
+ 1000.times{ "a" + "b" }
+ GC.stat(stat)
+ ObjectSpace.count_objects(count)
+ }
assert_equal(count[:FREE], stat[:heap_free_slots])
end
diff --git a/tool/ruby_vm/views/_mjit_compile_getinlinecache.erb b/tool/ruby_vm/views/_mjit_compile_getinlinecache.erb
index 44b7f3286a..1b636bceb6 100644
--- a/tool/ruby_vm/views/_mjit_compile_getinlinecache.erb
+++ b/tool/ruby_vm/views/_mjit_compile_getinlinecache.erb
@@ -12,11 +12,13 @@
% end
% # compiler: Capture IC values, locking getinlinecache
- rb_mjit_before_vm_ic_update();
- rb_serial_t ic_serial = ic->ic_serial;
- const rb_cref_t *ic_cref = ic->ic_cref;
- VALUE ic_value = ic->value;
- rb_mjit_after_vm_ic_update();
+ struct iseq_inline_constant_cache_entry *ice = ic->entry;
+ if (ice == NULL) {
+ goto getinlinecache_cancel;
+ }
+ rb_serial_t ic_serial = ice->ic_serial;
+ const rb_cref_t *ic_cref = ice->ic_cref;
+ VALUE ic_value = ice->value;
if (ic_serial && !status->compile_info->disable_const_cache) {
% # JIT: Inline everything in IC, and cancel the slow path
@@ -34,3 +36,4 @@
b->stack_size += <%= insn.call_attribute('sp_inc') %>;
break;
}
+ getinlinecache_cancel:;
diff --git a/vm_core.h b/vm_core.h
index f6b81b57c6..cfce3af59d 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -218,10 +218,18 @@ struct rb_control_frame_struct;
/* iseq data type */
typedef struct rb_compile_option_struct rb_compile_option_t;
-struct iseq_inline_cache_entry {
- rb_serial_t ic_serial;
- const rb_cref_t *ic_cref;
- VALUE value;
+// imemo_constcache
+struct iseq_inline_constant_cache_entry {
+ VALUE flags;
+
+ VALUE value; // v0
+ const rb_cref_t *ic_cref; // v1
+ rb_serial_t ic_serial; // v2
+ // v3
+};
+
+struct iseq_inline_constant_cache {
+ struct iseq_inline_constant_cache_entry *entry;
};
struct iseq_inline_iv_cache_entry {
@@ -233,7 +241,7 @@ union iseq_inline_storage_entry {
struct rb_thread_struct *running_thread;
VALUE value;
} once;
- struct iseq_inline_cache_entry cache;
+ struct iseq_inline_constant_cache ic_cache;
struct iseq_inline_iv_cache_entry iv_cache;
};
@@ -1126,7 +1134,7 @@ enum vm_svar_index {
};
/* inline cache */
-typedef struct iseq_inline_cache_entry *IC;
+typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 07058cd65c..73ba44a12e 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -4610,26 +4610,34 @@ vm_opt_newarray_min(rb_num_t num, const VALUE *ptr)
#undef id_cmp
+#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
+
static int
-vm_ic_hit_p(const rb_serial_t ic_serial, const rb_cref_t *ic_cref, const VALUE *reg_ep)
+vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
{
- if (ic_serial == GET_GLOBAL_CONSTANT_STATE() && rb_ractor_main_p()) {
- return (ic_cref == NULL || // no need to check CREF
- ic_cref == vm_get_cref(reg_ep));
+ VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
+ if (ice->ic_serial == GET_GLOBAL_CONSTANT_STATE() &&
+ (FL_TEST_RAW((VALUE)ice, IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p())) {
+
+ VM_ASSERT(FL_TEST_RAW((VALUE)ice, IMEMO_CONST_CACHE_SHAREABLE) ? rb_ractor_shareable_p(ice->value) : true);
+
+ return (ice->ic_cref == NULL || // no need to check CREF
+ ice->ic_cref == vm_get_cref(reg_ep));
}
return FALSE;
}
static void
-vm_ic_update(IC ic, VALUE val, const VALUE *reg_ep)
-{
- VM_ASSERT(ic->value != Qundef);
- rb_mjit_before_vm_ic_update();
- ic->value = val;
- ic->ic_serial = GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count;
- ic->ic_cref = vm_get_const_key_cref(reg_ep);
- rb_mjit_after_vm_ic_update();
+vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep)
+{
+
+ struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)rb_imemo_new(imemo_constcache, 0, 0, 0, 0);
+ RB_OBJ_WRITE(ice, &ice->value, val);
+ ice->ic_cref = vm_get_const_key_cref(reg_ep);
+ ice->ic_serial = GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count;
+ if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
ruby_vm_const_missing_count = 0;
+ RB_OBJ_WRITE(iseq, &ic->entry, ice);
}
static VALUE