diff options
author | Kevin Newton <kddnewton@gmail.com> | 2021-11-24 10:31:23 -0500 |
---|---|---|
committer | Aaron Patterson <aaron.patterson@gmail.com> | 2022-03-24 09:14:38 -0700 |
commit | 629908586b4bead1103267652f8b96b1083573a8 (patch) | |
tree | c2d53b1ae8b86571256f290851d95d6af4ba73db /vm_insnhelper.c | |
parent | 5f10bd634fb6ae8f74a4ea730176233b0ca96954 (diff) |
Finer-grained inline constant cache invalidation
Current behavior - caches depend on a global counter. All constant mutations cause caches to be invalidated.
```ruby
class A
B = 1
end
def foo
A::B # inline cache depends on global counter
end
foo # populate inline cache
foo # hit inline cache
C = 1 # global counter increments, all caches are invalidated
foo # misses inline cache due to `C = 1`
```
Proposed behavior - caches depend on name components. Only constant mutations with corresponding names will invalidate the cache.
```ruby
class A
B = 1
end
def foo
A::B # inline cache depends on constants named "A" and "B"
end
foo # populate inline cache
foo # hit inline cache
C = 1 # caches that depend on the name "C" are invalidated
foo # hits inline cache because IC only depends on "A" and "B"
```
Examples of breaking the new cache:
```ruby
module C
# Breaks `foo` cache because "A" constant is set and the cache in foo depends
# on "A" and "B"
class A; end
end
B = 1
```
We expect the new cache scheme to be invalidated less often because names aren't frequently reused. With the cache being invalidated less, we can rely on its stability more to keep our constant references fast and reduce the need to throw away generated code in YJIT.
Notes
Notes:
Merged: https://github.com/ruby/ruby/pull/5433
Diffstat (limited to 'vm_insnhelper.c')
-rw-r--r-- | vm_insnhelper.c | 51 |
1 file changed, 44 insertions, 7 deletions
diff --git a/vm_insnhelper.c b/vm_insnhelper.c index cbc53b5455..ac44266d27 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -4926,13 +4926,47 @@ vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr) #define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0 +// For each getconstant, associate the ID that corresponds to the first operand +// to that instruction with the inline cache. +static bool +vm_ic_compile_i(VALUE *code, VALUE insn, size_t index, void *ic) +{ + if (insn == BIN(opt_setinlinecache)) { + return false; + } + + if (insn == BIN(getconstant)) { + ID id = code[index + 1]; + rb_vm_t *vm = GET_VM(); + + st_table *ics; + if (!rb_id_table_lookup(vm->constant_cache, id, (VALUE *) &ics)) { + ics = st_init_numtable(); + rb_id_table_insert(vm->constant_cache, id, (VALUE) ics); + } + + st_insert(ics, (st_data_t) ic, (st_data_t) Qtrue); + } + + return true; +} + +// Loop through the instruction sequences starting at the opt_getinlinecache +// call and gather up every getconstant's ID. Associate that with the VM's +// constant cache so that whenever one of the constants changes the inline cache +// will get busted. +static void +vm_ic_compile(rb_control_frame_t *cfp, IC ic) +{ + const rb_iseq_t *iseq = cfp->iseq; + rb_iseq_each(iseq, cfp->pc - iseq->body->iseq_encoded, vm_ic_compile_i, (void *) ic); +} + // For MJIT inlining static inline bool -vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, rb_serial_t ic_serial, const VALUE *reg_ep) +vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep) { - if (ic_serial == GET_GLOBAL_CONSTANT_STATE() && - ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p())) { - + if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) { VM_ASSERT((flags & IMEMO_CONST_CACHE_SHAREABLE) ? 
rb_ractor_shareable_p(value) : true); return (ic_cref == NULL || // no need to check CREF @@ -4945,7 +4979,7 @@ static bool vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep) { VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache)); - return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, GET_IC_SERIAL(ice), reg_ep); + return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep); } // YJIT needs this function to never allocate and never raise @@ -4958,13 +4992,16 @@ rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep) static void vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep) { + if (ruby_vm_const_missing_count > 0) { + ruby_vm_const_missing_count = 0; + ic->entry = NULL; + return; + } struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)rb_imemo_new(imemo_constcache, 0, 0, 0, 0); RB_OBJ_WRITE(ice, &ice->value, val); ice->ic_cref = vm_get_const_key_cref(reg_ep); - SET_IC_SERIAL(ice, GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count); if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE; - ruby_vm_const_missing_count = 0; RB_OBJ_WRITE(iseq, &ic->entry, ice); #ifndef MJIT_HEADER // MJIT and YJIT can't be on at the same time, so there is no need to |