diff options
author | Kevin Newton <kddnewton@gmail.com> | 2021-11-24 10:31:23 -0500 |
---|---|---|
committer | Aaron Patterson <aaron.patterson@gmail.com> | 2022-03-24 09:14:38 -0700 |
commit | 629908586b4bead1103267652f8b96b1083573a8 (patch) | |
tree | c2d53b1ae8b86571256f290851d95d6af4ba73db /vm_core.h | |
parent | 5f10bd634fb6ae8f74a4ea730176233b0ca96954 (diff) |
Finer-grained inline constant cache invalidation
Current behavior - caches depend on a global counter. All constant mutations cause caches to be invalidated.
```ruby
class A
B = 1
end
def foo
A::B # inline cache depends on global counter
end
foo # populate inline cache
foo # hit inline cache
C = 1 # global counter increments, all caches are invalidated
foo # misses inline cache due to `C = 1`
```
Proposed behavior - caches depend on name components. Only constant mutations with corresponding names will invalidate the cache.
```ruby
class A
B = 1
end
def foo
A::B # inline cache depends on constants named "A" and "B"
end
foo # populate inline cache
foo # hit inline cache
C = 1 # caches that depend on the name "C" are invalidated
foo # hits inline cache because IC only depends on "A" and "B"
```
Examples of breaking the new cache:
```ruby
module C
# Breaks `foo` cache because "A" constant is set and the cache in foo depends
# on "A" and "B"
class A; end
end
B = 1
```
We expect the new cache scheme to be invalidated less often because names aren't frequently reused. With the cache being invalidated less, we can rely on its stability more to keep our constant references fast and reduce the need to throw away generated code in YJIT.
Notes
Notes:
Merged: https://github.com/ruby/ruby/pull/5433
Diffstat (limited to 'vm_core.h')
-rw-r--r-- | vm_core.h | 40 |
1 file changed, 8 insertions, 32 deletions
@@ -229,44 +229,14 @@ struct iseq_inline_constant_cache_entry {
     VALUE flags;

    VALUE value;              // v0
-    union ic_serial_entry ic_serial; // v1, v2
+    VALUE _unused1;           // v1
+    VALUE _unused2;           // v2
     const rb_cref_t *ic_cref; // v3
 };
 STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
               (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
                sizeof(const rb_cref_t *)) <= sizeof(struct RObject));

-#if SIZEOF_SERIAL_T <= SIZEOF_VALUE
-
-#define GET_IC_SERIAL(ice) (ice)->ic_serial.raw
-#define SET_IC_SERIAL(ice, v) (ice)->ic_serial.raw = (v)
-
-#else
-
-static inline rb_serial_t
-get_ic_serial(const struct iseq_inline_constant_cache_entry *ice)
-{
-    union ic_serial_entry tmp;
-    tmp.data[0] = ice->ic_serial.data[0];
-    tmp.data[1] = ice->ic_serial.data[1];
-    return tmp.raw;
-}
-
-#define GET_IC_SERIAL(ice) get_ic_serial(ice)
-
-static inline void
-set_ic_serial(struct iseq_inline_constant_cache_entry *ice, rb_serial_t v)
-{
-    union ic_serial_entry tmp;
-    tmp.raw = v;
-    ice->ic_serial.data[0] = tmp.data[0];
-    ice->ic_serial.data[1] = tmp.data[1];
-}
-
-#define SET_IC_SERIAL(ice, v) set_ic_serial((ice), (v))
-
-#endif
-
 struct iseq_inline_constant_cache {
     struct iseq_inline_constant_cache_entry *entry;
     // For YJIT: the index to the opt_getinlinecache instruction in the same iseq.
@@ -722,6 +692,12 @@ typedef struct rb_vm_struct {
     struct rb_id_table *negative_cme_table;
     st_table *overloaded_cme_table; // cme -> overloaded_cme

+    // This id table contains a mapping from ID to ICs. It does this with ID
+    // keys and nested st_tables as values. The nested tables have ICs as keys
+    // and Qtrue as values. It is used when inline constant caches need to be
+    // invalidated or ISEQs are being freed.
+    struct rb_id_table *constant_cache;
+
 #ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
 #define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
 #endif