author    | Koichi Sasada <ko1@atdot.net> | 2020-01-08 16:14:01 +0900
committer | Koichi Sasada <ko1@atdot.net> | 2020-02-22 09:58:59 +0900
commit    | b9007b6c548f91e88fd3f2ffa23de740431fa969 (patch)
tree      | 1746393d1c5f704e8dc7e0a458198264062273bf /vm.c
parent    | f2286925f08406bc857f7b03ad6779a5d61443ae (diff)
Introduce disposable call-cache.
This patch contains several ideas:
(1) Disposable inline method cache (IMC) for race-free inline method caching
* Make the call-cache (CC) an RVALUE (a GC-managed object) and allocate a
new CC on every cache miss.
* Like RCU, this technique allows race-free access from parallel
processing elements (see the sketch below).
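The core of the disposable-cache idea fits in a few lines of C. The
following is a minimal sketch, not Ruby's implementation: `call_cache`,
`call_site`, `lookup_method()`, and `vm_call()` are illustrative stand-ins
(the real code uses `struct rb_callcache` and `vm_cc_new()`), and the
abandoned CC is simply leaked here where Ruby's GC would reclaim it:

```c
#include <stdlib.h>

typedef unsigned long serial_t;

struct call_cache {             /* immutable once published */
    serial_t class_serial;      /* receiver class this CC was filled for */
    void (*method_body)(void);  /* resolved call target */
};

struct call_site {
    const struct call_cache *cc;  /* readers only ever load this pointer */
};

static void resolved_method(void) {}  /* stand-in for a real method body */

/* Stand-in for method lookup: resolve a body for the given class. */
static void (*lookup_method(serial_t class_serial))(void)
{
    (void)class_serial;
    return resolved_method;
}

static const struct call_cache *
new_cc(serial_t class_serial)
{
    struct call_cache *cc = malloc(sizeof(*cc));
    if (!cc) abort();
    cc->class_serial = class_serial;
    cc->method_body  = lookup_method(class_serial);
    return cc;  /* never mutated after this point */
}

/* On a miss, a fresh CC is allocated and the call-site pointer swapped;
 * the old CC is abandoned (leaked here, GC-reclaimed in Ruby), so a
 * concurrent reader that already loaded it still sees a fully
 * consistent entry -- the RCU-like, race-free property. */
static void
vm_call(struct call_site *site, serial_t receiver_class)
{
    const struct call_cache *cc = site->cc;
    if (cc == NULL || cc->class_serial != receiver_class) {
        cc = new_cc(receiver_class);  /* dispose, never mutate in place */
        site->cc = cc;
    }
    cc->method_body();
}

int main(void)
{
    struct call_site site = { NULL };
    vm_call(&site, 1);  /* miss: allocates a CC */
    vm_call(&site, 1);  /* hit: reuses the same CC */
    vm_call(&site, 2);  /* miss: replaces it with a new CC */
    return 0;
}
```

Because a published CC is never written again, a reader racing with a
cache miss sees either the old CC or the new one, never a half-updated
entry.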
(2) Introduce a per-class method cache (pCMC)
* Unlike the fixed-size global method cache (GMC), the pCMC allows a
flexible cache size.
* Caching CCs reduces CC allocations and lets call-sites with the same
call-info (CI) share a CC's fast path (see the sketch below).
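A minimal sketch of the per-class table, with illustrative names
(`klass`, `pcmc_get`, and `pcmc_put` are not Ruby's actual APIs): each
class owns a growable array of (method id, CC) entries, so cache
capacity scales with the class instead of being bounded by one
fixed-size global table:

```c
#include <stdlib.h>
#include <stdio.h>

typedef unsigned int method_id;

struct cc { void (*body)(void); };  /* cached call target */

struct pcmc_entry { method_id mid; const struct cc *cc; };

struct klass {
    struct pcmc_entry *tbl;  /* per-class cache table */
    size_t len, cap;         /* grows on demand: no global size limit */
};

/* Fetch the CC cached on this class for a method id; every call-site
 * with the same (class, mid) pair shares the CC found here. */
static const struct cc *
pcmc_get(const struct klass *k, method_id mid)
{
    for (size_t i = 0; i < k->len; i++)
        if (k->tbl[i].mid == mid) return k->tbl[i].cc;
    return NULL;
}

static void
pcmc_put(struct klass *k, method_id mid, const struct cc *cc)
{
    if (k->len == k->cap) {  /* flexible size: double when full */
        k->cap = k->cap ? k->cap * 2 : 8;
        k->tbl = realloc(k->tbl, k->cap * sizeof(*k->tbl));
        if (!k->tbl) abort();
    }
    k->tbl[k->len++] = (struct pcmc_entry){ mid, cc };
}

static void some_method(void) {}

int main(void)
{
    struct klass k = { NULL, 0, 0 };
    static const struct cc shared = { some_method };

    pcmc_put(&k, 42, &shared);  /* first call-site fills the cache */
    printf("shared: %d\n", pcmc_get(&k, 42) == &shared);  /* prints 1 */
    free(k.tbl);
    return 0;
}
```

Ruby's actual pCMC is more involved than this linear scan; the point is
only that the cache lives on the class and hands the same CC to every
call-site asking for the same method.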
(3) Invalidate an inline method cache by invalidating the corresponding
method entries (MEs)
* Instead of using class serials, we set an "invalidated" flag on the
method entry itself to represent cache invalidation.
* Compared with using class serials, the impact of a method modification
(add/overwrite/delete) is small.
* Updating a class serial invalidates all method caches of the class and
its subclasses.
* The proposed approach invalidates the method caches of only the one
affected ME (see the sketch below).
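The contrast between the two invalidation strategies can be sketched as
follows; `method_entry`, `call_cache`, and `redefine()` are again
illustrative stand-ins, not Ruby's structs:

```c
#include <stdbool.h>
#include <stdio.h>

struct method_entry {
    const char *name;
    bool invalidated;  /* set when this ME is redefined or removed */
};

struct call_cache {
    const struct method_entry *me;
};

/* A hit requires the cached ME to still be valid; redefining one
 * method flips only that ME's flag, so caches for every other method
 * of the class (and its subclasses) stay warm. */
static bool
cc_valid(const struct call_cache *cc)
{
    return cc->me && !cc->me->invalidated;
}

static void
redefine(struct method_entry *me)
{
    me->invalidated = true;  /* invalidate just this one ME */
}

int main(void)
{
    struct method_entry foo = { "foo", false }, bar = { "bar", false };
    struct call_cache cc_foo = { &foo }, cc_bar = { &bar };

    redefine(&foo);  /* e.g. `def foo ... end` evaluated again */
    printf("foo cache valid: %d\n", cc_valid(&cc_foo));  /* 0 */
    printf("bar cache valid: %d\n", cc_valid(&cc_bar));  /* 1: untouched */
    return 0;
}
```

With a class-serial scheme, redefining `foo` would bump the serial and
every cached method of the class would miss; here only the caches
pointing at `foo`'s ME go cold.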
See [Feature #16614] for more details.
Notes:
Merged: https://github.com/ruby/ruby/pull/2888
Diffstat (limited to 'vm.c')
-rw-r--r-- | vm.c | 26 |
1 file changed, 24 insertions, 2 deletions
@@ -386,6 +386,8 @@ rb_serial_t ruby_vm_global_method_state = 1;
 rb_serial_t ruby_vm_global_constant_state = 1;
 rb_serial_t ruby_vm_class_serial = 1;
 
+const struct rb_callcache *vm_empty_cc;
+
 static void thread_free(void *ptr);
 
 void
@@ -2806,8 +2808,9 @@ static VALUE
 m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
 {
     REWIND_CFP({
-        rb_undef(cbase, SYM2ID(sym));
-        rb_clear_method_cache_by_class(self);
+        ID mid = SYM2ID(sym);
+        rb_undef(cbase, mid);
+        rb_clear_method_cache(self, mid);
     });
     return Qnil;
 }
@@ -2962,6 +2965,13 @@ f_lambda(VALUE _)
     return rb_block_lambda();
 }
 
+static VALUE
+vm_mtbl(VALUE self, VALUE obj, VALUE sym)
+{
+    vm_mtbl_dump(CLASS_OF(obj), SYM2ID(sym));
+    return Qnil;
+}
+
 void
 Init_VM(void)
 {
@@ -3249,9 +3259,11 @@ Init_VM(void)
 #if VMDEBUG
     rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
     rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
+    rb_define_singleton_method(rb_cRubyVM, "mtbl", vm_mtbl, 2);
 #else
     (void)sdr;
     (void)nsdr;
+    (void)vm_mtbl;
 #endif
 
     /* VM bootstrap: phase 2 */
@@ -3348,6 +3360,10 @@ Init_vm_objects(void)
     vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000);
 
     rb_objspace_gc_enable(vm->objspace);
+
+    vm_empty_cc = vm_cc_new(0, NULL, vm_call_general);
+    FL_SET_RAW(vm_empty_cc, VM_CALLCACHE_UNMARKABLE);
+    rb_gc_register_mark_object((VALUE)vm_empty_cc);
 }
 
 /* top self */
@@ -3716,6 +3732,12 @@ vm_collect_usage_register(int reg, int isset)
 }
 #endif
 
+MJIT_FUNC_EXPORTED const struct rb_callcache *
+rb_vm_empty_cc(void)
+{
+    return vm_empty_cc;
+}
+
 #endif /* #ifndef MJIT_HEADER */
 
 #include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */
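A side detail visible in the hunks above: `Init_vm_objects()` installs a
single shared, GC-pinned "empty" call-cache (`vm_empty_cc`) instead of
leaving slots NULL. A plausible reading, sketched below with illustrative
names (only `vm_empty_cc` and `vm_call_general` appear in the diff
itself), is that a sentinel whose key can never match keeps the hot path
down to one comparison with no NULL check:

```c
#include <stdio.h>

typedef unsigned long serial_t;

struct cc {
    serial_t class_serial;  /* 0 never matches a real class */
    void (*call)(void);     /* generic fallback for the sentinel */
};

static void generic_call(void) { puts("slow path"); }
static void fast_call(void)    { puts("fast path"); }

/* Shared sentinel: every call-site starts here instead of at NULL. */
static const struct cc empty_cc = { 0, generic_call };

struct call_site { const struct cc *cc; };

/* The hot path is a single compare; the sentinel guarantees the
 * pointer is always valid, so no NULL test is needed. */
static void
dispatch(struct call_site *site, serial_t receiver_class)
{
    if (site->cc->class_serial == receiver_class) {
        site->cc->call();       /* hit */
    }
    else {
        static const struct cc filled = { 1, fast_call };
        site->cc = &filled;     /* pretend lookup filled a real CC */
        generic_call();         /* this first call takes the slow way */
    }
}

int main(void)
{
    struct call_site site = { &empty_cc };
    dispatch(&site, 1);  /* miss via sentinel: slow path */
    dispatch(&site, 1);  /* hit: fast path */
    return 0;
}
```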