summary refs log tree commit diff
path: root/vm_insnhelper.c
diff options
context:
space:
mode:
author卜部昌平 <shyouhei@ruby-lang.org>2019-09-18 17:18:48 +0900
committer卜部昌平 <shyouhei@ruby-lang.org>2019-09-19 15:18:10 +0900
commitd74fa8e55ce64904f2f99dfef4cbdff94e290672 (patch)
tree7bfd074fe668f719defa81a3b2fb7b9e291b3aa9 /vm_insnhelper.c
parent9fb9f2d318520ddfdbe73809eea85847550b42ae (diff)
reuse cc->call
I noticed that in case of cache misshit, re-calculated cc->me can be the same method entry than the pevious one. That is an okay situation but can't we partially reuse the cache, because cc->call should still be valid then? One thing that has to be special-cased is when the method entry gets amended by some refinements. That happens behind-the-scene of call cache mechanism. We have to check if cc->me->def points to the previously saved one. Calculating ------------------------------------- trunk ours vm2_poly_same_method 1.534M 2.025M i/s - 6.000M times in 3.910203s 2.962752s Comparison: vm2_poly_same_method ours: 2025143.9 i/s trunk: 1534447.2 i/s - 1.32x slower
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/2468
Diffstat (limited to 'vm_insnhelper.c')
-rw-r--r--vm_insnhelper.c37
1 files changed, 31 insertions, 6 deletions
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 352d38fe45..66c50cdf59 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -1374,16 +1374,41 @@ vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)
static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
+/* Decide which call handler the refreshed cache should carry: keep the
+ * previously cached cc->call only when the newly resolved method entry is
+ * provably the same one the cache was built for; otherwise fall back to
+ * vm_call_general so the next invocation re-selects a handler. */
+#ifdef __has_attribute
+#if __has_attribute(artificial)
+__attribute__((__artificial__)) /* debug-info hint: treat this inline helper as compiler-generated */
+#endif
+#endif
+static inline vm_call_handler
+calccall(const struct rb_call_cache *cc, const rb_callable_method_entry_t *me)
+{
+ if (UNLIKELY(!me)) {
+ return vm_call_general; /* vm_call_method_nome() situation */
+ }
+ else if (LIKELY(cc->me != me)) {
+ return vm_call_general; /* normal cases */
+ }
+ else if (UNLIKELY(cc->def != me->def)) {
+ return vm_call_general; /* cc->me was refined elsewhere */
+ }
+ else {
+ return cc->call; /* same entry, same definition: cached handler is still valid */
+ }
+}
+
MJIT_FUNC_EXPORTED void
rb_vm_search_method_slowpath(const struct rb_call_info *ci, struct rb_call_cache *cc, VALUE klass)
{
- cc->me = rb_callable_method_entry(klass, ci->mid);
+ /* Re-resolve the method for klass/mid, then rebuild the whole cache entry
+ * in one shot via a compound literal (field order must match
+ * struct rb_call_cache: method_state, class_serial, me, def, call). */
+ const rb_callable_method_entry_t *me =
+ rb_callable_method_entry(klass, ci->mid);
+ *cc = (struct rb_call_cache) {
+ GET_GLOBAL_METHOD_STATE(),
+ RCLASS_SERIAL(klass),
+ me,
+ me ? me->def : NULL, /* remember the definition so later refinement swaps are detectable */
+ calccall(cc, me), /* note: reads the OLD *cc — evaluated before the literal is assigned */
+ };
VM_ASSERT(callable_method_entry_p(cc->me));
- cc->call = vm_call_general;
-#if OPT_INLINE_METHOD_CACHE
- cc->method_state = GET_GLOBAL_METHOD_STATE();
- cc->class_serial = RCLASS_SERIAL(klass);
-#endif
}
static void