diff options
author | Koichi Sasada <ko1@atdot.net> | 2021-11-18 11:01:31 +0900 |
---|---|---|
committer | Koichi Sasada <ko1@atdot.net> | 2021-11-19 08:32:39 +0900 |
commit | 82ea2870188d66aa75a99f03b4e7fdd1750aa196 (patch) | |
tree | 6ae732893312619a03e8dee23418a52df784d1f8 /vm_insnhelper.c | |
parent | be71c95b88019a1ca7a030a757ce343b743d8aff (diff) |
optimize `Struct` getter/setter
Introduce new optimized method type
`OPTIMIZED_METHOD_TYPE_STRUCT_AREF/ASET` with index information.
Notes:
Merged: https://github.com/ruby/ruby/pull/5131
Diffstat (limited to 'vm_insnhelper.c')
-rw-r--r-- | vm_insnhelper.c | 196 |
1 file changed, 135 insertions, 61 deletions
diff --git a/vm_insnhelper.c b/vm_insnhelper.c index 07e3cdbfaa..baf77acb14 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -22,6 +22,7 @@ #include "internal/proc.h" #include "internal/random.h" #include "internal/variable.h" +#include "internal/struct.h" #include "variable.h" /* finish iseq array */ @@ -3311,53 +3312,6 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct } } -static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler); - -NOINLINE(static VALUE - vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, - struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)); - -static VALUE -vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, - struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler) -{ - int argc = calling->argc; - - /* remove self */ - if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc); - DEC_SP(1); - - return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler); -} - -static VALUE -vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling) -{ - RB_DEBUG_COUNTER_INC(ccf_opt_call); - - const struct rb_callinfo *ci = calling->ci; - VALUE procval = calling->recv; - return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval)); -} - -static VALUE -vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling) -{ - RB_DEBUG_COUNTER_INC(ccf_opt_block_call); - - VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp)); - const struct rb_callinfo *ci = calling->ci; - - if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) { - return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler); - } - else { - 
calling->recv = rb_vm_bh_to_procval(ec, block_handler); - calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv)); - return vm_call_general(ec, reg_cfp, calling); - } -} - static VALUE vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *orig_ci, enum method_missing_reason reason) @@ -3529,6 +3483,139 @@ vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_c } } +static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler); + +NOINLINE(static VALUE + vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, + struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)); + +static VALUE +vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, + struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler) +{ + int argc = calling->argc; + + /* remove self */ + if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc); + DEC_SP(1); + + return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler); +} + +static VALUE +vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling) +{ + RB_DEBUG_COUNTER_INC(ccf_opt_call); + + const struct rb_callinfo *ci = calling->ci; + VALUE procval = calling->recv; + return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval)); +} + +static VALUE +vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling) +{ + RB_DEBUG_COUNTER_INC(ccf_opt_block_call); + + VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp)); + const struct rb_callinfo *ci = calling->ci; + + if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) { + return 
vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler); + } + else { + calling->recv = rb_vm_bh_to_procval(ec, block_handler); + calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv)); + return vm_call_general(ec, reg_cfp, calling); + } +} + +static VALUE +vm_call_opt_struct_aref0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling) +{ + VALUE recv = calling->recv; + + VM_ASSERT(RB_TYPE_P(recv, T_STRUCT)); + VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED); + VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF); + + const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index; + return internal_RSTRUCT_GET(recv, off); +} + +static VALUE +vm_call_opt_struct_aref(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling) +{ + RB_DEBUG_COUNTER_INC(ccf_opt_struct_aref); + + VALUE ret = vm_call_opt_struct_aref0(ec, reg_cfp, calling); + reg_cfp->sp -= 1; + return ret; +} + +static VALUE +vm_call_opt_struct_aset0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling) +{ + VALUE recv = calling->recv; + VALUE val = *(reg_cfp->sp - 1); + + VM_ASSERT(RB_TYPE_P(recv, T_STRUCT)); + VM_ASSERT(vm_cc_cme(calling->cc)->def->type == VM_METHOD_TYPE_OPTIMIZED); + VM_ASSERT(vm_cc_cme(calling->cc)->def->body.optimized.type == OPTIMIZED_METHOD_TYPE_STRUCT_ASET); + + rb_check_frozen(recv); + + const unsigned int off = vm_cc_cme(calling->cc)->def->body.optimized.index; + internal_RSTRUCT_SET(recv, off, val); + + return val; +} + +static VALUE +vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling) +{ + RB_DEBUG_COUNTER_INC(ccf_opt_struct_aset); + + VALUE ret = vm_call_opt_struct_aset0(ec, reg_cfp, calling); + reg_cfp->sp -= 2; + return ret; +} + +NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, 
rb_control_frame_t *cfp, struct rb_calling_info *calling, + const struct rb_callinfo *ci, const struct rb_callcache *cc)); + +static VALUE +vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, + const struct rb_callinfo *ci, const struct rb_callcache *cc) +{ + switch (vm_cc_cme(cc)->def->body.optimized.type) { + case OPTIMIZED_METHOD_TYPE_SEND: + CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE); + return vm_call_opt_send(ec, cfp, calling); + case OPTIMIZED_METHOD_TYPE_CALL: + CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE); + return vm_call_opt_call(ec, cfp, calling); + case OPTIMIZED_METHOD_TYPE_BLOCK_CALL: + CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE); + return vm_call_opt_block_call(ec, cfp, calling); + case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: + CALLER_SETUP_ARG(cfp, calling, ci); + CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci); + rb_check_arity(calling->argc, 0, 0); + CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)); + return vm_call_opt_struct_aref(ec, cfp, calling); + + case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: + CALLER_SETUP_ARG(cfp, calling, ci); + CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci); + rb_check_arity(calling->argc, 1, 1); + CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)); + return vm_call_opt_struct_aset(ec, cfp, calling); + default: + rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type); + } +} + #define VM_CALL_METHOD_ATTR(var, func, nohook) \ if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \ EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \ @@ -3597,20 +3684,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st return vm_call_alias(ec, cfp, calling); case VM_METHOD_TYPE_OPTIMIZED: - switch (vm_cc_cme(cc)->def->body.optimized.type) { - case OPTIMIZED_METHOD_TYPE_SEND: - 
CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE); - return vm_call_opt_send(ec, cfp, calling); - case OPTIMIZED_METHOD_TYPE_CALL: - CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE); - return vm_call_opt_call(ec, cfp, calling); - case OPTIMIZED_METHOD_TYPE_BLOCK_CALL: - CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE); - return vm_call_opt_block_call(ec, cfp, calling); - default: - rb_bug("vm_call_method: unsupported optimized method type (%d)", - vm_cc_cme(cc)->def->body.optimized.type); - } + return vm_call_optimized(ec, cfp, calling, ci, cc); case VM_METHOD_TYPE_UNDEF: break; |