summaryrefslogtreecommitdiff
path: root/vm_insnhelper.c
diff options
context:
space:
mode:
Diffstat (limited to 'vm_insnhelper.c')
-rw-r--r--  vm_insnhelper.c  25
1 files changed, 14 insertions, 11 deletions
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 249bb49ea7..a1893b1ba2 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -2421,13 +2421,13 @@ opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
#undef EQ_UNREDEFINED_P
-static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, int argc); // vm_eval.c
+static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
static VALUE
opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
{
- const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, 1);
+ const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
return RBOOL(recv == obj);
@@ -2978,6 +2978,7 @@ warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq,
{
rb_vm_t *vm = GET_VM();
st_table *dup_check_table = vm->unused_block_warning_table;
+ st_data_t key;
union {
VALUE v;
@@ -2989,14 +2990,17 @@ warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq,
};
// relax check
- st_data_t key = (st_data_t)cme->def->original_id;
+ if (!vm->unused_block_warning_strict) {
+ key = (st_data_t)cme->def->original_id;
- if (st_lookup(dup_check_table, key, NULL)) {
- return;
+ if (st_lookup(dup_check_table, key, NULL)) {
+ return;
+ }
}
// strict check
// make unique key from pc and me->def pointer
+ key = 0;
for (int i=0; i<SIZEOF_VALUE; i++) {
// fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
@@ -3032,7 +3036,6 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
{
const struct rb_callinfo *ci = calling->cd->ci;
const struct rb_callcache *cc = calling->cc;
- bool cacheable_ci = vm_ci_markable(ci);
if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
calling->block_handler != VM_BLOCK_HANDLER_NONE &&
@@ -3053,7 +3056,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
VM_ASSERT(ci == calling->cd->ci);
VM_ASSERT(cc == calling->cc);
- if (cacheable_ci && vm_call_iseq_optimizable_p(ci, cc)) {
+ if (vm_call_iseq_optimizable_p(ci, cc)) {
if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
!(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
@@ -3083,12 +3086,12 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- cacheable_ci && vm_call_cacheable(ci, cc));
+ vm_call_cacheable(ci, cc));
}
else {
CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- cacheable_ci && vm_call_cacheable(ci, cc));
+ vm_call_cacheable(ci, cc));
}
/* initialize opt vars for self-references */
@@ -3116,7 +3119,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
- cacheable_ci && vm_call_cacheable(ci, cc));
+ vm_call_cacheable(ci, cc));
return 0;
}
@@ -3129,7 +3132,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
if (klocals[kw_param->num] == INT2FIX(0)) {
/* copy from default_values */
CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
- cacheable_ci && vm_call_cacheable(ci, cc));
+ vm_call_cacheable(ci, cc));
}
return 0;