% # -*- C -*-
% # Copyright (c) 2018 Takashi Kokubun. All rights reserved.
% #
% # This file is a part of the programming language Ruby. Permission is hereby
% # granted, to either redistribute and/or modify this file, provided that the
% # conditions mentioned in the file COPYING are met. Consult the file for
% # details.
%
% # Optimized case of send / opt_send_without_block instructions.
{
% # compiler: Prepare operands which may be used by `insn.call_attribute`
% insn.opes.each_with_index do |ope, i|
    MAYBE_UNUSED(<%= ope.fetch(:decl) %>) = (<%= ope.fetch(:type) %>)operands[<%= i %>];
% end
% # compiler: Use the captured cc to avoid a race condition
    size_t cd_index = call_data_index(cd, body);
    const struct rb_callcache **cc_entries = captured_cc_entries(status);
    const struct rb_callcache *captured_cc = cc_entries[cd_index];
%
% # compiler: Inline the send insn where some supported fastpath is used.
    const rb_iseq_t *iseq = NULL;
    const CALL_INFO ci = cd->ci;
    int kw_splat = IS_ARGS_KW_SPLAT(ci) > 0;
    extern bool rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci);
    if (!status->compile_info->disable_send_cache && has_valid_method_type(captured_cc) && (
% # `CC_SET_FASTPATH(cd->cc, vm_call_cfunc_with_frame, ...)` in `vm_call_cfunc`
            (vm_cc_cme(captured_cc)->def->type == VM_METHOD_TYPE_CFUNC && !rb_splat_or_kwargs_p(ci) && !kw_splat)
% # `CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(...), vm_call_iseq_optimizable_p(...))` in `vm_callee_setup_arg`,
% # and support only the non-VM_CALL_TAILCALL path inside it
            || (vm_cc_cme(captured_cc)->def->type == VM_METHOD_TYPE_ISEQ
                && fastpath_applied_iseq_p(ci, captured_cc, iseq = def_iseq_ptr(vm_cc_cme(captured_cc)->def))
                && !(vm_ci_flag(ci) & VM_CALL_TAILCALL))
        )) {
        const bool cfunc_debug = false; // Set this to true to see inlined cfuncs
        if (cfunc_debug && vm_cc_cme(captured_cc)->def->type == VM_METHOD_TYPE_CFUNC) fprintf(stderr, " * %s\n", rb_id2name(vm_ci_mid(ci)));

        int sp_inc = (int)sp_inc_of_sendish(ci);

        fprintf(f, "{\n");
% # JIT: Invalidate the call cache if it requires vm_search_method. This allows inlining some of the following things.
        bool opt_class_of = !maybe_special_const_class_p(captured_cc->klass); // If true, use RBASIC_CLASS instead of CLASS_OF to reduce code size
        fprintf(f, "    const struct rb_callcache *cc = (const struct rb_callcache *)0x%"PRIxVALUE";\n", (VALUE)captured_cc);
        fprintf(f, "    const rb_callable_method_entry_t *cc_cme = (const rb_callable_method_entry_t *)0x%"PRIxVALUE";\n", (VALUE)vm_cc_cme(captured_cc));
        fprintf(f, "    const VALUE recv = stack[%d];\n", b->stack_size + sp_inc - 1);
        fprintf(f, "    if (UNLIKELY(%s || !vm_cc_valid_p(cc, cc_cme, %s(recv)))) {\n", opt_class_of ? "RB_SPECIAL_CONST_P(recv)" : "false", opt_class_of ? "RBASIC_CLASS" : "CLASS_OF");
        fprintf(f, "        reg_cfp->pc = original_body_iseq + %d;\n", pos);
        fprintf(f, "        reg_cfp->sp = vm_base_ptr(reg_cfp) + %d;\n", b->stack_size);
        fprintf(f, "        goto send_cancel;\n");
        fprintf(f, "    }\n");

% # JIT: move sp and pc if necessary
<%= render 'mjit_compile_pc_and_sp', locals: { insn: insn } -%>

% # JIT: If the ISeq is inlinable, call the inlined method without pushing a frame.
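% # For illustration, here is a rough sketch of the C emitted by the frame-less
% # path below, assuming the receiver sits at stack[1] and the compiled id/pos
% # suffixes are 0 and 8 (all three vary per compilation):
% #
% #     {
% #         VALUE orig_self = reg_cfp->self;
% #         reg_cfp->self = stack[1];
% #         stack[1] = _mjit0_inlined_8(ec, reg_cfp, orig_self, original_iseq);
% #         reg_cfp->self = orig_self;
% #     }
% #
% # The inlined body runs on the caller's frame, so only reg_cfp->self is
% # swapped around the call instead of pushing a new control frame.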
        if (iseq && status->inlined_iseqs != NULL && iseq->body == status->inlined_iseqs[pos]) {
            fprintf(f, "    {\n");
            fprintf(f, "        VALUE orig_self = reg_cfp->self;\n");
            fprintf(f, "        reg_cfp->self = stack[%d];\n", b->stack_size + sp_inc - 1);
            fprintf(f, "        stack[%d] = _mjit%d_inlined_%d(ec, reg_cfp, orig_self, original_iseq);\n", b->stack_size + sp_inc - 1, status->compiled_id, pos);
            fprintf(f, "        reg_cfp->self = orig_self;\n");
            fprintf(f, "    }\n");
        }
        else {
% # JIT: Forked `vm_sendish` (except method_explorer = vm_search_method_wrap) to inline various things
            fprintf(f, "    {\n");
            fprintf(f, "        VALUE val;\n");
            fprintf(f, "        struct rb_calling_info calling;\n");
% if insn.name == 'send'
            fprintf(f, "        calling.block_handler = vm_caller_setup_arg_block(ec, reg_cfp, (const struct rb_callinfo *)0x%"PRIxVALUE", (rb_iseq_t *)0x%"PRIxVALUE", FALSE);\n", (VALUE)ci, (VALUE)blockiseq);
% else
            fprintf(f, "        calling.block_handler = VM_BLOCK_HANDLER_NONE;\n");
% end
            fprintf(f, "        calling.kw_splat = %d;\n", kw_splat);
            fprintf(f, "        calling.recv = stack[%d];\n", b->stack_size + sp_inc - 1);
            fprintf(f, "        calling.argc = %d;\n", vm_ci_argc(ci));

            if (vm_cc_cme(captured_cc)->def->type == VM_METHOD_TYPE_CFUNC) {
% # TODO: optimize this more
                fprintf(f, "        calling.ci = (CALL_INFO)0x%"PRIxVALUE";\n", (VALUE)ci); // set a local ci/cc here because the operand's cd->cc may not be the same as the inlined cc.
                fprintf(f, "        calling.cc = cc;\n");
                fprintf(f, "        val = vm_call_cfunc_with_frame(ec, reg_cfp, &calling);\n");
            }
            else { // VM_METHOD_TYPE_ISEQ
% # fastpath_applied_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE
                fprintf(f, "        vm_call_iseq_setup_normal(ec, reg_cfp, &calling, cc_cme, 0, %d, %d);\n", iseq->body->param.size, iseq->body->local_table_size);
                if (iseq->body->catch_except_p) {
                    fprintf(f, "        VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);\n");
                    fprintf(f, "        val = vm_exec(ec, true);\n");
                }
                else {
                    fprintf(f, "        if ((val = mjit_exec(ec)) == Qundef) {\n");
                    fprintf(f, "            VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);\n"); // This is vm_call0_body's code after vm_call_iseq_setup
                    fprintf(f, "            val = vm_exec(ec, false);\n");
                    fprintf(f, "        }\n");
                }
            }
            fprintf(f, "        stack[%d] = val;\n", b->stack_size + sp_inc - 1);
            fprintf(f, "    }\n");

% # JIT: We should evaluate the ISeq modified for TracePoint if it's enabled. Note: this is slow.
            fprintf(f, "    if (UNLIKELY(!mjit_call_p)) {\n");
            fprintf(f, "        reg_cfp->sp = vm_base_ptr(reg_cfp) + %d;\n", b->stack_size + (int)<%= insn.call_attribute('sp_inc') %>);
            if (!pc_moved_p) {
                fprintf(f, "        reg_cfp->pc = original_body_iseq + %d;\n", next_pos);
            }
            fprintf(f, "        RB_DEBUG_COUNTER_INC(mjit_cancel_invalidate_all);\n");
            fprintf(f, "        goto cancel;\n");
            fprintf(f, "    }\n");
        }

% # compiler: Move the JIT compiler's internal stack pointer
        b->stack_size += <%= insn.call_attribute('sp_inc') %>;

        fprintf(f, "}\n");
        break;
    }
}