path: root/tool/ruby_vm/views/_mjit_compile_send.erb
% # -*- C -*-
% # Copyright (c) 2018 Takashi Kokubun.  All rights reserved.
% #
% # This file is a part of  the programming language Ruby.  Permission is hereby
% # granted, to either  redistribute and/or modify this file,  provided that the
% # conditions mentioned  in the  file COPYING  are met.   Consult the  file for
% # details.
%
% # Optimized case of send / opt_send_without_block instructions.
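% #
% # When the call cache is still valid and the callee is a plain ISeq method, this
% # template emits C code roughly shaped like the following (a sketch of the intent,
% # not the exact output; names like recv_index / local_size are placeholders):
% #
% #   if (method state or class serial changed) goto send_cancel;
% #   /* move pc/sp if needed */
% #   {
% #       struct rb_calling_info calling; /* block_handler, argc, recv */
% #       vm_call_iseq_setup_normal(ec, reg_cfp, &calling, me, 0, param_size, local_size);
% #       v = mjit_exec(ec);              /* or vm_exec(ec) */
% #       stack[recv_index] = v;
% #   }
% #   if (TracePoint events are enabled) goto cancel;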
{
    MAYBE_UNUSED(int pc_moved_p) = FALSE;
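% # compiler: pc_moved_p tracks whether the emitted code has already set reg_cfp->pc
% # (presumably flipped to TRUE by the 'mjit_compile_pc_and_sp' partial rendered below);
% # it is checked before the TracePoint cancel to decide whether pc must still be restored.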
% # compiler: Prepare operands which may be used by `insn.call_attribute`
% insn.opes.each_with_index do |ope, i|
    MAYBE_UNUSED(<%= ope.fetch(:decl) %>) = (<%= ope.fetch(:type) %>)operands[<%= i %>];
% end
% # compiler: Use copied cc to avoid race condition
    CALL_CACHE cc_copy = status->cc_entries + (cc - body->cc_entries);
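% # compiler: `cc - body->cc_entries` is this call site's index; the same index into
% # status->cc_entries (a copy, presumably captured before compilation started) yields
% # a cc that the running VM cannot mutate while this code is being generated.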
%
    if (!status->compile_info->disable_send_cache && has_valid_method_type(cc_copy)) {
        const rb_iseq_t *iseq;
        unsigned int argc = ci->orig_argc; // this `argc` is used to calculate a value's position on the stack, taking `blockarg` into account.
% if insn.name == 'send'
        argc += ((ci->flag & VM_CALL_ARGS_BLOCKARG) ? 1 : 0); // simulate `vm_caller_setup_arg_block`'s `--reg_cfp->sp`
% end

        if (!(ci->flag & VM_CALL_TAILCALL) // inlining non-tailcall path
                && cc_copy->me->def->type == VM_METHOD_TYPE_ISEQ && inlinable_iseq_p(ci, cc_copy, iseq = def_iseq_ptr(cc_copy->me->def))) { // CC_SET_FASTPATH in vm_callee_setup_arg
            int param_size = iseq->body->param.size;

            fprintf(f, "{\n");
% # JIT: Declare stack_size to be used in some macro of _mjit_compile_insn_body.erb
            if (status->local_stack_p) {
                fprintf(f, "    MAYBE_UNUSED(unsigned int) stack_size = %u;\n", b->stack_size);
            }

% # JIT: Cancel JIT execution (goto send_cancel) if the call cache is stale, i.e. using it would require vm_search_method. This guard is what allows the following code to be inlined.
            fprintf(f, "    if (UNLIKELY(GET_GLOBAL_METHOD_STATE() != %"PRI_SERIALT_PREFIX"u ||\n", cc_copy->method_state);
            fprintf(f, "        RCLASS_SERIAL(CLASS_OF(stack[%d])) != %"PRI_SERIALT_PREFIX"u)) {\n", b->stack_size - 1 - argc, cc_copy->class_serial);
            fprintf(f, "        reg_cfp->pc = original_body_iseq + %d;\n", pos);
            fprintf(f, "        reg_cfp->sp = vm_base_ptr(reg_cfp) + %d;\n", b->stack_size);
            fprintf(f, "        goto send_cancel;\n");
            fprintf(f, "    }\n");

% # JIT: move sp and pc if necessary
<%= render 'mjit_compile_pc_and_sp', locals: { insn: insn } -%>

% # JIT: Print insn body in insns.def
            fprintf(f, "    {\n");
            fprintf(f, "        struct rb_calling_info calling;\n");
% if insn.name == 'send'
            fprintf(f, "        calling.block_handler = vm_caller_setup_arg_block(ec, reg_cfp, (CALL_INFO)0x%"PRIxVALUE", (rb_iseq_t *)0x%"PRIxVALUE", FALSE);\n", operands[0], operands[2]);
% else
            fprintf(f, "        calling.block_handler = VM_BLOCK_HANDLER_NONE;\n");
% end
            fprintf(f, "        calling.argc = %d;\n", ci->orig_argc);
            fprintf(f, "        calling.recv = stack[%d];\n", b->stack_size - 1 - argc);

% # JIT: Special CALL_METHOD. Bypass cc_copy->call and inline vm_call_iseq_setup_normal for vm_call_iseq_setup_func FASTPATH.
            fprintf(f, "        {\n");
            fprintf(f, "            VALUE v;\n");
            fprintf(f, "            vm_call_iseq_setup_normal(ec, reg_cfp, &calling, (const rb_callable_method_entry_t *)0x%"PRIxVALUE", 0, %d, %d);\n",
                    (VALUE)cc_copy->me, param_size, iseq->body->local_table_size); /* inlinable_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE */
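% # JIT: When the callee ISeq may catch an exception (catch_except_p), run it through
% # vm_exec, presumably so its catch table is honored; otherwise try mjit_exec first
% # and fall back to vm_exec only if no JIT-ed code is ready for the callee.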
            if (iseq->body->catch_except_p) {
                fprintf(f, "            VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);\n");
                fprintf(f, "            v = vm_exec(ec, TRUE);\n");
            }
            else {
                fprintf(f, "            if ((v = mjit_exec(ec)) == Qundef) {\n");
                fprintf(f, "                VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);\n"); /* This is vm_call0_body's code after vm_call_iseq_setup */
                fprintf(f, "                v = vm_exec(ec, FALSE);\n");
                fprintf(f, "            }\n");
            }
            fprintf(f, "            stack[%d] = v;\n", b->stack_size - argc - 1);
            fprintf(f, "        }\n");

            fprintf(f, "    }\n");

% # JIT: If TracePoint is enabled, cancel so the VM evaluates the ISeq modified for tracing. Note: this is slow.
            fprintf(f, "    if (UNLIKELY(ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS)) {\n");
            fprintf(f, "        reg_cfp->sp = vm_base_ptr(reg_cfp) + %d;\n", b->stack_size + (int)<%= insn.call_attribute('sp_inc') %>);
            if (!pc_moved_p) {
                fprintf(f, "        reg_cfp->pc = original_body_iseq + %d;\n", next_pos);
            }
            fprintf(f, "        RB_DEBUG_COUNTER_INC(mjit_cancel_trace);\n");
            fprintf(f, "        goto cancel;\n");
            fprintf(f, "    }\n");

% # compiler: Move JIT compiler's internal stack pointer
            b->stack_size += <%= insn.call_attribute('sp_inc') %>;

            fprintf(f, "}\n");
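% # compiler: `break` leaves the switch over instructions that this template is
% # presumably rendered into, so the generic (non-inlined) code for this send is
% # not emitted on top of the inlined version.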
            break;
        }
    }
}