Diffstat (limited to 'tool/ruby_vm/views/_mjit_compile_send.erb')
-rw-r--r--  tool/ruby_vm/views/_mjit_compile_send.erb  12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/tool/ruby_vm/views/_mjit_compile_send.erb b/tool/ruby_vm/views/_mjit_compile_send.erb
index 4104a76..4ba97ac 100644
--- a/tool/ruby_vm/views/_mjit_compile_send.erb
+++ b/tool/ruby_vm/views/_mjit_compile_send.erb
@@ -13,6 +13,8 @@
% insn.opes.each_with_index do |ope, i|
MAYBE_UNUSED(<%= ope.fetch(:decl) %>) = (<%= ope.fetch(:type) %>)operands[<%= i %>];
% end
+% # compiler: Use copied cc to avoid race condition
+    CALL_CACHE cc_copy = status->cc_entries + (cc - body->cc_entries);
%
if (has_valid_method_type(cc)) {
const rb_iseq_t *iseq;
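
Review note: the copy works by index translation. status->cc_entries points at a worker-local snapshot of body->cc_entries taken before compilation starts, so the offset of cc within the live array picks out the same entry in the snapshot; the hunks below then read me, method_state and class_serial through cc_copy, whose fields cannot change under the compiler. A minimal sketch of the idea in plain C, with simplified stand-in structs (not the real CALL_CACHE, compile status, or iseq body layouts):

    struct call_cache {
        const void *me;              /* resolved method entry */
        unsigned long method_state;  /* global method state at resolution time */
        unsigned long class_serial;  /* receiver class serial at resolution time */
    };

    struct compile_status { struct call_cache *cc_entries; };  /* worker-local snapshot */
    struct iseq_body      { struct call_cache *cc_entries; };  /* live, mutated by the VM */

    /* Translate a pointer into the live array into a pointer into the
     * snapshot: same index, but storage that stays stable while compiling. */
    static struct call_cache *
    copied_cc(const struct compile_status *status,
              const struct iseq_body *body,
              const struct call_cache *cc)
    {
        return status->cc_entries + (cc - body->cc_entries);
    }
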
@@ -21,7 +23,7 @@
argc += ((ci->flag & VM_CALL_ARGS_BLOCKARG) ? 1 : 0);
% end
- if (cc->me->def->type == VM_METHOD_TYPE_ISEQ && inlinable_iseq_p(ci, cc, iseq = rb_iseq_check(cc->me->def->body.iseq.iseqptr))) { /* CC_SET_FASTPATH in vm_callee_setup_arg */
+ if (cc_copy->me->def->type == VM_METHOD_TYPE_ISEQ && inlinable_iseq_p(ci, cc, iseq = rb_iseq_check(cc_copy->me->def->body.iseq.iseqptr))) { /* CC_SET_FASTPATH in vm_callee_setup_arg */
int param_size = iseq->body->param.size; /* TODO: check calling->argc for argument_arity_error */
fprintf(f, "{\n");
@@ -31,8 +33,8 @@
}
% # JIT: Invalidate call cache if it requires vm_search_method. This allows inlining some of the following things.
- fprintf(f, " if (UNLIKELY(GET_GLOBAL_METHOD_STATE() != %"PRI_SERIALT_PREFIX"u ||\n", cc->method_state);
- fprintf(f, " RCLASS_SERIAL(CLASS_OF(stack[%d])) != %"PRI_SERIALT_PREFIX"u)) {\n", b->stack_size - 1 - argc, cc->class_serial);
+ fprintf(f, " if (UNLIKELY(GET_GLOBAL_METHOD_STATE() != %"PRI_SERIALT_PREFIX"u ||\n", cc_copy->method_state);
+ fprintf(f, " RCLASS_SERIAL(CLASS_OF(stack[%d])) != %"PRI_SERIALT_PREFIX"u)) {\n", b->stack_size - 1 - argc, cc_copy->class_serial);
fprintf(f, " reg_cfp->pc = original_body_iseq + %d;\n", pos);
fprintf(f, " reg_cfp->sp = (VALUE *)reg_cfp->bp + %d;\n", b->stack_size + 1);
fprintf(f, " goto cancel;\n");
@@ -52,11 +54,11 @@
fprintf(f, " calling.argc = %d;\n", ci->orig_argc);
fprintf(f, " calling.recv = stack[%d];\n", b->stack_size - 1 - argc);
-% # JIT: Special CALL_METHOD. Bypass cc->call and inline vm_call_iseq_setup_normal for vm_call_iseq_setup_func FASTPATH.
+% # JIT: Special CALL_METHOD. Bypass cc_copy->call and inline vm_call_iseq_setup_normal for vm_call_iseq_setup_func FASTPATH.
fprintf(f, " {\n");
fprintf(f, " VALUE v;\n");
fprintf(f, " vm_call_iseq_setup_normal(ec, reg_cfp, &calling, (const rb_callable_method_entry_t *)0x%"PRIxVALUE", 0, %d, %d);\n",
- (VALUE)cc->me, param_size, iseq->body->local_table_size); /* rb_simple_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE */
+ (VALUE)cc_copy->me, param_size, iseq->body->local_table_size); /* rb_simple_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE */
if (iseq->body->catch_except_p) {
fprintf(f, " VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);\n");
fprintf(f, " v = vm_exec(ec, TRUE);\n");