summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog39
-rw-r--r--compile.c135
-rw-r--r--insns.def102
-rw-r--r--iseq.c48
-rw-r--r--iseq.h4
-rwxr-xr-xtool/instruction.rb5
-rw-r--r--vm.c2
-rw-r--r--vm_args.c96
-rw-r--r--vm_core.h71
-rw-r--r--vm_eval.c117
-rw-r--r--vm_insnhelper.c568
-rw-r--r--vm_insnhelper.h16
12 files changed, 681 insertions, 522 deletions
diff --git a/ChangeLog b/ChangeLog
index 9e7d91d..8e73336 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,42 @@
+Sun Sep 20 02:46:34 2015 Koichi Sasada <ko1@atdot.net>
+
+ * vm_core.h: split rb_call_info_t into several structs.
+ * rb_call_info (ci) has compiled fixed information.
+ * if ci->flag & VM_CALL_KWARG, then rb_call_info is
+ also rb_call_info_with_kwarg. This technique reduces one word
+ for major rb_call_info data.
+ * rb_calling_info has temporary data (argc, blockptr, recv)
+ for each method dispatch. This data is allocated only on
+ the machine stack.
+ * rb_call_cache is for inline method cache.
+
+ Before this patch, only rb_call_info_t data is passed.
+ After this patch, above three structs are passed.
+
+ This patch improves:
+ * data locality (rb_call_info is now read-only data).
+ * reduced memory consumption (rb_call_info_with_kwarg,
+ rb_calling_info).
+
+ * compile.c: use above data.
+
+ * insns.def: ditto.
+
+ * iseq.c: ditto.
+
+ * vm_args.c: ditto.
+
+ * vm_eval.c: ditto.
+
+ * vm_insnhelper.c: ditto.
+
+ * vm_insnhelper.h: ditto.
+
+ * iseq.h: add iseq_compile_data::ci_index and
+ iseq_compile_data::ci_kw_index.
+
+ * tool/instruction.rb: introduce TS_CALLCACHE operand type.
+
Sun Sep 20 02:18:10 2015 Tanaka Akira <akr@fsij.org>
* test/lib/envutil.rb: mkfifo command based File.mkfifo method
diff --git a/compile.c b/compile.c
index 98c3b09..9f9fcb6 100644
--- a/compile.c
+++ b/compile.c
@@ -944,45 +944,42 @@ new_insn_body(rb_iseq_t *iseq, int line_no, enum ruby_vminsn_type insn_id, int a
return new_insn_core(iseq, line_no, insn_id, argc, operands);
}
-static rb_call_info_t *
-new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, rb_call_info_kw_arg_t *kw_arg, int has_blockiseq)
+static struct rb_call_info *
+new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, struct rb_call_info_kw_arg *kw_arg, int has_blockiseq)
{
- rb_call_info_t *ci = (rb_call_info_t *)compile_data_alloc(iseq, sizeof(rb_call_info_t));
+ size_t size = kw_arg != NULL ? sizeof(struct rb_call_info_with_kwarg) : sizeof(struct rb_call_info);
+ struct rb_call_info *ci = (struct rb_call_info *)compile_data_alloc(iseq, size);
+ struct rb_call_info_with_kwarg *ci_kw = (struct rb_call_info_with_kwarg *)ci;
ci->mid = mid;
ci->flag = flag;
ci->orig_argc = argc;
- ci->argc = argc;
- ci->kw_arg = kw_arg;
if (kw_arg) {
- ci->argc += kw_arg->keyword_len;
+ ci->flag |= VM_CALL_KWARG;
+ ci_kw->kw_arg = kw_arg;
ci->orig_argc += kw_arg->keyword_len;
+ iseq->body->ci_kw_size++;
+ }
+ else {
+ iseq->body->ci_size++;
}
if (!(ci->flag & (VM_CALL_ARGS_SPLAT | VM_CALL_ARGS_BLOCKARG)) &&
- ci->kw_arg == NULL && !has_blockiseq) {
+ kw_arg == NULL && !has_blockiseq) {
ci->flag |= VM_CALL_ARGS_SIMPLE;
}
-
- ci->method_state = 0;
- ci->class_serial = 0;
- ci->blockptr = 0;
- ci->recv = Qundef;
- ci->call = 0; /* TODO: should set default function? */
-
- ci->aux.index = iseq->body->callinfo_size++;
-
return ci;
}
static INSN *
-new_insn_send(rb_iseq_t *iseq, int line_no, ID id, VALUE argc, const rb_iseq_t *blockiseq, VALUE flag, rb_call_info_kw_arg_t *keywords)
+new_insn_send(rb_iseq_t *iseq, int line_no, ID id, VALUE argc, const rb_iseq_t *blockiseq, VALUE flag, struct rb_call_info_kw_arg *keywords)
{
- VALUE *operands = (VALUE *)compile_data_alloc(iseq, sizeof(VALUE) * 2);
+ VALUE *operands = (VALUE *)compile_data_alloc(iseq, sizeof(VALUE) * 3);
operands[0] = (VALUE)new_callinfo(iseq, id, FIX2INT(argc), FIX2INT(flag), keywords, blockiseq != NULL);
- operands[1] = (VALUE)blockiseq;
- return new_insn_core(iseq, line_no, BIN(send), 2, operands);
+ operands[1] = Qfalse; /* cache */
+ operands[2] = (VALUE)blockiseq;
+ return new_insn_core(iseq, line_no, BIN(send), 3, operands);
}
static rb_iseq_t *
@@ -1497,8 +1494,11 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *anchor)
generated_iseq = ALLOC_N(VALUE, code_index);
line_info_table = ALLOC_N(struct iseq_line_info_entry, insn_num);
iseq->body->is_entries = ZALLOC_N(union iseq_inline_storage_entry, iseq->body->is_size);
- iseq->body->callinfo_entries = ALLOC_N(rb_call_info_t, iseq->body->callinfo_size);
- /* MEMZERO(iseq->body->callinfo_entries, rb_call_info_t, iseq->body->callinfo_size); */
+ iseq->body->ci_entries = (struct rb_call_info *)ruby_xmalloc(sizeof(struct rb_call_info) * iseq->body->ci_size +
+ sizeof(struct rb_call_info_with_kwarg) * iseq->body->ci_kw_size);
+ iseq->body->cc_entries = ZALLOC_N(struct rb_call_cache, iseq->body->ci_size + iseq->body->ci_kw_size);
+
+ iseq->compile_data->ci_index = iseq->compile_data->ci_kw_index = 0;
list = FIRST_ELEMENT(anchor);
line_info_index = code_index = sp = 0;
@@ -1599,16 +1599,31 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *anchor)
}
case TS_CALLINFO: /* call info */
{
- rb_call_info_t *base_ci = (rb_call_info_t *)operands[j];
- rb_call_info_t *ci = &iseq->body->callinfo_entries[base_ci->aux.index];
- *ci = *base_ci;
-
- if (UNLIKELY(base_ci->aux.index >= iseq->body->callinfo_size)) {
- rb_bug("iseq_set_sequence: ci_index overflow: index: %d, size: %d", base_ci->argc, iseq->body->callinfo_size);
+ struct rb_call_info *base_ci = (struct rb_call_info *)operands[j];
+ struct rb_call_info *ci;
+
+ if (base_ci->flag & VM_CALL_KWARG) {
+ struct rb_call_info_with_kwarg *ci_kw_entries = (struct rb_call_info_with_kwarg *)&iseq->body->ci_entries[iseq->body->ci_size];
+ struct rb_call_info_with_kwarg *ci_kw = &ci_kw_entries[iseq->compile_data->ci_kw_index++];
+ *ci_kw = *((struct rb_call_info_with_kwarg *)base_ci);
+ ci = (struct rb_call_info *)ci_kw;
+ assert(iseq->compile_data->ci_kw_index <= iseq->body->ci_kw_size);
+ }
+ else {
+ ci = &iseq->body->ci_entries[iseq->compile_data->ci_index++];
+ *ci = *base_ci;
+ assert(iseq->compile_data->ci_index <= iseq->body->ci_size);
}
+
generated_iseq[code_index + 1 + j] = (VALUE)ci;
break;
}
+ case TS_CALLCACHE:
+ {
+ struct rb_call_cache *cc = &iseq->body->cc_entries[iseq->compile_data->ci_index + iseq->compile_data->ci_kw_index - 1];
+ generated_iseq[code_index + 1 + j] = (VALUE)cc;
+ break;
+ }
case TS_ID: /* ID */
generated_iseq[code_index + 1 + j] = SYM2ID(operands[j]);
break;
@@ -1948,7 +1963,7 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
enum ruby_vminsn_type previ = piobj->insn_id;
if (previ == BIN(send) || previ == BIN(opt_send_without_block) || previ == BIN(invokesuper)) {
- rb_call_info_t *ci = (rb_call_info_t *)piobj->operands[0];
+ struct rb_call_info *ci = (struct rb_call_info *)piobj->operands[0];
rb_iseq_t *blockiseq = (rb_iseq_t *)piobj->operands[1];
if (blockiseq == 0) {
ci->flag |= VM_CALL_TAILCALL;
@@ -1966,9 +1981,12 @@ insn_set_specialized_instruction(rb_iseq_t *iseq, INSN *iobj, int insn_id)
if (insn_id == BIN(opt_neq)) {
VALUE *old_operands = iobj->operands;
+ iobj->operand_size = 4;
iobj->operands = (VALUE *)compile_data_alloc(iseq, iobj->operand_size * sizeof(VALUE));
iobj->operands[0] = old_operands[0];
- iobj->operands[1] = (VALUE)new_callinfo(iseq, idEq, 1, 0, NULL, FALSE);
+ iobj->operands[1] = Qfalse; /* CALL_CACHE */
+ iobj->operands[2] = (VALUE)new_callinfo(iseq, idEq, 1, 0, NULL, FALSE);
+ iobj->operands[3] = Qfalse; /* CALL_CACHE */
}
return COMPILE_OK;
@@ -1978,8 +1996,8 @@ static int
iseq_specialized_instruction(rb_iseq_t *iseq, INSN *iobj)
{
if (iobj->insn_id == BIN(send)) {
- rb_call_info_t *ci = (rb_call_info_t *)OPERAND_AT(iobj, 0);
- const rb_iseq_t *blockiseq = (rb_iseq_t *)OPERAND_AT(iobj, 1);
+ struct rb_call_info *ci = (struct rb_call_info *)OPERAND_AT(iobj, 0);
+ const rb_iseq_t *blockiseq = (rb_iseq_t *)OPERAND_AT(iobj, 2);
#define SP_INSN(opt) insn_set_specialized_instruction(iseq, iobj, BIN(opt_##opt))
if (ci->flag & VM_CALL_ARGS_SIMPLE) {
@@ -2020,7 +2038,7 @@ iseq_specialized_instruction(rb_iseq_t *iseq, INSN *iobj)
if ((ci->flag & VM_CALL_ARGS_BLOCKARG) == 0 && blockiseq == NULL) {
iobj->insn_id = BIN(opt_send_without_block);
- iobj->operand_size = 1;
+ iobj->operand_size = insn_len(iobj->insn_id) - 1;
}
}
#undef SP_INSN
@@ -2402,7 +2420,7 @@ compile_branch_condition(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * cond,
}
static int
-compile_array_keyword_arg(rb_iseq_t *iseq, LINK_ANCHOR *ret, const NODE * const root_node, rb_call_info_kw_arg_t ** const kw_arg_ptr)
+compile_array_keyword_arg(rb_iseq_t *iseq, LINK_ANCHOR *ret, const NODE * const root_node, struct rb_call_info_kw_arg ** const kw_arg_ptr)
{
if (kw_arg_ptr == NULL) return FALSE;
@@ -2427,7 +2445,7 @@ compile_array_keyword_arg(rb_iseq_t *iseq, LINK_ANCHOR *ret, const NODE * const
node = root_node->nd_head;
{
int len = (int)node->nd_alen / 2;
- rb_call_info_kw_arg_t *kw_arg = (rb_call_info_kw_arg_t *)ruby_xmalloc(sizeof(rb_call_info_kw_arg_t) + sizeof(VALUE) * (len - 1));
+ struct rb_call_info_kw_arg *kw_arg = (struct rb_call_info_kw_arg *)ruby_xmalloc(sizeof(struct rb_call_info_kw_arg) + sizeof(VALUE) * (len - 1));
VALUE *keywords = kw_arg->keywords;
int i = 0;
kw_arg->keyword_len = len;
@@ -2455,7 +2473,7 @@ enum compile_array_type_t {
static int
compile_array_(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE* node_root,
- enum compile_array_type_t type, rb_call_info_kw_arg_t **keywords_ptr, int poped)
+ enum compile_array_type_t type, struct rb_call_info_kw_arg **keywords_ptr, int poped)
{
NODE *node = node_root;
int line = (int)nd_line(node);
@@ -2680,15 +2698,15 @@ compile_massign_lhs(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE *node)
switch (nd_type(node)) {
case NODE_ATTRASGN: {
INSN *iobj;
- rb_call_info_t *ci;
+ struct rb_call_info *ci;
VALUE dupidx;
COMPILE_POPED(ret, "masgn lhs (NODE_ATTRASGN)", node);
POP_ELEMENT(ret); /* pop pop insn */
iobj = (INSN *)POP_ELEMENT(ret); /* pop send insn */
- ci = (rb_call_info_t *)iobj->operands[0];
- ci->orig_argc += 1; ci->argc = ci->orig_argc;
+ ci = (struct rb_call_info *)iobj->operands[0];
+ ci->orig_argc += 1;
dupidx = INT2FIX(ci->orig_argc);
ADD_INSN1(ret, nd_line(node), topn, dupidx);
@@ -3228,7 +3246,7 @@ add_ensure_iseq(LINK_ANCHOR *ret, rb_iseq_t *iseq, int is_return)
}
static VALUE
-setup_args(rb_iseq_t *iseq, LINK_ANCHOR *args, NODE *argn, unsigned int *flag, rb_call_info_kw_arg_t **keywords)
+setup_args(rb_iseq_t *iseq, LINK_ANCHOR *args, NODE *argn, unsigned int *flag, struct rb_call_info_kw_arg **keywords)
{
VALUE argc = INT2FIX(0);
int nsplat = 0;
@@ -4504,8 +4522,10 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
VALUE str = rb_fstring(node->nd_args->nd_head->nd_lit);
node->nd_args->nd_head->nd_lit = str;
COMPILE(ret, "recv", node->nd_recv);
- ADD_INSN2(ret, line, opt_aref_with,
- new_callinfo(iseq, idAREF, 1, 0, NULL, FALSE), str);
+ ADD_INSN3(ret, line, opt_aref_with,
+ new_callinfo(iseq, idAREF, 1, 0, NULL, FALSE),
+ Qnil, /* CALL_CACHE */
+ str);
if (poped) {
ADD_INSN(ret, line, pop);
}
@@ -4523,7 +4543,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
ID mid = node->nd_mid;
VALUE argc;
unsigned int flag = 0;
- rb_call_info_kw_arg_t *keywords = NULL;
+ struct rb_call_info_kw_arg *keywords = NULL;
const rb_iseq_t *parent_block = iseq->compile_data->current_block;
iseq->compile_data->current_block = NULL;
@@ -4635,7 +4655,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
DECL_ANCHOR(args);
int argc;
unsigned int flag = 0;
- rb_call_info_kw_arg_t *keywords = NULL;
+ struct rb_call_info_kw_arg *keywords = NULL;
const rb_iseq_t *parent_block = iseq->compile_data->current_block;
INIT_ANCHOR(args);
@@ -4742,8 +4762,9 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
/* dummy receiver */
ADD_INSN1(ret, line, putobject, nd_type(node) == NODE_ZSUPER ? Qfalse : Qtrue);
ADD_SEQ(ret, args);
- ADD_INSN2(ret, line, invokesuper,
+ ADD_INSN3(ret, line, invokesuper,
new_callinfo(iseq, 0, argc, flag | VM_CALL_SUPER | VM_CALL_FCALL, keywords, parent_block != NULL),
+ Qnil, /* CALL_CACHE */
parent_block);
if (poped) {
@@ -4839,7 +4860,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
DECL_ANCHOR(args);
VALUE argc;
unsigned int flag = 0;
- rb_call_info_kw_arg_t *keywords = NULL;
+ struct rb_call_info_kw_arg *keywords = NULL;
INIT_ANCHOR(args);
if (iseq->body->type == ISEQ_TYPE_TOP) {
@@ -4982,7 +5003,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
else {
ADD_SEQ(ret, recv);
ADD_SEQ(ret, val);
- ADD_INSN1(ret, line, opt_regexpmatch2, new_callinfo(iseq, idEqTilde, 1, 0, NULL, FALSE));
+ ADD_INSN2(ret, line, opt_regexpmatch2, new_callinfo(iseq, idEqTilde, 1, 0, NULL, FALSE), Qnil);
}
}
else {
@@ -5516,8 +5537,9 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
ADD_INSN(ret, line, swap);
ADD_INSN1(ret, line, topn, INT2FIX(1));
}
- ADD_INSN2(ret, line, opt_aset_with,
- new_callinfo(iseq, idASET, 2, 0, NULL, FALSE), str);
+ ADD_INSN3(ret, line, opt_aset_with,
+ new_callinfo(iseq, idASET, 2, 0, NULL, FALSE),
+ Qnil/* CALL_CACHE */, str);
ADD_INSN(ret, line, pop);
break;
}
@@ -5687,13 +5709,17 @@ insn_data_to_s_detail(INSN *iobj)
break;
case TS_CALLINFO: /* call info */
{
- rb_call_info_t *ci = (rb_call_info_t *)OPERAND_AT(iobj, j);
+ struct rb_call_info *ci = (struct rb_call_info *)OPERAND_AT(iobj, j);
rb_str_cat2(str, "<callinfo:");
- if (ci->mid)
- rb_str_catf(str, "%"PRIsVALUE, rb_id2str(ci->mid));
+ if (ci->mid) rb_str_catf(str, "%"PRIsVALUE, rb_id2str(ci->mid));
rb_str_catf(str, ", %d>", ci->orig_argc);
break;
}
+ case TS_CALLCACHE: /* call cache */
+ {
+ rb_str_catf(str, "<call cache>");
+ break;
+ }
case TS_CDHASH: /* case/when condition cache */
rb_str_cat2(str, "<ch>");
break;
@@ -5911,7 +5937,7 @@ iseq_build_callinfo_from_hash(rb_iseq_t *iseq, VALUE op)
ID mid = 0;
int orig_argc = 0;
unsigned int flag = 0;
- rb_call_info_kw_arg_t *kw_arg = 0;
+ struct rb_call_info_kw_arg *kw_arg = 0;
if (!NIL_P(op)) {
VALUE vmid = rb_hash_aref(op, ID2SYM(rb_intern("mid")));
@@ -6030,6 +6056,9 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *anchor,
case TS_CALLINFO:
argv[j] = iseq_build_callinfo_from_hash(iseq, op);
break;
+ case TS_CALLCACHE:
+ argv[j] = Qfalse;
+ break;
case TS_ID:
argv[j] = rb_convert_type(op, T_SYMBOL,
"Symbol", "to_sym");
diff --git a/insns.def b/insns.def
index d27fd1a..7eb3221 100644
--- a/insns.def
+++ b/insns.def
@@ -936,14 +936,15 @@ defineclass
*/
DEFINE_INSN
send
-(CALL_INFO ci, ISEQ iseq)
+(CALL_INFO ci, CALL_CACHE cc, ISEQ blockiseq)
(...)
(VALUE val) // inc += - (int)(ci->orig_argc + ((ci->flag & VM_CALL_ARGS_BLOCKARG) ? 1 : 0));
{
- ci->argc = ci->orig_argc;
- vm_caller_setup_arg_block(th, reg_cfp, ci, iseq, FALSE);
- vm_search_method(ci, ci->recv = TOPN(ci->argc));
- CALL_METHOD(ci);
+ struct rb_calling_info calling;
+
+ vm_caller_setup_arg_block(th, reg_cfp, &calling, ci, blockiseq, FALSE);
+ vm_search_method(ci, cc, calling.recv = TOPN(calling.argc = ci->orig_argc));
+ CALL_METHOD(&calling, ci, cc);
}
DEFINE_INSN
@@ -967,13 +968,14 @@ opt_str_freeze
*/
DEFINE_INSN
opt_send_without_block
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(...)
(VALUE val) // inc += -ci->orig_argc;
{
- ci->argc = ci->orig_argc;
- vm_search_method(ci, ci->recv = TOPN(ci->argc));
- CALL_METHOD(ci);
+ struct rb_calling_info calling;
+ calling.blockptr = NULL;
+ vm_search_method(ci, cc, calling.recv = TOPN(calling.argc = ci->orig_argc));
+ CALL_METHOD(&calling, ci, cc);
}
/**
@@ -983,15 +985,17 @@ opt_send_without_block
*/
DEFINE_INSN
invokesuper
-(CALL_INFO ci, ISEQ iseq)
+(CALL_INFO ci, CALL_CACHE cc, ISEQ blockiseq)
(...)
(VALUE val) // inc += - (int)(ci->orig_argc + ((ci->flag & VM_CALL_ARGS_BLOCKARG) ? 1 : 0));
{
- ci->argc = ci->orig_argc;
- vm_caller_setup_arg_block(th, reg_cfp, ci, iseq, TRUE);
- ci->recv = GET_SELF();
- vm_search_super_method(th, GET_CFP(), ci);
- CALL_METHOD(ci);
+ struct rb_calling_info calling;
+ calling.argc = ci->orig_argc;
+
+ vm_caller_setup_arg_block(th, reg_cfp, &calling, ci, blockiseq, TRUE);
+ calling.recv = GET_SELF();
+ vm_search_super_method(th, GET_CFP(), &calling, ci, cc);
+ CALL_METHOD(&calling, ci, cc);
}
/**
@@ -1005,10 +1009,12 @@ invokeblock
(...)
(VALUE val) // inc += 1 - ci->orig_argc;
{
- ci->argc = ci->orig_argc;
- ci->blockptr = 0;
- ci->recv = GET_SELF();
- val = vm_invoke_block(th, GET_CFP(), ci);
+ struct rb_calling_info calling;
+ calling.argc = ci->orig_argc;
+ calling.blockptr = NULL;
+ calling.recv = GET_SELF();
+
+ val = vm_invoke_block(th, GET_CFP(), &calling, ci);
if (val == Qundef) {
RESTORE_REGS();
NEXT_INSN();
@@ -1260,7 +1266,7 @@ opt_case_dispatch
*/
DEFINE_INSN
opt_plus
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@@ -1323,7 +1329,7 @@ opt_plus
*/
DEFINE_INSN
opt_minus
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@@ -1371,7 +1377,7 @@ opt_minus
*/
DEFINE_INSN
opt_mult
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@@ -1421,7 +1427,7 @@ opt_mult
*/
DEFINE_INSN
opt_div
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@@ -1484,7 +1490,7 @@ opt_div
*/
DEFINE_INSN
opt_mod
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@@ -1551,11 +1557,11 @@ opt_mod
*/
DEFINE_INSN
opt_eq
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
- val = opt_eq_func(recv, obj, ci);
+ val = opt_eq_func(recv, obj, ci, cc);
if (val == Qundef) {
/* other */
@@ -1572,16 +1578,17 @@ opt_eq
*/
DEFINE_INSN
opt_neq
-(CALL_INFO ci, CALL_INFO ci_eq)
+(CALL_INFO ci, CALL_CACHE cc, CALL_INFO ci_eq, CALL_CACHE cc_eq)
(VALUE recv, VALUE obj)
(VALUE val)
{
extern VALUE rb_obj_not_equal(VALUE obj1, VALUE obj2);
- vm_search_method(ci, recv);
+ vm_search_method(ci, cc, recv);
+
val = Qundef;
- if (check_cfunc(ci->me, rb_obj_not_equal)) {
- val = opt_eq_func(recv, obj, ci_eq);
+ if (check_cfunc(cc->me, rb_obj_not_equal)) {
+ val = opt_eq_func(recv, obj, ci_eq, cc_eq);
if (val != Qundef) {
val = RTEST(val) ? Qfalse : Qtrue;
@@ -1603,7 +1610,7 @@ opt_neq
*/
DEFINE_INSN
opt_lt
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@@ -1647,7 +1654,7 @@ opt_lt
*/
DEFINE_INSN
opt_le
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@@ -1682,7 +1689,7 @@ opt_le
*/
DEFINE_INSN
opt_gt
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@@ -1726,7 +1733,7 @@ opt_gt
*/
DEFINE_INSN
opt_ge
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@@ -1760,7 +1767,7 @@ opt_ge
*/
DEFINE_INSN
opt_ltlt
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@@ -1792,7 +1799,7 @@ opt_ltlt
*/
DEFINE_INSN
opt_aref
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@@ -1822,7 +1829,7 @@ opt_aref
*/
DEFINE_INSN
opt_aset
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj, VALUE set)
(VALUE val)
{
@@ -1855,7 +1862,7 @@ opt_aset
*/
DEFINE_INSN
opt_aset_with
-(CALL_INFO ci, VALUE key)
+(CALL_INFO ci, CALL_CACHE cc, VALUE key)
(VALUE recv, VALUE val)
(VALUE val)
{
@@ -1877,7 +1884,7 @@ opt_aset_with
*/
DEFINE_INSN
opt_aref_with
-(CALL_INFO ci, VALUE key)
+(CALL_INFO ci, CALL_CACHE cc, VALUE key)
(VALUE recv)
(VALUE val)
{
@@ -1898,7 +1905,7 @@ opt_aref_with
*/
DEFINE_INSN
opt_length
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv)
(VALUE val)
{
@@ -1933,7 +1940,7 @@ opt_length
*/
DEFINE_INSN
opt_size
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv)
(VALUE val)
{
@@ -1968,7 +1975,7 @@ opt_size
*/
DEFINE_INSN
opt_empty_p
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv)
(VALUE val)
{
@@ -2006,7 +2013,7 @@ opt_empty_p
*/
DEFINE_INSN
opt_succ
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv)
(VALUE val)
{
@@ -2049,14 +2056,15 @@ opt_succ
*/
DEFINE_INSN
opt_not
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv)
(VALUE val)
{
extern VALUE rb_obj_not(VALUE obj);
- vm_search_method(ci, recv);
- if (check_cfunc(ci->me, rb_obj_not)) {
+ vm_search_method(ci, cc, recv);
+
+ if (check_cfunc(cc->me, rb_obj_not)) {
val = RTEST(recv) ? Qfalse : Qtrue;
}
else {
@@ -2092,7 +2100,7 @@ opt_regexpmatch1
*/
DEFINE_INSN
opt_regexpmatch2
-(CALL_INFO ci)
+(CALL_INFO ci, CALL_CACHE cc)
(VALUE obj2, VALUE obj1)
(VALUE val)
{
diff --git a/iseq.c b/iseq.c
index de09949..d3e918d 100644
--- a/iseq.c
+++ b/iseq.c
@@ -74,14 +74,14 @@ rb_iseq_free(const rb_iseq_t *iseq)
ruby_xfree((void *)iseq->body->local_table);
ruby_xfree((void *)iseq->body->is_entries);
- if (iseq->body->callinfo_entries) {
+ if (iseq->body->ci_entries) {
unsigned int i;
- for (i=0; i<iseq->body->callinfo_size; i++) {
- /* TODO: revisit callinfo data structure */
- const rb_call_info_kw_arg_t *kw_arg = iseq->body->callinfo_entries[i].kw_arg;
+ struct rb_call_info_with_kwarg *ci_kw_entries = (struct rb_call_info_with_kwarg *)&iseq->body->ci_entries[iseq->body->ci_size];
+ for (i=0; i<iseq->body->ci_kw_size; i++) {
+ const struct rb_call_info_kw_arg *kw_arg = ci_kw_entries[i].kw_arg;
ruby_xfree((void *)kw_arg);
}
- ruby_xfree(iseq->body->callinfo_entries);
+ ruby_xfree(iseq->body->ci_entries);
}
ruby_xfree((void *)iseq->body->catch_table);
ruby_xfree((void *)iseq->body->param.opt_table);
@@ -161,7 +161,7 @@ iseq_memsize(const rb_iseq_t *iseq)
}
if (body) {
- rb_call_info_t *ci_entries = body->callinfo_entries;
+ struct rb_call_info_with_kwarg *ci_kw_entries = (struct rb_call_info_with_kwarg *)&body->ci_entries[body->ci_size];
size += sizeof(struct rb_iseq_constant_body);
size += body->iseq_size * sizeof(VALUE);
@@ -173,13 +173,14 @@ iseq_memsize(const rb_iseq_t *iseq)
size += (body->param.opt_num + 1) * sizeof(VALUE);
size += param_keyword_size(body->param.keyword);
size += body->is_size * sizeof(union iseq_inline_storage_entry);
- size += body->callinfo_size * sizeof(rb_call_info_t);
+ size += body->ci_size * sizeof(struct rb_call_info);
+ size += body->ci_kw_size * sizeof(struct rb_call_info_with_kwarg);
- if (ci_entries) {
+ if (ci_kw_entries) {
unsigned int i;
- for (i = 0; i < body->callinfo_size; i++) {
- const rb_call_info_kw_arg_t *kw_arg = ci_entries[i].kw_arg;
+ for (i = 0; i < body->ci_kw_size; i++) {
+ const struct rb_call_info_kw_arg *kw_arg = ci_kw_entries[i].kw_arg;
if (kw_arg) {
size += rb_call_info_kw_arg_bytes(kw_arg->keyword_len);
@@ -1267,7 +1268,7 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
case TS_CALLINFO:
{
- rb_call_info_t *ci = (rb_call_info_t *)op;
+ struct rb_call_info *ci = (struct rb_call_info *)op;
VALUE ary = rb_ary_new();
if (ci->mid) {
@@ -1276,8 +1277,8 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
rb_ary_push(ary, rb_sprintf("argc:%d", ci->orig_argc));
- if (ci->kw_arg) {
- rb_ary_push(ary, rb_sprintf("kw:%d", ci->kw_arg->keyword_len));
+ if (ci->flag & VM_CALL_KWARG) {
+ rb_ary_push(ary, rb_sprintf("kw:%d", ((struct rb_call_info_with_kwarg *)ci)->kw_arg->keyword_len));
}
if (ci->flag) {
@@ -1288,6 +1289,7 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
if (ci->flag & VM_CALL_VCALL) rb_ary_push(flags, rb_str_new2("VCALL"));
if (ci->flag & VM_CALL_TAILCALL) rb_ary_push(flags, rb_str_new2("TAILCALL"));
if (ci->flag & VM_CALL_SUPER) rb_ary_push(flags, rb_str_new2("SUPER"));
+ if (ci->flag & VM_CALL_KWARG) rb_ary_push(flags, rb_str_new2("KWARG"));
if (ci->flag & VM_CALL_OPT_SEND) rb_ary_push(flags, rb_str_new2("SNED")); /* maybe not reachable */
if (ci->flag & VM_CALL_ARGS_SIMPLE) rb_ary_push(flags, rb_str_new2("ARGS_SIMPLE")); /* maybe not reachable */
rb_ary_push(ary, rb_ary_join(flags, rb_str_new2("|")));
@@ -1296,6 +1298,10 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
}
break;
+ case TS_CALLCACHE:
+ ret = rb_str_new2("<callcache>");
+ break;
+
case TS_CDHASH:
ret = rb_str_new2("<cdhash>");
break;
@@ -1883,20 +1889,21 @@ iseq_data_to_ary(const rb_iseq_t *iseq)
break;
case TS_CALLINFO:
{
- rb_call_info_t *ci = (rb_call_info_t *)*seq;
+ struct rb_call_info *ci = (struct rb_call_info *)*seq;
VALUE e = rb_hash_new();
int orig_argc = ci->orig_argc;
rb_hash_aset(e, ID2SYM(rb_intern("mid")), ci->mid ? ID2SYM(ci->mid) : Qnil);
rb_hash_aset(e, ID2SYM(rb_intern("flag")), UINT2NUM(ci->flag));
- if (ci->kw_arg) {
+ if (ci->flag & VM_CALL_KWARG) {
+ struct rb_call_info_with_kwarg *ci_kw = (struct rb_call_info_with_kwarg *)ci;
int i;
- VALUE kw = rb_ary_new2((long)ci->kw_arg->keyword_len);
+ VALUE kw = rb_ary_new2((long)ci_kw->kw_arg->keyword_len);
- orig_argc -= ci->kw_arg->keyword_len;
- for (i = 0; i < ci->kw_arg->keyword_len; i++) {
- rb_ary_push(kw, ci->kw_arg->keywords[i]);
+ orig_argc -= ci_kw->kw_arg->keyword_len;
+ for (i = 0; i < ci_kw->kw_arg->keyword_len; i++) {
+ rb_ary_push(kw, ci_kw->kw_arg->keywords[i]);
}
rb_hash_aset(e, ID2SYM(rb_intern("kw_arg")), kw);
}
@@ -1906,6 +1913,9 @@ iseq_data_to_ary(const rb_iseq_t *iseq)
rb_ary_push(ary, e);
}
break;
+ case TS_CALLCACHE:
+ rb_ary_push(ary, Qfalse);
+ break;
case TS_ID:
rb_ary_push(ary, ID2SYM(*seq));
break;
diff --git a/iseq.h b/iseq.h
index 9cbfd85..9d544fd 100644
--- a/iseq.h
+++ b/iseq.h
@@ -20,7 +20,7 @@ typedef struct rb_iseq_struct rb_iseq_t;
static inline size_t
rb_call_info_kw_arg_bytes(int keyword_len)
{
- return sizeof(rb_call_info_kw_arg_t) + sizeof(VALUE) * (keyword_len - 1);
+ return sizeof(struct rb_call_info_kw_arg) + sizeof(VALUE) * (keyword_len - 1);
}
RUBY_SYMBOL_EXPORT_BEGIN
@@ -142,6 +142,8 @@ struct iseq_compile_data {
int last_coverable_line;
int label_no;
int node_level;
+ unsigned int ci_index;
+ unsigned int ci_kw_index;
const rb_compile_option_t *option;
#if SUPPORT_JOKE
st_table *labels_table;
diff --git a/tool/instruction.rb b/tool/instruction.rb
index 16d97f5..4f7d08c 100755
--- a/tool/instruction.rb
+++ b/tool/instruction.rb
@@ -715,7 +715,7 @@ class RubyVM
# skip make operands when body has no reference to this operand
# TODO: really needed?
re = /\b#{var}\b/n
- if re =~ insn.body or re =~ insn.sp_inc or insn.rets.any?{|t, v| re =~ v} or re =~ 'ic' or re =~ 'ci'
+ if re =~ insn.body or re =~ insn.sp_inc or insn.rets.any?{|t, v| re =~ v} or re =~ 'ic' or re =~ 'ci' or re =~ 'cc'
ops << " #{type} #{var} = (#{type})GET_OPERAND(#{i+1});"
end
@@ -949,6 +949,8 @@ class RubyVM
"TS_IC"
when /^CALL_INFO/
"TS_CALLINFO"
+ when /^CALL_CACHE/
+ "TS_CALLCACHE"
when /^\.\.\./
"TS_VARIABLE"
when /^CDHASH/
@@ -971,6 +973,7 @@ class RubyVM
'TS_GENTRY' => 'G',
'TS_IC' => 'K',
'TS_CALLINFO' => 'C',
+ 'TS_CALLCACHE' => 'E',
'TS_CDHASH' => 'H',
'TS_ISEQ' => 'S',
'TS_VARIABLE' => '.',
diff --git a/vm.c b/vm.c
index bd996c5..92b33f3 100644
--- a/vm.c
+++ b/vm.c
@@ -1652,7 +1652,7 @@ vm_exec(rb_thread_t *th)
}
}
- if (catch_iseq != 0) { /* found catch table */
+ if (catch_iseq != NULL) { /* found catch table */
/* enter catch scope */
cfp->sp = vm_base_ptr(cfp) + cont_sp;
cfp->pc = cfp->iseq->body->iseq_encoded + cont_pc;
diff --git a/vm_args.c b/vm_args.c
index fe7a9b9..bc336dd 100644
--- a/vm_args.c
+++ b/vm_args.c
@@ -15,9 +15,10 @@ VALUE rb_keyword_error_new(const char *error, VALUE keys); /* class.c */
struct args_info {
/* basic args info */
- rb_call_info_t *ci;
+ struct rb_calling_info *calling;
VALUE *argv;
int argc;
+ const struct rb_call_info_kw_arg *kw_arg;
/* additional args info */
int rest_index;
@@ -235,8 +236,9 @@ args_pop_keyword_hash(struct args_info *args, VALUE *kw_hash_ptr, rb_thread_t *t
static int
args_kw_argv_to_hash(struct args_info *args)
{
- const VALUE *const passed_keywords = args->ci->kw_arg->keywords;
- const int kw_len = args->ci->kw_arg->keyword_len;
+ const struct rb_call_info_kw_arg *kw_arg = args->kw_arg;
+ const VALUE *const passed_keywords = kw_arg->keywords;
+ const int kw_len = kw_arg->keyword_len;
VALUE h = rb_hash_new();
const int kw_start = args->argc - kw_len;
const VALUE * const kw_argv = args->argv + kw_start;
@@ -257,8 +259,9 @@ args_stored_kw_argv_to_hash(struct args_info *args)
{
VALUE h = rb_hash_new();
int i;
- const VALUE *const passed_keywords = args->ci->kw_arg->keywords;
- const int passed_keyword_len = args->ci->kw_arg->keyword_len;
+ const struct rb_call_info_kw_arg *kw_arg = args->kw_arg;
+ const VALUE *const passed_keywords = kw_arg->keywords;
+ const int passed_keyword_len = kw_arg->keyword_len;
for (i=0; i<passed_keyword_len; i++) {
rb_hash_aset(h, passed_keywords[i], args->kw_argv[i]);
@@ -462,10 +465,10 @@ args_setup_kw_rest_parameter(VALUE keyword_hash, VALUE *locals)
}
static inline void
-args_setup_block_parameter(rb_thread_t *th, rb_call_info_t *ci, VALUE *locals)
+args_setup_block_parameter(rb_thread_t *th, struct rb_calling_info *calling, VALUE *locals)
{
VALUE blockval = Qnil;
- const rb_block_t *blockptr = ci->blockptr;
+ const rb_block_t *blockptr = calling->blockptr;
if (blockptr) {
/* make Proc object */
@@ -473,7 +476,7 @@ args_setup_block_parameter(rb_thread_t *th, rb_call_info_t *ci, VALUE *locals)
rb_proc_t *proc;
blockval = rb_vm_make_proc(th, blockptr, rb_cProc);
GetProcPtr(blockval, proc);
- ci->blockptr = &proc->block;
+ calling->blockptr = &proc->block;
}
else {
blockval = blockptr->proc;
@@ -499,7 +502,9 @@ fill_keys_values(st_data_t key, st_data_t val, st_data_t ptr)
}
static int
-setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq, rb_call_info_t * const ci,
+setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq,
+ struct rb_calling_info *const calling,
+ const struct rb_call_info *ci,
VALUE * const locals, const enum arg_setup_type arg_setup_type)
{
const int min_argc = iseq->body->param.lead_num + iseq->body->param.post_num;
@@ -525,20 +530,22 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq, r
* <- iseq->body->param.size------------>
* ^ locals ^ sp
*/
- for (i=ci->argc; i<iseq->body->param.size; i++) {
+ for (i=calling->argc; i<iseq->body->param.size; i++) {
locals[i] = Qnil;
}
th->cfp->sp = &locals[i];
/* setup args */
args = &args_body;
- args->ci = ci;
- given_argc = args->argc = ci->argc;
+ args->calling = calling;
+ given_argc = args->argc = calling->argc;
args->argv = locals;
- if (ci->kw_arg) {
+ if (ci->flag & VM_CALL_KWARG) {
+ args->kw_arg = ((struct rb_call_info_with_kwarg *)ci)->kw_arg;
+
if (iseq->body->param.flags.has_kw) {
- int kw_len = ci->kw_arg->keyword_len;
+ int kw_len = args->kw_arg->keyword_len;
/* copy kw_argv */
args->kw_argv = ALLOCA_N(VALUE, kw_len);
args->argc -= kw_len;
@@ -551,6 +558,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq, r
}
}
else {
+ args->kw_arg = NULL;
args->kw_argv = NULL;
}
@@ -642,7 +650,8 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq, r
VALUE * const klocals = locals + iseq->body->param.keyword->bits_start - iseq->body->param.keyword->num;
if (args->kw_argv != NULL) {
- args_setup_kw_parameters(args->kw_argv, args->ci->kw_arg->keyword_len, args->ci->kw_arg->keywords, iseq, klocals);
+ const struct rb_call_info_kw_arg *kw_arg = args->kw_arg;
+ args_setup_kw_parameters(args->kw_argv, kw_arg->keyword_len, kw_arg->keywords, iseq, klocals);
}
else if (!NIL_P(keyword_hash)) {
int kw_len = rb_long2int(RHASH_SIZE(keyword_hash));
@@ -665,7 +674,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq, r
}
if (iseq->body->param.flags.has_block) {
- args_setup_block_parameter(th, ci, locals + iseq->body->param.block_start);
+ args_setup_block_parameter(th, calling, locals + iseq->body->param.block_start);
}
#if 0
@@ -717,10 +726,11 @@ argument_kw_error(rb_thread_t *th, const rb_iseq_t *iseq, const char *error, con
}
static inline void
-vm_caller_setup_arg_splat(rb_control_frame_t *cfp, rb_call_info_t *ci)
+vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
- VALUE *argv = cfp->sp - ci->argc;
- VALUE ary = argv[ci->argc-1];
+ int argc = calling->argc;
+ VALUE *argv = cfp->sp - argc;
+ VALUE ary = argv[argc-1];
cfp->sp--;
@@ -733,15 +743,16 @@ vm_caller_setup_arg_splat(rb_control_frame_t *cfp, rb_call_info_t *ci)
for (i = 0; i < len; i++) {
*cfp->sp++ = ptr[i];
}
- ci->argc += i - 1;
+ calling->argc += i - 1;
}
}
static inline void
-vm_caller_setup_arg_kw(rb_control_frame_t *cfp, rb_call_info_t *ci)
+vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci)
{
- const VALUE *const passed_keywords = ci->kw_arg->keywords;
- const int kw_len = ci->kw_arg->keyword_len;
+ struct rb_call_info_with_kwarg *ci_kw = (struct rb_call_info_with_kwarg *)ci;
+ const VALUE *const passed_keywords = ci_kw->kw_arg->keywords;
+ const int kw_len = ci_kw->kw_arg->keyword_len;
const VALUE h = rb_hash_new();
VALUE *sp = cfp->sp;
int i;
@@ -752,18 +763,12 @@ vm_caller_setup_arg_kw(rb_control_frame_t *cfp, rb_call_info_t *ci)
(sp-kw_len)[0] = h;
cfp->sp -= kw_len - 1;
- ci->argc -= kw_len - 1;
+ calling->argc -= kw_len - 1;
}
-#define SAVE_RESTORE_CI(expr, ci) do { \
- int saved_argc = (ci)->argc; rb_block_t *saved_blockptr = (ci)->blockptr; /* save */ \
- expr; \
- (ci)->argc = saved_argc; (ci)->blockptr = saved_blockptr; /* restore */ \
-} while (0)
-
static void
-vm_caller_setup_arg_block(const rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci,
- rb_iseq_t *blockiseq, const int is_super)
+vm_caller_setup_arg_block(const rb_thread_t *th, rb_control_frame_t *reg_cfp,
+ struct rb_calling_info *calling, const struct rb_call_info *ci, rb_iseq_t *blockiseq, const int is_super)
{
if (ci->flag & VM_CALL_ARGS_BLOCKARG) {
rb_proc_t *po;
@@ -774,8 +779,7 @@ vm_caller_setup_arg_block(const rb_thread_t *th, rb_control_frame_t *reg_cfp, rb
if (proc != Qnil) {
if (!rb_obj_is_proc(proc)) {
VALUE b;
-
- SAVE_RESTORE_CI(b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc"), ci);
+ b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
if (NIL_P(b) || !rb_obj_is_proc(b)) {
rb_raise(rb_eTypeError,
@@ -785,32 +789,32 @@ vm_caller_setup_arg_block(const rb_thread_t *th, rb_control_frame_t *reg_cfp, rb
proc = b;
}
GetProcPtr(proc, po);
- ci->blockptr = &po->block;
+ calling->blockptr = &po->block;
RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp)->proc = proc;
}
else {
- ci->blockptr = NULL;
+ calling->blockptr = NULL;
}
}
else if (blockiseq != 0) { /* likely */
- ci->blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp);
- ci->blockptr->iseq = blockiseq;
- ci->blockptr->proc = 0;
+ rb_block_t *blockptr = calling->blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp);
+ blockptr->iseq = blockiseq;
+ blockptr->proc = 0;
}
else {
if (is_super) {
- ci->blockptr = GET_BLOCK_PTR();
+ calling->blockptr = GET_BLOCK_PTR();
}
else {
- ci->blockptr = NULL;
+ calling->blockptr = NULL;
}
}
}
-#define IS_ARGS_SPLAT(ci) ((ci)->flag & VM_CALL_ARGS_SPLAT)
-#define IS_ARGS_KEYWORD(ci) ((ci)->kw_arg != NULL)
+#define IS_ARGS_SPLAT(ci) ((ci)->flag & VM_CALL_ARGS_SPLAT)
+#define IS_ARGS_KEYWORD(ci) ((ci)->flag & VM_CALL_KWARG)
-#define CALLER_SETUP_ARG(cfp, ci) do { \
- if (UNLIKELY(IS_ARGS_SPLAT(ci))) vm_caller_setup_arg_splat((cfp), (ci)); \
- if (UNLIKELY(IS_ARGS_KEYWORD(ci))) vm_caller_setup_arg_kw((cfp), (ci)); \
+#define CALLER_SETUP_ARG(cfp, calling, ci) do { \
+ if (UNLIKELY(IS_ARGS_SPLAT(ci))) vm_caller_setup_arg_splat((cfp), (calling)); \
+ if (UNLIKELY(IS_ARGS_KEYWORD(ci))) vm_caller_setup_arg_kw((cfp), (calling), (ci)); \
} while (0)
diff --git a/vm_core.h b/vm_core.h
index 4b7d946..f9e4f04 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -173,11 +173,6 @@ union iseq_inline_storage_entry {
struct rb_thread_struct;
struct rb_control_frame_struct;
-typedef struct rb_call_info_kw_arg_struct {
- int keyword_len;
- VALUE keywords[1];
-} rb_call_info_kw_arg_t;
-
enum method_missing_reason {
MISSING_NOENTRY = 0x00,
MISSING_PRIVATE = 0x01,
@@ -188,14 +183,30 @@ enum method_missing_reason {
MISSING_NONE = 0x20
};
-/* rb_call_info_t contains calling information including inline cache */
-typedef struct rb_call_info_struct {
+struct rb_call_info {
/* fixed at compile time */
ID mid;
unsigned int flag;
int orig_argc;
- const rb_call_info_kw_arg_t *kw_arg;
+};
+
+struct rb_call_info_kw_arg {
+ int keyword_len;
+ VALUE keywords[1];
+};
+
+struct rb_call_info_with_kwarg {
+ struct rb_call_info ci;
+ struct rb_call_info_kw_arg *kw_arg;
+};
+
+struct rb_calling_info {
+ struct rb_block_struct *blockptr;
+ VALUE recv;
+ int argc;
+};
+struct rb_call_cache {
/* inline cache: keys */
rb_serial_t method_state;
rb_serial_t class_serial;
@@ -203,18 +214,14 @@ typedef struct rb_call_info_struct {
/* inline cache: values */
const rb_callable_method_entry_t *me;
- /* temporary values for method calling */
- struct rb_block_struct *blockptr;
- VALUE recv;
- int argc;
+ VALUE (*call)(struct rb_thread_struct *th, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
+
union {
unsigned int index; /* used by ivar */
enum method_missing_reason method_missing_reason; /* used by method_missing */
int inc_sp; /* used by cfunc */
} aux;
-
- VALUE (*call)(struct rb_thread_struct *th, struct rb_control_frame_struct *cfp, struct rb_call_info_struct *ci);
-} rb_call_info_t;
+};
#if 1
#define GetCoreDataFromValue(obj, type, ptr) do { \
@@ -337,12 +344,19 @@ struct rb_iseq_constant_body {
struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
union iseq_inline_storage_entry *is_entries;
- rb_call_info_t *callinfo_entries;
+ struct rb_call_info *ci_entries; /* struct rb_call_info ci_entries[ci_size];
+ * struct rb_call_info_with_kwarg cikw_entries[ci_kw_size];
+ * So that:
+ * struct rb_call_info_with_kwarg *cikw_entries = &body->ci_entries[ci_size];
+ */
+ struct rb_call_cache *cc_entries; /* size is ci_size = ci_kw_size */
+
const VALUE mark_ary; /* Array: includes operands which should be GC marked */
unsigned int local_table_size;
unsigned int is_size;
- unsigned int callinfo_size;
+ unsigned int ci_size;
+ unsigned int ci_kw_size;
unsigned int line_info_size;
};
@@ -632,7 +646,7 @@ typedef struct rb_thread_struct {
const rb_callable_method_entry_t *passed_bmethod_me;
/* for cfunc */
- rb_call_info_t *passed_ci;
+ struct rb_calling_info *calling;
/* for load(true) */
VALUE top_self;
@@ -827,14 +841,16 @@ enum vm_check_match_type {
#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY 0x04
-#define VM_CALL_ARGS_SPLAT (0x01 << 1) /* m(*args) */
-#define VM_CALL_ARGS_BLOCKARG (0x01 << 2) /* m(&block) */
-#define VM_CALL_FCALL (0x01 << 3) /* m(...) */
-#define VM_CALL_VCALL (0x01 << 4) /* m */
-#define VM_CALL_TAILCALL (0x01 << 5) /* located at tail position */
-#define VM_CALL_SUPER (0x01 << 6) /* super */
-#define VM_CALL_OPT_SEND (0x01 << 7) /* internal flag */
-#define VM_CALL_ARGS_SIMPLE (0x01 << 8) /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
+#define VM_CALL_ARGS_SPLAT (0x01 << 0) /* m(*args) */
+#define VM_CALL_ARGS_BLOCKARG (0x01 << 1) /* m(&block) */
+#define VM_CALL_FCALL (0x01 << 2) /* m(...) */
+#define VM_CALL_VCALL (0x01 << 3) /* m */
+#define VM_CALL_ARGS_SIMPLE (0x01 << 4) /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
+#define VM_CALL_BLOCKISEQ (0x01 << 5) /* has blockiseq */
+#define VM_CALL_KWARG (0x01 << 6) /* has kwarg */
+#define VM_CALL_TAILCALL (0x01 << 7) /* located at tail position */
+#define VM_CALL_SUPER (0x01 << 8) /* super */
+#define VM_CALL_OPT_SEND (0x01 << 9) /* internal flag */
enum vm_special_object_type {
VM_SPECIAL_OBJECT_VMCORE = 1,
@@ -878,7 +894,8 @@ enum vm_svar_index {
/* inline cache */
typedef struct iseq_inline_cache_entry *IC;
-typedef rb_call_info_t *CALL_INFO;
+typedef struct rb_call_info *CALL_INFO;
+typedef struct rb_call_cache *CALL_CACHE;
void rb_vm_change_state(void);
diff --git a/vm_eval.c b/vm_eval.c
index 5d3d893..f550769 100644
--- a/vm_eval.c
+++ b/vm_eval.c
@@ -39,43 +39,48 @@ typedef enum call_type {
static VALUE send_internal(int argc, const VALUE *argv, VALUE recv, call_type scope);
-static VALUE vm_call0_body(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv);
+static VALUE vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv);
static VALUE
vm_call0(rb_thread_t* th, VALUE recv, ID id, int argc, const VALUE *argv, const rb_callable_method_entry_t *me)
{
- rb_call_info_t ci_entry, *ci = &ci_entry;
+ struct rb_calling_info calling_entry, *calling;
+ struct rb_call_info ci_entry;
+ struct rb_call_cache cc_entry;
- ci->flag = 0;
- ci->mid = id;
- ci->recv = recv;
- ci->argc = argc;
- ci->me = me;
- ci->kw_arg = NULL;
+ calling = &calling_entry;
- return vm_call0_body(th, ci, argv);
+ ci_entry.flag = 0;
+ ci_entry.mid = id;
+
+ cc_entry.me = me;
+
+ calling->recv = recv;
+ calling->argc = argc;
+
+ return vm_call0_body(th, calling, &ci_entry, &cc_entry, argv);
}
#if OPT_CALL_CFUNC_WITHOUT_FRAME
static VALUE
-vm_call0_cfunc(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv)
+vm_call0_cfunc(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv)
{
VALUE val;
- RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, ci->me->owner, ci->mid);
- EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, ci->recv, ci->mid, ci->me->owner, Qnil);
+ RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, cc->me->owner, ci->mid);
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, calling->recv, ci->mid, cc->me->owner, Qnil);
{
rb_control_frame_t *reg_cfp = th->cfp;
- const rb_callable_method_entry_t *me = ci->me;
+ const rb_callable_method_entry_t *me = cc->me;
const rb_method_cfunc_t *cfunc = &me->def->body.cfunc;
int len = cfunc->argc;
- VALUE recv = ci->recv;
- int argc = ci->argc;
+ VALUE recv = calling->recv;
+ int argc = calling->argc;
- if (len >= 0) rb_check_arity(ci->argc, len, len);
+ if (len >= 0) rb_check_arity(argc, len, len);
-	th->passed_ci = ci;
+	th->calling = calling;
- ci->aux.inc_sp = 0;
+ cc->aux.inc_sp = 0;
VM_PROFILE_UP(2);
val = (*cfunc->invoker)(cfunc->func, recv, argc, argv);
@@ -93,23 +98,23 @@ vm_call0_cfunc(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv)
vm_pop_frame(th);
}
}
- EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, ci->recv, ci->mid, ci->me->owner, val);
- RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, ci->me->owner, ci->mid);
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, calling->recv, ci->mid, cc->me->owner, val);
+ RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, cc->me->owner, ci->mid);
return val;
}
#else
static VALUE
-vm_call0_cfunc_with_frame(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv)
+vm_call0_cfunc_with_frame(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv)
{
VALUE val;
- const rb_callable_method_entry_t *me = ci->me;
+ const rb_callable_method_entry_t *me = cc->me;
const rb_method_cfunc_t *cfunc = &me->def->body.cfunc;
int len = cfunc->argc;
- VALUE recv = ci->recv;
- int argc = ci->argc;
+ VALUE recv = calling->recv;
+ int argc = calling->argc;
ID mid = ci->mid;
- rb_block_t *blockptr = ci->blockptr;
+ rb_block_t *blockptr = calling->blockptr;
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->owner, mid);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, mid, me->owner, Qnil);
@@ -138,114 +143,114 @@ vm_call0_cfunc_with_frame(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv
}
static VALUE
-vm_call0_cfunc(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv)
+vm_call0_cfunc(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv)
{
- return vm_call0_cfunc_with_frame(th, ci, argv);
+ return vm_call0_cfunc_with_frame(th, calling, ci, cc, argv);
}
#endif
/* `ci' should point temporal value (on stack value) */
static VALUE
-vm_call0_body(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv)
+vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv)
{
VALUE ret;
if (th->passed_block) {
- ci->blockptr = (rb_block_t *)th->passed_block;
+ calling->blockptr = (rb_block_t *)th->passed_block;
th->passed_block = 0;
}
else {
- ci->blockptr = 0;
+ calling->blockptr = 0;
}
again:
- switch (ci->me->def->type) {
+ switch (cc->me->def->type) {
case VM_METHOD_TYPE_ISEQ:
{
rb_control_frame_t *reg_cfp = th->cfp;
int i;
- CHECK_VM_STACK_OVERFLOW(reg_cfp, ci->argc + 1);
+ CHECK_VM_STACK_OVERFLOW(reg_cfp, calling->argc + 1);
- *reg_cfp->sp++ = ci->recv;
- for (i = 0; i < ci->argc; i++) {
+ *reg_cfp->sp++ = calling->recv;
+ for (i = 0; i < calling->argc; i++) {
*reg_cfp->sp++ = argv[i];
}
- vm_call_iseq_setup(th, reg_cfp, ci);
+ vm_call_iseq_setup(th, reg_cfp, calling, ci, cc);
th->cfp->flag |= VM_FRAME_FLAG_FINISH;
return vm_exec(th); /* CHECK_INTS in this function */
}
case VM_METHOD_TYPE_NOTIMPLEMENTED:
case VM_METHOD_TYPE_CFUNC:
- ret = vm_call0_cfunc(th, ci, argv);
+ ret = vm_call0_cfunc(th, calling, ci, cc, argv);
goto success;
case VM_METHOD_TYPE_ATTRSET:
- rb_check_arity(ci->argc, 1, 1);
- ret = rb_ivar_set(ci->recv, ci->me->def->body.attr.id, argv[0]);
+ rb_check_arity(calling->argc, 1, 1);
+ ret = rb_ivar_set(calling->recv, cc->me->def->body.attr.id, argv[0]);
goto success;
case VM_METHOD_TYPE_IVAR:
- rb_check_arity(ci->argc, 0, 0);
- ret = rb_attr_get(ci->recv, ci->me->def->body.attr.id);
+ rb_check_arity(calling->argc, 0, 0);
+ ret = rb_attr_get(calling->recv, cc->me->def->body.attr.id);
goto success;
case VM_METHOD_TYPE_BMETHOD:
- ret = vm_call_bmethod_body(th, ci, argv);
+ ret = vm_call_bmethod_body(th, calling, ci, cc, argv);
goto success;
case VM_METHOD_TYPE_ZSUPER:
case VM_METHOD_TYPE_REFINED:
{
- const rb_method_type_t type = ci->me->def->type;
+ const rb_method_type_t type = cc->me->def->type;
VALUE super_class;
- if (type == VM_METHOD_TYPE_REFINED && ci->me->def->body.refined.orig_me) {
- ci->me = refined_method_callable_without_refinement(ci->me);
+ if (type == VM_METHOD_TYPE_REFINED && cc->me->def->body.refined.orig_me) {
+ cc->me = refined_method_callable_without_refinement(cc->me);
goto again;
}
- super_class = RCLASS_SUPER(ci->me->defined_class);
+ super_class = RCLASS_SUPER(cc->me->defined_class);
- if (!super_class || !(ci->me = rb_callable_method_entry(super_class, ci->mid))) {
+ if (!super_class || !(cc->me = rb_callable_method_entry(super_class, ci->mid))) {
enum method_missing_reason ex = (type == VM_METHOD_TYPE_ZSUPER) ? MISSING_SUPER : 0;
- ret = method_missing(ci->recv, ci->mid, ci->argc, argv, ex);
+ ret = method_missing(calling->recv, ci->mid, calling->argc, argv, ex);
goto success;
}
RUBY_VM_CHECK_INTS(th);
goto again;
}
case VM_METHOD_TYPE_ALIAS:
- ci->me = aliased_callable_method_entry(ci->me);
+ cc->me = aliased_callable_method_entry(cc->me);
goto again;
case VM_METHOD_TYPE_MISSING:
{
- VALUE new_args = rb_ary_new4(ci->argc, argv);
+ VALUE new_args = rb_ary_new4(calling->argc, argv);
rb_ary_unshift(new_args, ID2SYM(ci->mid));
- th->passed_block = ci->blockptr;
- ret = rb_funcall2(ci->recv, idMethodMissing, ci->argc+1,
+ th->passed_block = calling->blockptr;
+ ret = rb_funcall2(calling->recv, idMethodMissing, calling->argc+1,
RARRAY_CONST_PTR(new_args));
RB_GC_GUARD(new_args);
return ret;
}
case VM_METHOD_TYPE_OPTIMIZED:
- switch (ci->me->def->body.optimize_type) {
+ switch (cc->me->def->body.optimize_type) {
case OPTIMIZED_METHOD_TYPE_SEND:
- ret = send_internal(ci->argc, argv, ci->recv, CALL_FCALL);
+ ret = send_internal(calling->argc, argv, calling->recv, CALL_FCALL);
goto success;
case OPTIMIZED_METHOD_TYPE_CALL:
{
rb_proc_t *proc;
- GetProcPtr(ci->recv, proc);
- ret = rb_vm_invoke_proc(th, proc, ci->argc, argv, ci->blockptr);
+ GetProcPtr(calling->recv, proc);
+ ret = rb_vm_invoke_proc(th, proc, calling->argc, argv, calling->blockptr);
goto success;
}
default:
- rb_bug("vm_call0: unsupported optimized method type (%d)", ci->me->def->body.optimize_type);
+ rb_bug("vm_call0: unsupported optimized method type (%d)", cc->me->def->body.optimize_type);
}
break;
case VM_METHOD_TYPE_UNDEF:
break;
}
- rb_bug("vm_call0: unsupported method type (%d)", ci->me->def->type);
+ rb_bug("vm_call0: unsupported method type (%d)", cc->me->def->type);
return Qundef;
success:
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 675b26a..59adf88 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -712,7 +712,7 @@ vm_search_const_defined_class(const VALUE cbase, ID id)
#endif
static inline VALUE
-vm_getivar(VALUE obj, ID id, IC ic, rb_call_info_t *ci, int is_attr)
+vm_getivar(VALUE obj, ID id, IC ic, struct rb_call_cache *cc, int is_attr)
{
#if USE_IC_FOR_IVAR
if (RB_TYPE_P(obj, T_OBJECT)) {
@@ -721,8 +721,8 @@ vm_getivar(VALUE obj, ID id, IC ic, rb_call_info_t *ci, int is_attr)
const long len = ROBJECT_NUMIV(obj);
const VALUE *const ptr = ROBJECT_IVPTR(obj);
- if (LIKELY(is_attr ? ci->aux.index > 0 : ic->ic_serial == RCLASS_SERIAL(klass))) {
- long index = !is_attr ? (long)ic->ic_value.index : (long)(ci->aux.index - 1);
+ if (LIKELY(is_attr ? cc->aux.index > 0 : ic->ic_serial == RCLASS_SERIAL(klass))) {
+ long index = !is_attr ? (long)ic->ic_value.index : (long)(cc->aux.index - 1);
if (index < len) {
val = ptr[index];
@@ -742,7 +742,7 @@ vm_getivar(VALUE obj, ID id, IC ic, rb_call_info_t *ci, int is_attr)
ic->ic_serial = RCLASS_SERIAL(klass);
}
else { /* call_info */
- ci->aux.index = (int)index + 1;
+ cc->aux.index = (int)index + 1;
}
}
}
@@ -762,7 +762,7 @@ vm_getivar(VALUE obj, ID id, IC ic, rb_call_info_t *ci, int is_attr)
}
static inline VALUE
-vm_setivar(VALUE obj, ID id, VALUE val, IC ic, rb_call_info_t *ci, int is_attr)
+vm_setivar(VALUE obj, ID id, VALUE val, IC ic, struct rb_call_cache *cc, int is_attr)
{
#if USE_IC_FOR_IVAR
rb_check_frozen(obj);
@@ -773,8 +773,8 @@ vm_setivar(VALUE obj, ID id, VALUE val, IC ic, rb_call_info_t *ci, int is_attr)
if (LIKELY(
(!is_attr && ic->ic_serial == RCLASS_SERIAL(klass)) ||
- (is_attr && ci->aux.index > 0))) {
- long index = !is_attr ? (long)ic->ic_value.index : (long)ci->aux.index-1;
+ (is_attr && cc->aux.index > 0))) {
+ long index = !is_attr ? (long)ic->ic_value.index : (long)cc->aux.index-1;
long len = ROBJECT_NUMIV(obj);
VALUE *ptr = ROBJECT_IVPTR(obj);
@@ -795,7 +795,7 @@ vm_setivar(VALUE obj, ID id, VALUE val, IC ic, rb_call_info_t *ci, int is_attr)
rb_raise(rb_eArgError, "too many instance variables");
}
else {
- ci->aux.index = (int)(index + 1);
+ cc->aux.index = (int)(index + 1);
}
}
/* fall through */
@@ -1049,26 +1049,26 @@ vm_expandarray(rb_control_frame_t *cfp, VALUE ary, rb_num_t num, int flag)
RB_GC_GUARD(ary);
}
-static VALUE vm_call_general(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci);
+static VALUE vm_call_general(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
static void
-vm_search_method(rb_call_info_t *ci, VALUE recv)
+vm_search_method(const struct rb_call_info *ci, struct rb_call_cache *cc, VALUE recv)
{
VALUE klass = CLASS_OF(recv);
#if OPT_INLINE_METHOD_CACHE
- if (LIKELY(GET_GLOBAL_METHOD_STATE() == ci->method_state && RCLASS_SERIAL(klass) == ci->class_serial)) {
+ if (LIKELY(GET_GLOBAL_METHOD_STATE() == cc->method_state && RCLASS_SERIAL(klass) == cc->class_serial)) {
/* cache hit! */
return;
}
#endif
- ci->me = rb_callable_method_entry(klass, ci->mid);
- VM_ASSERT(callable_method_entry_p(ci->me));
- ci->call = vm_call_general;
+ cc->me = rb_callable_method_entry(klass, ci->mid);
+ VM_ASSERT(callable_method_entry_p(cc->me));
+ cc->call = vm_call_general;
#if OPT_INLINE_METHOD_CACHE
- ci->method_state = GET_GLOBAL_METHOD_STATE();
- ci->class_serial = RCLASS_SERIAL(klass);
+ cc->method_state = GET_GLOBAL_METHOD_STATE();
+ cc->class_serial = RCLASS_SERIAL(klass);
#endif
}
@@ -1089,7 +1089,7 @@ static
inline
#endif
VALUE
-opt_eq_func(VALUE recv, VALUE obj, CALL_INFO ci)
+opt_eq_func(VALUE recv, VALUE obj, CALL_INFO ci, CALL_CACHE cc)
{
if (FIXNUM_2_P(recv, obj) &&
BASIC_OP_UNREDEFINED_P(BOP_EQ, FIXNUM_REDEFINED_OP_FLAG)) {
@@ -1119,9 +1119,9 @@ opt_eq_func(VALUE recv, VALUE obj, CALL_INFO ci)
}
{
- vm_search_method(ci, recv);
+ vm_search_method(ci, cc, recv);
- if (check_cfunc(ci->me, rb_obj_equal)) {
+ if (check_cfunc(cc->me, rb_obj_equal)) {
return recv == obj ? Qtrue : Qfalse;
}
}
@@ -1132,12 +1132,14 @@ opt_eq_func(VALUE recv, VALUE obj, CALL_INFO ci)
VALUE
rb_equal_opt(VALUE obj1, VALUE obj2)
{
- rb_call_info_t ci;
+ struct rb_call_info ci;
+ struct rb_call_cache cc;
+
ci.mid = idEq;
- ci.method_state = 0;
- ci.me = NULL;
- ci.class_serial = 0;
- return opt_eq_func(obj1, obj2, &ci);
+ cc.method_state = 0;
+ cc.class_serial = 0;
+ cc.me = NULL;
+ return opt_eq_func(obj1, obj2, &ci, &cc);
}
static VALUE vm_call0(rb_thread_t*, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *);
@@ -1230,12 +1232,11 @@ vm_base_ptr(rb_control_frame_t *cfp)
#include "vm_args.c"
-static VALUE vm_call_iseq_setup_2(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci, int opt_pc);
-static inline VALUE vm_call_iseq_setup_normal_0start(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci);
-static inline VALUE vm_call_iseq_setup_tailcall_0start(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci);
-static inline VALUE vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci, int opt_pc);
-static inline VALUE vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci, int opt_pc);
-
+static inline VALUE vm_call_iseq_setup_2(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, int opt_pc);
+static inline VALUE vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, int opt_pc);
+static inline VALUE vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, int opt_pc);
+static VALUE vm_call_iseq_setup_normal_0start(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
+static VALUE vm_call_iseq_setup_tailcall_0start(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
static inline VALUE
vm_callee_setup_block_arg_arg0_check(VALUE *argv)
@@ -1273,73 +1274,73 @@ simple_iseq_p(const rb_iseq_t *iseq)
}
static inline int
-vm_callee_setup_block_arg(rb_thread_t *th, rb_call_info_t *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
+vm_callee_setup_block_arg(rb_thread_t *th, struct rb_calling_info *calling, const struct rb_call_info *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
{
if (LIKELY(simple_iseq_p(iseq))) {
rb_control_frame_t *cfp = th->cfp;
VALUE arg0;
- CALLER_SETUP_ARG(cfp, ci); /* splat arg */
+ CALLER_SETUP_ARG(cfp, calling, ci); /* splat arg */
if (arg_setup_type == arg_setup_block &&
- ci->argc == 1 &&
+ calling->argc == 1 &&
iseq->body->param.flags.has_lead &&
!iseq->body->param.flags.ambiguous_param0 &&
!NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
- ci->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
+ calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
}
- if (ci->argc != iseq->body->param.lead_num) {
+ if (calling->argc != iseq->body->param.lead_num) {
if (arg_setup_type == arg_setup_block) {
- if (ci->argc < iseq->body->param.lead_num) {
+ if (calling->argc < iseq->body->param.lead_num) {
int i;
CHECK_VM_STACK_OVERFLOW(cfp, iseq->body->param.lead_num);
- for (i=ci->argc; i<iseq->body->param.lead_num; i++) argv[i] = Qnil;
- ci->argc = iseq->body->param.lead_num; /* fill rest parameters */
+ for (i=calling->argc; i<iseq->body->param.lead_num; i++) argv[i] = Qnil;
+ calling->argc = iseq->body->param.lead_num; /* fill rest parameters */
}
- else if (ci->argc > iseq->body->param.lead_num) {
- ci->argc = iseq->body->param.lead_num; /* simply truncate arguments */
+ else if (calling->argc > iseq->body->param.lead_num) {
+ calling->argc = iseq->body->param.lead_num; /* simply truncate arguments */
}
}
else if (arg_setup_type == arg_setup_lambda &&
- ci->argc == 1 &&
+ calling->argc == 1 &&
!NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv)) &&
RARRAY_LEN(arg0) == iseq->body->param.lead_num) {
- ci->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
+ calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
}
else {
- argument_arity_error(th, iseq, ci->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
+ argument_arity_error(th, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
}
}
return 0;
}
else {
- return setup_parameters_complex(th, iseq, ci, argv, arg_setup_type);
+ return setup_parameters_complex(th, iseq, calling, ci, argv, arg_setup_type);
}
}
static inline int
-vm_callee_setup_arg(rb_thread_t *th, rb_call_info_t *ci, const rb_iseq_t *iseq, VALUE *argv)
+vm_callee_setup_arg(rb_thread_t *th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const rb_iseq_t *iseq, VALUE *argv)
{
if (LIKELY(simple_iseq_p(iseq))) {
rb_control_frame_t *cfp = th->cfp;
- CALLER_SETUP_ARG(cfp, ci); /* splat arg */
+ CALLER_SETUP_ARG(cfp, calling, ci); /* splat arg */
- if (ci->argc != iseq->body->param.lead_num) {
- argument_arity_error(th, iseq, ci->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
+ if (calling->argc != iseq->body->param.lead_num) {
+ argument_arity_error(th, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
}
- CI_SET_FASTPATH(ci,
+ CI_SET_FASTPATH(cc,
(UNLIKELY(ci->flag & VM_CALL_TAILCALL) ? vm_call_iseq_setup_tailcall_0start :
- vm_call_iseq_setup_normal_0start),
- (!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) && !(METHOD_ENTRY_VISI(ci->me) == METHOD_VISI_PROTECTED)));
-
+ vm_call_iseq_setup_normal_0start),
+ (!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
+ !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED)));
return 0;
}
else {
- return setup_parameters_complex(th, iseq, ci, argv, arg_setup_method);
+ return setup_parameters_complex(th, iseq, calling, ci, argv, arg_setup_method);
}
}
@@ -1353,33 +1354,33 @@ def_iseq_ptr(rb_method_definition_t *def)
}
static VALUE
-vm_call_iseq_setup(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
+vm_call_iseq_setup(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
- int opt_pc = vm_callee_setup_arg(th, ci, def_iseq_ptr(ci->me->def), cfp->sp - ci->argc);
- return vm_call_iseq_setup_2(th, cfp, ci, opt_pc);
+ int opt_pc = vm_callee_setup_arg(th, calling, ci, cc, def_iseq_ptr(cc->me->def), cfp->sp - calling->argc);
+ return vm_call_iseq_setup_2(th, cfp, calling, ci, cc, opt_pc);
}
-static VALUE
-vm_call_iseq_setup_2(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci, int opt_pc)
+static inline VALUE
+vm_call_iseq_setup_2(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, int opt_pc)
{
if (LIKELY(!(ci->flag & VM_CALL_TAILCALL))) {
- return vm_call_iseq_setup_normal(th, cfp, ci, opt_pc);
+ return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, opt_pc);
}
else {
- return vm_call_iseq_setup_tailcall(th, cfp, ci, opt_pc);
+ return vm_call_iseq_setup_tailcall(th, cfp, calling, ci, cc, opt_pc);
}
}
static inline VALUE
-vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci, int opt_pc)
+vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, int opt_pc)
{
- VALUE *argv = cfp->sp - ci->argc;
- const rb_callable_method_entry_t *me = ci->me;
+ const rb_callable_method_entry_t *me = cc->me;
const rb_iseq_t *iseq = def_iseq_ptr(me->def);
+ VALUE *argv = cfp->sp - calling->argc;
VALUE *sp = argv + iseq->body->param.size;
- vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD, ci->recv,
- VM_ENVVAL_BLOCK_PTR(ci->blockptr), (VALUE)me,
+ vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD, calling->recv,
+ VM_ENVVAL_BLOCK_PTR(calling->blockptr), (VALUE)me,
iseq->body->iseq_encoded + opt_pc, sp,
iseq->body->local_size - iseq->body->param.size,
iseq->body->stack_max);
@@ -1389,11 +1390,11 @@ vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info
}
static inline VALUE
-vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci, int opt_pc)
+vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, int opt_pc)
{
unsigned int i;
- VALUE *argv = cfp->sp - ci->argc;
- const rb_callable_method_entry_t *me = ci->me;
+ VALUE *argv = cfp->sp - calling->argc;
+ const rb_callable_method_entry_t *me = cc->me;
const rb_iseq_t *iseq = def_iseq_ptr(me->def);
VALUE *src_argv = argv;
VALUE *sp_orig, *sp;
@@ -1406,7 +1407,7 @@ vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_in
sp_orig = sp = cfp->sp;
/* push self */
- sp[0] = ci->recv;
+ sp[0] = calling->recv;
sp++;
/* copy arguments */
@@ -1415,7 +1416,7 @@ vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_in
}
vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD | finish_flag,
- ci->recv, VM_ENVVAL_BLOCK_PTR(ci->blockptr), (VALUE)me,
+ calling->recv, VM_ENVVAL_BLOCK_PTR(calling->blockptr), (VALUE)me,
iseq->body->iseq_encoded + opt_pc, sp,
iseq->body->local_size - iseq->body->param.size,
iseq->body->stack_max);
@@ -1424,16 +1425,16 @@ vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_in
return Qundef;
}
-static inline VALUE
-vm_call_iseq_setup_normal_0start(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
+static VALUE
+vm_call_iseq_setup_normal_0start(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
- return vm_call_iseq_setup_normal(th, cfp, ci, 0);
+ return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0);
}
-static inline VALUE
-vm_call_iseq_setup_tailcall_0start(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
+static VALUE
+vm_call_iseq_setup_tailcall_0start(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
- return vm_call_iseq_setup_tailcall(th, cfp, ci, 0);
+ return vm_call_iseq_setup_tailcall(th, cfp, calling, ci, cc, 0);
}
static VALUE
@@ -1595,17 +1596,16 @@ vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
}
static VALUE
-vm_call_cfunc_with_frame(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
+vm_call_cfunc_with_frame(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
VALUE val;
- const rb_callable_method_entry_t *me = ci->me;
+ const rb_callable_method_entry_t *me = cc->me;
const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
int len = cfunc->argc;
- /* don't use `ci' after EXEC_EVENT_HOOK because ci can be override */
- VALUE recv = ci->recv;
- rb_block_t *blockptr = ci->blockptr;
- int argc = ci->argc;
+ VALUE recv = calling->recv;
+ rb_block_t *blockptr = calling->blockptr;
+ int argc = calling->argc;
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->owner, me->called_id);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->owner, Qundef);
@@ -1634,15 +1634,15 @@ vm_call_cfunc_with_frame(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_i
#if OPT_CALL_CFUNC_WITHOUT_FRAME
static VALUE
-vm_call_cfunc_latter(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
+vm_call_cfunc_latter(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
VALUE val;
- int argc = ci->argc;
+ int argc = calling->argc;
VALUE *argv = STACK_ADDR_FROM_TOP(argc);
- VALUE recv = ci->recv;
- const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(ci->me);
+ VALUE recv = calling->recv;
+ const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(cc->me);
- th->passed_ci = ci;
+ th->passed_calling = calling;
reg_cfp->sp -= argc + 1;
- ci->aux.inc_sp = argc + 1;
+ cc->aux.inc_sp = argc + 1;
VM_PROFILE_UP(0);
@@ -1667,25 +1667,25 @@ vm_call_cfunc_latter(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_
}
static VALUE
-vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
+vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
VALUE val;
- const rb_callable_method_entry_t *me = ci->me;
+ const rb_callable_method_entry_t *me = cc->me;
int len = vm_method_cfunc_entry(me)->argc;
- VALUE recv = ci->recv;
+ VALUE recv = calling->recv;
- CALLER_SETUP_ARG(reg_cfp, ci);
- if (len >= 0) rb_check_arity(ci->argc, len, len);
+ CALLER_SETUP_ARG(reg_cfp, calling, ci);
+ if (len >= 0) rb_check_arity(calling->argc, len, len);
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->owner, me->called_id);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->owner, Qnil);
- if (!(ci->me->def->flag & METHOD_VISI_PROTECTED) &&
+ if (!(cc->me->def->flag & METHOD_VISI_PROTECTED) &&
!(ci->flag & VM_CALL_ARGS_SPLAT) &&
- !(ci->kw_arg != NULL)) {
+ !(ci->flag & VM_CALL_KWARG)) {
- CI_SET_FASTPATH(ci, vm_call_cfunc_latter, 1);
+ CI_SET_FASTPATH(cc, vm_call_cfunc_latter, 1);
}
- val = vm_call_cfunc_latter(th, reg_cfp, ci);
+ val = vm_call_cfunc_latter(th, reg_cfp, calling, ci, cc);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->owner, val);
RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, me->owner, me->called_id);
@@ -1696,73 +1696,75 @@ vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
void
rb_vm_call_cfunc_push_frame(rb_thread_t *th)
{
- rb_call_info_t *ci = th->passed_ci;
- const rb_callable_method_entry_t *me = ci->me;
+ struct rb_calling_info *calling = th->passed_calling;
+ /* NOTE(review): rb_calling_info carries only recv/argc/blockptr -- it has no
+ `me`, `call`, or `aux.inc_sp` members, and `cc` is not in scope here. This
+ OPT_CALL_CFUNC_WITHOUT_FRAME path must also stash the rb_call_cache
+ (e.g. th->passed_cc) to recover them -- TODO confirm intended design. */
+ const rb_callable_method_entry_t *me = calling->me;
- th->passed_ci = 0;
+ th->passed_calling = 0;
vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC,
- ci->recv, VM_ENVVAL_BLOCK_PTR(ci->blockptr), (VALUE)me /* cref */,
- 0, th->cfp->sp + ci->aux.inc_sp, 1, 0);
+ calling->recv, VM_ENVVAL_BLOCK_PTR(calling->blockptr), (VALUE)me /* cref */,
+ 0, th->cfp->sp + cc->aux.inc_sp, 1, 0);
- if (ci->call != vm_call_general) {
- ci->call = vm_call_cfunc_with_frame;
+ if (calling->call != vm_call_general) {
+ calling->call = vm_call_cfunc_with_frame;
}
}
#else /* OPT_CALL_CFUNC_WITHOUT_FRAME */
static VALUE
-vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
+vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
- CALLER_SETUP_ARG(reg_cfp, ci);
- return vm_call_cfunc_with_frame(th, reg_cfp, ci);
+ CALLER_SETUP_ARG(reg_cfp, calling, ci);
+ return vm_call_cfunc_with_frame(th, reg_cfp, calling, ci, cc);
}
#endif
static VALUE
-vm_call_ivar(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
+vm_call_ivar(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
- VALUE val = vm_getivar(ci->recv, ci->me->def->body.attr.id, 0, ci, 1);
+ VALUE val = vm_getivar(calling->recv, cc->me->def->body.attr.id, NULL, cc, 1);
cfp->sp -= 1;
return val;
}
static VALUE
-vm_call_attrset(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
+vm_call_attrset(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
- VALUE val = vm_setivar(ci->recv, ci->me->def->body.attr.id, *(cfp->sp - 1), 0, ci, 1);
+ VALUE val = vm_setivar(calling->recv, cc->me->def->body.attr.id, *(cfp->sp - 1), NULL, cc, 1);
cfp->sp -= 2;
return val;
}
static inline VALUE
-vm_call_bmethod_body(rb_thread_t *th, rb_call_info_t *ci, const VALUE *argv)
+vm_call_bmethod_body(rb_thread_t *th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv)
{
rb_proc_t *proc;
VALUE val;
/* control block frame */
- th->passed_bmethod_me = ci->me;
- GetProcPtr(ci->me->def->body.proc, proc);
- val = vm_invoke_bmethod(th, proc, ci->recv, ci->argc, argv, ci->blockptr);
+ th->passed_bmethod_me = cc->me;
+ GetProcPtr(cc->me->def->body.proc, proc);
+ val = vm_invoke_bmethod(th, proc, calling->recv, calling->argc, argv, calling->blockptr);
return val;
}
static VALUE
-vm_call_bmethod(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
+vm_call_bmethod(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
VALUE *argv;
+ int argc;
- CALLER_SETUP_ARG(cfp, ci);
+ CALLER_SETUP_ARG(cfp, calling, ci);
- argv = ALLOCA_N(VALUE, ci->argc);
- MEMCPY(argv, cfp->sp - ci->argc, VALUE, ci->argc);
- cfp->sp += - ci->argc - 1;
+ argc = calling->argc;
+ argv = ALLOCA_N(VALUE, argc);
+ MEMCPY(argv, cfp->sp - argc, VALUE, argc);
+ cfp->sp += - argc - 1;
- return vm_call_bmethod_body(th, ci, argv);
+ return vm_call_bmethod_body(th, calling, ci, cc, argv);
}
static enum method_missing_reason
-ci_missing_reason(const rb_call_info_t *ci)
+ci_missing_reason(const struct rb_call_info *ci)
{
enum method_missing_reason stat = MISSING_NOENTRY;
if (ci->flag & VM_CALL_VCALL) stat |= MISSING_VCALL;
@@ -1776,97 +1778,116 @@ __forceinline
#else
inline
#endif
-VALUE vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci);
+VALUE vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
static VALUE
-vm_call_opt_send(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
+vm_call_opt_send(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *orig_ci, struct rb_call_cache *orig_cc)
{
int i;
VALUE sym;
- rb_call_info_t ci_entry;
+ struct rb_call_info *ci;
+ struct rb_call_info_with_kwarg ci_entry;
+ struct rb_call_cache cc_entry, *cc;
- CALLER_SETUP_ARG(reg_cfp, ci);
+ CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
- i = ci->argc - 1;
+ i = calling->argc - 1;
- if (ci->argc == 0) {
+ if (calling->argc == 0) {
rb_raise(rb_eArgError, "no method name given");
}
- ci_entry = *ci; /* copy ci entry */
- ci = &ci_entry;
- ci->kw_arg = NULL; /* TODO: delegate kw_arg without making a Hash object */
+ /* setup new ci */
+ if (orig_ci->flag & VM_CALL_KWARG) {
+ ci = (struct rb_call_info *)&ci_entry;
+ ci_entry = *(struct rb_call_info_with_kwarg *)orig_ci;
+ }
+ else {
+ ci = &ci_entry.ci;
+ ci_entry.ci = *orig_ci;
+ }
+ ci->flag = ci->flag & ~VM_CALL_KWARG; /* TODO: delegate kw_arg without making a Hash object */
+
+ /* setup new cc */
+ cc_entry = *orig_cc;
+ cc = &cc_entry;
sym = TOPN(i);
if (!(ci->mid = rb_check_id(&sym))) {
- if (rb_method_basic_definition_p(CLASS_OF(ci->recv), idMethodMissing)) {
- VALUE exc = make_no_method_exception(rb_eNoMethodError, NULL, ci->recv, rb_long2int(ci->argc), &TOPN(i));
+ if (rb_method_basic_definition_p(CLASS_OF(calling->recv), idMethodMissing)) {
+ VALUE exc = make_no_method_exception(rb_eNoMethodError, NULL, calling->recv, rb_long2int(calling->argc), &TOPN(i));
rb_exc_raise(exc);
}
TOPN(i) = rb_str_intern(sym);
ci->mid = idMethodMissing;
- th->method_missing_reason = ci->aux.method_missing_reason = ci_missing_reason(ci);
+ th->method_missing_reason = cc->aux.method_missing_reason = ci_missing_reason(ci);
}
else {
/* shift arguments */
if (i > 0) {
MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
}
- ci->argc -= 1;
+ calling->argc -= 1;
DEC_SP(1);
}
- ci->me = rb_callable_method_entry_without_refinements(CLASS_OF(ci->recv), ci->mid);
+ cc->me = rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), ci->mid);
ci->flag = VM_CALL_FCALL | VM_CALL_OPT_SEND;
-
- return vm_call_method(th, reg_cfp, ci);
+ return vm_call_method(th, reg_cfp, calling, ci, cc);
}
static VALUE
-vm_call_opt_call(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
+vm_call_opt_call(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
rb_proc_t *proc;
int argc;
VALUE *argv;
- CALLER_SETUP_ARG(cfp, ci);
+ CALLER_SETUP_ARG(cfp, calling, ci);
- argc = ci->argc;
+ argc = calling->argc;
argv = ALLOCA_N(VALUE, argc);
- GetProcPtr(ci->recv, proc);
+ GetProcPtr(calling->recv, proc);
MEMCPY(argv, cfp->sp - argc, VALUE, argc);
cfp->sp -= argc + 1;
- return rb_vm_invoke_proc(th, proc, argc, argv, ci->blockptr);
+ return rb_vm_invoke_proc(th, proc, argc, argv, calling->blockptr);
}
static VALUE
-vm_call_method_missing(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
+vm_call_method_missing(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *orig_ci, struct rb_call_cache *orig_cc)
{
- VALUE *argv = STACK_ADDR_FROM_TOP(ci->argc);
- rb_call_info_t ci_entry;
+ VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
+ struct rb_call_info ci_entry;
+ const struct rb_call_info *ci;
+ struct rb_call_cache cc_entry, *cc;
+ unsigned int argc;
- CALLER_SETUP_ARG(reg_cfp, ci);
+ CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
+ argc = calling->argc+1;
ci_entry.flag = VM_CALL_FCALL | VM_CALL_OPT_SEND;
- ci_entry.argc = ci->argc+1;
ci_entry.mid = idMethodMissing;
- ci_entry.blockptr = ci->blockptr;
- ci_entry.recv = ci->recv;
- ci_entry.me = rb_callable_method_entry(CLASS_OF(ci_entry.recv), idMethodMissing);
- ci_entry.kw_arg = NULL;
+ ci_entry.orig_argc = argc;
+ ci = &ci_entry;
+
+ cc_entry = *orig_cc;
+ cc_entry.me = rb_callable_method_entry(CLASS_OF(calling->recv), idMethodMissing);
+ cc = &cc_entry;
+
+ calling->argc = argc;
/* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
- if (ci->argc > 0) {
- MEMMOVE(argv+1, argv, VALUE, ci->argc);
+ if (argc > 1) {
+ MEMMOVE(argv+1, argv, VALUE, argc-1);
}
- argv[0] = ID2SYM(ci->mid);
+ argv[0] = ID2SYM(orig_ci->mid);
INC_SP(1);
- th->method_missing_reason = ci->aux.method_missing_reason;
- return vm_call_method(th, reg_cfp, &ci_entry);
+ th->method_missing_reason = orig_cc->aux.method_missing_reason;
+ return vm_call_method(th, reg_cfp, calling, ci, cc);
}
static inline VALUE
@@ -1879,7 +1900,7 @@ find_refinement(VALUE refinements, VALUE klass)
}
static int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
-static VALUE vm_call_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci);
+static VALUE vm_call_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
static rb_control_frame_t *
current_method_entry(rb_thread_t *th, rb_control_frame_t *cfp)
@@ -1975,88 +1996,101 @@ __forceinline
inline
#endif
VALUE
-vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
+vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
int enable_fastpath = 1;
- rb_call_info_t ci_temp;
-
- VM_ASSERT(callable_method_entry_p(ci->me));
+ struct rb_call_info_with_kwarg ci_temp;
+ struct rb_call_cache cc_temp;
start_method_dispatch:
- VM_ASSERT(callable_method_entry_p(ci->me));
- if (ci->me != 0) {
- if (LIKELY(METHOD_ENTRY_VISI(ci->me) == METHOD_VISI_PUBLIC && METHOD_ENTRY_SAFE(ci->me) == 0)) {
+ VM_ASSERT(callable_method_entry_p(cc->me));
+
+ if (cc->me != NULL) {
+ if (LIKELY(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PUBLIC && METHOD_ENTRY_SAFE(cc->me) == 0)) {
VALUE klass;
normal_method_dispatch:
- VM_ASSERT(callable_method_entry_p(ci->me));
- switch (ci->me->def->type) {
+ VM_ASSERT(callable_method_entry_p(cc->me));
+
+ switch (cc->me->def->type) {
case VM_METHOD_TYPE_ISEQ:{
- CI_SET_FASTPATH(ci, vm_call_iseq_setup, enable_fastpath);
- return vm_call_iseq_setup(th, cfp, ci);
+ CI_SET_FASTPATH(cc, vm_call_iseq_setup, enable_fastpath);
+ return vm_call_iseq_setup(th, cfp, calling, ci, cc);
}
case VM_METHOD_TYPE_NOTIMPLEMENTED:
case VM_METHOD_TYPE_CFUNC:
- CI_SET_FASTPATH(ci, vm_call_cfunc, enable_fastpath);
- return vm_call_cfunc(th, cfp, ci);
+ CI_SET_FASTPATH(cc, vm_call_cfunc, enable_fastpath);
+ return vm_call_cfunc(th, cfp, calling, ci, cc);
case VM_METHOD_TYPE_ATTRSET:{
- CALLER_SETUP_ARG(cfp, ci);
- rb_check_arity(ci->argc, 1, 1);
- ci->aux.index = 0;
- CI_SET_FASTPATH(ci, vm_call_attrset, enable_fastpath && !(ci->flag & VM_CALL_ARGS_SPLAT));
- return vm_call_attrset(th, cfp, ci);
+ CALLER_SETUP_ARG(cfp, calling, ci);
+ rb_check_arity(calling->argc, 1, 1);
+ cc->aux.index = 0;
+ CI_SET_FASTPATH(cc, vm_call_attrset, enable_fastpath && !(ci->flag & VM_CALL_ARGS_SPLAT));
+ return vm_call_attrset(th, cfp, calling, ci, cc);
}
case VM_METHOD_TYPE_IVAR:{
- CALLER_SETUP_ARG(cfp, ci);
- rb_check_arity(ci->argc, 0, 0);
- ci->aux.index = 0;
- CI_SET_FASTPATH(ci, vm_call_ivar, enable_fastpath && !(ci->flag & VM_CALL_ARGS_SPLAT));
- return vm_call_ivar(th, cfp, ci);
+ CALLER_SETUP_ARG(cfp, calling, ci);
+ rb_check_arity(calling->argc, 0, 0);
+ cc->aux.index = 0;
+ CI_SET_FASTPATH(cc, vm_call_ivar, enable_fastpath && !(ci->flag & VM_CALL_ARGS_SPLAT));
+ return vm_call_ivar(th, cfp, calling, ci, cc);
}
case VM_METHOD_TYPE_MISSING:{
- ci->aux.method_missing_reason = 0;
- CI_SET_FASTPATH(ci, vm_call_method_missing, enable_fastpath);
- return vm_call_method_missing(th, cfp, ci);
+ cc->aux.method_missing_reason = 0;
+ CI_SET_FASTPATH(cc, vm_call_method_missing, enable_fastpath);
+ return vm_call_method_missing(th, cfp, calling, ci, cc);
}
case VM_METHOD_TYPE_BMETHOD:{
- CI_SET_FASTPATH(ci, vm_call_bmethod, enable_fastpath);
- return vm_call_bmethod(th, cfp, ci);
+ CI_SET_FASTPATH(cc, vm_call_bmethod, enable_fastpath);
+ return vm_call_bmethod(th, cfp, calling, ci, cc);
}
- case VM_METHOD_TYPE_ZSUPER:{
- klass = ci->me->owner;
- klass = RCLASS_ORIGIN(klass);
- zsuper_method_dispatch:
- klass = RCLASS_SUPER(klass);
- if (!klass) {
- ci->me = 0;
- goto start_method_dispatch;
- }
- ci_temp = *ci;
- ci = &ci_temp;
+ case VM_METHOD_TYPE_ZSUPER:
+ {
+ klass = cc->me->owner;
+ klass = RCLASS_ORIGIN(klass);
+
+ zsuper_method_dispatch:
+ klass = RCLASS_SUPER(klass);
+ if (!klass) {
+ cc->me = NULL;
+ goto start_method_dispatch;
+ }
+ else {
+ if (ci->flag & VM_CALL_KWARG) {
+ ci_temp = *(struct rb_call_info_with_kwarg *)ci;
+ }
+ else {
+ ci_temp.ci = *ci;
+ }
- ci->me = rb_callable_method_entry(klass, ci->mid);
+ ci = &ci_temp.ci;
+ cc_temp = *cc;
+ cc = &cc_temp;
+ cc->me = rb_callable_method_entry(klass, ci->mid);
- if (ci->me != 0) {
- goto normal_method_dispatch;
- }
- else {
- goto start_method_dispatch;
+ if (cc->me != NULL) {
+ goto normal_method_dispatch;
+ }
+ else {
+ goto start_method_dispatch;
+ }
+ }
}
- }
case VM_METHOD_TYPE_ALIAS:
- ci->me = aliased_callable_method_entry(ci->me);
+ cc->me = aliased_callable_method_entry(cc->me);
+ VM_ASSERT(cc->me != NULL);
goto normal_method_dispatch;
case VM_METHOD_TYPE_OPTIMIZED:{
- switch (ci->me->def->body.optimize_type) {
+ switch (cc->me->def->body.optimize_type) {
case OPTIMIZED_METHOD_TYPE_SEND:
- CI_SET_FASTPATH(ci, vm_call_opt_send, enable_fastpath);
- return vm_call_opt_send(th, cfp, ci);
+ CI_SET_FASTPATH(cc, vm_call_opt_send, enable_fastpath);
+ return vm_call_opt_send(th, cfp, calling, ci, cc);
case OPTIMIZED_METHOD_TYPE_CALL:
- CI_SET_FASTPATH(ci, vm_call_opt_call, enable_fastpath);
- return vm_call_opt_call(th, cfp, ci);
+ CI_SET_FASTPATH(cc, vm_call_opt_call, enable_fastpath);
+ return vm_call_opt_call(th, cfp, calling, ci, cc);
default:
rb_bug("vm_call_method: unsupported optimized method type (%d)",
- ci->me->def->body.optimize_type);
+ cc->me->def->body.optimize_type);
}
break;
}
@@ -2066,71 +2100,71 @@ vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
const rb_cref_t *cref = rb_vm_get_cref(cfp->ep);
VALUE refinements = cref ? CREF_REFINEMENTS(cref) : Qnil;
VALUE refinement;
- const rb_callable_method_entry_t *me;
+ const rb_callable_method_entry_t *ref_me;
- refinement = find_refinement(refinements, ci->me->owner);
+ refinement = find_refinement(refinements, cc->me->owner);
if (NIL_P(refinement)) {
goto no_refinement_dispatch;
}
- me = rb_callable_method_entry(refinement, ci->mid);
- if (me) {
- if (ci->call == vm_call_super_method) {
+ ref_me = rb_callable_method_entry(refinement, ci->mid);
+ if (ref_me) {
+ if (cc->call == vm_call_super_method) {
const rb_control_frame_t *top_cfp = current_method_entry(th, cfp);
const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
- if (top_me && rb_method_definition_eq(me->def, top_me->def)) {
+ if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
goto no_refinement_dispatch;
}
}
- ci->me = me;
- if (me->def->type != VM_METHOD_TYPE_REFINED) {
+ cc->me = ref_me;
+ if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
goto start_method_dispatch;
}
}
else {
- ci->me = 0;
+ cc->me = 0;
goto start_method_dispatch;
}
no_refinement_dispatch:
- if (ci->me->def->body.refined.orig_me) {
- ci->me = refined_method_callable_without_refinement(ci->me);
+ if (cc->me->def->body.refined.orig_me) {
+ cc->me = refined_method_callable_without_refinement(cc->me);
- if (UNDEFINED_METHOD_ENTRY_P(ci->me)) {
- ci->me = 0;
+ if (UNDEFINED_METHOD_ENTRY_P(cc->me)) {
+ cc->me = 0;
}
goto start_method_dispatch;
}
else {
- klass = ci->me->owner;
+ klass = cc->me->owner;
goto zsuper_method_dispatch;
}
}
}
- rb_bug("vm_call_method: unsupported method type (%d)", ci->me->def->type);
+ rb_bug("vm_call_method: unsupported method type (%d)", cc->me->def->type);
}
else {
int safe;
- if (!(ci->flag & VM_CALL_FCALL) && (METHOD_ENTRY_VISI(ci->me) == METHOD_VISI_PRIVATE)) {
+ if (!(ci->flag & VM_CALL_FCALL) && (METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PRIVATE)) {
enum method_missing_reason stat = MISSING_PRIVATE;
- bp();
if (ci->flag & VM_CALL_VCALL) stat |= MISSING_VCALL;
- ci->aux.method_missing_reason = stat;
- CI_SET_FASTPATH(ci, vm_call_method_missing, 1);
- return vm_call_method_missing(th, cfp, ci);
+ cc->aux.method_missing_reason = stat;
+ CI_SET_FASTPATH(cc, vm_call_method_missing, 1);
+ return vm_call_method_missing(th, cfp, calling, ci, cc);
}
- else if (!(ci->flag & VM_CALL_OPT_SEND) && (METHOD_ENTRY_VISI(ci->me) == METHOD_VISI_PROTECTED)) {
- enable_fastpath = 0;
- if (!rb_obj_is_kind_of(cfp->self, ci->me->defined_class)) {
- ci->aux.method_missing_reason = MISSING_PROTECTED;
- return vm_call_method_missing(th, cfp, ci);
+ else if (!(ci->flag & VM_CALL_OPT_SEND) && (METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED)) {
+ if (!rb_obj_is_kind_of(cfp->self, cc->me->defined_class)) {
+ cc->aux.method_missing_reason = MISSING_PROTECTED;
+ return vm_call_method_missing(th, cfp, calling, ci, cc);
}
else {
+ enable_fastpath = 0;
+ VM_ASSERT(cc->me != NULL);
goto normal_method_dispatch;
}
}
- else if ((safe = METHOD_ENTRY_SAFE(ci->me)) > th->safe_level && safe > 2) {
+ else if ((safe = METHOD_ENTRY_SAFE(cc->me)) > th->safe_level && safe > 2) {
rb_raise(rb_eSecurityError, "calling insecure method: %"PRIsVALUE, rb_id2str(ci->mid));
}
else {
@@ -2143,13 +2177,13 @@ vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
const int stat = ci_missing_reason(ci);
if (ci->mid == idMethodMissing) {
rb_control_frame_t *reg_cfp = cfp;
- VALUE *argv = STACK_ADDR_FROM_TOP(ci->argc);
- rb_raise_method_missing(th, ci->argc, argv, ci->recv, stat);
+ VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
+ rb_raise_method_missing(th, calling->argc, argv, calling->recv, stat);
}
else {
- ci->aux.method_missing_reason = stat;
- CI_SET_FASTPATH(ci, vm_call_method_missing, 1);
- return vm_call_method_missing(th, cfp, ci);
+ cc->aux.method_missing_reason = stat;
+ CI_SET_FASTPATH(cc, vm_call_method_missing, 1);
+ return vm_call_method_missing(th, cfp, calling, ci, cc);
}
}
@@ -2157,15 +2191,15 @@ vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
}
static VALUE
-vm_call_general(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
+vm_call_general(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
- return vm_call_method(th, reg_cfp, ci);
+ return vm_call_method(th, reg_cfp, calling, ci, cc);
}
static VALUE
-vm_call_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
+vm_call_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
- return vm_call_method(th, reg_cfp, ci);
+ return vm_call_method(th, reg_cfp, calling, ci, cc);
}
/* super */
@@ -2188,10 +2222,11 @@ vm_super_outside(void)
}
static void
-vm_search_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
+vm_search_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp,
+ struct rb_calling_info *calling, struct rb_call_info *ci, struct rb_call_cache *cc)
{
VALUE current_defined_class, klass;
- VALUE sigval = TOPN(ci->argc);
+ VALUE sigval = TOPN(calling->argc);
const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
if (!me) {
@@ -2207,14 +2242,14 @@ vm_search_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_inf
if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
BUILTIN_TYPE(current_defined_class) != T_ICLASS && /* bound UnboundMethod */
!FL_TEST(current_defined_class, RMODULE_INCLUDED_INTO_REFINEMENT) &&
- !rb_obj_is_kind_of(ci->recv, current_defined_class)) {
+ !rb_obj_is_kind_of(calling->recv, current_defined_class)) {
VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
RBASIC(current_defined_class)->klass : current_defined_class;
rb_raise(rb_eTypeError,
"self has wrong type to call super in this context: "
"%"PRIsVALUE" (expected %"PRIsVALUE")",
- rb_obj_class(ci->recv), m);
+ rb_obj_class(calling->recv), m);
}
if (me->def->type == VM_METHOD_TYPE_BMETHOD && !sigval) {
@@ -2229,13 +2264,13 @@ vm_search_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_inf
if (!klass) {
/* bound instance method of module */
- ci->aux.method_missing_reason = MISSING_SUPER;
- CI_SET_FASTPATH(ci, vm_call_method_missing, 1);
+ cc->aux.method_missing_reason = MISSING_SUPER;
+ CI_SET_FASTPATH(cc, vm_call_method_missing, 1);
}
else {
/* TODO: use inline cache */
- ci->me = rb_callable_method_entry(klass, ci->mid);
- CI_SET_FASTPATH(ci, vm_call_super_method, 1);
+ cc->me = rb_callable_method_entry(klass, ci->mid);
+ CI_SET_FASTPATH(cc, vm_call_super_method, 1);
}
}
@@ -2299,27 +2334,32 @@ vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block, VALUE self,
}
static int
-vm_yield_callee_setup_arg(rb_thread_t *th, rb_call_info_t *ci, const rb_iseq_t *iseq, VALUE *argv, enum arg_setup_type arg_setup_type)
+vm_yield_callee_setup_arg(rb_thread_t *th, struct rb_calling_info *calling,
+ const struct rb_call_info *ci, const rb_iseq_t *iseq,
+ VALUE *argv, enum arg_setup_type arg_setup_type)
{
- return vm_callee_setup_block_arg(th, ci, iseq, argv, arg_setup_type);
+ return vm_callee_setup_block_arg(th, calling, ci, iseq, argv, arg_setup_type);
}
static int
vm_yield_setup_args(rb_thread_t *th, const rb_iseq_t *iseq, const int argc, VALUE *argv, const rb_block_t *blockptr, enum arg_setup_type arg_setup_type)
{
- rb_call_info_t ci_entry;
- ci_entry.argc = argc;
- ci_entry.blockptr = (rb_block_t *)blockptr;
+ struct rb_calling_info calling_entry, *calling;
+ struct rb_call_info ci_entry, *ci;
+
+ calling = &calling_entry;
+ calling->argc = argc;
+ calling->blockptr = (rb_block_t *)blockptr;
+
ci_entry.flag = 0;
- ci_entry.kw_arg = NULL;
- ci_entry.me = NULL;
+ ci = &ci_entry;
- return vm_yield_callee_setup_arg(th, &ci_entry, iseq, argv, arg_setup_type);
+ return vm_yield_callee_setup_arg(th, calling, ci, iseq, argv, arg_setup_type);
}
/* ruby iseq -> ruby block iseq */
static VALUE
-vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
+vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci)
{
const rb_block_t *block = VM_CF_BLOCK_PTR(reg_cfp);
const rb_iseq_t *iseq;
@@ -2333,8 +2373,8 @@ vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci
if (!RUBY_VM_IFUNC_P(iseq)) {
const int arg_size = iseq->body->param.size;
int is_lambda = block_proc_is_lambda(block->proc);
- VALUE * const rsp = GET_SP() - ci->argc;
- int opt_pc = vm_yield_callee_setup_arg(th, ci, iseq, rsp, is_lambda ? arg_setup_lambda : arg_setup_block);
+ VALUE * const rsp = GET_SP() - calling->argc;
+ int opt_pc = vm_yield_callee_setup_arg(th, calling, ci, iseq, rsp, is_lambda ? arg_setup_lambda : arg_setup_block);
SET_SP(rsp);
@@ -2351,8 +2391,8 @@ vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci
else {
VALUE val;
int argc;
- CALLER_SETUP_ARG(th->cfp, ci);
- argc = ci->argc;
+ CALLER_SETUP_ARG(th->cfp, calling, ci);
+ argc = calling->argc;
val = vm_yield_with_cfunc(th, block, block->self, argc, STACK_ADDR_FROM_TOP(argc), 0);
POPN(argc); /* TODO: should put before C/yield? */
return val;
diff --git a/vm_insnhelper.h b/vm_insnhelper.h
index de3b25f..cad07da 100644
--- a/vm_insnhelper.h
+++ b/vm_insnhelper.h
@@ -162,8 +162,8 @@ enum vm_regan_acttype {
} \
} while (0)
-#define CALL_METHOD(ci) do { \
- VALUE v = (*(ci)->call)(th, GET_CFP(), (ci)); \
+#define CALL_METHOD(calling, ci, cc) do { \
+ VALUE v = (*(cc)->call)(th, GET_CFP(), (calling), (ci), (cc)); \
if (v == Qundef) { \
RESTORE_REGS(); \
NEXT_INSN(); \
@@ -182,8 +182,8 @@ enum vm_regan_acttype {
#endif
#if OPT_CALL_FASTPATH
-#define CI_SET_FASTPATH(ci, func, enabled) do { \
- if (LIKELY(enabled)) ((ci)->call = (func)); \
+#define CI_SET_FASTPATH(cc, func, enabled) do { \
+ if (LIKELY(enabled)) ((cc)->call = (func)); \
} while (0)
#else
#define CI_SET_FASTPATH(ci, func, enabled) /* do nothing */
@@ -213,9 +213,11 @@ enum vm_regan_acttype {
#endif
#define CALL_SIMPLE_METHOD(recv_) do { \
- ci->blockptr = 0; ci->argc = ci->orig_argc; \
- vm_search_method(ci, ci->recv = (recv_)); \
- CALL_METHOD(ci); \
+ struct rb_calling_info calling; \
+ calling.blockptr = NULL; \
+ calling.argc = ci->orig_argc; \
+ vm_search_method(ci, cc, calling.recv = (recv_)); \
+ CALL_METHOD(&calling, ci, cc); \
} while (0)
#define NEXT_CLASS_SERIAL() (++ruby_vm_class_serial)