-rw-r--r--  ChangeLog                          20
-rw-r--r--  compile.c                         111
-rw-r--r--  cont.c                             54
-rw-r--r--  eval.c                             35
-rw-r--r--  eval_intern.h                      22
-rw-r--r--  gc.c                               13
-rw-r--r--  insns.def                          18
-rw-r--r--  internal.h                         10
-rw-r--r--  iseq.c                             21
-rw-r--r--  load.c                              2
-rw-r--r--  parse.y                             6
-rw-r--r--  proc.c                            519
-rw-r--r--  process.c                           1
-rw-r--r--  ruby.c                             11
-rw-r--r--  string.c                            4
-rw-r--r--  thread.c                            6
-rw-r--r--  tool/mk_call_iseq_optimized.rb      6
-rw-r--r--  vm.c                              714
-rw-r--r--  vm_args.c                         103
-rw-r--r--  vm_core.h                         530
-rw-r--r--  vm_dump.c                          15
-rw-r--r--  vm_eval.c                         185
-rw-r--r--  vm_insnhelper.c                   530
-rw-r--r--  vm_insnhelper.h                     8
-rw-r--r--  vm_method.c                         4
25 files changed, 1880 insertions, 1068 deletions
diff --git a/ChangeLog b/ChangeLog
index 7837d59fc5..402bc597dd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,23 @@
+Thu Jul 28 19:53:21 2016 Koichi Sasada <ko1@atdot.net>
+
+ * vm_core.h: revisit the structure of frame, block and env.
+ [Bug #12628]
+
+ This patch introduces many changes.
+
+ * Introduce the concept of a "Block Handler (BH)" to represent
+ passed blocks.
+
+ * Move rb_control_frame_t::flag to ep[0] (as a special local
+ variable). These flags represent not only the frame type but
+ also env flags such as "escaped".
+
+ * Rename `rb_block_t` to `struct rb_block`.
+
+ * Make Proc, Binding and RubyVM::Env objects wb-protected.
+
+ Check [Bug #12628] for more details.
+
Thu Jul 28 15:05:12 2016 Nobuyoshi Nakada <nobu@ruby-lang.org>
* include/ruby/ruby.h (ruby_fl_type): use __extension__ to get rid
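Editor's note (not part of the patch): a "block handler" is a single VALUE kept in the
frame's specval slot that records what kind of block was passed: VM_BLOCK_HANDLER_NONE
when there is none, the captured block for literal iseq/ifunc blocks, a Symbol for
`&:sym`, or a Proc object. The frame flags that used to live in rb_control_frame_t::flag
now sit at ep[0] (VM_ENV_DATA_INDEX_FLAGS), as the cfunc_proc_new() hunk in proc.c below
shows. The following is a minimal sketch of how a consumer dispatches on a block handler,
using only accessors this patch adds to vm_core.h; the helper name itself is hypothetical
and not from the commit.

    /* editorial sketch, not a hunk from this commit */
    static void
    inspect_passed_block(rb_thread_t *th)
    {
        VALUE block_handler = rb_vm_frame_block_handler(th->cfp);

        if (block_handler == VM_BLOCK_HANDLER_NONE) {
            return; /* no block was passed to the current frame */
        }

        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:   /* literal block compiled to an iseq */
          case block_handler_type_ifunc:  /* block backed by a C function */
            /* VM_BH_TO_CAPT_BLOCK() yields a struct rb_captured_block *
             * carrying self, code and ep, as proc_new() below uses it. */
            (void)VM_BH_TO_CAPT_BLOCK(block_handler);
            break;
          case block_handler_type_symbol: /* a Symbol passed as `&:sym` */
            (void)VM_BH_TO_SYMBOL(block_handler);
            break;
          case block_handler_type_proc:   /* an already existing Proc object */
            (void)VM_BH_TO_PROC(block_handler);
            break;
        }
    }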
diff --git a/compile.c b/compile.c
index 33e0ba37f6..dac26c6ce0 100644
--- a/compile.c
+++ b/compile.c
@@ -180,6 +180,7 @@ r_value(VALUE value)
#define debug_compile(msg, v) (v)
#endif
+#define LVAR_ERRINFO (1)
/* create new label */
#define NEW_LABEL(l) new_label_body(iseq, (l))
@@ -264,6 +265,16 @@ r_value(VALUE value)
} \
} while (0)
+#define ADD_GETLOCAL(seq, line, idx, level) \
+ do { \
+ ADD_INSN2((seq), (line), getlocal, INT2FIX((idx) + VM_ENV_DATA_SIZE - 1), INT2FIX(level)); \
+ } while (0)
+
+#define ADD_SETLOCAL(seq, line, idx, level) \
+ do { \
+ ADD_INSN2((seq), (line), setlocal, INT2FIX((idx) + VM_ENV_DATA_SIZE - 1), INT2FIX(level)); \
+ } while (0)
+
/* add label */
#define ADD_LABEL(seq, label) \
ADD_ELEM((seq), (LINK_ELEMENT *) (label))
@@ -646,7 +657,7 @@ rb_iseq_compile_node(rb_iseq_t *iseq, NODE *node)
}
if (iseq->body->type == ISEQ_TYPE_RESCUE || iseq->body->type == ISEQ_TYPE_ENSURE) {
- ADD_INSN2(ret, 0, getlocal, INT2FIX(2), INT2FIX(0));
+ ADD_GETLOCAL(ret, 0, LVAR_ERRINFO, 0);
ADD_INSN1(ret, 0, throw, INT2FIX(0) /* continue throw */ );
}
else {
@@ -1176,7 +1187,6 @@ iseq_set_exception_local_table(rb_iseq_t *iseq)
CONST_ID(id_dollar_bang, "#$!");
iseq->body->local_table_size = 1;
- iseq->body->local_size = iseq->body->local_table_size + 1;
ids[0] = id_dollar_bang;
iseq->body->local_table = ids;
return COMPILE_OK;
@@ -1237,7 +1247,7 @@ get_dyna_var_idx(const rb_iseq_t *iseq, ID id, int *level, int *ls)
}
*level = lv;
- *ls = iseq->body->local_size;
+ *ls = iseq->body->local_table_size;
return idx;
}
@@ -1467,10 +1477,10 @@ iseq_set_arguments(rb_iseq_t *iseq, LINK_ANCHOR *optargs, NODE *node_args)
static int
iseq_set_local_table(rb_iseq_t *iseq, const ID *tbl)
{
- int size;
+ unsigned int size;
if (tbl) {
- size = (int)*tbl;
+ size = (unsigned int)*tbl;
tbl++;
}
else {
@@ -1482,18 +1492,9 @@ iseq_set_local_table(rb_iseq_t *iseq, const ID *tbl)
MEMCPY(ids, tbl, ID, size);
iseq->body->local_table = ids;
}
+ iseq->body->local_table_size = size;
- iseq->body->local_size = iseq->body->local_table_size = size;
- iseq->body->local_size += 1;
- /*
- if (lfp == dfp ) { // top, class, method
- dfp[-1]: svar
- else { // block
- dfp[-1]: cref
- }
- */
-
- debugs("iseq_set_local_table: %d, %d\n", iseq->body->local_size, iseq->body->local_table_size);
+ debugs("iseq_set_local_table: %u\n", iseq->body->local_table_size);
return COMPILE_OK;
}
@@ -4470,7 +4471,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
switch (nd_type(narg)) {
case NODE_ARRAY:
while (narg) {
- ADD_INSN2(ret, line, getlocal, INT2FIX(2), INT2FIX(0));
+ ADD_GETLOCAL(ret, line, LVAR_ERRINFO, 0);
COMPILE(ret, "rescue arg", narg->nd_head);
ADD_INSN1(ret, line, checkmatch, INT2FIX(VM_CHECKMATCH_TYPE_RESCUE));
ADD_INSNL(ret, line, branchif, label_hit);
@@ -4480,7 +4481,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
case NODE_SPLAT:
case NODE_ARGSCAT:
case NODE_ARGSPUSH:
- ADD_INSN2(ret, line, getlocal, INT2FIX(2), INT2FIX(0));
+ ADD_GETLOCAL(ret, line, LVAR_ERRINFO, 0);
COMPILE(ret, "rescue/cond splat", narg);
ADD_INSN1(ret, line, checkmatch, INT2FIX(VM_CHECKMATCH_TYPE_RESCUE | VM_CHECKMATCH_ARRAY));
ADD_INSNL(ret, line, branchif, label_hit);
@@ -4490,7 +4491,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
}
}
else {
- ADD_INSN2(ret, line, getlocal, INT2FIX(2), INT2FIX(0));
+ ADD_GETLOCAL(ret, line, LVAR_ERRINFO, 0);
ADD_INSN1(ret, line, putobject, rb_eStandardError);
ADD_INSN1(ret, line, checkmatch, INT2FIX(VM_CHECKMATCH_TYPE_RESCUE));
ADD_INSNL(ret, line, branchif, label_hit);
@@ -4577,7 +4578,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
case NODE_LASGN:{
ID id = node->nd_vid;
- int idx = iseq->body->local_iseq->body->local_size - get_local_var_idx(iseq, id);
+ int idx = iseq->body->local_iseq->body->local_table_size - get_local_var_idx(iseq, id);
debugs("lvar: %"PRIsVALUE" idx: %d\n", rb_id2str(id), idx);
COMPILE(ret, "rvalue", node->nd_value);
@@ -4585,8 +4586,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
if (!poped) {
ADD_INSN(ret, line, dup);
}
- ADD_INSN2(ret, line, setlocal, INT2FIX(idx), INT2FIX(get_lvar_level(iseq)));
-
+ ADD_SETLOCAL(ret, line, idx, get_lvar_level(iseq));
break;
}
case NODE_DASGN:
@@ -4605,8 +4605,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
compile_bug(ERROR_ARGS "NODE_DASGN(_CURR): unknown id (%"PRIsVALUE")",
rb_id2str(node->nd_vid));
}
-
- ADD_INSN2(ret, line, setlocal, INT2FIX(ls - idx), INT2FIX(lv));
+ ADD_SETLOCAL(ret, line, ls - idx, lv);
break;
}
case NODE_GASGN:{
@@ -5192,24 +5191,25 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
/* normal arguments */
for (i = 0; i < liseq->body->param.lead_num; i++) {
- int idx = liseq->body->local_size - i;
- ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
+ int idx = liseq->body->local_table_size - i;
+ ADD_GETLOCAL(args, line, idx, lvar_level);
}
if (liseq->body->param.flags.has_opt) {
/* optional arguments */
int j;
for (j = 0; j < liseq->body->param.opt_num; j++) {
- int idx = liseq->body->local_size - (i + j);
- ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
+ int idx = liseq->body->local_table_size - (i + j);
+ ADD_GETLOCAL(args, line, idx, lvar_level);
}
i += j;
argc = i;
}
if (liseq->body->param.flags.has_rest) {
/* rest argument */
- int idx = liseq->body->local_size - liseq->body->param.rest_start;
- ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
+ int idx = liseq->body->local_table_size - liseq->body->param.rest_start;
+ ADD_GETLOCAL(args, line, idx, lvar_level);
+
argc = liseq->body->param.rest_start + 1;
flag |= VM_CALL_ARGS_SPLAT;
}
@@ -5221,8 +5221,8 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
if (liseq->body->param.flags.has_rest) {
int j;
for (j=0; j<post_len; j++) {
- int idx = liseq->body->local_size - (post_start + j);
- ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
+ int idx = liseq->body->local_table_size - (post_start + j);
+ ADD_GETLOCAL(args, line, idx, lvar_level);
}
ADD_INSN1(args, line, newarray, INT2FIX(j));
ADD_INSN (args, line, concatarray);
@@ -5231,21 +5231,22 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
else {
int j;
for (j=0; j<post_len; j++) {
- int idx = liseq->body->local_size - (post_start + j);
- ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
+ int idx = liseq->body->local_table_size - (post_start + j);
+ ADD_GETLOCAL(args, line, idx, lvar_level);
}
argc = post_len + post_start;
}
}
if (liseq->body->param.flags.has_kw) { /* TODO: support keywords */
- int local_size = liseq->body->local_size;
+ int local_size = liseq->body->local_table_size;
argc++;
ADD_INSN1(args, line, putspecialobject, INT2FIX(VM_SPECIAL_OBJECT_VMCORE));
if (liseq->body->param.flags.has_kwrest) {
- ADD_INSN2(args, line, getlocal, INT2FIX(liseq->body->local_size - liseq->body->param.keyword->rest_start), INT2FIX(lvar_level));
+ int idx = liseq->body->local_table_size - liseq->body->param.keyword->rest_start;
+ ADD_GETLOCAL(args, line, idx, lvar_level);
ADD_SEND (args, line, rb_intern("dup"), INT2FIX(0));
}
else {
@@ -5255,7 +5256,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
ID id = liseq->body->param.keyword->table[i];
int idx = local_size - get_local_var_idx(liseq, id);
ADD_INSN1(args, line, putobject, ID2SYM(id));
- ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
+ ADD_GETLOCAL(args, line, idx, lvar_level);
}
ADD_SEND(args, line, id_core_hash_merge_ptr, INT2FIX(i * 2 + 1));
if (liseq->body->param.flags.has_rest) {
@@ -5265,7 +5266,9 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
}
}
else if (liseq->body->param.flags.has_kwrest) {
- ADD_INSN2(args, line, getlocal, INT2FIX(liseq->body->local_size - liseq->body->param.keyword->rest_start), INT2FIX(lvar_level));
+ int idx = liseq->body->local_table_size - liseq->body->param.keyword->rest_start;
+ ADD_GETLOCAL(args, line, idx, lvar_level);
+
ADD_SEND (args, line, rb_intern("dup"), INT2FIX(0));
if (liseq->body->param.flags.has_rest) {
ADD_INSN1(args, line, newarray, INT2FIX(1));
@@ -5406,10 +5409,10 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
case NODE_LVAR:{
if (!poped) {
ID id = node->nd_vid;
- int idx = iseq->body->local_iseq->body->local_size - get_local_var_idx(iseq, id);
+ int idx = iseq->body->local_iseq->body->local_table_size - get_local_var_idx(iseq, id);
debugs("id: %"PRIsVALUE" idx: %d\n", rb_id2str(id), idx);
- ADD_INSN2(ret, line, getlocal, INT2FIX(idx), INT2FIX(get_lvar_level(iseq)));
+ ADD_GETLOCAL(ret, line, idx, get_lvar_level(iseq));
}
break;
}
@@ -5422,7 +5425,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
compile_bug(ERROR_ARGS "unknown dvar (%"PRIsVALUE")",
rb_id2str(node->nd_vid));
}
- ADD_INSN2(ret, line, getlocal, INT2FIX(ls - idx), INT2FIX(lv));
+ ADD_GETLOCAL(ret, line, ls - idx, lv);
}
break;
}
@@ -5954,7 +5957,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
case NODE_ERRINFO:{
if (!poped) {
if (iseq->body->type == ISEQ_TYPE_RESCUE) {
- ADD_INSN2(ret, line, getlocal, INT2FIX(2), INT2FIX(0));
+ ADD_GETLOCAL(ret, line, LVAR_ERRINFO, 0);
}
else {
const rb_iseq_t *ip = iseq;
@@ -5967,7 +5970,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
level++;
}
if (ip) {
- ADD_INSN2(ret, line, getlocal, INT2FIX(2), INT2FIX(level));
+ ADD_GETLOCAL(ret, line, LVAR_ERRINFO, level);
}
else {
ADD_INSN(ret, line, putnil);
@@ -6032,10 +6035,10 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
* kw = default_value
* end
*/
- int kw_bits_idx = iseq->body->local_size - iseq->body->param.keyword->bits_start;
+ int kw_bits_idx = iseq->body->local_table_size - iseq->body->param.keyword->bits_start;
int keyword_idx = iseq->body->param.keyword->num;
- ADD_INSN2(ret, line, checkkeyword, INT2FIX(kw_bits_idx), INT2FIX(keyword_idx));
+ ADD_INSN2(ret, line, checkkeyword, INT2FIX(kw_bits_idx + VM_ENV_DATA_SIZE - 1), INT2FIX(keyword_idx));
ADD_INSNL(ret, line, branchif, end_label);
COMPILE_POPED(ret, "keyword default argument", node->nd_body);
ADD_LABEL(ret, end_label);
@@ -6779,7 +6782,6 @@ rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, VALUE locals, VALUE params,
len = RARRAY_LENINT(locals);
iseq->body->local_table_size = len;
iseq->body->local_table = tbl = len > 0 ? (ID *)ALLOC_N(ID, iseq->body->local_table_size) : NULL;
- iseq->body->local_size = iseq->body->local_table_size + 1;
for (i = 0; i < len; i++) {
VALUE lv = RARRAY_AREF(locals, i);
@@ -6866,11 +6868,11 @@ rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, VALUE locals, VALUE params,
/* for parser */
int
-rb_dvar_defined(ID id, const rb_block_t *base_block)
+rb_dvar_defined(ID id, const struct rb_block *base_block)
{
const rb_iseq_t *iseq;
- if (base_block && (iseq = base_block->iseq)) {
+ if (base_block && (iseq = vm_block_iseq(base_block)) != NULL) {
while (iseq->body->type == ISEQ_TYPE_BLOCK ||
iseq->body->type == ISEQ_TYPE_RESCUE ||
iseq->body->type == ISEQ_TYPE_ENSURE ||
@@ -6891,13 +6893,13 @@ rb_dvar_defined(ID id, const rb_block_t *base_block)
}
int
-rb_local_defined(ID id, const rb_block_t *base_block)
+rb_local_defined(ID id, const struct rb_block *base_block)
{
const rb_iseq_t *iseq;
- if (base_block && base_block->iseq) {
+ if (base_block && (iseq = vm_block_iseq(base_block)) != NULL) {
unsigned int i;
- iseq = base_block->iseq->body->local_iseq;
+ iseq = iseq->body->local_iseq;
for (i=0; i<iseq->body->local_table_size; i++) {
if (iseq->body->local_table[i] == id) {
@@ -6975,7 +6977,7 @@ for_self_aset(rb_iseq_t *iseq, LINK_ANCHOR *ret, VALUE a)
iseq->body->param.lead_num = 1;
iseq->body->param.size = 1;
- ADD_INSN2(ret, line, getlocal, INT2FIX(numberof(vars)-0), INT2FIX(0));
+ ADD_GETLOCAL(ret, line, numberof(vars)-1, 0);
ADD_INSN1(ret, line, putobject, args->arg);
ADD_INSN1(ret, line, opt_call_c_function, (VALUE)args->func);
ADD_INSN(ret, line, pop);
@@ -7415,7 +7417,7 @@ ibf_load_line_info_table(const struct ibf_load *load, const struct rb_iseq_const
static ID *
ibf_dump_local_table(struct ibf_dump *dump, const rb_iseq_t *iseq)
{
- const int size = iseq->body->local_size - 1;
+ const int size = iseq->body->local_table_size;
ID *table = ALLOCA_N(ID, size);
int i;
@@ -7429,7 +7431,7 @@ ibf_dump_local_table(struct ibf_dump *dump, const rb_iseq_t *iseq)
static ID *
ibf_load_local_table(const struct ibf_load *load, const struct rb_iseq_constant_body *body)
{
- const int size = body->local_size - 1;
+ const int size = body->local_table_size;
if (size > 0) {
ID *table = IBF_R(body->local_table, ID, size);
@@ -7596,7 +7598,6 @@ ibf_load_iseq_each(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t of
/* memcpy(load_body, load->buff + offset, sizeof(*load_body)); */
load_body->type = body->type;
load_body->stack_max = body->stack_max;
- load_body->local_size = body->local_size;
load_body->iseq_size = body->iseq_size;
load_body->param = body->param;
load_body->local_table_size = body->local_table_size;
diff --git a/cont.c b/cont.c
index 10bf3417fd..528a0af16b 100644
--- a/cont.c
+++ b/cont.c
@@ -174,6 +174,7 @@ cont_mark(void *ptr)
if (ptr) {
rb_context_t *cont = ptr;
rb_gc_mark(cont->value);
+
rb_thread_mark(&cont->saved_thread);
rb_gc_mark(cont->saved_thread.self);
@@ -490,7 +491,7 @@ cont_capture(volatile int *stat)
cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
- cont->saved_thread.stack = 0;
+ cont->saved_thread.stack = NULL;
cont_save_machine_stack(th, cont);
@@ -539,7 +540,7 @@ cont_restore_thread(rb_context_t *cont)
th->fiber = sth->fiber;
fib = th->fiber ? th->fiber : th->root_fiber;
- if (fib) {
+ if (fib && fib->cont.saved_thread.stack) {
th->stack_size = fib->cont.saved_thread.stack_size;
th->stack = fib->cont.saved_thread.stack;
}
@@ -554,6 +555,7 @@ cont_restore_thread(rb_context_t *cont)
else {
/* fiber */
th->stack = sth->stack;
+ sth->stack = NULL;
th->stack_size = sth->stack_size;
th->local_storage = sth->local_storage;
th->local_storage_recursive_hash = sth->local_storage_recursive_hash;
@@ -573,7 +575,6 @@ cont_restore_thread(rb_context_t *cont)
th->root_lep = sth->root_lep;
th->root_svar = sth->root_svar;
th->ensure_list = sth->ensure_list;
-
}
#if FIBER_USE_NATIVE
@@ -727,7 +728,6 @@ fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
}
#endif
-
/* swap machine context */
#ifdef _WIN32
SwitchToFiber(newfib->fib_handle);
@@ -1084,7 +1084,6 @@ rb_cont_call(int argc, VALUE *argv, VALUE contval)
/* restore `tracing' context. see [Feature #4347] */
th->trace_arg = cont->saved_thread.trace_arg;
-
cont_restore_0(cont, &contval);
return Qnil; /* unreachable */
}
@@ -1190,6 +1189,18 @@ fiber_t_alloc(VALUE fibval)
return fib;
}
+rb_control_frame_t *
+rb_vm_push_frame(rb_thread_t *th,
+ const rb_iseq_t *iseq,
+ VALUE type,
+ VALUE self,
+ VALUE specval,
+ VALUE cref_or_me,
+ const VALUE *pc,
+ VALUE *sp,
+ int local_size,
+ int stack_max);
+
static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
@@ -1201,27 +1212,24 @@ fiber_init(VALUE fibval, VALUE proc)
/* initialize cont */
cont->vm_stack = 0;
- th->stack = 0;
+ th->stack = NULL;
th->stack_size = 0;
th->stack_size = cth->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
th->stack = ALLOC_N(VALUE, th->stack_size);
-
th->cfp = (void *)(th->stack + th->stack_size);
- th->cfp--;
- th->cfp->pc = 0;
- th->cfp->sp = th->stack + 2;
-#if VM_DEBUG_BP_CHECK
- th->cfp->bp_check = 0;
-#endif
- th->cfp->ep = th->stack + 1;
- th->cfp->ep[ 0] = VM_ENVVAL_BLOCK_PTR(0);
- th->cfp->ep[-1] = 0;
- th->cfp->self = Qnil;
- th->cfp->flag = VM_FRAME_MAGIC_DUMMY | VM_FRAME_FLAG_FINISH;
- th->cfp->iseq = 0;
- th->cfp->proc = 0;
- th->cfp->block_iseq = 0;
+
+ rb_vm_push_frame(th,
+ NULL,
+ VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
+ Qnil, /* self */
+ VM_BLOCK_HANDLER_NONE,
+ 0, /* specval */
+ NULL, /* pc */
+ th->stack, /* sp */
+ 0, /* local_size */
+ 0);
+
th->tag = 0;
th->local_storage = st_init_numtable();
th->local_storage_recursive_hash = Qnil;
@@ -1268,12 +1276,12 @@ rb_fiber_start(void)
argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
cont->value = Qnil;
th->errinfo = Qnil;
- th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
+ th->root_lep = rb_vm_ep_local_ep(vm_block_ep(&proc->block));
th->root_svar = Qfalse;
fib->status = RUNNING;
EXEC_EVENT_HOOK(th, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, Qnil);
- cont->value = rb_vm_invoke_proc(th, proc, argc, argv, 0);
+ cont->value = rb_vm_invoke_proc(th, proc, argc, argv, VM_BLOCK_HANDLER_NONE);
}
TH_POP_TAG();
diff --git a/eval.c b/eval.c
index 3241cb8048..db7552c4d7 100644
--- a/eval.c
+++ b/eval.c
@@ -759,12 +759,11 @@ int
rb_block_given_p(void)
{
rb_thread_t *th = GET_THREAD();
-
- if (rb_vm_control_frame_block_ptr(th->cfp)) {
- return TRUE;
+ if (rb_vm_frame_block_handler(th->cfp) == VM_BLOCK_HANDLER_NONE) {
+ return FALSE;
}
else {
- return FALSE;
+ return TRUE;
}
}
@@ -1236,15 +1235,15 @@ rb_mod_refine(VALUE module, VALUE klass)
id_refined_class, id_defined_at;
VALUE refinements, activated_refinements;
rb_thread_t *th = GET_THREAD();
- rb_block_t *block = rb_vm_control_frame_block_ptr(th->cfp);
+ VALUE block_handler = rb_vm_frame_block_handler(th->cfp);
- if (!block) {
- rb_raise(rb_eArgError, "no block given");
+ if (block_handler == VM_BLOCK_HANDLER_NONE) {
+ rb_raise(rb_eArgError, "no block given");
}
- if (block->proc) {
- rb_raise(rb_eArgError,
- "can't pass a Proc as a block to Module#refine");
+ if (vm_block_handler_type(block_handler) != block_handler_type_iseq) {
+ rb_raise(rb_eArgError, "can't pass a Proc as a block to Module#refine");
}
+
Check_Type(klass, T_CLASS);
CONST_ID(id_refinements, "__refinements__");
refinements = rb_attr_get(module, id_refinements);
@@ -1315,7 +1314,7 @@ mod_using(VALUE self, VALUE module)
void
rb_obj_call_init(VALUE obj, int argc, const VALUE *argv)
{
- PASS_PASSED_BLOCK();
+ PASS_PASSED_BLOCK_HANDLER();
rb_funcall2(obj, idInitialize, argc, argv);
}
@@ -1448,7 +1447,7 @@ top_using(VALUE self, VALUE module)
return self;
}
-static VALUE *
+static const VALUE *
errinfo_place(rb_thread_t *th)
{
rb_control_frame_t *cfp = th->cfp;
@@ -1457,12 +1456,12 @@ errinfo_place(rb_thread_t *th)
while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
if (cfp->iseq->body->type == ISEQ_TYPE_RESCUE) {
- return &cfp->ep[-2];
+ return &cfp->ep[VM_ENV_INDEX_LAST_LVAR];
}
else if (cfp->iseq->body->type == ISEQ_TYPE_ENSURE &&
- !THROW_DATA_P(cfp->ep[-2]) &&
- !FIXNUM_P(cfp->ep[-2])) {
- return &cfp->ep[-2];
+ !THROW_DATA_P(cfp->ep[VM_ENV_INDEX_LAST_LVAR]) &&
+ !FIXNUM_P(cfp->ep[VM_ENV_INDEX_LAST_LVAR])) {
+ return &cfp->ep[VM_ENV_INDEX_LAST_LVAR];
}
}
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
@@ -1473,7 +1472,7 @@ errinfo_place(rb_thread_t *th)
static VALUE
get_thread_errinfo(rb_thread_t *th)
{
- VALUE *ptr = errinfo_place(th);
+ const VALUE *ptr = errinfo_place(th);
if (ptr) {
return *ptr;
}
@@ -1502,7 +1501,7 @@ errinfo_setter(VALUE val, ID id, VALUE *var)
rb_raise(rb_eTypeError, "assigning non-exception to $!");
}
else {
- VALUE *ptr = errinfo_place(GET_THREAD());
+ const VALUE *ptr = errinfo_place(GET_THREAD());
if (ptr) {
*ptr = val;
}
diff --git a/eval_intern.h b/eval_intern.h
index bb4b93bbd6..9db0fd1bdb 100644
--- a/eval_intern.h
+++ b/eval_intern.h
@@ -5,13 +5,23 @@
#include "vm_core.h"
static inline void
-pass_passed_block(rb_thread_t *th)
+vm_passed_block_handler_set(rb_thread_t *th, VALUE block_handler)
{
- th->passed_block = rb_vm_control_frame_block_ptr(th->cfp);
- th->cfp->flag |= VM_FRAME_FLAG_PASSED;
+ VM_ASSERT(vm_block_handler_verify(block_handler));
+ th->passed_block_handler = block_handler;
}
-#define PASS_PASSED_BLOCK_TH(th) pass_passed_block(th)
-#define PASS_PASSED_BLOCK() pass_passed_block(GET_THREAD())
+
+static inline void
+pass_passed_block_handler(rb_thread_t *th)
+{
+ VALUE block_handler = rb_vm_frame_block_handler(th->cfp);
+ VM_ASSERT(vm_block_handler_verify(block_handler));
+ vm_passed_block_handler_set(th, block_handler);
+ VM_ENV_FLAGS_SET(th->cfp->ep, VM_FRAME_FLAG_PASSED);
+}
+
+#define PASS_PASSED_BLOCK_HANDLER_TH(th) pass_passed_block_handler(th)
+#define PASS_PASSED_BLOCK_HANDLER() pass_passed_block_handler(GET_THREAD())
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
@@ -277,7 +287,7 @@ NORETURN(void rb_raise_method_missing(rb_thread_t *th, int argc, const VALUE *ar
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val);
rb_cref_t *rb_vm_cref(void);
rb_cref_t *rb_vm_cref_replace_with_duplicated_cref(void);
-VALUE rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, const rb_block_t *blockptr, VALUE filename);
+VALUE rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, VALUE block_handler, VALUE filename);
void rb_vm_set_progname(VALUE filename);
void rb_thread_terminate_all(void);
VALUE rb_vm_cbase(void);
diff --git a/gc.c b/gc.c
index f3bfe00fbf..119e7a226d 100644
--- a/gc.c
+++ b/gc.c
@@ -9158,19 +9158,6 @@ rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
}
}
-static const rb_iseq_t *
-vm_proc_iseq(VALUE procval)
-{
- rb_proc_t *proc = RTYPEDDATA_DATA(procval);
-
- if (RUBY_VM_NORMAL_ISEQ_P(proc->block.iseq)) {
- return proc->block.iseq;
- }
- else {
- return NULL;
- }
-}
-
const char *
rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
{
diff --git a/insns.def b/insns.def
index 7da7f891ff..f895af7a5b 100644
--- a/insns.def
+++ b/insns.def
@@ -58,7 +58,7 @@ getlocal
(VALUE val)
{
int i, lev = (int)level;
- VALUE *ep = GET_EP();
+ const VALUE *ep = GET_EP();
/* optimized insns generated for level == (0|1) in defs/opt_operand.def */
for (i = 0; i < lev; i++) {
@@ -81,13 +81,13 @@ setlocal
()
{
int i, lev = (int)level;
- VALUE *ep = GET_EP();
+ const VALUE *ep = GET_EP();
/* optimized insns generated for level == (0|1) in defs/opt_operand.def */
for (i = 0; i < lev; i++) {
ep = GET_PREV_EP(ep);
}
- *(ep - idx) = val;
+ vm_env_write(ep, -(int)idx, val);
}
/**
@@ -790,7 +790,7 @@ checkkeyword
ret = (bits & (0x01 << keyword_index)) ? Qfalse : Qtrue;
}
else {
- assert(RB_TYPE_P(kw_bits, T_HASH));
+ VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
ret = rb_hash_has_key(kw_bits, INT2FIX(keyword_index)) ? Qfalse : Qtrue;
}
}
@@ -932,11 +932,11 @@ defineclass
rb_iseq_check(class_iseq);
/* enter scope */
- vm_push_frame(th, class_iseq, VM_FRAME_MAGIC_CLASS, klass,
- VM_ENVVAL_BLOCK_PTR(GET_BLOCK_PTR()),
+ vm_push_frame(th, class_iseq, VM_FRAME_MAGIC_CLASS | VM_ENV_FLAG_LOCAL, klass,
+ GET_BLOCK_HANDLER(),
(VALUE)vm_cref_push(th, klass, NULL, FALSE),
class_iseq->body->iseq_encoded, GET_SP(),
- class_iseq->body->local_size,
+ class_iseq->body->local_table_size,
class_iseq->body->stack_max);
RESTORE_REGS();
NEXT_INSN();
@@ -1059,7 +1059,7 @@ opt_send_without_block
(VALUE val) // inc += -ci->orig_argc;
{
struct rb_calling_info calling;
- calling.blockptr = NULL;
+ calling.block_handler = VM_BLOCK_HANDLER_NONE;
vm_search_method(ci, cc, calling.recv = TOPN(calling.argc = ci->orig_argc));
CALL_METHOD(&calling, ci, cc);
}
@@ -1097,7 +1097,7 @@ invokeblock
{
struct rb_calling_info calling;
calling.argc = ci->orig_argc;
- calling.blockptr = NULL;
+ calling.block_handler = VM_BLOCK_HANDLER_NONE;
calling.recv = GET_SELF();
val = vm_invoke_block(th, GET_CFP(), &calling, ci);
diff --git a/internal.h b/internal.h
index edadce991a..b9fa536140 100644
--- a/internal.h
+++ b/internal.h
@@ -894,9 +894,9 @@ int rb_class_has_methods(VALUE c);
VALUE rb_invcmp(VALUE, VALUE);
/* compile.c */
-struct rb_block_struct;
-int rb_dvar_defined(ID, const struct rb_block_struct *);
-int rb_local_defined(ID, const struct rb_block_struct *);
+struct rb_block;
+int rb_dvar_defined(ID, const struct rb_block *);
+int rb_local_defined(ID, const struct rb_block *);
CONSTFUNC(const char * rb_insns_name(int i));
VALUE rb_insns_name_array(void);
@@ -1212,7 +1212,7 @@ struct RBasicRaw {
#endif
VALUE rb_parser_get_yydebug(VALUE);
VALUE rb_parser_set_yydebug(VALUE, VALUE);
-VALUE rb_parser_set_context(VALUE, const struct rb_block_struct *, int);
+VALUE rb_parser_set_context(VALUE, const struct rb_block *, int);
void *rb_parser_load_file(VALUE parser, VALUE name);
int rb_is_const_name(VALUE name);
int rb_is_class_name(VALUE name);
@@ -1372,7 +1372,7 @@ VALUE rb_enc_str_scrub(rb_encoding *enc, VALUE str, VALUE repl);
#define is_ascii_string(str) (rb_enc_str_coderange(str) == ENC_CODERANGE_7BIT)
#define is_broken_string(str) (rb_enc_str_coderange(str) == ENC_CODERANGE_BROKEN)
size_t rb_str_memsize(VALUE);
-VALUE rb_sym_proc_call(VALUE args, VALUE sym, int argc, const VALUE *argv, VALUE passed_proc);
+VALUE rb_sym_proc_call(ID mid, int argc, const VALUE *argv, VALUE passed_proc);
VALUE rb_sym_to_proc(VALUE sym);
/* symbol.c */
diff --git a/iseq.c b/iseq.c
index 1c22fea74d..0f98107c7f 100644
--- a/iseq.c
+++ b/iseq.c
@@ -607,11 +607,11 @@ rb_iseq_load(VALUE data, VALUE parent, VALUE opt)
}
rb_iseq_t *
-rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, rb_block_t *base_block, VALUE opt)
+rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, const struct rb_block *base_block, VALUE opt)
{
rb_thread_t *th = GET_THREAD();
rb_iseq_t *iseq = NULL;
- const rb_iseq_t *const parent = base_block ? base_block->iseq : NULL;
+ const rb_iseq_t *const parent = base_block ? vm_block_iseq(base_block) : NULL;
rb_compile_option_t option;
const enum iseq_type type = parent ? ISEQ_TYPE_EVAL : ISEQ_TYPE_TOP;
#if !defined(__GNUC__) || (__GNUC__ == 4 && __GNUC_MINOR__ == 8)
@@ -661,7 +661,7 @@ rb_iseq_compile(VALUE src, VALUE file, VALUE line)
}
rb_iseq_t *
-rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, rb_block_t *base_block)
+rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, const struct rb_block *base_block)
{
return rb_iseq_compile_with_option(src, file, Qnil, line, base_block, Qnil);
}
@@ -1263,11 +1263,14 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
if (pnop) {
const rb_iseq_t *diseq = iseq;
VALUE level = *pnop, i;
+ ID lid;
for (i = 0; i < level; i++) {
diseq = diseq->body->parent_iseq;
}
- ret = id_to_name(diseq->body->local_table[diseq->body->local_size - op], INT2FIX('*'));
+ lid = diseq->body->local_table[diseq->body->local_table_size +
+ VM_ENV_DATA_SIZE - 1 - op];
+ ret = id_to_name(lid, INT2FIX('*'));
}
else {
ret = rb_sprintf("%"PRIuVALUE, op);
@@ -1520,7 +1523,7 @@ rb_iseq_disasm(const rb_iseq_t *iseq)
rb_str_catf(str,
"local table (size: %d, argc: %d "
"[opts: %d, rest: %d, post: %d, block: %d, kw: %d@%d, kwrest: %d])\n",
- iseq->body->local_size,
+ iseq->body->local_table_size,
iseq->body->param.lead_num,
iseq->body->param.opt_num,
iseq->body->param.flags.has_rest ? iseq->body->param.rest_start : -1,
@@ -1553,7 +1556,7 @@ rb_iseq_disasm(const rb_iseq_t *iseq)
(iseq->body->param.flags.has_post && iseq->body->param.post_start <= li && li < iseq->body->param.post_start + iseq->body->param.post_num) ? "Post" : "",
(iseq->body->param.flags.has_block && iseq->body->param.block_start == li) ? "Block" : "");
- rb_str_catf(str, "[%2d] ", iseq->body->local_size - i);
+ rb_str_catf(str, "[%2d] ", iseq->body->local_table_size - i);
width = RSTRING_LEN(str) + 11;
if (name)
rb_str_append(str, name);
@@ -1646,9 +1649,7 @@ iseqw_s_of(VALUE klass, VALUE body)
rb_secure(1);
if (rb_obj_is_proc(body)) {
- rb_proc_t *proc;
- GetProcPtr(body, proc);
- iseq = proc->block.iseq;
+ iseq = vm_proc_iseq(body);
if (!RUBY_VM_NORMAL_ISEQ_P(iseq)) {
iseq = NULL;
@@ -2052,7 +2053,7 @@ iseq_data_to_ary(const rb_iseq_t *iseq)
st_free_table(labels_table);
rb_hash_aset(misc, ID2SYM(rb_intern("arg_size")), INT2FIX(iseq->body->param.size));
- rb_hash_aset(misc, ID2SYM(rb_intern("local_size")), INT2FIX(iseq->body->local_size));
+ rb_hash_aset(misc, ID2SYM(rb_intern("local_size")), INT2FIX(iseq->body->local_table_size));
rb_hash_aset(misc, ID2SYM(rb_intern("stack_max")), INT2FIX(iseq->body->stack_max));
/* TODO: compatibility issue */
diff --git a/load.c b/load.c
index 9c8d2b1a21..1ccd23878c 100644
--- a/load.c
+++ b/load.c
@@ -997,7 +997,7 @@ rb_require_internal(VALUE fname, int safe)
case 's':
handle = (long)rb_vm_call_cfunc(rb_vm_top_self(), load_ext,
- path, 0, path);
+ path, VM_BLOCK_HANDLER_NONE, path);
rb_ary_push(ruby_dln_librefs, LONG2NUM(handle));
break;
}
diff --git a/parse.y b/parse.y
index 064aee8aea..d798788bb6 100644
--- a/parse.y
+++ b/parse.y
@@ -228,8 +228,6 @@ vtable_included(const struct vtable * tbl, ID id)
return 0;
}
-typedef struct rb_block_struct rb_block_t;
-
typedef struct token_info {
const char *token;
int linenum;
@@ -316,7 +314,7 @@ struct parser_params {
VALUE error_buffer;
VALUE debug_lines;
VALUE coverage;
- const rb_block_t *base_block;
+ const struct rb_block *base_block;
#else
/* Ripper only */
@@ -10893,7 +10891,7 @@ rb_parser_new(void)
}
VALUE
-rb_parser_set_context(VALUE vparser, const rb_block_t *base, int main)
+rb_parser_set_context(VALUE vparser, const struct rb_block *base, int main)
{
struct parser_params *parser;
diff --git a/proc.c b/proc.c
index 29a857f4a4..c932539563 100644
--- a/proc.c
+++ b/proc.c
@@ -47,35 +47,51 @@ static int method_min_max_arity(VALUE, int *max);
/* Proc */
#define IS_METHOD_PROC_IFUNC(ifunc) ((ifunc)->func == bmcall)
-#define IS_METHOD_PROC_ISEQ(iseq) \
- (RUBY_VM_IFUNC_P(iseq) && \
- IS_METHOD_PROC_IFUNC((struct vm_ifunc *)(iseq)))
+
+static VALUE proc_to_s_(VALUE self, const rb_proc_t *proc);
+
+static void
+block_mark(const struct rb_block *block)
+{
+ switch (vm_block_type(block)) {
+ case block_type_iseq:
+ case block_type_ifunc:
+ {
+ const struct rb_captured_block *captured = &block->as.captured;
+ RUBY_MARK_UNLESS_NULL(captured->self);
+ RUBY_MARK_UNLESS_NULL((VALUE)captured->code.val);
+ if (captured->ep && captured->ep[VM_ENV_DATA_INDEX_ENV] != Qundef /* cfunc_proc_t */) {
+ RUBY_MARK_UNLESS_NULL(VM_ENV_ENVVAL(captured->ep));
+ }
+ }
+ break;
+ case block_type_symbol:
+ RUBY_MARK_UNLESS_NULL(block->as.symbol);
+ break;
+ case block_type_proc:
+ RUBY_MARK_UNLESS_NULL(block->as.proc);
+ break;
+ }
+}
static void
proc_mark(void *ptr)
{
rb_proc_t *proc = ptr;
- RUBY_MARK_UNLESS_NULL(proc->block.proc);
- RUBY_MARK_UNLESS_NULL(proc->block.self);
- if (proc->block.ep) {
- RUBY_MARK_UNLESS_NULL(rb_vm_proc_envval(proc));
- }
- if (proc->block.iseq && RUBY_VM_IFUNC_P(proc->block.iseq)) {
- rb_gc_mark((VALUE)(proc->block.iseq));
- }
+ block_mark(&proc->block);
RUBY_MARK_LEAVE("proc");
}
typedef struct {
rb_proc_t basic;
- VALUE env[3]; /* me, specval, envval */
+ VALUE env[VM_ENV_DATA_SIZE + 1]; /* ..., envval */
} cfunc_proc_t;
static size_t
proc_memsize(const void *ptr)
{
const rb_proc_t *proc = ptr;
- if (proc->block.ep == ((const cfunc_proc_t *)ptr)->env+1)
+ if (proc->block.as.captured.ep == ((const cfunc_proc_t *)ptr)->env+1)
return sizeof(cfunc_proc_t);
return sizeof(rb_proc_t);
}
@@ -87,7 +103,7 @@ static const rb_data_type_t proc_data_type = {
RUBY_TYPED_DEFAULT_FREE,
proc_memsize,
},
- 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
+ 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
};
VALUE
@@ -108,21 +124,20 @@ rb_obj_is_proc(VALUE proc)
}
}
+VALUE rb_proc_create(VALUE klass, const struct rb_block *block,
+ int8_t safe_level, int8_t is_from_method, int8_t is_lambda);
+
/* :nodoc: */
static VALUE
proc_dup(VALUE self)
{
VALUE procval;
rb_proc_t *src;
- rb_proc_t *dst;
GetProcPtr(self, src);
- procval = rb_proc_alloc(rb_cProc);
- GetProcPtr(procval, dst);
- *dst = *src;
- dst->block.proc = procval;
+ procval = rb_proc_create(rb_cProc, &src->block,
+ src->safe_level, src->is_from_method, src->is_lambda);
RB_GC_GUARD(self); /* for: body = proc_dup(body) */
-
return procval;
}
@@ -266,7 +281,8 @@ binding_mark(void *ptr)
RUBY_MARK_ENTER("binding");
- RUBY_MARK_UNLESS_NULL(bind->env);
+ block_mark(&bind->block);
+
RUBY_MARK_UNLESS_NULL(bind->path);
RUBY_MARK_LEAVE("binding");
@@ -305,7 +321,7 @@ binding_dup(VALUE self)
rb_binding_t *src, *dst;
GetBindingPtr(self, src);
GetBindingPtr(bindval, dst);
- dst->env = src->env;
+ dst->block = src->block;
dst->path = src->path;
dst->first_lineno = src->first_lineno;
return bindval;
@@ -375,7 +391,7 @@ bind_eval(int argc, VALUE *argv, VALUE bindval)
return rb_f_eval(argc+1, args, Qnil /* self will be searched in eval */);
}
-static VALUE *
+static const VALUE *
get_local_variable_ptr(VALUE envval, ID lid)
{
rb_env_t *env;
@@ -385,9 +401,9 @@ get_local_variable_ptr(VALUE envval, ID lid)
unsigned int i;
GetEnvPtr(envval, env);
- iseq = env->block.iseq;
+ iseq = env->iseq;
- if (RUBY_VM_NORMAL_ISEQ_P(iseq)) {
+ if (iseq && RUBY_VM_NORMAL_ISEQ_P(iseq)) {
for (i=0; i<iseq->body->local_table_size; i++) {
if (iseq->body->local_table[i] == lid) {
return &env->env[i];
@@ -454,7 +470,7 @@ bind_local_variables(VALUE bindval)
const rb_env_t *env;
GetBindingPtr(bindval, bind);
- GetEnvPtr(bind->env, env);
+ GetEnvPtr(VM_ENV_ENVVAL(vm_block_ep(&bind->block)), env);
return rb_vm_env_local_variables(env);
}
@@ -487,7 +503,7 @@ bind_local_variable_get(VALUE bindval, VALUE sym)
GetBindingPtr(bindval, bind);
- if ((ptr = get_local_variable_ptr(bind->env, lid)) == NULL) {
+ if ((ptr = get_local_variable_ptr(VM_ENV_ENVVAL(vm_block_ep(&bind->block)), lid)) == NULL) {
sym = ID2SYM(lid);
undefined:
rb_name_err_raise("local variable `%1$s' not defined for %2$s",
@@ -526,17 +542,20 @@ bind_local_variable_set(VALUE bindval, VALUE sym, VALUE val)
{
ID lid = check_local_id(bindval, &sym);
rb_binding_t *bind;
- VALUE *ptr;
+ const VALUE *ptr;
+ VALUE envval;
if (!lid) lid = rb_intern_str(sym);
GetBindingPtr(bindval, bind);
- if ((ptr = get_local_variable_ptr(bind->env, lid)) == NULL) {
+ envval = VM_ENV_ENVVAL(vm_block_ep(&bind->block));
+ if ((ptr = get_local_variable_ptr(envval, lid)) == NULL) {
/* not found. create new env */
ptr = rb_binding_add_dynavars(bind, 1, &lid);
+ envval = VM_ENV_ENVVAL(vm_block_ep(&bind->block));
}
- *ptr = val;
+ RB_OBJ_WRITE(envval, ptr, val);
return val;
}
@@ -567,7 +586,7 @@ bind_local_variable_defined_p(VALUE bindval, VALUE sym)
if (!lid) return Qfalse;
GetBindingPtr(bindval, bind);
- return get_local_variable_ptr(bind->env, lid) ? Qtrue : Qfalse;
+ return get_local_variable_ptr(VM_ENV_ENVVAL(vm_block_ep(&bind->block)), lid) ? Qtrue : Qfalse;
}
/*
@@ -580,11 +599,8 @@ static VALUE
bind_receiver(VALUE bindval)
{
const rb_binding_t *bind;
- const rb_env_t *env;
-
GetBindingPtr(bindval, bind);
- GetEnvPtr(bind->env, env);
- return env->block.self;
+ return vm_block_self(&bind->block);
}
static VALUE
@@ -593,11 +609,19 @@ cfunc_proc_new(VALUE klass, VALUE ifunc, int8_t is_lambda)
rb_proc_t *proc;
cfunc_proc_t *sproc;
VALUE procval = TypedData_Make_Struct(klass, cfunc_proc_t, &proc_data_type, sproc);
- sproc->env[1] = VM_ENVVAL_BLOCK_PTR(0);
+ VALUE *ep;
+
proc = &sproc->basic;
- proc->block.ep = sproc->env+1;
- proc->block.iseq = (rb_iseq_t *)ifunc;
- proc->block.proc = procval;
+ vm_block_type_set(&proc->block, block_type_ifunc);
+
+ *(VALUE **)&proc->block.as.captured.ep = ep = sproc->env + VM_ENV_DATA_SIZE-1;
+ ep[VM_ENV_DATA_INDEX_FLAGS] = VM_FRAME_MAGIC_IFUNC | VM_ENV_FLAG_LOCAL | VM_ENV_FLAG_ESCAPED;
+ ep[VM_ENV_DATA_INDEX_ME_CREF] = Qfalse;
+ ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE;
+ ep[VM_ENV_DATA_INDEX_ENV] = Qundef; /* envval */
+
+ /* self? */
+ RB_OBJ_WRITE(procval, &proc->block.as.captured.code.ifunc, ifunc);
proc->is_lambda = is_lambda;
return procval;
}
@@ -605,7 +629,13 @@ cfunc_proc_new(VALUE klass, VALUE ifunc, int8_t is_lambda)
static VALUE
sym_proc_new(VALUE klass, VALUE sym)
{
- return cfunc_proc_new(klass, sym, 0);
+ VALUE procval = rb_proc_alloc(klass);
+ rb_proc_t *proc;
+ GetProcPtr(procval, proc);
+
+ vm_block_type_set(&proc->block, block_type_symbol);
+ RB_OBJ_WRITE(procval, &proc->block.as.symbol, sym);
+ return procval;
}
VALUE
@@ -625,16 +655,23 @@ static const char proc_without_block[] = "tried to create Proc object without a
static VALUE
proc_new(VALUE klass, int8_t is_lambda)
{
- VALUE procval = Qnil;
+ VALUE procval;
rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = th->cfp;
- rb_block_t *block;
+ VALUE block_handler;
- if (!(block = rb_vm_control_frame_block_ptr(cfp))) {
+ if ((block_handler = rb_vm_frame_block_handler(cfp)) == VM_BLOCK_HANDLER_NONE) {
#if !PROC_NEW_REQUIRES_BLOCK
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
- if ((block = rb_vm_control_frame_block_ptr(cfp)) != 0) {
+ if ((block_handler = rb_vm_frame_block_handler(cfp)) != VM_BLOCK_HANDLER_NONE) {
+ const VALUE *lep = rb_vm_ep_local_ep(cfp->ep);
+
+ if (VM_ENV_ESCAPED_P(lep)) {
+ procval = VM_ENV_PROCVAL(lep);
+ goto return_existing_proc;
+ }
+
if (is_lambda) {
rb_warn(proc_without_block);
}
@@ -647,13 +684,13 @@ proc_new(VALUE klass, int8_t is_lambda)
}
}
- procval = block->proc;
+ /* block is in cf */
+ switch (vm_block_handler_type(block_handler)) {
+ case block_handler_type_proc:
+ procval = VM_BH_TO_PROC(block_handler);
- if (procval) {
- if (SYMBOL_P(procval)) {
- return (klass != rb_cProc) ? sym_proc_new(klass, procval) : rb_sym_to_proc(procval);
- }
- else if (RBASIC_CLASS(procval) == klass) {
+ return_existing_proc:
+ if (RBASIC_CLASS(procval) == klass) {
return procval;
}
else {
@@ -661,10 +698,20 @@ proc_new(VALUE klass, int8_t is_lambda)
RBASIC_SET_CLASS(newprocval, klass);
return newprocval;
}
- }
+ break;
- procval = rb_vm_make_proc_lambda(th, block, klass, is_lambda);
- return procval;
+ case block_handler_type_symbol:
+ return (klass != rb_cProc) ?
+ sym_proc_new(klass, VM_BH_TO_SYMBOL(block_handler)) :
+ rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
+ break;
+
+ case block_handler_type_ifunc:
+ case block_handler_type_iseq:
+ return rb_vm_make_proc_lambda(th, VM_BH_TO_CAPT_BLOCK(block_handler), klass, is_lambda);
+ }
+ VM_UNREACHABLE(proc_new);
+ return Qnil;
}
/*
@@ -777,27 +824,7 @@ rb_block_lambda(void)
static VALUE
proc_call(int argc, VALUE *argv, VALUE procval)
{
- VALUE vret;
- const rb_block_t *blockptr = 0;
- const rb_iseq_t *iseq;
- rb_proc_t *proc;
- VALUE passed_procval;
- GetProcPtr(procval, proc);
-
- iseq = proc->block.iseq;
- if (RUBY_VM_IFUNC_P(iseq) || iseq->body->param.flags.has_block) {
- if (rb_block_given_p()) {
- rb_proc_t *passed_proc;
- passed_procval = rb_block_proc();
- GetProcPtr(passed_procval, passed_proc);
- blockptr = &passed_proc->block;
- }
- }
-
- vret = rb_vm_invoke_proc(GET_THREAD(), proc, argc, argv, blockptr);
- RB_GC_GUARD(procval);
- RB_GC_GUARD(passed_procval);
- return vret;
+ /* removed */
}
#endif
@@ -815,44 +842,35 @@ check_argc(long argc)
#define check_argc(argc) (argc)
#endif
-static rb_block_t *
-passed_block(VALUE pass_procval)
-{
- if (!NIL_P(pass_procval)) {
- rb_proc_t *pass_proc;
- if (SYMBOL_P(pass_procval)) {
- pass_procval = sym_proc_new(rb_cProc, pass_procval);
- }
- GetProcPtr(pass_procval, pass_proc);
- return &pass_proc->block;
- }
- return 0;
-}
-
VALUE
rb_proc_call(VALUE self, VALUE args)
{
VALUE vret;
rb_proc_t *proc;
GetProcPtr(self, proc);
- vret = rb_vm_invoke_proc(GET_THREAD(), proc, check_argc(RARRAY_LEN(args)), RARRAY_CONST_PTR(args), 0);
+ vret = rb_vm_invoke_proc(GET_THREAD(), proc,
+ check_argc(RARRAY_LEN(args)), RARRAY_CONST_PTR(args),
+ VM_BLOCK_HANDLER_NONE);
RB_GC_GUARD(self);
RB_GC_GUARD(args);
return vret;
}
+static VALUE
+proc_to_block_handler(VALUE procval)
+{
+ return NIL_P(procval) ? VM_BLOCK_HANDLER_NONE : procval;
+}
+
VALUE
-rb_proc_call_with_block(VALUE self, int argc, const VALUE *argv, VALUE pass_procval)
+rb_proc_call_with_block(VALUE self, int argc, const VALUE *argv, VALUE passed_procval)
{
+ rb_thread_t *th = GET_THREAD();
VALUE vret;
rb_proc_t *proc;
- rb_block_t *block = 0;
GetProcPtr(self, proc);
-
- block = passed_block(pass_procval);
- vret = rb_vm_invoke_proc(GET_THREAD(), proc, argc, argv, block);
+ vret = rb_vm_invoke_proc(th, proc, argc, argv, proc_to_block_handler(passed_procval));
RB_GC_GUARD(self);
- RB_GC_GUARD(pass_procval);
return vret;
}
@@ -916,21 +934,24 @@ rb_iseq_min_max_arity(const rb_iseq_t *iseq, int *max)
}
static int
-rb_block_min_max_arity(rb_block_t *block, int *max)
-{
- const rb_iseq_t *iseq = block->iseq;
-
- if (iseq) {
- if (RUBY_VM_NORMAL_ISEQ_P(iseq)) {
- return rb_iseq_min_max_arity(iseq, max);
- }
- else {
- if (IS_METHOD_PROC_ISEQ(iseq)) {
- const struct vm_ifunc *ifunc = (struct vm_ifunc *)iseq;
+rb_block_min_max_arity(const struct rb_block *block, int *max)
+{
+ switch (vm_block_type(block)) {
+ case block_type_iseq:
+ return rb_iseq_min_max_arity(block->as.captured.code.iseq, max);
+ case block_type_proc:
+ return rb_block_min_max_arity(vm_proc_block(block->as.proc), max);
+ case block_type_ifunc:
+ {
+ const struct vm_ifunc *ifunc = block->as.captured.code.ifunc;
+ if (IS_METHOD_PROC_IFUNC(ifunc)) {
/* e.g. method(:foo).to_proc.arity */
return method_min_max_arity((VALUE)ifunc->data, max);
}
}
+ /* fall through */
+ case block_type_symbol:
+ break;
}
*max = UNLIMITED_ARGUMENTS;
return 0;
@@ -946,7 +967,7 @@ static int
rb_proc_min_max_arity(VALUE self, int *max)
{
rb_proc_t *proc;
- rb_block_t *block;
+ const struct rb_block *block;
GetProcPtr(self, proc);
block = &proc->block;
return rb_block_min_max_arity(block, max);
@@ -961,57 +982,95 @@ rb_proc_arity(VALUE self)
return (proc->is_lambda ? min == max : max != UNLIMITED_ARGUMENTS) ? min : -min-1;
}
+static void
+block_setup(struct rb_block *block, VALUE block_handler)
+{
+ switch (vm_block_handler_type(block_handler)) {
+ case block_handler_type_iseq:
+ block->type = block_type_iseq;
+ block->as.captured = *VM_BH_TO_ISEQ_BLOCK(block_handler);
+ break;
+ case block_handler_type_ifunc:
+ block->type = block_type_ifunc;
+ block->as.captured = *VM_BH_TO_IFUNC_BLOCK(block_handler);
+ break;
+ case block_handler_type_symbol:
+ block->type = block_type_symbol;
+ block->as.symbol = VM_BH_TO_SYMBOL(block_handler);
+ break;
+ case block_handler_type_proc:
+ block->type = block_type_proc;
+ block->as.proc = VM_BH_TO_PROC(block_handler);
+ }
+}
+
int
rb_block_arity(void)
{
int min, max;
rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = th->cfp;
- rb_block_t *block = rb_vm_control_frame_block_ptr(cfp);
- VALUE proc_value;
-
- if (!block) rb_raise(rb_eArgError, "no block given");
- min = rb_block_min_max_arity(block, &max);
- proc_value = block->proc;
- if (proc_value) {
- if (SYMBOL_P(proc_value)) {
- return -1;
- }
- else {
+ VALUE block_handler = rb_vm_frame_block_handler(cfp);
+ struct rb_block block;
+
+ if (block_handler == VM_BLOCK_HANDLER_NONE) {
+ rb_raise(rb_eArgError, "no block given");
+ }
+
+ block_setup(&block, block_handler);
+ min = rb_block_min_max_arity(&block, &max);
+
+ switch (vm_block_type(&block)) {
+ case block_handler_type_symbol:
+ return -1;
+
+ case block_handler_type_proc:
+ {
+ VALUE procval = block_handler;
rb_proc_t *proc;
- GetProcPtr(proc_value, proc);
- if (proc)
- return (proc->is_lambda ? min == max : max != UNLIMITED_ARGUMENTS) ? min : -min-1;
+ GetProcPtr(procval, proc);
+ return (proc->is_lambda ? min == max : max != UNLIMITED_ARGUMENTS) ? min : -min-1;
+ /* fall through */
}
+
+ default:
+ return max != UNLIMITED_ARGUMENTS ? min : -min-1;
}
- return max != UNLIMITED_ARGUMENTS ? min : -min-1;
}
const rb_iseq_t *
rb_proc_get_iseq(VALUE self, int *is_proc)
{
const rb_proc_t *proc;
- const rb_iseq_t *iseq;
+ const struct rb_block *block;
GetProcPtr(self, proc);
- iseq = proc->block.iseq;
+ block = &proc->block;
if (is_proc) *is_proc = !proc->is_lambda;
- if (RUBY_VM_IFUNC_P(iseq)) {
- const struct vm_ifunc *ifunc = (struct vm_ifunc *)iseq;
- iseq = 0;
- if (IS_METHOD_PROC_IFUNC(ifunc)) {
- /* method(:foo).to_proc */
- iseq = rb_method_iseq((VALUE)ifunc->data);
- if (is_proc) *is_proc = 0;
+
+ switch (vm_block_type(block)) {
+ case block_type_iseq:
+ return rb_iseq_check(block->as.captured.code.iseq);
+ case block_type_proc:
+ return rb_proc_get_iseq(block->as.proc, is_proc);
+ case block_type_ifunc:
+ {
+ const struct vm_ifunc *ifunc = block->as.captured.code.ifunc;
+ if (IS_METHOD_PROC_IFUNC(ifunc)) {
+ /* method(:foo).to_proc */
+ if (is_proc) *is_proc = 0;
+ return rb_method_iseq((VALUE)ifunc->data);
+ }
+ else {
+ return NULL;
+ }
}
- return iseq;
- }
- else if (SYMBOL_P(iseq)) {
+ case block_type_symbol:
return NULL;
}
- else {
- return rb_iseq_check(iseq);
- }
+
+ VM_UNREACHABLE(rb_proc_get_iseq);
+ return NULL;
}
static VALUE
@@ -1090,8 +1149,9 @@ rb_hash_proc(st_index_t hash, VALUE prc)
{
rb_proc_t *proc;
GetProcPtr(prc, proc);
- hash = rb_hash_uint(hash, (st_index_t)proc->block.iseq);
- return rb_hash_uint(hash, (st_index_t)proc->block.ep >> 16);
+ hash = rb_hash_uint(hash, (st_index_t)proc->block.as.captured.code.val);
+ hash = rb_hash_uint(hash, (st_index_t)proc->block.as.captured.self);
+ return rb_hash_uint(hash, (st_index_t)proc->block.as.captured.ep >> 16);
}
VALUE
@@ -1153,34 +1213,40 @@ proc_hash(VALUE self)
*/
static VALUE
-proc_to_s(VALUE self)
+proc_to_s_(VALUE self, const rb_proc_t *proc)
{
VALUE str = 0;
- rb_proc_t *proc;
const char *cname = rb_obj_classname(self);
- const rb_iseq_t *iseq;
+ const struct rb_block *block;
const char *is_lambda;
- GetProcPtr(self, proc);
- iseq = proc->block.iseq;
+ block = &proc->block;
is_lambda = proc->is_lambda ? " (lambda)" : "";
- if (RUBY_VM_NORMAL_ISEQ_P(iseq) && rb_iseq_check(iseq)) {
- int first_lineno = 0;
-
- if (iseq->body->line_info_table) {
- first_lineno = FIX2INT(rb_iseq_first_lineno(iseq));
+ again:
+ switch (vm_block_type(block)) {
+ case block_type_proc:
+ block = vm_proc_block(block->as.proc);
+ goto again;
+ case block_type_iseq:
+ {
+ const rb_iseq_t *iseq = rb_iseq_check(block->as.captured.code.iseq);
+ int first_lineno = 0;
+ if (iseq->body->line_info_table) {
+ first_lineno = FIX2INT(rb_iseq_first_lineno(iseq));
+ }
+ str = rb_sprintf("#<%s:%p@%"PRIsVALUE":%d%s>", cname, (void *)self,
+ iseq->body->location.path, first_lineno, is_lambda);
}
- str = rb_sprintf("#<%s:%p@%"PRIsVALUE":%d%s>", cname, (void *)self,
- iseq->body->location.path, first_lineno, is_lambda);
- }
- else if (SYMBOL_P(iseq)) {
+ break;
+ case block_type_symbol:
str = rb_sprintf("#<%s:%p(&%+"PRIsVALUE")%s>", cname, (void *)self,
- (VALUE)iseq, is_lambda);
- }
- else {
- str = rb_sprintf("#<%s:%p%s>", cname, (void *)proc->block.iseq,
+ block->as.symbol, is_lambda);
+ break;
+ case block_type_ifunc:
+ str = rb_sprintf("#<%s:%p%s>", cname, proc->block.as.captured.code.ifunc,
is_lambda);
+ break;
}
if (OBJ_TAINTED(self)) {
@@ -1189,6 +1255,14 @@ proc_to_s(VALUE self)
return str;
}
+static VALUE
+proc_to_s(VALUE self)
+{
+ const rb_proc_t *proc;
+ GetProcPtr(self, proc);
+ return proc_to_s_(self, proc);
+}
+
/*
* call-seq:
* prc.to_proc -> proc
@@ -1791,16 +1865,19 @@ rb_mod_define_method(int argc, VALUE *argv, VALUE mod)
body = rb_block_lambda();
#else
rb_thread_t *th = GET_THREAD();
- rb_block_t *block = rb_vm_control_frame_block_ptr(th->cfp);
- if (!block) rb_raise(rb_eArgError, proc_without_block);
+ VALUE block_handler = rb_vm_frame_block_handler(th->cfp);
+ if (block_handler == VM_BLOCK_HANDLER_NONE) rb_raise(rb_eArgError, proc_without_block);
- body = block->proc;
-
- if (SYMBOL_P(body)) {
- body = rb_sym_to_proc(body);
- }
- else if (!body) {
- body = rb_vm_make_proc_lambda(th, block, rb_cProc, TRUE);
+ switch (vm_block_handler_type(block_handler)) {
+ case block_handler_type_proc:
+ body = VM_BH_TO_PROC(block_handler);
+ break;
+ case block_handler_type_symbol:
+ body = rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
+ break;
+ case block_handler_type_iseq:
+ case block_handler_type_ifunc:
+ body = rb_vm_make_proc_lambda(th, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc, TRUE);
}
#endif
}
@@ -1842,14 +1919,14 @@ rb_mod_define_method(int argc, VALUE *argv, VALUE mod)
RB_GC_GUARD(body);
}
else {
- rb_proc_t *proc;
- body = proc_dup(body);
- GetProcPtr(body, proc);
- if (RUBY_VM_NORMAL_ISEQ_P(proc->block.iseq)) {
+ VALUE procval = proc_dup(body);
+ if (vm_proc_iseq(procval) != NULL) {
+ rb_proc_t *proc;
+ GetProcPtr(procval, proc);
proc->is_lambda = TRUE;
proc->is_from_method = TRUE;
}
- rb_add_method(mod, id, VM_METHOD_TYPE_BMETHOD, (void *)body, scope_visi->method_visi);
+ rb_add_method(mod, id, VM_METHOD_TYPE_BMETHOD, (void *)procval, scope_visi->method_visi);
if (scope_visi->module_func) {
rb_add_method(rb_singleton_class(mod), id, VM_METHOD_TYPE_BMETHOD, (void *)body, METHOD_VISI_PUBLIC);
}
@@ -1963,8 +2040,8 @@ method_clone(VALUE self)
VALUE
rb_method_call(int argc, const VALUE *argv, VALUE method)
{
- VALUE proc = rb_block_given_p() ? rb_block_proc() : Qnil;
- return rb_method_call_with_block(argc, argv, method, proc);
+ VALUE procval = rb_block_given_p() ? rb_block_proc() : Qnil;
+ return rb_method_call_with_block(argc, argv, method, procval);
}
static const rb_callable_method_entry_t *
@@ -1976,16 +2053,16 @@ method_callable_method_entry(const struct METHOD *data)
static inline VALUE
call_method_data(rb_thread_t *th, const struct METHOD *data,
- int argc, const VALUE *argv, VALUE pass_procval)
+ int argc, const VALUE *argv, VALUE passed_procval)
{
- th->passed_block = passed_block(pass_procval);
+ vm_passed_block_handler_set(th, proc_to_block_handler(passed_procval));
return rb_vm_call(th, data->recv, data->me->called_id, argc, argv,
method_callable_method_entry(data));
}
static VALUE
call_method_data_safe(rb_thread_t *th, const struct METHOD *data,
- int argc, const VALUE *argv, VALUE pass_procval,
+ int argc, const VALUE *argv, VALUE passed_procval,
int safe)
{
VALUE result = Qnil; /* OK */
@@ -1995,7 +2072,7 @@ call_method_data_safe(rb_thread_t *th, const struct METHOD *data,
if ((state = TH_EXEC_TAG()) == 0) {
/* result is used only if state == 0, no exceptions is caught. */
/* otherwise it doesn't matter even if clobbered. */
- NO_CLOBBERED(result) = call_method_data(th, data, argc, argv, pass_procval);
+ NO_CLOBBERED(result) = call_method_data(th, data, argc, argv, passed_procval);
}
TH_POP_TAG();
rb_set_safe_level_force(safe);
@@ -2005,7 +2082,7 @@ call_method_data_safe(rb_thread_t *th, const struct METHOD *data,
}
VALUE
-rb_method_call_with_block(int argc, const VALUE *argv, VALUE method, VALUE pass_procval)
+rb_method_call_with_block(int argc, const VALUE *argv, VALUE method, VALUE passed_procval)
{
const struct METHOD *data;
rb_thread_t *const th = GET_THREAD();
@@ -2019,10 +2096,10 @@ rb_method_call_with_block(int argc, const VALUE *argv, VALUE method, VALUE pass_
int safe = rb_safe_level();
if (safe < safe_level_to_run) {
rb_set_safe_level_force(safe_level_to_run);
- return call_method_data_safe(th, data, argc, argv, pass_procval, safe);
+ return call_method_data_safe(th, data, argc, argv, passed_procval, safe);
}
}
- return call_method_data(th, data, argc, argv, pass_procval);
+ return call_method_data(th, data, argc, argv, passed_procval);
}
/**********************************************************************
@@ -2627,8 +2704,18 @@ localjump_reason(VALUE exc)
rb_cref_t *rb_vm_cref_new_toplevel(void); /* vm.c */
+static inline void
+env_write(VALUE env, const VALUE *ep, int index, VALUE v)
+{
+ VM_ASSERT(VM_ENV_ESCAPED_P(ep));
+ VM_ASSERT(env == VM_ENV_ENVVAL(ep));
+ VM_ASSERT(vm_env_ep(env) == ep);
+
+ RB_OBJ_WRITE(env, &ep[index], v);
+}
+
static VALUE
-env_clone(VALUE envval, VALUE receiver, const rb_cref_t *cref)
+env_clone(VALUE envval, const rb_cref_t *cref)
{
VALUE newenvval = TypedData_Wrap_Struct(RBASIC_CLASS(envval), RTYPEDDATA_TYPE(envval), 0);
rb_env_t *env, *newenv;
@@ -2642,9 +2729,11 @@ env_clone(VALUE envval, VALUE receiver, const rb_cref_t *cref)
envsize = sizeof(rb_env_t) + (env->env_size - 1) * sizeof(VALUE);
newenv = xmalloc(envsize);
memcpy(newenv, env, envsize);
+ VM_ASSERT(env->ep > env->env);
+ newenv->ep = &newenv->env[env->ep - env->env];
+ VM_FORCE_WRITE(&newenv->ep[VM_ENV_DATA_INDEX_ENV], newenvval);
RTYPEDDATA_DATA(newenvval) = newenv;
- newenv->block.self = receiver;
- newenv->block.ep[-1] = (VALUE)cref;
+ env_write(newenvval, newenv->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)cref);
return newenvval;
}
@@ -2666,31 +2755,61 @@ env_clone(VALUE envval, VALUE receiver, const rb_cref_t *cref)
static VALUE
proc_binding(VALUE self)
{
- VALUE bindval, envval;
- const rb_proc_t *proc;
- const rb_iseq_t *iseq;
+ VALUE bindval, envval = Qundef, binding_self = Qundef;
rb_binding_t *bind;
+ const rb_proc_t *proc;
+ const rb_iseq_t *iseq = NULL;
+ const struct rb_block *block;
+ const rb_env_t *env;
GetProcPtr(self, proc);
- envval = rb_vm_proc_envval(proc);
- iseq = proc->block.iseq;
- if (SYMBOL_P(iseq)) goto error;
- if (RUBY_VM_IFUNC_P(iseq)) {
- struct vm_ifunc *ifunc = (struct vm_ifunc *)iseq;
- if (IS_METHOD_PROC_IFUNC(ifunc)) {
- VALUE method = (VALUE)ifunc->data;
- envval = env_clone(envval, method_receiver(method), method_cref(method));
- iseq = rb_method_iseq(method);
- }
- else {
- error:
- rb_raise(rb_eArgError, "Can't create Binding from C level Proc");
+ block = &proc->block;
+
+ again:
+ switch (vm_block_type(block)) {
+ case block_type_iseq:
+ iseq = block->as.captured.code.iseq;
+ binding_self = block->as.captured.self;
+ envval = VM_ENV_ENVVAL(block->as.captured.ep);
+ break;
+ case block_type_proc:
+ GetProcPtr(block->as.proc, proc);
+ block = &proc->block;
+ goto again;
+ case block_type_symbol:
+ goto error;
+ case block_type_ifunc:
+ {
+ const struct vm_ifunc *ifunc = block->as.captured.code.ifunc;
+ if (IS_METHOD_PROC_IFUNC(ifunc)) {
+ VALUE method = (VALUE)ifunc->data;
+ rb_env_t *newenv;
+
+ iseq = rb_method_iseq(method);
+ envval = VM_ENV_ENVVAL(block->as.captured.ep);
+ envval = env_clone(envval, method_cref(method));
+ binding_self = method_receiver(method);
+
+ GetEnvPtr(envval, newenv);
+ /* set empty iseq */
+ newenv->iseq = rb_iseq_new(NULL, rb_str_new2("<empty iseq>"), rb_str_new2("<empty_iseq>"), Qnil, 0, ISEQ_TYPE_TOP);
+ break;
+ }
+ else {
+ error:
+ rb_raise(rb_eArgError, "Can't create Binding from C level Proc");
+ return Qnil;
+ }
}
}
bindval = rb_binding_alloc(rb_cBinding);
GetBindingPtr(bindval, bind);
- bind->env = envval;
+ GetEnvPtr(envval, env);
+
+ bind->block.as.captured.self = binding_self;
+ bind->block.as.captured.code.iseq = env->iseq;
+ bind->block.as.captured.ep = env->ep;
if (iseq) {
rb_iseq_check(iseq);
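Editor's note (not part of the patch): with Proc, Binding and RubyVM::Env now
write-barrier protected, a store into a local of an escaped (heap-materialized)
environment must go through RB_OBJ_WRITE against the owning env object, as env_write()
above and vm_env_write() used by setlocal in insns.def do; a bare `*ptr = val` would
bypass the barrier. A minimal sketch of that pattern follows; the helper name is
hypothetical and the escaped/not-escaped split only illustrates the idea, not the
patch's exact implementation.

    /* editorial sketch, not a hunk from this commit */
    static void
    env_local_store(const VALUE *ep, int idx, VALUE val)
    {
        if (VM_ENV_ESCAPED_P(ep)) {
            /* ep belongs to a materialized env object; tell the
             * generational GC about the new reference. */
            RB_OBJ_WRITE(VM_ENV_ENVVAL(ep), &ep[idx], val);
        }
        else {
            /* still a plain VM-stack frame; a direct store suffices
             * (VM_FORCE_WRITE casts away the const on ep). */
            VM_FORCE_WRITE(&ep[idx], val);
        }
    }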
diff --git a/process.c b/process.c
index cb253c2183..8406405755 100644
--- a/process.c
+++ b/process.c
@@ -3680,7 +3680,6 @@ rb_f_fork(VALUE obj)
rb_thread_atfork();
if (rb_block_given_p()) {
int status;
-
rb_protect(rb_yield, Qundef, &status);
ruby_stop(status);
}
diff --git a/ruby.c b/ruby.c
index 4c1e7844bf..797fa79774 100644
--- a/ruby.c
+++ b/ruby.c
@@ -639,13 +639,10 @@ require_libraries(VALUE *req_list)
*req_list = 0;
}
-static rb_block_t*
+static const struct rb_block*
toplevel_context(rb_binding_t *bind)
{
- rb_env_t *env;
-
- GetEnvPtr(bind->env, env);
- return &env->block;
+ return &bind->block;
}
static void
@@ -1447,7 +1444,7 @@ process_options(int argc, char **argv, struct cmdline_options *opt)
char fbuf[MAXPATHLEN];
int i = (int)proc_options(argc, argv, opt, 0);
rb_binding_t *toplevel_binding;
- rb_block_t *base_block;
+ const struct rb_block *base_block;
argc -= i;
argv += i;
@@ -1700,7 +1697,7 @@ process_options(int argc, char **argv, struct cmdline_options *opt)
path = rb_realpath_internal(Qnil, opt->script_name, 1);
}
base_block = toplevel_context(toplevel_binding);
- iseq = rb_iseq_new_main(tree, opt->script_name, path, base_block->iseq);
+ iseq = rb_iseq_new_main(tree, opt->script_name, path, vm_block_iseq(base_block));
}
if (opt->dump & DUMP_BIT(insns)) {
diff --git a/string.c b/string.c
index 4a24e5d67c..a5b1279084 100644
--- a/string.c
+++ b/string.c
@@ -9459,7 +9459,7 @@ sym_to_sym(VALUE sym)
}
VALUE
-rb_sym_proc_call(VALUE args, VALUE sym, int argc, const VALUE *argv, VALUE passed_proc)
+rb_sym_proc_call(ID mid, int argc, const VALUE *argv, VALUE passed_proc)
{
VALUE obj;
@@ -9467,7 +9467,7 @@ rb_sym_proc_call(VALUE args, VALUE sym, int argc, const VALUE *argv, VALUE passe
rb_raise(rb_eArgError, "no receiver given");
}
obj = argv[0];
- return rb_funcall_with_block(obj, (ID)sym, argc - 1, argv + 1, passed_proc);
+ return rb_funcall_with_block(obj, mid, argc - 1, argv + 1, passed_proc);
}
#if 0
diff --git a/thread.c b/thread.c
index 023405bc89..6404bd27d7 100644
--- a/thread.c
+++ b/thread.c
@@ -587,10 +587,12 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_s
if (!th->first_func) {
GetProcPtr(th->first_proc, proc);
th->errinfo = Qnil;
- th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
+ th->root_lep = rb_vm_ep_local_ep(vm_proc_ep(th->first_proc));
th->root_svar = Qfalse;
EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, Qundef);
- th->value = rb_vm_invoke_proc(th, proc, (int)RARRAY_LEN(args), RARRAY_CONST_PTR(args), 0);
+ th->value = rb_vm_invoke_proc(th, proc,
+ (int)RARRAY_LEN(args), RARRAY_CONST_PTR(args),
+ VM_BLOCK_HANDLER_NONE);
EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_END, th->self, 0, 0, Qundef);
}
else {
diff --git a/tool/mk_call_iseq_optimized.rb b/tool/mk_call_iseq_optimized.rb
index 7df36eb850..f04917cceb 100644
--- a/tool/mk_call_iseq_optimized.rb
+++ b/tool/mk_call_iseq_optimized.rb
@@ -10,7 +10,7 @@ puts <<EOS
EOS
P = (0..3)
-L = (1..6)
+L = (0..5)
def fname param, local
"vm_call_iseq_setup_normal_0start_#{param}params_#{local}locals"
@@ -48,8 +48,8 @@ vm_call_iseq_setup_func(const struct rb_call_info *ci, const int param_size, con
else {
if (param_size <= #{P.end} &&
local_size <= #{L.end}) {
- VM_ASSERT(local_size != 0);
- return vm_call_iseq_handlers[param_size][local_size-1];
+ VM_ASSERT(local_size >= 0);
+ return vm_call_iseq_handlers[param_size][local_size];
}
return &vm_call_iseq_setup_normal_0start;
}
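Because local_table_size no longer counts the reserved env slots, an iseq can legitimately have zero locals, so the generated table of specialized setup handlers is indexed by local_size directly rather than local_size - 1. A rough standalone illustration of that lookup, with placeholder strings standing in for the generated functions:

    #include <stdio.h>

    /* One slot per (parameter count, local count) pair; locals 0..5. */
    static const char *handlers[4][6];

    static const char *
    lookup(int param_size, int local_size)
    {
        if (param_size <= 3 && local_size <= 5)
            return handlers[param_size][local_size];   /* was [local_size - 1] */
        return "generic setup";
    }

    int main(void)
    {
        handlers[2][0] = "specialized: 2 params, 0 locals";
        printf("%s\n", lookup(2, 0));
        return 0;
    }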
diff --git a/vm.c b/vm.c
index c900eec570..66eee02bd8 100644
--- a/vm.c
+++ b/vm.c
@@ -8,10 +8,6 @@
**********************************************************************/
-#ifndef VM_CHECK_MODE
-#define VM_CHECK_MODE 0
-#endif
-
#include "internal.h"
#include "ruby/vm.h"
#include "ruby/st.h"
@@ -25,18 +21,18 @@
VALUE rb_str_concat_literals(size_t, const VALUE*);
-PUREFUNC(static inline VALUE *VM_EP_LEP(VALUE *));
-static inline VALUE *
-VM_EP_LEP(VALUE *ep)
+PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *));
+static inline const VALUE *
+VM_EP_LEP(const VALUE *ep)
{
- while (!VM_EP_LEP_P(ep)) {
- ep = VM_EP_PREV_EP(ep);
+ while (!VM_ENV_LOCAL_P(ep)) {
+ ep = VM_ENV_PREV_EP(ep);
}
return ep;
}
-static inline rb_control_frame_t *
-rb_vm_search_cf_from_ep(const rb_thread_t * const th, rb_control_frame_t *cfp, const VALUE * const ep)
+static inline const rb_control_frame_t *
+rb_vm_search_cf_from_ep(const rb_thread_t * const th, const rb_control_frame_t *cfp, const VALUE * const ep)
{
if (!ep) {
return NULL;
@@ -55,37 +51,125 @@ rb_vm_search_cf_from_ep(const rb_thread_t * const th, rb_control_frame_t *cfp, c
}
}
-VALUE *
-rb_vm_ep_local_ep(VALUE *ep)
+const VALUE *
+rb_vm_ep_local_ep(const VALUE *ep)
{
return VM_EP_LEP(ep);
}
-PUREFUNC(static inline VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
-static inline VALUE *
+PUREFUNC(static inline const VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
+static inline const VALUE *
VM_CF_LEP(const rb_control_frame_t * const cfp)
{
return VM_EP_LEP(cfp->ep);
}
-static inline VALUE *
+static inline const VALUE *
VM_CF_PREV_EP(const rb_control_frame_t * const cfp)
{
- return VM_EP_PREV_EP(cfp->ep);
+ return VM_ENV_PREV_EP(cfp->ep);
+}
+
+PUREFUNC(static inline VALUE VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp));
+static inline VALUE
+VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
+{
+ const VALUE *ep = VM_CF_LEP(cfp);
+ return VM_ENV_BLOCK_HANDLER(ep);
+}
+
+VALUE
+rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
+{
+ return VM_CF_BLOCK_HANDLER(cfp);
+}
+
+#if VM_CHECK_MODE > 0
+static int
+VM_CFP_IN_HEAP_P(const rb_thread_t *th, const rb_control_frame_t *cfp)
+{
+ const VALUE *start = th->stack;
+ const VALUE *end = (VALUE *)th->stack + th->stack_size;
+ if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
+ return FALSE;
+ }
+ else {
+ return TRUE;
+ }
+}
+
+static int envval_p(VALUE envval);
+
+static int
+VM_EP_IN_HEAP_P(const rb_thread_t *th, const VALUE *ep)
+{
+ const VALUE *start = th->stack;
+ const VALUE *end = (VALUE *)th->cfp;
+ if (start <= ep && ep < end) {
+ return FALSE;
+ }
+ else {
+ return TRUE;
+ }
+}
+
+int
+vm_ep_in_heap_p_(const rb_thread_t *th, const VALUE *ep)
+{
+ if (VM_EP_IN_HEAP_P(th, ep)) {
+ VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */
+
+ if (envval != Qundef) {
+ rb_env_t *env;
+
+ VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
+ VM_ASSERT(envval_p(envval));
+ GetEnvPtr(envval, env);
+ VM_ASSERT(env->ep == ep);
+ }
+ return TRUE;
+ }
+ else {
+ return FALSE;
+ }
+}
+
+int
+rb_vm_ep_in_heap_p(const VALUE *ep)
+{
+ return vm_ep_in_heap_p_(GET_THREAD(), ep);
+}
+#endif
+
+static struct rb_captured_block *
+VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t *cfp)
+{
+ VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_THREAD(), cfp));
+ return (struct rb_captured_block *)&cfp->self;
}
-PUREFUNC(static inline rb_block_t *VM_CF_BLOCK_PTR(const rb_control_frame_t * const cfp));
-static inline rb_block_t *
-VM_CF_BLOCK_PTR(const rb_control_frame_t * const cfp)
+static rb_control_frame_t *
+VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
{
- VALUE *ep = VM_CF_LEP(cfp);
- return VM_EP_BLOCK_PTR(ep);
+ rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
+ VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_THREAD(), cfp));
+ VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 6 + (VM_DEBUG_BP_CHECK ? 1 : 0));
+ return cfp;
}
-rb_block_t *
-rb_vm_control_frame_block_ptr(const rb_control_frame_t *cfp)
+static int
+VM_BH_FROM_CFP_P(VALUE block_handler, const rb_control_frame_t *cfp)
{
- return VM_CF_BLOCK_PTR(cfp);
+ const struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
+ return VM_TAGGED_PTR_REF(block_handler, 0x03) == captured;
+}
+
+static VALUE
+vm_passed_block_handler(rb_thread_t *th)
+{
+ VALUE block_handler = th->passed_block_handler;
+ th->passed_block_handler = VM_BLOCK_HANDLER_NONE;
+ return block_handler;
}
static rb_cref_t *
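The two conversion helpers above lean on the slimmed control frame layout (pc, sp, iseq, self, ep, block_code): the last three slots double as a struct rb_captured_block, so the owning frame can be recovered by stepping back three VALUE-sized words from the captured block's address. A standalone sketch of that overlay, assuming pointer-sized VALUEs and using simplified mirror structs rather than the real ones:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t VALUE;

    /* Simplified mirrors of the layouts introduced by this patch. */
    struct captured_block { VALUE self; const VALUE *ep; const void *code; };
    struct control_frame  { const VALUE *pc; VALUE *sp; const void *iseq;
                            VALUE self; const VALUE *ep; const void *block_code; };

    int main(void)
    {
        struct control_frame cfp = {0};

        /* The captured-block view is just the tail of the frame... */
        struct captured_block *cap = (struct captured_block *)&cfp.self;

        /* ...so the frame sits three VALUE-sized words before it,
         * as in VM_CAPTURED_BLOCK_TO_CFP. */
        struct control_frame *back = (struct control_frame *)((VALUE *)cap - 3);

        assert(offsetof(struct control_frame, self) == 3 * sizeof(VALUE));
        printf("frame recovered from captured block: %s\n",
               back == &cfp ? "ok" : "mismatch");
        return 0;
    }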
@@ -181,6 +265,15 @@ vm_cref_dump(const char *mesg, const rb_cref_t *cref)
}
}
+static void
+vm_bind_update_env(rb_binding_t *bind, VALUE envval)
+{
+ rb_env_t *env;
+ GetEnvPtr(envval, env);
+ bind->block.as.captured.code.iseq = env->iseq;
+ bind->block.as.captured.ep = env->ep;
+}
+
#if VM_COLLECT_USAGE_DETAILS
static void vm_collect_usage_operand(int insn, int n, VALUE op);
static void vm_collect_usage_insn(int insn);
@@ -189,20 +282,18 @@ static void vm_collect_usage_register(int reg, int isset);
static VALUE vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
-static VALUE
-vm_invoke_bmethod(rb_thread_t *th, rb_proc_t *proc, VALUE self,
- int argc, const VALUE *argv, const rb_block_t *blockptr);
-static VALUE
-vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
- int argc, const VALUE *argv, const rb_block_t *blockptr);
+static VALUE vm_invoke_bmethod(rb_thread_t *th, rb_proc_t *proc, VALUE self,
+ int argc, const VALUE *argv, VALUE block_handler);
+static VALUE vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
+ int argc, const VALUE *argv, VALUE block_handler);
static rb_serial_t ruby_vm_global_method_state = 1;
static rb_serial_t ruby_vm_global_constant_state = 1;
static rb_serial_t ruby_vm_class_serial = 1;
#include "vm_insnhelper.h"
-#include "vm_insnhelper.c"
#include "vm_exec.h"
+#include "vm_insnhelper.c"
#include "vm_exec.c"
#include "vm_method.c"
@@ -350,20 +441,20 @@ vm_set_top_stack(rb_thread_t *th, const rb_iseq_t *iseq)
}
/* for return */
- vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH, th->top_self,
- VM_ENVVAL_BLOCK_PTR(0),
+ vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, th->top_self,
+ VM_BLOCK_HANDLER_NONE,
(VALUE)vm_cref_new_toplevel(th), /* cref or me */
- iseq->body->iseq_encoded, th->cfp->sp, iseq->body->local_size, iseq->body->stack_max);
+ iseq->body->iseq_encoded, th->cfp->sp, iseq->body->local_table_size, iseq->body->stack_max);
}
static void
-vm_set_eval_stack(rb_thread_t * th, const rb_iseq_t *iseq, const rb_cref_t *cref, rb_block_t *base_block)
+vm_set_eval_stack(rb_thread_t * th, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block)
{
vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
- base_block->self, VM_ENVVAL_PREV_EP_PTR(base_block->ep),
+ vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
(VALUE)cref, /* cref or me */
iseq->body->iseq_encoded,
- th->cfp->sp, iseq->body->local_size, iseq->body->stack_max);
+ th->cfp->sp, iseq->body->local_table_size, iseq->body->stack_max);
}
static void
@@ -371,16 +462,14 @@ vm_set_main_stack(rb_thread_t *th, const rb_iseq_t *iseq)
{
VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
rb_binding_t *bind;
- rb_env_t *env;
GetBindingPtr(toplevel_binding, bind);
- GetEnvPtr(bind->env, env);
- vm_set_eval_stack(th, iseq, 0, &env->block);
+ vm_set_eval_stack(th, iseq, 0, &bind->block);
/* save binding */
- if (iseq->body->local_size > 0) {
- bind->env = vm_make_env_object(th, th->cfp);
+ if (bind && iseq->body->local_table_size > 0) {
+ vm_bind_update_env(bind, vm_make_env_object(th, th->cfp));
}
}
@@ -422,7 +511,7 @@ vm_get_ruby_level_caller_cfp(const rb_thread_t *th, const rb_control_frame_t *cf
return (rb_control_frame_t *)cfp;
}
- if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
+ if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
break;
}
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
@@ -508,12 +597,13 @@ env_mark(void * const ptr)
/* TODO: should mark more restricted range */
RUBY_GC_INFO("env->env\n");
+ VM_ASSERT(VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ESCAPED));
+
rb_gc_mark_values((long)env->env_size, env->env);
+ VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
RUBY_MARK_UNLESS_NULL(rb_vm_env_prev_envval(env));
- RUBY_MARK_UNLESS_NULL(env->block.self);
- RUBY_MARK_UNLESS_NULL(env->block.proc);
- RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
+ RUBY_MARK_UNLESS_NULL((VALUE)env->iseq);
RUBY_MARK_LEAVE("env");
}
@@ -527,14 +617,39 @@ env_memsize(const void *ptr)
return size;
}
+#if VM_CHECK_MODE > 0
+static void
+env_free(void *ptr)
+{
+ if (ptr) {
+ rb_env_t * const env = ptr;
+ VM_ASSERT(VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ESCAPED));
+ free(env);
+ }
+}
+#else
+#define env_free RUBY_TYPED_DEFAULT_FREE
+#endif
+
static const rb_data_type_t env_data_type = {
"VM/env",
- {env_mark, RUBY_TYPED_DEFAULT_FREE, env_memsize,},
- 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
+ {env_mark, env_free, env_memsize,},
+ 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
};
-#define VM_EP_IN_HEAP_P(th, ep) (!((th)->stack <= (ep) && (ep) < ((th)->stack + (th)->stack_size)))
-#define VM_ENV_EP_ENVVAL(ep) ((ep)[1])
+#if VM_CHECK_MODE > 0
+static int
+envval_p(VALUE envval)
+{
+ if (rb_typeddata_is_kind_of(envval, &env_data_type)) {
+ return TRUE;
+ }
+ else {
+ rb_obj_info_dump(envval);
+ return FALSE;
+ }
+}
+#endif
static VALUE check_env_value(VALUE envval);
@@ -542,10 +657,10 @@ static int
check_env(rb_env_t * const env)
{
fprintf(stderr, "---\n");
- fprintf(stderr, "envptr: %p\n", (void *)&env->block.ep[0]);
- fprintf(stderr, "envval: %10p ", (void *)env->block.ep[1]);
- dp(env->block.ep[1]);
- fprintf(stderr, "ep: %10p\n", (void *)env->block.ep);
+ fprintf(stderr, "envptr: %p\n", (void *)&env->ep[0]);
+ fprintf(stderr, "envval: %10p ", (void *)env->ep[1]);
+ dp(env->ep[1]);
+ fprintf(stderr, "ep: %10p\n", (void *)env->ep);
if (rb_vm_env_prev_envval(env)) {
fprintf(stderr, ">>\n");
check_env_value(rb_vm_env_prev_envval(env));
@@ -567,67 +682,66 @@ check_env_value(VALUE envval)
return Qnil; /* unreachable */
}
-/* return FALSE if proc was already created */
-static int
-vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block, VALUE *procptr)
+static void
+vm_block_handler_escape(rb_thread_t *th, VALUE block_handler, VALUE *procvalptr)
{
- if (!block->proc) {
- *procptr = block->proc = rb_vm_make_proc(th, block, rb_cProc);
- return TRUE;
- }
- else if (SYMBOL_P(block->proc)) {
- *procptr = rb_sym_to_proc(block->proc);
- return TRUE;
- }
- else {
- *procptr = block->proc;
- return FALSE;
+ switch (vm_block_handler_type(block_handler)) {
+ case block_handler_type_ifunc:
+ case block_handler_type_iseq:
+ *procvalptr = rb_vm_make_proc(th, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
+ return;
+
+ case block_handler_type_symbol:
+ case block_handler_type_proc:
+ *procvalptr = block_handler;
+ return;
}
+ VM_UNREACHABLE(vm_block_handler_escape);
+ return;
}
static VALUE
vm_make_env_each(rb_thread_t *const th, rb_control_frame_t *const cfp)
{
- VALUE envval, blockprocval = 0;
- VALUE * const ep = cfp->ep;
+ VALUE envval, blockprocval = Qfalse;
+ const VALUE * const ep = cfp->ep;
rb_env_t *env;
- VALUE *new_ep;
+ const VALUE *new_ep;
int local_size, env_size;
- if (VM_EP_IN_HEAP_P(th, ep)) {
- return VM_ENV_EP_ENVVAL(ep);
+ if (VM_ENV_ESCAPED_P(ep)) {
+ return VM_ENV_ENVVAL(ep);
}
- if (!VM_EP_LEP_P(ep)) {
- VALUE *prev_ep = VM_EP_PREV_EP(ep);
+ if (!VM_ENV_LOCAL_P(ep)) {
+ const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
- if (!VM_EP_IN_HEAP_P(th, prev_ep)) {
+ if (!VM_ENV_ESCAPED_P(prev_ep)) {
rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
while (prev_cfp->ep != prev_ep) {
prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(prev_cfp);
- if (VM_CHECK_MODE > 0 && prev_cfp->ep == 0) rb_bug("invalid ep");
+ VM_ASSERT(prev_cfp->ep != NULL);
}
vm_make_env_each(th, prev_cfp);
- *ep = VM_ENVVAL_PREV_EP_PTR(prev_cfp->ep);
+ VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_SPECVAL], VM_GUARDED_PREV_EP(prev_cfp->ep));
}
}
else {
- rb_block_t *block = VM_EP_BLOCK_PTR(ep);
+ VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
- if (block && vm_make_proc_from_block(th, block, &blockprocval)) {
- rb_proc_t *p;
- GetProcPtr(blockprocval, p);
- *ep = VM_ENVVAL_BLOCK_PTR(&p->block);
+ if (block_handler != VM_BLOCK_HANDLER_NONE) {
+ vm_block_handler_escape(th, block_handler, &blockprocval);
+ VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
}
}
if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
- local_size = 1 /* cref/me */;
+ local_size = VM_ENV_DATA_SIZE;
}
else {
- local_size = cfp->iseq->body->local_size;
+ local_size = cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
}
/*
@@ -643,15 +757,14 @@ vm_make_env_each(rb_thread_t *const th, rb_control_frame_t *const cfp)
*/
env_size = local_size +
- 1 /* specval */ +
1 /* envval */ +
(blockprocval ? 1 : 0) /* blockprocval */;
-
envval = TypedData_Wrap_Struct(rb_cEnv, &env_data_type, 0);
env = xmalloc(sizeof(rb_env_t) + (env_size - 1 /* rb_env_t::env[1] */) * sizeof(VALUE));
env->env_size = env_size;
- MEMCPY(env->env, ep - local_size, VALUE, local_size + 1 /* specval */);
+ /* setup env */
+ MEMCPY((VALUE *)env->env, ep - (local_size - 1 /* specval */), VALUE, local_size);
#if 0
for (i = 0; i < local_size; i++) {
@@ -665,25 +778,23 @@ vm_make_env_each(rb_thread_t *const th, rb_control_frame_t *const cfp)
/* be careful not to trigger GC after this */
RTYPEDDATA_DATA(envval) = env;
- /*
+ new_ep = &env->env[local_size - 1 /* specval */];
+ RB_OBJ_WRITE(envval, &new_ep[1], envval);
+ if (blockprocval) RB_OBJ_WRITE(envval, &new_ep[2], blockprocval);
+ VM_ENV_FLAGS_SET(new_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
+
+ /*
* must happen after TypedData_Wrap_Struct to ensure penvval is markable
* in case object allocation triggers GC and clobbers penvval.
*/
- *ep = envval; /* GC mark */
-
- new_ep = &env->env[local_size];
- new_ep[1] = envval;
- if (blockprocval) new_ep[2] = blockprocval;
+ VM_STACK_ENV_WRITE(ep, 0, envval); /* GC mark */
- /* as Binding */
- env->block.self = cfp->self;
- env->block.ep = cfp->ep = new_ep;
- env->block.iseq = cfp->iseq;
- env->block.proc = 0;
+ /* setup env object */
+ env->ep = cfp->ep = new_ep;
+ env->iseq = cfp->iseq;
if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
- /* TODO */
- env->block.iseq = 0;
+ env->iseq = NULL;
}
return envval;
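With this rewrite of vm_make_env_each, an escaped env is one flat array: the frame's locals, then the three VM_ENV_DATA slots (me/cref, specval, flags), followed by the env object itself and, when a block was escaped to a Proc, that Proc. A small standalone sketch of the size and index arithmetic, reusing this patch's names as plain constants:

    #include <stdio.h>

    #define VM_ENV_DATA_SIZE 3   /* me/cref, specval, flags */

    int main(void)
    {
        int local_table_size = 4;   /* example: an iseq with 4 locals */
        int have_block_proc  = 1;   /* a passed block was escaped to a Proc */

        /* Mirrors the arithmetic in vm_make_env_each above. */
        int local_size = local_table_size + VM_ENV_DATA_SIZE;
        int env_size   = local_size + 1 /* envval */ + (have_block_proc ? 1 : 0);

        int new_ep = local_size - 1;   /* index of the flags slot in env[] */

        printf("env_size      = %d\n", env_size);             /* 9 */
        printf("flags   at env[%d] (ep[ 0])\n", new_ep);      /* 6 */
        printf("specval at env[%d] (ep[-1])\n", new_ep - 1);  /* 5 */
        printf("me/cref at env[%d] (ep[-2])\n", new_ep - 2);  /* 4 */
        printf("envval  at env[%d] (ep[ 1])\n", new_ep + 1);  /* 7 */
        printf("proc    at env[%d] (ep[ 2])\n", new_ep + 2);  /* 8 */
        return 0;
    }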
@@ -714,13 +825,13 @@ rb_vm_stack_to_heap(rb_thread_t *th)
VALUE
rb_vm_env_prev_envval(const rb_env_t *env)
{
- const VALUE *ep = env->block.ep;
+ const VALUE *ep = env->ep;
- if (VM_EP_LEP_P(ep)) {
+ if (VM_ENV_LOCAL_P(ep)) {
return Qfalse;
}
else {
- return VM_ENV_EP_ENVVAL(VM_EP_PREV_EP(ep));
+ return VM_ENV_ENVVAL(VM_ENV_PREV_EP(ep));
}
}
@@ -740,7 +851,7 @@ collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list
{
VALUE prev_envval;
- while (collect_local_variables_in_iseq(env->block.iseq, vars), (prev_envval = rb_vm_env_prev_envval(env)) != Qfalse) {
+ while (collect_local_variables_in_iseq(env->iseq, vars), (prev_envval = rb_vm_env_prev_envval(env)) != Qfalse) {
GetEnvPtr(prev_envval, env);
}
}
@@ -748,9 +859,9 @@ collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list
static int
vm_collect_local_variables_in_heap(rb_thread_t *th, const VALUE *ep, const struct local_var_list *vars)
{
- if (VM_EP_IN_HEAP_P(th, ep)) {
+ if (VM_ENV_ESCAPED_P(ep)) {
rb_env_t *env;
- GetEnvPtr(VM_ENV_EP_ENVVAL(ep), env);
+ GetEnvPtr(VM_ENV_ENVVAL(ep), env);
collect_local_variables_in_env(env, vars);
return 1;
}
@@ -781,15 +892,24 @@ rb_iseq_local_variables(const rb_iseq_t *iseq)
/* Proc */
-static inline VALUE
-rb_proc_create(VALUE klass, const rb_block_t *block,
- int8_t safe_level, int8_t is_from_method, int8_t is_lambda)
+VALUE
+rb_proc_create_from_captured(VALUE klass,
+ const struct rb_captured_block *captured,
+ enum rb_block_type block_type,
+ int8_t safe_level, int8_t is_from_method, int8_t is_lambda)
{
VALUE procval = rb_proc_alloc(klass);
rb_proc_t *proc = RTYPEDDATA_DATA(procval);
- proc->block = *block;
- proc->block.proc = procval;
+ VM_ASSERT(VM_EP_IN_HEAP_P(GET_THREAD(), captured->ep));
+
+ /* copy block */
+ RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self);
+ RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, captured->code.val);
+ *((const VALUE **)&proc->block.as.captured.ep) = captured->ep;
+ RB_OBJ_WRITTEN(procval, Qundef, VM_ENV_ENVVAL(captured->ep));
+
+ vm_block_type_set(&proc->block, block_type);
proc->safe_level = safe_level;
proc->is_from_method = is_from_method;
proc->is_lambda = is_lambda;
@@ -798,40 +918,61 @@ rb_proc_create(VALUE klass, const rb_block_t *block,
}
VALUE
-rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
-{
- return rb_vm_make_proc_lambda(th, block, klass, 0);
-}
-
-VALUE
-rb_vm_make_proc_lambda(rb_thread_t *th, const rb_block_t *block, VALUE klass, int8_t is_lambda)
+rb_proc_create(VALUE klass, const struct rb_block *block,
+ int8_t safe_level, int8_t is_from_method, int8_t is_lambda)
{
- VALUE procval;
- rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);
-
- if (block->proc) {
- rb_bug("rb_vm_make_proc: Proc value is already created.");
- }
+ VALUE procval = rb_proc_alloc(klass);
+ rb_proc_t *proc = RTYPEDDATA_DATA(procval);
- vm_make_env_object(th, cfp);
- procval = rb_proc_create(klass, block, (int8_t)th->safe_level, 0, is_lambda);
+ VM_ASSERT(VM_EP_IN_HEAP_P(GET_THREAD(), vm_block_ep(block)));
- if (VMDEBUG) {
- if (th->stack < block->ep && block->ep < th->stack + th->stack_size) {
- rb_bug("invalid ptr: block->ep");
- }
+ /* copy block */
+ switch (vm_block_type(block)) {
+ case block_type_iseq:
+ case block_type_ifunc:
+ RB_OBJ_WRITE(procval, &proc->block.as.captured.self, block->as.captured.self);
+ RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, block->as.captured.code.val);
+ *((const VALUE **)&proc->block.as.captured.ep) = block->as.captured.ep;
+ RB_OBJ_WRITTEN(procval, Qundef, VM_ENV_ENVVAL(block->as.captured.ep));
+ break;
+ case block_type_symbol:
+ RB_OBJ_WRITE(procval, &proc->block.as.symbol, block->as.symbol);
+ break;
+ case block_type_proc:
+ RB_OBJ_WRITE(procval, &proc->block.as.proc, block->as.proc);
+ break;
}
+ vm_block_type_set(&proc->block, block->type);
+ proc->safe_level = safe_level;
+ proc->is_from_method = is_from_method;
+ proc->is_lambda = is_lambda;
return procval;
}
VALUE
-rb_vm_proc_envval(const rb_proc_t *proc)
+rb_vm_make_proc(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass)
{
- VALUE envval = VM_ENV_EP_ENVVAL(proc->block.ep);
- return envval;
+ return rb_vm_make_proc_lambda(th, captured, klass, FALSE);
}
+VALUE
+rb_vm_make_proc_lambda(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
+{
+ VALUE procval;
+
+ if (!VM_ENV_FLAGS(captured->ep, VM_ENV_FLAG_ESCAPED)) {
+ rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
+ vm_make_env_object(th, cfp);
+ }
+ VM_ASSERT(VM_EP_IN_HEAP_P(th, captured->ep));
+ VM_ASSERT(RB_TYPE_P(captured->code.val, T_IMEMO));
+
+ procval = rb_proc_create_from_captured(klass, captured,
+ imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc,
+ (int8_t)th->safe_level, FALSE, is_lambda);
+ return procval;
+}
/* Binding */
@@ -857,19 +998,22 @@ rb_vm_make_binding(rb_thread_t *th, const rb_control_frame_t *src_cfp)
bindval = rb_binding_alloc(rb_cBinding);
GetBindingPtr(bindval, bind);
- bind->env = envval;
+ vm_bind_update_env(bind, envval);
+ bind->block.as.captured.self = cfp->self;
+ bind->block.as.captured.code.iseq = cfp->iseq;
bind->path = ruby_level_cfp->iseq->body->location.path;
bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);
return bindval;
}
-VALUE *
+const VALUE *
rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars)
{
- VALUE envval = bind->env, path = bind->path;
+ VALUE envval;
+ VALUE path = bind->path;
+ const struct rb_block *base_block;
rb_env_t *env;
- rb_block_t *base_block;
rb_thread_t *th = GET_THREAD();
const rb_iseq_t *base_iseq, *iseq;
NODE *node = 0;
@@ -878,10 +1022,8 @@ rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars)
if (dyncount < 0) return 0;
- GetEnvPtr(envval, env);
-
- base_block = &env->block;
- base_iseq = base_block->iseq;
+ base_block = &bind->block;
+ base_iseq = vm_block_iseq(base_block);
if (dyncount >= numberof(minibuf)) dyns = ALLOCV_N(ID, idtmp, dyncount + 1);
@@ -900,42 +1042,41 @@ rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars)
ALLOCV_END(idtmp);
vm_set_eval_stack(th, iseq, 0, base_block);
- bind->env = vm_make_env_object(th, th->cfp);
+ vm_bind_update_env(bind, envval = vm_make_env_object(th, th->cfp));
rb_vm_pop_frame(th);
- GetEnvPtr(bind->env, env);
+ GetEnvPtr(envval, env);
return env->env;
}
/* C -> Ruby: block */
static inline VALUE
-invoke_block(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const rb_block_t *block, const rb_cref_t *cref, int type, int opt_pc)
+invoke_block(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
{
int arg_size = iseq->body->param.size;
vm_push_frame(th, iseq, type | VM_FRAME_FLAG_FINISH, self,
- VM_ENVVAL_PREV_EP_PTR(block->ep),
+ VM_GUARDED_PREV_EP(captured->ep),
(VALUE)cref, /* cref or method */
iseq->body->iseq_encoded + opt_pc,
- th->cfp->sp + arg_size, iseq->body->local_size - arg_size,
+ th->cfp->sp + arg_size, iseq->body->local_table_size - arg_size,
iseq->body->stack_max);
-
return vm_exec(th);
}
static VALUE
-invoke_bmethod(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const rb_block_t *block, const rb_callable_method_entry_t *me, int type, int opt_pc)
+invoke_bmethod(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
{
/* bmethod */
int arg_size = iseq->body->param.size;
VALUE ret;
vm_push_frame(th, iseq, type | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_BMETHOD, self,
- VM_ENVVAL_PREV_EP_PTR(block->ep),
- (VALUE)me, /* cref or method (TODO: can we ignore cref?) */
+ VM_GUARDED_PREV_EP(captured->ep),
+ (VALUE)me,
iseq->body->iseq_encoded + opt_pc,
- th->cfp->sp + arg_size, iseq->body->local_size - arg_size,
+ th->cfp->sp + arg_size, iseq->body->local_table_size - arg_size,
iseq->body->stack_max);
RUBY_DTRACE_METHOD_ENTRY_HOOK(th, me->owner, me->called_id);
@@ -947,95 +1088,114 @@ invoke_bmethod(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const rb_bloc
}
static inline VALUE
-invoke_block_from_c_0(rb_thread_t *th, const rb_block_t *block,
- VALUE self, int argc, const VALUE *argv, const rb_block_t *blockptr,
- const rb_cref_t *cref, const int splattable)
+invoke_iseq_block_from_c(rb_thread_t *th, const struct rb_captured_block *captured,
+ VALUE self, int argc, const VALUE *argv, VALUE passed_block_handler,
+ const rb_cref_t *cref, const int splattable, int is_lambda)
{
- if (UNLIKELY(!RTEST(block->iseq))) {
- return Qnil;
- }
- else if (LIKELY(RUBY_VM_NORMAL_ISEQ_P(block->iseq))) {
- const rb_iseq_t *iseq = rb_iseq_check(block->iseq);
- int i, opt_pc;
- int type = block_proc_is_lambda(block->proc) ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;
- VALUE *sp = th->cfp->sp;
- const rb_callable_method_entry_t *me = th->passed_bmethod_me;
-
- th->passed_bmethod_me = NULL;
-
- for (i=0; i<argc; i++) {
- sp[i] = argv[i];
- }
+ const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
+ int i, opt_pc;
+ VALUE type = is_lambda ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;
+ VALUE *sp = th->cfp->sp;
+ const rb_callable_method_entry_t *me = th->passed_bmethod_me;
+ th->passed_bmethod_me = NULL;
- opt_pc = vm_yield_setup_args(th, iseq, argc, sp, blockptr,
- (type == VM_FRAME_MAGIC_LAMBDA ? (splattable ? arg_setup_lambda : arg_setup_method) : arg_setup_block));
+ for (i=0; i<argc; i++) {
+ sp[i] = argv[i];
+ }
- if (me == NULL) {
- return invoke_block(th, iseq, self, block, cref, type, opt_pc);
- }
- else {
- return invoke_bmethod(th, iseq, self, block, me, type, opt_pc);
- }
+ opt_pc = vm_yield_setup_args(th, iseq, argc, sp, passed_block_handler,
+ (type == VM_FRAME_MAGIC_LAMBDA ? (splattable ? arg_setup_lambda : arg_setup_method) : arg_setup_block));
+ if (me == NULL) {
+ return invoke_block(th, iseq, self, captured, cref, type, opt_pc);
}
else {
- return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
+ return invoke_bmethod(th, iseq, self, captured, me, type, opt_pc);
}
}
-static VALUE
-invoke_block_from_c_splattable(rb_thread_t *th, const rb_block_t *block,
- VALUE self, int argc, const VALUE *argv,
- const rb_block_t *blockptr, const rb_cref_t *cref)
-{
- return invoke_block_from_c_0(th, block, self, argc, argv, blockptr, cref, TRUE);
-}
-
-static VALUE
-invoke_block_from_c_unsplattable(rb_thread_t *th, const rb_block_t *block,
- VALUE self, int argc, const VALUE *argv,
- const rb_block_t *blockptr, const rb_cref_t *cref)
-{
- return invoke_block_from_c_0(th, block, self, argc, argv, blockptr, cref, FALSE);
+static inline VALUE
+invoke_block_from_c_splattable(rb_thread_t *th, VALUE block_handler,
+ int argc, const VALUE *argv,
+ VALUE passed_block_handler, const rb_cref_t *cref)
+{
+ int is_lambda = FALSE;
+ again:
+ switch (vm_block_handler_type(block_handler)) {
+ case block_handler_type_iseq:
+ {
+ const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
+ return invoke_iseq_block_from_c(th, captured, captured->self, argc, argv, passed_block_handler, cref, TRUE, is_lambda);
+ }
+ case block_handler_type_ifunc:
+ return vm_yield_with_cfunc(th, VM_BH_TO_IFUNC_BLOCK(block_handler), VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
+ argc, argv, passed_block_handler);
+ case block_handler_type_symbol:
+ return vm_yield_with_symbol(th, VM_BH_TO_SYMBOL(block_handler), argc, argv, passed_block_handler);
+ case block_handler_type_proc:
+ is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
+ block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
+ goto again;
+ }
+ VM_UNREACHABLE(invoke_block_from_c_splattable);
+ return Qundef;
}
-
-static inline const rb_block_t *
-check_block(rb_thread_t *th)
+static inline VALUE
+check_block_handler(rb_thread_t *th)
{
- const rb_block_t *blockptr = VM_CF_BLOCK_PTR(th->cfp);
-
- if (UNLIKELY(blockptr == 0)) {
+ VALUE block_handler = VM_CF_BLOCK_HANDLER(th->cfp);
+ VM_ASSERT(vm_block_handler_verify(block_handler));
+ if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
rb_vm_localjump_error("no block given", Qnil, 0);
}
- return blockptr;
+ return block_handler;
}
static VALUE
vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const rb_cref_t *cref)
{
- const rb_block_t *blockptr = check_block(th);
- return invoke_block_from_c_splattable(th, blockptr, blockptr->self, argc, argv, NULL, cref);
+ return invoke_block_from_c_splattable(th, check_block_handler(th), argc, argv, VM_BLOCK_HANDLER_NONE, cref);
}
static VALUE
vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
{
- const rb_block_t *blockptr = check_block(th);
- return invoke_block_from_c_splattable(th, blockptr, blockptr->self, argc, argv, NULL, NULL);
+ return invoke_block_from_c_splattable(th, check_block_handler(th), argc, argv, VM_BLOCK_HANDLER_NONE, NULL);
}
static VALUE
-vm_yield_with_block(rb_thread_t *th, int argc, const VALUE *argv, const rb_block_t *blockargptr)
+vm_yield_with_block(rb_thread_t *th, int argc, const VALUE *argv, VALUE block_handler)
+{
+ return invoke_block_from_c_splattable(th, check_block_handler(th), argc, argv, block_handler, NULL);
+}
+
+static inline VALUE
+invoke_block_from_c_unsplattable(rb_thread_t *th, const struct rb_block *block,
+ VALUE self, int argc, const VALUE *argv,
+ VALUE passed_block_handler, int is_lambda)
{
- const rb_block_t *blockptr = check_block(th);
- return invoke_block_from_c_splattable(th, blockptr, blockptr->self, argc, argv, blockargptr, NULL);
+ again:
+ switch (vm_block_type(block)) {
+ case block_type_iseq:
+ return invoke_iseq_block_from_c(th, &block->as.captured, self, argc, argv, passed_block_handler, NULL, FALSE, is_lambda);
+ case block_type_ifunc:
+ return vm_yield_with_cfunc(th, &block->as.captured, self, argc, argv, passed_block_handler);
+ case block_type_symbol:
+ return vm_yield_with_symbol(th, block->as.symbol, argc, argv, passed_block_handler);
+ case block_type_proc:
+ is_lambda = block_proc_is_lambda(block->as.proc);
+ block = vm_proc_block(block->as.proc);
+ goto again;
+ }
+ VM_UNREACHABLE(invoke_block_from_c_unsplattable);
+ return Qundef;
}
static VALUE
vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
- int argc, const VALUE *argv, const rb_block_t *blockptr)
+ int argc, const VALUE *argv, VALUE passed_block_handler)
{
VALUE val = Qundef;
int state;
@@ -1044,7 +1204,7 @@ vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
TH_PUSH_TAG(th);
if ((state = EXEC_TAG()) == 0) {
th->safe_level = proc->safe_level;
- val = invoke_block_from_c_unsplattable(th, &proc->block, self, argc, argv, blockptr, NULL);
+ val = invoke_block_from_c_unsplattable(th, &proc->block, self, argc, argv, passed_block_handler, proc->is_lambda);
}
TH_POP_TAG();
@@ -1058,21 +1218,23 @@ vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
static VALUE
vm_invoke_bmethod(rb_thread_t *th, rb_proc_t *proc, VALUE self,
- int argc, const VALUE *argv, const rb_block_t *blockptr)
+ int argc, const VALUE *argv, VALUE block_handler)
{
- return invoke_block_from_c_unsplattable(th, &proc->block, self, argc, argv, blockptr, NULL);
+ return invoke_block_from_c_unsplattable(th, &proc->block, self, argc, argv, block_handler, TRUE);
}
VALUE
rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc,
- int argc, const VALUE *argv, const rb_block_t *blockptr)
+ int argc, const VALUE *argv, VALUE passed_block_handler)
{
- VALUE self = proc->block.self;
+ VALUE self = vm_block_self(&proc->block);
+ VM_ASSERT(vm_block_handler_verify(passed_block_handler));
+
if (proc->is_from_method) {
- return vm_invoke_bmethod(th, proc, self, argc, argv, blockptr);
+ return vm_invoke_bmethod(th, proc, self, argc, argv, passed_block_handler);
}
else {
- return vm_invoke_proc(th, proc, self, argc, argv, blockptr);
+ return vm_invoke_proc(th, proc, self, argc, argv, passed_block_handler);
}
}
@@ -1352,12 +1514,21 @@ rb_vm_jump_tag_but_local_jump(int state)
NORETURN(static void vm_iter_break(rb_thread_t *th, VALUE val));
+static rb_control_frame_t *
+next_not_local_frame(rb_control_frame_t *cfp)
+{
+ while (VM_ENV_LOCAL_P(cfp->ep)) {
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ }
+ return cfp;
+}
+
static void
vm_iter_break(rb_thread_t *th, VALUE val)
{
- rb_control_frame_t *cfp = th->cfp;
- VALUE *ep = VM_CF_PREV_EP(cfp);
- rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(th, cfp, ep);
+ rb_control_frame_t *cfp = next_not_local_frame(th->cfp);
+ const VALUE *ep = VM_CF_PREV_EP(cfp);
+ const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(th, cfp, ep);
#if 0 /* raise LocalJumpError */
if (!target_cfp) {
@@ -1557,85 +1728,67 @@ hook_before_rewind(rb_thread_t *th, rb_control_frame_t *cfp, int will_finish_vm_
struct CONTROL_FRAME {
VALUE *pc; // cfp[0], program counter
VALUE *sp; // cfp[1], stack pointer
- VALUE *bp; // cfp[2], base pointer
- rb_iseq_t *iseq; // cfp[3], iseq
- VALUE flag; // cfp[4], magic
- VALUE self; // cfp[5], self
- VALUE *ep; // cfp[6], env pointer
- rb_iseq_t * block_iseq; // cfp[7], block iseq
- VALUE proc; // cfp[8], always 0
+ rb_iseq_t *iseq; // cfp[2], iseq
+ VALUE self; // cfp[3], self
+ const VALUE *ep; // cfp[4], env pointer
+ const void *block_code; // cfp[5], block code
};
- struct BLOCK {
+ struct rb_captured_block {
VALUE self;
VALUE *ep;
- rb_iseq_t *block_iseq;
- VALUE proc;
- };
-
- struct METHOD_CONTROL_FRAME {
- rb_control_frame_t frame;
+ union code;
};
- struct METHOD_FRAME {
- VALUE arg0;
- ...
- VALUE argM;
+ struct METHOD_ENV {
VALUE param0;
...
VALUE paramN;
- VALUE cref;
- VALUE special; // lep [1]
- struct block_object *block_ptr | 0x01; // lep [0]
- };
-
- struct BLOCK_CONTROL_FRAME {
- rb_control_frame_t frame;
+ VALUE lvar1;
+ ...
+ VALUE lvarM;
+ VALUE cref; // ep[-2]
+ VALUE special; // ep[-1]
+ VALUE flags; // ep[ 0] == lep[0]
};
- struct BLOCK_FRAME {
- VALUE arg0;
+ struct BLOCK_ENV {
+ VALUE block_param0;
...
- VALUE argM;
- VALUE param0;
+ VALUE block_paramN;
+ VALUE block_lvar1;
...
- VALUE paramN;
- VALUE cref;
- VALUE *(prev_ptr | 0x01); // ep[0]
- };
-
- struct CLASS_CONTROL_FRAME {
- rb_control_frame_t frame;
+ VALUE block_lvarM;
+ VALUE cref; // ep[-2]
+ VALUE special; // ep[-1]
+ VALUE flags; // ep[ 0]
};
- struct CLASS_FRAME {
- VALUE param0;
+ struct CLASS_ENV {
+ VALUE class_lvar0;
...
- VALUE paramN;
+ VALUE class_lvarN;
VALUE cref;
VALUE prev_ep; // for frame jump
+ VALUE flags;
};
struct C_METHOD_CONTROL_FRAME {
VALUE *pc; // 0
VALUE *sp; // stack pointer
- VALUE *bp; // base pointer (used in exception)
rb_iseq_t *iseq; // cmi
- VALUE magic; // C_METHOD_FRAME
VALUE self; // ?
VALUE *ep; // ep == lep
- rb_iseq_t * block_iseq; //
- VALUE proc; // always 0
+ void *code; //
};
struct C_BLOCK_CONTROL_FRAME {
VALUE *pc; // point only "finish" insn
VALUE *sp; // sp
rb_iseq_t *iseq; // ?
- VALUE magic; // C_METHOD_FRAME
- VALUE self; // needed?
+ VALUE self; //
VALUE *ep; // ep
- rb_iseq_t * block_iseq; // 0
+ void *code; //
};
*/
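The layout comment above captures the core of the change: the old cfp->flag word now lives in the environment as ep[0], a Fixnum-tagged word that carries the 16-bit frame magic together with the frame and env flag bits. A standalone sketch of decoding such a word, using the constants this patch adds to vm_core.h:

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t VALUE;

    /* Constants as introduced in vm_core.h by this patch. */
    #define VM_FRAME_MAGIC_BLOCK   0x22220001
    #define VM_FRAME_MAGIC_MASK    0xffff0001
    #define VM_FRAME_FLAG_FINISH   0x0020
    #define VM_ENV_FLAG_LOCAL      0x0002
    #define VM_ENV_FLAG_ESCAPED    0x0004

    int main(void)
    {
        /* ep[0] for a local, "finish" block frame; the low bit doubles as
         * the tag that makes the word look like a Fixnum to the GC. */
        VALUE flags = VM_FRAME_MAGIC_BLOCK | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH;

        printf("frame magic : 0x%lx\n", (unsigned long)(flags & VM_FRAME_MAGIC_MASK));
        printf("finish?     : %s\n", (flags & VM_FRAME_FLAG_FINISH)  ? "yes" : "no");
        printf("local env?  : %s\n", (flags & VM_ENV_FLAG_LOCAL)     ? "yes" : "no");
        printf("escaped?    : %s\n", (flags & VM_ENV_FLAG_ESCAPED)   ? "yes" : "no");
        return 0;
    }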
@@ -1834,6 +1987,8 @@ vm_exec(rb_thread_t *th)
if (catch_iseq != NULL) { /* found catch table */
/* enter catch scope */
+ const int arg_size = 1;
+
rb_iseq_check(catch_iseq);
cfp->sp = vm_base_ptr(cfp) + cont_sp;
cfp->pc = cfp->iseq->body->iseq_encoded + cont_pc;
@@ -1842,11 +1997,11 @@ vm_exec(rb_thread_t *th)
cfp->sp[0] = (VALUE)err;
vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_RESCUE,
cfp->self,
- VM_ENVVAL_PREV_EP_PTR(cfp->ep),
+ VM_GUARDED_PREV_EP(cfp->ep),
0, /* cref or me */
catch_iseq->body->iseq_encoded,
- cfp->sp + 1 /* push value */,
- catch_iseq->body->local_size - 1,
+ cfp->sp + arg_size /* push value */,
+ catch_iseq->body->local_table_size - arg_size,
catch_iseq->body->stack_max);
state = 0;
@@ -1951,17 +2106,17 @@ rb_thread_current_status(const rb_thread_t *th)
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
- const rb_block_t *blockptr, VALUE filename)
+ VALUE block_handler, VALUE filename)
{
rb_thread_t *th = GET_THREAD();
const rb_control_frame_t *reg_cfp = th->cfp;
const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
VALUE val;
- vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH,
- recv, VM_ENVVAL_BLOCK_PTR(blockptr),
+ vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
+ recv, block_handler,
(VALUE)vm_cref_new_toplevel(th), /* cref or me */
- 0, reg_cfp->sp, 1, 0);
+ 0, reg_cfp->sp, 0, 0);
val = (*func)(arg);
@@ -2237,9 +2392,14 @@ rb_thread_mark(void *ptr)
rb_gc_mark_values((long)(sp - p), p);
while (cfp != limit_cfp) {
- rb_gc_mark(cfp->proc);
+#if VM_CHECK_MODE > 0
+ const VALUE *ep = cfp->ep;
+ VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(th, ep));
+#endif
rb_gc_mark(cfp->self);
rb_gc_mark((VALUE)cfp->iseq);
+ rb_gc_mark((VALUE)cfp->block_code);
+
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}
}
@@ -2387,10 +2547,10 @@ th_init(rb_thread_t *th, VALUE self)
th->cfp = (void *)(th->stack + th->stack_size);
- vm_push_frame(th, 0 /* dummy iseq */, VM_FRAME_MAGIC_DUMMY | VM_FRAME_FLAG_FINISH /* dummy frame */,
- Qnil /* dummy self */, VM_ENVVAL_BLOCK_PTR(0) /* dummy block ptr */,
+ vm_push_frame(th, 0 /* dummy iseq */, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH /* dummy frame */,
+ Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
0 /* dummy cref/me */,
- 0 /* dummy pc */, th->stack, 1, 0);
+ 0 /* dummy pc */, th->stack, 0, 0);
th->status = THREAD_RUNNABLE;
th->errinfo = Qnil;
@@ -2942,7 +3102,7 @@ Init_VM(void)
th->cfp->pc = iseq->body->iseq_encoded;
th->cfp->self = th->top_self;
- th->cfp->ep[-1] = (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE);
+ VM_STACK_ENV_WRITE(th->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE));
/*
* The Binding of the top level scope
diff --git a/vm_args.c b/vm_args.c
index 1e310eb25c..89e25ca49b 100644
--- a/vm_args.c
+++ b/vm_args.c
@@ -466,22 +466,22 @@ args_setup_kw_rest_parameter(VALUE keyword_hash, VALUE *locals)
static inline void
args_setup_block_parameter(rb_thread_t *th, struct rb_calling_info *calling, VALUE *locals)
{
+ VALUE block_handler = calling->block_handler;
VALUE blockval = Qnil;
- const rb_block_t *blockptr = calling->blockptr;
- if (blockptr) {
- /* make Proc object */
- if (blockptr->proc == 0) {
- rb_proc_t *proc;
- blockval = rb_vm_make_proc(th, blockptr, rb_cProc);
- GetProcPtr(blockval, proc);
- calling->blockptr = &proc->block;
- }
- else if (SYMBOL_P(blockptr->proc)) {
- blockval = rb_sym_to_proc(blockptr->proc);
- }
- else {
- blockval = blockptr->proc;
+ if (block_handler != VM_BLOCK_HANDLER_NONE) {
+
+ switch (vm_block_handler_type(block_handler)) {
+ case block_handler_type_iseq:
+ case block_handler_type_ifunc:
+ blockval = rb_vm_make_proc(th, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
+ break;
+ case block_handler_type_symbol:
+ blockval = rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
+ break;
+ case block_handler_type_proc:
+ blockval = VM_BH_TO_PROC(block_handler);
+ break;
}
}
*locals = blockval;
@@ -698,9 +698,9 @@ raise_argument_error(rb_thread_t *th, const rb_iseq_t *iseq, const VALUE exc)
VALUE at;
if (iseq) {
- vm_push_frame(th, iseq, VM_FRAME_MAGIC_DUMMY, Qnil /* self */,
- VM_ENVVAL_BLOCK_PTR(0) /* specval*/, Qfalse /* me or cref */,
- iseq->body->iseq_encoded, th->cfp->sp, 1 /* local_size (cref/me) */, 0 /* stack_max */);
+ vm_push_frame(th, iseq, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL, Qnil /* self */,
+ VM_BLOCK_HANDLER_NONE /* specval*/, Qfalse /* me or cref */,
+ iseq->body->iseq_encoded, th->cfp->sp, 0, 0 /* stack_max */);
at = rb_vm_backtrace_object();
rb_vm_pop_frame(th);
}
@@ -766,16 +766,23 @@ vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling,
calling->argc -= kw_len - 1;
}
-static inline void
-vm_caller_setup_proc_as_block(rb_control_frame_t *reg_cfp,
- struct rb_calling_info *calling,
- VALUE proc)
+static VALUE
+vm_to_proc(VALUE proc)
{
- rb_proc_t *po;
+ if (UNLIKELY(!rb_obj_is_proc(proc))) {
+ VALUE b;
+ b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
- GetProcPtr(proc, po);
- calling->blockptr = &po->block;
- RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp)->proc = proc;
+ if (NIL_P(b) || !rb_obj_is_proc(b)) {
+ rb_raise(rb_eTypeError,
+ "wrong argument type %s (expected Proc)",
+ rb_obj_classname(proc));
+ }
+ return b;
+ }
+ else {
+ return proc;
+ }
}
static void
@@ -783,51 +790,31 @@ vm_caller_setup_arg_block(const rb_thread_t *th, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling, const struct rb_call_info *ci, rb_iseq_t *blockiseq, const int is_super)
{
if (ci->flag & VM_CALL_ARGS_BLOCKARG) {
- VALUE proc;
+ VALUE block_code = *(--reg_cfp->sp);
- proc = *(--reg_cfp->sp);
-
- if (NIL_P(proc)) {
- calling->blockptr = NULL;
+ if (NIL_P(block_code)) {
+ calling->block_handler = VM_BLOCK_HANDLER_NONE;
}
- else if (SYMBOL_P(proc) &&
- rb_method_basic_definition_p(rb_cSymbol, idTo_proc)) {
- if (LIKELY(!(ci->flag & VM_CALL_TAILCALL))) {
- calling->blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp);
- calling->blockptr->iseq = (rb_iseq_t *)proc;
- calling->blockptr->proc = proc;
+ else {
+ if (SYMBOL_P(block_code) && rb_method_basic_definition_p(rb_cSymbol, idTo_proc)) {
+ calling->block_handler = block_code;
}
else {
- proc = rb_sym_to_proc(proc);
- vm_caller_setup_proc_as_block(reg_cfp, calling, proc);
- }
- }
- else {
- if (!rb_obj_is_proc(proc)) {
- VALUE b;
- b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
-
- if (NIL_P(b) || !rb_obj_is_proc(b)) {
- rb_raise(rb_eTypeError,
- "wrong argument type %s (expected Proc)",
- rb_obj_classname(proc));
- }
- proc = b;
+ calling->block_handler = vm_to_proc(block_code);
}
- vm_caller_setup_proc_as_block(reg_cfp, calling, proc);
}
}
- else if (blockiseq != 0) { /* likely */
- rb_block_t *blockptr = calling->blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp);
- blockptr->iseq = blockiseq;
- blockptr->proc = 0;
+ else if (blockiseq != NULL) { /* likely */
+ struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(reg_cfp);
+ captured->code.iseq = blockiseq;
+ calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(captured);
}
else {
if (is_super) {
- calling->blockptr = GET_BLOCK_PTR();
+ calling->block_handler = GET_BLOCK_HANDLER();
}
else {
- calling->blockptr = NULL;
+ calling->block_handler = VM_BLOCK_HANDLER_NONE;
}
}
}
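All of the block handlers built above are plain VALUEs whose low two bits say how to read them, per the vm_core.h hunk below: 0 is "no block", a pointer tagged with 0x01 is an iseq captured block, 0x03 is an ifunc captured block, and any other value is an ordinary Symbol or Proc object. A standalone sketch of the tagging round-trip, with a simplified stand-in for struct rb_captured_block:

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t VALUE;

    #define VM_TAGGED_PTR_SET(p, tag)  ((VALUE)(p) | (tag))
    #define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~(VALUE)(mask)))
    #define VM_BLOCK_HANDLER_NONE 0

    /* Simplified stand-in for struct rb_captured_block. */
    struct captured { VALUE self; const VALUE *ep; const void *code; };

    static const char *
    handler_kind(VALUE bh)
    {
        if (bh == VM_BLOCK_HANDLER_NONE) return "none";
        switch (bh & 0x03) {
          case 0x01: return "iseq block (tagged captured block)";
          case 0x03: return "ifunc block (tagged captured block)";
          default:   return "Symbol or Proc VALUE";
        }
    }

    int main(void)
    {
        static struct captured cap;                 /* suitably aligned */
        VALUE iseq_bh  = VM_TAGGED_PTR_SET(&cap, 0x01);
        VALUE ifunc_bh = VM_TAGGED_PTR_SET(&cap, 0x03);

        printf("%s\n", handler_kind(VM_BLOCK_HANDLER_NONE));
        printf("%s\n", handler_kind(iseq_bh));
        printf("%s\n", handler_kind(ifunc_bh));
        printf("round-trips: %s\n",
               VM_TAGGED_PTR_REF(iseq_bh, 0x03) == &cap ? "ok" : "broken");
        return 0;
    }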
diff --git a/vm_core.h b/vm_core.h
index 1f61140d7a..25a467dff6 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -47,8 +47,12 @@
#if VM_CHECK_MODE > 0
#define VM_ASSERT(expr) ( \
RUBY_ASSERT_WHEN(VM_CHECK_MODE > 0, expr))
+
+#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
+
#else
#define VM_ASSERT(expr) ((void)0)
+#define VM_UNREACHABLE(func) ((void)0)
#endif
#define RUBY_VM_THREAD_MODEL 2
@@ -225,7 +229,7 @@ struct rb_call_info_with_kwarg {
};
struct rb_calling_info {
- struct rb_block_struct *blockptr;
+ VALUE block_handler;
VALUE recv;
int argc;
};
@@ -278,10 +282,6 @@ struct rb_iseq_constant_body {
ISEQ_TYPE_DEFINED_GUARD
} type; /* instruction sequence type */
- unsigned int stack_max; /* for stack overflow check */
- /* sizeof(vars) + 1 */
- unsigned int local_size;
-
unsigned int iseq_size;
const VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
@@ -384,6 +384,7 @@ struct rb_iseq_constant_body {
unsigned int ci_size;
unsigned int ci_kw_size;
unsigned int line_info_size;
+ unsigned int stack_max; /* for stack overflow check */
};
/* T_IMEMO/iseq */
@@ -593,28 +594,52 @@ typedef struct rb_vm_struct {
#define VM_DEBUG_VERIFY_METHOD_CACHE (VM_DEBUG_MODE != 0)
#endif
+struct rb_captured_block {
+ VALUE self;
+ const VALUE *ep;
+ union {
+ const rb_iseq_t *iseq;
+ const struct vm_ifunc *ifunc;
+ VALUE val;
+ } code;
+};
+
+enum rb_block_handler_type {
+ block_handler_type_iseq,
+ block_handler_type_ifunc,
+ block_handler_type_symbol,
+ block_handler_type_proc
+};
+
+enum rb_block_type {
+ block_type_iseq,
+ block_type_ifunc,
+ block_type_symbol,
+ block_type_proc
+};
+
+struct rb_block {
+ union {
+ struct rb_captured_block captured;
+ VALUE symbol;
+ VALUE proc;
+ } as;
+ enum rb_block_type type;
+};
+
typedef struct rb_control_frame_struct {
const VALUE *pc; /* cfp[0] */
VALUE *sp; /* cfp[1] */
const rb_iseq_t *iseq; /* cfp[2] */
- VALUE flag; /* cfp[3] */
- VALUE self; /* cfp[4] / block[0] */
- VALUE *ep; /* cfp[5] / block[1] */
- const rb_iseq_t *block_iseq;/* cfp[6] / block[2] */
- VALUE proc; /* cfp[7] / block[3] */
+ VALUE self; /* cfp[3] / block[0] */
+ const VALUE *ep; /* cfp[4] / block[1] */
+ const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc */
#if VM_DEBUG_BP_CHECK
- VALUE *bp_check; /* cfp[8] */
+ VALUE *bp_check; /* cfp[6] */
#endif
} rb_control_frame_t;
-typedef struct rb_block_struct {
- VALUE self; /* share with method frame if it's only block */
- VALUE *ep; /* share with method frame if it's only block */
- const rb_iseq_t *iseq;
- VALUE proc;
-} rb_block_t;
-
extern const rb_data_type_t ruby_threadptr_data_type;
#define GetThreadPtr(obj, ptr) \
@@ -690,7 +715,7 @@ typedef struct rb_thread_struct {
int waiting_fd;
/* for rb_iterate */
- const rb_block_t *passed_block;
+ VALUE passed_block_handler;
/* for bmethod */
const rb_callable_method_entry_t *passed_bmethod_me;
@@ -703,7 +728,7 @@ typedef struct rb_thread_struct {
VALUE top_wrapper;
/* eval env */
- VALUE *root_lep;
+ const VALUE *root_lep;
VALUE root_svar;
/* thread control */
@@ -822,8 +847,8 @@ rb_iseq_t *rb_iseq_new_with_opt(NODE*, VALUE, VALUE, VALUE, VALUE, const rb_iseq
/* src -> iseq */
rb_iseq_t *rb_iseq_compile(VALUE src, VALUE file, VALUE line);
-rb_iseq_t *rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, rb_block_t *base_block);
-rb_iseq_t *rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, rb_block_t *base_block, VALUE opt);
+rb_iseq_t *rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, const struct rb_block *base_block);
+rb_iseq_t *rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, const struct rb_block *base_block, VALUE opt);
VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
@@ -841,7 +866,7 @@ RUBY_SYMBOL_EXPORT_END
GetCoreDataFromValue((obj), rb_proc_t, (ptr))
typedef struct {
- rb_block_t block;
+ const struct rb_block block;
int8_t safe_level; /* 0..1 */
int8_t is_from_method; /* bool */
int8_t is_lambda; /* bool */
@@ -852,8 +877,9 @@ typedef struct {
typedef struct {
int env_size;
- rb_block_t block;
- VALUE env[1]; /* flexible array */
+ const VALUE *ep;
+ const rb_iseq_t *iseq;
+ const VALUE env[1]; /* flexible array */
} rb_env_t;
extern const rb_data_type_t ruby_binding_data_type;
@@ -862,7 +888,7 @@ extern const rb_data_type_t ruby_binding_data_type;
GetCoreDataFromValue((obj), rb_binding_t, (ptr))
typedef struct {
- VALUE env;
+ struct rb_block block;
VALUE path;
unsigned short first_lineno;
} rb_binding_t;
@@ -903,32 +929,6 @@ enum vm_svar_index {
VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};
-#define VM_FRAME_MAGIC_METHOD 0x11
-#define VM_FRAME_MAGIC_BLOCK 0x21
-#define VM_FRAME_MAGIC_CLASS 0x31
-#define VM_FRAME_MAGIC_TOP 0x41
-#define VM_FRAME_MAGIC_CFUNC 0x61
-#define VM_FRAME_MAGIC_PROC 0x71
-#define VM_FRAME_MAGIC_IFUNC 0x81
-#define VM_FRAME_MAGIC_EVAL 0x91
-#define VM_FRAME_MAGIC_LAMBDA 0xa1
-#define VM_FRAME_MAGIC_RESCUE 0xb1
-#define VM_FRAME_MAGIC_DUMMY 0xc1
-#define VM_FRAME_MAGIC_MASK_BITS 8
-#define VM_FRAME_MAGIC_MASK (~(~(VALUE)0<<VM_FRAME_MAGIC_MASK_BITS))
-
-#define VM_FRAME_TYPE(cfp) ((cfp)->flag & VM_FRAME_MAGIC_MASK)
-
-/* other frame flag */
-#define VM_FRAME_FLAG_PASSED 0x0100
-#define VM_FRAME_FLAG_FINISH 0x0200
-#define VM_FRAME_FLAG_BMETHOD 0x0400
-#define VM_FRAME_TYPE_FINISH_P(cfp) (((cfp)->flag & VM_FRAME_FLAG_FINISH) != 0)
-#define VM_FRAME_TYPE_BMETHOD_P(cfp) (((cfp)->flag & VM_FRAME_FLAG_BMETHOD) != 0)
-
-#define RUBYVM_CFUNC_FRAME_P(cfp) \
- (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
-
/* inline cache */
typedef struct iseq_inline_cache_entry *IC;
typedef struct rb_call_info *CALL_INFO;
@@ -945,31 +945,180 @@ typedef VALUE CDHASH;
typedef rb_control_frame_t *
(FUNC_FASTCALL(*rb_insn_func_t))(rb_thread_t *, rb_control_frame_t *);
-#define GC_GUARDED_PTR(p) ((VALUE)((VALUE)(p) | 0x01))
-#define GC_GUARDED_PTR_REF(p) ((void *)(((VALUE)(p)) & ~0x03))
+#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
+#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
+
+#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
+#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
-/*
- * block frame:
- * ep[ 0]: prev frame
- * ep[-1]: CREF (for *_eval)
- *
- * method frame:
- * ep[ 0]: block pointer (ptr | VM_ENVVAL_BLOCK_PTR_FLAG)
- */
+enum {
+ /* Frame/Environment flag bits:
+ * MMMM MMMM MMMM MMMM ____ ____ FFFF EEEX (LSB)
+ *
+ * X : tag for GC marking (makes the word look like a Fixnum)
+ * EEE : 3 bits Env flags
+ * FFFF: 4 bits Frame flags
+ * MMMM: 16 bits frame magic (to check frame corruption)
+ */
+
+ /* frame types */
+ VM_FRAME_MAGIC_METHOD = 0x11110001,
+ VM_FRAME_MAGIC_BLOCK = 0x22220001,
+ VM_FRAME_MAGIC_CLASS = 0x33330001,
+ VM_FRAME_MAGIC_TOP = 0x44440001,
+ VM_FRAME_MAGIC_CFUNC = 0x55550001,
+ VM_FRAME_MAGIC_PROC = 0x66660001,
+ VM_FRAME_MAGIC_IFUNC = 0x77770001,
+ VM_FRAME_MAGIC_EVAL = 0x88880001,
+ VM_FRAME_MAGIC_LAMBDA = 0x99990001,
+ VM_FRAME_MAGIC_RESCUE = 0xaaaa0001,
+ VM_FRAME_MAGIC_DUMMY = 0xbbbb0001,
+
+ VM_FRAME_MAGIC_MASK = 0xffff0001,
+
+ /* frame flag */
+ VM_FRAME_FLAG_PASSED = 0x0010,
+ VM_FRAME_FLAG_FINISH = 0x0020,
+ VM_FRAME_FLAG_BMETHOD = 0x0040,
+
+ /* env flag */
+ VM_ENV_FLAG_LOCAL = 0x0002,
+ VM_ENV_FLAG_ESCAPED = 0x0004,
+ VM_ENV_FLAG_WB_REQUIRED = 0x0008
+};
+
+static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
+
+#define VM_FRAME_TYPE_FINISH_P(cfp) (VM_ENV_FLAGS((cfp)->ep, VM_FRAME_FLAG_FINISH ) != 0)
+#define VM_FRAME_TYPE_BMETHOD_P(cfp) (VM_ENV_FLAGS((cfp)->ep, VM_FRAME_FLAG_BMETHOD) != 0)
+
+#define VM_ENV_DATA_SIZE ( 3)
+
+#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
+#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
+#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
+#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
+#define VM_ENV_DATA_INDEX_ENV_PROC ( 2) /* ep[ 2] */
+
+#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
+
+static inline void
+VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
+{
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
+ VM_ASSERT(FIXNUM_P(flags));
+ VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
+}
+
+static inline void
+VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
+{
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
+ VM_ASSERT(FIXNUM_P(flags));
+ VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
+}
+
+static inline long
+VM_ENV_FLAGS(const VALUE *ep, long flag)
+{
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
+ VM_ASSERT(FIXNUM_P(flags));
+ return flags & flag;
+}
+
+static inline long
+VM_FRAME_TYPE(const rb_control_frame_t *cfp)
+{
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
+}
+
+#define RUBYVM_CFUNC_FRAME_P(cfp) \
+ (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
+
+#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
+#define VM_BLOCK_HANDLER_NONE 0
+
+static inline int
+VM_ENV_LOCAL_P(const VALUE *ep)
+{
+ return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
+}
+
+static inline const VALUE *
+VM_ENV_PREV_EP(const VALUE *ep)
+{
+ VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
+ return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
+}
+
+static inline VALUE
+VM_ENV_BLOCK_HANDLER(const VALUE *ep)
+{
+ VM_ASSERT(VM_ENV_LOCAL_P(ep));
+ return ep[VM_ENV_DATA_INDEX_SPECVAL];
+}
+
+#if VM_CHECK_MODE > 0
+int rb_vm_ep_in_heap_p(const VALUE *ep);
+#endif
+
+static inline int
+VM_ENV_ESCAPED_P(const VALUE *ep)
+{
+ VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
+ return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
+}
+
+static inline VALUE
+VM_ENV_ENVVAL(const VALUE *ep)
+{
+ VM_ASSERT(VM_ENV_ESCAPED_P(ep));
+ return ep[VM_ENV_DATA_INDEX_ENV];
+}
+
+static inline VALUE
+VM_ENV_PROCVAL(const VALUE *ep)
+{
+ VM_ASSERT(VM_ENV_ESCAPED_P(ep));
+ VM_ASSERT(VM_ENV_LOCAL_P(ep));
+ VM_ASSERT(VM_ENV_BLOCK_HANDLER(ep) != VM_BLOCK_HANDLER_NONE);
+
+ return ep[VM_ENV_DATA_INDEX_ENV_PROC];
+}
-#define VM_ENVVAL_BLOCK_PTR_FLAG 0x02
-#define VM_ENVVAL_BLOCK_PTR(v) (GC_GUARDED_PTR(v) | VM_ENVVAL_BLOCK_PTR_FLAG)
-#define VM_ENVVAL_BLOCK_PTR_P(v) ((v) & VM_ENVVAL_BLOCK_PTR_FLAG)
-#define VM_ENVVAL_PREV_EP_PTR(v) ((VALUE)GC_GUARDED_PTR(v))
-#define VM_ENVVAL_PREV_EP_PTR_P(v) (!(VM_ENVVAL_BLOCK_PTR_P(v)))
+static inline void
+VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
+{
+ *((VALUE *)ptr) = v;
+}
+
+static inline void
+VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
+{
+ VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
+ VM_FORCE_WRITE(ptr, special_const_value);
+}
+
+static inline void
+VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
+{
+ VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
+ VM_FORCE_WRITE(&ep[index], v);
+}
-#define VM_EP_PREV_EP(ep) ((VALUE *)GC_GUARDED_PTR_REF((ep)[0]))
-#define VM_EP_BLOCK_PTR(ep) ((rb_block_t *)GC_GUARDED_PTR_REF((ep)[0]))
-#define VM_EP_LEP_P(ep) VM_ENVVAL_BLOCK_PTR_P((ep)[0])
+#if VM_CHECK_MODE > 0
+static inline const VALUE *
+vm_env_ep(VALUE envval)
+{
+ rb_env_t *env;
+ GetEnvPtr(envval, env);
+ return env->ep;
+}
+#endif
-VALUE *rb_vm_ep_local_ep(VALUE *ep);
-rb_block_t *rb_vm_control_frame_block_ptr(const rb_control_frame_t *cfp);
+const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
+VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
@@ -980,13 +1129,230 @@ rb_block_t *rb_vm_control_frame_block_ptr(const rb_control_frame_t *cfp);
#define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
(!RUBY_VM_VALID_CONTROL_FRAME_P((cfp), RUBY_VM_END_CONTROL_FRAME(th)))
-#define RUBY_VM_IFUNC_P(ptr) (RB_TYPE_P((VALUE)(ptr), T_IMEMO) && imemo_type((VALUE)ptr) == imemo_ifunc)
#define RUBY_VM_NORMAL_ISEQ_P(ptr) (RB_TYPE_P((VALUE)(ptr), T_IMEMO) && imemo_type((VALUE)ptr) == imemo_iseq && rb_iseq_check((rb_iseq_t *)ptr))
-#define RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp) ((rb_block_t *)(&(cfp)->self))
-#define RUBY_VM_GET_CFP_FROM_BLOCK_PTR(b) \
- ((rb_control_frame_t *)((VALUE *)(b) - 4))
-/* magic number `4' is depend on rb_control_frame_t layout. */
+static inline int
+VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
+{
+ if ((block_handler & 0x03) == 0x01) {
+#if VM_CHECK_MODE > 0
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
+ VM_ASSERT(RB_TYPE_P(captured->code.val, T_IMEMO));
+ VM_ASSERT(imemo_type(captured->code.val) == imemo_iseq);
+#endif
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
+static inline VALUE
+VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
+{
+ VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
+ VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
+ return block_handler;
+}
+
+static inline const struct rb_captured_block *
+VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
+{
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
+ VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
+ return captured;
+}
+
+static inline int
+VM_BH_IFUNC_P(VALUE block_handler)
+{
+ if ((block_handler & 0x03) == 0x03) {
+#if VM_CHECK_MODE > 0
+ struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
+ VM_ASSERT(RB_TYPE_P(captured->code.val, T_IMEMO));
+ VM_ASSERT(imemo_type(captured->code.val) == imemo_ifunc);
+#endif
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
+static inline VALUE
+VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
+{
+ VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler));
+ return block_handler;
+}
+
+static inline const struct rb_captured_block *
+VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
+{
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler));
+ return captured;
+}
+
+static inline const struct rb_captured_block *
+VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
+{
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
+ return captured;
+}
+
+static inline enum rb_block_handler_type
+vm_block_handler_type(VALUE block_handler)
+{
+ if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
+ return block_handler_type_iseq;
+ }
+ else if (VM_BH_IFUNC_P(block_handler)) {
+ return block_handler_type_ifunc;
+ }
+ else if (SYMBOL_P(block_handler)) {
+ return block_handler_type_symbol;
+ }
+ else {
+ VM_ASSERT(rb_obj_is_proc(block_handler));
+ return block_handler_type_proc;
+ }
+}
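
The block-handler predicates and converters above rely on 2-bit pointer tagging: a pointer to a captured block is ORed with 0x01 (iseq block) or 0x03 (ifunc block), while Symbols and Procs travel as ordinary VALUEs, so a single word can represent all four handler kinds. A standalone sketch of the idea outside the VM (struct captured, tag_ptr and untag_ptr are illustrative names, not patch API):

/* Standalone sketch (not VM code): encoding a pointer to a captured block
 * in the low two bits of a word, as the new block handlers do. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct captured { const void *code; };    /* stand-in for rb_captured_block */

static uintptr_t
tag_ptr(const struct captured *p, uintptr_t tag)
{
    assert(((uintptr_t)p & 0x03) == 0);    /* pointer must be 4-byte aligned */
    return (uintptr_t)p | tag;
}

static const struct captured *
untag_ptr(uintptr_t handler)
{
    return (const struct captured *)(handler & ~(uintptr_t)0x03);
}

int
main(void)
{
    struct captured blk = { "iseq" };
    uintptr_t bh = tag_ptr(&blk, 0x01);           /* iseq-block handler */

    switch (bh & 0x03) {
      case 0x01: printf("iseq block: %s\n", (const char *)untag_ptr(bh)->code); break;
      case 0x03: printf("ifunc block\n"); break;
      default:   printf("Symbol or Proc VALUE\n"); break;
    }
    return 0;
}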
+
+static inline int
+vm_block_handler_verify(VALUE block_handler)
+{
+ VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
+ vm_block_handler_type(block_handler) >= 0);
+ return 1;
+}
+
+static inline enum rb_block_type
+vm_block_type(const struct rb_block *block)
+{
+#if VM_CHECK_MODE > 0
+ switch (block->type) {
+ case block_type_iseq:
+ VM_ASSERT(RB_TYPE_P(block->as.captured.code.val, T_IMEMO));
+ VM_ASSERT(imemo_type(block->as.captured.code.val) == imemo_iseq);
+ break;
+ case block_type_ifunc:
+ VM_ASSERT(RB_TYPE_P(block->as.captured.code.val, T_IMEMO));
+ VM_ASSERT(imemo_type(block->as.captured.code.val) == imemo_ifunc);
+ break;
+ case block_type_symbol:
+ VM_ASSERT(SYMBOL_P(block->as.symbol));
+ break;
+ case block_type_proc:
+ VM_ASSERT(rb_obj_is_proc(block->as.proc));
+ break;
+ }
+#endif
+ return block->type;
+}
+
+static inline void
+vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
+{
+ struct rb_block *mb = (struct rb_block *)block;
+ mb->type = type;
+}
+
+static inline const struct rb_block *
+vm_proc_block(VALUE procval)
+{
+ rb_proc_t *proc = RTYPEDDATA_DATA(procval);
+ VM_ASSERT(rb_obj_is_proc(procval));
+ return &proc->block;
+}
+
+static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
+static inline const VALUE *vm_block_ep(const struct rb_block *block);
+
+static inline const rb_iseq_t *
+vm_proc_iseq(VALUE procval)
+{
+ VM_ASSERT(rb_obj_is_proc(procval));
+ return vm_block_iseq(vm_proc_block(procval));
+}
+
+static inline const VALUE *
+vm_proc_ep(VALUE procval)
+{
+ return vm_block_ep(vm_proc_block(procval));
+}
+
+static inline const rb_iseq_t *
+vm_block_iseq(const struct rb_block *block)
+{
+ switch (vm_block_type(block)) {
+ case block_type_iseq: return block->as.captured.code.iseq;
+ case block_type_proc: return vm_proc_iseq(block->as.proc);
+ case block_type_ifunc:
+ case block_type_symbol: return NULL;
+ }
+ VM_UNREACHABLE(vm_block_iseq);
+ return NULL;
+}
+
+static inline const VALUE *
+vm_block_ep(const struct rb_block *block)
+{
+ switch (vm_block_type(block)) {
+ case block_type_iseq:
+ case block_type_ifunc: return block->as.captured.ep;
+ case block_type_proc: return vm_proc_ep(block->as.proc);
+ case block_type_symbol: return NULL;
+ }
+ VM_UNREACHABLE(vm_block_ep);
+ return NULL;
+}
+
+static inline VALUE
+vm_block_self(const struct rb_block *block)
+{
+ switch (vm_block_type(block)) {
+ case block_type_iseq:
+ case block_type_ifunc:
+ return block->as.captured.self;
+ case block_type_proc:
+ return vm_block_self(vm_proc_block(block->as.proc));
+ case block_type_symbol:
+ return Qundef;
+ }
+ VM_UNREACHABLE(vm_block_self);
+ return Qundef;
+}
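
vm_block_iseq, vm_block_ep and vm_block_self above treat struct rb_block as a small tagged union and recurse one level when the block merely wraps a Proc. A self-contained sketch of that shape with simplified types (block, block_ep and the BT_* names are invented for illustration):

/* Standalone sketch (not VM code): a tagged union like struct rb_block,
 * whose accessor follows the wrapped Proc's own block when needed. */
#include <stddef.h>
#include <stdio.h>

enum block_type { BT_ISEQ, BT_IFUNC, BT_SYMBOL, BT_PROC };

struct block {
    enum block_type type;
    union {
        struct { const char *code; const long *ep; } captured;
        const char *symbol;
        const struct block *proc;     /* the wrapped Proc's own block */
    } as;
};

static const long *
block_ep(const struct block *b)
{
    switch (b->type) {
      case BT_ISEQ:
      case BT_IFUNC:  return b->as.captured.ep;
      case BT_PROC:   return block_ep(b->as.proc);   /* follow the Proc */
      case BT_SYMBOL: return NULL;                   /* symbols carry no env */
    }
    return NULL;
}

int
main(void)
{
    long env[1] = { 7 };
    struct block inner = { BT_ISEQ, { .captured = { "code", env } } };
    struct block outer = { BT_PROC };
    outer.as.proc = &inner;

    printf("%ld\n", block_ep(&outer)[0]);   /* prints 7 via the Proc indirection */
    return 0;
}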
+
+static inline VALUE
+VM_BH_TO_SYMBOL(VALUE block_handler)
+{
+ VM_ASSERT(SYMBOL_P(block_handler));
+ return block_handler;
+}
+
+static inline VALUE
+VM_BH_FROM_SYMBOL(VALUE symbol)
+{
+ VM_ASSERT(SYMBOL_P(symbol));
+ return symbol;
+}
+
+static inline VALUE
+VM_BH_TO_PROC(VALUE block_handler)
+{
+ VM_ASSERT(rb_obj_is_proc(block_handler));
+ return block_handler;
+}
+
+static inline VALUE
+VM_BH_FROM_PROC(VALUE procval)
+{
+ VM_ASSERT(rb_obj_is_proc(procval));
+ return procval;
+}
/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
@@ -1010,15 +1376,13 @@ VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END
int rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, VALUE *klassp);
-VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc,
- int argc, const VALUE *argv, const rb_block_t *blockptr);
-VALUE rb_vm_make_proc_lambda(rb_thread_t *th, const rb_block_t *block, VALUE klass, int8_t is_lambda);
-VALUE rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass);
+VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, VALUE block_handler);
+VALUE rb_vm_make_proc_lambda(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
+VALUE rb_vm_make_proc(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass);
VALUE rb_vm_make_binding(rb_thread_t *th, const rb_control_frame_t *src_cfp);
VALUE rb_vm_env_local_variables(const rb_env_t *env);
VALUE rb_vm_env_prev_envval(const rb_env_t *env);
-VALUE rb_vm_proc_envval(const rb_proc_t *proc);
-VALUE *rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars);
+const VALUE *rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
void rb_vm_gvl_destroy(rb_vm_t *vm);
VALUE rb_vm_call(rb_thread_t *th, VALUE recv, VALUE id, int argc,
diff --git a/vm_dump.c b/vm_dump.c
index df0d2413c9..a27d47dd23 100644
--- a/vm_dump.c
+++ b/vm_dump.c
@@ -38,10 +38,6 @@ control_frame_dump(rb_thread_t *th, rb_control_frame_t *cfp)
const rb_callable_method_entry_t *me;
- if (cfp->block_iseq != 0 && !RUBY_VM_IFUNC_P(cfp->block_iseq)) {
- biseq_name = ""; /* RSTRING(cfp->block_iseq->body->location.label)->ptr; */
- }
-
if (ep < 0 || (size_t)ep > th->stack_size) {
ep = (ptrdiff_t)cfp->ep;
ep_in_heap = 'p';
@@ -95,6 +91,7 @@ control_frame_dump(rb_thread_t *th, rb_control_frame_t *cfp)
}
if (cfp->iseq != 0) {
+#define RUBY_VM_IFUNC_P(ptr) (RB_TYPE_P((VALUE)(ptr), T_IMEMO) && imemo_type((VALUE)ptr) == imemo_ifunc)
if (RUBY_VM_IFUNC_P(cfp->iseq)) {
iseq_name = "<ifunc>";
}
@@ -185,7 +182,7 @@ rb_vmdebug_stack_dump_raw_current(void)
}
void
-rb_vmdebug_env_dump_raw(rb_env_t *env, VALUE *ep)
+rb_vmdebug_env_dump_raw(rb_env_t *env, const VALUE *ep)
{
int i;
fprintf(stderr, "-- env --------------------\n");
@@ -215,13 +212,13 @@ rb_vmdebug_proc_dump_raw(rb_proc_t *proc)
{
rb_env_t *env;
char *selfstr;
- VALUE val = rb_inspect(proc->block.self);
+ VALUE val = rb_inspect(vm_block_self(&proc->block));
selfstr = StringValueCStr(val);
fprintf(stderr, "-- proc -------------------\n");
fprintf(stderr, "self: %s\n", selfstr);
- GetEnvPtr(rb_vm_proc_envval(proc), env);
- rb_vmdebug_env_dump_raw(env, proc->block.ep);
+ GetEnvPtr(VM_ENV_ENVVAL(vm_block_ep(&proc->block)), env);
+ rb_vmdebug_env_dump_raw(env, vm_block_ep(&proc->block));
}
void
@@ -239,7 +236,7 @@ static VALUE *
vm_base_ptr(rb_control_frame_t *cfp)
{
rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
- VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_size + 1;
+	VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
bp += 1;
diff --git a/vm_eval.c b/vm_eval.c
index 7e131b28e7..4ef4d6e582 100644
--- a/vm_eval.c
+++ b/vm_eval.c
@@ -18,9 +18,9 @@ struct local_var_list {
static inline VALUE method_missing(VALUE obj, ID id, int argc, const VALUE *argv, enum method_missing_reason call_status);
static inline VALUE vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const rb_cref_t *cref);
static inline VALUE vm_yield(rb_thread_t *th, int argc, const VALUE *argv);
-static inline VALUE vm_yield_with_block(rb_thread_t *th, int argc, const VALUE *argv, const rb_block_t *blockargptr);
+static inline VALUE vm_yield_with_block(rb_thread_t *th, int argc, const VALUE *argv, VALUE block_handler);
static VALUE vm_exec(rb_thread_t *th);
-static void vm_set_eval_stack(rb_thread_t * th, const rb_iseq_t *iseq, const rb_cref_t *cref, rb_block_t *base_block);
+static void vm_set_eval_stack(rb_thread_t * th, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block);
static int vm_collect_local_variables_in_heap(rb_thread_t *th, const VALUE *dfp, const struct local_var_list *vars);
static VALUE rb_eUncaughtThrow;
@@ -114,16 +114,16 @@ vm_call0_cfunc_with_frame(rb_thread_t* th, struct rb_calling_info *calling, cons
VALUE recv = calling->recv;
int argc = calling->argc;
ID mid = ci->mid;
- rb_block_t *blockptr = calling->blockptr;
+ VALUE block_handler = calling->block_handler;
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->owner, mid);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, mid, me->owner, Qnil);
{
rb_control_frame_t *reg_cfp = th->cfp;
- vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC, recv,
- VM_ENVVAL_BLOCK_PTR(blockptr), (VALUE)me,
- 0, reg_cfp->sp, 1, 0);
+ vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC | VM_ENV_FLAG_LOCAL, recv,
+ block_handler, (VALUE)me,
+ 0, reg_cfp->sp, 0, 0);
if (len >= 0) rb_check_arity(argc, len, len);
@@ -155,13 +155,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
{
VALUE ret;
- if (th->passed_block) {
- calling->blockptr = (rb_block_t *)th->passed_block;
- th->passed_block = 0;
- }
- else {
- calling->blockptr = 0;
- }
+ calling->block_handler = vm_passed_block_handler(th);
again:
switch (cc->me->def->type) {
@@ -178,7 +172,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
}
vm_call_iseq_setup(th, reg_cfp, calling, ci, cc);
- th->cfp->flag |= VM_FRAME_FLAG_FINISH;
+ VM_ENV_FLAGS_SET(th->cfp->ep, VM_FRAME_FLAG_FINISH);
return vm_exec(th); /* CHECK_INTS in this function */
}
case VM_METHOD_TYPE_NOTIMPLEMENTED:
@@ -222,7 +216,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
goto again;
case VM_METHOD_TYPE_MISSING:
{
- th->passed_block = calling->blockptr;
+ vm_passed_block_handler_set(th, calling->block_handler);
return method_missing(calling->recv, ci->mid, calling->argc,
argv, MISSING_NOENTRY);
}
@@ -235,7 +229,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
{
rb_proc_t *proc;
GetProcPtr(calling->recv, proc);
- ret = rb_vm_invoke_proc(th, proc, calling->argc, argv, calling->blockptr);
+ ret = rb_vm_invoke_proc(th, proc, calling->argc, argv, calling->block_handler);
goto success;
}
default:
@@ -288,8 +282,9 @@ vm_call_super(rb_thread_t *th, int argc, const VALUE *argv)
VALUE
rb_call_super(int argc, const VALUE *argv)
{
- PASS_PASSED_BLOCK();
- return vm_call_super(GET_THREAD(), argc, argv);
+ rb_thread_t *th = GET_THREAD();
+ PASS_PASSED_BLOCK_HANDLER_TH(th);
+ return vm_call_super(th, argc, argv);
}
VALUE
@@ -747,11 +742,10 @@ method_missing(VALUE obj, ID id, int argc, const VALUE *argv, enum method_missin
{
VALUE *nargv, result, work, klass;
rb_thread_t *th = GET_THREAD();
- const rb_block_t *blockptr = th->passed_block;
+ VALUE block_handler = vm_passed_block_handler(th);
const rb_callable_method_entry_t *me;
th->method_missing_reason = call_status;
- th->passed_block = 0;
if (id == idMethodMissing) {
missing:
@@ -768,7 +762,7 @@ method_missing(VALUE obj, ID id, int argc, const VALUE *argv, enum method_missin
if (!klass) goto missing;
me = rb_callable_method_entry(klass, idMethodMissing);
if (!me || METHOD_ENTRY_BASIC(me)) goto missing;
- th->passed_block = blockptr;
+ vm_passed_block_handler_set(th, block_handler);
result = vm_call0(th, obj, idMethodMissing, argc, argv, me);
if (work) ALLOCV_END(work);
return result;
@@ -778,7 +772,7 @@ void
rb_raise_method_missing(rb_thread_t *th, int argc, const VALUE *argv,
VALUE obj, int call_status)
{
- th->passed_block = 0;
+ vm_passed_block_handler_set(th, VM_BLOCK_HANDLER_NONE);
raise_method_missing(th, argc, argv, obj, call_status | MISSING_MISSING);
}
@@ -874,23 +868,16 @@ rb_funcallv_public(VALUE recv, ID mid, int argc, const VALUE *argv)
VALUE
rb_funcall_passing_block(VALUE recv, ID mid, int argc, const VALUE *argv)
{
- PASS_PASSED_BLOCK();
-
+ PASS_PASSED_BLOCK_HANDLER();
return rb_call(recv, mid, argc, argv, CALL_PUBLIC);
}
VALUE
-rb_funcall_with_block(VALUE recv, ID mid, int argc, const VALUE *argv, VALUE pass_procval)
+rb_funcall_with_block(VALUE recv, ID mid, int argc, const VALUE *argv, VALUE passed_procval)
{
- if (!NIL_P(pass_procval)) {
+ if (!NIL_P(passed_procval)) {
rb_thread_t *th = GET_THREAD();
- rb_block_t *block = 0;
-
- rb_proc_t *pass_proc;
- GetProcPtr(pass_procval, pass_proc);
- block = &pass_proc->block;
-
- th->passed_block = block;
+ vm_passed_block_handler_set(th, passed_procval);
}
return rb_call(recv, mid, argc, argv, CALL_PUBLIC);
@@ -957,7 +944,7 @@ send_internal(int argc, const VALUE *argv, VALUE recv, call_type scope)
else {
argv++; argc--;
}
- PASS_PASSED_BLOCK_TH(th);
+ PASS_PASSED_BLOCK_HANDLER_TH(th);
ret = rb_call0(recv, id, argc, argv, scope, self);
ALLOCV_END(vargv);
return ret;
@@ -1080,13 +1067,8 @@ rb_yield_splat(VALUE values)
VALUE
rb_yield_block(VALUE val, VALUE arg, int argc, const VALUE *argv, VALUE blockarg)
{
- const rb_block_t *blockptr = NULL;
- if (!NIL_P(blockarg)) {
- rb_proc_t *blockproc;
- GetProcPtr(blockarg, blockproc);
- blockptr = &blockproc->block;
- }
- return vm_yield_with_block(GET_THREAD(), argc, argv, blockptr);
+ return vm_yield_with_block(GET_THREAD(), argc, argv,
+ NIL_P(blockarg) ? VM_BLOCK_HANDLER_NONE : blockarg);
}
static VALUE
@@ -1166,16 +1148,17 @@ rb_iterate0(VALUE (* it_proc) (VALUE), VALUE data1,
if (state == 0) {
iter_retry:
{
- rb_block_t *blockptr;
+ VALUE block_handler;
+
if (ifunc) {
- blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
- blockptr->iseq = (void *)ifunc;
- blockptr->proc = 0;
+ struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
+ captured->code.ifunc = ifunc;
+ block_handler = VM_BH_FROM_IFUNC_BLOCK(captured);
}
else {
- blockptr = VM_CF_BLOCK_PTR(cfp);
+ block_handler = VM_CF_BLOCK_HANDLER(cfp);
}
- th->passed_block = blockptr;
+ vm_passed_block_handler_set(th, block_handler);
}
retval = (*it_proc) (data1);
}
@@ -1294,12 +1277,14 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
{
int state;
VALUE result = Qundef;
- VALUE envval;
rb_thread_t *th = GET_THREAD();
- rb_env_t *env = NULL;
- rb_block_t block, *base_block;
- VALUE file = filename ? filename : rb_source_location(&lineno);
- int line = lineno;
+ struct rb_block block;
+ const struct rb_block *base_block;
+ volatile VALUE file;
+ volatile int line;
+
+ file = filename ? filename : rb_source_location(&lineno);
+ line = lineno;
{
rb_cref_t *cref = cref_arg;
@@ -1314,25 +1299,23 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
if (!NIL_P(scope)) {
bind = Check_TypedStruct(scope, &ruby_binding_data_type);
- {
- envval = bind->env;
- if (NIL_P(absolute_path) && !NIL_P(bind->path)) {
- file = bind->path;
- line = bind->first_lineno;
- absolute_path = rb_current_realfilepath();
- }
+
+ if (NIL_P(absolute_path) && !NIL_P(bind->path)) {
+ file = bind->path;
+ line = bind->first_lineno;
+ absolute_path = rb_current_realfilepath();
}
- GetEnvPtr(envval, env);
- base_block = &env->block;
+ base_block = &bind->block;
}
else {
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
if (cfp != 0) {
- block = *RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
+ block.as.captured = *VM_CFP_TO_CAPTURED_BLOCK(cfp);
+ block.as.captured.self = self;
+ block.as.captured.code.iseq = cfp->iseq;
+ block.type = block_type_iseq;
base_block = &block;
- base_block->self = self;
- base_block->iseq = cfp->iseq; /* TODO */
}
else {
rb_raise(rb_eRuntimeError, "Can't eval on top of Fiber or Thread");
@@ -1355,9 +1338,10 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
rb_exc_raise(adjust_backtrace_in_eval(th, th->errinfo));
}
- if (!cref && base_block->iseq) {
+    /* TODO: what is this code checking? */
+ if (!cref && base_block->as.captured.code.val) {
if (NIL_P(scope)) {
- rb_cref_t *orig_cref = rb_vm_get_cref(base_block->ep);
+ rb_cref_t *orig_cref = rb_vm_get_cref(vm_block_ep(base_block));
cref = vm_cref_dup(orig_cref);
}
else {
@@ -1373,7 +1357,7 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
/* save new env */
if (bind && iseq->body->local_table_size > 0) {
- bind->env = vm_make_env_object(th, th->cfp);
+ vm_bind_update_env(bind, vm_make_env_object(th, th->cfp));
}
}
@@ -1579,16 +1563,41 @@ static VALUE
yield_under(VALUE under, VALUE self, int argc, const VALUE *argv)
{
rb_thread_t *th = GET_THREAD();
- rb_block_t block, *blockptr;
+ rb_control_frame_t *cfp = th->cfp;
+ VALUE block_handler = VM_CF_BLOCK_HANDLER(cfp);
+ VALUE new_block_handler = 0;
+ const struct rb_captured_block *captured = NULL;
+ struct rb_captured_block new_captured;
+ const VALUE *ep = NULL;
rb_cref_t *cref;
- if ((blockptr = VM_CF_BLOCK_PTR(th->cfp)) != 0) {
- block = *blockptr;
- block.self = self;
- VM_CF_LEP(th->cfp)[0] = VM_ENVVAL_BLOCK_PTR(&block);
+ if (block_handler != VM_BLOCK_HANDLER_NONE) {
+ again:
+ switch (vm_block_handler_type(block_handler)) {
+ case block_handler_type_iseq:
+ captured = VM_BH_TO_CAPT_BLOCK(block_handler);
+ new_captured = *captured;
+ new_block_handler = VM_BH_FROM_ISEQ_BLOCK(&new_captured);
+ break;
+ case block_handler_type_ifunc:
+ captured = VM_BH_TO_CAPT_BLOCK(block_handler);
+ new_captured = *captured;
+ new_block_handler = VM_BH_FROM_IFUNC_BLOCK(&new_captured);
+ break;
+ case block_handler_type_proc:
+ block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
+ goto again;
+ case block_handler_type_symbol:
+ return rb_sym_proc_call(SYM2ID(VM_BH_TO_SYMBOL(block_handler)), 1, &self, VM_BLOCK_HANDLER_NONE);
+ }
+
+ new_captured.self = self;
+ ep = captured->ep;
+
+ VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
}
- cref = vm_cref_push(th, under, blockptr, TRUE);
+ cref = vm_cref_push(th, under, ep, TRUE);
return vm_yield_with_cref(th, argc, argv, cref);
}
@@ -1596,18 +1605,22 @@ VALUE
rb_yield_refine_block(VALUE refinement, VALUE refinements)
{
rb_thread_t *th = GET_THREAD();
- rb_block_t block, *blockptr;
- rb_cref_t *cref;
+ VALUE block_handler = VM_CF_BLOCK_HANDLER(th->cfp);
- if ((blockptr = VM_CF_BLOCK_PTR(th->cfp)) != 0) {
- block = *blockptr;
- block.self = refinement;
- VM_CF_LEP(th->cfp)[0] = VM_ENVVAL_BLOCK_PTR(&block);
+ if (vm_block_handler_type(block_handler) != block_handler_type_iseq) {
+ rb_bug("rb_yield_refine_block: an iseq block is required");
+ }
+ else {
+ const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
+ struct rb_captured_block new_captured = *captured;
+ VALUE new_block_handler = VM_BH_FROM_ISEQ_BLOCK(&new_captured);
+ const VALUE *ep = captured->ep;
+ rb_cref_t *cref = vm_cref_push(th, refinement, ep, TRUE);
+ CREF_REFINEMENTS_SET(cref, refinements);
+ VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
+ new_captured.self = refinement;
+ return vm_yield_with_cref(th, 0, NULL, cref);
}
- cref = vm_cref_push(th, refinement, blockptr, TRUE);
- CREF_REFINEMENTS_SET(cref, refinements);
-
- return vm_yield_with_cref(th, 0, NULL, cref);
}
/* string eval under the class/module context */
@@ -2093,9 +2106,9 @@ rb_f_local_variables(void)
local_var_list_add(&vars, cfp->iseq->body->local_table[i]);
}
}
- if (!VM_EP_LEP_P(cfp->ep)) {
+ if (!VM_ENV_LOCAL_P(cfp->ep)) {
/* block */
- VALUE *ep = VM_CF_PREV_EP(cfp);
+ const VALUE *ep = VM_CF_PREV_EP(cfp);
if (vm_collect_local_variables_in_heap(th, ep, &vars)) {
break;
@@ -2142,7 +2155,7 @@ rb_f_block_given_p(void)
rb_control_frame_t *cfp = th->cfp;
cfp = vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
- if (cfp != 0 && VM_CF_BLOCK_PTR(cfp)) {
+ if (cfp != NULL && VM_CF_BLOCK_HANDLER(cfp) != VM_BLOCK_HANDLER_NONE) {
return Qtrue;
}
else {
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 40bc4b228c..b425a2a5cc 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -72,7 +72,7 @@ callable_method_entry_p(const rb_callable_method_entry_t *me)
static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me)
{
- int magic = (int)(type & VM_FRAME_MAGIC_MASK);
+ unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
enum imemo_type cref_or_me_type = imemo_none;
if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
@@ -82,10 +82,10 @@ vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE
req_me = TRUE;
}
- if (req_block && !VM_ENVVAL_BLOCK_PTR_P(specval)) {
+ if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
}
- if (!req_block && VM_ENVVAL_BLOCK_PTR_P(specval)) {
+ if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
}
@@ -125,6 +125,7 @@ vm_check_frame(VALUE type,
VALUE cref_or_me)
{
int magic = (int)(type & VM_FRAME_MAGIC_MASK);
+ VM_ASSERT(FIXNUM_P(type));
#define CHECK(magic, req_block, req_me, req_cref) case magic: vm_check_frame_detail(type, req_block, req_me, req_cref, specval, cref_or_me); break;
switch (magic) {
@@ -165,7 +166,7 @@ vm_push_frame(rb_thread_t *th,
int i;
vm_check_frame(type, specval, cref_or_me);
- VM_ASSERT(local_size >= 1);
+ VM_ASSERT(local_size >= 0);
/* check stack overflow */
CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
@@ -175,23 +176,23 @@ vm_push_frame(rb_thread_t *th,
/* setup new frame */
cfp->pc = (VALUE *)pc;
cfp->iseq = (rb_iseq_t *)iseq;
- cfp->flag = type;
cfp->self = self;
- cfp->block_iseq = NULL;
- cfp->proc = 0;
+ cfp->block_code = NULL;
/* setup vm value stack */
/* initialize local variables */
- for (i=0; i < local_size - 1; i++) {
+ for (i=0; i < local_size; i++) {
*sp++ = Qnil;
}
- /* set special val */
- *sp++ = cref_or_me; /* Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
- *sp = specval;
-
- /* setup vm control frame stack */
+ /* setup ep with managing data */
+ VM_ASSERT(VM_ENV_DATA_INDEX_ME_CREF == -2);
+ VM_ASSERT(VM_ENV_DATA_INDEX_SPECVAL == -1);
+ VM_ASSERT(VM_ENV_DATA_INDEX_FLAGS == -0);
+ *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
+ *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
+ *sp = type; /* ep[-0] / ENV_FLAGS */
cfp->ep = sp;
cfp->sp = sp + 1;
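
The hunk above establishes the new env layout: after the locals, vm_push_frame stores cref_or_me, specval and the frame/env flags, leaving ep pointed at the flags word so the three slots are addressed as ep[-2], ep[-1] and ep[0]. A tiny standalone model of that layout (the constants and placeholder values below are illustrative only):

/* Standalone sketch (not VM code): the three env data slots on the value
 * stack, with ep pointing at the flags word. */
#include <stdio.h>

typedef unsigned long VALUE;

#define IDX_ME_CREF (-2)
#define IDX_SPECVAL (-1)
#define IDX_FLAGS   ( 0)

int
main(void)
{
    VALUE stack[16];
    VALUE *sp = stack;

    /* locals would be pushed here ... then the env data: */
    *sp++ = 0xC0DE;          /* ep[-2]: cref_or_me              */
    *sp++ = 0xB10C;          /* ep[-1]: block handler / prev ep */
    *sp   = 0xF1A6;          /* ep[ 0]: frame/env flags         */

    {
        const VALUE *ep = sp;
        printf("me/cref=%lx specval=%lx flags=%lx\n",
               ep[IDX_ME_CREF], ep[IDX_SPECVAL], ep[IDX_FLAGS]);
    }
    return 0;
}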
@@ -207,20 +208,33 @@ vm_push_frame(rb_thread_t *th,
return cfp;
}
+rb_control_frame_t *
+rb_vm_push_frame(rb_thread_t *th,
+ const rb_iseq_t *iseq,
+ VALUE type,
+ VALUE self,
+ VALUE specval,
+ VALUE cref_or_me,
+ const VALUE *pc,
+ VALUE *sp,
+ int local_size,
+ int stack_max)
+{
+ return vm_push_frame(th, iseq, type, self, specval, cref_or_me, pc, sp, local_size, stack_max);
+}
+
+/* return TRUE if the frame is finished */
static inline int
-vm_pop_frame(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *ep /* we'll use ep soon */)
+vm_pop_frame(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *ep)
{
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
+
if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
if (VMDEBUG == 2) SDR();
th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
- if (UNLIKELY(VM_FRAME_TYPE_FINISH_P(cfp))) {
- return TRUE;
- }
- else {
- return FALSE;
- }
+ return flags & VM_FRAME_FLAG_FINISH;
}
void
@@ -252,49 +266,93 @@ rb_error_arity(int argc, int min, int max)
rb_exc_raise(rb_arity_error_new(argc, min, max));
}
-/* svar */
+/* lvar */
+
+NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
-static inline struct vm_svar **
-lep_svar_place(rb_thread_t *th, const VALUE *lep)
+static void
+vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
- const VALUE *svar_place;
+    /* remember the env value forcibly */
+ rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
+ VM_FORCE_WRITE(&ep[index], v);
+ VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
+}
- if (lep && (th == NULL || th->root_lep != lep)) {
- svar_place = &lep[-1];
+static inline void
+vm_env_write(const VALUE *ep, int index, VALUE v)
+{
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
+ if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
+ VM_STACK_ENV_WRITE(ep, index, v);
}
else {
- svar_place = &th->root_svar;
+ vm_env_write_slowpath(ep, index, v);
}
+}
+
+void
+rb_vm_env_write(const VALUE *ep, int index, VALUE v)
+{
+ vm_env_write(ep, index, v);
+}
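
vm_env_write above splits into a fast path for stack-resident envs and a slow path that must call rb_gc_writebarrier_remember once the env has escaped to the heap and carries VM_ENV_FLAG_WB_REQUIRED. A standalone model of that split, with a plain counter standing in for the GC barrier and an invented flag value:

/* Standalone sketch (not VM code): flag-gated fast/slow env writes. */
#include <stdio.h>

typedef unsigned long VALUE;

#define FLAG_WB_REQUIRED 0x008   /* illustrative value */
#define IDX_FLAGS        0

static unsigned long wb_calls;   /* stands in for rb_gc_writebarrier_remember */

static void
env_write_slowpath(VALUE *ep, int index, VALUE v)
{
    wb_calls++;                  /* tell the GC about the heap env first */
    ep[index] = v;
}

static void
env_write(VALUE *ep, int index, VALUE v)
{
    if ((ep[IDX_FLAGS] & FLAG_WB_REQUIRED) == 0)
        ep[index] = v;           /* fast path: env still on the stack */
    else
        env_write_slowpath(ep, index, v);
}

int
main(void)
{
    VALUE env[4] = {0};
    VALUE *ep = &env[3];

    env_write(ep, -2, 1);                 /* no barrier needed */
    ep[IDX_FLAGS] |= FLAG_WB_REQUIRED;    /* pretend the env escaped */
    env_write(ep, -2, 2);                 /* now goes through the barrier */

    printf("writebarrier calls: %lu\n", wb_calls);   /* prints 1 */
    return 0;
}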
+
+
+/* svar */
#if VM_CHECK_MODE > 0
- {
- VALUE svar = *svar_place;
-
- if (svar != Qfalse) {
- if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
- switch (imemo_type(svar)) {
- case imemo_svar:
- case imemo_cref:
- case imemo_ment:
- goto okay;
- default:
- break; /* fall through */
- }
- }
- rb_bug("lep_svar_place: unknown type: %s", rb_obj_info(svar));
+static int
+vm_svar_valid_p(VALUE svar)
+{
+ if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
+ switch (imemo_type(svar)) {
+ case imemo_svar:
+ case imemo_cref:
+ case imemo_ment:
+ return TRUE;
+ default:
+ break;
}
- okay:;
}
+ rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
+ return FALSE;
+}
#endif
- return (struct vm_svar **)svar_place;
+static inline struct vm_svar *
+lep_svar(rb_thread_t *th, const VALUE *lep)
+{
+ VALUE svar;
+
+ if (lep && (th == NULL || th->root_lep != lep)) {
+ svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
+ }
+ else {
+ svar = th->root_svar;
+ }
+
+ VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
+
+ return (struct vm_svar *)svar;
+}
+
+static inline void
+lep_svar_write(rb_thread_t *th, const VALUE *lep, const struct vm_svar *svar)
+{
+ VM_ASSERT(vm_svar_valid_p((VALUE)svar));
+
+ if (lep && (th == NULL || th->root_lep != lep)) {
+ vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
+ }
+ else {
+ RB_OBJ_WRITE(th->self, &th->root_svar, svar);
+ }
}
static VALUE
lep_svar_get(rb_thread_t *th, const VALUE *lep, rb_num_t key)
{
- struct vm_svar ** const svar_place = lep_svar_place(th, lep);
- const struct vm_svar *const svar = *svar_place;
+ const struct vm_svar *svar = lep_svar(th, lep);
if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
@@ -323,13 +381,12 @@ svar_new(VALUE obj)
}
static void
-lep_svar_set(rb_thread_t *th, VALUE *lep, rb_num_t key, VALUE val)
+lep_svar_set(rb_thread_t *th, const VALUE *lep, rb_num_t key, VALUE val)
{
- struct vm_svar **svar_place = lep_svar_place(th, lep);
- struct vm_svar *svar = *svar_place;
+ struct vm_svar *svar = lep_svar(th, lep);
if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
- svar = *svar_place = svar_new((VALUE)svar);
+ lep_svar_write(th, lep, svar = svar_new((VALUE)svar));
}
switch (key) {
@@ -351,7 +408,7 @@ lep_svar_set(rb_thread_t *th, VALUE *lep, rb_num_t key, VALUE val)
}
static inline VALUE
-vm_getspecial(rb_thread_t *th, VALUE *lep, rb_num_t key, rb_num_t type)
+vm_getspecial(rb_thread_t *th, const VALUE *lep, rb_num_t key, rb_num_t type)
{
VALUE val;
@@ -416,15 +473,15 @@ check_method_entry(VALUE obj, int can_be_svar)
const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
- VALUE *ep = cfp->ep;
+ const VALUE *ep = cfp->ep;
rb_callable_method_entry_t *me;
- while (!VM_EP_LEP_P(ep)) {
- if ((me = check_method_entry(ep[-1], FALSE)) != NULL) return me;
- ep = VM_EP_PREV_EP(ep);
+ while (!VM_ENV_LOCAL_P(ep)) {
+ if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
+ ep = VM_ENV_PREV_EP(ep);
}
- return check_method_entry(ep[-1], TRUE);
+ return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
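
rb_vm_frame_method_entry above now walks the ep chain with VM_ENV_PREV_EP until VM_ENV_LOCAL_P reports the method-level ep, instead of testing the old block-pointer bit. A standalone sketch of that walk over a toy two-level chain (the flag and tag values here are illustrative, not the patch's constants):

/* Standalone sketch (not VM code): hopping from a block ep to the local ep
 * via the tagged "previous ep" slot at ep[-1]. */
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t VALUE;

#define FLAG_LOCAL   0x02
#define GUARDED(p)   ((VALUE)(p) | 0x01)            /* mark a pointer slot */
#define UNGUARD(v)   ((const VALUE *)((v) & ~(VALUE)0x03))

static const VALUE *
local_ep(const VALUE *ep)
{
    while ((ep[0] & FLAG_LOCAL) == 0)   /* not yet at the method frame */
        ep = UNGUARD(ep[-1]);           /* hop to the previous ep */
    return ep;
}

int
main(void)
{
    VALUE method_env[3] = { 111, 0, FLAG_LOCAL };              /* me/cref, specval, flags */
    VALUE block_env[3]  = { 222, GUARDED(&method_env[2]), 0 };

    printf("%lu\n", (unsigned long)local_ep(&block_env[2])[-2]);   /* prints 111 */
    return 0;
}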
static rb_cref_t *
@@ -472,12 +529,12 @@ vm_env_cref(const VALUE *ep)
{
rb_cref_t *cref;
- while (!VM_EP_LEP_P(ep)) {
- if ((cref = check_cref(ep[-1], FALSE)) != NULL) return cref;
- ep = VM_EP_PREV_EP(ep);
+ while (!VM_ENV_LOCAL_P(ep)) {
+ if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
+ ep = VM_ENV_PREV_EP(ep);
}
- return check_cref(ep[-1], TRUE);
+ return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
static int
@@ -499,15 +556,15 @@ is_cref(const VALUE v, int can_be_svar)
static int
vm_env_cref_by_cref(const VALUE *ep)
{
- while (!VM_EP_LEP_P(ep)) {
- if (is_cref(ep[-1], FALSE)) return TRUE;
- ep = VM_EP_PREV_EP(ep);
+ while (!VM_ENV_LOCAL_P(ep)) {
+ if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
+ ep = VM_ENV_PREV_EP(ep);
}
- return is_cref(ep[-1], TRUE);
+ return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
static rb_cref_t *
-cref_replace_with_duplicated_cref_each_frame(VALUE *vptr, int can_be_svar, VALUE parent)
+cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
const VALUE v = *vptr;
rb_cref_t *cref, *new_cref;
@@ -518,16 +575,15 @@ cref_replace_with_duplicated_cref_each_frame(VALUE *vptr, int can_be_svar, VALUE
cref = (rb_cref_t *)v;
new_cref = vm_cref_dup(cref);
if (parent) {
- /* this pointer is in svar */
RB_OBJ_WRITE(parent, vptr, new_cref);
}
else {
- *vptr = (VALUE)new_cref;
+ VM_FORCE_WRITE(vptr, (VALUE)new_cref);
}
return (rb_cref_t *)new_cref;
case imemo_svar:
if (can_be_svar) {
- return cref_replace_with_duplicated_cref_each_frame((VALUE *)&((struct vm_svar *)v)->cref_or_me, FALSE, v);
+ return cref_replace_with_duplicated_cref_each_frame((const VALUE *)&((struct vm_svar *)v)->cref_or_me, FALSE, v);
}
case imemo_ment:
rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
@@ -543,14 +599,17 @@ vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
if (vm_env_cref_by_cref(ep)) {
rb_cref_t *cref;
+ VALUE envval;
- while (!VM_EP_LEP_P(ep)) {
- if ((cref = cref_replace_with_duplicated_cref_each_frame((VALUE *)&ep[-1], FALSE, Qfalse)) != NULL) {
+ while (!VM_ENV_LOCAL_P(ep)) {
+ envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
+ if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
return cref;
}
- ep = VM_EP_PREV_EP(ep);
+ ep = VM_ENV_PREV_EP(ep);
}
- return cref_replace_with_duplicated_cref_each_frame((VALUE *)&ep[-1], TRUE, Qfalse);
+ envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
+ return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
}
else {
rb_bug("vm_cref_dup: unreachable");
@@ -608,12 +667,12 @@ rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t
}
static rb_cref_t *
-vm_cref_push(rb_thread_t *th, VALUE klass, rb_block_t *blockptr, int pushed_by_eval)
+vm_cref_push(rb_thread_t *th, VALUE klass, const VALUE *ep, int pushed_by_eval)
{
rb_cref_t *prev_cref = NULL;
- if (blockptr) {
- prev_cref = vm_env_cref(blockptr->ep);
+ if (ep) {
+ prev_cref = vm_env_cref(ep);
}
else {
rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
@@ -921,7 +980,7 @@ static VALUE
vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
const int flag, const rb_num_t level, const VALUE throwobj)
{
- rb_control_frame_t *escape_cfp = NULL;
+ const rb_control_frame_t *escape_cfp = NULL;
const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(th); /* end of control frame pointer */
if (flag != 0) {
@@ -929,7 +988,7 @@ vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ru
}
else if (state == TAG_BREAK) {
int is_orphan = 1;
- VALUE *ep = GET_EP();
+ const VALUE *ep = GET_EP();
const rb_iseq_t *base_iseq = GET_ISEQ();
escape_cfp = reg_cfp;
@@ -940,7 +999,7 @@ vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ru
base_iseq = escape_cfp->iseq;
}
else {
- ep = VM_EP_PREV_EP(ep);
+ ep = VM_ENV_PREV_EP(ep);
base_iseq = base_iseq->body->parent_iseq;
escape_cfp = rb_vm_search_cf_from_ep(th, escape_cfp, ep);
VM_ASSERT(escape_cfp->iseq == base_iseq);
@@ -953,7 +1012,7 @@ vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ru
state = TAG_RETURN;
}
else {
- ep = VM_EP_PREV_EP(ep);
+ ep = VM_ENV_PREV_EP(ep);
while (escape_cfp < eocfp) {
if (escape_cfp->ep == ep) {
@@ -986,22 +1045,22 @@ vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ru
}
else if (state == TAG_RETRY) {
rb_num_t i;
- VALUE *ep = VM_EP_PREV_EP(GET_EP());
+ const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
for (i = 0; i < level; i++) {
- ep = VM_EP_PREV_EP(ep);
+ ep = VM_ENV_PREV_EP(ep);
}
escape_cfp = rb_vm_search_cf_from_ep(th, reg_cfp, ep);
}
else if (state == TAG_RETURN) {
- VALUE *current_ep = GET_EP();
- VALUE *target_lep = VM_EP_LEP(current_ep);
+ const VALUE *current_ep = GET_EP();
+ const VALUE *target_lep = VM_EP_LEP(current_ep);
int in_class_frame = 0;
escape_cfp = reg_cfp;
while (escape_cfp < eocfp) {
- VALUE *lep = VM_CF_LEP(escape_cfp);
+ const VALUE *lep = VM_CF_LEP(escape_cfp);
if (!target_lep) {
target_lep = lep;
@@ -1021,14 +1080,14 @@ vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ru
goto valid_return;
}
else {
- VALUE *tep = current_ep;
+ const VALUE *tep = current_ep;
while (target_lep != tep) {
if (escape_cfp->ep == tep) {
/* in lambda */
goto valid_return;
}
- tep = VM_EP_PREV_EP(tep);
+ tep = VM_ENV_PREV_EP(tep);
}
}
}
@@ -1140,6 +1199,7 @@ vm_search_method(const struct rb_call_info *ci, struct rb_call_cache *cc, VALUE
#if OPT_INLINE_METHOD_CACHE
if (LIKELY(GET_GLOBAL_METHOD_STATE() == cc->method_state && RCLASS_SERIAL(klass) == cc->class_serial)) {
/* cache hit! */
+ VM_ASSERT(cc->call != NULL);
return;
}
#endif
@@ -1287,26 +1347,29 @@ double_cmp_ge(double a, double b)
}
static VALUE *
-vm_base_ptr(rb_control_frame_t *cfp)
+vm_base_ptr(const rb_control_frame_t *cfp)
{
- rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
- VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_size + 1;
-
- if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
- /* adjust `self' */
- bp += 1;
- }
+ const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ if (cfp->iseq && RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
+ VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
+ if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
+ /* adjust `self' */
+ bp += 1;
+ }
#if VM_DEBUG_BP_CHECK
- if (bp != cfp->bp_check) {
- fprintf(stderr, "bp_check: %ld, bp: %ld\n",
- (long)(cfp->bp_check - GET_THREAD()->stack),
- (long)(bp - GET_THREAD()->stack));
- rb_bug("vm_base_ptr: unreachable");
- }
+ if (bp != cfp->bp_check) {
+ fprintf(stderr, "bp_check: %ld, bp: %ld\n",
+ (long)(cfp->bp_check - GET_THREAD()->stack),
+ (long)(bp - GET_THREAD()->stack));
+ rb_bug("vm_base_ptr: unreachable");
+ }
#endif
-
- return bp;
+ return bp;
+ }
+ else {
+ return NULL;
+ }
}
/* method call processes with call_info */
@@ -1347,7 +1410,7 @@ vm_call_iseq_setup_normal_0start(rb_thread_t *th, rb_control_frame_t *cfp, struc
{
const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
int param = iseq->body->param.size;
- int local = iseq->body->local_size;
+ int local = iseq->body->local_table_size;
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, param, local);
}
@@ -1390,7 +1453,7 @@ vm_call_iseq_setup(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_i
{
const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
const int param_size = iseq->body->param.size;
- const int local_size = iseq->body->local_size;
+ const int local_size = iseq->body->local_table_size;
const int opt_pc = vm_callee_setup_arg(th, calling, ci, cc, def_iseq_ptr(cc->me->def), cfp->sp - calling->argc, param_size, local_size);
return vm_call_iseq_setup_2(th, cfp, calling, ci, cc, opt_pc, param_size, local_size);
}
@@ -1417,8 +1480,8 @@ vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_ca
VALUE *sp = argv + param_size;
cfp->sp = argv - 1 /* recv */;
- vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD, calling->recv,
- VM_ENVVAL_BLOCK_PTR(calling->blockptr), (VALUE)me,
+ vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
+ calling->block_handler, (VALUE)me,
iseq->body->iseq_encoded + opt_pc, sp,
local_size - param_size,
iseq->body->stack_max);
@@ -1437,6 +1500,18 @@ vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_
VALUE *sp_orig, *sp;
VALUE finish_flag = VM_FRAME_TYPE_FINISH_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
+ if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
+ struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
+ const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
+ dst_captured->code.val = src_captured->code.val;
+ if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
+ calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
+ }
+ else {
+ calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
+ }
+ }
+
vm_pop_frame(th, cfp, cfp->ep);
cfp = th->cfp;
@@ -1453,10 +1528,10 @@ vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_
*sp++ = src_argv[i];
}
- vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD | finish_flag,
- calling->recv, VM_ENVVAL_BLOCK_PTR(calling->blockptr), (VALUE)me,
+ vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
+ calling->recv, calling->block_handler, (VALUE)me,
iseq->body->iseq_encoded + opt_pc, sp,
- iseq->body->local_size - iseq->body->param.size,
+ iseq->body->local_table_size - iseq->body->param.size,
iseq->body->stack_max);
cfp->sp = sp_orig;
@@ -1637,15 +1712,15 @@ vm_call_cfunc_with_frame(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb
int len = cfunc->argc;
VALUE recv = calling->recv;
- rb_block_t *blockptr = calling->blockptr;
+ VALUE block_handler = calling->block_handler;
int argc = calling->argc;
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->owner, me->called_id);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->owner, Qundef);
- vm_push_frame(th, NULL, VM_FRAME_MAGIC_CFUNC, recv,
- VM_ENVVAL_BLOCK_PTR(blockptr), (VALUE)me,
- 0, th->cfp->sp, 1, 0);
+ vm_push_frame(th, NULL, VM_FRAME_MAGIC_CFUNC | VM_ENV_FLAG_LOCAL, recv,
+ block_handler, (VALUE)me,
+ 0, th->cfp->sp, 0, 0);
if (len >= 0) rb_check_arity(argc, len, len);
@@ -1733,9 +1808,9 @@ rb_vm_call_cfunc_push_frame(rb_thread_t *th)
const rb_callable_method_entry_t *me = calling->me;
th->passed_ci = 0;
- vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC,
- calling->recv, VM_ENVVAL_BLOCK_PTR(calling->blockptr), (VALUE)me /* cref */,
- 0, th->cfp->sp + cc->aux.inc_sp, 1, 0);
+ vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC | VM_ENV_FLAG_LOCAL,
+ calling->recv, calling->block_handler, (VALUE)me /* cref */,
+ 0, th->cfp->sp + cc->aux.inc_sp, 0, 0);
if (calling->call != vm_call_general) {
calling->call = vm_call_cfunc_with_frame;
@@ -1774,7 +1849,7 @@ vm_call_bmethod_body(rb_thread_t *th, struct rb_calling_info *calling, const str
/* control block frame */
th->passed_bmethod_me = cc->me;
GetProcPtr(cc->me->def->body.proc, proc);
- val = vm_invoke_bmethod(th, proc, calling->recv, calling->argc, argv, calling->blockptr);
+ val = vm_invoke_bmethod(th, proc, calling->recv, calling->argc, argv, calling->block_handler);
return val;
}
@@ -1786,7 +1861,6 @@ vm_call_bmethod(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info
int argc;
CALLER_SETUP_ARG(cfp, calling, ci);
-
argc = calling->argc;
argv = ALLOCA_N(VALUE, argc);
MEMCPY(argv, cfp->sp - argc, VALUE, argc);
@@ -1879,7 +1953,7 @@ vm_call_opt_call(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_inf
MEMCPY(argv, cfp->sp - argc, VALUE, argc);
cfp->sp -= argc + 1;
- return rb_vm_invoke_proc(th, proc, argc, argv, calling->blockptr);
+ return rb_vm_invoke_proc(th, proc, argc, argv, calling->block_handler);
}
static VALUE
@@ -2305,18 +2379,40 @@ block_proc_is_lambda(const VALUE procval)
}
static VALUE
-vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block, VALUE self,
- int argc, const VALUE *argv,
- const rb_block_t *blockargptr)
+vm_block_handler_to_proc(rb_thread_t *th, VALUE block_handler)
+{
+ VALUE blockarg = Qnil;
+
+ if (block_handler != VM_BLOCK_HANDLER_NONE) {
+ switch (vm_block_handler_type(block_handler)) {
+ case block_handler_type_proc:
+ blockarg = block_handler;
+ break;
+ case block_handler_type_symbol:
+ blockarg = rb_sym_to_proc(block_handler);
+ break;
+ case block_handler_type_iseq:
+ case block_handler_type_ifunc:
+ blockarg = rb_vm_make_proc(th, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
+ break;
+ }
+ }
+
+ return blockarg;
+}
+
+static VALUE
+vm_yield_with_cfunc(rb_thread_t *th,
+ const struct rb_captured_block *captured,
+ VALUE self, int argc, const VALUE *argv, VALUE block_handler)
{
- const struct vm_ifunc *ifunc = (struct vm_ifunc *)block->iseq;
- VALUE val, arg, blockarg, data;
- rb_block_call_func *func;
+ int is_lambda = FALSE; /* TODO */
+ VALUE val, arg, blockarg;
+ const struct vm_ifunc *ifunc = captured->code.ifunc;
const rb_callable_method_entry_t *me = th->passed_bmethod_me;
th->passed_bmethod_me = NULL;
- if (!RUBY_VM_IFUNC_P(block->proc) && !SYMBOL_P(block->proc) &&
- block_proc_is_lambda(block->proc)) {
+ if (is_lambda) {
arg = rb_ary_new4(argc, argv);
}
else if (argc == 0) {
@@ -2326,36 +2422,26 @@ vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block, VALUE self,
arg = argv[0];
}
- if (blockargptr) {
- if (blockargptr->proc) {
- blockarg = blockargptr->proc;
- }
- else {
- blockarg = rb_vm_make_proc(th, blockargptr, rb_cProc);
- }
- }
- else {
- blockarg = Qnil;
- }
-
- vm_push_frame(th, (rb_iseq_t *)ifunc, VM_FRAME_MAGIC_IFUNC,
- self, VM_ENVVAL_PREV_EP_PTR(block->ep), (VALUE)me,
- 0, th->cfp->sp, 1, 0);
+ blockarg = vm_block_handler_to_proc(th, block_handler);
- if (SYMBOL_P(ifunc)) {
- func = rb_sym_proc_call;
- data = SYM2ID((VALUE)ifunc);
- }
- else {
- func = (rb_block_call_func *)ifunc->func;
- data = (VALUE)ifunc->data;
- }
- val = (*func)(arg, data, argc, argv, blockarg);
+ vm_push_frame(th, (const rb_iseq_t *)captured->code.ifunc,
+ VM_FRAME_MAGIC_IFUNC,
+ self,
+ VM_GUARDED_PREV_EP(captured->ep),
+ (VALUE)me,
+ 0, th->cfp->sp, 0, 0);
+ val = (*ifunc->func)(arg, ifunc->data, argc, argv, blockarg);
rb_vm_pop_frame(th);
return val;
}
+static VALUE
+vm_yield_with_symbol(rb_thread_t *th, VALUE symbol, int argc, const VALUE *argv, VALUE block_handler)
+{
+ return rb_sym_proc_call(SYM2ID(symbol), argc, argv, vm_block_handler_to_proc(th, block_handler));
+}
+
static inline int
vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
{
@@ -2428,14 +2514,14 @@ vm_callee_setup_block_arg(rb_thread_t *th, struct rb_calling_info *calling, cons
}
static int
-vm_yield_setup_args(rb_thread_t *th, const rb_iseq_t *iseq, const int argc, VALUE *argv, const rb_block_t *blockptr, enum arg_setup_type arg_setup_type)
+vm_yield_setup_args(rb_thread_t *th, const rb_iseq_t *iseq, const int argc, VALUE *argv, VALUE block_handler, enum arg_setup_type arg_setup_type)
{
struct rb_calling_info calling_entry, *calling;
struct rb_call_info ci_entry, *ci;
calling = &calling_entry;
calling->argc = argc;
- calling->blockptr = (rb_block_t *)blockptr;
+ calling->block_handler = block_handler;
ci_entry.flag = 0;
ci = &ci_entry;
@@ -2443,64 +2529,128 @@ vm_yield_setup_args(rb_thread_t *th, const rb_iseq_t *iseq, const int argc, VALU
return vm_callee_setup_block_arg(th, calling, ci, iseq, argv, arg_setup_type);
}
-/* ruby iseq -> ruby block iseq */
+/* ruby iseq -> ruby block */
static VALUE
-vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci)
+vm_invoke_iseq_block(rb_thread_t *th, rb_control_frame_t *reg_cfp,
+ struct rb_calling_info *calling, const struct rb_call_info *ci,
+ int is_lambda, const struct rb_captured_block *captured)
{
- const rb_block_t *block = VM_CF_BLOCK_PTR(reg_cfp);
- VALUE type = GET_ISEQ()->body->local_iseq->body->type;
+ const rb_iseq_t *iseq = captured->code.iseq;
+ const int arg_size = iseq->body->param.size;
+ VALUE * const rsp = GET_SP() - calling->argc;
+ int opt_pc = vm_callee_setup_block_arg(th, calling, ci, iseq, rsp, is_lambda ? arg_setup_lambda : arg_setup_block);
- if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) || block == 0) {
- rb_vm_localjump_error("no block given (yield)", Qnil, 0);
- }
+ SET_SP(rsp);
- if (RUBY_VM_NORMAL_ISEQ_P(block->iseq)) {
- const rb_iseq_t *iseq = block->iseq;
- const int arg_size = iseq->body->param.size;
- int is_lambda = block_proc_is_lambda(block->proc);
- VALUE * const rsp = GET_SP() - calling->argc;
- int opt_pc = vm_callee_setup_block_arg(th, calling, ci, iseq, rsp, is_lambda ? arg_setup_lambda : arg_setup_block);
+ vm_push_frame(th, iseq,
+ is_lambda ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK,
+ captured->self,
+ VM_GUARDED_PREV_EP(captured->ep), 0,
+ iseq->body->iseq_encoded + opt_pc,
+ rsp + arg_size,
+ iseq->body->local_table_size - arg_size, iseq->body->stack_max);
- SET_SP(rsp);
+ return Qundef;
+}
- vm_push_frame(th, iseq,
- is_lambda ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK,
- block->self,
- VM_ENVVAL_PREV_EP_PTR(block->ep), 0,
- iseq->body->iseq_encoded + opt_pc,
- rsp + arg_size,
- iseq->body->local_size - arg_size, iseq->body->stack_max);
+static VALUE
+vm_invoke_symbol_block(rb_thread_t *th, rb_control_frame_t *reg_cfp,
+ struct rb_calling_info *calling, const struct rb_call_info *ci,
+ VALUE symbol)
+{
+ VALUE val;
+ int argc;
+ CALLER_SETUP_ARG(th->cfp, calling, ci);
+ argc = calling->argc;
+ val = vm_yield_with_symbol(th, symbol, argc, STACK_ADDR_FROM_TOP(argc), VM_BLOCK_HANDLER_NONE);
+ POPN(argc);
+ return val;
+}
+
+static VALUE
+vm_invoke_ifunc_block(rb_thread_t *th, rb_control_frame_t *reg_cfp,
+ struct rb_calling_info *calling, const struct rb_call_info *ci,
+ const struct rb_captured_block *captured)
+{
+ VALUE val;
+ int argc;
+ CALLER_SETUP_ARG(th->cfp, calling, ci);
+ argc = calling->argc;
+ val = vm_yield_with_cfunc(th, captured, captured->self, argc, STACK_ADDR_FROM_TOP(argc), VM_BLOCK_HANDLER_NONE);
+    POPN(argc); /* TODO: should this be done before the C yield? */
+ return val;
+}
+
+static VALUE
+vm_proc_to_block_handler(VALUE procval)
+{
+ const struct rb_block *block = vm_proc_block(procval);
- return Qundef;
+ switch (vm_block_type(block)) {
+ case block_type_iseq:
+ return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
+ case block_type_ifunc:
+ return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
+ case block_type_symbol:
+ return VM_BH_FROM_SYMBOL(block->as.symbol);
+ case block_type_proc:
+ return VM_BH_FROM_PROC(block->as.proc);
}
- else {
- VALUE val;
- int argc;
- CALLER_SETUP_ARG(th->cfp, calling, ci);
- argc = calling->argc;
- val = vm_yield_with_cfunc(th, block, block->self, argc, STACK_ADDR_FROM_TOP(argc), 0);
- POPN(argc); /* TODO: should put before C/yield? */
- return val;
+ VM_UNREACHABLE(vm_yield_with_proc);
+ return Qundef;
+}
+
+static VALUE
+vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci)
+{
+ VALUE block_handler = VM_CF_BLOCK_HANDLER(reg_cfp);
+ VALUE type = GET_ISEQ()->body->local_iseq->body->type;
+ int is_lambda = FALSE;
+
+ if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) ||
+ block_handler == VM_BLOCK_HANDLER_NONE) {
+ rb_vm_localjump_error("no block given (yield)", Qnil, 0);
+ }
+
+ again:
+ switch (vm_block_handler_type(block_handler)) {
+ case block_handler_type_iseq:
+ {
+ const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
+ return vm_invoke_iseq_block(th, reg_cfp, calling, ci, is_lambda, captured);
+ }
+ case block_handler_type_ifunc:
+ {
+ const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
+ return vm_invoke_ifunc_block(th, reg_cfp, calling, ci, captured);
+ }
+ case block_handler_type_proc:
+ is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
+ block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
+ goto again;
+ case block_handler_type_symbol:
+ return vm_invoke_symbol_block(th, reg_cfp, calling, ci, VM_BH_TO_SYMBOL(block_handler));
}
+ VM_UNREACHABLE(vm_invoke_block: unreachable);
+ return Qnil;
}
static VALUE
vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
{
- rb_block_t *blockptr;
rb_thread_t *th = GET_THREAD();
- rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
+ const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
+ struct rb_captured_block *captured;
if (cfp == 0) {
rb_bug("vm_make_proc_with_iseq: unreachable");
}
- blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
- blockptr->iseq = blockiseq;
- blockptr->proc = 0;
+ captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
+ captured->code.iseq = blockiseq;
- return rb_vm_make_proc(th, blockptr, rb_cProc);
+ return rb_vm_make_proc(th, captured, rb_cProc);
}
static VALUE
@@ -2619,7 +2769,7 @@ vm_defined(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE
break;
}
case DEFINED_YIELD:
- if (GET_BLOCK_PTR()) {
+ if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
expr_type = DEFINED_YIELD;
}
break;
diff --git a/vm_insnhelper.h b/vm_insnhelper.h
index 69eaaacf2e..18fabde187 100644
--- a/vm_insnhelper.h
+++ b/vm_insnhelper.h
@@ -104,7 +104,7 @@ enum vm_regan_acttype {
/* deal with variables */
/**********************************************************/
-#define GET_PREV_EP(ep) ((VALUE *)((ep)[0] & ~0x03))
+#define GET_PREV_EP(ep) ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))
#define GET_GLOBAL(entry) rb_gvar_get((struct rb_global_entry*)(entry))
#define SET_GLOBAL(entry, val) rb_gvar_set((struct rb_global_entry*)(entry), (val))
@@ -148,7 +148,7 @@ enum vm_regan_acttype {
#define CI_SET_FASTPATH(ci, func, enabled) /* do nothing */
#endif
-#define GET_BLOCK_PTR() ((rb_block_t *)(GC_GUARDED_PTR_REF(GET_LEP()[0])))
+#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])
/**********************************************************/
/* deal with control flow 3: exception */
@@ -173,7 +173,7 @@ enum vm_regan_acttype {
#define CALL_SIMPLE_METHOD(recv_) do { \
struct rb_calling_info calling; \
- calling.blockptr = NULL; \
+ calling.block_handler = VM_BLOCK_HANDLER_NONE; \
calling.argc = ci->orig_argc; \
vm_search_method(ci, cc, calling.recv = (recv_)); \
CALL_METHOD(&calling, ci, cc); \
@@ -189,7 +189,7 @@ static VALUE make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
int argc, const VALUE *argv, int priv);
static inline struct vm_throw_data *
-THROW_DATA_NEW(VALUE val, rb_control_frame_t *cf, VALUE st)
+THROW_DATA_NEW(VALUE val, const rb_control_frame_t *cf, VALUE st)
{
return (struct vm_throw_data *)rb_imemo_new(imemo_throw_data, val, (VALUE)cf, st, 0);
}
diff --git a/vm_method.c b/vm_method.c
index 69f98c4421..acdd976ff9 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -1871,9 +1871,9 @@ call_method_entry(rb_thread_t *th, VALUE defined_class, VALUE obj, ID id,
{
const rb_callable_method_entry_t *cme =
prepare_callable_method_entry(defined_class, id, me);
- const rb_block_t *passed_block = th->passed_block;
+ VALUE passed_block_handler = vm_passed_block_handler(th);
VALUE result = vm_call0(th, obj, id, argc, argv, cme);
- th->passed_block = passed_block;
+ vm_passed_block_handler_set(th, passed_block_handler);
return result;
}