author     ko1 <ko1@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>    2016-07-28 11:02:30 +0000
committer  ko1 <ko1@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>    2016-07-28 11:02:30 +0000
commit     9f60791a0412cae804f13ed3e746a634c7a0731b (patch)
tree       49b3f9ff4924a622e13fbdfc9478d34717d19378 /vm.c
parent     683eafd973e4db60756f6ee12e1b066e316f026f (diff)
* vm_core.h: revisit the structure of frame, block and env.
[Bug #12628] This patch introduces many changes.

* Introduce the concept of a "Block Handler (BH)" to represent passed blocks.
* Move rb_control_frame_t::flag to ep[0] (as a special local variable).
  These flags represent not only the frame type, but also env flags such as "escaped".
* Rename `rb_block_t` to `struct rb_block`.
* Make Proc, Binding and RubyVM::Env objects WB-protected.

Check [Bug #12628] for more details.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@55766 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
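As context for readers of the diff below: a "block handler" is a single VALUE that encodes the passed block either as a tagged pointer to a captured block living on the VM stack, as a Proc, or as a Symbol. The following self-contained sketch illustrates only the tagged-pointer idea (cf. the VM_TAGGED_PTR_REF(block_handler, 0x03) usage further down); the struct layout, tag bits, and helper names here are hypothetical stand-ins, not the actual vm_core.h definitions.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t VALUE;

    /* Hypothetical stand-in for struct rb_captured_block. */
    struct captured_block {
        VALUE self;
        const VALUE *ep;
        const void *code;
    };

    /* Pack a pointer to a stack-resident captured block into one VALUE by
     * setting a low tag bit (the real VM masks with 0x03; 0x01 is used here
     * purely for illustration). */
    static VALUE bh_from_captured(const struct captured_block *captured)
    {
        assert(((uintptr_t)captured & 0x03) == 0);  /* needs 4-byte alignment */
        return (VALUE)captured | 0x01;
    }

    static const struct captured_block *bh_to_captured(VALUE block_handler)
    {
        return (const struct captured_block *)(block_handler & ~(VALUE)0x03);
    }

    int main(void)
    {
        struct captured_block blk = { 0, NULL, NULL };
        VALUE bh = bh_from_captured(&blk);

        printf("%d\n", bh_to_captured(bh) == &blk);  /* => 1: round-trips */
        return 0;
    }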
Diffstat (limited to 'vm.c')
-rw-r--r--  vm.c  714
1 file changed, 437 insertions(+), 277 deletions(-)
diff --git a/vm.c b/vm.c
index c900eec570..66eee02bd8 100644
--- a/vm.c
+++ b/vm.c
@@ -8,10 +8,6 @@
**********************************************************************/
-#ifndef VM_CHECK_MODE
-#define VM_CHECK_MODE 0
-#endif
-
#include "internal.h"
#include "ruby/vm.h"
#include "ruby/st.h"
@@ -25,18 +21,18 @@
VALUE rb_str_concat_literals(size_t, const VALUE*);
-PUREFUNC(static inline VALUE *VM_EP_LEP(VALUE *));
-static inline VALUE *
-VM_EP_LEP(VALUE *ep)
+PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *));
+static inline const VALUE *
+VM_EP_LEP(const VALUE *ep)
{
- while (!VM_EP_LEP_P(ep)) {
- ep = VM_EP_PREV_EP(ep);
+ while (!VM_ENV_LOCAL_P(ep)) {
+ ep = VM_ENV_PREV_EP(ep);
}
return ep;
}
-static inline rb_control_frame_t *
-rb_vm_search_cf_from_ep(const rb_thread_t * const th, rb_control_frame_t *cfp, const VALUE * const ep)
+static inline const rb_control_frame_t *
+rb_vm_search_cf_from_ep(const rb_thread_t * const th, const rb_control_frame_t *cfp, const VALUE * const ep)
{
if (!ep) {
return NULL;
@@ -55,37 +51,125 @@ rb_vm_search_cf_from_ep(const rb_thread_t * const th, rb_control_frame_t *cfp, c
}
}
-VALUE *
-rb_vm_ep_local_ep(VALUE *ep)
+const VALUE *
+rb_vm_ep_local_ep(const VALUE *ep)
{
return VM_EP_LEP(ep);
}
-PUREFUNC(static inline VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
-static inline VALUE *
+PUREFUNC(static inline const VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
+static inline const VALUE *
VM_CF_LEP(const rb_control_frame_t * const cfp)
{
return VM_EP_LEP(cfp->ep);
}
-static inline VALUE *
+static inline const VALUE *
VM_CF_PREV_EP(const rb_control_frame_t * const cfp)
{
- return VM_EP_PREV_EP(cfp->ep);
+ return VM_ENV_PREV_EP(cfp->ep);
+}
+
+PUREFUNC(static inline VALUE VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp));
+static inline VALUE
+VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
+{
+ const VALUE *ep = VM_CF_LEP(cfp);
+ return VM_ENV_BLOCK_HANDLER(ep);
+}
+
+VALUE
+rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
+{
+ return VM_CF_BLOCK_HANDLER(cfp);
+}
+
+#if VM_CHECK_MODE > 0
+static int
+VM_CFP_IN_HEAP_P(const rb_thread_t *th, const rb_control_frame_t *cfp)
+{
+ const VALUE *start = th->stack;
+ const VALUE *end = (VALUE *)th->stack + th->stack_size;
+ if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
+ return FALSE;
+ }
+ else {
+ return TRUE;
+ }
+}
+
+static int envval_p(VALUE envval);
+
+static int
+VM_EP_IN_HEAP_P(const rb_thread_t *th, const VALUE *ep)
+{
+ const VALUE *start = th->stack;
+ const VALUE *end = (VALUE *)th->cfp;
+ if (start <= ep && ep < end) {
+ return FALSE;
+ }
+ else {
+ return TRUE;
+ }
+}
+
+int
+vm_ep_in_heap_p_(const rb_thread_t *th, const VALUE *ep)
+{
+ if (VM_EP_IN_HEAP_P(th, ep)) {
+ VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */
+
+ if (envval != Qundef) {
+ rb_env_t *env;
+
+ VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
+ VM_ASSERT(envval_p(envval));
+ GetEnvPtr(envval, env);
+ VM_ASSERT(env->ep == ep);
+ }
+ return TRUE;
+ }
+ else {
+ return FALSE;
+ }
+}
+
+int
+rb_vm_ep_in_heap_p(const VALUE *ep)
+{
+ return vm_ep_in_heap_p_(GET_THREAD(), ep);
+}
+#endif
+
+static struct rb_captured_block *
+VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t *cfp)
+{
+ VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_THREAD(), cfp));
+ return (struct rb_captured_block *)&cfp->self;
}
-PUREFUNC(static inline rb_block_t *VM_CF_BLOCK_PTR(const rb_control_frame_t * const cfp));
-static inline rb_block_t *
-VM_CF_BLOCK_PTR(const rb_control_frame_t * const cfp)
+static rb_control_frame_t *
+VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
{
- VALUE *ep = VM_CF_LEP(cfp);
- return VM_EP_BLOCK_PTR(ep);
+ rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
+ VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_THREAD(), cfp));
+ VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 6 + VM_DEBUG_BP_CHECK ? 1 : 0);
+ return cfp;
}
-rb_block_t *
-rb_vm_control_frame_block_ptr(const rb_control_frame_t *cfp)
+static int
+VM_BH_FROM_CFP_P(VALUE block_handler, const rb_control_frame_t *cfp)
{
- return VM_CF_BLOCK_PTR(cfp);
+ const struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
+ return VM_TAGGED_PTR_REF(block_handler, 0x03) == captured;
+}
+
+static VALUE
+vm_passed_block_handler(rb_thread_t *th)
+{
+ VALUE block_handler = th->passed_block_handler;
+ th->passed_block_handler = VM_BLOCK_HANDLER_NONE;
+ return block_handler;
}
static rb_cref_t *
@@ -181,6 +265,15 @@ vm_cref_dump(const char *mesg, const rb_cref_t *cref)
}
}
+static void
+vm_bind_update_env(rb_binding_t *bind, VALUE envval)
+{
+ rb_env_t *env;
+ GetEnvPtr(envval, env);
+ bind->block.as.captured.code.iseq = env->iseq;
+ bind->block.as.captured.ep = env->ep;
+}
+
#if VM_COLLECT_USAGE_DETAILS
static void vm_collect_usage_operand(int insn, int n, VALUE op);
static void vm_collect_usage_insn(int insn);
@@ -189,20 +282,18 @@ static void vm_collect_usage_register(int reg, int isset);
static VALUE vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
-static VALUE
-vm_invoke_bmethod(rb_thread_t *th, rb_proc_t *proc, VALUE self,
- int argc, const VALUE *argv, const rb_block_t *blockptr);
-static VALUE
-vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
- int argc, const VALUE *argv, const rb_block_t *blockptr);
+static VALUE vm_invoke_bmethod(rb_thread_t *th, rb_proc_t *proc, VALUE self,
+ int argc, const VALUE *argv, VALUE block_handler);
+static VALUE vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
+ int argc, const VALUE *argv, VALUE block_handler);
static rb_serial_t ruby_vm_global_method_state = 1;
static rb_serial_t ruby_vm_global_constant_state = 1;
static rb_serial_t ruby_vm_class_serial = 1;
#include "vm_insnhelper.h"
-#include "vm_insnhelper.c"
#include "vm_exec.h"
+#include "vm_insnhelper.c"
#include "vm_exec.c"
#include "vm_method.c"
@@ -350,20 +441,20 @@ vm_set_top_stack(rb_thread_t *th, const rb_iseq_t *iseq)
}
/* for return */
- vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH, th->top_self,
- VM_ENVVAL_BLOCK_PTR(0),
+ vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, th->top_self,
+ VM_BLOCK_HANDLER_NONE,
(VALUE)vm_cref_new_toplevel(th), /* cref or me */
- iseq->body->iseq_encoded, th->cfp->sp, iseq->body->local_size, iseq->body->stack_max);
+ iseq->body->iseq_encoded, th->cfp->sp, iseq->body->local_table_size, iseq->body->stack_max);
}
static void
-vm_set_eval_stack(rb_thread_t * th, const rb_iseq_t *iseq, const rb_cref_t *cref, rb_block_t *base_block)
+vm_set_eval_stack(rb_thread_t * th, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block)
{
vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
- base_block->self, VM_ENVVAL_PREV_EP_PTR(base_block->ep),
+ vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
(VALUE)cref, /* cref or me */
iseq->body->iseq_encoded,
- th->cfp->sp, iseq->body->local_size, iseq->body->stack_max);
+ th->cfp->sp, iseq->body->local_table_size, iseq->body->stack_max);
}
static void
@@ -371,16 +462,14 @@ vm_set_main_stack(rb_thread_t *th, const rb_iseq_t *iseq)
{
VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
rb_binding_t *bind;
- rb_env_t *env;
GetBindingPtr(toplevel_binding, bind);
- GetEnvPtr(bind->env, env);
- vm_set_eval_stack(th, iseq, 0, &env->block);
+ vm_set_eval_stack(th, iseq, 0, &bind->block);
/* save binding */
- if (iseq->body->local_size > 0) {
- bind->env = vm_make_env_object(th, th->cfp);
+ if (bind && iseq->body->local_table_size > 0) {
+ vm_bind_update_env(bind, vm_make_env_object(th, th->cfp));
}
}
@@ -422,7 +511,7 @@ vm_get_ruby_level_caller_cfp(const rb_thread_t *th, const rb_control_frame_t *cf
return (rb_control_frame_t *)cfp;
}
- if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
+ if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
break;
}
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
@@ -508,12 +597,13 @@ env_mark(void * const ptr)
/* TODO: should mark more restricted range */
RUBY_GC_INFO("env->env\n");
+ VM_ASSERT(VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ESCAPED));
+
rb_gc_mark_values((long)env->env_size, env->env);
+ VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
RUBY_MARK_UNLESS_NULL(rb_vm_env_prev_envval(env));
- RUBY_MARK_UNLESS_NULL(env->block.self);
- RUBY_MARK_UNLESS_NULL(env->block.proc);
- RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
+ RUBY_MARK_UNLESS_NULL((VALUE)env->iseq);
RUBY_MARK_LEAVE("env");
}
@@ -527,14 +617,39 @@ env_memsize(const void *ptr)
return size;
}
+#if VM_CHECK_MODE > 0
+static void
+env_free(void *ptr)
+{
+ if (ptr) {
+ rb_env_t * const env = ptr;
+ VM_ASSERT(VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ESCAPED));
+ free(env);
+ }
+}
+#else
+#define env_free RUBY_TYPED_DEFAULT_FREE
+#endif
+
static const rb_data_type_t env_data_type = {
"VM/env",
- {env_mark, RUBY_TYPED_DEFAULT_FREE, env_memsize,},
- 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
+ {env_mark, env_free, env_memsize,},
+ 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
};
-#define VM_EP_IN_HEAP_P(th, ep) (!((th)->stack <= (ep) && (ep) < ((th)->stack + (th)->stack_size)))
-#define VM_ENV_EP_ENVVAL(ep) ((ep)[1])
+#if VM_CHECK_MODE > 0
+static int
+envval_p(VALUE envval)
+{
+ if (rb_typeddata_is_kind_of(envval, &env_data_type)) {
+ return TRUE;
+ }
+ else {
+ rb_obj_info_dump(envval);
+ return FALSE;
+ }
+}
+#endif
static VALUE check_env_value(VALUE envval);
@@ -542,10 +657,10 @@ static int
check_env(rb_env_t * const env)
{
fprintf(stderr, "---\n");
- fprintf(stderr, "envptr: %p\n", (void *)&env->block.ep[0]);
- fprintf(stderr, "envval: %10p ", (void *)env->block.ep[1]);
- dp(env->block.ep[1]);
- fprintf(stderr, "ep: %10p\n", (void *)env->block.ep);
+ fprintf(stderr, "envptr: %p\n", (void *)&env->ep[0]);
+ fprintf(stderr, "envval: %10p ", (void *)env->ep[1]);
+ dp(env->ep[1]);
+ fprintf(stderr, "ep: %10p\n", (void *)env->ep);
if (rb_vm_env_prev_envval(env)) {
fprintf(stderr, ">>\n");
check_env_value(rb_vm_env_prev_envval(env));
@@ -567,67 +682,66 @@ check_env_value(VALUE envval)
return Qnil; /* unreachable */
}
-/* return FALSE if proc was already created */
-static int
-vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block, VALUE *procptr)
+static void
+vm_block_handler_escape(rb_thread_t *th, VALUE block_handler, VALUE *procvalptr)
{
- if (!block->proc) {
- *procptr = block->proc = rb_vm_make_proc(th, block, rb_cProc);
- return TRUE;
- }
- else if (SYMBOL_P(block->proc)) {
- *procptr = rb_sym_to_proc(block->proc);
- return TRUE;
- }
- else {
- *procptr = block->proc;
- return FALSE;
+ switch (vm_block_handler_type(block_handler)) {
+ case block_handler_type_ifunc:
+ case block_handler_type_iseq:
+ *procvalptr = rb_vm_make_proc(th, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
+ return;
+
+ case block_handler_type_symbol:
+ case block_handler_type_proc:
+ *procvalptr = block_handler;
+ return;
}
+ VM_UNREACHABLE(vm_block_handler_escape);
+ return;
}
static VALUE
vm_make_env_each(rb_thread_t *const th, rb_control_frame_t *const cfp)
{
- VALUE envval, blockprocval = 0;
- VALUE * const ep = cfp->ep;
+ VALUE envval, blockprocval = Qfalse;
+ const VALUE * const ep = cfp->ep;
rb_env_t *env;
- VALUE *new_ep;
+ const VALUE *new_ep;
int local_size, env_size;
- if (VM_EP_IN_HEAP_P(th, ep)) {
- return VM_ENV_EP_ENVVAL(ep);
+ if (VM_ENV_ESCAPED_P(ep)) {
+ return VM_ENV_ENVVAL(ep);
}
- if (!VM_EP_LEP_P(ep)) {
- VALUE *prev_ep = VM_EP_PREV_EP(ep);
+ if (!VM_ENV_LOCAL_P(ep)) {
+ const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
- if (!VM_EP_IN_HEAP_P(th, prev_ep)) {
+ if (!VM_ENV_ESCAPED_P(prev_ep)) {
rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
while (prev_cfp->ep != prev_ep) {
prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(prev_cfp);
- if (VM_CHECK_MODE > 0 && prev_cfp->ep == 0) rb_bug("invalid ep");
+ VM_ASSERT(prev_cfp->ep != NULL);
}
vm_make_env_each(th, prev_cfp);
- *ep = VM_ENVVAL_PREV_EP_PTR(prev_cfp->ep);
+ VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_SPECVAL], VM_GUARDED_PREV_EP(prev_cfp->ep));
}
}
else {
- rb_block_t *block = VM_EP_BLOCK_PTR(ep);
+ VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
- if (block && vm_make_proc_from_block(th, block, &blockprocval)) {
- rb_proc_t *p;
- GetProcPtr(blockprocval, p);
- *ep = VM_ENVVAL_BLOCK_PTR(&p->block);
+ if (block_handler != VM_BLOCK_HANDLER_NONE) {
+ vm_block_handler_escape(th, block_handler, &blockprocval);
+ VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
}
}
if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
- local_size = 1 /* cref/me */;
+ local_size = VM_ENV_DATA_SIZE;
}
else {
- local_size = cfp->iseq->body->local_size;
+ local_size = cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
}
/*
@@ -643,15 +757,14 @@ vm_make_env_each(rb_thread_t *const th, rb_control_frame_t *const cfp)
*/
env_size = local_size +
- 1 /* specval */ +
1 /* envval */ +
(blockprocval ? 1 : 0) /* blockprocval */;
-
envval = TypedData_Wrap_Struct(rb_cEnv, &env_data_type, 0);
env = xmalloc(sizeof(rb_env_t) + (env_size - 1 /* rb_env_t::env[1] */) * sizeof(VALUE));
env->env_size = env_size;
- MEMCPY(env->env, ep - local_size, VALUE, local_size + 1 /* specval */);
+ /* setup env */
+ MEMCPY((VALUE *)env->env, ep - (local_size - 1 /* specval */), VALUE, local_size);
#if 0
for (i = 0; i < local_size; i++) {
@@ -665,25 +778,23 @@ vm_make_env_each(rb_thread_t *const th, rb_control_frame_t *const cfp)
/* be careful not to trigger GC after this */
RTYPEDDATA_DATA(envval) = env;
- /*
+ new_ep = &env->env[local_size - 1 /* specval */];
+ RB_OBJ_WRITE(envval, &new_ep[1], envval);
+ if (blockprocval) RB_OBJ_WRITE(envval, &new_ep[2], blockprocval);
+ VM_ENV_FLAGS_SET(new_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
+
+ /*
* must happen after TypedData_Wrap_Struct to ensure penvval is markable
* in case object allocation triggers GC and clobbers penvval.
*/
- *ep = envval; /* GC mark */
-
- new_ep = &env->env[local_size];
- new_ep[1] = envval;
- if (blockprocval) new_ep[2] = blockprocval;
+ VM_STACK_ENV_WRITE(ep, 0, envval); /* GC mark */
- /* as Binding */
- env->block.self = cfp->self;
- env->block.ep = cfp->ep = new_ep;
- env->block.iseq = cfp->iseq;
- env->block.proc = 0;
+ /* setup env object */
+ env->ep = cfp->ep = new_ep;
+ env->iseq = cfp->iseq;
if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
- /* TODO */
- env->block.iseq = 0;
+ env->iseq = NULL;
}
return envval;
@@ -714,13 +825,13 @@ rb_vm_stack_to_heap(rb_thread_t *th)
VALUE
rb_vm_env_prev_envval(const rb_env_t *env)
{
- const VALUE *ep = env->block.ep;
+ const VALUE *ep = env->ep;
- if (VM_EP_LEP_P(ep)) {
+ if (VM_ENV_LOCAL_P(ep)) {
return Qfalse;
}
else {
- return VM_ENV_EP_ENVVAL(VM_EP_PREV_EP(ep));
+ return VM_ENV_ENVVAL(VM_ENV_PREV_EP(ep));
}
}
@@ -740,7 +851,7 @@ collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list
{
VALUE prev_envval;
- while (collect_local_variables_in_iseq(env->block.iseq, vars), (prev_envval = rb_vm_env_prev_envval(env)) != Qfalse) {
+ while (collect_local_variables_in_iseq(env->iseq, vars), (prev_envval = rb_vm_env_prev_envval(env)) != Qfalse) {
GetEnvPtr(prev_envval, env);
}
}
@@ -748,9 +859,9 @@ collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list
static int
vm_collect_local_variables_in_heap(rb_thread_t *th, const VALUE *ep, const struct local_var_list *vars)
{
- if (VM_EP_IN_HEAP_P(th, ep)) {
+ if (VM_ENV_ESCAPED_P(ep)) {
rb_env_t *env;
- GetEnvPtr(VM_ENV_EP_ENVVAL(ep), env);
+ GetEnvPtr(VM_ENV_ENVVAL(ep), env);
collect_local_variables_in_env(env, vars);
return 1;
}
@@ -781,15 +892,24 @@ rb_iseq_local_variables(const rb_iseq_t *iseq)
/* Proc */
-static inline VALUE
-rb_proc_create(VALUE klass, const rb_block_t *block,
- int8_t safe_level, int8_t is_from_method, int8_t is_lambda)
+VALUE
+rb_proc_create_from_captured(VALUE klass,
+ const struct rb_captured_block *captured,
+ enum rb_block_type block_type,
+ int8_t safe_level, int8_t is_from_method, int8_t is_lambda)
{
VALUE procval = rb_proc_alloc(klass);
rb_proc_t *proc = RTYPEDDATA_DATA(procval);
- proc->block = *block;
- proc->block.proc = procval;
+ VM_ASSERT(VM_EP_IN_HEAP_P(GET_THREAD(), captured->ep));
+
+ /* copy block */
+ RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self);
+ RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, captured->code.val);
+ *((const VALUE **)&proc->block.as.captured.ep) = captured->ep;
+ RB_OBJ_WRITTEN(procval, Qundef, VM_ENV_ENVVAL(captured->ep));
+
+ vm_block_type_set(&proc->block, block_type);
proc->safe_level = safe_level;
proc->is_from_method = is_from_method;
proc->is_lambda = is_lambda;
@@ -798,40 +918,61 @@ rb_proc_create(VALUE klass, const rb_block_t *block,
}
VALUE
-rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
-{
- return rb_vm_make_proc_lambda(th, block, klass, 0);
-}
-
-VALUE
-rb_vm_make_proc_lambda(rb_thread_t *th, const rb_block_t *block, VALUE klass, int8_t is_lambda)
+rb_proc_create(VALUE klass, const struct rb_block *block,
+ int8_t safe_level, int8_t is_from_method, int8_t is_lambda)
{
- VALUE procval;
- rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);
-
- if (block->proc) {
- rb_bug("rb_vm_make_proc: Proc value is already created.");
- }
+ VALUE procval = rb_proc_alloc(klass);
+ rb_proc_t *proc = RTYPEDDATA_DATA(procval);
- vm_make_env_object(th, cfp);
- procval = rb_proc_create(klass, block, (int8_t)th->safe_level, 0, is_lambda);
+ VM_ASSERT(VM_EP_IN_HEAP_P(GET_THREAD(), vm_block_ep(block)));
- if (VMDEBUG) {
- if (th->stack < block->ep && block->ep < th->stack + th->stack_size) {
- rb_bug("invalid ptr: block->ep");
- }
+ /* copy block */
+ switch (vm_block_type(block)) {
+ case block_type_iseq:
+ case block_type_ifunc:
+ RB_OBJ_WRITE(procval, &proc->block.as.captured.self, block->as.captured.self);
+ RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, block->as.captured.code.val);
+ *((const VALUE **)&proc->block.as.captured.ep) = block->as.captured.ep;
+ RB_OBJ_WRITTEN(procval, Qundef, VM_ENV_ENVVAL(block->as.captured.ep));
+ break;
+ case block_type_symbol:
+ RB_OBJ_WRITE(procval, &proc->block.as.symbol, block->as.symbol);
+ break;
+ case block_type_proc:
+ RB_OBJ_WRITE(procval, &proc->block.as.proc, block->as.proc);
+ break;
}
+ vm_block_type_set(&proc->block, block->type);
+ proc->safe_level = safe_level;
+ proc->is_from_method = is_from_method;
+ proc->is_lambda = is_lambda;
return procval;
}
VALUE
-rb_vm_proc_envval(const rb_proc_t *proc)
+rb_vm_make_proc(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass)
{
- VALUE envval = VM_ENV_EP_ENVVAL(proc->block.ep);
- return envval;
+ return rb_vm_make_proc_lambda(th, captured, klass, FALSE);
}
+VALUE
+rb_vm_make_proc_lambda(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
+{
+ VALUE procval;
+
+ if (!VM_ENV_FLAGS(captured->ep, VM_ENV_FLAG_ESCAPED)) {
+ rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
+ vm_make_env_object(th, cfp);
+ }
+ VM_ASSERT(VM_EP_IN_HEAP_P(th, captured->ep));
+ VM_ASSERT(RB_TYPE_P(captured->code.val, T_IMEMO));
+
+ procval = rb_proc_create_from_captured(klass, captured,
+ imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc,
+ (int8_t)th->safe_level, FALSE, is_lambda);
+ return procval;
+}
/* Binding */
@@ -857,19 +998,22 @@ rb_vm_make_binding(rb_thread_t *th, const rb_control_frame_t *src_cfp)
bindval = rb_binding_alloc(rb_cBinding);
GetBindingPtr(bindval, bind);
- bind->env = envval;
+ vm_bind_update_env(bind, envval);
+ bind->block.as.captured.self = cfp->self;
+ bind->block.as.captured.code.iseq = cfp->iseq;
bind->path = ruby_level_cfp->iseq->body->location.path;
bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);
return bindval;
}
-VALUE *
+const VALUE *
rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars)
{
- VALUE envval = bind->env, path = bind->path;
+ VALUE envval;
+ VALUE path = bind->path;
+ const struct rb_block *base_block;
rb_env_t *env;
- rb_block_t *base_block;
rb_thread_t *th = GET_THREAD();
const rb_iseq_t *base_iseq, *iseq;
NODE *node = 0;
@@ -878,10 +1022,8 @@ rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars)
if (dyncount < 0) return 0;
- GetEnvPtr(envval, env);
-
- base_block = &env->block;
- base_iseq = base_block->iseq;
+ base_block = &bind->block;
+ base_iseq = vm_block_iseq(base_block);
if (dyncount >= numberof(minibuf)) dyns = ALLOCV_N(ID, idtmp, dyncount + 1);
@@ -900,42 +1042,41 @@ rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars)
ALLOCV_END(idtmp);
vm_set_eval_stack(th, iseq, 0, base_block);
- bind->env = vm_make_env_object(th, th->cfp);
+ vm_bind_update_env(bind, envval = vm_make_env_object(th, th->cfp));
rb_vm_pop_frame(th);
- GetEnvPtr(bind->env, env);
+ GetEnvPtr(envval, env);
return env->env;
}
/* C -> Ruby: block */
static inline VALUE
-invoke_block(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const rb_block_t *block, const rb_cref_t *cref, int type, int opt_pc)
+invoke_block(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
{
int arg_size = iseq->body->param.size;
vm_push_frame(th, iseq, type | VM_FRAME_FLAG_FINISH, self,
- VM_ENVVAL_PREV_EP_PTR(block->ep),
+ VM_GUARDED_PREV_EP(captured->ep),
(VALUE)cref, /* cref or method */
iseq->body->iseq_encoded + opt_pc,
- th->cfp->sp + arg_size, iseq->body->local_size - arg_size,
+ th->cfp->sp + arg_size, iseq->body->local_table_size - arg_size,
iseq->body->stack_max);
-
return vm_exec(th);
}
static VALUE
-invoke_bmethod(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const rb_block_t *block, const rb_callable_method_entry_t *me, int type, int opt_pc)
+invoke_bmethod(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
{
/* bmethod */
int arg_size = iseq->body->param.size;
VALUE ret;
vm_push_frame(th, iseq, type | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_BMETHOD, self,
- VM_ENVVAL_PREV_EP_PTR(block->ep),
- (VALUE)me, /* cref or method (TODO: can we ignore cref?) */
+ VM_GUARDED_PREV_EP(captured->ep),
+ (VALUE)me,
iseq->body->iseq_encoded + opt_pc,
- th->cfp->sp + arg_size, iseq->body->local_size - arg_size,
+ th->cfp->sp + arg_size, iseq->body->local_table_size - arg_size,
iseq->body->stack_max);
RUBY_DTRACE_METHOD_ENTRY_HOOK(th, me->owner, me->called_id);
@@ -947,95 +1088,114 @@ invoke_bmethod(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const rb_bloc
}
static inline VALUE
-invoke_block_from_c_0(rb_thread_t *th, const rb_block_t *block,
- VALUE self, int argc, const VALUE *argv, const rb_block_t *blockptr,
- const rb_cref_t *cref, const int splattable)
+invoke_iseq_block_from_c(rb_thread_t *th, const struct rb_captured_block *captured,
+ VALUE self, int argc, const VALUE *argv, VALUE passed_block_handler,
+ const rb_cref_t *cref, const int splattable, int is_lambda)
{
- if (UNLIKELY(!RTEST(block->iseq))) {
- return Qnil;
- }
- else if (LIKELY(RUBY_VM_NORMAL_ISEQ_P(block->iseq))) {
- const rb_iseq_t *iseq = rb_iseq_check(block->iseq);
- int i, opt_pc;
- int type = block_proc_is_lambda(block->proc) ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;
- VALUE *sp = th->cfp->sp;
- const rb_callable_method_entry_t *me = th->passed_bmethod_me;
-
- th->passed_bmethod_me = NULL;
-
- for (i=0; i<argc; i++) {
- sp[i] = argv[i];
- }
+ const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
+ int i, opt_pc;
+ VALUE type = is_lambda ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;
+ VALUE *sp = th->cfp->sp;
+ const rb_callable_method_entry_t *me = th->passed_bmethod_me;
+ th->passed_bmethod_me = NULL;
- opt_pc = vm_yield_setup_args(th, iseq, argc, sp, blockptr,
- (type == VM_FRAME_MAGIC_LAMBDA ? (splattable ? arg_setup_lambda : arg_setup_method) : arg_setup_block));
+ for (i=0; i<argc; i++) {
+ sp[i] = argv[i];
+ }
- if (me == NULL) {
- return invoke_block(th, iseq, self, block, cref, type, opt_pc);
- }
- else {
- return invoke_bmethod(th, iseq, self, block, me, type, opt_pc);
- }
+ opt_pc = vm_yield_setup_args(th, iseq, argc, sp, passed_block_handler,
+ (type == VM_FRAME_MAGIC_LAMBDA ? (splattable ? arg_setup_lambda : arg_setup_method) : arg_setup_block));
+ if (me == NULL) {
+ return invoke_block(th, iseq, self, captured, cref, type, opt_pc);
}
else {
- return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
+ return invoke_bmethod(th, iseq, self, captured, me, type, opt_pc);
}
}
-static VALUE
-invoke_block_from_c_splattable(rb_thread_t *th, const rb_block_t *block,
- VALUE self, int argc, const VALUE *argv,
- const rb_block_t *blockptr, const rb_cref_t *cref)
-{
- return invoke_block_from_c_0(th, block, self, argc, argv, blockptr, cref, TRUE);
-}
-
-static VALUE
-invoke_block_from_c_unsplattable(rb_thread_t *th, const rb_block_t *block,
- VALUE self, int argc, const VALUE *argv,
- const rb_block_t *blockptr, const rb_cref_t *cref)
-{
- return invoke_block_from_c_0(th, block, self, argc, argv, blockptr, cref, FALSE);
+static inline VALUE
+invoke_block_from_c_splattable(rb_thread_t *th, VALUE block_handler,
+ int argc, const VALUE *argv,
+ VALUE passed_block_handler, const rb_cref_t *cref)
+{
+ int is_lambda = FALSE;
+ again:
+ switch (vm_block_handler_type(block_handler)) {
+ case block_handler_type_iseq:
+ {
+ const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
+ return invoke_iseq_block_from_c(th, captured, captured->self, argc, argv, passed_block_handler, cref, TRUE, is_lambda);
+ }
+ case block_handler_type_ifunc:
+ return vm_yield_with_cfunc(th, VM_BH_TO_IFUNC_BLOCK(block_handler), VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
+ argc, argv, passed_block_handler);
+ case block_handler_type_symbol:
+ return vm_yield_with_symbol(th, VM_BH_TO_SYMBOL(block_handler), argc, argv, passed_block_handler);
+ case block_handler_type_proc:
+ is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
+ block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
+ goto again;
+ }
+ VM_UNREACHABLE(invoke_block_from_c_splattable);
+ return Qundef;
}
-
-static inline const rb_block_t *
-check_block(rb_thread_t *th)
+static inline VALUE
+check_block_handler(rb_thread_t *th)
{
- const rb_block_t *blockptr = VM_CF_BLOCK_PTR(th->cfp);
-
- if (UNLIKELY(blockptr == 0)) {
+ VALUE block_handler = VM_CF_BLOCK_HANDLER(th->cfp);
+ VM_ASSERT(vm_block_handler_verify(block_handler));
+ if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
rb_vm_localjump_error("no block given", Qnil, 0);
}
- return blockptr;
+ return block_handler;
}
static VALUE
vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const rb_cref_t *cref)
{
- const rb_block_t *blockptr = check_block(th);
- return invoke_block_from_c_splattable(th, blockptr, blockptr->self, argc, argv, NULL, cref);
+ return invoke_block_from_c_splattable(th, check_block_handler(th), argc, argv, VM_BLOCK_HANDLER_NONE, cref);
}
static VALUE
vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
{
- const rb_block_t *blockptr = check_block(th);
- return invoke_block_from_c_splattable(th, blockptr, blockptr->self, argc, argv, NULL, NULL);
+ return invoke_block_from_c_splattable(th, check_block_handler(th), argc, argv, VM_BLOCK_HANDLER_NONE, NULL);
}
static VALUE
-vm_yield_with_block(rb_thread_t *th, int argc, const VALUE *argv, const rb_block_t *blockargptr)
+vm_yield_with_block(rb_thread_t *th, int argc, const VALUE *argv, VALUE block_handler)
+{
+ return invoke_block_from_c_splattable(th, check_block_handler(th), argc, argv, block_handler, NULL);
+}
+
+static inline VALUE
+invoke_block_from_c_unsplattable(rb_thread_t *th, const struct rb_block *block,
+ VALUE self, int argc, const VALUE *argv,
+ VALUE passed_block_handler, int is_lambda)
{
- const rb_block_t *blockptr = check_block(th);
- return invoke_block_from_c_splattable(th, blockptr, blockptr->self, argc, argv, blockargptr, NULL);
+ again:
+ switch (vm_block_type(block)) {
+ case block_type_iseq:
+ return invoke_iseq_block_from_c(th, &block->as.captured, self, argc, argv, passed_block_handler, NULL, FALSE, is_lambda);
+ case block_type_ifunc:
+ return vm_yield_with_cfunc(th, &block->as.captured, self, argc, argv, passed_block_handler);
+ case block_type_symbol:
+ return vm_yield_with_symbol(th, block->as.symbol, argc, argv, passed_block_handler);
+ case block_type_proc:
+ is_lambda = block_proc_is_lambda(block->as.proc);
+ block = vm_proc_block(block->as.proc);
+ goto again;
+ }
+ VM_UNREACHABLE(invoke_block_from_c_unsplattable);
+ return Qundef;
}
static VALUE
vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
- int argc, const VALUE *argv, const rb_block_t *blockptr)
+ int argc, const VALUE *argv, VALUE passed_block_handler)
{
VALUE val = Qundef;
int state;
@@ -1044,7 +1204,7 @@ vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
TH_PUSH_TAG(th);
if ((state = EXEC_TAG()) == 0) {
th->safe_level = proc->safe_level;
- val = invoke_block_from_c_unsplattable(th, &proc->block, self, argc, argv, blockptr, NULL);
+ val = invoke_block_from_c_unsplattable(th, &proc->block, self, argc, argv, passed_block_handler, proc->is_lambda);
}
TH_POP_TAG();
@@ -1058,21 +1218,23 @@ vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
static VALUE
vm_invoke_bmethod(rb_thread_t *th, rb_proc_t *proc, VALUE self,
- int argc, const VALUE *argv, const rb_block_t *blockptr)
+ int argc, const VALUE *argv, VALUE block_handler)
{
- return invoke_block_from_c_unsplattable(th, &proc->block, self, argc, argv, blockptr, NULL);
+ return invoke_block_from_c_unsplattable(th, &proc->block, self, argc, argv, block_handler, TRUE);
}
VALUE
rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc,
- int argc, const VALUE *argv, const rb_block_t *blockptr)
+ int argc, const VALUE *argv, VALUE passed_block_handler)
{
- VALUE self = proc->block.self;
+ VALUE self = vm_block_self(&proc->block);
+ VM_ASSERT(vm_block_handler_verify(passed_block_handler));
+
if (proc->is_from_method) {
- return vm_invoke_bmethod(th, proc, self, argc, argv, blockptr);
+ return vm_invoke_bmethod(th, proc, self, argc, argv, passed_block_handler);
}
else {
- return vm_invoke_proc(th, proc, self, argc, argv, blockptr);
+ return vm_invoke_proc(th, proc, self, argc, argv, passed_block_handler);
}
}
@@ -1352,12 +1514,21 @@ rb_vm_jump_tag_but_local_jump(int state)
NORETURN(static void vm_iter_break(rb_thread_t *th, VALUE val));
+static rb_control_frame_t *
+next_not_local_frame(rb_control_frame_t *cfp)
+{
+ while (VM_ENV_LOCAL_P(cfp->ep)) {
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ }
+ return cfp;
+}
+
static void
vm_iter_break(rb_thread_t *th, VALUE val)
{
- rb_control_frame_t *cfp = th->cfp;
- VALUE *ep = VM_CF_PREV_EP(cfp);
- rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(th, cfp, ep);
+ rb_control_frame_t *cfp = next_not_local_frame(th->cfp);
+ const VALUE *ep = VM_CF_PREV_EP(cfp);
+ const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(th, cfp, ep);
#if 0 /* raise LocalJumpError */
if (!target_cfp) {
@@ -1557,85 +1728,67 @@ hook_before_rewind(rb_thread_t *th, rb_control_frame_t *cfp, int will_finish_vm_
struct CONTROL_FRAME {
VALUE *pc; // cfp[0], program counter
VALUE *sp; // cfp[1], stack pointer
- VALUE *bp; // cfp[2], base pointer
- rb_iseq_t *iseq; // cfp[3], iseq
- VALUE flag; // cfp[4], magic
- VALUE self; // cfp[5], self
- VALUE *ep; // cfp[6], env pointer
- rb_iseq_t * block_iseq; // cfp[7], block iseq
- VALUE proc; // cfp[8], always 0
+ rb_iseq_t *iseq; // cfp[2], iseq
+ VALUE self; // cfp[3], self
+ const VALUE *ep; // cfp[4], env pointer
+ const void *block_code; // cfp[5], block code
};
- struct BLOCK {
+ struct rb_captured_block {
VALUE self;
VALUE *ep;
- rb_iseq_t *block_iseq;
- VALUE proc;
- };
-
- struct METHOD_CONTROL_FRAME {
- rb_control_frame_t frame;
+ union code;
};
- struct METHOD_FRAME {
- VALUE arg0;
- ...
- VALUE argM;
+ struct METHOD_ENV {
VALUE param0;
...
VALUE paramN;
- VALUE cref;
- VALUE special; // lep [1]
- struct block_object *block_ptr | 0x01; // lep [0]
- };
-
- struct BLOCK_CONTROL_FRAME {
- rb_control_frame_t frame;
+ VALUE lvar1;
+ ...
+ VALUE lvarM;
+ VALUE cref; // ep[-2]
+ VALUE special; // ep[-1]
+ VALUE flags; // ep[ 0] == lep[0]
};
- struct BLOCK_FRAME {
- VALUE arg0;
+ struct BLOCK_ENV {
+ VALUE block_param0;
...
- VALUE argM;
- VALUE param0;
+ VALUE block_paramN;
+ VALUE block_lvar1;
...
- VALUE paramN;
- VALUE cref;
- VALUE *(prev_ptr | 0x01); // ep[0]
- };
-
- struct CLASS_CONTROL_FRAME {
- rb_control_frame_t frame;
+ VALUE block_lvarM;
+ VALUE cref; // ep[-2]
+ VALUE special; // ep[-1]
+ VALUE flags; // ep[ 0]
};
- struct CLASS_FRAME {
- VALUE param0;
+ struct CLASS_ENV {
+ VALUE class_lvar0;
...
- VALUE paramN;
+ VALUE class_lvarN;
VALUE cref;
VALUE prev_ep; // for frame jump
+ VALUE flags;
};
struct C_METHOD_CONTROL_FRAME {
VALUE *pc; // 0
VALUE *sp; // stack pointer
- VALUE *bp; // base pointer (used in exception)
rb_iseq_t *iseq; // cmi
- VALUE magic; // C_METHOD_FRAME
VALUE self; // ?
VALUE *ep; // ep == lep
- rb_iseq_t * block_iseq; //
- VALUE proc; // always 0
+ void *code; //
};
struct C_BLOCK_CONTROL_FRAME {
VALUE *pc; // point only "finish" insn
VALUE *sp; // sp
rb_iseq_t *iseq; // ?
- VALUE magic; // C_METHOD_FRAME
- VALUE self; // needed?
+ VALUE self; //
VALUE *ep; // ep
- rb_iseq_t * block_iseq; // 0
+ void *code; //
};
*/
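Editorial aside, not part of the patch: the diagrams above put the flags word at ep[0] and the "special" slot at ep[-1], which holds either the block handler (for a local env) or a guarded prev_ep. The sketch below walks such a layout to find the local ep, mirroring what VM_EP_LEP / VM_ENV_LOCAL_P / VM_ENV_PREV_EP do earlier in this diff; the flag value and guard bit are invented for the example and are not the real vm_core.h constants.

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t VALUE;

    #define ENV_FLAG_LOCAL ((VALUE)0x02)  /* hypothetical "this env is a lep" bit */

    static int env_local_p(const VALUE *ep)
    {
        return (ep[0] & ENV_FLAG_LOCAL) != 0;
    }

    static const VALUE *env_prev_ep(const VALUE *ep)
    {
        /* the special slot (ep[-1]) carries prev_ep with a low guard bit */
        return (const VALUE *)(ep[-1] & ~(VALUE)0x03);
    }

    /* Rough counterpart of VM_EP_LEP: follow prev_ep until a local env. */
    static const VALUE *env_local_ep(const VALUE *ep)
    {
        while (!env_local_p(ep)) {
            ep = env_prev_ep(ep);
        }
        return ep;
    }

    int main(void)
    {
        /* outer (local) env: [lvar, cref, special, flags], ep -> flags slot */
        VALUE outer[4] = { 0, 0, 0, ENV_FLAG_LOCAL };
        const VALUE *outer_ep = &outer[3];

        /* inner block env: special slot stores outer_ep with a guard bit */
        VALUE inner[4] = { 0, 0, (VALUE)outer_ep | 0x01, 0 };
        const VALUE *inner_ep = &inner[3];

        printf("%d\n", env_local_ep(inner_ep) == outer_ep);  /* => 1 */
        return 0;
    }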
@@ -1834,6 +1987,8 @@ vm_exec(rb_thread_t *th)
if (catch_iseq != NULL) { /* found catch table */
/* enter catch scope */
+ const int arg_size = 1;
+
rb_iseq_check(catch_iseq);
cfp->sp = vm_base_ptr(cfp) + cont_sp;
cfp->pc = cfp->iseq->body->iseq_encoded + cont_pc;
@@ -1842,11 +1997,11 @@ vm_exec(rb_thread_t *th)
cfp->sp[0] = (VALUE)err;
vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_RESCUE,
cfp->self,
- VM_ENVVAL_PREV_EP_PTR(cfp->ep),
+ VM_GUARDED_PREV_EP(cfp->ep),
0, /* cref or me */
catch_iseq->body->iseq_encoded,
- cfp->sp + 1 /* push value */,
- catch_iseq->body->local_size - 1,
+ cfp->sp + arg_size /* push value */,
+ catch_iseq->body->local_table_size - arg_size,
catch_iseq->body->stack_max);
state = 0;
@@ -1951,17 +2106,17 @@ rb_thread_current_status(const rb_thread_t *th)
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
- const rb_block_t *blockptr, VALUE filename)
+ VALUE block_handler, VALUE filename)
{
rb_thread_t *th = GET_THREAD();
const rb_control_frame_t *reg_cfp = th->cfp;
const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
VALUE val;
- vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH,
- recv, VM_ENVVAL_BLOCK_PTR(blockptr),
+ vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
+ recv, block_handler,
(VALUE)vm_cref_new_toplevel(th), /* cref or me */
- 0, reg_cfp->sp, 1, 0);
+ 0, reg_cfp->sp, 0, 0);
val = (*func)(arg);
@@ -2237,9 +2392,14 @@ rb_thread_mark(void *ptr)
rb_gc_mark_values((long)(sp - p), p);
while (cfp != limit_cfp) {
- rb_gc_mark(cfp->proc);
+#if VM_CHECK_MODE > 0
+ const VALUE *ep = cfp->ep;
+ VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(th, ep));
+#endif
rb_gc_mark(cfp->self);
rb_gc_mark((VALUE)cfp->iseq);
+ rb_gc_mark((VALUE)cfp->block_code);
+
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}
}
@@ -2387,10 +2547,10 @@ th_init(rb_thread_t *th, VALUE self)
th->cfp = (void *)(th->stack + th->stack_size);
- vm_push_frame(th, 0 /* dummy iseq */, VM_FRAME_MAGIC_DUMMY | VM_FRAME_FLAG_FINISH /* dummy frame */,
- Qnil /* dummy self */, VM_ENVVAL_BLOCK_PTR(0) /* dummy block ptr */,
+ vm_push_frame(th, 0 /* dummy iseq */, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH /* dummy frame */,
+ Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
0 /* dummy cref/me */,
- 0 /* dummy pc */, th->stack, 1, 0);
+ 0 /* dummy pc */, th->stack, 0, 0);
th->status = THREAD_RUNNABLE;
th->errinfo = Qnil;
@@ -2942,7 +3102,7 @@ Init_VM(void)
th->cfp->pc = iseq->body->iseq_encoded;
th->cfp->self = th->top_self;
- th->cfp->ep[-1] = (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE);
+ VM_STACK_ENV_WRITE(th->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE));
/*
* The Binding of the top level scope