diff options
author | normal <normal@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2017-05-09 05:06:41 +0000 |
---|---|---|
committer | normal <normal@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2017-05-09 05:06:41 +0000 |
commit | 9d09240d9e329bbe5a8168dd850f5bb3549fbaa9 (patch) | |
tree | e22cadfa88ec235f9630197c0c3a56ce6cbe4850 /vm.c | |
parent | 62b885b0900fa8949d794fccce44787e5669f16e (diff) |
rb_execution_context_t: move stack, stack_size and cfp from rb_thread_t
The goal is to reduce rb_context_t and rb_fiber_t size
by removing the need to store the entire rb_thread_t in
there.
[ruby-core:81045] Work-in-progress: soon, we will move more fields here.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@58614 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Diffstat (limited to 'vm.c')
-rw-r--r-- | vm.c | 158 |
1 file changed, 81 insertions, 77 deletions
@@ -88,8 +88,8 @@ rb_vm_frame_block_handler(const rb_control_frame_t *cfp) static int VM_CFP_IN_HEAP_P(const rb_thread_t *th, const rb_control_frame_t *cfp) { - const VALUE *start = th->stack; - const VALUE *end = (VALUE *)th->stack + th->stack_size; + const VALUE *start = th->ec.stack; + const VALUE *end = (VALUE *)th->ec.stack + th->ec.stack_size; if (start <= (VALUE *)cfp && (VALUE *)cfp < end) { return FALSE; } @@ -101,8 +101,8 @@ VM_CFP_IN_HEAP_P(const rb_thread_t *th, const rb_control_frame_t *cfp) static int VM_EP_IN_HEAP_P(const rb_thread_t *th, const VALUE *ep) { - const VALUE *start = th->stack; - const VALUE *end = (VALUE *)th->cfp; + const VALUE *start = th->ec.stack; + const VALUE *end = (VALUE *)th->ec.cfp; if (start <= ep && ep < end) { return FALSE; } @@ -439,7 +439,8 @@ vm_set_top_stack(rb_thread_t *th, const rb_iseq_t *iseq) vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, th->top_self, VM_BLOCK_HANDLER_NONE, (VALUE)vm_cref_new_toplevel(th), /* cref or me */ - iseq->body->iseq_encoded, th->cfp->sp, iseq->body->local_table_size, iseq->body->stack_max); + iseq->body->iseq_encoded, th->ec.cfp->sp, + iseq->body->local_table_size, iseq->body->stack_max); } static void @@ -449,7 +450,8 @@ vm_set_eval_stack(rb_thread_t * th, const rb_iseq_t *iseq, const rb_cref_t *cref vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)), (VALUE)cref, /* cref or me */ iseq->body->iseq_encoded, - th->cfp->sp, iseq->body->local_table_size, iseq->body->stack_max); + th->ec.cfp->sp, iseq->body->local_table_size, + iseq->body->stack_max); } static void @@ -465,7 +467,7 @@ vm_set_main_stack(rb_thread_t *th, const rb_iseq_t *iseq) /* save binding */ if (iseq->body->local_table_size > 0) { - vm_bind_update_env(bind, vm_make_env_object(th, th->cfp)); + vm_bind_update_env(bind, vm_make_env_object(th, th->ec.cfp)); } } @@ -519,7 +521,7 @@ void rb_vm_pop_cfunc_frame(void) { rb_thread_t *th = GET_THREAD(); - rb_control_frame_t 
*cfp = th->cfp; + rb_control_frame_t *cfp = th->ec.cfp; const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp); EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, Qnil); @@ -531,11 +533,11 @@ void rb_vm_rewind_cfp(rb_thread_t *th, rb_control_frame_t *cfp) { /* check skipped frame */ - while (th->cfp != cfp) { + while (th->ec.cfp != cfp) { #if VMDEBUG - printf("skipped frame: %s\n", vm_frametype_name(th->cfp)); + printf("skipped frame: %s\n", vm_frametype_name(th->ec.cfp)); #endif - if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_CFUNC) { + if (VM_FRAME_TYPE(th->ec.cfp) != VM_FRAME_MAGIC_CFUNC) { rb_vm_pop_frame(th); } else { /* unlikely path */ @@ -717,7 +719,7 @@ vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp) void rb_vm_stack_to_heap(rb_thread_t *th) { - rb_control_frame_t *cfp = th->cfp; + rb_control_frame_t *cfp = th->ec.cfp; while ((cfp = rb_vm_get_binding_creatable_next_cfp(th, cfp)) != 0) { vm_make_env_object(th, cfp); cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); @@ -940,7 +942,7 @@ rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars) ALLOCV_END(idtmp); vm_set_eval_stack(th, iseq, 0, base_block); - vm_bind_update_env(bind, envval = vm_make_env_object(th, th->cfp)); + vm_bind_update_env(bind, envval = vm_make_env_object(th, th->ec.cfp)); rb_vm_pop_frame(th); env = (const rb_env_t *)envval; @@ -958,7 +960,8 @@ invoke_block(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const struct rb VM_GUARDED_PREV_EP(captured->ep), (VALUE)cref, /* cref or method */ iseq->body->iseq_encoded + opt_pc, - th->cfp->sp + arg_size, iseq->body->local_table_size - arg_size, + th->ec.cfp->sp + arg_size, + iseq->body->local_table_size - arg_size, iseq->body->stack_max); return vm_exec(th); } @@ -974,7 +977,8 @@ invoke_bmethod(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const struct VM_GUARDED_PREV_EP(captured->ep), (VALUE)me, iseq->body->iseq_encoded + opt_pc, - th->cfp->sp + 
arg_size, iseq->body->local_table_size - arg_size, + th->ec.cfp->sp + arg_size, + iseq->body->local_table_size - arg_size, iseq->body->stack_max); RUBY_DTRACE_METHOD_ENTRY_HOOK(th, me->owner, me->def->original_id); @@ -993,7 +997,7 @@ invoke_iseq_block_from_c(rb_thread_t *th, const struct rb_captured_block *captur const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq); int i, opt_pc; VALUE type = is_lambda ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK; - rb_control_frame_t *cfp = th->cfp; + rb_control_frame_t *cfp = th->ec.cfp; VALUE *sp = cfp->sp; const rb_callable_method_entry_t *me = th->passed_bmethod_me; th->passed_bmethod_me = NULL; @@ -1049,7 +1053,7 @@ invoke_block_from_c_splattable(rb_thread_t *th, VALUE block_handler, static inline VALUE check_block_handler(rb_thread_t *th) { - VALUE block_handler = VM_CF_BLOCK_HANDLER(th->cfp); + VALUE block_handler = VM_CF_BLOCK_HANDLER(th->ec.cfp); VM_ASSERT(vm_block_handler_verify(block_handler)); if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) { rb_vm_localjump_error("no block given", Qnil, 0); @@ -1187,14 +1191,14 @@ static VALUE vm_svar_get(VALUE key) { rb_thread_t *th = GET_THREAD(); - return vm_cfp_svar_get(th, th->cfp, key); + return vm_cfp_svar_get(th, th->ec.cfp, key); } static void vm_svar_set(VALUE key, VALUE val) { rb_thread_t *th = GET_THREAD(); - vm_cfp_svar_set(th, th->cfp, key, val); + vm_cfp_svar_set(th, th->ec.cfp, key, val); } VALUE @@ -1227,7 +1231,7 @@ VALUE rb_sourcefilename(void) { rb_thread_t *th = GET_THREAD(); - rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp); + rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); if (cfp) { return cfp->iseq->body->location.path; @@ -1241,7 +1245,7 @@ const char * rb_sourcefile(void) { rb_thread_t *th = GET_THREAD(); - rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp); + rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); if (cfp) { return 
RSTRING_PTR(cfp->iseq->body->location.path); @@ -1255,7 +1259,7 @@ int rb_sourceline(void) { rb_thread_t *th = GET_THREAD(); - rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp); + rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); if (cfp) { return rb_vm_get_sourceline(cfp); @@ -1269,7 +1273,7 @@ VALUE rb_source_location(int *pline) { rb_thread_t *th = GET_THREAD(); - rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp); + rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); if (cfp) { if (pline) *pline = rb_vm_get_sourceline(cfp); @@ -1293,7 +1297,7 @@ rb_cref_t * rb_vm_cref(void) { rb_thread_t *th = GET_THREAD(); - rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp); + rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); if (cfp == NULL) { return NULL; @@ -1306,7 +1310,7 @@ rb_cref_t * rb_vm_cref_replace_with_duplicated_cref(void) { rb_thread_t *th = GET_THREAD(); - rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp); + rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep); return cref; } @@ -1315,7 +1319,7 @@ const rb_cref_t * rb_vm_cref_in_context(VALUE self, VALUE cbase) { rb_thread_t *th = GET_THREAD(); - const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp); + const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); const rb_cref_t *cref; if (cfp->self != self) return NULL; if (!vm_env_cref_by_cref(cfp->ep)) return NULL; @@ -1340,7 +1344,7 @@ VALUE rb_vm_cbase(void) { rb_thread_t *th = GET_THREAD(); - rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp); + rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp); if (cfp == 0) { rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread"); @@ -1443,7 +1447,7 @@ next_not_local_frame(rb_control_frame_t *cfp) static void 
vm_iter_break(rb_thread_t *th, VALUE val) { - rb_control_frame_t *cfp = next_not_local_frame(th->cfp); + rb_control_frame_t *cfp = next_not_local_frame(th->ec.cfp); const VALUE *ep = VM_CF_PREV_EP(cfp); const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(th, cfp, ep); @@ -1642,34 +1646,34 @@ hook_before_rewind(rb_thread_t *th, const rb_control_frame_t *cfp, int will_fini if (state == TAG_RAISE && RBASIC_CLASS(err) == rb_eSysStackError) { return; } - switch (VM_FRAME_TYPE(th->cfp)) { + switch (VM_FRAME_TYPE(th->ec.cfp)) { case VM_FRAME_MAGIC_METHOD: RUBY_DTRACE_METHOD_RETURN_HOOK(th, 0, 0); - EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0, 0, frame_return_value(err)); + EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->ec.cfp->self, 0, 0, 0, frame_return_value(err)); THROW_DATA_CONSUMED_SET(err); break; case VM_FRAME_MAGIC_BLOCK: case VM_FRAME_MAGIC_LAMBDA: - if (VM_FRAME_BMETHOD_P(th->cfp)) { - EXEC_EVENT_HOOK(th, RUBY_EVENT_B_RETURN, th->cfp->self, 0, 0, 0, frame_return_value(err)); + if (VM_FRAME_BMETHOD_P(th->ec.cfp)) { + EXEC_EVENT_HOOK(th, RUBY_EVENT_B_RETURN, th->ec.cfp->self, 0, 0, 0, frame_return_value(err)); if (!will_finish_vm_exec) { /* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */ - EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->cfp->self, - rb_vm_frame_method_entry(th->cfp)->def->original_id, - rb_vm_frame_method_entry(th->cfp)->called_id, - rb_vm_frame_method_entry(th->cfp)->owner, + EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->ec.cfp->self, + rb_vm_frame_method_entry(th->ec.cfp)->def->original_id, + rb_vm_frame_method_entry(th->ec.cfp)->called_id, + rb_vm_frame_method_entry(th->ec.cfp)->owner, frame_return_value(err)); } THROW_DATA_CONSUMED_SET(err); } else { - EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_B_RETURN, th->cfp->self, 0, 0, 0, frame_return_value(err)); + EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_B_RETURN, th->ec.cfp->self, 0, 0, 0, 
frame_return_value(err)); THROW_DATA_CONSUMED_SET(err); } break; case VM_FRAME_MAGIC_CLASS: - EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_END, th->cfp->self, 0, 0, 0, Qnil); + EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_END, th->ec.cfp->self, 0, 0, 0, Qnil); break; } } @@ -1789,20 +1793,20 @@ vm_exec(rb_thread_t *th) cont_pc = cont_sp = 0; catch_iseq = NULL; - while (th->cfp->pc == 0 || th->cfp->iseq == 0) { - if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) { - EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, - rb_vm_frame_method_entry(th->cfp)->def->original_id, - rb_vm_frame_method_entry(th->cfp)->called_id, - rb_vm_frame_method_entry(th->cfp)->owner, Qnil); + while (th->ec.cfp->pc == 0 || th->ec.cfp->iseq == 0) { + if (UNLIKELY(VM_FRAME_TYPE(th->ec.cfp) == VM_FRAME_MAGIC_CFUNC)) { + EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->ec.cfp->self, + rb_vm_frame_method_entry(th->ec.cfp)->def->original_id, + rb_vm_frame_method_entry(th->ec.cfp)->called_id, + rb_vm_frame_method_entry(th->ec.cfp)->owner, Qnil); RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, - rb_vm_frame_method_entry(th->cfp)->owner, - rb_vm_frame_method_entry(th->cfp)->def->original_id); + rb_vm_frame_method_entry(th->ec.cfp)->owner, + rb_vm_frame_method_entry(th->ec.cfp)->def->original_id); } rb_vm_pop_frame(th); } - cfp = th->cfp; + cfp = th->ec.cfp; epc = cfp->pc - cfp->iseq->body->iseq_encoded; escape_cfp = NULL; @@ -1832,7 +1836,7 @@ vm_exec(rb_thread_t *th) th->errinfo = Qnil; result = THROW_DATA_VAL(err); THROW_DATA_CATCH_FRAME_SET(err, cfp + 1); - hook_before_rewind(th, th->cfp, TRUE, state, err); + hook_before_rewind(th, th->ec.cfp, TRUE, state, err); rb_vm_pop_frame(th); goto finish_vme; } @@ -1844,7 +1848,7 @@ vm_exec(rb_thread_t *th) #if OPT_STACK_CACHING initial = THROW_DATA_VAL(err); #else - *th->cfp->sp++ = THROW_DATA_VAL(err); + *th->ec.cfp->sp++ = THROW_DATA_VAL(err); #endif th->errinfo = Qnil; goto vm_loop_start; @@ -1915,7 +1919,7 @@ vm_exec(rb_thread_t *th) #if 
OPT_STACK_CACHING initial = THROW_DATA_VAL(err); #else - *th->cfp->sp++ = THROW_DATA_VAL(err); + *th->ec.cfp->sp++ = THROW_DATA_VAL(err); #endif } th->errinfo = Qnil; @@ -1974,9 +1978,9 @@ vm_exec(rb_thread_t *th) goto vm_loop_start; } else { - hook_before_rewind(th, th->cfp, FALSE, state, err); + hook_before_rewind(th, th->ec.cfp, FALSE, state, err); - if (VM_FRAME_FINISHED_P(th->cfp)) { + if (VM_FRAME_FINISHED_P(th->ec.cfp)) { rb_vm_pop_frame(th); th->errinfo = (VALUE)err; TH_TMPPOP_TAG(); @@ -2035,7 +2039,7 @@ rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *cal int rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, ID *called_idp, VALUE *klassp) { - return rb_vm_control_frame_id_and_class(th->cfp, idp, called_idp, klassp); + return rb_vm_control_frame_id_and_class(th->ec.cfp, idp, called_idp, klassp); } int @@ -2047,7 +2051,7 @@ rb_frame_method_id_and_class(ID *idp, VALUE *klassp) VALUE rb_thread_current_status(const rb_thread_t *th) { - const rb_control_frame_t *cfp = th->cfp; + const rb_control_frame_t *cfp = th->ec.cfp; const rb_callable_method_entry_t *me; VALUE str = Qnil; @@ -2073,7 +2077,7 @@ rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, VALUE block_handler, VALUE filename) { rb_thread_t *th = GET_THREAD(); - const rb_control_frame_t *reg_cfp = th->cfp; + const rb_control_frame_t *reg_cfp = th->ec.cfp; const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP); VALUE val; @@ -2348,11 +2352,11 @@ rb_thread_mark(void *ptr) rb_thread_t *th = ptr; RUBY_MARK_ENTER("thread"); - if (th->stack) { - VALUE *p = th->stack; - VALUE *sp = th->cfp->sp; - rb_control_frame_t *cfp = th->cfp; - rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size); + if (th->ec.stack) { + VALUE *p = th->ec.stack; + VALUE *sp = th->ec.cfp->sp; + rb_control_frame_t *cfp = th->ec.cfp; + rb_control_frame_t *limit_cfp = (void *)(th->ec.stack + th->ec.stack_size); rb_gc_mark_values((long)(sp - p), p); @@ 
-2413,7 +2417,7 @@ thread_free(void *ptr) RUBY_FREE_ENTER("thread"); if (!th->root_fiber) { - RUBY_FREE_UNLESS_NULL(th->stack); + RUBY_FREE_UNLESS_NULL(th->ec.stack); } if (th->locking_mutex != Qfalse) { @@ -2451,7 +2455,7 @@ thread_memsize(const void *ptr) size_t size = sizeof(rb_thread_t); if (!th->root_fiber) { - size += th->stack_size * sizeof(VALUE); + size += th->ec.stack_size * sizeof(VALUE); } if (th->local_storage) { size += st_memsize(th->local_storage); @@ -2501,18 +2505,18 @@ th_init(rb_thread_t *th, VALUE self) /* altstack of main thread is reallocated in another place */ th->altstack = malloc(rb_sigaltstack_size()); #endif - /* th->stack_size is word number. + /* th->ec.stack_size is word number. * th->vm->default_params.thread_vm_stack_size is byte size. */ - th->stack_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE); - th->stack = thread_recycle_stack(th->stack_size); + th->ec.stack_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE); + th->ec.stack = thread_recycle_stack(th->ec.stack_size); - th->cfp = (void *)(th->stack + th->stack_size); + th->ec.cfp = (void *)(th->ec.stack + th->ec.stack_size); vm_push_frame(th, 0 /* dummy iseq */, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME /* dummy frame */, Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */, 0 /* dummy cref/me */, - 0 /* dummy pc */, th->stack, 0, 0); + 0 /* dummy pc */, th->ec.stack, 0, 0); th->status = THREAD_RUNNABLE; th->errinfo = Qnil; @@ -2586,11 +2590,11 @@ vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval, int is_single #define REWIND_CFP(expr) do { \ rb_thread_t *th__ = GET_THREAD(); \ - VALUE *const curr_sp = (th__->cfp++)->sp; \ - VALUE *const saved_sp = th__->cfp->sp; \ - th__->cfp->sp = curr_sp; \ + VALUE *const curr_sp = (th__->ec.cfp++)->sp; \ + VALUE *const saved_sp = th__->ec.cfp->sp; \ + th__->ec.cfp->sp = curr_sp; \ expr; \ - (th__->cfp--)->sp = saved_sp; \ + 
(th__->ec.cfp--)->sp = saved_sp; \ } while (0) static VALUE @@ -3057,12 +3061,12 @@ Init_VM(void) rb_vm_living_threads_insert(vm, th); rb_gc_register_mark_object((VALUE)iseq); - th->cfp->iseq = iseq; - th->cfp->pc = iseq->body->iseq_encoded; - th->cfp->self = th->top_self; + th->ec.cfp->iseq = iseq; + th->ec.cfp->pc = iseq->body->iseq_encoded; + th->ec.cfp->self = th->top_self; - VM_ENV_FLAGS_UNSET(th->cfp->ep, VM_FRAME_FLAG_CFRAME); - VM_STACK_ENV_WRITE(th->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE)); + VM_ENV_FLAGS_UNSET(th->ec.cfp->ep, VM_FRAME_FLAG_CFRAME); + VM_STACK_ENV_WRITE(th->ec.cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE)); /* * The Binding of the top level scope @@ -3080,7 +3084,7 @@ void rb_vm_set_progname(VALUE filename) { rb_thread_t *th = GET_VM()->main_thread; - rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size); + rb_control_frame_t *cfp = (void *)(th->ec.stack + th->ec.stack_size); --cfp; RB_OBJ_WRITE(cfp->iseq, &cfp->iseq->body->location.path, filename); } |