Diffstat (limited to 'vm.c')
-rw-r--r-- | vm.c | 2351
1 file changed, 1401 insertions, 950 deletions
@@ -11,23 +11,28 @@ #define vm_exec rb_vm_exec #include "eval_intern.h" -#include "gc.h" #include "internal.h" +#include "internal/class.h" #include "internal/compile.h" #include "internal/cont.h" #include "internal/error.h" +#include "internal/encoding.h" #include "internal/eval.h" +#include "internal/gc.h" #include "internal/inits.h" +#include "internal/missing.h" #include "internal/object.h" -#include "internal/parse.h" #include "internal/proc.h" #include "internal/re.h" +#include "internal/ruby_parser.h" #include "internal/symbol.h" #include "internal/thread.h" +#include "internal/transcode.h" #include "internal/vm.h" #include "internal/sanitizers.h" +#include "internal/variable.h" #include "iseq.h" -#include "mjit.h" +#include "rjit.h" #include "yjit.h" #include "ruby/st.h" #include "ruby/vm.h" @@ -38,39 +43,31 @@ #include "vm_insnhelper.h" #include "ractor_core.h" #include "vm_sync.h" +#include "shape.h" #include "builtin.h" -#ifndef MJIT_HEADER #include "probes.h" -#else -#include "probes.dmyh" -#endif #include "probes_helper.h" #ifdef RUBY_ASSERT_CRITICAL_SECTION int ruby_assert_critical_section_entered = 0; #endif +static void *native_main_thread_stack_top; + VALUE rb_str_concat_literals(size_t, const VALUE*); -/* :FIXME: This #ifdef is because we build pch in case of mswin and - * not in case of other situations. That distinction might change in - * a future. We would better make it detectable in something better - * than just _MSC_VER. */ -#ifdef _MSC_VER -RUBY_FUNC_EXPORTED -#else -MJIT_FUNC_EXPORTED -#endif -VALUE vm_exec(rb_execution_context_t *, bool); +VALUE vm_exec(rb_execution_context_t *); + +extern const char *const rb_debug_counter_names[]; PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *)); static inline const VALUE * VM_EP_LEP(const VALUE *ep) { while (!VM_ENV_LOCAL_P(ep)) { - ep = VM_ENV_PREV_EP(ep); + ep = VM_ENV_PREV_EP(ep); } return ep; } @@ -79,19 +76,19 @@ static inline const rb_control_frame_t * rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE * const ep) { if (!ep) { - return NULL; + return NULL; } else { - const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */ + const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */ - while (cfp < eocfp) { - if (cfp->ep == ep) { - return cfp; - } - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); - } + while (cfp < eocfp) { + if (cfp->ep == ep) { + return cfp; + } + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + } - return NULL; + return NULL; } } @@ -143,10 +140,10 @@ VM_CFP_IN_HEAP_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp VM_ASSERT(start != NULL); if (start <= (VALUE *)cfp && (VALUE *)cfp < end) { - return FALSE; + return FALSE; } else { - return TRUE; + return TRUE; } } @@ -158,10 +155,10 @@ VM_EP_IN_HEAP_P(const rb_execution_context_t *ec, const VALUE *ep) VM_ASSERT(start != NULL); if (start <= ep && ep < end) { - return FALSE; + return FALSE; } else { - return TRUE; + return TRUE; } } @@ -169,19 +166,19 @@ static int vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep) { if (VM_EP_IN_HEAP_P(ec, ep)) { - VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */ + VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */ - if (envval != Qundef) { - const rb_env_t *env = (const rb_env_t *)envval; + if (!UNDEF_P(envval)) { + const rb_env_t *env = (const rb_env_t *)envval; - VM_ASSERT(vm_assert_env(envval)); - 
VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)); - VM_ASSERT(env->ep == ep); - } - return TRUE; + VM_ASSERT(vm_assert_env(envval)); + VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)); + VM_ASSERT(env->ep == ep); + } + return TRUE; } else { - return FALSE; + return FALSE; } } @@ -206,7 +203,7 @@ VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured) { rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3)); VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp)); - VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 8 + VM_DEBUG_BP_CHECK ? 1 : 0); + VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 7 + VM_DEBUG_BP_CHECK ? 1 : 0); return cfp; } @@ -231,12 +228,11 @@ vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_ { VALUE refinements = Qnil; int omod_shared = FALSE; - rb_cref_t *cref; /* scope */ union { - rb_scope_visibility_t visi; - VALUE value; + rb_scope_visibility_t visi; + VALUE value; } scope_visi; scope_visi.visi.method_visi = visi; @@ -244,17 +240,20 @@ vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_ /* refinements */ if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why CREF_NEXT(cref) is 1? */) { - refinements = CREF_REFINEMENTS(prev_cref); + refinements = CREF_REFINEMENTS(prev_cref); - if (!NIL_P(refinements)) { - omod_shared = TRUE; - CREF_OMOD_SHARED_SET(prev_cref); - } + if (!NIL_P(refinements)) { + omod_shared = TRUE; + CREF_OMOD_SHARED_SET(prev_cref); + } } VM_ASSERT(singleton || klass); - cref = (rb_cref_t *)rb_imemo_new(imemo_cref, klass, (VALUE)(use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref), scope_visi.value, refinements); + rb_cref_t *cref = IMEMO_NEW(rb_cref_t, imemo_cref, refinements); + cref->klass_or_self = klass; + cref->next = use_prev_prev ? 
CREF_NEXT(prev_cref) : prev_cref; + *((rb_scope_visibility_t *)&cref->scope_visi) = scope_visi.visi; if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref); if (omod_shared) CREF_OMOD_SHARED_SET(cref); @@ -327,7 +326,7 @@ vm_cref_new_toplevel(rb_execution_context_t *ec) VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper; if (top_wrapper) { - cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE, FALSE); + cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE, FALSE); } return cref; @@ -345,8 +344,8 @@ vm_cref_dump(const char *mesg, const rb_cref_t *cref) ruby_debug_printf("vm_cref_dump: %s (%p)\n", mesg, (void *)cref); while (cref) { - ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref)))); - cref = CREF_NEXT(cref); + ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref)))); + cref = CREF_NEXT(cref); } } @@ -377,32 +376,162 @@ extern VALUE rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, V const rb_callable_method_entry_t *me); static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler); -#include "vm_insnhelper.c" +#if USE_YJIT +// Counter to serve as a proxy for execution time, total number of calls +static uint64_t yjit_total_entry_hits = 0; + +// Number of calls used to estimate how hot an ISEQ is +#define YJIT_CALL_COUNT_INTERV 20u -#ifndef MJIT_HEADER +/// Test whether we are ready to compile an ISEQ or not +static inline bool +rb_yjit_threshold_hit(const rb_iseq_t *iseq, uint64_t entry_calls) +{ + yjit_total_entry_hits += 1; + + // Record the number of calls at the beginning of the interval + if (entry_calls + YJIT_CALL_COUNT_INTERV == rb_yjit_call_threshold) { + iseq->body->yjit_calls_at_interv = yjit_total_entry_hits; + } + + // Try to estimate the total time taken (total number of calls) to reach 20 calls to this ISEQ + // This give us a ratio of how hot/cold this ISEQ is + if (entry_calls == rb_yjit_call_threshold) { + // We expect threshold 1 to compile everything immediately + if (rb_yjit_call_threshold < YJIT_CALL_COUNT_INTERV) { + return true; + } + + uint64_t num_calls = yjit_total_entry_hits - iseq->body->yjit_calls_at_interv; + + // Reject ISEQs that don't get called often enough + if (num_calls > rb_yjit_cold_threshold) { + rb_yjit_incr_counter("cold_iseq_entry"); + return false; + } + + return true; + } + + return false; +} +#else +#define rb_yjit_threshold_hit(iseq, entry_calls) false +#endif + +#if USE_RJIT || USE_YJIT +// Generate JIT code that supports the following kinds of ISEQ entries: +// * The first ISEQ on vm_exec (e.g. <main>, or Ruby methods/blocks +// called by a C method). The current frame has VM_FRAME_FLAG_FINISH. +// The current vm_exec stops if JIT code returns a non-Qundef value. +// * ISEQs called by the interpreter on vm_sendish (e.g. Ruby methods or +// blocks called by a Ruby frame that isn't compiled or side-exited). +// The current frame doesn't have VM_FRAME_FLAG_FINISH. The current +// vm_exec does NOT stop whether JIT code returns Qundef or not. 
+static inline rb_jit_func_t +jit_compile(rb_execution_context_t *ec) +{ + const rb_iseq_t *iseq = ec->cfp->iseq; + struct rb_iseq_constant_body *body = ISEQ_BODY(iseq); + bool yjit_enabled = rb_yjit_enabled_p; + if (!(yjit_enabled || rb_rjit_call_p)) { + return NULL; + } + + // Increment the ISEQ's call counter and trigger JIT compilation if not compiled + if (body->jit_entry == NULL) { + body->jit_entry_calls++; + if (yjit_enabled) { + if (rb_yjit_threshold_hit(iseq, body->jit_entry_calls)) { + rb_yjit_compile_iseq(iseq, ec, false); + } + } + else if (body->jit_entry_calls == rb_rjit_call_threshold()) { + rb_rjit_compile(iseq); + } + } + return body->jit_entry; +} + +// Execute JIT code compiled by jit_compile() +static inline VALUE +jit_exec(rb_execution_context_t *ec) +{ + rb_jit_func_t func = jit_compile(ec); + if (func) { + // Call the JIT code + return func(ec, ec->cfp); + } + else { + return Qundef; + } +} +#else +# define jit_compile(ec) ((rb_jit_func_t)0) +# define jit_exec(ec) Qundef +#endif + +#if USE_YJIT +// Generate JIT code that supports the following kind of ISEQ entry: +// * The first ISEQ pushed by vm_exec_handle_exception. The frame would +// point to a location specified by a catch table, and it doesn't have +// VM_FRAME_FLAG_FINISH. The current vm_exec stops if JIT code returns +// a non-Qundef value. So you should not return a non-Qundef value +// until ec->cfp is changed to a frame with VM_FRAME_FLAG_FINISH. +static inline rb_jit_func_t +jit_compile_exception(rb_execution_context_t *ec) +{ + const rb_iseq_t *iseq = ec->cfp->iseq; + struct rb_iseq_constant_body *body = ISEQ_BODY(iseq); + if (!rb_yjit_enabled_p) { + return NULL; + } + + // Increment the ISEQ's call counter and trigger JIT compilation if not compiled + if (body->jit_exception == NULL) { + body->jit_exception_calls++; + if (body->jit_exception_calls == rb_yjit_call_threshold) { + rb_yjit_compile_iseq(iseq, ec, true); + } + } + + return body->jit_exception; +} + +// Execute JIT code compiled by jit_compile_exception() +static inline VALUE +jit_exec_exception(rb_execution_context_t *ec) +{ + rb_jit_func_t func = jit_compile_exception(ec); + if (func) { + // Call the JIT code + return func(ec, ec->cfp); + } + else { + return Qundef; + } +} +#else +# define jit_compile_exception(ec) ((rb_jit_func_t)0) +# define jit_exec_exception(ec) Qundef +#endif + +static void add_opt_method_entry(const rb_method_entry_t *me); + +#include "vm_insnhelper.c" #include "vm_exec.c" #include "vm_method.c" -#endif /* #ifndef MJIT_HEADER */ #include "vm_eval.c" -#ifndef MJIT_HEADER #define PROCDEBUG 0 -rb_serial_t -rb_next_class_serial(void) -{ - rb_serial_t class_serial = NEXT_CLASS_SERIAL(); - return class_serial; -} - VALUE rb_cRubyVM; VALUE rb_cThread; VALUE rb_mRubyVMFrozenCore; VALUE rb_block_param_proxy; -#define ruby_vm_redefined_flag GET_VM()->redefined_flag VALUE ruby_vm_const_missing_count = 0; rb_vm_t *ruby_current_vm_ptr = NULL; rb_ractor_t *ruby_single_main_ractor; @@ -411,19 +540,32 @@ bool ruby_vm_keep_script_lines; #ifdef RB_THREAD_LOCAL_SPECIFIER RB_THREAD_LOCAL_SPECIFIER rb_execution_context_t *ruby_current_ec; -#ifdef __APPLE__ - rb_execution_context_t * - rb_current_ec(void) - { - return ruby_current_ec; - } - void - rb_current_ec_set(rb_execution_context_t *ec) - { - ruby_current_ec = ec; - } +#ifdef RUBY_NT_SERIAL +RB_THREAD_LOCAL_SPECIFIER rb_atomic_t ruby_nt_serial; #endif +// no-inline decl on thread_pthread.h +rb_execution_context_t * +rb_current_ec_noinline(void) +{ + return ruby_current_ec; +} + 
+void +rb_current_ec_set(rb_execution_context_t *ec) +{ + ruby_current_ec = ec; +} + + +#ifdef __APPLE__ +rb_execution_context_t * +rb_current_ec(void) +{ + return ruby_current_ec; +} + +#endif #else native_tls_key_t ruby_current_ec_key; #endif @@ -434,7 +576,6 @@ unsigned int ruby_vm_event_local_num; rb_serial_t ruby_vm_constant_cache_invalidations = 0; rb_serial_t ruby_vm_constant_cache_misses = 0; -rb_serial_t ruby_vm_class_serial = 1; rb_serial_t ruby_vm_global_cvar_state = 1; static const struct rb_callcache vm_empty_cc = { @@ -465,42 +606,44 @@ rb_vm_inc_const_missing_count(void) ruby_vm_const_missing_count +=1; } -MJIT_FUNC_EXPORTED int +int rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id, - struct ruby_dtrace_method_hook_args *args) + struct ruby_dtrace_method_hook_args *args) { enum ruby_value_type type; if (!klass) { - if (!ec) ec = GET_EC(); - if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass) - return FALSE; + if (!ec) ec = GET_EC(); + if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass) + return FALSE; } if (RB_TYPE_P(klass, T_ICLASS)) { - klass = RBASIC(klass)->klass; + klass = RBASIC(klass)->klass; } - else if (FL_TEST(klass, FL_SINGLETON)) { - klass = rb_attr_get(klass, id__attached__); - if (NIL_P(klass)) return FALSE; + else if (RCLASS_SINGLETON_P(klass)) { + klass = RCLASS_ATTACHED_OBJECT(klass); + if (NIL_P(klass)) return FALSE; } type = BUILTIN_TYPE(klass); if (type == T_CLASS || type == T_ICLASS || type == T_MODULE) { - VALUE name = rb_class_path(klass); - const char *classname, *filename; - const char *methodname = rb_id2name(id); - if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) { - if (NIL_P(name) || !(classname = StringValuePtr(name))) - classname = "<unknown>"; - args->classname = classname; - args->methodname = methodname; - args->filename = filename; - args->klass = klass; - args->name = name; - return TRUE; - } + VALUE name = rb_class_path(klass); + const char *classname, *filename; + const char *methodname = rb_id2name(id); + if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) { + if (NIL_P(name) || !(classname = StringValuePtr(name))) + classname = "<unknown>"; + args->classname = classname; + args->methodname = methodname; + args->filename = filename; + args->klass = klass; + args->name = name; + return TRUE; + } } return FALSE; } +extern unsigned int redblack_buffer_size; + /* * call-seq: * RubyVM.stat -> Hash @@ -514,10 +657,11 @@ rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id, * { * :constant_cache_invalidations=>2, * :constant_cache_misses=>14, - * :class_serial=>546, * :global_cvar_state=>27 * } * + * If <tt>USE_DEBUG_COUNTER</tt> is enabled, debug counters will be included. + * * The contents of the hash are implementation specific and may be changed in * the future. 
* @@ -526,44 +670,62 @@ rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id, static VALUE vm_stat(int argc, VALUE *argv, VALUE self) { - static VALUE sym_constant_cache_invalidations, sym_constant_cache_misses, sym_class_serial, sym_global_cvar_state; + static VALUE sym_constant_cache_invalidations, sym_constant_cache_misses, sym_global_cvar_state, sym_next_shape_id; + static VALUE sym_shape_cache_size; VALUE arg = Qnil; VALUE hash = Qnil, key = Qnil; if (rb_check_arity(argc, 0, 1) == 1) { arg = argv[0]; - if (SYMBOL_P(arg)) - key = arg; - else if (RB_TYPE_P(arg, T_HASH)) - hash = arg; - else - rb_raise(rb_eTypeError, "non-hash or symbol given"); + if (SYMBOL_P(arg)) + key = arg; + else if (RB_TYPE_P(arg, T_HASH)) + hash = arg; + else + rb_raise(rb_eTypeError, "non-hash or symbol given"); } else { - hash = rb_hash_new(); + hash = rb_hash_new(); } #define S(s) sym_##s = ID2SYM(rb_intern_const(#s)) S(constant_cache_invalidations); S(constant_cache_misses); - S(class_serial); - S(global_cvar_state); + S(global_cvar_state); + S(next_shape_id); + S(shape_cache_size); #undef S #define SET(name, attr) \ if (key == sym_##name) \ - return SERIALT2NUM(attr); \ + return SERIALT2NUM(attr); \ else if (hash != Qnil) \ - rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr)); + rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr)); SET(constant_cache_invalidations, ruby_vm_constant_cache_invalidations); SET(constant_cache_misses, ruby_vm_constant_cache_misses); - SET(class_serial, ruby_vm_class_serial); SET(global_cvar_state, ruby_vm_global_cvar_state); + SET(next_shape_id, (rb_serial_t)GET_SHAPE_TREE()->next_shape_id); + SET(shape_cache_size, (rb_serial_t)GET_SHAPE_TREE()->cache_size); #undef SET +#if USE_DEBUG_COUNTER + ruby_debug_counter_show_at_exit(FALSE); + for (size_t i = 0; i < RB_DEBUG_COUNTER_MAX; i++) { + const VALUE name = rb_sym_intern_ascii_cstr(rb_debug_counter_names[i]); + const VALUE boxed_value = SIZET2NUM(rb_debug_counter[i]); + + if (key == name) { + return boxed_value; + } + else if (hash != Qnil) { + rb_hash_aset(hash, name, boxed_value); + } + } +#endif + if (!NIL_P(key)) { /* matched key should return above */ - rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key)); + rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key)); } return hash; @@ -575,13 +737,13 @@ static void vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq) { if (ISEQ_BODY(iseq)->type != ISEQ_TYPE_TOP) { - rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence"); + rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence"); } /* for return */ vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, rb_ec_thread_ptr(ec)->top_self, - VM_BLOCK_HANDLER_NONE, - (VALUE)vm_cref_new_toplevel(ec), /* cref or me */ + VM_BLOCK_HANDLER_NONE, + (VALUE)vm_cref_new_toplevel(ec), /* cref or me */ ISEQ_BODY(iseq)->iseq_encoded, ec->cfp->sp, ISEQ_BODY(iseq)->local_table_size, ISEQ_BODY(iseq)->stack_max); } @@ -590,8 +752,8 @@ static void vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block) { vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH, - vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)), - (VALUE)cref, /* cref or me */ + vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)), + (VALUE)cref, /* cref or me */ ISEQ_BODY(iseq)->iseq_encoded, ec->cfp->sp, ISEQ_BODY(iseq)->local_table_size, ISEQ_BODY(iseq)->stack_max); @@ -610,7 
+772,7 @@ vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq) /* save binding */ if (ISEQ_BODY(iseq)->local_table_size > 0) { - vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp)); + vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp)); } } @@ -618,51 +780,49 @@ rb_control_frame_t * rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp) { while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) { - if (cfp->iseq) { - return (rb_control_frame_t *)cfp; - } - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + if (cfp->iseq) { + return (rb_control_frame_t *)cfp; + } + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); } return 0; } -MJIT_FUNC_EXPORTED rb_control_frame_t * +rb_control_frame_t * rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp) { while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) { - if (VM_FRAME_RUBYFRAME_P(cfp)) { - return (rb_control_frame_t *)cfp; - } - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + if (VM_FRAME_RUBYFRAME_P(cfp)) { + return (rb_control_frame_t *)cfp; + } + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); } return 0; } -#endif /* #ifndef MJIT_HEADER */ - static rb_control_frame_t * vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp) { if (VM_FRAME_RUBYFRAME_P(cfp)) { - return (rb_control_frame_t *)cfp; + return (rb_control_frame_t *)cfp; } cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) { - if (VM_FRAME_RUBYFRAME_P(cfp)) { - return (rb_control_frame_t *)cfp; - } + if (VM_FRAME_RUBYFRAME_P(cfp)) { + return (rb_control_frame_t *)cfp; + } - if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) { - break; - } - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) { + break; + } + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); } return 0; } -MJIT_STATIC void +void rb_vm_pop_cfunc_frame(void) { rb_execution_context_t *ec = GET_EC(); @@ -674,22 +834,20 @@ rb_vm_pop_cfunc_frame(void) vm_pop_frame(ec, cfp, cfp->ep); } -#ifndef MJIT_HEADER - void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp) { /* check skipped frame */ while (ec->cfp != cfp) { #if VMDEBUG - printf("skipped frame: %s\n", vm_frametype_name(ec->cfp)); + printf("skipped frame: %s\n", vm_frametype_name(ec->cfp)); #endif - if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) { - rb_vm_pop_frame(ec); - } - else { /* unlikely path */ - rb_vm_pop_cfunc_frame(); - } + if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) { + rb_vm_pop_frame(ec); + } + else { /* unlikely path */ + rb_vm_pop_cfunc_frame(); + } } } @@ -711,11 +869,11 @@ ruby_vm_run_at_exit_hooks(rb_vm_t *vm) rb_at_exit_list *l = vm->at_exit; while (l) { - rb_at_exit_list* t = l->next; - rb_vm_at_exit_func *func = l->func; - ruby_xfree(l); - l = t; - (*func)(vm); + rb_at_exit_list* t = l->next; + rb_vm_at_exit_func *func = l->func; + ruby_xfree(l); + l = t; + (*func)(vm); } } @@ -732,9 +890,9 @@ check_env(const rb_env_t *env) dp(env->ep[1]); ruby_debug_printf("ep: %10p\n", (void *)env->ep); if (rb_vm_env_prev_env(env)) { - fputs(">>\n", stderr); - check_env_value(rb_vm_env_prev_env(env)); - fputs("<<\n", stderr); + fputs(">>\n", stderr); + check_env_value(rb_vm_env_prev_env(env)); + fputs("<<\n", stderr); } return 1; } @@ -743,7 +901,7 @@ static VALUE check_env_value(const rb_env_t *env) { if (check_env(env)) { - return (VALUE)env; + return 
(VALUE)env; } rb_bug("invalid env"); return Qnil; /* unreachable */ @@ -755,7 +913,7 @@ vm_block_handler_escape(const rb_execution_context_t *ec, VALUE block_handler) switch (vm_block_handler_type(block_handler)) { case block_handler_type_ifunc: case block_handler_type_iseq: - return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc); + return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc); case block_handler_type_symbol: case block_handler_type_proc: @@ -769,17 +927,15 @@ static VALUE vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *const cfp) { const VALUE * const ep = cfp->ep; - const rb_env_t *env; - const rb_iseq_t *env_iseq; VALUE *env_body, *env_ep; int local_size, env_size; if (VM_ENV_ESCAPED_P(ep)) { - return VM_ENV_ENVVAL(ep); + return VM_ENV_ENVVAL(ep); } if (!VM_ENV_LOCAL_P(ep)) { - const VALUE *prev_ep = VM_ENV_PREV_EP(ep); + const VALUE *prev_ep = VM_ENV_PREV_EP(ep); if (!VM_ENV_ESCAPED_P(prev_ep)) { rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); @@ -793,16 +949,16 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co } } else { - VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep); + VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep); - if (block_handler != VM_BLOCK_HANDLER_NONE) { + if (block_handler != VM_BLOCK_HANDLER_NONE) { VALUE blockprocval = vm_block_handler_escape(ec, block_handler); - VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval); - } + VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval); + } } if (!VM_FRAME_RUBYFRAME_P(cfp)) { - local_size = VM_ENV_DATA_SIZE; + local_size = VM_ENV_DATA_SIZE; } else { local_size = ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE; @@ -821,27 +977,36 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co */ env_size = local_size + - 1 /* envval */; + 1 /* envval */; + + // Careful with order in the following sequence. Each allocation can move objects. env_body = ALLOC_N(VALUE, env_size); - MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size); + rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, 0); -#if 0 - for (i = 0; i < local_size; i++) { - if (VM_FRAME_RUBYFRAME_P(cfp)) { - /* clear value stack for GC */ - ep[-local_size + i] = 0; - } - } -#endif + // Set up env without WB since it's brand new (similar to newobj_init(), newobj_fill()) + MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size); - env_iseq = VM_FRAME_RUBYFRAME_P(cfp) ? cfp->iseq : NULL; env_ep = &env_body[local_size - 1 /* specval */]; + env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env; - env = vm_env_new(env_ep, env_body, env_size, env_iseq); + env->iseq = (rb_iseq_t *)(VM_FRAME_RUBYFRAME_P(cfp) ? 
cfp->iseq : NULL); + env->ep = env_ep; + env->env = env_body; + env->env_size = env_size; cfp->ep = env_ep; VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED); VM_STACK_ENV_WRITE(ep, 0, (VALUE)env); /* GC mark */ + +#if 0 + for (i = 0; i < local_size; i++) { + if (VM_FRAME_RUBYFRAME_P(cfp)) { + /* clear value stack for GC */ + ep[-local_size + i] = 0; + } + } +#endif + return (VALUE)env; } @@ -851,7 +1016,7 @@ vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp) VALUE envval = vm_make_env_each(ec, cfp); if (PROCDEBUG) { - check_env_value((const rb_env_t *)envval); + check_env_value((const rb_env_t *)envval); } return envval; @@ -862,8 +1027,8 @@ rb_vm_stack_to_heap(rb_execution_context_t *ec) { rb_control_frame_t *cfp = ec->cfp; while ((cfp = rb_vm_get_binding_creatable_next_cfp(ec, cfp)) != 0) { - vm_make_env_object(ec, cfp); - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + vm_make_env_object(ec, cfp); + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); } } @@ -873,7 +1038,7 @@ rb_vm_env_prev_env(const rb_env_t *env) const VALUE *ep = env->ep; if (VM_ENV_LOCAL_P(ep)) { - return NULL; + return NULL; } else { const VALUE *prev_ep = VM_ENV_PREV_EP(ep); @@ -897,7 +1062,7 @@ collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list { do { if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break; - collect_local_variables_in_iseq(env->iseq, vars); + collect_local_variables_in_iseq(env->iseq, vars); } while ((env = rb_vm_env_prev_env(env)) != NULL); } @@ -905,11 +1070,11 @@ static int vm_collect_local_variables_in_heap(const VALUE *ep, const struct local_var_list *vars) { if (VM_ENV_ESCAPED_P(ep)) { - collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars); - return 1; + collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars); + return 1; } else { - return 0; + return 0; } } @@ -937,9 +1102,9 @@ rb_iseq_local_variables(const rb_iseq_t *iseq) static VALUE vm_proc_create_from_captured(VALUE klass, - const struct rb_captured_block *captured, - enum rb_block_type block_type, - int8_t is_from_method, int8_t is_lambda) + const struct rb_captured_block *captured, + enum rb_block_type block_type, + int8_t is_from_method, int8_t is_lambda) { VALUE procval = rb_proc_alloc(klass); rb_proc_t *proc = RTYPEDDATA_DATA(procval); @@ -965,16 +1130,16 @@ rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *s switch (vm_block_type(src)) { case block_type_iseq: case block_type_ifunc: - RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self); - RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val); - rb_vm_block_ep_update(obj, dst, src->as.captured.ep); - break; + RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self); + RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val); + rb_vm_block_ep_update(obj, dst, src->as.captured.ep); + break; case block_type_symbol: - RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol); - break; + RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol); + break; case block_type_proc: - RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc); - break; + RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc); + break; } } @@ -1062,26 +1227,33 @@ env_copy(const VALUE *src_ep, VALUE read_only_variables) VALUE *env_body = ZALLOC_N(VALUE, src_env->env_size); // fill with Qfalse VALUE *ep = &env_body[src_env->env_size - 2]; - volatile VALUE prev_env = Qnil; + const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq); + + // Copy after 
allocations above, since they can move objects in src_ep. + RB_OBJ_WRITE(copied_env, &ep[VM_ENV_DATA_INDEX_ME_CREF], src_ep[VM_ENV_DATA_INDEX_ME_CREF]); + ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED; + if (!VM_ENV_LOCAL_P(src_ep)) { + VM_ENV_FLAGS_SET(ep, VM_ENV_FLAG_LOCAL); + } if (read_only_variables) { for (int i=RARRAY_LENINT(read_only_variables)-1; i>=0; i--) { ID id = NUM2ID(RARRAY_AREF(read_only_variables, i)); for (unsigned int j=0; j<ISEQ_BODY(src_env->iseq)->local_table_size; j++) { - if (id == ISEQ_BODY(src_env->iseq)->local_table[j]) { + if (id == ISEQ_BODY(src_env->iseq)->local_table[j]) { VALUE v = src_env->env[j]; if (!rb_ractor_shareable_p(v)) { VALUE name = rb_id2str(id); VALUE msg = rb_sprintf("can not make shareable Proc because it can refer" " unshareable object %+" PRIsVALUE " from ", v); if (name) - rb_str_catf(msg, "variable `%" PRIsVALUE "'", name); + rb_str_catf(msg, "variable '%" PRIsVALUE "'", name); else rb_str_cat_cstr(msg, "a hidden variable"); rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg)); } - env_body[j] = v; + RB_OBJ_WRITE((VALUE)copied_env, &env_body[j], v); rb_ary_delete_at(read_only_variables, i); break; } @@ -1089,21 +1261,17 @@ env_copy(const VALUE *src_ep, VALUE read_only_variables) } } - ep[VM_ENV_DATA_INDEX_ME_CREF] = src_ep[VM_ENV_DATA_INDEX_ME_CREF]; - ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED; - if (!VM_ENV_LOCAL_P(src_ep)) { const VALUE *prev_ep = VM_ENV_PREV_EP(src_env->ep); const rb_env_t *new_prev_env = env_copy(prev_ep, read_only_variables); - prev_env = (VALUE)new_prev_env; ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_GUARDED_PREV_EP(new_prev_env->ep); + RB_OBJ_WRITTEN(copied_env, Qundef, new_prev_env); + VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_LOCAL); } else { ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE; } - const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq); - RB_GC_GUARD(prev_env); return copied_env; } @@ -1139,11 +1307,11 @@ proc_shared_outer_variables(struct rb_id_table *outer_variables, bool isolate, c rb_str_append(str, name); } if (*sep == ',') rb_str_cat_cstr(str, ")"); - rb_str_cat_cstr(str, data.yield ? " and uses `yield'." : "."); + rb_str_cat_cstr(str, data.yield ? " and uses 'yield'." : "."); rb_exc_raise(rb_exc_new_str(rb_eArgError, str)); } else if (data.yield) { - rb_raise(rb_eArgError, "can not %s because it uses `yield'.", message); + rb_raise(rb_eArgError, "can not %s because it uses 'yield'.", message); } return data.read_only; @@ -1208,21 +1376,45 @@ rb_proc_ractor_make_shareable(VALUE self) return self; } -MJIT_FUNC_EXPORTED VALUE +VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda) { VALUE procval; + enum imemo_type code_type = imemo_type(captured->code.val); if (!VM_ENV_ESCAPED_P(captured->ep)) { - rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured); - vm_make_env_object(ec, cfp); + rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured); + vm_make_env_object(ec, cfp); } + VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep)); - VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq) || - imemo_type_p(captured->code.val, imemo_ifunc)); + VM_ASSERT(code_type == imemo_iseq || code_type == imemo_ifunc); procval = vm_proc_create_from_captured(klass, captured, - imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc, FALSE, is_lambda); + code_type == imemo_iseq ? 
block_type_iseq : block_type_ifunc, + FALSE, is_lambda); + + if (code_type == imemo_ifunc) { + struct vm_ifunc *ifunc = (struct vm_ifunc *)captured->code.val; + if (ifunc->svar_lep) { + VALUE ep0 = ifunc->svar_lep[0]; + if (RB_TYPE_P(ep0, T_IMEMO) && imemo_type_p(ep0, imemo_env)) { + // `ep0 == imemo_env` means this ep is escaped to heap (in env object). + const rb_env_t *env = (const rb_env_t *)ep0; + ifunc->svar_lep = (VALUE *)env->ep; + } + else { + VM_ASSERT(FIXNUM_P(ep0)); + if (ep0 & VM_ENV_FLAG_ESCAPED) { + // ok. do nothing + } + else { + ifunc->svar_lep = NULL; + } + } + } + } + return procval; } @@ -1237,7 +1429,7 @@ rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *s rb_binding_t *bind; if (cfp == 0 || ruby_level_cfp == 0) { - rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber."); + rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber."); } if (!VM_FRAME_RUBYFRAME_P(src_cfp) && !VM_FRAME_RUBYFRAME_P(RUBY_VM_PREVIOUS_CONTROL_FRAME(src_cfp))) { @@ -1267,7 +1459,7 @@ rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const I rb_execution_context_t *ec = GET_EC(); const rb_iseq_t *base_iseq, *iseq; rb_ast_body_t ast; - NODE tmp_node; + rb_node_scope_t tmp_node; if (dyncount < 0) return 0; @@ -1279,17 +1471,22 @@ rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const I dyns->size = dyncount; MEMCPY(dyns->ids, dynvars, ID, dyncount); - rb_node_init(&tmp_node, NODE_SCOPE, (VALUE)dyns, 0, 0); - ast.root = &tmp_node; - ast.compile_option = 0; - ast.script_lines = INT2FIX(-1); + rb_node_init(RNODE(&tmp_node), NODE_SCOPE); + tmp_node.nd_tbl = dyns; + tmp_node.nd_body = 0; + tmp_node.nd_args = 0; + + ast.root = RNODE(&tmp_node); + ast.frozen_string_literal = -1; + ast.coverage_enabled = -1; + ast.script_lines = (rb_parser_ary_t *)INT2FIX(-1); if (base_iseq) { iseq = rb_iseq_new(&ast, ISEQ_BODY(base_iseq)->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL); } else { - VALUE tempstr = rb_fstring_lit("<temp>"); - iseq = rb_iseq_new_top(&ast, tempstr, tempstr, tempstr, NULL); + VALUE tempstr = rb_fstring_lit("<temp>"); + iseq = rb_iseq_new_top(&ast, tempstr, tempstr, tempstr, NULL); } tmp_node.nd_tbl = 0; /* reset table */ ALLOCV_END(idtmp); @@ -1310,34 +1507,34 @@ invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, cons int arg_size = ISEQ_BODY(iseq)->param.size; vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self, - VM_GUARDED_PREV_EP(captured->ep), - (VALUE)cref, /* cref or method */ + VM_GUARDED_PREV_EP(captured->ep), + (VALUE)cref, /* cref or method */ ISEQ_BODY(iseq)->iseq_encoded + opt_pc, - ec->cfp->sp + arg_size, + ec->cfp->sp + arg_size, ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max); - return vm_exec(ec, true); + return vm_exec(ec); } static VALUE invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc) { - /* bmethod */ + /* bmethod call from outside the VM */ int arg_size = ISEQ_BODY(iseq)->param.size; VALUE ret; VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD); vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self, - VM_GUARDED_PREV_EP(captured->ep), - (VALUE)me, + VM_GUARDED_PREV_EP(captured->ep), + (VALUE)me, ISEQ_BODY(iseq)->iseq_encoded + opt_pc, - ec->cfp->sp + arg_size, + ec->cfp->sp + 1 /* self */ + arg_size, ISEQ_BODY(iseq)->local_table_size - arg_size, 
ISEQ_BODY(iseq)->stack_max); VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH); - ret = vm_exec(ec, true); + ret = vm_exec(ec); return ret; } @@ -1349,64 +1546,78 @@ ALWAYS_INLINE(static VALUE static inline VALUE invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured, - VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler, + VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me) { const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq); - int i, opt_pc; + int opt_pc; VALUE type = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0); rb_control_frame_t *cfp = ec->cfp; VALUE *sp = cfp->sp; + int flags = (kw_splat ? VM_CALL_KW_SPLAT : 0); + VALUE *use_argv = (VALUE *)argv; + VALUE av[2]; stack_check(ec); - CHECK_VM_STACK_OVERFLOW(cfp, argc); + if (UNLIKELY(argc > VM_ARGC_STACK_MAX) && + (VM_ARGC_STACK_MAX >= 1 || + /* Skip ruby array for potential autosplat case */ + (argc != 1 || is_lambda))) { + use_argv = vm_argv_ruby_array(av, argv, &flags, &argc, kw_splat); + } + + CHECK_VM_STACK_OVERFLOW(cfp, argc + 1); vm_check_canary(ec, sp); - cfp->sp = sp + argc; - for (i=0; i<argc; i++) { - sp[i] = argv[i]; + + VALUE *stack_argv = sp; + if (me) { + *sp = self; // bemthods need `self` on the VM stack + stack_argv++; } + cfp->sp = stack_argv + argc; + MEMCPY(stack_argv, use_argv, VALUE, argc); // restrict: new stack space - opt_pc = vm_yield_setup_args(ec, iseq, argc, sp, kw_splat, passed_block_handler, - (is_lambda ? arg_setup_method : arg_setup_block)); + opt_pc = vm_yield_setup_args(ec, iseq, argc, stack_argv, flags, passed_block_handler, + (is_lambda ? arg_setup_method : arg_setup_block)); cfp->sp = sp; if (me == NULL) { - return invoke_block(ec, iseq, self, captured, cref, type, opt_pc); + return invoke_block(ec, iseq, self, captured, cref, type, opt_pc); } else { - return invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc); + return invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc); } } static inline VALUE invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler, - int argc, const VALUE *argv, - int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref, - int is_lambda, int force_blockarg) + int argc, const VALUE *argv, + int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref, + int is_lambda, int force_blockarg) { again: switch (vm_block_handler_type(block_handler)) { case block_handler_type_iseq: - { - const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler); - return invoke_iseq_block_from_c(ec, captured, captured->self, - argc, argv, kw_splat, passed_block_handler, + { + const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler); + return invoke_iseq_block_from_c(ec, captured, captured->self, + argc, argv, kw_splat, passed_block_handler, cref, is_lambda, NULL); - } + } case block_handler_type_ifunc: - return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler), - VM_BH_TO_IFUNC_BLOCK(block_handler)->self, + return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler), + VM_BH_TO_IFUNC_BLOCK(block_handler)->self, argc, argv, kw_splat, passed_block_handler, NULL); case block_handler_type_symbol: - return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler), - argc, argv, kw_splat, passed_block_handler); + return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler), + argc, argv, kw_splat, 
passed_block_handler); case block_handler_type_proc: - if (force_blockarg == FALSE) { - is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler)); - } - block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler)); - goto again; + if (force_blockarg == FALSE) { + is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler)); + } + block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler)); + goto again; } VM_UNREACHABLE(invoke_block_from_c_splattable); return Qundef; @@ -1418,7 +1629,7 @@ check_block_handler(rb_execution_context_t *ec) VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->cfp); vm_block_handler_verify(block_handler); if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) { - rb_vm_localjump_error("no block given", Qnil, 0); + rb_vm_localjump_error("no block given", Qnil, 0); } return block_handler; @@ -1429,7 +1640,7 @@ vm_yield_with_cref(rb_execution_context_t *ec, int argc, const VALUE *argv, int { return invoke_block_from_c_bh(ec, check_block_handler(ec), argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE, - cref, is_lambda, FALSE); + cref, is_lambda, FALSE); } static VALUE @@ -1443,7 +1654,7 @@ vm_yield_with_block(rb_execution_context_t *ec, int argc, const VALUE *argv, VAL { return invoke_block_from_c_bh(ec, check_block_handler(ec), argc, argv, kw_splat, block_handler, - NULL, FALSE, FALSE); + NULL, FALSE, FALSE); } static VALUE @@ -1461,7 +1672,7 @@ ALWAYS_INLINE(static VALUE static inline VALUE invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc, - VALUE self, int argc, const VALUE *argv, + VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler, int is_lambda, const rb_callable_method_entry_t *me) { @@ -1486,11 +1697,11 @@ invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc, } return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, me); case block_type_symbol: - return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler); + return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler); case block_type_proc: - is_lambda = block_proc_is_lambda(block->as.proc); - block = vm_proc_block(block->as.proc); - goto again; + is_lambda = block_proc_is_lambda(block->as.proc); + block = vm_proc_block(block->as.proc); + goto again; } VM_UNREACHABLE(invoke_block_from_c_proc); return Qundef; @@ -1498,21 +1709,21 @@ invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc, static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, - int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler) + int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler) { return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL); } -MJIT_FUNC_EXPORTED VALUE +VALUE rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me) { return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, block_handler, TRUE, me); } -MJIT_FUNC_EXPORTED VALUE +VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, - int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler) + int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler) { VALUE self = vm_block_self(&proc->block); vm_block_handler_verify(passed_block_handler); @@ -1521,7 +1732,7 @@ 
rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL); } else { - return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler); + return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler); } } @@ -1535,36 +1746,42 @@ rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE s return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL); } else { - return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler); + return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler); } } /* special variable */ -static rb_control_frame_t * -vm_normal_frame(const rb_execution_context_t *ec, rb_control_frame_t *cfp) +VALUE * +rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp) { - while (cfp->pc == 0) { - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); - if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) { - return 0; - } + while (cfp->pc == 0 || cfp->iseq == 0) { + if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_IFUNC) { + struct vm_ifunc *ifunc = (struct vm_ifunc *)cfp->iseq; + return ifunc->svar_lep; + } + else { + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + } + + if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) { + return NULL; + } } - return cfp; + + return (VALUE *)VM_CF_LEP(cfp); } static VALUE vm_cfp_svar_get(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key) { - cfp = vm_normal_frame(ec, cfp); - return lep_svar_get(ec, cfp ? VM_CF_LEP(cfp) : 0, key); + return lep_svar_get(ec, rb_vm_svar_lep(ec, cfp), key); } static void vm_cfp_svar_set(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key, const VALUE val) { - cfp = vm_normal_frame(ec, cfp); - lep_svar_set(ec, cfp ? 
VM_CF_LEP(cfp) : 0, key, val); + lep_svar_set(ec, rb_vm_svar_lep(ec, cfp), key, val); } static VALUE @@ -1603,6 +1820,17 @@ rb_lastline_set(VALUE val) vm_svar_set(GET_EC(), VM_SVAR_LASTLINE, val); } +void +rb_lastline_set_up(VALUE val, unsigned int up) +{ + rb_control_frame_t * cfp = GET_EC()->cfp; + + for(unsigned int i = 0; i < up; i++) { + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + } + vm_cfp_svar_set(GET_EC(), cfp, VM_SVAR_LASTLINE, val); +} + /* misc */ const char * @@ -1612,10 +1840,10 @@ rb_sourcefile(void) const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp); if (cfp) { - return RSTRING_PTR(rb_iseq_path(cfp->iseq)); + return RSTRING_PTR(rb_iseq_path(cfp->iseq)); } else { - return 0; + return 0; } } @@ -1626,10 +1854,10 @@ rb_sourceline(void) const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp); if (cfp) { - return rb_vm_get_sourceline(cfp); + return rb_vm_get_sourceline(cfp); } else { - return 0; + return 0; } } @@ -1640,16 +1868,16 @@ rb_source_location(int *pline) const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp); if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) { - if (pline) *pline = rb_vm_get_sourceline(cfp); - return rb_iseq_path(cfp->iseq); + if (pline) *pline = rb_vm_get_sourceline(cfp); + return rb_iseq_path(cfp->iseq); } else { - if (pline) *pline = 0; - return Qnil; + if (pline) *pline = 0; + return Qnil; } } -MJIT_FUNC_EXPORTED const char * +const char * rb_source_location_cstr(int *pline) { VALUE path = rb_source_location(pline); @@ -1692,9 +1920,9 @@ void debug_cref(rb_cref_t *cref) { while (cref) { - dp(CREF_CLASS(cref)); - printf("%ld\n", CREF_VISI(cref)); - cref = CREF_NEXT(cref); + dp(CREF_CLASS(cref)); + printf("%ld\n", CREF_VISI(cref)); + cref = CREF_NEXT(cref); } } #endif @@ -1706,7 +1934,7 @@ rb_vm_cbase(void) const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp); if (cfp == 0) { - rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread"); + rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread"); } return vm_get_cbase(cfp->ep); } @@ -1722,30 +1950,30 @@ make_localjump_error(const char *mesg, VALUE value, int reason) switch (reason) { case TAG_BREAK: - CONST_ID(id, "break"); - break; + CONST_ID(id, "break"); + break; case TAG_REDO: - CONST_ID(id, "redo"); - break; + CONST_ID(id, "redo"); + break; case TAG_RETRY: - CONST_ID(id, "retry"); - break; + CONST_ID(id, "retry"); + break; case TAG_NEXT: - CONST_ID(id, "next"); - break; + CONST_ID(id, "next"); + break; case TAG_RETURN: - CONST_ID(id, "return"); - break; + CONST_ID(id, "return"); + break; default: - CONST_ID(id, "noreason"); - break; + CONST_ID(id, "noreason"); + break; } rb_iv_set(exc, "@exit_value", value); rb_iv_set(exc, "@reason", ID2SYM(id)); return exc; } -MJIT_FUNC_EXPORTED void +void rb_vm_localjump_error(const char *mesg, VALUE value, int reason) { VALUE exc = make_localjump_error(mesg, value, reason); @@ -1753,39 +1981,39 @@ rb_vm_localjump_error(const char *mesg, VALUE value, int reason) } VALUE -rb_vm_make_jump_tag_but_local_jump(int state, VALUE val) +rb_vm_make_jump_tag_but_local_jump(enum ruby_tag_type state, VALUE val) { const char *mesg; switch (state) { case TAG_RETURN: - mesg = "unexpected return"; - break; + mesg = "unexpected return"; + break; case TAG_BREAK: - mesg = "unexpected break"; - break; + mesg = "unexpected break"; + break; case TAG_NEXT: - mesg = "unexpected next"; - break; + mesg = "unexpected next"; + break; case TAG_REDO: - mesg = "unexpected redo"; - val = Qnil; - break; 
+ mesg = "unexpected redo"; + val = Qnil; + break; case TAG_RETRY: - mesg = "retry outside of rescue clause"; - val = Qnil; - break; + mesg = "retry outside of rescue clause"; + val = Qnil; + break; default: - return Qnil; + return Qnil; } - if (val == Qundef) { - val = GET_EC()->tag->retval; + if (UNDEF_P(val)) { + val = GET_EC()->tag->retval; } return make_localjump_error(mesg, val, state); } void -rb_vm_jump_tag_but_local_jump(int state) +rb_vm_jump_tag_but_local_jump(enum ruby_tag_type state) { VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef); if (!NIL_P(exc)) rb_exc_raise(exc); @@ -1796,7 +2024,7 @@ static rb_control_frame_t * next_not_local_frame(rb_control_frame_t *cfp) { while (VM_ENV_LOCAL_P(cfp->ep)) { - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); } return cfp; } @@ -1811,7 +2039,7 @@ vm_iter_break(rb_execution_context_t *ec, VALUE val) const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(ec, cfp, ep); if (!target_cfp) { - rb_vm_localjump_error("unexpected break", val, TAG_BREAK); + rb_vm_localjump_error("unexpected break", val, TAG_BREAK); } ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK); @@ -1832,9 +2060,17 @@ rb_iter_break_value(VALUE val) /* optimization: redefine management */ +short ruby_vm_redefined_flag[BOP_LAST_]; static st_table *vm_opt_method_def_table = 0; static st_table *vm_opt_mid_table = 0; +void +rb_free_vm_opt_tables(void) +{ + st_free_table(vm_opt_method_def_table); + st_free_table(vm_opt_mid_table); +} + static int vm_redefinition_check_flag(VALUE klass) { @@ -1872,13 +2108,15 @@ vm_redefinition_check_method_type(const rb_method_entry_t *me) return FALSE; } + if (METHOD_ENTRY_BASIC(me)) return TRUE; + const rb_method_definition_t *def = me->def; switch (def->type) { case VM_METHOD_TYPE_CFUNC: case VM_METHOD_TYPE_OPTIMIZED: - return TRUE; + return TRUE; default: - return FALSE; + return FALSE; } } @@ -1894,7 +2132,14 @@ rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass) if (st_lookup(vm_opt_method_def_table, (st_data_t)me->def, &bop)) { int flag = vm_redefinition_check_flag(klass); if (flag != 0) { + rb_category_warn( + RB_WARN_CATEGORY_PERFORMANCE, + "Redefining '%s#%s' disables interpreter and JIT optimizations", + rb_class2name(me->owner), + rb_id2name(me->called_id) + ); rb_yjit_bop_redefined(flag, (enum ruby_basic_operators)bop); + rb_rjit_bop_redefined(flag, (enum ruby_basic_operators)bop); ruby_vm_redefined_flag[bop] |= flag; } } @@ -1921,16 +2166,36 @@ rb_vm_check_redefinition_by_prepend(VALUE klass) } static void -add_opt_method(VALUE klass, ID mid, VALUE bop) +add_opt_method_entry_bop(const rb_method_entry_t *me, ID mid, enum ruby_basic_operators bop) +{ + st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop); + st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue); +} + +static void +add_opt_method(VALUE klass, ID mid, enum ruby_basic_operators bop) { const rb_method_entry_t *me = rb_method_entry_at(klass, mid); if (me && vm_redefinition_check_method_type(me)) { - st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop); - st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue); + add_opt_method_entry_bop(me, mid, bop); } else { - rb_bug("undefined optimized method: %s", rb_id2name(mid)); + rb_bug("undefined optimized method: %s", rb_id2name(mid)); + } +} + +static enum ruby_basic_operators vm_redefinition_bop_for_id(ID mid); + +static void +add_opt_method_entry(const rb_method_entry_t *me) +{ + 
if (me && vm_redefinition_check_method_type(me)) { + ID mid = me->called_id; + enum ruby_basic_operators bop = vm_redefinition_bop_for_id(mid); + if ((int)bop >= 0) { + add_opt_method_entry_bop(me, mid, bop); + } } } @@ -1938,10 +2203,7 @@ static void vm_init_redefined_flag(void) { ID mid; - VALUE bop; - - vm_opt_method_def_table = st_init_numtable(); - vm_opt_mid_table = st_init_numtable(); + enum ruby_basic_operators bop; #define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0) #define C(k) add_opt_method(rb_c##k, mid, bop) @@ -1952,7 +2214,7 @@ vm_init_redefined_flag(void) OP(MOD, MOD), (C(Integer), C(Float)); OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol)); OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String), - C(NilClass), C(TrueClass), C(FalseClass)); + C(NilClass), C(TrueClass), C(FalseClass)); OP(LT, LT), (C(Integer), C(Float)); OP(LE, LE), (C(Integer), C(Float)); OP(GT, GT), (C(Integer), C(Float)); @@ -1969,14 +2231,57 @@ vm_init_redefined_flag(void) OP(UMinus, UMINUS), (C(String)); OP(Max, MAX), (C(Array)); OP(Min, MIN), (C(Array)); + OP(Hash, HASH), (C(Array)); OP(Call, CALL), (C(Proc)); OP(And, AND), (C(Integer)); OP(Or, OR), (C(Integer)); OP(NilP, NIL_P), (C(NilClass)); + OP(Cmp, CMP), (C(Integer), C(Float), C(String)); + OP(Default, DEFAULT), (C(Hash)); #undef C #undef OP } +static enum ruby_basic_operators +vm_redefinition_bop_for_id(ID mid) +{ + switch (mid) { +#define OP(mid_, bop_) case id##mid_: return BOP_##bop_ + OP(PLUS, PLUS); + OP(MINUS, MINUS); + OP(MULT, MULT); + OP(DIV, DIV); + OP(MOD, MOD); + OP(Eq, EQ); + OP(Eqq, EQQ); + OP(LT, LT); + OP(LE, LE); + OP(GT, GT); + OP(GE, GE); + OP(LTLT, LTLT); + OP(AREF, AREF); + OP(ASET, ASET); + OP(Length, LENGTH); + OP(Size, SIZE); + OP(EmptyP, EMPTY_P); + OP(Succ, SUCC); + OP(EqTilde, MATCH); + OP(Freeze, FREEZE); + OP(UMinus, UMINUS); + OP(Max, MAX); + OP(Min, MIN); + OP(Hash, HASH); + OP(Call, CALL); + OP(And, AND); + OP(Or, OR); + OP(NilP, NIL_P); + OP(Cmp, CMP); + OP(Default, DEFAULT); +#undef OP + } + return -1; +} + /* for vm development */ #if VMDEBUG @@ -1993,7 +2298,7 @@ vm_frametype_name(const rb_control_frame_t *cfp) case VM_FRAME_MAGIC_EVAL: return "eval"; case VM_FRAME_MAGIC_RESCUE: return "rescue"; default: - rb_bug("unknown frame"); + rb_bug("unknown frame"); } } #endif @@ -2002,12 +2307,12 @@ static VALUE frame_return_value(const struct vm_throw_data *err) { if (THROW_DATA_P(err) && - THROW_DATA_STATE(err) == TAG_BREAK && - THROW_DATA_CONSUMED_P(err) == FALSE) { - return THROW_DATA_VAL(err); + THROW_DATA_STATE(err) == TAG_BREAK && + THROW_DATA_CONSUMED_P(err) == FALSE) { + return THROW_DATA_VAL(err); } else { - return Qnil; + return Qnil; } } @@ -2037,14 +2342,13 @@ frame_name(const rb_control_frame_t *cfp) // cfp_returning_with_value: // Whether cfp is the last frame in the unwinding process for a non-local return. 
static void -hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp, - bool cfp_returning_with_value, int state, struct vm_throw_data *err) +hook_before_rewind(rb_execution_context_t *ec, bool cfp_returning_with_value, int state, struct vm_throw_data *err) { if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) { - return; + return; } else { - const rb_iseq_t *iseq = cfp->iseq; + const rb_iseq_t *iseq = ec->cfp->iseq; rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks; switch (VM_FRAME_TYPE(ec->cfp)) { @@ -2068,10 +2372,10 @@ hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp, } - EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, bmethod_return_value); + EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, bmethod_return_value); if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) { rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN, - ec->cfp->self, 0, 0, 0, bmethod_return_value, FALSE); + ec->cfp->self, 0, 0, 0, bmethod_return_value, TRUE); } const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp); @@ -2188,254 +2492,228 @@ hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp, VALUE *ep; // ep void *code; // }; - - If mjit_exec is already called before calling vm_exec, `mjit_enable_p` should - be FALSE to avoid calling `mjit_exec` twice. */ static inline VALUE -vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, - VALUE errinfo, VALUE *initial); +vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo); +static inline VALUE +vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state, struct rb_vm_tag *tag, VALUE result); // for non-Emscripten Wasm build, use vm_exec with optimized setjmp for runtime performance #if defined(__wasm__) && !defined(__EMSCRIPTEN__) struct rb_vm_exec_context { - rb_execution_context_t *ec; - struct rb_vm_tag *tag; - VALUE initial; + rb_execution_context_t *const ec; + struct rb_vm_tag *const tag; + VALUE result; - enum ruby_tag_type state; - bool mjit_enable_p; }; static void -vm_exec_enter_vm_loop(rb_execution_context_t *ec, struct rb_vm_exec_context *ctx, - struct rb_vm_tag *_tag, bool skip_first_ex_handle) { - if (skip_first_ex_handle) { - goto vm_loop_start; - } - - ctx->result = ec->errinfo; - rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY); - while ((ctx->result = vm_exec_handle_exception(ec, ctx->state, ctx->result, &ctx->initial)) == Qundef) { - /* caught a jump, exec the handler */ - ctx->result = vm_exec_core(ec, ctx->initial); - vm_loop_start: - VM_ASSERT(ec->tag == _tag); - /* when caught `throw`, `tag.state` is set. 
*/ - if ((ctx->state = _tag->state) == TAG_NONE) break; - _tag->state = TAG_NONE; - } -} - -static void vm_exec_bottom_main(void *context) { - struct rb_vm_exec_context *ctx = (struct rb_vm_exec_context *)context; + struct rb_vm_exec_context *ctx = context; + rb_execution_context_t *ec = ctx->ec; - ctx->state = TAG_NONE; - if (!ctx->mjit_enable_p || (ctx->result = mjit_exec(ctx->ec)) == Qundef) { - ctx->result = vm_exec_core(ctx->ec, ctx->initial); - } - vm_exec_enter_vm_loop(ctx->ec, ctx, ctx->tag, true); + ctx->result = vm_exec_loop(ec, TAG_NONE, ctx->tag, vm_exec_core(ec)); } static void vm_exec_bottom_rescue(void *context) { - struct rb_vm_exec_context *ctx = (struct rb_vm_exec_context *)context; - ctx->state = rb_ec_tag_state(ctx->ec); - vm_exec_enter_vm_loop(ctx->ec, ctx, ctx->tag, false); + struct rb_vm_exec_context *ctx = context; + rb_execution_context_t *ec = ctx->ec; + + ctx->result = vm_exec_loop(ec, rb_ec_tag_state(ec), ctx->tag, ec->errinfo); } +#endif VALUE -vm_exec(rb_execution_context_t *ec, bool mjit_enable_p) +vm_exec(rb_execution_context_t *ec) { - struct rb_vm_exec_context ctx = { - .ec = ec, - .initial = 0, .result = Qundef, - .mjit_enable_p = mjit_enable_p, - }; - struct rb_wasm_try_catch try_catch; + VALUE result = Qundef; EC_PUSH_TAG(ec); _tag.retval = Qnil; - ctx.tag = &_tag; + +#if defined(__wasm__) && !defined(__EMSCRIPTEN__) + struct rb_vm_exec_context ctx = { + .ec = ec, + .tag = &_tag, + }; + struct rb_wasm_try_catch try_catch; EC_REPUSH_TAG(); rb_wasm_try_catch_init(&try_catch, vm_exec_bottom_main, vm_exec_bottom_rescue, &ctx); - rb_wasm_try_catch_loop_run(&try_catch, &_tag.buf); - - EC_POP_TAG(); - return ctx.result; -} + rb_wasm_try_catch_loop_run(&try_catch, &RB_VM_TAG_JMPBUF_GET(_tag.buf)); + result = ctx.result; #else - -VALUE -vm_exec(rb_execution_context_t *ec, bool mjit_enable_p) -{ enum ruby_tag_type state; - VALUE result = Qundef; - VALUE initial = 0; - - EC_PUSH_TAG(ec); - - _tag.retval = Qnil; if ((state = EC_EXEC_TAG()) == TAG_NONE) { - if (!mjit_enable_p || (result = mjit_exec(ec)) == Qundef) { - result = vm_exec_core(ec, initial); + if (UNDEF_P(result = jit_exec(ec))) { + result = vm_exec_core(ec); } - goto vm_loop_start; /* fallback to the VM */ + /* fallback to the VM */ + result = vm_exec_loop(ec, TAG_NONE, &_tag, result); } else { - result = ec->errinfo; - rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY); - while ((result = vm_exec_handle_exception(ec, state, result, &initial)) == Qundef) { - /* caught a jump, exec the handler */ - result = vm_exec_core(ec, initial); - vm_loop_start: - VM_ASSERT(ec->tag == &_tag); - /* when caught `throw`, `tag.state` is set. */ - if ((state = _tag.state) == TAG_NONE) break; - _tag.state = TAG_NONE; - } + result = vm_exec_loop(ec, state, &_tag, ec->errinfo); } +#endif + EC_POP_TAG(); return result; } -#endif static inline VALUE -vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, - VALUE errinfo, VALUE *initial) +vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state, + struct rb_vm_tag *tag, VALUE result) +{ + if (state == TAG_NONE) { /* no jumps, result is discarded */ + goto vm_loop_start; + } + + rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY); + while (UNDEF_P(result = vm_exec_handle_exception(ec, state, result))) { + // caught a jump, exec the handler. JIT code in jit_exec_exception() + // may return Qundef to run remaining frames with vm_exec_core(). 
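The restructured vm_exec()/vm_exec_loop() above centralizes the setjmp-based tag protocol: run the core once, and after a non-local jump keep calling the handler search until it stops returning Qundef. Below is a hypothetical stand-alone sketch of that shape using plain setjmp/longjmp, with -1 standing in for Qundef; all names are invented and the toy "interpreter" is a counter loop.

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf tag;
    static int pc;

    static int
    run_core(void)                        /* toy interpreter core */
    {
        for (; pc < 5; pc++) {
            if (pc == 2) longjmp(tag, 1); /* "raise": unwind to the tag */
        }
        return 42;                        /* normal completion value */
    }

    /* Handler search: returns -1 (playing the role of Qundef) when a
     * handler was found and the core loop must be re-entered. */
    static int
    handle(int state)
    {
        printf("caught state %d at pc=%d, entering handler\n", state, pc);
        pc++;                             /* step past the raising "insn" */
        return -1;
    }

    int
    main(void)
    {
        int state;
        volatile int result = 0;

        if ((state = setjmp(tag)) == 0) { /* EC_EXEC_TAG() == TAG_NONE */
            result = run_core();
        }
        else if ((result = handle(state)) == -1) {
            /* the real loop repeats this while the search yields Qundef;
             * a fresh raise would longjmp back to the setjmp above */
            result = run_core();
        }
        printf("result = %d\n", result);
        return 0;
    }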
+ if (UNDEF_P(result = jit_exec_exception(ec))) { + result = vm_exec_core(ec); + } + vm_loop_start: + VM_ASSERT(ec->tag == tag); + /* when caught `throw`, `tag.state` is set. */ + if ((state = tag->state) == TAG_NONE) break; + tag->state = TAG_NONE; + } + + return result; +} + +static inline VALUE +vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo) { struct vm_throw_data *err = (struct vm_throw_data *)errinfo; for (;;) { - unsigned int i; - const struct iseq_catch_table_entry *entry; - const struct iseq_catch_table *ct; - unsigned long epc, cont_pc, cont_sp; - const rb_iseq_t *catch_iseq; - rb_control_frame_t *cfp; - VALUE type; - const rb_control_frame_t *escape_cfp; - - cont_pc = cont_sp = 0; - catch_iseq = NULL; - - while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) { - if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) { - EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self, - rb_vm_frame_method_entry(ec->cfp)->def->original_id, - rb_vm_frame_method_entry(ec->cfp)->called_id, - rb_vm_frame_method_entry(ec->cfp)->owner, Qnil); - RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, - rb_vm_frame_method_entry(ec->cfp)->owner, - rb_vm_frame_method_entry(ec->cfp)->def->original_id); - } - rb_vm_pop_frame(ec); - } - - cfp = ec->cfp; + unsigned int i; + const struct iseq_catch_table_entry *entry; + const struct iseq_catch_table *ct; + unsigned long epc, cont_pc, cont_sp; + const rb_iseq_t *catch_iseq; + VALUE type; + const rb_control_frame_t *escape_cfp; + + cont_pc = cont_sp = 0; + catch_iseq = NULL; + + while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) { + if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) { + EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self, + rb_vm_frame_method_entry(ec->cfp)->def->original_id, + rb_vm_frame_method_entry(ec->cfp)->called_id, + rb_vm_frame_method_entry(ec->cfp)->owner, Qnil); + RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, + rb_vm_frame_method_entry(ec->cfp)->owner, + rb_vm_frame_method_entry(ec->cfp)->def->original_id); + } + rb_vm_pop_frame(ec); + } + + rb_control_frame_t *const cfp = ec->cfp; epc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded; - escape_cfp = NULL; - if (state == TAG_BREAK || state == TAG_RETURN) { - escape_cfp = THROW_DATA_CATCH_FRAME(err); - - if (cfp == escape_cfp) { - if (state == TAG_RETURN) { - if (!VM_FRAME_FINISHED_P(cfp)) { - THROW_DATA_CATCH_FRAME_SET(err, cfp + 1); - THROW_DATA_STATE_SET(err, state = TAG_BREAK); - } - else { + escape_cfp = NULL; + if (state == TAG_BREAK || state == TAG_RETURN) { + escape_cfp = THROW_DATA_CATCH_FRAME(err); + + if (cfp == escape_cfp) { + if (state == TAG_RETURN) { + if (!VM_FRAME_FINISHED_P(cfp)) { + THROW_DATA_CATCH_FRAME_SET(err, cfp + 1); + THROW_DATA_STATE_SET(err, state = TAG_BREAK); + } + else { ct = ISEQ_BODY(cfp->iseq)->catch_table; - if (ct) for (i = 0; i < ct->size; i++) { - entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); - if (entry->start < epc && entry->end >= epc) { - if (entry->type == CATCH_TYPE_ENSURE) { - catch_iseq = entry->iseq; - cont_pc = entry->cont; - cont_sp = entry->sp; - break; - } - } - } - if (catch_iseq == NULL) { - ec->errinfo = Qnil; - THROW_DATA_CATCH_FRAME_SET(err, cfp + 1); + if (ct) for (i = 0; i < ct->size; i++) { + entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); + if (entry->start < epc && entry->end >= epc) { + if (entry->type == CATCH_TYPE_ENSURE) { + catch_iseq = entry->iseq; + cont_pc = entry->cont; + cont_sp = entry->sp; + break; + } + } + } + if (catch_iseq == NULL) { + ec->errinfo = Qnil; 
+ THROW_DATA_CATCH_FRAME_SET(err, cfp + 1); // cfp == escape_cfp here so calling with cfp_returning_with_value = true - hook_before_rewind(ec, ec->cfp, true, state, err); - rb_vm_pop_frame(ec); - return THROW_DATA_VAL(err); - } - } - /* through */ - } - else { - /* TAG_BREAK */ -#if OPT_STACK_CACHING - *initial = THROW_DATA_VAL(err); -#else - *ec->cfp->sp++ = THROW_DATA_VAL(err); -#endif - ec->errinfo = Qnil; - return Qundef; - } - } - } + hook_before_rewind(ec, true, state, err); + rb_vm_pop_frame(ec); + return THROW_DATA_VAL(err); + } + } + /* through */ + } + else { + /* TAG_BREAK */ + *cfp->sp++ = THROW_DATA_VAL(err); + ec->errinfo = Qnil; + return Qundef; + } + } + } - if (state == TAG_RAISE) { + if (state == TAG_RAISE) { ct = ISEQ_BODY(cfp->iseq)->catch_table; - if (ct) for (i = 0; i < ct->size; i++) { - entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); - if (entry->start < epc && entry->end >= epc) { - - if (entry->type == CATCH_TYPE_RESCUE || - entry->type == CATCH_TYPE_ENSURE) { - catch_iseq = entry->iseq; - cont_pc = entry->cont; - cont_sp = entry->sp; - break; - } - } - } - } - else if (state == TAG_RETRY) { + if (ct) for (i = 0; i < ct->size; i++) { + entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); + if (entry->start < epc && entry->end >= epc) { + + if (entry->type == CATCH_TYPE_RESCUE || + entry->type == CATCH_TYPE_ENSURE) { + catch_iseq = entry->iseq; + cont_pc = entry->cont; + cont_sp = entry->sp; + break; + } + } + } + } + else if (state == TAG_RETRY) { ct = ISEQ_BODY(cfp->iseq)->catch_table; - if (ct) for (i = 0; i < ct->size; i++) { - entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); - if (entry->start < epc && entry->end >= epc) { - - if (entry->type == CATCH_TYPE_ENSURE) { - catch_iseq = entry->iseq; - cont_pc = entry->cont; - cont_sp = entry->sp; - break; - } - else if (entry->type == CATCH_TYPE_RETRY) { - const rb_control_frame_t *escape_cfp; - escape_cfp = THROW_DATA_CATCH_FRAME(err); - if (cfp == escape_cfp) { + if (ct) for (i = 0; i < ct->size; i++) { + entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); + if (entry->start < epc && entry->end >= epc) { + + if (entry->type == CATCH_TYPE_ENSURE) { + catch_iseq = entry->iseq; + cont_pc = entry->cont; + cont_sp = entry->sp; + break; + } + else if (entry->type == CATCH_TYPE_RETRY) { + const rb_control_frame_t *escape_cfp; + escape_cfp = THROW_DATA_CATCH_FRAME(err); + if (cfp == escape_cfp) { cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont; - ec->errinfo = Qnil; - return Qundef; - } - } - } - } - } + ec->errinfo = Qnil; + return Qundef; + } + } + } + } + } else if ((state == TAG_BREAK && !escape_cfp) || (state == TAG_REDO) || (state == TAG_NEXT)) { - type = (const enum catch_type[TAG_MASK]) { + type = (const enum rb_catch_type[TAG_MASK]) { [TAG_BREAK] = CATCH_TYPE_BREAK, [TAG_NEXT] = CATCH_TYPE_NEXT, [TAG_REDO] = CATCH_TYPE_REDO, @@ -2443,88 +2721,84 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, }[state]; ct = ISEQ_BODY(cfp->iseq)->catch_table; - if (ct) for (i = 0; i < ct->size; i++) { - entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); - - if (entry->start < epc && entry->end >= epc) { - if (entry->type == CATCH_TYPE_ENSURE) { - catch_iseq = entry->iseq; - cont_pc = entry->cont; - cont_sp = entry->sp; - break; - } - else if (entry->type == type) { + if (ct) for (i = 0; i < ct->size; i++) { + entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); + + if (entry->start < epc && entry->end >= epc) { + if (entry->type == CATCH_TYPE_ENSURE) { + catch_iseq = entry->iseq; + cont_pc = entry->cont; + cont_sp = 
entry->sp; + break; + } + else if (entry->type == type) { cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont; - cfp->sp = vm_base_ptr(cfp) + entry->sp; + cfp->sp = vm_base_ptr(cfp) + entry->sp; - if (state != TAG_REDO) { -#if OPT_STACK_CACHING - *initial = THROW_DATA_VAL(err); -#else - *ec->cfp->sp++ = THROW_DATA_VAL(err); -#endif - } - ec->errinfo = Qnil; - VM_ASSERT(ec->tag->state == TAG_NONE); - return Qundef; - } - } - } - } - else { + if (state != TAG_REDO) { + *cfp->sp++ = THROW_DATA_VAL(err); + } + ec->errinfo = Qnil; + VM_ASSERT(ec->tag->state == TAG_NONE); + return Qundef; + } + } + } + } + else { ct = ISEQ_BODY(cfp->iseq)->catch_table; - if (ct) for (i = 0; i < ct->size; i++) { - entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); - if (entry->start < epc && entry->end >= epc) { - - if (entry->type == CATCH_TYPE_ENSURE) { - catch_iseq = entry->iseq; - cont_pc = entry->cont; - cont_sp = entry->sp; - break; - } - } - } - } - - if (catch_iseq != NULL) { /* found catch table */ - /* enter catch scope */ - const int arg_size = 1; - - rb_iseq_check(catch_iseq); - cfp->sp = vm_base_ptr(cfp) + cont_sp; + if (ct) for (i = 0; i < ct->size; i++) { + entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); + if (entry->start < epc && entry->end >= epc) { + + if (entry->type == CATCH_TYPE_ENSURE) { + catch_iseq = entry->iseq; + cont_pc = entry->cont; + cont_sp = entry->sp; + break; + } + } + } + } + + if (catch_iseq != NULL) { /* found catch table */ + /* enter catch scope */ + const int arg_size = 1; + + rb_iseq_check(catch_iseq); + cfp->sp = vm_base_ptr(cfp) + cont_sp; cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + cont_pc; - /* push block frame */ - cfp->sp[0] = (VALUE)err; - vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE, - cfp->self, - VM_GUARDED_PREV_EP(cfp->ep), - 0, /* cref or me */ + /* push block frame */ + cfp->sp[0] = (VALUE)err; + vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE, + cfp->self, + VM_GUARDED_PREV_EP(cfp->ep), + 0, /* cref or me */ ISEQ_BODY(catch_iseq)->iseq_encoded, - cfp->sp + arg_size /* push value */, + cfp->sp + arg_size /* push value */, ISEQ_BODY(catch_iseq)->local_table_size - arg_size, ISEQ_BODY(catch_iseq)->stack_max); - state = 0; - ec->tag->state = TAG_NONE; - ec->errinfo = Qnil; + state = 0; + ec->tag->state = TAG_NONE; + ec->errinfo = Qnil; - return Qundef; - } - else { - hook_before_rewind(ec, ec->cfp, (cfp == escape_cfp), state, err); + return Qundef; + } + else { + hook_before_rewind(ec, (cfp == escape_cfp), state, err); - if (VM_FRAME_FINISHED_P(ec->cfp)) { - rb_vm_pop_frame(ec); - ec->errinfo = (VALUE)err; - ec->tag = ec->tag->prev; - EC_JUMP_TAG(ec, state); - } - else { - rb_vm_pop_frame(ec); - } - } + if (VM_FRAME_FINISHED_P(ec->cfp)) { + rb_vm_pop_frame(ec); + ec->errinfo = (VALUE)err; + ec->tag = ec->tag->prev; + EC_JUMP_TAG(ec, state); + } + else { + rb_vm_pop_frame(ec); + } + } } } @@ -2536,7 +2810,7 @@ rb_iseq_eval(const rb_iseq_t *iseq) rb_execution_context_t *ec = GET_EC(); VALUE val; vm_set_top_stack(ec, iseq); - val = vm_exec(ec, true); + val = vm_exec(ec); return val; } @@ -2547,7 +2821,7 @@ rb_iseq_eval_main(const rb_iseq_t *iseq) VALUE val; vm_set_main_stack(ec, iseq); - val = vm_exec(ec, true); + val = vm_exec(ec); return val; } @@ -2557,13 +2831,13 @@ rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *cal const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp); if (me) { - if (idp) *idp = me->def->original_id; - if (called_idp) *called_idp = me->called_id; - if (klassp) *klassp = me->owner; - 
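For reference, the catch-table scans in this function all test entry->start < epc && entry->end >= epc against the faulting program counter, and CATCH_TYPE_ENSURE entries may intercept any kind of unwinding before the wanted handler type. A reduced model of that lookup, under invented struct names and values:

    #include <stddef.h>
    #include <stdio.h>

    enum catch_type { CATCH_RESCUE, CATCH_ENSURE, CATCH_RETRY };

    struct catch_entry {
        unsigned long start, end;   /* protected range: start < pc <= end */
        enum catch_type type;
        unsigned long cont;         /* pc to continue at after the handler */
    };

    static const struct catch_entry *
    lookup(const struct catch_entry *tbl, size_t n, unsigned long epc,
           enum catch_type wanted)
    {
        for (size_t i = 0; i < n; i++) {
            if (tbl[i].start < epc && tbl[i].end >= epc &&
                (tbl[i].type == wanted || tbl[i].type == CATCH_ENSURE)) {
                return &tbl[i];     /* ensure entries always intercept */
            }
        }
        return NULL;
    }

    int
    main(void)
    {
        const struct catch_entry tbl[] = {
            { 4, 10, CATCH_RESCUE, 12 },
            { 2, 14, CATCH_ENSURE, 15 },
        };
        const struct catch_entry *e = lookup(tbl, 2, 6, CATCH_RESCUE);
        if (e) printf("handler found, continue at pc=%lu\n", e->cont);
        return 0;
    }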
return TRUE; + if (idp) *idp = me->def->original_id; + if (called_idp) *called_idp = me->called_id; + if (klassp) *klassp = me->owner; + return TRUE; } else { - return FALSE; + return FALSE; } } @@ -2581,7 +2855,7 @@ rb_frame_method_id_and_class(ID *idp, VALUE *klassp) VALUE rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, - VALUE block_handler, VALUE filename) + VALUE block_handler, VALUE filename) { rb_execution_context_t *ec = GET_EC(); const rb_control_frame_t *reg_cfp = ec->cfp; @@ -2589,9 +2863,9 @@ rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, VALUE val; vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, - recv, block_handler, - (VALUE)vm_cref_new_toplevel(ec), /* cref or me */ - 0, reg_cfp->sp, 0, 0); + recv, block_handler, + (VALUE)vm_cref_new_toplevel(ec), /* cref or me */ + 0, reg_cfp->sp, 0, 0); val = (*func)(arg); @@ -2607,6 +2881,7 @@ rb_vm_update_references(void *ptr) if (ptr) { rb_vm_t *vm = ptr; + rb_gc_update_tbl_refs(vm->ci_table); rb_gc_update_tbl_refs(vm->frozen_strings); vm->mark_object_ary = rb_gc_location(vm->mark_object_ary); vm->load_path = rb_gc_location(vm->load_path); @@ -2620,11 +2895,14 @@ rb_vm_update_references(void *ptr) vm->loaded_features = rb_gc_location(vm->loaded_features); vm->loaded_features_snapshot = rb_gc_location(vm->loaded_features_snapshot); vm->loaded_features_realpaths = rb_gc_location(vm->loaded_features_realpaths); + vm->loaded_features_realpath_map = rb_gc_location(vm->loaded_features_realpath_map); vm->top_self = rb_gc_location(vm->top_self); vm->orig_progname = rb_gc_location(vm->orig_progname); rb_gc_update_tbl_refs(vm->overloaded_cme_table); + rb_gc_update_values(RUBY_NSIG, vm->trap_list.cmd); + if (vm->coverages) { vm->coverages = rb_gc_location(vm->coverages); vm->me2counter = rb_gc_location(vm->me2counter); @@ -2649,8 +2927,8 @@ rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx) if (ec->vm_stack) { VALUE *p = ec->vm_stack; VALUE *sp = ec->cfp->sp; - while (p <= sp) { - if (!rb_special_const_p(*p)) { + while (p < sp) { + if (!RB_SPECIAL_CONST_P(*p)) { cb(*p, ctx); } p++; @@ -2669,58 +2947,46 @@ vm_mark_negative_cme(VALUE val, void *dmy) return ID_TABLE_CONTINUE; } +void rb_thread_sched_mark_zombies(rb_vm_t *vm); + void rb_vm_mark(void *ptr) { RUBY_MARK_ENTER("vm"); RUBY_GC_INFO("-------------------------------------------------\n"); if (ptr) { - rb_vm_t *vm = ptr; + rb_vm_t *vm = ptr; rb_ractor_t *r = 0; - long i, len; - const VALUE *obj_ary; + long i; - ccan_list_for_each(&vm->ractor.set, r, vmlr_node) { + ccan_list_for_each(&vm->ractor.set, r, vmlr_node) { // ractor.set only contains blocking or running ractors VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) || rb_ractor_status_p(r, ractor_running)); rb_gc_mark(rb_ractor_self(r)); - } - - rb_gc_mark_movable(vm->mark_object_ary); + } - len = RARRAY_LEN(vm->mark_object_ary); - obj_ary = RARRAY_CONST_PTR(vm->mark_object_ary); - for (i=0; i < len; i++) { - const VALUE *ptr; - long j, jlen; - - rb_gc_mark(*obj_ary); - jlen = RARRAY_LEN(*obj_ary); - ptr = RARRAY_CONST_PTR(*obj_ary); - for (j=0; j < jlen; j++) { - rb_gc_mark(*ptr++); - } - obj_ary++; + for (struct global_object_list *list = vm->global_object_list; list; list = list->next) { + rb_gc_mark_maybe(*list->varptr); } + rb_gc_mark_movable(vm->mark_object_ary); rb_gc_mark_movable(vm->load_path); rb_gc_mark_movable(vm->load_path_snapshot); - RUBY_MARK_MOVABLE_UNLESS_NULL(vm->load_path_check_cache); + rb_gc_mark_movable(vm->load_path_check_cache); 
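rb_vm_update_references() above is the compaction counterpart of rb_vm_mark(): every VALUE the VM struct holds is refreshed through rb_gc_location() after the GC may have moved objects. The following is a toy forwarding-pointer illustration of that update pass; the data structures are entirely hypothetical and only mimic the shape of the real code.

    #include <stdio.h>

    struct obj { int payload; struct obj *forwarded; /* NULL: not moved */ };

    static struct obj *
    gc_location(struct obj *o)          /* role of rb_gc_location() */
    {
        return o->forwarded ? o->forwarded : o;
    }

    struct vm_like { struct obj *top_self; struct obj *load_path; };

    static void
    update_references(struct vm_like *vm)
    {
        vm->top_self  = gc_location(vm->top_self);
        vm->load_path = gc_location(vm->load_path);
    }

    int
    main(void)
    {
        struct obj a = { 1, NULL }, b = { 2, NULL }, a2 = { 1, NULL };
        struct vm_like vm = { &a, &b };

        a.forwarded = &a2;              /* compaction moved `a` */
        update_references(&vm);
        printf("%d %d\n", vm.top_self->payload, vm.load_path->payload);
        return 0;
    }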
rb_gc_mark_movable(vm->expanded_load_path); rb_gc_mark_movable(vm->loaded_features); rb_gc_mark_movable(vm->loaded_features_snapshot); rb_gc_mark_movable(vm->loaded_features_realpaths); + rb_gc_mark_movable(vm->loaded_features_realpath_map); rb_gc_mark_movable(vm->top_self); rb_gc_mark_movable(vm->orig_progname); - RUBY_MARK_MOVABLE_UNLESS_NULL(vm->coverages); - RUBY_MARK_MOVABLE_UNLESS_NULL(vm->me2counter); - /* Prevent classes from moving */ - rb_mark_tbl(vm->defined_module_hash); + rb_gc_mark_movable(vm->coverages); + rb_gc_mark_movable(vm->me2counter); - if (vm->loading_table) { - rb_mark_tbl(vm->loading_table); - } + if (vm->loading_table) { + rb_mark_tbl(vm->loading_table); + } rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd); @@ -2739,7 +3005,8 @@ rb_vm_mark(void *ptr) } } - mjit_mark(); + rb_thread_sched_mark_zombies(vm); + rb_rjit_mark(); } RUBY_MARK_LEAVE("vm"); @@ -2753,17 +3020,7 @@ rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls, VALUE exc = rb_exc_new3(cls, rb_obj_freeze(mesg)); OBJ_FREEZE(exc); ((VALUE *)vm->special_exceptions)[sp] = exc; - rb_gc_register_mark_object(exc); -} - -int -rb_vm_add_root_module(VALUE module) -{ - rb_vm_t *vm = GET_VM(); - - st_insert(vm->defined_module_hash, (st_data_t)module, (st_data_t)module); - - return TRUE; + rb_vm_register_global_object(exc); } static int @@ -2773,48 +3030,117 @@ free_loading_table_entry(st_data_t key, st_data_t value, st_data_t arg) return ST_DELETE; } +void rb_free_loaded_features_index(rb_vm_t *vm); +void rb_objspace_free_objects(void *objspace); + int ruby_vm_destruct(rb_vm_t *vm) { RUBY_FREE_ENTER("vm"); if (vm) { - rb_thread_t *th = vm->ractor.main_thread; - struct rb_objspace *objspace = vm->objspace; - vm->ractor.main_thread = NULL; - - if (th) { - rb_fiber_reset_root_local_storage(th); - thread_free(th); - } - rb_vm_living_threads_init(vm); - ruby_vm_run_at_exit_hooks(vm); - if (vm->loading_table) { - st_foreach(vm->loading_table, free_loading_table_entry, 0); - st_free_table(vm->loading_table); - vm->loading_table = 0; - } - if (vm->frozen_strings) { - st_free_table(vm->frozen_strings); - vm->frozen_strings = 0; - } - RB_ALTSTACK_FREE(vm->main_altstack); - if (objspace) { - rb_objspace_free(objspace); - } - rb_native_mutex_destroy(&vm->waitpid_lock); + rb_thread_t *th = vm->ractor.main_thread; + VALUE *stack = th->ec->vm_stack; + if (rb_free_at_exit) { + rb_free_encoded_insn_data(); + rb_free_global_enc_table(); + rb_free_loaded_builtin_table(); + + rb_free_shared_fiber_pool(); + rb_free_static_symid_str(); + rb_free_transcoder_table(); + rb_free_vm_opt_tables(); + rb_free_warning(); + rb_free_rb_global_tbl(); + rb_free_loaded_features_index(vm); + + rb_id_table_free(vm->negative_cme_table); + st_free_table(vm->overloaded_cme_table); + + rb_id_table_free(RCLASS(rb_mRubyVMFrozenCore)->m_tbl); + + rb_shape_t *cursor = rb_shape_get_root_shape(); + rb_shape_t *end = rb_shape_get_shape_by_id(GET_SHAPE_TREE()->next_shape_id); + while (cursor < end) { + // 0x1 == SINGLE_CHILD_P + if (cursor->edges && !(((uintptr_t)cursor->edges) & 0x1)) + rb_id_table_free(cursor->edges); + cursor += 1; + } + + xfree(GET_SHAPE_TREE()); + + st_free_table(vm->static_ext_inits); + st_free_table(vm->ensure_rollback_table); + + rb_vm_postponed_job_free(); + + rb_id_table_free(vm->constant_cache); + + if (th) { + xfree(th->nt); + th->nt = NULL; + } + +#ifndef HAVE_SETPROCTITLE + ruby_free_proctitle(); +#endif + } + else { + if (th) { + rb_fiber_reset_root_local_storage(th); + thread_free(th); + } + } + + struct 
rb_objspace *objspace = vm->objspace; + + rb_vm_living_threads_init(vm); + ruby_vm_run_at_exit_hooks(vm); + if (vm->loading_table) { + st_foreach(vm->loading_table, free_loading_table_entry, 0); + st_free_table(vm->loading_table); + vm->loading_table = 0; + } + if (vm->ci_table) { + st_free_table(vm->ci_table); + vm->ci_table = NULL; + } + if (vm->frozen_strings) { + st_free_table(vm->frozen_strings); + vm->frozen_strings = 0; + } + RB_ALTSTACK_FREE(vm->main_altstack); + + struct global_object_list *next; + for (struct global_object_list *list = vm->global_object_list; list; list = next) { + next = list->next; + xfree(list); + } + + if (objspace) { + if (rb_free_at_exit) { + rb_objspace_free_objects(objspace); + rb_free_generic_iv_tbl_(); + rb_free_default_rand_key(); + if (th && vm->fork_gen == 0) { + /* If we have forked, main_thread may not be the initial thread */ + xfree(stack); + ruby_mimfree(th); + } + } + rb_objspace_free(objspace); + } rb_native_mutex_destroy(&vm->workqueue_lock); - /* after freeing objspace, you *can't* use ruby_xfree() */ - ruby_mimfree(vm); - ruby_current_vm_ptr = NULL; + /* after freeing objspace, you *can't* use ruby_xfree() */ + ruby_mimfree(vm); + ruby_current_vm_ptr = NULL; } RUBY_FREE_LEAVE("vm"); return 0; } -size_t rb_vm_memsize_waiting_list(struct ccan_list_head *waiting_list); // process.c size_t rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds); // thread.c -size_t rb_vm_memsize_postponed_job_buffer(void); // vm_trace.c size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c // Used for VM memsize reporting. Returns the size of the at_exit list by @@ -2869,21 +3195,20 @@ vm_memsize(const void *ptr) return ( sizeof(rb_vm_t) + - rb_vm_memsize_waiting_list(&vm->waiting_pids) + - rb_vm_memsize_waiting_list(&vm->waiting_grps) + rb_vm_memsize_waiting_fds(&vm->waiting_fds) + rb_st_memsize(vm->loaded_features_index) + rb_st_memsize(vm->loading_table) + rb_st_memsize(vm->ensure_rollback_table) + - rb_vm_memsize_postponed_job_buffer() + + rb_vm_memsize_postponed_job_queue() + rb_vm_memsize_workqueue(&vm->workqueue) + - rb_st_memsize(vm->defined_module_hash) + vm_memsize_at_exit_list(vm->at_exit) + + rb_st_memsize(vm->ci_table) + rb_st_memsize(vm->frozen_strings) + vm_memsize_builtin_function_table(vm->builtin_function_table) + rb_id_table_memsize(vm->negative_cme_table) + rb_st_memsize(vm->overloaded_cme_table) + - vm_memsize_constant_cache() + vm_memsize_constant_cache() + + GET_SHAPE_TREE()->cache_size * sizeof(redblack_node_t) ); // TODO @@ -2920,11 +3245,11 @@ get_param(const char *name, size_t default_value, size_t min_value) const char *envval; size_t result = default_value; if ((envval = getenv(name)) != 0) { - long val = atol(envval); - if (val < (long)min_value) { - val = (long)min_value; - } - result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN); + long val = atol(envval); + if (val < (long)min_value) { + val = (long)min_value; + } + result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN); } if (0) ruby_debug_printf("%s: %"PRIuSIZE"\n", name, result); /* debug print */ @@ -2940,7 +3265,7 @@ check_machine_stack_size(size_t *sizep) #ifdef PTHREAD_STACK_MIN if (size < (size_t)PTHREAD_STACK_MIN) { - *sizep = (size_t)PTHREAD_STACK_MIN * 2; + *sizep = (size_t)PTHREAD_STACK_MIN * 2; } #endif } @@ -2950,23 +3275,23 @@ vm_default_params_setup(rb_vm_t *vm) { vm->default_params.thread_vm_stack_size = get_param("RUBY_THREAD_VM_STACK_SIZE", - 
RUBY_VM_THREAD_VM_STACK_SIZE, - RUBY_VM_THREAD_VM_STACK_SIZE_MIN); + RUBY_VM_THREAD_VM_STACK_SIZE, + RUBY_VM_THREAD_VM_STACK_SIZE_MIN); vm->default_params.thread_machine_stack_size = get_param("RUBY_THREAD_MACHINE_STACK_SIZE", - RUBY_VM_THREAD_MACHINE_STACK_SIZE, - RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN); + RUBY_VM_THREAD_MACHINE_STACK_SIZE, + RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN); vm->default_params.fiber_vm_stack_size = get_param("RUBY_FIBER_VM_STACK_SIZE", - RUBY_VM_FIBER_VM_STACK_SIZE, - RUBY_VM_FIBER_VM_STACK_SIZE_MIN); + RUBY_VM_FIBER_VM_STACK_SIZE, + RUBY_VM_FIBER_VM_STACK_SIZE_MIN); vm->default_params.fiber_machine_stack_size = get_param("RUBY_FIBER_MACHINE_STACK_SIZE", - RUBY_VM_FIBER_MACHINE_STACK_SIZE, - RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN); + RUBY_VM_FIBER_MACHINE_STACK_SIZE, + RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN); /* environment dependent check */ check_machine_stack_size(&vm->default_params.thread_machine_stack_size); @@ -2976,7 +3301,6 @@ vm_default_params_setup(rb_vm_t *vm) static void vm_init2(rb_vm_t *vm) { - MEMZERO(vm, rb_vm_t, 1); rb_vm_living_threads_init(vm); vm->thread_report_on_exception = 1; vm->src_encoding_index = -1; @@ -2985,7 +3309,7 @@ vm_init2(rb_vm_t *vm) } void -rb_execution_context_update(const rb_execution_context_t *ec) +rb_execution_context_update(rb_execution_context_t *ec) { /* update VM stack */ if (ec->vm_stack) { @@ -3025,6 +3349,8 @@ rb_execution_context_update(const rb_execution_context_t *ec) cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); } } + + ec->storage = rb_gc_location(ec->storage); } static enum rb_id_table_iterator_result @@ -3040,55 +3366,57 @@ rb_execution_context_mark(const rb_execution_context_t *ec) /* mark VM stack */ if (ec->vm_stack) { VM_ASSERT(ec->cfp); - VALUE *p = ec->vm_stack; - VALUE *sp = ec->cfp->sp; - rb_control_frame_t *cfp = ec->cfp; - rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size); + VALUE *p = ec->vm_stack; + VALUE *sp = ec->cfp->sp; + rb_control_frame_t *cfp = ec->cfp; + rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size); VM_ASSERT(sp == ec->cfp->sp); rb_gc_mark_vm_stack_values((long)(sp - p), p); - while (cfp != limit_cfp) { - const VALUE *ep = cfp->ep; - VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep)); - rb_gc_mark_movable(cfp->self); - rb_gc_mark_movable((VALUE)cfp->iseq); - rb_gc_mark_movable((VALUE)cfp->block_code); + while (cfp != limit_cfp) { + const VALUE *ep = cfp->ep; + VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep)); - if (!VM_ENV_LOCAL_P(ep)) { - const VALUE *prev_ep = VM_ENV_PREV_EP(ep); - if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) { - rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]); - } + if (VM_FRAME_TYPE(cfp) != VM_FRAME_MAGIC_DUMMY) { + rb_gc_mark_movable(cfp->self); + rb_gc_mark_movable((VALUE)cfp->iseq); + rb_gc_mark_movable((VALUE)cfp->block_code); - if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) { - rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]); - rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]); - } + if (!VM_ENV_LOCAL_P(ep)) { + const VALUE *prev_ep = VM_ENV_PREV_EP(ep); + if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) { + rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]); + } + + if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) { + rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]); + rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]); + } + } } - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); - } + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + } } /* mark machine stack */ if (ec->machine.stack_start && 
ec->machine.stack_end && - ec != GET_EC() /* marked for current ec at the first stage of marking */ - ) { - rb_gc_mark_machine_stack(ec); - rb_gc_mark_locations((VALUE *)&ec->machine.regs, - (VALUE *)(&ec->machine.regs) + - sizeof(ec->machine.regs) / (sizeof(VALUE))); + ec != GET_EC() /* marked for current ec at the first stage of marking */ + ) { + rb_gc_mark_machine_context(ec); } - RUBY_MARK_UNLESS_NULL(ec->errinfo); - RUBY_MARK_UNLESS_NULL(ec->root_svar); + rb_gc_mark(ec->errinfo); + rb_gc_mark(ec->root_svar); if (ec->local_storage) { rb_id_table_foreach_values(ec->local_storage, mark_local_storage_i, NULL); } - RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash); - RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash_for_trace); - RUBY_MARK_UNLESS_NULL(ec->private_const_reference); + rb_gc_mark(ec->local_storage_recursive_hash); + rb_gc_mark(ec->local_storage_recursive_hash_for_trace); + rb_gc_mark(ec->private_const_reference); + + rb_gc_mark_movable(ec->storage); } void rb_fiber_mark_self(rb_fiber_t *fib); @@ -3119,8 +3447,8 @@ thread_mark(void *ptr) switch (th->invoke_type) { case thread_invoke_type_proc: case thread_invoke_type_ractor_proc: - RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.proc); - RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.args); + rb_gc_mark(th->invoke_arg.proc.proc); + rb_gc_mark(th->invoke_arg.proc.args); break; case thread_invoke_type_func: rb_gc_mark_maybe((VALUE)th->invoke_arg.func.arg); @@ -3130,47 +3458,53 @@ thread_mark(void *ptr) } rb_gc_mark(rb_ractor_self(th->ractor)); - RUBY_MARK_UNLESS_NULL(th->thgroup); - RUBY_MARK_UNLESS_NULL(th->value); - RUBY_MARK_UNLESS_NULL(th->pending_interrupt_queue); - RUBY_MARK_UNLESS_NULL(th->pending_interrupt_mask_stack); - RUBY_MARK_UNLESS_NULL(th->top_self); - RUBY_MARK_UNLESS_NULL(th->top_wrapper); + rb_gc_mark(th->thgroup); + rb_gc_mark(th->value); + rb_gc_mark(th->pending_interrupt_queue); + rb_gc_mark(th->pending_interrupt_mask_stack); + rb_gc_mark(th->top_self); + rb_gc_mark(th->top_wrapper); if (th->root_fiber) rb_fiber_mark_self(th->root_fiber); - /* Ensure EC stack objects are pinned */ - rb_execution_context_mark(th->ec); - RUBY_MARK_UNLESS_NULL(th->stat_insn_usage); - RUBY_MARK_UNLESS_NULL(th->last_status); - RUBY_MARK_UNLESS_NULL(th->locking_mutex); - RUBY_MARK_UNLESS_NULL(th->name); + RUBY_ASSERT(th->ec == rb_fiberptr_get_ec(th->ec->fiber_ptr)); + rb_gc_mark(th->stat_insn_usage); + rb_gc_mark(th->last_status); + rb_gc_mark(th->locking_mutex); + rb_gc_mark(th->name); - RUBY_MARK_UNLESS_NULL(th->scheduler); + rb_gc_mark(th->scheduler); RUBY_MARK_LEAVE("thread"); } +void rb_threadptr_sched_free(rb_thread_t *th); // thread_*.c + static void thread_free(void *ptr) { rb_thread_t *th = ptr; RUBY_FREE_ENTER("thread"); + rb_threadptr_sched_free(th); + if (th->locking_mutex != Qfalse) { - rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex); + rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex); } if (th->keeping_mutexes != NULL) { - rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes); + rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes); } + ruby_xfree(th->specific_storage); + rb_threadptr_root_fiber_release(th); if (th->vm && th->vm->ractor.main_thread == th) { - RUBY_GC_INFO("MRI main thread\n"); + RUBY_GC_INFO("MRI main thread\n"); } else { - ruby_xfree(th->nt); // TODO - ruby_xfree(th); + // ruby_xfree(th->nt); + // TODO: MN 
system collect nt, but without MN system it should be freed here. + ruby_xfree(th); } RUBY_FREE_LEAVE("thread"); @@ -3183,10 +3517,10 @@ thread_memsize(const void *ptr) size_t size = sizeof(rb_thread_t); if (!th->root_fiber) { - size += th->ec->vm_stack_size * sizeof(VALUE); + size += th->ec->vm_stack_size * sizeof(VALUE); } if (th->ec->local_storage) { - size += rb_id_table_memsize(th->ec->local_storage); + size += rb_id_table_memsize(th->ec->local_storage); } return size; } @@ -3195,9 +3529,9 @@ thread_memsize(const void *ptr) const rb_data_type_t ruby_threadptr_data_type = { "VM/thread", { - thread_mark, - thread_free, - thread_memsize, + thread_mark, + thread_free, + thread_memsize, thread_compact, }, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY @@ -3228,6 +3562,10 @@ rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size) { rb_ec_set_vm_stack(ec, stack, size); +#if VM_CHECK_MODE > 0 + MEMZERO(stack, VALUE, size); // malloc memory could have the VM canary in it +#endif + ec->cfp = (void *)(ec->vm_stack + ec->vm_stack_size); vm_push_frame(ec, @@ -3249,11 +3587,9 @@ rb_ec_clear_vm_stack(rb_execution_context_t *ec) } static void -th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm, rb_ractor_t *r) +th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm) { th->self = self; - th->vm = vm; - th->ractor = r; rb_threadptr_root_fiber_setup(th); @@ -3277,21 +3613,26 @@ th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm, rb_ractor_t *r) th->top_self = vm->top_self; // 0 while self == 0 th->value = Qundef; -#if defined(NON_SCALAR_THREAD_ID) && !defined(__wasm__) && !defined(__EMSCRIPTEN__) - th->nt->thread_id_string[0] = '\0'; -#endif - th->ec->errinfo = Qnil; th->ec->root_svar = Qfalse; th->ec->local_storage_recursive_hash = Qnil; th->ec->local_storage_recursive_hash_for_trace = Qnil; + th->ec->storage = Qnil; + #if OPT_CALL_THREADED_CODE th->retval = Qundef; #endif th->name = Qnil; th->report_on_exception = vm->thread_report_on_exception; th->ext_config.ractor_safe = true; + +#if USE_RUBY_DEBUG_LOG + static rb_atomic_t thread_serial = 1; + th->serial = RUBY_ATOMIC_FETCH_ADD(thread_serial, 1); + + RUBY_DEBUG_LOG("th:%u", th->serial); +#endif } VALUE @@ -3299,7 +3640,8 @@ rb_thread_alloc(VALUE klass) { VALUE self = thread_alloc(klass); rb_thread_t *target_th = rb_thread_ptr(self); - th_init(target_th, self, GET_VM(), GET_RACTOR()); + target_th->ractor = GET_RACTOR(); + th_init(target_th, self, target_th->vm = GET_VM()); return self; } @@ -3316,7 +3658,7 @@ static VALUE m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2) { REWIND_CFP({ - rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2)); + rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2)); }); return Qnil; } @@ -3325,7 +3667,7 @@ static VALUE m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2) { REWIND_CFP({ - rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2)); + rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2)); }); return Qnil; } @@ -3335,8 +3677,8 @@ m_core_undef_method(VALUE self, VALUE cbase, VALUE sym) { REWIND_CFP({ ID mid = SYM2ID(sym); - rb_undef(cbase, mid); - rb_clear_method_cache(self, mid); + rb_undef(cbase, mid); + rb_clear_method_cache(self, mid); }); return Qnil; } @@ -3379,7 +3721,9 @@ kwmerge_i(VALUE key, VALUE value, VALUE hash) static VALUE m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw) { - REWIND_CFP(hash = core_hash_merge_kwd(hash, kw)); + if (!NIL_P(kw)) { + REWIND_CFP(hash = core_hash_merge_kwd(hash, kw)); + } return hash; } @@ -3408,36 +3752,6 @@ core_hash_merge_kwd(VALUE hash, VALUE kw) return hash; } 
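The thread type above follows the standard TypedData pattern: an rb_data_type_t bundling mark, free, memsize, and compact callbacks. A minimal example of the same pattern against the public C API is sketched below; the Counter class and its fields are invented, only the callback wiring mirrors ruby_threadptr_data_type.

    #include "ruby.h"

    struct counter { long hits; VALUE label; };

    static void
    counter_mark(void *ptr)
    {
        struct counter *c = ptr;
        rb_gc_mark_movable(c->label);   /* allow compaction to move it */
    }

    static void
    counter_compact(void *ptr)
    {
        struct counter *c = ptr;
        c->label = rb_gc_location(c->label);
    }

    static void
    counter_free(void *ptr) { xfree(ptr); }

    static size_t
    counter_memsize(const void *ptr) { return sizeof(struct counter); }

    static const rb_data_type_t counter_type = {
        "Demo/counter",
        { counter_mark, counter_free, counter_memsize, counter_compact, },
        0, 0, RUBY_TYPED_FREE_IMMEDIATELY
    };

    static VALUE
    counter_alloc(VALUE klass)
    {
        struct counter *c;
        VALUE obj = TypedData_Make_Struct(klass, struct counter,
                                          &counter_type, c);
        c->hits = 0;
        c->label = Qnil;
        return obj;
    }

    void
    Init_counter(void)
    {
        VALUE klass = rb_define_class("Counter", rb_cObject);
        rb_define_alloc_func(klass, counter_alloc);
    }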
-/* Returns true if JIT is enabled */ -static VALUE -mjit_enabled_p(VALUE _) -{ - return RBOOL(mjit_enabled); -} - -static VALUE -mjit_pause_m(int argc, VALUE *argv, RB_UNUSED_VAR(VALUE self)) -{ - VALUE options = Qnil; - VALUE wait = Qtrue; - rb_scan_args(argc, argv, "0:", &options); - - if (!NIL_P(options)) { - static ID keyword_ids[1]; - if (!keyword_ids[0]) - keyword_ids[0] = rb_intern("wait"); - rb_get_kwargs(options, keyword_ids, 0, 1, &wait); - } - - return mjit_pause(RTEST(wait)); -} - -static VALUE -mjit_resume_m(VALUE _) -{ - return mjit_resume(); -} - extern VALUE *rb_gc_stack_start; extern size_t rb_gc_stack_maxsize; @@ -3447,7 +3761,7 @@ extern size_t rb_gc_stack_maxsize; static VALUE sdr(VALUE self) { - rb_vm_bugreport(NULL); + rb_vm_bugreport(NULL, stderr); return Qnil; } @@ -3465,11 +3779,11 @@ nsdr(VALUE self) int i; if (syms == 0) { - rb_memerror(); + rb_memerror(); } for (i=0; i<n; i++) { - rb_ary_push(ary, rb_str_new2(syms[i])); + rb_ary_push(ary, rb_str_new2(syms[i])); } free(syms); /* OK */ #endif @@ -3515,6 +3829,7 @@ f_sprintf(int c, const VALUE *v, VALUE _) return rb_f_sprintf(c, v); } +/* :nodoc: */ static VALUE vm_mtbl(VALUE self, VALUE obj, VALUE sym) { @@ -3522,6 +3837,7 @@ vm_mtbl(VALUE self, VALUE obj, VALUE sym) return Qnil; } +/* :nodoc: */ static VALUE vm_mtbl2(VALUE self, VALUE obj, VALUE sym) { @@ -3597,6 +3913,7 @@ Init_VM(void) /* FrozenCore (hidden) */ fcore = rb_class_new(rb_cBasicObject); rb_set_class_path(fcore, rb_cRubyVM, "FrozenCore"); + rb_vm_register_global_object(rb_class_path_cached(fcore)); RBASIC(fcore)->flags = T_ICLASS; klass = rb_singleton_class(fcore); rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3); @@ -3615,18 +3932,9 @@ Init_VM(void) rb_obj_freeze(fcore); RBASIC_CLEAR_CLASS(klass); rb_obj_freeze(klass); - rb_gc_register_mark_object(fcore); + rb_vm_register_global_object(fcore); rb_mRubyVMFrozenCore = fcore; - /* ::RubyVM::MJIT - * Provides access to the Method JIT compiler of MRI. - * Of course, this module is MRI specific. - */ - VALUE mjit = rb_define_module_under(rb_cRubyVM, "MJIT"); - rb_define_singleton_method(mjit, "enabled?", mjit_enabled_p, 0); - rb_define_singleton_method(mjit, "pause", mjit_pause_m, -1); - rb_define_singleton_method(mjit, "resume", mjit_resume_m, 0); - /* * Document-class: Thread * @@ -3824,9 +4132,6 @@ Init_VM(void) rb_ary_push(opts, rb_str_new2("call threaded code")); #endif -#if OPT_STACK_CACHING - rb_ary_push(opts, rb_str_new2("stack caching")); -#endif #if OPT_OPERANDS_UNIFICATION rb_ary_push(opts, rb_str_new2("operands unification")); #endif @@ -3836,9 +4141,6 @@ Init_VM(void) #if OPT_INLINE_METHOD_CACHE rb_ary_push(opts, rb_str_new2("inline method cache")); #endif -#if OPT_BLOCKINLINING - rb_ary_push(opts, rb_str_new2("block inlining")); -#endif /* ::RubyVM::INSTRUCTION_NAMES * A list of bytecode instruction names in MRI. 
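The deleted RubyVM::MJIT.pause binding above used the rb_scan_args "0:" format (zero positional arguments plus a trailing keyword hash) together with rb_get_kwargs to accept a single :wait keyword. A sketch of that idiom on an invented Demo module follows; note that rb_get_kwargs reports an omitted optional keyword as Qundef, so the default is restored explicitly.

    #include "ruby.h"

    /* Hypothetical Demo.pause(wait: true), same parsing pattern as the
     * removed mjit_pause_m binding. */
    static VALUE
    demo_pause(int argc, VALUE *argv, VALUE self)
    {
        VALUE options = Qnil;
        VALUE wait = Qtrue;                 /* default when :wait absent */

        rb_scan_args(argc, argv, "0:", &options);
        if (!NIL_P(options)) {
            static ID keyword_ids[1];
            if (!keyword_ids[0]) keyword_ids[0] = rb_intern("wait");
            rb_get_kwargs(options, keyword_ids, 0, 1, &wait);
            if (UNDEF_P(wait)) wait = Qtrue; /* :wait omitted from hash */
        }
        return RBOOL(RTEST(wait));
    }

    void
    Init_demo(void)
    {
        VALUE mod = rb_define_module("Demo");
        rb_define_singleton_method(mod, "pause", demo_pause, -1);
    }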
@@ -3869,39 +4171,41 @@ Init_VM(void) /* VM bootstrap: phase 2 */ { - rb_vm_t *vm = ruby_current_vm_ptr; - rb_thread_t *th = GET_THREAD(); - VALUE filename = rb_fstring_lit("<main>"); - const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP); + rb_vm_t *vm = ruby_current_vm_ptr; + rb_thread_t *th = GET_THREAD(); + VALUE filename = rb_fstring_lit("<main>"); + const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP); // Ractor setup rb_ractor_main_setup(vm, th->ractor, th); - /* create vm object */ - vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm); + /* create vm object */ + vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm); - /* create main thread */ + /* create main thread */ th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th); - vm->ractor.main_thread = th; + vm->ractor.main_thread = th; vm->ractor.main_ractor = th->ractor; - th->vm = vm; - th->top_wrapper = 0; - th->top_self = rb_vm_top_self(); + th->vm = vm; + th->top_wrapper = 0; + th->top_self = rb_vm_top_self(); - rb_gc_register_mark_object((VALUE)iseq); - th->ec->cfp->iseq = iseq; + rb_vm_register_global_object((VALUE)iseq); + th->ec->cfp->iseq = iseq; th->ec->cfp->pc = ISEQ_BODY(iseq)->iseq_encoded; - th->ec->cfp->self = th->top_self; + th->ec->cfp->self = th->top_self; - VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME); - VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE, FALSE)); + VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME); + VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE, FALSE)); - /* - * The Binding of the top level scope - */ - rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new()); + /* + * The Binding of the top level scope + */ + rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new()); +#ifdef _WIN32 rb_objspace_gc_enable(vm->objspace); +#endif } vm_init_redefined_flag(); @@ -3909,7 +4213,7 @@ Init_VM(void) rb_add_method_optimized(rb_singleton_class(rb_block_param_proxy), idCall, OPTIMIZED_METHOD_TYPE_BLOCK_CALL, 0, METHOD_VISI_PUBLIC); rb_obj_freeze(rb_block_param_proxy); - rb_gc_register_mark_object(rb_block_param_proxy); + rb_vm_register_global_object(rb_block_param_proxy); /* vm_backtrace.c */ Init_vm_backtrace(); @@ -3922,7 +4226,8 @@ rb_vm_set_progname(VALUE filename) rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size); --cfp; - rb_iseq_pathobj_set(cfp->iseq, rb_str_dup(filename), rb_iseq_realpath(cfp->iseq)); + filename = rb_str_new_frozen(filename); + rb_iseq_pathobj_set(cfp->iseq, filename, rb_iseq_realpath(cfp->iseq)); } extern const struct st_hash_type rb_fstring_hash_type; @@ -3931,35 +4236,171 @@ void Init_BareVM(void) { /* VM bootstrap: phase 1 */ - rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm)); - rb_thread_t * th = ruby_mimmalloc(sizeof(*th)); + rb_vm_t *vm = ruby_mimcalloc(1, sizeof(*vm)); + rb_thread_t *th = ruby_mimcalloc(1, sizeof(*th)); if (!vm || !th) { - fputs("[FATAL] failed to allocate memory\n", stderr); - exit(EXIT_FAILURE); + fputs("[FATAL] failed to allocate memory\n", stderr); + exit(EXIT_FAILURE); } // setup the VM - MEMZERO(th, rb_thread_t, 1); vm_init2(vm); - vm->objspace = rb_objspace_alloc(); + rb_vm_postponed_job_queue_init(vm); ruby_current_vm_ptr = vm; + rb_objspace_alloc(); vm->negative_cme_table = rb_id_table_create(16); vm->overloaded_cme_table = 
st_init_numtable(); vm->constant_cache = rb_id_table_create(0); + vm->unused_block_warning_table = st_init_numtable(); + + // TODO: remove before Ruby 3.4.0 release + const char *s = getenv("RUBY_TRY_UNUSED_BLOCK_WARNING_STRICT"); + if (s && strcmp(s, "1") == 0) { + vm->unused_block_warning_strict = true; + } // setup main thread th->nt = ZALLOC(struct rb_native_thread); + th->vm = vm; + th->ractor = vm->ractor.main_ractor = rb_ractor_main_alloc(); Init_native_thread(th); - th_init(th, 0, vm, vm->ractor.main_ractor = rb_ractor_main_alloc()); + rb_jit_cont_init(); + th_init(th, 0, vm); rb_ractor_set_current_ec(th->ractor, th->ec); - ruby_thread_init_stack(th); + /* n.b. native_main_thread_stack_top is set by the INIT_STACK macro */ + ruby_thread_init_stack(th, native_main_thread_stack_top); // setup ractor system rb_native_mutex_initialize(&vm->ractor.sync.lock); - rb_native_cond_initialize(&vm->ractor.sync.barrier_cond); rb_native_cond_initialize(&vm->ractor.sync.terminate_cond); + + vm_opt_method_def_table = st_init_numtable(); + vm_opt_mid_table = st_init_numtable(); + +#ifdef RUBY_THREAD_WIN32_H + rb_native_cond_initialize(&vm->ractor.sync.barrier_cond); +#endif +} + +void +ruby_init_stack(void *addr) +{ + native_main_thread_stack_top = addr; +} + +#ifndef _WIN32 +#include <unistd.h> +#include <sys/mman.h> +#endif + + +#ifndef MARK_OBJECT_ARY_BUCKET_SIZE +#define MARK_OBJECT_ARY_BUCKET_SIZE 1024 +#endif + +struct pin_array_list { + VALUE next; + long len; + VALUE *array; +}; + +static void +pin_array_list_mark(void *data) +{ + struct pin_array_list *array = (struct pin_array_list *)data; + rb_gc_mark_movable(array->next); + + rb_gc_mark_vm_stack_values(array->len, array->array); +} + +static void +pin_array_list_free(void *data) +{ + struct pin_array_list *array = (struct pin_array_list *)data; + xfree(array->array); +} + +static size_t +pin_array_list_memsize(const void *data) +{ + return sizeof(struct pin_array_list) + (MARK_OBJECT_ARY_BUCKET_SIZE * sizeof(VALUE)); +} + +static void +pin_array_list_update_references(void *data) +{ + struct pin_array_list *array = (struct pin_array_list *)data; + array->next = rb_gc_location(array->next); +} + +static const rb_data_type_t pin_array_list_type = { + .wrap_struct_name = "VM/pin_array_list", + .function = { + .dmark = pin_array_list_mark, + .dfree = pin_array_list_free, + .dsize = pin_array_list_memsize, + .dcompact = pin_array_list_update_references, + }, + .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE, +}; + +static VALUE +pin_array_list_new(VALUE next) +{ + struct pin_array_list *array_list; + VALUE obj = TypedData_Make_Struct(0, struct pin_array_list, &pin_array_list_type, array_list); + RB_OBJ_WRITE(obj, &array_list->next, next); + array_list->array = ALLOC_N(VALUE, MARK_OBJECT_ARY_BUCKET_SIZE); + return obj; +} + +static VALUE +pin_array_list_append(VALUE obj, VALUE item) +{ + struct pin_array_list *array_list; + TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list); + + if (array_list->len >= MARK_OBJECT_ARY_BUCKET_SIZE) { + obj = pin_array_list_new(obj); + TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list); + } + + RB_OBJ_WRITE(obj, &array_list->array[array_list->len], item); + array_list->len++; + return obj; +} + +void +rb_vm_register_global_object(VALUE obj) +{ + RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj)); + if (RB_SPECIAL_CONST_P(obj)) { + return; + } + + switch (RB_BUILTIN_TYPE(obj)) { + case T_CLASS: + case T_MODULE: + if 
(FL_TEST(obj, RCLASS_IS_ROOT)) { + return; + } + FL_SET(obj, RCLASS_IS_ROOT); + break; + default: + break; + } + RB_VM_LOCK_ENTER(); + { + VALUE list = GET_VM()->mark_object_ary; + VALUE head = pin_array_list_append(list, obj); + if (head != list) { + GET_VM()->mark_object_ary = head; + } + RB_GC_GUARD(obj); + } + RB_VM_LOCK_LEAVE(); } void @@ -3967,16 +4408,15 @@ Init_vm_objects(void) { rb_vm_t *vm = GET_VM(); - vm->defined_module_hash = st_init_numtable(); - /* initialize mark object array, hash */ - vm->mark_object_ary = rb_ary_tmp_new(128); + vm->mark_object_ary = pin_array_list_new(Qnil); vm->loading_table = st_init_strtable(); + vm->ci_table = st_init_table(&vm_ci_hashtype); vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000); } /* Stub for builtin function when not building YJIT units*/ -#if !YJIT_BUILD +#if !USE_YJIT void Init_builtin_yjit(void) {} #endif @@ -4011,6 +4451,14 @@ rb_ruby_verbose_ptr(void) return &cr->verbose; } +static bool prism; + +bool * +rb_ruby_prism_ptr(void) +{ + return &prism; +} + VALUE * rb_ruby_debug_ptr(void) { @@ -4018,10 +4466,18 @@ rb_ruby_debug_ptr(void) return &cr->debug; } +bool rb_free_at_exit = false; + +bool +ruby_free_at_exit_p(void) +{ + return rb_free_at_exit; +} + /* iseq.c */ VALUE rb_insn_operand_intern(const rb_iseq_t *iseq, - VALUE insn, int op_no, VALUE op, - int len, size_t pos, VALUE *pnop, VALUE child); + VALUE insn, int op_no, VALUE op, + int len, size_t pos, VALUE *pnop, VALUE child); st_table * rb_vm_fstring_table(void) @@ -4059,29 +4515,29 @@ vm_analysis_insn(int insn) CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM"); uh = rb_const_get(rb_cRubyVM, usage_hash); if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) { - ihash = rb_hash_new(); - HASH_ASET(uh, INT2FIX(insn), ihash); + ihash = rb_hash_new(); + HASH_ASET(uh, INT2FIX(insn), ihash); } if (NIL_P(cv = rb_hash_aref(ihash, INT2FIX(-1)))) { - cv = INT2FIX(0); + cv = INT2FIX(0); } HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1)); /* calc bigram */ if (prev_insn != -1) { - VALUE bi; - VALUE ary[2]; - VALUE cv; + VALUE bi; + VALUE ary[2]; + VALUE cv; - ary[0] = INT2FIX(prev_insn); - ary[1] = INT2FIX(insn); - bi = rb_ary_new4(2, &ary[0]); + ary[0] = INT2FIX(prev_insn); + ary[1] = INT2FIX(insn); + bi = rb_ary_new4(2, &ary[0]); - uh = rb_const_get(rb_cRubyVM, bigram_hash); - if (NIL_P(cv = rb_hash_aref(uh, bi))) { - cv = INT2FIX(0); - } - HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1)); + uh = rb_const_get(rb_cRubyVM, bigram_hash); + if (NIL_P(cv = rb_hash_aref(uh, bi))) { + cv = INT2FIX(0); + } + HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1)); } prev_insn = insn; } @@ -4101,19 +4557,19 @@ vm_analysis_operand(int insn, int n, VALUE op) uh = rb_const_get(rb_cRubyVM, usage_hash); if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) { - ihash = rb_hash_new(); - HASH_ASET(uh, INT2FIX(insn), ihash); + ihash = rb_hash_new(); + HASH_ASET(uh, INT2FIX(insn), ihash); } if (NIL_P(ophash = rb_hash_aref(ihash, INT2FIX(n)))) { - ophash = rb_hash_new(); - HASH_ASET(ihash, INT2FIX(n), ophash); + ophash = rb_hash_new(); + HASH_ASET(ihash, INT2FIX(n), ophash); } /* intern */ valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0); /* set count */ if (NIL_P(cv = rb_hash_aref(ophash, valstr))) { - cv = INT2FIX(0); + cv = INT2FIX(0); } HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1)); } @@ -4125,16 +4581,16 @@ vm_analysis_register(int reg, int isset) VALUE uh; VALUE valstr; static const char regstrs[][5] = { - "pc", /* 0 */ - "sp", /* 1 */ - "ep", /* 2 */ - 
"cfp", /* 3 */ - "self", /* 4 */ - "iseq", /* 5 */ + "pc", /* 0 */ + "sp", /* 1 */ + "ep", /* 2 */ + "cfp", /* 3 */ + "self", /* 4 */ + "iseq", /* 5 */ }; static const char getsetstr[][4] = { - "get", - "set", + "get", + "set", }; static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2]; @@ -4142,22 +4598,22 @@ vm_analysis_register(int reg, int isset) CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS"); if (syms[0] == 0) { - char buff[0x10]; - int i; - - for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) { - int j; - for (j = 0; j < 2; j++) { - snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]); - syms[i][j] = ID2SYM(rb_intern(buff)); - } - } + char buff[0x10]; + int i; + + for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) { + int j; + for (j = 0; j < 2; j++) { + snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]); + syms[i][j] = ID2SYM(rb_intern(buff)); + } + } } valstr = syms[reg][isset]; uh = rb_const_get(rb_cRubyVM, usage_hash); if (NIL_P(cv = rb_hash_aref(uh, valstr))) { - cv = INT2FIX(0); + cv = INT2FIX(0); } HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1)); } @@ -4220,68 +4676,65 @@ usage_analysis_register_stop(VALUE self) static VALUE usage_analysis_insn_running(VALUE self) { - return RBOOL(ruby_vm_collect_usage_func_insn != 0); + return RBOOL(ruby_vm_collect_usage_func_insn != 0); } /* :nodoc: */ static VALUE usage_analysis_operand_running(VALUE self) { - return RBOOL(ruby_vm_collect_usage_func_operand != 0); + return RBOOL(ruby_vm_collect_usage_func_operand != 0); } /* :nodoc: */ static VALUE usage_analysis_register_running(VALUE self) { - return RBOOL(ruby_vm_collect_usage_func_register != 0); + return RBOOL(ruby_vm_collect_usage_func_register != 0); } +static VALUE +usage_analysis_clear(VALUE self, ID usage_hash) +{ + VALUE uh; + uh = rb_const_get(self, usage_hash); + rb_hash_clear(uh); + + return Qtrue; +} + + /* :nodoc: */ static VALUE usage_analysis_insn_clear(VALUE self) { - ID usage_hash; - ID bigram_hash; - VALUE uh; - VALUE bh; - - CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN"); - CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM"); - uh = rb_const_get(rb_cRubyVM, usage_hash); - bh = rb_const_get(rb_cRubyVM, bigram_hash); - rb_hash_clear(uh); - rb_hash_clear(bh); + ID usage_hash; + ID bigram_hash; - return Qtrue; + CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN"); + CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM"); + usage_analysis_clear(rb_cRubyVM, usage_hash); + return usage_analysis_clear(rb_cRubyVM, bigram_hash); } /* :nodoc: */ static VALUE usage_analysis_operand_clear(VALUE self) { - ID usage_hash; - VALUE uh; - - CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN"); - uh = rb_const_get(rb_cRubyVM, usage_hash); - rb_hash_clear(uh); + ID usage_hash; - return Qtrue; + CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN"); + return usage_analysis_clear(self, usage_hash); } /* :nodoc: */ static VALUE usage_analysis_register_clear(VALUE self) { - ID usage_hash; - VALUE uh; + ID usage_hash; - CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS"); - uh = rb_const_get(rb_cRubyVM, usage_hash); - rb_hash_clear(uh); - - return Qtrue; + CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS"); + return usage_analysis_clear(self, usage_hash); } #else @@ -4298,10 +4751,10 @@ static void vm_collect_usage_insn(int insn) { if (RUBY_DTRACE_INSN_ENABLED()) { - RUBY_DTRACE_INSN(rb_insns_name(insn)); + RUBY_DTRACE_INSN(rb_insns_name(insn)); } if (ruby_vm_collect_usage_func_insn) - (*ruby_vm_collect_usage_func_insn)(insn); + (*ruby_vm_collect_usage_func_insn)(insn); } /* @param insn 
instruction number @@ -4312,15 +4765,15 @@ static void vm_collect_usage_operand(int insn, int n, VALUE op) { if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) { - VALUE valstr; + VALUE valstr; - valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0); + valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0); - RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn)); - RB_GC_GUARD(valstr); + RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn)); + RB_GC_GUARD(valstr); } if (ruby_vm_collect_usage_func_operand) - (*ruby_vm_collect_usage_func_operand)(insn, n, op); + (*ruby_vm_collect_usage_func_operand)(insn, n, op); } /* @param reg register id. see code of vm_analysis_register() */ @@ -4329,22 +4782,20 @@ static void vm_collect_usage_register(int reg, int isset) { if (ruby_vm_collect_usage_func_register) - (*ruby_vm_collect_usage_func_register)(reg, isset); + (*ruby_vm_collect_usage_func_register)(reg, isset); } #endif -MJIT_FUNC_EXPORTED const struct rb_callcache * +const struct rb_callcache * rb_vm_empty_cc(void) { return &vm_empty_cc; } -MJIT_FUNC_EXPORTED const struct rb_callcache * +const struct rb_callcache * rb_vm_empty_cc_for_super(void) { return &vm_empty_cc_for_super; } -#endif /* #ifndef MJIT_HEADER */ - #include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */ |
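Closing note: vm_collect_usage_insn() and vm_collect_usage_operand() above gate all work behind DTrace enabled-checks and nullable hook pointers, so the uninstrumented path costs a single branch and no argument preparation. A stand-alone sketch of that hook-pointer pattern, with invented names:

    #include <stdio.h>

    static void (*collect_insn_hook)(int insn);   /* NULL: disabled */

    static void
    vm_collect_usage_insn_demo(int insn)
    {
        if (collect_insn_hook)
            (*collect_insn_hook)(insn);           /* rare, instrumented path */
    }

    static void
    print_insn(int insn) { printf("insn %d\n", insn); }

    int
    main(void)
    {
        vm_collect_usage_insn_demo(1);            /* no hook: near-zero cost */
        collect_insn_hook = print_insn;           /* e.g. analysis started */
        vm_collect_usage_insn_demo(2);
        return 0;
    }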