Diffstat (limited to 'vm.c')
-rw-r--r--  vm.c  1746
1 file changed, 1227 insertions(+), 519 deletions(-)
diff --git a/vm.c b/vm.c
index d009a5f64a..2cae6779d9 100644
--- a/vm.c
+++ b/vm.c
@@ -11,25 +11,32 @@
#define vm_exec rb_vm_exec
#include "eval_intern.h"
-#include "gc.h"
#include "internal.h"
+#include "internal/box.h"
+#include "internal/class.h"
#include "internal/compile.h"
#include "internal/cont.h"
#include "internal/error.h"
+#include "internal/encoding.h"
#include "internal/eval.h"
+#include "internal/gc.h"
#include "internal/inits.h"
+#include "internal/missing.h"
#include "internal/object.h"
-#include "internal/parse.h"
#include "internal/proc.h"
#include "internal/re.h"
+#include "internal/ruby_parser.h"
#include "internal/symbol.h"
#include "internal/thread.h"
+#include "internal/transcode.h"
#include "internal/vm.h"
#include "internal/sanitizers.h"
#include "internal/variable.h"
#include "iseq.h"
-#include "mjit.h"
+#include "symbol.h" // This includes a macro for a more performant rb_id2sym.
#include "yjit.h"
+#include "insns.inc"
+#include "zjit.h"
#include "ruby/st.h"
#include "ruby/vm.h"
#include "vm_core.h"
@@ -39,32 +46,24 @@
#include "vm_insnhelper.h"
#include "ractor_core.h"
#include "vm_sync.h"
+#include "shape.h"
#include "builtin.h"
-#ifndef MJIT_HEADER
#include "probes.h"
-#else
-#include "probes.dmyh"
-#endif
#include "probes_helper.h"
#ifdef RUBY_ASSERT_CRITICAL_SECTION
int ruby_assert_critical_section_entered = 0;
#endif
+static void *native_main_thread_stack_top;
+
+bool ruby_vm_during_cleanup = false;
+
VALUE rb_str_concat_literals(size_t, const VALUE*);
-/* :FIXME: This #ifdef is because we build pch in case of mswin and
- * not in case of other situations. That distinction might change in
- * a future. We would better make it detectable in something better
- * than just _MSC_VER. */
-#ifdef _MSC_VER
-RUBY_FUNC_EXPORTED
-#else
-MJIT_FUNC_EXPORTED
-#endif
-VALUE vm_exec(rb_execution_context_t *, bool);
+VALUE vm_exec(rb_execution_context_t *);
extern const char *const rb_debug_counter_names[];
@@ -98,6 +97,81 @@ rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame
}
}
+#if VM_CHECK_MODE > 0
+// ruby_box_crashed defined in internal/box.h
+#define VM_BOX_CRASHED() {ruby_box_crashed = true;}
+#define VM_BOX_ASSERT(expr, msg) \
+ if (!(expr)) { ruby_box_crashed = true; rb_bug(msg); }
+#else
+#define VM_BOX_CRASHED() {}
+#define VM_BOX_ASSERT(expr, msg) ((void)0)
+#endif
+
+static const VALUE *
+VM_EP_RUBY_LEP(const rb_execution_context_t *ec, const rb_control_frame_t *current_cfp)
+{
+ // rb_vmdebug_box_env_dump_raw() simulates this function
+ const VALUE *ep = current_cfp->ep;
+ const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
+ const rb_control_frame_t *cfp = current_cfp;
+
+ if (VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_IFUNC)) {
+ ep = VM_EP_LEP(current_cfp->ep);
+ /**
+ * Returns the CFUNC frame only in this case.
+ *
+ * Usually a CFUNC frame doesn't represent the current box; it should operate
+ * in the caller's box. See the example:
+ *
+ * # in the main box
+ * module Kernel
+ *   def foo = "foo"
+ *   module_function :foo
+ * end
+ *
+ * In the case above, `module_function` is a method defined in the root box.
+ * If `module_function` operated in the root box, `Kernel#foo` would be invisible
+ * from it, causing NameError: undefined method `foo` for module `Kernel`.
+ *
+ * But in the case of an IFUNC (a block written in C), the IFUNC doesn't have its
+ * own box, and its local env frame will be a CFUNC frame.
+ * For example, `Enumerator#chunk` calls IFUNC blocks, implemented as the `chunk_i` function.
+ *
+ * [1].chunk{ it.even? }.each{ ... }
+ *
+ * Before calling the Ruby block `{ it.even? }`, `#chunk` calls `chunk_i` as an IFUNC
+ * to iterate over the array's members (much like `#each`).
+ * We expect `chunk_i` to behave as the implementation of `#chunk` intends,
+ * without any overridden definitions from boxes.
+ * So the definitions visible on IFUNC frames should be those of the caller CFUNC.
+ */
+ VM_ASSERT(VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CFUNC));
+ return ep;
+ }
+
+ while (VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CFUNC)) {
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+
+ VM_BOX_ASSERT(cfp, "CFUNC should have a valid previous control frame");
+ VM_BOX_ASSERT(cfp < eocfp, "CFUNC should have a valid caller frame");
+ if (!cfp || cfp >= eocfp) {
+ return NULL;
+ }
+
+ VM_BOX_ASSERT(cfp->ep, "CFUNC should have a valid caller frame with env");
+ ep = cfp->ep;
+ if (!ep) {
+ return NULL;
+ }
+ }
+
+ while (!VM_ENV_LOCAL_P(ep)) {
+ ep = VM_ENV_PREV_EP(ep);
+ }
+
+ return ep;
+}
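
A runnable version of the chunk example from the comment above (plain Ruby; `it` is the implicit block parameter, Ruby 3.4+):

    # `chunk` drives the C-level `chunk_i` IFUNC before each Ruby block call.
    p [1, 1, 2, 3].chunk { it.even? }.to_a
    # => [[false, [1, 1]], [true, [2]], [false, [3]]]
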
+
const VALUE *
rb_vm_ep_local_ep(const VALUE *ep)
{
@@ -121,7 +195,15 @@ PUREFUNC(static inline VALUE VM_CF_BLOCK_HANDLER(const rb_control_frame_t * cons
static inline VALUE
VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
{
- const VALUE *ep = VM_CF_LEP(cfp);
+ const VALUE *ep;
+ if (VM_ENV_BOXED_P(cfp->ep)) {
+ VM_ASSERT(VM_ENV_LOCAL_P(cfp->ep));
+ /* block_handler is never set for VM_FRAME_MAGIC_TOP or VM_FRAME_MAGIC_CLASS,
+ * and the specval is used for boxes (rb_box_t) in these cases.
+ */
+ return VM_BLOCK_HANDLER_NONE;
+ }
+ ep = VM_CF_LEP(cfp);
return VM_ENV_BLOCK_HANDLER(ep);
}
@@ -177,7 +259,7 @@ vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep)
if (!UNDEF_P(envval)) {
const rb_env_t *env = (const rb_env_t *)envval;
- VM_ASSERT(vm_assert_env(envval));
+ VM_ASSERT(imemo_type_p(envval, imemo_env));
VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
VM_ASSERT(env->ep == ep);
}
@@ -209,7 +291,7 @@ VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
{
rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
- VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 8 + VM_DEBUG_BP_CHECK ? 1 : 0);
+ VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 7 + VM_DEBUG_BP_CHECK ? 1 : 0);
return cfp;
}
@@ -234,16 +316,11 @@ vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_
{
VALUE refinements = Qnil;
int omod_shared = FALSE;
- rb_cref_t *cref;
/* scope */
- union {
- rb_scope_visibility_t visi;
- VALUE value;
- } scope_visi;
-
- scope_visi.visi.method_visi = visi;
- scope_visi.visi.module_func = module_func;
+ rb_scope_visibility_t scope_visi;
+ scope_visi.method_visi = visi;
+ scope_visi.module_func = module_func;
/* refinements */
if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why CREF_NEXT(cref) is 1? */) {
@@ -257,7 +334,10 @@ vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_
VM_ASSERT(singleton || klass);
- cref = (rb_cref_t *)rb_imemo_new(imemo_cref, klass, (VALUE)(use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref), scope_visi.value, refinements);
+ rb_cref_t *cref = SHAREABLE_IMEMO_NEW(rb_cref_t, imemo_cref, refinements);
+ cref->klass_or_self = klass;
+ cref->next = use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref;
+ *((rb_scope_visibility_t *)&cref->scope_visi) = scope_visi;
if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
if (omod_shared) CREF_OMOD_SHARED_SET(cref);
@@ -375,90 +455,215 @@ static void vm_collect_usage_register(int reg, int isset);
#endif
static VALUE vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp);
-extern VALUE rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
+static VALUE vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
const rb_callable_method_entry_t *me);
static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
-#if USE_MJIT || USE_YJIT
-# ifdef MJIT_HEADER
-NOINLINE(static COLDFUNC VALUE mjit_check_iseq(rb_execution_context_t *ec, const rb_iseq_t *iseq, struct rb_iseq_constant_body *body));
-# else
-static inline VALUE mjit_check_iseq(rb_execution_context_t *ec, const rb_iseq_t *iseq, struct rb_iseq_constant_body *body);
-# endif
-static VALUE
-mjit_check_iseq(rb_execution_context_t *ec, const rb_iseq_t *iseq, struct rb_iseq_constant_body *body)
-{
- uintptr_t mjit_state = (uintptr_t)(body->jit_func);
- ASSUME(MJIT_FUNC_STATE_P(mjit_state));
- switch ((enum rb_mjit_func_state)mjit_state) {
- case MJIT_FUNC_NOT_COMPILED:
- if (body->total_calls == mjit_opts.call_threshold) {
- rb_mjit_add_iseq_to_process(iseq);
- if (UNLIKELY(mjit_opts.wait && !MJIT_FUNC_STATE_P(body->jit_func))) {
- return body->jit_func(ec, ec->cfp);
- }
+#if USE_YJIT
+// Counter to serve as a proxy for execution time, total number of calls
+static uint64_t yjit_total_entry_hits = 0;
+
+// Number of calls used to estimate how hot an ISEQ is
+#define YJIT_CALL_COUNT_INTERV 20u
+
+/// Test whether we are ready to compile an ISEQ or not
+static inline bool
+rb_yjit_threshold_hit(const rb_iseq_t *iseq, uint64_t entry_calls)
+{
+ yjit_total_entry_hits += 1;
+
+ // Record the number of calls at the beginning of the interval
+ if (entry_calls + YJIT_CALL_COUNT_INTERV == rb_yjit_call_threshold) {
+ iseq->body->yjit_calls_at_interv = yjit_total_entry_hits;
+ }
+
+ // Try to estimate the total time taken (total number of calls) to reach 20 calls to this ISEQ
+ // This gives us a ratio of how hot/cold this ISEQ is
+ if (entry_calls == rb_yjit_call_threshold) {
+ // We expect threshold 1 to compile everything immediately
+ if (rb_yjit_call_threshold < YJIT_CALL_COUNT_INTERV) {
+ return true;
}
- break;
- case MJIT_FUNC_COMPILING:
- case MJIT_FUNC_FAILED:
- break;
+
+ uint64_t num_calls = yjit_total_entry_hits - iseq->body->yjit_calls_at_interv;
+
+ // Reject ISEQs that don't get called often enough
+ if (num_calls > rb_yjit_cold_threshold) {
+ rb_yjit_incr_counter("cold_iseq_entry");
+ return false;
+ }
+
+ return true;
}
- return Qundef;
+
+ return false;
}
+#else
+#define rb_yjit_threshold_hit(iseq, entry_calls) false
+#endif
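
A minimal Ruby sketch of the hotness heuristic above, not the C implementation (thresholds are placeholder values): at `threshold - 20` entry calls the global counter is snapshotted, and at the threshold the ISEQ is rejected as cold if too many unrelated entry calls happened in between.

    INTERVAL       = 20       # YJIT_CALL_COUNT_INTERV
    CALL_THRESHOLD = 30       # stands in for rb_yjit_call_threshold
    COLD_THRESHOLD = 200_000  # stands in for rb_yjit_cold_threshold

    $total_entry_hits = 0     # yjit_total_entry_hits

    def threshold_hit?(iseq, entry_calls)
      $total_entry_hits += 1
      # Snapshot the global counter at the beginning of the interval.
      iseq[:calls_at_interv] = $total_entry_hits if entry_calls + INTERVAL == CALL_THRESHOLD
      return false unless entry_calls == CALL_THRESHOLD
      return true if CALL_THRESHOLD < INTERVAL  # low thresholds compile immediately
      # Hot if few other entry calls elapsed while this ISEQ collected its 20 calls.
      $total_entry_hits - iseq[:calls_at_interv] <= COLD_THRESHOLD
    end
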
-// Try to execute the current iseq in ec. Use JIT code if it is ready.
-// If it is not, add ISEQ to the compilation queue and return Qundef for MJIT.
-// YJIT compiles on the thread running the iseq.
-static inline VALUE
-jit_exec(rb_execution_context_t *ec)
+#if USE_YJIT
+// Generate JIT code that supports the following kinds of ISEQ entries:
+// * The first ISEQ on vm_exec (e.g. <main>, or Ruby methods/blocks
+// called by a C method). The current frame has VM_FRAME_FLAG_FINISH.
+// The current vm_exec stops if JIT code returns a non-Qundef value.
+// * ISEQs called by the interpreter on vm_sendish (e.g. Ruby methods or
+// blocks called by a Ruby frame that isn't compiled or side-exited).
+// The current frame doesn't have VM_FRAME_FLAG_FINISH. The current
+// vm_exec does NOT stop whether JIT code returns Qundef or not.
+static inline rb_jit_func_t
+yjit_compile(rb_execution_context_t *ec)
+{
+ const rb_iseq_t *iseq = ec->cfp->iseq;
+ struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
+
+ // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
+ if (body->jit_entry == NULL) {
+ body->jit_entry_calls++;
+ if (rb_yjit_threshold_hit(iseq, body->jit_entry_calls)) {
+ rb_yjit_compile_iseq(iseq, ec, false);
+ }
+ }
+ return body->jit_entry;
+}
+#else
+# define yjit_compile(ec) ((rb_jit_func_t)0)
+#endif
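
For illustration, the entry threshold is observable from the command line (assuming a YJIT-enabled build):

    # Run with: ruby --yjit --yjit-call-threshold=2 example.rb
    # The second call pushes jit_entry_calls to the threshold, so
    # yjit_compile() invokes rb_yjit_compile_iseq for `hot`.
    def hot(a, b) = a + b
    10.times { hot(1, 2) }
    p RubyVM::YJIT.enabled?  # => true
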
+
+#if USE_ZJIT
+static inline rb_jit_func_t
+zjit_compile(rb_execution_context_t *ec)
{
- // Increment the ISEQ's call counter
const rb_iseq_t *iseq = ec->cfp->iseq;
struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
- bool yjit_enabled = rb_yjit_enabled_p();
- if (yjit_enabled || mjit_call_p) {
- body->total_calls++;
+
+ if (body->jit_entry == NULL) {
+ body->jit_entry_calls++;
+
+ // At profile-threshold, rewrite some of the YARV instructions
+ // to zjit_* instructions to profile these instructions.
+ if (body->jit_entry_calls == rb_zjit_profile_threshold) {
+ rb_zjit_profile_enable(iseq);
+ }
+
+ // At call-threshold, compile the ISEQ with ZJIT.
+ if (body->jit_entry_calls == rb_zjit_call_threshold) {
+ rb_zjit_compile_iseq(iseq, false);
+ }
}
- else {
+ return body->jit_entry;
+}
+#else
+# define zjit_compile(ec) ((rb_jit_func_t)0)
+#endif
+
+// Execute JIT code compiled by yjit_compile() or zjit_compile()
+static inline VALUE
+jit_exec(rb_execution_context_t *ec)
+{
+#if USE_YJIT
+ if (rb_yjit_enabled_p) {
+ rb_jit_func_t func = yjit_compile(ec);
+ if (func) {
+ return func(ec, ec->cfp);
+ }
return Qundef;
}
+#endif
- // Trigger JIT compilation as needed
- jit_func_t func;
- if (yjit_enabled) {
- if (body->total_calls == rb_yjit_call_threshold()) {
- // If we couldn't generate any code for this iseq, then return
- // Qundef so the interpreter will handle the call.
- if (!rb_yjit_compile_iseq(iseq, ec)) {
- return Qundef;
- }
+#if USE_ZJIT
+ void *zjit_entry = rb_zjit_entry;
+ if (zjit_entry) {
+ rb_jit_func_t func = zjit_compile(ec);
+ if (func) {
+ return ((rb_zjit_func_t)zjit_entry)(ec, ec->cfp, func);
}
- // YJIT tried compiling this function once before and couldn't do
- // it, so return Qundef so the interpreter handles it.
- if ((func = body->jit_func) == 0) {
- return Qundef;
+ }
+#endif
+ return Qundef;
+}
+
+#if USE_YJIT || USE_ZJIT
+// Generate JIT code that supports the following kind of ISEQ entry:
+// * The first ISEQ pushed by vm_exec_handle_exception. The frame would
+// point to a location specified by a catch table, and it doesn't have
+// VM_FRAME_FLAG_FINISH. The current vm_exec stops if JIT code returns
+// a non-Qundef value. So you should not return a non-Qundef value
+// until ec->cfp is changed to a frame with VM_FRAME_FLAG_FINISH.
+static inline rb_jit_func_t
+jit_compile_exception(rb_execution_context_t *ec)
+{
+ const rb_iseq_t *iseq = ec->cfp->iseq;
+ struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
+
+#if USE_ZJIT
+ if (body->jit_exception == NULL && rb_zjit_enabled_p) {
+ body->jit_exception_calls++;
+
+ // At profile-threshold, rewrite some of the YARV instructions
+ // to zjit_* instructions to profile these instructions.
+ if (body->jit_exception_calls == rb_zjit_profile_threshold) {
+ rb_zjit_profile_enable(iseq);
+ }
+
+ // At call-threshold, compile the ISEQ with ZJIT.
+ if (body->jit_exception_calls == rb_zjit_call_threshold) {
+ rb_zjit_compile_iseq(iseq, true);
}
}
- else if (UNLIKELY(MJIT_FUNC_STATE_P(func = body->jit_func))) {
- return mjit_check_iseq(ec, iseq, body);
+#endif
+
+#if USE_YJIT
+ // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
+ if (body->jit_exception == NULL && rb_yjit_enabled_p) {
+ body->jit_exception_calls++;
+ if (body->jit_exception_calls == rb_yjit_call_threshold) {
+ rb_yjit_compile_iseq(iseq, ec, true);
+ }
}
+#endif
+ return body->jit_exception;
+}
- // Call the JIT code
- return func(ec, ec->cfp); // SystemV x64 calling convention: ec -> RDI, cfp -> RSI
+// Execute JIT code compiled by jit_compile_exception()
+static inline VALUE
+jit_exec_exception(rb_execution_context_t *ec)
+{
+ rb_jit_func_t func = jit_compile_exception(ec);
+ if (func) {
+ // Call the JIT code
+ return func(ec, ec->cfp);
+ }
+ else {
+ return Qundef;
+ }
}
+#else
+# define jit_compile_exception(ec) ((rb_jit_func_t)0)
+# define jit_exec_exception(ec) Qundef
#endif
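
For illustration, an ISEQ entered through a catch table is typically a rescue/ensure body; with enough exceptional entries, the separate `jit_exception` entry can get compiled as well (a sketch; actual thresholds depend on the JIT options):

    def parse(s)
      Integer(s)
    rescue ArgumentError
      0                       # this path is entered via the catch table
    end
    100_000.times { parse("oops") }
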
-#include "vm_insnhelper.c"
+static void add_opt_method_entry(const rb_method_entry_t *me);
+
+#define RB_TYPE_2_P(obj, type1, type2) \
+ (RB_TYPE_P(obj, type1) || RB_TYPE_P(obj, type2))
+#define RB_TYPE_3_P(obj, type1, type2, type3) \
+ (RB_TYPE_P(obj, type1) || RB_TYPE_P(obj, type2) || RB_TYPE_P(obj, type3))
-#ifndef MJIT_HEADER
+#define VM_ASSERT_TYPE(obj, type) \
+ VM_ASSERT(RB_TYPE_P(obj, type), #obj ": %s", rb_obj_info(obj))
+#define VM_ASSERT_TYPE2(obj, type1, type2) \
+ VM_ASSERT(RB_TYPE_2_P(obj, type1, type2), #obj ": %s", rb_obj_info(obj))
+#define VM_ASSERT_TYPE3(obj, type1, type2, type3) \
+ VM_ASSERT(RB_TYPE_3_P(obj, type1, type2, type3), #obj ": %s", rb_obj_info(obj))
+
+#include "vm_insnhelper.c"
#include "vm_exec.c"
#include "vm_method.c"
-#endif /* #ifndef MJIT_HEADER */
#include "vm_eval.c"
-#ifndef MJIT_HEADER
#define PROCDEBUG 0
@@ -475,26 +680,48 @@ bool ruby_vm_keep_script_lines;
#ifdef RB_THREAD_LOCAL_SPECIFIER
RB_THREAD_LOCAL_SPECIFIER rb_execution_context_t *ruby_current_ec;
-#ifdef __APPLE__
- rb_execution_context_t *
- rb_current_ec(void)
- {
- return ruby_current_ec;
- }
- void
- rb_current_ec_set(rb_execution_context_t *ec)
- {
- ruby_current_ec = ec;
- }
+#ifdef RUBY_NT_SERIAL
+RB_THREAD_LOCAL_SPECIFIER rb_atomic_t ruby_nt_serial;
#endif
+// no-inline decl on vm_core.h
+rb_execution_context_t *
+rb_current_ec_noinline(void)
+{
+ return ruby_current_ec;
+}
+
+void
+rb_current_ec_set(rb_execution_context_t *ec)
+{
+ ruby_current_ec = ec;
+}
+
+
+#ifdef RB_THREAD_CURRENT_EC_NOINLINE
+rb_execution_context_t *
+rb_current_ec(void)
+{
+ return ruby_current_ec;
+}
+
+#endif
#else
native_tls_key_t ruby_current_ec_key;
+
+// no-inline decl on vm_core.h
+rb_execution_context_t *
+rb_current_ec_noinline(void)
+{
+ return native_tls_get(ruby_current_ec_key);
+}
+
#endif
-rb_event_flag_t ruby_vm_event_flags;
-rb_event_flag_t ruby_vm_event_enabled_global_flags;
-unsigned int ruby_vm_event_local_num;
+rb_event_flag_t ruby_vm_event_flags = 0;
+rb_event_flag_t ruby_vm_event_enabled_global_flags = 0;
+unsigned int ruby_vm_c_events_enabled = 0;
+unsigned int ruby_vm_iseq_events_enabled = 0;
rb_serial_t ruby_vm_constant_cache_invalidations = 0;
rb_serial_t ruby_vm_constant_cache_misses = 0;
@@ -502,7 +729,7 @@ rb_serial_t ruby_vm_global_cvar_state = 1;
static const struct rb_callcache vm_empty_cc = {
.flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
- .klass = Qfalse,
+ .klass = Qundef,
.cme_ = NULL,
.call_ = vm_call_general,
.aux_ = {
@@ -512,7 +739,7 @@ static const struct rb_callcache vm_empty_cc = {
static const struct rb_callcache vm_empty_cc_for_super = {
.flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
- .klass = Qfalse,
+ .klass = Qundef,
.cme_ = NULL,
.call_ = vm_call_super_method,
.aux_ = {
@@ -528,7 +755,7 @@ rb_vm_inc_const_missing_count(void)
ruby_vm_const_missing_count +=1;
}
-MJIT_FUNC_EXPORTED int
+int
rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
struct ruby_dtrace_method_hook_args *args)
{
@@ -541,8 +768,8 @@ rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
if (RB_TYPE_P(klass, T_ICLASS)) {
klass = RBASIC(klass)->klass;
}
- else if (FL_TEST(klass, FL_SINGLETON)) {
- klass = rb_attr_get(klass, id__attached__);
+ else if (RCLASS_SINGLETON_P(klass)) {
+ klass = RCLASS_ATTACHED_OBJECT(klass);
if (NIL_P(klass)) return FALSE;
}
type = BUILTIN_TYPE(klass);
@@ -564,6 +791,8 @@ rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
return FALSE;
}
+extern unsigned int redblack_buffer_size;
+
/*
* call-seq:
* RubyVM.stat -> Hash
@@ -591,6 +820,7 @@ static VALUE
vm_stat(int argc, VALUE *argv, VALUE self)
{
static VALUE sym_constant_cache_invalidations, sym_constant_cache_misses, sym_global_cvar_state, sym_next_shape_id;
+ static VALUE sym_shape_cache_size;
VALUE arg = Qnil;
VALUE hash = Qnil, key = Qnil;
@@ -612,6 +842,7 @@ vm_stat(int argc, VALUE *argv, VALUE self)
S(constant_cache_misses);
S(global_cvar_state);
S(next_shape_id);
+ S(shape_cache_size);
#undef S
#define SET(name, attr) \
@@ -623,7 +854,8 @@ vm_stat(int argc, VALUE *argv, VALUE self)
SET(constant_cache_invalidations, ruby_vm_constant_cache_invalidations);
SET(constant_cache_misses, ruby_vm_constant_cache_misses);
SET(global_cvar_state, ruby_vm_global_cvar_state);
- SET(next_shape_id, (rb_serial_t)GET_VM()->next_shape_id);
+ SET(next_shape_id, (rb_serial_t)rb_shapes_count());
+ SET(shape_cache_size, (rb_serial_t)rb_shape_tree.cache_size);
#undef SET
#if USE_DEBUG_COUNTER
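
Usage of the new key (output values are illustrative):

    p RubyVM.stat
    # e.g. {constant_cache_invalidations: 0, constant_cache_misses: 14,
    #       global_cvar_state: 1, next_shape_id: 121, shape_cache_size: 0}
    p RubyVM.stat(:shape_cache_size)  # fetch a single counter
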
@@ -651,15 +883,16 @@ vm_stat(int argc, VALUE *argv, VALUE self)
/* control stack frame */
static void
-vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
+vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_box_t *box)
{
if (ISEQ_BODY(iseq)->type != ISEQ_TYPE_TOP) {
rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
}
/* for return */
- vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, rb_ec_thread_ptr(ec)->top_self,
- VM_BLOCK_HANDLER_NONE,
+ vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
+ rb_ec_thread_ptr(ec)->top_self,
+ GC_GUARDED_PTR(box),
(VALUE)vm_cref_new_toplevel(ec), /* cref or me */
ISEQ_BODY(iseq)->iseq_encoded, ec->cfp->sp,
ISEQ_BODY(iseq)->local_table_size, ISEQ_BODY(iseq)->stack_max);
@@ -705,7 +938,7 @@ rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_
return 0;
}
-MJIT_FUNC_EXPORTED rb_control_frame_t *
+rb_control_frame_t *
rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
@@ -717,8 +950,6 @@ rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control
return 0;
}
-#endif /* #ifndef MJIT_HEADER */
-
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
@@ -741,7 +972,7 @@ vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_
return 0;
}
-MJIT_STATIC void
+void
rb_vm_pop_cfunc_frame(void)
{
rb_execution_context_t *ec = GET_EC();
@@ -753,8 +984,6 @@ rb_vm_pop_cfunc_frame(void)
vm_pop_frame(ec, cfp, cfp->ep);
}
-#ifndef MJIT_HEADER
-
void
rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
@@ -834,7 +1063,7 @@ vm_block_handler_escape(const rb_execution_context_t *ec, VALUE block_handler)
switch (vm_block_handler_type(block_handler)) {
case block_handler_type_ifunc:
case block_handler_type_iseq:
- return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
+ return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
case block_handler_type_symbol:
case block_handler_type_proc:
@@ -848,8 +1077,6 @@ static VALUE
vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *const cfp)
{
const VALUE * const ep = cfp->ep;
- const rb_env_t *env;
- const rb_iseq_t *env_iseq;
VALUE *env_body, *env_ep;
int local_size, env_size;
@@ -872,6 +1099,7 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co
}
}
else {
+ VM_ASSERT(VM_ENV_LOCAL_P(ep));
VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
if (block_handler != VM_BLOCK_HANDLER_NONE) {
@@ -884,7 +1112,22 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co
local_size = VM_ENV_DATA_SIZE;
}
else {
- local_size = ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
+ local_size = ISEQ_BODY(cfp->iseq)->local_table_size;
+ if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
+ int ci_offset = local_size - ISEQ_BODY(cfp->iseq)->param.size + VM_ENV_DATA_SIZE;
+
+ CALL_INFO ci = (CALL_INFO)VM_CF_LEP(cfp)[-ci_offset];
+ local_size += vm_ci_argc(ci);
+ }
+ local_size += VM_ENV_DATA_SIZE;
+ }
+
+ // Invalidate JIT code that assumes cfp->ep == vm_base_ptr(cfp).
+ // This is done before creating the imemo_env because VM_STACK_ENV_WRITE
+ // below leaves the on-stack ep in a state that is unsafe to GC.
+ if (VM_FRAME_RUBYFRAME_P(cfp)) {
+ rb_yjit_invalidate_ep_is_bp(cfp->iseq);
+ rb_zjit_invalidate_no_ep_escape(cfp->iseq);
}
/*
@@ -901,9 +1144,26 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co
env_size = local_size +
1 /* envval */;
+
+ // Careful with order in the following sequence. Each allocation can move objects.
env_body = ALLOC_N(VALUE, env_size);
+ rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, 0);
+
+ // Set up env without WB since it's brand new (similar to newobj_init(), newobj_fill())
MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size);
+ env_ep = &env_body[local_size - 1 /* specval */];
+ env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
+
+ env->iseq = (rb_iseq_t *)(VM_FRAME_RUBYFRAME_P(cfp) ? cfp->iseq : NULL);
+ env->ep = env_ep;
+ env->env = env_body;
+ env->env_size = env_size;
+
+ cfp->ep = env_ep;
+ VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
+ VM_STACK_ENV_WRITE(ep, 0, (VALUE)env); /* GC mark */
+
#if 0
for (i = 0; i < local_size; i++) {
if (VM_FRAME_RUBYFRAME_P(cfp)) {
@@ -913,14 +1173,6 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co
}
#endif
- env_iseq = VM_FRAME_RUBYFRAME_P(cfp) ? cfp->iseq : NULL;
- env_ep = &env_body[local_size - 1 /* specval */];
-
- env = vm_env_new(env_ep, env_body, env_size, env_iseq);
-
- cfp->ep = env_ep;
- VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
- VM_STACK_ENV_WRITE(ep, 0, (VALUE)env); /* GC mark */
return (VALUE)env;
}
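
The env escape handled here is triggered whenever a frame's locals must outlive the frame, e.g. through a Proc or Binding (a minimal example):

    # Capturing `n` forces vm_make_env_each to move the frame's env to
    # the heap, which is why JIT code assuming ep == bp is invalidated.
    def counter
      n = 0
      proc { n += 1 }
    end
    c = counter
    p c.call  # => 1
    p c.call  # => 2
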
@@ -1002,6 +1254,21 @@ rb_vm_env_local_variables(const rb_env_t *env)
}
VALUE
+rb_vm_env_numbered_parameters(const rb_env_t *env)
+{
+ struct local_var_list vars;
+ local_var_list_init(&vars);
+ // if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break; // TODO: is this needed?
+ const rb_iseq_t *iseq = env->iseq;
+ unsigned int i;
+ if (!iseq) return 0;
+ for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
+ numparam_list_add(&vars, ISEQ_BODY(iseq)->local_table[i]);
+ }
+ return local_var_list_finish(&vars);
+}
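
Numbered parameters are ordinary locals in the block's env, which is what this helper enumerates (a small example):

    b = proc { _1 * 2; binding }.call(21)
    p b.local_variable_get(:_1)  # => 21
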
+
+VALUE
rb_iseq_local_variables(const rb_iseq_t *iseq)
{
struct local_var_list vars;
@@ -1079,8 +1346,17 @@ rb_proc_dup(VALUE self)
rb_proc_t *src;
GetProcPtr(self, src);
- procval = proc_create(rb_obj_class(self), &src->block, src->is_from_method, src->is_lambda);
- if (RB_OBJ_SHAREABLE_P(self)) FL_SET_RAW(procval, RUBY_FL_SHAREABLE);
+
+ switch (vm_block_type(&src->block)) {
+ case block_type_ifunc:
+ procval = rb_func_proc_dup(self);
+ break;
+ default:
+ procval = proc_create(rb_obj_class(self), &src->block, src->is_from_method, src->is_lambda);
+ break;
+ }
+
+ if (RB_OBJ_SHAREABLE_P(self)) RB_OBJ_SET_SHAREABLE(procval);
RB_GC_GUARD(self); /* for: body = rb_proc_dup(body) */
return procval;
}
@@ -1141,26 +1417,55 @@ env_copy(const VALUE *src_ep, VALUE read_only_variables)
VALUE *env_body = ZALLOC_N(VALUE, src_env->env_size); // fill with Qfalse
VALUE *ep = &env_body[src_env->env_size - 2];
- volatile VALUE prev_env = Qnil;
+ const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq);
+
+ // Copy after allocations above, since they can move objects in src_ep.
+ VALUE svar_val = src_ep[VM_ENV_DATA_INDEX_ME_CREF];
+ if (imemo_type_p(svar_val, imemo_svar)) {
+ const struct vm_svar *svar = (struct vm_svar *)svar_val;
+
+ if (svar->cref_or_me) {
+ svar_val = svar->cref_or_me;
+ }
+ else {
+ svar_val = Qfalse;
+ }
+ }
+ RB_OBJ_WRITE(copied_env, &ep[VM_ENV_DATA_INDEX_ME_CREF], svar_val);
+
+ ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED;
+ if (!VM_ENV_LOCAL_P(src_ep)) {
+ VM_ENV_FLAGS_SET(ep, VM_ENV_FLAG_LOCAL);
+ }
if (read_only_variables) {
for (int i=RARRAY_LENINT(read_only_variables)-1; i>=0; i--) {
ID id = NUM2ID(RARRAY_AREF(read_only_variables, i));
- for (unsigned int j=0; j<ISEQ_BODY(src_env->iseq)->local_table_size; j++) {
- if (id == ISEQ_BODY(src_env->iseq)->local_table[j]) {
+ const struct rb_iseq_constant_body *body = ISEQ_BODY(src_env->iseq);
+ for (unsigned int j=0; j<body->local_table_size; j++) {
+ if (id == body->local_table[j]) {
+ // check reassignment
+ if (body->lvar_states[j] == lvar_reassigned) {
+ VALUE name = rb_id2str(id);
+ VALUE msg = rb_sprintf("cannot make a shareable Proc because "
+ "the outer variable '%" PRIsVALUE "' may be reassigned.", name);
+ rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg));
+ }
+
+ // check shareable
VALUE v = src_env->env[j];
if (!rb_ractor_shareable_p(v)) {
VALUE name = rb_id2str(id);
- VALUE msg = rb_sprintf("can not make shareable Proc because it can refer"
+ VALUE msg = rb_sprintf("cannot make a shareable Proc because it can refer"
" unshareable object %+" PRIsVALUE " from ", v);
if (name)
- rb_str_catf(msg, "variable `%" PRIsVALUE "'", name);
+ rb_str_catf(msg, "variable '%" PRIsVALUE "'", name);
else
rb_str_cat_cstr(msg, "a hidden variable");
rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg));
}
- env_body[j] = v;
+ RB_OBJ_WRITE((VALUE)copied_env, &env_body[j], v);
rb_ary_delete_at(read_only_variables, i);
break;
}
@@ -1168,21 +1473,18 @@ env_copy(const VALUE *src_ep, VALUE read_only_variables)
}
}
- ep[VM_ENV_DATA_INDEX_ME_CREF] = src_ep[VM_ENV_DATA_INDEX_ME_CREF];
- ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED;
-
if (!VM_ENV_LOCAL_P(src_ep)) {
const VALUE *prev_ep = VM_ENV_PREV_EP(src_env->ep);
const rb_env_t *new_prev_env = env_copy(prev_ep, read_only_variables);
- prev_env = (VALUE)new_prev_env;
ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_GUARDED_PREV_EP(new_prev_env->ep);
+ RB_OBJ_WRITTEN(copied_env, Qundef, new_prev_env);
+ VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_LOCAL);
}
else {
ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE;
}
- const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq);
- RB_GC_GUARD(prev_env);
+ RB_OBJ_SET_SHAREABLE((VALUE)copied_env);
return copied_env;
}
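
In Ruby terms, the two checks added above reject Procs like these (a sketch; the reassignment check is new behavior introduced by this diff):

    x = 1
    pr = proc { x }
    x = 2  # `x` may be reassigned after capture
    begin
      Ractor.make_shareable(pr)
    rescue Ractor::IsolationError => e
      puts e.message  # "... the outer variable 'x' may be reassigned."
    end

    y = Object.new   # unshareable captured value
    begin
      Ractor.make_shareable(proc { y })
    rescue Ractor::IsolationError => e
      puts e.message  # "... can refer unshareable object ..."
    end
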
@@ -1218,23 +1520,29 @@ proc_shared_outer_variables(struct rb_id_table *outer_variables, bool isolate, c
rb_str_append(str, name);
}
if (*sep == ',') rb_str_cat_cstr(str, ")");
- rb_str_cat_cstr(str, data.yield ? " and uses `yield'." : ".");
+ rb_str_cat_cstr(str, data.yield ? " and uses 'yield'." : ".");
rb_exc_raise(rb_exc_new_str(rb_eArgError, str));
}
else if (data.yield) {
- rb_raise(rb_eArgError, "can not %s because it uses `yield'.", message);
+ rb_raise(rb_eArgError, "can not %s because it uses 'yield'.", message);
}
return data.read_only;
}
VALUE
-rb_proc_isolate_bang(VALUE self)
+rb_proc_isolate_bang(VALUE self, VALUE replace_self)
{
const rb_iseq_t *iseq = vm_proc_iseq(self);
if (iseq) {
rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
+
+ if (!UNDEF_P(replace_self)) {
+ VM_ASSERT(rb_ractor_shareable_p(replace_self));
+ RB_OBJ_WRITE(self, &proc->block.as.captured.self, replace_self);
+ }
+
if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
if (ISEQ_BODY(iseq)->outer_variables) {
@@ -1243,9 +1551,10 @@ rb_proc_isolate_bang(VALUE self)
proc_isolate_env(self, proc, Qfalse);
proc->is_isolated = TRUE;
+ RB_OBJ_WRITE(self, &proc->block.as.captured.self, Qnil);
}
- FL_SET_RAW(self, RUBY_FL_SHAREABLE);
+ RB_OBJ_SET_SHAREABLE(self);
return self;
}
@@ -1253,17 +1562,22 @@ VALUE
rb_proc_isolate(VALUE self)
{
VALUE dst = rb_proc_dup(self);
- rb_proc_isolate_bang(dst);
+ rb_proc_isolate_bang(dst, Qundef);
return dst;
}
VALUE
-rb_proc_ractor_make_shareable(VALUE self)
+rb_proc_ractor_make_shareable(VALUE self, VALUE replace_self)
{
const rb_iseq_t *iseq = vm_proc_iseq(self);
if (iseq) {
rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
+
+ if (!UNDEF_P(replace_self)) {
+ RB_OBJ_WRITE(self, &proc->block.as.captured.self, replace_self);
+ }
+
if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
if (!rb_ractor_shareable_p(vm_block_self(&proc->block))) {
@@ -1282,26 +1596,61 @@ rb_proc_ractor_make_shareable(VALUE self)
proc_isolate_env(self, proc, read_only_variables);
proc->is_isolated = TRUE;
}
+ else {
+ const struct rb_block *block = vm_proc_block(self);
+ if (block->type != block_type_symbol) rb_raise(rb_eRuntimeError, "not supported yet");
- FL_SET_RAW(self, RUBY_FL_SHAREABLE);
+ VALUE proc_self = vm_block_self(block);
+ if (!rb_ractor_shareable_p(proc_self)) {
+ rb_raise(rb_eRactorIsolationError,
+ "Proc's self is not shareable: %" PRIsVALUE,
+ self);
+ }
+ }
+
+ RB_OBJ_SET_FROZEN_SHAREABLE(self);
return self;
}
-MJIT_FUNC_EXPORTED VALUE
+VALUE
rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
{
VALUE procval;
+ enum imemo_type code_type = imemo_type(captured->code.val);
if (!VM_ENV_ESCAPED_P(captured->ep)) {
rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
vm_make_env_object(ec, cfp);
}
+
VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep));
- VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq) ||
- imemo_type_p(captured->code.val, imemo_ifunc));
+ VM_ASSERT(code_type == imemo_iseq || code_type == imemo_ifunc);
procval = vm_proc_create_from_captured(klass, captured,
- imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc, FALSE, is_lambda);
+ code_type == imemo_iseq ? block_type_iseq : block_type_ifunc,
+ FALSE, is_lambda);
+
+ if (code_type == imemo_ifunc) {
+ struct vm_ifunc *ifunc = (struct vm_ifunc *)captured->code.val;
+ if (ifunc->svar_lep) {
+ VALUE ep0 = ifunc->svar_lep[0];
+ if (RB_TYPE_P(ep0, T_IMEMO) && imemo_type_p(ep0, imemo_env)) {
+ // `ep0 == imemo_env` means this ep is escaped to heap (in env object).
+ const rb_env_t *env = (const rb_env_t *)ep0;
+ ifunc->svar_lep = (VALUE *)env->ep;
+ }
+ else {
+ VM_ASSERT(FIXNUM_P(ep0));
+ if (ep0 & VM_ENV_FLAG_ESCAPED) {
+ // ok. do nothing
+ }
+ else {
+ ifunc->svar_lep = NULL;
+ }
+ }
+ }
+ }
+
return procval;
}
@@ -1345,8 +1694,7 @@ rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const I
const rb_env_t *env;
rb_execution_context_t *ec = GET_EC();
const rb_iseq_t *base_iseq, *iseq;
- rb_ast_body_t ast;
- NODE tmp_node;
+ rb_node_scope_t tmp_node;
if (dyncount < 0) return 0;
@@ -1358,17 +1706,20 @@ rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const I
dyns->size = dyncount;
MEMCPY(dyns->ids, dynvars, ID, dyncount);
- rb_node_init(&tmp_node, NODE_SCOPE, (VALUE)dyns, 0, 0);
- ast.root = &tmp_node;
- ast.compile_option = 0;
- ast.script_lines = INT2FIX(-1);
+ rb_node_init(RNODE(&tmp_node), NODE_SCOPE);
+ tmp_node.nd_tbl = dyns;
+ tmp_node.nd_body = 0;
+ tmp_node.nd_parent = NULL;
+ tmp_node.nd_args = 0;
+
+ VALUE ast_value = rb_ruby_ast_new(RNODE(&tmp_node));
if (base_iseq) {
- iseq = rb_iseq_new(&ast, ISEQ_BODY(base_iseq)->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
+ iseq = rb_iseq_new(ast_value, ISEQ_BODY(base_iseq)->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
}
else {
VALUE tempstr = rb_fstring_lit("<temp>");
- iseq = rb_iseq_new_top(&ast, tempstr, tempstr, tempstr, NULL);
+ iseq = rb_iseq_new_top(ast_value, tempstr, tempstr, tempstr, NULL);
}
tmp_node.nd_tbl = 0; /* reset table */
ALLOCV_END(idtmp);
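
This path runs when a Binding gains variables that did not exist at parse time; the temporary NODE_SCOPE above backs the hidden eval ISEQ:

    b = binding
    b.local_variable_set(:added, 42)   # extends the env via dynavars
    p b.local_variable_get(:added)     # => 42
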
@@ -1383,7 +1734,7 @@ rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const I
/* C -> Ruby: block */
-static inline VALUE
+static inline void
invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
{
int arg_size = ISEQ_BODY(iseq)->param.size;
@@ -1395,15 +1746,13 @@ invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, cons
ec->cfp->sp + arg_size,
ISEQ_BODY(iseq)->local_table_size - arg_size,
ISEQ_BODY(iseq)->stack_max);
- return vm_exec(ec, true);
}
-static VALUE
+static inline void
invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
{
- /* bmethod */
+ /* bmethod call from outside the VM */
int arg_size = ISEQ_BODY(iseq)->param.size;
- VALUE ret;
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
@@ -1411,14 +1760,11 @@ invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, co
VM_GUARDED_PREV_EP(captured->ep),
(VALUE)me,
ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
- ec->cfp->sp + arg_size,
+ ec->cfp->sp + 1 /* self */ + arg_size,
ISEQ_BODY(iseq)->local_table_size - arg_size,
ISEQ_BODY(iseq)->stack_max);
VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
- ret = vm_exec(ec, true);
-
- return ret;
}
ALWAYS_INLINE(static VALUE
@@ -1432,33 +1778,49 @@ invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_bl
const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
{
const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
- int i, opt_pc;
+ int opt_pc;
VALUE type = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
rb_control_frame_t *cfp = ec->cfp;
VALUE *sp = cfp->sp;
+ int flags = (kw_splat ? VM_CALL_KW_SPLAT : 0);
+ VALUE *use_argv = (VALUE *)argv;
+ VALUE av[2];
stack_check(ec);
- CHECK_VM_STACK_OVERFLOW(cfp, argc);
+ if (UNLIKELY(argc > VM_ARGC_STACK_MAX) &&
+ (VM_ARGC_STACK_MAX >= 1 ||
+ /* Skip ruby array for potential autosplat case */
+ (argc != 1 || is_lambda))) {
+ use_argv = vm_argv_ruby_array(av, argv, &flags, &argc, kw_splat);
+ }
+
+ CHECK_VM_STACK_OVERFLOW(cfp, argc + 1);
vm_check_canary(ec, sp);
- cfp->sp = sp + argc;
- for (i=0; i<argc; i++) {
- sp[i] = argv[i];
+
+ VALUE *stack_argv = sp;
+ if (me) {
+ *sp = self; // bmethods need `self` on the VM stack
+ stack_argv++;
}
+ cfp->sp = stack_argv + argc;
+ MEMCPY(stack_argv, use_argv, VALUE, argc); // restrict: new stack space
- opt_pc = vm_yield_setup_args(ec, iseq, argc, sp, kw_splat, passed_block_handler,
+ opt_pc = vm_yield_setup_args(ec, iseq, argc, stack_argv, flags, passed_block_handler,
(is_lambda ? arg_setup_method : arg_setup_block));
cfp->sp = sp;
if (me == NULL) {
- return invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
+ invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
}
else {
- return invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
+ invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
}
+
+ return vm_exec(ec);
}
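
A bmethod is a method defined from a block, e.g. via define_method; with this change its `self` is placed on the VM stack just below the arguments:

    class Greeter
      define_method(:greet) { |name| "hi, #{name}" }  # a bmethod
    end
    p Greeter.new.greet("ruby")  # => "hi, ruby"
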
-static inline VALUE
+static VALUE
invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler,
int argc, const VALUE *argv,
int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref,
@@ -1582,14 +1944,14 @@ vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL);
}
-MJIT_FUNC_EXPORTED VALUE
-rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
+static VALUE
+vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
{
return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, block_handler, TRUE, me);
}
-MJIT_FUNC_EXPORTED VALUE
+VALUE
rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
{
@@ -1597,7 +1959,7 @@ rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
vm_block_handler_verify(passed_block_handler);
if (proc->is_from_method) {
- return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
+ return vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
}
else {
return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
@@ -1611,7 +1973,7 @@ rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE s
vm_block_handler_verify(passed_block_handler);
if (proc->is_from_method) {
- return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
+ return vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
}
else {
return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
@@ -1620,30 +1982,36 @@ rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE s
/* special variable */
-static rb_control_frame_t *
-vm_normal_frame(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
+VALUE *
+rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
- while (cfp->pc == 0) {
- cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ while (cfp->pc == 0 || cfp->iseq == 0) {
+ if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_IFUNC) {
+ struct vm_ifunc *ifunc = (struct vm_ifunc *)cfp->iseq;
+ return ifunc->svar_lep;
+ }
+ else {
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ }
+
if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
- return 0;
+ return NULL;
}
}
- return cfp;
+
+ return (VALUE *)VM_CF_LEP(cfp);
}
static VALUE
vm_cfp_svar_get(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key)
{
- cfp = vm_normal_frame(ec, cfp);
- return lep_svar_get(ec, cfp ? VM_CF_LEP(cfp) : 0, key);
+ return lep_svar_get(ec, rb_vm_svar_lep(ec, cfp), key);
}
static void
vm_cfp_svar_set(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key, const VALUE val)
{
- cfp = vm_normal_frame(ec, cfp);
- lep_svar_set(ec, cfp ? VM_CF_LEP(cfp) : 0, key, val);
+ lep_svar_set(ec, rb_vm_svar_lep(ec, cfp), key, val);
}
static VALUE
@@ -1682,6 +2050,17 @@ rb_lastline_set(VALUE val)
vm_svar_set(GET_EC(), VM_SVAR_LASTLINE, val);
}
+void
+rb_lastline_set_up(VALUE val, unsigned int up)
+{
+ rb_control_frame_t * cfp = GET_EC()->cfp;
+
+ for(unsigned int i = 0; i < up; i++) {
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ }
+ vm_cfp_svar_set(GET_EC(), cfp, VM_SVAR_LASTLINE, val);
+}
+
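
`$_` is a frame-local special variable reached through the svar lep; `rb_lastline_set_up` lets builtins assign it a given number of frames up (e.g. Kernel#sub updating the caller's `$_`). For illustration:

    $_ = "hello world"
    p ~ /world/  # => 6; Regexp#~ matches against $_
    p $~[0]      # => "world"
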
/* misc */
const char *
@@ -1728,7 +2107,7 @@ rb_source_location(int *pline)
}
}
-MJIT_FUNC_EXPORTED const char *
+const char *
rb_source_location_cstr(int *pline)
{
VALUE path = rb_source_location(pline);
@@ -1824,7 +2203,7 @@ make_localjump_error(const char *mesg, VALUE value, int reason)
return exc;
}
-MJIT_FUNC_EXPORTED void
+void
rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
{
VALUE exc = make_localjump_error(mesg, value, reason);
@@ -1832,7 +2211,7 @@ rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
}
VALUE
-rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
+rb_vm_make_jump_tag_but_local_jump(enum ruby_tag_type state, VALUE val)
{
const char *mesg;
@@ -1864,7 +2243,7 @@ rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
}
void
-rb_vm_jump_tag_but_local_jump(int state)
+rb_vm_jump_tag_but_local_jump(enum ruby_tag_type state)
{
VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
if (!NIL_P(exc)) rb_exc_raise(exc);
@@ -1915,6 +2294,13 @@ short ruby_vm_redefined_flag[BOP_LAST_];
static st_table *vm_opt_method_def_table = 0;
static st_table *vm_opt_mid_table = 0;
+void
+rb_free_vm_opt_tables(void)
+{
+ st_free_table(vm_opt_method_def_table);
+ st_free_table(vm_opt_mid_table);
+}
+
static int
vm_redefinition_check_flag(VALUE klass)
{
@@ -1952,6 +2338,8 @@ vm_redefinition_check_method_type(const rb_method_entry_t *me)
return FALSE;
}
+ if (METHOD_ENTRY_BASIC(me)) return TRUE;
+
const rb_method_definition_t *def = me->def;
switch (def->type) {
case VM_METHOD_TYPE_CFUNC:
@@ -1966,7 +2354,7 @@ static void
rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
{
st_data_t bop;
- if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
+ if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
klass = RBASIC_CLASS(klass);
}
@@ -1974,8 +2362,14 @@ rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
if (st_lookup(vm_opt_method_def_table, (st_data_t)me->def, &bop)) {
int flag = vm_redefinition_check_flag(klass);
if (flag != 0) {
+ rb_category_warn(
+ RB_WARN_CATEGORY_PERFORMANCE,
+ "Redefining '%s#%s' disables interpreter and JIT optimizations",
+ rb_class2name(me->owner),
+ rb_id2name(me->called_id)
+ );
rb_yjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
- rb_mjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
+ rb_zjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
ruby_vm_redefined_flag[bop] |= flag;
}
}
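
The new warning is in the :performance category, which is off by default (example):

    Warning[:performance] = true  # or run with: ruby -W:performance
    class Integer
      def +(other) = 42  # clobbers an optimized basic operation (BOP)
    end
    # warning: Redefining 'Integer#+' disables interpreter and JIT optimizations
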
@@ -2002,27 +2396,44 @@ rb_vm_check_redefinition_by_prepend(VALUE klass)
}
static void
-add_opt_method(VALUE klass, ID mid, VALUE bop)
+add_opt_method_entry_bop(const rb_method_entry_t *me, ID mid, enum ruby_basic_operators bop)
+{
+ st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop);
+ st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue);
+}
+
+static void
+add_opt_method(VALUE klass, ID mid, enum ruby_basic_operators bop)
{
const rb_method_entry_t *me = rb_method_entry_at(klass, mid);
if (me && vm_redefinition_check_method_type(me)) {
- st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop);
- st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue);
+ add_opt_method_entry_bop(me, mid, bop);
}
else {
rb_bug("undefined optimized method: %s", rb_id2name(mid));
}
}
+static enum ruby_basic_operators vm_redefinition_bop_for_id(ID mid);
+
+static void
+add_opt_method_entry(const rb_method_entry_t *me)
+{
+ if (me && vm_redefinition_check_method_type(me)) {
+ ID mid = me->called_id;
+ enum ruby_basic_operators bop = vm_redefinition_bop_for_id(mid);
+ if ((int)bop >= 0) {
+ add_opt_method_entry_bop(me, mid, bop);
+ }
+ }
+}
+
static void
vm_init_redefined_flag(void)
{
ID mid;
- VALUE bop;
-
- vm_opt_method_def_table = st_init_numtable();
- vm_opt_mid_table = st_init_numtable();
+ enum ruby_basic_operators bop;
#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
@@ -2039,6 +2450,7 @@ vm_init_redefined_flag(void)
OP(GT, GT), (C(Integer), C(Float));
OP(GE, GE), (C(Integer), C(Float));
OP(LTLT, LTLT), (C(String), C(Array));
+ OP(GTGT, GTGT), (C(Integer));
OP(AREF, AREF), (C(Array), C(Hash), C(Integer));
OP(ASET, ASET), (C(Array), C(Hash));
OP(Length, LENGTH), (C(Array), C(String), C(Hash));
@@ -2046,20 +2458,63 @@ vm_init_redefined_flag(void)
OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
OP(Succ, SUCC), (C(Integer), C(String));
OP(EqTilde, MATCH), (C(Regexp), C(String));
- OP(Freeze, FREEZE), (C(String));
+ OP(Freeze, FREEZE), (C(String), C(Array), C(Hash));
OP(UMinus, UMINUS), (C(String));
OP(Max, MAX), (C(Array));
OP(Min, MIN), (C(Array));
+ OP(Hash, HASH), (C(Array));
OP(Call, CALL), (C(Proc));
OP(And, AND), (C(Integer));
OP(Or, OR), (C(Integer));
OP(NilP, NIL_P), (C(NilClass));
OP(Cmp, CMP), (C(Integer), C(Float), C(String));
OP(Default, DEFAULT), (C(Hash));
+ OP(IncludeP, INCLUDE_P), (C(Array));
#undef C
#undef OP
}
+static enum ruby_basic_operators
+vm_redefinition_bop_for_id(ID mid)
+{
+ switch (mid) {
+#define OP(mid_, bop_) case id##mid_: return BOP_##bop_
+ OP(PLUS, PLUS);
+ OP(MINUS, MINUS);
+ OP(MULT, MULT);
+ OP(DIV, DIV);
+ OP(MOD, MOD);
+ OP(Eq, EQ);
+ OP(Eqq, EQQ);
+ OP(LT, LT);
+ OP(LE, LE);
+ OP(GT, GT);
+ OP(GE, GE);
+ OP(LTLT, LTLT);
+ OP(AREF, AREF);
+ OP(ASET, ASET);
+ OP(Length, LENGTH);
+ OP(Size, SIZE);
+ OP(EmptyP, EMPTY_P);
+ OP(Succ, SUCC);
+ OP(EqTilde, MATCH);
+ OP(Freeze, FREEZE);
+ OP(UMinus, UMINUS);
+ OP(Max, MAX);
+ OP(Min, MIN);
+ OP(Hash, HASH);
+ OP(Call, CALL);
+ OP(And, AND);
+ OP(Or, OR);
+ OP(NilP, NIL_P);
+ OP(Cmp, CMP);
+ OP(Default, DEFAULT);
+ OP(Pack, PACK);
+#undef OP
+ }
+ return -1;
+}
+
/* for vm development */
#if VMDEBUG
@@ -2120,15 +2575,18 @@ frame_name(const rb_control_frame_t *cfp)
// cfp_returning_with_value:
// Whether cfp is the last frame in the unwinding process for a non-local return.
static void
-hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
- bool cfp_returning_with_value, int state, struct vm_throw_data *err)
+hook_before_rewind(rb_execution_context_t *ec, bool cfp_returning_with_value, int state, struct vm_throw_data *err)
{
if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) {
return;
}
else {
- const rb_iseq_t *iseq = cfp->iseq;
- rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
+ const rb_iseq_t *iseq = ec->cfp->iseq;
+ rb_hook_list_t *local_hooks = NULL;
+ unsigned int local_hooks_cnt = iseq->aux.exec.local_hooks_cnt;
+ if (RB_UNLIKELY(local_hooks_cnt > 0)) {
+ local_hooks = rb_iseq_local_hooks(iseq, rb_ec_ractor_ptr(ec), false);
+ }
switch (VM_FRAME_TYPE(ec->cfp)) {
case VM_FRAME_MAGIC_METHOD:
@@ -2166,15 +2624,18 @@ hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
bmethod_return_value);
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
- local_hooks = me->def->body.bmethod.hooks;
-
- if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
- rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
- rb_vm_frame_method_entry(ec->cfp)->def->original_id,
- rb_vm_frame_method_entry(ec->cfp)->called_id,
- rb_vm_frame_method_entry(ec->cfp)->owner,
- bmethod_return_value, TRUE);
+ unsigned int local_hooks_cnt = me->def->body.bmethod.local_hooks_cnt;
+ if (UNLIKELY(local_hooks_cnt > 0)) {
+ local_hooks = rb_method_def_local_hooks(me->def, rb_ec_ractor_ptr(ec), false);
+ if (local_hooks && local_hooks->events & RUBY_EVENT_RETURN) {
+ rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
+ rb_vm_frame_method_entry(ec->cfp)->def->original_id,
+ rb_vm_frame_method_entry(ec->cfp)->called_id,
+ rb_vm_frame_method_entry(ec->cfp)->owner,
+ bmethod_return_value, TRUE);
+ }
}
+
THROW_DATA_CONSUMED_SET(err);
}
else {
@@ -2271,131 +2732,110 @@ hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
VALUE *ep; // ep
void *code; //
};
-
- If jit_exec is already called before calling vm_exec, `jit_enable_p` should
- be FALSE to avoid calling `jit_exec` twice.
*/
static inline VALUE
-vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
- VALUE errinfo, VALUE *initial);
+vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo);
+static inline VALUE
+vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state, struct rb_vm_tag *tag, VALUE result);
// for non-Emscripten Wasm build, use vm_exec with optimized setjmp for runtime performance
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
struct rb_vm_exec_context {
- rb_execution_context_t *ec;
- struct rb_vm_tag *tag;
- VALUE initial;
+ rb_execution_context_t *const ec;
+ struct rb_vm_tag *const tag;
+
VALUE result;
- enum ruby_tag_type state;
- bool jit_enable_p;
};
static void
-vm_exec_enter_vm_loop(rb_execution_context_t *ec, struct rb_vm_exec_context *ctx,
- struct rb_vm_tag *_tag, bool skip_first_ex_handle) {
- if (skip_first_ex_handle) {
- goto vm_loop_start;
- }
-
- ctx->result = ec->errinfo;
- rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY);
- while (UNDEF_P(ctx->result = vm_exec_handle_exception(ec, ctx->state, ctx->result, &ctx->initial))) {
- /* caught a jump, exec the handler */
- ctx->result = vm_exec_core(ec, ctx->initial);
- vm_loop_start:
- VM_ASSERT(ec->tag == _tag);
- /* when caught `throw`, `tag.state` is set. */
- if ((ctx->state = _tag->state) == TAG_NONE) break;
- _tag->state = TAG_NONE;
- }
-}
-
-static void
vm_exec_bottom_main(void *context)
{
- struct rb_vm_exec_context *ctx = (struct rb_vm_exec_context *)context;
+ struct rb_vm_exec_context *ctx = context;
+ rb_execution_context_t *ec = ctx->ec;
- ctx->state = TAG_NONE;
- if (!ctx->jit_enable_p || UNDEF_P(ctx->result = jit_exec(ctx->ec))) {
- ctx->result = vm_exec_core(ctx->ec, ctx->initial);
- }
- vm_exec_enter_vm_loop(ctx->ec, ctx, ctx->tag, true);
+ ctx->result = vm_exec_loop(ec, TAG_NONE, ctx->tag, vm_exec_core(ec));
}
static void
vm_exec_bottom_rescue(void *context)
{
- struct rb_vm_exec_context *ctx = (struct rb_vm_exec_context *)context;
- ctx->state = rb_ec_tag_state(ctx->ec);
- vm_exec_enter_vm_loop(ctx->ec, ctx, ctx->tag, false);
+ struct rb_vm_exec_context *ctx = context;
+ rb_execution_context_t *ec = ctx->ec;
+
+ ctx->result = vm_exec_loop(ec, rb_ec_tag_state(ec), ctx->tag, ec->errinfo);
}
+#endif
VALUE
-vm_exec(rb_execution_context_t *ec, bool jit_enable_p)
+vm_exec(rb_execution_context_t *ec)
{
- struct rb_vm_exec_context ctx = {
- .ec = ec,
- .initial = 0, .result = Qundef,
- .jit_enable_p = jit_enable_p,
- };
- struct rb_wasm_try_catch try_catch;
+ VALUE result = Qundef;
EC_PUSH_TAG(ec);
_tag.retval = Qnil;
- ctx.tag = &_tag;
+
+#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
+ struct rb_vm_exec_context ctx = {
+ .ec = ec,
+ .tag = &_tag,
+ };
+ struct rb_wasm_try_catch try_catch;
EC_REPUSH_TAG();
rb_wasm_try_catch_init(&try_catch, vm_exec_bottom_main, vm_exec_bottom_rescue, &ctx);
- rb_wasm_try_catch_loop_run(&try_catch, &_tag.buf);
-
- EC_POP_TAG();
- return ctx.result;
-}
+ rb_wasm_try_catch_loop_run(&try_catch, &RB_VM_TAG_JMPBUF_GET(_tag.buf));
+ result = ctx.result;
#else
-
-VALUE
-vm_exec(rb_execution_context_t *ec, bool jit_enable_p)
-{
enum ruby_tag_type state;
- VALUE result = Qundef;
- VALUE initial = 0;
-
- EC_PUSH_TAG(ec);
-
- _tag.retval = Qnil;
if ((state = EC_EXEC_TAG()) == TAG_NONE) {
- if (!jit_enable_p || UNDEF_P(result = jit_exec(ec))) {
- result = vm_exec_core(ec, initial);
+ if (UNDEF_P(result = jit_exec(ec))) {
+ result = vm_exec_core(ec);
}
- goto vm_loop_start; /* fallback to the VM */
+ /* fallback to the VM */
+ result = vm_exec_loop(ec, TAG_NONE, &_tag, result);
}
else {
- result = ec->errinfo;
- rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY);
- while (UNDEF_P(result = vm_exec_handle_exception(ec, state, result, &initial))) {
- /* caught a jump, exec the handler */
- result = vm_exec_core(ec, initial);
- vm_loop_start:
- VM_ASSERT(ec->tag == &_tag);
- /* when caught `throw`, `tag.state` is set. */
- if ((state = _tag.state) == TAG_NONE) break;
- _tag.state = TAG_NONE;
- }
+ result = vm_exec_loop(ec, state, &_tag, ec->errinfo);
}
+#endif
+
EC_POP_TAG();
return result;
}
-#endif
static inline VALUE
-vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
- VALUE errinfo, VALUE *initial)
+vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state,
+ struct rb_vm_tag *tag, VALUE result)
+{
+ if (state == TAG_NONE) { /* no jumps, result is discarded */
+ goto vm_loop_start;
+ }
+
+ rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY);
+ while (UNDEF_P(result = vm_exec_handle_exception(ec, state, result))) {
+ // caught a jump, exec the handler. JIT code in jit_exec_exception()
+ // may return Qundef to run remaining frames with vm_exec_core().
+ if (UNDEF_P(result = jit_exec_exception(ec))) {
+ result = vm_exec_core(ec);
+ }
+ vm_loop_start:
+ VM_ASSERT(ec->tag == tag);
+ /* when caught `throw`, `tag.state` is set. */
+ if ((state = tag->state) == TAG_NONE) break;
+ tag->state = TAG_NONE;
+ }
+
+ return result;
+}
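
The `tag->state` path corresponds to Ruby-level non-local jumps: `throw` sets it, and the loop re-enters vm_exec_core to run the matching catch handler:

    result = catch(:done) do
      [1, 2, 3].each { |i| throw :done, i * 10 if i == 2 }
      :not_reached
    end
    p result  # => 20
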
+
+static inline VALUE
+vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo)
{
struct vm_throw_data *err = (struct vm_throw_data *)errinfo;
@@ -2405,7 +2845,6 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
const struct iseq_catch_table *ct;
unsigned long epc, cont_pc, cont_sp;
const rb_iseq_t *catch_iseq;
- rb_control_frame_t *cfp;
VALUE type;
const rb_control_frame_t *escape_cfp;
@@ -2425,7 +2864,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
rb_vm_pop_frame(ec);
}
- cfp = ec->cfp;
+ rb_control_frame_t *const cfp = ec->cfp;
epc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded;
escape_cfp = NULL;
@@ -2455,7 +2894,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
ec->errinfo = Qnil;
THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
// cfp == escape_cfp here so calling with cfp_returning_with_value = true
- hook_before_rewind(ec, ec->cfp, true, state, err);
+ hook_before_rewind(ec, true, state, err);
rb_vm_pop_frame(ec);
return THROW_DATA_VAL(err);
}
@@ -2464,11 +2903,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
}
else {
/* TAG_BREAK */
-#if OPT_STACK_CACHING
- *initial = THROW_DATA_VAL(err);
-#else
- *ec->cfp->sp++ = THROW_DATA_VAL(err);
-#endif
+ *cfp->sp++ = THROW_DATA_VAL(err);
ec->errinfo = Qnil;
return Qundef;
}
@@ -2541,11 +2976,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
cfp->sp = vm_base_ptr(cfp) + entry->sp;
if (state != TAG_REDO) {
-#if OPT_STACK_CACHING
- *initial = THROW_DATA_VAL(err);
-#else
- *ec->cfp->sp++ = THROW_DATA_VAL(err);
-#endif
+ *cfp->sp++ = THROW_DATA_VAL(err);
}
ec->errinfo = Qnil;
VM_ASSERT(ec->tag->state == TAG_NONE);
@@ -2596,11 +3027,12 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
return Qundef;
}
else {
- hook_before_rewind(ec, ec->cfp, (cfp == escape_cfp), state, err);
+ hook_before_rewind(ec, (cfp == escape_cfp), state, err);
if (VM_FRAME_FINISHED_P(ec->cfp)) {
rb_vm_pop_frame(ec);
ec->errinfo = (VALUE)err;
+ rb_vm_tag_jmpbuf_deinit(&ec->tag->buf);
ec->tag = ec->tag->prev;
EC_JUMP_TAG(ec, state);
}
@@ -2614,12 +3046,12 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
/* misc */
VALUE
-rb_iseq_eval(const rb_iseq_t *iseq)
+rb_iseq_eval(const rb_iseq_t *iseq, const rb_box_t *box)
{
rb_execution_context_t *ec = GET_EC();
VALUE val;
- vm_set_top_stack(ec, iseq);
- val = vm_exec(ec, true);
+ vm_set_top_stack(ec, iseq, box);
+ val = vm_exec(ec);
return val;
}
@@ -2628,9 +3060,8 @@ rb_iseq_eval_main(const rb_iseq_t *iseq)
{
rb_execution_context_t *ec = GET_EC();
VALUE val;
-
vm_set_main_stack(ec, iseq);
- val = vm_exec(ec, true);
+ val = vm_exec(ec);
return val;
}
@@ -2668,11 +3099,12 @@ rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
{
rb_execution_context_t *ec = GET_EC();
const rb_control_frame_t *reg_cfp = ec->cfp;
- const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
+ const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
+ const rb_box_t *box = rb_current_box();
VALUE val;
vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
- recv, block_handler,
+ recv, GC_GUARDED_PTR(box),
(VALUE)vm_cref_new_toplevel(ec), /* cref or me */
0, reg_cfp->sp, 0, 0);
@@ -2682,6 +3114,117 @@ rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
return val;
}
+/* Ruby::Box */
+
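+ // Like rb_vm_call_cfunc(), but the pushed dummy TOP frame is bound to the
+ // given box rather than to the caller's current box.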
+VALUE
+rb_vm_call_cfunc_in_box(VALUE recv, VALUE (*func)(VALUE, VALUE), VALUE arg1, VALUE arg2,
+ VALUE filename, const rb_box_t *box)
+{
+ rb_execution_context_t *ec = GET_EC();
+ const rb_control_frame_t *reg_cfp = ec->cfp;
+ const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
+ VALUE val;
+
+ vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
+ recv, GC_GUARDED_PTR(box),
+ (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
+ 0, reg_cfp->sp, 0, 0);
+
+ val = (*func)(arg1, arg2);
+
+ rb_vm_pop_frame(ec);
+ return val;
+}
+
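+ // Mark the current frame as a require/load entry point; rb_vm_loading_box()
+ // scans the stack for this flag to find the box responsible for the load.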
+void
+rb_vm_frame_flag_set_box_require(const rb_execution_context_t *ec)
+{
+ VM_ASSERT(rb_box_available());
+ VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE);
+}
+
+static const rb_box_t *
+current_box_on_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
+{
+ rb_callable_method_entry_t *cme;
+ const rb_box_t *box;
+ const VALUE *lep = VM_EP_RUBY_LEP(ec, cfp);
+ VM_BOX_ASSERT(lep, "lep should be valid");
+ VM_BOX_ASSERT(rb_box_available(), "box should be available here");
+
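+ // Method and cfunc frames carry their box on the method definition (cme);
+ // top-level and class frames store it directly in the local environment.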
+ if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_METHOD) || VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_CFUNC)) {
+ cme = check_method_entry(lep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
+ VM_BOX_ASSERT(cme, "cme should be valid");
+ VM_BOX_ASSERT(cme->def, "cme->def should be valid");
+ return cme->def->box;
+ }
+ else if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_TOP) || VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_CLASS)) {
+ VM_BOX_ASSERT(VM_ENV_LOCAL_P(lep), "lep should be local on MAGIC_TOP or MAGIC_CLASS frames");
+ return VM_ENV_BOX(lep);
+ }
+ else if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_DUMMY)) {
+ // No valid local ep found (probably just after process boot).
+ // Return the main box once it is initialized; until then the root box is the only valid box.
+ box = rb_main_box();
+ if (box)
+ return box;
+ return rb_root_box();
+ }
+ else {
+ VM_BOX_CRASHED();
+ rb_bug("BUG: Local ep without cme/box, flags: %08lX", (unsigned long)lep[VM_ENV_DATA_INDEX_FLAGS]);
+ }
+ UNREACHABLE_RETURN(0);
+}
+
+const rb_box_t *
+rb_vm_current_box(const rb_execution_context_t *ec)
+{
+ return current_box_on_cfp(ec, ec->cfp);
+}
+
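+ // Skip cfunc frames running in the root box (e.g. Kernel#require itself) to
+ // reach the frame that actually initiated the load.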
+static const rb_control_frame_t *
+find_loader_control_frame(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const rb_control_frame_t *end_cfp)
+{
+ while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
+ if (!VM_ENV_FRAME_TYPE_P(cfp->ep, VM_FRAME_MAGIC_CFUNC))
+ break;
+ if (!BOX_ROOT_P(current_box_on_cfp(ec, cfp)))
+ break;
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ }
+ VM_ASSERT(RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp));
+ return cfp;
+}
+
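+ // Returns the box responsible for the current require/load: a frame flagged
+ // with VM_FRAME_FLAG_BOX_REQUIRE wins (Box#require uses the receiver's box);
+ // without such a frame, the current frame's box is used.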
+const rb_box_t *
+rb_vm_loading_box(const rb_execution_context_t *ec)
+{
+ const rb_control_frame_t *cfp, *current_cfp, *end_cfp;
+
+ if (!rb_box_available() || !ec)
+ return rb_root_box();
+
+ cfp = ec->cfp;
+ current_cfp = cfp;
+ end_cfp = RUBY_VM_END_CONTROL_FRAME(ec);
+
+ while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
+ if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE)) {
+ if (RTEST(cfp->self) && BOX_OBJ_P(cfp->self)) {
+ // Box#require, #require_relative, #load
+ return rb_get_box_t(cfp->self);
+ }
+ // Kernel#require, #require_relative, #load
+ cfp = find_loader_control_frame(ec, cfp, end_cfp);
+ return current_box_on_cfp(ec, cfp);
+ }
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ }
+ // No require/load frame with an explicit box was found; use the current frame's box.
+ return current_box_on_cfp(ec, current_cfp);
+}
+
/* vm */
void
@@ -2690,23 +3233,16 @@ rb_vm_update_references(void *ptr)
if (ptr) {
rb_vm_t *vm = ptr;
- rb_gc_update_tbl_refs(vm->frozen_strings);
+ vm->self = rb_gc_location(vm->self);
vm->mark_object_ary = rb_gc_location(vm->mark_object_ary);
- vm->load_path = rb_gc_location(vm->load_path);
- vm->load_path_snapshot = rb_gc_location(vm->load_path_snapshot);
-
- if (vm->load_path_check_cache) {
- vm->load_path_check_cache = rb_gc_location(vm->load_path_check_cache);
- }
-
- vm->expanded_load_path = rb_gc_location(vm->expanded_load_path);
- vm->loaded_features = rb_gc_location(vm->loaded_features);
- vm->loaded_features_snapshot = rb_gc_location(vm->loaded_features_snapshot);
- vm->loaded_features_realpaths = rb_gc_location(vm->loaded_features_realpaths);
- vm->top_self = rb_gc_location(vm->top_self);
vm->orig_progname = rb_gc_location(vm->orig_progname);
- rb_gc_update_tbl_refs(vm->overloaded_cme_table);
+ if (vm->root_box)
+ rb_box_gc_update_references(vm->root_box);
+ if (vm->main_box)
+ rb_box_gc_update_references(vm->main_box);
+
+ rb_gc_update_values(RUBY_NSIG, vm->trap_list.cmd);
if (vm->coverages) {
vm->coverages = rb_gc_location(vm->coverages);
@@ -2733,7 +3269,7 @@ rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx)
VALUE *p = ec->vm_stack;
VALUE *sp = ec->cfp->sp;
while (p < sp) {
- if (!rb_special_const_p(*p)) {
+ if (!RB_SPECIAL_CONST_P(*p)) {
cb(*p, ctx);
}
p++;
@@ -2752,6 +3288,8 @@ vm_mark_negative_cme(VALUE val, void *dmy)
return ID_TABLE_CONTINUE;
}
+void rb_thread_sched_mark_zombies(rb_vm_t *vm);
+
void
rb_vm_mark(void *ptr)
{
@@ -2760,8 +3298,7 @@ rb_vm_mark(void *ptr)
if (ptr) {
rb_vm_t *vm = ptr;
rb_ractor_t *r = 0;
- long i, len;
- const VALUE *obj_ary;
+ long i;
ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
// ractor.set only contains blocking or running ractors
@@ -2770,43 +3307,28 @@ rb_vm_mark(void *ptr)
rb_gc_mark(rb_ractor_self(r));
}
- rb_gc_mark_movable(vm->mark_object_ary);
-
- len = RARRAY_LEN(vm->mark_object_ary);
- obj_ary = RARRAY_CONST_PTR(vm->mark_object_ary);
- for (i=0; i < len; i++) {
- const VALUE *ptr;
- long j, jlen;
-
- rb_gc_mark(*obj_ary);
- jlen = RARRAY_LEN(*obj_ary);
- ptr = RARRAY_CONST_PTR(*obj_ary);
- for (j=0; j < jlen; j++) {
- rb_gc_mark(*ptr++);
- }
- obj_ary++;
+ for (struct global_object_list *list = vm->global_object_list; list; list = list->next) {
+ rb_gc_mark_maybe(*list->varptr);
}
- rb_gc_mark_movable(vm->load_path);
- rb_gc_mark_movable(vm->load_path_snapshot);
- RUBY_MARK_MOVABLE_UNLESS_NULL(vm->load_path_check_cache);
- rb_gc_mark_movable(vm->expanded_load_path);
- rb_gc_mark_movable(vm->loaded_features);
- rb_gc_mark_movable(vm->loaded_features_snapshot);
- rb_gc_mark_movable(vm->loaded_features_realpaths);
- rb_gc_mark_movable(vm->top_self);
- rb_gc_mark_movable(vm->orig_progname);
- RUBY_MARK_MOVABLE_UNLESS_NULL(vm->coverages);
- RUBY_MARK_MOVABLE_UNLESS_NULL(vm->me2counter);
- /* Prevent classes from moving */
- rb_mark_tbl(vm->defined_module_hash);
+ rb_gc_mark_movable(vm->self);
- if (vm->loading_table) {
- rb_mark_tbl(vm->loading_table);
+ if (vm->root_box) {
+ rb_box_entry_mark(vm->root_box);
}
+ if (vm->main_box) {
+ rb_box_entry_mark(vm->main_box);
+ }
+
+ rb_gc_mark_movable(vm->mark_object_ary);
+ rb_gc_mark_movable(vm->orig_progname);
+ rb_gc_mark_movable(vm->coverages);
+ rb_gc_mark_movable(vm->me2counter);
rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd);
+ rb_hook_list_mark(&vm->global_hooks);
+
rb_id_table_foreach_values(vm->negative_cme_table, vm_mark_negative_cme, NULL);
rb_mark_tbl_no_pin(vm->overloaded_cme_table);
for (i=0; i<VM_GLOBAL_CC_CACHE_TABLE_SIZE; i++) {
@@ -2822,7 +3344,7 @@ rb_vm_mark(void *ptr)
}
}
- mjit_mark();
+ rb_thread_sched_mark_zombies(vm);
}
RUBY_MARK_LEAVE("vm");
@@ -2836,68 +3358,102 @@ rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls,
VALUE exc = rb_exc_new3(cls, rb_obj_freeze(mesg));
OBJ_FREEZE(exc);
((VALUE *)vm->special_exceptions)[sp] = exc;
- rb_gc_register_mark_object(exc);
+ rb_vm_register_global_object(exc);
}
-int
-rb_vm_add_root_module(VALUE module)
-{
- rb_vm_t *vm = GET_VM();
-
- st_insert(vm->defined_module_hash, (st_data_t)module, (st_data_t)module);
-
- return TRUE;
-}
-
-static int
-free_loading_table_entry(st_data_t key, st_data_t value, st_data_t arg)
-{
- xfree((char *)key);
- return ST_DELETE;
-}
+void rb_objspace_free_objects(void *objspace);
int
ruby_vm_destruct(rb_vm_t *vm)
{
RUBY_FREE_ENTER("vm");
+ ruby_vm_during_cleanup = true;
if (vm) {
rb_thread_t *th = vm->ractor.main_thread;
- struct rb_objspace *objspace = vm->objspace;
- vm->ractor.main_thread = NULL;
- if (th) {
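+ // With RUBY_FREE_AT_EXIT enabled, proactively free VM-owned tables and
+ // caches so that leak checkers can report a clean shutdown.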
+ if (rb_free_at_exit) {
+ rb_free_encoded_insn_data();
+ rb_free_global_enc_table();
+ rb_free_loaded_builtin_table();
+ rb_free_global_symbol_table();
+
+ rb_free_shared_fiber_pool();
+ rb_free_transcoder_table();
+ rb_free_vm_opt_tables();
+ rb_free_warning();
+ rb_free_rb_global_tbl();
+
+ rb_id_table_free(vm->negative_cme_table);
+ st_free_table(vm->overloaded_cme_table);
+
+ // TODO: Can this be ignored for classext->m_tbl?
+ // rb_id_table_free(RCLASS(rb_mRubyVMFrozenCore)->m_tbl);
+
+ st_free_table(vm->static_ext_inits);
+
+ rb_vm_postponed_job_free();
+
+ rb_id_table_free(vm->constant_cache);
+ set_free_table(vm->unused_block_warning_table);
+
+ rb_thread_free_native_thread(th);
+
+#ifndef HAVE_SETPROCTITLE
+ ruby_free_proctitle();
+#endif
+ }
+ else {
rb_fiber_reset_root_local_storage(th);
thread_free(th);
}
+
+ struct rb_objspace *objspace = vm->gc.objspace;
+
rb_vm_living_threads_init(vm);
ruby_vm_run_at_exit_hooks(vm);
- if (vm->loading_table) {
- st_foreach(vm->loading_table, free_loading_table_entry, 0);
- st_free_table(vm->loading_table);
- vm->loading_table = 0;
+ if (vm->ci_table) {
+ st_free_table(vm->ci_table);
+ vm->ci_table = NULL;
}
- if (vm->frozen_strings) {
- st_free_table(vm->frozen_strings);
- vm->frozen_strings = 0;
+ if (vm->cc_refinement_table) {
+ rb_set_free_table(vm->cc_refinement_table);
+ vm->cc_refinement_table = NULL;
}
RB_ALTSTACK_FREE(vm->main_altstack);
+
+ struct global_object_list *next;
+ for (struct global_object_list *list = vm->global_object_list; list; list = next) {
+ next = list->next;
+ xfree(list);
+ }
+
if (objspace) {
+ if (rb_free_at_exit) {
+ rb_objspace_free_objects(objspace);
+ rb_free_generic_fields_tbl_();
+ rb_free_default_rand_key();
+
+ ruby_mimfree(th);
+ }
rb_objspace_free(objspace);
}
- rb_native_mutex_destroy(&vm->waitpid_lock);
rb_native_mutex_destroy(&vm->workqueue_lock);
/* after freeing objspace, you *can't* use ruby_xfree() */
ruby_mimfree(vm);
ruby_current_vm_ptr = NULL;
+
+ if (rb_free_at_exit) {
+ rb_shape_free_all();
+#if USE_YJIT
+ rb_yjit_free_at_exit();
+#endif
+ }
}
RUBY_FREE_LEAVE("vm");
return 0;
}
-size_t rb_vm_memsize_waiting_list(struct ccan_list_head *waiting_list); // process.c
-size_t rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds); // thread.c
-size_t rb_vm_memsize_postponed_job_buffer(void); // vm_trace.c
size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c
// Used for VM memsize reporting. Returns the size of the at_exit list by
@@ -2905,7 +3461,7 @@ size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c
static enum rb_id_table_iterator_result
vm_memsize_constant_cache_i(ID id, VALUE ics, void *size)
{
- *((size_t *) size) += rb_st_memsize((st_table *) ics);
+ *((size_t *) size) += rb_set_memsize((set_table *) ics);
return ID_TABLE_CONTINUE;
}
@@ -2952,20 +3508,14 @@ vm_memsize(const void *ptr)
return (
sizeof(rb_vm_t) +
- rb_vm_memsize_waiting_list(&vm->waiting_pids) +
- rb_vm_memsize_waiting_list(&vm->waiting_grps) +
- rb_vm_memsize_waiting_fds(&vm->waiting_fds) +
- rb_st_memsize(vm->loaded_features_index) +
- rb_st_memsize(vm->loading_table) +
- rb_st_memsize(vm->ensure_rollback_table) +
- rb_vm_memsize_postponed_job_buffer() +
+ rb_vm_memsize_postponed_job_queue() +
rb_vm_memsize_workqueue(&vm->workqueue) +
- rb_st_memsize(vm->defined_module_hash) +
vm_memsize_at_exit_list(vm->at_exit) +
- rb_st_memsize(vm->frozen_strings) +
+ rb_st_memsize(vm->ci_table) +
vm_memsize_builtin_function_table(vm->builtin_function_table) +
rb_id_table_memsize(vm->negative_cme_table) +
rb_st_memsize(vm->overloaded_cme_table) +
+ rb_set_memsize(vm->cc_refinement_table) +
vm_memsize_constant_cache()
);
@@ -3059,7 +3609,6 @@ vm_default_params_setup(rb_vm_t *vm)
static void
vm_init2(rb_vm_t *vm)
{
- MEMZERO(vm, rb_vm_t, 1);
rb_vm_living_threads_init(vm);
vm->thread_report_on_exception = 1;
vm->src_encoding_index = -1;
@@ -3110,6 +3659,9 @@ rb_execution_context_update(rb_execution_context_t *ec)
}
ec->storage = rb_gc_location(ec->storage);
+
+ ec->gen_fields_cache.obj = rb_gc_location(ec->gen_fields_cache.obj);
+ ec->gen_fields_cache.fields_obj = rb_gc_location(ec->gen_fields_cache.fields_obj);
}
static enum rb_id_table_iterator_result
@@ -3130,28 +3682,34 @@ rb_execution_context_mark(const rb_execution_context_t *ec)
rb_control_frame_t *cfp = ec->cfp;
rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
- VM_ASSERT(sp == ec->cfp->sp);
- rb_gc_mark_vm_stack_values((long)(sp - p), p);
+ for (long i = 0; i < (long)(sp - p); i++) {
+ rb_gc_mark_movable(p[i]);
+ }
while (cfp != limit_cfp) {
const VALUE *ep = cfp->ep;
VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep));
- if (VM_FRAME_TYPE(cfp) != VM_FRAME_MAGIC_DUMMY) {
- rb_gc_mark_movable(cfp->self);
- rb_gc_mark_movable((VALUE)cfp->iseq);
- rb_gc_mark_movable((VALUE)cfp->block_code);
+ rb_gc_mark_movable(cfp->self);
+ rb_gc_mark_movable((VALUE)cfp->iseq);
+ rb_gc_mark_movable((VALUE)cfp->block_code);
- if (!VM_ENV_LOCAL_P(ep)) {
- const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
- if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
- rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]);
- }
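+ // A local frame bound to a user-defined box keeps the box's wrapper object alive.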
+ if (VM_ENV_LOCAL_P(ep) && VM_ENV_BOXED_P(ep)) {
+ const rb_box_t *box = VM_ENV_BOX(ep);
+ if (BOX_USER_P(box)) {
+ rb_gc_mark_movable(box->box_object);
+ }
+ }
- if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
- rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]);
- rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]);
- }
+ if (!VM_ENV_LOCAL_P(ep)) {
+ const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
+ if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
+ rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]);
+ }
+
+ if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
+ rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]);
+ rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]);
}
}
@@ -3163,27 +3721,25 @@ rb_execution_context_mark(const rb_execution_context_t *ec)
if (ec->machine.stack_start && ec->machine.stack_end &&
ec != GET_EC() /* marked for current ec at the first stage of marking */
) {
- rb_gc_mark_machine_stack(ec);
- rb_gc_mark_locations((VALUE *)&ec->machine.regs,
- (VALUE *)(&ec->machine.regs) +
- sizeof(ec->machine.regs) / (sizeof(VALUE)));
+ rb_gc_mark_machine_context(ec);
}
- RUBY_MARK_UNLESS_NULL(ec->errinfo);
- RUBY_MARK_UNLESS_NULL(ec->root_svar);
+ rb_gc_mark(ec->errinfo);
+ rb_gc_mark(ec->root_svar);
if (ec->local_storage) {
rb_id_table_foreach_values(ec->local_storage, mark_local_storage_i, NULL);
}
- RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash);
- RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash_for_trace);
- RUBY_MARK_UNLESS_NULL(ec->private_const_reference);
+ rb_gc_mark(ec->local_storage_recursive_hash);
+ rb_gc_mark(ec->local_storage_recursive_hash_for_trace);
+ rb_gc_mark(ec->private_const_reference);
- RUBY_MARK_MOVABLE_UNLESS_NULL(ec->storage);
+ rb_gc_mark_movable(ec->storage);
}
void rb_fiber_mark_self(rb_fiber_t *fib);
void rb_fiber_update_self(rb_fiber_t *fib);
void rb_threadptr_root_fiber_setup(rb_thread_t *th);
+void rb_root_fiber_obj_setup(rb_thread_t *th);
void rb_threadptr_root_fiber_release(rb_thread_t *th);
static void
@@ -3192,10 +3748,6 @@ thread_compact(void *ptr)
rb_thread_t *th = ptr;
th->self = rb_gc_location(th->self);
-
- if (!th->root_fiber) {
- rb_execution_context_update(th->ec);
- }
}
static void
@@ -3203,14 +3755,18 @@ thread_mark(void *ptr)
{
rb_thread_t *th = ptr;
RUBY_MARK_ENTER("thread");
- rb_fiber_mark_self(th->ec->fiber_ptr);
+
+ // ec is null when setting up the thread in rb_threadptr_root_fiber_setup
+ if (th->ec) {
+ rb_fiber_mark_self(th->ec->fiber_ptr);
+ }
/* mark ruby objects */
switch (th->invoke_type) {
case thread_invoke_type_proc:
case thread_invoke_type_ractor_proc:
- RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.proc);
- RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.args);
+ rb_gc_mark(th->invoke_arg.proc.proc);
+ rb_gc_mark(th->invoke_arg.proc.args);
break;
case thread_invoke_type_func:
rb_gc_mark_maybe((VALUE)th->invoke_arg.func.arg);
@@ -3220,31 +3776,36 @@ thread_mark(void *ptr)
}
rb_gc_mark(rb_ractor_self(th->ractor));
- RUBY_MARK_UNLESS_NULL(th->thgroup);
- RUBY_MARK_UNLESS_NULL(th->value);
- RUBY_MARK_UNLESS_NULL(th->pending_interrupt_queue);
- RUBY_MARK_UNLESS_NULL(th->pending_interrupt_mask_stack);
- RUBY_MARK_UNLESS_NULL(th->top_self);
- RUBY_MARK_UNLESS_NULL(th->top_wrapper);
+ rb_gc_mark(th->thgroup);
+ rb_gc_mark(th->value);
+ rb_gc_mark(th->pending_interrupt_queue);
+ rb_gc_mark(th->pending_interrupt_mask_stack);
+ rb_gc_mark(th->top_self);
+ rb_gc_mark(th->top_wrapper);
if (th->root_fiber) rb_fiber_mark_self(th->root_fiber);
RUBY_ASSERT(th->ec == rb_fiberptr_get_ec(th->ec->fiber_ptr));
- RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
- RUBY_MARK_UNLESS_NULL(th->last_status);
- RUBY_MARK_UNLESS_NULL(th->locking_mutex);
- RUBY_MARK_UNLESS_NULL(th->name);
+ rb_gc_mark(th->last_status);
+ rb_gc_mark(th->locking_mutex);
+ rb_gc_mark(th->name);
- RUBY_MARK_UNLESS_NULL(th->scheduler);
+ rb_gc_mark(th->scheduler);
+
+ rb_threadptr_interrupt_exec_task_mark(th);
RUBY_MARK_LEAVE("thread");
}
+void rb_threadptr_sched_free(rb_thread_t *th); // thread_*.c
+
static void
thread_free(void *ptr)
{
rb_thread_t *th = ptr;
RUBY_FREE_ENTER("thread");
+ rb_threadptr_sched_free(th);
+
if (th->locking_mutex != Qfalse) {
rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
}
@@ -3252,13 +3813,14 @@ thread_free(void *ptr)
rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
}
- rb_threadptr_root_fiber_release(th);
+ ruby_xfree(th->specific_storage);
if (th->vm && th->vm->ractor.main_thread == th) {
RUBY_GC_INFO("MRI main thread\n");
}
else {
- ruby_xfree(th->nt); // TODO
+ // ruby_xfree(th->nt);
+ // TODO: the MN system collects nt, but without the MN system it should be freed here.
ruby_xfree(th);
}
@@ -3305,7 +3867,7 @@ thread_alloc(VALUE klass)
return TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
}
-inline void
+void
rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
{
ec->vm_stack = stack;
@@ -3317,6 +3879,10 @@ rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
{
rb_ec_set_vm_stack(ec, stack, size);
+#if VM_CHECK_MODE > 0
+ MEMZERO(stack, VALUE, size); // malloc memory could have the VM canary in it
+#endif
+
ec->cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
vm_push_frame(ec,
@@ -3331,17 +3897,28 @@ rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
void
rb_ec_clear_vm_stack(rb_execution_context_t *ec)
{
+ // Set cfp to NULL before clearing the stack in case `thread_profile_frames`
+ // gets called in the middle of `rb_ec_set_vm_stack` via a signal handler.
+ ec->cfp = NULL;
rb_ec_set_vm_stack(ec, NULL, 0);
+}
- // Avoid dangling pointers:
- ec->cfp = NULL;
+void
+rb_ec_close(rb_execution_context_t *ec)
+{
+ // Fiber storage is not accessible from outside the running fiber, so it is safe to clear it here.
+ ec->storage = Qnil;
}
static void
th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
{
+ const rb_box_t *box = rb_current_box();
+
th->self = self;
+ ccan_list_head_init(&th->interrupt_exec_tasks);
+
rb_threadptr_root_fiber_setup(th);
/* All threads are blocking until a non-blocking fiber is scheduled */
@@ -3350,7 +3927,9 @@ th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
if (self == 0) {
size_t size = vm->default_params.thread_vm_stack_size / sizeof(VALUE);
- rb_ec_initialize_vm_stack(th->ec, ALLOC_N(VALUE, size), size);
+ VALUE *stack = ALLOC_N(VALUE, size);
+ rb_ec_initialize_vm_stack(th->ec, stack, size);
+ rb_thread_malloc_stack_set(th, stack);
}
else {
VM_ASSERT(th->ec->cfp == NULL);
@@ -3361,7 +3940,12 @@ th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
th->status = THREAD_RUNNABLE;
th->last_status = Qnil;
th->top_wrapper = 0;
- th->top_self = vm->top_self; // 0 while self == 0
+ if (box->top_self) {
+ th->top_self = box->top_self;
+ }
+ else {
+ th->top_self = 0;
+ }
th->value = Qundef;
th->ec->errinfo = Qnil;
@@ -3370,6 +3954,7 @@ th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
th->ec->local_storage_recursive_hash_for_trace = Qnil;
th->ec->storage = Qnil;
+ th->ec->ractor_id = rb_ractor_id(th->ractor);
#if OPT_CALL_THREADED_CODE
th->retval = Qundef;
@@ -3379,8 +3964,10 @@ th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
th->ext_config.ractor_safe = true;
#if USE_RUBY_DEBUG_LOG
- static rb_atomic_t thread_serial = 0;
+ static rb_atomic_t thread_serial = 1;
th->serial = RUBY_ATOMIC_FETCH_ADD(thread_serial, 1);
+
+ RUBY_DEBUG_LOG("th:%u", th->serial);
#endif
}
@@ -3391,6 +3978,7 @@ rb_thread_alloc(VALUE klass)
rb_thread_t *target_th = rb_thread_ptr(self);
target_th->ractor = GET_RACTOR();
th_init(target_th, self, target_th->vm = GET_VM());
+ rb_root_fiber_obj_setup(target_th);
return self;
}
@@ -3470,7 +4058,9 @@ kwmerge_i(VALUE key, VALUE value, VALUE hash)
static VALUE
m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
{
- REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
+ if (!NIL_P(kw)) {
+ REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
+ }
return hash;
}
@@ -3508,7 +4098,7 @@ extern size_t rb_gc_stack_maxsize;
static VALUE
sdr(VALUE self)
{
- rb_vm_bugreport(NULL);
+ rb_vm_bugreport(NULL, stderr);
return Qnil;
}
@@ -3576,6 +4166,7 @@ f_sprintf(int c, const VALUE *v, VALUE _)
return rb_f_sprintf(c, v);
}
+/* :nodoc: */
static VALUE
vm_mtbl(VALUE self, VALUE obj, VALUE sym)
{
@@ -3583,6 +4174,7 @@ vm_mtbl(VALUE self, VALUE obj, VALUE sym)
return Qnil;
}
+/* :nodoc: */
static VALUE
vm_mtbl2(VALUE self, VALUE obj, VALUE sym)
{
@@ -3658,7 +4250,7 @@ Init_VM(void)
/* FrozenCore (hidden) */
fcore = rb_class_new(rb_cBasicObject);
rb_set_class_path(fcore, rb_cRubyVM, "FrozenCore");
- RBASIC(fcore)->flags = T_ICLASS;
+ rb_vm_register_global_object(rb_class_path_cached(fcore));
klass = rb_singleton_class(fcore);
rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
@@ -3676,7 +4268,7 @@ Init_VM(void)
rb_obj_freeze(fcore);
RBASIC_CLEAR_CLASS(klass);
rb_obj_freeze(klass);
- rb_gc_register_mark_object(fcore);
+ rb_vm_register_global_object(fcore);
rb_mRubyVMFrozenCore = fcore;
/*
@@ -3876,9 +4468,6 @@ Init_VM(void)
rb_ary_push(opts, rb_str_new2("call threaded code"));
#endif
-#if OPT_STACK_CACHING
- rb_ary_push(opts, rb_str_new2("stack caching"));
-#endif
#if OPT_OPERANDS_UNIFICATION
rb_ary_push(opts, rb_str_new2("operands unification"));
#endif
@@ -3888,9 +4477,6 @@ Init_VM(void)
#if OPT_INLINE_METHOD_CACHE
rb_ary_push(opts, rb_str_new2("inline method cache"));
#endif
-#if OPT_BLOCKINLINING
- rb_ary_push(opts, rb_str_new2("block inlining"));
-#endif
/* ::RubyVM::INSTRUCTION_NAMES
* A list of bytecode instruction names in MRI.
@@ -3924,7 +4510,7 @@ Init_VM(void)
rb_vm_t *vm = ruby_current_vm_ptr;
rb_thread_t *th = GET_THREAD();
VALUE filename = rb_fstring_lit("<main>");
- const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
+ const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
// Ractor setup
rb_ractor_main_setup(vm, th->ractor, th);
@@ -3940,7 +4526,9 @@ Init_VM(void)
th->top_wrapper = 0;
th->top_self = rb_vm_top_self();
- rb_gc_register_mark_object((VALUE)iseq);
+ rb_root_fiber_obj_setup(th);
+
+ rb_vm_register_global_object((VALUE)iseq);
th->ec->cfp->iseq = iseq;
th->ec->cfp->pc = ISEQ_BODY(iseq)->iseq_encoded;
th->ec->cfp->self = th->top_self;
@@ -3953,7 +4541,9 @@ Init_VM(void)
*/
rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
- rb_objspace_gc_enable(vm->objspace);
+#ifdef _WIN32
+ rb_objspace_gc_enable(vm->gc.objspace);
+#endif
}
vm_init_redefined_flag();
@@ -3961,7 +4551,7 @@ Init_VM(void)
rb_add_method_optimized(rb_singleton_class(rb_block_param_proxy), idCall,
OPTIMIZED_METHOD_TYPE_BLOCK_CALL, 0, METHOD_VISI_PUBLIC);
rb_obj_freeze(rb_block_param_proxy);
- rb_gc_register_mark_object(rb_block_param_proxy);
+ rb_vm_register_global_object(rb_block_param_proxy);
/* vm_backtrace.c */
Init_vm_backtrace();
@@ -3974,7 +4564,8 @@ rb_vm_set_progname(VALUE filename)
rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
--cfp;
- rb_iseq_pathobj_set(cfp->iseq, rb_str_dup(filename), rb_iseq_realpath(cfp->iseq));
+ filename = rb_str_new_frozen(filename);
+ rb_iseq_pathobj_set(cfp->iseq, filename, rb_iseq_realpath(cfp->iseq));
}
extern const struct st_hash_type rb_fstring_hash_type;
@@ -3983,37 +4574,55 @@ void
Init_BareVM(void)
{
/* VM bootstrap: phase 1 */
- rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm));
- rb_thread_t * th = ruby_mimmalloc(sizeof(*th));
+ rb_vm_t *vm = ruby_mimcalloc(1, sizeof(*vm));
+ rb_thread_t *th = ruby_mimcalloc(1, sizeof(*th));
if (!vm || !th) {
fputs("[FATAL] failed to allocate memory\n", stderr);
exit(EXIT_FAILURE);
}
// setup the VM
- MEMZERO(th, rb_thread_t, 1);
vm_init2(vm);
- vm->objspace = rb_objspace_alloc();
+ rb_vm_postponed_job_queue_init(vm);
ruby_current_vm_ptr = vm;
+ rb_objspace_alloc();
vm->negative_cme_table = rb_id_table_create(16);
vm->overloaded_cme_table = st_init_numtable();
vm->constant_cache = rb_id_table_create(0);
+ vm->unused_block_warning_table = set_init_numtable();
+ vm->global_hooks.type = hook_list_type_global;
// setup main thread
th->nt = ZALLOC(struct rb_native_thread);
th->vm = vm;
th->ractor = vm->ractor.main_ractor = rb_ractor_main_alloc();
Init_native_thread(th);
+ rb_jit_cont_init();
th_init(th, 0, vm);
rb_ractor_set_current_ec(th->ractor, th->ec);
- ruby_thread_init_stack(th);
+
+ /* n.b. native_main_thread_stack_top is set by the INIT_STACK macro */
+ ruby_thread_init_stack(th, native_main_thread_stack_top);
// setup ractor system
rb_native_mutex_initialize(&vm->ractor.sync.lock);
- rb_native_cond_initialize(&vm->ractor.sync.barrier_cond);
rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
+
+ vm_opt_method_def_table = st_init_numtable();
+ vm_opt_mid_table = st_init_numtable();
+
+#ifdef RUBY_THREAD_WIN32_H
+ rb_native_cond_initialize(&vm->ractor.sync.barrier_complete_cond);
+ rb_native_cond_initialize(&vm->ractor.sync.barrier_release_cond);
+#endif
+}
+
+void
+ruby_init_stack(void *addr)
+{
+ native_main_thread_stack_top = addr;
}
#ifndef _WIN32
@@ -4021,41 +4630,137 @@ Init_BareVM(void)
#include <sys/mman.h>
#endif
-void
-Init_vm_objects(void)
+
+#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
+#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
+#endif
+
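+ // Objects registered via rb_vm_register_global_object() are stored in a
+ // linked list of fixed-size buckets; see pin_array_list_mark() for marking.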
+struct pin_array_list {
+ VALUE next;
+ long len;
+ VALUE *array;
+};
+
+static void
+pin_array_list_mark(void *data)
{
- rb_vm_t *vm = GET_VM();
+ struct pin_array_list *array = (struct pin_array_list *)data;
+ rb_gc_mark_movable(array->next);
- vm->defined_module_hash = st_init_numtable();
+ rb_gc_mark_vm_stack_values(array->len, array->array);
+}
- /* initialize mark object array, hash */
- vm->mark_object_ary = rb_ary_hidden_new(128);
- vm->loading_table = st_init_strtable();
- vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000);
-#if EXTSTATIC
- vm->static_ext_inits = st_init_strtable();
-#endif
+static void
+pin_array_list_free(void *data)
+{
+ struct pin_array_list *array = (struct pin_array_list *)data;
+ xfree(array->array);
+}
+
+static size_t
+pin_array_list_memsize(const void *data)
+{
+ return sizeof(struct pin_array_list) + (MARK_OBJECT_ARY_BUCKET_SIZE * sizeof(VALUE));
+}
+
+static void
+pin_array_list_update_references(void *data)
+{
+ struct pin_array_list *array = (struct pin_array_list *)data;
+ array->next = rb_gc_location(array->next);
+}
-#ifdef HAVE_MMAP
- vm->shape_list = (rb_shape_t *)mmap(NULL, rb_size_mul_or_raise(SHAPE_BITMAP_SIZE * 32, sizeof(rb_shape_t), rb_eRuntimeError),
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (vm->shape_list == MAP_FAILED) {
- vm->shape_list = 0;
+static const rb_data_type_t pin_array_list_type = {
+ .wrap_struct_name = "VM/pin_array_list",
+ .function = {
+ .dmark = pin_array_list_mark,
+ .dfree = pin_array_list_free,
+ .dsize = pin_array_list_memsize,
+ .dcompact = pin_array_list_update_references,
+ },
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
+};
+
+static VALUE
+pin_array_list_new(VALUE next)
+{
+ struct pin_array_list *array_list;
+ VALUE obj = TypedData_Make_Struct(0, struct pin_array_list, &pin_array_list_type, array_list);
+ RB_OBJ_WRITE(obj, &array_list->next, next);
+ array_list->array = ALLOC_N(VALUE, MARK_OBJECT_ARY_BUCKET_SIZE);
+ return obj;
+}
+
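+ // Append item to the head bucket, prepending a fresh bucket when the head is
+ // full; returns the (possibly new) head of the list.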
+static VALUE
+pin_array_list_append(VALUE obj, VALUE item)
+{
+ struct pin_array_list *array_list;
+ TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
+
+ if (array_list->len >= MARK_OBJECT_ARY_BUCKET_SIZE) {
+ obj = pin_array_list_new(obj);
+ TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
}
-#else
- vm->shape_list = xcalloc(SHAPE_BITMAP_SIZE * 32, sizeof(rb_shape_t));
-#endif
- if (!vm->shape_list) {
- rb_memerror();
+ RB_OBJ_WRITE(obj, &array_list->array[array_list->len], item);
+ array_list->len++;
+ return obj;
+}
+
+void
+rb_vm_register_global_object(VALUE obj)
+{
+ RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
+ if (RB_SPECIAL_CONST_P(obj)) {
+ return;
}
+
+ switch (RB_BUILTIN_TYPE(obj)) {
+ case T_CLASS:
+ case T_MODULE:
+ if (FL_TEST(obj, RCLASS_IS_ROOT)) {
+ return;
+ }
+ FL_SET(obj, RCLASS_IS_ROOT);
+ break;
+ default:
+ break;
+ }
+ RB_VM_LOCKING() {
+ VALUE list = GET_VM()->mark_object_ary;
+ VALUE head = pin_array_list_append(list, obj);
+ if (head != list) {
+ GET_VM()->mark_object_ary = head;
+ }
+ RB_GC_GUARD(obj);
+ }
+}
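+
+ // Usage sketch (hypothetical extension code): keep a VALUE alive and pinned
+ // for the lifetime of the process:
+ //
+ //     VALUE table = rb_ary_new();           // an extension-owned object
+ //     rb_vm_register_global_object(table);  // never collected, never moved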
+
+void
+Init_vm_objects(void)
+{
+ rb_vm_t *vm = GET_VM();
+
+ /* initialize mark object array, hash */
+ vm->mark_object_ary = pin_array_list_new(Qnil);
+ vm->ci_table = st_init_table(&vm_ci_hashtype);
+ vm->cc_refinement_table = rb_set_init_numtable();
}
-/* Stub for builtin function when not building YJIT units*/
+// Whether JIT is enabled or not, we need to load/undef `#with_jit` for other builtins.
+#include "jit_hook.rbinc"
+#include "jit_undef.rbinc"
+
+// Stub for builtin function when not building YJIT units
#if !USE_YJIT
void Init_builtin_yjit(void) {}
#endif
+// Stub for builtin function when not building ZJIT units
+#if !USE_ZJIT
+void Init_builtin_zjit(void) {}
+#endif
+
/* top self */
static VALUE
@@ -4067,17 +4772,20 @@ main_to_s(VALUE obj)
VALUE
rb_vm_top_self(void)
{
- return GET_VM()->top_self;
+ const rb_box_t *box = rb_current_box();
+ VM_ASSERT(box);
+ VM_ASSERT(box->top_self);
+ return box->top_self;
}
void
Init_top_self(void)
{
rb_vm_t *vm = GET_VM();
-
- vm->top_self = rb_obj_alloc(rb_cObject);
- rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
- rb_define_alias(rb_singleton_class(rb_vm_top_self()), "inspect", "to_s");
+ vm->root_box = (rb_box_t *)rb_root_box();
+ vm->root_box->top_self = rb_obj_alloc(rb_cObject);
+ rb_define_singleton_method(vm->root_box->top_self, "to_s", main_to_s, 0);
+ rb_define_alias(rb_singleton_class(vm->root_box->top_self), "inspect", "to_s");
}
VALUE *
@@ -4094,17 +4802,19 @@ rb_ruby_debug_ptr(void)
return &cr->debug;
}
+bool rb_free_at_exit = false;
+
+bool
+ruby_free_at_exit_p(void)
+{
+ return rb_free_at_exit;
+}
+
/* iseq.c */
VALUE rb_insn_operand_intern(const rb_iseq_t *iseq,
VALUE insn, int op_no, VALUE op,
int len, size_t pos, VALUE *pnop, VALUE child);
-st_table *
-rb_vm_fstring_table(void)
-{
- return GET_VM()->frozen_strings;
-}
-
#if VM_COLLECT_USAGE_DETAILS
#define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
@@ -4296,21 +5006,21 @@ usage_analysis_register_stop(VALUE self)
static VALUE
usage_analysis_insn_running(VALUE self)
{
- return RBOOL(ruby_vm_collect_usage_func_insn != 0);
+ return RBOOL(ruby_vm_collect_usage_func_insn != 0);
}
/* :nodoc: */
static VALUE
usage_analysis_operand_running(VALUE self)
{
- return RBOOL(ruby_vm_collect_usage_func_operand != 0);
+ return RBOOL(ruby_vm_collect_usage_func_operand != 0);
}
/* :nodoc: */
static VALUE
usage_analysis_register_running(VALUE self)
{
- return RBOOL(ruby_vm_collect_usage_func_register != 0);
+ return RBOOL(ruby_vm_collect_usage_func_register != 0);
}
static VALUE
@@ -4406,18 +5116,16 @@ vm_collect_usage_register(int reg, int isset)
}
#endif
-MJIT_FUNC_EXPORTED const struct rb_callcache *
+const struct rb_callcache *
rb_vm_empty_cc(void)
{
return &vm_empty_cc;
}
-MJIT_FUNC_EXPORTED const struct rb_callcache *
+const struct rb_callcache *
rb_vm_empty_cc_for_super(void)
{
return &vm_empty_cc_for_super;
}
-#endif /* #ifndef MJIT_HEADER */
-
#include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */