Diffstat (limited to 'vm.c')
-rw-r--r--  vm.c  2781
1 file changed, 1701 insertions, 1080 deletions
diff --git a/vm.c b/vm.c
index 7f3376ce68..36f6700ad7 100644
--- a/vm.c
+++ b/vm.c
@@ -11,23 +11,28 @@
#define vm_exec rb_vm_exec
#include "eval_intern.h"
-#include "gc.h"
#include "internal.h"
+#include "internal/class.h"
#include "internal/compile.h"
#include "internal/cont.h"
#include "internal/error.h"
+#include "internal/encoding.h"
#include "internal/eval.h"
+#include "internal/gc.h"
#include "internal/inits.h"
+#include "internal/missing.h"
#include "internal/object.h"
-#include "internal/parse.h"
#include "internal/proc.h"
#include "internal/re.h"
+#include "internal/ruby_parser.h"
#include "internal/symbol.h"
#include "internal/thread.h"
+#include "internal/transcode.h"
#include "internal/vm.h"
#include "internal/sanitizers.h"
+#include "internal/variable.h"
#include "iseq.h"
-#include "mjit.h"
+#include "rjit.h"
#include "yjit.h"
#include "ruby/st.h"
#include "ruby/vm.h"
@@ -38,35 +43,31 @@
#include "vm_insnhelper.h"
#include "ractor_core.h"
#include "vm_sync.h"
+#include "shape.h"
#include "builtin.h"
-#ifndef MJIT_HEADER
#include "probes.h"
-#else
-#include "probes.dmyh"
-#endif
#include "probes_helper.h"
+#ifdef RUBY_ASSERT_CRITICAL_SECTION
+int ruby_assert_critical_section_entered = 0;
+#endif
+
+static void *native_main_thread_stack_top;
+
VALUE rb_str_concat_literals(size_t, const VALUE*);
-/* :FIXME: This #ifdef is because we build pch in case of mswin and
- * not in case of other situations. That distinction might change in
- * a future. We would better make it detectable in something better
- * than just _MSC_VER. */
-#ifdef _MSC_VER
-RUBY_FUNC_EXPORTED
-#else
-MJIT_FUNC_EXPORTED
-#endif
-VALUE vm_exec(rb_execution_context_t *, bool);
+VALUE vm_exec(rb_execution_context_t *);
+
+extern const char *const rb_debug_counter_names[];
PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *));
static inline const VALUE *
VM_EP_LEP(const VALUE *ep)
{
while (!VM_ENV_LOCAL_P(ep)) {
- ep = VM_ENV_PREV_EP(ep);
+ ep = VM_ENV_PREV_EP(ep);
}
return ep;
}
@@ -75,19 +76,19 @@ static inline const rb_control_frame_t *
rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE * const ep)
{
if (!ep) {
- return NULL;
+ return NULL;
}
else {
- const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
+ const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
- while (cfp < eocfp) {
- if (cfp->ep == ep) {
- return cfp;
- }
- cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
- }
+ while (cfp < eocfp) {
+ if (cfp->ep == ep) {
+ return cfp;
+ }
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ }
- return NULL;
+ return NULL;
}
}
@@ -139,10 +140,10 @@ VM_CFP_IN_HEAP_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp
VM_ASSERT(start != NULL);
if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
- return FALSE;
+ return FALSE;
}
else {
- return TRUE;
+ return TRUE;
}
}
@@ -154,10 +155,10 @@ VM_EP_IN_HEAP_P(const rb_execution_context_t *ec, const VALUE *ep)
VM_ASSERT(start != NULL);
if (start <= ep && ep < end) {
- return FALSE;
+ return FALSE;
}
else {
- return TRUE;
+ return TRUE;
}
}
@@ -165,19 +166,19 @@ static int
vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep)
{
if (VM_EP_IN_HEAP_P(ec, ep)) {
- VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */
+ VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */
- if (envval != Qundef) {
- const rb_env_t *env = (const rb_env_t *)envval;
+ if (!UNDEF_P(envval)) {
+ const rb_env_t *env = (const rb_env_t *)envval;
- VM_ASSERT(vm_assert_env(envval));
- VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
- VM_ASSERT(env->ep == ep);
- }
- return TRUE;
+ VM_ASSERT(vm_assert_env(envval));
+ VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
+ VM_ASSERT(env->ep == ep);
+ }
+ return TRUE;
}
else {
- return FALSE;
+ return FALSE;
}
}
@@ -202,7 +203,7 @@ VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
{
rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
- VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 8 + VM_DEBUG_BP_CHECK ? 1 : 0);
+ VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 7 + VM_DEBUG_BP_CHECK ? 1 : 0);
return cfp;
}
@@ -223,16 +224,15 @@ vm_passed_block_handler(rb_execution_context_t *ec)
}
static rb_cref_t *
-vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int use_prev_prev)
+vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int use_prev_prev, int singleton)
{
VALUE refinements = Qnil;
int omod_shared = FALSE;
- rb_cref_t *cref;
/* scope */
union {
- rb_scope_visibility_t visi;
- VALUE value;
+ rb_scope_visibility_t visi;
+ VALUE value;
} scope_visi;
scope_visi.visi.method_visi = visi;
@@ -240,32 +240,38 @@ vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_
/* refinements */
if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why CREF_NEXT(cref) is 1? */) {
- refinements = CREF_REFINEMENTS(prev_cref);
+ refinements = CREF_REFINEMENTS(prev_cref);
- if (!NIL_P(refinements)) {
- omod_shared = TRUE;
- CREF_OMOD_SHARED_SET(prev_cref);
- }
+ if (!NIL_P(refinements)) {
+ omod_shared = TRUE;
+ CREF_OMOD_SHARED_SET(prev_cref);
+ }
}
- cref = (rb_cref_t *)rb_imemo_new(imemo_cref, klass, (VALUE)(use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref), scope_visi.value, refinements);
+ VM_ASSERT(singleton || klass);
+
+ rb_cref_t *cref = IMEMO_NEW(rb_cref_t, imemo_cref, refinements);
+ cref->klass_or_self = klass;
+ cref->next = use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref;
+ *((rb_scope_visibility_t *)&cref->scope_visi) = scope_visi.visi;
if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
if (omod_shared) CREF_OMOD_SHARED_SET(cref);
+ if (singleton) CREF_SINGLETON_SET(cref);
return cref;
}
static rb_cref_t *
-vm_cref_new(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
+vm_cref_new(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int singleton)
{
- return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, FALSE);
+ return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, FALSE, singleton);
}
static rb_cref_t *
vm_cref_new_use_prev(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
{
- return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, TRUE);
+ return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, TRUE, FALSE);
}
static int
@@ -277,18 +283,18 @@ ref_delete_symkey(VALUE key, VALUE value, VALUE unused)
static rb_cref_t *
vm_cref_dup(const rb_cref_t *cref)
{
- VALUE klass = CREF_CLASS(cref);
const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
+ int singleton = CREF_SINGLETON(cref);
- new_cref = vm_cref_new(klass, visi->method_visi, visi->module_func, next_cref, pushed_by_eval);
+ new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
if (!NIL_P(CREF_REFINEMENTS(cref))) {
VALUE ref = rb_hash_dup(CREF_REFINEMENTS(cref));
rb_hash_foreach(ref, ref_delete_symkey, Qnil);
CREF_REFINEMENTS_SET(new_cref, ref);
- CREF_OMOD_SHARED_UNSET(new_cref);
+ CREF_OMOD_SHARED_UNSET(new_cref);
}
return new_cref;
@@ -298,12 +304,12 @@ vm_cref_dup(const rb_cref_t *cref)
rb_cref_t *
rb_vm_cref_dup_without_refinements(const rb_cref_t *cref)
{
- VALUE klass = CREF_CLASS(cref);
const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
+ int singleton = CREF_SINGLETON(cref);
- new_cref = vm_cref_new(klass, visi->method_visi, visi->module_func, next_cref, pushed_by_eval);
+ new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
if (!NIL_P(CREF_REFINEMENTS(cref))) {
CREF_REFINEMENTS_SET(new_cref, Qnil);
@@ -316,11 +322,11 @@ rb_vm_cref_dup_without_refinements(const rb_cref_t *cref)
static rb_cref_t *
vm_cref_new_toplevel(rb_execution_context_t *ec)
{
- rb_cref_t *cref = vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE /* toplevel visibility is private */, FALSE, NULL, FALSE);
+ rb_cref_t *cref = vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE /* toplevel visibility is private */, FALSE, NULL, FALSE, FALSE);
VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;
if (top_wrapper) {
- cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE);
+ cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE, FALSE);
}
return cref;
@@ -338,8 +344,8 @@ vm_cref_dump(const char *mesg, const rb_cref_t *cref)
ruby_debug_printf("vm_cref_dump: %s (%p)\n", mesg, (void *)cref);
while (cref) {
- ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref))));
- cref = CREF_NEXT(cref);
+ ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref))));
+ cref = CREF_NEXT(cref);
}
}
@@ -370,32 +376,162 @@ extern VALUE rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, V
const rb_callable_method_entry_t *me);
static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
-#include "vm_insnhelper.c"
+#if USE_YJIT
+// Counter to serve as a proxy for execution time, total number of calls
+static uint64_t yjit_total_entry_hits = 0;
+
+// Number of calls used to estimate how hot an ISEQ is
+#define YJIT_CALL_COUNT_INTERV 20u
+
+/// Test whether we are ready to compile an ISEQ or not
+static inline bool
+rb_yjit_threshold_hit(const rb_iseq_t *iseq, uint64_t entry_calls)
+{
+ yjit_total_entry_hits += 1;
+
+ // Record the number of calls at the beginning of the interval
+ if (entry_calls + YJIT_CALL_COUNT_INTERV == rb_yjit_call_threshold) {
+ iseq->body->yjit_calls_at_interv = yjit_total_entry_hits;
+ }
+
+ // Try to estimate the total time taken (total number of calls) to reach 20 calls to this ISEQ
+ // This gives us a ratio of how hot/cold this ISEQ is
+ if (entry_calls == rb_yjit_call_threshold) {
+ // We expect threshold 1 to compile everything immediately
+ if (rb_yjit_call_threshold < YJIT_CALL_COUNT_INTERV) {
+ return true;
+ }
+
+ uint64_t num_calls = yjit_total_entry_hits - iseq->body->yjit_calls_at_interv;
+
+ // Reject ISEQs that don't get called often enough
+ if (num_calls > rb_yjit_cold_threshold) {
+ rb_yjit_incr_counter("cold_iseq_entry");
+ return false;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+#else
+#define rb_yjit_threshold_hit(iseq, entry_calls) false
+#endif
+
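
A worked restatement of the heuristic above, as a minimal standalone sketch. The threshold values are illustrative assumptions, not values taken from this patch:

    #include <stdint.h>
    #include <stdbool.h>

    static uint64_t total_hits = 0;       /* stands in for yjit_total_entry_hits */
    static uint64_t calls_at_interv = 0;  /* per-ISEQ yjit_calls_at_interv */
    #define CALL_THRESHOLD 30u            /* assumed rb_yjit_call_threshold */
    #define COLD_THRESHOLD 200000u        /* assumed rb_yjit_cold_threshold */

    /* Mirrors rb_yjit_threshold_hit for one ISEQ, assuming CALL_THRESHOLD >= 20
     * (the real code special-cases smaller thresholds). The 10th call (30 - 20)
     * snapshots the global counter; the 30th call measures how many entry hits
     * across ALL ISEQs elapsed meanwhile. A large gap means this ISEQ is called
     * rarely relative to the rest of the program, i.e. it is cold. */
    static bool
    threshold_hit_sketch(uint64_t entry_calls)
    {
        total_hits += 1;
        if (entry_calls + 20u == CALL_THRESHOLD) calls_at_interv = total_hits;
        if (entry_calls == CALL_THRESHOLD)
            return (total_hits - calls_at_interv) <= COLD_THRESHOLD;
        return false;
    }
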
+#if USE_RJIT || USE_YJIT
+// Generate JIT code that supports the following kinds of ISEQ entries:
+// * The first ISEQ on vm_exec (e.g. <main>, or Ruby methods/blocks
+// called by a C method). The current frame has VM_FRAME_FLAG_FINISH.
+// The current vm_exec stops if JIT code returns a non-Qundef value.
+// * ISEQs called by the interpreter on vm_sendish (e.g. Ruby methods or
+// blocks called by a Ruby frame that isn't compiled or side-exited).
+// The current frame doesn't have VM_FRAME_FLAG_FINISH. The current
+// vm_exec does NOT stop whether JIT code returns Qundef or not.
+static inline rb_jit_func_t
+jit_compile(rb_execution_context_t *ec)
+{
+ const rb_iseq_t *iseq = ec->cfp->iseq;
+ struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
+ bool yjit_enabled = rb_yjit_enabled_p;
+ if (!(yjit_enabled || rb_rjit_call_p)) {
+ return NULL;
+ }
+
+ // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
+ if (body->jit_entry == NULL) {
+ body->jit_entry_calls++;
+ if (yjit_enabled) {
+ if (rb_yjit_threshold_hit(iseq, body->jit_entry_calls)) {
+ rb_yjit_compile_iseq(iseq, ec, false);
+ }
+ }
+ else if (body->jit_entry_calls == rb_rjit_call_threshold()) {
+ rb_rjit_compile(iseq);
+ }
+ }
+ return body->jit_entry;
+}
-#ifndef MJIT_HEADER
+// Execute JIT code compiled by jit_compile()
+static inline VALUE
+jit_exec(rb_execution_context_t *ec)
+{
+ rb_jit_func_t func = jit_compile(ec);
+ if (func) {
+ // Call the JIT code
+ return func(ec, ec->cfp);
+ }
+ else {
+ return Qundef;
+ }
+}
+#else
+# define jit_compile(ec) ((rb_jit_func_t)0)
+# define jit_exec(ec) Qundef
+#endif
+
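
A heavily simplified sketch of how the two helpers above slot into the interpreter entry point: try the JIT first, then fall back to bytecode interpretation when no compiled code is available. This is not the real vm_exec, which also handles tags and exceptions, and the vm_exec_core signature is simplified:

    /* Hedged sketch; EC_EXEC_TAG/error handling from the real vm_exec is omitted. */
    static VALUE
    vm_exec_dispatch_sketch(rb_execution_context_t *ec)
    {
        VALUE result = jit_exec(ec);   /* counts the call, maybe compiles, maybe runs */
        if (UNDEF_P(result)) {
            result = vm_exec_core(ec); /* interpret from ec->cfp->pc (signature simplified) */
        }
        return result;
    }
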
+#if USE_YJIT
+// Generate JIT code that supports the following kind of ISEQ entry:
+// * The first ISEQ pushed by vm_exec_handle_exception. The frame would
+// point to a location specified by a catch table, and it doesn't have
+// VM_FRAME_FLAG_FINISH. The current vm_exec stops if JIT code returns
+// a non-Qundef value. So you should not return a non-Qundef value
+// until ec->cfp is changed to a frame with VM_FRAME_FLAG_FINISH.
+static inline rb_jit_func_t
+jit_compile_exception(rb_execution_context_t *ec)
+{
+ const rb_iseq_t *iseq = ec->cfp->iseq;
+ struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
+ if (!rb_yjit_enabled_p) {
+ return NULL;
+ }
+
+ // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
+ if (body->jit_exception == NULL) {
+ body->jit_exception_calls++;
+ if (body->jit_exception_calls == rb_yjit_call_threshold) {
+ rb_yjit_compile_iseq(iseq, ec, true);
+ }
+ }
+
+ return body->jit_exception;
+}
+
+// Execute JIT code compiled by jit_compile_exception()
+static inline VALUE
+jit_exec_exception(rb_execution_context_t *ec)
+{
+ rb_jit_func_t func = jit_compile_exception(ec);
+ if (func) {
+ // Call the JIT code
+ return func(ec, ec->cfp);
+ }
+ else {
+ return Qundef;
+ }
+}
+#else
+# define jit_compile_exception(ec) ((rb_jit_func_t)0)
+# define jit_exec_exception(ec) Qundef
+#endif
+
+static void add_opt_method_entry(const rb_method_entry_t *me);
+
+#include "vm_insnhelper.c"
#include "vm_exec.c"
#include "vm_method.c"
-#endif /* #ifndef MJIT_HEADER */
#include "vm_eval.c"
-#ifndef MJIT_HEADER
#define PROCDEBUG 0
-rb_serial_t
-rb_next_class_serial(void)
-{
- rb_serial_t class_serial = NEXT_CLASS_SERIAL();
- return class_serial;
-}
-
VALUE rb_cRubyVM;
VALUE rb_cThread;
VALUE rb_mRubyVMFrozenCore;
VALUE rb_block_param_proxy;
-#define ruby_vm_redefined_flag GET_VM()->redefined_flag
VALUE ruby_vm_const_missing_count = 0;
rb_vm_t *ruby_current_vm_ptr = NULL;
rb_ractor_t *ruby_single_main_ractor;
@@ -404,19 +540,32 @@ bool ruby_vm_keep_script_lines;
#ifdef RB_THREAD_LOCAL_SPECIFIER
RB_THREAD_LOCAL_SPECIFIER rb_execution_context_t *ruby_current_ec;
-#ifdef __APPLE__
- rb_execution_context_t *
- rb_current_ec(void)
- {
- return ruby_current_ec;
- }
- void
- rb_current_ec_set(rb_execution_context_t *ec)
- {
- ruby_current_ec = ec;
- }
+#ifdef RUBY_NT_SERIAL
+RB_THREAD_LOCAL_SPECIFIER rb_atomic_t ruby_nt_serial;
#endif
+// no-inline decl on thread_pthread.h
+rb_execution_context_t *
+rb_current_ec_noinline(void)
+{
+ return ruby_current_ec;
+}
+
+void
+rb_current_ec_set(rb_execution_context_t *ec)
+{
+ ruby_current_ec = ec;
+}
+
+
+#ifdef __APPLE__
+rb_execution_context_t *
+rb_current_ec(void)
+{
+ return ruby_current_ec;
+}
+
+#endif
#else
native_tls_key_t ruby_current_ec_key;
#endif
@@ -425,8 +574,8 @@ rb_event_flag_t ruby_vm_event_flags;
rb_event_flag_t ruby_vm_event_enabled_global_flags;
unsigned int ruby_vm_event_local_num;
-rb_serial_t ruby_vm_global_constant_state = 1;
-rb_serial_t ruby_vm_class_serial = 1;
+rb_serial_t ruby_vm_constant_cache_invalidations = 0;
+rb_serial_t ruby_vm_constant_cache_misses = 0;
rb_serial_t ruby_vm_global_cvar_state = 1;
static const struct rb_callcache vm_empty_cc = {
@@ -439,6 +588,16 @@ static const struct rb_callcache vm_empty_cc = {
}
};
+static const struct rb_callcache vm_empty_cc_for_super = {
+ .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
+ .klass = Qfalse,
+ .cme_ = NULL,
+ .call_ = vm_call_super_method,
+ .aux_ = {
+ .v = Qfalse,
+ }
+};
+
static void thread_free(void *ptr);
void
@@ -447,42 +606,44 @@ rb_vm_inc_const_missing_count(void)
ruby_vm_const_missing_count +=1;
}
-MJIT_FUNC_EXPORTED int
+int
rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
- struct ruby_dtrace_method_hook_args *args)
+ struct ruby_dtrace_method_hook_args *args)
{
enum ruby_value_type type;
if (!klass) {
- if (!ec) ec = GET_EC();
- if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass)
- return FALSE;
+ if (!ec) ec = GET_EC();
+ if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass)
+ return FALSE;
}
if (RB_TYPE_P(klass, T_ICLASS)) {
- klass = RBASIC(klass)->klass;
+ klass = RBASIC(klass)->klass;
}
- else if (FL_TEST(klass, FL_SINGLETON)) {
- klass = rb_attr_get(klass, id__attached__);
- if (NIL_P(klass)) return FALSE;
+ else if (RCLASS_SINGLETON_P(klass)) {
+ klass = RCLASS_ATTACHED_OBJECT(klass);
+ if (NIL_P(klass)) return FALSE;
}
type = BUILTIN_TYPE(klass);
if (type == T_CLASS || type == T_ICLASS || type == T_MODULE) {
- VALUE name = rb_class_path(klass);
- const char *classname, *filename;
- const char *methodname = rb_id2name(id);
- if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) {
- if (NIL_P(name) || !(classname = StringValuePtr(name)))
- classname = "<unknown>";
- args->classname = classname;
- args->methodname = methodname;
- args->filename = filename;
- args->klass = klass;
- args->name = name;
- return TRUE;
- }
+ VALUE name = rb_class_path(klass);
+ const char *classname, *filename;
+ const char *methodname = rb_id2name(id);
+ if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) {
+ if (NIL_P(name) || !(classname = StringValuePtr(name)))
+ classname = "<unknown>";
+ args->classname = classname;
+ args->methodname = methodname;
+ args->filename = filename;
+ args->klass = klass;
+ args->name = name;
+ return TRUE;
+ }
}
return FALSE;
}
+extern unsigned int redblack_buffer_size;
+
/*
* call-seq:
* RubyVM.stat -> Hash
@@ -491,60 +652,80 @@ rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
*
* Returns a Hash containing implementation-dependent counters inside the VM.
*
- * This hash includes information about method/constant cache serials:
+ * This hash includes information about method/constant caches:
*
* {
- * :global_constant_state=>481,
- * :class_serial=>9029
+ * :constant_cache_invalidations=>2,
+ * :constant_cache_misses=>14,
+ * :global_cvar_state=>27
* }
*
+ * If <tt>USE_DEBUG_COUNTER</tt> is enabled, debug counters will be included.
+ *
* The contents of the hash are implementation specific and may be changed in
* the future.
*
* This method is only expected to work on C Ruby.
*/
-
static VALUE
vm_stat(int argc, VALUE *argv, VALUE self)
{
- static VALUE sym_global_constant_state, sym_class_serial, sym_global_cvar_state;
+ static VALUE sym_constant_cache_invalidations, sym_constant_cache_misses, sym_global_cvar_state, sym_next_shape_id;
+ static VALUE sym_shape_cache_size;
VALUE arg = Qnil;
VALUE hash = Qnil, key = Qnil;
if (rb_check_arity(argc, 0, 1) == 1) {
arg = argv[0];
- if (SYMBOL_P(arg))
- key = arg;
- else if (RB_TYPE_P(arg, T_HASH))
- hash = arg;
- else
- rb_raise(rb_eTypeError, "non-hash or symbol given");
+ if (SYMBOL_P(arg))
+ key = arg;
+ else if (RB_TYPE_P(arg, T_HASH))
+ hash = arg;
+ else
+ rb_raise(rb_eTypeError, "non-hash or symbol given");
}
else {
- hash = rb_hash_new();
+ hash = rb_hash_new();
}
- if (sym_global_constant_state == 0) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
- S(global_constant_state);
- S(class_serial);
- S(global_cvar_state);
+ S(constant_cache_invalidations);
+ S(constant_cache_misses);
+ S(global_cvar_state);
+ S(next_shape_id);
+ S(shape_cache_size);
#undef S
- }
#define SET(name, attr) \
if (key == sym_##name) \
- return SERIALT2NUM(attr); \
+ return SERIALT2NUM(attr); \
else if (hash != Qnil) \
- rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
+ rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
- SET(global_constant_state, ruby_vm_global_constant_state);
- SET(class_serial, ruby_vm_class_serial);
+ SET(constant_cache_invalidations, ruby_vm_constant_cache_invalidations);
+ SET(constant_cache_misses, ruby_vm_constant_cache_misses);
SET(global_cvar_state, ruby_vm_global_cvar_state);
+ SET(next_shape_id, (rb_serial_t)GET_SHAPE_TREE()->next_shape_id);
+ SET(shape_cache_size, (rb_serial_t)GET_SHAPE_TREE()->cache_size);
#undef SET
+#if USE_DEBUG_COUNTER
+ ruby_debug_counter_show_at_exit(FALSE);
+ for (size_t i = 0; i < RB_DEBUG_COUNTER_MAX; i++) {
+ const VALUE name = rb_sym_intern_ascii_cstr(rb_debug_counter_names[i]);
+ const VALUE boxed_value = SIZET2NUM(rb_debug_counter[i]);
+
+ if (key == name) {
+ return boxed_value;
+ }
+ else if (hash != Qnil) {
+ rb_hash_aset(hash, name, boxed_value);
+ }
+ }
+#endif
+
if (!NIL_P(key)) { /* matched key should return above */
- rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
+ rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
}
return hash;
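
For reference, a hypothetical C-extension snippet querying one of the new keys, equivalent to RubyVM.stat(:constant_cache_misses) at the Ruby level; the wrapper name is illustrative:

    #include "ruby.h"

    /* Calls RubyVM.stat(:constant_cache_misses) and returns the Integer. */
    static VALUE
    constant_cache_misses_sketch(void)
    {
        VALUE vm_mod = rb_const_get(rb_cObject, rb_intern("RubyVM"));
        return rb_funcall(vm_mod, rb_intern("stat"), 1,
                          ID2SYM(rb_intern("constant_cache_misses")));
    }
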
@@ -555,27 +736,27 @@ vm_stat(int argc, VALUE *argv, VALUE self)
static void
vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
{
- if (iseq->body->type != ISEQ_TYPE_TOP) {
- rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
+ if (ISEQ_BODY(iseq)->type != ISEQ_TYPE_TOP) {
+ rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
}
/* for return */
vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, rb_ec_thread_ptr(ec)->top_self,
- VM_BLOCK_HANDLER_NONE,
- (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
- iseq->body->iseq_encoded, ec->cfp->sp,
- iseq->body->local_table_size, iseq->body->stack_max);
+ VM_BLOCK_HANDLER_NONE,
+ (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
+ ISEQ_BODY(iseq)->iseq_encoded, ec->cfp->sp,
+ ISEQ_BODY(iseq)->local_table_size, ISEQ_BODY(iseq)->stack_max);
}
static void
vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block)
{
vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
- vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
- (VALUE)cref, /* cref or me */
- iseq->body->iseq_encoded,
- ec->cfp->sp, iseq->body->local_table_size,
- iseq->body->stack_max);
+ vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
+ (VALUE)cref, /* cref or me */
+ ISEQ_BODY(iseq)->iseq_encoded,
+ ec->cfp->sp, ISEQ_BODY(iseq)->local_table_size,
+ ISEQ_BODY(iseq)->stack_max);
}
static void
@@ -590,8 +771,8 @@ vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
vm_set_eval_stack(ec, iseq, 0, &bind->block);
/* save binding */
- if (iseq->body->local_table_size > 0) {
- vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));
+ if (ISEQ_BODY(iseq)->local_table_size > 0) {
+ vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));
}
}
@@ -599,51 +780,49 @@ rb_control_frame_t *
rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
- if (cfp->iseq) {
- return (rb_control_frame_t *)cfp;
- }
- cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ if (cfp->iseq) {
+ return (rb_control_frame_t *)cfp;
+ }
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}
return 0;
}
-MJIT_FUNC_EXPORTED rb_control_frame_t *
+rb_control_frame_t *
rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
- if (VM_FRAME_RUBYFRAME_P(cfp)) {
- return (rb_control_frame_t *)cfp;
- }
- cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ if (VM_FRAME_RUBYFRAME_P(cfp)) {
+ return (rb_control_frame_t *)cfp;
+ }
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}
return 0;
}
-#endif /* #ifndef MJIT_HEADER */
-
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
if (VM_FRAME_RUBYFRAME_P(cfp)) {
- return (rb_control_frame_t *)cfp;
+ return (rb_control_frame_t *)cfp;
}
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
- if (VM_FRAME_RUBYFRAME_P(cfp)) {
- return (rb_control_frame_t *)cfp;
- }
+ if (VM_FRAME_RUBYFRAME_P(cfp)) {
+ return (rb_control_frame_t *)cfp;
+ }
- if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
- break;
- }
- cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
+ break;
+ }
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}
return 0;
}
-MJIT_STATIC void
+void
rb_vm_pop_cfunc_frame(void)
{
rb_execution_context_t *ec = GET_EC();
@@ -655,22 +834,20 @@ rb_vm_pop_cfunc_frame(void)
vm_pop_frame(ec, cfp, cfp->ep);
}
-#ifndef MJIT_HEADER
-
void
rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
/* check skipped frame */
while (ec->cfp != cfp) {
#if VMDEBUG
- printf("skipped frame: %s\n", vm_frametype_name(ec->cfp));
+ printf("skipped frame: %s\n", vm_frametype_name(ec->cfp));
#endif
- if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) {
- rb_vm_pop_frame(ec);
- }
- else { /* unlikely path */
- rb_vm_pop_cfunc_frame();
- }
+ if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) {
+ rb_vm_pop_frame(ec);
+ }
+ else { /* unlikely path */
+ rb_vm_pop_cfunc_frame();
+ }
}
}
@@ -692,11 +869,11 @@ ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
rb_at_exit_list *l = vm->at_exit;
while (l) {
- rb_at_exit_list* t = l->next;
- rb_vm_at_exit_func *func = l->func;
- ruby_xfree(l);
- l = t;
- (*func)(vm);
+ rb_at_exit_list* t = l->next;
+ rb_vm_at_exit_func *func = l->func;
+ ruby_xfree(l);
+ l = t;
+ (*func)(vm);
}
}
@@ -713,9 +890,9 @@ check_env(const rb_env_t *env)
dp(env->ep[1]);
ruby_debug_printf("ep: %10p\n", (void *)env->ep);
if (rb_vm_env_prev_env(env)) {
- fputs(">>\n", stderr);
- check_env_value(rb_vm_env_prev_env(env));
- fputs("<<\n", stderr);
+ fputs(">>\n", stderr);
+ check_env_value(rb_vm_env_prev_env(env));
+ fputs("<<\n", stderr);
}
return 1;
}
@@ -724,7 +901,7 @@ static VALUE
check_env_value(const rb_env_t *env)
{
if (check_env(env)) {
- return (VALUE)env;
+ return (VALUE)env;
}
rb_bug("invalid env");
return Qnil; /* unreachable */
@@ -736,7 +913,7 @@ vm_block_handler_escape(const rb_execution_context_t *ec, VALUE block_handler)
switch (vm_block_handler_type(block_handler)) {
case block_handler_type_ifunc:
case block_handler_type_iseq:
- return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
+ return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
case block_handler_type_symbol:
case block_handler_type_proc:
@@ -750,17 +927,15 @@ static VALUE
vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *const cfp)
{
const VALUE * const ep = cfp->ep;
- const rb_env_t *env;
- const rb_iseq_t *env_iseq;
VALUE *env_body, *env_ep;
int local_size, env_size;
if (VM_ENV_ESCAPED_P(ep)) {
- return VM_ENV_ENVVAL(ep);
+ return VM_ENV_ENVVAL(ep);
}
if (!VM_ENV_LOCAL_P(ep)) {
- const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
+ const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
if (!VM_ENV_ESCAPED_P(prev_ep)) {
rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
@@ -774,19 +949,19 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co
}
}
else {
- VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
+ VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
- if (block_handler != VM_BLOCK_HANDLER_NONE) {
+ if (block_handler != VM_BLOCK_HANDLER_NONE) {
VALUE blockprocval = vm_block_handler_escape(ec, block_handler);
- VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
- }
+ VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
+ }
}
if (!VM_FRAME_RUBYFRAME_P(cfp)) {
- local_size = VM_ENV_DATA_SIZE;
+ local_size = VM_ENV_DATA_SIZE;
}
else {
- local_size = cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
+ local_size = ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
}
/*
@@ -802,27 +977,36 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co
*/
env_size = local_size +
- 1 /* envval */;
+ 1 /* envval */;
+
+ // Careful with order in the following sequence. Each allocation can move objects.
env_body = ALLOC_N(VALUE, env_size);
- MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size);
+ rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, 0);
-#if 0
- for (i = 0; i < local_size; i++) {
- if (VM_FRAME_RUBYFRAME_P(cfp)) {
- /* clear value stack for GC */
- ep[-local_size + i] = 0;
- }
- }
-#endif
+ // Set up env without WB since it's brand new (similar to newobj_init(), newobj_fill())
+ MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size);
- env_iseq = VM_FRAME_RUBYFRAME_P(cfp) ? cfp->iseq : NULL;
env_ep = &env_body[local_size - 1 /* specval */];
+ env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
- env = vm_env_new(env_ep, env_body, env_size, env_iseq);
+ env->iseq = (rb_iseq_t *)(VM_FRAME_RUBYFRAME_P(cfp) ? cfp->iseq : NULL);
+ env->ep = env_ep;
+ env->env = env_body;
+ env->env_size = env_size;
cfp->ep = env_ep;
VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
VM_STACK_ENV_WRITE(ep, 0, (VALUE)env); /* GC mark */
+
+#if 0
+ for (i = 0; i < local_size; i++) {
+ if (VM_FRAME_RUBYFRAME_P(cfp)) {
+ /* clear value stack for GC */
+ ep[-local_size + i] = 0;
+ }
+ }
+#endif
+
return (VALUE)env;
}
@@ -832,7 +1016,7 @@ vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
VALUE envval = vm_make_env_each(ec, cfp);
if (PROCDEBUG) {
- check_env_value((const rb_env_t *)envval);
+ check_env_value((const rb_env_t *)envval);
}
return envval;
@@ -843,8 +1027,8 @@ rb_vm_stack_to_heap(rb_execution_context_t *ec)
{
rb_control_frame_t *cfp = ec->cfp;
while ((cfp = rb_vm_get_binding_creatable_next_cfp(ec, cfp)) != 0) {
- vm_make_env_object(ec, cfp);
- cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ vm_make_env_object(ec, cfp);
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}
}
@@ -854,7 +1038,7 @@ rb_vm_env_prev_env(const rb_env_t *env)
const VALUE *ep = env->ep;
if (VM_ENV_LOCAL_P(ep)) {
- return NULL;
+ return NULL;
}
else {
const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
@@ -867,8 +1051,8 @@ collect_local_variables_in_iseq(const rb_iseq_t *iseq, const struct local_var_li
{
unsigned int i;
if (!iseq) return 0;
- for (i = 0; i < iseq->body->local_table_size; i++) {
- local_var_list_add(vars, iseq->body->local_table[i]);
+ for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
+ local_var_list_add(vars, ISEQ_BODY(iseq)->local_table[i]);
}
return 1;
}
@@ -878,7 +1062,7 @@ collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list
{
do {
if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break;
- collect_local_variables_in_iseq(env->iseq, vars);
+ collect_local_variables_in_iseq(env->iseq, vars);
} while ((env = rb_vm_env_prev_env(env)) != NULL);
}
@@ -886,11 +1070,11 @@ static int
vm_collect_local_variables_in_heap(const VALUE *ep, const struct local_var_list *vars)
{
if (VM_ENV_ESCAPED_P(ep)) {
- collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
- return 1;
+ collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
+ return 1;
}
else {
- return 0;
+ return 0;
}
}
@@ -909,7 +1093,7 @@ rb_iseq_local_variables(const rb_iseq_t *iseq)
struct local_var_list vars;
local_var_list_init(&vars);
while (collect_local_variables_in_iseq(iseq, &vars)) {
- iseq = iseq->body->parent_iseq;
+ iseq = ISEQ_BODY(iseq)->parent_iseq;
}
return local_var_list_finish(&vars);
}
@@ -918,9 +1102,9 @@ rb_iseq_local_variables(const rb_iseq_t *iseq)
static VALUE
vm_proc_create_from_captured(VALUE klass,
- const struct rb_captured_block *captured,
- enum rb_block_type block_type,
- int8_t is_from_method, int8_t is_lambda)
+ const struct rb_captured_block *captured,
+ enum rb_block_type block_type,
+ int8_t is_from_method, int8_t is_lambda)
{
VALUE procval = rb_proc_alloc(klass);
rb_proc_t *proc = RTYPEDDATA_DATA(procval);
@@ -946,16 +1130,16 @@ rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *s
switch (vm_block_type(src)) {
case block_type_iseq:
case block_type_ifunc:
- RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self);
- RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val);
- rb_vm_block_ep_update(obj, dst, src->as.captured.ep);
- break;
+ RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self);
+ RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val);
+ rb_vm_block_ep_update(obj, dst, src->as.captured.ep);
+ break;
case block_type_symbol:
- RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol);
- break;
+ RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol);
+ break;
case block_type_proc:
- RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc);
- break;
+ RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc);
+ break;
}
}
@@ -981,7 +1165,7 @@ rb_proc_dup(VALUE self)
rb_proc_t *src;
GetProcPtr(self, src);
- procval = proc_create(rb_cProc, &src->block, src->is_from_method, src->is_lambda);
+ procval = proc_create(rb_obj_class(self), &src->block, src->is_from_method, src->is_lambda);
if (RB_OBJ_SHAREABLE_P(self)) FL_SET_RAW(procval, RUBY_FL_SHAREABLE);
RB_GC_GUARD(self); /* for: body = rb_proc_dup(body) */
return procval;
@@ -994,6 +1178,24 @@ struct collect_outer_variable_name_data {
bool isolate;
};
+static VALUE
+ID2NUM(ID id)
+{
+ if (SIZEOF_VOIDP > SIZEOF_LONG)
+ return ULL2NUM(id);
+ else
+ return ULONG2NUM(id);
+}
+
+static ID
+NUM2ID(VALUE num)
+{
+ if (SIZEOF_VOIDP > SIZEOF_LONG)
+ return (ID)NUM2ULL(num);
+ else
+ return (ID)NUM2ULONG(num);
+}
+
static enum rb_id_table_iterator_result
collect_outer_variable_names(ID id, VALUE val, void *ptr)
{
@@ -1012,7 +1214,7 @@ collect_outer_variable_names(ID id, VALUE val, void *ptr)
store = &data->read_only;
}
if (*store == Qfalse) *store = rb_ary_new();
- rb_ary_push(*store, ID2SYM(id));
+ rb_ary_push(*store, ID2NUM(id));
}
return ID_TABLE_CONTINUE;
}
@@ -1025,21 +1227,33 @@ env_copy(const VALUE *src_ep, VALUE read_only_variables)
VALUE *env_body = ZALLOC_N(VALUE, src_env->env_size); // fill with Qfalse
VALUE *ep = &env_body[src_env->env_size - 2];
- volatile VALUE prev_env = Qnil;
+ const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq);
+
+ // Copy after allocations above, since they can move objects in src_ep.
+ RB_OBJ_WRITE(copied_env, &ep[VM_ENV_DATA_INDEX_ME_CREF], src_ep[VM_ENV_DATA_INDEX_ME_CREF]);
+ ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED;
+ if (!VM_ENV_LOCAL_P(src_ep)) {
+ VM_ENV_FLAGS_SET(ep, VM_ENV_FLAG_LOCAL);
+ }
if (read_only_variables) {
for (int i=RARRAY_LENINT(read_only_variables)-1; i>=0; i--) {
- ID id = SYM2ID(RARRAY_AREF(read_only_variables, i));
+ ID id = NUM2ID(RARRAY_AREF(read_only_variables, i));
- for (unsigned int j=0; j<src_env->iseq->body->local_table_size; j++) {
- if (id == src_env->iseq->body->local_table[j]) {
+ for (unsigned int j=0; j<ISEQ_BODY(src_env->iseq)->local_table_size; j++) {
+ if (id == ISEQ_BODY(src_env->iseq)->local_table[j]) {
VALUE v = src_env->env[j];
if (!rb_ractor_shareable_p(v)) {
- rb_raise(rb_eRactorIsolationError,
- "can not make shareable Proc because it can refer unshareable object %"
- PRIsVALUE" from variable `%s'", rb_inspect(v), rb_id2name(id));
+ VALUE name = rb_id2str(id);
+ VALUE msg = rb_sprintf("can not make shareable Proc because it can refer"
+ " unshareable object %+" PRIsVALUE " from ", v);
+ if (name)
+ rb_str_catf(msg, "variable '%" PRIsVALUE "'", name);
+ else
+ rb_str_cat_cstr(msg, "a hidden variable");
+ rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg));
}
- env_body[j] = v;
+ RB_OBJ_WRITE((VALUE)copied_env, &env_body[j], v);
rb_ary_delete_at(read_only_variables, i);
break;
}
@@ -1047,21 +1261,17 @@ env_copy(const VALUE *src_ep, VALUE read_only_variables)
}
}
- ep[VM_ENV_DATA_INDEX_ME_CREF] = src_ep[VM_ENV_DATA_INDEX_ME_CREF];
- ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED;
-
if (!VM_ENV_LOCAL_P(src_ep)) {
const VALUE *prev_ep = VM_ENV_PREV_EP(src_env->ep);
const rb_env_t *new_prev_env = env_copy(prev_ep, read_only_variables);
- prev_env = (VALUE)new_prev_env;
ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_GUARDED_PREV_EP(new_prev_env->ep);
+ RB_OBJ_WRITTEN(copied_env, Qundef, new_prev_env);
+ VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_LOCAL);
}
else {
ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE;
}
- const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq);
- RB_GC_GUARD(prev_env);
return copied_env;
}
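
The reordering in env_copy above (allocating copied_env before filling its slots, and storing through RB_OBJ_WRITE / RB_OBJ_WRITTEN) follows the usual generational-GC discipline: finish all allocations first, since each one can trigger GC and move referents, then record every parent-to-child edge through a write barrier. A generic sketch of that pattern, with illustrative names:

    /* Hedged sketch of the write-barrier discipline used above. */
    struct container { VALUE slot; };   /* illustrative heap-owned payload */

    static void
    store_child(VALUE owner, struct container *payload, VALUE child)
    {
        /* Do NOT use a plain `payload->slot = child;` for heap-owned objects:
         * the GC would never learn about the owner -> child edge. */
        RB_OBJ_WRITE(owner, &payload->slot, child);  /* barrier-protected store */
    }
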
@@ -1074,6 +1284,39 @@ proc_isolate_env(VALUE self, rb_proc_t *proc, VALUE read_only_variables)
RB_OBJ_WRITTEN(self, Qundef, env);
}
+static VALUE
+proc_shared_outer_variables(struct rb_id_table *outer_variables, bool isolate, const char *message)
+{
+ struct collect_outer_variable_name_data data = {
+ .isolate = isolate,
+ .ary = Qfalse,
+ .read_only = Qfalse,
+ .yield = false,
+ };
+ rb_id_table_foreach(outer_variables, collect_outer_variable_names, (void *)&data);
+
+ if (data.ary != Qfalse) {
+ VALUE str = rb_sprintf("can not %s because it accesses outer variables", message);
+ VALUE ary = data.ary;
+ const char *sep = " (";
+ for (long i = 0; i < RARRAY_LEN(ary); i++) {
+ VALUE name = rb_id2str(NUM2ID(RARRAY_AREF(ary, i)));
+ if (!name) continue;
+ rb_str_cat_cstr(str, sep);
+ sep = ", ";
+ rb_str_append(str, name);
+ }
+ if (*sep == ',') rb_str_cat_cstr(str, ")");
+ rb_str_cat_cstr(str, data.yield ? " and uses 'yield'." : ".");
+ rb_exc_raise(rb_exc_new_str(rb_eArgError, str));
+ }
+ else if (data.yield) {
+ rb_raise(rb_eArgError, "can not %s because it uses 'yield'.", message);
+ }
+
+ return data.read_only;
+}
+
VALUE
rb_proc_isolate_bang(VALUE self)
{
@@ -1083,29 +1326,8 @@ rb_proc_isolate_bang(VALUE self)
rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
- if (iseq->body->outer_variables) {
- struct collect_outer_variable_name_data data = {
- .isolate = true,
- .ary = Qfalse,
- .yield = false,
- };
- rb_id_table_foreach(iseq->body->outer_variables, collect_outer_variable_names, (void *)&data);
-
- if (data.ary != Qfalse) {
- VALUE str = rb_ary_join(data.ary, rb_str_new2(", "));
- if (data.yield) {
- rb_raise(rb_eArgError, "can not isolate a Proc because it accesses outer variables (%s) and uses `yield'.",
- StringValueCStr(str));
- }
- else {
- rb_raise(rb_eArgError, "can not isolate a Proc because it accesses outer variables (%s).",
- StringValueCStr(str));
- }
- }
- else {
- VM_ASSERT(data.yield);
- rb_raise(rb_eArgError, "can not isolate a Proc because it uses `yield'.");
- }
+ if (ISEQ_BODY(iseq)->outer_variables) {
+ proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, true, "isolate a Proc");
}
proc_isolate_env(self, proc, Qfalse);
@@ -1133,34 +1355,17 @@ rb_proc_ractor_make_shareable(VALUE self)
rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
- VALUE read_only_variables = Qfalse;
+ if (!rb_ractor_shareable_p(vm_block_self(&proc->block))) {
+ rb_raise(rb_eRactorIsolationError,
+ "Proc's self is not shareable: %" PRIsVALUE,
+ self);
+ }
- if (iseq->body->outer_variables) {
- struct collect_outer_variable_name_data data = {
- .isolate = false,
- .ary = Qfalse,
- .read_only = Qfalse,
- .yield = false,
- };
-
- rb_id_table_foreach(iseq->body->outer_variables, collect_outer_variable_names, (void *)&data);
-
- if (data.ary != Qfalse) {
- VALUE str = rb_ary_join(data.ary, rb_str_new2(", "));
- if (data.yield) {
- rb_raise(rb_eArgError, "can not make a Proc shareable because it accesses outer variables (%s) and uses `yield'.",
- StringValueCStr(str));
- }
- else {
- rb_raise(rb_eArgError, "can not make a Proc shareable because it accesses outer variables (%s).",
- StringValueCStr(str));
- }
- }
- else if (data.yield) {
- rb_raise(rb_eArgError, "can not make a Proc shareable because it uses `yield'.");
- }
+ VALUE read_only_variables = Qfalse;
- read_only_variables = data.read_only;
+ if (ISEQ_BODY(iseq)->outer_variables) {
+ read_only_variables =
+ proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, false, "make a Proc shareable");
}
proc_isolate_env(self, proc, read_only_variables);
@@ -1171,21 +1376,45 @@ rb_proc_ractor_make_shareable(VALUE self)
return self;
}
-MJIT_FUNC_EXPORTED VALUE
+VALUE
rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
{
VALUE procval;
+ enum imemo_type code_type = imemo_type(captured->code.val);
if (!VM_ENV_ESCAPED_P(captured->ep)) {
- rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
- vm_make_env_object(ec, cfp);
+ rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
+ vm_make_env_object(ec, cfp);
}
+
VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep));
- VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq) ||
- imemo_type_p(captured->code.val, imemo_ifunc));
+ VM_ASSERT(code_type == imemo_iseq || code_type == imemo_ifunc);
procval = vm_proc_create_from_captured(klass, captured,
- imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc, FALSE, is_lambda);
+ code_type == imemo_iseq ? block_type_iseq : block_type_ifunc,
+ FALSE, is_lambda);
+
+ if (code_type == imemo_ifunc) {
+ struct vm_ifunc *ifunc = (struct vm_ifunc *)captured->code.val;
+ if (ifunc->svar_lep) {
+ VALUE ep0 = ifunc->svar_lep[0];
+ if (RB_TYPE_P(ep0, T_IMEMO) && imemo_type_p(ep0, imemo_env)) {
+ // `ep0 == imemo_env` means this ep is escaped to heap (in env object).
+ const rb_env_t *env = (const rb_env_t *)ep0;
+ ifunc->svar_lep = (VALUE *)env->ep;
+ }
+ else {
+ VM_ASSERT(FIXNUM_P(ep0));
+ if (ep0 & VM_ENV_FLAG_ESCAPED) {
+ // ok. do nothing
+ }
+ else {
+ ifunc->svar_lep = NULL;
+ }
+ }
+ }
+ }
+
return procval;
}
@@ -1200,23 +1429,20 @@ rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *s
rb_binding_t *bind;
if (cfp == 0 || ruby_level_cfp == 0) {
- rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
+ rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
}
-
- while (1) {
- envval = vm_make_env_object(ec, cfp);
- if (cfp == ruby_level_cfp) {
- break;
- }
- cfp = rb_vm_get_binding_creatable_next_cfp(ec, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
+ if (!VM_FRAME_RUBYFRAME_P(src_cfp) &&
+ !VM_FRAME_RUBYFRAME_P(RUBY_VM_PREVIOUS_CONTROL_FRAME(src_cfp))) {
+ rb_raise(rb_eRuntimeError, "Cannot create Binding object for non-Ruby caller");
}
+ envval = vm_make_env_object(ec, cfp);
bindval = rb_binding_alloc(rb_cBinding);
GetBindingPtr(bindval, bind);
vm_bind_update_env(bindval, bind, envval);
RB_OBJ_WRITE(bindval, &bind->block.as.captured.self, cfp->self);
RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, cfp->iseq);
- RB_OBJ_WRITE(bindval, &bind->pathobj, ruby_level_cfp->iseq->body->location.pathobj);
+ RB_OBJ_WRITE(bindval, &bind->pathobj, ISEQ_BODY(ruby_level_cfp->iseq)->location.pathobj);
bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);
return bindval;
@@ -1233,30 +1459,34 @@ rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const I
rb_execution_context_t *ec = GET_EC();
const rb_iseq_t *base_iseq, *iseq;
rb_ast_body_t ast;
- NODE tmp_node;
- ID minibuf[4], *dyns = minibuf;
- VALUE idtmp = 0;
+ rb_node_scope_t tmp_node;
if (dyncount < 0) return 0;
base_block = &bind->block;
base_iseq = vm_block_iseq(base_block);
- if (dyncount >= numberof(minibuf)) dyns = ALLOCV_N(ID, idtmp, dyncount + 1);
-
- dyns[0] = dyncount;
- MEMCPY(dyns + 1, dynvars, ID, dyncount);
- rb_node_init(&tmp_node, NODE_SCOPE, (VALUE)dyns, 0, 0);
- ast.root = &tmp_node;
- ast.compile_option = 0;
+ VALUE idtmp = 0;
+ rb_ast_id_table_t *dyns = ALLOCV(idtmp, sizeof(rb_ast_id_table_t) + dyncount * sizeof(ID));
+ dyns->size = dyncount;
+ MEMCPY(dyns->ids, dynvars, ID, dyncount);
+
+ rb_node_init(RNODE(&tmp_node), NODE_SCOPE);
+ tmp_node.nd_tbl = dyns;
+ tmp_node.nd_body = 0;
+ tmp_node.nd_args = 0;
+
+ ast.root = RNODE(&tmp_node);
+ ast.frozen_string_literal = -1;
+ ast.coverage_enabled = -1;
ast.script_lines = INT2FIX(-1);
if (base_iseq) {
- iseq = rb_iseq_new(&ast, base_iseq->body->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
+ iseq = rb_iseq_new(&ast, ISEQ_BODY(base_iseq)->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
}
else {
- VALUE tempstr = rb_fstring_lit("<temp>");
- iseq = rb_iseq_new_top(&ast, tempstr, tempstr, tempstr, NULL);
+ VALUE tempstr = rb_fstring_lit("<temp>");
+ iseq = rb_iseq_new_top(&ast, tempstr, tempstr, tempstr, NULL);
}
tmp_node.nd_tbl = 0; /* reset table */
ALLOCV_END(idtmp);
@@ -1274,54 +1504,38 @@ rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const I
static inline VALUE
invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
{
- int arg_size = iseq->body->param.size;
+ int arg_size = ISEQ_BODY(iseq)->param.size;
vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self,
- VM_GUARDED_PREV_EP(captured->ep),
- (VALUE)cref, /* cref or method */
- iseq->body->iseq_encoded + opt_pc,
- ec->cfp->sp + arg_size,
- iseq->body->local_table_size - arg_size,
- iseq->body->stack_max);
- return vm_exec(ec, true);
+ VM_GUARDED_PREV_EP(captured->ep),
+ (VALUE)cref, /* cref or method */
+ ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
+ ec->cfp->sp + arg_size,
+ ISEQ_BODY(iseq)->local_table_size - arg_size,
+ ISEQ_BODY(iseq)->stack_max);
+ return vm_exec(ec);
}
static VALUE
invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
{
- /* bmethod */
- int arg_size = iseq->body->param.size;
+ /* bmethod call from outside the VM */
+ int arg_size = ISEQ_BODY(iseq)->param.size;
VALUE ret;
- rb_hook_list_t *hooks;
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self,
- VM_GUARDED_PREV_EP(captured->ep),
- (VALUE)me,
- iseq->body->iseq_encoded + opt_pc,
- ec->cfp->sp + arg_size,
- iseq->body->local_table_size - arg_size,
- iseq->body->stack_max);
-
- RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
- EXEC_EVENT_HOOK(ec, RUBY_EVENT_CALL, self, me->def->original_id, me->called_id, me->owner, Qnil);
+ VM_GUARDED_PREV_EP(captured->ep),
+ (VALUE)me,
+ ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
+ ec->cfp->sp + 1 /* self */ + arg_size,
+ ISEQ_BODY(iseq)->local_table_size - arg_size,
+ ISEQ_BODY(iseq)->stack_max);
- if (UNLIKELY((hooks = me->def->body.bmethod.hooks) != NULL) &&
- hooks->events & RUBY_EVENT_CALL) {
- rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_CALL, self,
- me->def->original_id, me->called_id, me->owner, Qnil, FALSE);
- }
VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
- ret = vm_exec(ec, true);
+ ret = vm_exec(ec);
- EXEC_EVENT_HOOK(ec, RUBY_EVENT_RETURN, self, me->def->original_id, me->called_id, me->owner, ret);
- if ((hooks = me->def->body.bmethod.hooks) != NULL &&
- hooks->events & RUBY_EVENT_RETURN) {
- rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_RETURN, self,
- me->def->original_id, me->called_id, me->owner, ret, FALSE);
- }
- RUBY_DTRACE_METHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
return ret;
}
@@ -1332,64 +1546,78 @@ ALWAYS_INLINE(static VALUE
static inline VALUE
invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
- VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
+ VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
{
const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
- int i, opt_pc;
+ int opt_pc;
VALUE type = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
rb_control_frame_t *cfp = ec->cfp;
VALUE *sp = cfp->sp;
+ int flags = (kw_splat ? VM_CALL_KW_SPLAT : 0);
+ VALUE *use_argv = (VALUE *)argv;
+ VALUE av[2];
stack_check(ec);
- CHECK_VM_STACK_OVERFLOW(cfp, argc);
+ if (UNLIKELY(argc > VM_ARGC_STACK_MAX) &&
+ (VM_ARGC_STACK_MAX >= 1 ||
+ /* Skip ruby array for potential autosplat case */
+ (argc != 1 || is_lambda))) {
+ use_argv = vm_argv_ruby_array(av, argv, &flags, &argc, kw_splat);
+ }
+
+ CHECK_VM_STACK_OVERFLOW(cfp, argc + 1);
vm_check_canary(ec, sp);
- cfp->sp = sp + argc;
- for (i=0; i<argc; i++) {
- sp[i] = argv[i];
+
+ VALUE *stack_argv = sp;
+ if (me) {
+ *sp = self; // bmethods need `self` on the VM stack
+ stack_argv++;
}
+ cfp->sp = stack_argv + argc;
+ MEMCPY(stack_argv, use_argv, VALUE, argc); // restrict: new stack space
- opt_pc = vm_yield_setup_args(ec, iseq, argc, sp, kw_splat, passed_block_handler,
- (is_lambda ? arg_setup_method : arg_setup_block));
+ opt_pc = vm_yield_setup_args(ec, iseq, argc, stack_argv, flags, passed_block_handler,
+ (is_lambda ? arg_setup_method : arg_setup_block));
cfp->sp = sp;
if (me == NULL) {
- return invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
+ return invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
}
else {
- return invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
+ return invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
}
}
static inline VALUE
invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler,
- int argc, const VALUE *argv,
- int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref,
- int is_lambda, int force_blockarg)
+ int argc, const VALUE *argv,
+ int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref,
+ int is_lambda, int force_blockarg)
{
again:
switch (vm_block_handler_type(block_handler)) {
case block_handler_type_iseq:
- {
- const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
- return invoke_iseq_block_from_c(ec, captured, captured->self,
- argc, argv, kw_splat, passed_block_handler,
+ {
+ const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
+ return invoke_iseq_block_from_c(ec, captured, captured->self,
+ argc, argv, kw_splat, passed_block_handler,
cref, is_lambda, NULL);
- }
+ }
case block_handler_type_ifunc:
- return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
- VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
+ return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
+ VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
argc, argv, kw_splat, passed_block_handler, NULL);
case block_handler_type_symbol:
- return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
- argc, argv, kw_splat, passed_block_handler);
+ return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
+ argc, argv, kw_splat, passed_block_handler);
case block_handler_type_proc:
- if (force_blockarg == FALSE) {
- is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
- }
- block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
- goto again;
+ if (force_blockarg == FALSE) {
+ is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
+ }
+ block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
+ goto again;
}
VM_UNREACHABLE(invoke_block_from_c_splattable);
return Qundef;
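
The new argc > VM_ARGC_STACK_MAX branch above caps how much VM stack a C-to-Ruby block call may consume: vm_argv_ruby_array (defined elsewhere in this patch) boxes the C-level argv into a single Ruby Array passed as a splat. A paraphrased sketch of the idea; the kw_splat handling and the autosplat special case are omitted:

    /* Hedged paraphrase, not the real vm_argv_ruby_array. */
    static const VALUE *
    box_large_argv(VALUE av[2], const VALUE *argv, int *flags, int *argc)
    {
        av[0] = rb_ary_new_from_values(*argc, argv); /* heap-allocate the arguments */
        *flags |= VM_CALL_ARGS_SPLAT;                /* callee sees one splatted array */
        *argc = 1;
        return av;
    }
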
@@ -1401,7 +1629,7 @@ check_block_handler(rb_execution_context_t *ec)
VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->cfp);
vm_block_handler_verify(block_handler);
if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
- rb_vm_localjump_error("no block given", Qnil, 0);
+ rb_vm_localjump_error("no block given", Qnil, 0);
}
return block_handler;
@@ -1412,7 +1640,7 @@ vm_yield_with_cref(rb_execution_context_t *ec, int argc, const VALUE *argv, int
{
return invoke_block_from_c_bh(ec, check_block_handler(ec),
argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE,
- cref, is_lambda, FALSE);
+ cref, is_lambda, FALSE);
}
static VALUE
@@ -1426,7 +1654,7 @@ vm_yield_with_block(rb_execution_context_t *ec, int argc, const VALUE *argv, VAL
{
return invoke_block_from_c_bh(ec, check_block_handler(ec),
argc, argv, kw_splat, block_handler,
- NULL, FALSE, FALSE);
+ NULL, FALSE, FALSE);
}
static VALUE
@@ -1444,7 +1672,7 @@ ALWAYS_INLINE(static VALUE
static inline VALUE
invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
- VALUE self, int argc, const VALUE *argv,
+ VALUE self, int argc, const VALUE *argv,
int kw_splat, VALUE passed_block_handler, int is_lambda,
const rb_callable_method_entry_t *me)
{
@@ -1469,11 +1697,11 @@ invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
}
return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, me);
case block_type_symbol:
- return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler);
+ return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler);
case block_type_proc:
- is_lambda = block_proc_is_lambda(block->as.proc);
- block = vm_proc_block(block->as.proc);
- goto again;
+ is_lambda = block_proc_is_lambda(block->as.proc);
+ block = vm_proc_block(block->as.proc);
+ goto again;
}
VM_UNREACHABLE(invoke_block_from_c_proc);
return Qundef;
@@ -1481,21 +1709,21 @@ invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
static VALUE
vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
- int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
+ int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
{
return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL);
}
-MJIT_FUNC_EXPORTED VALUE
+VALUE
rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
{
return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, block_handler, TRUE, me);
}
-MJIT_FUNC_EXPORTED VALUE
+VALUE
rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
- int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
+ int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
{
VALUE self = vm_block_self(&proc->block);
vm_block_handler_verify(passed_block_handler);
@@ -1504,7 +1732,7 @@ rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
}
else {
- return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
+ return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
}
}
@@ -1518,36 +1746,42 @@ rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE s
return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
}
else {
- return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
+ return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
}
}
/* special variable */
-static rb_control_frame_t *
-vm_normal_frame(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
+VALUE *
+rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
- while (cfp->pc == 0) {
- cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
- if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
- return 0;
- }
+ while (cfp->pc == 0 || cfp->iseq == 0) {
+ if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_IFUNC) {
+ struct vm_ifunc *ifunc = (struct vm_ifunc *)cfp->iseq;
+ return ifunc->svar_lep;
+ }
+ else {
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ }
+
+ if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
+ return NULL;
+ }
}
- return cfp;
+
+ return (VALUE *)VM_CF_LEP(cfp);
}
static VALUE
vm_cfp_svar_get(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key)
{
- cfp = vm_normal_frame(ec, cfp);
- return lep_svar_get(ec, cfp ? VM_CF_LEP(cfp) : 0, key);
+ return lep_svar_get(ec, rb_vm_svar_lep(ec, cfp), key);
}
static void
vm_cfp_svar_set(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key, const VALUE val)
{
- cfp = vm_normal_frame(ec, cfp);
- lep_svar_set(ec, cfp ? VM_CF_LEP(cfp) : 0, key, val);
+ lep_svar_set(ec, rb_vm_svar_lep(ec, cfp), key, val);
}
static VALUE
@@ -1586,6 +1820,17 @@ rb_lastline_set(VALUE val)
vm_svar_set(GET_EC(), VM_SVAR_LASTLINE, val);
}
+void
+rb_lastline_set_up(VALUE val, unsigned int up)
+{
+ rb_control_frame_t * cfp = GET_EC()->cfp;
+
+ for (unsigned int i = 0; i < up; i++) {
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ }
+ vm_cfp_svar_set(GET_EC(), cfp, VM_SVAR_LASTLINE, val);
+}
+
/* misc */
const char *
@@ -1595,10 +1840,10 @@ rb_sourcefile(void)
const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
if (cfp) {
- return RSTRING_PTR(rb_iseq_path(cfp->iseq));
+ return RSTRING_PTR(rb_iseq_path(cfp->iseq));
}
else {
- return 0;
+ return 0;
}
}
@@ -1609,10 +1854,10 @@ rb_sourceline(void)
const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
if (cfp) {
- return rb_vm_get_sourceline(cfp);
+ return rb_vm_get_sourceline(cfp);
}
else {
- return 0;
+ return 0;
}
}
@@ -1623,16 +1868,16 @@ rb_source_location(int *pline)
const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) {
- if (pline) *pline = rb_vm_get_sourceline(cfp);
- return rb_iseq_path(cfp->iseq);
+ if (pline) *pline = rb_vm_get_sourceline(cfp);
+ return rb_iseq_path(cfp->iseq);
}
else {
- if (pline) *pline = 0;
- return Qnil;
+ if (pline) *pline = 0;
+ return Qnil;
}
}
-MJIT_FUNC_EXPORTED const char *
+const char *
rb_source_location_cstr(int *pline)
{
VALUE path = rb_source_location(pline);
@@ -1675,9 +1920,9 @@ void
debug_cref(rb_cref_t *cref)
{
while (cref) {
- dp(CREF_CLASS(cref));
- printf("%ld\n", CREF_VISI(cref));
- cref = CREF_NEXT(cref);
+ dp(CREF_CLASS(cref));
+ printf("%ld\n", CREF_VISI(cref));
+ cref = CREF_NEXT(cref);
}
}
#endif
@@ -1689,7 +1934,7 @@ rb_vm_cbase(void)
const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
if (cfp == 0) {
- rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
+ rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
}
return vm_get_cbase(cfp->ep);
}
@@ -1705,30 +1950,30 @@ make_localjump_error(const char *mesg, VALUE value, int reason)
switch (reason) {
case TAG_BREAK:
- CONST_ID(id, "break");
- break;
+ CONST_ID(id, "break");
+ break;
case TAG_REDO:
- CONST_ID(id, "redo");
- break;
+ CONST_ID(id, "redo");
+ break;
case TAG_RETRY:
- CONST_ID(id, "retry");
- break;
+ CONST_ID(id, "retry");
+ break;
case TAG_NEXT:
- CONST_ID(id, "next");
- break;
+ CONST_ID(id, "next");
+ break;
case TAG_RETURN:
- CONST_ID(id, "return");
- break;
+ CONST_ID(id, "return");
+ break;
default:
- CONST_ID(id, "noreason");
- break;
+ CONST_ID(id, "noreason");
+ break;
}
rb_iv_set(exc, "@exit_value", value);
rb_iv_set(exc, "@reason", ID2SYM(id));
return exc;
}
-MJIT_FUNC_EXPORTED void
+void
rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
{
VALUE exc = make_localjump_error(mesg, value, reason);
@@ -1736,39 +1981,39 @@ rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
}
VALUE
-rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
+rb_vm_make_jump_tag_but_local_jump(enum ruby_tag_type state, VALUE val)
{
const char *mesg;
switch (state) {
case TAG_RETURN:
- mesg = "unexpected return";
- break;
+ mesg = "unexpected return";
+ break;
case TAG_BREAK:
- mesg = "unexpected break";
- break;
+ mesg = "unexpected break";
+ break;
case TAG_NEXT:
- mesg = "unexpected next";
- break;
+ mesg = "unexpected next";
+ break;
case TAG_REDO:
- mesg = "unexpected redo";
- val = Qnil;
- break;
+ mesg = "unexpected redo";
+ val = Qnil;
+ break;
case TAG_RETRY:
- mesg = "retry outside of rescue clause";
- val = Qnil;
- break;
+ mesg = "retry outside of rescue clause";
+ val = Qnil;
+ break;
default:
- return Qnil;
+ return Qnil;
}
- if (val == Qundef) {
- val = GET_EC()->tag->retval;
+ if (UNDEF_P(val)) {
+ val = GET_EC()->tag->retval;
}
return make_localjump_error(mesg, val, state);
}
void
-rb_vm_jump_tag_but_local_jump(int state)
+rb_vm_jump_tag_but_local_jump(enum ruby_tag_type state)
{
VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
if (!NIL_P(exc)) rb_exc_raise(exc);
@@ -1779,7 +2024,7 @@ static rb_control_frame_t *
next_not_local_frame(rb_control_frame_t *cfp)
{
while (VM_ENV_LOCAL_P(cfp->ep)) {
- cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}
return cfp;
}
@@ -1793,11 +2038,9 @@ vm_iter_break(rb_execution_context_t *ec, VALUE val)
const VALUE *ep = VM_CF_PREV_EP(cfp);
const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(ec, cfp, ep);
-#if 0 /* raise LocalJumpError */
if (!target_cfp) {
- rb_vm_localjump_error("unexpected break", val, TAG_BREAK);
+ rb_vm_localjump_error("unexpected break", val, TAG_BREAK);
}
-#endif
ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK);
EC_JUMP_TAG(ec, TAG_BREAK);
@@ -1817,9 +2060,17 @@ rb_iter_break_value(VALUE val)
/* optimization: redefine management */
+short ruby_vm_redefined_flag[BOP_LAST_];
static st_table *vm_opt_method_def_table = 0;
static st_table *vm_opt_mid_table = 0;
+void
+rb_free_vm_opt_tables(void)
+{
+ st_free_table(vm_opt_method_def_table);
+ st_free_table(vm_opt_mid_table);
+}
+
static int
vm_redefinition_check_flag(VALUE klass)
{
@@ -1851,14 +2102,21 @@ rb_vm_check_optimizable_mid(VALUE mid)
}
static int
-vm_redefinition_check_method_type(const rb_method_definition_t *def)
+vm_redefinition_check_method_type(const rb_method_entry_t *me)
{
+ if (me->called_id != me->def->original_id) {
+ return FALSE;
+ }
+
+ if (METHOD_ENTRY_BASIC(me)) return TRUE;
+
+ const rb_method_definition_t *def = me->def;
switch (def->type) {
case VM_METHOD_TYPE_CFUNC:
case VM_METHOD_TYPE_OPTIMIZED:
- return TRUE;
+ return TRUE;
default:
- return FALSE;
+ return FALSE;
}
}
@@ -1870,12 +2128,15 @@ rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
klass = RBASIC_CLASS(klass);
}
- if (vm_redefinition_check_method_type(me->def)) {
+ if (vm_redefinition_check_method_type(me)) {
if (st_lookup(vm_opt_method_def_table, (st_data_t)me->def, &bop)) {
int flag = vm_redefinition_check_flag(klass);
- rb_yjit_bop_redefined(klass, me, (enum ruby_basic_operators)bop);
- ruby_vm_redefined_flag[bop] |= flag;
- }
+ if (flag != 0) {
+ rb_yjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
+ rb_rjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
+ ruby_vm_redefined_flag[bop] |= flag;
+ }
+ }
}
}
@@ -1899,16 +2160,36 @@ rb_vm_check_redefinition_by_prepend(VALUE klass)
}
static void
-add_opt_method(VALUE klass, ID mid, VALUE bop)
+add_opt_method_entry_bop(const rb_method_entry_t *me, ID mid, enum ruby_basic_operators bop)
+{
+ st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop);
+ st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue);
+}
+
+static void
+add_opt_method(VALUE klass, ID mid, enum ruby_basic_operators bop)
{
const rb_method_entry_t *me = rb_method_entry_at(klass, mid);
- if (me && vm_redefinition_check_method_type(me->def)) {
- st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop);
- st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue);
+ if (me && vm_redefinition_check_method_type(me)) {
+ add_opt_method_entry_bop(me, mid, bop);
}
else {
- rb_bug("undefined optimized method: %s", rb_id2name(mid));
+ rb_bug("undefined optimized method: %s", rb_id2name(mid));
+ }
+}
+
+static enum ruby_basic_operators vm_redefinition_bop_for_id(ID mid);
+
+static void
+add_opt_method_entry(const rb_method_entry_t *me)
+{
+ if (me && vm_redefinition_check_method_type(me)) {
+ ID mid = me->called_id;
+ enum ruby_basic_operators bop = vm_redefinition_bop_for_id(mid);
+ if ((int)bop >= 0) {
+ add_opt_method_entry_bop(me, mid, bop);
+ }
}
}
@@ -1916,10 +2197,7 @@ static void
vm_init_redefined_flag(void)
{
ID mid;
- VALUE bop;
-
- vm_opt_method_def_table = st_init_numtable();
- vm_opt_mid_table = st_init_numtable();
+ enum ruby_basic_operators bop;
#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
@@ -1930,7 +2208,7 @@ vm_init_redefined_flag(void)
OP(MOD, MOD), (C(Integer), C(Float));
OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol));
OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String),
- C(NilClass), C(TrueClass), C(FalseClass));
+ C(NilClass), C(TrueClass), C(FalseClass));
OP(LT, LT), (C(Integer), C(Float));
OP(LE, LE), (C(Integer), C(Float));
OP(GT, GT), (C(Integer), C(Float));
@@ -1947,14 +2225,57 @@ vm_init_redefined_flag(void)
OP(UMinus, UMINUS), (C(String));
OP(Max, MAX), (C(Array));
OP(Min, MIN), (C(Array));
+ OP(Hash, HASH), (C(Array));
OP(Call, CALL), (C(Proc));
OP(And, AND), (C(Integer));
OP(Or, OR), (C(Integer));
OP(NilP, NIL_P), (C(NilClass));
+ OP(Cmp, CMP), (C(Integer), C(Float), C(String));
+ OP(Default, DEFAULT), (C(Hash));
#undef C
#undef OP
}
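
Editor's note: the OP/C macros above seed vm_opt_method_def_table so that redefining, say, Integer#+ flips a bit in ruby_vm_redefined_flag and the specialized instruction falls back to a full method call. A minimal standalone model of that bitmask check (names are illustrative, not the real VM macros):

#include <stdio.h>

enum bop { BOP_PLUS, BOP_MINUS, BOP_LAST_ };
enum { INTEGER_REDEFINED_OP_FLAG = 1 << 0, FLOAT_REDEFINED_OP_FLAG = 1 << 1 };

static short redefined_flag[BOP_LAST_];

/* Fast path guard: is Integer#+ still the builtin? */
static int
basic_op_unredefined_p(enum bop op, int klass_flag)
{
    return (redefined_flag[op] & klass_flag) == 0;
}

int
main(void)
{
    printf("%d\n", basic_op_unredefined_p(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)); /* 1: fast path ok */
    redefined_flag[BOP_PLUS] |= INTEGER_REDEFINED_OP_FLAG;  /* Integer#+ was redefined */
    printf("%d\n", basic_op_unredefined_p(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)); /* 0: deoptimize */
    return 0;
}
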
+static enum ruby_basic_operators
+vm_redefinition_bop_for_id(ID mid)
+{
+ switch (mid) {
+#define OP(mid_, bop_) case id##mid_: return BOP_##bop_
+ OP(PLUS, PLUS);
+ OP(MINUS, MINUS);
+ OP(MULT, MULT);
+ OP(DIV, DIV);
+ OP(MOD, MOD);
+ OP(Eq, EQ);
+ OP(Eqq, EQQ);
+ OP(LT, LT);
+ OP(LE, LE);
+ OP(GT, GT);
+ OP(GE, GE);
+ OP(LTLT, LTLT);
+ OP(AREF, AREF);
+ OP(ASET, ASET);
+ OP(Length, LENGTH);
+ OP(Size, SIZE);
+ OP(EmptyP, EMPTY_P);
+ OP(Succ, SUCC);
+ OP(EqTilde, MATCH);
+ OP(Freeze, FREEZE);
+ OP(UMinus, UMINUS);
+ OP(Max, MAX);
+ OP(Min, MIN);
+ OP(Hash, HASH);
+ OP(Call, CALL);
+ OP(And, AND);
+ OP(Or, OR);
+ OP(NilP, NIL_P);
+ OP(Cmp, CMP);
+ OP(Default, DEFAULT);
+#undef OP
+ }
+ return -1;
+}
+
/* for vm development */
#if VMDEBUG
@@ -1971,7 +2292,7 @@ vm_frametype_name(const rb_control_frame_t *cfp)
case VM_FRAME_MAGIC_EVAL: return "eval";
case VM_FRAME_MAGIC_RESCUE: return "rescue";
default:
- rb_bug("unknown frame");
+ rb_bug("unknown frame");
}
}
#endif
@@ -1980,12 +2301,12 @@ static VALUE
frame_return_value(const struct vm_throw_data *err)
{
if (THROW_DATA_P(err) &&
- THROW_DATA_STATE(err) == TAG_BREAK &&
- THROW_DATA_CONSUMED_P(err) == FALSE) {
- return THROW_DATA_VAL(err);
+ THROW_DATA_STATE(err) == TAG_BREAK &&
+ THROW_DATA_CONSUMED_P(err) == FALSE) {
+ return THROW_DATA_VAL(err);
}
else {
- return Qnil;
+ return Qnil;
}
}
@@ -2012,15 +2333,16 @@ frame_name(const rb_control_frame_t *cfp)
}
#endif
+// cfp_returning_with_value:
+// Whether cfp is the last frame in the unwinding process for a non-local return.
static void
-hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
- int will_finish_vm_exec, int state, struct vm_throw_data *err)
+hook_before_rewind(rb_execution_context_t *ec, bool cfp_returning_with_value, int state, struct vm_throw_data *err)
{
if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) {
- return;
+ return;
}
else {
- const rb_iseq_t *iseq = cfp->iseq;
+ const rb_iseq_t *iseq = ec->cfp->iseq;
rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
switch (VM_FRAME_TYPE(ec->cfp)) {
@@ -2037,32 +2359,36 @@ hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
break;
case VM_FRAME_MAGIC_BLOCK:
if (VM_FRAME_BMETHOD_P(ec->cfp)) {
- EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
+ VALUE bmethod_return_value = frame_return_value(err);
+ if (cfp_returning_with_value) {
+ // Non-local return terminating at a BMETHOD control frame.
+ bmethod_return_value = THROW_DATA_VAL(err);
+ }
+
+ EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, bmethod_return_value);
if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
- ec->cfp->self, 0, 0, 0, frame_return_value(err), FALSE);
+ ec->cfp->self, 0, 0, 0, bmethod_return_value, TRUE);
}
- if (!will_finish_vm_exec) {
- const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
+ const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
- /* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */
- EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
- rb_vm_frame_method_entry(ec->cfp)->def->original_id,
- rb_vm_frame_method_entry(ec->cfp)->called_id,
- rb_vm_frame_method_entry(ec->cfp)->owner,
- frame_return_value(err));
+ EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
+ rb_vm_frame_method_entry(ec->cfp)->def->original_id,
+ rb_vm_frame_method_entry(ec->cfp)->called_id,
+ rb_vm_frame_method_entry(ec->cfp)->owner,
+ bmethod_return_value);
- VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
- local_hooks = me->def->body.bmethod.hooks;
+ VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
+ local_hooks = me->def->body.bmethod.hooks;
- if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
- rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
- rb_vm_frame_method_entry(ec->cfp)->def->original_id,
- rb_vm_frame_method_entry(ec->cfp)->called_id,
- rb_vm_frame_method_entry(ec->cfp)->owner,
- frame_return_value(err), TRUE);
- }
+ if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
+ rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
+ rb_vm_frame_method_entry(ec->cfp)->def->original_id,
+ rb_vm_frame_method_entry(ec->cfp)->called_id,
+ rb_vm_frame_method_entry(ec->cfp)->owner,
+ bmethod_return_value, TRUE);
}
THROW_DATA_CONSUMED_SET(err);
}
@@ -2160,262 +2486,313 @@ hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
VALUE *ep; // ep
void *code; //
};
-
- If mjit_exec is already called before calling vm_exec, `mjit_enable_p` should
- be FALSE to avoid calling `mjit_exec` twice.
*/
static inline VALUE
-vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
- VALUE errinfo, VALUE *initial);
+vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo);
+static inline VALUE
+vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state, struct rb_vm_tag *tag, VALUE result);
+
+// For non-Emscripten Wasm builds, use vm_exec with an optimized setjmp for runtime performance
+#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
+
+struct rb_vm_exec_context {
+ rb_execution_context_t *const ec;
+ struct rb_vm_tag *const tag;
+
+ VALUE result;
+};
+
+static void
+vm_exec_bottom_main(void *context)
+{
+ struct rb_vm_exec_context *ctx = context;
+ rb_execution_context_t *ec = ctx->ec;
+
+ ctx->result = vm_exec_loop(ec, TAG_NONE, ctx->tag, vm_exec_core(ec));
+}
+
+static void
+vm_exec_bottom_rescue(void *context)
+{
+ struct rb_vm_exec_context *ctx = context;
+ rb_execution_context_t *ec = ctx->ec;
+
+ ctx->result = vm_exec_loop(ec, rb_ec_tag_state(ec), ctx->tag, ec->errinfo);
+}
+#endif
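
Editor's note: without native setjmp/longjmp on non-Emscripten Wasm, vm_exec is split into a main callback and a rescue callback that rb_wasm_try_catch_loop_run drives until no unwind is pending. A schematic, self-contained version of that driver shape (hypothetical types; the real one rides on Asyncify-based stack switching):

struct try_catch {
    void (*main_fn)(void *ctx);
    void (*rescue_fn)(void *ctx);
    void *ctx;
    int unwound;   /* set by the callbacks when a Ruby-level jump occurs */
};

static void
try_catch_loop_run(struct try_catch *tc)
{
    tc->main_fn(tc->ctx);          /* vm_exec_bottom_main */
    while (tc->unwound) {          /* a tag was thrown during execution */
        tc->unwound = 0;
        tc->rescue_fn(tc->ctx);    /* vm_exec_bottom_rescue; may rethrow */
    }
}
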
VALUE
-vm_exec(rb_execution_context_t *ec, bool mjit_enable_p)
+vm_exec(rb_execution_context_t *ec)
{
- enum ruby_tag_type state;
VALUE result = Qundef;
- VALUE initial = 0;
EC_PUSH_TAG(ec);
_tag.retval = Qnil;
+
+#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
+ struct rb_vm_exec_context ctx = {
+ .ec = ec,
+ .tag = &_tag,
+ };
+ struct rb_wasm_try_catch try_catch;
+
+ EC_REPUSH_TAG();
+
+ rb_wasm_try_catch_init(&try_catch, vm_exec_bottom_main, vm_exec_bottom_rescue, &ctx);
+
+ rb_wasm_try_catch_loop_run(&try_catch, &RB_VM_TAG_JMPBUF_GET(_tag.buf));
+
+ result = ctx.result;
+#else
+ enum ruby_tag_type state;
if ((state = EC_EXEC_TAG()) == TAG_NONE) {
- if (!mjit_enable_p || (result = mjit_exec(ec)) == Qundef) {
- result = vm_exec_core(ec, initial);
+ if (UNDEF_P(result = jit_exec(ec))) {
+ result = vm_exec_core(ec);
}
- goto vm_loop_start; /* fallback to the VM */
+ /* fallback to the VM */
+ result = vm_exec_loop(ec, TAG_NONE, &_tag, result);
}
else {
- result = ec->errinfo;
- rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY);
- while ((result = vm_exec_handle_exception(ec, state, result, &initial)) == Qundef) {
- /* caught a jump, exec the handler */
- result = vm_exec_core(ec, initial);
- vm_loop_start:
- VM_ASSERT(ec->tag == &_tag);
- /* when caught `throw`, `tag.state` is set. */
- if ((state = _tag.state) == TAG_NONE) break;
- _tag.state = TAG_NONE;
- }
+ result = vm_exec_loop(ec, state, &_tag, ec->errinfo);
}
+#endif
+
EC_POP_TAG();
return result;
}
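
Editor's note: the signature change drops mjit_enable_p entirely; vm_exec now always asks the JIT first and treats the Qundef sentinel as "not compiled, interpret instead". The same dispatch shape in miniature (stub functions and an int sentinel, not the real jit_exec/vm_exec_core):

#include <stdio.h>

#define QUNDEF (-1)

static int jit_exec_stub(void) { return QUNDEF; }   /* "not compiled yet" */
static int interp_stub(void) { return 42; }

int
main(void)
{
    int result = jit_exec_stub();
    if (result == QUNDEF) result = interp_stub();   /* fallback to the VM */
    printf("%d\n", result);
    return 0;
}
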
static inline VALUE
-vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
- VALUE errinfo, VALUE *initial)
+vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state,
+ struct rb_vm_tag *tag, VALUE result)
+{
+ if (state == TAG_NONE) { /* no jumps, result is discarded */
+ goto vm_loop_start;
+ }
+
+ rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY);
+ while (UNDEF_P(result = vm_exec_handle_exception(ec, state, result))) {
+ // caught a jump, exec the handler. JIT code in jit_exec_exception()
+ // may return Qundef to run remaining frames with vm_exec_core().
+ if (UNDEF_P(result = jit_exec_exception(ec))) {
+ result = vm_exec_core(ec);
+ }
+ vm_loop_start:
+ VM_ASSERT(ec->tag == tag);
+ /* when caught `throw`, `tag.state` is set. */
+ if ((state = tag->state) == TAG_NONE) break;
+ tag->state = TAG_NONE;
+ }
+
+ return result;
+}
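
Editor's note: vm_exec_loop hoists the old goto-based loop out of vm_exec. Qundef from the exception handler means "a catch entry was entered, resume executing", and a pending tag state restarts the cycle. A compilable skeleton of that control flow, with a stubbed handler and interpreter:

#include <stdio.h>

enum tag { TAG_NONE = 0, TAG_RAISE = 6 };
#define UNDEF (-1)

static int pending;                                              /* stands in for tag->state */
static int handle_exception(enum tag st) { (void)st; return 7; } /* stub handler */
static int run_interpreter(void) { return 42; }                  /* stub interpreter */

static int
exec_loop(enum tag state, int result)
{
    if (state == TAG_NONE) return result;             /* no jump: done */

    while ((result = handle_exception(state)) == UNDEF) {
        result = run_interpreter();                   /* resume in the handler */
        if ((state = pending) == TAG_NONE) break;     /* a caught throw sets the tag */
        pending = TAG_NONE;
    }
    return result;
}

int
main(void)
{
    printf("%d\n", exec_loop(TAG_RAISE, UNDEF));      /* 7 */
    return 0;
}
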
+
+static inline VALUE
+vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo)
{
struct vm_throw_data *err = (struct vm_throw_data *)errinfo;
for (;;) {
- unsigned int i;
- const struct iseq_catch_table_entry *entry;
- const struct iseq_catch_table *ct;
- unsigned long epc, cont_pc, cont_sp;
- const rb_iseq_t *catch_iseq;
- rb_control_frame_t *cfp;
- VALUE type;
- const rb_control_frame_t *escape_cfp;
-
- cont_pc = cont_sp = 0;
- catch_iseq = NULL;
-
- while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) {
- if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) {
- EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self,
- rb_vm_frame_method_entry(ec->cfp)->def->original_id,
- rb_vm_frame_method_entry(ec->cfp)->called_id,
- rb_vm_frame_method_entry(ec->cfp)->owner, Qnil);
- RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec,
- rb_vm_frame_method_entry(ec->cfp)->owner,
- rb_vm_frame_method_entry(ec->cfp)->def->original_id);
- }
- rb_vm_pop_frame(ec);
- }
-
- cfp = ec->cfp;
- epc = cfp->pc - cfp->iseq->body->iseq_encoded;
-
- escape_cfp = NULL;
- if (state == TAG_BREAK || state == TAG_RETURN) {
- escape_cfp = THROW_DATA_CATCH_FRAME(err);
-
- if (cfp == escape_cfp) {
- if (state == TAG_RETURN) {
- if (!VM_FRAME_FINISHED_P(cfp)) {
- THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
- THROW_DATA_STATE_SET(err, state = TAG_BREAK);
- }
- else {
- ct = cfp->iseq->body->catch_table;
- if (ct) for (i = 0; i < ct->size; i++) {
- entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
- if (entry->start < epc && entry->end >= epc) {
- if (entry->type == CATCH_TYPE_ENSURE) {
- catch_iseq = entry->iseq;
- cont_pc = entry->cont;
- cont_sp = entry->sp;
- break;
- }
- }
- }
- if (catch_iseq == NULL) {
- ec->errinfo = Qnil;
- THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
- hook_before_rewind(ec, ec->cfp, TRUE, state, err);
- rb_vm_pop_frame(ec);
- return THROW_DATA_VAL(err);
- }
- }
- /* through */
- }
- else {
- /* TAG_BREAK */
-#if OPT_STACK_CACHING
- *initial = THROW_DATA_VAL(err);
-#else
- *ec->cfp->sp++ = THROW_DATA_VAL(err);
-#endif
- ec->errinfo = Qnil;
- return Qundef;
- }
- }
- }
-
- if (state == TAG_RAISE) {
- ct = cfp->iseq->body->catch_table;
- if (ct) for (i = 0; i < ct->size; i++) {
- entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
- if (entry->start < epc && entry->end >= epc) {
-
- if (entry->type == CATCH_TYPE_RESCUE ||
- entry->type == CATCH_TYPE_ENSURE) {
- catch_iseq = entry->iseq;
- cont_pc = entry->cont;
- cont_sp = entry->sp;
- break;
- }
- }
- }
- }
- else if (state == TAG_RETRY) {
- ct = cfp->iseq->body->catch_table;
- if (ct) for (i = 0; i < ct->size; i++) {
- entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
- if (entry->start < epc && entry->end >= epc) {
-
- if (entry->type == CATCH_TYPE_ENSURE) {
- catch_iseq = entry->iseq;
- cont_pc = entry->cont;
- cont_sp = entry->sp;
- break;
- }
- else if (entry->type == CATCH_TYPE_RETRY) {
- const rb_control_frame_t *escape_cfp;
- escape_cfp = THROW_DATA_CATCH_FRAME(err);
- if (cfp == escape_cfp) {
- cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
- ec->errinfo = Qnil;
- return Qundef;
- }
- }
- }
- }
- }
+ unsigned int i;
+ const struct iseq_catch_table_entry *entry;
+ const struct iseq_catch_table *ct;
+ unsigned long epc, cont_pc, cont_sp;
+ const rb_iseq_t *catch_iseq;
+ VALUE type;
+ const rb_control_frame_t *escape_cfp;
+
+ cont_pc = cont_sp = 0;
+ catch_iseq = NULL;
+
+ while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) {
+ if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) {
+ EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self,
+ rb_vm_frame_method_entry(ec->cfp)->def->original_id,
+ rb_vm_frame_method_entry(ec->cfp)->called_id,
+ rb_vm_frame_method_entry(ec->cfp)->owner, Qnil);
+ RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec,
+ rb_vm_frame_method_entry(ec->cfp)->owner,
+ rb_vm_frame_method_entry(ec->cfp)->def->original_id);
+ }
+ rb_vm_pop_frame(ec);
+ }
+
+ rb_control_frame_t *const cfp = ec->cfp;
+ epc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded;
+
+ escape_cfp = NULL;
+ if (state == TAG_BREAK || state == TAG_RETURN) {
+ escape_cfp = THROW_DATA_CATCH_FRAME(err);
+
+ if (cfp == escape_cfp) {
+ if (state == TAG_RETURN) {
+ if (!VM_FRAME_FINISHED_P(cfp)) {
+ THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
+ THROW_DATA_STATE_SET(err, state = TAG_BREAK);
+ }
+ else {
+ ct = ISEQ_BODY(cfp->iseq)->catch_table;
+ if (ct) for (i = 0; i < ct->size; i++) {
+ entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
+ if (entry->start < epc && entry->end >= epc) {
+ if (entry->type == CATCH_TYPE_ENSURE) {
+ catch_iseq = entry->iseq;
+ cont_pc = entry->cont;
+ cont_sp = entry->sp;
+ break;
+ }
+ }
+ }
+ if (catch_iseq == NULL) {
+ ec->errinfo = Qnil;
+ THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
+ // cfp == escape_cfp here so calling with cfp_returning_with_value = true
+ hook_before_rewind(ec, true, state, err);
+ rb_vm_pop_frame(ec);
+ return THROW_DATA_VAL(err);
+ }
+ }
+ /* through */
+ }
+ else {
+ /* TAG_BREAK */
+ *cfp->sp++ = THROW_DATA_VAL(err);
+ ec->errinfo = Qnil;
+ return Qundef;
+ }
+ }
+ }
+
+ if (state == TAG_RAISE) {
+ ct = ISEQ_BODY(cfp->iseq)->catch_table;
+ if (ct) for (i = 0; i < ct->size; i++) {
+ entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
+ if (entry->start < epc && entry->end >= epc) {
+
+ if (entry->type == CATCH_TYPE_RESCUE ||
+ entry->type == CATCH_TYPE_ENSURE) {
+ catch_iseq = entry->iseq;
+ cont_pc = entry->cont;
+ cont_sp = entry->sp;
+ break;
+ }
+ }
+ }
+ }
+ else if (state == TAG_RETRY) {
+ ct = ISEQ_BODY(cfp->iseq)->catch_table;
+ if (ct) for (i = 0; i < ct->size; i++) {
+ entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
+ if (entry->start < epc && entry->end >= epc) {
+
+ if (entry->type == CATCH_TYPE_ENSURE) {
+ catch_iseq = entry->iseq;
+ cont_pc = entry->cont;
+ cont_sp = entry->sp;
+ break;
+ }
+ else if (entry->type == CATCH_TYPE_RETRY) {
+ const rb_control_frame_t *escape_cfp;
+ escape_cfp = THROW_DATA_CATCH_FRAME(err);
+ if (cfp == escape_cfp) {
+ cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont;
+ ec->errinfo = Qnil;
+ return Qundef;
+ }
+ }
+ }
+ }
+ }
else if ((state == TAG_BREAK && !escape_cfp) ||
(state == TAG_REDO) ||
(state == TAG_NEXT)) {
- type = (const enum catch_type[TAG_MASK]) {
+ type = (const enum rb_catch_type[TAG_MASK]) {
[TAG_BREAK] = CATCH_TYPE_BREAK,
[TAG_NEXT] = CATCH_TYPE_NEXT,
[TAG_REDO] = CATCH_TYPE_REDO,
/* otherwise = dontcare */
}[state];
- ct = cfp->iseq->body->catch_table;
- if (ct) for (i = 0; i < ct->size; i++) {
- entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
-
- if (entry->start < epc && entry->end >= epc) {
- if (entry->type == CATCH_TYPE_ENSURE) {
- catch_iseq = entry->iseq;
- cont_pc = entry->cont;
- cont_sp = entry->sp;
- break;
- }
- else if (entry->type == type) {
- cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
- cfp->sp = vm_base_ptr(cfp) + entry->sp;
-
- if (state != TAG_REDO) {
-#if OPT_STACK_CACHING
- *initial = THROW_DATA_VAL(err);
-#else
- *ec->cfp->sp++ = THROW_DATA_VAL(err);
-#endif
- }
- ec->errinfo = Qnil;
- VM_ASSERT(ec->tag->state == TAG_NONE);
- return Qundef;
- }
- }
- }
- }
- else {
- ct = cfp->iseq->body->catch_table;
- if (ct) for (i = 0; i < ct->size; i++) {
- entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
- if (entry->start < epc && entry->end >= epc) {
-
- if (entry->type == CATCH_TYPE_ENSURE) {
- catch_iseq = entry->iseq;
- cont_pc = entry->cont;
- cont_sp = entry->sp;
- break;
- }
- }
- }
- }
-
- if (catch_iseq != NULL) { /* found catch table */
- /* enter catch scope */
- const int arg_size = 1;
-
- rb_iseq_check(catch_iseq);
- cfp->sp = vm_base_ptr(cfp) + cont_sp;
- cfp->pc = cfp->iseq->body->iseq_encoded + cont_pc;
-
- /* push block frame */
- cfp->sp[0] = (VALUE)err;
- vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE,
- cfp->self,
- VM_GUARDED_PREV_EP(cfp->ep),
- 0, /* cref or me */
- catch_iseq->body->iseq_encoded,
- cfp->sp + arg_size /* push value */,
- catch_iseq->body->local_table_size - arg_size,
- catch_iseq->body->stack_max);
-
- state = 0;
- ec->tag->state = TAG_NONE;
- ec->errinfo = Qnil;
-
- return Qundef;
- }
- else {
- hook_before_rewind(ec, ec->cfp, FALSE, state, err);
-
- if (VM_FRAME_FINISHED_P(ec->cfp)) {
- rb_vm_pop_frame(ec);
- ec->errinfo = (VALUE)err;
- ec->tag = ec->tag->prev;
- EC_JUMP_TAG(ec, state);
- }
- else {
- rb_vm_pop_frame(ec);
- }
- }
+ ct = ISEQ_BODY(cfp->iseq)->catch_table;
+ if (ct) for (i = 0; i < ct->size; i++) {
+ entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
+
+ if (entry->start < epc && entry->end >= epc) {
+ if (entry->type == CATCH_TYPE_ENSURE) {
+ catch_iseq = entry->iseq;
+ cont_pc = entry->cont;
+ cont_sp = entry->sp;
+ break;
+ }
+ else if (entry->type == type) {
+ cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont;
+ cfp->sp = vm_base_ptr(cfp) + entry->sp;
+
+ if (state != TAG_REDO) {
+ *cfp->sp++ = THROW_DATA_VAL(err);
+ }
+ ec->errinfo = Qnil;
+ VM_ASSERT(ec->tag->state == TAG_NONE);
+ return Qundef;
+ }
+ }
+ }
+ }
+ else {
+ ct = ISEQ_BODY(cfp->iseq)->catch_table;
+ if (ct) for (i = 0; i < ct->size; i++) {
+ entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
+ if (entry->start < epc && entry->end >= epc) {
+
+ if (entry->type == CATCH_TYPE_ENSURE) {
+ catch_iseq = entry->iseq;
+ cont_pc = entry->cont;
+ cont_sp = entry->sp;
+ break;
+ }
+ }
+ }
+ }
+
+ if (catch_iseq != NULL) { /* found catch table */
+ /* enter catch scope */
+ const int arg_size = 1;
+
+ rb_iseq_check(catch_iseq);
+ cfp->sp = vm_base_ptr(cfp) + cont_sp;
+ cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + cont_pc;
+
+ /* push block frame */
+ cfp->sp[0] = (VALUE)err;
+ vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE,
+ cfp->self,
+ VM_GUARDED_PREV_EP(cfp->ep),
+ 0, /* cref or me */
+ ISEQ_BODY(catch_iseq)->iseq_encoded,
+ cfp->sp + arg_size /* push value */,
+ ISEQ_BODY(catch_iseq)->local_table_size - arg_size,
+ ISEQ_BODY(catch_iseq)->stack_max);
+
+ state = 0;
+ ec->tag->state = TAG_NONE;
+ ec->errinfo = Qnil;
+
+ return Qundef;
+ }
+ else {
+ hook_before_rewind(ec, (cfp == escape_cfp), state, err);
+
+ if (VM_FRAME_FINISHED_P(ec->cfp)) {
+ rb_vm_pop_frame(ec);
+ ec->errinfo = (VALUE)err;
+ ec->tag = ec->tag->prev;
+ EC_JUMP_TAG(ec, state);
+ }
+ else {
+ rb_vm_pop_frame(ec);
+ }
+ }
}
}
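
Editor's note: every branch above scans the frame's catch table with the same range test. The entry covers the faulting program counter when entry->start < epc && entry->end >= epc, i.e. a half-open (start, end] interval. The search in isolation, self-contained:

#include <stddef.h>
#include <stdio.h>

enum catch_type { CATCH_TYPE_RESCUE, CATCH_TYPE_ENSURE };
struct catch_entry { unsigned long start, end, cont; enum catch_type type; };

/* First entry whose (start, end] range covers the faulting pc, mirroring
 * the `entry->start < epc && entry->end >= epc` test in the diff. */
static const struct catch_entry *
find_catch(const struct catch_entry *tbl, size_t n,
           unsigned long epc, enum catch_type want)
{
    for (size_t i = 0; i < n; i++) {
        if (tbl[i].start < epc && tbl[i].end >= epc && tbl[i].type == want)
            return &tbl[i];
    }
    return NULL;
}

int
main(void)
{
    struct catch_entry tbl[] = {{ 4, 20, 21, CATCH_TYPE_RESCUE }};
    const struct catch_entry *e = find_catch(tbl, 1, 10, CATCH_TYPE_RESCUE);
    printf("%s\n", e ? "caught" : "unhandled");  /* caught */
    return 0;
}
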
@@ -2427,7 +2804,7 @@ rb_iseq_eval(const rb_iseq_t *iseq)
rb_execution_context_t *ec = GET_EC();
VALUE val;
vm_set_top_stack(ec, iseq);
- val = vm_exec(ec, true);
+ val = vm_exec(ec);
return val;
}
@@ -2438,7 +2815,7 @@ rb_iseq_eval_main(const rb_iseq_t *iseq)
VALUE val;
vm_set_main_stack(ec, iseq);
- val = vm_exec(ec, true);
+ val = vm_exec(ec);
return val;
}
@@ -2448,13 +2825,13 @@ rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *cal
const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
if (me) {
- if (idp) *idp = me->def->original_id;
- if (called_idp) *called_idp = me->called_id;
- if (klassp) *klassp = me->owner;
- return TRUE;
+ if (idp) *idp = me->def->original_id;
+ if (called_idp) *called_idp = me->called_id;
+ if (klassp) *klassp = me->owner;
+ return TRUE;
}
else {
- return FALSE;
+ return FALSE;
}
}
@@ -2472,7 +2849,7 @@ rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
- VALUE block_handler, VALUE filename)
+ VALUE block_handler, VALUE filename)
{
rb_execution_context_t *ec = GET_EC();
const rb_control_frame_t *reg_cfp = ec->cfp;
@@ -2480,9 +2857,9 @@ rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
VALUE val;
vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
- recv, block_handler,
- (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
- 0, reg_cfp->sp, 0, 0);
+ recv, block_handler,
+ (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
+ 0, reg_cfp->sp, 0, 0);
val = (*func)(arg);
@@ -2498,6 +2875,7 @@ rb_vm_update_references(void *ptr)
if (ptr) {
rb_vm_t *vm = ptr;
+ rb_gc_update_tbl_refs(vm->ci_table);
rb_gc_update_tbl_refs(vm->frozen_strings);
vm->mark_object_ary = rb_gc_location(vm->mark_object_ary);
vm->load_path = rb_gc_location(vm->load_path);
@@ -2511,9 +2889,14 @@ rb_vm_update_references(void *ptr)
vm->loaded_features = rb_gc_location(vm->loaded_features);
vm->loaded_features_snapshot = rb_gc_location(vm->loaded_features_snapshot);
vm->loaded_features_realpaths = rb_gc_location(vm->loaded_features_realpaths);
+ vm->loaded_features_realpath_map = rb_gc_location(vm->loaded_features_realpath_map);
vm->top_self = rb_gc_location(vm->top_self);
vm->orig_progname = rb_gc_location(vm->orig_progname);
+ rb_gc_update_tbl_refs(vm->overloaded_cme_table);
+
+ rb_gc_update_values(RUBY_NSIG, vm->trap_list.cmd);
+
if (vm->coverages) {
vm->coverages = rb_gc_location(vm->coverages);
vm->me2counter = rb_gc_location(vm->me2counter);
@@ -2527,19 +2910,19 @@ rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx)
if (ptr) {
rb_vm_t *vm = ptr;
rb_ractor_t *r = 0;
- list_for_each(&vm->ractor.set, r, vmlr_node) {
+ ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
rb_ractor_status_p(r, ractor_running));
if (r->threads.cnt > 0) {
rb_thread_t *th = 0;
- list_for_each(&r->threads.set, th, lt_node) {
+ ccan_list_for_each(&r->threads.set, th, lt_node) {
VM_ASSERT(th != NULL);
rb_execution_context_t * ec = th->ec;
if (ec->vm_stack) {
VALUE *p = ec->vm_stack;
VALUE *sp = ec->cfp->sp;
- while (p <= sp) {
- if (!rb_special_const_p(*p)) {
+ while (p < sp) {
+ if (!RB_SPECIAL_CONST_P(*p)) {
cb(*p, ctx);
}
p++;
@@ -2558,62 +2941,51 @@ vm_mark_negative_cme(VALUE val, void *dmy)
return ID_TABLE_CONTINUE;
}
+void rb_thread_sched_mark_zombies(rb_vm_t *vm);
+
void
rb_vm_mark(void *ptr)
{
RUBY_MARK_ENTER("vm");
RUBY_GC_INFO("-------------------------------------------------\n");
if (ptr) {
- rb_vm_t *vm = ptr;
+ rb_vm_t *vm = ptr;
rb_ractor_t *r = 0;
- long i, len;
- const VALUE *obj_ary;
+ long i;
- list_for_each(&vm->ractor.set, r, vmlr_node) {
+ ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
// ractor.set only contains blocking or running ractors
VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
rb_ractor_status_p(r, ractor_running));
rb_gc_mark(rb_ractor_self(r));
- }
-
- rb_gc_mark_movable(vm->mark_object_ary);
+ }
- len = RARRAY_LEN(vm->mark_object_ary);
- obj_ary = RARRAY_CONST_PTR(vm->mark_object_ary);
- for (i=0; i < len; i++) {
- const VALUE *ptr;
- long j, jlen;
-
- rb_gc_mark(*obj_ary);
- jlen = RARRAY_LEN(*obj_ary);
- ptr = RARRAY_CONST_PTR(*obj_ary);
- for (j=0; j < jlen; j++) {
- rb_gc_mark(*ptr++);
- }
- obj_ary++;
+ for (struct global_object_list *list = vm->global_object_list; list; list = list->next) {
+ rb_gc_mark_maybe(*list->varptr);
}
+ rb_gc_mark_movable(vm->mark_object_ary);
rb_gc_mark_movable(vm->load_path);
rb_gc_mark_movable(vm->load_path_snapshot);
- RUBY_MARK_MOVABLE_UNLESS_NULL(vm->load_path_check_cache);
+ rb_gc_mark_movable(vm->load_path_check_cache);
rb_gc_mark_movable(vm->expanded_load_path);
rb_gc_mark_movable(vm->loaded_features);
rb_gc_mark_movable(vm->loaded_features_snapshot);
rb_gc_mark_movable(vm->loaded_features_realpaths);
+ rb_gc_mark_movable(vm->loaded_features_realpath_map);
rb_gc_mark_movable(vm->top_self);
rb_gc_mark_movable(vm->orig_progname);
- RUBY_MARK_MOVABLE_UNLESS_NULL(vm->coverages);
- RUBY_MARK_MOVABLE_UNLESS_NULL(vm->me2counter);
- /* Prevent classes from moving */
- rb_mark_tbl(vm->defined_module_hash);
+ rb_gc_mark_movable(vm->coverages);
+ rb_gc_mark_movable(vm->me2counter);
- if (vm->loading_table) {
- rb_mark_tbl(vm->loading_table);
- }
+ if (vm->loading_table) {
+ rb_mark_tbl(vm->loading_table);
+ }
- rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd);
+ rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd);
rb_id_table_foreach_values(vm->negative_cme_table, vm_mark_negative_cme, NULL);
+ rb_mark_tbl_no_pin(vm->overloaded_cme_table);
for (i=0; i<VM_GLOBAL_CC_CACHE_TABLE_SIZE; i++) {
const struct rb_callcache *cc = vm->global_cc_cache_table[i];
@@ -2627,7 +2999,8 @@ rb_vm_mark(void *ptr)
}
}
- mjit_mark();
+ rb_thread_sched_mark_zombies(vm);
+ rb_rjit_mark();
}
RUBY_MARK_LEAVE("vm");
@@ -2641,17 +3014,7 @@ rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls,
VALUE exc = rb_exc_new3(cls, rb_obj_freeze(mesg));
OBJ_FREEZE(exc);
((VALUE *)vm->special_exceptions)[sp] = exc;
- rb_gc_register_mark_object(exc);
-}
-
-int
-rb_vm_add_root_module(VALUE module)
-{
- rb_vm_t *vm = GET_VM();
-
- st_insert(vm->defined_module_hash, (st_data_t)module, (st_data_t)module);
-
- return TRUE;
+ rb_vm_register_global_object(exc);
}
static int
@@ -2661,56 +3024,193 @@ free_loading_table_entry(st_data_t key, st_data_t value, st_data_t arg)
return ST_DELETE;
}
+void rb_free_loaded_features_index(rb_vm_t *vm);
+void rb_objspace_free_objects(void *objspace);
+
int
ruby_vm_destruct(rb_vm_t *vm)
{
RUBY_FREE_ENTER("vm");
if (vm) {
- rb_thread_t *th = vm->ractor.main_thread;
- struct rb_objspace *objspace = vm->objspace;
- vm->ractor.main_thread = NULL;
-
- if (th) {
- rb_fiber_reset_root_local_storage(th);
- thread_free(th);
- }
- rb_vm_living_threads_init(vm);
- ruby_vm_run_at_exit_hooks(vm);
- if (vm->loading_table) {
- st_foreach(vm->loading_table, free_loading_table_entry, 0);
- st_free_table(vm->loading_table);
- vm->loading_table = 0;
- }
- if (vm->frozen_strings) {
- st_free_table(vm->frozen_strings);
- vm->frozen_strings = 0;
- }
- RB_ALTSTACK_FREE(vm->main_altstack);
- if (objspace) {
- rb_objspace_free(objspace);
- }
- rb_native_mutex_destroy(&vm->waitpid_lock);
+ rb_thread_t *th = vm->ractor.main_thread;
+ VALUE *stack = th->ec->vm_stack;
+ if (rb_free_at_exit) {
+ rb_free_encoded_insn_data();
+ rb_free_global_enc_table();
+ rb_free_loaded_builtin_table();
+
+ rb_free_shared_fiber_pool();
+ rb_free_static_symid_str();
+ rb_free_transcoder_table();
+ rb_free_vm_opt_tables();
+ rb_free_warning();
+ rb_free_rb_global_tbl();
+ rb_free_loaded_features_index(vm);
+
+ rb_id_table_free(vm->negative_cme_table);
+ st_free_table(vm->overloaded_cme_table);
+
+ rb_id_table_free(RCLASS(rb_mRubyVMFrozenCore)->m_tbl);
+
+ rb_shape_t *cursor = rb_shape_get_root_shape();
+ rb_shape_t *end = rb_shape_get_shape_by_id(GET_SHAPE_TREE()->next_shape_id);
+ while (cursor < end) {
+ // 0x1 == SINGLE_CHILD_P
+ if (cursor->edges && !(((uintptr_t)cursor->edges) & 0x1))
+ rb_id_table_free(cursor->edges);
+ cursor += 1;
+ }
+
+ xfree(GET_SHAPE_TREE());
+
+ st_free_table(vm->static_ext_inits);
+ st_free_table(vm->ensure_rollback_table);
+
+ rb_vm_postponed_job_free();
+
+ rb_id_table_free(vm->constant_cache);
+
+ if (th) {
+ xfree(th->nt);
+ th->nt = NULL;
+ }
+
+#ifndef HAVE_SETPROCTITLE
+ ruby_free_proctitle();
+#endif
+ }
+ else {
+ if (th) {
+ rb_fiber_reset_root_local_storage(th);
+ thread_free(th);
+ }
+ }
+
+ struct rb_objspace *objspace = vm->objspace;
+
+ rb_vm_living_threads_init(vm);
+ ruby_vm_run_at_exit_hooks(vm);
+ if (vm->loading_table) {
+ st_foreach(vm->loading_table, free_loading_table_entry, 0);
+ st_free_table(vm->loading_table);
+ vm->loading_table = 0;
+ }
+ if (vm->ci_table) {
+ st_free_table(vm->ci_table);
+ vm->ci_table = NULL;
+ }
+ if (vm->frozen_strings) {
+ st_free_table(vm->frozen_strings);
+ vm->frozen_strings = 0;
+ }
+ RB_ALTSTACK_FREE(vm->main_altstack);
+
+ struct global_object_list *next;
+ for (struct global_object_list *list = vm->global_object_list; list; list = next) {
+ next = list->next;
+ xfree(list);
+ }
+
+ if (objspace) {
+ if (rb_free_at_exit) {
+ rb_objspace_free_objects(objspace);
+ rb_free_generic_iv_tbl_();
+ rb_free_default_rand_key();
+ if (th && vm->fork_gen == 0) {
+ /* If we have forked, main_thread may not be the initial thread */
+ xfree(stack);
+ ruby_mimfree(th);
+ }
+ }
+ rb_objspace_free(objspace);
+ }
rb_native_mutex_destroy(&vm->workqueue_lock);
- /* after freeing objspace, you *can't* use ruby_xfree() */
- ruby_mimfree(vm);
- ruby_current_vm_ptr = NULL;
+ /* after freeing objspace, you *can't* use ruby_xfree() */
+ ruby_mimfree(vm);
+ ruby_current_vm_ptr = NULL;
}
RUBY_FREE_LEAVE("vm");
return 0;
}
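
Editor's note: ruby_vm_destruct now frees a number of owned tables before releasing the objspace, and the global_object_list teardown uses the usual save-next idiom, since a node's next pointer is unreadable once the node is freed. The idiom in isolation (generic node type, not the real list):

#include <stdlib.h>

struct node { struct node *next; };

/* Capture `next` before freeing: list->next is gone after free(list). */
static void
free_list(struct node *list)
{
    struct node *next;
    for (; list; list = next) {
        next = list->next;
        free(list);
    }
}

int
main(void)
{
    struct node *b = calloc(1, sizeof(*b));
    struct node *a = calloc(1, sizeof(*a));
    a->next = b;
    free_list(a);
    return 0;
}
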
+size_t rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds); // thread.c
+size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c
+
+// Used for VM memsize reporting. Iterates over the VM's constant cache and
+// adds the memsize of each nested inline-cache table to the running total.
+static enum rb_id_table_iterator_result
+vm_memsize_constant_cache_i(ID id, VALUE ics, void *size)
+{
+ *((size_t *) size) += rb_st_memsize((st_table *) ics);
+ return ID_TABLE_CONTINUE;
+}
+
+// Returns a size_t representing the memory footprint of the VM's constant
+// cache, which is the memsize of the table as well as the memsize of all of the
+// nested tables.
static size_t
-vm_memsize(const void *ptr)
+vm_memsize_constant_cache(void)
{
- size_t size = sizeof(rb_vm_t);
+ rb_vm_t *vm = GET_VM();
+ size_t size = rb_id_table_memsize(vm->constant_cache);
- // TODO
- // size += vmobj->ractor_num * sizeof(rb_ractor_t);
+ rb_id_table_foreach(vm->constant_cache, vm_memsize_constant_cache_i, &size);
+ return size;
+}
+
+// Used for VM memsize reporting. Returns the size of the at_exit list by
+// looping through the linked list and adding up the size of the structs.
+static size_t
+vm_memsize_at_exit_list(rb_at_exit_list *at_exit)
+{
+ size_t size = 0;
+
+ while (at_exit) {
+ size += sizeof(rb_at_exit_list);
+ at_exit = at_exit->next;
+ }
return size;
}
+// Used for VM memsize reporting. Returns the size of the builtin function
+// table if it has been defined.
+static size_t
+vm_memsize_builtin_function_table(const struct rb_builtin_function *builtin_function_table)
+{
+ return builtin_function_table == NULL ? 0 : sizeof(struct rb_builtin_function);
+}
+
+// Reports the memsize of the VM struct object and the structs that are
+// associated with it.
+static size_t
+vm_memsize(const void *ptr)
+{
+ rb_vm_t *vm = GET_VM();
+
+ return (
+ sizeof(rb_vm_t) +
+ rb_vm_memsize_waiting_fds(&vm->waiting_fds) +
+ rb_st_memsize(vm->loaded_features_index) +
+ rb_st_memsize(vm->loading_table) +
+ rb_st_memsize(vm->ensure_rollback_table) +
+ rb_vm_memsize_postponed_job_queue() +
+ rb_vm_memsize_workqueue(&vm->workqueue) +
+ vm_memsize_at_exit_list(vm->at_exit) +
+ rb_st_memsize(vm->ci_table) +
+ rb_st_memsize(vm->frozen_strings) +
+ vm_memsize_builtin_function_table(vm->builtin_function_table) +
+ rb_id_table_memsize(vm->negative_cme_table) +
+ rb_st_memsize(vm->overloaded_cme_table) +
+ vm_memsize_constant_cache() +
+ GET_SHAPE_TREE()->cache_size * sizeof(redblack_node_t)
+ );
+
+ // TODO
+ // struct { struct ccan_list_head set; } ractor;
+ // void *main_altstack; #ifdef USE_SIGALTSTACK
+ // struct rb_objspace *objspace;
+}
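
Editor's note: vm_memsize replaces the old sizeof-only stub with an itemized sum over everything the VM struct owns; anything not yet counted is listed in the trailing TODO. The accounting pattern, reduced to a standalone example (hypothetical container, not rb_vm_t):

#include <stddef.h>
#include <stdio.h>

struct table { size_t nbytes; };
struct vmish {
    struct table *loading;      /* owned: counted */
    struct table *frozen;       /* owned: counted */
    void *objspace;             /* accounted for elsewhere: excluded */
};

static size_t table_memsize(const struct table *t) { return t ? t->nbytes : 0; }

/* Itemized footprint: the struct itself plus each owned substructure. */
static size_t
vmish_memsize(const struct vmish *vm)
{
    return sizeof(*vm)
         + table_memsize(vm->loading)
         + table_memsize(vm->frozen);
}

int
main(void)
{
    struct table a = { 128 }, b = { 64 };
    struct vmish vm = { &a, &b, NULL };
    printf("%zu\n", vmish_memsize(&vm));  /* sizeof(struct vmish) + 192 */
    return 0;
}
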
+
static const rb_data_type_t vm_data_type = {
"VM",
{0, 0, vm_memsize,},
@@ -2739,11 +3239,11 @@ get_param(const char *name, size_t default_value, size_t min_value)
const char *envval;
size_t result = default_value;
if ((envval = getenv(name)) != 0) {
- long val = atol(envval);
- if (val < (long)min_value) {
- val = (long)min_value;
- }
- result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
+ long val = atol(envval);
+ if (val < (long)min_value) {
+ val = (long)min_value;
+ }
+ result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
}
if (0) ruby_debug_printf("%s: %"PRIuSIZE"\n", name, result); /* debug print */
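
Editor's note: get_param clamps the environment value to a minimum and then rounds it up to a multiple of RUBY_VM_SIZE_ALIGN via the usual add-then-divide trick. A worked, self-contained check of that rounding (the 4 KiB alignment here is an assumption for illustration, not the real constant):

#include <stdio.h>

#define RUBY_VM_SIZE_ALIGN (4 * 1024)  /* assumed 4 KiB, for illustration */

/* Round val up to a multiple of the alignment, as get_param does. */
static size_t
align_up(long val)
{
    return (size_t)(((val - 1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
}

int
main(void)
{
    printf("%zu\n", align_up(1));      /* 4096 */
    printf("%zu\n", align_up(4096));   /* 4096: already aligned */
    printf("%zu\n", align_up(4097));   /* 8192: rounded up */
    return 0;
}
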
@@ -2759,7 +3259,7 @@ check_machine_stack_size(size_t *sizep)
#ifdef PTHREAD_STACK_MIN
if (size < (size_t)PTHREAD_STACK_MIN) {
- *sizep = (size_t)PTHREAD_STACK_MIN * 2;
+ *sizep = (size_t)PTHREAD_STACK_MIN * 2;
}
#endif
}
@@ -2769,23 +3269,23 @@ vm_default_params_setup(rb_vm_t *vm)
{
vm->default_params.thread_vm_stack_size =
get_param("RUBY_THREAD_VM_STACK_SIZE",
- RUBY_VM_THREAD_VM_STACK_SIZE,
- RUBY_VM_THREAD_VM_STACK_SIZE_MIN);
+ RUBY_VM_THREAD_VM_STACK_SIZE,
+ RUBY_VM_THREAD_VM_STACK_SIZE_MIN);
vm->default_params.thread_machine_stack_size =
get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
- RUBY_VM_THREAD_MACHINE_STACK_SIZE,
- RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN);
+ RUBY_VM_THREAD_MACHINE_STACK_SIZE,
+ RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN);
vm->default_params.fiber_vm_stack_size =
get_param("RUBY_FIBER_VM_STACK_SIZE",
- RUBY_VM_FIBER_VM_STACK_SIZE,
- RUBY_VM_FIBER_VM_STACK_SIZE_MIN);
+ RUBY_VM_FIBER_VM_STACK_SIZE,
+ RUBY_VM_FIBER_VM_STACK_SIZE_MIN);
vm->default_params.fiber_machine_stack_size =
get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
- RUBY_VM_FIBER_MACHINE_STACK_SIZE,
- RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN);
+ RUBY_VM_FIBER_MACHINE_STACK_SIZE,
+ RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN);
/* environment dependent check */
check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
@@ -2804,7 +3304,7 @@ vm_init2(rb_vm_t *vm)
}
void
-rb_execution_context_update(const rb_execution_context_t *ec)
+rb_execution_context_update(rb_execution_context_t *ec)
{
/* update VM stack */
if (ec->vm_stack) {
@@ -2844,6 +3344,8 @@ rb_execution_context_update(const rb_execution_context_t *ec)
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}
}
+
+ ec->storage = rb_gc_location(ec->storage);
}
static enum rb_id_table_iterator_result
@@ -2859,55 +3361,57 @@ rb_execution_context_mark(const rb_execution_context_t *ec)
/* mark VM stack */
if (ec->vm_stack) {
VM_ASSERT(ec->cfp);
- VALUE *p = ec->vm_stack;
- VALUE *sp = ec->cfp->sp;
- rb_control_frame_t *cfp = ec->cfp;
- rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
+ VALUE *p = ec->vm_stack;
+ VALUE *sp = ec->cfp->sp;
+ rb_control_frame_t *cfp = ec->cfp;
+ rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
VM_ASSERT(sp == ec->cfp->sp);
rb_gc_mark_vm_stack_values((long)(sp - p), p);
- while (cfp != limit_cfp) {
- const VALUE *ep = cfp->ep;
- VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep));
- rb_gc_mark_movable(cfp->self);
- rb_gc_mark_movable((VALUE)cfp->iseq);
- rb_gc_mark_movable((VALUE)cfp->block_code);
+ while (cfp != limit_cfp) {
+ const VALUE *ep = cfp->ep;
+ VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep));
- if (!VM_ENV_LOCAL_P(ep)) {
- const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
- if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
- rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]);
- }
+ if (VM_FRAME_TYPE(cfp) != VM_FRAME_MAGIC_DUMMY) {
+ rb_gc_mark_movable(cfp->self);
+ rb_gc_mark_movable((VALUE)cfp->iseq);
+ rb_gc_mark_movable((VALUE)cfp->block_code);
+
+ if (!VM_ENV_LOCAL_P(ep)) {
+ const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
+ if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
+ rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]);
+ }
- if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
- rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]);
- rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]);
- }
+ if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
+ rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]);
+ rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]);
+ }
+ }
}
- cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
- }
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+ }
}
/* mark machine stack */
if (ec->machine.stack_start && ec->machine.stack_end &&
- ec != GET_EC() /* marked for current ec at the first stage of marking */
- ) {
- rb_gc_mark_machine_stack(ec);
- rb_gc_mark_locations((VALUE *)&ec->machine.regs,
- (VALUE *)(&ec->machine.regs) +
- sizeof(ec->machine.regs) / (sizeof(VALUE)));
+ ec != GET_EC() /* marked for current ec at the first stage of marking */
+ ) {
+ rb_gc_mark_machine_context(ec);
}
- RUBY_MARK_UNLESS_NULL(ec->errinfo);
- RUBY_MARK_UNLESS_NULL(ec->root_svar);
+ rb_gc_mark(ec->errinfo);
+ rb_gc_mark(ec->root_svar);
if (ec->local_storage) {
rb_id_table_foreach_values(ec->local_storage, mark_local_storage_i, NULL);
}
- RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash);
- RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash_for_trace);
- RUBY_MARK_UNLESS_NULL(ec->private_const_reference);
+ rb_gc_mark(ec->local_storage_recursive_hash);
+ rb_gc_mark(ec->local_storage_recursive_hash_for_trace);
+ rb_gc_mark(ec->private_const_reference);
+
+ rb_gc_mark_movable(ec->storage);
}
void rb_fiber_mark_self(rb_fiber_t *fib);
@@ -2938,8 +3442,8 @@ thread_mark(void *ptr)
switch (th->invoke_type) {
case thread_invoke_type_proc:
case thread_invoke_type_ractor_proc:
- RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.proc);
- RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.args);
+ rb_gc_mark(th->invoke_arg.proc.proc);
+ rb_gc_mark(th->invoke_arg.proc.args);
break;
case thread_invoke_type_func:
rb_gc_mark_maybe((VALUE)th->invoke_arg.func.arg);
@@ -2949,46 +3453,53 @@ thread_mark(void *ptr)
}
rb_gc_mark(rb_ractor_self(th->ractor));
- RUBY_MARK_UNLESS_NULL(th->thgroup);
- RUBY_MARK_UNLESS_NULL(th->value);
- RUBY_MARK_UNLESS_NULL(th->pending_interrupt_queue);
- RUBY_MARK_UNLESS_NULL(th->pending_interrupt_mask_stack);
- RUBY_MARK_UNLESS_NULL(th->top_self);
- RUBY_MARK_UNLESS_NULL(th->top_wrapper);
+ rb_gc_mark(th->thgroup);
+ rb_gc_mark(th->value);
+ rb_gc_mark(th->pending_interrupt_queue);
+ rb_gc_mark(th->pending_interrupt_mask_stack);
+ rb_gc_mark(th->top_self);
+ rb_gc_mark(th->top_wrapper);
if (th->root_fiber) rb_fiber_mark_self(th->root_fiber);
- /* Ensure EC stack objects are pinned */
- rb_execution_context_mark(th->ec);
- RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
- RUBY_MARK_UNLESS_NULL(th->last_status);
- RUBY_MARK_UNLESS_NULL(th->locking_mutex);
- RUBY_MARK_UNLESS_NULL(th->name);
+ RUBY_ASSERT(th->ec == rb_fiberptr_get_ec(th->ec->fiber_ptr));
+ rb_gc_mark(th->stat_insn_usage);
+ rb_gc_mark(th->last_status);
+ rb_gc_mark(th->locking_mutex);
+ rb_gc_mark(th->name);
- RUBY_MARK_UNLESS_NULL(th->scheduler);
+ rb_gc_mark(th->scheduler);
RUBY_MARK_LEAVE("thread");
}
+void rb_threadptr_sched_free(rb_thread_t *th); // thread_*.c
+
static void
thread_free(void *ptr)
{
rb_thread_t *th = ptr;
RUBY_FREE_ENTER("thread");
+ rb_threadptr_sched_free(th);
+
if (th->locking_mutex != Qfalse) {
- rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
+ rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
}
if (th->keeping_mutexes != NULL) {
- rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
+ rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
}
+ ruby_xfree(th->specific_storage);
+
rb_threadptr_root_fiber_release(th);
if (th->vm && th->vm->ractor.main_thread == th) {
- RUBY_GC_INFO("MRI main thread\n");
+ RUBY_GC_INFO("MRI main thread\n");
}
else {
- ruby_xfree(ptr);
+ // ruby_xfree(th->nt);
+ // TODO: the MN system collects th->nt, but without the MN system it should be freed here.
+ ruby_xfree(th);
}
RUBY_FREE_LEAVE("thread");
@@ -3001,10 +3512,10 @@ thread_memsize(const void *ptr)
size_t size = sizeof(rb_thread_t);
if (!th->root_fiber) {
- size += th->ec->vm_stack_size * sizeof(VALUE);
+ size += th->ec->vm_stack_size * sizeof(VALUE);
}
if (th->ec->local_storage) {
- size += rb_id_table_memsize(th->ec->local_storage);
+ size += rb_id_table_memsize(th->ec->local_storage);
}
return size;
}
@@ -3013,9 +3524,9 @@ thread_memsize(const void *ptr)
const rb_data_type_t ruby_threadptr_data_type = {
"VM/thread",
{
- thread_mark,
- thread_free,
- thread_memsize,
+ thread_mark,
+ thread_free,
+ thread_memsize,
thread_compact,
},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
@@ -3030,11 +3541,8 @@ rb_obj_is_thread(VALUE obj)
static VALUE
thread_alloc(VALUE klass)
{
- VALUE obj;
rb_thread_t *th;
- obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
-
- return obj;
+ return TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
}
inline void
@@ -3049,6 +3557,10 @@ rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
{
rb_ec_set_vm_stack(ec, stack, size);
+#if VM_CHECK_MODE > 0
+ MEMZERO(stack, VALUE, size); // malloc memory could have the VM canary in it
+#endif
+
ec->cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
vm_push_frame(ec,
@@ -3070,9 +3582,10 @@ rb_ec_clear_vm_stack(rb_execution_context_t *ec)
}
static void
-th_init(rb_thread_t *th, VALUE self)
+th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
{
th->self = self;
+
rb_threadptr_root_fiber_setup(th);
/* All threads are blocking until a non-blocking fiber is scheduled */
@@ -3080,7 +3593,7 @@ th_init(rb_thread_t *th, VALUE self)
th->scheduler = Qnil;
if (self == 0) {
- size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
+ size_t size = vm->default_params.thread_vm_stack_size / sizeof(VALUE);
rb_ec_initialize_vm_stack(th->ec, ALLOC_N(VALUE, size), size);
}
else {
@@ -3091,47 +3604,39 @@ th_init(rb_thread_t *th, VALUE self)
th->status = THREAD_RUNNABLE;
th->last_status = Qnil;
+ th->top_wrapper = 0;
+ th->top_self = vm->top_self; // 0 while self == 0
+ th->value = Qundef;
+
th->ec->errinfo = Qnil;
th->ec->root_svar = Qfalse;
th->ec->local_storage_recursive_hash = Qnil;
th->ec->local_storage_recursive_hash_for_trace = Qnil;
-#ifdef NON_SCALAR_THREAD_ID
- th->thread_id_string[0] = '\0';
-#endif
- th->value = Qundef;
+ th->ec->storage = Qnil;
#if OPT_CALL_THREADED_CODE
th->retval = Qundef;
#endif
th->name = Qnil;
- th->report_on_exception = th->vm->thread_report_on_exception;
+ th->report_on_exception = vm->thread_report_on_exception;
th->ext_config.ractor_safe = true;
-}
-
-static VALUE
-ruby_thread_init(VALUE self)
-{
- rb_thread_t *th = GET_THREAD();
- rb_thread_t *target_th = rb_thread_ptr(self);
- rb_vm_t *vm = th->vm;
- target_th->vm = vm;
- th_init(target_th, self);
+#if USE_RUBY_DEBUG_LOG
+ static rb_atomic_t thread_serial = 1;
+ th->serial = RUBY_ATOMIC_FETCH_ADD(thread_serial, 1);
- target_th->top_wrapper = 0;
- target_th->top_self = rb_vm_top_self();
- target_th->ec->root_svar = Qfalse;
- target_th->ractor = th->ractor;
-
- return self;
+ RUBY_DEBUG_LOG("th:%u", th->serial);
+#endif
}
VALUE
rb_thread_alloc(VALUE klass)
{
VALUE self = thread_alloc(klass);
- ruby_thread_init(self);
+ rb_thread_t *target_th = rb_thread_ptr(self);
+ target_th->ractor = GET_RACTOR();
+ th_init(target_th, self, target_th->vm = GET_VM());
return self;
}
@@ -3148,7 +3653,7 @@ static VALUE
m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
{
REWIND_CFP({
- rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
+ rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
});
return Qnil;
}
@@ -3157,7 +3662,7 @@ static VALUE
m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
{
REWIND_CFP({
- rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
+ rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
});
return Qnil;
}
@@ -3167,8 +3672,8 @@ m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
REWIND_CFP({
ID mid = SYM2ID(sym);
- rb_undef(cbase, mid);
- rb_clear_method_cache(self, mid);
+ rb_undef(cbase, mid);
+ rb_clear_method_cache(self, mid);
});
return Qnil;
}
@@ -3211,7 +3716,9 @@ kwmerge_i(VALUE key, VALUE value, VALUE hash)
static VALUE
m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
{
- REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
+ if (!NIL_P(kw)) {
+ REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
+ }
return hash;
}
@@ -3240,36 +3747,6 @@ core_hash_merge_kwd(VALUE hash, VALUE kw)
return hash;
}
-/* Returns true if JIT is enabled */
-static VALUE
-jit_enabled_p(VALUE _)
-{
- return RBOOL(mjit_enabled);
-}
-
-static VALUE
-jit_pause_m(int argc, VALUE *argv, RB_UNUSED_VAR(VALUE self))
-{
- VALUE options = Qnil;
- VALUE wait = Qtrue;
- rb_scan_args(argc, argv, "0:", &options);
-
- if (!NIL_P(options)) {
- static ID keyword_ids[1];
- if (!keyword_ids[0])
- keyword_ids[0] = rb_intern("wait");
- rb_get_kwargs(options, keyword_ids, 0, 1, &wait);
- }
-
- return mjit_pause(RTEST(wait));
-}
-
-static VALUE
-jit_resume_m(VALUE _)
-{
- return mjit_resume();
-}
-
extern VALUE *rb_gc_stack_start;
extern size_t rb_gc_stack_maxsize;
@@ -3279,7 +3756,7 @@ extern size_t rb_gc_stack_maxsize;
static VALUE
sdr(VALUE self)
{
- rb_vm_bugreport(NULL);
+ rb_vm_bugreport(NULL, stderr);
return Qnil;
}
@@ -3297,11 +3774,11 @@ nsdr(VALUE self)
int i;
if (syms == 0) {
- rb_memerror();
+ rb_memerror();
}
for (i=0; i<n; i++) {
- rb_ary_push(ary, rb_str_new2(syms[i]));
+ rb_ary_push(ary, rb_str_new2(syms[i]));
}
free(syms); /* OK */
#endif
@@ -3347,6 +3824,7 @@ f_sprintf(int c, const VALUE *v, VALUE _)
return rb_f_sprintf(c, v);
}
+/* :nodoc: */
static VALUE
vm_mtbl(VALUE self, VALUE obj, VALUE sym)
{
@@ -3354,6 +3832,7 @@ vm_mtbl(VALUE self, VALUE obj, VALUE sym)
return Qnil;
}
+/* :nodoc: */
static VALUE
vm_mtbl2(VALUE self, VALUE obj, VALUE sym)
{
@@ -3402,7 +3881,6 @@ Init_VM(void)
VALUE opts;
VALUE klass;
VALUE fcore;
- VALUE jit;
/*
* Document-class: RubyVM
@@ -3430,6 +3908,7 @@ Init_VM(void)
/* FrozenCore (hidden) */
fcore = rb_class_new(rb_cBasicObject);
rb_set_class_path(fcore, rb_cRubyVM, "FrozenCore");
+ rb_vm_register_global_object(rb_class_path_cached(fcore));
RBASIC(fcore)->flags = T_ICLASS;
klass = rb_singleton_class(fcore);
rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
@@ -3448,21 +3927,9 @@ Init_VM(void)
rb_obj_freeze(fcore);
RBASIC_CLEAR_CLASS(klass);
rb_obj_freeze(klass);
- rb_gc_register_mark_object(fcore);
+ rb_vm_register_global_object(fcore);
rb_mRubyVMFrozenCore = fcore;
- /* ::RubyVM::JIT
- * Provides access to the Method JIT compiler of MRI.
- * Of course, this module is MRI specific.
- */
- jit = rb_define_module_under(rb_cRubyVM, "JIT");
- rb_define_singleton_method(jit, "enabled?", jit_enabled_p, 0);
- rb_define_singleton_method(jit, "pause", jit_pause_m, -1);
- rb_define_singleton_method(jit, "resume", jit_resume_m, 0);
- /* RubyVM::MJIT for short-term backward compatibility */
- rb_const_set(rb_cRubyVM, rb_intern("MJIT"), jit);
- rb_deprecate_constant(rb_cRubyVM, "MJIT");
-
/*
* Document-class: Thread
*
@@ -3660,9 +4127,6 @@ Init_VM(void)
rb_ary_push(opts, rb_str_new2("call threaded code"));
#endif
-#if OPT_STACK_CACHING
- rb_ary_push(opts, rb_str_new2("stack caching"));
-#endif
#if OPT_OPERANDS_UNIFICATION
rb_ary_push(opts, rb_str_new2("operands unification"));
#endif
@@ -3672,9 +4136,6 @@ Init_VM(void)
#if OPT_INLINE_METHOD_CACHE
rb_ary_push(opts, rb_str_new2("inline method cache"));
#endif
-#if OPT_BLOCKINLINING
- rb_ary_push(opts, rb_str_new2("block inlining"));
-#endif
/* ::RubyVM::INSTRUCTION_NAMES
* A list of bytecode instruction names in MRI.
@@ -3705,47 +4166,49 @@ Init_VM(void)
/* VM bootstrap: phase 2 */
{
- rb_vm_t *vm = ruby_current_vm_ptr;
- rb_thread_t *th = GET_THREAD();
- VALUE filename = rb_fstring_lit("<main>");
- const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
+ rb_vm_t *vm = ruby_current_vm_ptr;
+ rb_thread_t *th = GET_THREAD();
+ VALUE filename = rb_fstring_lit("<main>");
+ const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
// Ractor setup
rb_ractor_main_setup(vm, th->ractor, th);
- /* create vm object */
- vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
+ /* create vm object */
+ vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
- /* create main thread */
+ /* create main thread */
th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
- vm->ractor.main_thread = th;
+ vm->ractor.main_thread = th;
vm->ractor.main_ractor = th->ractor;
- th->vm = vm;
- th->top_wrapper = 0;
- th->top_self = rb_vm_top_self();
+ th->vm = vm;
+ th->top_wrapper = 0;
+ th->top_self = rb_vm_top_self();
- rb_gc_register_mark_object((VALUE)iseq);
- th->ec->cfp->iseq = iseq;
- th->ec->cfp->pc = iseq->body->iseq_encoded;
- th->ec->cfp->self = th->top_self;
+ rb_vm_register_global_object((VALUE)iseq);
+ th->ec->cfp->iseq = iseq;
+ th->ec->cfp->pc = ISEQ_BODY(iseq)->iseq_encoded;
+ th->ec->cfp->self = th->top_self;
- VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
- VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE));
+ VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
+ VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE, FALSE));
- /*
- * The Binding of the top level scope
- */
- rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
+ /*
+ * The Binding of the top level scope
+ */
+ rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
+#ifdef _WIN32
rb_objspace_gc_enable(vm->objspace);
+#endif
}
vm_init_redefined_flag();
rb_block_param_proxy = rb_obj_alloc(rb_cObject);
- rb_add_method(rb_singleton_class(rb_block_param_proxy), idCall, VM_METHOD_TYPE_OPTIMIZED,
- (void *)OPTIMIZED_METHOD_TYPE_BLOCK_CALL, METHOD_VISI_PUBLIC);
+ rb_add_method_optimized(rb_singleton_class(rb_block_param_proxy), idCall,
+ OPTIMIZED_METHOD_TYPE_BLOCK_CALL, 0, METHOD_VISI_PUBLIC);
rb_obj_freeze(rb_block_param_proxy);
- rb_gc_register_mark_object(rb_block_param_proxy);
+ rb_vm_register_global_object(rb_block_param_proxy);
/* vm_backtrace.c */
Init_vm_backtrace();
@@ -3758,7 +4221,8 @@ rb_vm_set_progname(VALUE filename)
rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
--cfp;
- rb_iseq_pathobj_set(cfp->iseq, rb_str_dup(filename), rb_iseq_realpath(cfp->iseq));
+ filename = rb_str_new_frozen(filename);
+ rb_iseq_pathobj_set(cfp->iseq, filename, rb_iseq_realpath(cfp->iseq));
}
extern const struct st_hash_type rb_fstring_hash_type;
@@ -3770,26 +4234,162 @@ Init_BareVM(void)
rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm));
rb_thread_t * th = ruby_mimmalloc(sizeof(*th));
if (!vm || !th) {
- fputs("[FATAL] failed to allocate memory\n", stderr);
- exit(EXIT_FAILURE);
+ fputs("[FATAL] failed to allocate memory\n", stderr);
+ exit(EXIT_FAILURE);
}
+
+ // setup the VM
MEMZERO(th, rb_thread_t, 1);
vm_init2(vm);
- vm->objspace = rb_objspace_alloc();
+ rb_vm_postponed_job_queue_init(vm);
ruby_current_vm_ptr = vm;
+ vm->objspace = rb_objspace_alloc();
vm->negative_cme_table = rb_id_table_create(16);
+ vm->overloaded_cme_table = st_init_numtable();
+ vm->constant_cache = rb_id_table_create(0);
- Init_native_thread(th);
+ // setup main thread
+ th->nt = ZALLOC(struct rb_native_thread);
th->vm = vm;
- th_init(th, 0);
- vm->ractor.main_ractor = th->ractor = rb_ractor_main_alloc();
+ th->ractor = vm->ractor.main_ractor = rb_ractor_main_alloc();
+ Init_native_thread(th);
+ rb_jit_cont_init();
+ th_init(th, 0, vm);
+
rb_ractor_set_current_ec(th->ractor, th->ec);
- ruby_thread_init_stack(th);
+ /* n.b. native_main_thread_stack_top is set by the RUBY_INIT_STACK macro */
+ ruby_thread_init_stack(th, native_main_thread_stack_top);
+ // setup ractor system
rb_native_mutex_initialize(&vm->ractor.sync.lock);
- rb_native_cond_initialize(&vm->ractor.sync.barrier_cond);
rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
+
+ vm_opt_method_def_table = st_init_numtable();
+ vm_opt_mid_table = st_init_numtable();
+
+#ifdef RUBY_THREAD_WIN32_H
+ rb_native_cond_initialize(&vm->ractor.sync.barrier_cond);
+#endif
+}
+
+void
+ruby_init_stack(void *addr)
+{
+ native_main_thread_stack_top = addr;
+}
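As context for the new entry point above: ruby_init_stack() only records an address, which Init_BareVM later hands to ruby_thread_init_stack(). A minimal embedding sketch, assuming the public RUBY_INIT_STACK macro from ruby.h (it declares a stack-local VALUE and passes its address); the surrounding calls are the conventional libruby boot sequence, not code from this diff:

    #include <ruby.h>

    int
    main(int argc, char **argv)
    {
        ruby_sysinit(&argc, &argv);
        {
            RUBY_INIT_STACK; /* calls ruby_init_stack() with a local address,
                                which lands in native_main_thread_stack_top */
            ruby_init();
            return ruby_run_node(ruby_options(argc, argv));
        }
    }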
+
+#ifndef _WIN32
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+
+#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
+#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
+#endif
+
+struct pin_array_list {
+ VALUE next;
+ long len;
+ VALUE *array;
+};
+
+static void
+pin_array_list_mark(void *data)
+{
+ struct pin_array_list *array = (struct pin_array_list *)data;
+ rb_gc_mark_movable(array->next);
+
+ rb_gc_mark_vm_stack_values(array->len, array->array);
+}
+
+static void
+pin_array_list_free(void *data)
+{
+ struct pin_array_list *array = (struct pin_array_list *)data;
+ xfree(array->array);
+}
+
+static size_t
+pin_array_list_memsize(const void *data)
+{
+ return sizeof(struct pin_array_list) + (MARK_OBJECT_ARY_BUCKET_SIZE * sizeof(VALUE));
+}
+
+static void
+pin_array_list_update_references(void *data)
+{
+ struct pin_array_list *array = (struct pin_array_list *)data;
+ array->next = rb_gc_location(array->next);
+}
+
+static const rb_data_type_t pin_array_list_type = {
+ .wrap_struct_name = "VM/pin_array_list",
+ .function = {
+ .dmark = pin_array_list_mark,
+ .dfree = pin_array_list_free,
+ .dsize = pin_array_list_memsize,
+ .dcompact = pin_array_list_update_references,
+ },
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
+};
+
+static VALUE
+pin_array_list_new(VALUE next)
+{
+ struct pin_array_list *array_list;
+ VALUE obj = TypedData_Make_Struct(0, struct pin_array_list, &pin_array_list_type, array_list);
+ RB_OBJ_WRITE(obj, &array_list->next, next);
+ array_list->array = ALLOC_N(VALUE, MARK_OBJECT_ARY_BUCKET_SIZE);
+ return obj;
+}
+
+static VALUE
+pin_array_list_append(VALUE obj, VALUE item)
+{
+ struct pin_array_list *array_list;
+ TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
+
+ if (array_list->len >= MARK_OBJECT_ARY_BUCKET_SIZE) {
+ obj = pin_array_list_new(obj);
+ TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
+ }
+
+ RB_OBJ_WRITE(obj, &array_list->array[array_list->len], item);
+ array_list->len++;
+ return obj;
+}
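The pin buckets grow at the head: once a bucket's len reaches MARK_OBJECT_ARY_BUCKET_SIZE (1024), pin_array_list_append() allocates a fresh bucket whose next field points at the old head and writes the item there, so the caller must adopt the returned VALUE as its new list head. A minimal sketch of that calling pattern (hypothetical loop, not from this file):

    VALUE list = pin_array_list_new(Qnil);       /* one empty bucket */
    for (long i = 0; i < 3000; i++) {
        VALUE head = pin_array_list_append(list, LONG2FIX(i));
        if (head != list) {
            list = head;   /* bucket overflowed; keep the new head */
        }
    }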
+
+void
+rb_vm_register_global_object(VALUE obj)
+{
+ RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
+ if (RB_SPECIAL_CONST_P(obj)) {
+ return;
+ }
+
+ switch (RB_BUILTIN_TYPE(obj)) {
+ case T_CLASS:
+ case T_MODULE:
+ if (FL_TEST(obj, RCLASS_IS_ROOT)) {
+ return;
+ }
+ FL_SET(obj, RCLASS_IS_ROOT);
+ break;
+ default:
+ break;
+ }
+ RB_VM_LOCK_ENTER();
+ {
+ VALUE list = GET_VM()->mark_object_ary;
+ VALUE head = pin_array_list_append(list, obj);
+ if (head != list) {
+ GET_VM()->mark_object_ary = head;
+ }
+ RB_GC_GUARD(obj);
+ }
+ RB_VM_LOCK_LEAVE();
}
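rb_vm_register_global_object() is the registration front end for that list: special constants need no pinning, root classes and modules are flagged once via RCLASS_IS_ROOT rather than appended repeatedly, and everything else is appended to vm->mark_object_ary under the VM lock, swapping in the new head bucket when one fills. Usage mirrors the bootstrap code earlier in this diff, e.g. keeping the top-level iseq alive:

    rb_vm_register_global_object((VALUE)iseq);   /* pinned and marked until VM shutdown */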
void
@@ -3797,14 +4397,18 @@ Init_vm_objects(void)
{
rb_vm_t *vm = GET_VM();
- vm->defined_module_hash = st_init_numtable();
-
/* initialize mark object array, hash */
- vm->mark_object_ary = rb_ary_tmp_new(128);
+ vm->mark_object_ary = pin_array_list_new(Qnil);
vm->loading_table = st_init_strtable();
+ vm->ci_table = st_init_table(&vm_ci_hashtype);
vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000);
}
+/* Stub for builtin function when not building YJIT units */
+#if !USE_YJIT
+void Init_builtin_yjit(void) {}
+#endif
+
/* top self */
static VALUE
@@ -3836,6 +4440,14 @@ rb_ruby_verbose_ptr(void)
return &cr->verbose;
}
+static bool prism;
+
+bool *
+rb_ruby_prism_ptr(void)
+{
+ return &prism;
+}
+
VALUE *
rb_ruby_debug_ptr(void)
{
@@ -3843,10 +4455,18 @@ rb_ruby_debug_ptr(void)
return &cr->debug;
}
+bool rb_free_at_exit = false;
+
+bool
+ruby_free_at_exit_p(void)
+{
+ return rb_free_at_exit;
+}
+
/* iseq.c */
VALUE rb_insn_operand_intern(const rb_iseq_t *iseq,
- VALUE insn, int op_no, VALUE op,
- int len, size_t pos, VALUE *pnop, VALUE child);
+ VALUE insn, int op_no, VALUE op,
+ int len, size_t pos, VALUE *pnop, VALUE child);
st_table *
rb_vm_fstring_table(void)
@@ -3884,29 +4504,29 @@ vm_analysis_insn(int insn)
CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
uh = rb_const_get(rb_cRubyVM, usage_hash);
if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
- ihash = rb_hash_new();
- HASH_ASET(uh, INT2FIX(insn), ihash);
+ ihash = rb_hash_new();
+ HASH_ASET(uh, INT2FIX(insn), ihash);
}
if (NIL_P(cv = rb_hash_aref(ihash, INT2FIX(-1)))) {
- cv = INT2FIX(0);
+ cv = INT2FIX(0);
}
HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));
/* calc bigram */
if (prev_insn != -1) {
- VALUE bi;
- VALUE ary[2];
- VALUE cv;
+ VALUE bi;
+ VALUE ary[2];
+ VALUE cv;
- ary[0] = INT2FIX(prev_insn);
- ary[1] = INT2FIX(insn);
- bi = rb_ary_new4(2, &ary[0]);
+ ary[0] = INT2FIX(prev_insn);
+ ary[1] = INT2FIX(insn);
+ bi = rb_ary_new4(2, &ary[0]);
- uh = rb_const_get(rb_cRubyVM, bigram_hash);
- if (NIL_P(cv = rb_hash_aref(uh, bi))) {
- cv = INT2FIX(0);
- }
- HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
+ uh = rb_const_get(rb_cRubyVM, bigram_hash);
+ if (NIL_P(cv = rb_hash_aref(uh, bi))) {
+ cv = INT2FIX(0);
+ }
+ HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
}
prev_insn = insn;
}
@@ -3926,19 +4546,19 @@ vm_analysis_operand(int insn, int n, VALUE op)
uh = rb_const_get(rb_cRubyVM, usage_hash);
if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
- ihash = rb_hash_new();
- HASH_ASET(uh, INT2FIX(insn), ihash);
+ ihash = rb_hash_new();
+ HASH_ASET(uh, INT2FIX(insn), ihash);
}
if (NIL_P(ophash = rb_hash_aref(ihash, INT2FIX(n)))) {
- ophash = rb_hash_new();
- HASH_ASET(ihash, INT2FIX(n), ophash);
+ ophash = rb_hash_new();
+ HASH_ASET(ihash, INT2FIX(n), ophash);
}
/* intern */
valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
/* set count */
if (NIL_P(cv = rb_hash_aref(ophash, valstr))) {
- cv = INT2FIX(0);
+ cv = INT2FIX(0);
}
HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
}
@@ -3950,16 +4570,16 @@ vm_analysis_register(int reg, int isset)
VALUE uh;
VALUE valstr;
static const char regstrs[][5] = {
- "pc", /* 0 */
- "sp", /* 1 */
- "ep", /* 2 */
- "cfp", /* 3 */
- "self", /* 4 */
- "iseq", /* 5 */
+ "pc", /* 0 */
+ "sp", /* 1 */
+ "ep", /* 2 */
+ "cfp", /* 3 */
+ "self", /* 4 */
+ "iseq", /* 5 */
};
static const char getsetstr[][4] = {
- "get",
- "set",
+ "get",
+ "set",
};
static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];
@@ -3967,22 +4587,22 @@ vm_analysis_register(int reg, int isset)
CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
if (syms[0] == 0) {
- char buff[0x10];
- int i;
-
- for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
- int j;
- for (j = 0; j < 2; j++) {
- snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
- syms[i][j] = ID2SYM(rb_intern(buff));
- }
- }
+ char buff[0x10];
+ int i;
+
+ for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
+ int j;
+ for (j = 0; j < 2; j++) {
+ snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
+ syms[i][j] = ID2SYM(rb_intern(buff));
+ }
+ }
}
valstr = syms[reg][isset];
uh = rb_const_get(rb_cRubyVM, usage_hash);
if (NIL_P(cv = rb_hash_aref(uh, valstr))) {
- cv = INT2FIX(0);
+ cv = INT2FIX(0);
}
HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
}
@@ -4045,68 +4665,65 @@ usage_analysis_register_stop(VALUE self)
static VALUE
usage_analysis_insn_running(VALUE self)
{
- return RBOOL(ruby_vm_collect_usage_func_insn != 0);
+ return RBOOL(ruby_vm_collect_usage_func_insn != 0);
}
/* :nodoc: */
static VALUE
usage_analysis_operand_running(VALUE self)
{
- return RBOOL(ruby_vm_collect_usage_func_operand != 0);
+ return RBOOL(ruby_vm_collect_usage_func_operand != 0);
}
/* :nodoc: */
static VALUE
usage_analysis_register_running(VALUE self)
{
- return RBOOL(ruby_vm_collect_usage_func_register != 0);
+ return RBOOL(ruby_vm_collect_usage_func_register != 0);
+}
+
+static VALUE
+usage_analysis_clear(VALUE self, ID usage_hash)
+{
+ VALUE uh;
+ uh = rb_const_get(self, usage_hash);
+ rb_hash_clear(uh);
+
+ return Qtrue;
}
+
/* :nodoc: */
static VALUE
usage_analysis_insn_clear(VALUE self)
{
- ID usage_hash;
- ID bigram_hash;
- VALUE uh;
- VALUE bh;
-
- CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
- CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
- uh = rb_const_get(rb_cRubyVM, usage_hash);
- bh = rb_const_get(rb_cRubyVM, bigram_hash);
- rb_hash_clear(uh);
- rb_hash_clear(bh);
+ ID usage_hash;
+ ID bigram_hash;
- return Qtrue;
+ CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
+ CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
+ usage_analysis_clear(rb_cRubyVM, usage_hash);
+ return usage_analysis_clear(rb_cRubyVM, bigram_hash);
}
/* :nodoc: */
static VALUE
usage_analysis_operand_clear(VALUE self)
{
- ID usage_hash;
- VALUE uh;
-
- CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
- uh = rb_const_get(rb_cRubyVM, usage_hash);
- rb_hash_clear(uh);
+ ID usage_hash;
- return Qtrue;
+ CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
+ return usage_analysis_clear(self, usage_hash);
}
/* :nodoc: */
static VALUE
usage_analysis_register_clear(VALUE self)
{
- ID usage_hash;
- VALUE uh;
+ ID usage_hash;
- CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
- uh = rb_const_get(rb_cRubyVM, usage_hash);
- rb_hash_clear(uh);
-
- return Qtrue;
+ CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
+ return usage_analysis_clear(self, usage_hash);
}
#else
@@ -4123,10 +4740,10 @@ static void
vm_collect_usage_insn(int insn)
{
if (RUBY_DTRACE_INSN_ENABLED()) {
- RUBY_DTRACE_INSN(rb_insns_name(insn));
+ RUBY_DTRACE_INSN(rb_insns_name(insn));
}
if (ruby_vm_collect_usage_func_insn)
- (*ruby_vm_collect_usage_func_insn)(insn);
+ (*ruby_vm_collect_usage_func_insn)(insn);
}
/* @param insn instruction number
@@ -4137,15 +4754,15 @@ static void
vm_collect_usage_operand(int insn, int n, VALUE op)
{
if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
- VALUE valstr;
+ VALUE valstr;
- valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
+ valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
- RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
- RB_GC_GUARD(valstr);
+ RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
+ RB_GC_GUARD(valstr);
}
if (ruby_vm_collect_usage_func_operand)
- (*ruby_vm_collect_usage_func_operand)(insn, n, op);
+ (*ruby_vm_collect_usage_func_operand)(insn, n, op);
}
/* @param reg register id. see code of vm_analysis_register() */
@@ -4154,16 +4771,20 @@ static void
vm_collect_usage_register(int reg, int isset)
{
if (ruby_vm_collect_usage_func_register)
- (*ruby_vm_collect_usage_func_register)(reg, isset);
+ (*ruby_vm_collect_usage_func_register)(reg, isset);
}
#endif
-MJIT_FUNC_EXPORTED const struct rb_callcache *
+const struct rb_callcache *
rb_vm_empty_cc(void)
{
return &vm_empty_cc;
}
-#endif /* #ifndef MJIT_HEADER */
+const struct rb_callcache *
+rb_vm_empty_cc_for_super(void)
+{
+ return &vm_empty_cc_for_super;
+}
#include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */