Diffstat (limited to 'vm_core.h')
-rw-r--r--  vm_core.h  871
1 file changed, 501 insertions(+), 370 deletions(-)
diff --git a/vm_core.h b/vm_core.h
index bcb0255a00..a8a3dde58a 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -53,22 +53,48 @@
#include "ruby_assert.h"
+#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
+
#if VM_CHECK_MODE > 0
-#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
+#define VM_ASSERT(/*expr, */...) RUBY_ASSERT_WHEN(VM_CHECK_MODE > 0, __VA_ARGS__)
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
-
+#define RUBY_ASSERT_CRITICAL_SECTION
+#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
#else
-#define VM_ASSERT(expr) ((void)0)
+#define VM_ASSERT(/*expr, */...) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
+#define RUBY_DEBUG_THREAD_SCHEDULE()
#endif
-#include <setjmp.h>
+#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
+
+#if defined(RUBY_ASSERT_CRITICAL_SECTION)
+// TODO add documentation
+extern int ruby_assert_critical_section_entered;
+#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
+#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
+#else
+#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
+#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
+#endif
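
The enter/leave counters above pair with the check added to rb_vm_check_ints() later in this diff: any interrupt check inside a marked section trips a VM_ASSERT. A minimal usage sketch, assuming VM_CHECK_MODE > 0 so the macros are live (the function name is hypothetical):

    static void
    update_shared_vm_state(void) /* hypothetical */
    {
        RUBY_ASSERT_CRITICAL_SECTION_ENTER();
        /* ... work that must not reach rb_vm_check_ints(); doing so would
         * fail VM_ASSERT(ruby_assert_critical_section_entered == 0) ... */
        RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
    }
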
+
+#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
+# include "wasm/setjmp.h"
+#else
+# include <setjmp.h>
+#endif
+
+#if defined(__linux__) || defined(__FreeBSD__)
+# define RB_THREAD_T_HAS_NATIVE_ID
+#endif
#include "ruby/internal/stdbool.h"
#include "ccan/list/list.h"
#include "id.h"
#include "internal.h"
#include "internal/array.h"
+#include "internal/basic_operators.h"
+#include "internal/sanitizers.h"
#include "internal/serial.h"
#include "internal/vm.h"
#include "method.h"
@@ -77,12 +103,16 @@
#include "ruby/st.h"
#include "ruby_atomic.h"
#include "vm_opts.h"
-#include "darray.h"
#include "ruby/thread_native.h"
-#include THREAD_IMPL_H
-#define RUBY_VM_THREAD_MODEL 2
+#if USE_SHARED_GC
+typedef struct gc_function_map {
+ void *(*init)(void);
+} rb_gc_function_map_t;
+
+#define rb_gc_functions (GET_VM()->gc_functions_map)
+#endif
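
rb_gc_function_map_t only carries an init hook here. One plausible way a shared-GC build might populate the map, sketched with dlsym(3); the symbol name and error handling are assumptions, not the actual loader:

    #include <dlfcn.h>

    /* Hedged sketch: fill the function map from a dlopen(3) handle. */
    static void
    load_gc_impl(rb_gc_function_map_t *map, void *dl_handle)
    {
        map->init = (void *(*)(void))dlsym(dl_handle, "rb_gc_impl_init");
    }
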
/*
* implementation selector of get_insn_info algorithm
@@ -111,23 +141,11 @@
#define RUBY_NSIG NSIG
#if defined(SIGCLD)
-# define RUBY_SIGCHLD (SIGCLD)
+# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
-# define RUBY_SIGCHLD (SIGCHLD)
-#else
-# define RUBY_SIGCHLD (0)
+# define RUBY_SIGCHLD (SIGCHLD)
#endif
-/* platforms with broken or non-existent SIGCHLD work by polling */
-#if defined(__APPLE__)
-# define SIGCHLD_LOSSY (1)
-#else
-# define SIGCHLD_LOSSY (0)
-#endif
-
-/* define to 0 to test old code path */
-#define WAITPID_USE_SIGCHLD (RUBY_SIGCHLD || SIGCHLD_LOSSY)
-
#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
@@ -141,6 +159,9 @@ void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK(var) (0)
#endif
+#include THREAD_IMPL_H
+#define RUBY_VM_THREAD_MODEL 2
+
/*****************/
/* configuration */
/*****************/
@@ -170,9 +191,6 @@ void *rb_register_sigaltstack(void *);
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif /* OPT_DIRECT_THREADED_CODE */
-#if OPT_STACK_CACHING
-#undef OPT_STACK_CACHING
-#endif /* OPT_STACK_CACHING */
#endif /* OPT_CALL_THREADED_CODE */
void rb_vm_encoded_insn_data_table_init(void);
@@ -225,53 +243,34 @@ struct iseq_inline_constant_cache_entry {
VALUE flags;
VALUE value; // v0
- union ic_serial_entry ic_serial; // v1, v2
+ VALUE _unused1; // v1
+ VALUE _unused2; // v2
const rb_cref_t *ic_cref; // v3
};
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
(offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
- sizeof(const rb_cref_t *)) <= sizeof(struct RObject));
-
-#if SIZEOF_SERIAL_T <= SIZEOF_VALUE
-
-#define GET_IC_SERIAL(ice) (ice)->ic_serial.raw
-#define SET_IC_SERIAL(ice, v) (ice)->ic_serial.raw = (v)
-
-#else
-
-static inline rb_serial_t
-get_ic_serial(const struct iseq_inline_constant_cache_entry *ice)
-{
- union ic_serial_entry tmp;
- tmp.data[0] = ice->ic_serial.data[0];
- tmp.data[1] = ice->ic_serial.data[1];
- return tmp.raw;
-}
-
-#define GET_IC_SERIAL(ice) get_ic_serial(ice)
-
-static inline void
-set_ic_serial(struct iseq_inline_constant_cache_entry *ice, rb_serial_t v)
-{
- union ic_serial_entry tmp;
- tmp.raw = v;
- ice->ic_serial.data[0] = tmp.data[0];
- ice->ic_serial.data[1] = tmp.data[1];
-}
-
-#define SET_IC_SERIAL(ice, v) set_ic_serial((ice), (v))
-
-#endif
+ sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
struct iseq_inline_constant_cache {
struct iseq_inline_constant_cache_entry *entry;
- // For YJIT: the index to the opt_getinlinecache instruction in the same iseq.
- // It's set during compile time and constant once set.
- unsigned get_insn_idx;
+
+ /**
+ * A null-terminated list of ids, used to represent a constant's path.
+ * idNULL is used to represent the :: prefix, and 0 is used to denote
+ * the end of the list.
+ *
+ * For example:
+ * FOO {rb_intern("FOO"), 0}
+ * FOO::BAR {rb_intern("FOO"), rb_intern("BAR"), 0}
+ * ::FOO {idNULL, rb_intern("FOO"), 0}
+ * ::FOO::BAR {idNULL, rb_intern("FOO"), rb_intern("BAR"), 0}
+ */
+ const ID *segments;
};
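
Given that encoding, walking a constant path is a straightforward scan to the 0 terminator; an illustrative sketch:

    /* Iterate the path components of `segments` (illustrative only). */
    static void
    each_const_segment(const ID *segments)
    {
        for (const ID *p = segments; *p; p++) {
            if (*p == idNULL) continue;    /* the leading "::" marker */
            /* rb_id2name(*p) yields one component, e.g. "FOO" or "BAR" */
        }
    }
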
struct iseq_inline_iv_cache_entry {
- struct rb_iv_index_tbl_entry *entry;
+ uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
+ ID iv_set_name;
};
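
The iv cache now packs two fields into one word, as the comment says. The actual bit split lives in shape.h and vm_insnhelper.c; this sketch uses an invented ATTR_BITS_SKETCH width purely for illustration:

    #define ATTR_BITS_SKETCH 16 /* placeholder, not the real split */

    static inline unsigned
    ivc_attr_index_sketch(uintptr_t value)
    {
        return (unsigned)(value & ((1u << ATTR_BITS_SKETCH) - 1)); /* lower bits */
    }

    static inline unsigned
    ivc_dest_shape_id_sketch(uintptr_t value)
    {
        return (unsigned)(value >> ATTR_BITS_SKETCH);              /* upper bits */
    }
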
struct iseq_inline_cvar_cache_entry {
@@ -280,22 +279,29 @@ struct iseq_inline_cvar_cache_entry {
union iseq_inline_storage_entry {
struct {
- struct rb_thread_struct *running_thread;
- VALUE value;
+ struct rb_thread_struct *running_thread;
+ VALUE value;
} once;
struct iseq_inline_constant_cache ic_cache;
struct iseq_inline_iv_cache_entry iv_cache;
};
struct rb_calling_info {
- const struct rb_callinfo *ci;
+ const struct rb_call_data *cd;
const struct rb_callcache *cc;
VALUE block_handler;
VALUE recv;
int argc;
- int kw_splat;
+ bool kw_splat;
+ VALUE heap_argv;
};
+#ifndef VM_ARGC_STACK_MAX
+#define VM_ARGC_STACK_MAX 128
+#endif
+
+# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
+
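
heap_argv is set when the arguments live in a heap-allocated Ruby Array (e.g. a splat expanding past VM_ARGC_STACK_MAX) rather than on the VM stack; CALLING_ARGC returns whichever count applies. A minimal illustration (function name hypothetical):

    /* Reads the effective argc regardless of where the arguments live. */
    static int
    effective_argc(const struct rb_calling_info *calling)
    {
        return CALLING_ARGC(calling); /* heap_argv length, or stack argc */
    }
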
struct rb_execution_context_struct;
#if 1
@@ -309,7 +315,7 @@ typedef struct rb_iseq_location_struct {
VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
VALUE base_label; /* String */
VALUE label; /* String */
- VALUE first_lineno; /* TODO: may be unsigned short */
+ int first_lineno;
int node_id;
rb_code_location_t code_location;
} rb_iseq_location_t;
@@ -321,11 +327,11 @@ static inline VALUE
pathobj_path(VALUE pathobj)
{
if (RB_TYPE_P(pathobj, T_STRING)) {
- return pathobj;
+ return pathobj;
}
else {
- VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
- return RARRAY_AREF(pathobj, PATHOBJ_PATH);
+ VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
+ return RARRAY_AREF(pathobj, PATHOBJ_PATH);
}
}
@@ -333,33 +339,51 @@ static inline VALUE
pathobj_realpath(VALUE pathobj)
{
if (RB_TYPE_P(pathobj, T_STRING)) {
- return pathobj;
+ return pathobj;
}
else {
- VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
- return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
+ VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
+ return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
}
}
/* Forward declarations */
-struct rb_mjit_unit;
+struct rb_rjit_unit;
+
+typedef uintptr_t iseq_bits_t;
+
+#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
+
+/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
+#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache)
+
+/* instruction sequence type */
+enum rb_iseq_type {
+ ISEQ_TYPE_TOP,
+ ISEQ_TYPE_METHOD,
+ ISEQ_TYPE_BLOCK,
+ ISEQ_TYPE_CLASS,
+ ISEQ_TYPE_RESCUE,
+ ISEQ_TYPE_ENSURE,
+ ISEQ_TYPE_EVAL,
+ ISEQ_TYPE_MAIN,
+ ISEQ_TYPE_PLAIN
+};
-// List of YJIT block versions
-typedef rb_darray(struct yjit_block_version *) rb_yjit_block_array_t;
-typedef rb_darray(rb_yjit_block_array_t) rb_yjit_block_array_array_t;
+// Attributes specified by Primitive.attr!
+enum rb_builtin_attr {
+ // The iseq does not call methods.
+ BUILTIN_ATTR_LEAF = 0x01,
+ // This iseq contains only a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
+ BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
+ // This attribute signals the JIT to duplicate the iseq for each block iseq so that its `yield` becomes monomorphic.
+ BUILTIN_ATTR_INLINE_BLOCK = 0x04,
+};
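
The attributes combine as a bitmask in the builtin_attrs field added to rb_iseq_constant_body below, so a consumer's check is a plain bit test; a sketch:

    /* Sketch, assuming a body pointer obtained via ISEQ_BODY(). */
    static inline bool
    iseq_body_leaf_p(const struct rb_iseq_constant_body *body)
    {
        return (body->builtin_attrs & BUILTIN_ATTR_LEAF) != 0;
    }
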
+
+typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
struct rb_iseq_constant_body {
- enum iseq_type {
- ISEQ_TYPE_TOP,
- ISEQ_TYPE_METHOD,
- ISEQ_TYPE_BLOCK,
- ISEQ_TYPE_CLASS,
- ISEQ_TYPE_RESCUE,
- ISEQ_TYPE_ENSURE,
- ISEQ_TYPE_EVAL,
- ISEQ_TYPE_MAIN,
- ISEQ_TYPE_PLAIN
- } type; /* instruction sequence type */
+ enum rb_iseq_type type;
unsigned int iseq_size;
VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
@@ -388,63 +412,66 @@ struct rb_iseq_constant_body {
*/
struct {
- struct {
- unsigned int has_lead : 1;
- unsigned int has_opt : 1;
- unsigned int has_rest : 1;
- unsigned int has_post : 1;
- unsigned int has_kw : 1;
- unsigned int has_kwrest : 1;
- unsigned int has_block : 1;
-
- unsigned int ambiguous_param0 : 1; /* {|a|} */
- unsigned int accepts_no_kwarg : 1;
+ struct {
+ unsigned int has_lead : 1;
+ unsigned int has_opt : 1;
+ unsigned int has_rest : 1;
+ unsigned int has_post : 1;
+ unsigned int has_kw : 1;
+ unsigned int has_kwrest : 1;
+ unsigned int has_block : 1;
+
+ unsigned int ambiguous_param0 : 1; /* {|a|} */
+ unsigned int accepts_no_kwarg : 1;
unsigned int ruby2_keywords: 1;
- } flags;
-
- unsigned int size;
-
- int lead_num;
- int opt_num;
- int rest_start;
- int post_start;
- int post_num;
- int block_start;
-
- const VALUE *opt_table; /* (opt_num + 1) entries. */
- /* opt_num and opt_table:
- *
- * def foo o1=e1, o2=e2, ..., oN=eN
- * #=>
- * # prologue code
- * A1: e1
- * A2: e2
- * ...
- * AN: eN
- * AL: body
- * opt_num = N
- * opt_table = [A1, A2, ..., AN, AL]
- */
-
- const struct rb_iseq_param_keyword {
- int num;
- int required_num;
- int bits_start;
- int rest_start;
- const ID *table;
+ unsigned int anon_rest: 1;
+ unsigned int anon_kwrest: 1;
+ unsigned int use_block: 1;
+ } flags;
+
+ unsigned int size;
+
+ int lead_num;
+ int opt_num;
+ int rest_start;
+ int post_start;
+ int post_num;
+ int block_start;
+
+ const VALUE *opt_table; /* (opt_num + 1) entries. */
+ /* opt_num and opt_table:
+ *
+ * def foo o1=e1, o2=e2, ..., oN=eN
+ * #=>
+ * # prologue code
+ * A1: e1
+ * A2: e2
+ * ...
+ * AN: eN
+ * AL: body
+ * opt_num = N
+ * opt_table = [A1, A2, ..., AN, AL]
+ */
+
+ const struct rb_iseq_param_keyword {
+ int num;
+ int required_num;
+ int bits_start;
+ int rest_start;
+ const ID *table;
VALUE *default_values;
- } *keyword;
+ } *keyword;
} param;
rb_iseq_location_t location;
/* insn info, must be freed */
struct iseq_insn_info {
- const struct iseq_insn_info_entry *body;
- unsigned int *positions;
- unsigned int size;
+ const struct iseq_insn_info_entry *body;
+ unsigned int *positions;
+ unsigned int size;
#if VM_INSN_INFO_TABLE_IMPL == 2
- struct succ_index_table *succ_index_table;
+ struct succ_index_table *succ_index_table;
#endif
} insns_info;
@@ -457,42 +484,63 @@ struct rb_iseq_constant_body {
const struct rb_iseq_struct *parent_iseq;
struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
- union iseq_inline_storage_entry *is_entries;
+ union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
struct {
- rb_snum_t flip_count;
+ rb_snum_t flip_count;
VALUE script_lines;
- VALUE coverage;
+ VALUE coverage;
VALUE pc2branchindex;
- VALUE *original_iseq;
+ VALUE *original_iseq;
} variable;
unsigned int local_table_size;
- unsigned int is_size;
+ unsigned int ic_size; // Number of IC caches
+ unsigned int ise_size; // Number of ISE caches
+ unsigned int ivc_size; // Number of IVC caches
+ unsigned int icvarc_size; // Number of ICVARC caches
unsigned int ci_size;
unsigned int stack_max; /* for stack overflow check */
- char catch_except_p; /* If a frame of this ISeq may catch exception, set TRUE */
- // If true, this ISeq is leaf *and* backtraces are not used, for example,
- // by rb_profile_frames. We verify only leafness on VM_CHECK_MODE though.
- // Note that GC allocations might use backtraces due to
- // ObjectSpace#trace_object_allocations.
- // For more details, see: https://bugs.ruby-lang.org/issues/16956
- bool builtin_inline_p;
+ unsigned int builtin_attrs; // Union of rb_builtin_attr
+
+ bool prism; // ISEQ was generated by the prism compiler
+
+ union {
+ iseq_bits_t * list; /* Find references for GC */
+ iseq_bits_t single;
+ } mark_bits;
+
struct rb_id_table *outer_variables;
const rb_iseq_t *mandatory_only_iseq;
-#if USE_MJIT
- /* The following fields are MJIT related info. */
- VALUE (*jit_func)(struct rb_execution_context_struct *,
- struct rb_control_frame_struct *); /* function pointer for loaded native code */
- long unsigned total_calls; /* number of total calls with `mjit_exec()` */
- struct rb_mjit_unit *jit_unit;
+#if USE_RJIT || USE_YJIT
+ // Function pointer for JIT code on jit_exec()
+ rb_jit_func_t jit_entry;
+ // Number of calls on jit_exec()
+ long unsigned jit_entry_calls;
+#endif
+
+#if USE_YJIT
+ // Function pointer for JIT code on jit_exec_exception()
+ rb_jit_func_t jit_exception;
+ // Number of calls on jit_exec_exception()
+ long unsigned jit_exception_calls;
+#endif
+
+#if USE_RJIT
+ // RJIT stores some data on each iseq.
+ VALUE rjit_blocks;
#endif
- rb_yjit_block_array_array_t yjit_blocks; // empty, or has a size equal to iseq_size
+#if USE_YJIT
+ // YJIT stores some data on each iseq.
+ void *yjit_payload;
+ // Used to estimate how frequently this ISEQ gets called
+ uint64_t yjit_calls_at_interv;
+#endif
};
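
jit_entry/jit_entry_calls replace the MJIT-era jit_func/total_calls pair. The dispatch they imply looks roughly like the sketch below; the real jit_exec in vm.c also handles compile triggering and thresholds, which are omitted here:

    /* Hedged sketch of JIT dispatch (fields exist under USE_RJIT || USE_YJIT). */
    static inline VALUE
    jit_exec_sketch(struct rb_execution_context_struct *ec, const rb_iseq_t *iseq)
    {
        struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
        body->jit_entry_calls++;                 /* feeds the compile trigger */
        if (body->jit_entry) {
            return body->jit_entry(ec, ec->cfp); /* run compiled code */
        }
        return Qundef;                           /* interpreter path */
    }
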
/* T_IMEMO/iseq */
@@ -504,12 +552,12 @@ struct rb_iseq_struct {
struct rb_iseq_constant_body *body; /* 3 */
union { /* 4, 5 words */
- struct iseq_compile_data *compile_data; /* used at compile time */
+ struct iseq_compile_data *compile_data; /* used at compile time */
- struct {
- VALUE obj;
- int index;
- } loader;
+ struct {
+ VALUE obj;
+ int index;
+ } loader;
struct {
struct rb_hook_list_struct *local_hooks;
@@ -518,22 +566,23 @@ struct rb_iseq_struct {
} aux;
};
-#ifndef USE_LAZY_LOAD
+#define ISEQ_BODY(iseq) ((iseq)->body)
+
+#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
#define USE_LAZY_LOAD 0
#endif
-#if USE_LAZY_LOAD
-const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
+#if !USE_LAZY_LOAD
+static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
#endif
+const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
static inline const rb_iseq_t *
rb_iseq_check(const rb_iseq_t *iseq)
{
-#if USE_LAZY_LOAD
- if (iseq->body == NULL) {
- rb_iseq_complete((rb_iseq_t *)iseq);
+ if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
+ rb_iseq_complete((rb_iseq_t *)iseq);
}
-#endif
return iseq;
}
@@ -556,40 +605,6 @@ enum ruby_special_exceptions {
ruby_special_error_count
};
-enum ruby_basic_operators {
- BOP_PLUS,
- BOP_MINUS,
- BOP_MULT,
- BOP_DIV,
- BOP_MOD,
- BOP_EQ,
- BOP_EQQ,
- BOP_LT,
- BOP_LE,
- BOP_LTLT,
- BOP_AREF,
- BOP_ASET,
- BOP_LENGTH,
- BOP_SIZE,
- BOP_EMPTY_P,
- BOP_NIL_P,
- BOP_SUCC,
- BOP_GT,
- BOP_GE,
- BOP_NOT,
- BOP_NEQ,
- BOP_MATCH,
- BOP_FREEZE,
- BOP_UMINUS,
- BOP_MAX,
- BOP_MIN,
- BOP_CALL,
- BOP_AND,
- BOP_OR,
-
- BOP_LAST_
-};
-
#define GetVMPtr(obj, ptr) \
GetCoreDataFromValue((obj), rb_vm_t, (ptr))
@@ -609,19 +624,25 @@ void rb_objspace_call_finalizer(struct rb_objspace *);
typedef struct rb_hook_list_struct {
struct rb_event_hook_struct *hooks;
rb_event_flag_t events;
- unsigned int need_clean;
unsigned int running;
+ bool need_clean;
+ bool is_local;
} rb_hook_list_t;
// see builtin.h for definition
typedef const struct rb_builtin_function *RB_BUILTIN;
+struct global_object_list {
+ VALUE *varptr;
+ struct global_object_list *next;
+};
+
typedef struct rb_vm_struct {
VALUE self;
struct {
- struct list_head set;
+ struct ccan_list_head set;
unsigned int cnt;
unsigned int blocking_cnt;
@@ -634,15 +655,51 @@ typedef struct rb_vm_struct {
struct rb_ractor_struct *lock_owner;
unsigned int lock_rec;
- // barrier
- bool barrier_waiting;
- unsigned int barrier_cnt;
- rb_nativethread_cond_t barrier_cond;
-
// join at exit
rb_nativethread_cond_t terminate_cond;
bool terminate_waiting;
+
+#ifndef RUBY_THREAD_PTHREAD_H
+ bool barrier_waiting;
+ unsigned int barrier_cnt;
+ rb_nativethread_cond_t barrier_cond;
+#endif
} sync;
+
+ // ractor scheduling
+ struct {
+ rb_nativethread_lock_t lock;
+ struct rb_ractor_struct *lock_owner;
+ bool locked;
+
+ rb_nativethread_cond_t cond; // GRQ
+ unsigned int snt_cnt; // count of shared NTs
+ unsigned int dnt_cnt; // count of dedicated NTs
+
+ unsigned int running_cnt;
+
+ unsigned int max_cpu;
+ struct ccan_list_head grq; // Global Ready Queue
+ unsigned int grq_cnt;
+
+ // running threads
+ struct ccan_list_head running_threads;
+
+ // threads which switch context by timeslice
+ struct ccan_list_head timeslice_threads;
+
+ struct ccan_list_head zombie_threads;
+
+ // true if the timeslice timer is not enabled
+ bool timeslice_wait_inf;
+
+ // barrier
+ rb_nativethread_cond_t barrier_complete_cond;
+ rb_nativethread_cond_t barrier_release_cond;
+ bool barrier_waiting;
+ unsigned int barrier_waiting_cnt;
+ unsigned int barrier_serial;
+ } sched;
} ractor;
#ifdef USE_SIGALTSTACK
@@ -650,10 +707,7 @@ typedef struct rb_vm_struct {
#endif
rb_serial_t fork_gen;
- rb_nativethread_lock_t waitpid_lock;
- struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
- struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
- struct list_head waiting_fds; /* <=> struct waiting_fd */
+ struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
/* set in single-threaded processes only: */
volatile int ubf_async_safe;
@@ -665,6 +719,7 @@ typedef struct rb_vm_struct {
/* object management */
VALUE mark_object_ary;
+ struct global_object_list *global_object_list;
const VALUE special_exceptions[ruby_special_error_count];
/* load */
@@ -676,43 +731,56 @@ typedef struct rb_vm_struct {
VALUE loaded_features;
VALUE loaded_features_snapshot;
VALUE loaded_features_realpaths;
+ VALUE loaded_features_realpath_map;
struct st_table *loaded_features_index;
struct st_table *loading_table;
+ // For running the init function of statically linked
+ // extensions when they are loaded
+ struct st_table *static_ext_inits;
/* signal */
struct {
- VALUE cmd[RUBY_NSIG];
+ VALUE cmd[RUBY_NSIG];
} trap_list;
/* relation table of ensure - rollback for callcc */
struct st_table *ensure_rollback_table;
- /* postponed_job (async-signal-safe, NOT thread-safe) */
- struct rb_postponed_job_struct *postponed_job_buffer;
- rb_atomic_t postponed_job_index;
+ /* postponed_job (async-signal-safe, and thread-safe) */
+ struct rb_postponed_job_queue *postponed_job_queue;
int src_encoding_index;
/* workqueue (thread-safe, NOT async-signal-safe) */
- struct list_head workqueue; /* <=> rb_workqueue_job.jnode */
+ struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
rb_nativethread_lock_t workqueue_lock;
VALUE orig_progname, progname;
VALUE coverages, me2counter;
int coverage_mode;
- st_table * defined_module_hash;
-
struct rb_objspace *objspace;
+#if USE_SHARED_GC
+ rb_gc_function_map_t *gc_functions_map;
+#endif
rb_at_exit_list *at_exit;
st_table *frozen_strings;
const struct rb_builtin_function *builtin_function_table;
- int builtin_inline_index;
+ st_table *ci_table;
struct rb_id_table *negative_cme_table;
+ st_table *overloaded_cme_table; // cme -> overloaded_cme
+ st_table *unused_block_warning_table;
+ bool unused_block_warning_strict;
+
+ // This id table contains a mapping from ID to ICs. It does this with ID
+ // keys and nested st_tables as values. The nested tables have ICs as keys
+ // and Qtrue as values. It is used when inline constant caches need to be
+ // invalidated or ISEQs are being freed.
+ struct rb_id_table *constant_cache;
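
A sketch of invalidation over that layout, using the real rb_id_table_lookup()/st_table APIs; the iterator callback named in the comment is hypothetical:

    static void
    invalidate_ics_for_id(struct rb_id_table *constant_cache, ID id)
    {
        VALUE ics_value;
        if (rb_id_table_lookup(constant_cache, id, &ics_value)) {
            st_table *ics = (st_table *)ics_value;
            /* each key is an inline cache (IC); the Qtrue values are ignored */
            /* st_foreach(ics, invalidate_ic_i, 0); */
        }
    }
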
#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
@@ -725,13 +793,12 @@ typedef struct rb_vm_struct {
/* params */
struct { /* size in byte */
- size_t thread_vm_stack_size;
- size_t thread_machine_stack_size;
- size_t fiber_vm_stack_size;
- size_t fiber_machine_stack_size;
+ size_t thread_vm_stack_size;
+ size_t thread_machine_stack_size;
+ size_t fiber_vm_stack_size;
+ size_t fiber_machine_stack_size;
} default_params;
- short redefined_flag[BOP_LAST_];
} rb_vm_t;
/* default values */
@@ -746,8 +813,8 @@ typedef struct rb_vm_struct {
#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
-#if defined(__powerpc64__)
-#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
+#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
+#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
#else
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
#endif
@@ -764,23 +831,6 @@ typedef struct rb_vm_struct {
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
#endif
-/* optimize insn */
-#define INTEGER_REDEFINED_OP_FLAG (1 << 0)
-#define FLOAT_REDEFINED_OP_FLAG (1 << 1)
-#define STRING_REDEFINED_OP_FLAG (1 << 2)
-#define ARRAY_REDEFINED_OP_FLAG (1 << 3)
-#define HASH_REDEFINED_OP_FLAG (1 << 4)
-/* #define BIGNUM_REDEFINED_OP_FLAG (1 << 5) */
-#define SYMBOL_REDEFINED_OP_FLAG (1 << 6)
-#define TIME_REDEFINED_OP_FLAG (1 << 7)
-#define REGEXP_REDEFINED_OP_FLAG (1 << 8)
-#define NIL_REDEFINED_OP_FLAG (1 << 9)
-#define TRUE_REDEFINED_OP_FLAG (1 << 10)
-#define FALSE_REDEFINED_OP_FLAG (1 << 11)
-#define PROC_REDEFINED_OP_FLAG (1 << 12)
-
-#define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0))
-
#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#endif
@@ -793,9 +843,9 @@ struct rb_captured_block {
VALUE self;
const VALUE *ep;
union {
- const rb_iseq_t *iseq;
- const struct vm_ifunc *ifunc;
- VALUE val;
+ const rb_iseq_t *iseq;
+ const struct vm_ifunc *ifunc;
+ VALUE val;
} code;
};
@@ -815,27 +865,24 @@ enum rb_block_type {
struct rb_block {
union {
- struct rb_captured_block captured;
- VALUE symbol;
- VALUE proc;
+ struct rb_captured_block captured;
+ VALUE symbol;
+ VALUE proc;
} as;
enum rb_block_type type;
};
typedef struct rb_control_frame_struct {
- const VALUE *pc; /* cfp[0] */
- VALUE *sp; /* cfp[1] */
- const rb_iseq_t *iseq; /* cfp[2] */
- VALUE self; /* cfp[3] / block[0] */
- const VALUE *ep; /* cfp[4] / block[1] */
- const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc or forwarded block handler */
- VALUE *__bp__; /* cfp[6] */ /* outside vm_push_frame, use vm_base_ptr instead. */
-
+ const VALUE *pc; // cfp[0]
+ VALUE *sp; // cfp[1]
+ const rb_iseq_t *iseq; // cfp[2]
+ VALUE self; // cfp[3] / block[0]
+ const VALUE *ep; // cfp[4] / block[1]
+ const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
+ void *jit_return; // cfp[6] -- return address for JIT code
#if VM_DEBUG_BP_CHECK
- VALUE *bp_check; /* cfp[7] */
+ VALUE *bp_check; // cfp[7]
#endif
- // Return address for YJIT code
- void *jit_return;
} rb_control_frame_t;
extern const rb_data_type_t ruby_threadptr_data_type;
@@ -860,13 +907,68 @@ typedef void *rb_jmpbuf_t[5];
#endif
/*
+ The `rb_vm_tag_jmpbuf_t` type represents a buffer used to
+ long-jump to a C frame associated with `rb_vm_tag`.
+
+ The use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
+ following functions:
+ - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
+ - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
+
+ `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
+ `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
+*/
+#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
+/*
+ The WebAssembly target with Asyncify-based SJLJ needs
+ to capture the execution context by unwinding/rewinding
+ call frames into a jump buffer. The buffer space tends
+ to be considerably large, unlike other architectures'
+ register-based buffers.
+ Therefore, we allocate the buffer on the heap in such
+ environments.
+*/
+typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
+
+#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
+
+static inline void
+rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
+{
+ *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
+}
+
+static inline void
+rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
+{
+ ruby_xfree(*jmpbuf);
+}
+#else
+typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
+
+#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
+
+static inline void
+rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
+{
+ // no-op
+}
+
+static inline void
+rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
+{
+ // no-op
+}
+#endif
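
The init/deinit contract above, seen from a use-site. A hedged sketch: real usage goes through EC_PUSH_TAG/EC_POP_TAG, and ruby_setjmp is the wrapper from eval_intern.h:

    static void
    tag_jmpbuf_lifecycle(void)
    {
        rb_vm_tag_jmpbuf_t buf;
        rb_vm_tag_jmpbuf_init(&buf);   /* heap-allocates on wasm, no-op elsewhere */
        if (ruby_setjmp(RB_VM_TAG_JMPBUF_GET(buf)) == 0) {
            /* protected region; a matching ruby_longjmp() lands here */
        }
        rb_vm_tag_jmpbuf_deinit(&buf); /* frees the wasm heap buffer */
    }
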
+
+/*
the members which are written in EC_PUSH_TAG() should be placed at
the beginning and the end, so that entire region is accessible.
*/
struct rb_vm_tag {
VALUE tag;
VALUE retval;
- rb_jmpbuf_t buf;
+ rb_vm_tag_jmpbuf_t buf;
struct rb_vm_tag *prev;
enum ruby_tag_type state;
unsigned int lock_rec;
@@ -874,8 +976,8 @@ struct rb_vm_tag {
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
- offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
- sizeof(struct rb_vm_tag));
+ offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
+ sizeof(struct rb_vm_tag));
struct rb_unblock_callback {
rb_unblock_function_t *func;
@@ -895,8 +997,6 @@ typedef struct rb_ensure_list {
struct rb_ensure_entry entry;
} rb_ensure_list_t;
-typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];
-
typedef struct rb_fiber_struct rb_fiber_t;
struct rb_waiting_list {
@@ -928,6 +1028,9 @@ struct rb_execution_context_struct {
VALUE local_storage_recursive_hash;
VALUE local_storage_recursive_hash_for_trace;
+ /* Inheritable fiber storage. */
+ VALUE storage;
+
/* eval env */
const VALUE *root_lep;
VALUE root_svar;
@@ -951,10 +1054,14 @@ struct rb_execution_context_struct {
/* for GC */
struct {
- VALUE *stack_start;
- VALUE *stack_end;
- size_t stack_maxsize;
- RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
+ VALUE *stack_start;
+ VALUE *stack_end;
+ size_t stack_maxsize;
+ RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
+
+#ifdef RUBY_ASAN_ENABLED
+ void *asan_fake_stack_handle;
+#endif
} machine;
};
@@ -985,18 +1092,20 @@ struct rb_ext_config {
typedef struct rb_ractor_struct rb_ractor_t;
-#if defined(__linux__) || defined(__FreeBSD__)
-# define RB_THREAD_T_HAS_NATIVE_ID
-#endif
+struct rb_native_thread;
typedef struct rb_thread_struct {
- struct list_node lt_node; // managed by a ractor
+ struct ccan_list_node lt_node; // managed by a ractor
VALUE self;
rb_ractor_t *ractor;
rb_vm_t *vm;
-
+ struct rb_native_thread *nt;
rb_execution_context_t *ec;
+ struct rb_thread_sched_item sched;
+ bool mn_schedulable;
+ rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
+
VALUE last_status; /* $? */
/* for cfunc */
@@ -1007,15 +1116,10 @@ typedef struct rb_thread_struct {
VALUE top_wrapper;
/* thread control */
- rb_nativethread_id_t thread_id;
-#ifdef NON_SCALAR_THREAD_ID
- rb_thread_id_string_t thread_id_string;
-#endif
-#ifdef RB_THREAD_T_HAS_NATIVE_ID
- int tid;
-#endif
+
BITFIELD(enum rb_thread_status, status, 2);
/* bit flags */
+ unsigned int has_dedicated_nt : 1;
unsigned int to_kill : 1;
unsigned int abort_on_exception: 1;
unsigned int report_on_exception: 1;
@@ -1023,7 +1127,6 @@ typedef struct rb_thread_struct {
int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
uint32_t running_time_us; /* 12500..800000 */
- native_thread_data_t native_thread_data;
void *blocking_region_buffer;
VALUE thgroup;
@@ -1072,18 +1175,21 @@ typedef struct rb_thread_struct {
rb_fiber_t *root_fiber;
VALUE scheduler;
- unsigned blocking;
+ unsigned int blocking;
/* misc */
VALUE name;
+ void **specific_storage;
struct rb_ext_config ext_config;
-
-#ifdef USE_SIGALTSTACK
- void *altstack;
-#endif
} rb_thread_t;
+static inline unsigned int
+rb_th_serial(const rb_thread_t *th)
+{
+ return th ? (unsigned int)th->serial : 0;
+}
+
typedef enum {
VM_DEFINECLASS_TYPE_CLASS = 0x00,
VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
@@ -1103,12 +1209,13 @@ typedef enum {
RUBY_SYMBOL_EXPORT_BEGIN
/* node -> iseq */
-rb_iseq_t *rb_iseq_new (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type);
+rb_iseq_t *rb_iseq_new (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
rb_iseq_t *rb_iseq_new_top (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
-rb_iseq_t *rb_iseq_new_main (const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent);
-rb_iseq_t *rb_iseq_new_eval (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_iseq_t *parent, int isolated_depth);
-rb_iseq_t *rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_iseq_t *parent, int isolated_depth,
- enum iseq_type, const rb_compile_option_t*);
+rb_iseq_t *rb_iseq_new_main (const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
+rb_iseq_t *rb_iseq_new_eval (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
+rb_iseq_t *rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
+ enum rb_iseq_type, const rb_compile_option_t*,
+ VALUE script_lines);
struct iseq_link_anchor;
struct rb_iseq_new_with_callback_callback_func {
@@ -1121,12 +1228,16 @@ static inline struct rb_iseq_new_with_callback_callback_func *
rb_iseq_new_with_callback_new_callback(
void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
{
- VALUE memo = rb_imemo_new(imemo_ifunc, (VALUE)func, (VALUE)ptr, Qundef, Qfalse);
- return (struct rb_iseq_new_with_callback_callback_func *)memo;
+ struct rb_iseq_new_with_callback_callback_func *memo =
+ IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
+ memo->func = func;
+ memo->data = ptr;
+
+ return memo;
}
rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
- VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
- const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
+ VALUE name, VALUE path, VALUE realpath, int first_lineno,
+ const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
@@ -1171,7 +1282,7 @@ extern const rb_data_type_t ruby_binding_data_type;
typedef struct {
const struct rb_block block;
const VALUE pathobj;
- unsigned short first_lineno;
+ int first_lineno;
} rb_binding_t;
/* used by compile time and send insn */
@@ -1224,12 +1335,12 @@ typedef rb_control_frame_t *
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
-enum {
+enum vm_frame_env_flags {
/* Frame/Environment flag bits:
- * MMMM MMMM MMMM MMMM ____ _FFF FFFF EEEX (LSB)
+ * MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
*
* X : tag for GC marking (It seems as Fixnum)
- * EEE : 3 bits Env flags
+ * EEE : 4 bits Env flags
* FF..: 7 bits Frame flags
* MM..: 15 bits frame magic (to check frame corruption)
*/
@@ -1342,7 +1453,8 @@ static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
- VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p);
+ VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
+ (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
return cframe_p;
}
@@ -1418,7 +1530,9 @@ VM_ENV_ENVVAL_PTR(const VALUE *ep)
static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
- rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
+ rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
+ env->ep = env_ep;
+ env->env = env_body;
env->env_size = env_size;
env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
return env;
@@ -1474,13 +1588,13 @@ VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
- struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
- VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
+ VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
- return 1;
+ return 1;
}
else {
- return 0;
+ return 0;
}
}
@@ -1505,13 +1619,13 @@ VM_BH_IFUNC_P(VALUE block_handler)
{
if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
- struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
- VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
+ struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
+ VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
#endif
- return 1;
+ return 1;
}
else {
- return 0;
+ return 0;
}
}
@@ -1543,17 +1657,17 @@ static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
- return block_handler_type_iseq;
+ return block_handler_type_iseq;
}
else if (VM_BH_IFUNC_P(block_handler)) {
- return block_handler_type_ifunc;
+ return block_handler_type_ifunc;
}
else if (SYMBOL_P(block_handler)) {
- return block_handler_type_symbol;
+ return block_handler_type_symbol;
}
else {
- VM_ASSERT(rb_obj_is_proc(block_handler));
- return block_handler_type_proc;
+ VM_ASSERT(rb_obj_is_proc(block_handler));
+ return block_handler_type_proc;
}
}
@@ -1561,13 +1675,7 @@ static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
- (vm_block_handler_type(block_handler), 1));
-}
-
-static inline int
-vm_cfp_forwarded_bh_p(const rb_control_frame_t *cfp, VALUE block_handler)
-{
- return ((VALUE) cfp->block_code) == block_handler;
+ (vm_block_handler_type(block_handler), 1));
}
static inline enum rb_block_type
@@ -1576,17 +1684,17 @@ vm_block_type(const struct rb_block *block)
#if VM_CHECK_MODE > 0
switch (block->type) {
case block_type_iseq:
- VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
- break;
+ VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
+ break;
case block_type_ifunc:
- VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
- break;
+ VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
+ break;
case block_type_symbol:
- VM_ASSERT(SYMBOL_P(block->as.symbol));
- break;
+ VM_ASSERT(SYMBOL_P(block->as.symbol));
+ break;
case block_type_proc:
- VM_ASSERT(rb_obj_is_proc(block->as.proc));
- break;
+ VM_ASSERT(rb_obj_is_proc(block->as.proc));
+ break;
}
#endif
return block->type;
@@ -1653,11 +1761,11 @@ vm_block_self(const struct rb_block *block)
switch (vm_block_type(block)) {
case block_type_iseq:
case block_type_ifunc:
- return block->as.captured.self;
+ return block->as.captured.self;
case block_type_proc:
- return vm_block_self(vm_proc_block(block->as.proc));
+ return vm_block_self(vm_proc_block(block->as.proc));
case block_type_symbol:
- return Qundef;
+ return Qundef;
}
VM_UNREACHABLE(vm_block_self);
return Qundef;
@@ -1698,17 +1806,13 @@ VALUE rb_proc_alloc(VALUE klass);
VALUE rb_proc_dup(VALUE self);
/* for debug */
-extern void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
-extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc);
-extern void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp
-#if OPT_STACK_CACHING
- , VALUE reg_a, VALUE reg_b
-#endif
-);
+extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
+extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
+extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
-#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp)
-#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp))
-void rb_vm_bugreport(const void *);
+#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
+#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
+bool rb_vm_bugreport(const void *, FILE *);
typedef void (*ruby_sighandler_t)(int);
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
@@ -1749,9 +1853,8 @@ const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dync
void rb_vm_inc_const_missing_count(void);
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
-MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);
-
-void rb_gvl_destroy(rb_global_vm_lock_t *gvl);
+void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
+void rb_vm_pop_frame(rb_execution_context_t *ec);
void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
@@ -1761,35 +1864,36 @@ void rb_thread_wakeup_timer_thread(int);
static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
- list_head_init(&vm->waiting_fds);
- list_head_init(&vm->waiting_pids);
- list_head_init(&vm->workqueue);
- list_head_init(&vm->waiting_grps);
- list_head_init(&vm->ractor.set);
+ ccan_list_head_init(&vm->waiting_fds);
+ ccan_list_head_init(&vm->workqueue);
+ ccan_list_head_init(&vm->ractor.set);
+ ccan_list_head_init(&vm->ractor.sched.zombie_threads);
}
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
+VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
int rb_vm_get_sourceline(const rb_control_frame_t *);
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
-void ruby_thread_init_stack(rb_thread_t *th);
+void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
rb_thread_t * ruby_thread_from_native(void);
int ruby_thread_set_native(rb_thread_t *th);
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
-MJIT_STATIC VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
+void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
+VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
#define rb_vm_register_special_exception(sp, e, m) \
rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
-void rb_gc_mark_machine_stack(const rb_execution_context_t *ec);
+void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
-MJIT_STATIC const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
+const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
@@ -1812,7 +1916,6 @@ rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
/* for thread */
#if RUBY_VM_THREAD_MODEL == 2
-MJIT_SYMBOL_EXPORT_BEGIN
RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
@@ -1820,8 +1923,6 @@ RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
RUBY_EXTERN unsigned int ruby_vm_event_local_num;
-MJIT_SYMBOL_EXPORT_END
-
#define GET_VM() rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
@@ -1839,7 +1940,7 @@ rb_ec_ractor_ptr(const rb_execution_context_t *ec)
const rb_thread_t *th = rb_ec_thread_ptr(ec);
if (th) {
VM_ASSERT(th->ractor != NULL);
- return th->ractor;
+ return th->ractor;
}
else {
return NULL;
@@ -1851,10 +1952,10 @@ rb_ec_vm_ptr(const rb_execution_context_t *ec)
{
const rb_thread_t *th = rb_ec_thread_ptr(ec);
if (th) {
- return th->vm;
+ return th->vm;
}
else {
- return NULL;
+ return NULL;
}
}
@@ -1867,6 +1968,20 @@ rb_current_execution_context(bool expect_ec)
#else
rb_execution_context_t *ec = ruby_current_ec;
#endif
+
+ /* In shared objects, `__tls_get_addr()` is used to access the TLS,
+ * and the address of `ruby_current_ec` can be stored on a function
+ * frame. However, this address can be misused after native thread
+ * migration of a coroutine.
+ * 1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
+ * 2) Context-switch and resume it on NT2.
+ * 3) `ptr` is used on NT2, but it accesses the TLS of NT1.
+ * This assertion checks for such misuse.
+ *
+ * To avoid accidents, `GET_EC()` should be called only once per frame.
+ * Note that inlining can reintroduce the problem.
+ */
+ VM_ASSERT(ec == rb_current_ec_noinline());
#else
rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
#endif
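
The hazard, spelled out as hypothetical coroutine code; the rb_current_ec_noinline() comparison above catches the mismatch under VM_CHECK_MODE:

    static void
    tls_migration_hazard(void)
    {
        rb_execution_context_t *ec = GET_EC(); /* TLS address resolved on NT1 */
        rb_fiber_yield(0, NULL);               /* fiber may resume on NT2 */
        /* A second GET_EC() here may reuse NT1's cached TLS address; the
         * VM_ASSERT above fires when the two reads disagree. */
        (void)ec;
    }
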
@@ -1882,26 +1997,32 @@ rb_current_thread(void)
}
static inline rb_ractor_t *
-rb_current_ractor(void)
+rb_current_ractor_raw(bool expect)
{
if (ruby_single_main_ractor) {
return ruby_single_main_ractor;
}
else {
- const rb_execution_context_t *ec = GET_EC();
- return rb_ec_ractor_ptr(ec);
+ const rb_execution_context_t *ec = rb_current_execution_context(expect);
+ return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
}
}
+static inline rb_ractor_t *
+rb_current_ractor(void)
+{
+ return rb_current_ractor_raw(true);
+}
+
static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
VM_ASSERT(ruby_current_vm_ptr == NULL ||
- ruby_current_execution_context_ptr == NULL ||
- rb_ec_thread_ptr(GET_EC()) == NULL ||
+ ruby_current_execution_context_ptr == NULL ||
+ rb_ec_thread_ptr(GET_EC()) == NULL ||
rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
- rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
+ rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif
return ruby_current_vm_ptr;
@@ -1944,7 +2065,7 @@ enum {
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
- (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
+ (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
static inline bool
RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
@@ -1973,7 +2094,7 @@ void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
-void rb_execution_context_update(const rb_execution_context_t *ec);
+void rb_execution_context_update(rb_execution_context_t *ec);
void rb_execution_context_mark(const rb_execution_context_t *ec);
void rb_fiber_close(rb_fiber_t *fib);
void Init_native_thread(rb_thread_t *th);
@@ -1987,9 +2108,14 @@ void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned lo
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
+#ifdef RUBY_ASSERT_CRITICAL_SECTION
+ VM_ASSERT(ruby_assert_critical_section_entered == 0);
+#endif
+
VM_ASSERT(ec == GET_EC());
+
if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
- rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
+ rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
}
}
@@ -2013,6 +2139,7 @@ struct rb_trace_arg_struct {
};
void rb_hook_list_mark(rb_hook_list_t *hooks);
+void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
void rb_hook_list_free(rb_hook_list_t *hooks);
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
@@ -2078,6 +2205,10 @@ rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *
}
void rb_vm_trap_exit(rb_vm_t *vm);
+void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
+void rb_vm_postponed_job_free(void); /* vm_trace.c */
+size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
+void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
RUBY_SYMBOL_EXPORT_BEGIN