Diffstat (limited to 'vm_core.h')
-rw-r--r-- | vm_core.h | 362
1 file changed, 240 insertions, 122 deletions
@@ -53,13 +53,15 @@
 #include "ruby_assert.h"
 
+#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
+
 #if VM_CHECK_MODE > 0
-#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
+#define VM_ASSERT(/*expr, */...) RUBY_ASSERT_WHEN(VM_CHECK_MODE > 0, __VA_ARGS__)
 #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
 #define RUBY_ASSERT_CRITICAL_SECTION
 #define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
 #else
-#define VM_ASSERT(expr) ((void)0)
+#define VM_ASSERT(/*expr, */...) ((void)0)
 #define VM_UNREACHABLE(func) UNREACHABLE
 #define RUBY_DEBUG_THREAD_SCHEDULE()
 #endif
@@ -92,6 +94,7 @@ extern int ruby_assert_critical_section_entered;
 #include "internal.h"
 #include "internal/array.h"
 #include "internal/basic_operators.h"
+#include "internal/sanitizers.h"
 #include "internal/serial.h"
 #include "internal/vm.h"
 #include "method.h"
@@ -100,7 +103,6 @@ extern int ruby_assert_critical_section_entered;
 #include "ruby/st.h"
 #include "ruby_atomic.h"
 #include "vm_opts.h"
-#include "shape.h"
 
 #include "ruby/thread_native.h"
@@ -131,23 +133,11 @@ extern int ruby_assert_critical_section_entered;
 #define RUBY_NSIG NSIG
 
 #if defined(SIGCLD)
-# define RUBY_SIGCHLD    (SIGCLD)
+# define RUBY_SIGCHLD (SIGCLD)
 #elif defined(SIGCHLD)
-# define RUBY_SIGCHLD    (SIGCHLD)
-#else
-# define RUBY_SIGCHLD    (0)
+# define RUBY_SIGCHLD (SIGCHLD)
 #endif
 
-/* platforms with broken or non-existent SIGCHLD work by polling */
-#if defined(__APPLE__)
-# define SIGCHLD_LOSSY (1)
-#else
-# define SIGCHLD_LOSSY (0)
-#endif
-
-/* define to 0 to test old code path */
-#define WAITPID_USE_SIGCHLD (RUBY_SIGCHLD || SIGCHLD_LOSSY)
-
 #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
 # define USE_SIGALTSTACK
 void *rb_allocate_sigaltstack(void);
@@ -193,9 +183,6 @@ void *rb_register_sigaltstack(void *);
 #if OPT_DIRECT_THREADED_CODE
 #undef OPT_DIRECT_THREADED_CODE
 #endif /* OPT_DIRECT_THREADED_CODE */
-#if OPT_STACK_CACHING
-#undef OPT_STACK_CACHING
-#endif /* OPT_STACK_CACHING */
 #endif /* OPT_CALL_THREADED_CODE */
 
 void rb_vm_encoded_insn_data_table_init(void);
@@ -292,14 +279,21 @@ union iseq_inline_storage_entry {
 };
 
 struct rb_calling_info {
-    const struct rb_callinfo *ci;
+    const struct rb_call_data *cd;
     const struct rb_callcache *cc;
     VALUE block_handler;
     VALUE recv;
     int argc;
     bool kw_splat;
+    VALUE heap_argv;
 };
 
+#ifndef VM_ARGC_STACK_MAX
+#define VM_ARGC_STACK_MAX 128
+#endif
+
+# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
+
 struct rb_execution_context_struct;
 
 #if 1
@@ -346,7 +340,7 @@ pathobj_realpath(VALUE pathobj)
 }
 
 /* Forward declarations */
-struct rb_mjit_unit;
+struct rb_rjit_unit;
 
 typedef uintptr_t iseq_bits_t;
@@ -368,6 +362,18 @@ enum rb_iseq_type {
     ISEQ_TYPE_PLAIN
 };
 
+// Attributes specified by Primitive.attr!
+enum rb_builtin_attr {
+    // The iseq does not call methods.
+    BUILTIN_ATTR_LEAF = 0x01,
+    // This iseq only contains a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
+    BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
+    // This attribute signals the JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
+    BUILTIN_ATTR_INLINE_BLOCK = 0x04,
+};
+
+typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
+
 struct rb_iseq_constant_body {
     enum rb_iseq_type type;
@@ -410,6 +416,10 @@ struct rb_iseq_constant_body {
             unsigned int ambiguous_param0 : 1; /* {|a|} */
             unsigned int accepts_no_kwarg : 1;
             unsigned int ruby2_keywords: 1;
+            unsigned int anon_rest: 1;
+            unsigned int anon_kwrest: 1;
+            unsigned int use_block: 1;
+            unsigned int forwardable: 1;
         } flags;
 
         unsigned int size;
@@ -486,13 +496,9 @@ struct rb_iseq_constant_body {
     unsigned int ci_size;
     unsigned int stack_max; /* for stack overflow check */
 
-    bool catch_except_p; // If a frame of this ISeq may catch exception, set true.
-    // If true, this ISeq is leaf *and* backtraces are not used, for example,
-    // by rb_profile_frames. We verify only leafness on VM_CHECK_MODE though.
-    // Note that GC allocations might use backtraces due to
-    // ObjectSpace#trace_object_allocations.
-    // For more details, see: https://bugs.ruby-lang.org/issues/16956
-    bool builtin_inline_p;
+    unsigned int builtin_attrs; // Union of rb_builtin_attr
+
+    bool prism; // ISEQ was generated from the prism compiler
 
     union {
         iseq_bits_t * list; /* Find references for GC */
@@ -503,21 +509,30 @@ struct rb_iseq_constant_body {
 
     const rb_iseq_t *mandatory_only_iseq;
 
-#if USE_MJIT || USE_YJIT
-    // Function pointer for JIT code
-    VALUE (*jit_func)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
-    // Number of total calls with jit_exec()
-    long unsigned total_calls;
+#if USE_RJIT || USE_YJIT
+    // Function pointer for JIT code on jit_exec()
+    rb_jit_func_t jit_entry;
+    // Number of calls on jit_exec()
+    long unsigned jit_entry_calls;
 #endif
 
-#if USE_MJIT
-    // MJIT stores some data on each iseq.
-    struct rb_mjit_unit *mjit_unit;
+#if USE_YJIT
+    // Function pointer for JIT code on jit_exec_exception()
+    rb_jit_func_t jit_exception;
+    // Number of calls on jit_exec_exception()
+    long unsigned jit_exception_calls;
+#endif
+
+#if USE_RJIT
+    // RJIT stores some data on each iseq.
+    VALUE rjit_blocks;
 #endif
 
 #if USE_YJIT
     // YJIT stores some data on each iseq.
     void *yjit_payload;
+    // Used to estimate how frequently this ISEQ gets called
+    uint64_t yjit_calls_at_interv;
 #endif
 };
@@ -546,26 +561,21 @@ struct rb_iseq_struct {
 
 #define ISEQ_BODY(iseq) ((iseq)->body)
 
-#ifndef EXTSTATIC
-#define EXTSTATIC 0
-#endif
-
-#ifndef USE_LAZY_LOAD
+#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
 #define USE_LAZY_LOAD 0
 #endif
 
-#if USE_LAZY_LOAD
-const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
+#if !USE_LAZY_LOAD
+static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
 #endif
+const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
 
 static inline const rb_iseq_t *
 rb_iseq_check(const rb_iseq_t *iseq)
 {
-#if USE_LAZY_LOAD
-    if (ISEQ_BODY(iseq) == NULL) {
+    if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
         rb_iseq_complete((rb_iseq_t *)iseq);
     }
-#endif
 
     return iseq;
 }
@@ -616,6 +626,11 @@ typedef struct rb_hook_list_struct {
 // see builtin.h for definition
 typedef const struct rb_builtin_function *RB_BUILTIN;
 
+struct global_object_list {
+    VALUE *varptr;
+    struct global_object_list *next;
+};
+
 typedef struct rb_vm_struct {
     VALUE self;
@@ -633,15 +648,51 @@ typedef struct rb_vm_struct {
            struct rb_ractor_struct *lock_owner;
            unsigned int lock_rec;
 
-           // barrier
-           bool barrier_waiting;
-           unsigned int barrier_cnt;
-           rb_nativethread_cond_t barrier_cond;
-
            // join at exit
            rb_nativethread_cond_t terminate_cond;
            bool terminate_waiting;
+
+#ifndef RUBY_THREAD_PTHREAD_H
+           bool barrier_waiting;
+           unsigned int barrier_cnt;
+           rb_nativethread_cond_t barrier_cond;
+#endif
        } sync;
+
+       // ractor scheduling
+       struct {
+           rb_nativethread_lock_t lock;
+           struct rb_ractor_struct *lock_owner;
+           bool locked;
+
+           rb_nativethread_cond_t cond; // GRQ
+           unsigned int snt_cnt; // count of shared NTs
+           unsigned int dnt_cnt; // count of dedicated NTs
+
+           unsigned int running_cnt;
+
+           unsigned int max_cpu;
+           struct ccan_list_head grq; // Global Ready Queue
+           unsigned int grq_cnt;
+
+           // running threads
+           struct ccan_list_head running_threads;
+
+           // threads which switch context by timeslice
+           struct ccan_list_head timeslice_threads;
+
+           struct ccan_list_head zombie_threads;
+
+           // true if the timeslice timer is not enabled
+           bool timeslice_wait_inf;
+
+           // barrier
+           rb_nativethread_cond_t barrier_complete_cond;
+           rb_nativethread_cond_t barrier_release_cond;
+           bool barrier_waiting;
+           unsigned int barrier_waiting_cnt;
+           unsigned int barrier_serial;
+       } sched;
    } ractor;
 
 #ifdef USE_SIGALTSTACK
@@ -649,9 +700,6 @@ typedef struct rb_vm_struct {
 #endif
 
     rb_serial_t fork_gen;
-    rb_nativethread_lock_t waitpid_lock;
-    struct ccan_list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
-    struct ccan_list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
     struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
 
     /* set in single-threaded processes only: */
@@ -664,13 +712,9 @@ typedef struct rb_vm_struct {
 
     /* object management */
     VALUE mark_object_ary;
+    struct global_object_list *global_object_list;
     const VALUE special_exceptions[ruby_special_error_count];
 
-    /* object shapes */
-    rb_shape_t *shape_list;
-    rb_shape_t *root_shape;
-    shape_id_t next_shape_id;
-
     /* load */
     VALUE top_self;
     VALUE load_path;
@@ -680,13 +724,12 @@ typedef struct rb_vm_struct {
     VALUE loaded_features;
     VALUE loaded_features_snapshot;
     VALUE loaded_features_realpaths;
+    VALUE loaded_features_realpath_map;
     struct st_table *loaded_features_index;
     struct st_table *loading_table;
-#if EXTSTATIC
     // For running the init function of statically linked
     // extensions when they are loaded
     struct st_table *static_ext_inits;
-#endif
 
     /* signal */
     struct {
@@ -696,9 +739,8 @@ typedef struct rb_vm_struct {
     /* relation table of ensure - rollback for callcc */
     struct st_table *ensure_rollback_table;
 
-    /* postponed_job (async-signal-safe, NOT thread-safe) */
-    struct rb_postponed_job_struct *postponed_job_buffer;
-    rb_atomic_t postponed_job_index;
+    /* postponed_job (async-signal-safe, and thread-safe) */
+    struct rb_postponed_job_queue *postponed_job_queue;
 
     int src_encoding_index;
@@ -710,8 +752,6 @@ typedef struct rb_vm_struct {
     VALUE coverages, me2counter;
     int coverage_mode;
 
-    st_table * defined_module_hash;
-
     struct rb_objspace *objspace;
 
     rb_at_exit_list *at_exit;
@@ -719,10 +759,12 @@ typedef struct rb_vm_struct {
     st_table *frozen_strings;
 
     const struct rb_builtin_function *builtin_function_table;
-    int builtin_inline_index;
+    st_table *ci_table;
     struct rb_id_table *negative_cme_table;
     st_table *overloaded_cme_table; // cme -> overloaded_cme
+    st_table *unused_block_warning_table;
+    bool unused_block_warning_strict;
 
     // This id table contains a mapping from ID to ICs. It does this with ID
     // keys and nested st_tables as values. The nested tables have ICs as keys
@@ -821,19 +863,16 @@ struct rb_block {
 };
 
 typedef struct rb_control_frame_struct {
-    const VALUE *pc;        /* cfp[0] */
-    VALUE *sp;              /* cfp[1] */
-    const rb_iseq_t *iseq;  /* cfp[2] */
-    VALUE self;             /* cfp[3] / block[0] */
-    const VALUE *ep;        /* cfp[4] / block[1] */
-    const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc or forwarded block handler */
-    VALUE *__bp__;          /* cfp[6] */ /* outside vm_push_frame, use vm_base_ptr instead. */
-
+    const VALUE *pc;        // cfp[0]
+    VALUE *sp;              // cfp[1]
+    const rb_iseq_t *iseq;  // cfp[2]
+    VALUE self;             // cfp[3] / block[0]
+    const VALUE *ep;        // cfp[4] / block[1]
+    const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
+    void *jit_return;       // cfp[6] -- return address for JIT code
 #if VM_DEBUG_BP_CHECK
-    VALUE *bp_check;        /* cfp[7] */
+    VALUE *bp_check;        // cfp[7]
 #endif
-    // Return address for YJIT code
-    void *jit_return;
 } rb_control_frame_t;
 
 extern const rb_data_type_t ruby_threadptr_data_type;
@@ -858,13 +897,68 @@ typedef void *rb_jmpbuf_t[5];
 #endif
 
 /*
+  The `rb_vm_tag_jmpbuf_t` type represents a buffer used to
+  long jump to a C frame associated with `rb_vm_tag`.
+
+  The use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
+  following functions:
+  - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
+  - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
+
+  `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
+  `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
+*/
+#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
+/*
+  The WebAssembly target with Asyncify-based SJLJ needs
+  to capture the execution context by unwind/rewind-ing
+  call frames into a jump buffer. The buffer space tends
+  to be considerably large, unlike other architectures'
+  register-based buffers.
+  Therefore, we allocate the buffer on the heap in such
+  environments.
+*/
+typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
+
+#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
+
+static inline void
+rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
+{
+    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
+}
+
+static inline void
+rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
+{
+    ruby_xfree(*jmpbuf);
+}
+#else
+typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
+
+#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
+
+static inline void
+rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
+{
+    // no-op
+}
+
+static inline void
+rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
+{
+    // no-op
+}
+#endif
+
+/*
    the members which are written in EC_PUSH_TAG() should be placed at
    the beginning and the end, so that entire region is accessible.
 */
 struct rb_vm_tag {
     VALUE tag;
     VALUE retval;
-    rb_jmpbuf_t buf;
+    rb_vm_tag_jmpbuf_t buf;
     struct rb_vm_tag *prev;
     enum ruby_tag_type state;
     unsigned int lock_rec;
 };
@@ -872,7 +966,7 @@ struct rb_vm_tag {
 STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
 STATIC_ASSERT(rb_vm_tag_buf_end,
-              offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
+              offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
               sizeof(struct rb_vm_tag));
 
 struct rb_unblock_callback {
@@ -954,6 +1048,10 @@ struct rb_execution_context_struct {
         VALUE *stack_end;
         size_t stack_maxsize;
         RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
+
+#ifdef RUBY_ASAN_ENABLED
+        void *asan_fake_stack_handle;
+#endif
     } machine;
 };
@@ -995,6 +1093,7 @@ typedef struct rb_thread_struct {
     rb_execution_context_t *ec;
 
     struct rb_thread_sched_item sched;
+    bool mn_schedulable;
     rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
 
     VALUE last_status; /* $? */
@@ -1010,7 +1109,7 @@ typedef struct rb_thread_struct {
     BITFIELD(enum rb_thread_status, status, 2);
     /* bit flags */
-    unsigned int locking_native_thread : 1;
+    unsigned int has_dedicated_nt : 1;
     unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
     unsigned int report_on_exception: 1;
@@ -1070,6 +1169,7 @@ typedef struct rb_thread_struct {
 
     /* misc */
     VALUE name;
+    void **specific_storage;
 
     struct rb_ext_config ext_config;
 } rb_thread_t;
@@ -1077,7 +1177,7 @@ typedef struct rb_thread_struct {
 static inline unsigned int
 rb_th_serial(const rb_thread_t *th)
 {
-    return (unsigned int)th->serial;
+    return th ? (unsigned int)th->serial : 0;
 }
 
 typedef enum {
@@ -1099,12 +1199,13 @@ typedef enum {
 RUBY_SYMBOL_EXPORT_BEGIN
 
 /* node -> iseq */
-rb_iseq_t *rb_iseq_new         (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
-rb_iseq_t *rb_iseq_new_top     (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
-rb_iseq_t *rb_iseq_new_main    (const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
-rb_iseq_t *rb_iseq_new_eval    (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
-rb_iseq_t *rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
-                                enum rb_iseq_type, const rb_compile_option_t*);
+rb_iseq_t *rb_iseq_new         (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
+rb_iseq_t *rb_iseq_new_top     (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
+rb_iseq_t *rb_iseq_new_main    (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
+rb_iseq_t *rb_iseq_new_eval    (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
+rb_iseq_t *rb_iseq_new_with_opt(const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
+                                enum rb_iseq_type, const rb_compile_option_t*,
+                                VALUE script_lines);
 
 struct iseq_link_anchor;
 struct rb_iseq_new_with_callback_callback_func {
@@ -1117,8 +1218,12 @@ static inline struct rb_iseq_new_with_callback_callback_func *
 rb_iseq_new_with_callback_new_callback(
     void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
 {
-    VALUE memo = rb_imemo_new(imemo_ifunc, (VALUE)func, (VALUE)ptr, Qundef, Qfalse);
-    return (struct rb_iseq_new_with_callback_callback_func *)memo;
+    struct rb_iseq_new_with_callback_callback_func *memo =
+        IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
+    memo->func = func;
+    memo->data = ptr;
+
+    return memo;
 }
 
 rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
     VALUE name, VALUE path, VALUE realpath, int first_lineno,
@@ -1126,7 +1231,6 @@ rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_call
 
 VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
 int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
-attr_index_t rb_estimate_iv_count(VALUE klass, const rb_iseq_t * initialize_iseq);
 
 VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
@@ -1416,7 +1520,9 @@ VM_ENV_ENVVAL_PTR(const VALUE *ep)
 static inline const rb_env_t *
 vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
 {
-    rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
+    rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
+    env->ep = env_ep;
+    env->env = env_body;
     env->env_size = env_size;
     env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
     return env;
 }
@@ -1562,12 +1668,6 @@ vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
               (vm_block_handler_type(block_handler), 1));
 }
 
-static inline int
-vm_cfp_forwarded_bh_p(const rb_control_frame_t *cfp, VALUE block_handler)
-{
block_handler; -} - static inline enum rb_block_type vm_block_type(const struct rb_block *block) { @@ -1696,17 +1796,13 @@ VALUE rb_proc_alloc(VALUE klass); VALUE rb_proc_dup(VALUE self); /* for debug */ -extern void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp); -extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc); -extern void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp -#if OPT_STACK_CACHING - , VALUE reg_a, VALUE reg_b -#endif -); +extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *); +extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *); +extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *); -#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp) -#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp)) -void rb_vm_bugreport(const void *); +#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr) +#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr) +bool rb_vm_bugreport(const void *, FILE *); typedef void (*ruby_sighandler_t)(int); RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5) NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...)); @@ -1748,7 +1844,7 @@ void rb_vm_inc_const_missing_count(void); VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc, const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat); void rb_vm_pop_frame_no_int(rb_execution_context_t *ec); -MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec); +void rb_vm_pop_frame(rb_execution_context_t *ec); void rb_thread_start_timer_thread(void); void rb_thread_stop_timer_thread(void); @@ -1759,34 +1855,35 @@ static inline void rb_vm_living_threads_init(rb_vm_t *vm) { ccan_list_head_init(&vm->waiting_fds); - ccan_list_head_init(&vm->waiting_pids); ccan_list_head_init(&vm->workqueue); - ccan_list_head_init(&vm->waiting_grps); ccan_list_head_init(&vm->ractor.set); + ccan_list_head_init(&vm->ractor.sched.zombie_threads); } typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE); rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp); rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp); +VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp); int rb_vm_get_sourceline(const rb_control_frame_t *); void rb_vm_stack_to_heap(rb_execution_context_t *ec); -void ruby_thread_init_stack(rb_thread_t *th); +void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame); rb_thread_t * ruby_thread_from_native(void); int ruby_thread_set_native(rb_thread_t *th); int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp); void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp); -MJIT_STATIC VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler); +void rb_vm_env_write(const VALUE *ep, int index, VALUE v); +VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler); void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE 
 
 #define rb_vm_register_special_exception(sp, e, m) \
     rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
 
-void rb_gc_mark_machine_stack(const rb_execution_context_t *ec);
+void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
 
 void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
 
-MJIT_STATIC const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
+const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
 
 #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
@@ -1809,7 +1906,6 @@ rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
 
 /* for thread */
 
 #if RUBY_VM_THREAD_MODEL == 2
-MJIT_SYMBOL_EXPORT_BEGIN
 
 RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
 RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
@@ -1817,8 +1913,6 @@ RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
 RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
 RUBY_EXTERN unsigned int ruby_vm_event_local_num;
 
-MJIT_SYMBOL_EXPORT_END
-
 #define GET_VM()     rb_current_vm()
 #define GET_RACTOR() rb_current_ractor()
 #define GET_THREAD() rb_current_thread()
@@ -1864,6 +1958,20 @@ rb_current_execution_context(bool expect_ec)
 #else
     rb_execution_context_t *ec = ruby_current_ec;
 #endif
+
+    /* On shared objects, `__tls_get_addr()` is used to access the TLS
+     * and the address of `ruby_current_ec` can be stored on a function
+     * frame. However, this address can be misused after native thread
+     * migration of a coroutine.
+     *   1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
+     *   2) Context switch and resume it on NT2.
+     *   3) `ptr` is used on NT2, but it accesses the TLS on NT1.
+     * This assertion checks for such misuse.
+     *
+     * To avoid accidents, `GET_EC()` should be called once per frame.
+     * Note that inlining can introduce the problem.
+     */
+    VM_ASSERT(ec == rb_current_ec_noinline());
 #else
     rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
 #endif
@@ -1879,17 +1987,23 @@ rb_current_thread(void)
 }
 
 static inline rb_ractor_t *
-rb_current_ractor(void)
+rb_current_ractor_raw(bool expect)
 {
     if (ruby_single_main_ractor) {
         return ruby_single_main_ractor;
     }
     else {
-        const rb_execution_context_t *ec = GET_EC();
-        return rb_ec_ractor_ptr(ec);
+        const rb_execution_context_t *ec = rb_current_execution_context(expect);
+        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
    }
 }
 
+static inline rb_ractor_t *
+rb_current_ractor(void)
+{
+    return rb_current_ractor_raw(true);
+}
+
 static inline rb_vm_t *
 rb_current_vm(void)
 {
@@ -2081,6 +2195,10 @@ rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *
 }
 
 void rb_vm_trap_exit(rb_vm_t *vm);
+void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
+void rb_vm_postponed_job_free(void); /* vm_trace.c */
+size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
+void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
 
 RUBY_SYMBOL_EXPORT_BEGIN
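
Note on the `VM_ASSERT` change above: making the macro variadic and forwarding to `RUBY_ASSERT_WHEN` lets callers append an optional printf-style message after the asserted expression. A minimal usage sketch, assuming the message forwarding works as the `__VA_ARGS__` expansion suggests (the identifiers below are illustrative, not from this header):

    VM_ASSERT(th != NULL);                                      /* single-argument form, as before */
    VM_ASSERT(len <= cap, "len %u exceeds cap %u", len, cap);   /* new: optional message arguments */

Either form still expands to `((void)0)` when `VM_CHECK_MODE` is 0, so the message costs nothing in release builds.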
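Note on `heap_argv` and `CALLING_ARGC`: the new field lets an argument list live in a Ruby array rather than on the VM stack (the `VM_ARGC_STACK_MAX` threshold of 128 suggests when that spill happens), and `CALLING_ARGC` hides the difference between the two layouts. A hedged sketch of a consumer that handles both (`each_call_arg` and `stack_args` are made-up names for illustration):

    static void
    each_call_arg(struct rb_calling_info *calling, const VALUE *stack_args)
    {
        int argc = CALLING_ARGC(calling); /* array length when spilled, plain argc otherwise */
        for (int i = 0; i < argc; i++) {
            VALUE arg = calling->heap_argv
                ? RARRAY_AREF(calling->heap_argv, i) /* arguments spilled to a heap array */
                : stack_args[i];                     /* arguments still on the VM stack */
            (void)arg; /* ... process arg ... */
        }
    }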
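Note on the JIT fields in `rb_iseq_constant_body`: the old `jit_func`/`total_calls` pair becomes `jit_entry`/`jit_entry_calls`, with a separate `jit_exception`/`jit_exception_calls` pair under `USE_YJIT` for frames entered via `jit_exec_exception()`. A sketch of how a dispatcher such as `jit_exec()` plausibly consults these fields; the call-counting logic here is an assumption for illustration, not code from this header:

    static inline VALUE
    jit_exec_sketch(rb_execution_context_t *ec)
    {
        const rb_iseq_t *iseq = ec->cfp->iseq;
        rb_jit_func_t func = ISEQ_BODY(iseq)->jit_entry; /* exists under USE_RJIT || USE_YJIT */
        if (func == NULL) {
            ISEQ_BODY(iseq)->jit_entry_calls++; /* count calls until the JIT compiles this iseq */
            return Qundef;                      /* sentinel: caller falls back to the interpreter */
        }
        return func(ec, ec->cfp);               /* enter the generated machine code */
    }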
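Note on `rb_vm_tag_jmpbuf_t`: the typedef lets the Asyncify-based WebAssembly build heap-allocate its large jump buffer while every other platform keeps the buffer inline in `struct rb_vm_tag`. Per the comment in the diff, users pair `rb_vm_tag_jmpbuf_init`/`rb_vm_tag_jmpbuf_deinit` around the buffer's lifetime and unwrap it with `RB_VM_TAG_JMPBUF_GET` before `rb_setjmp`/`rb_longjmp`. A lifecycle sketch under those assumptions (`protected_section` is illustrative; real callers go through `EC_PUSH_TAG()`):

    static void
    protected_section(struct rb_vm_tag *tag)
    {
        rb_vm_tag_jmpbuf_init(&tag->buf);   /* ruby_xmalloc on wasm; no-op elsewhere */
        if (rb_setjmp(RB_VM_TAG_JMPBUF_GET(tag->buf)) == 0) {
            /* ... run code that may rb_longjmp() back here ... */
        }
        rb_vm_tag_jmpbuf_deinit(&tag->buf); /* ruby_xfree on wasm; no-op elsewhere */
    }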