Diffstat (limited to 'vm_insnhelper.h')
-rw-r--r--   vm_insnhelper.h   52
1 file changed, 28 insertions(+), 24 deletions(-)
diff --git a/vm_insnhelper.h b/vm_insnhelper.h
index 0d90eb9434..286bc1f671 100644
--- a/vm_insnhelper.h
+++ b/vm_insnhelper.h
@@ -11,24 +11,28 @@
**********************************************************************/
-MJIT_SYMBOL_EXPORT_BEGIN
-
RUBY_EXTERN VALUE ruby_vm_const_missing_count;
-RUBY_EXTERN rb_serial_t ruby_vm_global_constant_state;
-RUBY_EXTERN rb_serial_t ruby_vm_class_serial;
+RUBY_EXTERN rb_serial_t ruby_vm_constant_cache_invalidations;
+RUBY_EXTERN rb_serial_t ruby_vm_constant_cache_misses;
RUBY_EXTERN rb_serial_t ruby_vm_global_cvar_state;
-MJIT_SYMBOL_EXPORT_END
+#if USE_YJIT || USE_RJIT // We want vm_insns_count on any JIT-enabled build.
+// Increment vm_insns_count for --yjit-stats. We increment this even when
+// --yjit or --yjit-stats is not used because branching to skip it is slower.
+// We also don't use ATOMIC_INC for performance, allowing inaccuracy on Ractors.
+#define JIT_COLLECT_USAGE_INSN(insn) rb_vm_insns_count++
+#else
+#define JIT_COLLECT_USAGE_INSN(insn) // none
+#endif
#if VM_COLLECT_USAGE_DETAILS
#define COLLECT_USAGE_INSN(insn) vm_collect_usage_insn(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) vm_collect_usage_operand((insn), (n), ((VALUE)(op)))
-
#define COLLECT_USAGE_REGISTER(reg, s) vm_collect_usage_register((reg), (s))
#else
-#define COLLECT_USAGE_INSN(insn) /* none */
-#define COLLECT_USAGE_OPERAND(insn, n, op) /* none */
-#define COLLECT_USAGE_REGISTER(reg, s) /* none */
+#define COLLECT_USAGE_INSN(insn) JIT_COLLECT_USAGE_INSN(insn)
+#define COLLECT_USAGE_OPERAND(insn, n, op) // none
+#define COLLECT_USAGE_REGISTER(reg, s) // none
#endif
/**********************************************************/
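Note on the hunk above: the previously empty COLLECT_USAGE_INSN fallback now expands to JIT_COLLECT_USAGE_INSN, so any JIT-enabled build bumps a plain (non-atomic) counter on every instruction, as the in-diff comment explains. A minimal standalone sketch of how the macro layering resolves; the configuration values and the main() harness below are illustrative stand-ins, not the real Ruby build flags:

#include <stdio.h>

#define USE_YJIT 1                  /* stand-in: pretend this is a JIT-enabled build */
#define VM_COLLECT_USAGE_DETAILS 0  /* stand-in: detailed usage collection disabled */

/* Plain counter: no atomic increment, so concurrent Ractors may lose updates,
 * which the diff's comment accepts in exchange for avoiding a branch. */
static unsigned long rb_vm_insns_count;

#if USE_YJIT
# define JIT_COLLECT_USAGE_INSN(insn) rb_vm_insns_count++
#else
# define JIT_COLLECT_USAGE_INSN(insn) /* none */
#endif

#if VM_COLLECT_USAGE_DETAILS
# define COLLECT_USAGE_INSN(insn) vm_collect_usage_insn(insn)
#else
# define COLLECT_USAGE_INSN(insn) JIT_COLLECT_USAGE_INSN(insn)
#endif

int
main(void)
{
    for (int i = 0; i < 3; i++) {
        COLLECT_USAGE_INSN(i);  /* expands to rb_vm_insns_count++ in this configuration */
    }
    printf("%lu\n", rb_vm_insns_count); /* prints 3 */
    return 0;
}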
@@ -166,28 +170,23 @@ CC_SET_FASTPATH(const struct rb_callcache *cc, vm_call_handler func, bool enable
/* others */
/**********************************************************/
-#ifndef MJIT_HEADER
#define CALL_SIMPLE_METHOD() do { \
- rb_snum_t x = leaf ? INSN_ATTR(width) : 0; \
- rb_snum_t y = attr_width_opt_send_without_block(0); \
- rb_snum_t z = x - y; \
- ADD_PC(z); \
+ rb_snum_t insn_width = attr_width_opt_send_without_block(0); \
+ ADD_PC(-insn_width); \
DISPATCH_ORIGINAL_INSN(opt_send_without_block); \
} while (0)
-#endif
-#define PREV_CLASS_SERIAL() (ruby_vm_class_serial)
-#define NEXT_CLASS_SERIAL() (++ruby_vm_class_serial)
-#define GET_GLOBAL_CONSTANT_STATE() (ruby_vm_global_constant_state)
-#define INC_GLOBAL_CONSTANT_STATE() (++ruby_vm_global_constant_state)
#define GET_GLOBAL_CVAR_STATE() (ruby_vm_global_cvar_state)
#define INC_GLOBAL_CVAR_STATE() (++ruby_vm_global_cvar_state)
static inline struct vm_throw_data *
THROW_DATA_NEW(VALUE val, const rb_control_frame_t *cf, int st)
{
- struct vm_throw_data *obj = (struct vm_throw_data *)rb_imemo_new(imemo_throw_data, val, (VALUE)cf, 0, 0);
+ struct vm_throw_data *obj = IMEMO_NEW(struct vm_throw_data, imemo_throw_data, 0);
+ *((VALUE *)&obj->throw_obj) = val;
+ *((struct rb_control_frame_struct **)&obj->catch_frame) = (struct rb_control_frame_struct *)cf;
obj->throw_state = st;
+
return obj;
}
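Note on THROW_DATA_NEW above: the packed rb_imemo_new() call is replaced by IMEMO_NEW plus explicit stores through casts, since throw_obj and catch_frame are const-qualified fields that cannot be assigned directly. A minimal sketch of that write-once-through-a-cast pattern on heap-allocated memory; the struct layout here is a stand-in, not Ruby's real vm_throw_data:

#include <stdio.h>
#include <stdlib.h>

struct throw_data {
    const int throw_obj;   /* const member: ordinary assignment is rejected by the compiler */
    int       throw_state; /* plain member, assigned normally */
};

static struct throw_data *
throw_data_new(int val, int st)
{
    struct throw_data *obj = malloc(sizeof(*obj));
    if (!obj) abort();
    /* Cast away const for the one-time initializing store, mirroring
     * *((VALUE *)&obj->throw_obj) = val; in the hunk above. */
    *((int *)&obj->throw_obj) = val;
    obj->throw_state = st;
    return obj;
}

int
main(void)
{
    struct throw_data *d = throw_data_new(42, 7);
    printf("%d %d\n", d->throw_obj, d->throw_state); /* 42 7 */
    free(d);
    return 0;
}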
@@ -237,8 +236,8 @@ static inline void
THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj)
{
if (THROW_DATA_P(obj) &&
- THROW_DATA_STATE(obj) == TAG_BREAK) {
- obj->flags |= THROW_DATA_CONSUMED;
+ THROW_DATA_STATE(obj) == TAG_BREAK) {
+ obj->flags |= THROW_DATA_CONSUMED;
}
}
@@ -248,13 +247,18 @@ THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj)
#define IS_ARGS_KW_OR_KW_SPLAT(ci) (vm_ci_flag(ci) & (VM_CALL_KWARG | VM_CALL_KW_SPLAT))
#define IS_ARGS_KW_SPLAT_MUT(ci) (vm_ci_flag(ci) & VM_CALL_KW_SPLAT_MUT)
+static inline bool
+vm_call_cacheable(const struct rb_callinfo *ci, const struct rb_callcache *cc)
+{
+ return (vm_ci_flag(ci) & VM_CALL_FCALL) ||
+ METHOD_ENTRY_VISI(vm_cc_cme(cc)) != METHOD_VISI_PROTECTED;
+}
/* If this returns true, an optimized function returned by `vm_call_iseq_setup_func`
can be used as a fastpath. */
static inline bool
vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
- return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc));
+ return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) && vm_call_cacheable(ci, cc);
}
#endif /* RUBY_INSNHELPER_H */
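Note on the last hunk: the bare METHOD_ENTRY_CACHEABLE check is folded into the new vm_call_cacheable() helper, which additionally accepts FCALLs (calls with an implicit receiver) even when the callee is protected. A standalone sketch of the predicate; the flag and visibility values below are illustrative stand-ins, not Ruby's real VM_CALL_* / METHOD_VISI_* constants:

#include <stdbool.h>
#include <stdio.h>

enum { CALL_FCALL = 0x01 };                               /* assumed flag value */
enum visi { VISI_PUBLIC, VISI_PRIVATE, VISI_PROTECTED };  /* assumed visibilities */

/* Cacheable when the call has no explicit receiver (FCALL) or the callee is
 * not protected; otherwise the visibility check depends on the caller. */
static bool
call_cacheable(unsigned int ci_flag, enum visi callee_visi)
{
    return (ci_flag & CALL_FCALL) || callee_visi != VISI_PROTECTED;
}

int
main(void)
{
    printf("%d\n", call_cacheable(CALL_FCALL, VISI_PROTECTED)); /* 1 */
    printf("%d\n", call_cacheable(0, VISI_PROTECTED));          /* 0 */
    printf("%d\n", call_cacheable(0, VISI_PUBLIC));             /* 1 */
    return 0;
}

The apparent rationale is that an FCALL dispatches on self, so the protected-visibility check cannot fail for that call shape, leaving the cached fastpath safe to reuse.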