Diffstat (limited to 'vm_insnhelper.c')
-rw-r--r--  vm_insnhelper.c | 1729
1 file changed, 1164 insertions, 565 deletions
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 0ce7fb6cc7..46737145ca 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -27,9 +27,7 @@
/* finish iseq array */
#include "insns.inc"
-#ifndef MJIT_HEADER
#include "insns_info.inc"
-#endif
extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
@@ -37,16 +35,14 @@ extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_me
extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
int argc, const VALUE *argv, int priv);
-#ifndef MJIT_HEADER
static const struct rb_callcache vm_empty_cc;
static const struct rb_callcache vm_empty_cc_for_super;
-#endif
/* control stack frame */
static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
-MJIT_STATIC VALUE
+VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
@@ -71,9 +67,6 @@ ec_stack_overflow(rb_execution_context_t *ec, int setup)
}
NORETURN(static void vm_stackoverflow(void));
-#ifdef MJIT_HEADER
-NOINLINE(static COLDFUNC void vm_stackoverflow(void));
-#endif
static void
vm_stackoverflow(void)
@@ -81,8 +74,8 @@ vm_stackoverflow(void)
ec_stack_overflow(GET_EC(), TRUE);
}
-NORETURN(MJIT_STATIC void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
-MJIT_STATIC void
+NORETURN(void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
+void
rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
{
if (rb_during_gc()) {
@@ -100,6 +93,7 @@ rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
#endif
}
+static inline void stack_check(rb_execution_context_t *ec);
#if VM_CHECK_MODE > 0
static int
@@ -239,8 +233,24 @@ vm_check_frame(VALUE type,
static VALUE vm_stack_canary; /* Initialized later */
static bool vm_stack_canary_was_born = false;
-#ifndef MJIT_HEADER
-MJIT_FUNC_EXPORTED void
+// Return the index of the instruction right before the given PC.
+// This is needed because insn_entry advances PC before the insn body.
+static unsigned int
+previous_insn_index(const rb_iseq_t *iseq, const VALUE *pc)
+{
+ unsigned int pos = 0;
+ while (pos < ISEQ_BODY(iseq)->iseq_size) {
+ int opcode = rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[pos]);
+ unsigned int next_pos = pos + insn_len(opcode);
+ if (ISEQ_BODY(iseq)->iseq_encoded + next_pos == pc) {
+ return pos;
+ }
+ pos = next_pos;
+ }
+ rb_bug("failed to find the previous insn");
+}
+
+void
rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
const struct rb_control_frame_struct *reg_cfp = ec->cfp;
@@ -266,15 +276,14 @@ rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
}
const VALUE *orig = rb_iseq_original_iseq(iseq);
- const VALUE *encoded = ISEQ_BODY(iseq)->iseq_encoded;
- const ptrdiff_t pos = GET_PC() - encoded;
- const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
- const char *name = insn_name(insn);
const VALUE iseqw = rb_iseqw_new(iseq);
const VALUE inspection = rb_inspect(iseqw);
const char *stri = rb_str_to_cstr(inspection);
const VALUE disasm = rb_iseq_disasm(iseq);
const char *strd = rb_str_to_cstr(disasm);
+ const ptrdiff_t pos = previous_insn_index(iseq, GET_PC());
+ const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
+ const char *name = insn_name(insn);
/* rb_bug() is not capable of outputting this large contents. It
is designed to run from a SIGSEGV handler, which tends to be
@@ -287,7 +296,6 @@ rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
name, stri, pos, strd);
rb_bug("see above.");
}
-#endif
#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
#else
@@ -345,6 +353,17 @@ vm_push_frame_debug_counter_inc(
#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
#endif
+// Return a poison value to be set above the stack top to verify leafness.
+VALUE
+rb_vm_stack_canary(void)
+{
+#if VM_CHECK_MODE > 0
+ return vm_stack_canary;
+#else
+ return 0;
+#endif
+}
+
STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
@@ -390,7 +409,6 @@ vm_push_frame(rb_execution_context_t *ec,
.self = self,
.ep = sp - 1,
.block_code = NULL,
- .__bp__ = sp, /* Store initial value of ep as bp to skip calculation cost of bp on JIT cancellation. */
#if VM_DEBUG_BP_CHECK
.bp_check = sp,
#endif
@@ -431,7 +449,7 @@ vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *e
return flags & VM_FRAME_FLAG_FINISH;
}
-MJIT_STATIC void
+void
rb_vm_pop_frame(rb_execution_context_t *ec)
{
vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
@@ -483,7 +501,7 @@ rb_arity_error_new(int argc, int min, int max)
return rb_exc_new3(rb_eArgError, err_mess);
}
-MJIT_STATIC void
+void
rb_error_arity(int argc, int min, int max)
{
rb_exc_raise(rb_arity_error_new(argc, min, max));
@@ -503,6 +521,7 @@ vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}
+// YJIT assumes this function never runs GC
static inline void
vm_env_write(const VALUE *ep, int index, VALUE v)
{
@@ -515,7 +534,13 @@ vm_env_write(const VALUE *ep, int index, VALUE v)
}
}
-MJIT_STATIC VALUE
+void
+rb_vm_env_write(const VALUE *ep, int index, VALUE v)
+{
+ vm_env_write(ep, index, v);
+}
+
+VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
if (block_handler == VM_BLOCK_HANDLER_NONE) {
@@ -615,7 +640,12 @@ lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
static struct vm_svar *
svar_new(VALUE obj)
{
- return (struct vm_svar *)rb_imemo_new(imemo_svar, Qnil, Qnil, Qnil, obj);
+ struct vm_svar *svar = IMEMO_NEW(struct vm_svar, imemo_svar, obj);
+ *((VALUE *)&svar->lastline) = Qnil;
+ *((VALUE *)&svar->backref) = Qnil;
+ *((VALUE *)&svar->others) = Qnil;
+
+ return svar;
}
static void
@@ -681,6 +711,30 @@ vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key,
return val;
}
+static inline VALUE
+vm_backref_defined(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t type)
+{
+ VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
+ int nth = 0;
+
+ if (type & 0x01) {
+ switch (type >> 1) {
+ case '&':
+ case '`':
+ case '\'':
+ break;
+ case '+':
+ return rb_reg_last_defined(backref);
+ default:
+ rb_bug("unexpected back-ref");
+ }
+ }
+ else {
+ nth = (int)(type >> 1);
+ }
+ return rb_reg_nth_defined(nth, backref);
+}
+
PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
static rb_callable_method_entry_t *
check_method_entry(VALUE obj, int can_be_svar)
@@ -708,7 +762,7 @@ check_method_entry(VALUE obj, int can_be_svar)
}
}
-MJIT_STATIC const rb_callable_method_entry_t *
+const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
const VALUE *ep = cfp->ep;
@@ -903,8 +957,8 @@ vm_get_const_key_cref(const VALUE *ep)
const rb_cref_t *key_cref = cref;
while (cref) {
- if (FL_TEST(CREF_CLASS(cref), FL_SINGLETON) ||
- FL_TEST(CREF_CLASS(cref), RCLASS_CLONED)) {
+ if (RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
+ RCLASS_EXT(CREF_CLASS(cref))->cloned) {
return key_cref;
}
cref = CREF_NEXT(cref);
@@ -1117,7 +1171,7 @@ vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_l
}
while (CREF_NEXT(cref) &&
- (NIL_P(CREF_CLASS(cref)) || FL_TEST(CREF_CLASS(cref), FL_SINGLETON) ||
+ (NIL_P(CREF_CLASS(cref)) || RCLASS_SINGLETON_P(CREF_CLASS(cref)) ||
CREF_PUSHED_BY_EVAL(cref) || CREF_SINGLETON(cref))) {
cref = CREF_NEXT(cref);
}
@@ -1152,9 +1206,9 @@ fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, in
#define ATTR_INDEX_NOT_SET (attr_index_t)-1
-ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int));
+ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int, VALUE));
static inline VALUE
-vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
+vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value)
{
#if OPT_IC_FOR_IVAR
VALUE val = Qundef;
@@ -1162,7 +1216,7 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
VALUE * ivar_list;
if (SPECIAL_CONST_P(obj)) {
- return Qnil;
+ return default_value;
}
#if SHAPE_IN_BASIC_FLAGS
@@ -1206,10 +1260,10 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
#if !SHAPE_IN_BASIC_FLAGS
shape_id = ivtbl->shape_id;
#endif
- ivar_list = ivtbl->ivptr;
+ ivar_list = ivtbl->as.shape.ivptr;
}
else {
- return Qnil;
+ return default_value;
}
}
@@ -1227,14 +1281,21 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
RUBY_ASSERT(cached_id != OBJ_TOO_COMPLEX_SHAPE_ID);
if (index == ATTR_INDEX_NOT_SET) {
- return Qnil;
+ return default_value;
}
val = ivar_list[index];
+#if USE_DEBUG_COUNTER
+ RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
+
+ if (RB_TYPE_P(obj, T_OBJECT)) {
+ RB_DEBUG_COUNTER_INC(ivar_get_obj_hit);
+ }
+#endif
RUBY_ASSERT(!UNDEF_P(val));
}
else { // cache miss case
-#if RUBY_DEBUG
+#if USE_DEBUG_COUNTER
if (is_attr) {
if (cached_id != INVALID_SHAPE_ID) {
RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
@@ -1251,24 +1312,55 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
}
}
-#endif
+ RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
- rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
+ if (RB_TYPE_P(obj, T_OBJECT)) {
+ RB_DEBUG_COUNTER_INC(ivar_get_obj_miss);
+ }
+#endif
if (shape_id == OBJ_TOO_COMPLEX_SHAPE_ID) {
- if (!rb_id_table_lookup(ROBJECT_IV_HASH(obj), id, &val)) {
- val = Qnil;
+ st_table *table = NULL;
+ switch (BUILTIN_TYPE(obj)) {
+ case T_CLASS:
+ case T_MODULE:
+ table = (st_table *)RCLASS_IVPTR(obj);
+ break;
+
+ case T_OBJECT:
+ table = ROBJECT_IV_HASH(obj);
+ break;
+
+ default: {
+ struct gen_ivtbl *ivtbl;
+ if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
+ table = ivtbl->as.complex.table;
+ }
+ break;
+ }
+ }
+
+ if (!table || !st_lookup(table, id, &val)) {
+ val = default_value;
}
}
else {
- if (rb_shape_get_iv_index(shape, id, &index)) {
+ shape_id_t previous_cached_id = cached_id;
+ if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
// This fills in the cache with the shared cache object.
// "ent" is the shared cache object
- fill_ivar_cache(iseq, ic, cc, is_attr, index, shape_id);
+ if (cached_id != previous_cached_id) {
+ fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
+ }
- // We fetched the ivar list above
- val = ivar_list[index];
- RUBY_ASSERT(!UNDEF_P(val));
+ if (index == ATTR_INDEX_NOT_SET) {
+ val = default_value;
+ }
+ else {
+ // We fetched the ivar list above
+ val = ivar_list[index];
+ RUBY_ASSERT(!UNDEF_P(val));
+ }
}
else {
if (is_attr) {
@@ -1278,13 +1370,15 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
vm_ic_attr_index_initialize(ic, shape_id);
}
- val = Qnil;
+ val = default_value;
}
}
}
- RUBY_ASSERT(!UNDEF_P(val));
+ if (!UNDEF_P(default_value)) {
+ RUBY_ASSERT(!UNDEF_P(val));
+ }
return val;
@@ -1322,48 +1416,23 @@ static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
- switch (BUILTIN_TYPE(obj)) {
- case T_OBJECT:
- {
- rb_check_frozen_internal(obj);
+ RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
- attr_index_t index = rb_obj_ivar_set(obj, id, val);
+ if (BUILTIN_TYPE(obj) == T_OBJECT) {
+ rb_check_frozen(obj);
- shape_id_t next_shape_id = ROBJECT_SHAPE_ID(obj);
+ attr_index_t index = rb_obj_ivar_set(obj, id, val);
- if (next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID) {
- populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
- }
+ shape_id_t next_shape_id = ROBJECT_SHAPE_ID(obj);
- RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);
- return val;
+ if (next_shape_id != OBJ_TOO_COMPLEX_SHAPE_ID) {
+ populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
}
- case T_CLASS:
- case T_MODULE:
- break;
- default:
- {
- rb_ivar_set(obj, id, val);
- shape_id_t next_shape_id = rb_shape_get_shape_id(obj);
- rb_shape_t *next_shape = rb_shape_get_shape_by_id(next_shape_id);
- attr_index_t index;
-
- if (rb_shape_get_iv_index(next_shape, id, &index)) { // based off the hash stored in the transition tree
- if (index >= MAX_IVARS) {
- rb_raise(rb_eArgError, "too many instance variables");
- }
- populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
- }
- else {
- rb_bug("didn't find the id\n");
- }
-
- return val;
- }
+ RB_DEBUG_COUNTER_INC(ivar_set_obj_miss);
+ return val;
}
#endif
- RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
return rb_ivar_set(obj, id, val);
}
@@ -1394,21 +1463,13 @@ vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_i
// Cache hit case
if (shape_id == dest_shape_id) {
RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
-
- // Just get the IV table
- rb_gen_ivtbl_get(obj, 0, &ivtbl);
}
else if (dest_shape_id != INVALID_SHAPE_ID) {
- rb_shape_t * dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
- shape_id_t source_shape_id = dest_shape->parent_id;
+ rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
+ rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
- if (shape_id == source_shape_id && dest_shape->edge_name == id && dest_shape->type == SHAPE_IVAR) {
- ivtbl = rb_ensure_generic_iv_list_size(obj, index + 1);
-#if SHAPE_IN_BASIC_FLAGS
- RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
-#else
- ivtbl->shape_id = dest_shape_id;
-#endif
+ if (shape_id == dest_shape->parent_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
+ RUBY_ASSERT(index < dest_shape->capacity);
}
else {
return Qundef;
@@ -1418,9 +1479,17 @@ vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_i
return Qundef;
}
- VALUE *ptr = ivtbl->ivptr;
+ rb_gen_ivtbl_get(obj, 0, &ivtbl);
- RB_OBJ_WRITE(obj, &ptr[index], val);
+ if (shape_id != dest_shape_id) {
+#if SHAPE_IN_BASIC_FLAGS
+ RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
+#else
+ ivtbl->shape_id = dest_shape_id;
+#endif
+ }
+
+ RB_OBJ_WRITE(obj, &ivtbl->as.shape.ivptr[index], val);
RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
@@ -1444,10 +1513,11 @@ vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t i
VM_ASSERT(!rb_ractor_shareable_p(obj));
}
else if (dest_shape_id != INVALID_SHAPE_ID) {
+ rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
shape_id_t source_shape_id = dest_shape->parent_id;
- if (shape_id == source_shape_id && dest_shape->edge_name == id) {
+ if (shape_id == source_shape_id && dest_shape->edge_name == id && shape->capacity == dest_shape->capacity) {
RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
@@ -1469,6 +1539,7 @@ vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t i
RB_OBJ_WRITE(obj, &ptr[index], val);
RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
+ RB_DEBUG_COUNTER_INC(ivar_set_obj_hit);
return val;
}
break;
@@ -1484,7 +1555,7 @@ vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t i
}
static VALUE
-update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, ICVARC ic)
+update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, const rb_cref_t * cref, ICVARC ic)
{
VALUE defined_class = 0;
VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
@@ -1504,10 +1575,15 @@ update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, ICVARC ic)
}
struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
- ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
+ ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
+ ent->cref = cref;
ic->entry = ent;
+
+ RUBY_ASSERT(BUILTIN_TYPE((VALUE)cref) == T_IMEMO && IMEMO_TYPE_P(cref, imemo_cref));
+ RB_OBJ_WRITTEN(iseq, Qundef, ent->cref);
RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
+ RB_OBJ_WRITTEN(ent->class_value, Qundef, ent->cref);
return cvar_value;
}
@@ -1516,8 +1592,9 @@ static inline VALUE
vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, ICVARC ic)
{
const rb_cref_t *cref;
+ cref = vm_get_cref(GET_EP());
- if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && LIKELY(rb_ractor_main_p())) {
+ if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
VALUE v = rb_ivar_lookup(ic->entry->class_value, id, Qundef);
@@ -1526,10 +1603,9 @@ vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID
return v;
}
- cref = vm_get_cref(GET_EP());
VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
- return update_classvariable_cache(iseq, klass, id, ic);
+ return update_classvariable_cache(iseq, klass, id, cref, ic);
}
VALUE
@@ -1542,20 +1618,20 @@ static inline void
vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *reg_cfp, ID id, VALUE val, ICVARC ic)
{
const rb_cref_t *cref;
+ cref = vm_get_cref(GET_EP());
- if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE()) {
+ if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE() && ic->entry->cref == cref && LIKELY(rb_ractor_main_p())) {
RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
rb_class_ivar_set(ic->entry->class_value, id, val);
return;
}
- cref = vm_get_cref(GET_EP());
VALUE klass = vm_get_cvar_base(cref, reg_cfp, 1);
rb_cvar_set(klass, id, val);
- update_classvariable_cache(iseq, klass, id, ic);
+ update_classvariable_cache(iseq, klass, id, cref, ic);
}
void
@@ -1567,7 +1643,7 @@ rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID
static inline VALUE
vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
{
- return vm_getivar(obj, id, iseq, ic, NULL, FALSE);
+ return vm_getivar(obj, id, iseq, ic, NULL, FALSE, Qnil);
}
static inline void
@@ -1609,7 +1685,7 @@ vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
/* continue throw */
if (FIXNUM_P(err)) {
- ec->tag->state = FIX2INT(err);
+ ec->tag->state = RUBY_TAG_FATAL;
}
else if (SYMBOL_P(err)) {
ec->tag->state = TAG_THROW;
@@ -1766,7 +1842,16 @@ vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_c
}
}
break;
- case ISEQ_TYPE_EVAL:
+ case ISEQ_TYPE_EVAL: {
+ const rb_iseq_t *is = escape_cfp->iseq;
+ enum rb_iseq_type t = ISEQ_BODY(is)->type;
+ while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE || t == ISEQ_TYPE_EVAL) {
+ if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
+ t = ISEQ_BODY(is)->type;
+ }
+ toplevel = t == ISEQ_TYPE_TOP || t == ISEQ_TYPE_MAIN;
+ break;
+ }
case ISEQ_TYPE_CLASS:
toplevel = 0;
break;
@@ -1816,12 +1901,16 @@ vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
}
}
+VALUE
+rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
+{
+ return vm_throw(ec, reg_cfp, throw_state, throwobj);
+}
+
static inline void
-vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)
+vm_expandarray(struct rb_control_frame_struct *cfp, VALUE ary, rb_num_t num, int flag)
{
int is_splat = flag & 0x01;
- rb_num_t space_size = num + is_splat;
- VALUE *base = sp - 1;
const VALUE *ptr;
rb_num_t len;
const VALUE obj = ary;
@@ -1832,11 +1921,11 @@ vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)
len = 1;
}
else {
- ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
+ ptr = RARRAY_CONST_PTR(ary);
len = (rb_num_t)RARRAY_LEN(ary);
}
- if (space_size == 0) {
+ if (num + is_splat == 0) {
/* no space left on stack */
}
else if (flag & 0x02) {
@@ -1844,41 +1933,48 @@ vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)
rb_num_t i = 0, j;
if (len < num) {
- for (i=0; i<num-len; i++) {
- *base++ = Qnil;
+ for (i = 0; i < num - len; i++) {
+ *cfp->sp++ = Qnil;
}
}
- for (j=0; i<num; i++, j++) {
+
+ for (j = 0; i < num; i++, j++) {
VALUE v = ptr[len - j - 1];
- *base++ = v;
+ *cfp->sp++ = v;
}
+
if (is_splat) {
- *base = rb_ary_new4(len - j, ptr);
+ *cfp->sp++ = rb_ary_new4(len - j, ptr);
}
}
else {
/* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
- rb_num_t i;
- VALUE *bptr = &base[space_size - 1];
-
- for (i=0; i<num; i++) {
- if (len <= i) {
- for (; i<num; i++) {
- *bptr-- = Qnil;
- }
- break;
- }
- *bptr-- = ptr[i];
- }
if (is_splat) {
if (num > len) {
- *bptr = rb_ary_new();
+ *cfp->sp++ = rb_ary_new();
}
else {
- *bptr = rb_ary_new4(len - num, ptr + num);
+ *cfp->sp++ = rb_ary_new4(len - num, ptr + num);
+ }
+ }
+
+ if (num > len) {
+ rb_num_t i = 0;
+ for (; i < num - len; i++) {
+ *cfp->sp++ = Qnil;
+ }
+
+ for (rb_num_t j = 0; i < num; i++, j++) {
+ *cfp->sp++ = ptr[len - j - 1];
+ }
+ }
+ else {
+ for (rb_num_t j = 0; j < num; j++) {
+ *cfp->sp++ = ptr[num - j - 1];
}
}
}
+
RB_GC_GUARD(ary);
}
@@ -1910,9 +2006,6 @@ vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callin
if (! vm_cc_markable(cc)) {
return;
}
- else if (! vm_ci_markable(ci)) {
- return;
- }
if (UNLIKELY(ccs->len == ccs->capa)) {
if (ccs->capa == 0) {
@@ -1927,7 +2020,8 @@ vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callin
VM_ASSERT(ccs->len < ccs->capa);
const int pos = ccs->len++;
- RB_OBJ_WRITE(klass, &ccs->entries[pos].ci, ci);
+ ccs->entries[pos].argc = vm_ci_argc(ci);
+ ccs->entries[pos].flag = vm_ci_flag(ci);
RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
@@ -1942,7 +2036,9 @@ rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
{
ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
for (int i=0; i<ccs->len; i++) {
- vm_ci_dump(ccs->entries[i].ci);
+ ruby_debug_printf("CCS CI ID:flag:%x argc:%u\n",
+ ccs->entries[i].flag,
+ ccs->entries[i].argc);
rp(ccs->entries[i].cc);
}
}
@@ -1954,22 +2050,19 @@ vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
VM_ASSERT(ccs->len <= ccs->capa);
for (int i=0; i<ccs->len; i++) {
- const struct rb_callinfo *ci = ccs->entries[i].ci;
const struct rb_callcache *cc = ccs->entries[i].cc;
- VM_ASSERT(vm_ci_p(ci));
- VM_ASSERT(vm_ci_mid(ci) == mid);
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(vm_cc_class_check(cc, klass));
VM_ASSERT(vm_cc_check_cme(cc, ccs->cme));
+ VM_ASSERT(!vm_cc_super_p(cc));
+ VM_ASSERT(!vm_cc_refinement_p(cc));
}
return TRUE;
}
#endif
-#ifndef MJIT_HEADER
-
-static const rb_callable_method_entry_t *check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
+const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci);
static const struct rb_callcache *
vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
@@ -1980,6 +2073,8 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
VALUE ccs_data;
if (cc_tbl) {
+ // CCS data is keyed on method id, so we don't need the method id
+ // for doing comparisons in the `for` loop below.
if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
ccs = (struct rb_class_cc_entries *)ccs_data;
const int ccs_len = ccs->len;
@@ -1992,14 +2087,20 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
else {
VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
+ // We already know the method id is correct because we had
+ // to look up the ccs_data by method id. All we need to
+ // compare is argc and flag
+ unsigned int argc = vm_ci_argc(ci);
+ unsigned int flag = vm_ci_flag(ci);
+
for (int i=0; i<ccs_len; i++) {
- const struct rb_callinfo *ccs_ci = ccs->entries[i].ci;
+ unsigned int ccs_ci_argc = ccs->entries[i].argc;
+ unsigned int ccs_ci_flag = ccs->entries[i].flag;
const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
- VM_ASSERT(vm_ci_p(ccs_ci));
VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
- if (ccs_ci == ci) { // TODO: equality
+ if (ccs_ci_argc == argc && ccs_ci_flag == flag) {
RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
@@ -2055,9 +2156,9 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
}
}
- cme = check_overloaded_cme(cme, ci);
+ cme = rb_check_overloaded_cme(cme, ci);
- const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general);
+ const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal);
vm_ccs_push(klass, ccs, ci, cc);
VM_ASSERT(vm_cc_cme(cc) != NULL);
@@ -2067,7 +2168,7 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
return cc;
}
-MJIT_FUNC_EXPORTED const struct rb_callcache *
+const struct rb_callcache *
rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
{
const struct rb_callcache *cc;
@@ -2089,7 +2190,6 @@ rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
return cc;
}
-#endif
static const struct rb_callcache *
vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
@@ -2103,16 +2203,13 @@ vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
#if OPT_INLINE_METHOD_CACHE
cd->cc = cc;
- const struct rb_callcache *empty_cc =
-#ifdef MJIT_HEADER
- rb_vm_empty_cc();
-#else
- &vm_empty_cc;
-#endif
- if (cd_owner && cc != empty_cc) RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
+ const struct rb_callcache *empty_cc = &vm_empty_cc;
+ if (cd_owner && cc != empty_cc) {
+ RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
+ }
#if USE_DEBUG_COUNTER
- if (old_cc == empty_cc) {
+ if (!old_cc || old_cc == empty_cc) {
// empty
RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
}
@@ -2138,9 +2235,7 @@ vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
return cc;
}
-#ifndef MJIT_HEADER
ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
-#endif
static const struct rb_callcache *
vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
{
@@ -2326,15 +2421,13 @@ opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
#undef EQ_UNREDEFINED_P
-#ifndef MJIT_HEADER
-
-static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, int argc); // vm_eval.c
+static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, const struct rb_callinfo *ci); // vm_eval.c
NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
static VALUE
opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
{
- const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, 1);
+ const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, &VM_CI_ON_STACK(mid, 0, 1, NULL));
if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
return RBOOL(recv == obj);
@@ -2368,8 +2461,6 @@ rb_eql_opt(VALUE obj1, VALUE obj2)
return opt_equality_by_mid(obj1, obj2, idEqlP);
}
-#endif // MJIT_HEADER
-
extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
extern VALUE rb_vm_call_with_refinements(rb_execution_context_t *, VALUE, ID, int, const VALUE *, int);
@@ -2427,15 +2518,15 @@ double_cmp_ge(double a, double b)
return RBOOL(a >= b);
}
+// Copied by vm_dump.c
static inline VALUE *
vm_base_ptr(const rb_control_frame_t *cfp)
{
-#if 0 // we may optimize and use this once we confirm it does not spoil performance on JIT.
const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
- if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD) {
+ if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
/* adjust `self' */
bp += 1;
}
@@ -2452,9 +2543,12 @@ vm_base_ptr(const rb_control_frame_t *cfp)
else {
return NULL;
}
-#else
- return cfp->__bp__;
-#endif
+}
+
+VALUE *
+rb_vm_base_ptr(const rb_control_frame_t *cfp)
+{
+ return vm_base_ptr(cfp);
}
/* method call processes with call_info */
@@ -2491,7 +2585,7 @@ vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t
return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
-MJIT_STATIC bool
+bool
rb_simple_iseq_p(const rb_iseq_t *iseq)
{
return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
@@ -2503,7 +2597,7 @@ rb_simple_iseq_p(const rb_iseq_t *iseq)
ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}
-MJIT_FUNC_EXPORTED bool
+bool
rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
{
return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
@@ -2515,7 +2609,7 @@ rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}
-MJIT_FUNC_EXPORTED bool
+bool
rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
{
return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
@@ -2526,71 +2620,214 @@ rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}
-// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
-MJIT_STATIC bool
-rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
+#define ALLOW_HEAP_ARGV (-2)
+#define ALLOW_HEAP_ARGV_KEEP_KWSPLAT (-3)
+
+static inline bool
+vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE ary, int max_args)
{
- return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
-}
+ vm_check_canary(GET_EC(), cfp->sp);
+ bool ret = false;
+ if (!NIL_P(ary)) {
+ const VALUE *ptr = RARRAY_CONST_PTR(ary);
+ long len = RARRAY_LEN(ary);
+ int argc = calling->argc;
-static inline void
-CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
- struct rb_calling_info *restrict calling,
- const struct rb_callinfo *restrict ci)
-{
- if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
- VALUE final_hash;
- /* This expands the rest argument to the stack.
- * So, vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT is now inconsistent.
- */
- vm_caller_setup_arg_splat(cfp, calling);
- if (!IS_ARGS_KW_OR_KW_SPLAT(ci) &&
- calling->argc > 0 &&
- RB_TYPE_P((final_hash = *(cfp->sp - 1)), T_HASH) &&
- (((struct RHash *)final_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
- *(cfp->sp - 1) = rb_hash_dup(final_hash);
- calling->kw_splat = 1;
- }
- }
- if (UNLIKELY(IS_ARGS_KW_OR_KW_SPLAT(ci))) {
- if (IS_ARGS_KEYWORD(ci)) {
- /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
- * by creating a keyword hash.
- * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
+ if (UNLIKELY(max_args <= ALLOW_HEAP_ARGV && len + argc > VM_ARGC_STACK_MAX)) {
+ /* Avoid SystemStackError when splatting large arrays by storing arguments in
+ * a temporary array, instead of trying to keep arguments on the VM stack.
*/
- vm_caller_setup_arg_kw(cfp, calling, ci);
+ VALUE *argv = cfp->sp - argc;
+ VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
+ rb_ary_cat(argv_ary, argv, argc);
+ rb_ary_cat(argv_ary, ptr, len);
+ cfp->sp -= argc - 1;
+ cfp->sp[-1] = argv_ary;
+ calling->argc = 1;
+ calling->heap_argv = argv_ary;
+ RB_GC_GUARD(ary);
}
else {
- VALUE keyword_hash = cfp->sp[-1];
- if (!RB_TYPE_P(keyword_hash, T_HASH)) {
- /* Convert a non-hash keyword splat to a new hash */
- cfp->sp[-1] = rb_hash_dup(rb_to_hash_type(keyword_hash));
+ long i;
+
+ if (max_args >= 0 && len + argc > max_args) {
+ /* If only a given max_args is allowed, copy up to max args.
+ * Used by vm_callee_setup_block_arg for non-lambda blocks,
+ * where additional arguments are ignored.
+ *
+ * Also, copy up to one more argument than the maximum,
+ * in case it is an empty keyword hash that will be removed.
+ */
+ calling->argc += len - (max_args - argc + 1);
+ len = max_args - argc + 1;
+ ret = true;
}
- else if (!IS_ARGS_KW_SPLAT_MUT(ci)) {
- /* Convert a hash keyword splat to a new hash unless
- * a mutable keyword splat was passed.
+ else {
+ /* Unset heap_argv if set originally. Can happen when
+ * forwarding modified arguments, where heap_argv was used
+ * originally, but heap_argv not supported by the forwarded
+ * method in all cases.
*/
- cfp->sp[-1] = rb_hash_dup(keyword_hash);
+ calling->heap_argv = 0;
}
+ CHECK_VM_STACK_OVERFLOW(cfp, len);
+
+ for (i = 0; i < len; i++) {
+ *cfp->sp++ = ptr[i];
+ }
+ calling->argc += i;
}
}
+
+ return ret;
}
static inline void
-CALLER_REMOVE_EMPTY_KW_SPLAT(struct rb_control_frame_struct *restrict cfp,
- struct rb_calling_info *restrict calling,
- const struct rb_callinfo *restrict ci)
+vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
{
- if (UNLIKELY(calling->kw_splat)) {
- /* This removes the last Hash object if it is empty.
- * So, vm_ci_flag(ci) & VM_CALL_KW_SPLAT is now inconsistent.
+ const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
+ const int kw_len = vm_ci_kwarg(ci)->keyword_len;
+ const VALUE h = rb_hash_new_with_size(kw_len);
+ VALUE *sp = cfp->sp;
+ int i;
+
+ for (i=0; i<kw_len; i++) {
+ rb_hash_aset(h, passed_keywords[i], (sp - kw_len)[i]);
+ }
+ (sp-kw_len)[0] = h;
+
+ cfp->sp -= kw_len - 1;
+ calling->argc -= kw_len - 1;
+ calling->kw_splat = 1;
+}
+
+static inline VALUE
+vm_caller_setup_keyword_hash(const struct rb_callinfo *ci, VALUE keyword_hash)
+{
+ if (UNLIKELY(!RB_TYPE_P(keyword_hash, T_HASH))) {
+ if (keyword_hash != Qnil) {
+ /* Convert a non-hash keyword splat to a new hash */
+ keyword_hash = rb_hash_dup(rb_to_hash_type(keyword_hash));
+ }
+ }
+ else if (!IS_ARGS_KW_SPLAT_MUT(ci)) {
+ /* Convert a hash keyword splat to a new hash unless
+ * a mutable keyword splat was passed.
*/
- if (RHASH_EMPTY_P(cfp->sp[-1])) {
+ keyword_hash = rb_hash_dup(keyword_hash);
+ }
+ return keyword_hash;
+}
+
+static inline void
+CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
+ struct rb_calling_info *restrict calling,
+ const struct rb_callinfo *restrict ci, int max_args)
+{
+ if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
+ if (IS_ARGS_KW_SPLAT(ci)) {
+ // f(*a, **kw)
+ VM_ASSERT(calling->kw_splat == 1);
+
+ cfp->sp -= 2;
+ calling->argc -= 2;
+ VALUE ary = cfp->sp[0];
+ VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[1]);
+
+ // splat a
+ if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) return;
+
+ // put kw
+ if (kwh != Qnil && !RHASH_EMPTY_P(kwh)) {
+ if (UNLIKELY(calling->heap_argv)) {
+ rb_ary_push(calling->heap_argv, kwh);
+ ((struct RHash *)kwh)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
+ if (max_args != ALLOW_HEAP_ARGV_KEEP_KWSPLAT) {
+ calling->kw_splat = 0;
+ }
+ }
+ else {
+ cfp->sp[0] = kwh;
+ cfp->sp++;
+ calling->argc++;
+
+ VM_ASSERT(calling->kw_splat == 1);
+ }
+ }
+ else {
+ calling->kw_splat = 0;
+ }
+ }
+ else {
+ // f(*a)
+ VM_ASSERT(calling->kw_splat == 0);
+
+ cfp->sp -= 1;
+ calling->argc -= 1;
+ VALUE ary = cfp->sp[0];
+
+ if (vm_caller_setup_arg_splat(cfp, calling, ary, max_args)) {
+ goto check_keyword;
+ }
+
+ // check the last argument
+ VALUE last_hash, argv_ary;
+ if (UNLIKELY(argv_ary = calling->heap_argv)) {
+ if (!IS_ARGS_KEYWORD(ci) &&
+ RARRAY_LEN(argv_ary) > 0 &&
+ RB_TYPE_P((last_hash = rb_ary_last(0, NULL, argv_ary)), T_HASH) &&
+ (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
+
+ rb_ary_pop(argv_ary);
+ if (!RHASH_EMPTY_P(last_hash)) {
+ rb_ary_push(argv_ary, rb_hash_dup(last_hash));
+ calling->kw_splat = 1;
+ }
+ }
+ }
+ else {
+check_keyword:
+ if (!IS_ARGS_KEYWORD(ci) &&
+ calling->argc > 0 &&
+ RB_TYPE_P((last_hash = cfp->sp[-1]), T_HASH) &&
+ (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
+
+ if (RHASH_EMPTY_P(last_hash)) {
+ calling->argc--;
+ cfp->sp -= 1;
+ }
+ else {
+ cfp->sp[-1] = rb_hash_dup(last_hash);
+ calling->kw_splat = 1;
+ }
+ }
+ }
+ }
+ }
+ else if (UNLIKELY(IS_ARGS_KW_SPLAT(ci))) {
+ // f(**kw)
+ VM_ASSERT(calling->kw_splat == 1);
+ VALUE kwh = vm_caller_setup_keyword_hash(ci, cfp->sp[-1]);
+
+ if (kwh == Qnil || RHASH_EMPTY_P(kwh)) {
cfp->sp--;
calling->argc--;
calling->kw_splat = 0;
}
+ else {
+ cfp->sp[-1] = kwh;
+ }
+ }
+ else if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
+ // f(k1:1, k2:2)
+ VM_ASSERT(calling->kw_splat == 0);
+
+ /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
+ * by creating a keyword hash.
+ * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
+ */
+ vm_caller_setup_arg_kw(cfp, calling, ci);
}
}
@@ -2671,7 +2908,7 @@ static VALUE
vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
struct rb_calling_info *calling)
{
- const struct rb_callinfo *ci = calling->ci;
+ const struct rb_callinfo *ci = calling->cd->ci;
const struct rb_callcache *cc = calling->cc;
VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
@@ -2698,7 +2935,7 @@ static VALUE
vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
struct rb_calling_info *calling)
{
- const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->ci;
+ const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->cd->ci;
const struct rb_callcache *cc = calling->cc;
VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
@@ -2723,36 +2960,122 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t
return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
+static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
+
+static VALUE
+vm_call_single_noarg_leaf_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
+ struct rb_calling_info *calling)
+{
+ const struct rb_builtin_function *bf = calling->cc->aux_.bf;
+ cfp->sp -= (calling->argc + 1);
+ return builtin_invoker0(ec, calling->recv, NULL, (rb_insn_func_t)bf->func_ptr);
+}
+
+VALUE rb_gen_method_name(VALUE owner, VALUE name); // in vm_backtrace.c
+
+static void
+warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq, void *pc)
+{
+ rb_vm_t *vm = GET_VM();
+ st_table *dup_check_table = vm->unused_block_warning_table;
+ st_data_t key;
+
+ union {
+ VALUE v;
+ unsigned char b[SIZEOF_VALUE];
+ } k1 = {
+ .v = (VALUE)pc,
+ }, k2 = {
+ .v = (VALUE)cme->def,
+ };
+
+ // relax check
+ if (!vm->unused_block_warning_strict) {
+ key = (st_data_t)cme->def->original_id;
+
+ if (st_lookup(dup_check_table, key, NULL)) {
+ return;
+ }
+ }
+
+ // strict check
+ // make unique key from pc and me->def pointer
+ key = 0;
+ for (int i=0; i<SIZEOF_VALUE; i++) {
+ // fprintf(stderr, "k1:%3d k2:%3d\n", k1.b[i], k2.b[SIZEOF_VALUE-1-i]);
+ key |= (st_data_t)(k1.b[i] ^ k2.b[SIZEOF_VALUE-1-i]) << (8 * i);
+ }
+
+ if (0) {
+ fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
+ fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
+ fprintf(stderr, "key:%p\n", (void *)key);
+ }
+
+ // duplication check
+ if (st_insert(dup_check_table, key, 1)) {
+ // already shown
+ }
+ else {
+ VALUE m_loc = rb_method_entry_location((const rb_method_entry_t *)cme);
+ VALUE name = rb_gen_method_name(cme->defined_class, ISEQ_BODY(iseq)->location.base_label);
+
+ if (!NIL_P(m_loc)) {
+ rb_warning("the block passed to '%"PRIsVALUE"' defined at %"PRIsVALUE":%"PRIsVALUE" may be ignored",
+ name, RARRAY_AREF(m_loc, 0), RARRAY_AREF(m_loc, 1));
+ }
+ else {
+ rb_warning("the block may be ignored because '%"PRIsVALUE"' does not use a block", name);
+ }
+ }
+}
+
static inline int
vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
{
- const struct rb_callinfo *ci = calling->ci;
+ const struct rb_callinfo *ci = calling->cd->ci;
const struct rb_callcache *cc = calling->cc;
- bool cacheable_ci = vm_ci_markable(ci);
+
+ if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
+ calling->block_handler != VM_BLOCK_HANDLER_NONE &&
+ !(vm_ci_flag(calling->cd->ci) & VM_CALL_SUPER))) {
+ warn_unused_block(vm_cc_cme(cc), iseq, (void *)ec->cfp->pc);
+ }
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
if (LIKELY(rb_simple_iseq_p(iseq))) {
rb_control_frame_t *cfp = ec->cfp;
- CALLER_SETUP_ARG(cfp, calling, ci);
- CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
+ int lead_num = ISEQ_BODY(iseq)->param.lead_num;
+ CALLER_SETUP_ARG(cfp, calling, ci, lead_num);
- if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
- argument_arity_error(ec, iseq, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
+ if (calling->argc != lead_num) {
+ argument_arity_error(ec, iseq, calling->argc, lead_num, lead_num);
}
- VM_ASSERT(ci == calling->ci);
+ VM_ASSERT(ci == calling->cd->ci);
VM_ASSERT(cc == calling->cc);
- CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), cacheable_ci && vm_call_iseq_optimizable_p(ci, cc));
+
+ if (vm_call_iseq_optimizable_p(ci, cc)) {
+ if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) &&
+ !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
+ VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
+ vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
+ CC_SET_FASTPATH(cc, vm_call_single_noarg_leaf_builtin, true);
+ }
+ else {
+ CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
+ }
+ }
return 0;
}
else if (rb_iseq_only_optparam_p(iseq)) {
rb_control_frame_t *cfp = ec->cfp;
- CALLER_SETUP_ARG(cfp, calling, ci);
- CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
+
+ CALLER_SETUP_ARG(cfp, calling, ci, lead_num + opt_num);
const int argc = calling->argc;
const int opt = argc - lead_num;
@@ -2763,12 +3086,12 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- cacheable_ci && vm_call_cacheable(ci, cc));
+ vm_call_cacheable(ci, cc));
}
else {
CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- cacheable_ci && vm_call_cacheable(ci, cc));
+ vm_call_cacheable(ci, cc));
}
/* initialize opt vars for self-references */
@@ -2796,7 +3119,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
- cacheable_ci && vm_call_cacheable(ci, cc));
+ vm_call_cacheable(ci, cc));
return 0;
}
@@ -2809,7 +3132,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
if (klocals[kw_param->num] == INT2FIX(0)) {
/* copy from default_values */
CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
- cacheable_ci && vm_call_cacheable(ci, cc));
+ vm_call_cacheable(ci, cc));
}
return 0;
@@ -2829,7 +3152,7 @@ vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct r
const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
const int param_size = ISEQ_BODY(iseq)->param.size;
const int local_size = ISEQ_BODY(iseq)->local_table_size;
- const int opt_pc = vm_callee_setup_arg(ec, calling, def_iseq_ptr(vm_cc_cme(cc)->def), cfp->sp - calling->argc, param_size, local_size);
+ const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
}
@@ -2837,7 +3160,7 @@ static inline VALUE
vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
int opt_pc, int param_size, int local_size)
{
- const struct rb_callinfo *ci = calling->ci;
+ const struct rb_callinfo *ci = calling->cd->ci;
const struct rb_callcache *cc = calling->cc;
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
@@ -3241,7 +3564,7 @@ vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cf
int argc, VALUE *argv, VALUE *stack_bottom)
{
RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
- const struct rb_callinfo *ci = calling->ci;
+ const struct rb_callinfo *ci = calling->cd->ci;
const struct rb_callcache *cc = calling->cc;
VALUE val;
const rb_callable_method_entry_t *me = vm_cc_cme(cc);
@@ -3282,6 +3605,31 @@ vm_call_cfunc_with_frame_(rb_execution_context_t *ec, rb_control_frame_t *reg_cf
return val;
}
+// Push a C method frame for a given cme. This is called when JIT code skipped
+// pushing a frame but the C method reached a point where a frame is needed.
+void
+rb_vm_push_cfunc_frame(const rb_callable_method_entry_t *cme, int recv_idx)
+{
+ VM_ASSERT(cme->def->type == VM_METHOD_TYPE_CFUNC);
+ rb_execution_context_t *ec = GET_EC();
+ VALUE *sp = ec->cfp->sp;
+ VALUE recv = *(sp - recv_idx - 1);
+ VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
+ VALUE block_handler = VM_BLOCK_HANDLER_NONE;
+#if VM_CHECK_MODE > 0
+ // Clean up the stack canary since we're about to satisfy the "leaf or lazy push" assumption
+ *(GET_EC()->cfp->sp) = Qfalse;
+#endif
+ vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)cme, 0, ec->cfp->sp, 0, 0);
+}
+
+// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
+bool
+rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
+{
+ return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
+}
+
static VALUE
vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
@@ -3292,107 +3640,112 @@ vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp
return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
}
-#ifndef VM_ARGC_STACK_MAX
-#define VM_ARGC_STACK_MAX 128
-#endif
-
static VALUE
-vm_call_cfunc_setup_argv_ary(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
+vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
- int argc = calling->argc;
- VALUE *argv = cfp->sp - argc;
- VALUE ary = argv[argc-1];
- long len = RARRAY_LEN(ary);
+ const struct rb_callinfo *ci = calling->cd->ci;
+ RB_DEBUG_COUNTER_INC(ccf_cfunc_other);
- if (UNLIKELY(len + argc > VM_ARGC_STACK_MAX)) {
- vm_check_canary(ec, cfp->sp);
- const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
- VALUE argv_ary = rb_ary_new_capa(len + argc - 1);
- rb_obj_hide(argv_ary);
- rb_ary_cat(argv_ary, argv, argc-1);
- rb_ary_cat(argv_ary, ptr, len);
- cfp->sp -= argc - 1;
- cfp->sp[-1] = argv_ary;
- calling->argc = 1;
+ CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
+ VALUE argv_ary;
+ if (UNLIKELY(argv_ary = calling->heap_argv)) {
+ VM_ASSERT(!IS_ARGS_KEYWORD(ci));
+ int argc = RARRAY_LENINT(argv_ary);
+ VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
+ VALUE *stack_bottom = reg_cfp->sp - 2;
- return argv_ary;
+ VM_ASSERT(calling->argc == 1);
+ VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
+ VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
+
+ return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
}
else {
- return Qfalse;
+ CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat);
+
+ return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
}
}
-static VALUE
-vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
+static inline VALUE
+vm_call_cfunc_array_argv(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int stack_offset, int argc_offset)
{
- const struct rb_callinfo *ci = calling->ci;
- RB_DEBUG_COUNTER_INC(ccf_cfunc);
-
- VALUE argv_ary;
+ VALUE argv_ary = reg_cfp->sp[-1 - stack_offset];
+ int argc = RARRAY_LENINT(argv_ary) - argc_offset;
- if (UNLIKELY(IS_ARGS_SPLAT(ci)) && (argv_ary = vm_call_cfunc_setup_argv_ary(ec, reg_cfp, calling))) {
- // special case of CALLER_SETUP_ARG
- if (!IS_ARGS_KW_OR_KW_SPLAT(ci)) {
- long hash_idx = RARRAY_LEN(argv_ary) - 1;
- VALUE final_hash = RARRAY_AREF(argv_ary, hash_idx);
+ if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
+ return vm_call_cfunc_other(ec, reg_cfp, calling);
+ }
- if (RB_TYPE_P(final_hash, T_HASH) &&
- (((struct RHash *)final_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
- if (RHASH_EMPTY_P(final_hash)) {
- rb_ary_pop(argv_ary);
- }
- else {
- final_hash = rb_hash_dup(final_hash);
- RARRAY_ASET(argv_ary, hash_idx, final_hash);
- calling->kw_splat = 1;
- }
- }
- }
+ VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
+ calling->kw_splat = 0;
+ int i;
+ VALUE *stack_bottom = reg_cfp->sp - 2 - stack_offset;
+ VALUE *sp = stack_bottom;
+ CHECK_VM_STACK_OVERFLOW(reg_cfp, argc);
+ for(i = 0; i < argc; i++) {
+ *++sp = argv[i];
+ }
+ reg_cfp->sp = sp+1;
- if (UNLIKELY(IS_ARGS_KW_OR_KW_SPLAT(ci))) {
- VM_ASSERT(!IS_ARGS_KEYWORD(ci)); // should be KW_SPLAT
+ return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, stack_bottom+1, stack_bottom);
+}
- long hash_idx = RARRAY_LEN(argv_ary) - 1;
- VALUE keyword_hash = RARRAY_AREF(argv_ary, hash_idx);
+static inline VALUE
+vm_call_cfunc_only_splat(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
+{
+ RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat);
+ VALUE argv_ary = reg_cfp->sp[-1];
+ int argc = RARRAY_LENINT(argv_ary);
+ VALUE *argv = (VALUE *)RARRAY_CONST_PTR(argv_ary);
+ VALUE last_hash;
+ int argc_offset = 0;
- if (!RB_TYPE_P(keyword_hash, T_HASH)) {
- /* Convert a non-hash keyword splat to a new hash */
- RARRAY_ASET(argv_ary, hash_idx, rb_hash_dup(rb_to_hash_type(keyword_hash)));
- }
- else if (!IS_ARGS_KW_SPLAT_MUT(ci)) {
- /* Convert a hash keyword splat to a new hash unless
- * a mutable keyword splat was passed.
- */
- RARRAY_ASET(argv_ary, hash_idx, rb_hash_dup(keyword_hash));
- }
+ if (UNLIKELY(argc > 0 &&
+ RB_TYPE_P((last_hash = argv[argc-1]), T_HASH) &&
+ (((struct RHash *)last_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS))) {
+ if (!RHASH_EMPTY_P(last_hash)) {
+ return vm_call_cfunc_other(ec, reg_cfp, calling);
}
+ argc_offset++;
+ }
+ return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 0, argc_offset);
+}
- // special case of CALLER_REMOVE_EMPTY_KW_SPLAT()
- if (UNLIKELY(calling->kw_splat)) {
- VALUE kw_hash = RARRAY_AREF(argv_ary, RARRAY_LEN(argv_ary)-1);
- if (RHASH_EMPTY_P(kw_hash)) {
- rb_ary_pop(argv_ary);
- calling->kw_splat = false;
- }
- }
+static inline VALUE
+vm_call_cfunc_only_splat_kw(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
+{
+ RB_DEBUG_COUNTER_INC(ccf_cfunc_only_splat_kw);
+ VALUE keyword_hash = reg_cfp->sp[-1];
- int argc = RARRAY_LENINT(argv_ary);
- VALUE *argv = (void *)RARRAY_CONST_PTR_TRANSIENT(argv_ary);
- VALUE *stack_bottom = reg_cfp->sp - 2;
+ if (keyword_hash == Qnil || (RB_TYPE_P(keyword_hash, T_HASH) && RHASH_EMPTY_P(keyword_hash))) {
+ return vm_call_cfunc_array_argv(ec, reg_cfp, calling, 1, 0);
+ }
- VM_ASSERT(calling->argc == 1);
- VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
- VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
+ return vm_call_cfunc_other(ec, reg_cfp, calling);
+}
- return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
- }
- else {
- CALLER_SETUP_ARG(reg_cfp, calling, ci);
- CALLER_REMOVE_EMPTY_KW_SPLAT(reg_cfp, calling, ci);
- CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat);
+static VALUE
+vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
+{
+ const struct rb_callinfo *ci = calling->cd->ci;
+ RB_DEBUG_COUNTER_INC(ccf_cfunc);
- return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
+ if (IS_ARGS_SPLAT(ci)) {
+ if (!IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 1) {
+ // f(*a)
+ CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat, TRUE);
+ return vm_call_cfunc_only_splat(ec, reg_cfp, calling);
+ }
+ if (IS_ARGS_KW_SPLAT(ci) && vm_ci_argc(ci) == 2) {
+ // f(*a, **kw)
+ CC_SET_FASTPATH(calling->cc, vm_call_cfunc_only_splat_kw, TRUE);
+ return vm_call_cfunc_only_splat_kw(ec, reg_cfp, calling);
+ }
}
+
+ CC_SET_FASTPATH(calling->cc, vm_call_cfunc_other, TRUE);
+ return vm_call_cfunc_other(ec, reg_cfp, calling);
}
static VALUE
@@ -3401,7 +3754,7 @@ vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_call
const struct rb_callcache *cc = calling->cc;
RB_DEBUG_COUNTER_INC(ccf_ivar);
cfp->sp -= 1;
- VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
+ VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE, Qnil);
return ivar;
}
@@ -3414,7 +3767,7 @@ vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, cons
attr_index_t index = vm_cc_attr_index(cc);
shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
ID id = vm_cc_cme(cc)->def->body.attr.id;
- rb_check_frozen_internal(obj);
+ rb_check_frozen(obj);
VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
if (UNDEF_P(res)) {
switch (BUILTIN_TYPE(obj)) {
@@ -3441,12 +3794,6 @@ vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_c
return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
}
-bool
-rb_vm_call_ivar_attrset_p(const vm_call_handler ch)
-{
- return (ch == vm_call_ivar || ch == vm_call_attrset);
-}
-
static inline VALUE
vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
{
@@ -3463,30 +3810,112 @@ vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling
/* control block frame */
GetProcPtr(procv, proc);
- val = rb_vm_invoke_bmethod(ec, proc, calling->recv, calling->argc, argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
+ val = rb_vm_invoke_bmethod(ec, proc, calling->recv, CALLING_ARGC(calling), argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
return val;
}
+static int vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type);
+static VALUE invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc);
+
static VALUE
-vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
+vm_call_iseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
- RB_DEBUG_COUNTER_INC(ccf_bmethod);
+ RB_DEBUG_COUNTER_INC(ccf_iseq_bmethod);
+
+ const struct rb_callcache *cc = calling->cc;
+ const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
+ VALUE procv = cme->def->body.bmethod.proc;
+
+ if (!RB_OBJ_SHAREABLE_P(procv) &&
+ cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
+ rb_raise(rb_eRuntimeError, "defined with an un-shareable Proc in a different Ractor");
+ }
+
+ rb_proc_t *proc;
+ GetProcPtr(procv, proc);
+ const struct rb_block *block = &proc->block;
+
+ while (vm_block_type(block) == block_type_proc) {
+ block = vm_proc_block(block->as.proc);
+ }
+ VM_ASSERT(vm_block_type(block) == block_type_iseq);
+
+ const struct rb_captured_block *captured = &block->as.captured;
+ const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
+ VALUE * const argv = cfp->sp - calling->argc;
+ const int arg_size = ISEQ_BODY(iseq)->param.size;
+
+ int opt_pc;
+ if (vm_ci_flag(calling->cd->ci) & VM_CALL_ARGS_SIMPLE) {
+ opt_pc = vm_callee_setup_block_arg(ec, calling, calling->cd->ci, iseq, argv, arg_setup_method);
+ }
+ else {
+ opt_pc = setup_parameters_complex(ec, iseq, calling, calling->cd->ci, argv, arg_setup_method);
+ }
+
+ cfp->sp = argv - 1; // -1 for the receiver
+
+ vm_push_frame(ec, iseq,
+ VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA,
+ calling->recv,
+ VM_GUARDED_PREV_EP(captured->ep),
+ (VALUE)cme,
+ ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
+ argv + arg_size,
+ ISEQ_BODY(iseq)->local_table_size - arg_size,
+ ISEQ_BODY(iseq)->stack_max);
+
+ return Qundef;
+}
+
+static VALUE
+vm_call_noniseq_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
+{
+ RB_DEBUG_COUNTER_INC(ccf_noniseq_bmethod);
VALUE *argv;
int argc;
- const struct rb_callinfo *ci = calling->ci;
-
- CALLER_SETUP_ARG(cfp, calling, ci);
- argc = calling->argc;
- argv = ALLOCA_N(VALUE, argc);
- MEMCPY(argv, cfp->sp - argc, VALUE, argc);
- cfp->sp += - argc - 1;
+ CALLER_SETUP_ARG(cfp, calling, calling->cd->ci, ALLOW_HEAP_ARGV);
+ if (UNLIKELY(calling->heap_argv)) {
+ argv = RARRAY_PTR(calling->heap_argv);
+ cfp->sp -= 2;
+ }
+ else {
+ argc = calling->argc;
+ argv = ALLOCA_N(VALUE, argc);
+ MEMCPY(argv, cfp->sp - argc, VALUE, argc);
+ cfp->sp += - argc - 1;
+ }
return vm_call_bmethod_body(ec, calling, argv);
}
-MJIT_FUNC_EXPORTED VALUE
+static VALUE
+vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
+{
+ RB_DEBUG_COUNTER_INC(ccf_bmethod);
+
+ const struct rb_callcache *cc = calling->cc;
+ const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
+ VALUE procv = cme->def->body.bmethod.proc;
+ rb_proc_t *proc;
+ GetProcPtr(procv, proc);
+ const struct rb_block *block = &proc->block;
+
+ while (vm_block_type(block) == block_type_proc) {
+ block = vm_proc_block(block->as.proc);
+ }
+ if (vm_block_type(block) == block_type_iseq) {
+ CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
+ return vm_call_iseq_bmethod(ec, cfp, calling);
+ }
+
+ CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
+ return vm_call_noniseq_bmethod(ec, cfp, calling);
+}
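
Editor's note: the new vm_call_bmethod classifies the proc only once — it unwraps nested proc blocks, then installs either vm_call_iseq_bmethod or vm_call_noniseq_bmethod as the call-cache fastpath, so later calls through the same cache skip the check entirely. Below is a minimal, self-contained sketch of that memoize-the-handler pattern; every name in it (struct cache, dispatch, fast_path, slow_path) is hypothetical and not Ruby's API.

/* Sketch: pick a handler on the first call, cache the function pointer,
 * and reuse it on every later call. */
#include <stdio.h>

struct cache { int (*handler)(int); };

static int fast_path(int x) { return x + 1; }
static int slow_path(int x) { return x * 2; }

static int
dispatch(struct cache *cc, int x, int want_fast)
{
    if (cc->handler == NULL) {            /* first call: classify and memoize */
        cc->handler = want_fast ? fast_path : slow_path;
    }
    return cc->handler(x);                /* later calls: no re-check */
}

int
main(void)
{
    struct cache cc = { NULL };
    printf("%d\n", dispatch(&cc, 10, 1)); /* 11: chooses fast_path */
    printf("%d\n", dispatch(&cc, 10, 1)); /* 11: reuses the cached handler */
    return 0;
}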
+
+VALUE
rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
{
VALUE klass = current_class;
@@ -3519,7 +3948,7 @@ aliased_callable_method_entry(const rb_callable_method_entry_t *me)
VM_ASSERT(RB_TYPE_P(orig_me->owner, T_MODULE));
cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
- if (me->def->alias_count + me->def->complemented_count == 0) {
+ if (me->def->reference_count == 1) {
RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
}
else {
@@ -3570,55 +3999,73 @@ vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol, int flags)
{
ASSUME(calling->argc >= 0);
- /* Also assumes CALLER_SETUP_ARG is already done. */
enum method_missing_reason missing_reason = MISSING_NOENTRY;
int argc = calling->argc;
VALUE recv = calling->recv;
VALUE klass = CLASS_OF(recv);
ID mid = rb_check_id(&symbol);
- flags |= VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
+ flags |= VM_CALL_OPT_SEND;
if (UNLIKELY(! mid)) {
mid = idMethodMissing;
missing_reason = ci_missing_reason(ci);
ec->method_missing_reason = missing_reason;
- /* E.g. when argc == 2
- *
- * | | | | TOPN
- * | | +------+
- * | | +---> | arg1 | 0
- * +------+ | +------+
- * | arg1 | -+ +-> | arg0 | 1
- * +------+ | +------+
- * | arg0 | ---+ | sym | 2
- * +------+ +------+
- * | recv | | recv | 3
- * --+------+--------+------+------
- */
- int i = argc;
- CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
- INC_SP(1);
- MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
- argc = ++calling->argc;
-
- if (rb_method_basic_definition_p(klass, idMethodMissing)) {
- /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
- TOPN(i) = symbol;
- int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
- const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
- VALUE exc = rb_make_no_method_exception(
- rb_eNoMethodError, 0, recv, argc, argv, priv);
-
- rb_exc_raise(exc);
+ VALUE argv_ary;
+ if (UNLIKELY(argv_ary = calling->heap_argv)) {
+ if (rb_method_basic_definition_p(klass, idMethodMissing)) {
+ rb_ary_unshift(argv_ary, symbol);
+
+ /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
+ int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
+ VALUE exc = rb_make_no_method_exception(
+ rb_eNoMethodError, 0, recv, RARRAY_LENINT(argv_ary), RARRAY_CONST_PTR(argv_ary), priv);
+
+ rb_exc_raise(exc);
+ }
+ rb_ary_unshift(argv_ary, rb_str_intern(symbol));
}
else {
- TOPN(i) = rb_str_intern(symbol);
+ /* E.g. when argc == 2
+ *
+ * | | | | TOPN
+ * | | +------+
+ * | | +---> | arg1 | 0
+ * +------+ | +------+
+ * | arg1 | -+ +-> | arg0 | 1
+ * +------+ | +------+
+ * | arg0 | ---+ | sym | 2
+ * +------+ +------+
+ * | recv | | recv | 3
+ * --+------+--------+------+------
+ */
+ int i = argc;
+ CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
+ INC_SP(1);
+ MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
+ argc = ++calling->argc;
+
+ if (rb_method_basic_definition_p(klass, idMethodMissing)) {
+ /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
+ TOPN(i) = symbol;
+ int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
+ const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
+ VALUE exc = rb_make_no_method_exception(
+ rb_eNoMethodError, 0, recv, argc, argv, priv);
+
+ rb_exc_raise(exc);
+ }
+ else {
+ TOPN(i) = rb_str_intern(symbol);
+ }
}
}
- calling->ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci));
+ calling->cd = &(struct rb_call_data) {
+ .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
+ .cc = NULL,
+ };
calling->cc = &VM_CC_ON_STACK(klass,
vm_call_general,
{ .method_missing_reason = missing_reason },
@@ -3651,44 +4098,92 @@ vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
}
static VALUE
-vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
+vm_call_opt_send0(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, int flags)
{
- RB_DEBUG_COUNTER_INC(ccf_opt_send);
-
+ const struct rb_callinfo *ci = calling->cd->ci;
int i;
VALUE sym;
- CALLER_SETUP_ARG(reg_cfp, calling, calling->ci);
-
i = calling->argc - 1;
if (calling->argc == 0) {
rb_raise(rb_eArgError, "no method name given");
}
- else {
- sym = TOPN(i);
- /* E.g. when i == 2
- *
- * | | | | TOPN
- * +------+ | |
- * | arg1 | ---+ | | 0
- * +------+ | +------+
- * | arg0 | -+ +-> | arg1 | 1
- * +------+ | +------+
- * | sym | +---> | arg0 | 2
- * +------+ +------+
- * | recv | | recv | 3
- * --+------+--------+------+------
- */
- /* shift arguments */
- if (i > 0) {
- MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
+
+ sym = TOPN(i);
+ /* E.g. when i == 2
+ *
+ * | | | | TOPN
+ * +------+ | |
+ * | arg1 | ---+ | | 0
+ * +------+ | +------+
+ * | arg0 | -+ +-> | arg1 | 1
+ * +------+ | +------+
+ * | sym | +---> | arg0 | 2
+ * +------+ +------+
+ * | recv | | recv | 3
+ * --+------+--------+------+------
+ */
+ /* shift arguments */
+ if (i > 0) {
+ MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
+ }
+ calling->argc -= 1;
+ DEC_SP(1);
+
+ return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
+}
+
+static VALUE
+vm_call_opt_send_complex(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
+{
+ RB_DEBUG_COUNTER_INC(ccf_opt_send_complex);
+ const struct rb_callinfo *ci = calling->cd->ci;
+ int flags = VM_CALL_FCALL;
+ VALUE sym;
+
+ VALUE argv_ary;
+ CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
+ if (UNLIKELY(argv_ary = calling->heap_argv)) {
+ sym = rb_ary_shift(argv_ary);
+ flags |= VM_CALL_ARGS_SPLAT;
+ if (calling->kw_splat) {
+ VALUE last_hash = rb_ary_last(0, NULL, argv_ary);
+ ((struct RHash *)last_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
+ calling->kw_splat = 0;
}
- calling->argc -= 1;
- DEC_SP(1);
+ return vm_call_symbol(ec, reg_cfp, calling, ci, sym, flags);
+ }
- return vm_call_symbol(ec, reg_cfp, calling, calling->ci, sym, VM_CALL_FCALL);
+ if (calling->kw_splat) flags |= VM_CALL_KW_SPLAT;
+ return vm_call_opt_send0(ec, reg_cfp, calling, flags);
+}
+
+static VALUE
+vm_call_opt_send_simple(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
+{
+ RB_DEBUG_COUNTER_INC(ccf_opt_send_simple);
+ return vm_call_opt_send0(ec, reg_cfp, calling, vm_ci_flag(calling->cd->ci) | VM_CALL_FCALL);
+}
+
+static VALUE
+vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
+{
+ RB_DEBUG_COUNTER_INC(ccf_opt_send);
+
+ const struct rb_callinfo *ci = calling->cd->ci;
+ int flags = vm_ci_flag(ci);
+
+ if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
+ ((calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
+ (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
+ ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
+ CC_SET_FASTPATH(calling->cc, vm_call_opt_send_complex, TRUE);
+ return vm_call_opt_send_complex(ec, reg_cfp, calling);
}
+
+ CC_SET_FASTPATH(calling->cc, vm_call_opt_send_simple, TRUE);
+ return vm_call_opt_send_simple(ec, reg_cfp, calling);
}
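
Editor's note: vm_call_opt_send now routes a call to the complex path only for argument shapes where splat/keyword expansion must run before the method name can be pulled off the argument list (a lone splat, splat plus kw splat, or kwargs covering every positional). A small stand-alone sketch of that predicate follows; the F_* flag values and send_needs_complex_path are invented for illustration and do not match vm_callinfo.h.

/* Sketch: decide whether send needs the slow, heap-argv-capable setup. */
#include <stdbool.h>
#include <stdio.h>

#define F_ARGS_SIMPLE 0x01
#define F_ARGS_SPLAT  0x02
#define F_KW_SPLAT    0x04
#define F_KWARG       0x08

static bool
send_needs_complex_path(unsigned int flags, int argc, int kwarg_len)
{
    if (flags & F_ARGS_SIMPLE) return false;
    return (argc == 1 && (flags & (F_ARGS_SPLAT | F_KW_SPLAT))) ||
           (argc == 2 && (flags & F_ARGS_SPLAT) && (flags & F_KW_SPLAT)) ||
           ((flags & F_KWARG) && kwarg_len == argc);
}

int
main(void)
{
    printf("%d\n", send_needs_complex_path(F_ARGS_SPLAT, 1, 0));  /* 1: send(*args) */
    printf("%d\n", send_needs_complex_path(F_ARGS_SIMPLE, 2, 0)); /* 0: send(:m, x) */
    return 0;
}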
static VALUE
@@ -3698,13 +4193,10 @@ vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_
RB_DEBUG_COUNTER_INC(ccf_method_missing);
VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
- unsigned int argc;
-
- CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
- argc = calling->argc + 1;
+ unsigned int argc, flag;
- unsigned int flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
- calling->argc = argc;
+ flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci);
+ argc = ++calling->argc;
/* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
@@ -3716,7 +4208,10 @@ vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_
INC_SP(1);
ec->method_missing_reason = reason;
- calling->ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
+ calling->cd = &(struct rb_call_data) {
+ .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
+ .cc = NULL,
+ };
calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
return vm_call_method(ec, reg_cfp, calling);
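
Editor's note: vm_call_method_missing_body still rewrites m(a, b, c) into method_missing(:m, a, b, c) by sliding the arguments up one stack slot and writing the method name into the freed slot. A toy stand-alone illustration of that shift, using a plain C array instead of the VM stack:

/* Sketch: prepend the method name by shifting the arguments up by one. */
#include <stdio.h>
#include <string.h>

int
main(void)
{
    /* slot 0 is the receiver, slots 1..3 hold the arguments */
    const char *stack[8] = { "recv", "a", "b", "c" };
    int argc = 3;

    memmove(&stack[2], &stack[1], argc * sizeof(stack[0])); /* shift args up */
    stack[1] = ":m";                                        /* prepend the name */
    argc++;

    for (int i = 0; i <= argc; i++) printf("%s ", stack[i]);
    printf("\n"); /* recv :m a b c */
    return 0;
}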
@@ -3725,7 +4220,7 @@ vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_
static VALUE
vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
- return vm_call_method_missing_body(ec, reg_cfp, calling, calling->ci, vm_cc_cmethod_missing_reason(calling->cc));
+ return vm_call_method_missing_body(ec, reg_cfp, calling, calling->cd->ci, vm_cc_cmethod_missing_reason(calling->cc));
}
static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
@@ -3734,7 +4229,7 @@ vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
{
klass = RCLASS_SUPER(klass);
- const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->ci)) : NULL;
+ const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->cd->ci)) : NULL;
if (cme == NULL) {
return vm_call_method_nome(ec, cfp, calling);
}
@@ -3803,7 +4298,7 @@ refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
static const rb_callable_method_entry_t *
search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
- ID mid = vm_ci_mid(calling->ci);
+ ID mid = vm_ci_mid(calling->cd->ci);
const rb_cref_t *cref = vm_get_cref(cfp->ep);
const struct rb_callcache * const cc = calling->cc;
const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
@@ -3850,12 +4345,19 @@ search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struc
static VALUE
vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
- struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
- search_refined_method(ec, cfp, calling));
+ const rb_callable_method_entry_t *ref_cme = search_refined_method(ec, cfp, calling);
- if (vm_cc_cme(ref_cc)) {
- calling->cc= ref_cc;
- return vm_call_method(ec, cfp, calling);
+ if (ref_cme) {
+ if (calling->cd->cc) {
+ const struct rb_callcache *cc = calling->cc = vm_cc_new(vm_cc_cme(calling->cc)->defined_class, ref_cme, vm_call_general, cc_type_refinement);
+ RB_OBJ_WRITE(cfp->iseq, &calling->cd->cc, cc);
+ return vm_call_method(ec, cfp, calling);
+ }
+ else {
+ struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, ref_cme);
+ calling->cc= ref_cc;
+ return vm_call_method(ec, cfp, calling);
+ }
}
else {
return vm_call_method_nome(ec, cfp, calling);
@@ -3886,7 +4388,7 @@ vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
{
RB_DEBUG_COUNTER_INC(ccf_opt_call);
- const struct rb_callinfo *ci = calling->ci;
+ const struct rb_callinfo *ci = calling->cd->ci;
VALUE procval = calling->recv;
return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
}
@@ -3897,7 +4399,7 @@ vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
- const struct rb_callinfo *ci = calling->ci;
+ const struct rb_callinfo *ci = calling->cd->ci;
if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
@@ -3962,6 +4464,19 @@ vm_call_opt_struct_aset(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
NOINLINE(static VALUE vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
const struct rb_callinfo *ci, const struct rb_callcache *cc));
+#define VM_CALL_METHOD_ATTR(var, func, nohook) \
+ if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
+ EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
+ vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
+ var = func; \
+ EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
+ vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
+ } \
+ else { \
+ nohook; \
+ var = func; \
+ }
+
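Editor's note: VM_CALL_METHOD_ATTR is moved above its first use in vm_call_optimized; its job is to pay for C_CALL/C_RETURN notification only when the global event mask says a hook is armed, and to run the no-hook setup (e.g. CC_SET_FASTPATH) otherwise. A self-contained sketch of that guard-the-hooks pattern; event_flags, the EV_* bits, and CALL_WITH_HOOKS are invented stand-ins.

/* Sketch: wrap a call in call/return events only when tracing is enabled. */
#include <stdio.h>

static unsigned int event_flags;            /* stand-in for ruby_vm_event_flags */
#define EV_C_CALL   0x1
#define EV_C_RETURN 0x2

#define CALL_WITH_HOOKS(var, expr) do {                       \
    if (event_flags & (EV_C_CALL | EV_C_RETURN)) {            \
        puts("c-call");                                       \
        (var) = (expr);                                       \
        puts("c-return");                                     \
    }                                                         \
    else {                                                    \
        (var) = (expr);                                       \
    }                                                         \
} while (0)

static int add1(int x) { return x + 1; }

int
main(void)
{
    int v;
    CALL_WITH_HOOKS(v, add1(41));   /* hooks skipped: mask is clear */
    event_flags |= EV_C_CALL;
    CALL_WITH_HOOKS(v, add1(v));    /* hooks fire around the call */
    printf("%d\n", v);              /* 43 */
    return 0;
}

The same trade-off shows up throughout this file: the common case (no TracePoint listening) should not pay for event bookkeeping.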
static VALUE
vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
const struct rb_callinfo *ci, const struct rb_callcache *cc)
@@ -3976,45 +4491,43 @@ vm_call_optimized(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb
case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
return vm_call_opt_block_call(ec, cfp, calling);
- case OPTIMIZED_METHOD_TYPE_STRUCT_AREF:
- CALLER_SETUP_ARG(cfp, calling, ci);
- CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
+ case OPTIMIZED_METHOD_TYPE_STRUCT_AREF: {
+ CALLER_SETUP_ARG(cfp, calling, ci, 0);
rb_check_arity(calling->argc, 0, 0);
- CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE));
- return vm_call_opt_struct_aref(ec, cfp, calling);
- case OPTIMIZED_METHOD_TYPE_STRUCT_ASET:
- CALLER_SETUP_ARG(cfp, calling, ci);
- CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
+ VALUE v;
+ VM_CALL_METHOD_ATTR(v,
+ vm_call_opt_struct_aref(ec, cfp, calling),
+ set_vm_cc_ivar(cc); \
+ CC_SET_FASTPATH(cc, vm_call_opt_struct_aref, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
+ return v;
+ }
+ case OPTIMIZED_METHOD_TYPE_STRUCT_ASET: {
+ CALLER_SETUP_ARG(cfp, calling, ci, 1);
rb_check_arity(calling->argc, 1, 1);
- CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE));
- return vm_call_opt_struct_aset(ec, cfp, calling);
+
+ VALUE v;
+ VM_CALL_METHOD_ATTR(v,
+ vm_call_opt_struct_aset(ec, cfp, calling),
+ set_vm_cc_ivar(cc); \
+ CC_SET_FASTPATH(cc, vm_call_opt_struct_aset, (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE)))
+ return v;
+ }
default:
rb_bug("vm_call_method: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type);
}
}
-#define VM_CALL_METHOD_ATTR(var, func, nohook) \
- if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
- EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
- vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
- var = func; \
- EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
- vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
- } \
- else { \
- nohook; \
- var = func; \
- }
-
static VALUE
vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
- const struct rb_callinfo *ci = calling->ci;
+ const struct rb_callinfo *ci = calling->cd->ci;
const struct rb_callcache *cc = calling->cc;
const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
VALUE v;
+ VM_ASSERT(! METHOD_ENTRY_INVALIDATED(cme));
+
switch (cme->def->type) {
case VM_METHOD_TYPE_ISEQ:
CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
@@ -4026,8 +4539,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
return vm_call_cfunc(ec, cfp, calling);
case VM_METHOD_TYPE_ATTRSET:
- CALLER_SETUP_ARG(cfp, calling, ci);
- CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
+ CALLER_SETUP_ARG(cfp, calling, ci, 1);
rb_check_arity(calling->argc, 1, 1);
@@ -4062,8 +4574,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
return v;
case VM_METHOD_TYPE_IVAR:
- CALLER_SETUP_ARG(cfp, calling, ci);
- CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
+ CALLER_SETUP_ARG(cfp, calling, ci, 0);
rb_check_arity(calling->argc, 0, 0);
vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
@@ -4109,13 +4620,18 @@ static VALUE
vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
/* method missing */
- const struct rb_callinfo *ci = calling->ci;
+ const struct rb_callinfo *ci = calling->cd->ci;
const int stat = ci_missing_reason(ci);
if (vm_ci_mid(ci) == idMethodMissing) {
- rb_control_frame_t *reg_cfp = cfp;
- VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
- vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
+ if (UNLIKELY(calling->heap_argv)) {
+ vm_raise_method_missing(ec, RARRAY_LENINT(calling->heap_argv), RARRAY_CONST_PTR(calling->heap_argv), calling->recv, stat);
+ }
+ else {
+ rb_control_frame_t *reg_cfp = cfp;
+ VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
+ vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
+ }
}
else {
return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
@@ -4138,7 +4654,7 @@ vm_defined_class_for_protected_call(const rb_callable_method_entry_t *me)
static inline VALUE
vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
- const struct rb_callinfo *ci = calling->ci;
+ const struct rb_callinfo *ci = calling->cd->ci;
const struct rb_callcache *cc = calling->cc;
VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
@@ -4242,11 +4758,7 @@ vm_super_outside(void)
static const struct rb_callcache *
empty_cc_for_super(void)
{
-#ifdef MJIT_HEADER
- return rb_vm_empty_cc_for_super();
-#else
return &vm_empty_cc_for_super;
-#endif
}
static const struct rb_callcache *
@@ -4298,7 +4810,7 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c
if (!klass) {
/* bound instance method of module */
- cc = vm_cc_new(klass, NULL, vm_call_method_missing);
+ cc = vm_cc_new(klass, NULL, vm_call_method_missing, cc_type_super);
RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
}
else {
@@ -4313,7 +4825,7 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c
else if (cached_cme->called_id != mid) {
const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
if (cme) {
- cc = vm_cc_new(klass, cme, vm_call_super_method);
+ cc = vm_cc_new(klass, cme, vm_call_super_method, cc_type_super);
RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
}
else {
@@ -4396,6 +4908,12 @@ vm_yield_with_cfunc(rb_execution_context_t *ec,
return val;
}
+VALUE
+rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv)
+{
+ return vm_yield_with_cfunc(ec, captured, captured->self, argc, argv, 0, VM_BLOCK_HANDLER_NONE, NULL);
+}
+
static VALUE
vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
{
@@ -4437,8 +4955,7 @@ vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *ca
rb_control_frame_t *cfp = ec->cfp;
VALUE arg0;
- CALLER_SETUP_ARG(cfp, calling, ci);
- CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
+ CALLER_SETUP_ARG(cfp, calling, ci, ISEQ_BODY(iseq)->param.lead_num);
if (arg_setup_type == arg_setup_block &&
calling->argc == 1 &&
@@ -4473,16 +4990,17 @@ vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *ca
}
static int
-vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int kw_splat, VALUE block_handler, enum arg_setup_type arg_setup_type)
+vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int flags, VALUE block_handler, enum arg_setup_type arg_setup_type)
{
struct rb_calling_info calling_entry, *calling;
calling = &calling_entry;
calling->argc = argc;
calling->block_handler = block_handler;
- calling->kw_splat = kw_splat;
+ calling->kw_splat = (flags & VM_CALL_KW_SPLAT) ? 1 : 0;
calling->recv = Qundef;
- struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, (kw_splat ? VM_CALL_KW_SPLAT : 0), 0, 0);
+ calling->heap_argv = 0;
+ struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, flags, 0, 0);
return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
}
@@ -4498,7 +5016,8 @@ vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
const int arg_size = ISEQ_BODY(iseq)->param.size;
VALUE * const rsp = GET_SP() - calling->argc;
- int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, rsp, is_lambda ? arg_setup_method : arg_setup_block);
+ VALUE * const argv = rsp;
+ int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, is_lambda ? arg_setup_method : arg_setup_block);
SET_SP(rsp);
@@ -4518,15 +5037,47 @@ vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling, const struct rb_callinfo *ci,
MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
{
- if (calling->argc < 1) {
- rb_raise(rb_eArgError, "no receiver given");
+ VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
+ int flags = vm_ci_flag(ci);
+
+ if (UNLIKELY(!(flags & VM_CALL_ARGS_SIMPLE) &&
+ ((calling->argc == 0) ||
+ (calling->argc == 1 && (flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT))) ||
+ (calling->argc == 2 && (flags & VM_CALL_ARGS_SPLAT) && (flags & VM_CALL_KW_SPLAT)) ||
+ ((flags & VM_CALL_KWARG) && (vm_ci_kwarg(ci)->keyword_len == calling->argc))))) {
+ CALLER_SETUP_ARG(reg_cfp, calling, ci, ALLOW_HEAP_ARGV);
+ flags = 0;
+ if (UNLIKELY(calling->heap_argv)) {
+#if VM_ARGC_STACK_MAX < 0
+ if (RARRAY_LEN(calling->heap_argv) < 1) {
+ rb_raise(rb_eArgError, "no receiver given");
+ }
+#endif
+ calling->recv = rb_ary_shift(calling->heap_argv);
+ // Modify stack to avoid cfp consistency error
+ reg_cfp->sp++;
+ reg_cfp->sp[-1] = reg_cfp->sp[-2];
+ reg_cfp->sp[-2] = calling->recv;
+ flags |= VM_CALL_ARGS_SPLAT;
+ }
+ else {
+ if (calling->argc < 1) {
+ rb_raise(rb_eArgError, "no receiver given");
+ }
+ calling->recv = TOPN(--calling->argc);
+ }
+ if (calling->kw_splat) {
+ flags |= VM_CALL_KW_SPLAT;
+ }
}
else {
- VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
- CALLER_SETUP_ARG(reg_cfp, calling, ci);
+ if (calling->argc < 1) {
+ rb_raise(rb_eArgError, "no receiver given");
+ }
calling->recv = TOPN(--calling->argc);
- return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, 0);
}
+
+ return vm_call_symbol(ec, reg_cfp, calling, ci, symbol, flags);
}
static VALUE
@@ -4537,10 +5088,9 @@ vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
VALUE val;
int argc;
const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
- CALLER_SETUP_ARG(ec->cfp, calling, ci);
- CALLER_REMOVE_EMPTY_KW_SPLAT(ec->cfp, calling, ci);
+ CALLER_SETUP_ARG(ec->cfp, calling, ci, ALLOW_HEAP_ARGV_KEEP_KWSPLAT);
argc = calling->argc;
- val = vm_yield_with_cfunc(ec, captured, captured->self, argc, STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
+ val = vm_yield_with_cfunc(ec, captured, captured->self, CALLING_ARGC(calling), calling->heap_argv ? RARRAY_CONST_PTR(calling->heap_argv) : STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
POPN(argc); /* TODO: should put before C/yield? */
return val;
}
@@ -4713,16 +5263,16 @@ vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_
if (me) {
VALUE klass = vm_search_normal_superclass(me->defined_class);
+ if (!klass) return false;
+
ID id = me->def->original_id;
return rb_method_boundp(klass, id, 0);
}
}
break;
- case DEFINED_REF:{
- return vm_getspecial(ec, GET_LEP(), Qfalse, FIX2INT(obj)) != Qnil;
- break;
- }
+ case DEFINED_REF:
+ return RTEST(vm_backref_defined(ec, GET_LEP(), FIX2INT(obj)));
default:
rb_bug("unimplemented defined? type (VM)");
break;
@@ -4774,15 +5324,29 @@ vm_concat_array(VALUE ary1, VALUE ary2st)
if (NIL_P(tmp1)) {
tmp1 = rb_ary_new3(1, ary1);
}
+ if (tmp1 == ary1) {
+ tmp1 = rb_ary_dup(ary1);
+ }
if (NIL_P(tmp2)) {
- tmp2 = rb_ary_new3(1, ary2);
+ return rb_ary_push(tmp1, ary2);
+ } else {
+ return rb_ary_concat(tmp1, tmp2);
}
+}
- if (tmp1 == ary1) {
- tmp1 = rb_ary_dup(ary1);
+static VALUE
+vm_concat_to_array(VALUE ary1, VALUE ary2st)
+{
+ /* ary1 must be a newly created array */
+ const VALUE ary2 = ary2st;
+ VALUE tmp2 = rb_check_to_array(ary2);
+
+ if (NIL_P(tmp2)) {
+ return rb_ary_push(ary1, ary2);
+ } else {
+ return rb_ary_concat(ary1, tmp2);
}
- return rb_ary_concat(tmp1, tmp2);
}
// YJIT implementation is using the C function
@@ -4793,6 +5357,12 @@ rb_vm_concat_array(VALUE ary1, VALUE ary2st)
return vm_concat_array(ary1, ary2st);
}
+VALUE
+rb_vm_concat_to_array(VALUE ary1, VALUE ary2st)
+{
+ return vm_concat_to_array(ary1, ary2st);
+}
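
Editor's note: vm_concat_to_array assumes its receiver is a freshly created array, so it skips the defensive dup that vm_concat_array still needs and only decides between pushing the operand as one element (when it has no to_a conversion) and concatenating it. A rough model of that decision with plain C structs; struct val and concat_or_push are illustrative only and do not model real VALUEs.

/* Sketch: coerce-or-push — splice the operand if it converts to an array,
 * otherwise append it as a single element. */
#include <stdio.h>

struct val { int is_ary; int elems[4]; int len; };

static void
concat_or_push(struct val *dst, const struct val *src)
{
    if (!src->is_ary) {                     /* conversion failed (nil) */
        dst->elems[dst->len++] = src->elems[0];
    }
    else {                                  /* splice all elements */
        for (int i = 0; i < src->len; i++)
            dst->elems[dst->len++] = src->elems[i];
    }
}

int
main(void)
{
    struct val dst = { 1, { 1 }, 1 };
    struct val scalar = { 0, { 9 }, 1 };
    struct val ary = { 1, { 2, 3 }, 2 };

    concat_or_push(&dst, &scalar);          /* [1, 9]       */
    concat_or_push(&dst, &ary);             /* [1, 9, 2, 3] */
    for (int i = 0; i < dst.len; i++) printf("%d ", dst.elems[i]);
    printf("\n");
    return 0;
}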
+
static VALUE
vm_splat_array(VALUE flag, VALUE ary)
{
@@ -4840,6 +5410,12 @@ vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t
}
}
+VALUE
+rb_vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
+{
+ return vm_check_match(ec, target, pattern, flag);
+}
+
static VALUE
vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
{
@@ -5087,7 +5663,7 @@ vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqv
rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
// Set max_iv_count on klasses based on number of ivar sets that are in the initialize method
- if (id == rb_intern("initialize") && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
+ if (id == idInitialize && klass != rb_cObject && RB_TYPE_P(klass, T_CLASS) && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
RCLASS_EXT(klass)->max_iv_count = rb_estimate_iv_count(klass, (const rb_iseq_t *)iseqval);
}
@@ -5103,7 +5679,7 @@ vm_invokeblock_i(struct rb_execution_context_struct *ec,
struct rb_control_frame_struct *reg_cfp,
struct rb_calling_info *calling)
{
- const struct rb_callinfo *ci = calling->ci;
+ const struct rb_callinfo *ci = calling->cd->ci;
VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
if (block_handler == VM_BLOCK_HANDLER_NONE) {
@@ -5114,52 +5690,19 @@ vm_invokeblock_i(struct rb_execution_context_struct *ec,
}
}
-#ifdef MJIT_HEADER
-static const struct rb_callcache *
-vm_search_method_wrap(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE recv)
-{
- return vm_search_method((VALUE)reg_cfp->iseq, cd, recv);
-}
-
-static const struct rb_callcache *
-vm_search_invokeblock(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE recv)
-{
- static const struct rb_callcache cc = {
- .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
- .klass = 0,
- .cme_ = 0,
- .call_ = vm_invokeblock_i,
- .aux_ = {0},
- };
- return &cc;
-}
-
-# define mexp_search_method vm_search_method_wrap
-# define mexp_search_super vm_search_super_method
-# define mexp_search_invokeblock vm_search_invokeblock
-#else
enum method_explorer_type {
mexp_search_method,
mexp_search_invokeblock,
mexp_search_super,
};
-#endif
-static
-#ifndef MJIT_HEADER
-inline
-#endif
-VALUE
+static inline VALUE
vm_sendish(
struct rb_execution_context_struct *ec,
struct rb_control_frame_struct *reg_cfp,
struct rb_call_data *cd,
VALUE block_handler,
-#ifdef MJIT_HEADER
- const struct rb_callcache *(*method_explorer)(const struct rb_control_frame_struct *cfp, struct rb_call_data *cd, VALUE recv)
-#else
enum method_explorer_type method_explorer
-#endif
) {
VALUE val = Qundef;
const struct rb_callinfo *ci = cd->ci;
@@ -5171,14 +5714,9 @@ vm_sendish(
.kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
.recv = recv,
.argc = argc,
- .ci = ci,
+ .cd = cd,
};
-// The enum-based branch and inlining are faster in VM, but function pointers without inlining are faster in JIT.
-#ifdef MJIT_HEADER
- calling.cc = cc = method_explorer(GET_CFP(), cd, recv);
- val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
-#else
switch (method_explorer) {
case mexp_search_method:
calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
@@ -5186,45 +5724,53 @@ vm_sendish(
break;
case mexp_search_super:
calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
- calling.ci = cd->ci; // TODO: does it safe?
val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
break;
case mexp_search_invokeblock:
val = vm_invokeblock_i(ec, GET_CFP(), &calling);
break;
}
-#endif
+ return val;
+}
- if (!UNDEF_P(val)) {
- return val; /* CFUNC normal return */
- }
- else {
- RESTORE_REGS(); /* CFP pushed in cc->call() */
- }
+VALUE
+rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
+{
+ stack_check(ec);
+ VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
+ VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
+ VM_EXEC(ec, val);
+ return val;
+}
-#ifdef MJIT_HEADER
- /* When calling ISeq which may catch an exception from JIT-ed
- code, we should not call jit_exec directly to prevent the
- caller frame from being canceled. That's because the caller
- frame may have stack values in the local variables and the
- cancelling the caller frame will purge them. But directly
- calling jit_exec is faster... */
- if (ISEQ_BODY(GET_ISEQ())->catch_except_p) {
- VM_ENV_FLAGS_SET(GET_EP(), VM_FRAME_FLAG_FINISH);
- return vm_exec(ec, true);
- }
- else if (UNDEF_P(val = jit_exec(ec))) {
- VM_ENV_FLAGS_SET(GET_EP(), VM_FRAME_FLAG_FINISH);
- return vm_exec(ec, false);
- }
- else {
- return val;
- }
-#else
- /* When calling from VM, longjmp in the callee won't purge any
- JIT-ed caller frames. So it's safe to directly call jit_exec. */
- return jit_exec(ec);
-#endif
+VALUE
+rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
+{
+ stack_check(ec);
+ VALUE bh = VM_BLOCK_HANDLER_NONE;
+ VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
+ VM_EXEC(ec, val);
+ return val;
+}
+
+VALUE
+rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
+{
+ stack_check(ec);
+ VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
+ VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
+ VM_EXEC(ec, val);
+ return val;
+}
+
+VALUE
+rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
+{
+ stack_check(ec);
+ VALUE bh = VM_BLOCK_HANDLER_NONE;
+ VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
+ VM_EXEC(ec, val);
+ return val;
}
/* object.c */
@@ -5368,6 +5914,40 @@ rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *pt
return vm_opt_newarray_min(ec, num, ptr);
}
+static VALUE
+vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
+{
+ // If Array#hash is _not_ monkeypatched, use the optimized call
+ if (BASIC_OP_UNREDEFINED_P(BOP_HASH, ARRAY_REDEFINED_OP_FLAG)) {
+ return rb_ary_hash_values(num, ptr);
+ }
+ else {
+ return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idHash, 0, NULL, RB_NO_KEYWORDS);
+ }
+}
+
+VALUE
+rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
+{
+ return vm_opt_newarray_hash(ec, num, ptr);
+}
+
+VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len, bool freeze);
+VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
+
+VALUE
+rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
+{
+ if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
+ struct RArray fake_ary;
+ VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num, true);
+ return rb_ec_pack_ary(ec, ary, fmt, Qnil);
+ }
+ else {
+ return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, 1, &fmt, RB_PASS_CALLED_KEYWORDS);
+ }
+}
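
Editor's note: both newarray specializations above follow the usual redefinition-guard shape — take the dedicated C implementation while BASIC_OP_UNREDEFINED_P still holds, otherwise materialize the array and go through a full (refinement-aware) method call. A stand-alone sketch of that guard under invented names (redefined_flags, BOP_HASH_REDEFINED, opt_hash):

/* Sketch: fast built-in path guarded by a "was this method redefined?" bit. */
#include <stdio.h>

#define BOP_HASH_REDEFINED 0x1
static unsigned int redefined_flags;        /* stand-in for the VM's per-op flags */

static int fast_hash(const int *p, int n)   /* optimized built-in */
{
    int h = 0;
    while (n--) h = h * 31 + *p++;
    return h;
}

static int generic_call_hash(const int *p, int n) /* full method dispatch */
{
    (void)p; (void)n;
    return -1;                               /* placeholder for a real send */
}

static int
opt_hash(const int *p, int n)
{
    if (!(redefined_flags & BOP_HASH_REDEFINED)) return fast_hash(p, n);
    return generic_call_hash(p, n);
}

int
main(void)
{
    int xs[] = { 1, 2, 3 };
    printf("%d\n", opt_hash(xs, 3));         /* fast path */
    redefined_flags |= BOP_HASH_REDEFINED;   /* e.g. Array#hash monkeypatched */
    printf("%d\n", opt_hash(xs, 3));         /* falls back to generic dispatch */
    return 0;
}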
+
#undef id_cmp
#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
@@ -5404,7 +5984,7 @@ vm_ic_track_const_chain(rb_control_frame_t *cfp, IC ic, const ID *segments)
RB_VM_LOCK_LEAVE();
}
-// For MJIT inlining
+// For RJIT inlining
static inline bool
vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
{
@@ -5440,7 +6020,7 @@ vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const
return;
}
- struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)rb_imemo_new(imemo_constcache, 0, 0, 0, 0);
+ struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
RB_OBJ_WRITE(ice, &ice->value, val);
ice->ic_cref = vm_get_const_key_cref(reg_ep);
if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
@@ -5449,7 +6029,29 @@ vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const
RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
rb_yjit_constant_ic_update(iseq, ic, pos);
- rb_mjit_constant_ic_update(iseq, ic, pos);
+ rb_rjit_constant_ic_update(iseq, ic, pos);
+}
+
+VALUE
+rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, IC ic)
+{
+ VALUE val;
+ const ID *segments = ic->segments;
+ struct iseq_inline_constant_cache_entry *ice = ic->entry;
+ if (ice && vm_ic_hit_p(ice, GET_EP())) {
+ val = ice->value;
+
+ VM_ASSERT(val == vm_get_ev_const_chain(ec, segments));
+ }
+ else {
+ ruby_vm_constant_cache_misses++;
+ val = vm_get_ev_const_chain(ec, segments);
+ vm_ic_track_const_chain(GET_CFP(), ic, segments);
+ // Undo the PC increment to get the address to this instruction
+ // INSN_ATTR(width) == 2
+ vm_ic_update(GET_ISEQ(), ic, val, GET_EP(), GET_PC() - 2);
+ }
+ return val;
}
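
Editor's note: rb_vm_opt_getconstant_path exposes the constant inline cache to the JITs — trust ic->entry while vm_ic_hit_p accepts it, otherwise recompute the constant, re-track the cref chain, and refill the cache. The sketch below keeps only the hit/miss/refill skeleton and replaces the real cref validity test with a global epoch so it stays self-contained; inline_cache, ic_get, and global_epoch are hypothetical.

/* Sketch: per-site inline cache with recompute-on-miss and explicit invalidation. */
#include <stdio.h>

static unsigned long global_epoch = 1;      /* bumped whenever constants change */

struct inline_cache { unsigned long epoch; int value; };

static int recompute(void) { return 42; }   /* stand-in for the slow lookup */

static int
ic_get(struct inline_cache *ic)
{
    if (ic->epoch == global_epoch) return ic->value;  /* hit */
    ic->value = recompute();                           /* miss: fill the cache */
    ic->epoch = global_epoch;
    return ic->value;
}

int
main(void)
{
    struct inline_cache ic = { 0, 0 };
    printf("%d\n", ic_get(&ic));   /* miss, recomputes */
    printf("%d\n", ic_get(&ic));   /* hit, cached */
    global_epoch++;                /* invalidation */
    printf("%d\n", ic_get(&ic));   /* miss again */
    return 0;
}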
static VALUE
@@ -5904,6 +6506,12 @@ vm_opt_aref_with(VALUE recv, VALUE key)
}
}
+VALUE
+rb_vm_opt_aref_with(VALUE recv, VALUE key)
+{
+ return vm_opt_aref_with(recv, key);
+}
+
static VALUE
vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
{
@@ -6084,28 +6692,20 @@ vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VAL
}
}
-// Return true if given cc has cfunc which is NOT handled by opt_send_without_block.
-bool
-rb_vm_opt_cfunc_p(CALL_CACHE cc, int insn)
-{
- switch (insn) {
- case BIN(opt_eq):
- return check_cfunc(vm_cc_cme(cc), rb_obj_equal);
- case BIN(opt_nil_p):
- return check_cfunc(vm_cc_cme(cc), rb_false);
- case BIN(opt_not):
- return check_cfunc(vm_cc_cme(cc), rb_obj_not);
- default:
- return false;
- }
-}
-
#define VM_TRACE_HOOK(target_event, val) do { \
if ((pc_events & (target_event)) & enabled_flags) { \
vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks_ptr, (val)); \
} \
} while (0)
+static VALUE
+rescue_errinfo(rb_execution_context_t *ec, rb_control_frame_t *cfp)
+{
+ VM_ASSERT(VM_FRAME_RUBYFRAME_P(cfp));
+ VM_ASSERT(ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE);
+ return cfp->ep[VM_ENV_INDEX_LAST_LVAR];
+}
+
static void
vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
{
@@ -6182,6 +6782,7 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks_ptr, Qundef);
}
VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
+ VM_TRACE_HOOK(RUBY_EVENT_RESCUE, rescue_errinfo(ec, reg_cfp));
VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
@@ -6217,8 +6818,7 @@ Init_vm_stack_canary(void)
VM_ASSERT(n == 0);
}
-#ifndef MJIT_HEADER
-MJIT_FUNC_EXPORTED void
+void
rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
{
/* Because a method has already been called, why not call
@@ -6229,7 +6829,6 @@ rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
rb_bug("dead canary found at %s: %s", insn, str);
}
-#endif
#else
void Init_vm_stack_canary(void) { /* nothing to do */ }
@@ -6402,7 +7001,7 @@ lookup_builtin_invoker(int argc)
static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
- const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_inline_p; // Verify an assumption of `Primitive.attr! 'inline'`
+ const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_attrs & BUILTIN_ATTR_LEAF; // Verify an assumption of `Primitive.attr! :leaf`
SETUP_CANARY(canary_p);
VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, (rb_insn_func_t)bf->func_ptr);
CHECK_CANARY(canary_p, BIN(invokebuiltin));