Diffstat (limited to 'vm_callinfo.h')
-rw-r--r--  vm_callinfo.h  263
1 file changed, 214 insertions(+), 49 deletions(-)
diff --git a/vm_callinfo.h b/vm_callinfo.h
index d5f4388fa8..21e0755aa8 100644
--- a/vm_callinfo.h
+++ b/vm_callinfo.h
@@ -9,21 +9,23 @@
*/
#include "debug_counter.h"
+#include "internal/class.h"
+#include "shape.h"
enum vm_call_flag_bits {
- VM_CALL_ARGS_SPLAT_bit, /* m(*args) */
- VM_CALL_ARGS_BLOCKARG_bit, /* m(&block) */
- VM_CALL_FCALL_bit, /* m(...) */
- VM_CALL_VCALL_bit, /* m */
- VM_CALL_ARGS_SIMPLE_bit, /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
- VM_CALL_BLOCKISEQ_bit, /* has blockiseq */
- VM_CALL_KWARG_bit, /* has kwarg */
- VM_CALL_KW_SPLAT_bit, /* m(**opts) */
- VM_CALL_TAILCALL_bit, /* located at tail position */
- VM_CALL_SUPER_bit, /* super */
- VM_CALL_ZSUPER_bit, /* zsuper */
- VM_CALL_OPT_SEND_bit, /* internal flag */
- VM_CALL_KW_SPLAT_MUT_bit, /* kw splat hash can be modified (to avoid allocating a new one) */
+ VM_CALL_ARGS_SPLAT_bit, // m(*args)
+ VM_CALL_ARGS_BLOCKARG_bit, // m(&block)
+ VM_CALL_FCALL_bit, // m(args) # receiver is self
+ VM_CALL_VCALL_bit, // m # method call that looks like a local variable
+ VM_CALL_ARGS_SIMPLE_bit, // (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL
+ VM_CALL_KWARG_bit, // has kwarg
+ VM_CALL_KW_SPLAT_bit, // m(**opts)
+ VM_CALL_TAILCALL_bit, // located at tail position
+ VM_CALL_SUPER_bit, // super
+ VM_CALL_ZSUPER_bit, // zsuper
+ VM_CALL_OPT_SEND_bit, // internal flag
+ VM_CALL_KW_SPLAT_MUT_bit, // kw splat hash can be modified (to avoid allocating a new one)
+ VM_CALL_ARGS_SPLAT_MUT_bit, // args splat can be modified (to avoid allocating a new one)
VM_CALL__END
};
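
Each `_bit` above is turned into a mask by the `0x01 << bit` defines in the next hunk, and a call site ORs together whichever masks apply. A minimal sketch of testing a combined flag word (illustrative only, not part of this diff):

    unsigned int flag = VM_CALL_ARGS_SPLAT | VM_CALL_ARGS_BLOCKARG; /* m(*args, &blk) */
    if (flag & VM_CALL_ARGS_SPLAT) {
        /* positional arguments arrive as a splatted array */
    }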
@@ -32,7 +34,6 @@ enum vm_call_flag_bits {
#define VM_CALL_FCALL (0x01 << VM_CALL_FCALL_bit)
#define VM_CALL_VCALL (0x01 << VM_CALL_VCALL_bit)
#define VM_CALL_ARGS_SIMPLE (0x01 << VM_CALL_ARGS_SIMPLE_bit)
-#define VM_CALL_BLOCKISEQ (0x01 << VM_CALL_BLOCKISEQ_bit)
#define VM_CALL_KWARG (0x01 << VM_CALL_KWARG_bit)
#define VM_CALL_KW_SPLAT (0x01 << VM_CALL_KW_SPLAT_bit)
#define VM_CALL_TAILCALL (0x01 << VM_CALL_TAILCALL_bit)
@@ -40,9 +41,11 @@ enum vm_call_flag_bits {
#define VM_CALL_ZSUPER (0x01 << VM_CALL_ZSUPER_bit)
#define VM_CALL_OPT_SEND (0x01 << VM_CALL_OPT_SEND_bit)
#define VM_CALL_KW_SPLAT_MUT (0x01 << VM_CALL_KW_SPLAT_MUT_bit)
+#define VM_CALL_ARGS_SPLAT_MUT (0x01 << VM_CALL_ARGS_SPLAT_MUT_bit)
struct rb_callinfo_kwarg {
int keyword_len;
+ int references;
VALUE keywords[];
};
@@ -65,8 +68,12 @@ struct rb_callinfo {
VALUE argc;
};
-#ifndef USE_EMBED_CI
+#if !defined(USE_EMBED_CI) || (USE_EMBED_CI+0)
+#undef USE_EMBED_CI
#define USE_EMBED_CI 1
+#else
+#undef USE_EMBED_CI
+#define USE_EMBED_CI 0
#endif
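
This replacement normalizes USE_EMBED_CI to a literal 0 or 1 so later code can test it in a plain `if` (which the compiler folds away) rather than behind `#if`. The same idiom with a hypothetical FEATURE_X macro (a sketch, not from this diff):

    /* undefined        -> 1 (default on)
     * defined, nonzero -> 1
     * defined as 0     -> 0
     * defined empty    -> 0 (the "+0" keeps the #if expression well-formed) */
    #if !defined(FEATURE_X) || (FEATURE_X+0)
    #  undef FEATURE_X
    #  define FEATURE_X 1
    #else
    #  undef FEATURE_X
    #  define FEATURE_X 0
    #endif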
#if SIZEOF_VALUE == 8
@@ -96,7 +103,9 @@ struct rb_callinfo {
static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
-#if USE_EMBED_CI
+ if (!USE_EMBED_CI) {
+ return 0;
+ }
if (LIKELY(((VALUE)ci) & 0x01)) {
return 1;
}
@@ -104,9 +113,6 @@ vm_ci_packed_p(const struct rb_callinfo *ci)
VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
return 0;
}
-#else
- return 0;
-#endif
}
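
The function now relies on that constant: when USE_EMBED_CI is 0, the early return makes the rest dead code. Packed call infos carry their payload in the pointer word itself, tagged with RUBY_FIXNUM_FLAG (the low bit; see the packing expression in the VM_CI_EMBEDDABLE_P hunk below), so the check needs no dereference. A sketch of what the two branches mean, under those assumptions:

    const struct rb_callinfo *ci = vm_ci_new(mid, flag, argc, NULL);
    if (((VALUE)ci) & 0x01) {
        /* packed: mid/flag/argc are bit fields of the tagged word */
    }
    else {
        /* heap imemo: fields live in struct rb_callinfo */
    }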
static inline bool
@@ -179,7 +185,7 @@ vm_ci_dump(const struct rb_callinfo *ci)
#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)
-#/* This is passed to STATIC_ASSERT. Cannot be an inline function. */
+/* This is passed to STATIC_ASSERT. Cannot be an inline function. */
#define VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg) \
(((mid ) & ~CI_EMBED_ID_MASK) ? false : \
((flag) & ~CI_EMBED_FLAG_MASK) ? false : \
@@ -193,26 +199,23 @@ vm_ci_dump(const struct rb_callinfo *ci)
(((VALUE)(argc)) << CI_EMBED_ARGC_SHFT) | \
RUBY_FIXNUM_FLAG))
+// vm_method.c
+const struct rb_callinfo *rb_vm_ci_lookup(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg);
+void rb_vm_ci_free(const struct rb_callinfo *);
+
static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
-#if USE_EMBED_CI
- if (VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
+ if (USE_EMBED_CI && VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
RB_DEBUG_COUNTER_INC(ci_packed);
return vm_ci_new_id(mid, flag, argc, kwarg);
}
-#endif
const bool debug = 0;
if (debug) ruby_debug_printf("%s:%d ", file, line);
- // TODO: dedup
- const struct rb_callinfo *ci = (const struct rb_callinfo *)
- rb_imemo_new(imemo_callinfo,
- (VALUE)mid,
- (VALUE)flag,
- (VALUE)argc,
- (VALUE)kwarg);
+ const struct rb_callinfo *ci = rb_vm_ci_lookup(mid, flag, argc, kwarg);
+
if (debug) rp(ci);
if (kwarg) {
RB_DEBUG_COUNTER_INC(ci_kw);
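
With rb_vm_ci_lookup in place, the removed `// TODO: dedup` is resolved: non-embeddable call infos are interned through a table in vm_method.c instead of allocating a fresh imemo per call site, and rb_vm_ci_free presumably releases an interned entry; the new `references` counter added to rb_callinfo_kwarg appears to support the same sharing scheme.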
@@ -283,29 +286,87 @@ struct rb_callcache {
const vm_call_handler call_;
union {
- const unsigned int attr_index;
+ struct {
+ uintptr_t value; // Shape ID in upper bits, index in lower bits
+ } attr;
const enum method_missing_reason method_missing_reason; /* used by method_missing */
VALUE v;
+ const struct rb_builtin_function *bf;
} aux_;
};
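
The old single `attr_index` becomes one packed word: the destination shape ID in the upper bits, the attribute index (stored off by one so zero can mean "nothing cached") in the lower bits. A sketch of the round trip, assuming the SHAPE_FLAG_SHIFT/SHAPE_FLAG_MASK constants from shape.h that the accessors below use:

    uintptr_t value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT)
                    | (uintptr_t)(index + 1);                 /* 0 = unset */
    shape_id_t   sid = (shape_id_t)(value >> SHAPE_FLAG_SHIFT);
    attr_index_t idx = (attr_index_t)(value & SHAPE_FLAG_MASK) - 1;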
-#define VM_CALLCACHE_UNMARKABLE IMEMO_FL_USER0
+#define VM_CALLCACHE_UNMARKABLE FL_FREEZE
+#define VM_CALLCACHE_ON_STACK FL_EXIVAR
+
+/* VM_CALLCACHE_IVAR used for IVAR/ATTRSET/STRUCT_AREF/STRUCT_ASET methods */
+#define VM_CALLCACHE_IVAR IMEMO_FL_USER0
+#define VM_CALLCACHE_BF IMEMO_FL_USER1
+#define VM_CALLCACHE_SUPER IMEMO_FL_USER2
+#define VM_CALLCACHE_REFINEMENT IMEMO_FL_USER3
+
+enum vm_cc_type {
+ cc_type_normal, // chained from ccs
+ cc_type_super,
+ cc_type_refinement,
+};
+
+extern const struct rb_callcache *rb_vm_empty_cc(void);
+extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);
+
+#define vm_cc_empty() rb_vm_empty_cc()
+
+static inline void vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id);
+
+static inline void
+vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
+{
+ vm_cc_attr_index_set(cc, (attr_index_t)-1, shape_id);
+}
static inline const struct rb_callcache *
vm_cc_new(VALUE klass,
const struct rb_callable_method_entry_struct *cme,
- vm_call_handler call)
+ vm_call_handler call,
+ enum vm_cc_type type)
{
- const struct rb_callcache *cc = (const struct rb_callcache *)rb_imemo_new(imemo_callcache, (VALUE)cme, (VALUE)call, 0, klass);
+ struct rb_callcache *cc = IMEMO_NEW(struct rb_callcache, imemo_callcache, klass);
+ *((struct rb_callable_method_entry_struct **)&cc->cme_) = (struct rb_callable_method_entry_struct *)cme;
+ *((vm_call_handler *)&cc->call_) = call;
+
+ switch (type) {
+ case cc_type_normal:
+ break;
+ case cc_type_super:
+ *(VALUE *)&cc->flags |= VM_CALLCACHE_SUPER;
+ break;
+ case cc_type_refinement:
+ *(VALUE *)&cc->flags |= VM_CALLCACHE_REFINEMENT;
+ break;
+ }
+
+ vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
RB_DEBUG_COUNTER_INC(cc_new);
return cc;
}
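
vm_cc_new now takes the cache's role at construction time and tags super/refinement caches with the flags tested by the predicates just below. A hypothetical construction of a super cache (handler name illustrative):

    const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_super);
    VM_ASSERT(vm_cc_super_p(cc));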
+static inline bool
+vm_cc_super_p(const struct rb_callcache *cc)
+{
+ return (cc->flags & VM_CALLCACHE_SUPER) != 0;
+}
+
+static inline bool
+vm_cc_refinement_p(const struct rb_callcache *cc)
+{
+ return (cc->flags & VM_CALLCACHE_REFINEMENT) != 0;
+}
+
#define VM_CC_ON_STACK(clazz, call, aux, cme) \
(struct rb_callcache) { \
.flags = T_IMEMO | \
(imemo_callcache << FL_USHIFT) | \
- VM_CALLCACHE_UNMARKABLE, \
+ VM_CALLCACHE_UNMARKABLE | \
+ VM_CALLCACHE_ON_STACK, \
.klass = clazz, \
.cme_ = cme, \
.call_ = call, \
@@ -321,10 +382,21 @@ vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
return cc->klass == klass;
}
+static inline int
+vm_cc_markable(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_UNMARKABLE) == 0;
+}
+
static inline const struct rb_callable_method_entry_struct *
vm_cc_cme(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc->call_ == NULL || // not initialized yet
+ !vm_cc_markable(cc) ||
+ cc->cme_ != NULL);
+
return cc->cme_;
}
@@ -332,28 +404,54 @@ static inline vm_call_handler
vm_cc_call(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc->call_ != NULL);
return cc->call_;
}
-static inline unsigned int
+static inline attr_index_t
vm_cc_attr_index(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
- return cc->aux_.attr_index;
+ return (attr_index_t)((cc->aux_.attr.value & SHAPE_FLAG_MASK) - 1);
}
-static inline unsigned int
-vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
+static inline shape_id_t
+vm_cc_attr_index_dest_shape_id(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
- return cc->aux_.method_missing_reason;
+
+ return cc->aux_.attr.value >> SHAPE_FLAG_SHIFT;
}
-static inline int
-vm_cc_markable(const struct rb_callcache *cc)
+static inline void
+vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t * shape_id, attr_index_t * index)
+{
+ uintptr_t cache_value = cc->aux_.attr.value; // Atomically read 64 bits
+ *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
+ *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
+ return;
+}
+
+static inline void
+vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t * shape_id, attr_index_t * index)
+{
+ uintptr_t cache_value = ic->value; // Atomically read 64 bits
+ *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
+ *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
+ return;
+}
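
Both `*_atomic_shape_and_index` helpers load the 64-bit cache word once and only then split it, so a reader can never pair a shape ID from one write with an index from another. The torn variant they avoid, as a sketch:

    shape_id_t   sid = vm_cc_attr_index_dest_shape_id(cc); /* read 1 */
    /* a concurrent vm_cc_attr_index_set() may land here */
    attr_index_t idx = vm_cc_attr_index(cc);               /* read 2: can mismatch sid */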
+
+static inline shape_id_t
+vm_ic_attr_index_dest_shape_id(const struct iseq_inline_iv_cache_entry *ic)
+{
+ return (shape_id_t)(ic->value >> SHAPE_FLAG_SHIFT);
+}
+
+static inline unsigned int
+vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
- return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_UNMARKABLE) == 0;
+ return cc->aux_.method_missing_reason;
}
static inline bool
@@ -367,7 +465,7 @@ vm_cc_invalidated_p(const struct rb_callcache *cc)
}
}
-// For MJIT. cc_cme is supposed to have inlined `vm_cc_cme(cc)`.
+// For RJIT. cc_cme is supposed to have inlined `vm_cc_cme(cc)`.
static inline bool
vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *cc_cme, VALUE klass)
{
@@ -380,9 +478,6 @@ vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *c
}
}
-extern const struct rb_callcache *rb_vm_empty_cc(void);
-#define vm_cc_empty() rb_vm_empty_cc()
-
/* callcache: mutate */
static inline void
@@ -394,11 +489,41 @@ vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
}
static inline void
-vm_cc_attr_index_set(const struct rb_callcache *cc, int index)
+set_vm_cc_ivar(const struct rb_callcache *cc)
+{
+ *(VALUE *)&cc->flags |= VM_CALLCACHE_IVAR;
+}
+
+static inline void
+vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
{
+ uintptr_t *attr_value = (uintptr_t *)&cc->aux_.attr.value;
+ if (!vm_cc_markable(cc)) {
+ *attr_value = (uintptr_t)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT;
+ return;
+ }
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty());
- *(int *)&cc->aux_.attr_index = index;
+ *attr_value = (attr_index_t)(index + 1) | ((uintptr_t)(dest_shape_id) << SHAPE_FLAG_SHIFT);
+ set_vm_cc_ivar(cc);
+}
+
+static inline bool
+vm_cc_ivar_p(const struct rb_callcache *cc)
+{
+ return (cc->flags & VM_CALLCACHE_IVAR) != 0;
+}
+
+static inline void
+vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
+{
+ *(uintptr_t *)&ic->value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (attr_index_t)(index + 1);
+}
+
+static inline void
+vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
+{
+ *(uintptr_t *)&ic->value = (uintptr_t)shape_id << SHAPE_FLAG_SHIFT;
}
static inline void
@@ -410,6 +535,21 @@ vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missi
}
static inline void
+vm_cc_bf_set(const struct rb_callcache *cc, const struct rb_builtin_function *bf)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+ *(const struct rb_builtin_function **)&cc->aux_.bf = bf;
+ *(VALUE *)&cc->flags |= VM_CALLCACHE_BF;
+}
+
+static inline bool
+vm_cc_bf_p(const struct rb_callcache *cc)
+{
+ return (cc->flags & VM_CALLCACHE_BF) != 0;
+}
+
+static inline void
vm_cc_invalidate(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
@@ -441,11 +581,36 @@ struct rb_class_cc_entries {
};
#if VM_CHECK_MODE > 0
+
+const rb_callable_method_entry_t *rb_vm_lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
+void rb_vm_dump_overloaded_cme_table(void);
+
static inline bool
vm_ccs_p(const struct rb_class_cc_entries *ccs)
{
return ccs->debug_sig == ~(VALUE)ccs;
}
+
+static inline bool
+vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme)
+{
+ if (vm_cc_cme(cc) == cme ||
+ (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme))) {
+ return true;
+ }
+ else {
+#if 1
+ // debug print
+
+ fprintf(stderr, "iseq_overload:%d\n", (int)cme->def->iseq_overload);
+ rp(cme);
+ rp(vm_cc_cme(cc));
+ rb_vm_lookup_overloaded_cme(cme);
+#endif
+ return false;
+ }
+}
+
#endif
// gc.c