author    Jemma Issroff <jemmaissroff@gmail.com>    2022-09-23 13:54:42 -0400
committer Aaron Patterson <aaron.patterson@gmail.com>    2022-09-26 09:21:30 -0700
commit    9ddfd2ca004d1952be79cf1b84c52c79a55978f4 (patch)
tree      fe5fa943d9a2dc7438db920a09173ab06f869993 /vm_insnhelper.c
parent    2e88bca24ff4cafeb6afe5b062ff7181bc4b3a9b (diff)
This commit implements the Object Shapes technique in CRuby.
Object Shapes is used for accessing instance variables and representing the "frozenness" of objects. Object instances have a "shape" and the shape represents some attributes of the object (currently which instance variables are set and the "frozenness"). Shapes form a tree data structure, and when a new instance variable is set on an object, that object "transitions" to a new shape in the shape tree. Each shape has an ID that is used for caching. The shape structure is independent of class, so objects of different types can have the same shape.

For example:

```ruby
class Foo
  def initialize
    # Starts with shape id 0
    @a = 1 # transitions to shape id 1
    @b = 1 # transitions to shape id 2
  end
end

class Bar
  def initialize
    # Starts with shape id 0
    @a = 1 # transitions to shape id 1
    @b = 1 # transitions to shape id 2
  end
end

foo = Foo.new # `foo` has shape id 2
bar = Bar.new # `bar` has shape id 2
```

Both `foo` and `bar` instances have the same shape because they both set instance variables of the same name in the same order.

This technique can help to improve inline cache hits as well as generate more efficient machine code in JIT compilers.

This commit also adds some methods for debugging shapes on objects. See `RubyVM::Shape` for more details.

For more context on Object Shapes, see [Feature #18776]

Co-Authored-By: Aaron Patterson <tenderlove@ruby-lang.org>
Co-Authored-By: Eileen M. Uchitelle <eileencodes@gmail.com>
Co-Authored-By: John Hawthorn <john@hawthorn.email>
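As an aside for readers of the diff below, here is a minimal sketch of the shape-tree transition described above. It is not the CRuby `rb_shape_t` introduced by this commit: the struct layout, the linked-list edge storage, and every name (`example_shape`, `example_shape_transition`, `next_shape_id`) are simplified assumptions for illustration only.

```c
/* Simplified illustration of a shape tree. The real rb_shape_t differs
 * (edge storage, frozen-ness handling, etc.); all names here are hypothetical. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint32_t shape_id_t;

typedef struct example_shape {
    shape_id_t id;                   /* used as the inline-cache key */
    struct example_shape *parent;    /* previous shape in the tree */
    const char *edge_name;           /* ivar whose addition created this shape */
    uint32_t iv_index;               /* slot of that ivar in the object's buffer */
    struct example_shape *edges;     /* first child (singly linked for simplicity) */
    struct example_shape *next_sibling;
} example_shape;

static shape_id_t next_shape_id = 1; /* id 0 is the root shape */

/* "Transition": find the child shape reached by defining `ivar_name`,
 * creating it on first use. Objects that set the same ivars in the same
 * order end up pointing at the same shape, regardless of their class. */
static example_shape *
example_shape_transition(example_shape *shape, const char *ivar_name)
{
    for (example_shape *child = shape->edges; child; child = child->next_sibling) {
        if (strcmp(child->edge_name, ivar_name) == 0) return child;
    }
    example_shape *child = calloc(1, sizeof(*child));
    child->id = next_shape_id++;
    child->parent = shape;
    child->edge_name = ivar_name;
    child->iv_index = shape->edge_name ? shape->iv_index + 1 : 0;
    child->next_sibling = shape->edges;
    shape->edges = child;
    return child;
}
```

With a tree like this, the `vm_getivar`/`vm_setivar` changes in the diff only need to compare the object's current shape id against the id stored in the inline cache or call cache; when they match, the cached `attr_index_t` is still valid and no lookup is needed.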
Notes:
    Merged: https://github.com/ruby/ruby/pull/6386
Diffstat (limited to 'vm_insnhelper.c')
-rw-r--r--  vm_insnhelper.c | 490
1 file changed, 344 insertions(+), 146 deletions(-)
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index a662de468d..1cd66cf1eb 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -50,6 +50,11 @@ MJIT_STATIC VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
+ rb_shape_t * shape = rb_shape_get_shape(exc);
+ if (rb_shape_frozen_shape_p(shape)) {
+ shape = shape->parent;
+ }
+ rb_shape_set_shape(e, shape);
rb_obj_copy_ivar(e, exc);
return e;
}
@@ -1085,35 +1090,19 @@ vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_l
return klass;
}
-static bool
-iv_index_tbl_lookup(struct st_table *iv_index_tbl, ID id, struct rb_iv_index_tbl_entry **ent)
-{
- int found;
- st_data_t ent_data;
-
- if (iv_index_tbl == NULL) return false;
-
- RB_VM_LOCK_ENTER();
- {
- found = st_lookup(iv_index_tbl, (st_data_t)id, &ent_data);
- }
- RB_VM_LOCK_LEAVE();
- if (found) *ent = (struct rb_iv_index_tbl_entry *)ent_data;
-
- return found ? true : false;
-}
-
-ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent));
-
+ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
static inline void
-fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent)
+fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
{
- // fill cache
- if (!is_attr) {
- vm_ic_entry_set(ic, ent, iseq);
+ if (is_attr) {
+ if (vm_cc_markable(cc)) {
+ vm_cc_attr_index_set(cc, index, shape_id, shape_id);
+ RB_OBJ_WRITTEN(cc, Qundef, rb_shape_get_shape_by_id(shape_id));
+ }
}
else {
- vm_cc_attr_index_set(cc, ent->index);
+ vm_ic_attr_index_set(iseq, ic, index, shape_id, shape_id);
+ RB_OBJ_WRITTEN(iseq, Qundef, rb_shape_get_shape_by_id(shape_id));
}
}
@@ -1123,68 +1112,120 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
{
#if OPT_IC_FOR_IVAR
VALUE val = Qundef;
+ shape_id_t shape_id;
+ VALUE * ivar_list;
if (SPECIAL_CONST_P(obj)) {
- // frozen?
+ return Qnil;
}
- else if (LIKELY(is_attr ?
- RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, vm_cc_attr_index_p(cc)) :
- RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial, vm_ic_entry_p(ic) && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
- uint32_t index = !is_attr ? vm_ic_entry_index(ic): (vm_cc_attr_index(cc));
- RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
-
- if (LIKELY(BUILTIN_TYPE(obj) == T_OBJECT) &&
- LIKELY(index < ROBJECT_NUMIV(obj))) {
- val = ROBJECT_IVPTR(obj)[index];
+#if SHAPE_IN_BASIC_FLAGS
+ shape_id = RBASIC_SHAPE_ID(obj);
+#endif
+ switch (BUILTIN_TYPE(obj)) {
+ case T_OBJECT:
+ ivar_list = ROBJECT_IVPTR(obj);
VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
- }
- else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
- val = rb_ivar_generic_lookup_with_index(obj, id, index);
- }
- goto ret;
+#if !SHAPE_IN_BASIC_FLAGS
+ shape_id = ROBJECT_SHAPE_ID(obj);
+#endif
+ break;
+ case T_CLASS:
+ case T_MODULE:
+ {
+ goto general_path;
+ }
+ default:
+ if (FL_TEST_RAW(obj, FL_EXIVAR)) {
+ struct gen_ivtbl *ivtbl;
+ rb_gen_ivtbl_get(obj, id, &ivtbl);
+#if !SHAPE_IN_BASIC_FLAGS
+ shape_id = ivtbl->shape_id;
+#endif
+ ivar_list = ivtbl->ivptr;
+ } else {
+ return Qnil;
+ }
}
- else {
- struct rb_iv_index_tbl_entry *ent;
- if (BUILTIN_TYPE(obj) == T_OBJECT) {
- struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
+ shape_id_t cached_id;
- if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
- fill_ivar_cache(iseq, ic, cc, is_attr, ent);
+ if (is_attr) {
+ cached_id = vm_cc_attr_shape_id(cc);
+ }
+ else {
+ cached_id = vm_ic_attr_shape_id(ic);
+ }
- // get value
- if (ent->index < ROBJECT_NUMIV(obj)) {
- val = ROBJECT_IVPTR(obj)[ent->index];
+ attr_index_t index;
- VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
- }
- }
+ if (LIKELY(cached_id == shape_id)) {
+ RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
+
+ if (is_attr && vm_cc_attr_index_p(cc)) {
+ index = vm_cc_attr_index(cc);
+ }
+ else if (!is_attr && vm_ic_attr_index_p(ic)) {
+ index = vm_ic_attr_index(ic);
+ }
+ else {
+ return Qnil;
}
- else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
- struct st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
- if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
- fill_ivar_cache(iseq, ic, cc, is_attr, ent);
- val = rb_ivar_generic_lookup_with_index(obj, id, ent->index);
+ val = ivar_list[index];
+ VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
+ }
+ else { // cache miss case
+#if RUBY_DEBUG
+ if (is_attr) {
+ if (cached_id != INVALID_SHAPE_ID) {
+ RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
+ } else {
+ RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
}
}
else {
- // T_CLASS / T_MODULE
- goto general_path;
+ if (cached_id != INVALID_SHAPE_ID) {
+ RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
+ } else {
+ RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
+ }
}
+#endif
- ret:
- if (LIKELY(val != Qundef)) {
- return val;
+ attr_index_t index;
+ rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
+
+ if (rb_shape_get_iv_index(shape, id, &index)) {
+ // This fills in the cache with the shared cache object.
+ // "ent" is the shared cache object
+ fill_ivar_cache(iseq, ic, cc, is_attr, index, shape_id);
+
+ // We fetched the ivar list above
+ val = ivar_list[index];
}
else {
- return Qnil;
+ if (is_attr) {
+ if (vm_cc_markable(cc)) {
+ vm_cc_attr_index_initialize(cc, shape_id);
+ }
+ }
+ else {
+ vm_ic_attr_index_initialize(ic, shape_id);
+ }
+
+ val = Qnil;
}
+
}
- general_path:
+
+ RUBY_ASSERT(val != Qundef);
+
+ return val;
+
+general_path:
#endif /* OPT_IC_FOR_IVAR */
RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
@@ -1196,6 +1237,24 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
}
}
+static void
+populate_cache(attr_index_t index, rb_shape_t *shape, rb_shape_t *next_shape, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
+{
+ // Cache population code
+ if (is_attr) {
+ if (vm_cc_markable(cc)) {
+ vm_cc_attr_index_set(cc, index, SHAPE_ID(shape), SHAPE_ID(next_shape));
+ RB_OBJ_WRITTEN(cc, Qundef, (VALUE)shape);
+ RB_OBJ_WRITTEN(cc, Qundef, (VALUE)next_shape);
+ }
+ }
+ else {
+ vm_ic_attr_index_set(iseq, ic, index, SHAPE_ID(shape), SHAPE_ID(next_shape));
+ RB_OBJ_WRITTEN(iseq, Qundef, (VALUE)shape);
+ RB_OBJ_WRITTEN(iseq, Qundef, (VALUE)next_shape);
+ }
+}
+
ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
@@ -1203,35 +1262,66 @@ NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, cons
static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
- rb_check_frozen_internal(obj);
-
#if OPT_IC_FOR_IVAR
- if (RB_TYPE_P(obj, T_OBJECT)) {
- struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
- struct rb_iv_index_tbl_entry *ent;
+ switch (BUILTIN_TYPE(obj)) {
+ case T_OBJECT:
+ {
+ rb_check_frozen_internal(obj);
+
+ attr_index_t index;
+
+ uint32_t num_iv = ROBJECT_NUMIV(obj);
+ rb_shape_t* shape = rb_shape_get_shape(obj);
+ rb_shape_t* next_shape = rb_shape_get_next(shape, obj, id);
+ if (shape != next_shape) {
+ rb_shape_set_shape(obj, next_shape);
+ }
- if (iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
- if (!is_attr) {
- vm_ic_entry_set(ic, ent, iseq);
- }
- else if (ent->index >= INT_MAX) {
- rb_raise(rb_eArgError, "too many instance variables");
- }
- else {
- vm_cc_attr_index_set(cc, (int)(ent->index));
- }
+ if (rb_shape_get_iv_index(next_shape, id, &index)) { // based off the hash stored in the transition tree
+ if (index >= MAX_IVARS) {
+ rb_raise(rb_eArgError, "too many instance variables");
+ }
- uint32_t index = ent->index;
+ populate_cache(index, shape, next_shape, id, iseq, ic, cc, is_attr);
+ }
+ else {
+ rb_bug("Didn't find instance variable %s\n", rb_id2name(id));
+ }
+
+ // Ensure the IV buffer is wide enough to store the IV
+ if (UNLIKELY(index >= num_iv)) {
+ rb_init_iv_list(obj);
+ }
- if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
- rb_init_iv_list(obj);
+ VALUE *ptr = ROBJECT_IVPTR(obj);
+ RB_OBJ_WRITE(obj, &ptr[index], val);
+ RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);
+
+ return val;
}
- VALUE *ptr = ROBJECT_IVPTR(obj);
- RB_OBJ_WRITE(obj, &ptr[index], val);
- RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);
+ case T_CLASS:
+ case T_MODULE:
+ break;
+ default:
+ {
+ rb_shape_t * shape = rb_shape_get_shape(obj);
+ rb_ivar_set(obj, id, val);
+ rb_shape_t * next_shape = rb_shape_get_shape(obj);
+ attr_index_t index;
+
+ if (rb_shape_get_iv_index(next_shape, id, &index)) { // based off the hash stored in the transition tree
+ if (index >= MAX_IVARS) {
+ rb_raise(rb_eArgError, "too many instance variables");
+ }
- return val;
- }
+ populate_cache(index, shape, next_shape, id, iseq, ic, cc, is_attr);
+ }
+ else {
+ rb_bug("didn't find the id\n");
+ }
+
+ return val;
+ }
}
#endif
RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
@@ -1250,39 +1340,99 @@ vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache
return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}
+NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index));
+static VALUE
+vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
+{
+#if SHAPE_IN_BASIC_FLAGS
+ shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
+#else
+ shape_id_t shape_id = rb_generic_shape_id(obj);
+#endif
+
+ // Cache hit case
+ if (shape_id == source_shape_id) {
+ RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
+
+ struct gen_ivtbl *ivtbl = 0;
+ if (dest_shape_id != shape_id) {
+ ivtbl = rb_ensure_generic_iv_list_size(obj, index + 1);
+#if SHAPE_IN_BASIC_FLAGS
+ RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
+#else
+ ivtbl->shape_id = dest_shape_id;
+#endif
+ RB_OBJ_WRITTEN(obj, Qundef, rb_shape_get_shape_by_id(dest_shape_id));
+ }
+ else {
+ // Just get the IV table
+ RUBY_ASSERT(GET_VM()->shape_list[dest_shape_id]);
+ rb_gen_ivtbl_get(obj, 0, &ivtbl);
+ }
+
+ VALUE *ptr = ivtbl->ivptr;
+
+ RB_OBJ_WRITE(obj, &ptr[index], val);
+
+ RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
+
+ return val;
+ }
+
+ return Qundef;
+}
+
static inline VALUE
-vm_setivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
+vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
- if (LIKELY(RB_TYPE_P(obj, T_OBJECT)) &&
- LIKELY(!RB_OBJ_FROZEN_RAW(obj))) {
+ switch (BUILTIN_TYPE(obj)) {
+ case T_OBJECT:
+ {
+ VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
+ // If object's shape id is the same as the source
+ // then do the shape transition and write the ivar
+ // If object's shape id is the same as the dest
+ // then write the ivar
+ shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);
+
+ // Do we have a cache hit *and* is the CC initialized
+ if (shape_id == source_shape_id) {
+ RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
+
+ VM_ASSERT(!rb_ractor_shareable_p(obj));
+
+ if (dest_shape_id != shape_id) {
+ if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
+ rb_init_iv_list(obj);
+ }
+ ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
+ }
+ else {
+ RUBY_ASSERT(GET_VM()->shape_list[dest_shape_id]);
+ }
+
+ RUBY_ASSERT(index < ROBJECT_NUMIV(obj));
- VM_ASSERT(!rb_ractor_shareable_p(obj));
+ VALUE *ptr = ROBJECT_IVPTR(obj);
- if (LIKELY(
- (!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, vm_ic_entry_p(ic) && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass))) ||
- ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, vm_cc_attr_index_p(cc))))) {
- uint32_t index = !is_attr ? vm_ic_entry_index(ic) : vm_cc_attr_index(cc);
+ RB_OBJ_WRITE(obj, &ptr[index], val);
- if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
- rb_init_iv_list(obj);
+ RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
+
+ return val;
+ }
}
- VALUE *ptr = ROBJECT_IVPTR(obj);
- RB_OBJ_WRITE(obj, &ptr[index], val);
- RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
- return val; /* inline cache hit */
- }
- }
- else {
+ break;
+ case T_CLASS:
+ case T_MODULE:
RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
+ default:
+ break;
}
+
+ return Qundef;
#endif /* OPT_IC_FOR_IVAR */
- if (is_attr) {
- return vm_setivar_slowpath_attr(obj, id, val, cc);
- }
- else {
- return vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
- }
}
static VALUE
@@ -1377,7 +1527,22 @@ vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
- vm_setivar(obj, id, val, iseq, ic, 0, 0);
+ shape_id_t source_shape_id = vm_ic_attr_index_source_shape_id(ic);
+ attr_index_t index = vm_ic_attr_index(ic);
+ shape_id_t dest_shape_id = vm_ic_attr_index_dest_shape_id(ic);
+ if (UNLIKELY(vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index) == Qundef)) {
+ switch (BUILTIN_TYPE(obj)) {
+ case T_OBJECT:
+ case T_CLASS:
+ case T_MODULE:
+ break;
+ default:
+ if (vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index) != Qundef) {
+ return;
+ }
+ }
+ vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
+ }
}
void
@@ -1386,28 +1551,6 @@ rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IV
vm_setinstancevariable(iseq, obj, id, val, ic);
}
-/* Set the instance variable +val+ on object +obj+ at the +index+.
- * This function only works with T_OBJECT objects, so make sure
- * +obj+ is of type T_OBJECT before using this function.
- */
-VALUE
-rb_vm_set_ivar_idx(VALUE obj, uint32_t index, VALUE val)
-{
- RUBY_ASSERT(RB_TYPE_P(obj, T_OBJECT));
-
- rb_check_frozen_internal(obj);
-
- VM_ASSERT(!rb_ractor_shareable_p(obj));
-
- if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
- rb_init_iv_list(obj);
- }
- VALUE *ptr = ROBJECT_IVPTR(obj);
- RB_OBJ_WRITE(obj, &ptr[index], val);
-
- return val;
-}
-
static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
@@ -3100,17 +3243,45 @@ vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_call
const struct rb_callcache *cc = calling->cc;
RB_DEBUG_COUNTER_INC(ccf_ivar);
cfp->sp -= 1;
- return vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
+ VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
+ return ivar;
}
static VALUE
-vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
+vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
{
- const struct rb_callcache *cc = calling->cc;
RB_DEBUG_COUNTER_INC(ccf_attrset);
VALUE val = *(cfp->sp - 1);
cfp->sp -= 2;
- return vm_setivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, val, NULL, NULL, cc, 1);
+ shape_id_t source_shape_id = vm_cc_attr_index_source_shape_id(cc);
+ attr_index_t index = vm_cc_attr_index(cc);
+ shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
+ ID id = vm_cc_cme(cc)->def->body.attr.id;
+ rb_check_frozen_internal(obj);
+ VALUE res = vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index);
+ if (res == Qundef) {
+ switch (BUILTIN_TYPE(obj)) {
+ case T_OBJECT:
+ case T_CLASS:
+ case T_MODULE:
+ break;
+ default:
+ {
+ res = vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index);
+ if (res != Qundef) {
+ return res;
+ }
+ }
+ }
+ res = vm_setivar_slowpath_attr(obj, id, val, cc);
+ }
+ return res;
+}
+
+static VALUE
+vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
+{
+ return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
}
bool
@@ -3219,7 +3390,7 @@ vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_cal
{
calling->cc = &VM_CC_ON_STACK(Qundef,
vm_call_general,
- { 0 },
+ {{0}},
aliased_callable_method_entry(vm_cc_cme(calling->cc)));
return vm_call_method_each_type(ec, cfp, calling);
@@ -3389,7 +3560,7 @@ vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_
ec->method_missing_reason = reason;
calling->ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
- calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
+ calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
return vm_call_method(ec, reg_cfp, calling);
}
@@ -3415,7 +3586,7 @@ vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
cme = refined_method_callable_without_refinement(cme);
}
- calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 }, cme);
+ calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
return vm_call_method_each_type(ec, cfp, calling);
}
@@ -3522,7 +3693,7 @@ search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struc
static VALUE
vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
- struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
+ struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
search_refined_method(ec, cfp, calling));
if (vm_cc_cme(ref_cc)) {
@@ -3702,18 +3873,45 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
rb_check_arity(calling->argc, 1, 1);
- vm_cc_attr_index_initialize(cc);
+
const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG);
- VM_CALL_METHOD_ATTR(v,
- vm_call_attrset(ec, cfp, calling),
- CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
+
+ if (vm_cc_markable(cc)) {
+ vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
+ VM_CALL_METHOD_ATTR(v,
+ vm_call_attrset_direct(ec, cfp, cc, calling->recv),
+ CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
+ } else {
+ cc = &((struct rb_callcache) {
+ .flags = T_IMEMO |
+ (imemo_callcache << FL_USHIFT) |
+ VM_CALLCACHE_UNMARKABLE |
+ ((VALUE)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT) |
+ VM_CALLCACHE_ON_STACK,
+ .klass = cc->klass,
+ .cme_ = cc->cme_,
+ .call_ = cc->call_,
+ .aux_ = {
+ .attr = {
+ .index = 0,
+ .dest_shape_id = INVALID_SHAPE_ID,
+ }
+ },
+ });
+
+ VM_CALL_METHOD_ATTR(v,
+ vm_call_attrset_direct(ec, cfp, cc, calling->recv),
+ CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
+ }
return v;
case VM_METHOD_TYPE_IVAR:
CALLER_SETUP_ARG(cfp, calling, ci);
CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
rb_check_arity(calling->argc, 0, 0);
- vm_cc_attr_index_initialize(cc);
+ if (vm_cc_markable(cc)) {
+ vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
+ }
const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
VM_CALL_METHOD_ATTR(v,
vm_call_ivar(ec, cfp, calling),