author    Maxime Chevalier-Boisvert <maxime.chevalierboisvert@shopify.com>    2020-11-17 15:04:36 -0500
committer Alan Wu <XrXr@users.noreply.github.com>    2021-10-20 18:19:26 -0400
commit    4d964e25823fdbaae28cea7b0416902f7afd7cdc (patch)
tree      f8a5f451dbf4a6b3678500b9c0b5b2b26f777522
parent    e2e70f69f3af939b0ba687424b6a30043de2ea9e (diff)
Implemented setinstancevariable
-rw-r--r--    ujit_compile.c    78
1 file changed, 78 insertions, 0 deletions
diff --git a/ujit_compile.c b/ujit_compile.c
index 2520932ad1..ff0ce8717c 100644
--- a/ujit_compile.c
+++ b/ujit_compile.c
@@ -693,6 +693,83 @@ gen_getinstancevariable(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
}
static bool
+gen_setinstancevariable(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
+{
+ IVC ic = (IVC)ctx_get_arg(ctx, 1);
+
+ // Check that the inline cache has been set and the slot index is known
+ if (!ic->entry)
+ {
+ return false;
+ }
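+
+ // NOTE: returning false here rejects the instruction at compile time and
+ // leaves it to the interpreter; the side exits emitted below handle
+ // runtime guard failures instead.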
+
+ // If the class uses the default allocator, instances should all be T_OBJECT
+ // NOTE: This assumes nobody changes the allocator of the class after allocation.
+ // Eventually, we can encode whether an object is T_OBJECT or not
+ // inside object shapes.
+ if (rb_get_alloc_func(ic->entry->class_value) != rb_class_allocate_instance)
+ {
+ return false;
+ }
+
+ uint32_t ivar_index = ic->entry->index;
+
+ // Create a side-exit to fall back to the interpreter
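+ // (the exit re-enters the interpreter at this instruction's PC when a
+ // runtime guard below fails)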
+ uint8_t* side_exit = ujit_side_exit(ocb, ctx, ctx->pc);
+
+ // Load self from CFP
+ mov(cb, REG0, member_opnd(REG_CFP, rb_control_frame_t, self));
+
+ // Check that the receiver is a heap object
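+ // (immediates such as fixnums, flonums and static symbols have low tag
+ // bits set; Qfalse and Qnil don't, so they get their own comparisons below)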
+ test(cb, REG0, imm_opnd(RUBY_IMMEDIATE_MASK));
+ jnz_ptr(cb, side_exit);
+ cmp(cb, REG0, imm_opnd(Qfalse));
+ je_ptr(cb, side_exit);
+ cmp(cb, REG0, imm_opnd(Qnil));
+ je_ptr(cb, side_exit);
+
+ // Bail if the receiver's class differs from the compile-time call cache class
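+ // (each class carries a unique serial, so comparing serials is a cheap way
+ // to check that the receiver's class matches the one cached at compile time)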
+ x86opnd_t klass_opnd = mem_opnd(64, REG0, offsetof(struct RBasic, klass));
+ mov(cb, REG1, klass_opnd);
+ x86opnd_t serial_opnd = mem_opnd(64, REG1, offsetof(struct RClass, class_serial));
+ cmp(cb, serial_opnd, imm_opnd(ic->entry->class_serial));
+ jne_ptr(cb, side_exit);
+
+ // Bail if the ivars are not in the extended table
+ // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
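+ // (a T_OBJECT either embeds its ivars in the object slot, with
+ // ROBJECT_EMBED set, or stores them in a separately allocated table;
+ // only the extended-table layout is handled here)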
+ x86opnd_t flags_opnd = member_opnd(REG0, struct RBasic, flags);
+ test(cb, flags_opnd, imm_opnd(ROBJECT_EMBED));
+ jnz_ptr(cb, side_exit);
+
+ // If we can't guarantee that the extended table is big enough
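+ // NOTE: this relies on extended tables always holding more than
+ // ROBJECT_EMBED_LEN_MAX slots, so smaller indices need no runtime check.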
+ if (ivar_index >= ROBJECT_EMBED_LEN_MAX + 1)
+ {
+ // Check that the slot is inside the extended table (num_slots > index)
+ x86opnd_t num_slots = mem_opnd(32, REG0, offsetof(struct RObject, as.heap.numiv));
+ cmp(cb, num_slots, imm_opnd(ivar_index));
+ jle_ptr(cb, side_exit);
+ }
+
+ // Get a pointer to the extended table
+ x86opnd_t tbl_opnd = mem_opnd(64, REG0, offsetof(struct RObject, as.heap.ivptr));
+ mov(cb, REG0, tbl_opnd);
+
+ // Pop the value to write from the stack
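+ // (ctx_stack_pop() only updates the compile-time stack model and returns a
+ // memory operand for the popped slot; the mov below does the actual read)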
+ x86opnd_t stack_top = ctx_stack_pop(ctx, 1);
+ mov(cb, REG1, stack_top);
+
+ // Bail if the value to write is a heap object, because it would need
+ // a write barrier (Qnil and Qfalse conservatively take this exit too)
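+ // (immediates are not tracked by the GC and need no barrier; heap values
+ // would have to go through RB_OBJ_WRITE(), so we side-exit instead)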
+ test(cb, REG1, imm_opnd(RUBY_IMMEDIATE_MASK));
+ jz_ptr(cb, side_exit);
+
+ // Write the ivar to the extended table
+ x86opnd_t ivar_opnd = mem_opnd(64, REG0, sizeof(VALUE) * ivar_index);
+ mov(cb, ivar_opnd, REG1);
+
+ return true;
+}
+
+static bool
gen_opt_minus(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
// Create a side-exit to fall back to the interpreter
@@ -1104,6 +1181,7 @@ rb_ujit_init(void)
st_insert(gen_fns, (st_data_t)BIN(getlocal_WC_0), (st_data_t)&gen_getlocal_wc0);
st_insert(gen_fns, (st_data_t)BIN(setlocal_WC_0), (st_data_t)&gen_setlocal_wc0);
st_insert(gen_fns, (st_data_t)BIN(getinstancevariable), (st_data_t)&gen_getinstancevariable);
+ st_insert(gen_fns, (st_data_t)BIN(setinstancevariable), (st_data_t)&gen_setinstancevariable);
st_insert(gen_fns, (st_data_t)BIN(opt_minus), (st_data_t)&gen_opt_minus);
st_insert(gen_fns, (st_data_t)BIN(opt_send_without_block), (st_data_t)&gen_opt_send_without_block);