author     Maxime Chevalier-Boisvert <maxime.chevalierboisvert@shopify.com>   2021-03-31 15:54:46 -0400
committer  Alan Wu <XrXr@users.noreply.github.com>                            2021-10-20 18:19:32 -0400
commit     aee44e4f2bcd795b5a81c72cb75d742103bb070b
tree       bded8242c24efc33da06cea490dbefc75334ff15
parent     e47dd8bb88a10eb20afb8dc37f19ae6ee8e56f4b
Part 1 of improved type tracking logic
-rw-r--r--  yjit_codegen.c |  89
-rw-r--r--  yjit_core.c    | 119
-rw-r--r--  yjit_core.h    |  72
3 files changed, 171 insertions(+), 109 deletions(-)
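
In short: this patch replaces the plain int type tags taken from Ruby's
ruby_value_type (T_NONE, T_FIXNUM, ...) with a one-byte val_type_t descriptor
(coarse is_heap / is_imm bits plus a 3-bit type_enum_t tag), and block-version
compatibility is now computed per value by type_diff(). The standalone sketch
below assembles those definitions into a compilable illustration; the
TYPE_UNKNOWN macro and the main() driver are assumptions for demonstration
only, not part of the commit.

/* Illustrative sketch only -- not part of the patch. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* 3-bit specific-type tag, as added to yjit_core.h below */
typedef enum yjit_type_enum
{
    ETYPE_UNKNOWN = 0,
    ETYPE_NIL,
    ETYPE_FIXNUM,
} type_enum_t;

/* One-byte type descriptor: two coarse flags plus the specific tag */
typedef struct yjit_type_struct
{
    uint8_t is_heap : 1; /* definitely a heap object */
    uint8_t is_imm  : 1; /* definitely an immediate */
    uint8_t type    : 3; /* a type_enum_t value, if known */
} val_type_t;

/* Constructors mirroring the TYPE_* macros in the patch.
   TYPE_UNKNOWN itself is not shown in the hunks; an all-zero value is assumed. */
#define TYPE_UNKNOWN ( (val_type_t){ 0 } )
#define TYPE_IMM     ( (val_type_t){ .is_imm = 1 } )
#define TYPE_NIL     ( (val_type_t){ .is_imm = 1, .type = ETYPE_NIL } )
#define TYPE_FIXNUM  ( (val_type_t){ .is_imm = 1, .type = ETYPE_FIXNUM } )

/* Same logic as the type_diff() added in yjit_core.c:
   0 = identical, >0 = different but compatible, INT_MAX = incompatible */
static int type_diff(val_type_t src, val_type_t dst)
{
    if (src.type != dst.type && dst.type != ETYPE_UNKNOWN)
        return INT_MAX;
    if (src.is_heap && !dst.is_heap)
        return INT_MAX;
    if (src.is_imm && !dst.is_imm)
        return INT_MAX;
    if (src.is_heap != dst.is_heap)
        return 1;
    if (src.is_imm != dst.is_imm)
        return 1;
    if (src.type != dst.type)
        return 1;
    return 0;
}

int main(void)
{
    printf("%d\n", type_diff(TYPE_FIXNUM, TYPE_FIXNUM));         /* 0: identical types */
    printf("%d\n", type_diff(TYPE_UNKNOWN, TYPE_IMM));           /* 1: differs only in the is_imm bit */
    printf("%d\n", type_diff(TYPE_NIL, TYPE_FIXNUM) == INT_MAX); /* 1: incompatible tags */
    return 0;
}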
diff --git a/yjit_codegen.c b/yjit_codegen.c
index 56b82a5b17..12aa893993 100644
--- a/yjit_codegen.c
+++ b/yjit_codegen.c
@@ -369,8 +369,8 @@ static codegen_status_t
gen_dup(jitstate_t* jit, ctx_t* ctx)
{
// Get the top value and its type
+ val_type_t dup_type = ctx_get_temp_type(ctx, 0);
x86opnd_t dup_val = ctx_stack_pop(ctx, 0);
- int dup_type = ctx_get_top_type(ctx);
// Push the same value on top
x86opnd_t loc0 = ctx_stack_push(ctx, dup_type);
@@ -399,7 +399,7 @@ static codegen_status_t
gen_putnil(jitstate_t* jit, ctx_t* ctx)
{
// Write constant at SP
- x86opnd_t stack_top = ctx_stack_push(ctx, T_NIL);
+ x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_NIL);
mov(cb, stack_top, imm_opnd(Qnil));
return YJIT_KEEP_COMPILING;
}
@@ -412,7 +412,7 @@ gen_putobject(jitstate_t* jit, ctx_t* ctx)
if (FIXNUM_P(arg))
{
// Keep track of the fixnum type tag
- x86opnd_t stack_top = ctx_stack_push(ctx, T_FIXNUM);
+ x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_FIXNUM);
x86opnd_t imm = imm_opnd((int64_t)arg);
@@ -429,7 +429,7 @@ gen_putobject(jitstate_t* jit, ctx_t* ctx)
}
else if (arg == Qtrue || arg == Qfalse)
{
- x86opnd_t stack_top = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_UNKNOWN);
mov(cb, stack_top, imm_opnd((int64_t)arg));
}
else
@@ -441,7 +441,7 @@ gen_putobject(jitstate_t* jit, ctx_t* ctx)
mov(cb, RAX, mem_opnd(64, RAX, 0));
// Write argument at SP
- x86opnd_t stack_top = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_UNKNOWN);
mov(cb, stack_top, RAX);
}
@@ -455,7 +455,7 @@ gen_putobject_int2fix(jitstate_t* jit, ctx_t* ctx)
int cst_val = (opcode == BIN(putobject_INT2FIX_0_))? 0:1;
// Write constant at SP
- x86opnd_t stack_top = ctx_stack_push(ctx, T_FIXNUM);
+ x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_FIXNUM);
mov(cb, stack_top, imm_opnd(INT2FIX(cst_val)));
return YJIT_KEEP_COMPILING;
@@ -468,7 +468,7 @@ gen_putself(jitstate_t* jit, ctx_t* ctx)
mov(cb, RAX, member_opnd(REG_CFP, rb_control_frame_t, self));
// Write it on the stack
- x86opnd_t stack_top = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_UNKNOWN);
mov(cb, stack_top, RAX);
return YJIT_KEEP_COMPILING;
@@ -488,7 +488,7 @@ gen_getlocal_wc0(jitstate_t* jit, ctx_t* ctx)
mov(cb, REG0, mem_opnd(64, REG0, offs));
// Write the local at SP
- x86opnd_t stack_top = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_UNKNOWN);
mov(cb, stack_top, REG0);
return YJIT_KEEP_COMPILING;
@@ -515,7 +515,7 @@ gen_getlocal_wc1(jitstate_t* jit, ctx_t* ctx)
mov(cb, REG0, mem_opnd(64, REG0, offs));
// Write the local at SP
- x86opnd_t stack_top = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_UNKNOWN);
mov(cb, stack_top, REG0);
return YJIT_KEEP_COMPILING;
@@ -564,10 +564,10 @@ gen_setlocal_wc0(jitstate_t* jit, ctx_t* ctx)
// Check that `self` is a pointer to an object on the GC heap
static void
-guard_self_is_object(codeblock_t *cb, x86opnd_t self_opnd, uint8_t *side_exit, ctx_t *ctx)
+guard_self_is_heap(codeblock_t *cb, x86opnd_t self_opnd, uint8_t *side_exit, ctx_t *ctx)
{
// `self` is constant throughout the entire region, so we only need to do this check once.
- if (!ctx->self_is_object) {
+ if (!ctx->self_type.is_heap) {
test(cb, self_opnd, imm_opnd(RUBY_IMMEDIATE_MASK));
jnz_ptr(cb, side_exit);
cmp(cb, self_opnd, imm_opnd(Qfalse));
@@ -580,11 +580,10 @@ guard_self_is_object(codeblock_t *cb, x86opnd_t self_opnd, uint8_t *side_exit, c
// cmp(cb, self_opnd, imm_opnd(Qnil));
// jbe(cb, side_exit);
- ctx->self_is_object = true;
+ ctx->self_type.is_heap = 1;
}
}
-
// Generate a stubbed unconditional jump to the next bytecode instruction.
// Blocks that are part of a guard chain can use this to share the same successor.
static void
@@ -726,7 +725,7 @@ gen_getinstancevariable(jitstate_t* jit, ctx_t* ctx)
// Load self from CFP
mov(cb, REG0, member_opnd(REG_CFP, rb_control_frame_t, self));
- guard_self_is_object(cb, REG0, COUNTED_EXIT(side_exit, getivar_se_self_not_heap), ctx);
+ guard_self_is_heap(cb, REG0, COUNTED_EXIT(side_exit, getivar_se_self_not_heap), ctx);
// Guard that self has a known class
x86opnd_t klass_opnd = mem_opnd(64, REG0, offsetof(struct RBasic, klass));
@@ -754,7 +753,7 @@ gen_getinstancevariable(jitstate_t* jit, ctx_t* ctx)
je_ptr(cb, COUNTED_EXIT(side_exit, getivar_undef));
// Push the ivar on the stack
- x86opnd_t out_opnd = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t out_opnd = ctx_stack_push(ctx, TYPE_UNKNOWN);
mov(cb, out_opnd, REG1);
}
else {
@@ -787,7 +786,7 @@ gen_getinstancevariable(jitstate_t* jit, ctx_t* ctx)
je_ptr(cb, COUNTED_EXIT(side_exit, getivar_undef));
// Push the ivar on the stack
- x86opnd_t out_opnd = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t out_opnd = ctx_stack_push(ctx, TYPE_UNKNOWN);
mov(cb, out_opnd, REG0);
}
@@ -825,7 +824,7 @@ gen_setinstancevariable(jitstate_t* jit, ctx_t* ctx)
// Load self from CFP
mov(cb, REG0, member_opnd(REG_CFP, rb_control_frame_t, self));
- guard_self_is_object(cb, REG0, side_exit, ctx);
+ guard_self_is_heap(cb, REG0, side_exit, ctx);
// Bail if receiver class is different from compiled time call cache class
x86opnd_t klass_opnd = mem_opnd(64, REG0, offsetof(struct RBasic, klass));
@@ -882,17 +881,17 @@ gen_fixnum_cmp(jitstate_t* jit, ctx_t* ctx, cmov_fn cmov_op)
}
// Get the operands and destination from the stack
- int arg1_type = ctx_get_top_type(ctx);
+ val_type_t arg1_type = ctx_get_temp_type(ctx, 0);
x86opnd_t arg1 = ctx_stack_pop(ctx, 1);
- int arg0_type = ctx_get_top_type(ctx);
+ val_type_t arg0_type = ctx_get_temp_type(ctx, 0);
x86opnd_t arg0 = ctx_stack_pop(ctx, 1);
// If not fixnums, fall back
- if (arg0_type != T_FIXNUM) {
+ if (arg0_type.type != ETYPE_FIXNUM) {
test(cb, arg0, imm_opnd(RUBY_FIXNUM_FLAG));
jz_ptr(cb, side_exit);
}
- if (arg1_type != T_FIXNUM) {
+ if (arg1_type.type != ETYPE_FIXNUM) {
test(cb, arg1, imm_opnd(RUBY_FIXNUM_FLAG));
jz_ptr(cb, side_exit);
}
@@ -905,7 +904,7 @@ gen_fixnum_cmp(jitstate_t* jit, ctx_t* ctx, cmov_fn cmov_op)
cmov_op(cb, REG0, REG1);
// Push the output on the stack
- x86opnd_t dst = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t dst = ctx_stack_push(ctx, TYPE_UNKNOWN);
mov(cb, dst, REG0);
return YJIT_KEEP_COMPILING;
@@ -1007,7 +1006,7 @@ gen_opt_aref(jitstate_t *jit, ctx_t *ctx)
yjit_load_regs(cb);
// Push the return value onto the stack
- x86opnd_t stack_ret = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t stack_ret = ctx_stack_push(ctx, TYPE_UNKNOWN);
mov(cb, stack_ret, RAX);
}
@@ -1068,7 +1067,7 @@ gen_opt_aref(jitstate_t *jit, ctx_t *ctx)
yjit_load_regs(cb);
// Push the return value onto the stack
- x86opnd_t stack_ret = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t stack_ret = ctx_stack_push(ctx, TYPE_UNKNOWN);
mov(cb, stack_ret, RAX);
}
@@ -1092,17 +1091,17 @@ gen_opt_and(jitstate_t* jit, ctx_t* ctx)
}
// Get the operands and destination from the stack
- int arg1_type = ctx_get_top_type(ctx);
+ val_type_t arg1_type = ctx_get_temp_type(ctx, 0);
x86opnd_t arg1 = ctx_stack_pop(ctx, 1);
- int arg0_type = ctx_get_top_type(ctx);
+ val_type_t arg0_type = ctx_get_temp_type(ctx, 0);
x86opnd_t arg0 = ctx_stack_pop(ctx, 1);
// If not fixnums, fall back
- if (arg0_type != T_FIXNUM) {
+ if (arg0_type.type != ETYPE_FIXNUM) {
test(cb, arg0, imm_opnd(RUBY_FIXNUM_FLAG));
jz_ptr(cb, side_exit);
}
- if (arg1_type != T_FIXNUM) {
+ if (arg1_type.type != ETYPE_FIXNUM) {
test(cb, arg1, imm_opnd(RUBY_FIXNUM_FLAG));
jz_ptr(cb, side_exit);
}
@@ -1112,7 +1111,7 @@ gen_opt_and(jitstate_t* jit, ctx_t* ctx)
and(cb, REG0, arg1);
// Push the output on the stack
- x86opnd_t dst = ctx_stack_push(ctx, T_FIXNUM);
+ x86opnd_t dst = ctx_stack_push(ctx, TYPE_FIXNUM);
mov(cb, dst, REG0);
return YJIT_KEEP_COMPILING;
@@ -1130,14 +1129,20 @@ gen_opt_minus(jitstate_t* jit, ctx_t* ctx)
}
// Get the operands and destination from the stack
+ val_type_t arg1_type = ctx_get_temp_type(ctx, 0);
x86opnd_t arg1 = ctx_stack_pop(ctx, 1);
+ val_type_t arg0_type = ctx_get_temp_type(ctx, 0);
x86opnd_t arg0 = ctx_stack_pop(ctx, 1);
// If not fixnums, fall back
- test(cb, arg0, imm_opnd(RUBY_FIXNUM_FLAG));
- jz_ptr(cb, side_exit);
- test(cb, arg1, imm_opnd(RUBY_FIXNUM_FLAG));
- jz_ptr(cb, side_exit);
+ if (arg0_type.type != ETYPE_FIXNUM) {
+ test(cb, arg0, imm_opnd(RUBY_FIXNUM_FLAG));
+ jz_ptr(cb, side_exit);
+ }
+ if (arg1_type.type != ETYPE_FIXNUM) {
+ test(cb, arg1, imm_opnd(RUBY_FIXNUM_FLAG));
+ jz_ptr(cb, side_exit);
+ }
// Subtract arg0 - arg1 and test for overflow
mov(cb, REG0, arg0);
@@ -1146,7 +1151,7 @@ gen_opt_minus(jitstate_t* jit, ctx_t* ctx)
add(cb, REG0, imm_opnd(1));
// Push the output on the stack
- x86opnd_t dst = ctx_stack_push(ctx, T_FIXNUM);
+ x86opnd_t dst = ctx_stack_push(ctx, TYPE_FIXNUM);
mov(cb, dst, REG0);
return YJIT_KEEP_COMPILING;
@@ -1164,17 +1169,17 @@ gen_opt_plus(jitstate_t* jit, ctx_t* ctx)
}
// Get the operands and destination from the stack
- int arg1_type = ctx_get_top_type(ctx);
+ val_type_t arg1_type = ctx_get_temp_type(ctx, 0);
x86opnd_t arg1 = ctx_stack_pop(ctx, 1);
- int arg0_type = ctx_get_top_type(ctx);
+ val_type_t arg0_type = ctx_get_temp_type(ctx, 0);
x86opnd_t arg0 = ctx_stack_pop(ctx, 1);
// If not fixnums, fall back
- if (arg0_type != T_FIXNUM) {
+ if (arg0_type.type != ETYPE_FIXNUM) {
test(cb, arg0, imm_opnd(RUBY_FIXNUM_FLAG));
jz_ptr(cb, side_exit);
}
- if (arg1_type != T_FIXNUM) {
+ if (arg1_type.type != ETYPE_FIXNUM) {
test(cb, arg1, imm_opnd(RUBY_FIXNUM_FLAG));
jz_ptr(cb, side_exit);
}
@@ -1186,7 +1191,7 @@ gen_opt_plus(jitstate_t* jit, ctx_t* ctx)
jo_ptr(cb, side_exit);
// Push the output on the stack
- x86opnd_t dst = ctx_stack_push(ctx, T_FIXNUM);
+ x86opnd_t dst = ctx_stack_push(ctx, TYPE_FIXNUM);
mov(cb, dst, REG0);
return YJIT_KEEP_COMPILING;
@@ -1529,7 +1534,7 @@ gen_oswb_cfunc(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const
yjit_load_regs(cb);
// Push the return value on the Ruby stack
- x86opnd_t stack_ret = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t stack_ret = ctx_stack_push(ctx, TYPE_UNKNOWN);
mov(cb, stack_ret, RAX);
// If this function needs a Ruby stack frame
@@ -1677,7 +1682,7 @@ gen_oswb_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
// After the return, the JIT and interpreter SP will match up
ctx_t return_ctx = *ctx;
ctx_stack_pop(&return_ctx, argc + 1);
- ctx_stack_push(&return_ctx, T_NONE);
+ ctx_stack_push(&return_ctx, TYPE_UNKNOWN);
return_ctx.sp_offset = 0;
return_ctx.chain_depth = 0;
@@ -1912,7 +1917,7 @@ gen_opt_getinlinecache(jitstate_t *jit, ctx_t *ctx)
// FIXME: This leaks when st_insert raises NoMemoryError
if (!assume_stable_global_constant_state(jit->block)) return YJIT_CANT_COMPILE;
- x86opnd_t stack_top = ctx_stack_push(ctx, T_NONE);
+ x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_UNKNOWN);
jit_mov_gc_ptr(jit, cb, REG0, ice->value);
mov(cb, stack_top, REG0);
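
All of the yjit_codegen.c changes above follow one pattern: ctx_stack_push()
now records a val_type_t for each pushed value, and the RUBY_FIXNUM_FLAG guard
before opt_plus / opt_minus / opt_and / fixnum comparisons is emitted only when
ctx_get_temp_type() cannot already prove the operand is a fixnum. A minimal
standalone model of that decision (the helper name and driver are illustrative,
not YJIT code):

/* Illustrative sketch only -- not part of the patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { ETYPE_UNKNOWN = 0, ETYPE_NIL, ETYPE_FIXNUM } type_enum_t;
typedef struct { uint8_t is_heap : 1; uint8_t is_imm : 1; uint8_t type : 3; } val_type_t;

/* Would this operand still need a runtime fixnum-tag test and side exit? */
static bool needs_fixnum_guard(val_type_t t)
{
    return t.type != ETYPE_FIXNUM;
}

int main(void)
{
    val_type_t unknown = { 0 };
    val_type_t fixnum  = { .is_imm = 1, .type = ETYPE_FIXNUM };
    printf("unknown operand: guard = %d\n", needs_fixnum_guard(unknown)); /* 1 */
    printf("known fixnum:    guard = %d\n", needs_fixnum_guard(fixnum));  /* 0 */
    return 0;
}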
diff --git a/yjit_core.c b/yjit_core.c
index 4d710b7499..23bddfb573 100644
--- a/yjit_core.c
+++ b/yjit_core.c
@@ -34,12 +34,33 @@ Push one new value on the temp stack
Return a pointer to the new stack top
*/
x86opnd_t
-ctx_stack_push(ctx_t* ctx, int type)
+ctx_stack_push(ctx_t* ctx, val_type_t type)
{
// Keep track of the type of the value
- RUBY_ASSERT(type <= RUBY_T_MASK);
- if (ctx->stack_size < MAX_TEMP_TYPES)
+ if (ctx->stack_size < MAX_TEMP_TYPES) {
+ ctx->temp_mapping[ctx->stack_size] = MAP_STACK;
ctx->temp_types[ctx->stack_size] = type;
+ }
+
+ ctx->stack_size += 1;
+ ctx->sp_offset += 1;
+
+ // SP points just above the topmost value
+ int32_t offset = (ctx->sp_offset - 1) * sizeof(VALUE);
+ return mem_opnd(64, REG_SP, offset);
+}
+
+/*
+Push the self value on the stack
+*/
+x86opnd_t
+ctx_stack_push_self(ctx_t* ctx)
+{
+ // Keep track of the type of the value
+ if (ctx->stack_size < MAX_TEMP_TYPES) {
+ ctx->temp_mapping[ctx->stack_size] = MAP_SELF;
+ ctx->temp_types[ctx->stack_size] = ctx->self_type;
+ }
ctx->stack_size += 1;
ctx->sp_offset += 1;
@@ -66,8 +87,10 @@ ctx_stack_pop(ctx_t* ctx, size_t n)
for (size_t i = 0; i < n; ++i)
{
size_t idx = ctx->stack_size - i - 1;
- if (idx < MAX_TEMP_TYPES)
- ctx->temp_types[idx] = T_NONE;
+ if (idx < MAX_TEMP_TYPES) {
+ ctx->temp_types[idx] = TYPE_UNKNOWN;
+ ctx->temp_mapping[idx] = MAP_STACK;
+ }
}
ctx->stack_size -= n;
@@ -90,18 +113,58 @@ ctx_stack_opnd(ctx_t* ctx, int32_t idx)
}
/**
-Get the type of the topmost value on the temp stack
+Get the type of a value on the temp stack
Returns T_NONE if unknown
*/
-int
-ctx_get_top_type(ctx_t* ctx)
+val_type_t
+ctx_get_temp_type(const ctx_t* ctx, size_t idx)
{
- RUBY_ASSERT(ctx->stack_size > 0);
+ RUBY_ASSERT(idx < ctx->stack_size);
if (ctx->stack_size > MAX_TEMP_TYPES)
- return T_NONE;
+ return TYPE_UNKNOWN;
+
+ temp_mapping_t mapping = ctx->temp_mapping[ctx->stack_size - 1 - idx];
+
+ if (mapping.kind == TEMP_SELF)
+ return ctx->self_type;
+ else if (mapping.kind == TEMP_STACK)
+ return ctx->temp_types[ctx->stack_size - 1 - idx];
+
+ RUBY_ASSERT(false);
+ return TYPE_UNKNOWN;
+}
+
+/*
+Compute a difference between two value types
+Returns 0 if the two are the same
+Returns > 0 if different but compatible
+Returns INT_MAX if incompatible
+*/
+int type_diff(val_type_t src, val_type_t dst)
+{
+ RUBY_ASSERT(!src.is_heap || !src.is_imm);
+ RUBY_ASSERT(!dst.is_heap || !dst.is_imm);
+
+ if (src.type != dst.type && dst.type != ETYPE_UNKNOWN)
+ return INT_MAX;
+
+ if (src.is_heap && !dst.is_heap)
+ return INT_MAX;
+
+ if (src.is_imm && !dst.is_imm)
+ return INT_MAX;
+
+ if (src.is_heap != dst.is_heap)
+ return 1;
+
+ if (src.is_imm != dst.is_imm)
+ return 1;
- return ctx->temp_types[ctx->stack_size - 1];
+ if (src.type != dst.type)
+ return 1;
+
+ return 0;
}
/**
@@ -127,26 +190,30 @@ int ctx_diff(const ctx_t* src, const ctx_t* dst)
if (dst->sp_offset != src->sp_offset)
return INT_MAX;
- if (dst->self_is_object != src->self_is_object)
- return INT_MAX;
-
// Difference sum
int diff = 0;
- // For each temporary variable
- for (size_t i = 0; i < MAX_TEMP_TYPES; ++i)
+ // Check the type of self
+ int self_diff = type_diff(src->self_type, dst->self_type);
+
+ if (self_diff == INT_MAX)
+ return INT_MAX;
+
+ diff += self_diff;
+
+ // TODO: when we track local types, need to check them too
+
+ // For each value on the temp stack
+ for (size_t i = 0; i < src->stack_size; ++i)
{
- int t_src = src->temp_types[i];
- int t_dst = dst->temp_types[i];
+ val_type_t t_src = ctx_get_temp_type(src, i);
+ val_type_t t_dst = ctx_get_temp_type(dst, i);
+ int temp_diff = type_diff(t_src, t_dst);
- if (t_dst != t_src)
- {
- // It's OK to lose some type information
- if (t_dst == T_NONE)
- diff += 1;
- else
- return INT_MAX;
- }
+ if (temp_diff == INT_MAX)
+ return INT_MAX;
+
+ diff += temp_diff;
}
return diff;
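
The new temp_mapping array is what makes ctx_stack_push_self() useful: a stack
slot either carries its own type (MAP_STACK) or aliases self (MAP_SELF), and
ctx_get_temp_type() resolves through that mapping, so a slot holding self
always reflects the current self_type. A cut-down, standalone model of that
lookup (mini_ctx_t is hypothetical and omits the MAX_TEMP_TYPES overflow
fallback):

/* Illustrative sketch only -- not part of the patch. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TEMP_TYPES 8

typedef enum { ETYPE_UNKNOWN = 0, ETYPE_NIL, ETYPE_FIXNUM } type_enum_t;
typedef struct { uint8_t is_heap : 1; uint8_t is_imm : 1; uint8_t type : 3; } val_type_t;

/* Where a temp stack slot's type comes from (subset of temp_loc_t in the patch) */
typedef enum { TEMP_STACK = 0, TEMP_SELF } temp_loc_t;
typedef struct { uint8_t kind : 2; } temp_mapping_t;

/* Cut-down model of ctx_t: only the fields the mapping logic touches */
typedef struct {
    uint16_t stack_size;
    val_type_t self_type;
    val_type_t temp_types[MAX_TEMP_TYPES];
    temp_mapping_t temp_mapping[MAX_TEMP_TYPES];
} mini_ctx_t;

/* Mirrors ctx_get_temp_type(): idx 0 is the top of the stack */
static val_type_t get_temp_type(const mini_ctx_t *ctx, size_t idx)
{
    assert(idx < ctx->stack_size);
    temp_mapping_t mapping = ctx->temp_mapping[ctx->stack_size - 1 - idx];
    if (mapping.kind == TEMP_SELF)
        return ctx->self_type; /* slot aliases self: one tracked type, no stale copy */
    return ctx->temp_types[ctx->stack_size - 1 - idx];
}

int main(void)
{
    mini_ctx_t ctx = { 0 };
    ctx.self_type = (val_type_t){ .is_heap = 1 };

    /* push a fixnum literal, then push self (as ctx_stack_push / ctx_stack_push_self do) */
    ctx.temp_mapping[0] = (temp_mapping_t){ .kind = TEMP_STACK };
    ctx.temp_types[0]   = (val_type_t){ .is_imm = 1, .type = ETYPE_FIXNUM };
    ctx.temp_mapping[1] = (temp_mapping_t){ .kind = TEMP_SELF };
    ctx.stack_size = 2;

    printf("top slot (self), is_heap = %d\n", get_temp_type(&ctx, 0).is_heap); /* 1 */
    printf("next slot (fixnum), tag  = %d\n", get_temp_type(&ctx, 1).type);    /* 2 = ETYPE_FIXNUM */
    return 0;
}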
diff --git a/yjit_core.h b/yjit_core.h
index 020719dd43..6001838c50 100644
--- a/yjit_core.h
+++ b/yjit_core.h
@@ -26,10 +26,21 @@
// Default versioning context (no type information)
#define DEFAULT_CTX ( (ctx_t){ 0 } )
+typedef enum yjit_type_enum
+{
+ ETYPE_UNKNOWN = 0,
+ ETYPE_NIL,
+ ETYPE_FIXNUM,
+ //ETYPE_ARRAY
+ //ETYPE_SYMBOL
+ //ETYPE_STRING
+
+} type_enum_t;
+
/**
Represent the type of a value (local/stack/self) in YJIT
*/
-typedef struct yjit_val_type
+typedef struct yjit_type_struct
{
// Value is definitely a heap object
uint8_t is_heap : 1;
@@ -37,17 +48,8 @@ typedef struct yjit_val_type
// Value is definitely an immediate
uint8_t is_imm : 1;
- // Not Qfalse or Qnil
- // Is this useful?
- //uint8_t is_truthy: 1;
-
- // NOTE: we could switch to using an enum to track multiple types
- // but then we also need a value for "unknown type"
- uint8_t is_fixnum : 1;
- //uint8_t is_array : 1; // for opt_aref
- //uint8_t is_hash : 1; // for opt_aref
- //uint8_t is_symbol : 1;
- //uint8_t is_string : 1;
+ // Specific value type, if known
+ uint8_t type : 3;
} val_type_t;
STATIC_ASSERT(val_type_size, sizeof(val_type_t) == 1);
@@ -61,15 +63,16 @@ STATIC_ASSERT(val_type_size, sizeof(val_type_t) == 1);
// Could be any immediate
#define TYPE_IMM ( (val_type_t){ .is_imm = 1 } )
-// Immediate integer
-#define TYPE_FIXNUM ( (val_type_t){ .is_imm = 1, .is_fixnum = 1 } )
+// Immediate types
+#define TYPE_NIL ( (val_type_t){ .is_imm = 1, .type = ETYPE_NIL } )
+#define TYPE_FIXNUM ( (val_type_t){ .is_imm = 1, .type = ETYPE_FIXNUM } )
typedef enum yjit_temp_loc
{
TEMP_STACK = 0,
TEMP_SELF,
//TEMP_LOCAL, // Local with index
- //TEMP_CONST, // Small constant
+ //TEMP_CONST, // Small constant (0, 1, 2, Qnil, Qfalse, Qtrue)
} temp_loc_t;
@@ -91,26 +94,12 @@ STATIC_ASSERT(temp_mapping_size, sizeof(temp_mapping_t) == 1);
// Temp value is actually self
#define MAP_SELF ( (temp_mapping_t) { .kind = TEMP_SELF } )
-
-
-
-
-
-
/**
Code generation context
Contains information we can use to optimize code
*/
typedef struct yjit_context
{
- // Depth of this block in the sidechain (eg: inline-cache chain)
- uint8_t chain_depth;
-
- // Temporary variable types we keep track of
- // Values are `ruby_value_type`
- // T_NONE==0 is the unknown type
- uint8_t temp_types[MAX_TEMP_TYPES];
-
// Number of values currently on the temporary stack
uint16_t stack_size;
@@ -118,22 +107,23 @@ typedef struct yjit_context
// This represents how far the JIT's SP is from the "real" SP
int16_t sp_offset;
+ // Depth of this block in the sidechain (eg: inline-cache chain)
+ uint8_t chain_depth;
+ // Local variable types we keep track of
+ val_type_t local_types[MAX_LOCAL_TYPES];
+ // Temporary variable types we keep track of
+ val_type_t temp_types[MAX_TEMP_TYPES];
+ // Type we track for self
+ val_type_t self_type;
-
- // FIXME: no longer need this bit after type mapping refactoring
- // Whether we know self is a heap object
- bool self_is_object : 1;
-
-
-
-
-
-
+ // Mapping of temp stack entries to types we track
+ temp_mapping_t temp_mapping[MAX_TEMP_TYPES];
} ctx_t;
+STATIC_ASSERT(yjit_ctx_size, sizeof(ctx_t) <= 32);
// Tuple of (iseq, idx) used to identify basic blocks
typedef struct BlockId
@@ -224,10 +214,10 @@ typedef struct yjit_block_version
// Context object methods
x86opnd_t ctx_sp_opnd(ctx_t* ctx, int32_t offset_bytes);
-x86opnd_t ctx_stack_push(ctx_t* ctx, int type);
+x86opnd_t ctx_stack_push(ctx_t* ctx, val_type_t type);
x86opnd_t ctx_stack_pop(ctx_t* ctx, size_t n);
x86opnd_t ctx_stack_opnd(ctx_t* ctx, int32_t idx);
-int ctx_get_top_type(ctx_t* ctx);
+val_type_t ctx_get_temp_type(const ctx_t* ctx, size_t idx);
int ctx_diff(const ctx_t* src, const ctx_t* dst);
block_t* find_block_version(blockid_t blockid, const ctx_t* ctx);
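
With those declarations, the whole context -- stack and local types, the self
type, and the temp mapping -- is meant to fit in the 32 bytes asserted by the
new STATIC_ASSERT above. A quick size check, assuming MAX_LOCAL_TYPES and
MAX_TEMP_TYPES are both 8 (their definitions are outside this diff, so that
value is an assumption):

/* Illustrative sketch only -- not part of the patch. */
#include <assert.h>
#include <stdint.h>

typedef struct { uint8_t is_heap : 1; uint8_t is_imm : 1; uint8_t type : 3; } val_type_t;
typedef struct { uint8_t kind : 2; } temp_mapping_t;

/* Assumed limits; both are taken to be 8 here */
#define MAX_LOCAL_TYPES 8
#define MAX_TEMP_TYPES  8

/* Field order follows the rewritten ctx_t above */
typedef struct {
    uint16_t stack_size;                         /*  2 bytes */
    int16_t sp_offset;                           /*  2 bytes */
    uint8_t chain_depth;                         /*  1 byte  */
    val_type_t local_types[MAX_LOCAL_TYPES];     /*  8 bytes */
    val_type_t temp_types[MAX_TEMP_TYPES];       /*  8 bytes */
    val_type_t self_type;                        /*  1 byte  */
    temp_mapping_t temp_mapping[MAX_TEMP_TYPES]; /*  8 bytes */
} ctx_model_t;                                   /* 30 bytes of fields */

static_assert(sizeof(val_type_t) == 1, "val_type_t fits in one byte");
static_assert(sizeof(ctx_model_t) <= 32, "context state stays within 32 bytes");

int main(void) { return 0; }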