 test/ruby/test_zjit.rb    |  19
 zjit/src/codegen.rs       |  17
 zjit/src/hir.rs           | 122
 zjit/src/hir/opt_tests.rs |  65
 4 files changed, 174 insertions(+), 49 deletions(-)
diff --git a/test/ruby/test_zjit.rb b/test/ruby/test_zjit.rb
index 2e04dbddd5..64372c231c 100644
--- a/test/ruby/test_zjit.rb
+++ b/test/ruby/test_zjit.rb
@@ -2957,6 +2957,25 @@ class TestZJIT < Test::Unit::TestCase
}
end
+ def test_struct_set
+ assert_compiles '[42, 42, :frozen_error]', %q{
+ C = Struct.new(:foo).new(1)
+
+ def test
+ C.foo = Object.new
+ 42
+ end
+
+ r = [test, test]
+ C.freeze
+ r << begin
+ test
+ rescue FrozenError
+ :frozen_error
+ end
+ }, call_threshold: 2
+ end
+
def test_global_tracepoint
assert_compiles 'true', %q{
def foo = 1
diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs
index 7d72acfe14..fd72ea8a47 100644
--- a/zjit/src/codegen.rs
+++ b/zjit/src/codegen.rs
@@ -460,6 +460,8 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
Insn::LoadPC => gen_load_pc(asm),
Insn::LoadSelf => gen_load_self(),
&Insn::LoadField { recv, id, offset, return_type: _ } => gen_load_field(asm, opnd!(recv), id, offset),
+ &Insn::StoreField { recv, id, offset, val } => no_output!(gen_store_field(asm, opnd!(recv), id, offset, opnd!(val))),
+ &Insn::WriteBarrier { recv, val } => no_output!(gen_write_barrier(asm, opnd!(recv), opnd!(val), function.type_of(val))),
&Insn::IsBlockGiven => gen_is_block_given(jit, asm),
Insn::ArrayInclude { elements, target, state } => gen_array_include(jit, asm, opnds!(elements), opnd!(target), &function.frame_state(*state)),
&Insn::DupArrayInclude { ary, target, state } => gen_dup_array_include(jit, asm, ary, opnd!(target), &function.frame_state(state)),
@@ -1042,6 +1044,21 @@ fn gen_load_field(asm: &mut Assembler, recv: Opnd, id: ID, offset: i32) -> Opnd
asm.load(Opnd::mem(64, recv, offset))
}
+fn gen_store_field(asm: &mut Assembler, recv: Opnd, id: ID, offset: i32, val: Opnd) {
+ asm_comment!(asm, "Store field id={} offset={}", id.contents_lossy(), offset);
+ let recv = asm.load(recv);
+ asm.store(Opnd::mem(64, recv, offset), val);
+}
+
+fn gen_write_barrier(asm: &mut Assembler, recv: Opnd, val: Opnd, val_type: Type) {
+ // See RB_OBJ_WRITE/rb_obj_write: it's just an assignment followed by rb_obj_written() -> rb_gc_writebarrier()
+ if !val_type.is_immediate() {
+ asm_comment!(asm, "Write barrier");
+ let recv = asm.load(recv);
+ asm_ccall!(asm, rb_gc_writebarrier, recv, val);
+ }
+}
+
/// Compile an interpreter entry block to be inserted into an ISEQ
fn gen_entry_prologue(asm: &mut Assembler) {
asm_comment!(asm, "ZJIT entry trampoline");
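gen_write_barrier above skips the rb_gc_writebarrier call entirely when the HIR type proves the value is immediate. As a minimal standalone sketch of the equivalent runtime predicate: tag values here assume a USE_FLONUM build of CRuby (Qfalse = 0x00, Qnil = 0x04); the canonical check is RB_SPECIAL_CONST_P, which ZJIT answers statically from the Type instead.

```rust
// Standalone sketch, not part of the patch; constants assume a
// USE_FLONUM CRuby build.
const RUBY_IMMEDIATE_MASK: u64 = 0x07; // Fixnum/Flonum/static-Symbol tag bits
const RUBY_QNIL: u64 = 0x04;

/// True for values that are not GC-managed heap objects (Fixnum,
/// Flonum, static Symbol, nil, true, false); storing such a value
/// into a parent object never requires a write barrier.
fn is_special_const(val: u64) -> bool {
    (val & RUBY_IMMEDIATE_MASK) != 0 // tagged immediate
        || (val & !RUBY_QNIL) == 0   // Qfalse (0x00) or Qnil (0x04)
}
```

When the type cannot prove immediacy, the patch emits the call unconditionally and lets rb_gc_writebarrier decide whether anything needs remembering.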
diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs
index d68ddd2479..e1a61b2399 100644
--- a/zjit/src/hir.rs
+++ b/zjit/src/hir.rs
@@ -721,6 +721,10 @@ pub enum Insn {
/// Load cfp->self
LoadSelf,
LoadField { recv: InsnId, id: ID, offset: i32, return_type: Type },
+ /// Write `val` at the given offset from `recv`.
+ /// When writing a Ruby object into a Ruby object, one must emit GuardNotFrozen (or equivalent) before and WriteBarrier after (see the sketch after this hunk).
+ StoreField { recv: InsnId, id: ID, offset: i32, val: InsnId },
+ WriteBarrier { recv: InsnId, val: InsnId },
/// Get a local variable from a higher scope or the heap.
/// If `use_sp` is true, it uses the SP register to optimize the read.
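To make the ordering contract in the StoreField doc comment concrete, here is a toy sketch; the types are simplified stand-ins, not the real HIR builder API.

```rust
// Toy stand-ins for the real HIR instructions; illustrative only.
#[derive(Debug)]
enum ToyInsn {
    GuardNotFrozen,             // side-exits (raising FrozenError) if the receiver is frozen
    StoreField { offset: i32 }, // the raw store; invisible to the GC on its own
    WriteBarrier,               // records the new parent -> child edge
}

/// Emit a field write in the order the doc comment requires: guard
/// first (a frozen receiver must not be mutated at all), then store,
/// then barrier (the GC should only learn about edges that exist).
fn emit_store(offset: i32) -> Vec<ToyInsn> {
    vec![
        ToyInsn::GuardNotFrozen,
        ToyInsn::StoreField { offset },
        ToyInsn::WriteBarrier,
    ]
}
```

The struct-aset rewrite later in this patch follows exactly this order: GuardNotFrozen, StoreField, WriteBarrier.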
@@ -908,7 +912,7 @@ impl Insn {
| Insn::PatchPoint { .. } | Insn::SetIvar { .. } | Insn::SetClassVar { .. } | Insn::ArrayExtend { .. }
| Insn::ArrayPush { .. } | Insn::SideExit { .. } | Insn::SetGlobal { .. }
| Insn::SetLocal { .. } | Insn::Throw { .. } | Insn::IncrCounter(_) | Insn::IncrCounterPtr { .. }
- | Insn::CheckInterrupts { .. } | Insn::GuardBlockParamProxy { .. } | Insn::SetInstanceVariable { .. } => false,
+ | Insn::CheckInterrupts { .. } | Insn::GuardBlockParamProxy { .. } | Insn::SetInstanceVariable { .. } | Insn::StoreField { .. } | Insn::WriteBarrier { .. } => false,
_ => true,
}
}
@@ -1241,6 +1245,8 @@ impl<'a> std::fmt::Display for InsnPrinter<'a> {
Insn::LoadPC => write!(f, "LoadPC"),
Insn::LoadSelf => write!(f, "LoadSelf"),
&Insn::LoadField { recv, id, offset, return_type: _ } => write!(f, "LoadField {recv}, :{}@{:p}", id.contents_lossy(), self.ptr_map.map_offset(offset)),
+ &Insn::StoreField { recv, id, offset, val } => write!(f, "StoreField {recv}, :{}@{:p}, {val}", id.contents_lossy(), self.ptr_map.map_offset(offset)),
+ &Insn::WriteBarrier { recv, val } => write!(f, "WriteBarrier {recv}, {val}"),
Insn::SetIvar { self_val, id, val, .. } => write!(f, "SetIvar {self_val}, :{}, {val}", id.contents_lossy()),
Insn::SetInstanceVariable { self_val, id, val, .. } => write!(f, "SetInstanceVariable {self_val}, :{}, {val}", id.contents_lossy()),
Insn::GetGlobal { id, .. } => write!(f, "GetGlobal :{}", id.contents_lossy()),
@@ -1888,6 +1894,8 @@ impl Function {
&SetGlobal { id, val, state } => SetGlobal { id, val: find!(val), state },
&GetIvar { self_val, id, state } => GetIvar { self_val: find!(self_val), id, state },
&LoadField { recv, id, offset, return_type } => LoadField { recv: find!(recv), id, offset, return_type },
+ &StoreField { recv, id, offset, val } => StoreField { recv: find!(recv), id, offset, val: find!(val) },
+ &WriteBarrier { recv, val } => WriteBarrier { recv: find!(recv), val: find!(val) },
&SetIvar { self_val, id, val, state } => SetIvar { self_val: find!(self_val), id, val: find!(val), state },
&SetInstanceVariable { self_val, id, ic, val, state } => SetInstanceVariable { self_val: find!(self_val), id, ic, val: find!(val), state },
&GetClassVar { id, ic, state } => GetClassVar { id, ic, state },
@@ -1944,8 +1952,8 @@ impl Function {
| Insn::PatchPoint { .. } | Insn::SetIvar { .. } | Insn::SetClassVar { .. } | Insn::ArrayExtend { .. }
| Insn::ArrayPush { .. } | Insn::SideExit { .. } | Insn::SetLocal { .. } | Insn::IncrCounter(_)
| Insn::CheckInterrupts { .. } | Insn::GuardBlockParamProxy { .. } | Insn::IncrCounterPtr { .. }
- | Insn::SetInstanceVariable { .. } =>
- panic!("Cannot infer type of instruction with no output: {}", self.insns[insn.0]),
+ | Insn::SetInstanceVariable { .. } | Insn::StoreField { .. } | Insn::WriteBarrier { .. } =>
+ panic!("Cannot infer type of instruction with no output: {}. See Insn::has_output().", self.insns[insn.0]),
Insn::Const { val: Const::Value(val) } => Type::from_value(*val),
Insn::Const { val: Const::CBool(val) } => Type::from_cbool(*val),
Insn::Const { val: Const::CInt8(val) } => Type::from_cint(types::CInt8, *val as i64),
@@ -2463,53 +2471,62 @@ impl Function {
self.push_insn(block, Insn::SetIvar { self_val: recv, id, val, state });
self.make_equal_to(insn_id, val);
} else if def_type == VM_METHOD_TYPE_OPTIMIZED {
- let opt_type = unsafe { get_cme_def_body_optimized_type(cme) };
- if opt_type == OPTIMIZED_METHOD_TYPE_STRUCT_AREF {
- if unsafe { vm_ci_argc(ci) } != 0 {
- self.push_insn_id(block, insn_id); continue;
- }
- let index: i32 = unsafe { get_cme_def_body_optimized_index(cme) }
- .try_into()
- .unwrap();
- // We are going to use an encoding that takes a 4-byte immediate which
- // limits the offset to INT32_MAX.
- {
- let native_index = (index as i64) * (SIZEOF_VALUE as i64);
- if native_index > (i32::MAX as i64) {
+ let opt_type: OptimizedMethodType = unsafe { get_cme_def_body_optimized_type(cme) }.into();
+ match (opt_type, args.as_slice()) {
+ (OptimizedMethodType::StructAref, &[]) | (OptimizedMethodType::StructAset, &[_]) => {
+ let index: i32 = unsafe { get_cme_def_body_optimized_index(cme) }
+ .try_into()
+ .unwrap();
+ // We are going to use an encoding that takes a 4-byte immediate,
+ // which limits the offset to INT32_MAX.
+ {
+ let native_index = (index as i64) * (SIZEOF_VALUE as i64);
+ if native_index > (i32::MAX as i64) {
+ self.push_insn_id(block, insn_id); continue;
+ }
+ }
+ // Get the profiled type to check if the field is embedded or heap-allocated.
+ let Some(is_embedded) = self.profiled_type_of_at(recv, frame_state.insn_idx).map(|t| t.flags().is_struct_embedded()) else {
+ // No (monomorphic/skewed polymorphic) profile info
self.push_insn_id(block, insn_id); continue;
+ };
+ self.push_insn(block, Insn::PatchPoint { invariant: Invariant::MethodRedefined { klass, method: mid, cme }, state });
+ if klass.instance_can_have_singleton_class() {
+ self.push_insn(block, Insn::PatchPoint { invariant: Invariant::NoSingletonClass { klass }, state });
}
- }
- // Get the profiled type to check if the fields is embedded or heap allocated.
- let Some(is_embedded) = self.profiled_type_of_at(recv, frame_state.insn_idx).map(|t| t.flags().is_struct_embedded()) else {
- // No (monomorphic/skewed polymorphic) profile info
+ if let Some(profiled_type) = profiled_type {
+ recv = self.push_insn(block, Insn::GuardType { val: recv, guard_type: Type::from_profiled_type(profiled_type), state });
+ }
+ // All structs from the same Struct class should have the same
+ // length, so if our recv is embedded, all runtime structs of
+ // the same class should be embedded too, and likewise if it
+ // is heap-allocated.
+ //
+ // No need for a GuardShape.
+ let (target, offset) = if is_embedded {
+ let offset = RUBY_OFFSET_RSTRUCT_AS_ARY + (SIZEOF_VALUE_I32 * index);
+ (recv, offset)
+ } else {
+ let as_heap = self.push_insn(block, Insn::LoadField { recv, id: ID!(_as_heap), offset: RUBY_OFFSET_RSTRUCT_AS_HEAP_PTR, return_type: types::CPtr });
+ let offset = SIZEOF_VALUE_I32 * index;
+ (as_heap, offset)
+ };
+
+ let replacement = if let (OptimizedMethodType::StructAset, &[val]) = (opt_type, args.as_slice()) {
+ self.push_insn(block, Insn::GuardNotFrozen { val: recv, state });
+ self.push_insn(block, Insn::StoreField { recv: target, id: mid, offset, val });
+ self.push_insn(block, Insn::WriteBarrier { recv, val });
+ val
+ } else { // StructAref
+ self.push_insn(block, Insn::LoadField { recv: target, id: mid, offset, return_type: types::BasicObject })
+ };
+ self.make_equal_to(insn_id, replacement);
+ },
+ _ => {
+ self.set_dynamic_send_reason(insn_id, SendWithoutBlockNotOptimizedMethodTypeOptimized(opt_type));
self.push_insn_id(block, insn_id); continue;
- };
- self.push_insn(block, Insn::PatchPoint { invariant: Invariant::MethodRedefined { klass, method: mid, cme }, state });
- if klass.instance_can_have_singleton_class() {
- self.push_insn(block, Insn::PatchPoint { invariant: Invariant::NoSingletonClass { klass }, state });
- }
- if let Some(profiled_type) = profiled_type {
- recv = self.push_insn(block, Insn::GuardType { val: recv, guard_type: Type::from_profiled_type(profiled_type), state });
- }
- // All structs from the same Struct class should have the same
- // length. So if our recv is embedded all runtime
- // structs of the same class should be as well, and the same is
- // true of the converse.
- //
- // No need for a GuardShape.
- let replacement = if is_embedded {
- let offset = RUBY_OFFSET_RSTRUCT_AS_ARY + (SIZEOF_VALUE_I32 * index);
- self.push_insn(block, Insn::LoadField { recv, id: mid, offset, return_type: types::BasicObject })
- } else {
- let as_heap = self.push_insn(block, Insn::LoadField { recv, id: ID!(_as_heap), offset: RUBY_OFFSET_RSTRUCT_AS_HEAP_PTR, return_type: types::CPtr });
- let offset = SIZEOF_VALUE_I32 * index;
- self.push_insn(block, Insn::LoadField { recv: as_heap, id: mid, offset, return_type: types::BasicObject })
- };
- self.make_equal_to(insn_id, replacement);
- } else {
- self.set_dynamic_send_reason(insn_id, SendWithoutBlockNotOptimizedMethodTypeOptimized(OptimizedMethodType::from(opt_type)));
- self.push_insn_id(block, insn_id); continue;
- }
+ },
+ };
} else {
self.set_dynamic_send_reason(insn_id, SendWithoutBlockNotOptimizedMethodType(MethodType::from(def_type)));
self.push_insn_id(block, insn_id); continue;
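The StructAref/StructAset rewrite above reduces a field access to a base plus an i32 byte offset, bailing out to the generic send path when the offset would not fit the backend's 4-byte immediate encoding. A standalone sketch of that addressing decision, with assumed stand-ins for the RSTRUCT offset constants:

```rust
// Standalone sketch; the RSTRUCT offsets are assumed placeholders,
// not CRuby's real RUBY_OFFSET_RSTRUCT_* values.
const SIZEOF_VALUE: i64 = 8;
const OFFSET_AS_ARY: i32 = 16;      // stand-in for RUBY_OFFSET_RSTRUCT_AS_ARY
const OFFSET_AS_HEAP_PTR: i32 = 16; // stand-in for RUBY_OFFSET_RSTRUCT_AS_HEAP_PTR

enum FieldSite {
    /// Embedded struct: the field lives inline after the object header.
    Embedded { offset: i32 },
    /// Heap struct: load as.heap.ptr first, then index off that pointer.
    Heap { ptr_offset: i32, offset: i32 },
}

/// Mirrors the bail-out above: the backend encodes the offset as a
/// 4-byte immediate, so anything past i32::MAX falls back to a
/// generic send.
fn struct_field_site(is_embedded: bool, index: i32) -> Option<FieldSite> {
    let byte_index = (index as i64) * SIZEOF_VALUE;
    if byte_index > i32::MAX as i64 {
        return None;
    }
    let elem = byte_index as i32;
    Some(if is_embedded {
        FieldSite::Embedded { offset: OFFSET_AS_ARY + elem }
    } else {
        FieldSite::Heap { ptr_offset: OFFSET_AS_HEAP_PTR, offset: elem }
    })
}
```

Note that in the heap case the store goes through the loaded as.heap pointer while GuardNotFrozen and WriteBarrier still take the original receiver; the test_inline_struct_aset_heap snapshot below shows exactly that split (StoreField on v30, WriteBarrier on v29).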
@@ -3511,6 +3528,11 @@ impl Function {
&Insn::LoadField { recv, .. } => {
worklist.push_back(recv);
}
+ &Insn::StoreField { recv, val, .. }
+ | &Insn::WriteBarrier { recv, val } => {
+ worklist.push_back(recv);
+ worklist.push_back(val);
+ }
&Insn::GuardBlockParamProxy { state, .. } |
&Insn::GetGlobal { state, .. } |
&Insn::GetSpecialSymbol { state, .. } |
@@ -3851,7 +3873,8 @@ impl Function {
| Insn::GetClassVar { .. }
| Insn::GetSpecialNumber { .. }
| Insn::GetSpecialSymbol { .. }
- | Insn::GetLocal { .. } => {
+ | Insn::GetLocal { .. }
+ | Insn::StoreField { .. } => {
Ok(())
}
// Instructions with 1 Ruby object operand
@@ -3882,7 +3905,8 @@ impl Function {
Insn::SetIvar { self_val: left, val: right, .. }
| Insn::SetInstanceVariable { self_val: left, val: right, .. }
| Insn::NewRange { low: left, high: right, .. }
- | Insn::AnyToString { val: left, str: right, .. } => {
+ | Insn::AnyToString { val: left, str: right, .. }
+ | Insn::WriteBarrier { recv: left, val: right } => {
self.assert_subtype(insn_id, left, types::BasicObject)?;
self.assert_subtype(insn_id, right, types::BasicObject)
}
diff --git a/zjit/src/hir/opt_tests.rs b/zjit/src/hir/opt_tests.rs
index 2c56120bf6..be77055376 100644
--- a/zjit/src/hir/opt_tests.rs
+++ b/zjit/src/hir/opt_tests.rs
@@ -5336,6 +5336,71 @@ mod hir_opt_tests {
}
#[test]
+ fn test_inline_struct_aset_embedded() {
+ eval(r#"
+ C = Struct.new(:foo)
+ def test(o, v) = o.foo = v
+ value = Object.new
+ test C.new, value
+ test C.new, value
+ "#);
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@<compiled>:3:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ v2:BasicObject = GetLocal l0, SP@5
+ v3:BasicObject = GetLocal l0, SP@4
+ Jump bb2(v1, v2, v3)
+ bb1(v6:BasicObject, v7:BasicObject, v8:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v6, v7, v8)
+ bb2(v10:BasicObject, v11:BasicObject, v12:BasicObject):
+ PatchPoint MethodRedefined(C@0x1000, foo=@0x1008, cme:0x1010)
+ PatchPoint NoSingletonClass(C@0x1000)
+ v29:HeapObject[class_exact:C] = GuardType v11, HeapObject[class_exact:C]
+ v30:HeapObject[class_exact:C] = GuardNotFrozen v29
+ StoreField v29, :foo=@0x1038, v12
+ WriteBarrier v29, v12
+ CheckInterrupts
+ Return v12
+ ");
+ }
+
+ #[test]
+ fn test_inline_struct_aset_heap() {
+ eval(r#"
+ C = Struct.new(*(0..1000).map {|i| :"a#{i}"}, :foo)
+ def test(o, v) = o.foo = v
+ value = Object.new
+ test C.new, value
+ test C.new, value
+ "#);
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@<compiled>:3:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ v2:BasicObject = GetLocal l0, SP@5
+ v3:BasicObject = GetLocal l0, SP@4
+ Jump bb2(v1, v2, v3)
+ bb1(v6:BasicObject, v7:BasicObject, v8:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v6, v7, v8)
+ bb2(v10:BasicObject, v11:BasicObject, v12:BasicObject):
+ PatchPoint MethodRedefined(C@0x1000, foo=@0x1008, cme:0x1010)
+ PatchPoint NoSingletonClass(C@0x1000)
+ v29:HeapObject[class_exact:C] = GuardType v11, HeapObject[class_exact:C]
+ v30:CPtr = LoadField v29, :_as_heap@0x1038
+ v31:HeapObject[class_exact:C] = GuardNotFrozen v29
+ StoreField v30, :foo=@0x1039, v12
+ WriteBarrier v29, v12
+ CheckInterrupts
+ Return v12
+ ");
+ }
+
+ #[test]
fn test_array_reverse_returns_array() {
eval(r#"
def test = [].reverse