path: root/yjit/src/codegen.rs
Diffstat (limited to 'yjit/src/codegen.rs')
-rw-r--r--  yjit/src/codegen.rs  355
1 file changed, 297 insertions(+), 58 deletions(-)
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index b047aa3310..961d6438e3 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -30,7 +30,6 @@ pub use crate::virtualmem::CodePtr;
/// Status returned by code generation functions
#[derive(PartialEq, Debug)]
enum CodegenStatus {
- SkipNextInsn,
KeepCompiling,
EndBlock,
}
@@ -197,6 +196,13 @@ impl JITState {
self.insn_idx + insn_len(self.get_opcode()) as u16
}
+ /// Get the index of the instruction following the next instruction
+ fn next_next_insn_idx(&self) -> u16 {
+ let next_pc = unsafe { rb_iseq_pc_at_idx(self.iseq, self.next_insn_idx().into()) };
+ let next_opcode: usize = unsafe { rb_iseq_opcode_at_pc(self.iseq, next_pc) }.try_into().unwrap();
+ self.next_insn_idx() + insn_len(next_opcode) as u16
+ }
+
// Check if we are compiling the instruction at the stub PC
// Meaning we are compiling the instruction that is next to execute
pub fn at_current_insn(&self) -> bool {
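For context on the new helper: `next_next_insn_idx` computes where execution resumes after skipping both the current and the following instruction, which the `putobject`/`opt_ltlt` fusion below needs now that `SkipNextInsn` is gone. A standalone sketch of the two-hop arithmetic over a mock variable-width bytecode (the opcode widths are illustrative, not YJIT's real `insn_len` table):

```rust
// Hypothetical opcode widths: opcode 0 occupies 1 slot, opcode 1 occupies 3.
fn insn_len(opcode: usize) -> u16 {
    match opcode {
        0 => 1,
        1 => 3,
        _ => unreachable!(),
    }
}

// Mirrors next_insn_idx() followed by next_next_insn_idx(): decode the opcode
// at `idx`, step over it, then decode and step over the following instruction.
fn next_next_idx(iseq: &[usize], idx: u16) -> u16 {
    let next = idx + insn_len(iseq[idx as usize]);
    next + insn_len(iseq[next as usize])
}

fn main() {
    // opcode 1 (opcode + 2 operand slots), then opcode 0, then opcode 1
    let iseq = [1, 0, 0, 0, 1, 0, 0];
    assert_eq!(next_next_idx(&iseq, 0), 4); // 0 -> 3 -> 4
}
```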
@@ -250,6 +256,33 @@ impl JITState {
}
}
+ pub fn assume_expected_cfunc(
+ &mut self,
+ asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
+ class: VALUE,
+ method: ID,
+ cfunc: *mut c_void,
+ ) -> bool {
+ let cme = unsafe { rb_callable_method_entry(class, method) };
+
+ if cme.is_null() {
+ return false;
+ }
+
+ let def_type = unsafe { get_cme_def_type(cme) };
+ if def_type != VM_METHOD_TYPE_CFUNC {
+ return false;
+ }
+ if unsafe { get_mct_func(get_cme_def_body_cfunc(cme)) } != cfunc {
+ return false;
+ }
+
+ self.assume_method_lookup_stable(asm, ocb, cme);
+
+ true
+ }
+
pub fn assume_method_lookup_stable(&mut self, asm: &mut Assembler, ocb: &mut OutlinedCb, cme: CmePtr) -> Option<()> {
jit_ensure_block_entry_exit(self, asm, ocb)?;
self.method_lookup_assumptions.push(cme);
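`assume_expected_cfunc` bundles a recurring pattern: resolve the method, confirm it is still the exact C function being specialized for, and only then record the lookup-stability assumption. A self-contained sketch of that check-then-assume flow, with a hash map and plain function pointers standing in for the VM's method entries (all names here are illustrative):

```rust
use std::collections::HashMap;

type CFunc = fn(i64) -> i64;

struct Jit {
    // Lookups this compiled block depends on; redefining any of these
    // methods must invalidate the block.
    method_lookup_assumptions: Vec<(&'static str, &'static str)>,
}

impl Jit {
    /// Returns true only if (class, method) currently resolves to `expected`,
    /// recording the assumption so redefinition can invalidate the code.
    fn assume_expected_cfunc(
        &mut self,
        table: &HashMap<(&'static str, &'static str), CFunc>,
        class: &'static str,
        method: &'static str,
        expected: CFunc,
    ) -> bool {
        match table.get(&(class, method)) {
            Some(&f) if f as usize == expected as usize => {
                self.method_lookup_assumptions.push((class, method));
                true
            }
            _ => false, // missing method or a different implementation: bail
        }
    }
}

fn obj_equal(x: i64) -> i64 { x }

fn main() {
    let mut table = HashMap::new();
    table.insert(("NilClass", "=="), obj_equal as CFunc);
    let mut jit = Jit { method_lookup_assumptions: vec![] };
    assert!(jit.assume_expected_cfunc(&table, "NilClass", "==", obj_equal));
    assert!(!jit.assume_expected_cfunc(&table, "NilClass", "eql?", obj_equal));
    assert_eq!(jit.method_lookup_assumptions.len(), 1);
}
```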
@@ -567,14 +600,36 @@ fn verify_ctx(jit: &JITState, ctx: &Context) {
unsafe { CStr::from_ptr(rb_obj_info(val)).to_str().unwrap() }
}
+ // Some types such as CString only assert the class field of the object
+ // when there has never been a singleton class created for objects of that class.
+ // Once a singleton class has been created, they become their weaker
+ // `T*` variant, and more objects should pass the verification.
+ fn relax_type_with_singleton_class_assumption(ty: Type) -> Type {
+ if let Type::CString | Type::CArray | Type::CHash = ty {
+ if has_singleton_class_of(ty.known_class().unwrap()) {
+ match ty {
+ Type::CString => return Type::TString,
+ Type::CArray => return Type::TArray,
+ Type::CHash => return Type::THash,
+ _ => (),
+ }
+ }
+ }
+
+ ty
+ }
+
// Only able to check types when at current insn
assert!(jit.at_current_insn());
let self_val = jit.peek_at_self();
let self_val_type = Type::from(self_val);
+ let learned_self_type = ctx.get_opnd_type(SelfOpnd);
+ let learned_self_type = relax_type_with_singleton_class_assumption(learned_self_type);
+
// Verify self operand type
- if self_val_type.diff(ctx.get_opnd_type(SelfOpnd)) == TypeDiff::Incompatible {
+ if self_val_type.diff(learned_self_type) == TypeDiff::Incompatible {
panic!(
"verify_ctx: ctx self type ({:?}) incompatible with actual value of self {}",
ctx.get_opnd_type(SelfOpnd),
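To illustrate the relaxation: `C*` types pin the exact class on top of the basic type, while their `T*` counterparts assert only the latter, so the verifier must accept the weaker claim once a singleton class may exist. A minimal sketch, with `has_singleton_class` as a stand-in for the runtime query:

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Type {
    CString, CArray, CHash, // exact-class variants
    TString, TArray, THash, // basic-type-only variants
}

// Stand-in for the runtime check: has a singleton class ever been
// created for instances of this type's known class?
fn has_singleton_class(_ty: Type) -> bool {
    true // pretend one exists, forcing the relaxation
}

fn relax(ty: Type) -> Type {
    if has_singleton_class(ty) {
        match ty {
            Type::CString => Type::TString,
            Type::CArray => Type::TArray,
            Type::CHash => Type::THash,
            other => other,
        }
    } else {
        ty
    }
}

fn main() {
    assert_eq!(relax(Type::CString), Type::TString);
    assert_eq!(relax(Type::CArray), Type::TArray);
    assert_eq!(relax(Type::CHash), Type::THash);
    assert_eq!(relax(Type::TString), Type::TString); // already weak: unchanged
}
```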
@@ -587,6 +642,7 @@ fn verify_ctx(jit: &JITState, ctx: &Context) {
for i in 0..top_idx {
let learned_mapping = ctx.get_opnd_mapping(StackOpnd(i));
let learned_type = ctx.get_opnd_type(StackOpnd(i));
+ let learned_type = relax_type_with_singleton_class_assumption(learned_type);
let stack_val = jit.peek_at_stack(ctx, i as isize);
let val_type = Type::from(stack_val);
@@ -632,6 +688,7 @@ fn verify_ctx(jit: &JITState, ctx: &Context) {
let top_idx: usize = cmp::min(local_table_size as usize, MAX_TEMP_TYPES);
for i in 0..top_idx {
let learned_type = ctx.get_local_type(i);
+ let learned_type = relax_type_with_singleton_class_assumption(learned_type);
let local_val = jit.peek_at_local(i as i32);
let local_type = Type::from(local_val);
@@ -1047,7 +1104,16 @@ fn jump_to_next_insn(
jit: &mut JITState,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
-) -> Option<()> {
+) -> Option<CodegenStatus> {
+ end_block_with_jump(jit, asm, ocb, jit.next_insn_idx())
+}
+
+fn end_block_with_jump(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
+ continuation_insn_idx: u16,
+) -> Option<CodegenStatus> {
// Reset the depth since in current usages we only ever jump to
// chain_depth > 0 from the same instruction.
let mut reset_depth = asm.ctx;
@@ -1055,20 +1121,20 @@ fn jump_to_next_insn(
let jump_block = BlockId {
iseq: jit.iseq,
- idx: jit.next_insn_idx(),
+ idx: continuation_insn_idx,
};
// We are at the end of the current instruction. Record the boundary.
if jit.record_boundary_patch_point {
jit.record_boundary_patch_point = false;
- let exit_pc = unsafe { jit.pc.offset(insn_len(jit.opcode).try_into().unwrap()) };
+ let exit_pc = unsafe { rb_iseq_pc_at_idx(jit.iseq, continuation_insn_idx.into())};
let exit_pos = gen_outlined_exit(exit_pc, &reset_depth, ocb);
record_global_inval_patch(asm, exit_pos?);
}
// Generate the jump instruction
gen_direct_jump(jit, &reset_depth, jump_block, asm);
- Some(())
+ Some(EndBlock)
}
// Compile a sequence of bytecode instructions for a given basic block version.
@@ -1232,13 +1298,6 @@ pub fn gen_single_block(
// Move to the next instruction to compile
insn_idx += insn_len(opcode) as u16;
- // Move past next instruction when instructed
- if status == Some(SkipNextInsn) {
- let next_pc = unsafe { rb_iseq_pc_at_idx(iseq, insn_idx.into()) };
- let next_opcode: usize = unsafe { rb_iseq_opcode_at_pc(iseq, next_pc) }.try_into().unwrap();
- insn_idx += insn_len(next_opcode) as u16;
- }
-
// If the instruction terminates this block
if status == Some(EndBlock) {
break;
@@ -1468,7 +1527,7 @@ fn fuse_putobject_opt_ltlt(
asm.stack_pop(1);
fixnum_left_shift_body(asm, lhs, shift_amt as u64);
- return Some(SkipNextInsn);
+ return end_block_with_jump(jit, asm, ocb, jit.next_next_insn_idx());
}
return None;
}
@@ -2911,7 +2970,7 @@ fn gen_set_ivar(
// The current shape doesn't contain this iv, we need to transition to another shape.
let new_shape = if !shape_too_complex && receiver_t_object && ivar_index.is_none() {
let current_shape = comptime_receiver.shape_of();
- let next_shape = unsafe { rb_shape_get_next(current_shape, comptime_receiver, ivar_name) };
+ let next_shape = unsafe { rb_shape_get_next_no_warnings(current_shape, comptime_receiver, ivar_name) };
let next_shape_id = unsafe { rb_shape_id(next_shape) };
// If the VM ran out of shapes, or this class generated too many leaf,
@@ -4184,11 +4243,50 @@ fn gen_opt_newarray_send(
gen_opt_newarray_max(jit, asm, _ocb)
} else if method == ID!(hash) {
gen_opt_newarray_hash(jit, asm, _ocb)
+ } else if method == ID!(pack) {
+ gen_opt_newarray_pack(jit, asm, _ocb)
} else {
None
}
}
+fn gen_opt_newarray_pack(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ocb: &mut OutlinedCb,
+) -> Option<CodegenStatus> {
+ // num counts the format string along with the array elements (e.g. num == 4 for three elements plus the format)
+ let num = jit.get_arg(0).as_u32();
+
+ // Save the PC and SP because we may call #pack
+ jit_prepare_non_leaf_call(jit, asm);
+
+ extern "C" {
+ fn rb_vm_opt_newarray_pack(ec: EcPtr, num: u32, elts: *const VALUE, fmt: VALUE) -> VALUE;
+ }
+
+ let values_opnd = asm.ctx.sp_opnd(-(num as i32));
+ let values_ptr = asm.lea(values_opnd);
+
+ let fmt_string = asm.ctx.sp_opnd(-1);
+
+ let val_opnd = asm.ccall(
+ rb_vm_opt_newarray_pack as *const u8,
+ vec![
+ EC,
+ (num - 1).into(),
+ values_ptr,
+ fmt_string
+ ],
+ );
+
+ asm.stack_pop(num.as_usize());
+ let stack_ret = asm.stack_push(Type::CString);
+ asm.mov(stack_ret, val_opnd);
+
+ Some(KeepCompiling)
+}
+
fn gen_opt_newarray_hash(
jit: &mut JITState,
asm: &mut Assembler,
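The pack specialization relies on a fixed operand layout: `num` values sit on the temporary stack, the topmost being the format string and the `num - 1` beneath it the array elements, so the runtime call receives `num - 1` elements plus the format. A plain-Rust sketch of that slicing, with a `String` vector standing in for the VM stack:

```rust
fn opt_newarray_pack(stack: &mut Vec<String>, num: usize) -> String {
    // Topmost operand is the format string; the num - 1 below it are elements.
    let fmt = stack[stack.len() - 1].clone();
    let elts = stack[stack.len() - num..stack.len() - 1].to_vec();
    stack.truncate(stack.len() - num); // pop all num operands, like stack_pop(num)
    // Stand-in for the rb_vm_opt_newarray_pack(ec, num - 1, elts, fmt) call
    format!("{:?}.pack({:?})", elts, fmt)
}

fn main() {
    let mut stack: Vec<String> = vec!["a".into(), "b".into(), "c".into(), "C*".into()];
    let ret = opt_newarray_pack(&mut stack, 4);
    assert!(stack.is_empty());
    println!("{ret}"); // ["a", "b", "c"].pack("C*")
}
```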
@@ -5521,7 +5619,7 @@ fn jit_rb_str_uplus(
let recv_opnd = asm.stack_pop(1);
let recv_opnd = asm.load(recv_opnd);
let flags_opnd = asm.load(Opnd::mem(64, recv_opnd, RUBY_OFFSET_RBASIC_FLAGS));
- asm.test(flags_opnd, Opnd::Imm(RUBY_FL_FREEZE as i64));
+ asm.test(flags_opnd, Opnd::Imm(RUBY_FL_FREEZE as i64 | RSTRING_CHILLED as i64));
let ret_label = asm.new_label("stack_ret");
@@ -5691,7 +5789,7 @@ fn jit_rb_str_getbyte(
RUBY_OFFSET_RSTRING_LEN as i32,
);
- // Exit if the indes is out of bounds
+ // Exit if the index is out of bounds
asm.cmp(idx, str_len_opnd);
asm.jge(Target::side_exit(Counter::getbyte_idx_out_of_bounds));
@@ -6110,6 +6208,64 @@ fn gen_block_given(
asm.mov(out_opnd, block_given);
}
+// Codegen for rb_class_superclass()
+fn jit_rb_class_superclass(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ocb: &mut OutlinedCb,
+ _ci: *const rb_callinfo,
+ cme: *const rb_callable_method_entry_t,
+ _block: Option<crate::codegen::BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ extern "C" {
+ fn rb_class_superclass(klass: VALUE) -> VALUE;
+ }
+
+ if !jit_prepare_lazy_frame_call(jit, asm, cme, StackOpnd(0)) {
+ return false;
+ }
+
+ asm_comment!(asm, "Class#superclass");
+ let recv_opnd = asm.stack_opnd(0);
+ let ret = asm.ccall(rb_class_superclass as *const u8, vec![recv_opnd]);
+
+ asm.stack_pop(1);
+ let ret_opnd = asm.stack_push(Type::Unknown);
+ asm.mov(ret_opnd, ret);
+
+ true
+}
+
+fn jit_rb_case_equal(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ known_recv_class: Option<VALUE>,
+) -> bool {
+ if !jit.assume_expected_cfunc(asm, ocb, known_recv_class.unwrap(), ID!(eq), rb_obj_equal as _) {
+ return false;
+ }
+
+ asm_comment!(asm, "case_equal: {}#===", get_class_name(known_recv_class));
+
+ // Compare the arguments
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
+ asm.cmp(arg0, arg1);
+ let ret_opnd = asm.csel_e(Qtrue.into(), Qfalse.into());
+
+ let stack_ret = asm.stack_push(Type::UnknownImm);
+ asm.mov(stack_ret, ret_opnd);
+
+ true
+}
+
fn jit_thread_s_current(
_jit: &mut JITState,
asm: &mut Assembler,
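`jit_rb_case_equal` is sound because it first pins `==` on the receiver's class to `rb_obj_equal`, i.e. object identity, so `===` on `nil`, `true`, and `false` reduces to the register compare plus `csel_e` above. A sketch of the semantics being relied on (the tag values below are illustrative, not Ruby's real encodings):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
struct Value(usize);

// Illustrative immediate encodings; the real Qnil/Qtrue/Qfalse differ.
const QNIL: Value = Value(0x08);
const QTRUE: Value = Value(0x14);
const QFALSE: Value = Value(0x00);

// What the generated cmp + csel_e computes: identity of the two operands.
fn case_equal(recv: Value, arg: Value) -> Value {
    if recv == arg { QTRUE } else { QFALSE }
}

fn main() {
    assert_eq!(case_equal(QNIL, QNIL), QTRUE);    // nil === nil
    assert_eq!(case_equal(QNIL, QFALSE), QFALSE); // nil === false
}
```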
@@ -6805,20 +6961,20 @@ fn push_splat_args(required_args: u32, asm: &mut Assembler) {
asm.cmp(array_len_opnd, required_args.into());
asm.jne(Target::side_exit(Counter::guard_send_splatarray_length_not_equal));
- asm_comment!(asm, "Check last argument is not ruby2keyword hash");
-
- // Need to repeat this here to deal with register allocation
- let array_reg = asm.load(asm.stack_opnd(0));
-
- let ary_opnd = get_array_ptr(asm, array_reg);
-
- let last_array_value = asm.load(Opnd::mem(64, ary_opnd, (required_args as i32 - 1) * (SIZEOF_VALUE as i32)));
+ // Check last element of array if present
+ if required_args > 0 {
+ asm_comment!(asm, "Check last argument is not ruby2keyword hash");
- guard_object_is_not_ruby2_keyword_hash(
- asm,
- last_array_value,
- Counter::guard_send_splatarray_last_ruby2_keywords,
- );
+ // Need to repeat this here to deal with register allocation
+ let array_reg = asm.load(asm.stack_opnd(0));
+ let ary_opnd = get_array_ptr(asm, array_reg);
+ let last_array_value = asm.load(Opnd::mem(64, ary_opnd, (required_args as i32 - 1) * (SIZEOF_VALUE as i32)));
+ guard_object_is_not_ruby2_keyword_hash(
+ asm,
+ last_array_value,
+ Counter::guard_send_splatarray_last_ruby2_keywords,
+ );
+ }
asm_comment!(asm, "Push arguments from array");
let array_opnd = asm.stack_pop(1);
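The new `required_args > 0` guard matters because the old code unconditionally computed the last element's offset as `(required_args - 1) * SIZEOF_VALUE`, which for an empty splat array points one slot before the array. A tiny sketch of the off-by-one being avoided:

```rust
const SIZEOF_VALUE: i32 = 8; // word size on 64-bit targets

// Byte offset of the last element, if there is one. With required_args == 0
// the old code produced (0 - 1) * SIZEOF_VALUE and read out of bounds.
fn last_element_offset(required_args: i32) -> Option<i32> {
    if required_args > 0 {
        Some((required_args - 1) * SIZEOF_VALUE)
    } else {
        None // nothing to check: skip the ruby2_keywords guard entirely
    }
}

fn main() {
    assert_eq!(last_element_offset(3), Some(16));
    assert_eq!(last_element_offset(0), None);
}
```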
@@ -6962,6 +7118,8 @@ fn gen_send_iseq(
let kw_splat = flags & VM_CALL_KW_SPLAT != 0;
let splat_call = flags & VM_CALL_ARGS_SPLAT != 0;
+ let forwarding_call = unsafe { rb_get_iseq_flags_forwardable(iseq) };
+
// For computing offsets to callee locals
let num_params = unsafe { get_iseq_body_param_size(iseq) as i32 };
let num_locals = unsafe { get_iseq_body_local_table_size(iseq) as i32 };
@@ -7004,9 +7162,15 @@ fn gen_send_iseq(
exit_if_supplying_kw_and_has_no_kw(asm, supplying_kws, doing_kw_call)?;
exit_if_supplying_kws_and_accept_no_kwargs(asm, supplying_kws, iseq)?;
exit_if_doing_kw_and_splat(asm, doing_kw_call, flags)?;
- exit_if_wrong_number_arguments(asm, arg_setup_block, opts_filled, flags, opt_num, iseq_has_rest)?;
+ if !forwarding_call {
+ exit_if_wrong_number_arguments(asm, arg_setup_block, opts_filled, flags, opt_num, iseq_has_rest)?;
+ }
exit_if_doing_kw_and_opts_missing(asm, doing_kw_call, opts_missing)?;
exit_if_has_rest_and_optional_and_block(asm, iseq_has_rest, opt_num, iseq, block_arg)?;
+ if forwarding_call && flags & VM_CALL_OPT_SEND != 0 {
+ gen_counter_incr(asm, Counter::send_iseq_send_forwarding);
+ return None;
+ }
let block_arg_type = exit_if_unsupported_block_arg_type(jit, asm, block_arg)?;
// Bail if we can't drop extra arguments for a yield by just popping them
@@ -7515,25 +7679,26 @@ fn gen_send_iseq(
}
}
- // Nil-initialize missing optional parameters
- nil_fill(
- "nil-initialize missing optionals",
- {
- let begin = -argc + required_num + opts_filled;
- let end = -argc + required_num + opt_num;
+ if !forwarding_call {
+ // Nil-initialize missing optional parameters
+ nil_fill(
+ "nil-initialize missing optionals",
+ {
+ let begin = -argc + required_num + opts_filled;
+ let end = -argc + required_num + opt_num;
- begin..end
- },
- asm
- );
- // Nil-initialize the block parameter. It's the last parameter local
- if iseq_has_block_param {
- let block_param = asm.ctx.sp_opnd(-argc + num_params - 1);
- asm.store(block_param, Qnil.into());
- }
- // Nil-initialize non-parameter locals
- nil_fill(
- "nil-initialize locals",
+ begin..end
+ },
+ asm
+ );
+ // Nil-initialize the block parameter. It's the last parameter local
+ if iseq_has_block_param {
+ let block_param = asm.ctx.sp_opnd(-argc + num_params - 1);
+ asm.store(block_param, Qnil.into());
+ }
+ // Nil-initialize non-parameter locals
+ nil_fill(
+ "nil-initialize locals",
{
let begin = -argc + num_params;
let end = -argc + num_locals;
@@ -7541,7 +7706,13 @@ fn gen_send_iseq(
begin..end
},
asm
- );
+ );
+ }
+
+ if forwarding_call {
+ assert_eq!(1, num_params);
+ asm.mov(asm.stack_opnd(-1), VALUE(ci as usize).into());
+ }
// Points to the receiver operand on the stack unless a captured environment is used
let recv = match captured_opnd {
@@ -7560,7 +7731,13 @@ fn gen_send_iseq(
jit_save_pc(jit, asm);
// Adjust the callee's stack pointer
- let callee_sp = asm.lea(asm.ctx.sp_opnd(-argc + num_locals + VM_ENV_DATA_SIZE as i32));
+ let callee_sp = if forwarding_call {
+ let offs = num_locals + VM_ENV_DATA_SIZE as i32;
+ asm.lea(asm.ctx.sp_opnd(offs))
+ } else {
+ let offs = -argc + num_locals + VM_ENV_DATA_SIZE as i32;
+ asm.lea(asm.ctx.sp_opnd(offs))
+ };
let specval = if let Some(prev_ep) = prev_ep {
// We've already side-exited if the callee expects a block, so we
@@ -8200,6 +8377,13 @@ fn gen_struct_aref(
}
}
+ if c_method_tracing_currently_enabled(jit) {
+ // Struct accesses need to fire c_call and c_return events, which we can't support
+ // See :attr-tracing:
+ gen_counter_incr(asm, Counter::send_cfunc_tracing);
+ return None;
+ }
+
// This is a .send call and we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
handle_opt_send_shift_stack(asm, argc);
@@ -8244,6 +8428,13 @@ fn gen_struct_aset(
return None;
}
+ if c_method_tracing_currently_enabled(jit) {
+ // Struct accesses need to fire c_call and c_return events, which we can't support
+ // See :attr-tracing:
+ gen_counter_incr(asm, Counter::send_cfunc_tracing);
+ return None;
+ }
+
// This is a .send call and we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
handle_opt_send_shift_stack(asm, argc);
@@ -8349,6 +8540,14 @@ fn gen_send_general(
return Some(EndBlock);
}
+ let ci_flags = unsafe { vm_ci_flag(ci) };
+
+ // Dynamic stack layout. No good way to support without inlining.
+ if ci_flags & VM_CALL_FORWARDING != 0 {
+ gen_counter_incr(asm, Counter::send_iseq_forwarding);
+ return None;
+ }
+
let recv_idx = argc + if flags & VM_CALL_ARGS_BLOCKARG != 0 { 1 } else { 0 };
let comptime_recv = jit.peek_at_stack(&asm.ctx, recv_idx as isize);
let comptime_recv_klass = comptime_recv.class_of();
@@ -8510,10 +8709,8 @@ fn gen_send_general(
// Handling the C method tracing events for attr_accessor
// methods is easier than regular C methods as we know the
// "method" we are calling into never enables those tracing
- // events. Once global invalidation runs, the code for the
- // attr_accessor is invalidated and we exit at the closest
- // instruction boundary which is always outside of the body of
- // the attr_accessor code.
+ // events. We are never inside the code that needs to be
+ // invalidated when invalidation happens.
gen_counter_incr(asm, Counter::send_cfunc_tracing);
return None;
}
@@ -8773,11 +8970,16 @@ fn gen_send_general(
}
}
+/// Get class name from a class pointer.
+fn get_class_name(class: Option<VALUE>) -> String {
+ class.and_then(|class| unsafe {
+ cstr_to_rust_string(rb_class2name(class))
+ }).unwrap_or_else(|| "Unknown".to_string())
+}
+
/// Assemble "{class_name}#{method_name}" from a class pointer and a method ID
fn get_method_name(class: Option<VALUE>, mid: u64) -> String {
- let class_name = class.and_then(|class| unsafe {
- cstr_to_rust_string(rb_class2name(class))
- }).unwrap_or_else(|| "Unknown".to_string());
+ let class_name = get_class_name(class);
let method_name = if mid != 0 {
unsafe { cstr_to_rust_string(rb_id2name(mid)) }
} else {
@@ -8868,6 +9070,14 @@ fn gen_send(
})
}
+fn gen_sendforward(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
+) -> Option<CodegenStatus> {
+ return gen_send(jit, asm, ocb);
+}
+
fn gen_invokeblock(
jit: &mut JITState,
asm: &mut Assembler,
@@ -9051,6 +9261,14 @@ fn gen_invokesuper(
})
}
+fn gen_invokesuperforward(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
+) -> Option<CodegenStatus> {
+ return gen_invokesuper(jit, asm, ocb);
+}
+
fn gen_invokesuper_specialized(
jit: &mut JITState,
asm: &mut Assembler,
@@ -9113,6 +9331,10 @@ fn gen_invokesuper_specialized(
gen_counter_incr(asm, Counter::invokesuper_kw_splat);
return None;
}
+ if ci_flags & VM_CALL_FORWARDING != 0 {
+ gen_counter_incr(asm, Counter::invokesuper_forwarding);
+ return None;
+ }
// Ensure we haven't rebound this method onto an incompatible class.
// In the interpreter we try to avoid making this check by performing some
@@ -10026,8 +10248,10 @@ fn get_gen_fn(opcode: VALUE) -> Option<InsnGenFn> {
YARVINSN_getblockparam => Some(gen_getblockparam),
YARVINSN_opt_send_without_block => Some(gen_opt_send_without_block),
YARVINSN_send => Some(gen_send),
+ YARVINSN_sendforward => Some(gen_sendforward),
YARVINSN_invokeblock => Some(gen_invokeblock),
YARVINSN_invokesuper => Some(gen_invokesuper),
+ YARVINSN_invokesuperforward => Some(gen_invokesuperforward),
YARVINSN_leave => Some(gen_leave),
YARVINSN_getglobal => Some(gen_getglobal),
@@ -10112,6 +10336,10 @@ pub fn yjit_reg_method_codegen_fns() {
yjit_reg_method(rb_cString, "<<", jit_rb_str_concat);
yjit_reg_method(rb_cString, "+@", jit_rb_str_uplus);
+ yjit_reg_method(rb_cNilClass, "===", jit_rb_case_equal);
+ yjit_reg_method(rb_cTrueClass, "===", jit_rb_case_equal);
+ yjit_reg_method(rb_cFalseClass, "===", jit_rb_case_equal);
+
yjit_reg_method(rb_cArray, "empty?", jit_rb_ary_empty_p);
yjit_reg_method(rb_cArray, "length", jit_rb_ary_length);
yjit_reg_method(rb_cArray, "size", jit_rb_ary_length);
@@ -10122,6 +10350,8 @@ pub fn yjit_reg_method_codegen_fns() {
yjit_reg_method(rb_mKernel, "respond_to?", jit_obj_respond_to);
yjit_reg_method(rb_mKernel, "block_given?", jit_rb_f_block_given_p);
+ yjit_reg_method(rb_cClass, "superclass", jit_rb_class_superclass);
+
yjit_reg_method(rb_singleton_class(rb_cThread), "current", jit_thread_s_current);
}
}
@@ -10154,6 +10384,9 @@ fn yjit_reg_method(klass: VALUE, mid_str: &str, gen_fn: MethodGenFn) {
/// Global state needed for code generation
pub struct CodegenGlobals {
+ /// Flat vector of bits to store compressed context data
+ context_data: BitVector,
+
/// Inline code block (fast path)
inline_cb: CodeBlock,
@@ -10269,6 +10502,7 @@ impl CodegenGlobals {
ocb.unwrap().mark_all_executable();
let codegen_globals = CodegenGlobals {
+ context_data: BitVector::new(),
inline_cb: cb,
outlined_cb: ocb,
leave_exit_code,
@@ -10297,6 +10531,11 @@ impl CodegenGlobals {
unsafe { CODEGEN_GLOBALS.as_mut().is_some() }
}
+ /// Get a mutable reference to the context data
+ pub fn get_context_data() -> &'static mut BitVector {
+ &mut CodegenGlobals::get_instance().context_data
+ }
+
/// Get a mutable reference to the inline code block
pub fn get_inline_cb() -> &'static mut CodeBlock {
&mut CodegenGlobals::get_instance().inline_cb
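Finally, the new `context_data` field is a flat vector of bits for storing compressed `Context` data. YJIT's real `BitVector` lives elsewhere in the crate; a minimal append-only sketch of the idea:

```rust
// Bits are packed into bytes, least significant bit first.
struct BitVector {
    bytes: Vec<u8>,
    num_bits: usize,
}

impl BitVector {
    fn new() -> Self {
        BitVector { bytes: Vec::new(), num_bits: 0 }
    }

    fn push_bit(&mut self, bit: bool) {
        if self.num_bits % 8 == 0 {
            self.bytes.push(0); // start a fresh byte every 8 bits
        }
        if bit {
            *self.bytes.last_mut().unwrap() |= 1 << (self.num_bits % 8);
        }
        self.num_bits += 1;
    }

    fn read_bit(&self, idx: usize) -> bool {
        assert!(idx < self.num_bits);
        self.bytes[idx / 8] & (1 << (idx % 8)) != 0
    }
}

fn main() {
    let mut bv = BitVector::new();
    for &b in &[true, false, true, true] {
        bv.push_bit(b);
    }
    assert!(bv.read_bit(0) && !bv.read_bit(1) && bv.read_bit(3));
}
```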