path: root/yjit
author     Alan Wu <XrXr@users.noreply.github.com>  2022-10-04 22:48:05 -0400
committer  GitHub <noreply@github.com>              2022-10-04 22:48:05 -0400
commit     7293bfe1bf8a696bd218a52582e9026e5e638458 (patch)
tree       e45b5f267ce6567c783d1b14e16afb007de40497 /yjit
parent     5fc68e1101d38c9ced4d07ef1526467618109de3 (diff)
YJIT: add support for calling bmethods (#6489)
* YJIT: fix a parameter name

* YJIT: add support for calling bmethods

This commit adds support for the VM_METHOD_TYPE_BMETHOD method type in YJIT.
You can get these types of methods from facilities like
Kernel#define_singleton_method and Module#define_method.

Even though the bodies of these methods are blocks, the parameter setup for
them is exactly the same as for VM_METHOD_TYPE_ISEQ, so we can reuse the same
logic in gen_send_iseq(). You can see this from how vm_call_bmethod()
eventually calls setup_parameters_complex() with arg_setup_method.

Bmethods do need their frame environment to be set up differently. We handle
this by allowing callers of gen_send_iseq() to control the iseq, the frame
flag, and the prev_ep. The prev_ep goes into the same location as the block
handler would in an iseq method frame.

Co-authored-by: John Hawthorn <john@hawthorn.email>
Notes:
    Merged-By: maximecb <maximecb@ruby-lang.org>
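The frame-setup detail described above is that a bmethod frame stores a tagged
pointer to its outer environment (prev_ep) in the stack slot that an ordinary
iseq method frame uses for its block handler ("specval", sp[-2]). The following
standalone Rust sketch models that selection, mirroring the match added to
gen_push_frame() in the diff below; the enum and pointer types here are
simplified stand-ins rather than YJIT's actual Opnd/Assembler API, and the
low-bit tagging corresponds to the VM's guarded-pointer convention.

// Simplified stand-ins for illustration; YJIT operates on Opnd values instead.
#[derive(Clone, Copy)]
enum BlockHandler {
    None,
    CurrentFrame(u64), // carries the address of cfp->self in the real code
}

const VM_BLOCK_HANDLER_NONE: u64 = 0;

// Choose the value written to sp[-2]: a block handler for method frames,
// or the tagged prev_ep for bmethod (block) frames, never both.
fn specval(prev_ep: Option<*const u64>, block_handler: BlockHandler) -> u64 {
    match (prev_ep, block_handler) {
        (None, BlockHandler::None) => VM_BLOCK_HANDLER_NONE,
        (None, BlockHandler::CurrentFrame(cfp_self_addr)) => cfp_self_addr | 1,
        // Tag the environment pointer with the low bit, like VM_GUARDED_PREV_EP().
        (Some(ep), BlockHandler::None) => (ep as usize as u64) | 1,
        (_, _) => panic!("specval can only be one of prev_ep or block_handler"),
    }
}

fn main() {
    let outer_env = [0u64; 4];
    let tagged = specval(Some(outer_env.as_ptr()), BlockHandler::None);
    assert_eq!(tagged & 1, 1); // the VM can tell a prev_ep from a block handler
    assert_eq!(specval(None, BlockHandler::None), VM_BLOCK_HANDLER_NONE);
}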
Diffstat (limited to 'yjit')
-rw-r--r--  yjit/bindgen/src/main.rs          2
-rw-r--r--  yjit/src/codegen.rs              94
-rw-r--r--  yjit/src/cruby_bindings.inc.rs  246
-rw-r--r--  yjit/src/stats.rs                 3
4 files changed, 324 insertions, 21 deletions
diff --git a/yjit/bindgen/src/main.rs b/yjit/bindgen/src/main.rs
index c3d4a39a2b..ebeeab14b1 100644
--- a/yjit/bindgen/src/main.rs
+++ b/yjit/bindgen/src/main.rs
@@ -273,6 +273,7 @@ fn main() {
.allowlist_function("rb_RSTRING_PTR")
.allowlist_function("rb_RSTRING_LEN")
.allowlist_function("rb_ENCODING_GET")
+ .allowlist_function("rb_yjit_get_proc_ptr")
.allowlist_function("rb_yjit_exit_locations_dict")
.allowlist_function("rb_yjit_icache_invalidate")
@@ -332,6 +333,7 @@ fn main() {
.allowlist_function("rb_get_mct_argc")
.allowlist_function("rb_get_mct_func")
.allowlist_function("rb_get_def_iseq_ptr")
+ .allowlist_function("rb_get_def_bmethod_proc")
.allowlist_function("rb_iseq_encoded_size")
.allowlist_function("rb_get_iseq_body_local_iseq")
.allowlist_function("rb_get_iseq_body_iseq_encoded")
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index 42ec05aafc..22634a7de0 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -4035,6 +4035,7 @@ struct ControlFrame {
pc: Option<u64>,
frame_type: u32,
block_handler: BlockHandler,
+ prev_ep: Option<*const VALUE>,
cme: *const rb_callable_method_entry_t,
local_size: i32
}
@@ -4051,14 +4052,14 @@ struct ControlFrame {
// * Provided sp should point to the new frame's sp, immediately following locals and the environment
// * At entry, CFP points to the caller (not callee) frame
// * At exit, ec->cfp is updated to the pushed CFP
-// * CFP and SP registers are updated only if switch_in_jit is set
+// * CFP and SP registers are updated only if set_sp_cfp is set
// * Stack overflow is not checked (should be done by the caller)
// * Interrupts are not checked (should be done by the caller)
fn gen_push_frame(
_jit: &mut JITState,
_ctx: &mut Context,
asm: &mut Assembler,
- set_pc_cfp: bool, // if true CFP and SP will be switched to the callee
+ set_sp_cfp: bool, // if true CFP and SP will be switched to the callee
frame: ControlFrame,
) {
assert!(frame.local_size >= 0);
@@ -4076,7 +4077,7 @@ fn gen_push_frame(
}
}
- asm.comment("push cme, block handler, frame type");
+ asm.comment("push cme, specval, frame type");
// Write method entry at sp[-3]
// sp[-3] = me;
@@ -4084,18 +4085,24 @@ fn gen_push_frame(
// any cme we depend on become outdated. See yjit_method_lookup_change().
asm.store(Opnd::mem(64, sp, SIZEOF_VALUE_I32 * -3), VALUE::from(frame.cme).into());
- // Write block handler at sp[-2]
- // sp[-2] = block_handler;
- let block_handler: Opnd = match frame.block_handler {
- BlockHandler::None => {
+ // Write special value at sp[-2]. It's either a block handler or a pointer to
+ // the outer environment depending on the frame type.
+ // sp[-2] = specval;
+ let specval: Opnd = match (frame.prev_ep, frame.block_handler) {
+ (None, BlockHandler::None) => {
VM_BLOCK_HANDLER_NONE.into()
- },
- BlockHandler::CurrentFrame => {
+ }
+ (None, BlockHandler::CurrentFrame) => {
let cfp_self = asm.lea(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF));
asm.or(cfp_self, Opnd::Imm(1))
- },
+ }
+ (Some(prev_ep), BlockHandler::None) => {
+ let tagged_prev_ep = (prev_ep as usize) | 1;
+ VALUE(tagged_prev_ep).into()
+ }
+ (_, _) => panic!("specval can only be one of prev_ep or block_handler")
};
- asm.store(Opnd::mem(64, sp, SIZEOF_VALUE_I32 * -2), block_handler);
+ asm.store(Opnd::mem(64, sp, SIZEOF_VALUE_I32 * -2), specval);
// Write env flags at sp[-1]
// sp[-1] = frame_type;
@@ -4134,7 +4141,7 @@ fn gen_push_frame(
asm.mov(cfp_opnd(RUBY_OFFSET_CFP_SELF), frame.recv);
asm.mov(cfp_opnd(RUBY_OFFSET_CFP_BLOCK_CODE), 0.into());
- if set_pc_cfp {
+ if set_sp_cfp {
// Saving SP before calculating ep avoids a dependency on a register
// However this must be done after referencing frame.recv, which may be SP-relative
asm.mov(SP, sp);
@@ -4144,7 +4151,7 @@ fn gen_push_frame(
asm.comment("switch to new CFP");
let new_cfp = asm.lea(cfp_opnd(0));
- if set_pc_cfp {
+ if set_sp_cfp {
asm.mov(CFP, new_cfp);
asm.store(Opnd::mem(64, EC, RUBY_OFFSET_EC_CFP), CFP);
} else {
@@ -4274,6 +4281,7 @@ fn gen_send_cfunc(
cme,
recv,
sp,
+ prev_ep: None,
pc: Some(0),
iseq: None,
local_size: 0,
@@ -4443,17 +4451,59 @@ fn push_splat_args(required_args: i32, ctx: &mut Context, asm: &mut Assembler, o
}
}
+fn gen_send_bmethod(
+ jit: &mut JITState,
+ ctx: &mut Context,
+ asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
+ ci: *const rb_callinfo,
+ cme: *const rb_callable_method_entry_t,
+ block: Option<IseqPtr>,
+ argc: i32,
+) -> CodegenStatus {
+ let procv = unsafe { rb_get_def_bmethod_proc((*cme).def) };
+
+ let proc = unsafe { rb_yjit_get_proc_ptr(procv) };
+ let proc_block = unsafe { &(*proc).block };
+
+ if proc_block.type_ != block_type_iseq {
+ return CantCompile;
+ }
+
+ let capture = unsafe { proc_block.as_.captured.as_ref() };
+ let iseq = unsafe { *capture.code.iseq.as_ref() };
+
+ // Optimize for single ractor mode and avoid runtime check for
+ // "defined with an un-shareable Proc in a different Ractor"
+ if !assume_single_ractor_mode(jit, ocb) {
+ gen_counter_incr!(asm, send_bmethod_ractor);
+ return CantCompile;
+ }
+
+ // Passing a block to a block needs logic different from passing
+ // a block to a method and sometimes requires allocation. Bail for now.
+ if block.is_some() {
+ gen_counter_incr!(asm, send_bmethod_block_arg);
+ return CantCompile;
+ }
+
+ let frame_type = VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA;
+ gen_send_iseq(jit, ctx, asm, ocb, iseq, ci, frame_type, Some(capture.ep), cme, block, argc)
+}
+
fn gen_send_iseq(
jit: &mut JITState,
ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
+ iseq: *const rb_iseq_t,
ci: *const rb_callinfo,
+ frame_type: u32,
+ prev_ep: Option<*const VALUE>,
cme: *const rb_callable_method_entry_t,
block: Option<IseqPtr>,
argc: i32,
) -> CodegenStatus {
- let iseq = unsafe { get_def_iseq_ptr((*cme).def) };
let mut argc = argc;
let flags = unsafe { vm_ci_flag(ci) };
@@ -4893,8 +4943,6 @@ fn gen_send_iseq(
BlockHandler::None
};
- let frame_type = VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL;
-
// Setup the new frame
gen_push_frame(jit, ctx, asm, true, ControlFrame {
frame_type,
@@ -4902,6 +4950,7 @@ fn gen_send_iseq(
cme,
recv,
sp: callee_sp,
+ prev_ep,
iseq: Some(iseq),
pc: None, // We are calling into jitted code, which will set the PC as necessary
local_size: num_locals
@@ -5173,7 +5222,9 @@ fn gen_send_general(
match def_type {
VM_METHOD_TYPE_ISEQ => {
- return gen_send_iseq(jit, ctx, asm, ocb, ci, cme, block, argc);
+ let iseq = unsafe { get_def_iseq_ptr((*cme).def) };
+ let frame_type = VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL;
+ return gen_send_iseq(jit, ctx, asm, ocb, iseq, ci, frame_type, None, cme, block, argc);
}
VM_METHOD_TYPE_CFUNC => {
return gen_send_cfunc(
@@ -5243,8 +5294,7 @@ fn gen_send_general(
}
// Block method, e.g. define_method(:foo) { :my_block }
VM_METHOD_TYPE_BMETHOD => {
- gen_counter_incr!(asm, send_bmethod);
- return CantCompile;
+ return gen_send_bmethod(jit, ctx, asm, ocb, ci, cme, block, argc);
}
VM_METHOD_TYPE_ZSUPER => {
gen_counter_incr!(asm, send_zsuper_method);
@@ -5481,7 +5531,11 @@ fn gen_invokesuper(
ctx.clear_local_types();
match cme_def_type {
- VM_METHOD_TYPE_ISEQ => gen_send_iseq(jit, ctx, asm, ocb, ci, cme, block, argc),
+ VM_METHOD_TYPE_ISEQ => {
+ let iseq = unsafe { get_def_iseq_ptr((*cme).def) };
+ let frame_type = VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL;
+ gen_send_iseq(jit, ctx, asm, ocb, iseq, ci, frame_type, None, cme, block, argc)
+ }
VM_METHOD_TYPE_CFUNC => {
gen_send_cfunc(jit, ctx, asm, ocb, ci, cme, block, argc, ptr::null())
}
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index f58bf1ca05..3bffc30731 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -1,5 +1,128 @@
/* automatically generated by rust-bindgen 0.60.1 */
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct __BindgenBitfieldUnit<Storage> {
+ storage: Storage,
+}
+impl<Storage> __BindgenBitfieldUnit<Storage> {
+ #[inline]
+ pub const fn new(storage: Storage) -> Self {
+ Self { storage }
+ }
+}
+impl<Storage> __BindgenBitfieldUnit<Storage>
+where
+ Storage: AsRef<[u8]> + AsMut<[u8]>,
+{
+ #[inline]
+ pub fn get_bit(&self, index: usize) -> bool {
+ debug_assert!(index / 8 < self.storage.as_ref().len());
+ let byte_index = index / 8;
+ let byte = self.storage.as_ref()[byte_index];
+ let bit_index = if cfg!(target_endian = "big") {
+ 7 - (index % 8)
+ } else {
+ index % 8
+ };
+ let mask = 1 << bit_index;
+ byte & mask == mask
+ }
+ #[inline]
+ pub fn set_bit(&mut self, index: usize, val: bool) {
+ debug_assert!(index / 8 < self.storage.as_ref().len());
+ let byte_index = index / 8;
+ let byte = &mut self.storage.as_mut()[byte_index];
+ let bit_index = if cfg!(target_endian = "big") {
+ 7 - (index % 8)
+ } else {
+ index % 8
+ };
+ let mask = 1 << bit_index;
+ if val {
+ *byte |= mask;
+ } else {
+ *byte &= !mask;
+ }
+ }
+ #[inline]
+ pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+ debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+ let mut val = 0;
+ for i in 0..(bit_width as usize) {
+ if self.get_bit(i + bit_offset) {
+ let index = if cfg!(target_endian = "big") {
+ bit_width as usize - 1 - i
+ } else {
+ i
+ };
+ val |= 1 << index;
+ }
+ }
+ val
+ }
+ #[inline]
+ pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+ debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+ for i in 0..(bit_width as usize) {
+ let mask = 1 << i;
+ let val_bit_is_set = val & mask == mask;
+ let index = if cfg!(target_endian = "big") {
+ bit_width as usize - 1 - i
+ } else {
+ i
+ };
+ self.set_bit(index + bit_offset, val_bit_is_set);
+ }
+ }
+}
+#[repr(C)]
+pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
+impl<T> __BindgenUnionField<T> {
+ #[inline]
+ pub const fn new() -> Self {
+ __BindgenUnionField(::std::marker::PhantomData)
+ }
+ #[inline]
+ pub unsafe fn as_ref(&self) -> &T {
+ ::std::mem::transmute(self)
+ }
+ #[inline]
+ pub unsafe fn as_mut(&mut self) -> &mut T {
+ ::std::mem::transmute(self)
+ }
+}
+impl<T> ::std::default::Default for __BindgenUnionField<T> {
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+impl<T> ::std::clone::Clone for __BindgenUnionField<T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Self::new()
+ }
+}
+impl<T> ::std::marker::Copy for __BindgenUnionField<T> {}
+impl<T> ::std::fmt::Debug for __BindgenUnionField<T> {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ fmt.write_str("__BindgenUnionField")
+ }
+}
+impl<T> ::std::hash::Hash for __BindgenUnionField<T> {
+ fn hash<H: ::std::hash::Hasher>(&self, _state: &mut H) {}
+}
+impl<T> ::std::cmp::PartialEq for __BindgenUnionField<T> {
+ fn eq(&self, _other: &__BindgenUnionField<T>) -> bool {
+ true
+ }
+}
+impl<T> ::std::cmp::Eq for __BindgenUnionField<T> {}
pub const USE_RVARGC: u32 = 1;
pub const INTEGER_REDEFINED_OP_FLAG: u32 = 1;
pub const FLOAT_REDEFINED_OP_FLAG: u32 = 2;
@@ -157,6 +280,16 @@ extern "C" {
extern "C" {
pub static mut rb_cTrueClass: VALUE;
}
+pub type rb_block_call_func = ::std::option::Option<
+ unsafe extern "C" fn(
+ yielded_arg: VALUE,
+ callback_arg: VALUE,
+ argc: ::std::os::raw::c_int,
+ argv: *const VALUE,
+ blockarg: VALUE,
+ ) -> VALUE,
+>;
+pub type rb_block_call_func_t = rb_block_call_func;
extern "C" {
pub fn rb_ary_new_capa(capa: ::std::os::raw::c_long) -> VALUE;
}
@@ -514,6 +647,20 @@ pub const imemo_callinfo: imemo_type = 11;
pub const imemo_callcache: imemo_type = 12;
pub const imemo_constcache: imemo_type = 13;
pub type imemo_type = u32;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct vm_ifunc_argc {
+ pub min: ::std::os::raw::c_int,
+ pub max: ::std::os::raw::c_int,
+}
+#[repr(C)]
+pub struct vm_ifunc {
+ pub flags: VALUE,
+ pub reserved: VALUE,
+ pub func: rb_block_call_func_t,
+ pub data: *const ::std::os::raw::c_void,
+ pub argc: vm_ifunc_argc,
+}
pub const METHOD_VISI_UNDEF: rb_method_visibility_t = 0;
pub const METHOD_VISI_PUBLIC: rb_method_visibility_t = 1;
pub const METHOD_VISI_PRIVATE: rb_method_visibility_t = 2;
@@ -650,6 +797,36 @@ pub const BOP_AND: ruby_basic_operators = 27;
pub const BOP_OR: ruby_basic_operators = 28;
pub const BOP_LAST_: ruby_basic_operators = 29;
pub type ruby_basic_operators = u32;
+#[repr(C)]
+pub struct rb_captured_block {
+ pub self_: VALUE,
+ pub ep: *const VALUE,
+ pub code: rb_captured_block__bindgen_ty_1,
+}
+#[repr(C)]
+pub struct rb_captured_block__bindgen_ty_1 {
+ pub iseq: __BindgenUnionField<*const rb_iseq_t>,
+ pub ifunc: __BindgenUnionField<*const vm_ifunc>,
+ pub val: __BindgenUnionField<VALUE>,
+ pub bindgen_union_field: u64,
+}
+pub const block_type_iseq: rb_block_type = 0;
+pub const block_type_ifunc: rb_block_type = 1;
+pub const block_type_symbol: rb_block_type = 2;
+pub const block_type_proc: rb_block_type = 3;
+pub type rb_block_type = u32;
+#[repr(C)]
+pub struct rb_block {
+ pub as_: rb_block__bindgen_ty_1,
+ pub type_: rb_block_type,
+}
+#[repr(C)]
+pub struct rb_block__bindgen_ty_1 {
+ pub captured: __BindgenUnionField<rb_captured_block>,
+ pub symbol: __BindgenUnionField<VALUE>,
+ pub proc_: __BindgenUnionField<VALUE>,
+ pub bindgen_union_field: [u64; 3usize],
+}
pub type rb_control_frame_t = rb_control_frame_struct;
extern "C" {
pub static mut rb_mRubyVMFrozenCore: VALUE;
@@ -657,6 +834,69 @@ extern "C" {
extern "C" {
pub static mut rb_block_param_proxy: VALUE;
}
+#[repr(C)]
+pub struct rb_proc_t {
+ pub block: rb_block,
+ pub _bitfield_align_1: [u8; 0],
+ pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>,
+ pub __bindgen_padding_0: [u8; 7usize],
+}
+impl rb_proc_t {
+ #[inline]
+ pub fn is_from_method(&self) -> ::std::os::raw::c_uint {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) }
+ }
+ #[inline]
+ pub fn set_is_from_method(&mut self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ self._bitfield_1.set(0usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn is_lambda(&self) -> ::std::os::raw::c_uint {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) }
+ }
+ #[inline]
+ pub fn set_is_lambda(&mut self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ self._bitfield_1.set(1usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn is_isolated(&self) -> ::std::os::raw::c_uint {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u32) }
+ }
+ #[inline]
+ pub fn set_is_isolated(&mut self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ self._bitfield_1.set(2usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn new_bitfield_1(
+ is_from_method: ::std::os::raw::c_uint,
+ is_lambda: ::std::os::raw::c_uint,
+ is_isolated: ::std::os::raw::c_uint,
+ ) -> __BindgenBitfieldUnit<[u8; 1usize]> {
+ let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
+ __bindgen_bitfield_unit.set(0usize, 1u8, {
+ let is_from_method: u32 = unsafe { ::std::mem::transmute(is_from_method) };
+ is_from_method as u64
+ });
+ __bindgen_bitfield_unit.set(1usize, 1u8, {
+ let is_lambda: u32 = unsafe { ::std::mem::transmute(is_lambda) };
+ is_lambda as u64
+ });
+ __bindgen_bitfield_unit.set(2usize, 1u8, {
+ let is_isolated: u32 = unsafe { ::std::mem::transmute(is_isolated) };
+ is_isolated as u64
+ });
+ __bindgen_bitfield_unit
+ }
+}
pub const VM_SPECIAL_OBJECT_VMCORE: vm_special_object_type = 1;
pub const VM_SPECIAL_OBJECT_CBASE: vm_special_object_type = 2;
pub const VM_SPECIAL_OBJECT_CONST_BASE: vm_special_object_type = 3;
@@ -1060,6 +1300,9 @@ extern "C" {
extern "C" {
pub fn rb_RSTRING_PTR(str_: VALUE) -> *mut ::std::os::raw::c_char;
}
+extern "C" {
+ pub fn rb_yjit_get_proc_ptr(procv: VALUE) -> *mut rb_proc_t;
+}
pub type rb_seq_param_keyword_struct = rb_iseq_constant_body__bindgen_ty_1_rb_iseq_param_keyword;
extern "C" {
pub fn rb_insn_name(insn: VALUE) -> *const ::std::os::raw::c_char;
@@ -1128,6 +1371,9 @@ extern "C" {
pub fn rb_get_def_iseq_ptr(def: *mut rb_method_definition_t) -> *const rb_iseq_t;
}
extern "C" {
+ pub fn rb_get_def_bmethod_proc(def: *mut rb_method_definition_t) -> VALUE;
+}
+extern "C" {
pub fn rb_get_iseq_body_local_iseq(iseq: *const rb_iseq_t) -> *const rb_iseq_t;
}
extern "C" {
diff --git a/yjit/src/stats.rs b/yjit/src/stats.rs
index 7669308f9b..74319ec4ed 100644
--- a/yjit/src/stats.rs
+++ b/yjit/src/stats.rs
@@ -175,7 +175,6 @@ make_counters! {
send_optimized_method_call,
send_optimized_method_block_call,
send_missing_method,
- send_bmethod,
send_refined_method,
send_cfunc_ruby_array_varg,
send_cfunc_argc_mismatch,
@@ -198,6 +197,8 @@ make_counters! {
send_args_splat_non_iseq,
send_args_splat_cfunc,
send_iseq_ruby2_keywords,
+ send_bmethod_ractor,
+ send_bmethod_block_arg,
traced_cfunc_return,