summary refs log tree commit diff
path: root/vm_insnhelper.c
diff options
context:
space:
mode:
Diffstat (limited to 'vm_insnhelper.c')
-rw-r--r--  vm_insnhelper.c  324
1 files changed, 296 insertions, 28 deletions
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index a1893b1ba2..be43e737fe 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -1419,7 +1419,7 @@ vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic,
RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
if (BUILTIN_TYPE(obj) == T_OBJECT) {
- rb_check_frozen_internal(obj);
+ rb_check_frozen(obj);
attr_index_t index = rb_obj_ivar_set(obj, id, val);
@@ -2209,7 +2209,7 @@ vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
}
#if USE_DEBUG_COUNTER
- if (old_cc == empty_cc) {
+ if (!old_cc || old_cc == empty_cc) {
// empty
RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
}
@@ -2526,6 +2526,15 @@ vm_base_ptr(const rb_control_frame_t *cfp)
if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
+
+ if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
+ int lts = ISEQ_BODY(cfp->iseq)->local_table_size;
+ int params = ISEQ_BODY(cfp->iseq)->param.size;
+
+ CALL_INFO ci = (CALL_INFO)cfp->ep[-(VM_ENV_DATA_SIZE + (lts - params))]; // skip EP stuff, CI should be last local
+ bp += vm_ci_argc(ci);
+ }
+
if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD || VM_FRAME_BMETHOD_P(cfp)) {
/* adjust `self' */
bp += 1;
@@ -2594,6 +2603,7 @@ rb_simple_iseq_p(const rb_iseq_t *iseq)
ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}
@@ -2606,6 +2616,7 @@ rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}
@@ -2617,6 +2628,7 @@ rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.forwardable == FALSE &&
ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}
@@ -2905,6 +2917,26 @@ args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *cons
VALUE *const locals);
static VALUE
+vm_call_iseq_forwardable(rb_execution_context_t *ec, rb_control_frame_t *cfp,
+ struct rb_calling_info *calling)
+{
+ const struct rb_callcache *cc = calling->cc;
+ const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
+ int param_size = ISEQ_BODY(iseq)->param.size;
+ int local_size = ISEQ_BODY(iseq)->local_table_size;
+
+ // Setting up local size and param size
+ VM_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
+
+ local_size = local_size + vm_ci_argc(calling->cd->ci);
+ param_size = param_size + vm_ci_argc(calling->cd->ci);
+
+ cfp->sp[0] = (VALUE)calling->cd->ci;
+
+ return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param_size, local_size);
+}
+
+static VALUE
vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
struct rb_calling_info *calling)
{
@@ -3008,7 +3040,7 @@ warn_unused_block(const rb_callable_method_entry_t *cme, const rb_iseq_t *iseq,
if (0) {
fprintf(stderr, "SIZEOF_VALUE:%d\n", SIZEOF_VALUE);
- fprintf(stderr, "pc:%p def:%p\n", pc, cme->def);
+ fprintf(stderr, "pc:%p def:%p\n", pc, (void *)cme->def);
fprintf(stderr, "key:%p\n", (void *)key);
}
@@ -3037,6 +3069,9 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
const struct rb_callinfo *ci = calling->cd->ci;
const struct rb_callcache *cc = calling->cc;
+ VM_ASSERT((vm_ci_argc(ci), 1));
+ VM_ASSERT(vm_cc_cme(cc) != NULL);
+
if (UNLIKELY(!ISEQ_BODY(iseq)->param.flags.use_block &&
calling->block_handler != VM_BLOCK_HANDLER_NONE &&
!(vm_ci_flag(calling->cd->ci) & VM_CALL_SUPER))) {
@@ -3053,7 +3088,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
argument_arity_error(ec, iseq, calling->argc, lead_num, lead_num);
}
- VM_ASSERT(ci == calling->cd->ci);
+ //VM_ASSERT(ci == calling->cd->ci);
VM_ASSERT(cc == calling->cc);
if (vm_call_iseq_optimizable_p(ci, cc)) {
@@ -3140,9 +3175,127 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
}
}
+ // Called iseq is using ... param
+ // def foo(...) # <- iseq for foo will have "forwardable"
+ //
+ // We want to set the `...` local to the caller's CI
+ // foo(1, 2) # <- the ci for this should end up as `...`
+ //
+ // So hopefully the stack looks like:
+ //
+ // => 1
+ // => 2
+ // => *
+ // => **
+ // => &
+    //    => ...  # <- points at `foo`'s CI
+ // => cref_or_me
+ // => specval
+ // => type
+ //
+ if (ISEQ_BODY(iseq)->param.flags.forwardable) {
+ bool can_fastpath = true;
+
+ if ((vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
+ struct rb_forwarding_call_data * forward_cd = (struct rb_forwarding_call_data *)calling->cd;
+ if (vm_ci_argc(ci) != vm_ci_argc(forward_cd->caller_ci)) {
+ ci = vm_ci_new_runtime(
+ vm_ci_mid(ci),
+ vm_ci_flag(ci),
+ vm_ci_argc(ci),
+ vm_ci_kwarg(ci));
+ } else {
+ ci = forward_cd->caller_ci;
+ }
+ can_fastpath = false;
+ }
+ // C functions calling iseqs will stack allocate a CI,
+ // so we need to convert it to heap allocated
+ if (!vm_ci_markable(ci)) {
+ ci = vm_ci_new_runtime(
+ vm_ci_mid(ci),
+ vm_ci_flag(ci),
+ vm_ci_argc(ci),
+ vm_ci_kwarg(ci));
+ can_fastpath = false;
+ }
+ argv[param_size - 1] = (VALUE)ci;
+ CC_SET_FASTPATH(cc, vm_call_iseq_forwardable, can_fastpath);
+ return 0;
+ }
+
return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
}
+static void
+vm_adjust_stack_forwarding(const struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, int argc, VALUE splat)
+{
+ // This case is when the caller is using a ... parameter.
+ // For example `bar(...)`. The call info will have VM_CALL_FORWARDING
+ // In this case the caller's caller's CI will be on the stack.
+ //
+ // For example:
+ //
+ // def bar(a, b); a + b; end
+ // def foo(...); bar(...); end
+ // foo(1, 2) # <- this CI will be on the stack when we call `bar(...)`
+ //
+ // Stack layout will be:
+ //
+ // > 1
+ // > 2
+ // > CI for foo(1, 2)
+ // > cref_or_me
+ // > specval
+ // > type
+ // > receiver
+ // > CI for foo(1, 2), via `getlocal ...`
+ // > ( SP points here )
+ const VALUE * lep = VM_CF_LEP(cfp);
+
+ const rb_iseq_t *iseq;
+
+ // If we're in an escaped environment (lambda for example), get the iseq
+ // from the captured env.
+ if (VM_ENV_FLAGS(lep, VM_ENV_FLAG_ESCAPED)) {
+ rb_env_t * env = (rb_env_t *)lep[VM_ENV_DATA_INDEX_ENV];
+ iseq = env->iseq;
+ }
+ else { // Otherwise use the lep to find the caller
+ iseq = rb_vm_search_cf_from_ep(ec, cfp, lep)->iseq;
+ }
+
+ // Our local storage is below the args we need to copy
+ int local_size = ISEQ_BODY(iseq)->local_table_size + argc;
+
+ const VALUE * from = lep - (local_size + VM_ENV_DATA_SIZE - 1); // 2 for EP values
+ VALUE * to = cfp->sp - 1; // clobber the CI
+
+ if (RTEST(splat)) {
+ to -= 1; // clobber the splat array
+ CHECK_VM_STACK_OVERFLOW0(cfp, to, RARRAY_LEN(splat));
+ MEMCPY(to, RARRAY_CONST_PTR(splat), VALUE, RARRAY_LEN(splat));
+ to += RARRAY_LEN(splat);
+ }
+
+ CHECK_VM_STACK_OVERFLOW0(cfp, to, argc);
+ MEMCPY(to, from, VALUE, argc);
+ cfp->sp = to + argc;
+
+ // Stack layout should now be:
+ //
+ // > 1
+ // > 2
+ // > CI for foo(1, 2)
+ // > cref_or_me
+ // > specval
+ // > type
+ // > receiver
+ // > 1
+ // > 2
+ // > ( SP points here )
+}
+
static VALUE
vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
@@ -3150,8 +3303,31 @@ vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct r
const struct rb_callcache *cc = calling->cc;
const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
- const int param_size = ISEQ_BODY(iseq)->param.size;
- const int local_size = ISEQ_BODY(iseq)->local_table_size;
+ int param_size = ISEQ_BODY(iseq)->param.size;
+ int local_size = ISEQ_BODY(iseq)->local_table_size;
+
+ RUBY_ASSERT(!ISEQ_BODY(iseq)->param.flags.forwardable);
+
+ const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
+ return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
+}
+
+static VALUE
+vm_call_iseq_fwd_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
+{
+ RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
+
+ const struct rb_callcache *cc = calling->cc;
+ const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
+ int param_size = ISEQ_BODY(iseq)->param.size;
+ int local_size = ISEQ_BODY(iseq)->local_table_size;
+
+ RUBY_ASSERT(ISEQ_BODY(iseq)->param.flags.forwardable);
+
+ // Setting up local size and param size
+ local_size = local_size + vm_ci_argc(calling->cd->ci);
+ param_size = param_size + vm_ci_argc(calling->cd->ci);
+
const int opt_pc = vm_callee_setup_arg(ec, calling, iseq, cfp->sp - calling->argc, param_size, local_size);
return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
}
@@ -3661,7 +3837,7 @@ vm_call_cfunc_other(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, str
return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
}
else {
- CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat);
+ CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat && !(vm_ci_flag(ci) & VM_CALL_FORWARDING));
return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
}
@@ -3767,7 +3943,7 @@ vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, cons
attr_index_t index = vm_cc_attr_index(cc);
shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
ID id = vm_cc_cme(cc)->def->body.attr.id;
- rb_check_frozen_internal(obj);
+ rb_check_frozen(obj);
VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
if (UNDEF_P(res)) {
switch (BUILTIN_TYPE(obj)) {
@@ -4062,10 +4238,23 @@ vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
}
}
- calling->cd = &(struct rb_call_data) {
- .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
- .cc = NULL,
+ struct rb_forwarding_call_data new_fcd = {
+ .cd = {
+ .ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci)),
+ .cc = NULL,
+ },
+ .caller_ci = NULL,
};
+
+ if (!(vm_ci_flag(ci) & VM_CALL_FORWARDING)) {
+ calling->cd = &new_fcd.cd;
+ }
+ else {
+ const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
+ VM_ASSERT((vm_ci_argc(caller_ci), 1));
+ new_fcd.caller_ci = caller_ci;
+ calling->cd = (struct rb_call_data *)&new_fcd;
+ }
calling->cc = &VM_CC_ON_STACK(klass,
vm_call_general,
{ .method_missing_reason = missing_reason },
@@ -4208,10 +4397,25 @@ vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_
INC_SP(1);
ec->method_missing_reason = reason;
- calling->cd = &(struct rb_call_data) {
- .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
- .cc = NULL,
+
+ struct rb_forwarding_call_data new_fcd = {
+ .cd = {
+ .ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)),
+ .cc = NULL,
+ },
+ .caller_ci = NULL,
};
+
+ if (!(flag & VM_CALL_FORWARDING)) {
+ calling->cd = &new_fcd.cd;
+ }
+ else {
+ const struct rb_callinfo *caller_ci = ((struct rb_forwarding_call_data *)calling->cd)->caller_ci;
+ VM_ASSERT((vm_ci_argc(caller_ci), 1));
+ new_fcd.caller_ci = caller_ci;
+ calling->cd = (struct rb_call_data *)&new_fcd;
+ }
+
calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
return vm_call_method(ec, reg_cfp, calling);
@@ -4530,8 +4734,14 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
switch (cme->def->type) {
case VM_METHOD_TYPE_ISEQ:
- CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
- return vm_call_iseq_setup(ec, cfp, calling);
+ if (ISEQ_BODY(def_iseq_ptr(cme->def))->param.flags.forwardable) {
+ CC_SET_FASTPATH(cc, vm_call_iseq_fwd_setup, TRUE);
+ return vm_call_iseq_fwd_setup(ec, cfp, calling);
+ }
+ else {
+ CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
+ return vm_call_iseq_setup(ec, cfp, calling);
+ }
case VM_METHOD_TYPE_NOTIMPLEMENTED:
case VM_METHOD_TYPE_CFUNC:
@@ -4543,7 +4753,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
rb_check_arity(calling->argc, 1, 1);
- const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG);
+ const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG | VM_CALL_FORWARDING);
if (vm_cc_markable(cc)) {
vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
@@ -4577,7 +4787,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
CALLER_SETUP_ARG(cfp, calling, ci, 0);
rb_check_arity(calling->argc, 0, 0);
vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
- const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
+ const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_FORWARDING);
VM_CALL_METHOD_ATTR(v,
vm_call_ivar(ec, cfp, calling),
CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
@@ -4796,13 +5006,18 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c
ID mid = me->def->original_id;
- // update iseq. really? (TODO)
- cd->ci = vm_ci_new_runtime(mid,
- vm_ci_flag(cd->ci),
- vm_ci_argc(cd->ci),
- vm_ci_kwarg(cd->ci));
+ if (!vm_ci_markable(cd->ci)) {
+ VM_FORCE_WRITE((const VALUE *)&cd->ci->mid, (VALUE)mid);
+ }
+ else {
+ // update iseq. really? (TODO)
+ cd->ci = vm_ci_new_runtime(mid,
+ vm_ci_flag(cd->ci),
+ vm_ci_argc(cd->ci),
+ vm_ci_kwarg(cd->ci));
- RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
+ RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
+ }
const struct rb_callcache *cc;
@@ -5737,8 +5952,27 @@ VALUE
rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
stack_check(ec);
- VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
- VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
+
+ struct rb_forwarding_call_data adjusted_cd;
+ struct rb_callinfo adjusted_ci;
+
+ VALUE bh;
+ VALUE val;
+
+ if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
+ bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, false, &adjusted_cd, &adjusted_ci);
+
+ val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_method);
+
+ if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
+ RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
+ }
+ }
+ else {
+ bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
+ val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
+ }
+
VM_EXEC(ec, val);
return val;
}
@@ -5757,8 +5991,26 @@ VALUE
rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
stack_check(ec);
- VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
- VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
+ struct rb_forwarding_call_data adjusted_cd;
+ struct rb_callinfo adjusted_ci;
+
+ VALUE bh;
+ VALUE val;
+
+ if (vm_ci_flag(cd->ci) & VM_CALL_FORWARDING) {
+ bh = vm_caller_setup_fwd_args(GET_EC(), GET_CFP(), cd, blockiseq, true, &adjusted_cd, &adjusted_ci);
+
+ val = vm_sendish(ec, GET_CFP(), &adjusted_cd.cd, bh, mexp_search_super);
+
+ if (cd->cc != adjusted_cd.cd.cc && vm_cc_markable(adjusted_cd.cd.cc)) {
+ RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, adjusted_cd.cd.cc);
+ }
+ }
+ else {
+ bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
+ val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
+ }
+
VM_EXEC(ec, val);
return val;
}
@@ -5932,6 +6184,22 @@ rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *p
return vm_opt_newarray_hash(ec, num, ptr);
}
+VALUE rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len, bool freeze);
+VALUE rb_ec_pack_ary(rb_execution_context_t *ec, VALUE ary, VALUE fmt, VALUE buffer);
+
+VALUE
+rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt)
+{
+ if (BASIC_OP_UNREDEFINED_P(BOP_PACK, ARRAY_REDEFINED_OP_FLAG)) {
+ struct RArray fake_ary;
+ VALUE ary = rb_setup_fake_ary(&fake_ary, ptr, num, true);
+ return rb_ec_pack_ary(ec, ary, fmt, Qnil);
+ }
+ else {
+ return rb_vm_call_with_refinements(ec, rb_ary_new4(num, ptr), idPack, 1, &fmt, RB_PASS_CALLED_KEYWORDS);
+ }
+}
+
#undef id_cmp
#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0