diff options
author | Koichi Sasada <ko1@atdot.net> | 2021-11-18 00:43:40 +0900 |
---|---|---|
committer | Koichi Sasada <ko1@atdot.net> | 2021-11-19 08:32:39 +0900 |
commit | be71c95b88019a1ca7a030a757ce343b743d8aff (patch) | |
tree | acb9ed31e3a519ac147368dfcbbcf91374ab1b53 | |
parent | 6c64013978e77637995a0348f7e232068b9f61b4 (diff) |
`rb_method_optimized_t` for further extension
Now `rb_method_optimized_t optimized` field is added to represent
optimized method type.
Notes:
Merged: https://github.com/ruby/ruby/pull/5131
-rw-r--r-- | method.h | 10 | ||||
-rw-r--r-- | proc.c | 14 | ||||
-rw-r--r-- | vm.c | 4 | ||||
-rw-r--r-- | vm_eval.c | 4 | ||||
-rw-r--r-- | vm_insnhelper.c | 4 | ||||
-rw-r--r-- | vm_method.c | 15 |
6 files changed, 30 insertions, 21 deletions
@@ -170,6 +170,10 @@ enum method_optimized_type { OPTIMIZED_METHOD_TYPE__MAX }; +typedef struct rb_method_optimized { + enum method_optimized_type type; +} rb_method_optimized_t; + struct rb_method_definition_struct { BITFIELD(rb_method_type_t, type, VM_METHOD_TYPE_MINIMUM_BITS); unsigned int iseq_overload: 1; @@ -183,8 +187,7 @@ struct rb_method_definition_struct { rb_method_alias_t alias; rb_method_refined_t refined; rb_method_bmethod_t bmethod; - - enum method_optimized_type optimize_type; + rb_method_optimized_t optimized; } body; ID original_id; @@ -201,10 +204,11 @@ STATIC_ASSERT(sizeof_method_def, offsetof(rb_method_definition_t, body)==8); ((def)->type == VM_METHOD_TYPE_REFINED && \ UNDEFINED_METHOD_ENTRY_P((def)->body.refined.orig_me)) +void rb_add_method(VALUE klass, ID mid, rb_method_type_t type, void *option, rb_method_visibility_t visi); void rb_add_method_cfunc(VALUE klass, ID mid, VALUE (*func)(ANYARGS), int argc, rb_method_visibility_t visi); void rb_add_method_iseq(VALUE klass, ID mid, const rb_iseq_t *iseq, rb_cref_t *cref, rb_method_visibility_t visi); +void rb_add_method_optimized(VALUE klass, ID mid, enum method_optimized_type, unsigned int index, rb_method_visibility_t visi); void rb_add_refined_method_entry(VALUE refined_class, ID mid); -void rb_add_method(VALUE klass, ID mid, rb_method_type_t type, void *option, rb_method_visibility_t visi); rb_method_entry_t *rb_method_entry_set(VALUE klass, ID mid, const rb_method_entry_t *, rb_method_visibility_t noex); rb_method_entry_t *rb_method_entry_create(ID called_id, VALUE klass, rb_method_visibility_t visi, const rb_method_definition_t *def); @@ -2671,7 +2671,7 @@ rb_method_entry_min_max_arity(const rb_method_entry_t *me, int *max) *max = UNLIMITED_ARGUMENTS; return 0; case VM_METHOD_TYPE_OPTIMIZED: { - switch (def->body.optimize_type) { + switch (def->body.optimized.type) { case OPTIMIZED_METHOD_TYPE_SEND: *max = UNLIMITED_ARGUMENTS; return 0; @@ -4087,14 +4087,10 @@ Init_Proc(void) 
rb_undef_alloc_func(rb_cProc); rb_define_singleton_method(rb_cProc, "new", rb_proc_s_new, -1); - rb_add_method(rb_cProc, idCall, VM_METHOD_TYPE_OPTIMIZED, - (void *)OPTIMIZED_METHOD_TYPE_CALL, METHOD_VISI_PUBLIC); - rb_add_method(rb_cProc, rb_intern("[]"), VM_METHOD_TYPE_OPTIMIZED, - (void *)OPTIMIZED_METHOD_TYPE_CALL, METHOD_VISI_PUBLIC); - rb_add_method(rb_cProc, rb_intern("==="), VM_METHOD_TYPE_OPTIMIZED, - (void *)OPTIMIZED_METHOD_TYPE_CALL, METHOD_VISI_PUBLIC); - rb_add_method(rb_cProc, rb_intern("yield"), VM_METHOD_TYPE_OPTIMIZED, - (void *)OPTIMIZED_METHOD_TYPE_CALL, METHOD_VISI_PUBLIC); + rb_add_method_optimized(rb_cProc, idCall, OPTIMIZED_METHOD_TYPE_CALL, 0, METHOD_VISI_PUBLIC); + rb_add_method_optimized(rb_cProc, rb_intern("[]"), OPTIMIZED_METHOD_TYPE_CALL, 0, METHOD_VISI_PUBLIC); + rb_add_method_optimized(rb_cProc, rb_intern("==="), OPTIMIZED_METHOD_TYPE_CALL, 0, METHOD_VISI_PUBLIC); + rb_add_method_optimized(rb_cProc, rb_intern("yield"), OPTIMIZED_METHOD_TYPE_CALL, 0, METHOD_VISI_PUBLIC); #if 0 /* for RDoc */ rb_define_method(rb_cProc, "call", proc_call, -1); @@ -3764,8 +3764,8 @@ Init_VM(void) vm_init_redefined_flag(); rb_block_param_proxy = rb_obj_alloc(rb_cObject); - rb_add_method(rb_singleton_class(rb_block_param_proxy), idCall, VM_METHOD_TYPE_OPTIMIZED, - (void *)OPTIMIZED_METHOD_TYPE_BLOCK_CALL, METHOD_VISI_PUBLIC); + rb_add_method_optimized(rb_singleton_class(rb_block_param_proxy), idCall, + OPTIMIZED_METHOD_TYPE_BLOCK_CALL, 0, METHOD_VISI_PUBLIC); rb_obj_freeze(rb_block_param_proxy); rb_gc_register_mark_object(rb_block_param_proxy); @@ -263,7 +263,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const argv, MISSING_NOENTRY, calling->kw_splat); } case VM_METHOD_TYPE_OPTIMIZED: - switch (vm_cc_cme(cc)->def->body.optimize_type) { + switch (vm_cc_cme(cc)->def->body.optimized.type) { case OPTIMIZED_METHOD_TYPE_SEND: ret = send_internal(calling->argc, argv, calling->recv, calling->kw_splat ? 
CALL_FCALL_KW : CALL_FCALL); goto success; @@ -275,7 +275,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const goto success; } default: - rb_bug("vm_call0: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimize_type); + rb_bug("vm_call0: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimized.type); } break; case VM_METHOD_TYPE_UNDEF: diff --git a/vm_insnhelper.c b/vm_insnhelper.c index 9643bdd861..07e3cdbfaa 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -3597,7 +3597,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st return vm_call_alias(ec, cfp, calling); case VM_METHOD_TYPE_OPTIMIZED: - switch (vm_cc_cme(cc)->def->body.optimize_type) { + switch (vm_cc_cme(cc)->def->body.optimized.type) { case OPTIMIZED_METHOD_TYPE_SEND: CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE); return vm_call_opt_send(ec, cfp, calling); @@ -3609,7 +3609,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st return vm_call_opt_block_call(ec, cfp, calling); default: rb_bug("vm_call_method: unsupported optimized method type (%d)", - vm_cc_cme(cc)->def->body.optimize_type); + vm_cc_cme(cc)->def->body.optimized.type); } case VM_METHOD_TYPE_UNDEF: diff --git a/vm_method.c b/vm_method.c index 7c5147d4d4..1034332dde 100644 --- a/vm_method.c +++ b/vm_method.c @@ -345,6 +345,15 @@ rb_add_method_cfunc(VALUE klass, ID mid, VALUE (*func)(ANYARGS), int argc, rb_me } } +void +rb_add_method_optimized(VALUE klass, ID mid, enum method_optimized_type opt_type, unsigned int index, rb_method_visibility_t visi) +{ + rb_method_optimized_t opt = { + .type = opt_type, + }; + rb_add_method(klass, mid, VM_METHOD_TYPE_OPTIMIZED, &opt, visi); +} + static void rb_method_definition_release(rb_method_definition_t *def, int complemented) { @@ -509,7 +518,7 @@ rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *de 
setup_method_cfunc_struct(UNALIGNED_MEMBER_PTR(def, body.cfunc), rb_f_notimplement, -1); return; case VM_METHOD_TYPE_OPTIMIZED: - def->body.optimize_type = (enum method_optimized_type)(intptr_t)opts; + def->body.optimized = *(rb_method_optimized_t *)opts; return; case VM_METHOD_TYPE_REFINED: { @@ -1931,7 +1940,7 @@ rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_defini case VM_METHOD_TYPE_UNDEF: return 1; case VM_METHOD_TYPE_OPTIMIZED: - return d1->body.optimize_type == d2->body.optimize_type; + return (d1->body.optimized.type == d2->body.optimized.type); case VM_METHOD_TYPE_REFINED: case VM_METHOD_TYPE_ALIAS: break; @@ -1965,7 +1974,7 @@ rb_hash_method_definition(st_index_t hash, const rb_method_definition_t *def) case VM_METHOD_TYPE_UNDEF: return hash; case VM_METHOD_TYPE_OPTIMIZED: - return rb_hash_uint(hash, def->body.optimize_type); + return rb_hash_uint(hash, def->body.optimized.type); case VM_METHOD_TYPE_REFINED: case VM_METHOD_TYPE_ALIAS: break; /* unreachable */ |