author     Peter Zhu <peter@peterzhu.ca>  2022-03-23 15:19:48 -0400
committer  Peter Zhu <peter@peterzhu.ca>  2022-03-24 10:03:51 -0400
commit     5f10bd634fb6ae8f74a4ea730176233b0ca96954 (patch)
tree       170c1c81ea63184290c9e021cc45bffbfc3f4f41
parent     04591e1be7618f64bd3bed8c53c0fcde5fcbddb8 (diff)
Add ISEQ_BODY macro
Use the ISEQ_BODY macro to get the rb_iseq_constant_body of the ISeq. Using this macro will make it easier for us to change the allocation strategy of rb_iseq_constant_body when using Variable Width Allocation.
Notes:
    Merged: https://github.com/ruby/ruby/pull/5698
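
The vm_core.h hunk that introduces the macro is not shown in this section, but judging from the mechanical replacement of iseq->body with ISEQ_BODY(iseq) throughout the diff below, the accessor is presumably a thin wrapper over the existing field access, roughly along these lines (a sketch, not the verbatim definition):

    /* Assumed definition (vm_core.h): centralize access to the constant
     * body so the lookup can later be redefined for Variable Width
     * Allocation without touching every call site again. */
    #define ISEQ_BODY(iseq) ((iseq)->body)

With such a macro in place, call sites read ISEQ_BODY(iseq)->iseq_size rather than iseq->body->iseq_size, which is exactly the pattern applied in the hunks below.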
-rw-r--r--  ast.c                                       |   4
-rw-r--r--  compile.c                                   | 334
-rw-r--r--  cont.c                                      |   2
-rw-r--r--  eval.c                                      |   4
-rw-r--r--  gc.c                                        |   6
-rw-r--r--  insns.def                                   |   6
-rw-r--r--  iseq.c                                      | 104
-rw-r--r--  iseq.h                                      |  22
-rw-r--r--  mjit.c                                      |  62
-rw-r--r--  mjit.h                                      |   2
-rw-r--r--  mjit_compile.c                              |  38
-rw-r--r--  mjit_worker.c                               |  52
-rw-r--r--  proc.c                                      |  32
-rw-r--r--  thread.c                                    |   8
-rw-r--r--  tool/mk_builtin_loader.rb                   |   2
-rw-r--r--  tool/ruby_vm/views/_mjit_compile_send.erb   |   6
-rw-r--r--  vm.c                                        |  80
-rw-r--r--  vm_args.c                                   |  86
-rw-r--r--  vm_backtrace.c                              |  26
-rw-r--r--  vm_core.h                                   |   4
-rw-r--r--  vm_dump.c                                   |  24
-rw-r--r--  vm_eval.c                                   |   8
-rw-r--r--  vm_exec.h                                   |   6
-rw-r--r--  vm_insnhelper.c                             | 172
-rw-r--r--  vm_method.c                                 |  26
-rw-r--r--  yjit_codegen.c                              |  62
-rw-r--r--  yjit_core.c                                 |  10
-rw-r--r--  yjit_iface.c                                |  14
28 files changed, 601 insertions(+), 601 deletions(-)
diff --git a/ast.c b/ast.c
index 0515689a29..42d4126a5b 100644
--- a/ast.c
+++ b/ast.c
@@ -215,14 +215,14 @@ ast_s_of(rb_execution_context_t *ec, VALUE module, VALUE body, VALUE keep_script
iseq = rb_method_iseq(body);
}
if (iseq) {
- node_id = iseq->body->location.node_id;
+ node_id = ISEQ_BODY(iseq)->location.node_id;
}
}
if (!iseq) {
return Qnil;
}
- lines = iseq->body->variable.script_lines;
+ lines = ISEQ_BODY(iseq)->variable.script_lines;
VALUE path = rb_iseq_path(iseq);
int e_option = RSTRING_LEN(path) == 2 && memcmp(RSTRING_PTR(path), "-e", 2) == 0;
diff --git a/compile.c b/compile.c
index cd3f0e2a9d..67641fcfb6 100644
--- a/compile.c
+++ b/compile.c
@@ -534,11 +534,9 @@ static void
verify_call_cache(rb_iseq_t *iseq)
{
#if CPDEBUG
- // fprintf(stderr, "ci_size:%d\t", iseq->body->ci_size); rp(iseq);
-
VALUE *original = rb_iseq_original_iseq(iseq);
size_t i = 0;
- while (i < iseq->body->iseq_size) {
+ while (i < ISEQ_BODY(iseq)->iseq_size) {
VALUE insn = original[i];
const char *types = insn_op_types(insn);
@@ -556,8 +554,8 @@ verify_call_cache(rb_iseq_t *iseq)
i += insn_len(insn);
}
- for (unsigned int i=0; i<iseq->body->ci_size; i++) {
- struct rb_call_data *cd = &iseq->body->call_data[i];
+ for (unsigned int i=0; i<ISEQ_BODY(iseq)->ci_size; i++) {
+ struct rb_call_data *cd = &ISEQ_BODY(iseq)->call_data[i];
const struct rb_callinfo *ci = cd->ci;
const struct rb_callcache *cc = cd->cc;
if (cc != NULL && cc != vm_cc_empty()) {
@@ -758,7 +756,7 @@ rb_iseq_compile_node(rb_iseq_t *iseq, const NODE *node)
iseq_set_local_table(iseq, node->nd_tbl);
iseq_set_arguments(iseq, ret, node->nd_args);
- switch (iseq->body->type) {
+ switch (ISEQ_BODY(iseq)->type) {
case ISEQ_TYPE_BLOCK:
{
LABEL *start = ISEQ_COMPILE_DATA(iseq)->start_label = NEW_LABEL(0);
@@ -768,13 +766,13 @@ rb_iseq_compile_node(rb_iseq_t *iseq, const NODE *node)
end->rescued = LABEL_RESCUE_END;
ADD_TRACE(ret, RUBY_EVENT_B_CALL);
- NODE dummy_line_node = generate_dummy_line_node(FIX2INT(iseq->body->location.first_lineno), -1);
+ NODE dummy_line_node = generate_dummy_line_node(FIX2INT(ISEQ_BODY(iseq)->location.first_lineno), -1);
ADD_INSN (ret, &dummy_line_node, nop);
ADD_LABEL(ret, start);
CHECK(COMPILE(ret, "block body", node->nd_body));
ADD_LABEL(ret, end);
ADD_TRACE(ret, RUBY_EVENT_B_RETURN);
- ISEQ_COMPILE_DATA(iseq)->last_line = iseq->body->location.code_location.end_pos.lineno;
+ ISEQ_COMPILE_DATA(iseq)->last_line = ISEQ_BODY(iseq)->location.code_location.end_pos.lineno;
/* wide range catch handler must put at last */
ADD_CATCH_ENTRY(CATCH_TYPE_REDO, start, end, NULL, start);
@@ -809,7 +807,7 @@ rb_iseq_compile_node(rb_iseq_t *iseq, const NODE *node)
const char *m;
#define INVALID_ISEQ_TYPE(type) \
ISEQ_TYPE_##type: m = #type; goto invalid_iseq_type
- switch (iseq->body->type) {
+ switch (ISEQ_BODY(iseq)->type) {
case INVALID_ISEQ_TYPE(METHOD);
case INVALID_ISEQ_TYPE(CLASS);
case INVALID_ISEQ_TYPE(BLOCK);
@@ -829,7 +827,7 @@ rb_iseq_compile_node(rb_iseq_t *iseq, const NODE *node)
CHECK(COMPILE(ret, "ensure", node));
break;
default:
- COMPILE_ERROR(ERROR_ARGS "unknown scope: %d", iseq->body->type);
+ COMPILE_ERROR(ERROR_ARGS "unknown scope: %d", ISEQ_BODY(iseq)->type);
return COMPILE_NG;
invalid_iseq_type:
COMPILE_ERROR(ERROR_ARGS "compile/ISEQ_TYPE_%s should not be reached", m);
@@ -837,7 +835,7 @@ rb_iseq_compile_node(rb_iseq_t *iseq, const NODE *node)
}
}
- if (iseq->body->type == ISEQ_TYPE_RESCUE || iseq->body->type == ISEQ_TYPE_ENSURE) {
+ if (ISEQ_BODY(iseq)->type == ISEQ_TYPE_RESCUE || ISEQ_BODY(iseq)->type == ISEQ_TYPE_ENSURE) {
NODE dummy_line_node = generate_dummy_line_node(0, -1);
ADD_GETLOCAL(ret, &dummy_line_node, LVAR_ERRINFO, 0);
ADD_INSN1(ret, &dummy_line_node, throw, INT2FIX(0) /* continue throw */ );
@@ -864,10 +862,10 @@ rb_iseq_translate_threaded_code(rb_iseq_t *iseq)
#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
const void * const *table = rb_vm_get_insns_address_table();
unsigned int i;
- VALUE *encoded = (VALUE *)iseq->body->iseq_encoded;
+ VALUE *encoded = (VALUE *)ISEQ_BODY(iseq)->iseq_encoded;
- for (i = 0; i < iseq->body->iseq_size; /* */ ) {
- int insn = (int)iseq->body->iseq_encoded[i];
+ for (i = 0; i < ISEQ_BODY(iseq)->iseq_size; /* */ ) {
+ int insn = (int)ISEQ_BODY(iseq)->iseq_encoded[i];
int len = insn_len(insn);
encoded[i] = (VALUE)table[insn];
i += len;
@@ -883,14 +881,14 @@ rb_iseq_original_iseq(const rb_iseq_t *iseq) /* cold path */
VALUE *original_code;
if (ISEQ_ORIGINAL_ISEQ(iseq)) return ISEQ_ORIGINAL_ISEQ(iseq);
- original_code = ISEQ_ORIGINAL_ISEQ_ALLOC(iseq, iseq->body->iseq_size);
- MEMCPY(original_code, iseq->body->iseq_encoded, VALUE, iseq->body->iseq_size);
+ original_code = ISEQ_ORIGINAL_ISEQ_ALLOC(iseq, ISEQ_BODY(iseq)->iseq_size);
+ MEMCPY(original_code, ISEQ_BODY(iseq)->iseq_encoded, VALUE, ISEQ_BODY(iseq)->iseq_size);
#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
{
unsigned int i;
- for (i = 0; i < iseq->body->iseq_size; /* */ ) {
+ for (i = 0; i < ISEQ_BODY(iseq)->iseq_size; /* */ ) {
const void *addr = (const void *)original_code[i];
const int insn = rb_vm_insn_addr2insn(addr);
@@ -1298,8 +1296,7 @@ new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, struct rb_cal
argc += kw_arg->keyword_len;
}
- // fprintf(stderr, "[%d] id:%s\t", (int)iseq->body->ci_size, rb_id2name(mid)); rp(iseq);
- iseq->body->ci_size++;
+ ISEQ_BODY(iseq)->ci_size++;
const struct rb_callinfo *ci = vm_ci_new(mid, flag, argc, kw_arg);
RB_OBJ_WRITTEN(iseq, Qundef, ci);
return ci;
@@ -1330,7 +1327,7 @@ new_child_iseq(rb_iseq_t *iseq, const NODE *const node,
ast.root = node;
ast.compile_option = 0;
- ast.script_lines = iseq->body->variable.script_lines;
+ ast.script_lines = ISEQ_BODY(iseq)->variable.script_lines;
debugs("[new_child_iseq]> ---------------------------------------\n");
int isolated_depth = ISEQ_COMPILE_DATA(iseq)->isolated_depth;
@@ -1362,7 +1359,7 @@ set_catch_except_p(struct rb_iseq_constant_body *body)
{
body->catch_except_p = TRUE;
if (body->parent_iseq != NULL) {
- set_catch_except_p(body->parent_iseq->body);
+ set_catch_except_p(ISEQ_BODY(body->parent_iseq));
}
}
@@ -1499,16 +1496,16 @@ iseq_setup(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
if (!rb_iseq_translate_threaded_code(iseq)) return COMPILE_NG;
debugs("[compile step 6 (update_catch_except_flags)] \n");
- update_catch_except_flags(iseq->body);
+ update_catch_except_flags(ISEQ_BODY(iseq));
debugs("[compile step 6.1 (remove unused catch tables)] \n");
- if (!iseq->body->catch_except_p && iseq->body->catch_table) {
- xfree(iseq->body->catch_table);
- iseq->body->catch_table = NULL;
+ if (!ISEQ_BODY(iseq)->catch_except_p && ISEQ_BODY(iseq)->catch_table) {
+ xfree(ISEQ_BODY(iseq)->catch_table);
+ ISEQ_BODY(iseq)->catch_table = NULL;
}
#if VM_INSN_INFO_TABLE_IMPL == 2
- if (iseq->body->insns_info.succ_index_table == NULL) {
+ if (ISEQ_BODY(iseq)->insns_info.succ_index_table == NULL) {
debugs("[compile step 7 (rb_iseq_insns_info_encode_positions)] \n");
rb_iseq_insns_info_encode_positions(iseq);
}
@@ -1527,8 +1524,8 @@ iseq_setup(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
static int
iseq_set_exception_local_table(rb_iseq_t *iseq)
{
- iseq->body->local_table_size = numberof(rb_iseq_shared_exc_local_tbl);
- iseq->body->local_table = rb_iseq_shared_exc_local_tbl;
+ ISEQ_BODY(iseq)->local_table_size = numberof(rb_iseq_shared_exc_local_tbl);
+ ISEQ_BODY(iseq)->local_table = rb_iseq_shared_exc_local_tbl;
return COMPILE_OK;
}
@@ -1536,9 +1533,9 @@ static int
get_lvar_level(const rb_iseq_t *iseq)
{
int lev = 0;
- while (iseq != iseq->body->local_iseq) {
+ while (iseq != ISEQ_BODY(iseq)->local_iseq) {
lev++;
- iseq = iseq->body->parent_iseq;
+ iseq = ISEQ_BODY(iseq)->parent_iseq;
}
return lev;
}
@@ -1548,8 +1545,8 @@ get_dyna_var_idx_at_raw(const rb_iseq_t *iseq, ID id)
{
unsigned int i;
- for (i = 0; i < iseq->body->local_table_size; i++) {
- if (iseq->body->local_table[i] == id) {
+ for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
+ if (ISEQ_BODY(iseq)->local_table[i] == id) {
return (int)i;
}
}
@@ -1559,7 +1556,7 @@ get_dyna_var_idx_at_raw(const rb_iseq_t *iseq, ID id)
static int
get_local_var_idx(const rb_iseq_t *iseq, ID id)
{
- int idx = get_dyna_var_idx_at_raw(iseq->body->local_iseq, id);
+ int idx = get_dyna_var_idx_at_raw(ISEQ_BODY(iseq)->local_iseq, id);
if (idx < 0) {
COMPILE_ERROR(iseq, ISEQ_LAST_LINE(iseq),
@@ -1580,7 +1577,7 @@ get_dyna_var_idx(const rb_iseq_t *iseq, ID id, int *level, int *ls)
if (idx >= 0) {
break;
}
- iseq = iseq->body->parent_iseq;
+ iseq = ISEQ_BODY(iseq)->parent_iseq;
lv++;
}
@@ -1590,7 +1587,7 @@ get_dyna_var_idx(const rb_iseq_t *iseq, ID id, int *level, int *ls)
}
*level = lv;
- *ls = iseq->body->local_table_size;
+ *ls = ISEQ_BODY(iseq)->local_table_size;
return idx;
}
@@ -1599,10 +1596,10 @@ iseq_local_block_param_p(const rb_iseq_t *iseq, unsigned int idx, unsigned int l
{
const struct rb_iseq_constant_body *body;
while (level > 0) {
- iseq = iseq->body->parent_iseq;
+ iseq = ISEQ_BODY(iseq)->parent_iseq;
level--;
}
- body = iseq->body;
+ body = ISEQ_BODY(iseq);
if (body->local_iseq == iseq && /* local variables */
body->param.flags.has_block &&
body->local_table_size - body->param.block_start == idx) {
@@ -1644,22 +1641,22 @@ access_outer_variables(const rb_iseq_t *iseq, int level, ID id, bool write)
for (int i=0; i<level; i++) {
VALUE val;
- struct rb_id_table *ovs = iseq->body->outer_variables;
+ struct rb_id_table *ovs = ISEQ_BODY(iseq)->outer_variables;
if (!ovs) {
- ovs = iseq->body->outer_variables = rb_id_table_create(8);
+ ovs = ISEQ_BODY(iseq)->outer_variables = rb_id_table_create(8);
}
- if (rb_id_table_lookup(iseq->body->outer_variables, id, &val)) {
+ if (rb_id_table_lookup(ISEQ_BODY(iseq)->outer_variables, id, &val)) {
if (write && !val) {
- rb_id_table_insert(iseq->body->outer_variables, id, Qtrue);
+ rb_id_table_insert(ISEQ_BODY(iseq)->outer_variables, id, Qtrue);
}
}
else {
- rb_id_table_insert(iseq->body->outer_variables, id, RBOOL(write));
+ rb_id_table_insert(ISEQ_BODY(iseq)->outer_variables, id, RBOOL(write));
}
- iseq = iseq->body->parent_iseq;
+ iseq = ISEQ_BODY(iseq)->parent_iseq;
}
}
@@ -1667,10 +1664,10 @@ static ID
iseq_lvar_id(const rb_iseq_t *iseq, int idx, int level)
{
for (int i=0; i<level; i++) {
- iseq = iseq->body->parent_iseq;
+ iseq = ISEQ_BODY(iseq)->parent_iseq;
}
- ID id = iseq->body->local_table[iseq->body->local_table_size - idx];
+ ID id = ISEQ_BODY(iseq)->local_table[ISEQ_BODY(iseq)->local_table_size - idx];
// fprintf(stderr, "idx:%d level:%d ID:%s\n", idx, level, rb_id2name(id));
return id;
}
@@ -1704,7 +1701,7 @@ iseq_add_setlocal(rb_iseq_t *iseq, LINK_ANCHOR *const seq, const NODE *const lin
static void
iseq_calc_param_size(rb_iseq_t *iseq)
{
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
if (body->param.flags.has_opt ||
body->param.flags.has_post ||
body->param.flags.has_rest ||
@@ -1744,7 +1741,7 @@ iseq_set_arguments_keywords(rb_iseq_t *iseq, LINK_ANCHOR *const optargs,
const struct rb_args_info *args, int arg_size)
{
const NODE *node = args->kw_args;
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
struct rb_iseq_param_keyword *keyword;
const VALUE default_values = rb_ary_tmp_new(1);
const VALUE complex_mark = rb_str_tmp_new(0);
@@ -1826,7 +1823,7 @@ iseq_set_arguments(rb_iseq_t *iseq, LINK_ANCHOR *const optargs, const NODE *cons
debugs("iseq_set_arguments: %s\n", node_args ? "" : "0");
if (node_args) {
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
struct rb_args_info *args = node_args->nd_ainfo;
ID rest_id = 0;
int last_comma = 0;
@@ -1953,11 +1950,11 @@ iseq_set_local_table(rb_iseq_t *iseq, const rb_ast_id_table_t *tbl)
if (size > 0) {
ID *ids = (ID *)ALLOC_N(ID, size);
MEMCPY(ids, tbl->ids, ID, size);
- iseq->body->local_table = ids;
+ ISEQ_BODY(iseq)->local_table = ids;
}
- iseq->body->local_table_size = size;
+ ISEQ_BODY(iseq)->local_table_size = size;
- debugs("iseq_set_local_table: %u\n", iseq->body->local_table_size);
+ debugs("iseq_set_local_table: %u\n", ISEQ_BODY(iseq)->local_table_size);
return COMPILE_OK;
}
@@ -2072,7 +2069,7 @@ get_ivar_ic_value(rb_iseq_t *iseq,ID id)
tbl = rb_id_table_create(1);
ISEQ_COMPILE_DATA(iseq)->ivar_cache_table = tbl;
}
- val = INT2FIX(iseq->body->is_size++);
+ val = INT2FIX(ISEQ_BODY(iseq)->is_size++);
rb_id_table_insert(tbl,id,val);
return val;
}
@@ -2247,7 +2244,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
{
VALUE iseqv = (VALUE)iseq;
struct iseq_insn_info_entry *insns_info;
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
unsigned int *positions;
LINK_ELEMENT *list;
VALUE *generated_iseq;
@@ -2543,7 +2540,7 @@ iseq_set_exception_table(rb_iseq_t *iseq)
unsigned int tlen, i;
struct iseq_catch_table_entry *entry;
- iseq->body->catch_table = NULL;
+ ISEQ_BODY(iseq)->catch_table = NULL;
if (NIL_P(ISEQ_COMPILE_DATA(iseq)->catch_table_ary)) return COMPILE_OK;
tlen = (int)RARRAY_LEN(ISEQ_COMPILE_DATA(iseq)->catch_table_ary);
tptr = RARRAY_CONST_PTR_TRANSIENT(ISEQ_COMPILE_DATA(iseq)->catch_table_ary);
@@ -2578,7 +2575,7 @@ iseq_set_exception_table(rb_iseq_t *iseq)
entry->cont = 0;
}
}
- iseq->body->catch_table = table;
+ ISEQ_BODY(iseq)->catch_table = table;
RB_OBJ_WRITE(iseq, &ISEQ_COMPILE_DATA(iseq)->catch_table_ary, 0); /* free */
}
@@ -2598,10 +2595,10 @@ static int
iseq_set_optargs_table(rb_iseq_t *iseq)
{
int i;
- VALUE *opt_table = (VALUE *)iseq->body->param.opt_table;
+ VALUE *opt_table = (VALUE *)ISEQ_BODY(iseq)->param.opt_table;
- if (iseq->body->param.flags.has_opt) {
- for (i = 0; i < iseq->body->param.opt_num + 1; i++) {
+ if (ISEQ_BODY(iseq)->param.flags.has_opt) {
+ for (i = 0; i < ISEQ_BODY(iseq)->param.opt_num + 1; i++) {
opt_table[i] = label_get_position((LABEL *)opt_table[i]);
}
}
@@ -2745,7 +2742,7 @@ remove_unreachable_chunk(rb_iseq_t *iseq, LINK_ELEMENT *i)
i = first;
do {
if (IS_INSN(i)) {
- struct rb_iseq_constant_body *body = iseq->body;
+ struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
VALUE insn = INSN_OF(i);
int pos, len = insn_len(insn);
for (pos = 0; pos < len; ++pos) {
@@ -3555,7 +3552,7 @@ iseq_specialized_instruction(rb_iseq_t *iseq, INSN *iobj)
static inline int
tailcallable_p(rb_iseq_t *iseq)
{
- switch (iseq->body->type) {
+ switch (ISEQ_BODY(iseq)->type) {
case ISEQ_TYPE_TOP:
case ISEQ_TYPE_EVAL:
case ISEQ_TYPE_MAIN:
@@ -3585,7 +3582,7 @@ iseq_optimize(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
int do_block_optimization = 0;
- if (iseq->body->type == ISEQ_TYPE_BLOCK && !iseq->body->catch_except_p) {
+ if (ISEQ_BODY(iseq)->type == ISEQ_TYPE_BLOCK && !ISEQ_BODY(iseq)->catch_except_p) {
do_block_optimization = 1;
}
@@ -3997,7 +3994,7 @@ compile_flip_flop(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const nod
{
const int line = nd_line(node);
LABEL *lend = NEW_LABEL(line);
- rb_num_t cnt = ISEQ_FLIP_CNT_INCREMENT(iseq->body->local_iseq)
+ rb_num_t cnt = ISEQ_FLIP_CNT_INCREMENT(ISEQ_BODY(iseq)->local_iseq)
+ VM_SVAR_FLIPFLOP_START;
VALUE key = INT2FIX(cnt);
@@ -5396,9 +5393,9 @@ defined_expr(rb_iseq_t *iseq, LINK_ANCHOR *const ret,
struct rb_iseq_new_with_callback_callback_func *ifunc =
rb_iseq_new_with_callback_new_callback(build_defined_rescue_iseq, NULL);
rescue = new_child_iseq_with_callback(iseq, ifunc,
- rb_str_concat(rb_str_new2("defined guard in "),
- iseq->body->location.label),
- iseq, ISEQ_TYPE_RESCUE, 0);
+ rb_str_concat(rb_str_new2("defined guard in "),
+ ISEQ_BODY(iseq)->location.label),
+ iseq, ISEQ_TYPE_RESCUE, 0);
lstart->rescued = LABEL_RESCUE_BEG;
lend->rescued = LABEL_RESCUE_END;
APPEND_LABEL(ret, lcur, lstart);
@@ -5443,20 +5440,20 @@ make_name_for_block(const rb_iseq_t *orig_iseq)
int level = 1;
const rb_iseq_t *iseq = orig_iseq;
- if (orig_iseq->body->parent_iseq != 0) {
- while (orig_iseq->body->local_iseq != iseq) {
- if (iseq->body->type == ISEQ_TYPE_BLOCK) {
+ if (ISEQ_BODY(orig_iseq)->parent_iseq != 0) {
+ while (ISEQ_BODY(orig_iseq)->local_iseq != iseq) {
+ if (ISEQ_BODY(iseq)->type == ISEQ_TYPE_BLOCK) {
level++;
}
- iseq = iseq->body->parent_iseq;
+ iseq = ISEQ_BODY(iseq)->parent_iseq;
}
}
if (level == 1) {
- return rb_sprintf("block in %"PRIsVALUE, iseq->body->location.label);
+ return rb_sprintf("block in %"PRIsVALUE, ISEQ_BODY(iseq)->location.label);
}
else {
- return rb_sprintf("block (%d levels) in %"PRIsVALUE, level, iseq->body->location.label);
+ return rb_sprintf("block (%d levels) in %"PRIsVALUE, level, ISEQ_BODY(iseq)->location.label);
}
}
@@ -5652,7 +5649,7 @@ build_postexe_iseq(rb_iseq_t *iseq, LINK_ANCHOR *ret, const void *ptr)
const NODE *body = ptr;
int line = nd_line(body);
VALUE argc = INT2FIX(0);
- const rb_iseq_t *block = NEW_CHILD_ISEQ(body, make_name_for_block(iseq->body->parent_iseq), ISEQ_TYPE_BLOCK, line);
+ const rb_iseq_t *block = NEW_CHILD_ISEQ(body, make_name_for_block(ISEQ_BODY(iseq)->parent_iseq), ISEQ_TYPE_BLOCK, line);
ADD_INSN1(ret, body, putspecialobject, INT2FIX(VM_SPECIAL_OBJECT_VMCORE));
ADD_CALL_WITH_BLOCK(ret, body, id_core_set_postexe, argc, block);
@@ -5737,7 +5734,7 @@ optimizable_range_item_p(const NODE *n)
static int
compile_if(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, int popped, const enum node_type type)
{
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
const NODE *const node_body = type == NODE_IF ? node->nd_body : node->nd_else;
const NODE *const node_else = type == NODE_IF ? node->nd_else : node->nd_body;
@@ -6607,9 +6604,9 @@ iseq_compile_pattern_each(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *c
ADD_INSNL(ret, line_node, jump, unmatched);
break;
case NODE_LASGN: {
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
ID id = node->nd_vid;
- int idx = body->local_iseq->body->local_table_size - get_local_var_idx(iseq, id);
+ int idx = ISEQ_BODY(body->local_iseq)->local_table_size - get_local_var_idx(iseq, id);
if (in_alt_pattern) {
const char *name = rb_id2name(id);
@@ -7298,15 +7295,15 @@ compile_break(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, i
if (ISEQ_COMPILE_DATA(ip)->redo_label != 0) {
throw_flag = VM_THROW_NO_ESCAPE_FLAG;
}
- else if (ip->body->type == ISEQ_TYPE_BLOCK) {
+ else if (ISEQ_BODY(ip)->type == ISEQ_TYPE_BLOCK) {
throw_flag = 0;
}
- else if (ip->body->type == ISEQ_TYPE_EVAL) {
+ else if (ISEQ_BODY(ip)->type == ISEQ_TYPE_EVAL) {
COMPILE_ERROR(ERROR_ARGS "Can't escape from eval with break");
return COMPILE_NG;
}
else {
- ip = ip->body->parent_iseq;
+ ip = ISEQ_BODY(ip)->parent_iseq;
continue;
}
@@ -7372,15 +7369,15 @@ compile_next(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, in
/* while loop */
break;
}
- else if (ip->body->type == ISEQ_TYPE_BLOCK) {
+ else if (ISEQ_BODY(ip)->type == ISEQ_TYPE_BLOCK) {
break;
}
- else if (ip->body->type == ISEQ_TYPE_EVAL) {
+ else if (ISEQ_BODY(ip)->type == ISEQ_TYPE_EVAL) {
COMPILE_ERROR(ERROR_ARGS "Can't escape from eval with next");
return COMPILE_NG;
}
- ip = ip->body->parent_iseq;
+ ip = ISEQ_BODY(ip)->parent_iseq;
}
if (ip != 0) {
CHECK(COMPILE(ret, "next val", node->nd_stts));
@@ -7415,7 +7412,7 @@ compile_redo(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, in
ADD_INSN(ret, line_node, putnil);
}
}
- else if (iseq->body->type != ISEQ_TYPE_EVAL && ISEQ_COMPILE_DATA(iseq)->start_label && can_add_ensure_iseq(iseq)) {
+ else if (ISEQ_BODY(iseq)->type != ISEQ_TYPE_EVAL && ISEQ_COMPILE_DATA(iseq)->start_label && can_add_ensure_iseq(iseq)) {
LABEL *splabel = NEW_LABEL(0);
debugs("redo in block");
@@ -7441,15 +7438,15 @@ compile_redo(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, in
if (ISEQ_COMPILE_DATA(ip)->redo_label != 0) {
break;
}
- else if (ip->body->type == ISEQ_TYPE_BLOCK) {
+ else if (ISEQ_BODY(ip)->type == ISEQ_TYPE_BLOCK) {
break;
}
- else if (ip->body->type == ISEQ_TYPE_EVAL) {
+ else if (ISEQ_BODY(ip)->type == ISEQ_TYPE_EVAL) {
COMPILE_ERROR(ERROR_ARGS "Can't escape from eval with redo");
return COMPILE_NG;
}
- ip = ip->body->parent_iseq;
+ ip = ISEQ_BODY(ip)->parent_iseq;
}
if (ip != 0) {
ADD_INSN(ret, line_node, putnil);
@@ -7472,7 +7469,7 @@ compile_retry(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, i
{
const NODE *line_node = node;
- if (iseq->body->type == ISEQ_TYPE_RESCUE) {
+ if (ISEQ_BODY(iseq)->type == ISEQ_TYPE_RESCUE) {
ADD_INSN(ret, line_node, putnil);
ADD_INSN1(ret, line_node, throw, INT2FIX(TAG_RETRY));
@@ -7496,8 +7493,9 @@ compile_rescue(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node,
LABEL *lend = NEW_LABEL(line);
LABEL *lcont = NEW_LABEL(line);
const rb_iseq_t *rescue = NEW_CHILD_ISEQ(node->nd_resq,
- rb_str_concat(rb_str_new2("rescue in "), iseq->body->location.label),
- ISEQ_TYPE_RESCUE, line);
+ rb_str_concat(rb_str_new2("rescue in "),
+ ISEQ_BODY(iseq)->location.label),
+ ISEQ_TYPE_RESCUE, line);
lstart->rescued = LABEL_RESCUE_BEG;
lend->rescued = LABEL_RESCUE_END;
@@ -7591,7 +7589,7 @@ compile_ensure(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node,
const NODE *line_node = node;
DECL_ANCHOR(ensr);
const rb_iseq_t *ensure = NEW_CHILD_ISEQ(node->nd_ensr,
- rb_str_concat(rb_str_new2 ("ensure in "), iseq->body->location.label),
+ rb_str_concat(rb_str_new2 ("ensure in "), ISEQ_BODY(iseq)->location.label),
ISEQ_TYPE_ENSURE, line);
LABEL *lstart = NEW_LABEL(line);
LABEL *lend = NEW_LABEL(line);
@@ -7639,15 +7637,15 @@ compile_return(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node,
const NODE *line_node = node;
if (iseq) {
- enum iseq_type type = iseq->body->type;
+ enum iseq_type type = ISEQ_BODY(iseq)->type;
const rb_iseq_t *is = iseq;
enum iseq_type t = type;
const NODE *retval = node->nd_stts;
LABEL *splabel = 0;
while (t == ISEQ_TYPE_RESCUE || t == ISEQ_TYPE_ENSURE) {
- if (!(is = is->body->parent_iseq)) break;
- t = is->body->type;
+ if (!(is = ISEQ_BODY(is)->parent_iseq)) break;
+ t = ISEQ_BODY(is)->type;
}
switch (t) {
case ISEQ_TYPE_TOP:
@@ -7713,7 +7711,7 @@ compile_evstr(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, i
static void
compile_lvar(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *line_node, ID id)
{
- int idx = iseq->body->local_iseq->body->local_table_size - get_local_var_idx(iseq, id);
+ int idx = ISEQ_BODY(ISEQ_BODY(iseq)->local_iseq)->local_table_size - get_local_var_idx(iseq, id);
debugs("id: %s idx: %d\n", rb_id2name(id), idx);
ADD_GETLOCAL(ret, line_node, idx, get_lvar_level(iseq));
@@ -7856,13 +7854,13 @@ delegate_call_p(const rb_iseq_t *iseq, unsigned int argc, const LINK_ANCHOR *arg
*pstart_index = 0;
return TRUE;
}
- else if (argc <= iseq->body->local_table_size) {
+ else if (argc <= ISEQ_BODY(iseq)->local_table_size) {
unsigned int start=0;
// local_table: [p1, p2, p3, l1, l2, l3]
// arguments: [p3, l1, l2] -> 2
for (start = 0;
- argc + start <= iseq->body->local_table_size;
+ argc + start <= ISEQ_BODY(iseq)->local_table_size;
start++) {
const LINK_ELEMENT *elem = FIRST_ELEMENT(args);
@@ -7873,12 +7871,12 @@ delegate_call_p(const rb_iseq_t *iseq, unsigned int argc, const LINK_ANCHOR *arg
int local_level = FIX2INT(OPERAND_AT(elem, 1));
if (local_level == 0) {
- unsigned int index = iseq->body->local_table_size - (local_index - VM_ENV_DATA_SIZE + 1);
+ unsigned int index = ISEQ_BODY(iseq)->local_table_size - (local_index - VM_ENV_DATA_SIZE + 1);
if (0) { // for debug
fprintf(stderr, "lvar:%s (%d), id:%s (%d) local_index:%d, local_size:%d\n",
- rb_id2name(iseq->body->local_table[i]), i,
- rb_id2name(iseq->body->local_table[index]), index,
- local_index, (int)iseq->body->local_table_size);
+ rb_id2name(ISEQ_BODY(iseq)->local_table[i]), i,
+ rb_id2name(ISEQ_BODY(iseq)->local_table[index]), index,
+ local_index, (int)ISEQ_BODY(iseq)->local_table_size);
}
if (i == index) {
elem = elem->next;
@@ -7956,14 +7954,14 @@ compile_builtin_mandatory_only_method(rb_iseq_t *iseq, const NODE *node, const N
{
// arguments
struct rb_args_info args = {
- .pre_args_num = iseq->body->param.lead_num,
+ .pre_args_num = ISEQ_BODY(iseq)->param.lead_num,
};
NODE args_node;
rb_node_init(&args_node, NODE_ARGS, 0, 0, (VALUE)&args);
// local table without non-mandatory parameters
- const int skip_local_size = iseq->body->param.size - iseq->body->param.lead_num;
- const int table_size = iseq->body->local_table_size - skip_local_size;
+ const int skip_local_size = ISEQ_BODY(iseq)->param.size - ISEQ_BODY(iseq)->param.lead_num;
+ const int table_size = ISEQ_BODY(iseq)->local_table_size - skip_local_size;
VALUE idtmp = 0;
rb_ast_id_table_t *tbl = ALLOCV(idtmp, sizeof(rb_ast_id_table_t) + table_size * sizeof(ID));
@@ -7972,12 +7970,12 @@ compile_builtin_mandatory_only_method(rb_iseq_t *iseq, const NODE *node, const N
int i;
// lead parameters
- for (i=0; i<iseq->body->param.lead_num; i++) {
- tbl->ids[i] = iseq->body->local_table[i];
+ for (i=0; i<ISEQ_BODY(iseq)->param.lead_num; i++) {
+ tbl->ids[i] = ISEQ_BODY(iseq)->local_table[i];
}
// local variables
for (; i<table_size; i++) {
- tbl->ids[i] = iseq->body->local_table[i + skip_local_size];
+ tbl->ids[i] = ISEQ_BODY(iseq)->local_table[i + skip_local_size];
}
NODE scope_node;
@@ -7986,12 +7984,12 @@ compile_builtin_mandatory_only_method(rb_iseq_t *iseq, const NODE *node, const N
rb_ast_body_t ast = {
.root = &scope_node,
.compile_option = 0,
- .script_lines = iseq->body->variable.script_lines,
+ .script_lines = ISEQ_BODY(iseq)->variable.script_lines,
};
int prev_inline_index = GET_VM()->builtin_inline_index;
- iseq->body->mandatory_only_iseq =
+ ISEQ_BODY(iseq)->mandatory_only_iseq =
rb_iseq_new_with_opt(&ast, rb_iseq_base_label(iseq),
rb_iseq_path(iseq), rb_iseq_realpath(iseq),
INT2FIX(nd_line(line_node)), NULL, 0,
@@ -8034,7 +8032,7 @@ compile_builtin_function_call(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NOD
}
else if (strcmp("attr!", builtin_func) == 0) {
// There's only "inline" attribute for now
- iseq->body->builtin_inline_p = true;
+ ISEQ_BODY(iseq)->builtin_inline_p = true;
return COMPILE_OK;
}
else if (strcmp("arg!", builtin_func) == 0) {
@@ -8618,7 +8616,7 @@ compile_op_log(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node,
static int
compile_super(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, int popped, const enum node_type type)
{
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
DECL_ANCHOR(args);
int argc;
unsigned int flag = 0;
@@ -8636,7 +8634,7 @@ compile_super(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, i
/* NODE_ZSUPER */
int i;
const rb_iseq_t *liseq = body->local_iseq;
- const struct rb_iseq_constant_body *const local_body = liseq->body;
+ const struct rb_iseq_constant_body *const local_body = ISEQ_BODY(liseq);
const struct rb_iseq_param_keyword *const local_kwd = local_body->param.keyword;
int lvar_level = get_lvar_level(iseq);
@@ -8763,7 +8761,7 @@ compile_yield(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, i
INIT_ANCHOR(args);
- switch (iseq->body->local_iseq->body->type) {
+ switch (ISEQ_BODY(ISEQ_BODY(iseq)->local_iseq)->type) {
case ISEQ_TYPE_TOP:
case ISEQ_TYPE_MAIN:
case ISEQ_TYPE_CLASS:
@@ -8789,8 +8787,8 @@ compile_yield(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, i
int level = 0;
const rb_iseq_t *tmp_iseq = iseq;
- for (; tmp_iseq != iseq->body->local_iseq; level++ ) {
- tmp_iseq = tmp_iseq->body->parent_iseq;
+ for (; tmp_iseq != ISEQ_BODY(iseq)->local_iseq; level++ ) {
+ tmp_iseq = ISEQ_BODY(tmp_iseq)->parent_iseq;
}
if (level > 0) access_outer_variables(iseq, level, rb_intern("yield"), true);
@@ -8842,7 +8840,7 @@ compile_colon2(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node,
if (rb_is_const_id(node->nd_mid)) {
/* constant */
LABEL *lend = NEW_LABEL(line);
- int ic_index = iseq->body->is_size++;
+ int ic_index = ISEQ_BODY(iseq)->is_size++;
DECL_ANCHOR(pref);
DECL_ANCHOR(body);
@@ -8887,7 +8885,7 @@ compile_colon3(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node,
{
const int line = nd_line(node);
LABEL *lend = NEW_LABEL(line);
- int ic_index = iseq->body->is_size++;
+ int ic_index = ISEQ_BODY(iseq)->is_size++;
debugi("colon3#nd_mid", node->nd_mid);
@@ -8942,17 +8940,17 @@ static int
compile_errinfo(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, int popped)
{
if (!popped) {
- if (iseq->body->type == ISEQ_TYPE_RESCUE) {
+ if (ISEQ_BODY(iseq)->type == ISEQ_TYPE_RESCUE) {
ADD_GETLOCAL(ret, node, LVAR_ERRINFO, 0);
}
else {
const rb_iseq_t *ip = iseq;
int level = 0;
while (ip) {
- if (ip->body->type == ISEQ_TYPE_RESCUE) {
+ if (ISEQ_BODY(ip)->type == ISEQ_TYPE_RESCUE) {
break;
}
- ip = ip->body->parent_iseq;
+ ip = ISEQ_BODY(ip)->parent_iseq;
level++;
}
if (ip) {
@@ -8969,7 +8967,7 @@ compile_errinfo(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node,
static int
compile_kw_arg(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, int popped)
{
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
LABEL *end_label = NEW_LABEL(nd_line(node));
const NODE *default_value = node->nd_body->nd_value;
@@ -9117,7 +9115,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const no
{
const int line = (int)nd_line(node);
const enum node_type type = nd_type(node);
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
if (ISEQ_COMPILE_DATA(iseq)->last_line == line) {
/* ignore */
@@ -9219,7 +9217,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const no
case NODE_LASGN:{
ID id = node->nd_vid;
- int idx = body->local_iseq->body->local_table_size - get_local_var_idx(iseq, id);
+ int idx = ISEQ_BODY(body->local_iseq)->local_table_size - get_local_var_idx(iseq, id);
debugs("lvar: %s idx: %d\n", rb_id2name(id), idx);
CHECK(COMPILE(ret, "rvalue", node->nd_value));
@@ -10297,9 +10295,9 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *const anchor,
case TS_IVC: /* inline ivar cache */
case TS_ICVARC: /* inline cvar cache */
argv[j] = op;
- if (NUM2UINT(op) >= iseq->body->is_size) {
- iseq->body->is_size = NUM2INT(op) + 1;
- }
+ if (NUM2UINT(op) >= ISEQ_BODY(iseq)->is_size) {
+ ISEQ_BODY(iseq)->is_size = NUM2INT(op) + 1;
+ }
FL_SET((VALUE)iseq, ISEQ_MARKABLE_ISEQ);
break;
case TS_CALLDATA:
@@ -10388,13 +10386,13 @@ iseq_build_kw(rb_iseq_t *iseq, VALUE params, VALUE keywords)
ID *ids;
struct rb_iseq_param_keyword *keyword = ZALLOC(struct rb_iseq_param_keyword);
- iseq->body->param.flags.has_kw = TRUE;
+ ISEQ_BODY(iseq)->param.flags.has_kw = TRUE;
keyword->num = len;
#define SYM(s) ID2SYM(rb_intern_const(#s))
(void)int_param(&keyword->bits_start, params, SYM(kwbits));
i = keyword->bits_start - keyword->num;
- ids = (ID *)&iseq->body->local_table[i];
+ ids = (ID *)&ISEQ_BODY(iseq)->local_table[i];
#undef SYM
/* required args */
@@ -10517,8 +10515,8 @@ rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, VALUE locals, VALUE params,
INIT_ANCHOR(anchor);
len = RARRAY_LENINT(locals);
- iseq->body->local_table_size = len;
- iseq->body->local_table = tbl = len > 0 ? (ID *)ALLOC_N(ID, iseq->body->local_table_size) : NULL;
+ ISEQ_BODY(iseq)->local_table_size = len;
+ ISEQ_BODY(iseq)->local_table = tbl = len > 0 ? (ID *)ALLOC_N(ID, ISEQ_BODY(iseq)->local_table_size) : NULL;
for (i = 0; i < len; i++) {
VALUE lv = RARRAY_AREF(locals, i);
@@ -10531,14 +10529,14 @@ rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, VALUE locals, VALUE params,
}
}
-#define INT_PARAM(F) int_param(&iseq->body->param.F, params, SYM(F))
+#define INT_PARAM(F) int_param(&ISEQ_BODY(iseq)->param.F, params, SYM(F))
if (INT_PARAM(lead_num)) {
- iseq->body->param.flags.has_lead = TRUE;
+ ISEQ_BODY(iseq)->param.flags.has_lead = TRUE;
}
- if (INT_PARAM(post_num)) iseq->body->param.flags.has_post = TRUE;
- if (INT_PARAM(post_start)) iseq->body->param.flags.has_post = TRUE;
- if (INT_PARAM(rest_start)) iseq->body->param.flags.has_rest = TRUE;
- if (INT_PARAM(block_start)) iseq->body->param.flags.has_block = TRUE;
+ if (INT_PARAM(post_num)) ISEQ_BODY(iseq)->param.flags.has_post = TRUE;
+ if (INT_PARAM(post_start)) ISEQ_BODY(iseq)->param.flags.has_post = TRUE;
+ if (INT_PARAM(rest_start)) ISEQ_BODY(iseq)->param.flags.has_rest = TRUE;
+ if (INT_PARAM(block_start)) ISEQ_BODY(iseq)->param.flags.has_block = TRUE;
#undef INT_PARAM
{
#define INT_PARAM(F) F = (int_param(&x, misc, SYM(F)) ? (unsigned int)x : 0)
@@ -10559,9 +10557,9 @@ rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, VALUE locals, VALUE params,
if (RB_TYPE_P(arg_opt_labels, T_ARRAY)) {
len = RARRAY_LENINT(arg_opt_labels);
- iseq->body->param.flags.has_opt = !!(len - 1 >= 0);
+ ISEQ_BODY(iseq)->param.flags.has_opt = !!(len - 1 >= 0);
- if (iseq->body->param.flags.has_opt) {
+ if (ISEQ_BODY(iseq)->param.flags.has_opt) {
VALUE *opt_table = ALLOC_N(VALUE, len);
for (i = 0; i < len; i++) {
@@ -10570,8 +10568,8 @@ rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, VALUE locals, VALUE params,
opt_table[i] = (VALUE)label;
}
- iseq->body->param.opt_num = len - 1;
- iseq->body->param.opt_table = opt_table;
+ ISEQ_BODY(iseq)->param.opt_num = len - 1;
+ ISEQ_BODY(iseq)->param.opt_table = opt_table;
}
}
else if (!NIL_P(arg_opt_labels)) {
@@ -10580,7 +10578,7 @@ rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, VALUE locals, VALUE params,
}
if (RB_TYPE_P(keywords, T_ARRAY)) {
- iseq->body->param.keyword = iseq_build_kw(iseq, params, keywords);
+ ISEQ_BODY(iseq)->param.keyword = iseq_build_kw(iseq, params, keywords);
}
else if (!NIL_P(keywords)) {
rb_raise(rb_eTypeError, ":keywords param is not an array: %+"PRIsVALUE,
@@ -10588,16 +10586,16 @@ rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, VALUE locals, VALUE params,
}
if (Qtrue == rb_hash_aref(params, SYM(ambiguous_param0))) {
- iseq->body->param.flags.ambiguous_param0 = TRUE;
+ ISEQ_BODY(iseq)->param.flags.ambiguous_param0 = TRUE;
}
if (int_param(&i, params, SYM(kwrest))) {
- struct rb_iseq_param_keyword *keyword = (struct rb_iseq_param_keyword *)iseq->body->param.keyword;
+ struct rb_iseq_param_keyword *keyword = (struct rb_iseq_param_keyword *)ISEQ_BODY(iseq)->param.keyword;
if (keyword == NULL) {
- iseq->body->param.keyword = keyword = ZALLOC(struct rb_iseq_param_keyword);
+ ISEQ_BODY(iseq)->param.keyword = keyword = ZALLOC(struct rb_iseq_param_keyword);
}
keyword->rest_start = i;
- iseq->body->param.flags.has_kwrest = TRUE;
+ ISEQ_BODY(iseq)->param.flags.has_kwrest = TRUE;
}
#undef SYM
iseq_calc_param_size(iseq);
@@ -10608,9 +10606,9 @@ rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, VALUE locals, VALUE params,
/* body */
iseq_build_from_ary_body(iseq, anchor, body, node_ids, labels_wrapper);
- iseq->body->param.size = arg_size;
- iseq->body->local_table_size = local_size;
- iseq->body->stack_max = stack_max;
+ ISEQ_BODY(iseq)->param.size = arg_size;
+ ISEQ_BODY(iseq)->local_table_size = local_size;
+ ISEQ_BODY(iseq)->stack_max = stack_max;
}
/* for parser */
@@ -10619,7 +10617,7 @@ int
rb_dvar_defined(ID id, const rb_iseq_t *iseq)
{
if (iseq) {
- const struct rb_iseq_constant_body *body = iseq->body;
+ const struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
while (body->type == ISEQ_TYPE_BLOCK ||
body->type == ISEQ_TYPE_RESCUE ||
body->type == ISEQ_TYPE_ENSURE ||
@@ -10634,7 +10632,7 @@ rb_dvar_defined(ID id, const rb_iseq_t *iseq)
}
}
iseq = body->parent_iseq;
- body = iseq->body;
+ body = ISEQ_BODY(iseq);
}
}
return 0;
@@ -10645,7 +10643,7 @@ rb_local_defined(ID id, const rb_iseq_t *iseq)
{
if (iseq) {
unsigned int i;
- const struct rb_iseq_constant_body *const body = iseq->body->local_iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(ISEQ_BODY(iseq)->local_iseq);
for (i=0; i<body->local_table_size; i++) {
if (body->local_table[i] == id) {
@@ -11081,7 +11079,7 @@ ibf_load_builtin(const struct ibf_load *load, ibf_offset_t *offset)
static ibf_offset_t
ibf_dump_code(struct ibf_dump *dump, const rb_iseq_t *iseq)
{
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
const int iseq_size = body->iseq_size;
int code_index;
const VALUE *orig_code = rb_iseq_original_iseq(iseq);
@@ -11158,7 +11156,7 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecod
ibf_offset_t reading_pos = bytecode_offset;
VALUE *code = ALLOC_N(VALUE, iseq_size);
- struct rb_iseq_constant_body *load_body = iseq->body;
+ struct rb_iseq_constant_body *load_body = ISEQ_BODY(iseq);
struct rb_call_data *cd_entries = load_body->call_data;
union iseq_inline_storage_entry *is_entries = load_body->is_entries;
@@ -11269,11 +11267,11 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecod
static ibf_offset_t
ibf_dump_param_opt_table(struct ibf_dump *dump, const rb_iseq_t *iseq)
{
- int opt_num = iseq->body->param.opt_num;
+ int opt_num = ISEQ_BODY(iseq)->param.opt_num;
if (opt_num > 0) {
IBF_W_ALIGN(VALUE);
- return ibf_dump_write(dump, iseq->body->param.opt_table, sizeof(VALUE) * (opt_num + 1));
+ return ibf_dump_write(dump, ISEQ_BODY(iseq)->param.opt_table, sizeof(VALUE) * (opt_num + 1));
}
else {
return ibf_dump_pos(dump);
@@ -11296,7 +11294,7 @@ ibf_load_param_opt_table(const struct ibf_load *load, ibf_offset_t opt_table_off
static ibf_offset_t
ibf_dump_param_keyword(struct ibf_dump *dump, const rb_iseq_t *iseq)
{
- const struct rb_iseq_param_keyword *kw = iseq->body->param.keyword;
+ const struct rb_iseq_param_keyword *kw = ISEQ_BODY(iseq)->param.keyword;
if (kw) {
struct rb_iseq_param_keyword dump_kw = *kw;
@@ -11348,10 +11346,10 @@ static ibf_offset_t
ibf_dump_insns_info_body(struct ibf_dump *dump, const rb_iseq_t *iseq)
{
ibf_offset_t offset = ibf_dump_pos(dump);
- const struct iseq_insn_info_entry *entries = iseq->body->insns_info.body;
+ const struct iseq_insn_info_entry *entries = ISEQ_BODY(iseq)->insns_info.body;
unsigned int i;
- for (i = 0; i < iseq->body->insns_info.size; i++) {
+ for (i = 0; i < ISEQ_BODY(iseq)->insns_info.size; i++) {
ibf_dump_write_small_value(dump, entries[i].line_no);
#ifdef USE_ISEQ_NODE_ID
ibf_dump_write_small_value(dump, entries[i].node_id);
@@ -11414,7 +11412,7 @@ ibf_load_insns_info_positions(const struct ibf_load *load, ibf_offset_t position
static ibf_offset_t
ibf_dump_local_table(struct ibf_dump *dump, const rb_iseq_t *iseq)
{
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
const int size = body->local_table_size;
ID *table = ALLOCA_N(ID, size);
int i;
@@ -11447,7 +11445,7 @@ ibf_load_local_table(const struct ibf_load *load, ibf_offset_t local_table_offse
static ibf_offset_t
ibf_dump_catch_table(struct ibf_dump *dump, const rb_iseq_t *iseq)
{
- const struct iseq_catch_table *table = iseq->body->catch_table;
+ const struct iseq_catch_table *table = ISEQ_BODY(iseq)->catch_table;
if (table) {
int *iseq_indices = ALLOCA_N(int, table->size);
@@ -11504,7 +11502,7 @@ ibf_load_catch_table(const struct ibf_load *load, ibf_offset_t catch_table_offse
static ibf_offset_t
ibf_dump_ci_entries(struct ibf_dump *dump, const rb_iseq_t *iseq)
{
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
const unsigned int ci_size = body->ci_size;
const struct rb_call_data *cds = body->call_data;
@@ -11553,7 +11551,7 @@ dump_outer_variable(ID id, VALUE val, void *dump)
static ibf_offset_t
ibf_dump_outer_variables(struct ibf_dump *dump, const rb_iseq_t *iseq)
{
- struct rb_id_table * ovs = iseq->body->outer_variables;
+ struct rb_id_table * ovs = ISEQ_BODY(iseq)->outer_variables;
ibf_offset_t offset = ibf_dump_pos(dump);
@@ -11642,7 +11640,7 @@ ibf_dump_iseq_each(struct ibf_dump *dump, const rb_iseq_t *iseq)
unsigned int *positions;
- const struct rb_iseq_constant_body *body = iseq->body;
+ const struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
const VALUE location_pathobj_index = ibf_dump_object(dump, body->location.pathobj); /* TODO: freeze */
const VALUE location_base_label_index = ibf_dump_object(dump, body->location.base_label);
@@ -11664,16 +11662,16 @@ ibf_dump_iseq_each(struct ibf_dump *dump, const rb_iseq_t *iseq)
const ibf_offset_t param_keyword_offset = ibf_dump_param_keyword(dump, iseq);
const ibf_offset_t insns_info_body_offset = ibf_dump_insns_info_body(dump, iseq);
- positions = rb_iseq_insns_info_decode_positions(iseq->body);
+ positions = rb_iseq_insns_info_decode_positions(ISEQ_BODY(iseq));
const ibf_offset_t insns_info_positions_offset = ibf_dump_insns_info_positions(dump, positions, body->insns_info.size);
ruby_xfree(positions);
const ibf_offset_t local_table_offset = ibf_dump_local_table(dump, iseq);
const unsigned int catch_table_size = body->catch_table ? body->catch_table->size : 0;
const ibf_offset_t catch_table_offset = ibf_dump_catch_table(dump, iseq);
- const int parent_iseq_index = ibf_dump_iseq(dump, iseq->body->parent_iseq);
- const int local_iseq_index = ibf_dump_iseq(dump, iseq->body->local_iseq);
- const int mandatory_only_iseq_index = ibf_dump_iseq(dump, iseq->body->mandatory_only_iseq);
+ const int parent_iseq_index = ibf_dump_iseq(dump, ISEQ_BODY(iseq)->parent_iseq);
+ const int local_iseq_index = ibf_dump_iseq(dump, ISEQ_BODY(iseq)->local_iseq);
+ const int mandatory_only_iseq_index = ibf_dump_iseq(dump, ISEQ_BODY(iseq)->mandatory_only_iseq);
const ibf_offset_t ci_entries_offset = ibf_dump_ci_entries(dump, iseq);
const ibf_offset_t outer_variables_offset = ibf_dump_outer_variables(dump, iseq);
@@ -11784,7 +11782,7 @@ ibf_load_location_str(const struct ibf_load *load, VALUE str_index)
static void
ibf_load_iseq_each(struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t offset)
{
- struct rb_iseq_constant_body *load_body = iseq->body = rb_iseq_constant_body_alloc();
+ struct rb_iseq_constant_body *load_body = ISEQ_BODY(iseq) = rb_iseq_constant_body_alloc();
ibf_offset_t reading_pos = offset;
@@ -12729,8 +12727,8 @@ rb_iseq_ibf_dump(const rb_iseq_t *iseq, VALUE opt)
VALUE dump_obj;
VALUE str;
- if (iseq->body->parent_iseq != NULL ||
- iseq->body->local_iseq != iseq) {
+ if (ISEQ_BODY(iseq)->parent_iseq != NULL ||
+ ISEQ_BODY(iseq)->local_iseq != iseq) {
rb_raise(rb_eRuntimeError, "should be top of iseq");
}
if (RTEST(ISEQ_COVERAGE(iseq))) {
diff --git a/cont.c b/cont.c
index 667e6846b3..ee7b856bb1 100644
--- a/cont.c
+++ b/cont.c
@@ -1218,7 +1218,7 @@ show_vm_pcs(const rb_control_frame_t *cfp,
while (cfp != end_of_cfp) {
int pc = 0;
if (cfp->iseq) {
- pc = cfp->pc - cfp->iseq->body->iseq_encoded;
+ pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded;
}
fprintf(stderr, "%2d pc: %d\n", i++, pc);
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
diff --git a/eval.c b/eval.c
index 3a1a6b45df..237c0fbd66 100644
--- a/eval.c
+++ b/eval.c
@@ -1816,10 +1816,10 @@ errinfo_place(const rb_execution_context_t *ec)
while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
if (VM_FRAME_RUBYFRAME_P(cfp)) {
- if (cfp->iseq->body->type == ISEQ_TYPE_RESCUE) {
+ if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_RESCUE) {
return &cfp->ep[VM_ENV_INDEX_LAST_LVAR];
}
- else if (cfp->iseq->body->type == ISEQ_TYPE_ENSURE &&
+ else if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_ENSURE &&
!THROW_DATA_P(cfp->ep[VM_ENV_INDEX_LAST_LVAR]) &&
!FIXNUM_P(cfp->ep[VM_ENV_INDEX_LAST_LVAR])) {
return &cfp->ep[VM_ENV_INDEX_LAST_LVAR];
diff --git a/gc.c b/gc.c
index 134100584c..b036a81b8b 100644
--- a/gc.c
+++ b/gc.c
@@ -13372,11 +13372,11 @@ rb_method_type_name(rb_method_type_t type)
static void
rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
{
- if (buff_size > 0 && iseq->body && iseq->body->location.label && !RB_TYPE_P(iseq->body->location.pathobj, T_MOVED)) {
+ if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
VALUE path = rb_iseq_path(iseq);
- VALUE n = iseq->body->location.first_lineno;
+ VALUE n = ISEQ_BODY(iseq)->location.first_lineno;
snprintf(buff, buff_size, " %s@%s:%d",
- RSTRING_PTR(iseq->body->location.label),
+ RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
RSTRING_PTR(path),
n ? FIX2INT(n) : 0 );
}
diff --git a/insns.def b/insns.def
index d686118688..c3b2eb9a97 100644
--- a/insns.def
+++ b/insns.def
@@ -717,9 +717,9 @@ defineclass
vm_push_frame(ec, class_iseq, VM_FRAME_MAGIC_CLASS | VM_ENV_FLAG_LOCAL, klass,
GET_BLOCK_HANDLER(),
(VALUE)vm_cref_push(ec, klass, NULL, FALSE, FALSE),
- class_iseq->body->iseq_encoded, GET_SP(),
- class_iseq->body->local_table_size,
- class_iseq->body->stack_max);
+ ISEQ_BODY(class_iseq)->iseq_encoded, GET_SP(),
+ ISEQ_BODY(class_iseq)->local_table_size,
+ ISEQ_BODY(class_iseq)->stack_max);
RESTORE_REGS();
NEXT_INSN();
}
diff --git a/iseq.c b/iseq.c
index d0afe535ca..ffbe9859c3 100644
--- a/iseq.c
+++ b/iseq.c
@@ -107,8 +107,8 @@ rb_iseq_free(const rb_iseq_t *iseq)
{
RUBY_FREE_ENTER("iseq");
- if (iseq && iseq->body) {
- struct rb_iseq_constant_body *const body = iseq->body;
+ if (iseq && ISEQ_BODY(iseq)) {
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
mjit_free_iseq(iseq); /* Notify MJIT */
rb_yjit_iseq_free(body);
ruby_xfree((void *)body->iseq_encoded);
@@ -240,7 +240,7 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
(FL_TEST((VALUE)iseq, ISEQ_TRANSLATED)) ? rb_vm_insn_addr2insn2 :
#endif
rb_vm_insn_null_translator;
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
size = body->iseq_size;
code = body->iseq_encoded;
@@ -259,8 +259,8 @@ update_each_insn_value(void *ctx, VALUE obj)
void
rb_iseq_update_references(rb_iseq_t *iseq)
{
- if (iseq->body) {
- struct rb_iseq_constant_body *body = iseq->body;
+ if (ISEQ_BODY(iseq)) {
+ struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
body->variable.coverage = rb_gc_location(body->variable.coverage);
body->variable.pc2branchindex = rb_gc_location(body->variable.pc2branchindex);
@@ -343,8 +343,8 @@ rb_iseq_mark(const rb_iseq_t *iseq)
RUBY_MARK_UNLESS_NULL(iseq->wrapper);
- if (iseq->body) {
- const struct rb_iseq_constant_body *const body = iseq->body;
+ if (ISEQ_BODY(iseq)) {
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
if (FL_TEST((VALUE)iseq, ISEQ_MARKABLE_ISEQ)) {
rb_iseq_each_value(iseq, each_insn_value, NULL);
@@ -458,7 +458,7 @@ size_t
rb_iseq_memsize(const rb_iseq_t *iseq)
{
size_t size = 0; /* struct already counted as RVALUE size */
- const struct rb_iseq_constant_body *body = iseq->body;
+ const struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
const struct iseq_compile_data *compile_data;
/* TODO: should we count original_iseq? */
@@ -510,7 +510,7 @@ static rb_iseq_t *
iseq_alloc(void)
{
rb_iseq_t *iseq = iseq_imemo_alloc();
- iseq->body = rb_iseq_constant_body_alloc();
+ ISEQ_BODY(iseq) = rb_iseq_constant_body_alloc();
return iseq;
}
@@ -536,14 +536,14 @@ rb_iseq_pathobj_new(VALUE path, VALUE realpath)
void
rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath)
{
- RB_OBJ_WRITE(iseq, &iseq->body->location.pathobj,
+ RB_OBJ_WRITE(iseq, &ISEQ_BODY(iseq)->location.pathobj,
rb_iseq_pathobj_new(path, realpath));
}
static rb_iseq_location_t *
iseq_location_setup(rb_iseq_t *iseq, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_code_location_t *code_location, const int node_id)
{
- rb_iseq_location_t *loc = &iseq->body->location;
+ rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
rb_iseq_pathobj_set(iseq, path, realpath);
RB_OBJ_WRITE(iseq, &loc->label, name);
@@ -566,7 +566,7 @@ iseq_location_setup(rb_iseq_t *iseq, VALUE name, VALUE path, VALUE realpath, VAL
static void
set_relation(rb_iseq_t *iseq, const rb_iseq_t *piseq)
{
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
const VALUE type = body->type;
/* set class nest stack */
@@ -577,7 +577,7 @@ set_relation(rb_iseq_t *iseq, const rb_iseq_t *piseq)
body->local_iseq = iseq;
}
else if (piseq) {
- body->local_iseq = piseq->body->local_iseq;
+ body->local_iseq = ISEQ_BODY(piseq)->local_iseq;
}
if (piseq) {
@@ -612,7 +612,7 @@ prepare_iseq_build(rb_iseq_t *iseq,
{
VALUE coverage = Qfalse;
VALUE err_info = Qnil;
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
if (parent && (type == ISEQ_TYPE_MAIN || type == ISEQ_TYPE_TOP))
err_info = Qfalse;
@@ -623,7 +623,7 @@ prepare_iseq_build(rb_iseq_t *iseq,
name = rb_fstring(name);
iseq_location_setup(iseq, name, path, realpath, first_lineno, code_location, node_id);
if (iseq != body->local_iseq) {
- RB_OBJ_WRITE(iseq, &body->location.base_label, body->local_iseq->body->location.label);
+ RB_OBJ_WRITE(iseq, &body->location.base_label, ISEQ_BODY(body->local_iseq)->location.label);
}
ISEQ_COVERAGE_SET(iseq, Qnil);
ISEQ_ORIGINAL_ISEQ_CLEAR(iseq);
@@ -671,7 +671,7 @@ rb_iseq_insns_info_encode_positions(const rb_iseq_t *iseq)
{
#if VM_INSN_INFO_TABLE_IMPL == 2
/* create succ_index_table */
- struct rb_iseq_constant_body *const body = iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
int size = body->insns_info.size;
int max_pos = body->iseq_size;
int *data = (int *)body->insns_info.positions;
@@ -708,7 +708,7 @@ static VALUE
finish_iseq_build(rb_iseq_t *iseq)
{
struct iseq_compile_data *data = ISEQ_COMPILE_DATA(iseq);
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
VALUE err = data->err_info;
ISEQ_COMPILE_DATA_CLEAR(iseq);
compile_data_free(data);
@@ -725,7 +725,7 @@ finish_iseq_build(rb_iseq_t *iseq)
}
RB_DEBUG_COUNTER_INC(iseq_num);
- RB_DEBUG_COUNTER_ADD(iseq_cd_num, iseq->body->ci_size);
+ RB_DEBUG_COUNTER_ADD(iseq_cd_num, ISEQ_BODY(iseq)->ci_size);
rb_iseq_init_trace(iseq);
return Qtrue;
@@ -920,7 +920,7 @@ rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE rea
script_lines = ast->script_lines;
}
else if (parent) {
- script_lines = parent->body->variable.script_lines;
+ script_lines = ISEQ_BODY(parent)->variable.script_lines;
}
prepare_iseq_build(iseq, name, path, realpath, first_lineno, node ? &node->nd_loc : NULL, node ? nd_node_id(node) : -1,
@@ -1036,7 +1036,7 @@ iseq_load(VALUE data, const rb_iseq_t *parent, VALUE opt)
exception = CHECK_ARRAY(rb_ary_entry(data, i++));
body = CHECK_ARRAY(rb_ary_entry(data, i++));
- iseq->body->local_iseq = iseq;
+ ISEQ_BODY(iseq)->local_iseq = iseq;
iseq_type = iseq_type_from_sym(type);
if (iseq_type == (enum iseq_type)-1) {
@@ -1134,13 +1134,13 @@ rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE realpath, VALUE line, V
VALUE
rb_iseq_path(const rb_iseq_t *iseq)
{
- return pathobj_path(iseq->body->location.pathobj);
+ return pathobj_path(ISEQ_BODY(iseq)->location.pathobj);
}
VALUE
rb_iseq_realpath(const rb_iseq_t *iseq)
{
- return pathobj_realpath(iseq->body->location.pathobj);
+ return pathobj_realpath(ISEQ_BODY(iseq)->location.pathobj);
}
VALUE
@@ -1158,25 +1158,25 @@ rb_iseq_from_eval_p(const rb_iseq_t *iseq)
VALUE
rb_iseq_label(const rb_iseq_t *iseq)
{
- return iseq->body->location.label;
+ return ISEQ_BODY(iseq)->location.label;
}
VALUE
rb_iseq_base_label(const rb_iseq_t *iseq)
{
- return iseq->body->location.base_label;
+ return ISEQ_BODY(iseq)->location.base_label;
}
VALUE
rb_iseq_first_lineno(const rb_iseq_t *iseq)
{
- return iseq->body->location.first_lineno;
+ return ISEQ_BODY(iseq)->location.first_lineno;
}
VALUE
rb_iseq_method_name(const rb_iseq_t *iseq)
{
- struct rb_iseq_constant_body *const body = iseq->body->local_iseq->body;
+ struct rb_iseq_constant_body *const body = ISEQ_BODY(ISEQ_BODY(iseq)->local_iseq);
if (body->type == ISEQ_TYPE_METHOD) {
return body->location.base_label;
@@ -1189,7 +1189,7 @@ rb_iseq_method_name(const rb_iseq_t *iseq)
void
rb_iseq_code_location(const rb_iseq_t *iseq, int *beg_pos_lineno, int *beg_pos_column, int *end_pos_lineno, int *end_pos_column)
{
- const rb_code_location_t *loc = &iseq->body->location.code_location;
+ const rb_code_location_t *loc = &ISEQ_BODY(iseq)->location.code_location;
if (beg_pos_lineno) *beg_pos_lineno = loc->beg_pos.lineno;
if (beg_pos_column) *beg_pos_column = loc->beg_pos.column;
if (end_pos_lineno) *end_pos_lineno = loc->end_pos.lineno;
@@ -1201,7 +1201,7 @@ static ID iseq_type_id(enum iseq_type type);
VALUE
rb_iseq_type(const rb_iseq_t *iseq)
{
- return ID2SYM(iseq_type_id(iseq->body->type));
+ return ID2SYM(iseq_type_id(ISEQ_BODY(iseq)->type));
}
VALUE
@@ -1458,11 +1458,11 @@ iseqw_check(VALUE iseqw)
{
rb_iseq_t *iseq = DATA_PTR(iseqw);
- if (!iseq->body) {
+ if (!ISEQ_BODY(iseq)) {
rb_ibf_load_iseq_complete(iseq);
}
- if (!iseq->body->location.label) {
+ if (!ISEQ_BODY(iseq)->location.label) {
rb_raise(rb_eTypeError, "uninitialized InstructionSequence");
}
return iseq;
@@ -1496,7 +1496,7 @@ static VALUE
iseqw_inspect(VALUE self)
{
const rb_iseq_t *iseq = iseqw_check(self);
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
VALUE klass = rb_class_name(rb_obj_class(self));
if (!body->location.label) {
@@ -1726,7 +1726,7 @@ iseqw_to_a(VALUE self)
static const struct iseq_insn_info_entry *
get_insn_info_binary_search(const rb_iseq_t *iseq, size_t pos)
{
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
size_t size = body->insns_info.size;
const struct iseq_insn_info_entry *insns_info = body->insns_info.body;
const unsigned int *positions = body->insns_info.positions;
@@ -1779,7 +1779,7 @@ get_insn_info(const rb_iseq_t *iseq, size_t pos)
static const struct iseq_insn_info_entry *
get_insn_info_succinct_bitvector(const rb_iseq_t *iseq, size_t pos)
{
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
size_t size = body->insns_info.size;
const struct iseq_insn_info_entry *insns_info = body->insns_info.body;
const int debug = 0;
@@ -1822,7 +1822,7 @@ get_insn_info(const rb_iseq_t *iseq, size_t pos)
static const struct iseq_insn_info_entry *
get_insn_info_linear_search(const rb_iseq_t *iseq, size_t pos)
{
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
size_t i = 0, size = body->insns_info.size;
const struct iseq_insn_info_entry *insns_info = body->insns_info.body;
const unsigned int *positions = body->insns_info.positions;
@@ -1869,7 +1869,7 @@ get_insn_info(const rb_iseq_t *iseq, size_t pos)
static void
validate_get_insn_info(const rb_iseq_t *iseq)
{
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
size_t i;
for (i = 0; i < body->iseq_size; i++) {
if (get_insn_info_linear_search(iseq, i) != get_insn_info(iseq, i)) {
@@ -1941,10 +1941,10 @@ local_var_name(const rb_iseq_t *diseq, VALUE level, VALUE op)
int idx;
for (i = 0; i < level; i++) {
- diseq = diseq->body->parent_iseq;
+ diseq = ISEQ_BODY(diseq)->parent_iseq;
}
- idx = diseq->body->local_table_size - (int)op - 1;
- lid = diseq->body->local_table[idx];
+ idx = ISEQ_BODY(diseq)->local_table_size - (int)op - 1;
+ lid = ISEQ_BODY(diseq)->local_table[idx];
name = rb_id2str(lid);
if (!name) {
name = rb_str_new_cstr("?");
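A quick worked example of the index math in local_var_name above (numbers only, nothing new): with level == 1 the loop hops to the parent ISeq once, and for a parent whose local_table_size is 3, an op of 0 yields idx == 3 - 0 - 1 == 2 (the last local-table entry) while an op of 2 yields idx == 0 (the first); locals without a printable name fall back to "?".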
@@ -2048,7 +2048,7 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
{
if (op) {
const rb_iseq_t *iseq = rb_iseq_check((rb_iseq_t *)op);
- ret = iseq->body->location.label;
+ ret = ISEQ_BODY(iseq)->location.label;
if (child) {
rb_ary_push(child, (VALUE)iseq);
}
@@ -2063,7 +2063,7 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
case TS_IVC:
case TS_ICVARC:
case TS_ISE:
- ret = rb_sprintf("<is:%"PRIdPTRDIFF">", (union iseq_inline_storage_entry *)op - iseq->body->is_entries);
+ ret = rb_sprintf("<is:%"PRIdPTRDIFF">", (union iseq_inline_storage_entry *)op - ISEQ_BODY(iseq)->is_entries);
break;
case TS_CALLDATA:
@@ -2248,7 +2248,7 @@ catch_type(int type)
static VALUE
iseq_inspect(const rb_iseq_t *iseq)
{
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
if (!body->location.label) {
return rb_sprintf("#<ISeq: uninitialized>");
}
@@ -2273,7 +2273,7 @@ static const rb_data_type_t tmp_set = {
static VALUE
rb_iseq_disasm_recursive(const rb_iseq_t *iseq, VALUE indent)
{
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
VALUE *code;
VALUE str = rb_str_new(0, 0);
VALUE child = rb_ary_tmp_new(3);
@@ -2446,7 +2446,7 @@ iseq_iterate_children(const rb_iseq_t *iseq, void (*iter_func)(const rb_iseq_t *
{
unsigned int i;
VALUE *code = rb_iseq_original_iseq(iseq);
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
const rb_iseq_t *child;
VALUE all_children = rb_obj_hide(rb_ident_hash_new());
@@ -2538,7 +2538,7 @@ static VALUE
iseqw_trace_points(VALUE self)
{
const rb_iseq_t *iseq = iseqw_check(self);
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
unsigned int i;
VALUE ary = rb_ary_new();
@@ -2768,7 +2768,7 @@ iseq_data_to_ary(const rb_iseq_t *iseq)
{
unsigned int i;
long l;
- const struct rb_iseq_constant_body *const iseq_body = iseq->body;
+ const struct rb_iseq_constant_body *const iseq_body = ISEQ_BODY(iseq);
const struct iseq_insn_info_entry *prev_insn_info;
unsigned int pos;
int last_line = 0;
@@ -3091,7 +3091,7 @@ VALUE
rb_iseq_parameters(const rb_iseq_t *iseq, int is_proc)
{
int i, r;
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
const struct rb_iseq_param_keyword *const keyword = body->param.keyword;
VALUE a, args = rb_ary_new2(body->param.size);
ID req, opt, rest, block, key, keyrest;
@@ -3294,7 +3294,7 @@ rb_vm_insn_addr2opcode(const void *addr)
rb_bug("rb_vm_insn_addr2opcode: invalid insn address: %p", addr);
}
-// Decode `iseq->body->iseq_encoded[i]` to an insn.
+// Decode `ISEQ_BODY(iseq)->iseq_encoded[i]` to an insn.
int
rb_vm_insn_decode(const VALUE encoded)
{
@@ -3327,7 +3327,7 @@ encoded_iseq_trace_instrument(VALUE *iseq_encoded_insn, rb_event_flag_t turnon,
void
rb_iseq_trace_flag_cleared(const rb_iseq_t *iseq, size_t pos)
{
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
VALUE *iseq_encoded = (VALUE *)body->iseq_encoded;
encoded_iseq_trace_instrument(&iseq_encoded[pos], 0, false);
}
@@ -3354,7 +3354,7 @@ iseq_add_local_tracepoint(const rb_iseq_t *iseq, rb_event_flag_t turnon_events,
{
unsigned int pc;
int n = 0;
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
VALUE *iseq_encoded = (VALUE *)body->iseq_encoded;
VM_ASSERT(ISEQ_EXECUTABLE_P(iseq));
@@ -3428,7 +3428,7 @@ iseq_remove_local_tracepoint(const rb_iseq_t *iseq, VALUE tpval)
if (iseq->aux.exec.local_hooks) {
unsigned int pc;
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
VALUE *iseq_encoded = (VALUE *)body->iseq_encoded;
rb_event_flag_t local_events = 0;
@@ -3486,7 +3486,7 @@ rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
}
else {
unsigned int pc;
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
VALUE *iseq_encoded = (VALUE *)body->iseq_encoded;
rb_event_flag_t enabled_events;
rb_event_flag_t local_events = iseq->aux.exec.local_hooks ? iseq->aux.exec.local_hooks->events : 0;
@@ -3767,7 +3767,7 @@ static VALUE
iseqw_script_lines(VALUE self)
{
const rb_iseq_t *iseq = iseqw_check(self);
- return iseq->body->variable.script_lines;
+ return ISEQ_BODY(iseq)->variable.script_lines;
}
/*
diff --git a/iseq.h b/iseq.h
index fc61d03b76..f90b0be7ab 100644
--- a/iseq.h
+++ b/iseq.h
@@ -28,35 +28,35 @@ typedef struct rb_iseq_struct rb_iseq_t;
extern const ID rb_iseq_shared_exc_local_tbl[];
-#define ISEQ_COVERAGE(iseq) iseq->body->variable.coverage
-#define ISEQ_COVERAGE_SET(iseq, cov) RB_OBJ_WRITE(iseq, &iseq->body->variable.coverage, cov)
+#define ISEQ_COVERAGE(iseq) ISEQ_BODY(iseq)->variable.coverage
+#define ISEQ_COVERAGE_SET(iseq, cov) RB_OBJ_WRITE(iseq, &ISEQ_BODY(iseq)->variable.coverage, cov)
#define ISEQ_LINE_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_LINES)
#define ISEQ_BRANCH_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_BRANCHES)
-#define ISEQ_PC2BRANCHINDEX(iseq) iseq->body->variable.pc2branchindex
-#define ISEQ_PC2BRANCHINDEX_SET(iseq, h) RB_OBJ_WRITE(iseq, &iseq->body->variable.pc2branchindex, h)
+#define ISEQ_PC2BRANCHINDEX(iseq) ISEQ_BODY(iseq)->variable.pc2branchindex
+#define ISEQ_PC2BRANCHINDEX_SET(iseq, h) RB_OBJ_WRITE(iseq, &ISEQ_BODY(iseq)->variable.pc2branchindex, h)
-#define ISEQ_FLIP_CNT(iseq) (iseq)->body->variable.flip_count
+#define ISEQ_FLIP_CNT(iseq) ISEQ_BODY(iseq)->variable.flip_count
static inline rb_snum_t
ISEQ_FLIP_CNT_INCREMENT(const rb_iseq_t *iseq)
{
- rb_snum_t cnt = iseq->body->variable.flip_count;
- iseq->body->variable.flip_count += 1;
+ rb_snum_t cnt = ISEQ_BODY(iseq)->variable.flip_count;
+ ISEQ_BODY(iseq)->variable.flip_count += 1;
return cnt;
}
static inline VALUE *
ISEQ_ORIGINAL_ISEQ(const rb_iseq_t *iseq)
{
- return iseq->body->variable.original_iseq;
+ return ISEQ_BODY(iseq)->variable.original_iseq;
}
static inline void
ISEQ_ORIGINAL_ISEQ_CLEAR(const rb_iseq_t *iseq)
{
- void *ptr = iseq->body->variable.original_iseq;
- iseq->body->variable.original_iseq = NULL;
+ void *ptr = ISEQ_BODY(iseq)->variable.original_iseq;
+ ISEQ_BODY(iseq)->variable.original_iseq = NULL;
if (ptr) {
ruby_xfree(ptr);
}
@@ -65,7 +65,7 @@ ISEQ_ORIGINAL_ISEQ_CLEAR(const rb_iseq_t *iseq)
static inline VALUE *
ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size)
{
- return iseq->body->variable.original_iseq =
+ return ISEQ_BODY(iseq)->variable.original_iseq =
ALLOC_N(VALUE, size);
}
diff --git a/mjit.c b/mjit.c
index e66623584e..2870b2b304 100644
--- a/mjit.c
+++ b/mjit.c
@@ -105,8 +105,8 @@ mjit_update_references(const rb_iseq_t *iseq)
return;
CRITICAL_SECTION_START(4, "mjit_update_references");
- if (iseq->body->jit_unit) {
- iseq->body->jit_unit->iseq = (rb_iseq_t *)rb_gc_location((VALUE)iseq->body->jit_unit->iseq);
+ if (ISEQ_BODY(iseq)->jit_unit) {
+ ISEQ_BODY(iseq)->jit_unit->iseq = (rb_iseq_t *)rb_gc_location((VALUE)ISEQ_BODY(iseq)->jit_unit->iseq);
// We need to invalidate JIT-ed code for the ISeq because it embeds pointer addresses.
// To efficiently do that, we use the same thing as TracePoint and thus everything is cancelled for now.
// See mjit.h and tool/ruby_vm/views/_mjit_compile_insn.erb for how `mjit_call_p` is used.
@@ -114,7 +114,7 @@ mjit_update_references(const rb_iseq_t *iseq)
}
// Units in stale_units (list of over-speculated and invalidated code) are not referenced from
- // `iseq->body->jit_unit` anymore (because new one replaces that). So we need to check them too.
+ // `ISEQ_BODY(iseq)->jit_unit` anymore (because new one replaces that). So we need to check them too.
// TODO: we should be able to reduce the number of units checked here.
struct rb_mjit_unit *unit = NULL;
list_for_each(&stale_units.head, unit, unode) {
@@ -136,13 +136,13 @@ mjit_free_iseq(const rb_iseq_t *iseq)
CRITICAL_SECTION_START(4, "mjit_free_iseq");
RUBY_ASSERT_ALWAYS(in_gc);
RUBY_ASSERT_ALWAYS(!in_jit);
- if (iseq->body->jit_unit) {
+ if (ISEQ_BODY(iseq)->jit_unit) {
// jit_unit is not freed here because it may be referred by multiple
// lists of units. `get_from_list` and `mjit_finish` do the job.
- iseq->body->jit_unit->iseq = NULL;
+ ISEQ_BODY(iseq)->jit_unit->iseq = NULL;
}
// Units in stale_units (list of over-speculated and invalidated code) are not referenced from
- // `iseq->body->jit_unit` anymore (because new one replaces that). So we need to check them too.
+ // `ISEQ_BODY(iseq)->jit_unit` anymore (because new one replaces that). So we need to check them too.
// TODO: we should be able to reduce the number of units checked here.
struct rb_mjit_unit *unit = NULL;
list_for_each(&stale_units.head, unit, unode) {
@@ -257,7 +257,7 @@ create_unit(const rb_iseq_t *iseq)
unit->id = current_unit_num++;
unit->iseq = (rb_iseq_t *)iseq;
- iseq->body->jit_unit = unit;
+ ISEQ_BODY(iseq)->jit_unit = unit;
}
// Return true if given ISeq body should be compiled by MJIT
@@ -275,8 +275,8 @@ mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_inf
{
if (!mjit_enabled || pch_status == PCH_FAILED)
return;
- if (!mjit_target_iseq_p(iseq->body)) {
- iseq->body->jit_func = (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC; // skip mjit_wait
+ if (!mjit_target_iseq_p(ISEQ_BODY(iseq))) {
+ ISEQ_BODY(iseq)->jit_func = (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC; // skip mjit_wait
return;
}
@@ -284,21 +284,21 @@ mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_inf
CRITICAL_SECTION_START(3, "in add_iseq_to_process");
// This prevents multiple Ractors from enqueueing the same ISeq twice.
- if (rb_multi_ractor_p() && (uintptr_t)iseq->body->jit_func != NOT_ADDED_JIT_ISEQ_FUNC) {
+ if (rb_multi_ractor_p() && (uintptr_t)ISEQ_BODY(iseq)->jit_func != NOT_ADDED_JIT_ISEQ_FUNC) {
CRITICAL_SECTION_FINISH(3, "in add_iseq_to_process");
return;
}
}
RB_DEBUG_COUNTER_INC(mjit_add_iseq_to_process);
- iseq->body->jit_func = (mjit_func_t)NOT_READY_JIT_ISEQ_FUNC;
+ ISEQ_BODY(iseq)->jit_func = (mjit_func_t)NOT_READY_JIT_ISEQ_FUNC;
create_unit(iseq);
- if (iseq->body->jit_unit == NULL)
+ if (ISEQ_BODY(iseq)->jit_unit == NULL)
// Failure in creating the unit.
return;
if (compile_info != NULL)
- iseq->body->jit_unit->compile_info = *compile_info;
- add_to_list(iseq->body->jit_unit, &unit_queue);
+ ISEQ_BODY(iseq)->jit_unit->compile_info = *compile_info;
+ add_to_list(ISEQ_BODY(iseq)->jit_unit, &unit_queue);
if (active_units.length >= mjit_opts.max_cache_size) {
unload_requests++;
}
@@ -370,28 +370,28 @@ rb_mjit_iseq_compile_info(const struct rb_iseq_constant_body *body)
static void
mjit_recompile(const rb_iseq_t *iseq)
{
- if ((uintptr_t)iseq->body->jit_func <= (uintptr_t)LAST_JIT_ISEQ_FUNC)
+ if ((uintptr_t)ISEQ_BODY(iseq)->jit_func <= (uintptr_t)LAST_JIT_ISEQ_FUNC)
return;
- verbose(1, "JIT recompile: %s@%s:%d", RSTRING_PTR(iseq->body->location.label),
- RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(iseq->body->location.first_lineno));
- assert(iseq->body->jit_unit != NULL);
+ verbose(1, "JIT recompile: %s@%s:%d", RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
+ RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(ISEQ_BODY(iseq)->location.first_lineno));
+ assert(ISEQ_BODY(iseq)->jit_unit != NULL);
if (UNLIKELY(mjit_opts.wait)) {
CRITICAL_SECTION_START(3, "in rb_mjit_recompile_iseq");
- remove_from_list(iseq->body->jit_unit, &active_units);
- add_to_list(iseq->body->jit_unit, &stale_units);
- mjit_add_iseq_to_process(iseq, &iseq->body->jit_unit->compile_info, true);
+ remove_from_list(ISEQ_BODY(iseq)->jit_unit, &active_units);
+ add_to_list(ISEQ_BODY(iseq)->jit_unit, &stale_units);
+ mjit_add_iseq_to_process(iseq, &ISEQ_BODY(iseq)->jit_unit->compile_info, true);
CRITICAL_SECTION_FINISH(3, "in rb_mjit_recompile_iseq");
- mjit_wait(iseq->body);
+ mjit_wait(ISEQ_BODY(iseq));
}
else {
// Lazily move active_units to stale_units to avoid race conditions around active_units with compaction.
// Also, it's lazily moved to unit_queue as well because otherwise it won't be added to stale_units properly.
// It's good to avoid a race condition between mjit_add_iseq_to_process and mjit_compile around jit_unit as well.
CRITICAL_SECTION_START(3, "in rb_mjit_recompile_iseq");
- iseq->body->jit_unit->stale_p = true;
- iseq->body->jit_func = (mjit_func_t)NOT_READY_JIT_ISEQ_FUNC;
+ ISEQ_BODY(iseq)->jit_unit->stale_p = true;
+ ISEQ_BODY(iseq)->jit_func = (mjit_func_t)NOT_READY_JIT_ISEQ_FUNC;
pending_stale_p = true;
CRITICAL_SECTION_FINISH(3, "in rb_mjit_recompile_iseq");
}
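The early return in mjit_recompile above leans on a convention that is easy to miss: jit_func holds either a small sentinel (NOT_ADDED/NOT_READY/NOT_COMPILED_JIT_ISEQ_FUNC, all at or below LAST_JIT_ISEQ_FUNC, as seen in the hunks above) or a pointer to real JIT-generated code. A minimal sketch of that check, using a hypothetical helper name (mjit_has_code_p is not an existing function):

    // Sketch only: assumes the usual mjit.h / vm_core.h includes are in scope.
    static inline bool
    mjit_has_code_p(const rb_iseq_t *iseq)
    {
        // Anything above the last sentinel value is a callable function pointer.
        return (uintptr_t)ISEQ_BODY(iseq)->jit_func > (uintptr_t)LAST_JIT_ISEQ_FUNC;
    }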
@@ -401,7 +401,7 @@ mjit_recompile(const rb_iseq_t *iseq)
void
rb_mjit_recompile_send(const rb_iseq_t *iseq)
{
- rb_mjit_iseq_compile_info(iseq->body)->disable_send_cache = true;
+ rb_mjit_iseq_compile_info(ISEQ_BODY(iseq))->disable_send_cache = true;
mjit_recompile(iseq);
}
@@ -409,7 +409,7 @@ rb_mjit_recompile_send(const rb_iseq_t *iseq)
void
rb_mjit_recompile_ivar(const rb_iseq_t *iseq)
{
- rb_mjit_iseq_compile_info(iseq->body)->disable_ivar_cache = true;
+ rb_mjit_iseq_compile_info(ISEQ_BODY(iseq))->disable_ivar_cache = true;
mjit_recompile(iseq);
}
@@ -417,7 +417,7 @@ rb_mjit_recompile_ivar(const rb_iseq_t *iseq)
void
rb_mjit_recompile_exivar(const rb_iseq_t *iseq)
{
- rb_mjit_iseq_compile_info(iseq->body)->disable_exivar_cache = true;
+ rb_mjit_iseq_compile_info(ISEQ_BODY(iseq))->disable_exivar_cache = true;
mjit_recompile(iseq);
}
@@ -425,7 +425,7 @@ rb_mjit_recompile_exivar(const rb_iseq_t *iseq)
void
rb_mjit_recompile_inlining(const rb_iseq_t *iseq)
{
- rb_mjit_iseq_compile_info(iseq->body)->disable_inlining = true;
+ rb_mjit_iseq_compile_info(ISEQ_BODY(iseq))->disable_inlining = true;
mjit_recompile(iseq);
}
@@ -433,7 +433,7 @@ rb_mjit_recompile_inlining(const rb_iseq_t *iseq)
void
rb_mjit_recompile_const(const rb_iseq_t *iseq)
{
- rb_mjit_iseq_compile_info(iseq->body)->disable_const_cache = true;
+ rb_mjit_iseq_compile_info(ISEQ_BODY(iseq))->disable_const_cache = true;
mjit_recompile(iseq);
}
@@ -932,8 +932,8 @@ mjit_dump_total_calls(void)
fprintf(stderr, "[MJIT_COUNTER] total_calls of active_units:\n");
list_for_each(&active_units.head, unit, unode) {
const rb_iseq_t *iseq = unit->iseq;
- fprintf(stderr, "%8ld: %s@%s:%d\n", iseq->body->total_calls, RSTRING_PTR(iseq->body->location.label),
- RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(iseq->body->location.first_lineno));
+ fprintf(stderr, "%8ld: %s@%s:%d\n", ISEQ_BODY(iseq)->total_calls, RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
+ RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(ISEQ_BODY(iseq)->location.first_lineno));
}
}
#endif
diff --git a/mjit.h b/mjit.h
index 4e91decb01..fb216da8b3 100644
--- a/mjit.h
+++ b/mjit.h
@@ -143,7 +143,7 @@ static inline VALUE
mjit_exec(rb_execution_context_t *ec)
{
const rb_iseq_t *iseq = ec->cfp->iseq;
- struct rb_iseq_constant_body *body = iseq->body;
+ struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
bool yjit_enabled = false;
#ifndef MJIT_HEADER
// Don't want to compile with YJIT or use code generated by YJIT
diff --git a/mjit_compile.c b/mjit_compile.c
index 946edc0ec6..2d426b39fc 100644
--- a/mjit_compile.c
+++ b/mjit_compile.c
@@ -123,7 +123,7 @@ fastpath_applied_iseq_p(const CALL_INFO ci, const CALL_CACHE cc, const rb_iseq_t
extern bool rb_simple_iseq_p(const rb_iseq_t *iseq);
return iseq != NULL
&& !(vm_ci_flag(ci) & VM_CALL_KW_SPLAT) && rb_simple_iseq_p(iseq) // Top of vm_callee_setup_arg. In this case, opt_pc is 0.
- && vm_ci_argc(ci) == (unsigned int)iseq->body->param.lead_num // exclude argument_arity_error (assumption: `calling->argc == ci->orig_argc` in send insns)
+ && vm_ci_argc(ci) == (unsigned int)ISEQ_BODY(iseq)->param.lead_num // exclude argument_arity_error (assumption: `calling->argc == ci->orig_argc` in send insns)
&& vm_call_iseq_optimizable_p(ci, cc); // CC_SET_FASTPATH condition
}
@@ -333,7 +333,7 @@ mjit_capture_is_entries(const struct rb_iseq_constant_body *body, union iseq_inl
static bool
mjit_compile_body(FILE *f, const rb_iseq_t *iseq, struct compile_status *status)
{
- const struct rb_iseq_constant_body *body = iseq->body;
+ const struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
status->success = true;
status->local_stack_p = !body->catch_except_p;
@@ -370,7 +370,7 @@ mjit_compile_body(FILE *f, const rb_iseq_t *iseq, struct compile_status *status)
if (body->param.flags.has_opt) {
int i;
fprintf(f, "\n");
- fprintf(f, " switch (reg_cfp->pc - reg_cfp->iseq->body->iseq_encoded) {\n");
+        fprintf(f, "    switch (reg_cfp->pc - ISEQ_BODY(reg_cfp->iseq)->iseq_encoded) {\n");
for (i = 0; i <= body->param.opt_num; i++) {
VALUE pc_offset = body->param.opt_table[i];
fprintf(f, " case %"PRIdVALUE":\n", pc_offset);
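For an ISeq with optional parameters, the loop above writes a PC-offset dispatch into the generated C so the JIT-ed function resumes at whichever optional-argument entry point the interpreter chose. Roughly, the emitted code has this shape (the offsets and goto targets below are illustrative assumptions; opt_table supplies the real offsets):

    /* Hypothetical output for opt_num == 2 with opt_table = {0, 3, 6};
     * label names are assumptions about the generated file. */
    switch (reg_cfp->pc - ISEQ_BODY(reg_cfp->iseq)->iseq_encoded) {
      case 0: goto label_0; /* no optional argument supplied */
      case 3: goto label_3; /* first optional already evaluated */
      case 6: goto label_6; /* both optionals already evaluated */
    }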
@@ -443,7 +443,7 @@ rb_mjit_inlinable_iseq(const struct rb_callinfo *ci, const struct rb_callcache *
vm_cc_cme(cc)->def->type == VM_METHOD_TYPE_ISEQ &&
fastpath_applied_iseq_p(ci, cc, iseq = def_iseq_ptr(vm_cc_cme(cc)->def)) &&
// CC_SET_FASTPATH in vm_callee_setup_arg
- inlinable_iseq_p(iseq->body)) {
+ inlinable_iseq_p(ISEQ_BODY(iseq))) {
return iseq;
}
return NULL;
@@ -513,17 +513,17 @@ precompile_inlinable_child_iseq(FILE *f, const rb_iseq_t *child_iseq, struct com
const struct rb_callinfo *ci, const struct rb_callcache *cc, unsigned int pos)
{
struct compile_status child_status = { .compiled_iseq = status->compiled_iseq, .compiled_id = status->compiled_id };
- INIT_COMPILE_STATUS(child_status, child_iseq->body, false);
+ INIT_COMPILE_STATUS(child_status, ISEQ_BODY(child_iseq), false);
child_status.inline_context = (struct inlined_call_context){
.orig_argc = vm_ci_argc(ci),
.me = (VALUE)vm_cc_cme(cc),
- .param_size = child_iseq->body->param.size,
- .local_size = child_iseq->body->local_table_size
+ .param_size = ISEQ_BODY(child_iseq)->param.size,
+ .local_size = ISEQ_BODY(child_iseq)->local_table_size
};
- if (child_iseq->body->ci_size > 0 && child_status.cc_entries_index == -1) {
+ if (ISEQ_BODY(child_iseq)->ci_size > 0 && child_status.cc_entries_index == -1) {
return false;
}
- init_ivar_compile_status(child_iseq->body, &child_status);
+ init_ivar_compile_status(ISEQ_BODY(child_iseq), &child_status);
fprintf(f, "ALWAYS_INLINE(static VALUE _mjit%d_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq));\n", status->compiled_id, pos);
fprintf(f, "static inline VALUE\n_mjit%d_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq)\n{\n", status->compiled_id, pos);
@@ -539,7 +539,7 @@ precompile_inlinable_child_iseq(FILE *f, const rb_iseq_t *child_iseq, struct com
static bool
precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status *status)
{
- const struct rb_iseq_constant_body *body = iseq->body;
+ const struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
unsigned int pos = 0;
while (pos < body->iseq_size) {
int insn = rb_vm_insn_decode(body->iseq_encoded[pos]);
@@ -551,14 +551,14 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
extern bool rb_mjit_compiling_iseq_p(const rb_iseq_t *iseq);
const rb_iseq_t *child_iseq;
if ((child_iseq = rb_mjit_inlinable_iseq(ci, cc)) != NULL && rb_mjit_compiling_iseq_p(child_iseq)) {
- status->inlined_iseqs[pos] = child_iseq->body;
+ status->inlined_iseqs[pos] = ISEQ_BODY(child_iseq);
if (mjit_opts.verbose >= 1) // print beforehand because ISeq may be GCed during copy job.
fprintf(stderr, "JIT inline: %s@%s:%d => %s@%s:%d\n",
- RSTRING_PTR(child_iseq->body->location.label),
- RSTRING_PTR(rb_iseq_path(child_iseq)), FIX2INT(child_iseq->body->location.first_lineno),
- RSTRING_PTR(iseq->body->location.label),
- RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(iseq->body->location.first_lineno));
+ RSTRING_PTR(ISEQ_BODY(child_iseq)->location.label),
+ RSTRING_PTR(rb_iseq_path(child_iseq)), FIX2INT(ISEQ_BODY(child_iseq)->location.first_lineno),
+ RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
+ RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(ISEQ_BODY(iseq)->location.first_lineno));
if (!precompile_inlinable_child_iseq(f, child_iseq, status, ci, cc, pos))
return false;
}
@@ -572,12 +572,12 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
bool
mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname, int id)
{
- struct compile_status status = { .compiled_iseq = iseq->body, .compiled_id = id };
- INIT_COMPILE_STATUS(status, iseq->body, true);
- if (iseq->body->ci_size > 0 && status.cc_entries_index == -1) {
+ struct compile_status status = { .compiled_iseq = ISEQ_BODY(iseq), .compiled_id = id };
+ INIT_COMPILE_STATUS(status, ISEQ_BODY(iseq), true);
+ if (ISEQ_BODY(iseq)->ci_size > 0 && status.cc_entries_index == -1) {
return false;
}
- init_ivar_compile_status(iseq->body, &status);
+ init_ivar_compile_status(ISEQ_BODY(iseq), &status);
if (!status.compile_info->disable_send_cache && !status.compile_info->disable_inlining) {
if (!precompile_inlinable_iseqs(f, iseq, &status))
diff --git a/mjit_worker.c b/mjit_worker.c
index 879237eacc..986ce93cca 100644
--- a/mjit_worker.c
+++ b/mjit_worker.c
@@ -170,7 +170,7 @@ struct rb_mjit_unit {
struct rb_mjit_compile_info compile_info;
// captured CC values, they should be marked with iseq.
const struct rb_callcache **cc_entries;
- unsigned int cc_entries_size; // iseq->body->ci_size + ones of inlined iseqs
+ unsigned int cc_entries_size; // ISEQ_BODY(iseq)->ci_size + ones of inlined iseqs
};
// Linked list of struct rb_mjit_unit.
@@ -424,8 +424,8 @@ static void
free_unit(struct rb_mjit_unit *unit)
{
if (unit->iseq) { // ISeq is not GCed
- unit->iseq->body->jit_func = (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC;
- unit->iseq->body->jit_unit = NULL;
+ ISEQ_BODY(unit->iseq)->jit_func = (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC;
+ ISEQ_BODY(unit->iseq)->jit_unit = NULL;
}
if (unit->cc_entries) {
void *entries = (void *)unit->cc_entries;
@@ -510,7 +510,7 @@ get_from_list(struct rb_mjit_unit_list *list)
continue;
}
- if (best == NULL || best->iseq->body->total_calls < unit->iseq->body->total_calls) {
+ if (best == NULL || ISEQ_BODY(best->iseq)->total_calls < ISEQ_BODY(unit->iseq)->total_calls) {
best = unit;
}
}
@@ -706,7 +706,7 @@ sprint_funcname(char *funcname, const struct rb_mjit_unit *unit)
path = strstr(path, version) + strlen(version);
// Annotate all-normalized method names
- const char *method = RSTRING_PTR(iseq->body->location.label);
+ const char *method = RSTRING_PTR(ISEQ_BODY(iseq)->location.label);
if (!strcmp(method, "[]")) method = "AREF";
if (!strcmp(method, "[]=")) method = "ASET";
@@ -725,7 +725,7 @@ static const rb_iseq_t **compiling_iseqs = NULL;
static bool
set_compiling_iseqs(const rb_iseq_t *iseq)
{
- compiling_iseqs = calloc(iseq->body->iseq_size + 2, sizeof(rb_iseq_t *)); // 2: 1 (unit->iseq) + 1 (NULL end)
+ compiling_iseqs = calloc(ISEQ_BODY(iseq)->iseq_size + 2, sizeof(rb_iseq_t *)); // 2: 1 (unit->iseq) + 1 (NULL end)
if (compiling_iseqs == NULL)
return false;
@@ -733,10 +733,10 @@ set_compiling_iseqs(const rb_iseq_t *iseq)
int i = 1;
unsigned int pos = 0;
- while (pos < iseq->body->iseq_size) {
- int insn = rb_vm_insn_decode(iseq->body->iseq_encoded[pos]);
+ while (pos < ISEQ_BODY(iseq)->iseq_size) {
+ int insn = rb_vm_insn_decode(ISEQ_BODY(iseq)->iseq_encoded[pos]);
if (insn == BIN(opt_send_without_block) || insn == BIN(opt_size)) {
- CALL_DATA cd = (CALL_DATA)iseq->body->iseq_encoded[pos + 1];
+ CALL_DATA cd = (CALL_DATA)ISEQ_BODY(iseq)->iseq_encoded[pos + 1];
extern const rb_iseq_t *rb_mjit_inlinable_iseq(const struct rb_callinfo *ci, const struct rb_callcache *cc);
const rb_iseq_t *iseq = rb_mjit_inlinable_iseq(cd->ci, cd->cc);
if (iseq != NULL) {
@@ -1012,11 +1012,11 @@ compile_compact_jit_code(char* c_file)
sprint_funcname(funcname, child_unit);
long iseq_lineno = 0;
- if (FIXNUM_P(child_unit->iseq->body->location.first_lineno))
+ if (FIXNUM_P(ISEQ_BODY(child_unit->iseq)->location.first_lineno))
// FIX2INT may fallback to rb_num2long(), which is a method call and dangerous in MJIT worker. So using only FIX2LONG.
- iseq_lineno = FIX2LONG(child_unit->iseq->body->location.first_lineno);
+ iseq_lineno = FIX2LONG(ISEQ_BODY(child_unit->iseq)->location.first_lineno);
const char *sep = "@";
- const char *iseq_label = RSTRING_PTR(child_unit->iseq->body->location.label);
+ const char *iseq_label = RSTRING_PTR(ISEQ_BODY(child_unit->iseq)->location.label);
const char *iseq_path = RSTRING_PTR(rb_iseq_path(child_unit->iseq));
if (!iseq_label) iseq_label = sep = "";
fprintf(f, "\n/* %s%s%s:%ld */\n", iseq_label, sep, iseq_path, iseq_lineno);
@@ -1092,7 +1092,7 @@ compact_all_jit_code(void)
if (cur->iseq) { // Check whether GCed or not
// Usage of jit_code might be not in a critical section.
- MJIT_ATOMIC_SET(cur->iseq->body->jit_func, (mjit_func_t)func);
+ MJIT_ATOMIC_SET(ISEQ_BODY(cur->iseq)->jit_func, (mjit_func_t)func);
}
}
CRITICAL_SECTION_FINISH(3, "in compact_all_jit_code to read list");
@@ -1209,12 +1209,12 @@ convert_unit_to_func(struct rb_mjit_unit *unit)
// To make MJIT worker thread-safe against GC.compact, copy ISeq values while `in_jit` is true.
long iseq_lineno = 0;
- if (FIXNUM_P(unit->iseq->body->location.first_lineno))
+ if (FIXNUM_P(ISEQ_BODY(unit->iseq)->location.first_lineno))
// FIX2INT may fallback to rb_num2long(), which is a method call and dangerous in MJIT worker. So using only FIX2LONG.
- iseq_lineno = FIX2LONG(unit->iseq->body->location.first_lineno);
- char *iseq_label = alloca(RSTRING_LEN(unit->iseq->body->location.label) + 1);
+ iseq_lineno = FIX2LONG(ISEQ_BODY(unit->iseq)->location.first_lineno);
+ char *iseq_label = alloca(RSTRING_LEN(ISEQ_BODY(unit->iseq)->location.label) + 1);
char *iseq_path = alloca(RSTRING_LEN(rb_iseq_path(unit->iseq)) + 1);
- strcpy(iseq_label, RSTRING_PTR(unit->iseq->body->location.label));
+ strcpy(iseq_label, RSTRING_PTR(ISEQ_BODY(unit->iseq)->location.label));
strcpy(iseq_path, RSTRING_PTR(rb_iseq_path(unit->iseq)));
verbose(2, "start compilation: %s@%s:%ld -> %s", iseq_label, iseq_path, iseq_lineno, c_file);
@@ -1317,8 +1317,8 @@ mark_ec_units(rb_execution_context_t *ec)
const rb_iseq_t *iseq;
if (cfp->pc && (iseq = cfp->iseq) != NULL
&& imemo_type((VALUE) iseq) == imemo_iseq
- && (iseq->body->jit_unit) != NULL) {
- iseq->body->jit_unit->used_code_p = true;
+ && (ISEQ_BODY(iseq)->jit_unit) != NULL) {
+ ISEQ_BODY(iseq)->jit_unit->used_code_p = true;
}
if (cfp == ec->cfp)
@@ -1373,9 +1373,9 @@ unload_units(void)
// Calculate the next max total_calls in unit_queue
long unsigned max_queue_calls = 0;
list_for_each(&unit_queue.head, unit, unode) {
- if (unit->iseq != NULL && max_queue_calls < unit->iseq->body->total_calls
- && unit->iseq->body->total_calls < prev_queue_calls) {
- max_queue_calls = unit->iseq->body->total_calls;
+ if (unit->iseq != NULL && max_queue_calls < ISEQ_BODY(unit->iseq)->total_calls
+ && ISEQ_BODY(unit->iseq)->total_calls < prev_queue_calls) {
+ max_queue_calls = ISEQ_BODY(unit->iseq)->total_calls;
}
}
prev_queue_calls = max_queue_calls;
@@ -1385,9 +1385,9 @@ unload_units(void)
if (unit->used_code_p) // We can't unload code on stack.
continue;
- if (max_queue_calls > unit->iseq->body->total_calls) {
+ if (max_queue_calls > ISEQ_BODY(unit->iseq)->total_calls) {
verbose(2, "Unloading unit %d (calls=%lu, threshold=%lu)",
- unit->id, unit->iseq->body->total_calls, max_queue_calls);
+ unit->id, ISEQ_BODY(unit->iseq)->total_calls, max_queue_calls);
assert(unit->handle != NULL);
remove_from_list(unit, &active_units);
free_unit(unit);
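Reading the two hunks above with made-up numbers may help: if the still-queued units have total_calls of 30 and 60 (both under the previous pass's threshold), max_queue_calls becomes 60, and the second loop then unloads every active unit whose ISeq has fewer than 60 recorded calls and whose code is not on any stack (used_code_p unset), freeing room for the hotter queued units.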
@@ -1455,7 +1455,7 @@ mjit_worker(void)
remove_from_list(unit, &active_units);
add_to_list(unit, &stale_units);
// Lazily put it to unit_queue as well to avoid race conditions on jit_unit with mjit_compile.
- mjit_add_iseq_to_process(unit->iseq, &unit->iseq->body->jit_unit->compile_info, true);
+ mjit_add_iseq_to_process(unit->iseq, &ISEQ_BODY(unit->iseq)->jit_unit->compile_info, true);
}
}
}
@@ -1499,7 +1499,7 @@ mjit_worker(void)
add_to_list(unit, &active_units);
}
// Usage of jit_code might be not in a critical section.
- MJIT_ATOMIC_SET(unit->iseq->body->jit_func, func);
+ MJIT_ATOMIC_SET(ISEQ_BODY(unit->iseq)->jit_func, func);
}
else {
free_unit(unit);
diff --git a/proc.c b/proc.c
index faf3a5166e..93b4013c31 100644
--- a/proc.c
+++ b/proc.c
@@ -448,11 +448,11 @@ get_local_variable_ptr(const rb_env_t **envp, ID lid)
VM_ASSERT(rb_obj_is_iseq((VALUE)iseq));
- for (i=0; i<iseq->body->local_table_size; i++) {
- if (iseq->body->local_table[i] == lid) {
- if (iseq->body->local_iseq == iseq &&
- iseq->body->param.flags.has_block &&
- (unsigned int)iseq->body->param.block_start == i) {
+ for (i=0; i<ISEQ_BODY(iseq)->local_table_size; i++) {
+ if (ISEQ_BODY(iseq)->local_table[i] == lid) {
+ if (ISEQ_BODY(iseq)->local_iseq == iseq &&
+ ISEQ_BODY(iseq)->param.flags.has_block &&
+ (unsigned int)ISEQ_BODY(iseq)->param.block_start == i) {
const VALUE *ep = env->ep;
if (!VM_ENV_FLAGS(ep, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM)) {
RB_OBJ_WRITE(env, &env->env[i], rb_vm_bh_to_procval(GET_EC(), VM_ENV_BLOCK_HANDLER(ep)));
@@ -1078,11 +1078,11 @@ proc_arity(VALUE self)
static inline int
rb_iseq_min_max_arity(const rb_iseq_t *iseq, int *max)
{
- *max = iseq->body->param.flags.has_rest == FALSE ?
- iseq->body->param.lead_num + iseq->body->param.opt_num + iseq->body->param.post_num +
- (iseq->body->param.flags.has_kw == TRUE || iseq->body->param.flags.has_kwrest == TRUE)
+ *max = ISEQ_BODY(iseq)->param.flags.has_rest == FALSE ?
+ ISEQ_BODY(iseq)->param.lead_num + ISEQ_BODY(iseq)->param.opt_num + ISEQ_BODY(iseq)->param.post_num +
+ (ISEQ_BODY(iseq)->param.flags.has_kw == TRUE || ISEQ_BODY(iseq)->param.flags.has_kwrest == TRUE)
: UNLIMITED_ARGUMENTS;
- return iseq->body->param.lead_num + iseq->body->param.post_num + (iseq->body->param.flags.has_kw && iseq->body->param.keyword->required_num > 0);
+ return ISEQ_BODY(iseq)->param.lead_num + ISEQ_BODY(iseq)->param.post_num + (ISEQ_BODY(iseq)->param.flags.has_kw && ISEQ_BODY(iseq)->param.keyword->required_num > 0);
}
static int
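Plugging a concrete signature into rb_iseq_min_max_arity above (a worked example of the formula as written): for def m(a, b, c = 1, k:), lead_num is 2, opt_num 1, post_num 0, has_rest FALSE, and has_kw TRUE with one required keyword, so *max becomes 2 + 1 + 0 + 1 = 4 and the returned minimum is 2 + 0 + 1 = 3; adding a *rest parameter would set has_rest and turn *max into UNLIMITED_ARGUMENTS.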
@@ -1369,7 +1369,7 @@ iseq_location(const rb_iseq_t *iseq)
if (!iseq) return Qnil;
rb_iseq_check(iseq);
loc[0] = rb_iseq_path(iseq);
- loc[1] = iseq->body->location.first_lineno;
+ loc[1] = ISEQ_BODY(iseq)->location.first_lineno;
return rb_ary_new4(2, loc);
}
@@ -1535,7 +1535,7 @@ rb_block_to_s(VALUE self, const struct rb_block *block, const char *additional_i
const rb_iseq_t *iseq = rb_iseq_check(block->as.captured.code.iseq);
rb_str_catf(str, "%p %"PRIsVALUE":%d", (void *)self,
rb_iseq_path(iseq),
- FIX2INT(iseq->body->location.first_lineno));
+ FIX2INT(ISEQ_BODY(iseq)->location.first_lineno));
}
break;
case block_type_symbol:
@@ -3503,7 +3503,7 @@ proc_binding(VALUE self)
if (iseq) {
rb_iseq_check(iseq);
- RB_OBJ_WRITE(bindval, &bind->pathobj, iseq->body->location.pathobj);
+ RB_OBJ_WRITE(bindval, &bind->pathobj, ISEQ_BODY(iseq)->location.pathobj);
bind->first_lineno = FIX2INT(rb_iseq_first_lineno(iseq));
}
else {
@@ -3889,10 +3889,10 @@ proc_ruby2_keywords(VALUE procval)
switch (proc->block.type) {
case block_type_iseq:
- if (proc->block.as.captured.code.iseq->body->param.flags.has_rest &&
- !proc->block.as.captured.code.iseq->body->param.flags.has_kw &&
- !proc->block.as.captured.code.iseq->body->param.flags.has_kwrest) {
- proc->block.as.captured.code.iseq->body->param.flags.ruby2_keywords = 1;
+ if (ISEQ_BODY(proc->block.as.captured.code.iseq)->param.flags.has_rest &&
+ !ISEQ_BODY(proc->block.as.captured.code.iseq)->param.flags.has_kw &&
+ !ISEQ_BODY(proc->block.as.captured.code.iseq)->param.flags.has_kwrest) {
+ ISEQ_BODY(proc->block.as.captured.code.iseq)->param.flags.ruby2_keywords = 1;
}
else {
rb_warn("Skipping set of ruby2_keywords flag for proc (proc accepts keywords or proc does not accept argument splat)");
diff --git a/thread.c b/thread.c
index 46ae21b589..26d9bb4b14 100644
--- a/thread.c
+++ b/thread.c
@@ -5603,7 +5603,7 @@ update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
VALUE num;
void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
- rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - cfp->iseq->body->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
+ rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
rb_ary_push(lines, LONG2FIX(line + 1));
return;
}
@@ -5628,7 +5628,7 @@ update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
if (branches) {
- long pc = cfp->pc - cfp->iseq->body->iseq_encoded - 1;
+ long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
VALUE counters = RARRAY_AREF(branches, 1);
VALUE num = RARRAY_AREF(counters, idx);
@@ -5651,7 +5651,7 @@ rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
switch (me->def->type) {
case VM_METHOD_TYPE_ISEQ: {
const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
- rb_iseq_location_t *loc = &iseq->body->location;
+ rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
path = rb_iseq_path(iseq);
beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
@@ -5665,7 +5665,7 @@ rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
rb_iseq_location_t *loc;
rb_iseq_check(iseq);
path = rb_iseq_path(iseq);
- loc = &iseq->body->location;
+ loc = &ISEQ_BODY(iseq)->location;
beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
diff --git a/tool/mk_builtin_loader.rb b/tool/mk_builtin_loader.rb
index 02941735f7..23e6a01017 100644
--- a/tool/mk_builtin_loader.rb
+++ b/tool/mk_builtin_loader.rb
@@ -321,7 +321,7 @@ def mk_builtin_header file
f.puts %' fprintf(f, " const VALUE *argv = &stack[%d];\\n", stack_size - #{argc});'
f.puts %' }'
f.puts %' else {'
- f.puts %' fprintf(f, " const unsigned int lnum = GET_ISEQ()->body->local_table_size;\\n");'
+ f.puts %' fprintf(f, " const unsigned int lnum = ISEQ_BODY(GET_ISEQ())->local_table_size;\\n");'
f.puts %' fprintf(f, " const VALUE *argv = GET_EP() - lnum - VM_ENV_DATA_SIZE + 1 + %ld;\\n", index);'
f.puts %' }'
end
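The generated pointer arithmetic above is easier to check with numbers (assuming VM_ENV_DATA_SIZE is 3, its current value, and index is 0): with lnum == 2 the expression becomes GET_EP() - 2 - 3 + 1 = ep - 4, so argv points at ep[-4] and the two locals occupy ep[-4] and ep[-3], immediately below the three environment-data slots at ep[-2], ep[-1] and ep[0]; a non-zero index then simply shifts argv forward by that many slots.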
diff --git a/tool/ruby_vm/views/_mjit_compile_send.erb b/tool/ruby_vm/views/_mjit_compile_send.erb
index 28e316a1ef..8900ee6425 100644
--- a/tool/ruby_vm/views/_mjit_compile_send.erb
+++ b/tool/ruby_vm/views/_mjit_compile_send.erb
@@ -54,7 +54,7 @@
<%= render 'mjit_compile_pc_and_sp', locals: { insn: insn } -%>
% # JIT: If ISeq is inlinable, call the inlined method without pushing a frame.
- if (iseq && status->inlined_iseqs != NULL && iseq->body == status->inlined_iseqs[pos]) {
+ if (iseq && status->inlined_iseqs != NULL && ISEQ_BODY(iseq) == status->inlined_iseqs[pos]) {
fprintf(f, " {\n");
fprintf(f, " VALUE orig_self = reg_cfp->self;\n");
fprintf(f, " reg_cfp->self = stack[%d];\n", b->stack_size + sp_inc - 1);
@@ -84,8 +84,8 @@
}
else { // VM_METHOD_TYPE_ISEQ
% # fastpath_applied_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE
- fprintf(f, " vm_call_iseq_setup_normal(ec, reg_cfp, &calling, cc_cme, 0, %d, %d);\n", iseq->body->param.size, iseq->body->local_table_size);
- if (iseq->body->catch_except_p) {
+ fprintf(f, " vm_call_iseq_setup_normal(ec, reg_cfp, &calling, cc_cme, 0, %d, %d);\n", ISEQ_BODY(iseq)->param.size, ISEQ_BODY(iseq)->local_table_size);
+ if (ISEQ_BODY(iseq)->catch_except_p) {
fprintf(f, " VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);\n");
fprintf(f, " val = vm_exec(ec, true);\n");
}
diff --git a/vm.c b/vm.c
index 0bc9784046..2c9f3063ab 100644
--- a/vm.c
+++ b/vm.c
@@ -568,7 +568,7 @@ vm_stat(int argc, VALUE *argv, VALUE self)
static void
vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
{
- if (iseq->body->type != ISEQ_TYPE_TOP) {
+ if (ISEQ_BODY(iseq)->type != ISEQ_TYPE_TOP) {
rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
}
@@ -576,8 +576,8 @@ vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, rb_ec_thread_ptr(ec)->top_self,
VM_BLOCK_HANDLER_NONE,
(VALUE)vm_cref_new_toplevel(ec), /* cref or me */
- iseq->body->iseq_encoded, ec->cfp->sp,
- iseq->body->local_table_size, iseq->body->stack_max);
+ ISEQ_BODY(iseq)->iseq_encoded, ec->cfp->sp,
+ ISEQ_BODY(iseq)->local_table_size, ISEQ_BODY(iseq)->stack_max);
}
static void
@@ -586,9 +586,9 @@ vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cr
vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
(VALUE)cref, /* cref or me */
- iseq->body->iseq_encoded,
- ec->cfp->sp, iseq->body->local_table_size,
- iseq->body->stack_max);
+ ISEQ_BODY(iseq)->iseq_encoded,
+ ec->cfp->sp, ISEQ_BODY(iseq)->local_table_size,
+ ISEQ_BODY(iseq)->stack_max);
}
static void
@@ -603,7 +603,7 @@ vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
vm_set_eval_stack(ec, iseq, 0, &bind->block);
/* save binding */
- if (iseq->body->local_table_size > 0) {
+ if (ISEQ_BODY(iseq)->local_table_size > 0) {
vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));
}
}
@@ -799,7 +799,7 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co
local_size = VM_ENV_DATA_SIZE;
}
else {
- local_size = cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
+ local_size = ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
}
/*
@@ -880,8 +880,8 @@ collect_local_variables_in_iseq(const rb_iseq_t *iseq, const struct local_var_li
{
unsigned int i;
if (!iseq) return 0;
- for (i = 0; i < iseq->body->local_table_size; i++) {
- local_var_list_add(vars, iseq->body->local_table[i]);
+ for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
+ local_var_list_add(vars, ISEQ_BODY(iseq)->local_table[i]);
}
return 1;
}
@@ -922,7 +922,7 @@ rb_iseq_local_variables(const rb_iseq_t *iseq)
struct local_var_list vars;
local_var_list_init(&vars);
while (collect_local_variables_in_iseq(iseq, &vars)) {
- iseq = iseq->body->parent_iseq;
+ iseq = ISEQ_BODY(iseq)->parent_iseq;
}
return local_var_list_finish(&vars);
}
@@ -1062,8 +1062,8 @@ env_copy(const VALUE *src_ep, VALUE read_only_variables)
for (int i=RARRAY_LENINT(read_only_variables)-1; i>=0; i--) {
ID id = NUM2ID(RARRAY_AREF(read_only_variables, i));
- for (unsigned int j=0; j<src_env->iseq->body->local_table_size; j++) {
- if (id == src_env->iseq->body->local_table[j]) {
+ for (unsigned int j=0; j<ISEQ_BODY(src_env->iseq)->local_table_size; j++) {
+ if (id == ISEQ_BODY(src_env->iseq)->local_table[j]) {
VALUE v = src_env->env[j];
if (!rb_ractor_shareable_p(v)) {
VALUE name = rb_id2str(id);
@@ -1152,8 +1152,8 @@ rb_proc_isolate_bang(VALUE self)
rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
- if (iseq->body->outer_variables) {
- proc_shared_outer_variables(iseq->body->outer_variables, true, "isolate a Proc");
+ if (ISEQ_BODY(iseq)->outer_variables) {
+ proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, true, "isolate a Proc");
}
proc_isolate_env(self, proc, Qfalse);
@@ -1189,9 +1189,9 @@ rb_proc_ractor_make_shareable(VALUE self)
VALUE read_only_variables = Qfalse;
- if (iseq->body->outer_variables) {
+ if (ISEQ_BODY(iseq)->outer_variables) {
read_only_variables =
- proc_shared_outer_variables(iseq->body->outer_variables, false, "make a Proc shareable");
+ proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, false, "make a Proc shareable");
}
proc_isolate_env(self, proc, read_only_variables);
@@ -1247,7 +1247,7 @@ rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *s
vm_bind_update_env(bindval, bind, envval);
RB_OBJ_WRITE(bindval, &bind->block.as.captured.self, cfp->self);
RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, cfp->iseq);
- RB_OBJ_WRITE(bindval, &bind->pathobj, ruby_level_cfp->iseq->body->location.pathobj);
+ RB_OBJ_WRITE(bindval, &bind->pathobj, ISEQ_BODY(ruby_level_cfp->iseq)->location.pathobj);
bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);
return bindval;
@@ -1282,7 +1282,7 @@ rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const I
ast.script_lines = INT2FIX(-1);
if (base_iseq) {
- iseq = rb_iseq_new(&ast, base_iseq->body->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
+ iseq = rb_iseq_new(&ast, ISEQ_BODY(base_iseq)->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
}
else {
VALUE tempstr = rb_fstring_lit("<temp>");
@@ -1304,15 +1304,15 @@ rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const I
static inline VALUE
invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
{
- int arg_size = iseq->body->param.size;
+ int arg_size = ISEQ_BODY(iseq)->param.size;
vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self,
VM_GUARDED_PREV_EP(captured->ep),
(VALUE)cref, /* cref or method */
- iseq->body->iseq_encoded + opt_pc,
+ ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
ec->cfp->sp + arg_size,
- iseq->body->local_table_size - arg_size,
- iseq->body->stack_max);
+ ISEQ_BODY(iseq)->local_table_size - arg_size,
+ ISEQ_BODY(iseq)->stack_max);
return vm_exec(ec, true);
}
@@ -1320,7 +1320,7 @@ static VALUE
invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
{
/* bmethod */
- int arg_size = iseq->body->param.size;
+ int arg_size = ISEQ_BODY(iseq)->param.size;
VALUE ret;
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
@@ -1328,10 +1328,10 @@ invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, co
vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self,
VM_GUARDED_PREV_EP(captured->ep),
(VALUE)me,
- iseq->body->iseq_encoded + opt_pc,
+ ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
ec->cfp->sp + arg_size,
- iseq->body->local_table_size - arg_size,
- iseq->body->stack_max);
+ ISEQ_BODY(iseq)->local_table_size - arg_size,
+ ISEQ_BODY(iseq)->stack_max);
VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
ret = vm_exec(ec, true);
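The frame sizing in invoke_block and invoke_bmethod above is clearer with numbers (a worked example, not new behaviour): for a block whose param.size is 2 and local_table_size is 5, the two arguments already sit on the stack, so vm_push_frame is given ec->cfp->sp + 2 as the new frame's stack pointer and only 5 - 2 = 3 as the count of additional local slots to reserve, with stack_max on top for temporaries.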
@@ -2342,7 +2342,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
}
cfp = ec->cfp;
- epc = cfp->pc - cfp->iseq->body->iseq_encoded;
+ epc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded;
escape_cfp = NULL;
if (state == TAG_BREAK || state == TAG_RETURN) {
@@ -2355,7 +2355,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
THROW_DATA_STATE_SET(err, state = TAG_BREAK);
}
else {
- ct = cfp->iseq->body->catch_table;
+ ct = ISEQ_BODY(cfp->iseq)->catch_table;
if (ct) for (i = 0; i < ct->size; i++) {
entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
if (entry->start < epc && entry->end >= epc) {
@@ -2392,7 +2392,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
}
if (state == TAG_RAISE) {
- ct = cfp->iseq->body->catch_table;
+ ct = ISEQ_BODY(cfp->iseq)->catch_table;
if (ct) for (i = 0; i < ct->size; i++) {
entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
if (entry->start < epc && entry->end >= epc) {
@@ -2408,7 +2408,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
}
}
else if (state == TAG_RETRY) {
- ct = cfp->iseq->body->catch_table;
+ ct = ISEQ_BODY(cfp->iseq)->catch_table;
if (ct) for (i = 0; i < ct->size; i++) {
entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
if (entry->start < epc && entry->end >= epc) {
@@ -2423,7 +2423,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
const rb_control_frame_t *escape_cfp;
escape_cfp = THROW_DATA_CATCH_FRAME(err);
if (cfp == escape_cfp) {
- cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
+ cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont;
ec->errinfo = Qnil;
return Qundef;
}
@@ -2441,7 +2441,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
/* otherwise = dontcare */
}[state];
- ct = cfp->iseq->body->catch_table;
+ ct = ISEQ_BODY(cfp->iseq)->catch_table;
if (ct) for (i = 0; i < ct->size; i++) {
entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
@@ -2453,7 +2453,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
break;
}
else if (entry->type == type) {
- cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
+ cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont;
cfp->sp = vm_base_ptr(cfp) + entry->sp;
if (state != TAG_REDO) {
@@ -2471,7 +2471,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
}
}
else {
- ct = cfp->iseq->body->catch_table;
+ ct = ISEQ_BODY(cfp->iseq)->catch_table;
if (ct) for (i = 0; i < ct->size; i++) {
entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
if (entry->start < epc && entry->end >= epc) {
@@ -2492,7 +2492,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
rb_iseq_check(catch_iseq);
cfp->sp = vm_base_ptr(cfp) + cont_sp;
- cfp->pc = cfp->iseq->body->iseq_encoded + cont_pc;
+ cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + cont_pc;
/* push block frame */
cfp->sp[0] = (VALUE)err;
@@ -2500,10 +2500,10 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
cfp->self,
VM_GUARDED_PREV_EP(cfp->ep),
0, /* cref or me */
- catch_iseq->body->iseq_encoded,
+ ISEQ_BODY(catch_iseq)->iseq_encoded,
cfp->sp + arg_size /* push value */,
- catch_iseq->body->local_table_size - arg_size,
- catch_iseq->body->stack_max);
+ ISEQ_BODY(catch_iseq)->local_table_size - arg_size,
+ ISEQ_BODY(catch_iseq)->stack_max);
state = 0;
ec->tag->state = TAG_NONE;
@@ -3881,7 +3881,7 @@ Init_VM(void)
rb_gc_register_mark_object((VALUE)iseq);
th->ec->cfp->iseq = iseq;
- th->ec->cfp->pc = iseq->body->iseq_encoded;
+ th->ec->cfp->pc = ISEQ_BODY(iseq)->iseq_encoded;
th->ec->cfp->self = th->top_self;
VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
diff --git a/vm_args.c b/vm_args.c
index 58ec4a0826..7439579f43 100644
--- a/vm_args.c
+++ b/vm_args.c
@@ -328,10 +328,10 @@ args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *cons
VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
VALUE *const locals)
{
- const ID *acceptable_keywords = iseq->body->param.keyword->table;
- const int req_key_num = iseq->body->param.keyword->required_num;
- const int key_num = iseq->body->param.keyword->num;
- const VALUE * const default_values = iseq->body->param.keyword->default_values;
+ const ID *acceptable_keywords = ISEQ_BODY(iseq)->param.keyword->table;
+ const int req_key_num = ISEQ_BODY(iseq)->param.keyword->required_num;
+ const int key_num = ISEQ_BODY(iseq)->param.keyword->num;
+ const VALUE * const default_values = ISEQ_BODY(iseq)->param.keyword->default_values;
VALUE missing = 0;
int i, di, found = 0;
int unspecified_bits = 0;
@@ -382,7 +382,7 @@ args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *cons
}
}
- if (iseq->body->param.flags.has_kwrest) {
+ if (ISEQ_BODY(iseq)->param.flags.has_kwrest) {
const int rest_hash_index = key_num + 1;
locals[rest_hash_index] = make_rest_kw_hash(passed_keywords, passed_keyword_len, passed_values);
}
@@ -441,14 +441,14 @@ ignore_keyword_hash_p(VALUE keyword_hash, const rb_iseq_t * const iseq, unsigned
keyword_hash = rb_to_hash_type(keyword_hash);
}
if (!(*kw_flag & VM_CALL_KW_SPLAT_MUT) &&
- (iseq->body->param.flags.has_kwrest ||
- iseq->body->param.flags.ruby2_keywords)) {
+ (ISEQ_BODY(iseq)->param.flags.has_kwrest ||
+ ISEQ_BODY(iseq)->param.flags.ruby2_keywords)) {
*kw_flag |= VM_CALL_KW_SPLAT_MUT;
keyword_hash = rb_hash_dup(keyword_hash);
}
*converted_keyword_hash = keyword_hash;
- return !(iseq->body->param.flags.has_kw) &&
- !(iseq->body->param.flags.has_kwrest) &&
+ return !(ISEQ_BODY(iseq)->param.flags.has_kw) &&
+ !(ISEQ_BODY(iseq)->param.flags.has_kwrest) &&
RHASH_EMPTY_P(keyword_hash);
}
@@ -458,8 +458,8 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
const struct rb_callinfo *ci,
VALUE * const locals, const enum arg_setup_type arg_setup_type)
{
- const int min_argc = iseq->body->param.lead_num + iseq->body->param.post_num;
- const int max_argc = (iseq->body->param.flags.has_rest == FALSE) ? min_argc + iseq->body->param.opt_num : UNLIMITED_ARGUMENTS;
+ const int min_argc = ISEQ_BODY(iseq)->param.lead_num + ISEQ_BODY(iseq)->param.post_num;
+ const int max_argc = (ISEQ_BODY(iseq)->param.flags.has_rest == FALSE) ? min_argc + ISEQ_BODY(iseq)->param.opt_num : UNLIMITED_ARGUMENTS;
int given_argc;
unsigned int kw_flag = vm_ci_flag(ci) & (VM_CALL_KWARG | VM_CALL_KW_SPLAT | VM_CALL_KW_SPLAT_MUT);
int opt_pc = 0, allow_autosplat = !kw_flag;
@@ -476,16 +476,16 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
*
* [pushed values] [uninitialized values]
* <- ci->argc -->
- * <- iseq->body->param.size------------>
+ * <- ISEQ_BODY(iseq)->param.size------------>
* ^ locals ^ sp
*
* =>
* [pushed values] [initialized values ]
* <- ci->argc -->
- * <- iseq->body->param.size------------>
+ * <- ISEQ_BODY(iseq)->param.size------------>
* ^ locals ^ sp
*/
- for (i=calling->argc; i<iseq->body->param.size; i++) {
+ for (i=calling->argc; i<ISEQ_BODY(iseq)->param.size; i++) {
locals[i] = Qnil;
}
ec->cfp->sp = &locals[i];
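With concrete numbers, the diagram and loop above read as follows: if the caller pushed calling->argc == 2 values but ISEQ_BODY(iseq)->param.size is 5, the loop writes Qnil into locals[2] through locals[4] and then parks ec->cfp->sp at &locals[5], so every parameter slot is initialized before the lead/opt/rest/keyword setup later in the function runs.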
@@ -499,7 +499,7 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
if (kw_flag & VM_CALL_KWARG) {
args->kw_arg = vm_ci_kwarg(ci);
- if (iseq->body->param.flags.has_kw) {
+ if (ISEQ_BODY(iseq)->param.flags.has_kw) {
int kw_len = args->kw_arg->keyword_len;
/* copy kw_argv */
args->kw_argv = ALLOCA_N(VALUE, kw_len);
@@ -552,10 +552,10 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
RARRAY_ASET(args->rest, len - 1, rest_last);
}
- if (iseq->body->param.flags.ruby2_keywords && rest_last) {
+ if (ISEQ_BODY(iseq)->param.flags.ruby2_keywords && rest_last) {
flag_keyword_hash = rest_last;
}
- else if (iseq->body->param.flags.has_kw || iseq->body->param.flags.has_kwrest) {
+ else if (ISEQ_BODY(iseq)->param.flags.has_kw || ISEQ_BODY(iseq)->param.flags.has_kwrest) {
arg_rest_dup(args);
rb_ary_pop(args->rest);
given_argc--;
@@ -578,10 +578,10 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
args->argv[args->argc-1] = last_arg;
}
- if (iseq->body->param.flags.ruby2_keywords) {
+ if (ISEQ_BODY(iseq)->param.flags.ruby2_keywords) {
flag_keyword_hash = last_arg;
}
- else if (iseq->body->param.flags.has_kw || iseq->body->param.flags.has_kwrest) {
+ else if (ISEQ_BODY(iseq)->param.flags.has_kw || ISEQ_BODY(iseq)->param.flags.has_kwrest) {
args->argc--;
given_argc--;
keyword_hash = last_arg;
@@ -595,7 +595,7 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
((struct RHash *)flag_keyword_hash)->basic.flags |= RHASH_PASS_AS_KEYWORDS;
}
- if (kw_flag && iseq->body->param.flags.accepts_no_kwarg) {
+ if (kw_flag && ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg) {
rb_raise(rb_eArgError, "no keywords accepted");
}
@@ -606,8 +606,8 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
case arg_setup_block:
if (given_argc == (NIL_P(keyword_hash) ? 1 : 2) &&
allow_autosplat &&
- (min_argc > 0 || iseq->body->param.opt_num > 1) &&
- !iseq->body->param.flags.ambiguous_param0 &&
+ (min_argc > 0 || ISEQ_BODY(iseq)->param.opt_num > 1) &&
+ !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
args_check_block_arg0(args)) {
given_argc = RARRAY_LENINT(args->rest);
}
@@ -637,29 +637,29 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
}
}
- if (iseq->body->param.flags.has_lead) {
- args_setup_lead_parameters(args, iseq->body->param.lead_num, locals + 0);
+ if (ISEQ_BODY(iseq)->param.flags.has_lead) {
+ args_setup_lead_parameters(args, ISEQ_BODY(iseq)->param.lead_num, locals + 0);
}
- if (iseq->body->param.flags.has_rest || iseq->body->param.flags.has_post){
+ if (ISEQ_BODY(iseq)->param.flags.has_rest || ISEQ_BODY(iseq)->param.flags.has_post){
args_copy(args);
}
- if (iseq->body->param.flags.has_post) {
- args_setup_post_parameters(args, iseq->body->param.post_num, locals + iseq->body->param.post_start);
+ if (ISEQ_BODY(iseq)->param.flags.has_post) {
+ args_setup_post_parameters(args, ISEQ_BODY(iseq)->param.post_num, locals + ISEQ_BODY(iseq)->param.post_start);
}
- if (iseq->body->param.flags.has_opt) {
- int opt = args_setup_opt_parameters(args, iseq->body->param.opt_num, locals + iseq->body->param.lead_num);
- opt_pc = (int)iseq->body->param.opt_table[opt];
+ if (ISEQ_BODY(iseq)->param.flags.has_opt) {
+ int opt = args_setup_opt_parameters(args, ISEQ_BODY(iseq)->param.opt_num, locals + ISEQ_BODY(iseq)->param.lead_num);
+ opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
}
- if (iseq->body->param.flags.has_rest) {
- args_setup_rest_parameter(args, locals + iseq->body->param.rest_start);
+ if (ISEQ_BODY(iseq)->param.flags.has_rest) {
+ args_setup_rest_parameter(args, locals + ISEQ_BODY(iseq)->param.rest_start);
}
- if (iseq->body->param.flags.has_kw) {
- VALUE * const klocals = locals + iseq->body->param.keyword->bits_start - iseq->body->param.keyword->num;
+ if (ISEQ_BODY(iseq)->param.flags.has_kw) {
+ VALUE * const klocals = locals + ISEQ_BODY(iseq)->param.keyword->bits_start - ISEQ_BODY(iseq)->param.keyword->num;
if (args->kw_argv != NULL) {
const struct rb_callinfo_kwarg *kw_arg = args->kw_arg;
@@ -681,26 +681,26 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);
}
}
- else if (iseq->body->param.flags.has_kwrest) {
- args_setup_kw_rest_parameter(keyword_hash, locals + iseq->body->param.keyword->rest_start, kw_flag);
+ else if (ISEQ_BODY(iseq)->param.flags.has_kwrest) {
+ args_setup_kw_rest_parameter(keyword_hash, locals + ISEQ_BODY(iseq)->param.keyword->rest_start, kw_flag);
}
else if (!NIL_P(keyword_hash) && RHASH_SIZE(keyword_hash) > 0 && arg_setup_type == arg_setup_method) {
argument_kw_error(ec, iseq, "unknown", rb_hash_keys(keyword_hash));
}
- if (iseq->body->param.flags.has_block) {
- if (iseq->body->local_iseq == iseq) {
+ if (ISEQ_BODY(iseq)->param.flags.has_block) {
+ if (ISEQ_BODY(iseq)->local_iseq == iseq) {
/* Do nothing */
}
else {
- args_setup_block_parameter(ec, calling, locals + iseq->body->param.block_start);
+ args_setup_block_parameter(ec, calling, locals + ISEQ_BODY(iseq)->param.block_start);
}
}
#if 0
{
int i;
- for (i=0; i<iseq->body->param.size; i++) {
+ for (i=0; i<ISEQ_BODY(iseq)->param.size; i++) {
ruby_debug_printf("local[%d] = %p\n", i, (void *)locals[i]);
}
}
@@ -718,7 +718,7 @@ raise_argument_error(rb_execution_context_t *ec, const rb_iseq_t *iseq, const VA
if (iseq) {
vm_push_frame(ec, iseq, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL, Qnil /* self */,
VM_BLOCK_HANDLER_NONE /* specval*/, Qfalse /* me or cref */,
- iseq->body->iseq_encoded,
+ ISEQ_BODY(iseq)->iseq_encoded,
ec->cfp->sp, 0, 0 /* stack_max */);
at = rb_ec_backtrace_object(ec);
rb_backtrace_use_iseq_first_lineno_for_last_location(at);
@@ -737,8 +737,8 @@ static void
argument_arity_error(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int miss_argc, const int min_argc, const int max_argc)
{
VALUE exc = rb_arity_error_new(miss_argc, min_argc, max_argc);
- if (iseq->body->param.flags.has_kw) {
- const struct rb_iseq_param_keyword *const kw = iseq->body->param.keyword;
+ if (ISEQ_BODY(iseq)->param.flags.has_kw) {
+ const struct rb_iseq_param_keyword *const kw = ISEQ_BODY(iseq)->param.keyword;
const ID *keywords = kw->table;
int req_key_num = kw->required_num;
if (req_key_num > 0) {
diff --git a/vm_backtrace.c b/vm_backtrace.c
index 8cb4881e15..be48326f3d 100644
--- a/vm_backtrace.c
+++ b/vm_backtrace.c
@@ -37,24 +37,24 @@ inline static int
calc_pos(const rb_iseq_t *iseq, const VALUE *pc, int *lineno, int *node_id)
{
VM_ASSERT(iseq);
- VM_ASSERT(iseq->body);
- VM_ASSERT(iseq->body->iseq_encoded);
- VM_ASSERT(iseq->body->iseq_size);
+ VM_ASSERT(ISEQ_BODY(iseq));
+ VM_ASSERT(ISEQ_BODY(iseq)->iseq_encoded);
+ VM_ASSERT(ISEQ_BODY(iseq)->iseq_size);
if (! pc) {
- if (iseq->body->type == ISEQ_TYPE_TOP) {
- VM_ASSERT(! iseq->body->local_table);
- VM_ASSERT(! iseq->body->local_table_size);
+ if (ISEQ_BODY(iseq)->type == ISEQ_TYPE_TOP) {
+ VM_ASSERT(! ISEQ_BODY(iseq)->local_table);
+ VM_ASSERT(! ISEQ_BODY(iseq)->local_table_size);
return 0;
}
- if (lineno) *lineno = FIX2INT(iseq->body->location.first_lineno);
+ if (lineno) *lineno = FIX2INT(ISEQ_BODY(iseq)->location.first_lineno);
#ifdef USE_ISEQ_NODE_ID
if (node_id) *node_id = -1;
#endif
return 1;
}
else {
- ptrdiff_t n = pc - iseq->body->iseq_encoded;
- VM_ASSERT(n <= iseq->body->iseq_size);
+ ptrdiff_t n = pc - ISEQ_BODY(iseq)->iseq_encoded;
+ VM_ASSERT(n <= ISEQ_BODY(iseq)->iseq_size);
VM_ASSERT(n >= 0);
ASSUME(n >= 0);
size_t pos = n; /* no overflow */
@@ -216,7 +216,7 @@ location_label(rb_backtrace_location_t *loc)
{
switch (loc->type) {
case LOCATION_TYPE_ISEQ:
- return loc->iseq->body->location.label;
+ return ISEQ_BODY(loc->iseq)->location.label;
case LOCATION_TYPE_CFUNC:
return rb_id2str(loc->mid);
default:
@@ -263,7 +263,7 @@ location_base_label(rb_backtrace_location_t *loc)
{
switch (loc->type) {
case LOCATION_TYPE_ISEQ:
- return loc->iseq->body->location.base_label;
+ return ISEQ_BODY(loc->iseq)->location.base_label;
case LOCATION_TYPE_CFUNC:
return rb_id2str(loc->mid);
default:
@@ -407,7 +407,7 @@ location_to_str(rb_backtrace_location_t *loc)
switch (loc->type) {
case LOCATION_TYPE_ISEQ:
file = rb_iseq_path(loc->iseq);
- name = loc->iseq->body->location.label;
+ name = ISEQ_BODY(loc->iseq)->location.label;
lineno = calc_lineno(loc->iseq, loc->pc);
break;
@@ -950,7 +950,7 @@ oldbt_iter_iseq(void *ptr, const rb_control_frame_t *cfp)
const VALUE *pc = cfp->pc;
struct oldbt_arg *arg = (struct oldbt_arg *)ptr;
VALUE file = arg->filename = rb_iseq_path(iseq);
- VALUE name = iseq->body->location.label;
+ VALUE name = ISEQ_BODY(iseq)->location.label;
int lineno = arg->lineno = calc_lineno(iseq, pc);
(arg->func)(arg->data, file, lineno, name);
diff --git a/vm_core.h b/vm_core.h
index 12f563df17..d985bd40ba 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -522,6 +522,8 @@ struct rb_iseq_struct {
} aux;
};
+#define ISEQ_BODY(iseq) ((iseq)->body)
+
#ifndef USE_LAZY_LOAD
#define USE_LAZY_LOAD 0
#endif
@@ -534,7 +536,7 @@ static inline const rb_iseq_t *
rb_iseq_check(const rb_iseq_t *iseq)
{
#if USE_LAZY_LOAD
- if (iseq->body == NULL) {
+ if (ISEQ_BODY(iseq) == NULL) {
rb_iseq_complete((rb_iseq_t *)iseq);
}
#endif
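The vm_core.h hunk above is the only place this patch adds something new; every other hunk in the diff is the essentially mechanical rewrite of iseq->body into ISEQ_BODY(iseq), which today expands to the same field read but routes every caller through a single expression. The following is a minimal standalone sketch of that accessor-macro pattern, using simplified hypothetical stand-in structs (demo_body, demo_iseq) rather than the real rb_iseq_t / rb_iseq_constant_body definitions:

/* Illustrative sketch only: simplified stand-in structs, not the real VM
 * types. It shows the accessor-macro pattern used by this patch: callers
 * read the body through one expression instead of dereferencing ->body
 * directly, so the access has a single indirection point. */
#include <stdio.h>

struct demo_body { unsigned int iseq_size; };   /* stand-in for rb_iseq_constant_body */
struct demo_iseq { struct demo_body *body; };   /* stand-in for rb_iseq_t */

/* Same shape as the macro added above: today it simply reads the field. */
#define ISEQ_BODY(iseq) ((iseq)->body)

static unsigned int demo_iseq_size(const struct demo_iseq *iseq)
{
    return ISEQ_BODY(iseq)->iseq_size;          /* was: iseq->body->iseq_size */
}

int main(void)
{
    struct demo_body body = { .iseq_size = 42 };
    struct demo_iseq iseq = { .body = &body };
    printf("%u\n", demo_iseq_size(&iseq));
    return 0;
}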
diff --git a/vm_dump.c b/vm_dump.c
index a8bfbe97f2..48f37808a5 100644
--- a/vm_dump.c
+++ b/vm_dump.c
@@ -118,8 +118,8 @@ control_frame_dump(const rb_execution_context_t *ec, const rb_control_frame_t *c
}
else {
iseq = cfp->iseq;
- pc = cfp->pc - iseq->body->iseq_encoded;
- iseq_name = RSTRING_PTR(iseq->body->location.label);
+ pc = cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
+ iseq_name = RSTRING_PTR(ISEQ_BODY(iseq)->location.label);
line = rb_vm_get_sourceline(cfp);
if (line) {
snprintf(posbuf, MAX_POSBUF, "%s:%d", RSTRING_PTR(rb_iseq_path(iseq)), line);
@@ -178,12 +178,12 @@ control_frame_dump(const rb_execution_context_t *ec, const rb_control_frame_t *c
fprintf(stderr, " self: %s\n", rb_raw_obj_info(buff, 0x100, cfp->self));
if (iseq) {
- if (iseq->body->local_table_size > 0) {
+ if (ISEQ_BODY(iseq)->local_table_size > 0) {
fprintf(stderr, " lvars:\n");
- for (unsigned int i=0; i<iseq->body->local_table_size; i++) {
- const VALUE *argv = cfp->ep - cfp->iseq->body->local_table_size - VM_ENV_DATA_SIZE + 1;
+ for (unsigned int i=0; i<ISEQ_BODY(iseq)->local_table_size; i++) {
+ const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1;
fprintf(stderr, " %s: %s\n",
- rb_id2name(iseq->body->local_table[i]),
+ rb_id2name(ISEQ_BODY(iseq)->local_table[i]),
rb_raw_obj_info(buff, 0x100, argv[i]));
}
}
@@ -278,9 +278,9 @@ static const VALUE *
vm_base_ptr(const rb_control_frame_t *cfp)
{
const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
- const VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
+ const VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
- if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
+ if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD) {
bp += 1;
}
return bp;
@@ -296,8 +296,8 @@ vm_stack_dump_each(const rb_execution_context_t *ec, const rb_control_frame_t *c
if (VM_FRAME_RUBYFRAME_P(cfp)) {
const rb_iseq_t *iseq = cfp->iseq;
- argc = iseq->body->param.lead_num;
- local_table_size = iseq->body->local_table_size;
+ argc = ISEQ_BODY(iseq)->param.lead_num;
+ local_table_size = ISEQ_BODY(iseq)->local_table_size;
}
/* stack trace header */
@@ -366,7 +366,7 @@ rb_vmdebug_debug_print_register(const rb_execution_context_t *ec)
ptrdiff_t cfpi;
if (VM_FRAME_RUBYFRAME_P(cfp)) {
- pc = cfp->pc - cfp->iseq->body->iseq_encoded;
+ pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded;
}
if (ep < 0 || (size_t)ep > ec->vm_stack_size) {
@@ -390,7 +390,7 @@ rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_fr
const rb_iseq_t *iseq = cfp->iseq;
if (iseq != 0) {
- ptrdiff_t pc = _pc - iseq->body->iseq_encoded;
+ ptrdiff_t pc = _pc - ISEQ_BODY(iseq)->iseq_encoded;
int i;
for (i=0; i<(int)VM_CFP_CNT(ec, cfp); i++) {
diff --git a/vm_eval.c b/vm_eval.c
index 0abb4644f9..5313267b44 100644
--- a/vm_eval.c
+++ b/vm_eval.c
@@ -1709,7 +1709,7 @@ eval_make_iseq(VALUE src, VALUE fname, int line, const rb_binding_t *bind,
ast = rb_parser_compile_string_path(parser, fname, src, line);
if (ast->body.root) {
iseq = rb_iseq_new_eval(&ast->body,
- parent->body->location.label,
+ ISEQ_BODY(parent)->location.label,
fname, Qnil, INT2FIX(line),
parent, isolated_depth);
}
@@ -1772,7 +1772,7 @@ eval_string_with_scope(VALUE scope, VALUE src, VALUE file, int line)
vm_set_eval_stack(ec, iseq, NULL, &bind->block);
/* save new env */
- if (iseq->body->local_table_size > 0) {
+ if (ISEQ_BODY(iseq)->local_table_size > 0) {
vm_bind_update_env(scope, bind, vm_make_env_object(ec, ec->cfp));
}
@@ -2466,8 +2466,8 @@ rb_f_local_variables(VALUE _)
local_var_list_init(&vars);
while (cfp) {
if (cfp->iseq) {
- for (i = 0; i < cfp->iseq->body->local_table_size; i++) {
- local_var_list_add(&vars, cfp->iseq->body->local_table[i]);
+ for (i = 0; i < ISEQ_BODY(cfp->iseq)->local_table_size; i++) {
+ local_var_list_add(&vars, ISEQ_BODY(cfp->iseq)->local_table[i]);
}
}
if (!VM_ENV_LOCAL_P(cfp->ep)) {
diff --git a/vm_exec.h b/vm_exec.h
index 89c925cbb4..edea61d4f8 100644
--- a/vm_exec.h
+++ b/vm_exec.h
@@ -76,10 +76,10 @@ error !
#define INSN_ENTRY_SIG(insn) \
if (0) { \
ruby_debug_printf("exec: %s@(%"PRIdPTRDIFF", %"PRIdPTRDIFF")@%s:%u\n", #insn, \
- (reg_pc - reg_cfp->iseq->body->iseq_encoded), \
- (reg_cfp->pc - reg_cfp->iseq->body->iseq_encoded), \
+ (reg_pc - ISEQ_BODY(reg_cfp->iseq)->iseq_encoded), \
+ (reg_cfp->pc - ISEQ_BODY(reg_cfp->iseq)->iseq_encoded), \
RSTRING_PTR(rb_iseq_path(reg_cfp->iseq)), \
- rb_iseq_line_no(reg_cfp->iseq, reg_pc - reg_cfp->iseq->body->iseq_encoded)); \
+ rb_iseq_line_no(reg_cfp->iseq, reg_pc - ISEQ_BODY(reg_cfp->iseq)->iseq_encoded)); \
} \
if (USE_INSNS_COUNTER) vm_insns_counter_count_insn(BIN(insn));
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index f6d9c2c634..cbc53b5455 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -264,7 +264,7 @@ rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
}
const VALUE *orig = rb_iseq_original_iseq(iseq);
- const VALUE *encoded = iseq->body->iseq_encoded;
+ const VALUE *encoded = ISEQ_BODY(iseq)->iseq_encoded;
const ptrdiff_t pos = GET_PC() - encoded;
const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
const char *name = insn_name(insn);
@@ -1441,15 +1441,15 @@ vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_c
const rb_iseq_t *base_iseq = GET_ISEQ();
escape_cfp = reg_cfp;
- while (base_iseq->body->type != ISEQ_TYPE_BLOCK) {
- if (escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
+ while (ISEQ_BODY(base_iseq)->type != ISEQ_TYPE_BLOCK) {
+ if (ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
ep = escape_cfp->ep;
base_iseq = escape_cfp->iseq;
}
else {
ep = VM_ENV_PREV_EP(ep);
- base_iseq = base_iseq->body->parent_iseq;
+ base_iseq = ISEQ_BODY(base_iseq)->parent_iseq;
escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
VM_ASSERT(escape_cfp->iseq == base_iseq);
}
@@ -1466,8 +1466,8 @@ vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_c
while (escape_cfp < eocfp) {
if (escape_cfp->ep == ep) {
const rb_iseq_t *const iseq = escape_cfp->iseq;
- const VALUE epc = escape_cfp->pc - iseq->body->iseq_encoded;
- const struct iseq_catch_table *const ct = iseq->body->catch_table;
+ const VALUE epc = escape_cfp->pc - ISEQ_BODY(iseq)->iseq_encoded;
+ const struct iseq_catch_table *const ct = ISEQ_BODY(iseq)->catch_table;
unsigned int i;
if (!ct) break;
@@ -1525,7 +1525,7 @@ vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_c
if (lep == target_lep &&
VM_FRAME_RUBYFRAME_P(escape_cfp) &&
- escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
+ ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_CLASS) {
in_class_frame = 1;
target_lep = 0;
}
@@ -1555,7 +1555,7 @@ vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_c
}
}
else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
- switch (escape_cfp->iseq->body->type) {
+ switch (ISEQ_BODY(escape_cfp->iseq)->type) {
case ISEQ_TYPE_TOP:
case ISEQ_TYPE_MAIN:
if (toplevel) {
@@ -1578,7 +1578,7 @@ vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_c
}
}
- if (escape_cfp->ep == target_lep && escape_cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
+ if (escape_cfp->ep == target_lep && ISEQ_BODY(escape_cfp->iseq)->type == ISEQ_TYPE_METHOD) {
if (target_ep == NULL) {
goto valid_return;
}
@@ -2205,8 +2205,8 @@ vm_base_ptr(const rb_control_frame_t *cfp)
const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
- VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
- if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
+ VALUE *bp = prev_cfp->sp + ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE;
+ if (ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD) {
/* adjust `self' */
bp += 1;
}
@@ -2257,44 +2257,44 @@ vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t
const struct rb_callcache *cc = calling->cc;
const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
- int param = iseq->body->param.size;
- int local = iseq->body->local_table_size;
+ int param = ISEQ_BODY(iseq)->param.size;
+ int local = ISEQ_BODY(iseq)->local_table_size;
return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
MJIT_STATIC bool
rb_simple_iseq_p(const rb_iseq_t *iseq)
{
- return iseq->body->param.flags.has_opt == FALSE &&
- iseq->body->param.flags.has_rest == FALSE &&
- iseq->body->param.flags.has_post == FALSE &&
- iseq->body->param.flags.has_kw == FALSE &&
- iseq->body->param.flags.has_kwrest == FALSE &&
- iseq->body->param.flags.accepts_no_kwarg == FALSE &&
- iseq->body->param.flags.has_block == FALSE;
+ return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}
MJIT_FUNC_EXPORTED bool
rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
{
- return iseq->body->param.flags.has_opt == TRUE &&
- iseq->body->param.flags.has_rest == FALSE &&
- iseq->body->param.flags.has_post == FALSE &&
- iseq->body->param.flags.has_kw == FALSE &&
- iseq->body->param.flags.has_kwrest == FALSE &&
- iseq->body->param.flags.accepts_no_kwarg == FALSE &&
- iseq->body->param.flags.has_block == FALSE;
+ return ISEQ_BODY(iseq)->param.flags.has_opt == TRUE &&
+ ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_kw == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}
MJIT_FUNC_EXPORTED bool
rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
{
- return iseq->body->param.flags.has_opt == FALSE &&
- iseq->body->param.flags.has_rest == FALSE &&
- iseq->body->param.flags.has_post == FALSE &&
- iseq->body->param.flags.has_kw == TRUE &&
- iseq->body->param.flags.has_kwrest == FALSE &&
- iseq->body->param.flags.has_block == FALSE;
+ return ISEQ_BODY(iseq)->param.flags.has_opt == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_rest == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_post == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_kw == TRUE &&
+ ISEQ_BODY(iseq)->param.flags.has_kwrest == FALSE &&
+ ISEQ_BODY(iseq)->param.flags.has_block == FALSE;
}
// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
@@ -2387,12 +2387,12 @@ vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame
{
const struct rb_callcache *cc = calling->cc;
const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
- const int lead_num = iseq->body->param.lead_num;
+ const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
const int opt = calling->argc - lead_num;
- const int opt_num = iseq->body->param.opt_num;
- const int opt_pc = (int)iseq->body->param.opt_table[opt];
- const int param = iseq->body->param.size;
- const int local = iseq->body->local_table_size;
+ const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
+ const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
+ const int param = ISEQ_BODY(iseq)->param.size;
+ const int local = ISEQ_BODY(iseq)->local_table_size;
const int delta = opt_num - opt;
RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
@@ -2415,9 +2415,9 @@ vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_fra
{
const struct rb_callcache *cc = calling->cc;
const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
- const int lead_num = iseq->body->param.lead_num;
+ const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
const int opt = calling->argc - lead_num;
- const int opt_pc = (int)iseq->body->param.opt_table[opt];
+ const int opt_pc = (int)ISEQ_BODY(iseq)->param.opt_table[opt];
RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
@@ -2449,19 +2449,19 @@ vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *
RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
- const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
+ const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
const int ci_kw_len = kw_arg->keyword_len;
const VALUE * const ci_keywords = kw_arg->keywords;
VALUE *argv = cfp->sp - calling->argc;
VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
- const int lead_num = iseq->body->param.lead_num;
+ const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
- int param = iseq->body->param.size;
- int local = iseq->body->local_table_size;
+ int param = ISEQ_BODY(iseq)->param.size;
+ int local = ISEQ_BODY(iseq)->local_table_size;
return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
@@ -2476,7 +2476,7 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t
RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
- const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
+ const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
VALUE * const argv = cfp->sp - calling->argc;
VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
@@ -2489,8 +2489,8 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t
// nobody check this value, but it should be cleared because it can
// points invalid VALUE (T_NONE objects, raw pointer and so on).
- int param = iseq->body->param.size;
- int local = iseq->body->local_table_size;
+ int param = ISEQ_BODY(iseq)->param.size;
+ int local = ISEQ_BODY(iseq)->local_table_size;
return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
@@ -2508,8 +2508,8 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
CALLER_SETUP_ARG(cfp, calling, ci);
CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
- if (calling->argc != iseq->body->param.lead_num) {
- argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
+ if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
+ argument_arity_error(ec, iseq, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
}
VM_ASSERT(ci == calling->ci);
@@ -2522,8 +2522,8 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
CALLER_SETUP_ARG(cfp, calling, ci);
CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
- const int lead_num = iseq->body->param.lead_num;
- const int opt_num = iseq->body->param.opt_num;
+ const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
+ const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
const int argc = calling->argc;
const int opt = argc - lead_num;
@@ -2543,16 +2543,16 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
}
/* initialize opt vars for self-references */
- VM_ASSERT((int)iseq->body->param.size == lead_num + opt_num);
+ VM_ASSERT((int)ISEQ_BODY(iseq)->param.size == lead_num + opt_num);
for (int i=argc; i<lead_num + opt_num; i++) {
argv[i] = Qnil;
}
- return (int)iseq->body->param.opt_table[opt];
+ return (int)ISEQ_BODY(iseq)->param.opt_table[opt];
}
else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
- const int lead_num = iseq->body->param.lead_num;
+ const int lead_num = ISEQ_BODY(iseq)->param.lead_num;
const int argc = calling->argc;
- const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
+ const struct rb_iseq_param_keyword *kw_param = ISEQ_BODY(iseq)->param.keyword;
if (vm_ci_flag(ci) & VM_CALL_KWARG) {
const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
@@ -2598,8 +2598,8 @@ vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct r
const struct rb_callcache *cc = calling->cc;
const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
- const int param_size = iseq->body->param.size;
- const int local_size = iseq->body->local_table_size;
+ const int param_size = ISEQ_BODY(iseq)->param.size;
+ const int local_size = ISEQ_BODY(iseq)->local_table_size;
const int opt_pc = vm_callee_setup_arg(ec, calling, def_iseq_ptr(vm_cc_cme(cc)->def), cfp->sp - calling->argc, param_size, local_size);
return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
}
@@ -2630,9 +2630,9 @@ vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, s
vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
calling->block_handler, (VALUE)me,
- iseq->body->iseq_encoded + opt_pc, sp,
+ ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
local_size - param_size,
- iseq->body->stack_max);
+ ISEQ_BODY(iseq)->stack_max);
return Qundef;
}
@@ -2670,15 +2670,15 @@ vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp,
sp++;
/* copy arguments */
- for (i=0; i < iseq->body->param.size; i++) {
+ for (i=0; i < ISEQ_BODY(iseq)->param.size; i++) {
*sp++ = src_argv[i];
}
vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
- calling->recv, calling->block_handler, (VALUE)me,
- iseq->body->iseq_encoded + opt_pc, sp,
- iseq->body->local_table_size - iseq->body->param.size,
- iseq->body->stack_max);
+ calling->recv, calling->block_handler, (VALUE)me,
+ ISEQ_BODY(iseq)->iseq_encoded + opt_pc, sp,
+ ISEQ_BODY(iseq)->local_table_size - ISEQ_BODY(iseq)->param.size,
+ ISEQ_BODY(iseq)->stack_max);
cfp->sp = sp_orig;
@@ -3376,8 +3376,8 @@ current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
rb_control_frame_t *top_cfp = cfp;
- if (cfp->iseq && cfp->iseq->body->type == ISEQ_TYPE_BLOCK) {
- const rb_iseq_t *local_iseq = cfp->iseq->body->local_iseq;
+ if (cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_BLOCK) {
+ const rb_iseq_t *local_iseq = ISEQ_BODY(cfp->iseq)->local_iseq;
do {
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
@@ -3987,9 +3987,9 @@ vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *i
int i;
long len = RARRAY_LEN(ary);
- CHECK_VM_STACK_OVERFLOW(cfp, iseq->body->param.lead_num);
+ CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
- for (i=0; i<len && i<iseq->body->param.lead_num; i++) {
+ for (i=0; i<len && i<ISEQ_BODY(iseq)->param.lead_num; i++) {
argv[i] = RARRAY_AREF(ary, i);
}
@@ -4021,26 +4021,26 @@ vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *ca
if (arg_setup_type == arg_setup_block &&
calling->argc == 1 &&
- iseq->body->param.flags.has_lead &&
- !iseq->body->param.flags.ambiguous_param0 &&
+ ISEQ_BODY(iseq)->param.flags.has_lead &&
+ !ISEQ_BODY(iseq)->param.flags.ambiguous_param0 &&
!NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
}
- if (calling->argc != iseq->body->param.lead_num) {
+ if (calling->argc != ISEQ_BODY(iseq)->param.lead_num) {
if (arg_setup_type == arg_setup_block) {
- if (calling->argc < iseq->body->param.lead_num) {
+ if (calling->argc < ISEQ_BODY(iseq)->param.lead_num) {
int i;
- CHECK_VM_STACK_OVERFLOW(cfp, iseq->body->param.lead_num);
- for (i=calling->argc; i<iseq->body->param.lead_num; i++) argv[i] = Qnil;
- calling->argc = iseq->body->param.lead_num; /* fill rest parameters */
+ CHECK_VM_STACK_OVERFLOW(cfp, ISEQ_BODY(iseq)->param.lead_num);
+ for (i=calling->argc; i<ISEQ_BODY(iseq)->param.lead_num; i++) argv[i] = Qnil;
+ calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* fill rest parameters */
}
- else if (calling->argc > iseq->body->param.lead_num) {
- calling->argc = iseq->body->param.lead_num; /* simply truncate arguments */
+ else if (calling->argc > ISEQ_BODY(iseq)->param.lead_num) {
+ calling->argc = ISEQ_BODY(iseq)->param.lead_num; /* simply truncate arguments */
}
}
else {
- argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
+ argument_arity_error(ec, iseq, calling->argc, ISEQ_BODY(iseq)->param.lead_num, ISEQ_BODY(iseq)->param.lead_num);
}
}
@@ -4075,7 +4075,7 @@ vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
{
const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
- const int arg_size = iseq->body->param.size;
+ const int arg_size = ISEQ_BODY(iseq)->param.size;
VALUE * const rsp = GET_SP() - calling->argc;
int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, rsp, is_lambda ? arg_setup_method : arg_setup_block);
@@ -4085,9 +4085,9 @@ vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0),
captured->self,
VM_GUARDED_PREV_EP(captured->ep), 0,
- iseq->body->iseq_encoded + opt_pc,
+ ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
rsp + arg_size,
- iseq->body->local_table_size - arg_size, iseq->body->stack_max);
+ ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max);
return Qundef;
}
@@ -4775,7 +4775,7 @@ vm_sendish(
frame may have stack values in the local variables and the
cancelling the caller frame will purge them. But directly
calling mjit_exec is faster... */
- if (GET_ISEQ()->body->catch_except_p) {
+ if (ISEQ_BODY(GET_ISEQ())->catch_except_p) {
VM_ENV_FLAGS_SET(GET_EP(), VM_FRAME_FLAG_FINISH);
return vm_exec(ec, true);
}
@@ -5637,7 +5637,7 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
}
else {
const rb_iseq_t *iseq = reg_cfp->iseq;
- size_t pos = pc - iseq->body->iseq_encoded;
+ size_t pos = pc - ISEQ_BODY(iseq)->iseq_encoded;
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
@@ -5910,7 +5910,7 @@ lookup_builtin_invoker(int argc)
static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
- const bool canary_p = reg_cfp->iseq->body->builtin_inline_p; // Verify an assumption of `Primitive.attr! 'inline'`
+ const bool canary_p = ISEQ_BODY(reg_cfp->iseq)->builtin_inline_p; // Verify an assumption of `Primitive.attr! 'inline'`
SETUP_CANARY(canary_p);
VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, (rb_insn_func_t)bf->func_ptr);
CHECK_CANARY(canary_p, BIN(invokebuiltin));
@@ -5929,7 +5929,7 @@ vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp,
if (0) { // debug print
fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
for (int i=0; i<bf->argc; i++) {
- ruby_debug_printf(":%s ", rb_id2name(cfp->iseq->body->local_table[i+start_index]));
+ ruby_debug_printf(":%s ", rb_id2name(ISEQ_BODY(cfp->iseq)->local_table[i+start_index]));
}
ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc, bf->func_ptr);
}
@@ -5938,7 +5938,7 @@ vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp,
return invoke_bf(ec, cfp, bf, NULL);
}
else {
- const VALUE *argv = cfp->ep - cfp->iseq->body->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
+ const VALUE *argv = cfp->ep - ISEQ_BODY(cfp->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
return invoke_bf(ec, cfp, bf, argv);
}
}
diff --git a/vm_method.c b/vm_method.c
index 0400aab801..03d2ed09d1 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -496,7 +496,7 @@ rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *de
/* setup iseq first (before invoking GC) */
RB_OBJ_WRITE(me, &def->body.iseq.iseqptr, iseq);
- if (iseq->body->mandatory_only_iseq) def->iseq_overload = 1;
+ if (ISEQ_BODY(iseq)->mandatory_only_iseq) def->iseq_overload = 1;
if (0) vm_cref_dump("rb_method_definition_create", cref);
@@ -889,7 +889,7 @@ rb_method_entry_make(VALUE klass, ID mid, VALUE defined_class, rb_method_visibil
}
if (iseq) {
rb_compile_warning(RSTRING_PTR(rb_iseq_path(iseq)),
- FIX2INT(iseq->body->location.first_lineno),
+ FIX2INT(ISEQ_BODY(iseq)->location.first_lineno),
"previous definition of %"PRIsVALUE" was here",
rb_id2str(old_def->original_id));
}
@@ -1020,7 +1020,7 @@ get_overloaded_cme(const rb_callable_method_entry_t *cme)
// create
rb_method_definition_t *def = rb_method_definition_create(VM_METHOD_TYPE_ISEQ, cme->def->original_id);
def->body.iseq.cref = cme->def->body.iseq.cref;
- def->body.iseq.iseqptr = cme->def->body.iseq.iseqptr->body->mandatory_only_iseq;
+ def->body.iseq.iseqptr = ISEQ_BODY(cme->def->body.iseq.iseqptr)->mandatory_only_iseq;
rb_method_entry_t *me = rb_method_entry_alloc(cme->called_id,
cme->owner,
@@ -1040,7 +1040,7 @@ check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_call
{
if (UNLIKELY(cme->def->iseq_overload) &&
(vm_ci_flag(ci) & (VM_CALL_ARGS_SIMPLE)) &&
- (int)vm_ci_argc(ci) == method_entry_iseqptr(cme)->body->param.lead_num) {
+ (int)vm_ci_argc(ci) == ISEQ_BODY(method_entry_iseqptr(cme))->param.lead_num) {
VM_ASSERT(cme->def->type == VM_METHOD_TYPE_ISEQ); // iseq_overload is marked only on ISEQ methods
cme = get_overloaded_cme(cme);
@@ -1675,7 +1675,7 @@ scope_visibility_check(void)
{
/* Check for public/protected/private/module_function called inside a method */
rb_control_frame_t *cfp = GET_EC()->cfp+1;
- if (cfp && cfp->iseq && cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
+ if (cfp && cfp->iseq && ISEQ_BODY(cfp->iseq)->type == ISEQ_TYPE_METHOD) {
rb_warn("calling %s without arguments inside a method may not have the intended effect",
rb_id2name(rb_frame_this_func()));
}
@@ -2416,10 +2416,10 @@ rb_mod_ruby2_keywords(int argc, VALUE *argv, VALUE module)
if (module == defined_class || origin_class == defined_class) {
switch (me->def->type) {
case VM_METHOD_TYPE_ISEQ:
- if (me->def->body.iseq.iseqptr->body->param.flags.has_rest &&
- !me->def->body.iseq.iseqptr->body->param.flags.has_kw &&
- !me->def->body.iseq.iseqptr->body->param.flags.has_kwrest) {
- me->def->body.iseq.iseqptr->body->param.flags.ruby2_keywords = 1;
+ if (ISEQ_BODY(me->def->body.iseq.iseqptr)->param.flags.has_rest &&
+ !ISEQ_BODY(me->def->body.iseq.iseqptr)->param.flags.has_kw &&
+ !ISEQ_BODY(me->def->body.iseq.iseqptr)->param.flags.has_kwrest) {
+ ISEQ_BODY(me->def->body.iseq.iseqptr)->param.flags.ruby2_keywords = 1;
rb_clear_method_cache(module, name);
}
else {
@@ -2435,10 +2435,10 @@ rb_mod_ruby2_keywords(int argc, VALUE *argv, VALUE module)
if (vm_block_handler_type(procval) == block_handler_type_iseq) {
const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(procval);
const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
- if (iseq->body->param.flags.has_rest &&
- !iseq->body->param.flags.has_kw &&
- !iseq->body->param.flags.has_kwrest) {
- iseq->body->param.flags.ruby2_keywords = 1;
+ if (ISEQ_BODY(iseq)->param.flags.has_rest &&
+ !ISEQ_BODY(iseq)->param.flags.has_kw &&
+ !ISEQ_BODY(iseq)->param.flags.has_kwrest) {
+ ISEQ_BODY(iseq)->param.flags.ruby2_keywords = 1;
rb_clear_method_cache(module, name);
}
else {
diff --git a/yjit_codegen.c b/yjit_codegen.c
index a95428ce4a..4b53b737a0 100644
--- a/yjit_codegen.c
+++ b/yjit_codegen.c
@@ -138,8 +138,8 @@ jit_peek_at_local(jitstate_t *jit, ctx_t *ctx, int n)
{
RUBY_ASSERT(jit_at_current_insn(jit));
- int32_t local_table_size = jit->iseq->body->local_table_size;
- RUBY_ASSERT(n < (int)jit->iseq->body->local_table_size);
+ int32_t local_table_size = ISEQ_BODY(jit->iseq)->local_table_size;
+ RUBY_ASSERT(n < (int)ISEQ_BODY(jit->iseq)->local_table_size);
const VALUE *ep = jit->ec->cfp->ep;
return ep[-VM_ENV_DATA_SIZE - local_table_size + n + 1];
@@ -271,7 +271,7 @@ verify_ctx(jitstate_t *jit, ctx_t *ctx)
}
}
- int32_t local_table_size = jit->iseq->body->local_table_size;
+ int32_t local_table_size = ISEQ_BODY(jit->iseq)->local_table_size;
for (int i = 0; i < local_table_size && i < MAX_TEMP_TYPES; i++) {
val_type_t learned = ctx->local_types[i];
VALUE val = jit_peek_at_local(jit, ctx, i);
@@ -459,7 +459,7 @@ yjit_pc_guard(codeblock_t *cb, const rb_iseq_t *iseq)
RUBY_ASSERT(cb != NULL);
mov(cb, REG0, member_opnd(REG_CFP, rb_control_frame_t, pc));
- mov(cb, REG1, const_ptr_opnd(iseq->body->iseq_encoded));
+ mov(cb, REG1, const_ptr_opnd(ISEQ_BODY(iseq)->iseq_encoded));
xor(cb, REG0, REG1);
// xor should impact ZF, so we can jz here
@@ -584,7 +584,7 @@ yjit_entry_prologue(codeblock_t *cb, const rb_iseq_t *iseq)
// has optional parameters, we'll add a runtime check that the PC we've
// compiled for is the same PC that the interpreter wants us to run with.
// If they don't match, then we'll take a side exit.
- if (iseq->body->param.flags.has_opt) {
+ if (ISEQ_BODY(iseq)->param.flags.has_opt) {
yjit_pc_guard(cb, iseq);
}
@@ -663,7 +663,7 @@ gen_single_block(blockid_t blockid, const ctx_t *start_ctx, rb_execution_context
RUBY_ASSERT(!(blockid.idx == 0 && start_ctx->stack_size > 0));
const rb_iseq_t *iseq = block->blockid.iseq;
- const unsigned int iseq_size = iseq->body->iseq_size;
+ const unsigned int iseq_size = ISEQ_BODY(iseq)->iseq_size;
uint32_t insn_idx = block->blockid.idx;
const uint32_t starting_insn_idx = insn_idx;
@@ -1330,7 +1330,7 @@ slot_to_local_idx(const rb_iseq_t *iseq, int32_t slot_idx)
// See usages of local_var_name() from iseq.c for similar calculation.
// FIXME: unsigned to signed cast below can truncate
- int32_t local_table_size = iseq->body->local_table_size;
+ int32_t local_table_size = ISEQ_BODY(iseq)->local_table_size;
int32_t op = slot_idx - VM_ENV_DATA_SIZE;
int32_t local_idx = local_table_size - op - 1;
RUBY_ASSERT(local_idx >= 0 && local_idx < local_table_size);
@@ -1446,7 +1446,7 @@ gen_checkkeyword(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
{
// When a keyword is unspecified past index 32, a hash will be used
// instead. This can only happen in iseqs taking more than 32 keywords.
- if (jit->iseq->body->param.keyword->num >= 32) {
+ if (ISEQ_BODY(jit->iseq)->param.keyword->num >= 32) {
return YJIT_CANT_COMPILE;
}
@@ -3514,10 +3514,10 @@ rb_leaf_invokebuiltin_iseq_p(const rb_iseq_t *iseq)
unsigned int invokebuiltin_len = insn_len(BIN(opt_invokebuiltin_delegate_leave));
unsigned int leave_len = insn_len(BIN(leave));
- return (iseq->body->iseq_size == (invokebuiltin_len + leave_len) &&
- rb_vm_insn_addr2opcode((void *)iseq->body->iseq_encoded[0]) == BIN(opt_invokebuiltin_delegate_leave) &&
- rb_vm_insn_addr2opcode((void *)iseq->body->iseq_encoded[invokebuiltin_len]) == BIN(leave) &&
- iseq->body->builtin_inline_p
+ return (ISEQ_BODY(iseq)->iseq_size == (invokebuiltin_len + leave_len) &&
+ rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[0]) == BIN(opt_invokebuiltin_delegate_leave) &&
+ rb_vm_insn_addr2opcode((void *)ISEQ_BODY(iseq)->iseq_encoded[invokebuiltin_len]) == BIN(leave) &&
+ ISEQ_BODY(iseq)->builtin_inline_p
);
}
@@ -3527,7 +3527,7 @@ rb_leaf_builtin_function(const rb_iseq_t *iseq)
{
if (!rb_leaf_invokebuiltin_iseq_p(iseq))
return NULL;
- return (const struct rb_builtin_function *)iseq->body->iseq_encoded[1];
+ return (const struct rb_builtin_function *)ISEQ_BODY(iseq)->iseq_encoded[1];
}
static codegen_status_t
@@ -3540,7 +3540,7 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
// specified at the call site. We need to keep track of the fact that this
// value is present on the stack in order to properly set up the callee's
// stack pointer.
- const bool doing_kw_call = iseq->body->param.flags.has_kw;
+ const bool doing_kw_call = ISEQ_BODY(iseq)->param.flags.has_kw;
const bool supplying_kws = vm_ci_flag(ci) & VM_CALL_KWARG;
if (vm_ci_flag(ci) & VM_CALL_TAILCALL) {
@@ -3551,9 +3551,9 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
// No support for callees with these parameters yet as they require allocation
// or complex handling.
- if (iseq->body->param.flags.has_rest ||
- iseq->body->param.flags.has_post ||
- iseq->body->param.flags.has_kwrest) {
+ if (ISEQ_BODY(iseq)->param.flags.has_rest ||
+ ISEQ_BODY(iseq)->param.flags.has_post ||
+ ISEQ_BODY(iseq)->param.flags.has_kwrest) {
GEN_COUNTER_INC(cb, send_iseq_complex_callee);
return YJIT_CANT_COMPILE;
}
@@ -3561,24 +3561,24 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
// If we have keyword arguments being passed to a callee that only takes
// positionals, then we need to allocate a hash. For now we're going to
// call that too complex and bail.
- if (supplying_kws && !iseq->body->param.flags.has_kw) {
+ if (supplying_kws && !ISEQ_BODY(iseq)->param.flags.has_kw) {
GEN_COUNTER_INC(cb, send_iseq_complex_callee);
return YJIT_CANT_COMPILE;
}
// If we have a method accepting no kwargs (**nil), exit if we have passed
// it any kwargs.
- if (supplying_kws && iseq->body->param.flags.accepts_no_kwarg) {
+ if (supplying_kws && ISEQ_BODY(iseq)->param.flags.accepts_no_kwarg) {
GEN_COUNTER_INC(cb, send_iseq_complex_callee);
return YJIT_CANT_COMPILE;
}
// For computing number of locals to setup for the callee
- int num_params = iseq->body->param.size;
+ int num_params = ISEQ_BODY(iseq)->param.size;
// Block parameter handling. This mirrors setup_parameters_complex().
- if (iseq->body->param.flags.has_block) {
- if (iseq->body->local_iseq == iseq) {
+ if (ISEQ_BODY(iseq)->param.flags.has_block) {
+ if (ISEQ_BODY(iseq)->local_iseq == iseq) {
// Block argument is passed through EP and not setup as a local in
// the callee.
num_params--;
@@ -3594,7 +3594,7 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
uint32_t start_pc_offset = 0;
- const int required_num = iseq->body->param.lead_num;
+ const int required_num = ISEQ_BODY(iseq)->param.lead_num;
// This struct represents the metadata about the caller-specified
// keyword arguments.
@@ -3603,7 +3603,7 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
// Arity handling and optional parameter setup
const int opts_filled = argc - required_num - kw_arg_num;
- const int opt_num = iseq->body->param.opt_num;
+ const int opt_num = ISEQ_BODY(iseq)->param.opt_num;
const int opts_missing = opt_num - opts_filled;
if (opts_filled < 0 || opts_filled > opt_num) {
@@ -3621,7 +3621,7 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
if (opt_num > 0) {
num_params -= opt_num - opts_filled;
- start_pc_offset = (uint32_t)iseq->body->param.opt_table[opts_filled];
+ start_pc_offset = (uint32_t)ISEQ_BODY(iseq)->param.opt_table[opts_filled];
}
if (doing_kw_call) {
@@ -3630,7 +3630,7 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
// This struct represents the metadata about the callee-specified
// keyword parameters.
- const struct rb_iseq_param_keyword *keyword = iseq->body->param.keyword;
+ const struct rb_iseq_param_keyword *keyword = ISEQ_BODY(iseq)->param.keyword;
int required_kwargs_filled = 0;
@@ -3692,7 +3692,7 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
}
// Number of locals that are not parameters
- const int num_locals = iseq->body->local_table_size - num_params;
+ const int num_locals = ISEQ_BODY(iseq)->local_table_size - num_params;
// Create a side-exit to fall back to the interpreter
uint8_t *side_exit = yjit_side_exit(jit, ctx);
@@ -3731,7 +3731,7 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
// Note that vm_push_frame checks it against a decremented cfp, hence the multiply by 2.
// #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin)
ADD_COMMENT(cb, "stack overflow check");
- lea(cb, REG0, ctx_sp_opnd(ctx, sizeof(VALUE) * (num_locals + iseq->body->stack_max) + 2 * sizeof(rb_control_frame_t)));
+ lea(cb, REG0, ctx_sp_opnd(ctx, sizeof(VALUE) * (num_locals + ISEQ_BODY(iseq)->stack_max) + 2 * sizeof(rb_control_frame_t)));
cmp(cb, REG_CFP, REG0);
jle_ptr(cb, COUNTED_EXIT(jit, side_exit, send_se_cf_overflow));
@@ -3754,7 +3754,7 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
// This struct represents the metadata about the callee-specified
// keyword parameters.
- const struct rb_iseq_param_keyword *const keyword = iseq->body->param.keyword;
+ const struct rb_iseq_param_keyword *const keyword = ISEQ_BODY(iseq)->param.keyword;
ADD_COMMENT(cb, "keyword args");
@@ -4864,7 +4864,7 @@ gen_opt_invokebuiltin_delegate(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
// Copy arguments from locals
for (int32_t i = 0; i < bf->argc; i++) {
- const int32_t offs = start_index + i - jit->iseq->body->local_table_size - VM_ENV_DATA_SIZE + 1;
+ const int32_t offs = start_index + i - ISEQ_BODY(jit->iseq)->local_table_size - VM_ENV_DATA_SIZE + 1;
x86opnd_t local_opnd = mem_opnd(64, REG0, offs * SIZEOF_VALUE);
x86opnd_t c_arg_reg = C_ARG_REGS[i + 2];
mov(cb, c_arg_reg, local_opnd);
@@ -4957,7 +4957,7 @@ tracing_invalidate_all_i(void *vstart, void *vend, size_t stride, void *data)
static void
invalidate_all_blocks_for_tracing(const rb_iseq_t *iseq)
{
- struct rb_iseq_constant_body *body = iseq->body;
+ struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
if (!body) return; // iseq yet to be initialized
ASSERT_vm_locking();
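A recurring shape in the YJIT hunks (here and in yjit_core.c / yjit_iface.c below): when a function reads many fields of the body, it fetches the pointer once into a local via ISEQ_BODY() and the remaining accesses go through that local. A small sketch of that caching pattern, again with the hypothetical stand-in types from the earlier sketch rather than the real VM structs:

/* Sketch only, stand-in types as before: fetch the body once, NULL-check
 * it, then read several fields through the cached local pointer. */
#include <stdio.h>

struct demo_body { unsigned int iseq_size; unsigned int local_table_size; };
struct demo_iseq { struct demo_body *body; };
#define ISEQ_BODY(iseq) ((iseq)->body)

static void demo_dump_counts(const struct demo_iseq *iseq)
{
    struct demo_body *body = ISEQ_BODY(iseq);   /* one macro expansion */
    if (!body) return;                          /* mirrors the NULL guard above */
    printf("insns=%u locals=%u\n", body->iseq_size, body->local_table_size);
}

int main(void)
{
    struct demo_body body = { 10, 3 };
    struct demo_iseq iseq = { &body };
    demo_dump_counts(&iseq);
    return 0;
}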
diff --git a/yjit_core.c b/yjit_core.c
index b70abcafb2..eee2c52d7f 100644
--- a/yjit_core.c
+++ b/yjit_core.c
@@ -524,7 +524,7 @@ ctx_diff(const ctx_t *src, const ctx_t *dst)
static rb_yjit_block_array_t
yjit_get_version_array(const rb_iseq_t *iseq, unsigned idx)
{
- struct rb_iseq_constant_body *body = iseq->body;
+ struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
if (rb_darray_size(body->yjit_blocks) == 0) {
return NULL;
@@ -546,7 +546,7 @@ add_block_version(block_t *block)
{
const blockid_t blockid = block->blockid;
const rb_iseq_t *iseq = blockid.iseq;
- struct rb_iseq_constant_body *body = iseq->body;
+ struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
// Function entry blocks must have stack size 0
RUBY_ASSERT(!(block->blockid.idx == 0 && block->ctx.stack_size > 0));
@@ -834,7 +834,7 @@ gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_context_t
{
// If we aren't at PC 0, don't generate code
// See yjit_pc_guard
- if (iseq->body->iseq_encoded != ec->cfp->pc) {
+ if (ISEQ_BODY(iseq)->iseq_encoded != ec->cfp->pc) {
return NULL;
}
@@ -1219,7 +1219,7 @@ verify_blockid(const blockid_t blockid)
{
const rb_iseq_t *const iseq = blockid.iseq;
RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
- RUBY_ASSERT_ALWAYS(blockid.idx < iseq->body->iseq_size);
+ RUBY_ASSERT_ALWAYS(blockid.idx < ISEQ_BODY(iseq)->iseq_size);
}
// Invalidate one specific block version
@@ -1336,7 +1336,7 @@ invalidate_block_version(block_t *block)
// change this in the future when we support optional parameters because
// they enter the function with a non-zero PC
if (block->blockid.idx == 0) {
- iseq->body->jit_func = 0;
+ ISEQ_BODY(iseq)->jit_func = 0;
}
#endif
diff --git a/yjit_iface.c b/yjit_iface.c
index 4601b5b06e..636a39abdf 100644
--- a/yjit_iface.c
+++ b/yjit_iface.c
@@ -53,8 +53,8 @@ static VALUE *
yjit_iseq_pc_at_idx(const rb_iseq_t *iseq, uint32_t insn_idx)
{
RUBY_ASSERT(iseq != NULL);
- RUBY_ASSERT(insn_idx < iseq->body->iseq_size);
- VALUE *encoded = iseq->body->iseq_encoded;
+ RUBY_ASSERT(insn_idx < ISEQ_BODY(iseq)->iseq_size);
+ VALUE *encoded = ISEQ_BODY(iseq)->iseq_encoded;
VALUE *pc = &encoded[insn_idx];
return pc;
}
@@ -484,10 +484,10 @@ rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec)
uint8_t *code_ptr = gen_entry_point(iseq, 0, ec);
if (code_ptr) {
- iseq->body->jit_func = (yjit_func_t)code_ptr;
+ ISEQ_BODY(iseq)->jit_func = (yjit_func_t)code_ptr;
}
else {
- iseq->body->jit_func = 0;
+ ISEQ_BODY(iseq)->jit_func = 0;
success = false;
}
@@ -514,8 +514,8 @@ yjit_blocks_for(VALUE mod, VALUE rb_iseq)
const rb_iseq_t *iseq = rb_iseqw_to_iseq(rb_iseq);
VALUE all_versions = rb_ary_new();
- rb_darray_for(iseq->body->yjit_blocks, version_array_idx) {
- rb_yjit_block_array_t versions = rb_darray_get(iseq->body->yjit_blocks, version_array_idx);
+ rb_darray_for(ISEQ_BODY(iseq)->yjit_blocks, version_array_idx) {
+ rb_yjit_block_array_t versions = rb_darray_get(ISEQ_BODY(iseq)->yjit_blocks, version_array_idx);
rb_darray_for(versions, block_idx) {
block_t *block = rb_darray_get(versions, block_idx);
@@ -617,7 +617,7 @@ rb_yjit_constant_ic_update(const rb_iseq_t *const iseq, IC ic)
RB_VM_LOCK_ENTER();
rb_vm_barrier(); // Stop other ractors since we are going to patch machine code.
{
- const struct rb_iseq_constant_body *const body = iseq->body;
+ const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
VALUE *code = body->iseq_encoded;
const unsigned get_insn_idx = ic->get_insn_idx;