summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--  iseq.c                                      5
-rw-r--r--  mjit.c                                      8
-rw-r--r--  mjit.h                                     30
-rw-r--r--  mjit_compile.c                              2
-rw-r--r--  mjit_worker.c                              30
-rw-r--r--  tool/ruby_vm/views/_mjit_compile_send.erb   2
-rw-r--r--  tool/ruby_vm/views/mjit_compile.inc.erb     2
7 files changed, 42 insertions(+), 37 deletions(-)
diff --git a/iseq.c b/iseq.c
index 40be6c76c4..065791b74b 100644
--- a/iseq.c
+++ b/iseq.c
@@ -359,10 +359,9 @@ rb_iseq_mark(const rb_iseq_t *iseq)
}
#if USE_MJIT
- if (body->jit_unit && body->jit_unit->cc_entries != NULL) {
- // TODO: move to mjit.c?
+ if (body->jit_unit && mjit_iseq_cc_entries(body) != NULL) {
for (unsigned int i=0; i<body->ci_size; i++) {
- const struct rb_callcache *cc = body->jit_unit->cc_entries[i];
+ const struct rb_callcache *cc = mjit_iseq_cc_entries(body)[i];
if (cc != NULL) {
rb_gc_mark((VALUE)cc); // pindown
}
diff --git a/mjit.c b/mjit.c
index cae4c13a3f..5cce71fdfa 100644
--- a/mjit.c
+++ b/mjit.c
@@ -45,7 +45,7 @@ mjit_copy_job_handler(void *data)
CRITICAL_SECTION_FINISH(3, "in mjit_copy_job_handler");
return;
}
- else if (job->iseq == NULL) { // ISeq GC notified in mjit_mark_iseq
+ else if (job->iseq == NULL) { // ISeq GC notified in mjit_free_iseq
job->finish_p = true;
CRITICAL_SECTION_FINISH(3, "in mjit_copy_job_handler");
return;
@@ -1019,6 +1019,12 @@ mjit_mark(void)
RUBY_MARK_LEAVE("mjit");
}
+const struct rb_callcache **
+mjit_iseq_cc_entries(const struct rb_iseq_constant_body *const body)
+{
+ return body->jit_unit->cc_entries;
+}
+
// A hook to update valid_class_serials.
void
mjit_add_class_serial(rb_serial_t class_serial)
diff --git a/mjit.h b/mjit.h
index 15be560786..9a6af2f4af 100644
--- a/mjit.h
+++ b/mjit.h
@@ -70,35 +70,6 @@ struct rb_mjit_compile_info {
bool disable_inlining;
};
-// The unit structure that holds metadata of ISeq for MJIT.
-struct rb_mjit_unit {
- // Unique order number of unit.
- int id;
- // Dlopen handle of the loaded object file.
- void *handle;
- rb_iseq_t *iseq;
-#ifndef _MSC_VER
- // This value is always set for `compact_all_jit_code`. Also used for lazy deletion.
- char *o_file;
- // true if it's inherited from parent Ruby process and lazy deletion should be skipped.
- // `o_file = NULL` can't be used to skip lazy deletion because `o_file` could be used
- // by child for `compact_all_jit_code`.
- bool o_file_inherited_p;
-#endif
-#if defined(_WIN32)
- // DLL cannot be removed while loaded on Windows. If this is set, it'll be lazily deleted.
- char *so_file;
-#endif
- // Only used by unload_units. Flag to check this unit is currently on stack or not.
- char used_code_p;
- struct list_node unode;
- // mjit_compile's optimization switches
- struct rb_mjit_compile_info compile_info;
-
- // captured CC values, they should be marked with iseq.
- const struct rb_callcache **cc_entries; // size: iseq->body->ci_size
-};
-
typedef VALUE (*mjit_func_t)(rb_execution_context_t *, rb_control_frame_t *);
RUBY_SYMBOL_EXPORT_BEGIN
@@ -122,6 +93,7 @@ extern struct mjit_cont *mjit_cont_new(rb_execution_context_t *ec);
extern void mjit_cont_free(struct mjit_cont *cont);
extern void mjit_add_class_serial(rb_serial_t class_serial);
extern void mjit_remove_class_serial(rb_serial_t class_serial);
+const struct rb_callcache ** mjit_iseq_cc_entries(const struct rb_iseq_constant_body *const body);
// A threshold used to reject long iseqs from JITting as such iseqs
// takes too much time to be compiled.
diff --git a/mjit_compile.c b/mjit_compile.c
index e4f7cf292a..c0e5b18856 100644
--- a/mjit_compile.c
+++ b/mjit_compile.c
@@ -393,7 +393,7 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
if (insn == BIN(opt_send_without_block)) { // `compile_inlined_cancel_handler` supports only `opt_send_without_block`
CALL_DATA cd = (CALL_DATA)body->iseq_encoded[pos + 1];
const struct rb_callinfo *ci = cd->ci;
- const struct rb_callcache *cc = iseq->body->jit_unit->cc_entries[call_data_index(cd, body)]; // use copy to avoid race condition
+ const struct rb_callcache *cc = mjit_iseq_cc_entries(iseq->body)[call_data_index(cd, body)]; // use copy to avoid race condition
const rb_iseq_t *child_iseq;
if (has_valid_method_type(cc) &&
diff --git a/mjit_worker.c b/mjit_worker.c
index 40debd10f4..09f07d8e1f 100644
--- a/mjit_worker.c
+++ b/mjit_worker.c
@@ -135,6 +135,34 @@ typedef intptr_t pid_t;
#define MJIT_TMP_PREFIX "_ruby_mjit_"
+// The unit structure that holds metadata of ISeq for MJIT.
+struct rb_mjit_unit {
+ // Unique order number of unit.
+ int id;
+ // Dlopen handle of the loaded object file.
+ void *handle;
+ rb_iseq_t *iseq;
+#ifndef _MSC_VER
+ // This value is always set for `compact_all_jit_code`. Also used for lazy deletion.
+ char *o_file;
+ // true if it's inherited from parent Ruby process and lazy deletion should be skipped.
+ // `o_file = NULL` can't be used to skip lazy deletion because `o_file` could be used
+ // by child for `compact_all_jit_code`.
+ bool o_file_inherited_p;
+#endif
+#if defined(_WIN32)
+ // DLL cannot be removed while loaded on Windows. If this is set, it'll be lazily deleted.
+ char *so_file;
+#endif
+ // Only used by unload_units. Flag to check this unit is currently on stack or not.
+ char used_code_p;
+ struct list_node unode;
+ // mjit_compile's optimization switches
+ struct rb_mjit_compile_info compile_info;
+ // captured CC values, they should be marked with iseq.
+ const struct rb_callcache **cc_entries; // size: iseq->body->ci_size
+};
+
// Linked list of struct rb_mjit_unit.
struct rb_mjit_unit_list {
struct list_head head;
@@ -1184,7 +1212,7 @@ mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, union iseq_inline_storag
job->finish_p = true;
in_jit = true; // Prohibit GC during JIT compilation
- if (job->iseq == NULL) // ISeq GC is notified in mjit_mark_iseq
+ if (job->iseq == NULL) // ISeq GC is notified in mjit_free_iseq
success_p = false;
job->iseq = NULL; // Allow future GC of this ISeq from here
CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread");
diff --git a/tool/ruby_vm/views/_mjit_compile_send.erb b/tool/ruby_vm/views/_mjit_compile_send.erb
index 6c5a362699..9f51f856fe 100644
--- a/tool/ruby_vm/views/_mjit_compile_send.erb
+++ b/tool/ruby_vm/views/_mjit_compile_send.erb
@@ -14,7 +14,7 @@
MAYBE_UNUSED(<%= ope.fetch(:decl) %>) = (<%= ope.fetch(:type) %>)operands[<%= i %>];
% end
% # compiler: Use copied cc to avoid race condition
- const struct rb_callcache *captured_cc = body->jit_unit->cc_entries[call_data_index(cd, body)];
+ const struct rb_callcache *captured_cc = mjit_iseq_cc_entries(body)[call_data_index(cd, body)];
%
if (!status->compile_info->disable_send_cache && has_valid_method_type(captured_cc)) {
const rb_iseq_t *iseq;
diff --git a/tool/ruby_vm/views/mjit_compile.inc.erb b/tool/ruby_vm/views/mjit_compile.inc.erb
index 6ab57ae164..de0dfde604 100644
--- a/tool/ruby_vm/views/mjit_compile.inc.erb
+++ b/tool/ruby_vm/views/mjit_compile.inc.erb
@@ -57,7 +57,7 @@ switch (insn) {
% when *send_compatible_opt_insns
% # To avoid cancel, just emit `opt_send_without_block` instead of `opt_*` insn if call cache is populated.
% cd_index = insn.opes.index { |o| o.fetch(:type) == 'CALL_DATA' }
- if (has_valid_method_type(body->jit_unit->cc_entries[call_data_index((CALL_DATA)operands[<%= cd_index %>], body)])) {
+ if (has_valid_method_type(mjit_iseq_cc_entries(body)[call_data_index((CALL_DATA)operands[<%= cd_index %>], body)])) {
<%= render 'mjit_compile_send', locals: { insn: opt_send_without_block } -%>
<%= render 'mjit_compile_insn', locals: { insn: opt_send_without_block } -%>
break;