From a60507f616a7f5beb4aef99aa56f6b964c16880d Mon Sep 17 00:00:00 2001 From: Takashi Kokubun Date: Sun, 21 Aug 2022 11:30:32 -0700 Subject: Rename mjit_compile.c to mjit_compiler.c I'm planning to introduce mjit_compiler.rb, and I want to make this consistent with it. Consistency with compile.c doesn't seem important for MJIT anyway. --- benchmark/lib/benchmark_driver/runner/mjit_exec.rb | 2 +- common.mk | 406 +++++++------- mjit_compile.c | 596 --------------------- mjit_compiler.c | 596 +++++++++++++++++++++ tool/ruby_vm/views/mjit_compile.inc.erb | 2 +- 5 files changed, 801 insertions(+), 801 deletions(-) delete mode 100644 mjit_compile.c create mode 100644 mjit_compiler.c diff --git a/benchmark/lib/benchmark_driver/runner/mjit_exec.rb b/benchmark/lib/benchmark_driver/runner/mjit_exec.rb index 121791eb2b..63f5d472b2 100644 --- a/benchmark/lib/benchmark_driver/runner/mjit_exec.rb +++ b/benchmark/lib/benchmark_driver/runner/mjit_exec.rb @@ -210,7 +210,7 @@ class BenchmarkDriver::Runner::MjitExec # You may need to: # * Increase `JIT_ISEQ_SIZE_THRESHOLD` to 10000000 in mjit.h - # * Always return false in `inlinable_iseq_p()` of mjit_compile.c + # * Always return false in `inlinable_iseq_p()` of mjit_compiler.c def jit t = Process.clock_gettime(Process::CLOCK_MONOTONIC) i = 0 diff --git a/common.mk b/common.mk index 56d283599b..c8434b9b7b 100644 --- a/common.mk +++ b/common.mk @@ -113,7 +113,7 @@ COMMONOBJS = array.$(OBJEXT) \ math.$(OBJEXT) \ memory_view.$(OBJEXT) \ mjit.$(OBJEXT) \ - mjit_compile.$(OBJEXT) \ + mjit_compiler.$(OBJEXT) \ node.$(OBJEXT) \ numeric.$(OBJEXT) \ object.$(OBJEXT) \ @@ -9686,208 +9686,208 @@ mjit.$(OBJEXT): {$(VPATH)}vm_debug.h mjit.$(OBJEXT): {$(VPATH)}vm_opts.h mjit.$(OBJEXT): {$(VPATH)}vm_sync.h mjit.$(OBJEXT): {$(VPATH)}yjit.h -mjit_compile.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h -mjit_compile.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h -mjit_compile.$(OBJEXT): $(CCAN_DIR)/list/list.h -mjit_compile.$(OBJEXT): $(CCAN_DIR)/str/str.h -mjit_compile.$(OBJEXT): $(hdrdir)/ruby.h -mjit_compile.$(OBJEXT): $(hdrdir)/ruby/ruby.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/array.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/class.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/compile.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/compilers.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/gc.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/hash.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/imemo.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/object.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/serial.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/static_assert.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/variable.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/vm.h -mjit_compile.$(OBJEXT): $(top_srcdir)/internal/warnings.h -mjit_compile.$(OBJEXT): {$(VPATH)}assert.h -mjit_compile.$(OBJEXT): {$(VPATH)}atomic.h -mjit_compile.$(OBJEXT): {$(VPATH)}backward/2/assume.h -mjit_compile.$(OBJEXT): {$(VPATH)}backward/2/attributes.h -mjit_compile.$(OBJEXT): {$(VPATH)}backward/2/bool.h -mjit_compile.$(OBJEXT): {$(VPATH)}backward/2/gcc_version_since.h -mjit_compile.$(OBJEXT): {$(VPATH)}backward/2/inttypes.h -mjit_compile.$(OBJEXT): {$(VPATH)}backward/2/limits.h -mjit_compile.$(OBJEXT): {$(VPATH)}backward/2/long_long.h -mjit_compile.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h -mjit_compile.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h -mjit_compile.$(OBJEXT): {$(VPATH)}builtin.h -mjit_compile.$(OBJEXT): {$(VPATH)}config.h 
-mjit_compile.$(OBJEXT): {$(VPATH)}constant.h -mjit_compile.$(OBJEXT): {$(VPATH)}debug_counter.h -mjit_compile.$(OBJEXT): {$(VPATH)}defines.h -mjit_compile.$(OBJEXT): {$(VPATH)}id.h -mjit_compile.$(OBJEXT): {$(VPATH)}id_table.h -mjit_compile.$(OBJEXT): {$(VPATH)}insns.def -mjit_compile.$(OBJEXT): {$(VPATH)}insns.inc -mjit_compile.$(OBJEXT): {$(VPATH)}insns_info.inc -mjit_compile.$(OBJEXT): {$(VPATH)}intern.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/abi.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/anyargs.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/char.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/double.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/fixnum.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/gid_t.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/int.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/intptr_t.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/long.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/long_long.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/mode_t.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/off_t.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/pid_t.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/short.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/size_t.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/st_data_t.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/arithmetic/uid_t.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/assume.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/alloc_size.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/artificial.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/cold.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/const.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/constexpr.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/deprecated.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/diagnose_if.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/enum_extensibility.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/error.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/flag_enum.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/forceinline.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/format.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/maybe_unused.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/noalias.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/nodiscard.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/noexcept.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/noinline.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/nonnull.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/noreturn.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/pure.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/restrict.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/returns_nonnull.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/warning.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/attr/weakref.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/cast.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/compiler_is.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/compiler_is/apple.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/compiler_is/clang.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/compiler_is/gcc.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/compiler_is/intel.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/compiler_is/msvc.h -mjit_compile.$(OBJEXT): 
{$(VPATH)}internal/compiler_is/sunpro.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/compiler_since.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/config.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/constant_p.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/rarray.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/rbasic.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/rbignum.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/rclass.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/rdata.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/rfile.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/rhash.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/robject.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/rregexp.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/rstring.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/rstruct.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/core/rtypeddata.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/ctype.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/dllexport.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/dosish.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/error.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/eval.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/event.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/fl_type.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/gc.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/glob.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/globals.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/has/attribute.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/has/builtin.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/has/c_attribute.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/has/cpp_attribute.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/has/declspec_attribute.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/has/extension.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/has/feature.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/has/warning.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/array.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/bignum.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/class.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/compar.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/complex.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/cont.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/dir.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/enum.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/enumerator.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/error.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/eval.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/file.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/gc.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/hash.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/io.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/load.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/marshal.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/numeric.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/object.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/parse.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/proc.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/process.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/random.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/range.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/rational.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/re.h -mjit_compile.$(OBJEXT): 
{$(VPATH)}internal/intern/ruby.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/select.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/select/largesize.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/signal.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/sprintf.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/string.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/struct.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/thread.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/time.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/variable.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/intern/vm.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/interpreter.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/iterator.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/memory.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/method.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/module.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/newobj.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/rgengc.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/scan_args.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/special_consts.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/static_assert.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/stdalign.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/stdbool.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/symbol.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/value.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/value_type.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/variable.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/warning_push.h -mjit_compile.$(OBJEXT): {$(VPATH)}internal/xmalloc.h -mjit_compile.$(OBJEXT): {$(VPATH)}iseq.h -mjit_compile.$(OBJEXT): {$(VPATH)}method.h -mjit_compile.$(OBJEXT): {$(VPATH)}missing.h -mjit_compile.$(OBJEXT): {$(VPATH)}mjit.h -mjit_compile.$(OBJEXT): {$(VPATH)}mjit_compile.c -mjit_compile.$(OBJEXT): {$(VPATH)}mjit_compile.inc -mjit_compile.$(OBJEXT): {$(VPATH)}mjit_unit.h -mjit_compile.$(OBJEXT): {$(VPATH)}node.h -mjit_compile.$(OBJEXT): {$(VPATH)}ruby_assert.h -mjit_compile.$(OBJEXT): {$(VPATH)}ruby_atomic.h -mjit_compile.$(OBJEXT): {$(VPATH)}st.h -mjit_compile.$(OBJEXT): {$(VPATH)}subst.h -mjit_compile.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h -mjit_compile.$(OBJEXT): {$(VPATH)}thread_native.h -mjit_compile.$(OBJEXT): {$(VPATH)}vm_callinfo.h -mjit_compile.$(OBJEXT): {$(VPATH)}vm_core.h -mjit_compile.$(OBJEXT): {$(VPATH)}vm_exec.h -mjit_compile.$(OBJEXT): {$(VPATH)}vm_insnhelper.h -mjit_compile.$(OBJEXT): {$(VPATH)}vm_opts.h -mjit_compile.$(OBJEXT): {$(VPATH)}yjit.h +mjit_compiler.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h +mjit_compiler.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h +mjit_compiler.$(OBJEXT): $(CCAN_DIR)/list/list.h +mjit_compiler.$(OBJEXT): $(CCAN_DIR)/str/str.h +mjit_compiler.$(OBJEXT): $(hdrdir)/ruby.h +mjit_compiler.$(OBJEXT): $(hdrdir)/ruby/ruby.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/array.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/class.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/compile.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/compilers.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/gc.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/hash.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/imemo.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/object.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/serial.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/static_assert.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/variable.h +mjit_compiler.$(OBJEXT): 
$(top_srcdir)/internal/vm.h +mjit_compiler.$(OBJEXT): $(top_srcdir)/internal/warnings.h +mjit_compiler.$(OBJEXT): {$(VPATH)}assert.h +mjit_compiler.$(OBJEXT): {$(VPATH)}atomic.h +mjit_compiler.$(OBJEXT): {$(VPATH)}backward/2/assume.h +mjit_compiler.$(OBJEXT): {$(VPATH)}backward/2/attributes.h +mjit_compiler.$(OBJEXT): {$(VPATH)}backward/2/bool.h +mjit_compiler.$(OBJEXT): {$(VPATH)}backward/2/gcc_version_since.h +mjit_compiler.$(OBJEXT): {$(VPATH)}backward/2/inttypes.h +mjit_compiler.$(OBJEXT): {$(VPATH)}backward/2/limits.h +mjit_compiler.$(OBJEXT): {$(VPATH)}backward/2/long_long.h +mjit_compiler.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h +mjit_compiler.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h +mjit_compiler.$(OBJEXT): {$(VPATH)}builtin.h +mjit_compiler.$(OBJEXT): {$(VPATH)}config.h +mjit_compiler.$(OBJEXT): {$(VPATH)}constant.h +mjit_compiler.$(OBJEXT): {$(VPATH)}debug_counter.h +mjit_compiler.$(OBJEXT): {$(VPATH)}defines.h +mjit_compiler.$(OBJEXT): {$(VPATH)}id.h +mjit_compiler.$(OBJEXT): {$(VPATH)}id_table.h +mjit_compiler.$(OBJEXT): {$(VPATH)}insns.def +mjit_compiler.$(OBJEXT): {$(VPATH)}insns.inc +mjit_compiler.$(OBJEXT): {$(VPATH)}insns_info.inc +mjit_compiler.$(OBJEXT): {$(VPATH)}intern.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/abi.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/anyargs.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/char.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/double.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/fixnum.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/gid_t.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/int.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/intptr_t.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/long.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/long_long.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/mode_t.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/off_t.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/pid_t.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/short.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/size_t.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/st_data_t.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/arithmetic/uid_t.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/assume.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/alloc_size.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/artificial.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/cold.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/const.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/constexpr.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/deprecated.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/diagnose_if.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/enum_extensibility.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/error.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/flag_enum.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/forceinline.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/format.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/maybe_unused.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/noalias.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/nodiscard.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/noexcept.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/noinline.h +mjit_compiler.$(OBJEXT): 
{$(VPATH)}internal/attr/nonnull.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/noreturn.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/pure.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/restrict.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/returns_nonnull.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/warning.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/attr/weakref.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/cast.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/compiler_is.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/compiler_is/apple.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/compiler_is/clang.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/compiler_is/gcc.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/compiler_is/intel.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/compiler_is/msvc.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/compiler_is/sunpro.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/compiler_since.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/config.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/constant_p.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/rarray.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/rbasic.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/rbignum.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/rclass.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/rdata.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/rfile.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/rhash.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/robject.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/rregexp.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/rstring.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/rstruct.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/core/rtypeddata.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/ctype.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/dllexport.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/dosish.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/error.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/eval.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/event.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/fl_type.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/gc.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/glob.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/globals.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/has/attribute.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/has/builtin.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/has/c_attribute.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/has/cpp_attribute.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/has/declspec_attribute.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/has/extension.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/has/feature.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/has/warning.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/array.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/bignum.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/class.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/compar.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/complex.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/cont.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/dir.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/enum.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/enumerator.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/error.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/eval.h 
+mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/file.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/gc.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/hash.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/io.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/load.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/marshal.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/numeric.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/object.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/parse.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/proc.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/process.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/random.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/range.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/rational.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/re.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/ruby.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/select.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/select/largesize.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/signal.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/sprintf.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/string.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/struct.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/thread.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/time.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/variable.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/intern/vm.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/interpreter.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/iterator.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/memory.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/method.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/module.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/newobj.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/rgengc.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/scan_args.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/special_consts.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/static_assert.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/stdalign.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/stdbool.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/symbol.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/value.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/value_type.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/variable.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/warning_push.h +mjit_compiler.$(OBJEXT): {$(VPATH)}internal/xmalloc.h +mjit_compiler.$(OBJEXT): {$(VPATH)}iseq.h +mjit_compiler.$(OBJEXT): {$(VPATH)}method.h +mjit_compiler.$(OBJEXT): {$(VPATH)}missing.h +mjit_compiler.$(OBJEXT): {$(VPATH)}mjit.h +mjit_compiler.$(OBJEXT): {$(VPATH)}mjit_compile.inc +mjit_compiler.$(OBJEXT): {$(VPATH)}mjit_compiler.c +mjit_compiler.$(OBJEXT): {$(VPATH)}mjit_unit.h +mjit_compiler.$(OBJEXT): {$(VPATH)}node.h +mjit_compiler.$(OBJEXT): {$(VPATH)}ruby_assert.h +mjit_compiler.$(OBJEXT): {$(VPATH)}ruby_atomic.h +mjit_compiler.$(OBJEXT): {$(VPATH)}st.h +mjit_compiler.$(OBJEXT): {$(VPATH)}subst.h +mjit_compiler.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h +mjit_compiler.$(OBJEXT): {$(VPATH)}thread_native.h +mjit_compiler.$(OBJEXT): {$(VPATH)}vm_callinfo.h +mjit_compiler.$(OBJEXT): {$(VPATH)}vm_core.h +mjit_compiler.$(OBJEXT): {$(VPATH)}vm_exec.h +mjit_compiler.$(OBJEXT): {$(VPATH)}vm_insnhelper.h +mjit_compiler.$(OBJEXT): {$(VPATH)}vm_opts.h +mjit_compiler.$(OBJEXT): {$(VPATH)}yjit.h 
node.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h node.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h node.$(OBJEXT): $(CCAN_DIR)/list/list.h diff --git a/mjit_compile.c b/mjit_compile.c deleted file mode 100644 index e85eaaa6cb..0000000000 --- a/mjit_compile.c +++ /dev/null @@ -1,596 +0,0 @@ -/********************************************************************** - - mjit_compile.c - MRI method JIT compiler - - Copyright (C) 2017 Takashi Kokubun . - -**********************************************************************/ - -// NOTE: All functions in this file are executed on MJIT worker. So don't -// call Ruby methods (C functions that may call rb_funcall) or trigger -// GC (using ZALLOC, xmalloc, xfree, etc.) in this file. - -#include "ruby/internal/config.h" // defines USE_MJIT - -#if USE_MJIT - -#include "internal.h" -#include "internal/compile.h" -#include "internal/hash.h" -#include "internal/object.h" -#include "internal/variable.h" -#include "mjit.h" -#include "mjit_unit.h" -#include "yjit.h" -#include "vm_core.h" -#include "vm_callinfo.h" -#include "vm_exec.h" -#include "vm_insnhelper.h" - -#include "builtin.h" -#include "insns.inc" -#include "insns_info.inc" - -// Macros to check if a position is already compiled using compile_status.stack_size_for_pos -#define NOT_COMPILED_STACK_SIZE -1 -#define ALREADY_COMPILED_P(status, pos) (status->stack_size_for_pos[pos] != NOT_COMPILED_STACK_SIZE) - -// For propagating information needed for lazily pushing a frame. -struct inlined_call_context { - int orig_argc; // ci->orig_argc - VALUE me; // vm_cc_cme(cc) - int param_size; // def_iseq_ptr(vm_cc_cme(cc)->def)->body->param.size - int local_size; // def_iseq_ptr(vm_cc_cme(cc)->def)->body->local_table_size -}; - -// Storage to keep compiler's status. This should have information -// which is global during one `mjit_compile` call. Ones conditional -// in each branch should be stored in `compile_branch`. -struct compile_status { - bool success; // has true if compilation has had no issue - int *stack_size_for_pos; // stack_size_for_pos[pos] has stack size for the position (otherwise -1) - // If true, JIT-ed code will use local variables to store pushed values instead of - // using VM's stack and moving stack pointer. - bool local_stack_p; - // Safely-accessible ivar cache entries copied from main thread. - union iseq_inline_storage_entry *is_entries; - // Index of call cache entries captured to compiled_iseq to be marked on GC - int cc_entries_index; - // A pointer to root (i.e. not inlined) iseq being compiled. - const struct rb_iseq_constant_body *compiled_iseq; - int compiled_id; // Just a copy of compiled_iseq->jit_unit->id - // Mutated optimization levels - struct rb_mjit_compile_info *compile_info; - bool merge_ivar_guards_p; // If true, merge guards of ivar accesses - rb_serial_t ivar_serial; // ic_serial of IVC in is_entries (used only when merge_ivar_guards_p) - size_t max_ivar_index; // Max IVC index in is_entries (used only when merge_ivar_guards_p) - // If `inlined_iseqs[pos]` is not NULL, `mjit_compile_body` tries to inline ISeq there. - const struct rb_iseq_constant_body **inlined_iseqs; - struct inlined_call_context inline_context; -}; - -// Storage to keep data which is consistent in each conditional branch. -// This is created and used for one `compile_insns` call and its values -// should be copied for extra `compile_insns` call. 
-struct compile_branch { - unsigned int stack_size; // this simulates sp (stack pointer) of YARV - bool finish_p; // if true, compilation in this branch should stop and let another branch to be compiled -}; - -struct case_dispatch_var { - FILE *f; - unsigned int base_pos; - VALUE last_value; -}; - -static size_t -call_data_index(CALL_DATA cd, const struct rb_iseq_constant_body *body) -{ - return cd - body->call_data; -} - -// Using this function to refer to cc_entries allocated by `mjit_capture_cc_entries` -// instead of storing cc_entries in status directly so that we always refer to a new address -// returned by `realloc` inside it. -static const struct rb_callcache ** -captured_cc_entries(const struct compile_status *status) -{ - VM_ASSERT(status->cc_entries_index != -1); - return status->compiled_iseq->jit_unit->cc_entries + status->cc_entries_index; -} - -// Returns true if call cache is still not obsoleted and vm_cc_cme(cc)->def->type is available. -static bool -has_valid_method_type(CALL_CACHE cc) -{ - return vm_cc_cme(cc) != NULL; -} - -// Returns true if MJIT thinks this cc's opt_* insn may fallback to opt_send_without_block. -static bool -has_cache_for_send(CALL_CACHE cc, int insn) -{ - extern bool rb_vm_opt_cfunc_p(CALL_CACHE cc, int insn); - return has_valid_method_type(cc) && - !(vm_cc_cme(cc)->def->type == VM_METHOD_TYPE_CFUNC && rb_vm_opt_cfunc_p(cc, insn)); -} - -// Returns true if iseq can use fastpath for setup, otherwise NULL. This becomes true in the same condition -// as CC_SET_FASTPATH (in vm_callee_setup_arg) is called from vm_call_iseq_setup. -static bool -fastpath_applied_iseq_p(const CALL_INFO ci, const CALL_CACHE cc, const rb_iseq_t *iseq) -{ - extern bool rb_simple_iseq_p(const rb_iseq_t *iseq); - return iseq != NULL - && !(vm_ci_flag(ci) & VM_CALL_KW_SPLAT) && rb_simple_iseq_p(iseq) // Top of vm_callee_setup_arg. In this case, opt_pc is 0. - && vm_ci_argc(ci) == (unsigned int)ISEQ_BODY(iseq)->param.lead_num // exclude argument_arity_error (assumption: `calling->argc == ci->orig_argc` in send insns) - && vm_call_iseq_optimizable_p(ci, cc); // CC_SET_FASTPATH condition -} - -// Return true if an object of the klass may be a special const. See: rb_class_of -static bool -maybe_special_const_class_p(const VALUE klass) -{ - return klass == rb_cFalseClass - || klass == rb_cNilClass - || klass == rb_cTrueClass - || klass == rb_cInteger - || klass == rb_cSymbol - || klass == rb_cFloat; -} - -static int -compile_case_dispatch_each(VALUE key, VALUE value, VALUE arg) -{ - struct case_dispatch_var *var = (struct case_dispatch_var *)arg; - unsigned int offset; - - if (var->last_value != value) { - offset = FIX2INT(value); - var->last_value = value; - fprintf(var->f, " case %d:\n", offset); - fprintf(var->f, " goto label_%d;\n", var->base_pos + offset); - fprintf(var->f, " break;\n"); - } - return ST_CONTINUE; -} - -// Calling rb_id2str in MJIT worker causes random SEGV. So this is disabled by default. 
-static void -comment_id(FILE *f, ID id) -{ -#ifdef MJIT_COMMENT_ID - VALUE name = rb_id2str(id); - const char *p, *e; - char c, prev = '\0'; - - if (!name) return; - p = RSTRING_PTR(name); - e = RSTRING_END(name); - fputs("/* :\"", f); - for (; p < e; ++p) { - switch (c = *p) { - case '*': case '/': if (prev != (c ^ ('/' ^ '*'))) break; - case '\\': case '"': fputc('\\', f); - } - fputc(c, f); - prev = c; - } - fputs("\" */", f); -#endif -} - -static void compile_insns(FILE *f, const struct rb_iseq_constant_body *body, unsigned int stack_size, - unsigned int pos, struct compile_status *status); - -// Main function of JIT compilation, vm_exec_core counterpart for JIT. Compile one insn to `f`, may modify -// b->stack_size and return next position. -// -// When you add a new instruction to insns.def, it would be nice to have JIT compilation support here but -// it's optional. This JIT compiler just ignores ISeq which includes unknown instruction, and ISeq which -// does not have it can be compiled as usual. -static unsigned int -compile_insn(FILE *f, const struct rb_iseq_constant_body *body, const int insn, const VALUE *operands, - const unsigned int pos, struct compile_status *status, struct compile_branch *b) -{ - unsigned int next_pos = pos + insn_len(insn); - -/*****************/ - #include "mjit_compile.inc" -/*****************/ - - // If next_pos is already compiled and this branch is not finished yet, - // next instruction won't be compiled in C code next and will need `goto`. - if (!b->finish_p && next_pos < body->iseq_size && ALREADY_COMPILED_P(status, next_pos)) { - fprintf(f, "goto label_%d;\n", next_pos); - - // Verify stack size assumption is the same among multiple branches - if ((unsigned int)status->stack_size_for_pos[next_pos] != b->stack_size) { - if (mjit_opts.warnings || mjit_opts.verbose) - fprintf(stderr, "MJIT warning: JIT stack assumption is not the same between branches (%d != %u)\n", - status->stack_size_for_pos[next_pos], b->stack_size); - status->success = false; - } - } - - return next_pos; -} - -// Compile one conditional branch. If it has branchXXX insn, this should be -// called multiple times for each branch. -static void -compile_insns(FILE *f, const struct rb_iseq_constant_body *body, unsigned int stack_size, - unsigned int pos, struct compile_status *status) -{ - struct compile_branch branch; - - branch.stack_size = stack_size; - branch.finish_p = false; - - while (pos < body->iseq_size && !ALREADY_COMPILED_P(status, pos) && !branch.finish_p) { - int insn = rb_vm_insn_decode(body->iseq_encoded[pos]); - status->stack_size_for_pos[pos] = (int)branch.stack_size; - - fprintf(f, "\nlabel_%d: /* %s */\n", pos, insn_name(insn)); - pos = compile_insn(f, body, insn, body->iseq_encoded + (pos+1), pos, status, &branch); - if (status->success && branch.stack_size > body->stack_max) { - if (mjit_opts.warnings || mjit_opts.verbose) - fprintf(stderr, "MJIT warning: JIT stack size (%d) exceeded its max size (%d)\n", branch.stack_size, body->stack_max); - status->success = false; - } - if (!status->success) - break; - } -} - -// Print the block to cancel inlined method call. It's supporting only `opt_send_without_block` for now. -static void -compile_inlined_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct inlined_call_context *inline_context) -{ - fprintf(f, "\ncancel:\n"); - fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel);\n"); - fprintf(f, " rb_mjit_recompile_inlining(original_iseq);\n"); - - // Swap pc/sp set on cancel with original pc/sp. 
- fprintf(f, " const VALUE *current_pc = reg_cfp->pc;\n"); - fprintf(f, " VALUE *current_sp = reg_cfp->sp;\n"); - fprintf(f, " reg_cfp->pc = orig_pc;\n"); - fprintf(f, " reg_cfp->sp = orig_sp;\n\n"); - - // Lazily push the current call frame. - fprintf(f, " struct rb_calling_info calling;\n"); - fprintf(f, " calling.block_handler = VM_BLOCK_HANDLER_NONE;\n"); // assumes `opt_send_without_block` - fprintf(f, " calling.argc = %d;\n", inline_context->orig_argc); - fprintf(f, " calling.recv = reg_cfp->self;\n"); - fprintf(f, " reg_cfp->self = orig_self;\n"); - fprintf(f, " vm_call_iseq_setup_normal(ec, reg_cfp, &calling, (const rb_callable_method_entry_t *)0x%"PRIxVALUE", 0, %d, %d);\n\n", - inline_context->me, inline_context->param_size, inline_context->local_size); // fastpath_applied_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE - - // Start usual cancel from here. - fprintf(f, " reg_cfp = ec->cfp;\n"); // work on the new frame - fprintf(f, " reg_cfp->pc = current_pc;\n"); - fprintf(f, " reg_cfp->sp = current_sp;\n"); - for (unsigned int i = 0; i < body->stack_max; i++) { // should be always `status->local_stack_p` - fprintf(f, " *(vm_base_ptr(reg_cfp) + %d) = stack[%d];\n", i, i); - } - // We're not just returning Qundef here so that caller's normal cancel handler can - // push back `stack` to `cfp->sp`. - fprintf(f, " return vm_exec(ec, false);\n"); -} - -// Print the block to cancel JIT execution. -static void -compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct compile_status *status) -{ - if (status->inlined_iseqs == NULL) { // the current ISeq is being inlined - compile_inlined_cancel_handler(f, body, &status->inline_context); - return; - } - - fprintf(f, "\nsend_cancel:\n"); - fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel_send_inline);\n"); - fprintf(f, " rb_mjit_recompile_send(original_iseq);\n"); - fprintf(f, " goto cancel;\n"); - - fprintf(f, "\nivar_cancel:\n"); - fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel_ivar_inline);\n"); - fprintf(f, " rb_mjit_recompile_ivar(original_iseq);\n"); - fprintf(f, " goto cancel;\n"); - - fprintf(f, "\nexivar_cancel:\n"); - fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel_exivar_inline);\n"); - fprintf(f, " rb_mjit_recompile_exivar(original_iseq);\n"); - fprintf(f, " goto cancel;\n"); - - fprintf(f, "\nconst_cancel:\n"); - fprintf(f, " rb_mjit_recompile_const(original_iseq);\n"); - fprintf(f, " goto cancel;\n"); - - fprintf(f, "\ncancel:\n"); - fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel);\n"); - if (status->local_stack_p) { - for (unsigned int i = 0; i < body->stack_max; i++) { - fprintf(f, " *(vm_base_ptr(reg_cfp) + %d) = stack[%d];\n", i, i); - } - } - fprintf(f, " return Qundef;\n"); -} - -extern int -mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const struct rb_iseq_constant_body *captured_iseq); - -// Copy current is_entries and use it throughout the current compilation consistently. -// While ic->entry has been immutable since https://github.com/ruby/ruby/pull/3662, -// we still need this to avoid a race condition between entries and ivar_serial/max_ivar_index. 
-static void -mjit_capture_is_entries(const struct rb_iseq_constant_body *body, union iseq_inline_storage_entry *is_entries) -{ - if (is_entries == NULL) - return; - memcpy(is_entries, body->is_entries, sizeof(union iseq_inline_storage_entry) * ISEQ_IS_SIZE(body)); -} - -static bool -mjit_compile_body(FILE *f, const rb_iseq_t *iseq, struct compile_status *status) -{ - const struct rb_iseq_constant_body *body = ISEQ_BODY(iseq); - status->success = true; - status->local_stack_p = !body->catch_except_p; - - if (status->local_stack_p) { - fprintf(f, " VALUE stack[%d];\n", body->stack_max); - } - else { - fprintf(f, " VALUE *stack = reg_cfp->sp;\n"); - } - if (status->inlined_iseqs != NULL) // i.e. compile root - fprintf(f, " static const rb_iseq_t *original_iseq = (const rb_iseq_t *)0x%"PRIxVALUE";\n", (VALUE)iseq); - fprintf(f, " static const VALUE *const original_body_iseq = (VALUE *)0x%"PRIxVALUE";\n", - (VALUE)body->iseq_encoded); - fprintf(f, " VALUE cfp_self = reg_cfp->self;\n"); // cache self across the method - fprintf(f, "#undef GET_SELF\n"); - fprintf(f, "#define GET_SELF() cfp_self\n"); - - // Generate merged ivar guards first if needed - if (!status->compile_info->disable_ivar_cache && status->merge_ivar_guards_p) { - fprintf(f, " if (UNLIKELY(!(RB_TYPE_P(GET_SELF(), T_OBJECT) && (rb_serial_t)%"PRI_SERIALT_PREFIX"u == RCLASS_SERIAL(RBASIC(GET_SELF())->klass) &&", status->ivar_serial); -#if USE_RVARGC - fprintf(f, "%"PRIuSIZE" < ROBJECT_NUMIV(GET_SELF())", status->max_ivar_index); // index < ROBJECT_NUMIV(obj) -#else - if (status->max_ivar_index >= ROBJECT_EMBED_LEN_MAX) { - fprintf(f, "%"PRIuSIZE" < ROBJECT_NUMIV(GET_SELF())", status->max_ivar_index); // index < ROBJECT_NUMIV(obj) && !RB_FL_ANY_RAW(obj, ROBJECT_EMBED) - } - else { - fprintf(f, "ROBJECT_EMBED_LEN_MAX == ROBJECT_NUMIV(GET_SELF())"); // index < ROBJECT_NUMIV(obj) && RB_FL_ANY_RAW(obj, ROBJECT_EMBED) - } -#endif - fprintf(f, "))) {\n"); - fprintf(f, " goto ivar_cancel;\n"); - fprintf(f, " }\n"); - } - - // Simulate `opt_pc` in setup_parameters_complex. Other PCs which may be passed by catch tables - // are not considered since vm_exec doesn't call jit_exec for catch tables. - if (body->param.flags.has_opt) { - int i; - fprintf(f, "\n"); - fprintf(f, " switch (reg_cfp->pc - ISEQ_BODY(reg_cfp->iseq)->iseq_encoded) {\n"); - for (i = 0; i <= body->param.opt_num; i++) { - VALUE pc_offset = body->param.opt_table[i]; - fprintf(f, " case %"PRIdVALUE":\n", pc_offset); - fprintf(f, " goto label_%"PRIdVALUE";\n", pc_offset); - } - fprintf(f, " }\n"); - } - - compile_insns(f, body, 0, 0, status); - compile_cancel_handler(f, body, status); - fprintf(f, "#undef GET_SELF"); - return status->success; -} - -// Return true if the ISeq can be inlined without pushing a new control frame. -static bool -inlinable_iseq_p(const struct rb_iseq_constant_body *body) -{ - // 1) If catch_except_p, caller frame should be preserved when callee catches an exception. - // Then we need to wrap `vm_exec()` but then we can't inline the call inside it. - // - // 2) If `body->catch_except_p` is false and `handles_sp?` of an insn is false, - // sp is not moved as we assume `status->local_stack_p = !body->catch_except_p`. - // - // 3) If `body->catch_except_p` is false and `always_leaf?` of an insn is true, - // pc is not moved. 
- if (body->catch_except_p) - return false; - - unsigned int pos = 0; - while (pos < body->iseq_size) { - int insn = rb_vm_insn_decode(body->iseq_encoded[pos]); - // All insns in the ISeq except `leave` (to be overridden in the inlined code) - // should meet following strong assumptions: - // * Do not require `cfp->sp` motion - // * Do not move `cfp->pc` - // * Do not read any `cfp->pc` - if (insn == BIN(invokebuiltin) || insn == BIN(opt_invokebuiltin_delegate) || insn == BIN(opt_invokebuiltin_delegate_leave)) { - // builtin insn's inlinability is handled by `Primitive.attr! 'inline'` per iseq - if (!body->builtin_inline_p) - return false; - } - else if (insn != BIN(leave) && insn_may_depend_on_sp_or_pc(insn, body->iseq_encoded + (pos + 1))) - return false; - // At this moment, `cfp->ep` in an inlined method is not working. - switch (insn) { - case BIN(getlocal): - case BIN(getlocal_WC_0): - case BIN(getlocal_WC_1): - case BIN(setlocal): - case BIN(setlocal_WC_0): - case BIN(setlocal_WC_1): - case BIN(getblockparam): - case BIN(getblockparamproxy): - case BIN(setblockparam): - return false; - } - pos += insn_len(insn); - } - return true; -} - -// Return an iseq pointer if cc has inlinable iseq. -const rb_iseq_t * -rb_mjit_inlinable_iseq(const struct rb_callinfo *ci, const struct rb_callcache *cc) -{ - const rb_iseq_t *iseq; - if (has_valid_method_type(cc) && - !(vm_ci_flag(ci) & VM_CALL_TAILCALL) && // inlining only non-tailcall path - vm_cc_cme(cc)->def->type == VM_METHOD_TYPE_ISEQ && - fastpath_applied_iseq_p(ci, cc, iseq = def_iseq_ptr(vm_cc_cme(cc)->def)) && - // CC_SET_FASTPATH in vm_callee_setup_arg - inlinable_iseq_p(ISEQ_BODY(iseq))) { - return iseq; - } - return NULL; -} - -static void -init_ivar_compile_status(const struct rb_iseq_constant_body *body, struct compile_status *status) -{ - mjit_capture_is_entries(body, status->is_entries); - - int num_ivars = 0; - unsigned int pos = 0; - status->max_ivar_index = 0; - status->ivar_serial = 0; - - while (pos < body->iseq_size) { - int insn = rb_vm_insn_decode(body->iseq_encoded[pos]); - if (insn == BIN(getinstancevariable) || insn == BIN(setinstancevariable)) { - IVC ic = (IVC)body->iseq_encoded[pos+2]; - IVC ic_copy = &(status->is_entries + ((union iseq_inline_storage_entry *)ic - body->is_entries))->iv_cache; - if (ic_copy->entry) { // Only initialized (ic_serial > 0) IVCs are optimized - num_ivars++; - - if (status->max_ivar_index < ic_copy->entry->index) { - status->max_ivar_index = ic_copy->entry->index; - } - - if (status->ivar_serial == 0) { - status->ivar_serial = ic_copy->entry->class_serial; - } - else if (status->ivar_serial != ic_copy->entry->class_serial) { - // Multiple classes have used this ISeq. Give up assuming one serial. - status->merge_ivar_guards_p = false; - return; - } - } - } - pos += insn_len(insn); - } - status->merge_ivar_guards_p = status->ivar_serial > 0 && num_ivars >= 2; -} - -// This needs to be macro instead of a function because it's using `alloca`. -#define INIT_COMPILE_STATUS(status, body, compile_root_p) do { \ - status = (struct compile_status){ \ - .stack_size_for_pos = (int *)alloca(sizeof(int) * body->iseq_size), \ - .inlined_iseqs = compile_root_p ? \ - alloca(sizeof(const struct rb_iseq_constant_body *) * body->iseq_size) : NULL, \ - .is_entries = (ISEQ_IS_SIZE(body) > 0) ? \ - alloca(sizeof(union iseq_inline_storage_entry) * ISEQ_IS_SIZE(body)) : NULL, \ - .cc_entries_index = (body->ci_size > 0) ? 
\ - mjit_capture_cc_entries(status.compiled_iseq, body) : -1, \ - .compiled_id = status.compiled_id, \ - .compiled_iseq = status.compiled_iseq, \ - .compile_info = compile_root_p ? \ - rb_mjit_iseq_compile_info(body) : alloca(sizeof(struct rb_mjit_compile_info)) \ - }; \ - memset(status.stack_size_for_pos, NOT_COMPILED_STACK_SIZE, sizeof(int) * body->iseq_size); \ - if (compile_root_p) \ - memset((void *)status.inlined_iseqs, 0, sizeof(const struct rb_iseq_constant_body *) * body->iseq_size); \ - else \ - memset(status.compile_info, 0, sizeof(struct rb_mjit_compile_info)); \ -} while (0) - -static bool -precompile_inlinable_child_iseq(FILE *f, const rb_iseq_t *child_iseq, struct compile_status *status, - const struct rb_callinfo *ci, const struct rb_callcache *cc, unsigned int pos) -{ - struct compile_status child_status = { .compiled_iseq = status->compiled_iseq, .compiled_id = status->compiled_id }; - INIT_COMPILE_STATUS(child_status, ISEQ_BODY(child_iseq), false); - child_status.inline_context = (struct inlined_call_context){ - .orig_argc = vm_ci_argc(ci), - .me = (VALUE)vm_cc_cme(cc), - .param_size = ISEQ_BODY(child_iseq)->param.size, - .local_size = ISEQ_BODY(child_iseq)->local_table_size - }; - if (ISEQ_BODY(child_iseq)->ci_size > 0 && child_status.cc_entries_index == -1) { - return false; - } - init_ivar_compile_status(ISEQ_BODY(child_iseq), &child_status); - - fprintf(f, "ALWAYS_INLINE(static VALUE _mjit%d_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq));\n", status->compiled_id, pos); - fprintf(f, "static inline VALUE\n_mjit%d_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq)\n{\n", status->compiled_id, pos); - fprintf(f, " const VALUE *orig_pc = reg_cfp->pc;\n"); - fprintf(f, " VALUE *orig_sp = reg_cfp->sp;\n"); - bool success = mjit_compile_body(f, child_iseq, &child_status); - fprintf(f, "\n} /* end of _mjit%d_inlined_%d */\n\n", status->compiled_id, pos); - - return success; -} - -// Compile inlinable ISeqs to C code in `f`. It returns true if it succeeds to compile them. -static bool -precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status *status) -{ - const struct rb_iseq_constant_body *body = ISEQ_BODY(iseq); - unsigned int pos = 0; - while (pos < body->iseq_size) { - int insn = rb_vm_insn_decode(body->iseq_encoded[pos]); - if (insn == BIN(opt_send_without_block) || insn == BIN(opt_size)) { // `compile_inlined_cancel_handler` supports only `opt_send_without_block` - CALL_DATA cd = (CALL_DATA)body->iseq_encoded[pos + 1]; - const struct rb_callinfo *ci = cd->ci; - const struct rb_callcache *cc = captured_cc_entries(status)[call_data_index(cd, body)]; // use copy to avoid race condition - - const rb_iseq_t *child_iseq; - if ((child_iseq = rb_mjit_inlinable_iseq(ci, cc)) != NULL) { - status->inlined_iseqs[pos] = ISEQ_BODY(child_iseq); - - if (mjit_opts.verbose >= 1) // print beforehand because ISeq may be GCed during copy job. 
- fprintf(stderr, "JIT inline: %s@%s:%d => %s@%s:%d\n", - RSTRING_PTR(ISEQ_BODY(child_iseq)->location.label), - RSTRING_PTR(rb_iseq_path(child_iseq)), FIX2INT(ISEQ_BODY(child_iseq)->location.first_lineno), - RSTRING_PTR(ISEQ_BODY(iseq)->location.label), - RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(ISEQ_BODY(iseq)->location.first_lineno)); - if (!precompile_inlinable_child_iseq(f, child_iseq, status, ci, cc, pos)) - return false; - } - } - pos += insn_len(insn); - } - return true; -} - -// Compile ISeq to C code in `f`. It returns true if it succeeds to compile. -bool -mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname, int id) -{ - struct compile_status status = { .compiled_iseq = ISEQ_BODY(iseq), .compiled_id = id }; - INIT_COMPILE_STATUS(status, ISEQ_BODY(iseq), true); - if (ISEQ_BODY(iseq)->ci_size > 0 && status.cc_entries_index == -1) { - return false; - } - init_ivar_compile_status(ISEQ_BODY(iseq), &status); - - if (!status.compile_info->disable_send_cache && !status.compile_info->disable_inlining) { - if (!precompile_inlinable_iseqs(f, iseq, &status)) - return false; - } - - fprintf(f, "VALUE\n%s(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)\n{\n", funcname); - bool success = mjit_compile_body(f, iseq, &status); - fprintf(f, "\n} // end of %s\n", funcname); - return success; -} - -#endif // USE_MJIT diff --git a/mjit_compiler.c b/mjit_compiler.c new file mode 100644 index 0000000000..c02397e53c --- /dev/null +++ b/mjit_compiler.c @@ -0,0 +1,596 @@ +/********************************************************************** + + mjit_compiler.c - MRI method JIT compiler + + Copyright (C) 2017 Takashi Kokubun . + +**********************************************************************/ + +// NOTE: All functions in this file are executed on MJIT worker. So don't +// call Ruby methods (C functions that may call rb_funcall) or trigger +// GC (using ZALLOC, xmalloc, xfree, etc.) in this file. + +#include "ruby/internal/config.h" // defines USE_MJIT + +#if USE_MJIT + +#include "internal.h" +#include "internal/compile.h" +#include "internal/hash.h" +#include "internal/object.h" +#include "internal/variable.h" +#include "mjit.h" +#include "mjit_unit.h" +#include "yjit.h" +#include "vm_core.h" +#include "vm_callinfo.h" +#include "vm_exec.h" +#include "vm_insnhelper.h" + +#include "builtin.h" +#include "insns.inc" +#include "insns_info.inc" + +// Macros to check if a position is already compiled using compile_status.stack_size_for_pos +#define NOT_COMPILED_STACK_SIZE -1 +#define ALREADY_COMPILED_P(status, pos) (status->stack_size_for_pos[pos] != NOT_COMPILED_STACK_SIZE) + +// For propagating information needed for lazily pushing a frame. +struct inlined_call_context { + int orig_argc; // ci->orig_argc + VALUE me; // vm_cc_cme(cc) + int param_size; // def_iseq_ptr(vm_cc_cme(cc)->def)->body->param.size + int local_size; // def_iseq_ptr(vm_cc_cme(cc)->def)->body->local_table_size +}; + +// Storage to keep compiler's status. This should have information +// which is global during one `mjit_compile` call. Ones conditional +// in each branch should be stored in `compile_branch`. +struct compile_status { + bool success; // has true if compilation has had no issue + int *stack_size_for_pos; // stack_size_for_pos[pos] has stack size for the position (otherwise -1) + // If true, JIT-ed code will use local variables to store pushed values instead of + // using VM's stack and moving stack pointer. + bool local_stack_p; + // Safely-accessible ivar cache entries copied from main thread. 
+ union iseq_inline_storage_entry *is_entries; + // Index of call cache entries captured to compiled_iseq to be marked on GC + int cc_entries_index; + // A pointer to root (i.e. not inlined) iseq being compiled. + const struct rb_iseq_constant_body *compiled_iseq; + int compiled_id; // Just a copy of compiled_iseq->jit_unit->id + // Mutated optimization levels + struct rb_mjit_compile_info *compile_info; + bool merge_ivar_guards_p; // If true, merge guards of ivar accesses + rb_serial_t ivar_serial; // ic_serial of IVC in is_entries (used only when merge_ivar_guards_p) + size_t max_ivar_index; // Max IVC index in is_entries (used only when merge_ivar_guards_p) + // If `inlined_iseqs[pos]` is not NULL, `mjit_compile_body` tries to inline ISeq there. + const struct rb_iseq_constant_body **inlined_iseqs; + struct inlined_call_context inline_context; +}; + +// Storage to keep data which is consistent in each conditional branch. +// This is created and used for one `compile_insns` call and its values +// should be copied for extra `compile_insns` call. +struct compile_branch { + unsigned int stack_size; // this simulates sp (stack pointer) of YARV + bool finish_p; // if true, compilation in this branch should stop and let another branch to be compiled +}; + +struct case_dispatch_var { + FILE *f; + unsigned int base_pos; + VALUE last_value; +}; + +static size_t +call_data_index(CALL_DATA cd, const struct rb_iseq_constant_body *body) +{ + return cd - body->call_data; +} + +// Using this function to refer to cc_entries allocated by `mjit_capture_cc_entries` +// instead of storing cc_entries in status directly so that we always refer to a new address +// returned by `realloc` inside it. +static const struct rb_callcache ** +captured_cc_entries(const struct compile_status *status) +{ + VM_ASSERT(status->cc_entries_index != -1); + return status->compiled_iseq->jit_unit->cc_entries + status->cc_entries_index; +} + +// Returns true if call cache is still not obsoleted and vm_cc_cme(cc)->def->type is available. +static bool +has_valid_method_type(CALL_CACHE cc) +{ + return vm_cc_cme(cc) != NULL; +} + +// Returns true if MJIT thinks this cc's opt_* insn may fallback to opt_send_without_block. +static bool +has_cache_for_send(CALL_CACHE cc, int insn) +{ + extern bool rb_vm_opt_cfunc_p(CALL_CACHE cc, int insn); + return has_valid_method_type(cc) && + !(vm_cc_cme(cc)->def->type == VM_METHOD_TYPE_CFUNC && rb_vm_opt_cfunc_p(cc, insn)); +} + +// Returns true if iseq can use fastpath for setup, otherwise NULL. This becomes true in the same condition +// as CC_SET_FASTPATH (in vm_callee_setup_arg) is called from vm_call_iseq_setup. +static bool +fastpath_applied_iseq_p(const CALL_INFO ci, const CALL_CACHE cc, const rb_iseq_t *iseq) +{ + extern bool rb_simple_iseq_p(const rb_iseq_t *iseq); + return iseq != NULL + && !(vm_ci_flag(ci) & VM_CALL_KW_SPLAT) && rb_simple_iseq_p(iseq) // Top of vm_callee_setup_arg. In this case, opt_pc is 0. + && vm_ci_argc(ci) == (unsigned int)ISEQ_BODY(iseq)->param.lead_num // exclude argument_arity_error (assumption: `calling->argc == ci->orig_argc` in send insns) + && vm_call_iseq_optimizable_p(ci, cc); // CC_SET_FASTPATH condition +} + +// Return true if an object of the klass may be a special const. 
See: rb_class_of +static bool +maybe_special_const_class_p(const VALUE klass) +{ + return klass == rb_cFalseClass + || klass == rb_cNilClass + || klass == rb_cTrueClass + || klass == rb_cInteger + || klass == rb_cSymbol + || klass == rb_cFloat; +} + +static int +compile_case_dispatch_each(VALUE key, VALUE value, VALUE arg) +{ + struct case_dispatch_var *var = (struct case_dispatch_var *)arg; + unsigned int offset; + + if (var->last_value != value) { + offset = FIX2INT(value); + var->last_value = value; + fprintf(var->f, " case %d:\n", offset); + fprintf(var->f, " goto label_%d;\n", var->base_pos + offset); + fprintf(var->f, " break;\n"); + } + return ST_CONTINUE; +} + +// Calling rb_id2str in MJIT worker causes random SEGV. So this is disabled by default. +static void +comment_id(FILE *f, ID id) +{ +#ifdef MJIT_COMMENT_ID + VALUE name = rb_id2str(id); + const char *p, *e; + char c, prev = '\0'; + + if (!name) return; + p = RSTRING_PTR(name); + e = RSTRING_END(name); + fputs("/* :\"", f); + for (; p < e; ++p) { + switch (c = *p) { + case '*': case '/': if (prev != (c ^ ('/' ^ '*'))) break; + case '\\': case '"': fputc('\\', f); + } + fputc(c, f); + prev = c; + } + fputs("\" */", f); +#endif +} + +static void compile_insns(FILE *f, const struct rb_iseq_constant_body *body, unsigned int stack_size, + unsigned int pos, struct compile_status *status); + +// Main function of JIT compilation, vm_exec_core counterpart for JIT. Compile one insn to `f`, may modify +// b->stack_size and return next position. +// +// When you add a new instruction to insns.def, it would be nice to have JIT compilation support here but +// it's optional. This JIT compiler just ignores ISeq which includes unknown instruction, and ISeq which +// does not have it can be compiled as usual. +static unsigned int +compile_insn(FILE *f, const struct rb_iseq_constant_body *body, const int insn, const VALUE *operands, + const unsigned int pos, struct compile_status *status, struct compile_branch *b) +{ + unsigned int next_pos = pos + insn_len(insn); + +/*****************/ + #include "mjit_compile.inc" +/*****************/ + + // If next_pos is already compiled and this branch is not finished yet, + // next instruction won't be compiled in C code next and will need `goto`. + if (!b->finish_p && next_pos < body->iseq_size && ALREADY_COMPILED_P(status, next_pos)) { + fprintf(f, "goto label_%d;\n", next_pos); + + // Verify stack size assumption is the same among multiple branches + if ((unsigned int)status->stack_size_for_pos[next_pos] != b->stack_size) { + if (mjit_opts.warnings || mjit_opts.verbose) + fprintf(stderr, "MJIT warning: JIT stack assumption is not the same between branches (%d != %u)\n", + status->stack_size_for_pos[next_pos], b->stack_size); + status->success = false; + } + } + + return next_pos; +} + +// Compile one conditional branch. If it has branchXXX insn, this should be +// called multiple times for each branch. 
+// Compile one conditional branch. If it has branchXXX insn, this should be
+// called multiple times for each branch.
+static void
+compile_insns(FILE *f, const struct rb_iseq_constant_body *body, unsigned int stack_size,
+              unsigned int pos, struct compile_status *status)
+{
+    struct compile_branch branch;
+
+    branch.stack_size = stack_size;
+    branch.finish_p = false;
+
+    while (pos < body->iseq_size && !ALREADY_COMPILED_P(status, pos) && !branch.finish_p) {
+        int insn = rb_vm_insn_decode(body->iseq_encoded[pos]);
+        status->stack_size_for_pos[pos] = (int)branch.stack_size;
+
+        fprintf(f, "\nlabel_%d: /* %s */\n", pos, insn_name(insn));
+        pos = compile_insn(f, body, insn, body->iseq_encoded + (pos+1), pos, status, &branch);
+        if (status->success && branch.stack_size > body->stack_max) {
+            if (mjit_opts.warnings || mjit_opts.verbose)
+                fprintf(stderr, "MJIT warning: JIT stack size (%d) exceeded its max size (%d)\n", branch.stack_size, body->stack_max);
+            status->success = false;
+        }
+        if (!status->success)
+            break;
+    }
+}
+
+// Print the block to cancel inlined method call. It supports only `opt_send_without_block` for now.
+static void
+compile_inlined_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct inlined_call_context *inline_context)
+{
+    fprintf(f, "\ncancel:\n");
+    fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel);\n");
+    fprintf(f, " rb_mjit_recompile_inlining(original_iseq);\n");
+
+    // Swap pc/sp set on cancel with original pc/sp.
+    fprintf(f, " const VALUE *current_pc = reg_cfp->pc;\n");
+    fprintf(f, " VALUE *current_sp = reg_cfp->sp;\n");
+    fprintf(f, " reg_cfp->pc = orig_pc;\n");
+    fprintf(f, " reg_cfp->sp = orig_sp;\n\n");
+
+    // Lazily push the current call frame.
+    fprintf(f, " struct rb_calling_info calling;\n");
+    fprintf(f, " calling.block_handler = VM_BLOCK_HANDLER_NONE;\n"); // assumes `opt_send_without_block`
+    fprintf(f, " calling.argc = %d;\n", inline_context->orig_argc);
+    fprintf(f, " calling.recv = reg_cfp->self;\n");
+    fprintf(f, " reg_cfp->self = orig_self;\n");
+    fprintf(f, " vm_call_iseq_setup_normal(ec, reg_cfp, &calling, (const rb_callable_method_entry_t *)0x%"PRIxVALUE", 0, %d, %d);\n\n",
+            inline_context->me, inline_context->param_size, inline_context->local_size); // fastpath_applied_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE
+
+    // Start usual cancel from here.
+    fprintf(f, " reg_cfp = ec->cfp;\n"); // work on the new frame
+    fprintf(f, " reg_cfp->pc = current_pc;\n");
+    fprintf(f, " reg_cfp->sp = current_sp;\n");
+    for (unsigned int i = 0; i < body->stack_max; i++) { // should be always `status->local_stack_p`
+        fprintf(f, " *(vm_base_ptr(reg_cfp) + %d) = stack[%d];\n", i, i);
+    }
+    // We're not just returning Qundef here so that caller's normal cancel handler can
+    // push back `stack` to `cfp->sp`.
+    fprintf(f, " return vm_exec(ec, false);\n");
+}
+
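Reconstructed from the `fprintf` calls in `compile_inlined_cancel_handler` above, the cancel block emitted at the end of an inlined function looks roughly like the following; the method entry address, argc, param/local sizes, and stack depth are illustrative:

    cancel:
        RB_DEBUG_COUNTER_INC(mjit_cancel);
        rb_mjit_recompile_inlining(original_iseq);
        const VALUE *current_pc = reg_cfp->pc;
        VALUE *current_sp = reg_cfp->sp;
        reg_cfp->pc = orig_pc;
        reg_cfp->sp = orig_sp;

        struct rb_calling_info calling;
        calling.block_handler = VM_BLOCK_HANDLER_NONE;
        calling.argc = 1;
        calling.recv = reg_cfp->self;
        reg_cfp->self = orig_self;
        vm_call_iseq_setup_normal(ec, reg_cfp, &calling, (const rb_callable_method_entry_t *)0x7f3a920d1c80, 0, 1, 2);

        reg_cfp = ec->cfp;
        reg_cfp->pc = current_pc;
        reg_cfp->sp = current_sp;
        *(vm_base_ptr(reg_cfp) + 0) = stack[0];
        *(vm_base_ptr(reg_cfp) + 1) = stack[1];
        return vm_exec(ec, false);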
+// Print the block to cancel JIT execution.
+static void
+compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct compile_status *status)
+{
+    if (status->inlined_iseqs == NULL) { // the current ISeq is being inlined
+        compile_inlined_cancel_handler(f, body, &status->inline_context);
+        return;
+    }
+
+    fprintf(f, "\nsend_cancel:\n");
+    fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel_send_inline);\n");
+    fprintf(f, " rb_mjit_recompile_send(original_iseq);\n");
+    fprintf(f, " goto cancel;\n");
+
+    fprintf(f, "\nivar_cancel:\n");
+    fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel_ivar_inline);\n");
+    fprintf(f, " rb_mjit_recompile_ivar(original_iseq);\n");
+    fprintf(f, " goto cancel;\n");
+
+    fprintf(f, "\nexivar_cancel:\n");
+    fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel_exivar_inline);\n");
+    fprintf(f, " rb_mjit_recompile_exivar(original_iseq);\n");
+    fprintf(f, " goto cancel;\n");
+
+    fprintf(f, "\nconst_cancel:\n");
+    fprintf(f, " rb_mjit_recompile_const(original_iseq);\n");
+    fprintf(f, " goto cancel;\n");
+
+    fprintf(f, "\ncancel:\n");
+    fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel);\n");
+    if (status->local_stack_p) {
+        for (unsigned int i = 0; i < body->stack_max; i++) {
+            fprintf(f, " *(vm_base_ptr(reg_cfp) + %d) = stack[%d];\n", i, i);
+        }
+    }
+    fprintf(f, " return Qundef;\n");
+}
+
+extern int
+mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const struct rb_iseq_constant_body *captured_iseq);
+
+// Copy current is_entries and use it throughout the current compilation consistently.
+// While ic->entry has been immutable since https://github.com/ruby/ruby/pull/3662,
+// we still need this to avoid a race condition between entries and ivar_serial/max_ivar_index.
+static void
+mjit_capture_is_entries(const struct rb_iseq_constant_body *body, union iseq_inline_storage_entry *is_entries)
+{
+    if (is_entries == NULL)
+        return;
+    memcpy(is_entries, body->is_entries, sizeof(union iseq_inline_storage_entry) * ISEQ_IS_SIZE(body));
+}
+
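For a root (non-inlined) ISeq, `compile_cancel_handler` emits one label per recompilation reason, all falling through to a common `cancel` label. Roughly, assuming an illustrative `stack_max` of 2:

    send_cancel:
        RB_DEBUG_COUNTER_INC(mjit_cancel_send_inline);
        rb_mjit_recompile_send(original_iseq);
        goto cancel;

    ivar_cancel:
        RB_DEBUG_COUNTER_INC(mjit_cancel_ivar_inline);
        rb_mjit_recompile_ivar(original_iseq);
        goto cancel;

    /* exivar_cancel and const_cancel follow the same pattern */

    cancel:
        RB_DEBUG_COUNTER_INC(mjit_cancel);
        *(vm_base_ptr(reg_cfp) + 0) = stack[0];
        *(vm_base_ptr(reg_cfp) + 1) = stack[1];
        return Qundef;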
+static bool
+mjit_compile_body(FILE *f, const rb_iseq_t *iseq, struct compile_status *status)
+{
+    const struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
+    status->success = true;
+    status->local_stack_p = !body->catch_except_p;
+
+    if (status->local_stack_p) {
+        fprintf(f, " VALUE stack[%d];\n", body->stack_max);
+    }
+    else {
+        fprintf(f, " VALUE *stack = reg_cfp->sp;\n");
+    }
+    if (status->inlined_iseqs != NULL) // i.e. compile root
+        fprintf(f, " static const rb_iseq_t *original_iseq = (const rb_iseq_t *)0x%"PRIxVALUE";\n", (VALUE)iseq);
+    fprintf(f, " static const VALUE *const original_body_iseq = (VALUE *)0x%"PRIxVALUE";\n",
+            (VALUE)body->iseq_encoded);
+    fprintf(f, " VALUE cfp_self = reg_cfp->self;\n"); // cache self across the method
+    fprintf(f, "#undef GET_SELF\n");
+    fprintf(f, "#define GET_SELF() cfp_self\n");
+
+    // Generate merged ivar guards first if needed
+    if (!status->compile_info->disable_ivar_cache && status->merge_ivar_guards_p) {
+        fprintf(f, " if (UNLIKELY(!(RB_TYPE_P(GET_SELF(), T_OBJECT) && (rb_serial_t)%"PRI_SERIALT_PREFIX"u == RCLASS_SERIAL(RBASIC(GET_SELF())->klass) &&", status->ivar_serial);
+#if USE_RVARGC
+        fprintf(f, "%"PRIuSIZE" < ROBJECT_NUMIV(GET_SELF())", status->max_ivar_index); // index < ROBJECT_NUMIV(obj)
+#else
+        if (status->max_ivar_index >= ROBJECT_EMBED_LEN_MAX) {
+            fprintf(f, "%"PRIuSIZE" < ROBJECT_NUMIV(GET_SELF())", status->max_ivar_index); // index < ROBJECT_NUMIV(obj) && !RB_FL_ANY_RAW(obj, ROBJECT_EMBED)
+        }
+        else {
+            fprintf(f, "ROBJECT_EMBED_LEN_MAX == ROBJECT_NUMIV(GET_SELF())"); // index < ROBJECT_NUMIV(obj) && RB_FL_ANY_RAW(obj, ROBJECT_EMBED)
+        }
+#endif
+        fprintf(f, "))) {\n");
+        fprintf(f, " goto ivar_cancel;\n");
+        fprintf(f, " }\n");
+    }
+
+    // Simulate `opt_pc` in setup_parameters_complex. Other PCs which may be passed by catch tables
+    // are not considered since vm_exec doesn't call jit_exec for catch tables.
+    if (body->param.flags.has_opt) {
+        int i;
+        fprintf(f, "\n");
+        fprintf(f, " switch (reg_cfp->pc - ISEQ_BODY(reg_cfp->iseq)->iseq_encoded) {\n");
+        for (i = 0; i <= body->param.opt_num; i++) {
+            VALUE pc_offset = body->param.opt_table[i];
+            fprintf(f, " case %"PRIdVALUE":\n", pc_offset);
+            fprintf(f, " goto label_%"PRIdVALUE";\n", pc_offset);
+        }
+        fprintf(f, " }\n");
+    }
+
+    compile_insns(f, body, 0, 0, status);
+    compile_cancel_handler(f, body, status);
+    fprintf(f, "#undef GET_SELF");
+    return status->success;
+}
+
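Reconstructed from the `fprintf` calls in `mjit_compile_body` above, the prologue of a generated function looks roughly like this for a root ISeq compiled with `local_stack_p`; the addresses, `stack_max`, and the `opt_pc` offsets are illustrative:

    VALUE stack[3];
    static const rb_iseq_t *original_iseq = (const rb_iseq_t *)0x7f3a920d2e48;
    static const VALUE *const original_body_iseq = (VALUE *)0x7f3a920d3000;
    VALUE cfp_self = reg_cfp->self;
    #undef GET_SELF
    #define GET_SELF() cfp_self

    /* only when body->param.flags.has_opt: jump to the opt_pc chosen for optional args */
    switch (reg_cfp->pc - ISEQ_BODY(reg_cfp->iseq)->iseq_encoded) {
      case 0: goto label_0;
      case 4: goto label_4;
    }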
+// Return true if the ISeq can be inlined without pushing a new control frame.
+static bool
+inlinable_iseq_p(const struct rb_iseq_constant_body *body)
+{
+    // 1) If catch_except_p, caller frame should be preserved when callee catches an exception.
+    // Then we need to wrap `vm_exec()` but then we can't inline the call inside it.
+    //
+    // 2) If `body->catch_except_p` is false and `handles_sp?` of an insn is false,
+    // sp is not moved as we assume `status->local_stack_p = !body->catch_except_p`.
+    //
+    // 3) If `body->catch_except_p` is false and `always_leaf?` of an insn is true,
+    // pc is not moved.
+    if (body->catch_except_p)
+        return false;
+
+    unsigned int pos = 0;
+    while (pos < body->iseq_size) {
+        int insn = rb_vm_insn_decode(body->iseq_encoded[pos]);
+        // All insns in the ISeq except `leave` (to be overridden in the inlined code)
+        // should meet following strong assumptions:
+        // * Do not require `cfp->sp` motion
+        // * Do not move `cfp->pc`
+        // * Do not read any `cfp->pc`
+        if (insn == BIN(invokebuiltin) || insn == BIN(opt_invokebuiltin_delegate) || insn == BIN(opt_invokebuiltin_delegate_leave)) {
+            // builtin insn's inlinability is handled by `Primitive.attr! 'inline'` per iseq
+            if (!body->builtin_inline_p)
+                return false;
+        }
+        else if (insn != BIN(leave) && insn_may_depend_on_sp_or_pc(insn, body->iseq_encoded + (pos + 1)))
+            return false;
+        // At this moment, `cfp->ep` in an inlined method is not working.
+        switch (insn) {
+          case BIN(getlocal):
+          case BIN(getlocal_WC_0):
+          case BIN(getlocal_WC_1):
+          case BIN(setlocal):
+          case BIN(setlocal_WC_0):
+          case BIN(setlocal_WC_1):
+          case BIN(getblockparam):
+          case BIN(getblockparamproxy):
+          case BIN(setblockparam):
+            return false;
+        }
+        pos += insn_len(insn);
+    }
+    return true;
+}
+
+// Return an iseq pointer if cc has inlinable iseq.
+const rb_iseq_t *
+rb_mjit_inlinable_iseq(const struct rb_callinfo *ci, const struct rb_callcache *cc)
+{
+    const rb_iseq_t *iseq;
+    if (has_valid_method_type(cc) &&
+        !(vm_ci_flag(ci) & VM_CALL_TAILCALL) && // inlining only non-tailcall path
+        vm_cc_cme(cc)->def->type == VM_METHOD_TYPE_ISEQ &&
+        fastpath_applied_iseq_p(ci, cc, iseq = def_iseq_ptr(vm_cc_cme(cc)->def)) &&
+        // CC_SET_FASTPATH in vm_callee_setup_arg
+        inlinable_iseq_p(ISEQ_BODY(iseq))) {
+        return iseq;
+    }
+    return NULL;
+}
+
+static void
+init_ivar_compile_status(const struct rb_iseq_constant_body *body, struct compile_status *status)
+{
+    mjit_capture_is_entries(body, status->is_entries);
+
+    int num_ivars = 0;
+    unsigned int pos = 0;
+    status->max_ivar_index = 0;
+    status->ivar_serial = 0;
+
+    while (pos < body->iseq_size) {
+        int insn = rb_vm_insn_decode(body->iseq_encoded[pos]);
+        if (insn == BIN(getinstancevariable) || insn == BIN(setinstancevariable)) {
+            IVC ic = (IVC)body->iseq_encoded[pos+2];
+            IVC ic_copy = &(status->is_entries + ((union iseq_inline_storage_entry *)ic - body->is_entries))->iv_cache;
+            if (ic_copy->entry) { // Only initialized (ic_serial > 0) IVCs are optimized
+                num_ivars++;
+
+                if (status->max_ivar_index < ic_copy->entry->index) {
+                    status->max_ivar_index = ic_copy->entry->index;
+                }
+
+                if (status->ivar_serial == 0) {
+                    status->ivar_serial = ic_copy->entry->class_serial;
+                }
+                else if (status->ivar_serial != ic_copy->entry->class_serial) {
+                    // Multiple classes have used this ISeq. Give up assuming one serial.
+                    status->merge_ivar_guards_p = false;
+                    return;
+                }
+            }
+        }
+        pos += insn_len(insn);
+    }
+    status->merge_ivar_guards_p = status->ivar_serial > 0 && num_ivars >= 2;
+}
+
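When `init_ivar_compile_status` finds at least two initialized IVCs that all share one class serial, `merge_ivar_guards_p` is set and `mjit_compile_body` hoists a single guard in front of the whole method instead of guarding every ivar access. The emitted guard looks roughly like this (USE_RVARGC variant; the class serial and index bound are illustrative):

    if (UNLIKELY(!(RB_TYPE_P(GET_SELF(), T_OBJECT) &&
                   (rb_serial_t)3735928559u == RCLASS_SERIAL(RBASIC(GET_SELF())->klass) &&
                   5 < ROBJECT_NUMIV(GET_SELF())))) {
        goto ivar_cancel;
    }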
+// This needs to be a macro instead of a function because it's using `alloca`.
+#define INIT_COMPILE_STATUS(status, body, compile_root_p) do { \
+    status = (struct compile_status){ \
+        .stack_size_for_pos = (int *)alloca(sizeof(int) * body->iseq_size), \
+        .inlined_iseqs = compile_root_p ? \
+            alloca(sizeof(const struct rb_iseq_constant_body *) * body->iseq_size) : NULL, \
+        .is_entries = (ISEQ_IS_SIZE(body) > 0) ? \
+            alloca(sizeof(union iseq_inline_storage_entry) * ISEQ_IS_SIZE(body)) : NULL, \
+        .cc_entries_index = (body->ci_size > 0) ? \
+            mjit_capture_cc_entries(status.compiled_iseq, body) : -1, \
+        .compiled_id = status.compiled_id, \
+        .compiled_iseq = status.compiled_iseq, \
+        .compile_info = compile_root_p ? \
+            rb_mjit_iseq_compile_info(body) : alloca(sizeof(struct rb_mjit_compile_info)) \
+    }; \
+    memset(status.stack_size_for_pos, NOT_COMPILED_STACK_SIZE, sizeof(int) * body->iseq_size); \
+    if (compile_root_p) \
+        memset((void *)status.inlined_iseqs, 0, sizeof(const struct rb_iseq_constant_body *) * body->iseq_size); \
+    else \
+        memset(status.compile_info, 0, sizeof(struct rb_mjit_compile_info)); \
+} while (0)
+
+static bool
+precompile_inlinable_child_iseq(FILE *f, const rb_iseq_t *child_iseq, struct compile_status *status,
+                                const struct rb_callinfo *ci, const struct rb_callcache *cc, unsigned int pos)
+{
+    struct compile_status child_status = { .compiled_iseq = status->compiled_iseq, .compiled_id = status->compiled_id };
+    INIT_COMPILE_STATUS(child_status, ISEQ_BODY(child_iseq), false);
+    child_status.inline_context = (struct inlined_call_context){
+        .orig_argc = vm_ci_argc(ci),
+        .me = (VALUE)vm_cc_cme(cc),
+        .param_size = ISEQ_BODY(child_iseq)->param.size,
+        .local_size = ISEQ_BODY(child_iseq)->local_table_size
+    };
+    if (ISEQ_BODY(child_iseq)->ci_size > 0 && child_status.cc_entries_index == -1) {
+        return false;
+    }
+    init_ivar_compile_status(ISEQ_BODY(child_iseq), &child_status);
+
+    fprintf(f, "ALWAYS_INLINE(static VALUE _mjit%d_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq));\n", status->compiled_id, pos);
+    fprintf(f, "static inline VALUE\n_mjit%d_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq)\n{\n", status->compiled_id, pos);
+    fprintf(f, " const VALUE *orig_pc = reg_cfp->pc;\n");
+    fprintf(f, " VALUE *orig_sp = reg_cfp->sp;\n");
+    bool success = mjit_compile_body(f, child_iseq, &child_status);
+    fprintf(f, "\n} /* end of _mjit%d_inlined_%d */\n\n", status->compiled_id, pos);
+
+    return success;
+}
+
+// Compile inlinable ISeqs to C code in `f`. It returns true if it succeeds in compiling them.
+static bool
+precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status *status)
+{
+    const struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
+    unsigned int pos = 0;
+    while (pos < body->iseq_size) {
+        int insn = rb_vm_insn_decode(body->iseq_encoded[pos]);
+        if (insn == BIN(opt_send_without_block) || insn == BIN(opt_size)) { // `compile_inlined_cancel_handler` supports only `opt_send_without_block`
+            CALL_DATA cd = (CALL_DATA)body->iseq_encoded[pos + 1];
+            const struct rb_callinfo *ci = cd->ci;
+            const struct rb_callcache *cc = captured_cc_entries(status)[call_data_index(cd, body)]; // use copy to avoid race condition
+
+            const rb_iseq_t *child_iseq;
+            if ((child_iseq = rb_mjit_inlinable_iseq(ci, cc)) != NULL) {
+                status->inlined_iseqs[pos] = ISEQ_BODY(child_iseq);
+
+                if (mjit_opts.verbose >= 1) // print beforehand because ISeq may be GCed during copy job.
+                    fprintf(stderr, "JIT inline: %s@%s:%d => %s@%s:%d\n",
+                            RSTRING_PTR(ISEQ_BODY(child_iseq)->location.label),
+                            RSTRING_PTR(rb_iseq_path(child_iseq)), FIX2INT(ISEQ_BODY(child_iseq)->location.first_lineno),
+                            RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
+                            RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(ISEQ_BODY(iseq)->location.first_lineno));
+                if (!precompile_inlinable_child_iseq(f, child_iseq, status, ci, cc, pos))
+                    return false;
+            }
+        }
+        pos += insn_len(insn);
+    }
+    return true;
+}
+
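A note on `INIT_COMPILE_STATUS` above: memory obtained with `alloca` is released when the function that called `alloca` returns, so these buffers have to be allocated in the caller's own frame; a helper function would hand back pointers that die the moment it returns. A minimal standalone sketch of that constraint, using toy names that are not part of the MJIT code base (`<alloca.h>` assumed; some platforms declare alloca elsewhere):

    #include <stdio.h>
    #include <alloca.h>

    /* Allocates in the expanding function's frame, like INIT_COMPILE_STATUS. */
    #define INIT_BUF(buf, n) do { (buf) = (int *)alloca((n) * sizeof(int)); } while (0)

    int main(void)
    {
        int *buf;
        INIT_BUF(buf, 8);   /* lives until main() returns */
        for (int i = 0; i < 8; i++) buf[i] = i;
        printf("%d\n", buf[7]);
        return 0;           /* released automatically with main()'s frame */
    }

    /* By contrast, `int *make_buf(size_t n) { return alloca(n); }` would return
     * a pointer into a frame that no longer exists. */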
+// Compile ISeq to C code in `f`. It returns true if it succeeds in compiling it.
+bool
+mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname, int id)
+{
+    struct compile_status status = { .compiled_iseq = ISEQ_BODY(iseq), .compiled_id = id };
+    INIT_COMPILE_STATUS(status, ISEQ_BODY(iseq), true);
+    if (ISEQ_BODY(iseq)->ci_size > 0 && status.cc_entries_index == -1) {
+        return false;
+    }
+    init_ivar_compile_status(ISEQ_BODY(iseq), &status);
+
+    if (!status.compile_info->disable_send_cache && !status.compile_info->disable_inlining) {
+        if (!precompile_inlinable_iseqs(f, iseq, &status))
+            return false;
+    }
+
+    fprintf(f, "VALUE\n%s(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)\n{\n", funcname);
+    bool success = mjit_compile_body(f, iseq, &status);
+    fprintf(f, "\n} // end of %s\n", funcname);
+    return success;
+}
+
+#endif // USE_MJIT
diff --git a/tool/ruby_vm/views/mjit_compile.inc.erb b/tool/ruby_vm/views/mjit_compile.inc.erb
index 5820f81770..0e66f78007 100644
--- a/tool/ruby_vm/views/mjit_compile.inc.erb
+++ b/tool/ruby_vm/views/mjit_compile.inc.erb
@@ -11,7 +11,7 @@
 % # This is an ERB template that generates Ruby code that generates C code that
 % # generates JIT-ed C code.
 <%= render 'notice', locals: {
-    this_file: 'is the main part of compile_insn() in mjit_compile.c',
+    this_file: 'is the main part of compile_insn() in mjit_compiler.c',
     edit: __FILE__,
   } -%>
 %
-- 
cgit v1.2.3