summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKoichi Sasada <ko1@atdot.net>2020-03-10 02:22:11 +0900
committerKoichi Sasada <ko1@atdot.net>2020-09-03 21:11:06 +0900
commit79df14c04b452411b9d17e26a398e491bca1a811 (patch)
tree7598cee0f105439efd5bb328a727b0fe27d7c666
parenteeb5325d3bfd71301896360c17e8f51abcb9a7e5 (diff)
Introduce Ractor mechanism for parallel execution
This commit introduces Ractor mechanism to run Ruby program in parallel. See doc/ractor.md for more details about Ractor. See ticket [Feature #17100] to see the implementation details and discussions. [Feature #17100] This commit does not complete the implementation. You can find many bugs on using Ractor. Also the specification will be changed so that this feature is experimental. You will see a warning when you make the first Ractor with `Ractor.new`. I hope this feature can help programmers from thread-safety issues.
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/3365
-rw-r--r--bootstraptest/test_ractor.rb516
-rw-r--r--common.mk447
-rw-r--r--compile.c17
-rw-r--r--cont.c18
-rw-r--r--debug.c3
-rw-r--r--doc/ractor.md883
-rw-r--r--eval.c3
-rw-r--r--ext/ripper/depend29
-rw-r--r--gc.c347
-rw-r--r--gc.h7
-rw-r--r--include/ruby/internal/fl_type.h1
-rw-r--r--inits.c2
-rw-r--r--internal/variable.h1
-rw-r--r--io.c146
-rw-r--r--mjit.c11
-rw-r--r--parse.y7
-rw-r--r--process.c3
-rw-r--r--ractor.c1877
-rw-r--r--ractor.h269
-rw-r--r--ractor.rb162
-rw-r--r--ractor_pub.h33
-rw-r--r--ruby_assert.h1
-rw-r--r--signal.c4
-rw-r--r--thread.c581
-rw-r--r--thread_pthread.c226
-rw-r--r--thread_pthread.h32
-rw-r--r--thread_sync.c11
-rw-r--r--thread_win32.c59
-rw-r--r--thread_win32.h31
-rw-r--r--tool/ruby_vm/views/_mjit_compile_ivar.erb2
-rw-r--r--transient_heap.c193
-rw-r--r--variable.c155
-rw-r--r--variable.h2
-rw-r--r--vm.c124
-rw-r--r--vm_core.h119
-rw-r--r--vm_dump.c8
-rw-r--r--vm_insnhelper.c55
-rw-r--r--vm_insnhelper.h2
-rw-r--r--vm_sync.c250
-rw-r--r--vm_sync.h96
-rw-r--r--vm_trace.c3
41 files changed, 5952 insertions, 784 deletions
diff --git a/bootstraptest/test_ractor.rb b/bootstraptest/test_ractor.rb
new file mode 100644
index 0000000..026b6ad
--- /dev/null
+++ b/bootstraptest/test_ractor.rb
@@ -0,0 +1,516 @@
+# Ractor.current returns a current ractor
+assert_equal 'Ractor', %q{
+ Ractor.current.class
+}
+
+# Ractor.new returns new Ractor
+assert_equal 'Ractor', %q{
+ Ractor.new{}.class
+}
+
+# Ractor.new must call with a block
+assert_equal "must be called with a block", %q{
+ begin
+ Ractor.new
+ rescue ArgumentError => e
+ e.message
+ end
+}
+
+
+# A return value of a Ractor block will be a message from the Ractor.
+assert_equal 'ok', %q{
+ # join
+ r = Ractor.new do
+ 'ok'
+ end
+ r.take
+}
+
+# Passed arguments to Ractor.new will be a block parameter
+# The values are passed with Ractor-communication pass.
+assert_equal 'ok', %q{
+ # ping-pong with arg
+ r = Ractor.new 'ok' do |msg|
+ msg
+ end
+ r.take
+}
+
+assert_equal 'ok', %q{
+ # ping-pong with two args
+ r = Ractor.new 'ping', 'pong' do |msg, msg2|
+ [msg, msg2]
+ end
+ 'ok' if r.take == ['ping', 'pong']
+}
+
+# Ractor#send passes an object with copy to a Ractor
+# and Ractor.recv in the Ractor block can receive the passed value.
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ msg = Ractor.recv
+ end
+ r.send 'ok'
+ r.take
+}
+
+# Ractor.select(*ractors) receives a values from a ractors.
+# It is similar to select(2) and Go's select syntax.
+# The return value is [ch, received_value]
+assert_equal 'ok', %q{
+ # select 1
+ r1 = Ractor.new{'r1'}
+ r, obj = Ractor.select(r1)
+ 'ok' if r == r1 and obj == 'r1'
+}
+
+assert_equal '["r1", "r2"]', %q{
+ # select 2
+ r1 = Ractor.new{'r1'}
+ r2 = Ractor.new{'r2'}
+ rs = [r1, r2]
+ as = []
+ r, obj = Ractor.select(*rs)
+ rs.delete(r)
+ as << obj
+ r, obj = Ractor.select(*rs)
+ as << obj
+ as.sort #=> ["r1", "r2"]
+}
+
+assert_equal 'true', %q{
+ def test n
+ rs = (1..n).map do |i|
+ Ractor.new(i) do |i|
+ "r#{i}"
+ end
+ end
+ as = []
+ all_rs = rs.dup
+
+ n.times{
+ r, obj = Ractor.select(*rs)
+ as << [r, obj]
+ rs.delete(r)
+ }
+
+ if as.map{|r, o| r.inspect}.sort == all_rs.map{|r| r.inspect}.sort &&
+ as.map{|r, o| o}.sort == (1..n).map{|i| "r#{i}"}.sort
+ 'ok'
+ else
+ 'ng'
+ end
+ end
+
+ 30.times.map{|i|
+ test i
+ }.all?('ok')
+}
+
+# Outgoing port of a ractor will be closed when the Ractor is terminated.
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ 'finish'
+ end
+
+ r.take
+ sleep 0.1 # wait for terminate
+
+ begin
+ o = r.take
+ rescue Ractor::ClosedError
+ 'ok'
+ else
+ "ng: #{o}"
+ end
+}
+
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ end
+
+ r.take # closed
+ sleep 0.1 # wait for terminate
+
+ begin
+ r.send(1)
+ rescue Ractor::ClosedError
+ 'ok'
+ else
+ 'ng'
+ end
+}
+
+# multiple Ractors can recv (wait) from one Ractor
+assert_equal '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]', %q{
+ pipe = Ractor.new do
+ loop do
+ Ractor.yield Ractor.recv
+ end
+ end
+
+ RN = 10
+ rs = RN.times.map{|i|
+ Ractor.new pipe, i do |pipe, i|
+ msg = pipe.take
+ msg # ping-pong
+ end
+ }
+ RN.times{|i|
+ pipe << i
+ }
+ RN.times.map{
+ r, n = Ractor.select(*rs)
+ rs.delete r
+ n
+ }.sort
+}
+
+# Ractor.select also support multiple take, recv and yiled
+assert_equal '[true, true, true]', %q{
+ RN = 10
+ CR = Ractor.current
+
+ rs = (1..RN).map{
+ Ractor.new do
+ CR.send 'send' + CR.take #=> 'sendyield'
+ 'take'
+ end
+ }
+ recv = []
+ take = []
+ yiel = []
+ until rs.empty?
+ r, v = Ractor.select(CR, *rs, yield_value: 'yield')
+ case r
+ when :recv
+ recv << v
+ when :yield
+ yiel << v
+ else
+ take << v
+ rs.delete r
+ end
+ end
+ [recv.all?('sendyield'), yiel.all?(nil), take.all?('take')]
+}
+
+# multiple Ractors can send to one Ractor
+assert_equal '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]', %q{
+ pipe = Ractor.new do
+ loop do
+ Ractor.yield Ractor.recv
+ end
+ end
+
+ RN = 10
+ RN.times.map{|i|
+ Ractor.new pipe, i do |pipe, i|
+ pipe << i
+ end
+ }
+ RN.times.map{
+ pipe.take
+ }.sort
+}
+
+# an exception in a Ractor will be re-raised at Ractor#recv
+assert_equal '[RuntimeError, "ok", true]', %q{
+ r = Ractor.new do
+ raise 'ok' # exception will be transferred receiver
+ end
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ [e.cause.class, #=> RuntimeError
+ e.cause.message, #=> 'ok'
+ e.ractor == r] #=> true
+ end
+}
+
+# unshareable object are copied
+assert_equal 'false', %q{
+ obj = 'str'.dup
+ r = Ractor.new obj do |msg|
+ msg.object_id
+ end
+
+ obj.object_id == r.take
+}
+
+# To copy the object, now Marshal#dump is used
+assert_equal 'no _dump_data is defined for class Thread', %q{
+ obj = Thread.new{}
+ begin
+ r = Ractor.new obj do |msg|
+ msg
+ end
+ rescue TypeError => e
+ e.message #=> no _dump_data is defined for class Thread
+ else
+ 'ng'
+ end
+}
+
+# send sharable and unsharable objects
+assert_equal "[[[1, true], [:sym, true], [:xyzzy, true], [\"frozen\", true], " \
+ "[(3/1), true], [(3+4i), true], [/regexp/, true], [C, true]], " \
+ "[[\"mutable str\", false], [[:array], false], [{:hash=>true}, false]]]", %q{
+ r = Ractor.new do
+ while v = Ractor.recv
+ Ractor.yield v
+ end
+ end
+
+ class C
+ end
+
+ sharable_objects = [1, :sym, 'xyzzy'.to_sym, 'frozen'.freeze, 1+2r, 3+4i, /regexp/, C]
+
+ sr = sharable_objects.map{|o|
+ r << o
+ o2 = r.take
+ [o, o.object_id == o2.object_id]
+ }
+
+ ur = unsharable_objects = ['mutable str'.dup, [:array], {hash: true}].map{|o|
+ r << o
+ o2 = r.take
+ [o, o.object_id == o2.object_id]
+ }
+ [sr, ur].inspect
+}
+
+# move example2: String
+# touching moved object causes an error
+assert_equal 'hello world', %q{
+ # move
+ r = Ractor.new do
+ obj = Ractor.recv
+ obj << ' world'
+ end
+
+ str = 'hello'
+ r.send str, move: true
+ modified = r.take
+
+ begin
+ str << ' exception' # raise Ractor::MovedError
+ rescue Ractor::MovedError
+ modified #=> 'hello world'
+ else
+ raise 'unreachable'
+ end
+}
+
+# move example2: Array
+assert_equal '[0, 1]', %q{
+ r = Ractor.new do
+ ary = Ractor.recv
+ ary << 1
+ end
+
+ a1 = [0]
+ r.send a1, move: true
+ a2 = r.take
+ begin
+ a1 << 2 # raise Ractor::MovedError
+ rescue Ractor::MovedError
+ a2.inspect
+ end
+}
+
+# move with yield
+assert_equal 'hello', %q{
+ r = Ractor.new do
+ Thread.current.report_on_exception = false
+ obj = 'hello'
+ Ractor.yield obj, move: true
+ obj << 'world'
+ end
+
+ str = r.take
+ begin
+ r.take
+ rescue Ractor::RemoteError
+ str #=> "hello"
+ end
+}
+
+# Access to global-variables are prohibitted
+assert_equal 'can not access global variables $gv from non-main Ractors', %q{
+ $gv = 1
+ r = Ractor.new do
+ $gv
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# Access to global-variables are prohibitted
+assert_equal 'can not access global variables $gv from non-main Ractors', %q{
+ r = Ractor.new do
+ $gv = 1
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# $stdin,out,err is Ractor local, but shared fds
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ [$stdin, $stdout, $stderr].map{|io|
+ [io.object_id, io.fileno]
+ }
+ end
+
+ [$stdin, $stdout, $stderr].zip(r.take){|io, (oid, fno)|
+ raise "should not be different object" if io.object_id == oid
+ raise "fd should be same" unless io.fileno == fno
+ }
+ 'ok'
+}
+
+# selfs are different objects
+assert_equal 'false', %q{
+ r = Ractor.new do
+ self.object_id
+ end
+ r.take == self.object_id #=> false
+}
+
+# self is a Ractor instance
+assert_equal 'true', %q{
+ r = Ractor.new do
+ self.object_id
+ end
+ r.object_id == r.take #=> true
+}
+
+# given block Proc will be isolated, so can not access outer variables.
+assert_equal 'ArgumentError', %q{
+ begin
+ a = true
+ r = Ractor.new do
+ a
+ end
+ rescue => e
+ e.class
+ end
+}
+
+# ivar in sharable-objects are not allowed to access from non-main Ractor
+assert_equal 'can not access instance variables of classes/modules from non-main Ractors', %q{
+ class C
+ @iv = 'str'
+ end
+
+ r = Ractor.new do
+ class C
+ p @iv
+ end
+ end
+
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# ivar in sharable-objects are not allowed to access from non-main Ractor
+assert_equal 'can not access instance variables of shareable objects from non-main Ractors', %q{
+ shared = Ractor.new{}
+ shared.instance_variable_set(:@iv, 'str')
+
+ r = Ractor.new shared do |shared|
+ p shared.instance_variable_get(:@iv)
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# cvar in sharable-objects are not allowed to access from non-main Ractor
+assert_equal 'can not access class variables from non-main Ractors', %q{
+ class C
+ @@cv = 'str'
+ end
+
+ r = Ractor.new do
+ class C
+ p @@cv
+ end
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# Getting non-sharable objects via constants by other Ractors is not allowed
+assert_equal 'can not access non-sharable objects in constant C::CONST by non-main Ractor.', %q{
+ class C
+ CONST = 'str'
+ end
+ r = Ractor.new do
+ C::CONST
+ end
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# Setting non-sharable objects into constants by other Ractors is not allowed
+assert_equal 'can not set constants with non-shareable objects by non-main Ractors', %q{
+ class C
+ end
+ r = Ractor.new do
+ C::CONST = 'str'
+ end
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# Immutable Array and Hash are shareable, so it can be shared with constants
+assert_equal '[1000, 3]', %q{
+ A = Array.new(1000).freeze # [nil, ...]
+ H = {a: 1, b: 2, c: 3}.freeze
+
+ Ractor.new{ [A.size, H.size] }.take
+}
+
+# A Ractor can have a name
+assert_equal 'test-name', %q{
+ r = Ractor.new name: 'test-name' do
+ end
+ r.name
+}
+
+# If Ractor doesn't have a name, Ractor#name returns nil.
+assert_equal 'nil', %q{
+ r = Ractor.new do
+ end
+ r.name.inspect
+}
+
diff --git a/common.mk b/common.mk
index 990fa2c..9447fb7 100644
--- a/common.mk
+++ b/common.mk
@@ -116,6 +116,7 @@ COMMONOBJS = array.$(OBJEXT) \
parse.$(OBJEXT) \
proc.$(OBJEXT) \
process.$(OBJEXT) \
+ ractor.$(OBJEXT) \
random.$(OBJEXT) \
range.$(OBJEXT) \
rational.$(OBJEXT) \
@@ -144,6 +145,7 @@ COMMONOBJS = array.$(OBJEXT) \
vm.$(OBJEXT) \
vm_backtrace.$(OBJEXT) \
vm_dump.$(OBJEXT) \
+ vm_sync.$(OBJEXT) \
vm_trace.$(OBJEXT) \
$(COROUTINE_OBJ) \
$(DTRACE_OBJ) \
@@ -759,6 +761,9 @@ no-btest-ruby: PHONY
yes-btest-ruby: prog PHONY
$(Q)$(exec) $(RUNRUBY) "$(srcdir)/bootstraptest/runner.rb" --ruby="$(PROGRAM) -I$(srcdir)/lib $(RUN_OPTS)" -q $(OPTS) $(TESTOPTS) $(BTESTS)
+rtest: fake miniruby$(EXEEXT) PHONY
+ $(Q)$(exec) $(BOOTSTRAPRUBY) "$(srcdir)/bootstraptest/runner.rb" --ruby="$(BTESTRUBY) $(RUN_OPTS)" --sets=ractor -v
+
test-basic: $(TEST_RUNNABLE)-test-basic
no-test-basic: PHONY
yes-test-basic: prog PHONY
@@ -1008,15 +1013,16 @@ $(srcs_vpath)mjit_compile.inc: $(tooldir)/ruby_vm/views/mjit_compile.inc.erb $(i
BUILTIN_RB_SRCS = \
$(srcdir)/ast.rb \
+ $(srcdir)/dir.rb \
$(srcdir)/gc.rb \
$(srcdir)/integer.rb \
$(srcdir)/io.rb \
- $(srcdir)/dir.rb \
$(srcdir)/pack.rb \
$(srcdir)/trace_point.rb \
$(srcdir)/warning.rb \
$(srcdir)/array.rb \
$(srcdir)/kernel.rb \
+ $(srcdir)/ractor.rb \
$(srcdir)/prelude.rb \
$(srcdir)/gem_prelude.rb \
$(empty)
@@ -3361,6 +3367,8 @@ cont.$(OBJEXT): {$(VPATH)}method.h
cont.$(OBJEXT): {$(VPATH)}missing.h
cont.$(OBJEXT): {$(VPATH)}mjit.h
cont.$(OBJEXT): {$(VPATH)}node.h
+cont.$(OBJEXT): {$(VPATH)}ractor.h
+cont.$(OBJEXT): {$(VPATH)}ractor_pub.h
cont.$(OBJEXT): {$(VPATH)}ruby_assert.h
cont.$(OBJEXT): {$(VPATH)}ruby_atomic.h
cont.$(OBJEXT): {$(VPATH)}st.h
@@ -3368,6 +3376,7 @@ cont.$(OBJEXT): {$(VPATH)}subst.h
cont.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
cont.$(OBJEXT): {$(VPATH)}thread_native.h
cont.$(OBJEXT): {$(VPATH)}vm_core.h
+cont.$(OBJEXT): {$(VPATH)}vm_debug.h
cont.$(OBJEXT): {$(VPATH)}vm_opts.h
debug.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
debug.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
@@ -3403,6 +3412,7 @@ debug.$(OBJEXT): {$(VPATH)}encoding.h
debug.$(OBJEXT): {$(VPATH)}eval_intern.h
debug.$(OBJEXT): {$(VPATH)}gc.h
debug.$(OBJEXT): {$(VPATH)}id.h
+debug.$(OBJEXT): {$(VPATH)}id_table.h
debug.$(OBJEXT): {$(VPATH)}intern.h
debug.$(OBJEXT): {$(VPATH)}internal.h
debug.$(OBJEXT): {$(VPATH)}internal/anyargs.h
@@ -3551,6 +3561,8 @@ debug.$(OBJEXT): {$(VPATH)}missing.h
debug.$(OBJEXT): {$(VPATH)}node.h
debug.$(OBJEXT): {$(VPATH)}onigmo.h
debug.$(OBJEXT): {$(VPATH)}oniguruma.h
+debug.$(OBJEXT): {$(VPATH)}ractor.h
+debug.$(OBJEXT): {$(VPATH)}ractor_pub.h
debug.$(OBJEXT): {$(VPATH)}ruby_assert.h
debug.$(OBJEXT): {$(VPATH)}ruby_atomic.h
debug.$(OBJEXT): {$(VPATH)}st.h
@@ -5378,6 +5390,8 @@ eval.$(OBJEXT): {$(VPATH)}oniguruma.h
eval.$(OBJEXT): {$(VPATH)}probes.dmyh
eval.$(OBJEXT): {$(VPATH)}probes.h
eval.$(OBJEXT): {$(VPATH)}probes_helper.h
+eval.$(OBJEXT): {$(VPATH)}ractor.h
+eval.$(OBJEXT): {$(VPATH)}ractor_pub.h
eval.$(OBJEXT): {$(VPATH)}ruby_assert.h
eval.$(OBJEXT): {$(VPATH)}ruby_atomic.h
eval.$(OBJEXT): {$(VPATH)}st.h
@@ -5386,6 +5400,7 @@ eval.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
eval.$(OBJEXT): {$(VPATH)}thread_native.h
eval.$(OBJEXT): {$(VPATH)}vm.h
eval.$(OBJEXT): {$(VPATH)}vm_core.h
+eval.$(OBJEXT): {$(VPATH)}vm_debug.h
eval.$(OBJEXT): {$(VPATH)}vm_opts.h
explicit_bzero.$(OBJEXT): {$(VPATH)}config.h
explicit_bzero.$(OBJEXT): {$(VPATH)}explicit_bzero.c
@@ -5797,6 +5812,8 @@ gc.$(OBJEXT): {$(VPATH)}onigmo.h
gc.$(OBJEXT): {$(VPATH)}oniguruma.h
gc.$(OBJEXT): {$(VPATH)}probes.dmyh
gc.$(OBJEXT): {$(VPATH)}probes.h
+gc.$(OBJEXT): {$(VPATH)}ractor.h
+gc.$(OBJEXT): {$(VPATH)}ractor_pub.h
gc.$(OBJEXT): {$(VPATH)}re.h
gc.$(OBJEXT): {$(VPATH)}regenc.h
gc.$(OBJEXT): {$(VPATH)}regex.h
@@ -5813,7 +5830,9 @@ gc.$(OBJEXT): {$(VPATH)}transient_heap.h
gc.$(OBJEXT): {$(VPATH)}util.h
gc.$(OBJEXT): {$(VPATH)}vm_callinfo.h
gc.$(OBJEXT): {$(VPATH)}vm_core.h
+gc.$(OBJEXT): {$(VPATH)}vm_debug.h
gc.$(OBJEXT): {$(VPATH)}vm_opts.h
+gc.$(OBJEXT): {$(VPATH)}vm_sync.h
golf_prelude.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
golf_prelude.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
golf_prelude.$(OBJEXT): $(CCAN_DIR)/list/list.h
@@ -6713,6 +6732,7 @@ io.$(OBJEXT): {$(VPATH)}missing.h
io.$(OBJEXT): {$(VPATH)}node.h
io.$(OBJEXT): {$(VPATH)}onigmo.h
io.$(OBJEXT): {$(VPATH)}oniguruma.h
+io.$(OBJEXT): {$(VPATH)}ractor_pub.h
io.$(OBJEXT): {$(VPATH)}ruby_assert.h
io.$(OBJEXT): {$(VPATH)}ruby_atomic.h
io.$(OBJEXT): {$(VPATH)}st.h
@@ -8163,6 +8183,7 @@ miniinit.$(OBJEXT): {$(VPATH)}onigmo.h
miniinit.$(OBJEXT): {$(VPATH)}oniguruma.h
miniinit.$(OBJEXT): {$(VPATH)}pack.rb
miniinit.$(OBJEXT): {$(VPATH)}prelude.rb
+miniinit.$(OBJEXT): {$(VPATH)}ractor.rb
miniinit.$(OBJEXT): {$(VPATH)}ruby_assert.h
miniinit.$(OBJEXT): {$(VPATH)}ruby_atomic.h
miniinit.$(OBJEXT): {$(VPATH)}st.h
@@ -9557,6 +9578,7 @@ parse.$(OBJEXT): {$(VPATH)}parse.h
parse.$(OBJEXT): {$(VPATH)}parse.y
parse.$(OBJEXT): {$(VPATH)}probes.dmyh
parse.$(OBJEXT): {$(VPATH)}probes.h
+parse.$(OBJEXT): {$(VPATH)}ractor_pub.h
parse.$(OBJEXT): {$(VPATH)}regenc.h
parse.$(OBJEXT): {$(VPATH)}regex.h
parse.$(OBJEXT): {$(VPATH)}ruby_assert.h
@@ -9978,6 +10000,7 @@ process.$(OBJEXT): {$(VPATH)}node.h
process.$(OBJEXT): {$(VPATH)}onigmo.h
process.$(OBJEXT): {$(VPATH)}oniguruma.h
process.$(OBJEXT): {$(VPATH)}process.c
+process.$(OBJEXT): {$(VPATH)}ractor_pub.h
process.$(OBJEXT): {$(VPATH)}ruby_assert.h
process.$(OBJEXT): {$(VPATH)}ruby_atomic.h
process.$(OBJEXT): {$(VPATH)}st.h
@@ -9988,6 +10011,203 @@ process.$(OBJEXT): {$(VPATH)}thread_native.h
process.$(OBJEXT): {$(VPATH)}util.h
process.$(OBJEXT): {$(VPATH)}vm_core.h
process.$(OBJEXT): {$(VPATH)}vm_opts.h
+ractor.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
+ractor.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
+ractor.$(OBJEXT): $(CCAN_DIR)/list/list.h
+ractor.$(OBJEXT): $(CCAN_DIR)/str/str.h
+ractor.$(OBJEXT): $(hdrdir)/ruby/ruby.h
+ractor.$(OBJEXT): $(top_srcdir)/internal/array.h
+ractor.$(OBJEXT): $(top_srcdir)/internal/compilers.h
+ractor.$(OBJEXT): $(top_srcdir)/internal/error.h
+ractor.$(OBJEXT): $(top_srcdir)/internal/gc.h
+ractor.$(OBJEXT): $(top_srcdir)/internal/imemo.h
+ractor.$(OBJEXT): $(top_srcdir)/internal/serial.h
+ractor.$(OBJEXT): $(top_srcdir)/internal/static_assert.h
+ractor.$(OBJEXT): $(top_srcdir)/internal/string.h
+ractor.$(OBJEXT): $(top_srcdir)/internal/vm.h
+ractor.$(OBJEXT): $(top_srcdir)/internal/warnings.h
+ractor.$(OBJEXT): {$(VPATH)}assert.h
+ractor.$(OBJEXT): {$(VPATH)}backward/2/assume.h
+ractor.$(OBJEXT): {$(VPATH)}backward/2/attributes.h
+ractor.$(OBJEXT): {$(VPATH)}backward/2/bool.h
+ractor.$(OBJEXT): {$(VPATH)}backward/2/gcc_version_since.h
+ractor.$(OBJEXT): {$(VPATH)}backward/2/inttypes.h
+ractor.$(OBJEXT): {$(VPATH)}backward/2/limits.h
+ractor.$(OBJEXT): {$(VPATH)}backward/2/long_long.h
+ractor.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
+ractor.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
+ractor.$(OBJEXT): {$(VPATH)}builtin.h
+ractor.$(OBJEXT): {$(VPATH)}config.h
+ractor.$(OBJEXT): {$(VPATH)}constant.h
+ractor.$(OBJEXT): {$(VPATH)}debug.h
+ractor.$(OBJEXT): {$(VPATH)}debug_counter.h
+ractor.$(OBJEXT): {$(VPATH)}defines.h
+ractor.$(OBJEXT): {$(VPATH)}encoding.h
+ractor.$(OBJEXT): {$(VPATH)}id.h
+ractor.$(OBJEXT): {$(VPATH)}id_table.h
+ractor.$(OBJEXT): {$(VPATH)}intern.h
+ractor.$(OBJEXT): {$(VPATH)}internal.h
+ractor.$(OBJEXT): {$(VPATH)}internal/anyargs.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/char.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/double.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/fixnum.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/gid_t.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/int.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/intptr_t.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/long.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/long_long.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/mode_t.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/off_t.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/pid_t.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/short.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/size_t.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/st_data_t.h
+ractor.$(OBJEXT): {$(VPATH)}internal/arithmetic/uid_t.h
+ractor.$(OBJEXT): {$(VPATH)}internal/assume.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/alloc_size.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/artificial.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/cold.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/const.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/constexpr.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/deprecated.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/diagnose_if.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/enum_extensibility.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/error.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/flag_enum.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/forceinline.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/format.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/maybe_unused.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/noalias.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/nodiscard.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/noexcept.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/noinline.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/nonnull.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/noreturn.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/pure.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/restrict.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/returns_nonnull.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/warning.h
+ractor.$(OBJEXT): {$(VPATH)}internal/attr/weakref.h
+ractor.$(OBJEXT): {$(VPATH)}internal/cast.h
+ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is.h
+ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/apple.h
+ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/clang.h
+ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/gcc.h
+ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/intel.h
+ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/msvc.h
+ractor.$(OBJEXT): {$(VPATH)}internal/compiler_is/sunpro.h
+ractor.$(OBJEXT): {$(VPATH)}internal/compiler_since.h
+ractor.$(OBJEXT): {$(VPATH)}internal/config.h
+ractor.$(OBJEXT): {$(VPATH)}internal/constant_p.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/rarray.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/rbasic.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/rbignum.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/rclass.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/rdata.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/rfile.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/rhash.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/robject.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/rregexp.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/rstring.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/rstruct.h
+ractor.$(OBJEXT): {$(VPATH)}internal/core/rtypeddata.h
+ractor.$(OBJEXT): {$(VPATH)}internal/ctype.h
+ractor.$(OBJEXT): {$(VPATH)}internal/dllexport.h
+ractor.$(OBJEXT): {$(VPATH)}internal/dosish.h
+ractor.$(OBJEXT): {$(VPATH)}internal/error.h
+ractor.$(OBJEXT): {$(VPATH)}internal/eval.h
+ractor.$(OBJEXT): {$(VPATH)}internal/event.h
+ractor.$(OBJEXT): {$(VPATH)}internal/fl_type.h
+ractor.$(OBJEXT): {$(VPATH)}internal/gc.h
+ractor.$(OBJEXT): {$(VPATH)}internal/glob.h
+ractor.$(OBJEXT): {$(VPATH)}internal/globals.h
+ractor.$(OBJEXT): {$(VPATH)}internal/has/attribute.h
+ractor.$(OBJEXT): {$(VPATH)}internal/has/builtin.h
+ractor.$(OBJEXT): {$(VPATH)}internal/has/c_attribute.h
+ractor.$(OBJEXT): {$(VPATH)}internal/has/cpp_attribute.h
+ractor.$(OBJEXT): {$(VPATH)}internal/has/declspec_attribute.h
+ractor.$(OBJEXT): {$(VPATH)}internal/has/extension.h
+ractor.$(OBJEXT): {$(VPATH)}internal/has/feature.h
+ractor.$(OBJEXT): {$(VPATH)}internal/has/warning.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/array.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/bignum.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/class.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/compar.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/complex.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/cont.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/dir.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/enum.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/enumerator.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/error.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/eval.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/file.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/gc.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/hash.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/io.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/load.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/marshal.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/numeric.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/object.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/parse.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/proc.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/process.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/random.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/range.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/rational.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/re.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/ruby.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/select.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/select/largesize.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/signal.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/sprintf.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/string.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/struct.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/thread.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/time.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/variable.h
+ractor.$(OBJEXT): {$(VPATH)}internal/intern/vm.h
+ractor.$(OBJEXT): {$(VPATH)}internal/interpreter.h
+ractor.$(OBJEXT): {$(VPATH)}internal/iterator.h
+ractor.$(OBJEXT): {$(VPATH)}internal/memory.h
+ractor.$(OBJEXT): {$(VPATH)}internal/method.h
+ractor.$(OBJEXT): {$(VPATH)}internal/module.h
+ractor.$(OBJEXT): {$(VPATH)}internal/newobj.h
+ractor.$(OBJEXT): {$(VPATH)}internal/rgengc.h
+ractor.$(OBJEXT): {$(VPATH)}internal/scan_args.h
+ractor.$(OBJEXT): {$(VPATH)}internal/special_consts.h
+ractor.$(OBJEXT): {$(VPATH)}internal/static_assert.h
+ractor.$(OBJEXT): {$(VPATH)}internal/stdalign.h
+ractor.$(OBJEXT): {$(VPATH)}internal/stdbool.h
+ractor.$(OBJEXT): {$(VPATH)}internal/symbol.h
+ractor.$(OBJEXT): {$(VPATH)}internal/token_paste.h
+ractor.$(OBJEXT): {$(VPATH)}internal/value.h
+ractor.$(OBJEXT): {$(VPATH)}internal/value_type.h
+ractor.$(OBJEXT): {$(VPATH)}internal/variable.h
+ractor.$(OBJEXT): {$(VPATH)}internal/warning_push.h
+ractor.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
+ractor.$(OBJEXT): {$(VPATH)}method.h
+ractor.$(OBJEXT): {$(VPATH)}missing.h
+ractor.$(OBJEXT): {$(VPATH)}node.h
+ractor.$(OBJEXT): {$(VPATH)}onigmo.h
+ractor.$(OBJEXT): {$(VPATH)}oniguruma.h
+ractor.$(OBJEXT): {$(VPATH)}ractor.c
+ractor.$(OBJEXT): {$(VPATH)}ractor.h
+ractor.$(OBJEXT): {$(VPATH)}ractor.rb
+ractor.$(OBJEXT): {$(VPATH)}ractor.rbinc
+ractor.$(OBJEXT): {$(VPATH)}ractor_pub.h
+ractor.$(OBJEXT): {$(VPATH)}ruby_assert.h
+ractor.$(OBJEXT): {$(VPATH)}ruby_atomic.h
+ractor.$(OBJEXT): {$(VPATH)}st.h
+ractor.$(OBJEXT): {$(VPATH)}subst.h
+ractor.$(OBJEXT): {$(VPATH)}thread.h
+ractor.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
+ractor.$(OBJEXT): {$(VPATH)}thread_native.h
+ractor.$(OBJEXT): {$(VPATH)}vm_core.h
+ractor.$(OBJEXT): {$(VPATH)}vm_debug.h
+ractor.$(OBJEXT): {$(VPATH)}vm_opts.h
+ractor.$(OBJEXT): {$(VPATH)}vm_sync.h
random.$(OBJEXT): $(hdrdir)/ruby.h
random.$(OBJEXT): $(hdrdir)/ruby/ruby.h
random.$(OBJEXT): $(top_srcdir)/internal/array.h
@@ -13569,6 +13789,8 @@ thread.$(OBJEXT): {$(VPATH)}mjit.h
thread.$(OBJEXT): {$(VPATH)}node.h
thread.$(OBJEXT): {$(VPATH)}onigmo.h
thread.$(OBJEXT): {$(VPATH)}oniguruma.h
+thread.$(OBJEXT): {$(VPATH)}ractor.h
+thread.$(OBJEXT): {$(VPATH)}ractor_pub.h
thread.$(OBJEXT): {$(VPATH)}ruby_assert.h
thread.$(OBJEXT): {$(VPATH)}ruby_atomic.h
thread.$(OBJEXT): {$(VPATH)}st.h
@@ -13581,7 +13803,9 @@ thread.$(OBJEXT): {$(VPATH)}thread_native.h
thread.$(OBJEXT): {$(VPATH)}thread_sync.c
thread.$(OBJEXT): {$(VPATH)}timev.h
thread.$(OBJEXT): {$(VPATH)}vm_core.h
+thread.$(OBJEXT): {$(VPATH)}vm_debug.h
thread.$(OBJEXT): {$(VPATH)}vm_opts.h
+thread.$(OBJEXT): {$(VPATH)}vm_sync.h
time.$(OBJEXT): $(hdrdir)/ruby.h
time.$(OBJEXT): $(hdrdir)/ruby/ruby.h
time.$(OBJEXT): $(top_srcdir)/internal/array.h
@@ -13942,15 +14166,23 @@ transcode.$(OBJEXT): {$(VPATH)}st.h
transcode.$(OBJEXT): {$(VPATH)}subst.h
transcode.$(OBJEXT): {$(VPATH)}transcode.c
transcode.$(OBJEXT): {$(VPATH)}transcode_data.h
+transient_heap.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
+transient_heap.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
+transient_heap.$(OBJEXT): $(CCAN_DIR)/list/list.h
+transient_heap.$(OBJEXT): $(CCAN_DIR)/str/str.h
transient_heap.$(OBJEXT): $(hdrdir)/ruby.h
transient_heap.$(OBJEXT): $(hdrdir)/ruby/ruby.h
+transient_heap.$(OBJEXT): $(top_srcdir)/internal/array.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/compilers.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/gc.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/hash.h
+transient_heap.$(OBJEXT): $(top_srcdir)/internal/imemo.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/sanitizers.h
+transient_heap.$(OBJEXT): $(top_srcdir)/internal/serial.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/static_assert.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/struct.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/variable.h
+transient_heap.$(OBJEXT): $(top_srcdir)/internal/vm.h
transient_heap.$(OBJEXT): $(top_srcdir)/internal/warnings.h
transient_heap.$(OBJEXT): {$(VPATH)}assert.h
transient_heap.$(OBJEXT): {$(VPATH)}backward/2/assume.h
@@ -13968,6 +14200,7 @@ transient_heap.$(OBJEXT): {$(VPATH)}debug.h
transient_heap.$(OBJEXT): {$(VPATH)}debug_counter.h
transient_heap.$(OBJEXT): {$(VPATH)}defines.h
transient_heap.$(OBJEXT): {$(VPATH)}gc.h
+transient_heap.$(OBJEXT): {$(VPATH)}id.h
transient_heap.$(OBJEXT): {$(VPATH)}id_table.h
transient_heap.$(OBJEXT): {$(VPATH)}intern.h
transient_heap.$(OBJEXT): {$(VPATH)}internal.h
@@ -14111,14 +14344,21 @@ transient_heap.$(OBJEXT): {$(VPATH)}internal/value_type.h
transient_heap.$(OBJEXT): {$(VPATH)}internal/variable.h
transient_heap.$(OBJEXT): {$(VPATH)}internal/warning_push.h
transient_heap.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
+transient_heap.$(OBJEXT): {$(VPATH)}method.h
transient_heap.$(OBJEXT): {$(VPATH)}missing.h
transient_heap.$(OBJEXT): {$(VPATH)}node.h
transient_heap.$(OBJEXT): {$(VPATH)}ruby_assert.h
+transient_heap.$(OBJEXT): {$(VPATH)}ruby_atomic.h
transient_heap.$(OBJEXT): {$(VPATH)}st.h
transient_heap.$(OBJEXT): {$(VPATH)}subst.h
+transient_heap.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
+transient_heap.$(OBJEXT): {$(VPATH)}thread_native.h
transient_heap.$(OBJEXT): {$(VPATH)}transient_heap.c
transient_heap.$(OBJEXT): {$(VPATH)}transient_heap.h
+transient_heap.$(OBJEXT): {$(VPATH)}vm_core.h
transient_heap.$(OBJEXT): {$(VPATH)}vm_debug.h
+transient_heap.$(OBJEXT): {$(VPATH)}vm_opts.h
+transient_heap.$(OBJEXT): {$(VPATH)}vm_sync.h
util.$(OBJEXT): $(hdrdir)/ruby.h
util.$(OBJEXT): $(hdrdir)/ruby/ruby.h
util.$(OBJEXT): $(top_srcdir)/internal/compilers.h
@@ -14472,6 +14712,8 @@ variable.$(OBJEXT): {$(VPATH)}missing.h
variable.$(OBJEXT): {$(VPATH)}node.h
variable.$(OBJEXT): {$(VPATH)}onigmo.h
variable.$(OBJEXT): {$(VPATH)}oniguruma.h
+variable.$(OBJEXT): {$(VPATH)}ractor.h
+variable.$(OBJEXT): {$(VPATH)}ractor_pub.h
variable.$(OBJEXT): {$(VPATH)}ruby_assert.h
variable.$(OBJEXT): {$(VPATH)}ruby_atomic.h
variable.$(OBJEXT): {$(VPATH)}st.h
@@ -14483,6 +14725,7 @@ variable.$(OBJEXT): {$(VPATH)}util.h
variable.$(OBJEXT): {$(VPATH)}variable.c
variable.$(OBJEXT): {$(VPATH)}variable.h
variable.$(OBJEXT): {$(VPATH)}vm_core.h
+variable.$(OBJEXT): {$(VPATH)}vm_debug.h
variable.$(OBJEXT): {$(VPATH)}vm_opts.h
version.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
version.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
@@ -14881,6 +15124,8 @@ vm.$(OBJEXT): {$(VPATH)}oniguruma.h
vm.$(OBJEXT): {$(VPATH)}probes.dmyh
vm.$(OBJEXT): {$(VPATH)}probes.h
vm.$(OBJEXT): {$(VPATH)}probes_helper.h
+vm.$(OBJEXT): {$(VPATH)}ractor.h
+vm.$(OBJEXT): {$(VPATH)}ractor_pub.h
vm.$(OBJEXT): {$(VPATH)}ruby_assert.h
vm.$(OBJEXT): {$(VPATH)}ruby_atomic.h
vm.$(OBJEXT): {$(VPATH)}st.h
@@ -14903,6 +15148,7 @@ vm.$(OBJEXT): {$(VPATH)}vm_insnhelper.c
vm.$(OBJEXT): {$(VPATH)}vm_insnhelper.h
vm.$(OBJEXT): {$(VPATH)}vm_method.c
vm.$(OBJEXT): {$(VPATH)}vm_opts.h
+vm.$(OBJEXT): {$(VPATH)}vm_sync.h
vm.$(OBJEXT): {$(VPATH)}vmtc.inc
vm_backtrace.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
vm_backtrace.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
@@ -15270,6 +15516,8 @@ vm_dump.$(OBJEXT): {$(VPATH)}method.h
vm_dump.$(OBJEXT): {$(VPATH)}missing.h
vm_dump.$(OBJEXT): {$(VPATH)}node.h
vm_dump.$(OBJEXT): {$(VPATH)}procstat_vm.c
+vm_dump.$(OBJEXT): {$(VPATH)}ractor.h
+vm_dump.$(OBJEXT): {$(VPATH)}ractor_pub.h
vm_dump.$(OBJEXT): {$(VPATH)}ruby_assert.h
vm_dump.$(OBJEXT): {$(VPATH)}ruby_atomic.h
vm_dump.$(OBJEXT): {$(VPATH)}st.h
@@ -15277,8 +15525,205 @@ vm_dump.$(OBJEXT): {$(VPATH)}subst.h
vm_dump.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
vm_dump.$(OBJEXT): {$(VPATH)}thread_native.h
vm_dump.$(OBJEXT): {$(VPATH)}vm_core.h
+vm_dump.$(OBJEXT): {$(VPATH)}vm_debug.h
vm_dump.$(OBJEXT): {$(VPATH)}vm_dump.c
vm_dump.$(OBJEXT): {$(VPATH)}vm_opts.h
+vm_sync.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
+vm_sync.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
+vm_sync.$(OBJEXT): $(CCAN_DIR)/list/list.h
+vm_sync.$(OBJEXT): $(CCAN_DIR)/str/str.h
+vm_sync.$(OBJEXT): $(hdrdir)/ruby.h
+vm_sync.$(OBJEXT): $(hdrdir)/ruby/ruby.h
+vm_sync.$(OBJEXT): $(top_srcdir)/internal/array.h
+vm_sync.$(OBJEXT): $(top_srcdir)/internal/compilers.h
+vm_sync.$(OBJEXT): $(top_srcdir)/internal/gc.h
+vm_sync.$(OBJEXT): $(top_srcdir)/internal/imemo.h
+vm_sync.$(OBJEXT): $(top_srcdir)/internal/serial.h
+vm_sync.$(OBJEXT): $(top_srcdir)/internal/static_assert.h
+vm_sync.$(OBJEXT): $(top_srcdir)/internal/vm.h
+vm_sync.$(OBJEXT): {$(VPATH)}addr2line.h
+vm_sync.$(OBJEXT): {$(VPATH)}assert.h
+vm_sync.$(OBJEXT): {$(VPATH)}backward/2/assume.h
+vm_sync.$(OBJEXT): {$(VPATH)}backward/2/attributes.h
+vm_sync.$(OBJEXT): {$(VPATH)}backward/2/bool.h
+vm_sync.$(OBJEXT): {$(VPATH)}backward/2/gcc_version_since.h
+vm_sync.$(OBJEXT): {$(VPATH)}backward/2/inttypes.h
+vm_sync.$(OBJEXT): {$(VPATH)}backward/2/limits.h
+vm_sync.$(OBJEXT): {$(VPATH)}backward/2/long_long.h
+vm_sync.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
+vm_sync.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
+vm_sync.$(OBJEXT): {$(VPATH)}config.h
+vm_sync.$(OBJEXT): {$(VPATH)}constant.h
+vm_sync.$(OBJEXT): {$(VPATH)}defines.h
+vm_sync.$(OBJEXT): {$(VPATH)}gc.h
+vm_sync.$(OBJEXT): {$(VPATH)}id.h
+vm_sync.$(OBJEXT): {$(VPATH)}id_table.h
+vm_sync.$(OBJEXT): {$(VPATH)}intern.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/anyargs.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/char.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/double.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/fixnum.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/gid_t.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/int.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/intptr_t.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/long.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/long_long.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/mode_t.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/off_t.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/pid_t.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/short.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/size_t.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/st_data_t.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/arithmetic/uid_t.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/array.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/assume.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/alloc_size.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/artificial.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/cold.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/const.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/constexpr.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/deprecated.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/diagnose_if.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/enum_extensibility.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/error.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/flag_enum.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/forceinline.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/format.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/maybe_unused.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/noalias.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/nodiscard.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/noexcept.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/noinline.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/nonnull.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/noreturn.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/pure.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/restrict.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/returns_nonnull.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/warning.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/attr/weakref.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/cast.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/apple.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/clang.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/gcc.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/intel.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/msvc.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_is/sunpro.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/compiler_since.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/compilers.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/config.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/constant_p.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rarray.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rbasic.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rbignum.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rclass.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rdata.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rfile.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rhash.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/robject.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rregexp.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rstring.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rstruct.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/core/rtypeddata.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/ctype.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/dllexport.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/dosish.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/error.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/eval.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/event.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/fl_type.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/gc.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/glob.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/globals.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/has/attribute.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/has/builtin.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/has/c_attribute.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/has/cpp_attribute.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/has/declspec_attribute.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/has/extension.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/has/feature.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/has/warning.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/imemo.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/array.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/bignum.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/class.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/compar.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/complex.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/cont.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/dir.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/enum.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/enumerator.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/error.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/eval.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/file.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/gc.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/hash.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/io.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/load.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/marshal.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/numeric.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/object.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/parse.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/proc.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/process.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/random.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/range.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/rational.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/re.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/ruby.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/select.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/select/largesize.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/signal.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/sprintf.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/string.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/struct.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/thread.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/time.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/variable.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/intern/vm.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/interpreter.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/iterator.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/memory.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/method.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/module.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/newobj.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/rgengc.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/scan_args.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/serial.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/special_consts.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/static_assert.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/stdalign.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/stdbool.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/symbol.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/token_paste.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/value.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/value_type.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/variable.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/vm.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/warning_push.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/warnings.h
+vm_sync.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
+vm_sync.$(OBJEXT): {$(VPATH)}iseq.h
+vm_sync.$(OBJEXT): {$(VPATH)}method.h
+vm_sync.$(OBJEXT): {$(VPATH)}missing.h
+vm_sync.$(OBJEXT): {$(VPATH)}node.h
+vm_sync.$(OBJEXT): {$(VPATH)}procstat_vm.c
+vm_sync.$(OBJEXT): {$(VPATH)}ractor.h
+vm_sync.$(OBJEXT): {$(VPATH)}ractor_pub.h
+vm_sync.$(OBJEXT): {$(VPATH)}ruby_assert.h
+vm_sync.$(OBJEXT): {$(VPATH)}ruby_atomic.h
+vm_sync.$(OBJEXT): {$(VPATH)}st.h
+vm_sync.$(OBJEXT): {$(VPATH)}subst.h
+vm_sync.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
+vm_sync.$(OBJEXT): {$(VPATH)}thread_native.h
+vm_sync.$(OBJEXT): {$(VPATH)}vm_core.h
+vm_sync.$(OBJEXT): {$(VPATH)}vm_debug.h
+vm_sync.$(OBJEXT): {$(VPATH)}vm_opts.h
+vm_sync.$(OBJEXT): {$(VPATH)}vm_sync.c
+vm_sync.$(OBJEXT): {$(VPATH)}vm_sync.h
vm_trace.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
vm_trace.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
vm_trace.$(OBJEXT): $(CCAN_DIR)/list/list.h
diff --git a/compile.c b/compile.c
index daa1485..96b8bc6 100644
--- a/compile.c
+++ b/compile.c
@@ -1601,6 +1601,16 @@ iseq_block_param_id_p(const rb_iseq_t *iseq, ID id, int *pidx, int *plevel)
}
static void
+check_access_outer_variables(const rb_iseq_t *iseq, int level)
+{
+ // set access_outer_variables
+ for (int i=0; i<level; i++) {
+ iseq->body->access_outer_variables = TRUE;
+ iseq = iseq->body->parent_iseq;
+ }
+}
+
+static void
iseq_add_getlocal(rb_iseq_t *iseq, LINK_ANCHOR *const seq, int line, int idx, int level)
{
if (iseq_local_block_param_p(iseq, idx, level)) {
@@ -1609,6 +1619,7 @@ iseq_add_getlocal(rb_iseq_t *iseq, LINK_ANCHOR *const seq, int line, int idx, in
else {
ADD_INSN2(seq, line, getlocal, INT2FIX((idx) + VM_ENV_DATA_SIZE - 1), INT2FIX(level));
}
+ check_access_outer_variables(iseq, level);
}
static void
@@ -1620,6 +1631,7 @@ iseq_add_setlocal(rb_iseq_t *iseq, LINK_ANCHOR *const seq, int line, int idx, in
else {
ADD_INSN2(seq, line, setlocal, INT2FIX((idx) + VM_ENV_DATA_SIZE - 1), INT2FIX(level));
}
+ check_access_outer_variables(iseq, level);
}
@@ -8222,6 +8234,8 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, in
if (popped) {
ADD_INSN(ret, line, pop);
}
+
+ iseq->body->access_outer_variables = TRUE;
break;
}
case NODE_LVAR:{
@@ -8680,7 +8694,8 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, in
VALUE flag = INT2FIX(excl);
const NODE *b = node->nd_beg;
const NODE *e = node->nd_end;
- if (optimizable_range_item_p(b) && optimizable_range_item_p(e)) {
+ // TODO: Ractor can not use cached Range objects
+ if (0 && optimizable_range_item_p(b) && optimizable_range_item_p(e)) {
if (!popped) {
VALUE bv = nd_type(b) == NODE_LIT ? b->nd_lit : Qnil;
VALUE ev = nd_type(e) == NODE_LIT ? e->nd_lit : Qnil;
diff --git a/cont.c b/cont.c
index 654fc55..efff86f 100644
--- a/cont.c
+++ b/cont.c
@@ -28,6 +28,7 @@
#include "mjit.h"
#include "vm_core.h"
#include "id_table.h"
+#include "ractor.h"
static const int DEBUG = 0;
@@ -808,14 +809,15 @@ static inline void
ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
{
rb_execution_context_t *ec = &fiber->cont.saved_ec;
-
- ruby_current_execution_context_ptr = th->ec = ec;
+ rb_ractor_set_current_ec(th->ractor, th->ec = ec);
+ // ruby_current_execution_context_ptr = th->ec = ec;
/*
* timer-thread may set trap interrupt on previous th->ec at any time;
* ensure we do not delay (or lose) the trap interrupt handling.
*/
- if (th->vm->main_thread == th && rb_signal_buff_size() > 0) {
+ if (th->vm->ractor.main_thread == th &&
+ rb_signal_buff_size() > 0) {
RUBY_VM_SET_TRAP_INTERRUPT(ec);
}
@@ -1873,7 +1875,7 @@ rb_fiber_start(void)
enum ruby_tag_type state;
int need_interrupt = TRUE;
- VM_ASSERT(th->ec == ruby_current_execution_context_ptr);
+ VM_ASSERT(th->ec == GET_EC());
VM_ASSERT(FIBER_RESUMED_P(fiber));
if (fiber->blocking) {
@@ -1964,13 +1966,15 @@ rb_threadptr_root_fiber_release(rb_thread_t *th)
/* ignore. A root fiber object will free th->ec */
}
else {
+ rb_execution_context_t *ec = GET_EC();
+
VM_ASSERT(th->ec->fiber_ptr->cont.type == FIBER_CONTEXT);
VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);
- fiber_free(th->ec->fiber_ptr);
- if (th->ec == ruby_current_execution_context_ptr) {
- ruby_current_execution_context_ptr = NULL;
+ if (th->ec == ec) {
+ rb_ractor_set_current_ec(th->ractor, NULL);
}
+ fiber_free(th->ec->fiber_ptr);
th->ec = NULL;
}
}
diff --git a/debug.c b/debug.c
index 1525231..cc6a98a 100644
--- a/debug.c
+++ b/debug.c
@@ -26,6 +26,7 @@
#include "vm_debug.h"
#include "vm_callinfo.h"
#include "ruby/thread_native.h"
+#include "ractor.h"
/* This is the only place struct RIMemo is actually used */
struct RIMemo {
@@ -422,7 +423,6 @@ ruby_debug_log(const char *file, int line, const char *func_name, const char *fm
len += r;
}
-#if 0 // not yet
// ractor information
if (GET_VM()->ractor.cnt > 1) {
rb_ractor_t *cr = GET_RACTOR();
@@ -433,7 +433,6 @@ ruby_debug_log(const char *file, int line, const char *func_name, const char *fm
len += r;
}
}
-#endif
// thread information
if (!rb_thread_alone()) {
diff --git a/doc/ractor.md b/doc/ractor.md
new file mode 100644
index 0000000..cb8f6aa
--- /dev/null
+++ b/doc/ractor.md
@@ -0,0 +1,883 @@
+# Ractor - Ruby's Actor-like concurrent abstraction
+
+Ractor is designed to provide a parallel execution feature for Ruby without thread-safety concerns.
+
+## Summary
+
+### Multiple Ractors in an interpreter process
+
+You can make multiple Ractors and they run in parallel.
+
+* Ractors run in parallel.
+* Interpreter invokes with the first Ractor (called *main Ractor*).
+* If main Ractor terminated, all Ractors receive terminate request like Threads (if main thread (first invoked Thread), Ruby interpreter sends all running threads to terminate execution).
+* Each Ractor has 1 or more Threads.
+ * Threads in a Ractor share a Ractor-wide global lock like GIL (GVL in MRI terminology), so they can't run in parallel (without releasing the GVL explicitly at C-level).
+ * The overhead of creating a Ractor is similar to overhead of one Thread creation.
+
+### Limited sharing
+
+Ractors don't share everything, unlike threads.
+
+* Most of objects are *Unshareable objects*, so you don't need to care about thread-safety problem which is caused by sharing.
+* Some objects are *Shareable objects*.
+ * Immutable objects: frozen object which doesn't refer unshareable-objects.
+ * `i = 123`: `i` is an immutable object.
+ * `s = "str".freeze`: `s` is an immutable object.
+ * `a = [1, [2], 3].freeze`: `a` is not an immutable object because `a` refer unshareable-object `[2]` (which is not frozen).
+ * Class/Module objects
+ * Special shareable objects
+ * Ractor object itself.
+ * And more...
+
+### Two-types communication between Ractors
+
+Ractors communicate each other and synchronize the execution by message exchanging between Ractors. There are two message exchange protocol: push type (message passing) and pull type.
+
+* Push type message passing: `Ractor#send(obj)` and `Ractor.receive()` pair.
+ * Sender ractor passes the `obj` to receiver Ractor.
+ * Sender knows a destination Ractor (the receiver of `r.send(obj)`) and receiver does not know the sender (it accepts all messages from any Ractor).
+ * Receiver has infinite queue and sender enqueues the message. Sender doesn't block to put message.
+ * This type is based on actor model
+* Pull type communication: `Ractor.yield(obj)` and `Ractor#take()` pair.
+ * Sender Ractor declares to yield the `obj` and receiver Ractor takes it.
+ * Sender doesn't know a destination Ractor and receiver knows the sender (the receiver of `r.take`).
+ * Sender or receiver will block if there is no other side.
+
+### Copy & Move semantics to send messages
+
+To send unshareable objects as messages, objects are copied or moved.
+
+* Copy: use deep-copy (like dRuby)
+* Move: move membership
+ * Sender can not access to the moved object after moving the object.
+ * Guarantee that at least only 1 Ractor can access the object.
+
+### Thread-safety
+
+Ractor helps to write a thread-safe program, but we can make thread-unsafe programs with Ractors.
+
+* GOOD: Sharing limitation
+ * Most of objects are unshareable, so we can't make data-racy and race-conditional programs.
+ * Shareable objects are protected by an interpreter or locking mechanism.
+* BAD: Class/Module can violate this assumption
+ * To make compatible with old behavior, classes and modules can introduce data-race and so on.
+ * Ruby programmer should take care if they modify class/module objects on multi Ractor programs.
+* BAD: Ractor can't solve all thread-safety problems
+ * There are several blocking operations (waiting send, waiting yield and waiting take) so you can make a program which has dead-lock and live-lock issues.
+ * Some kind of shareable objects can introduce transactions (STM, for example). However, misusing transactions will generate inconsistent state.
+
+Without Ractor, we need to trace all of state-mutations to debug thread-safety issues.
+With Ractor, you can concentrate on the suspicious code which uses shareable objects.
+
+## Creation and termination
+
+### `Ractor.new`
+
+* `Ractor.new do expr end` generates another Ractor.
+
+```ruby
+# Ractor.new with a block creates new Ractor
+r = Ractor.new do
+ # This block will be run in parallel
+end
+
+# You can name a Ractor with `name:` argument.
+r = Ractor.new name: 'test-name' do
+end
+
+# and Ractor#name returns its name.
+r.name #=> 'test-name'
+```
+
+### Given block isolation
+
+The Ractor execute given `expr` in a given block.
+Given block will be isolated from outer scope by `Proc#isolate`.
+
+```ruby
+# To prevent sharing unshareable objects between ractors,
+# block outer-variables, `self` and other information are isolated.
+# Given block will be isolated by `Proc#isolate` method.
+# `Proc#isolate` is called at Ractor creation timing (`Ractor.new` is called)
+# and it can cause an error if block accesses outer variables.
+
+begin
+ a = true
+ r = Ractor.new do
+ a #=> ArgumentError because this block accesses `a`.
+ end
+ r.take # see later
+rescue ArgumentError
+end
+```
+
+* The `self` of the given block is `Ractor` object itself.
+
+```ruby
+r = Ractor.new do
+ self.object_id
+end
+r.take == self.object_id #=> false
+```
+
+Arguments passed to `Ractor.new()` become block parameters for the given block. However, the interpreter does not pass the parameter object references, but sends them as messages (see below for details).
+
+```ruby
+r = Ractor.new 'ok' do |msg|
+ msg #=> 'ok'
+end
+r.take #=> 'ok'
+```
+
+```ruby
+# almost similar to the last example
+r = Ractor.new do
+ msg = Ractor.recv
+ msg
+end
+r.send 'ok'
+r.take #=> 'ok'
+```
+
+### An execution result of given block
+
+Return value of the given block becomes an outgoing message (see below for details).
+
+```ruby
+r = Ractor.new do
+ 'ok'
+end
+r.take #=> `ok`
+```
+
+```ruby
+# almost similar to the last example
+r = Ractor.new do
+ Ractor.yield 'ok'
+end
+r.take #=> 'ok'
+```
+
+Error in the given block will be propagated to the receiver of an outgoing message.
+
+```ruby
+r = Ractor.new do
+ raise 'ok' # exception will be transferred receiver
+end
+
+begin
+ r.take
+rescue Ractor::RemoteError => e
+ e.cause.class #=> RuntimeError
+ e.cause.message #=> 'ok'
+ e.ractor #=> r
+end
+```
+
+## Communication between Ractors
+
+Communication between Ractors is achieved by sending and receiving messages.
+
+* (1) Message sending/receiving
+ * (1-1) push type send/recv (sender knows receiver). similar to the Actor model.
+ * (1-2) pull type yield/take (receiver knows sender).
+* (2) Using shareable container objects (not implemented yet)
+
+Users can control blocking on (1), but should not control on (2) (only manage as critical section).
+
+* (1-1) send/recv (push type)
+ * `Ractor#send(obj)` (`Ractor#<<(obj)` is an aliases) send a message to the Ractor's incoming port. Incoming port is connected to the infinite size incoming queue so `Ractor#send` will never block.
+ * `Ractor.recv` dequeues a message from its own incoming queue. If the incoming queue is empty, the `Ractor.recv` call will block.
+* (1-2) yield/take (pull type)
+ * `Ractor.yield(obj)` send an message to a Ractor which are calling `Ractor#take` via outgoing port . If no Ractors are waiting for it, the `Ractor.yield(obj)` will block. If multiple Ractors are waiting for `Ractor.yield(obj)`, only one Ractor can receive the message.
+ * `Ractor#take` receives a message which is waiting by `Ractor.yield(obj)` method from the specified Ractor. If the Ractor does not call `Ractor.yield` yet, the `Ractor#take` call will block.
+* `Ractor.select()` can wait for the success of `take`, `yield` and `recv`.
+* You can close the incoming port or outgoing port.
+ * You can close then with `Ractor#close_incoming` and `Ractor#close_outgoing`.
+ * If the incoming port is closed for a Ractor, you can't `send` to the Ractor. If `Ractor.recv` is blocked for the closed incoming port, then it will raise an exception.
+ * If the outgoing port is closed for a Ractor, you can't call `Ractor#take` and `Ractor.yield` on the Ractor. If `Ractor#take` is blocked for the Ractor, then it will raise an exception.
+ * When a Ractor is terminated, the Ractor's ports are closed.
+* There are 3 methods to send an object as a message
+ * (1) Send a reference: Send a shareable object, send only a reference to the object (fast)
+ * (2) Copy an object: Send an unshareable object by copying deeply and send copied object (slow). Note that you can not send an object which is not support deep copy. Current implementation uses Marshal protocol to get deep copy.
+ * (3) Move an object: Send an unshareable object reference with a membership. Sender Ractor can not access moved objects anymore (raise an exception). Current implementation makes new object as a moved object for receiver Ractor and copy references of sending object to moved object.
+ * You can choose "Copy" or "Move" with a keyword argument for `Ractor#send(obj)` and `Ractor.yield(obj)` (default is "Copy").
+
+### Sending/Receiving ports
+
+Each Ractor has _incoming-port_ and _outgoing-port_. Incoming-port is connected to the infinite sized incoming queue.
+
+```
+ Ractor r
+ +-------------------------------------------+
+ | incoming outgoing |
+ | port port |
+ r.send(obj) ->*->[incoming queue] Ractor.yield(obj) ->*-> r.take
+ | | |
+ | v |
+ | Ractor.recv |
+ +-------------------------------------------+
+
+
+Connection example: r2.send obj on r1, Ractor.recv on r2
+ +----+ +----+
+ * r1 |-----* r2 *
+ +----+ +----+
+
+
+Connection example: Ractor.yield(obj) on r1, r1.take on r2
+ +----+ +----+
+ * r1 *------ r2 *
+ +----+ +----+
+
+Connection example: Ractor.yield(obj) on r1 and r2,
+ and waiting for both simultaneously by Ractor.select(r1, r2)
+
+ +----+
+ * r1 *------+
+ +----+ |
+ +----- Ractor.select(r1, r2)
+ +----+ |
+ * r2 *------|
+ +----+
+```
+
+```ruby
+ r = Ractor.new do
+ msg = Ractor.recv # Receive from r's incoming queue
+ msg # send back msg as block return value
+ end
+ r.send 'ok' # Send 'ok' to r's incoming port -> incoming queue
+ r.take # Receive from r's outgoing port
+```
+
+```ruby
+ # Actual argument 'ok' for `Ractor.new()` will be send to created Ractor.
+ r = Ractor.new 'ok' do |msg|
+ # Values for formal parameters will be received from incoming queue.
+ # Similar to: msg = Ractor.recv
+
+ msg # Return value of the given block will be sent via outgoing port
+ end
+
+ # receive from the r's outgoing port.
+ r.take #=> `ok`
+```
+
+### Wait for multiple Ractors with `Ractor.select`
+
+You can wait multiple Ractor's `yield` with `Ractor.select(*ractors)`.
+The return value of `Ractor.select()` is `[r, msg]` where `r` is yielding Ractor and `msg` is yielded message.
+
+Wait for a single ractor (same as `Ractor.take`):
+
+```ruby
+r1 = Ractor.new{'r1'}
+
+r, obj = Ractor.select(r1)
+r == r1 and obj == 'r1' #=> true
+```
+
+Wait for two ractors:
+
+```ruby
+r1 = Ractor.new{'r1'}
+r2 = Ractor.new{'r2'}
+rs = [r1, r2]
+as = []
+
+# Wait for r1 or r2's Ractor.yield
+r, obj = Ractor.select(*rs)
+rs.delete(r)
+as << obj
+
+# Second try (rs only contain not-closed ractors)
+r, obj = Ractor.select(*rs)
+rs.delete(r)
+as << obj
+as.sort == ['r1', 'r2'] #=> true
+```
+
+Complex example:
+
+```ruby
+ pipe = Ractor.new do
+ loop do
+ Ractor.yield Ractor.recv
+ end
+ end
+
+ RN = 10
+ rs = RN.times.map{|i|
+ Ractor.new pipe, i do |pipe, i|
+ msg = pipe.take
+ msg # ping-pong
+ end
+ }
+ RN.times{|i|
+ pipe << i
+ }
+ RN.times.map{
+ r, n = Ractor.select(*rs)
+ rs.delete r
+ n
+ }.sort #=> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+```
+
+Multiple Ractors can send to one Ractor.
+
+```ruby
+# Create 10 ractors and they send objects to pipe ractor.
+# pipe ractor yield received objects
+
+ pipe = Ractor.new do
+ loop do
+ Ractor.yield Ractor.recv
+ end
+ end
+
+ RN = 10
+ rs = RN.times.map{|i|
+ Ractor.new pipe, i do |pipe, i|
+ pipe << i
+ end
+ }
+
+ RN.times.map{
+ pipe.take
+ }.sort #=> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+```
+
+TODO: Current `Ractor.select()` has same issue of `select(2)`, so this interface should be refined.
+
+TODO: `select` syntax of the Go language uses a round-robin technique to make scheduling fair. Now `Ractor.select()` doesn't use it.
+
+### Closing Ractor's ports
+
+* `Ractor#close_incoming/outgoing` close incoming/outgoing ports (similar to `Queue#close`).
+* `Ractor#close_incoming`
+ * `r.send(obj) ` where `r`'s incoming port is closed, will raise an exception.
+ * When the incoming queue is empty and incoming port is closed, `Ractor.recv` raise an exception. If incoming queue is not empty, it dequeues an object.
+* `Ractor#close_outgoing`
+ * `Ractor.yield` on a Ractor which closed the outgoing port, it will raise an exception.
+ * `Ractor#take` for a Ractor which closed the outgoing port, it will raise an exception. If `Ractor#take` is blocking, it will raise an exception.
+* When a Ractor terminates, the ports are closed automatically.
+ * Return value of the Ractor's block will be yield as `Ractor.yield(ret_val)`, even if the implementation terminate the based native thread.
+
+
+Example (try to take from closed Ractor):
+
+```ruby
+ r = Ractor.new do
+ 'finish'
+ end
+ r.take # success (will return 'finish')
+ begin
+ o = r.take # try to take from closed Ractor
+ rescue Ractor::ClosedError
+ 'ok'
+ else
+ "ng: #{o}"
+ end
+```
+
+Example (try to send to closed (terminated) Ractor):
+
+```ruby
+ r = Ractor.new do
+ end
+
+ r.take # wait terminate
+
+ begin
+ r.send(1)
+ rescue Ractor::ClosedError
+ 'ok'
+ else
+ 'ng'
+ end
+```
+
+When multiple Ractors waiting for `Ractor.yield()`, `Ractor#close_outgoing` will cancel all blocking by raise an exception (`ClosedError`).
+
+### Send a message by copying
+
+`Ractor#send(obj)` or `Ractor.yield(obj)` copy `obj` deeply if `obj` is an unshareable object.
+
+```ruby
+obj = 'str'.dup
+r = Ractor.new obj do |msg|
+ # return received msg's object_id
+ msg.object_id
+end
+
+obj.object_id == r.take #=> false
+```
+
+Current implementation uses Marshal protocol (similar to dRuby). We can not send Marshal unsupported objects.
+
+```ruby
+obj = Thread.new{}
+begin
+ Ractor.new obj do |msg|
+ msg
+ end
+rescue TypeError => e
+ e.message #=> no _dump_data is defined for class Thread
+else
+ 'ng' # unreachable here
+end
+```
+
+### Send a message by moving
+
+`Ractor#send(obj, move: true)` or `Ractor.yield(obj, move: true)` move `obj` to the destination Ractor.
+If the source Ractor touches the moved object (for example, call the method like `obj.foo()`), it will be an error.
+
+```ruby
+# move with Ractor#send
+r = Ractor.new do
+ obj = Ractor.recv
+ obj << ' world'
+end
+
+str = 'hello'
+r.send str, move: true
+modified = r.take #=> 'hello world'
+
+# str is moved, and accessing str from this Ractor is prohibited
+
+begin
+ # Error because it touches moved str.
+ str << ' exception' # raise Ractor::MovedError
+rescue Ractor::MovedError
+ modified #=> 'hello world'
+else
+ raise 'unreachable'
+end
+```
+
+```ruby
+ # move with Ractor.yield
+ r = Ractor.new do
+ obj = 'hello'
+ Ractor.yield obj, move: true
+ obj << 'world' # raise Ractor::MovedError
+ end
+
+ str = r.take
+ begin
+ r.take
+ rescue Ractor::RemoteError
+ p str #=> "hello"
+ end
+```
+
+Now only `T_FILE`, `T_STRING` and `T_ARRAY` objects are supported.
+
+* `T_FILE` (`IO`, `File`): support to send accepted socket etc.
+* `T_STRING` (`String`): support to send a huge string without copying (fast).
+* `T_ARRAY` (`Array`): support to send a huge Array without re-allocating the array's buffer. However, all of the objects referred to from the array should be moved, so it is not so fast.
+
+To achieve the access prohibition for moved objects, _class replacement_ technique is used to implement it.
+
+### Shareable objects
+
+The following objects are shareable.
+
+* Immutable objects
+ * Small integers, some symbols, `true`, `false`, `nil` (a.k.a. `SPECIAL_CONST_P()` objects in internal)
+ * Frozen native objects
+ * Numeric objects: `Float`, `Complex`, `Rational`, big integers (`T_BIGNUM` in internal)
+ * All Symbols.
+ * Frozen `String` and `Regexp` objects (which does not have instance variables)
+ * In future, "Immutable" objects (frozen and only refer shareable objects) will be supported (TODO: introduce an `immutable` flag for objects?)
+* Class, Module objects (`T_CLASS`, `T_MODULE` and `T_ICLASS` in internal)
+* `Ractor` and other objects which care about synchronization.
+
+Implementation: Now shareable objects (`RVALUE`) have `FL_SHAREABLE` flag. This flag can be added lazily.
+
+```ruby
+ r = Ractor.new do
+ while v = Ractor.recv
+ Ractor.yield v
+ end
+ end
+
+ class C
+ end
+
+ shareable_objects = [1, :sym, 'xyzzy'.to_sym, 'frozen'.freeze, 1+2r, 3+4i, /regexp/, C]
+
+ shareable_objects.map{|o|
+ r << o
+ o2 = r.take
+ [o, o.object_id == o2.object_id]
+ }
+  #=> [[1, true], [:sym, true], [:xyzzy, true], ["frozen", true], [(3/1), true], [(3+4i), true], [/regexp/, true], [C, true]]
+
+ unshareable_objects = ['mutable str'.dup, [:array], {hash: true}].map{|o|
+ r << o
+ o2 = r.take
+ [o, o.object_id == o2.object_id]
+ }
+  #=> "[[\"mutable str\", false], [[:array], false], [{:hash=>true}, false]]]"
+```
+
+## Language changes to isolate unshareable objects between Ractors
+
+To isolate unshareable objects between Ractors, we introduced additional language semantics on multi-Ractor.
+
+Note that without using Ractors, these additional semantics are not needed (100% compatible with Ruby 2).
+
+### Global variables
+
+Only the main Ractor (the Ractor created at interpreter startup) can access global variables.
+
+```ruby
+ $gv = 1
+ r = Ractor.new do
+ $gv
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message #=> 'can not access global variables from non-main Ractors'
+ end
+```
+
+### Instance variables of shareable objects
+
+Only main Ractor can access instance variables of shareable objects.
+
+```ruby
+ class C
+ @iv = 'str'
+ end
+
+ r = Ractor.new do
+ class C
+ p @iv
+ end
+ end
+
+
+ begin
+ r.take
+ rescue => e
+ e.class #=> RuntimeError
+ end
+```
+
+```ruby
+ shared = Ractor.new{}
+ shared.instance_variable_set(:@iv, 'str')
+
+ r = Ractor.new shared do |shared|
+ p shared.instance_variable_get(:@iv)
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message #=> can not access instance variables of shareable objects from non-main Ractors
+ end
+```
+
+Note that instance variables for class/module objects are also prohibited on Ractors.
+
+### Class variables
+
+Only main Ractor can access class variables.
+
+```ruby
+ class C
+ @@cv = 'str'
+ end
+
+ r = Ractor.new do
+ class C
+ p @@cv
+ end
+ end
+
+
+ begin
+ r.take
+ rescue => e
+ e.class #=> RuntimeError
+ end
+```
+
+### Constants
+
+Only main Ractor can read constants which refer to the unshareable object.
+
+```ruby
+ class C
+ CONST = 'str'
+ end
+ r = Ractor.new do
+ C::CONST
+ end
+ begin
+ r.take
+ rescue => e
+ e.class #=> NameError
+ end
+```
+
+Only main Ractor can define constants which refer to the unshareable object.
+
+```ruby
+ class C
+ end
+ r = Ractor.new do
+ C::CONST = 'str'
+ end
+ begin
+ r.take
+ rescue => e
+ e.class #=> NameError
+ end
+```
+
+## Implementation note
+
+* Each Ractor has its own thread, which means each Ractor has at least 1 native thread.
+* Each Ractor has its own ID (`rb_ractor_t::id`).
+ * On debug mode, all unshareable objects are labeled with current Ractor's id, and it is checked to detect unshareable object leak (access an object from different Ractor) in VM.
+
+## Examples
+
+### Traditional Ring example in Actor-model
+
+```ruby
+RN = 1000
+CR = Ractor.current
+
+r = Ractor.new do
+ p Ractor.recv
+ CR << :fin
+end
+
+RN.times{
+ Ractor.new r do |next_r|
+ next_r << Ractor.recv
+ end
+}
+
+p :setup_ok
+r << 1
+p Ractor.recv
+```
+
+### Fork-join
+
+```ruby
+def fib n
+ if n < 2
+ 1
+ else
+ fib(n-2) + fib(n-1)
+ end
+end
+
+RN = 10
+rs = (1..RN).map do |i|
+ Ractor.new i do |i|
+ [i, fib(i)]
+ end
+end
+
+until rs.empty?
+ r, v = Ractor.select(*rs)
+ rs.delete r
+ p answer: v
+end
+```
+
+### Worker pool
+
+```ruby
+require 'prime'
+
+pipe = Ractor.new do
+ loop do
+ Ractor.yield Ractor.recv
+ end
+end
+
+N = 1000
+RN = 10
+workers = (1..RN).map do
+ Ractor.new pipe do |pipe|
+ while n = pipe.take
+ Ractor.yield [n, n.prime?]
+ end
+ end
+end
+
+(1..N).each{|i|
+ pipe << i
+}
+
+pp (1..N).map{
+ _r, (n, b) = Ractor.select(*workers)
+ [n, b]
+}.sort_by{|(n, b)| n}
+```
+
+### Pipeline
+
+```ruby
+# pipeline with yield/take
+r1 = Ractor.new do
+ 'r1'
+end
+
+r2 = Ractor.new r1 do |r1|
+ r1.take + 'r2'
+end
+
+r3 = Ractor.new r2 do |r2|
+ r2.take + 'r3'
+end
+
+p r3.take #=> 'r1r2r3'
+```
+
+```ruby
+# pipeline with send/recv
+
+r3 = Ractor.new Ractor.current do |cr|
+ cr.send Ractor.recv + 'r3'
+end
+
+r2 = Ractor.new r3 do |r3|
+ r3.send Ractor.recv + 'r2'
+end
+
+r1 = Ractor.new r2 do |r2|
+ r2.send Ractor.recv + 'r1'
+end
+
+r1 << 'r0'
+p Ractor.recv #=> "r0r1r2r3"
+```
+
+### Supervise
+
+```ruby
+# ring example again
+
+r = Ractor.current
+(1..10).map{|i|
+ r = Ractor.new r, i do |r, i|
+ r.send Ractor.recv + "r#{i}"
+ end
+}
+
+r.send "r0"
+p Ractor.recv #=> "r0r10r9r8r7r6r5r4r3r2r1"
+```
+
+```ruby
+# ring example with an error
+
+r = Ractor.current
+rs = (1..10).map{|i|
+ r = Ractor.new r, i do |r, i|
+ loop do
+ msg = Ractor.recv
+ raise if /e/ =~ msg
+ r.send msg + "r#{i}"
+ end
+ end
+}
+
+r.send "r0"
+p Ractor.recv #=> "r0r10r9r8r7r6r5r4r3r2r1"
+r.send "r0"
+p Ractor.select(*rs, Ractor.current) #=> [:recv, "r0r10r9r8r7r6r5r4r3r2r1"]
+[:recv, "r0r10r9r8r7r6r5r4r3r2r1"]
+r.send "e0"
+p Ractor.select(*rs, Ractor.current)
+#=>
+#<Thread:0x000056262de28bd8 run> terminated with exception (report_on_exception is true):
+Traceback (most recent call last):
+ 2: from /home/ko1/src/ruby/trunk/test.rb:7:in `block (2 levels) in <main>'
+ 1: from /home/ko1/src/ruby/trunk/test.rb:7:in `loop'
+/home/ko1/src/ruby/trunk/test.rb:9:in `block (3 levels) in <main>': unhandled exception
+Traceback (most recent call last):
+ 2: from /home/ko1/src/ruby/trunk/test.rb:7:in `block (2 levels) in <main>'
+ 1: from /home/ko1/src/ruby/trunk/test.rb:7:in `loop'
+/home/ko1/src/ruby/trunk/test.rb:9:in `block (3 levels) in <main>': unhandled exception
+ 1: from /home/ko1/src/ruby/trunk/test.rb:21:in `<main>'
+<internal:ractor>:69:in `select': thrown by remote Ractor. (Ractor::RemoteError)
+```
+
+```ruby
+# resend non-error message
+
+r = Ractor.current
+rs = (1..10).map{|i|
+ r = Ractor.new r, i do |r, i|
+ loop do
+ msg = Ractor.recv
+ raise if /e/ =~ msg
+ r.send msg + "r#{i}"
+ end
+ end
+}
+
+r.send "r0"
+p Ractor.recv #=> "r0r10r9r8r7r6r5r4r3r2r1"
+r.send "r0"
+p Ractor.select(*rs, Ractor.current)
+[:recv, "r0r10r9r8r7r6r5r4r3r2r1"]
+msg = 'e0'
+begin
+ r.send msg
+ p Ractor.select(*rs, Ractor.current)
+rescue Ractor::RemoteError
+ msg = 'r0'
+ retry
+end
+
+#=> <internal:ractor>:100:in `send': The incoming-port is already closed (Ractor::ClosedError)
+# because r == r[-1] is terminated.
+```
+
+```ruby
+# ring example with supervisor and re-start
+
+def make_ractor r, i
+ Ractor.new r, i do |r, i|
+ loop do
+ msg = Ractor.recv
+ raise if /e/ =~ msg
+ r.send msg + "r#{i}"
+ end
+ end
+end
+
+r = Ractor.current
+rs = (1..10).map{|i|
+ r = make_ractor(r, i)
+}
+
+msg = 'e0' # error causing message
+begin
+ r.send msg
+ p Ractor.select(*rs, Ractor.current)
+rescue Ractor::RemoteError
+ r = rs[-1] = make_ractor(rs[-2], rs.size-1)
+ msg = 'x0'
+ retry
+end
+
+#=> [:recv, "x0r9r9r8r7r6r5r4r3r2r1"]
+```
diff --git a/eval.c b/eval.c
index e4fec3d..0b51b83 100644
--- a/eval.c
+++ b/eval.c
@@ -35,6 +35,7 @@
#include "probes_helper.h"
#include "ruby/vm.h"
#include "vm_core.h"
+#include "ractor.h"
NORETURN(void rb_raise_jump(VALUE, VALUE));
void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
@@ -227,7 +228,7 @@ rb_ec_cleanup(rb_execution_context_t *ec, volatile int ex)
th->status = THREAD_KILLED;
errs[0] = ec->errinfo;
- SAVE_ROOT_JMPBUF(th, rb_thread_terminate_all());
+ SAVE_ROOT_JMPBUF(th, rb_ractor_terminate_all());
}
else {
switch (step) {
diff --git a/ext/ripper/depend b/ext/ripper/depend
index 519687a..bfd6738 100644
--- a/ext/ripper/depend
+++ b/ext/ripper/depend
@@ -52,6 +52,20 @@ ripper.E: ripper.c
ripper.o: $(RUBY_EXTCONF_H)
ripper.o: $(arch_hdrdir)/ruby/config.h
ripper.o: $(hdrdir)/ruby.h
+ripper.o: $(hdrdir)/ruby/assert.h
+ripper.o: $(hdrdir)/ruby/backward.h
+ripper.o: $(hdrdir)/ruby/backward/2/assume.h
+ripper.o: $(hdrdir)/ruby/backward/2/attributes.h
+ripper.o: $(hdrdir)/ruby/backward/2/bool.h
+ripper.o: $(hdrdir)/ruby/backward/2/gcc_version_since.h
+ripper.o: $(hdrdir)/ruby/backward/2/inttypes.h
+ripper.o: $(hdrdir)/ruby/backward/2/limits.h
+ripper.o: $(hdrdir)/ruby/backward/2/long_long.h
+ripper.o: $(hdrdir)/ruby/backward/2/stdalign.h
+ripper.o: $(hdrdir)/ruby/backward/2/stdarg.h
+ripper.o: $(hdrdir)/ruby/defines.h
+ripper.o: $(hdrdir)/ruby/encoding.h
+ripper.o: $(hdrdir)/ruby/intern.h
ripper.o: $(hdrdir)/ruby/internal/anyargs.h
ripper.o: $(hdrdir)/ruby/internal/arithmetic.h
ripper.o: $(hdrdir)/ruby/internal/arithmetic/char.h
@@ -192,20 +206,6 @@ ripper.o: $(hdrdir)/ruby/internal/value_type.h
ripper.o: $(hdrdir)/ruby/internal/variable.h
ripper.o: $(hdrdir)/ruby/internal/warning_push.h
ripper.o: $(hdrdir)/ruby/internal/xmalloc.h
-ripper.o: $(hdrdir)/ruby/assert.h
-ripper.o: $(hdrdir)/ruby/backward.h
-ripper.o: $(hdrdir)/ruby/backward/2/assume.h
-ripper.o: $(hdrdir)/ruby/backward/2/attributes.h
-ripper.o: $(hdrdir)/ruby/backward/2/bool.h
-ripper.o: $(hdrdir)/ruby/backward/2/gcc_version_since.h
-ripper.o: $(hdrdir)/ruby/backward/2/inttypes.h
-ripper.o: $(hdrdir)/ruby/backward/2/limits.h
-ripper.o: $(hdrdir)/ruby/backward/2/long_long.h
-ripper.o: $(hdrdir)/ruby/backward/2/stdalign.h
-ripper.o: $(hdrdir)/ruby/backward/2/stdarg.h
-ripper.o: $(hdrdir)/ruby/defines.h
-ripper.o: $(hdrdir)/ruby/encoding.h
-ripper.o: $(hdrdir)/ruby/intern.h
ripper.o: $(hdrdir)/ruby/io.h
ripper.o: $(hdrdir)/ruby/missing.h
ripper.o: $(hdrdir)/ruby/onigmo.h
@@ -244,6 +244,7 @@ ripper.o: $(top_srcdir)/internal/variable.h
ripper.o: $(top_srcdir)/internal/vm.h
ripper.o: $(top_srcdir)/internal/warnings.h
ripper.o: $(top_srcdir)/node.h
+ripper.o: $(top_srcdir)/ractor_pub.h
ripper.o: $(top_srcdir)/regenc.h
ripper.o: $(top_srcdir)/ruby_assert.h
ripper.o: $(top_srcdir)/symbol.h
diff --git a/gc.c b/gc.c
index 9b453cb..535f526 100644
--- a/gc.c
+++ b/gc.c
@@ -101,7 +101,9 @@
#include "symbol.h"
#include "transient_heap.h"
#include "vm_core.h"
+#include "vm_sync.h"
#include "vm_callinfo.h"
+#include "ractor.h"
#include "builtin.h"
@@ -402,7 +404,7 @@ int ruby_rgengc_debug;
* 2: enable profiling for each types
*/
#ifndef RGENGC_PROFILE
-#define RGENGC_PROFILE 0
+#define RGENGC_PROFILE 1
#endif
/* RGENGC_ESTIMATE_OLDMALLOC
@@ -884,7 +886,6 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define heap_eden (&objspace->eden_heap)
#define heap_tomb (&objspace->tomb_heap)
-#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
#define finalizing objspace->atomic_flags.finalizing
#define finalizer_table objspace->finalizer_table
@@ -897,6 +898,18 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define stress_to_class 0
#endif
+#if 0
+#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
+#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
+#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", __FILE__, __LINE__), (int)b), objspace->flags.dont_gc = (b))
+#define dont_gc_val() (objspace->flags.dont_gc)
+#else
+#define dont_gc_on() (objspace->flags.dont_gc = 1)
+#define dont_gc_off() (objspace->flags.dont_gc = 0)
+#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
+#define dont_gc_val() (objspace->flags.dont_gc)
+#endif
+
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
{
@@ -984,8 +997,8 @@ static int garbage_collect(rb_objspace_t *, int reason);
static int gc_start(rb_objspace_t *objspace, int reason);
static void gc_rest(rb_objspace_t *objspace);
-static inline void gc_enter(rb_objspace_t *objspace, const char *event);
-static inline void gc_exit(rb_objspace_t *objspace, const char *event);
+static inline void gc_enter(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev);
+static inline void gc_exit(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev);
static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
@@ -1233,6 +1246,7 @@ check_rvalue_consistency_force(const VALUE obj, int terminate)
goto skip;
}
}
+ bp();
fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
err++;
skip:
@@ -1561,7 +1575,7 @@ rb_objspace_alloc(void)
malloc_limit = gc_params.malloc_limit_min;
list_head_init(&objspace->eden_heap.pages);
list_head_init(&objspace->tomb_heap.pages);
- dont_gc = TRUE;
+ dont_gc_on();
return objspace;
}
@@ -2032,7 +2046,7 @@ heap_get_freeobj_head(rb_objspace_t *objspace, rb_heap_t *heap)
{
RVALUE *p = heap->freelist;
if (LIKELY(p != NULL)) {
- heap->freelist = p->as.free.next;
+ heap->freelist = p->as.free.next;
}
asan_unpoison_object((VALUE)p, true);
return (VALUE)p;
@@ -2108,6 +2122,10 @@ newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_prote
};
MEMCPY(RANY(obj), &buf, RVALUE, 1);
+#if RACTOR_CHECK_MODE
+ rb_ractor_setup_belonging(obj);
+#endif
+
#if RGENGC_CHECK_MODE
GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
@@ -2115,58 +2133,57 @@ newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_prote
GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
if (flags & FL_PROMOTED1) {
- if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
+ if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
}
else {
- if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
+ if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
}
if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
#endif
if (UNLIKELY(wb_protected == FALSE)) {
- MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
+ MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
}
#if RGENGC_PROFILE
if (wb_protected) {
- objspace->profile.total_generated_normal_object_count++;
+ objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
- objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
+ objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
}
else {
- objspace->profile.total_generated_shady_object_count++;
+ objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
- objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
+ objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
}
#endif
+ objspace->total_allocated_objects++;
#if GC_DEBUG
RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
#endif
- objspace->total_allocated_objects++;
-
gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
#if RGENGC_OLD_NEWOBJ_CHECK > 0
{
- static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
+ static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
- if (!is_incremental_marking(objspace) &&
- flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
- ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
- if (--newobj_cnt == 0) {
- newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
+ if (!is_incremental_marking(objspace) &&
+ flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
+ ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
+ if (--newobj_cnt == 0) {
+ newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
- gc_mark_set(objspace, obj);
- RVALUE_AGE_SET_OLD(objspace, obj);
+ gc_mark_set(objspace, obj);
+ RVALUE_AGE_SET_OLD(objspace, obj);
- rb_gc_writebarrier_remember(obj);
- }
- }
+ rb_gc_writebarrier_remember(obj);
+ }
+ }
}
#endif
check_rvalue_consistency(obj);
@@ -2179,20 +2196,21 @@ newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objsp
VALUE obj;
if (UNLIKELY(during_gc || ruby_gc_stressful)) {
- if (during_gc) {
- dont_gc = 1;
- during_gc = 0;
- rb_bug("object allocation during garbage collection phase");
- }
+ if (during_gc) {
+ dont_gc_on();
+ during_gc = 0;
+ rb_bug("object allocation during garbage collection phase");
+ }
- if (ruby_gc_stressful) {
- if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
- rb_memerror();
- }
- }
+ if (ruby_gc_stressful) {
+ if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
+ rb_memerror();
+ }
+ }
}
obj = heap_get_freeobj(objspace, heap_eden);
+
newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);
return obj;
@@ -2219,30 +2237,36 @@ newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protect
rb_objspace_t *objspace = &rb_objspace;
VALUE obj;
- RB_DEBUG_COUNTER_INC(obj_newobj);
- (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
+ RB_VM_LOCK_ENTER();
+ {
+
+ RB_DEBUG_COUNTER_INC(obj_newobj);
+ (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
#if GC_DEBUG_STRESS_TO_CLASS
- if (UNLIKELY(stress_to_class)) {
- long i, cnt = RARRAY_LEN(stress_to_class);
- for (i = 0; i < cnt; ++i) {
- if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
+ if (UNLIKELY(stress_to_class)) {
+ long i, cnt = RARRAY_LEN(stress_to_class);
+ for (i = 0; i < cnt; ++i) {
+ if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
+ }
}
- }
#endif
- if (!(during_gc ||
- ruby_gc_stressful ||
- gc_event_hook_available_p(objspace)) &&
- (obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) {
- return newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
+ if (!(during_gc ||
+ ruby_gc_stressful ||
+ gc_event_hook_available_p(objspace)) &&
+ (obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) {
+ newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
+ }
+ else {
+ RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
+ obj = wb_protected ?
+ newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
+ newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
+ }
}
- else {
- RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
+ RB_VM_LOCK_LEAVE();
- return wb_protected ?
- newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
- newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
- }
+ return obj;
}
VALUE
@@ -2273,6 +2297,18 @@ rb_newobj_of(VALUE klass, VALUE flags)
return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED);
}
+VALUE
+rb_newobj_with(VALUE src)
+{
+ VALUE klass = RBASIC_CLASS(src);
+ VALUE flags = RBASIC(src)->flags;
+
+ VALUE v1 = RANY(src)->as.values.v1;
+ VALUE v2 = RANY(src)->as.values.v2;
+ VALUE v3 = RANY(src)->as.values.v3;
+ return newobj_of(klass, flags & ~FL_WB_PROTECTED, v1, v2, v3, flags & FL_WB_PROTECTED);
+}
+
#define UNEXPECTED_NODE(func) \
rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
@@ -3597,10 +3633,11 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
}
/* prohibit GC because force T_DATA finalizers can break an object graph consistency */
- dont_gc = 1;
+ dont_gc_on();
/* running data/file finalizers are part of garbage collection */
- gc_enter(objspace, "rb_objspace_call_finalizer");
+ unsigned int lock_lev;
+ gc_enter(objspace, "rb_objspace_call_finalizer", &lock_lev);
/* run data/file object's finalizers */
for (i = 0; i < heap_allocated_pages; i++) {
@@ -3642,7 +3679,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
}
}
- gc_exit(objspace, "rb_objspace_call_finalizer");
+ gc_exit(objspace, "rb_objspace_call_finalizer", &lock_lev);
if (heap_pages_deferred_final) {
finalize_list(objspace, heap_pages_deferred_final);
@@ -4529,15 +4566,16 @@ gc_sweep_rest(rb_objspace_t *objspace)
static void
gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
{
- GC_ASSERT(dont_gc == FALSE);
+ GC_ASSERT(dont_gc_val() == FALSE);
if (!GC_ENABLE_LAZY_SWEEP) return;
- gc_enter(objspace, "sweep_continue");
+ unsigned int lock_lev;
+ gc_enter(objspace, "sweep_continue", &lock_lev);
if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE && heap_increment(objspace, heap)) {
gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");
}
gc_sweep_step(objspace, heap);
- gc_exit(objspace, "sweep_continue");
+ gc_exit(objspace, "sweep_continue", &lock_lev);
}
static void
@@ -5944,8 +5982,8 @@ objspace_allrefs(rb_objspace_t *objspace)
struct allrefs data;
struct mark_func_data_struct mfd;
VALUE obj;
- int prev_dont_gc = dont_gc;
- dont_gc = TRUE;
+ int prev_dont_gc = dont_gc_val();
+ dont_gc_on();
data.objspace = objspace;
data.references = st_init_numtable();
@@ -5966,7 +6004,7 @@ objspace_allrefs(rb_objspace_t *objspace)
}
free_stack_chunks(&data.mark_stack);
- dont_gc = prev_dont_gc;
+ dont_gc_set(prev_dont_gc);
return data.references;
}
@@ -6288,8 +6326,8 @@ gc_verify_heap_pages(rb_objspace_t *objspace)
static VALUE
gc_verify_internal_consistency_m(VALUE dummy)
{
+ ASSERT_vm_locking();
gc_verify_internal_consistency(&rb_objspace);
-
return Qnil;
}
@@ -6639,10 +6677,11 @@ gc_marks_rest(rb_objspace_t *objspace)
static void
gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
{
- GC_ASSERT(dont_gc == FALSE);
+ GC_ASSERT(dont_gc_val() == FALSE);
#if GC_ENABLE_INCREMENTAL_MARK
- gc_enter(objspace, "marks_continue");
+ unsigned int lock_lev;
+ gc_enter(objspace, "marks_continue", &lock_lev);
PUSH_MARK_FUNC_DATA(NULL);
{
@@ -6674,7 +6713,7 @@ gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
}
POP_MARK_FUNC_DATA();
- gc_exit(objspace, "marks_continue");
+ gc_exit(objspace, "marks_continue", &lock_lev);
#endif
}
@@ -6973,21 +7012,50 @@ void
rb_gc_writebarrier(VALUE a, VALUE b)
{
rb_objspace_t *objspace = &rb_objspace;
+ bool retry;
if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");
+ retry_:
+ retry = false;
if (!is_incremental_marking(objspace)) {
- if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
- return;
- }
- else {
- gc_writebarrier_generational(a, b, objspace);
- }
+ if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
+ // do nothing
+ }
+ else {
+ RB_VM_LOCK_ENTER(); // can change GC state
+ {
+ if (!is_incremental_marking(objspace)) {
+ if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
+ // do nothing
+ }
+ else {
+ gc_writebarrier_generational(a, b, objspace);
+ }
+ }
+ else {
+ retry = true;
+ }
+ }
+ RB_VM_LOCK_LEAVE();
+ }
}
else { /* slow path */
- gc_writebarrier_incremental(a, b, objspace);
+ RB_VM_LOCK_ENTER(); // can change GC state
+ {
+ if (is_incremental_marking(objspace)) {
+ gc_writebarrier_incremental(a, b, objspace);
+ }
+ else {
+ retry = true;
+ }
+ }
+ RB_VM_LOCK_LEAVE();
}
+ if (retry) goto retry_;
+
+ return;
}
void
@@ -7154,46 +7222,49 @@ void
rb_gc_force_recycle(VALUE obj)
{
rb_objspace_t *objspace = &rb_objspace;
+ RB_VM_LOCK_ENTER();
+ {
+ int is_old = RVALUE_OLD_P(obj);
- int is_old = RVALUE_OLD_P(obj);
-
- gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
+ gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
- if (is_old) {
- if (RVALUE_MARKED(obj)) {
- objspace->rgengc.old_objects--;
- }
- }
- CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
- CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
+ if (is_old) {
+ if (RVALUE_MARKED(obj)) {
+ objspace->rgengc.old_objects--;
+ }
+ }
+ CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
+ CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
#if GC_ENABLE_INCREMENTAL_MARK
- if (is_incremental_marking(objspace)) {
- if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj)) {
- invalidate_mark_stack(&objspace->mark_stack, obj);
- CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
- }
- CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
- }
- else {
+ if (is_incremental_marking(objspace)) {
+ if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj)) {
+ invalidate_mark_stack(&objspace->mark_stack, obj);
+ CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
+ }
+ CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
+ }
+ else {
#endif
- if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
- CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
- }
- CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
+ if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
+ CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
+ }
+ CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
#if GC_ENABLE_INCREMENTAL_MARK
- }
+ }
#endif
- objspace->profile.total_freed_objects++;
+ objspace->profile.total_freed_objects++;
- heap_page_add_freeobj(objspace, GET_HEAP_PAGE(obj), obj);
+ heap_page_add_freeobj(objspace, GET_HEAP_PAGE(obj), obj);
- /* Disable counting swept_slots because there are no meaning.
- * if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
- * objspace->heap.swept_slots++;
- * }
- */
+ /* Disable counting swept_slots because there are no meaning.
+ * if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
+ * objspace->heap.swept_slots++;
+ * }
+ */
+ }
+ RB_VM_LOCK_LEAVE();
}
#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
@@ -7281,7 +7352,7 @@ heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
static int
ready_to_gc(rb_objspace_t *objspace)
{
- if (dont_gc || during_gc || ruby_disable_gc) {
+ if (dont_gc_val() || during_gc || ruby_disable_gc) {
heap_ready_to_gc(objspace, heap_eden);
return FALSE;
}
@@ -7361,17 +7432,25 @@ gc_reset_malloc_info(rb_objspace_t *objspace)
static int
garbage_collect(rb_objspace_t *objspace, int reason)
{
+ int ret;
+
+ RB_VM_LOCK_ENTER();
+ {
#if GC_PROFILE_MORE_DETAIL
- objspace->profile.prepare_time = getrusage_time();
+ objspace->profile.prepare_time = getrusage_time();
#endif
- gc_rest(objspace);
+ gc_rest(objspace);
#if GC_PROFILE_MORE_DETAIL
- objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
+ objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif
- return gc_start(objspace, reason);
+ ret = gc_start(objspace, reason);
+ }
+ RB_VM_LOCK_LEAVE();
+
+ return ret;
}
static int
@@ -7389,12 +7468,14 @@ gc_start(rb_objspace_t *objspace, int reason)
GC_ASSERT(gc_mode(objspace) == gc_mode_none);
GC_ASSERT(!is_lazy_sweeping(heap_eden));
GC_ASSERT(!is_incremental_marking(objspace));
+
+ unsigned int lock_lev;
+ gc_enter(objspace, "gc_start", &lock_lev);
+
#if RGENGC_CHECK_MODE >= 2
gc_verify_internal_consistency(objspace);
#endif
- gc_enter(objspace, "gc_start");
-
if (ruby_gc_stressful) {
int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
@@ -7478,7 +7559,7 @@ gc_start(rb_objspace_t *objspace, int reason)
}
gc_prof_timer_stop(objspace);
- gc_exit(objspace, "gc_start");
+ gc_exit(objspace, "gc_start", &lock_lev);
return TRUE;
}
@@ -7489,7 +7570,8 @@ gc_rest(rb_objspace_t *objspace)
int sweeping = is_lazy_sweeping(heap_eden);
if (marking || sweeping) {
- gc_enter(objspace, "gc_rest");
+ unsigned int lock_lev;
+ gc_enter(objspace, "gc_rest", &lock_lev);
if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
@@ -7501,7 +7583,7 @@ gc_rest(rb_objspace_t *objspace)
if (is_lazy_sweeping(heap_eden)) {
gc_sweep_rest(objspace);
}
- gc_exit(objspace, "gc_rest");
+ gc_exit(objspace, "gc_rest", &lock_lev);
}
}
@@ -7587,30 +7669,38 @@ gc_record(rb_objspace_t *objspace, int direction, const char *event)
#endif /* PRINT_ENTER_EXIT_TICK */
static inline void
-gc_enter(rb_objspace_t *objspace, const char *event)
+gc_enter(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev)
{
+ // stop other ractors
+
+ RB_VM_LOCK_ENTER_LEV(lock_lev);
+ rb_vm_barrier();
+
GC_ASSERT(during_gc == 0);
if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
mjit_gc_start_hook();
during_gc = TRUE;
+ RUBY_DEBUG_LOG("%s (%s)", event, gc_current_status(objspace));
gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
gc_record(objspace, 0, event);
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
}
static inline void
-gc_exit(rb_objspace_t *objspace, const char *event)
+gc_exit(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev)
{
GC_ASSERT(during_gc != 0);
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passsed? */
gc_record(objspace, 1, event);
+ RUBY_DEBUG_LOG("%s (%s)", event, gc_current_status(objspace));
gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
during_gc = FALSE;
mjit_gc_exit_hook();
+ RB_VM_LOCK_LEAVE_LEV(lock_lev);
}
static void *
@@ -7623,7 +7713,7 @@ gc_with_gvl(void *ptr)
static int
garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
{
- if (dont_gc) return TRUE;
+ if (dont_gc_val()) return TRUE;
if (ruby_thread_has_gvl_p()) {
return garbage_collect(objspace, reason);
}
@@ -8647,7 +8737,7 @@ static VALUE
rb_gc_compact(rb_execution_context_t *ec, VALUE self)
{
rb_objspace_t *objspace = &rb_objspace;
- if (dont_gc) return Qnil;
+ if (dont_gc_val()) return Qnil;
gc_compact(objspace, FALSE, FALSE, FALSE);
return gc_compact_stats(objspace);
@@ -9349,9 +9439,9 @@ rb_gc_enable(void)
VALUE
rb_objspace_gc_enable(rb_objspace_t *objspace)
{
- int old = dont_gc;
+ int old = dont_gc_val();
- dont_gc = FALSE;
+ dont_gc_off();
return old ? Qtrue : Qfalse;
}
@@ -9371,8 +9461,8 @@ rb_gc_disable_no_rest(void)
static VALUE
gc_disable_no_rest(rb_objspace_t *objspace)
{
- int old = dont_gc;
- dont_gc = TRUE;
+ int old = dont_gc_val();
+ dont_gc_on();
return old ? Qtrue : Qfalse;
}
@@ -9742,7 +9832,10 @@ rb_memerror(void)
sleep(60);
}
- if (during_gc) gc_exit(objspace, "rb_memerror");
+ if (during_gc) {
+ // TODO: OMG!! How to implement it?
+ gc_exit(objspace, "rb_memerror", NULL);
+ }
exc = nomem_error;
if (!exc ||
@@ -9869,7 +9962,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si
if (type == MEMOP_TYPE_MALLOC) {
retry:
- if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) {
+ if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
gc_rest(objspace); /* gc_rest can reduce malloc_increase */
goto retry;
@@ -11607,6 +11700,8 @@ rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
}
}
+bool rb_ractor_p(VALUE rv);
+
const char *
rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
{
@@ -11750,6 +11845,10 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
(iseq = vm_block_iseq(block)) != NULL) {
rb_raw_iseq_info(BUFF_ARGS, iseq);
}
+ else if (rb_ractor_p(obj)) {
+ rb_ractor_t *r = (void *)DATA_PTR(obj);
+ APPENDF((BUFF_ARGS, "r:%d", r->id));
+ }
else {
const char * const type_name = rb_objspace_data_type_name(obj);
if (type_name) {
diff --git a/gc.h b/gc.h
index 47a4ca1..5d113ca 100644
--- a/gc.h
+++ b/gc.h
@@ -16,6 +16,13 @@ NOINLINE(void rb_gc_set_stack_end(VALUE **stack_end_p));
#define USE_CONSERVATIVE_STACK_END
#endif
+#define RB_GC_SAVE_MACHINE_CONTEXT(th) \
+ do { \
+ FLUSH_REGISTER_WINDOWS; \
+ setjmp((th)->ec->machine.regs); \
+ SET_MACHINE_STACK_END(&(th)->ec->machine.stack_end); \
+ } while (0)
+
/* for GC debug */
#ifndef RUBY_MARK_FREE_DEBUG
diff --git a/include/ruby/internal/fl_type.h b/include/ruby/internal/fl_type.h
index d593edf..6d8df59 100644
--- a/include/ruby/internal/fl_type.h
+++ b/include/ruby/internal/fl_type.h
@@ -165,6 +165,7 @@ ruby_fl_type {
RUBY_FL_PROMOTED = RUBY_FL_PROMOTED0 | RUBY_FL_PROMOTED1,
RUBY_FL_FINALIZE = (1<<7),
RUBY_FL_TAINT = (1<<8),
+ RUBY_FL_SHAREABLE = (1<<8),
RUBY_FL_UNTRUSTED = RUBY_FL_TAINT,
RUBY_FL_SEEN_OBJ_ID = (1<<9),
RUBY_FL_EXIVAR = (1<<10),
diff --git a/inits.c b/inits.c
index ad57b75..a3eec16 100644
--- a/inits.c
+++ b/inits.c
@@ -60,6 +60,7 @@ rb_call_inits(void)
CALL(Math);
CALL(GC);
CALL(Enumerator);
+ CALL(Ractor);
CALL(VM);
CALL(ISeq);
CALL(Thread);
@@ -82,6 +83,7 @@ rb_call_builtin_inits(void)
{
#define BUILTIN(n) CALL(builtin_##n)
BUILTIN(gc);
+ BUILTIN(ractor);
BUILTIN(integer);
BUILTIN(io);
BUILTIN(dir);
diff --git a/internal/variable.h b/internal/variable.h
index 6ed7280..d5d0ccc 100644
--- a/internal/variable.h
+++ b/internal/variable.h
@@ -32,6 +32,7 @@ NORETURN(VALUE rb_mod_const_missing(VALUE,VALUE));
rb_gvar_getter_t *rb_gvar_getter_function_of(ID);
rb_gvar_setter_t *rb_gvar_setter_function_of(ID);
void rb_gvar_readonly_setter(VALUE v, ID id, VALUE *_);
+void rb_gvar_ractor_local(const char *name);
static inline bool ROBJ_TRANSIENT_P(VALUE obj);
static inline void ROBJ_TRANSIENT_SET(VALUE obj);
static inline void ROBJ_TRANSIENT_UNSET(VALUE obj);
diff --git a/io.c b/io.c
index 1b9a68a..8cc5211 100644
--- a/io.c
+++ b/io.c
@@ -132,6 +132,7 @@
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby_atomic.h"
+#include "ractor_pub.h"
#if !USE_POLL
# include "vm_core.h"
@@ -1478,7 +1479,7 @@ io_binwrite(VALUE str, const char *ptr, long len, rb_io_t *fptr, int nosync)
fptr->wbuf.len = 0;
fptr->wbuf.capa = IO_WBUF_CAPA_MIN;
fptr->wbuf.ptr = ALLOC_N(char, fptr->wbuf.capa);
- fptr->write_lock = rb_mutex_new();
+ fptr->write_lock = rb_mutex_new();
rb_mutex_allow_trap(fptr->write_lock, 1);
}
if ((!nosync && (fptr->mode & (FMODE_SYNC|FMODE_TTY))) ||
@@ -1491,7 +1492,7 @@ io_binwrite(VALUE str, const char *ptr, long len, rb_io_t *fptr, int nosync)
arg.ptr = ptr + offset;
arg.length = n;
if (fptr->write_lock) {
- r = rb_mutex_synchronize(fptr->write_lock, io_binwrite_string, (VALUE)&arg);
+ r = rb_mutex_synchronize(fptr->write_lock, io_binwrite_string, (VALUE)&arg);
}
else {
r = io_binwrite_string((VALUE)&arg);
@@ -1877,7 +1878,7 @@ static VALUE
rb_io_writev(VALUE io, int argc, const VALUE *argv)
{
if (argc > 1 && rb_obj_method_arity(io, id_write) == 1) {
- if (io != rb_stderr && RTEST(ruby_verbose)) {
+ if (io != rb_ractor_stderr() && RTEST(ruby_verbose)) {
VALUE klass = CLASS_OF(io);
char sep = FL_TEST(klass, FL_SINGLETON) ? (klass = io, '.') : '#';
rb_warning("%+"PRIsVALUE"%c""write is outdated interface"
@@ -4291,11 +4292,12 @@ rb_io_getbyte(VALUE io)
GetOpenFile(io, fptr);
rb_io_check_byte_readable(fptr);
READ_CHECK(fptr);
- if (fptr->fd == 0 && (fptr->mode & FMODE_TTY) && RB_TYPE_P(rb_stdout, T_FILE)) {
+ VALUE r_stdout = rb_ractor_stdout();
+ if (fptr->fd == 0 && (fptr->mode & FMODE_TTY) && RB_TYPE_P(r_stdout, T_FILE)) {
rb_io_t *ofp;
- GetOpenFile(rb_stdout, ofp);
+ GetOpenFile(r_stdout, ofp);
if (ofp->mode & FMODE_TTY) {
- rb_io_flush(rb_stdout);
+ rb_io_flush(r_stdout);
}
}
if (io_fillbuf(fptr) < 0) {
@@ -7034,8 +7036,8 @@ popen_finish(VALUE port, VALUE klass)
/* child */
if (rb_block_given_p()) {
rb_yield(Qnil);
- rb_io_flush(rb_stdout);
- rb_io_flush(rb_stderr);
+ rb_io_flush(rb_ractor_stdout());
+ rb_io_flush(rb_ractor_stderr());
_exit(0);
}
return Qnil;
@@ -7624,7 +7626,7 @@ rb_f_printf(int argc, VALUE *argv, VALUE _)
if (argc == 0) return Qnil;
if (RB_TYPE_P(argv[0], T_STRING)) {
- out = rb_stdout;
+ out = rb_ractor_stdout();
}
else {
out = argv[0];
@@ -7724,7 +7726,7 @@ rb_io_print(int argc, const VALUE *argv, VALUE out)
static VALUE
rb_f_print(int argc, const VALUE *argv, VALUE _)
{
- rb_io_print(argc, argv, rb_stdout);
+ rb_io_print(argc, argv, rb_ractor_stdout());
return Qnil;
}
@@ -7775,10 +7777,11 @@ rb_io_putc(VALUE io, VALUE ch)
static VALUE
rb_f_putc(VALUE recv, VALUE ch)
{
- if (recv == rb_stdout) {
+ VALUE r_stdout = rb_ractor_stdout();
+ if (recv == r_stdout) {
return rb_io_putc(recv, ch);
}
- return rb_funcallv(rb_stdout, rb_intern("putc"), 1, &ch);
+ return rb_funcallv(r_stdout, rb_intern("putc"), 1, &ch);
}
@@ -7889,10 +7892,11 @@ rb_io_puts(int argc, const VALUE *argv, VALUE out)
static VALUE
rb_f_puts(int argc, VALUE *argv, VALUE recv)
{
- if (recv == rb_stdout) {
+ VALUE r_stdout = rb_ractor_stdout();
+ if (recv == r_stdout) {
return rb_io_puts(argc, argv, recv);
}
- return rb_funcallv(rb_stdout, rb_intern("puts"), argc, argv);
+ return rb_funcallv(r_stdout, rb_intern("puts"), argc, argv);
}
static VALUE
@@ -7901,12 +7905,13 @@ rb_p_write(VALUE str)
VALUE args[2];
args[0] = str;
args[1] = rb_default_rs;
- if (RB_TYPE_P(rb_stdout, T_FILE) &&
- rb_method_basic_definition_p(CLASS_OF(rb_stdout), id_write)) {
- io_writev(2, args, rb_stdout);
+ VALUE r_stdout = rb_ractor_stdout();
+ if (RB_TYPE_P(r_stdout, T_FILE) &&
+ rb_method_basic_definition_p(CLASS_OF(r_stdout), id_write)) {
+ io_writev(2, args, r_stdout);
}
else {
- rb_io_writev(rb_stdout, 2, args);
+ rb_io_writev(r_stdout, 2, args);
}
return Qnil;
}
@@ -7928,8 +7933,9 @@ rb_p_result(int argc, const VALUE *argv)
else if (argc > 1) {
ret = rb_ary_new4(argc, argv);
}
- if (RB_TYPE_P(rb_stdout, T_FILE)) {
- rb_io_flush(rb_stdout);
+ VALUE r_stdout = rb_ractor_stdout();
+ if (RB_TYPE_P(r_stdout, T_FILE)) {
+ rb_io_flush(r_stdout);
}
return ret;
}
@@ -7992,7 +7998,7 @@ rb_obj_display(int argc, VALUE *argv, VALUE self)
{
VALUE out;
- out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
+ out = (!rb_check_arity(argc, 0, 1) ? rb_ractor_stdout() : argv[0]);
rb_io_write(out, self);
return Qnil;
@@ -8001,7 +8007,7 @@ rb_obj_display(int argc, VALUE *argv, VALUE self)
static int
rb_stderr_to_original_p(void)
{
- return (rb_stderr == orig_stderr || RFILE(orig_stderr)->fptr->fd < 0);
+ return (rb_ractor_stderr() == orig_stderr || RFILE(orig_stderr)->fptr->fd < 0);
}
void
@@ -8019,7 +8025,7 @@ rb_write_error2(const char *mesg, long len)
}
}
else {
- rb_io_write(rb_stderr, rb_str_new(mesg, len));
+ rb_io_write(rb_ractor_stderr(), rb_str_new(mesg, len));
}
}
@@ -8047,7 +8053,7 @@ rb_write_error_str(VALUE mesg)
}
else {
/* may unlock GVL, and */
- rb_io_write(rb_stderr, mesg);
+ rb_io_write(rb_ractor_stderr(), mesg);
}
}
@@ -8070,10 +8076,41 @@ must_respond_to(ID mid, VALUE val, ID id)
}
static void
-stdout_setter(VALUE val, ID id, VALUE *variable)
+stdin_setter(VALUE val, ID id, VALUE *ptr)
+{
+ rb_ractor_stdin_set(val);
+}
+
+static VALUE
+stdin_getter(ID id, VALUE *ptr)
+{
+ return rb_ractor_stdin();
+}
+
+static void
+stdout_setter(VALUE val, ID id, VALUE *ptr)
+{
+ must_respond_to(id_write, val, id);
+ rb_ractor_stdout_set(val);
+}
+
+static VALUE
+stdout_getter(ID id, VALUE *ptr)
+{
+ return rb_ractor_stdout();
+}
+
+static void
+stderr_setter(VALUE val, ID id, VALUE *ptr)
{
must_respond_to(id_write, val, id);
- *variable = val;
+ rb_ractor_stderr_set(val);
+}
+
+static VALUE
+stderr_getter(ID id, VALUE *ptr)
+{
+ return rb_ractor_stderr();
}
static VALUE
@@ -8125,6 +8162,24 @@ prep_stdio(FILE *f, int fmode, VALUE klass, const char *path)
return io;
}
+VALUE
+rb_io_prep_stdin(void)
+{
+ return prep_stdio(stdin, FMODE_READABLE, rb_cIO, "<STDIN>");
+}
+
+VALUE
+rb_io_prep_stdout(void)
+{
+ return prep_stdio(stdout, FMODE_WRITABLE|FMODE_SIGNAL_ON_EPIPE, rb_cIO, "<STDOUT>");
+}
+
+VALUE
+rb_io_prep_stderr(void)
+{
+ return prep_stdio(stderr, FMODE_WRITABLE|FMODE_SYNC, rb_cIO, "<STDERR>");
+}
+
FILE *
rb_io_stdio_file(rb_io_t *fptr)
{
@@ -8707,8 +8762,10 @@ argf_next_argv(VALUE argf)
int stdout_binmode = 0;
int fmode;
- if (RB_TYPE_P(rb_stdout, T_FILE)) {
- GetOpenFile(rb_stdout, fptr);
+ VALUE r_stdout = rb_ractor_stdout();
+
+ if (RB_TYPE_P(r_stdout, T_FILE)) {
+ GetOpenFile(r_stdout, fptr);
if (fptr->mode & FMODE_BINMODE)
stdout_binmode = 1;
}
@@ -8759,8 +8816,8 @@ argf_next_argv(VALUE argf)
VALUE str;
int fw;
- if (RB_TYPE_P(rb_stdout, T_FILE) && rb_stdout != orig_stdout) {
- rb_io_close(rb_stdout);
+ if (RB_TYPE_P(r_stdout, T_FILE) && r_stdout != orig_stdout) {
+ rb_io_close(r_stdout);
}
fstat(fr, &st);
str = filename;
@@ -8829,7 +8886,7 @@ argf_next_argv(VALUE argf)
}
#endif
write_io = prep_io(fw, FMODE_WRITABLE, rb_cFile, fn);
- rb_stdout = write_io;
+ rb_ractor_stdout_set(write_io);
if (stdout_binmode) rb_io_binmode(rb_stdout);
}
fmode = FMODE_READABLE;
@@ -8869,7 +8926,7 @@ argf_next_argv(VALUE argf)
ARGF.filename = rb_str_new2("-");
if (ARGF.inplace) {
rb_warn("Can't do inplace edit for stdio");
- rb_stdout = orig_stdout;
+ rb_ractor_stdout_set(orig_stdout);
}
}
if (ARGF.init_p == -1) ARGF.init_p = 1;
@@ -13500,13 +13557,24 @@ Init_IO(void)
rb_define_method(rb_cIO, "autoclose?", rb_io_autoclose_p, 0);
rb_define_method(rb_cIO, "autoclose=", rb_io_set_autoclose, 1);
- rb_define_variable("$stdin", &rb_stdin);
- rb_stdin = prep_stdio(stdin, FMODE_READABLE, rb_cIO, "<STDIN>");
- rb_define_hooked_variable("$stdout", &rb_stdout, 0, stdout_setter);
- rb_stdout = prep_stdio(stdout, FMODE_WRITABLE|FMODE_SIGNAL_ON_EPIPE, rb_cIO, "<STDOUT>");
- rb_define_hooked_variable("$stderr", &rb_stderr, 0, stdout_setter);
- rb_stderr = prep_stdio(stderr, FMODE_WRITABLE|FMODE_SYNC, rb_cIO, "<STDERR>");
- rb_define_hooked_variable("$>", &rb_stdout, 0, stdout_setter);
+ rb_define_virtual_variable("$stdin", stdin_getter, stdin_setter);
+ rb_define_virtual_variable("$stdout", stdout_getter, stdout_setter);
+ rb_define_virtual_variable("$>", stdout_getter, stdout_setter);
+ rb_define_virtual_variable("$stderr", stderr_getter, stderr_setter);
+
+ rb_gvar_ractor_local("$stdin");
+ rb_gvar_ractor_local("$stdout");
+ rb_gvar_ractor_local("$>");
+ rb_gvar_ractor_local("$stderr");
+
+ rb_stdin = rb_io_prep_stdin();
+ rb_stdout = rb_io_prep_stdout();
+ rb_stderr = rb_io_prep_stderr();
+
+ rb_global_variable(&rb_stdin);
+ rb_global_variable(&rb_stdout);
+ rb_global_variable(&rb_stderr);
+
orig_stdout = rb_stdout;
orig_stderr = rb_stderr;
diff --git a/mjit.c b/mjit.c
index 39de0e0..b6abcf4 100644
--- a/mjit.c
+++ b/mjit.c
@@ -309,8 +309,8 @@ mark_ec_units(rb_execution_context_t *ec)
static void
unload_units(void)
{
- rb_vm_t *vm = GET_THREAD()->vm;
- rb_thread_t *th = NULL;
+ //rb_vm_t *vm = GET_THREAD()->vm;
+ //rb_thread_t *th = NULL;
struct rb_mjit_unit *unit = 0, *next, *worst;
struct mjit_cont *cont;
int delete_num, units_num = active_units.length;
@@ -329,9 +329,10 @@ unload_units(void)
assert(unit->iseq != NULL && unit->handle != NULL);
unit->used_code_p = FALSE;
}
- list_for_each(&vm->living_threads, th, vmlt_node) {
- mark_ec_units(th->ec);
- }
+ // TODO
+ //list_for_each(&vm->living_threads, th, lt_node) {
+ // mark_ec_units(th->ec);
+ //}
for (cont = first_cont; cont != NULL; cont = cont->next) {
mark_ec_units(cont->ec);
}
diff --git a/parse.y b/parse.y
index 2a7d985..93bc2ef 100644
--- a/parse.y
+++ b/parse.y
@@ -58,6 +58,7 @@ struct lex_context {
#include "ruby/st.h"
#include "ruby/util.h"
#include "symbol.h"
+#include "ractor_pub.h"
#define AREF(ary, i) RARRAY_AREF(ary, i)
@@ -10514,8 +10515,8 @@ rb_parser_fatal(struct parser_params *p, const char *fmt, ...)
rb_str_resize(mesg, 0);
append_bitstack_value(p->cmdarg_stack, mesg);
compile_error(p, "cmdarg_stack: %"PRIsVALUE, mesg);
- if (p->debug_output == rb_stdout)
- p->debug_output = rb_stderr;
+ if (p->debug_output == rb_ractor_stdout())
+ p->debug_output = rb_ractor_stderr();
p->debug = TRUE;
}
@@ -12554,7 +12555,7 @@ parser_initialize(struct parser_params *p)
p->error_buffer = Qfalse;
#endif
p->debug_buffer = Qnil;
- p->debug_output = rb_stdout;
+ p->debug_output = rb_ractor_stdout();
p->enc = rb_utf8_encoding();
}
diff --git a/process.c b/process.c
index fb8d26a..34dd986 100644
--- a/process.c
+++ b/process.c
@@ -112,6 +112,7 @@ int initgroups(const char *, rb_gid_t);
#include "ruby/thread.h"
#include "ruby/util.h"
#include "vm_core.h"
+#include "ractor_pub.h"
/* define system APIs */
#ifdef _WIN32
@@ -4342,7 +4343,7 @@ rb_f_abort(int argc, const VALUE *argv)
args[1] = args[0] = argv[0];
StringValue(args[0]);
- rb_io_puts(1, args, rb_stderr);
+ rb_io_puts(1, args, rb_ractor_stderr());
args[0] = INT2NUM(EXIT_FAILURE);
rb_exc_raise(rb_class_new_instance(2, args, rb_eSystemExit));
}
diff --git a/ractor.c b/ractor.c
new file mode 100644
index 0000000..9059d42
--- /dev/null
+++ b/ractor.c
@@ -0,0 +1,1877 @@
+// Ractor implementation
+
+#include "ruby/ruby.h"
+#include "ruby/thread.h"
+#include "ruby/thread_native.h"
+#include "vm_core.h"
+#include "vm_sync.h"
+#include "ractor.h"
+#include "internal/error.h"
+
+static VALUE rb_cRactor;
+static VALUE rb_eRactorError;
+static VALUE rb_eRactorRemoteError;
+static VALUE rb_eRactorMovedError;
+static VALUE rb_eRactorClosedError;
+static VALUE rb_cRactorMovedObject;
+
+bool ruby_multi_ractor;
+static void vm_ractor_blocking_cnt_inc(rb_vm_t *vm, rb_ractor_t *r, const char *file, int line);
+
+
+static void
+ASSERT_ractor_unlocking(rb_ractor_t *r)
+{
+#if RACTOR_CHECK_MODE > 0
+ if (r->locked_by == GET_RACTOR()->self) {
+ rb_bug("recursive ractor locking");
+ }
+#endif
+}
+
+static void
+ASSERT_ractor_locking(rb_ractor_t *r)
+{
+#if RACTOR_CHECK_MODE > 0
+ if (r->locked_by != GET_RACTOR()->self) {
+ rp(r->locked_by);
+ rb_bug("ractor lock is not acquired.");
+ }
+#endif
+}
+
+static void
+ractor_lock(rb_ractor_t *r, const char *file, int line)
+{
+ RUBY_DEBUG_LOG2(file, line, "locking r:%u%s", r->id, GET_RACTOR() == r ? " (self)" : "");
+
+ ASSERT_ractor_unlocking(r);
+ rb_native_mutex_lock(&r->lock);
+
+#if RACTOR_CHECK_MODE > 0
+ r->locked_by = GET_RACTOR()->self;
+#endif
+
+ RUBY_DEBUG_LOG2(file, line, "locked r:%u%s", r->id, GET_RACTOR() == r ? " (self)" : "");
+}
+
+static void
+ractor_lock_self(rb_ractor_t *cr, const char *file, int line)
+{
+ VM_ASSERT(cr == GET_RACTOR());
+ VM_ASSERT(cr->locked_by != cr->self);
+ ractor_lock(cr, file, line);
+}
+
+static void
+ractor_unlock(rb_ractor_t *r, const char *file, int line)
+{
+ ASSERT_ractor_locking(r);
+#if RACTOR_CHECK_MODE > 0
+ r->locked_by = Qnil;
+#endif
+ rb_native_mutex_unlock(&r->lock);
+
+ RUBY_DEBUG_LOG2(file, line, "r:%u%s", r->id, GET_RACTOR() == r ? " (self)" : "");
+}
+
+static void
+ractor_unlock_self(rb_ractor_t *cr, const char *file, int line)
+{
+ VM_ASSERT(cr == GET_RACTOR());
+ VM_ASSERT(cr->locked_by == cr->self);
+ ractor_unlock(cr, file, line);
+}
+
+#define RACTOR_LOCK(r) ractor_lock(r, __FILE__, __LINE__)
+#define RACTOR_UNLOCK(r) ractor_unlock(r, __FILE__, __LINE__)
+#define RACTOR_LOCK_SELF(r) ractor_lock_self(r, __FILE__, __LINE__)
+#define RACTOR_UNLOCK_SELF(r) ractor_unlock_self(r, __FILE__, __LINE__)
+
+static void
+ractor_cond_wait(rb_ractor_t *r)
+{
+#if RACTOR_CHECK_MODE > 0
+ VALUE locked_by = r->locked_by;
+ r->locked_by = Qnil;
+#endif
+ rb_native_cond_wait(&r->wait.cond, &r->lock);
+
+#if RACTOR_CHECK_MODE > 0
+ r->locked_by = locked_by;
+#endif
+}
+
+static const char *
+ractor_status_str(enum ractor_status status)
+{
+ switch (status) {
+ case ractor_created: return "created";
+ case ractor_running: return "running";
+ case ractor_blocking: return "blocking";
+ case ractor_terminated: return "terminated";
+ }
+ rb_bug("unreachable");
+}
+
+static void
+ractor_status_set(rb_ractor_t *r, enum ractor_status status)
+{
+ RUBY_DEBUG_LOG("r:%u [%s]->[%s]", r->id, ractor_status_str(r->status_), ractor_status_str(status));
+
+ // check 1
+ if (r->status_ != ractor_created) {
+ VM_ASSERT(r == GET_RACTOR()); // only self-modification is allowed.
+ ASSERT_vm_locking();
+ }
+
+ // check 2: transition check; assume this will vanish in non-debug builds.
+ switch (r->status_) {
+ case ractor_created:
+ VM_ASSERT(status == ractor_blocking);
+ break;
+ case ractor_running:
+ VM_ASSERT(status == ractor_blocking||
+ status == ractor_terminated);
+ break;
+ case ractor_blocking:
+ VM_ASSERT(status == ractor_running);
+ break;
+ case ractor_terminated:
+ VM_ASSERT(0); // unreachable
+ break;
+ }
+
+ r->status_ = status;
+}
+
+static bool
+ractor_status_p(rb_ractor_t *r, enum ractor_status status)
+{
+ return rb_ractor_status_p(r, status);
+}
+
+static void
+ractor_queue_mark(struct rb_ractor_queue *rq)
+{
+ for (int i=0; i<rq->cnt; i++) {
+ rb_gc_mark(rq->baskets[i].v);
+ rb_gc_mark(rq->baskets[i].sender);
+ }
+}
+
+static void
+ractor_mark(void *ptr)
+{
+ rb_ractor_t *r = (rb_ractor_t *)ptr;
+
+ ractor_queue_mark(&r->incoming_queue);
+ rb_gc_mark(r->wait.taken_basket.v);
+ rb_gc_mark(r->wait.taken_basket.sender);
+ rb_gc_mark(r->wait.yielded_basket.v);
+ rb_gc_mark(r->wait.yielded_basket.sender);
+ rb_gc_mark(r->loc);
+ rb_gc_mark(r->name);
+ rb_gc_mark(r->r_stdin);
+ rb_gc_mark(r->r_stdout);
+ rb_gc_mark(r->r_stderr);
+
+ if (r->threads.cnt > 0) {
+ rb_thread_t *th;
+ list_for_each(&r->threads.set, th, lt_node) {
+ VM_ASSERT(th != NULL);
+ rb_gc_mark(th->self);
+ }
+ }
+}
+
+static void
+ractor_queue_free(struct rb_ractor_queue *rq)
+{
+ free(rq->baskets);
+}
+
+static void
+ractor_waiting_list_free(struct rb_ractor_waiting_list *wl)
+{
+ free(wl->ractors);
+}
+
+static void
+ractor_free(void *ptr)
+{
+ rb_ractor_t *r = (rb_ractor_t *)ptr;
+ rb_native_mutex_destroy(&r->lock);
+ rb_native_cond_destroy(&r->wait.cond);
+ ractor_queue_free(&r->incoming_queue);
+ ractor_waiting_list_free(&r->taking_ractors);
+ ruby_xfree(r);
+}
+
+static size_t
+ractor_queue_memsize(const struct rb_ractor_queue *rq)
+{
+ return sizeof(struct rb_ractor_basket) * rq->size;
+}
+
+static size_t
+ractor_waiting_list_memsize(const struct rb_ractor_waiting_list *wl)
+{
+ return sizeof(rb_ractor_t *) * wl->size;
+}
+
+static size_t
+ractor_memsize(const void *ptr)
+{
+ rb_ractor_t *r = (rb_ractor_t *)ptr;
+
+ // TODO
+ return sizeof(rb_ractor_t) +
+ ractor_queue_memsize(&r->incoming_queue) +
+ ractor_waiting_list_memsize(&r->taking_ractors);
+}
+
+static const rb_data_type_t ractor_data_type = {
+ "ractor",
+ {
+ ractor_mark,
+ ractor_free,
+ ractor_memsize,
+ NULL, // update
+ },
+ 0, 0, RUBY_TYPED_FREE_IMMEDIATELY /* | RUBY_TYPED_WB_PROTECTED */
+};
+
+bool
+rb_ractor_p(VALUE gv)
+{
+ if (rb_typeddata_is_kind_of(gv, &ractor_data_type)) {
+ return true;
+ }
+ else {
+ return false;
+ }
+}
+
+static inline rb_ractor_t *
+RACTOR_PTR(VALUE self)
+{
+ VM_ASSERT(rb_ractor_p(self));
+
+ rb_ractor_t *r = DATA_PTR(self);
+ // TODO: check
+ return r;
+}
+
+uint32_t
+rb_ractor_id(const rb_ractor_t *g)
+{
+ return g->id;
+}
+
+static uint32_t ractor_last_id;
+
+#if RACTOR_CHECK_MODE > 0
+MJIT_FUNC_EXPORTED uint32_t
+rb_ractor_current_id(void)
+{
+ if (GET_THREAD()->ractor == NULL) {
+ return 1; // main ractor
+ }
+ else {
+ return GET_RACTOR()->id;
+ }
+}
+#endif
+
+static void
+ractor_queue_setup(struct rb_ractor_queue *rq)
+{
+ rq->size = 2;
+ rq->cnt = 0;
+ rq->baskets = malloc(sizeof(struct rb_ractor_basket) * rq->size);
+}
+
+static bool
+ractor_queue_empty_p(rb_ractor_t *r, struct rb_ractor_queue *rq)
+{
+ ASSERT_ractor_locking(r);
+ return rq->cnt == 0;
+}
+
+static bool
+ractor_queue_deq(rb_ractor_t *r, struct rb_ractor_queue *rq, struct rb_ractor_basket *basket)
+{
+ bool b;
+
+ RACTOR_LOCK(r);
+ {
+ if (!ractor_queue_empty_p(r, rq)) {
+ // TODO: use good Queue data structure
+ *basket = rq->baskets[0];
+ rq->cnt--;
+ for (int i=0; i<rq->cnt; i++) {
+ rq->baskets[i] = rq->baskets[i+1];
+ }
+ b = true;
+ }
+ else {
+ b = false;
+ }
+ }
+ RACTOR_UNLOCK(r);
+
+ return b;
+}
+
+static void
+ractor_queue_enq(rb_ractor_t *r, struct rb_ractor_queue *rq, struct rb_ractor_basket *basket)
+{
+ ASSERT_ractor_locking(r);
+
+ if (rq->size <= rq->cnt) {
+ rq->size *= 2;
+ rq->baskets = realloc(rq->baskets, sizeof(struct rb_ractor_basket) * rq->size);
+ }
+ rq->baskets[rq->cnt++] = *basket;
+ // fprintf(stderr, "%s %p->cnt:%d\n", __func__, rq, rq->cnt);
+}
+
+VALUE rb_newobj_with(VALUE src); // gc.c
+
+static VALUE
+ractor_moving_new(VALUE obj)
+{
+ // create moving object
+ VALUE v = rb_newobj_with(obj);
+
+ // invalidate src object
+ struct RVALUE {
+ VALUE flags;
+ VALUE klass;
+ VALUE v1;
+ VALUE v2;
+ VALUE v3;
+ } *rv = (void *)obj;
+
+ rv->klass = rb_cRactorMovedObject;
+ rv->v1 = 0;
+ rv->v2 = 0;
+ rv->v3 = 0;
+
+ // TODO: record moved location
+ // TODO: check flags for each data type
+
+ return v;
+}
+
+static VALUE
+ractor_move_shallow_copy(VALUE obj)
+{
+ if (rb_ractor_shareable_p(obj)) {
+ return obj;
+ }
+ else {
+ switch (BUILTIN_TYPE(obj)) {
+ case T_STRING:
+ case T_FILE:
+ if (!FL_TEST_RAW(obj, RUBY_FL_EXIVAR)) {
+ return ractor_moving_new(obj);
+ }
+ break;
+ case T_ARRAY:
+ if (!FL_TEST_RAW(obj, RUBY_FL_EXIVAR)) {
+ VALUE ary = ractor_moving_new(obj);
+ long len = RARRAY_LEN(ary);
+ for (long i=0; i<len; i++) {
+ VALUE e = RARRAY_AREF(ary, i);
+ RARRAY_ASET(ary, i, ractor_move_shallow_copy(e)); // confirm WB
+ }
+ return ary;
+ }
+ break;
+ default:
+ break;
+ }
+
+ rb_raise(rb_eRactorError, "can't move this this kind of object:%"PRIsVALUE, obj);
+ }
+}
+
+static VALUE
+ractor_moved_setup(VALUE obj)
+{
+#if RACTOR_CHECK_MODE
+ switch (BUILTIN_TYPE(obj)) {
+ case T_STRING:
+ case T_FILE:
+ rb_ractor_setup_belonging(obj);
+ break;
+ case T_ARRAY:
+ rb_ractor_setup_belonging(obj);
+ long len = RARRAY_LEN(obj);
+ for (long i=0; i<len; i++) {
+ VALUE e = RARRAY_AREF(obj, i);
+ if (!rb_ractor_shareable_p(e)) {
+ ractor_moved_setup(e);
+ }
+ }
+ break;
+ default:
+ rb_bug("unreachable");
+ }
+#endif
+ return obj;
+}
+
+static void
+ractor_move_setup(struct rb_ractor_basket *b, VALUE obj)
+{
+ if (rb_ractor_shareable_p(obj)) {
+ b->type = basket_type_shareable;
+ b->v = obj;
+ }
+ else {
+ b->type = basket_type_move;
+ b->v = ractor_move_shallow_copy(obj);
+ return;
+ }
+}
+
+static void
+ractor_basket_clear(struct rb_ractor_basket *b)
+{
+ b->type = basket_type_none;
+ b->v = Qfalse;
+ b->sender = Qfalse;
+}
+
+static VALUE
+ractor_basket_accept(struct rb_ractor_basket *b)
+{
+ VALUE v;
+ switch (b->type) {
+ case basket_type_shareable:
+ VM_ASSERT(rb_ractor_shareable_p(b->v));
+ v = b->v;
+ break;
+ case basket_type_copy_marshal:
+ v = rb_marshal_load(b->v);
+ break;
+ case basket_type_exception:
+ {
+ VALUE cause = rb_marshal_load(b->v);
+ VALUE err = rb_exc_new_cstr(rb_eRactorRemoteError, "thrown by remote Ractor.");
+ rb_ivar_set(err, rb_intern("@ractor"), b->sender);
+ ractor_basket_clear(b);
+ rb_ec_setup_exception(NULL, err, cause);
+ rb_exc_raise(err);
+ }
+ // unreachable
+ case basket_type_move:
+ v = ractor_moved_setup(b->v);
+ break;
+ default:
+ rb_bug("unreachable");
+ }
+ ractor_basket_clear(b);
+ return v;
+}
+
+static void
+ractor_copy_setup(struct rb_ractor_basket *b, VALUE obj)
+{
+ if (rb_ractor_shareable_p(obj)) {
+ b->type = basket_type_shareable;
+ b->v = obj;
+ }
+ else {
+#if 0
+ // TODO: consider custom copy protocol
+ switch (BUILTIN_TYPE(obj)) {
+
+ }
+#endif
+ b->v = rb_marshal_dump(obj, Qnil);
+ b->type = basket_type_copy_marshal;
+ }
+}
+
+static VALUE
+ractor_try_recv(rb_execution_context_t *ec, rb_ractor_t *r)
+{
+ struct rb_ractor_queue *rq = &r->incoming_queue;
+ struct rb_ractor_basket basket;
+
+ if (ractor_queue_deq(r, rq, &basket) == false) {
+ if (r->incoming_port_closed) {
+ rb_raise(rb_eRactorClosedError, "The incoming port is already closed");
+ }
+ else {
+ return Qundef;
+ }
+ }
+
+ return ractor_basket_accept(&basket);
+}
+
+static void *
+ractor_sleep_wo_gvl(void *ptr)
+{
+ rb_ractor_t *cr = ptr;
+ RACTOR_LOCK_SELF(cr);
+ VM_ASSERT(cr->wait.status != wait_none);
+ if (cr->wait.wakeup_status == wakeup_none) {
+ ractor_cond_wait(cr);
+ }
+ cr->wait.status = wait_none;
+ RACTOR_UNLOCK_SELF(cr);
+ return NULL;
+}
+
+static void
+ractor_sleep_interrupt(void *ptr)
+{
+ rb_ractor_t *r = ptr;
+
+ RACTOR_LOCK(r);
+ if (r->wait.wakeup_status == wakeup_none) {
+ r->wait.wakeup_status = wakeup_by_interrupt;
+ rb_native_cond_signal(&r->wait.cond);
+ }
+ RACTOR_UNLOCK(r);
+}
+
+#if USE_RUBY_DEBUG_LOG
+static const char *
+wait_status_str(enum ractor_wait_status wait_status)
+{
+ switch ((int)wait_status) {
+ case wait_none: return "none";
+ case wait_recving: return "recving";
+ case wait_taking: return "taking";
+ case wait_yielding: return "yielding";
+ case wait_recving|wait_taking: return "recving|taking";
+ case wait_recving|wait_yielding: return "recving|yielding";
+ case wait_taking|wait_yielding: return "taking|yielding";
+ case wait_recving|wait_taking|wait_yielding: return "recving|taking|yielding";
+ }
+ rb_bug("unrechable");
+}
+
+static const char *
+wakeup_status_str(enum ractor_wakeup_status wakeup_status)
+{
+ switch (wakeup_status) {
+ case wakeup_none: return "none";
+ case wakeup_by_send: return "by_send";
+ case wakeup_by_yield: return "by_yield";
+ case wakeup_by_take: return "by_take";
+ case wakeup_by_close: return "by_close";
+ case wakeup_by_interrupt: return "by_interrupt";
+ case wakeup_by_retry: return "by_retry";
+ }
+ rb_bug("unrechable");
+}
+#endif // USE_RUBY_DEBUG_LOG
+
+static void
+ractor_sleep(rb_execution_context_t *ec, rb_ractor_t *cr)
+{
+ VM_ASSERT(GET_RACTOR() == cr);
+ VM_ASSERT(cr->wait.status != wait_none);
+ // fprintf(stderr, "%s r:%p status:%s, wakeup_status:%s\n", __func__, cr,
+ // wait_status_str(cr->wait.status), wakeup_status_str(cr->wait.wakeup_status));
+
+ RACTOR_UNLOCK(cr);
+ rb_nogvl(ractor_sleep_wo_gvl, cr,
+ ractor_sleep_interrupt, cr,
+ RB_NOGVL_UBF_ASYNC_SAFE);
+ RACTOR_LOCK(cr);
+}
+
+static bool
+ractor_sleeping_by(const rb_ractor_t *r, enum ractor_wait_status wait_status)
+{
+ return (r->wait.status & wait_status) && r->wait.wakeup_status == wakeup_none;
+}
+
+static bool
+ractor_wakeup(rb_ractor_t *r, enum ractor_wait_status wait_status, enum ractor_wakeup_status wakeup_status)
+{
+ ASSERT_ractor_locking(r);
+
+ // fprintf(stderr, "%s r:%p status:%s/%s wakeup_status:%s/%s\n", __func__, r,
+ // wait_status_str(r->wait.status), wait_status_str(wait_status),
+ // wakeup_status_str(r->wait.wakeup_status), wakeup_status_str(wakeup_status));
+
+ if (ractor_sleeping_by(r, wait_status)) {
+ r->wait.wakeup_status = wakeup_status;
+ rb_native_cond_signal(&r->wait.cond);
+ return true;
+ }
+ else {
+ return false;
+ }
+}
+
+static void
+ractor_register_taking(rb_ractor_t *r, rb_ractor_t *cr)
+{
+ VM_ASSERT(cr == GET_RACTOR());
+ bool retry_try = false;
+
+ RACTOR_LOCK(r);
+ {
+ if (ractor_sleeping_by(r, wait_yielding)) {
+ // already waiting for yielding. retry try_take.
+ retry_try = true;
+ }
+ else {
+ // insert cr into taking list
+ struct rb_ractor_waiting_list *wl = &r->taking_ractors;
+
+ for (int i=0; i<wl->cnt; i++) {
+ if (wl->ractors[i] == cr) {
+ // TODO: clean up this code.
+ rb_native_mutex_unlock(&r->lock);
+ rb_raise(rb_eRuntimeError, "Already another thread of same ractor is waiting.");
+ }
+ }
+
+ if (wl->size == 0) {
+ wl->size = 1;
+ wl->ractors = malloc(sizeof(rb_ractor_t *) * wl->size);
+ if (wl->ractors == NULL) rb_bug("can't allocate buffer");
+ }
+ else if (wl->size <= wl->cnt + 1) {
+ wl->size *= 2;
+ wl->ractors = realloc(wl->ractors, sizeof(rb_ractor_t *) * wl->size);
+ if (wl->ractors == NULL) rb_bug("can't re-allocate buffer");
+ }
+ wl->ractors[wl->cnt++] = cr;
+ }
+ }
+ RACTOR_UNLOCK(r);
+
+ if (retry_try) {
+ RACTOR_LOCK(cr);
+ {
+ if (cr->wait.wakeup_status == wakeup_none) {
+ VM_ASSERT(cr->wait.status != wait_none);
+
+ cr->wait.wakeup_status = wakeup_by_retry;
+ cr->wait.status = wait_none;
+ }
+ }
+ RACTOR_UNLOCK(cr);
+ }
+}
+
+static void
+ractor_waiting_list_del(rb_ractor_t *r, struct rb_ractor_waiting_list *wl, rb_ractor_t *wr)
+{
+ RACTOR_LOCK(r);
+ {
+ int pos = -1;
+ for (int i=0; i<wl->cnt; i++) {
+ if (wl->ractors[i] == wr) {
+ pos = i;
+ break;
+ }
+ }
+ if (pos >= 0) { // found
+ wl->cnt--;
+ for (int i=pos; i<wl->cnt; i++) {
+ wl->ractors[i] = wl->ractors[i+1];
+ }
+ }
+ }
+ RACTOR_UNLOCK(r);
+}
+
+static rb_ractor_t *
+ractor_waiting_list_shift(rb_ractor_t *r, struct rb_ractor_waiting_list *wl)
+{
+ ASSERT_ractor_locking(r);
+ VM_ASSERT(&r->taking_ractors == wl);
+
+ if (wl->cnt > 0) {
+ rb_ractor_t *tr = wl->ractors[0];
+ for (int i=1; i<wl->cnt; i++) {
+ wl->ractors[i-1] = wl->ractors[i];
+ }
+ wl->cnt--;
+ return tr;
+ }
+ else {
+ return NULL;
+ }
+}
+
+static VALUE
+ractor_recv(rb_execution_context_t *ec, rb_ractor_t *r)
+{
+ VM_ASSERT(r == rb_ec_ractor_ptr(ec));
+ VALUE v;
+
+ while ((v = ractor_try_recv(ec, r)) == Qundef) {
+ RACTOR_LOCK(r);
+ {
+ if (ractor_queue_empty_p(r, &r->incoming_queue)) {
+ VM_ASSERT(r->wait.status == wait_none);
+ VM_ASSERT(r->wait.wakeup_status == wakeup_none);
+ r->wait.status = wait_recving;
+
+ ractor_sleep(ec, r);
+
+ r->wait.wakeup_status = wakeup_none;
+ }
+ }
+ RACTOR_UNLOCK(r);
+ }
+
+ return v;
+}
+
+static void
+ractor_send_basket(rb_execution_context_t *ec, rb_ractor_t *r, struct rb_ractor_basket *b)
+{
+ bool closed = false;
+ struct rb_ractor_queue *rq = &r->incoming_queue;
+
+ RACTOR_LOCK(r);
+ {
+ if (r->incoming_port_closed) {
+ closed = true;
+ }
+ else {
+ ractor_queue_enq(r, rq, b);
+ if (ractor_wakeup(r, wait_recving, wakeup_by_send)) {
+ RUBY_DEBUG_LOG("wakeup", 0);
+ }
+ }
+ }
+ RACTOR_UNLOCK(r);
+
+ if (closed) {
+ rb_raise(rb_eRactorClosedError, "The incoming-port is already closed");
+ }
+}
+
+static void
+ractor_basket_setup(rb_execution_context_t *ec, struct rb_ractor_basket *basket, VALUE obj, VALUE move, bool exc)
+{
+ basket->sender = rb_ec_ractor_ptr(ec)->self;
+
+ if (!RTEST(move)) {
+ ractor_copy_setup(basket, obj);
+ }
+ else {
+ ractor_move_setup(basket, obj);
+ }
+
+ if (exc) {
+ basket->type = basket_type_exception;
+ }
+}
+
+static VALUE
+ractor_send(rb_execution_context_t *ec, rb_ractor_t *r, VALUE obj, VALUE move)
+{
+ struct rb_ractor_basket basket;
+ ractor_basket_setup(ec, &basket, obj, move, false);
+ ractor_send_basket(ec, r, &basket);
+ return r->self;
+}
+
+static VALUE
+ractor_try_take(rb_execution_context_t *ec, rb_ractor_t *r)
+{
+ struct rb_ractor_basket basket = {
+ .type = basket_type_none,
+ };
+ bool closed = false;
+
+ RACTOR_LOCK(r);
+ {
+ if (ractor_wakeup(r, wait_yielding, wakeup_by_take)) {
+ VM_ASSERT(r->wait.yielded_basket.type != basket_type_none);
+ basket = r->wait.yielded_basket;
+ ractor_basket_clear(&r->wait.yielded_basket);
+ }
+ else if (r->outgoing_port_closed) {
+ closed = true;
+ }
+ else {
+ // not reached.
+ }
+ }
+ RACTOR_UNLOCK(r);
+
+ if (basket.type == basket_type_none) {
+ if (closed) {
+ rb_raise(rb_eRactorClosedError, "The outgoing-port is already closed");
+ }
+ else {
+ return Qundef;
+ }
+ }
+ else {
+ return ractor_basket_accept(&basket);
+ }
+}
+
+static bool
+ractor_try_yield(rb_execution_context_t *ec, rb_ractor_t *cr, struct rb_ractor_basket *basket)
+{
+ ASSERT_ractor_unlocking(cr);
+ VM_ASSERT(basket->type != basket_type_none);
+
+ rb_ractor_t *r;
+
+ retry_shift:
+ RACTOR_LOCK(cr);
+ {
+ r = ractor_waiting_list_shift(cr, &cr->taking_ractors);
+ }
+ RACTOR_UNLOCK(cr);
+
+ if (r) {
+ bool retry_shift = false;
+
+ RACTOR_LOCK(r);
+ {
+ if (ractor_wakeup(r, wait_taking, wakeup_by_yield)) {
+ VM_ASSERT(r->wait.taken_basket.type == basket_type_none);
+ r->wait.taken_basket = *basket;
+ }
+ else {
+ retry_shift = true;
+ }
+ }
+ RACTOR_UNLOCK(r);
+
+ if (retry_shift) {
+ // got a candidate take-waiting ractor, but it was already woken up for another reason.
+ // retry to check another ractor.
+ goto retry_shift;
+ }
+ else {
+ return true;
+ }
+ }
+ else {
+ return false;
+ }
+}
+
+// select(r1, r2, r3, receive: true, yield: obj)
+//
+// Multiplex take/receive/yield operations: keep trying every requested
+// action, sleeping between rounds, until one succeeds.  On success the
+// resulting value is returned and *ret_r identifies the partner:
+// the sender Ractor for take, :recv for receive, :yield for yield.
+// rs[0..alen-1] are Ractor objects to take from (cr itself means receive);
+// yielded_value == Qundef means "no yield action".
+static VALUE
+ractor_select(rb_execution_context_t *ec, const VALUE *rs, int alen, VALUE yielded_value, bool move, VALUE *ret_r)
+{
+    rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
+    VALUE crv = cr->self;
+    VALUE ret = Qundef;
+    int i;
+    enum ractor_wait_status wait_status = 0;
+    bool yield_p = (yielded_value != Qundef) ? true : false;
+
+    struct ractor_select_action {
+        enum ractor_select_action_type {
+            ractor_select_action_take,
+            ractor_select_action_recv,
+            ractor_select_action_yield,
+        } type;
+        VALUE v;
+    } *actions = ALLOCA_N(struct ractor_select_action, alen + (yield_p ? 1 : 0));
+
+    VM_ASSERT(cr->wait.status == wait_none);
+    VM_ASSERT(cr->wait.wakeup_status == wakeup_none);
+    VM_ASSERT(cr->wait.taken_basket.type == basket_type_none);
+    VM_ASSERT(cr->wait.yielded_basket.type == basket_type_none);
+
+    // setup actions
+    for (i=0; i<alen; i++) {
+        VALUE v = rs[i];
+
+        if (v == crv) {
+            // passing the current ractor means "receive from my own incoming queue"
+            actions[i].type = ractor_select_action_recv;
+            actions[i].v = Qnil;
+            wait_status |= wait_recving;
+        }
+        else if (rb_ractor_p(v)) {
+            actions[i].type = ractor_select_action_take;
+            actions[i].v = v;
+            wait_status |= wait_taking;
+        }
+        else {
+            rb_raise(rb_eArgError, "It should be ractor objects");
+        }
+    }
+    // rs may point into a transient array; do not touch it past this point.
+    rs = NULL;
+
+    if (yield_p) {
+        // append one extra action slot for the yield
+        actions[i].type = ractor_select_action_yield;
+        actions[i].v = Qundef;
+        wait_status |= wait_yielding;
+        alen++;
+
+        ractor_basket_setup(ec, &cr->wait.yielded_basket, yielded_value, move, false);
+    }
+
+    // TODO: shuffle actions
+
+    while (1) {
+        RUBY_DEBUG_LOG("try actions (%s)", wait_status_str(wait_status));
+
+        // phase 1: non-blocking attempt of each action
+        for (i=0; i<alen; i++) {
+            VALUE v, rv;
+            switch (actions[i].type) {
+              case ractor_select_action_take:
+                rv = actions[i].v;
+                v = ractor_try_take(ec, RACTOR_PTR(rv));
+                if (v != Qundef) {
+                    *ret_r = rv;
+                    ret = v;
+                    goto cleanup;
+                }
+                break;
+              case ractor_select_action_recv:
+                v = ractor_try_recv(ec, cr);
+                if (v != Qundef) {
+                    *ret_r = ID2SYM(rb_intern("recv"));
+                    ret = v;
+                    goto cleanup;
+                }
+                break;
+              case ractor_select_action_yield:
+                {
+                    if (ractor_try_yield(ec, cr, &cr->wait.yielded_basket)) {
+                        *ret_r = ID2SYM(rb_intern("yield"));
+                        ret = Qnil;
+                        goto cleanup;
+                    }
+                }
+                break;
+            }
+        }
+
+        RUBY_DEBUG_LOG("wait actions (%s)", wait_status_str(wait_status));
+
+        // phase 2: publish what we are waiting for, then register and sleep
+        RACTOR_LOCK(cr);
+        {
+            VM_ASSERT(cr->wait.status == wait_none);
+            VM_ASSERT(cr->wait.wakeup_status == wakeup_none);
+            cr->wait.status = wait_status;
+        }
+        RACTOR_UNLOCK(cr);
+
+        // prepare waiting: register cr on every take-target's waiting list
+        for (i=0; i<alen; i++) {
+            rb_ractor_t *r;
+            switch (actions[i].type) {
+              case ractor_select_action_take:
+                r = RACTOR_PTR(actions[i].v);
+                ractor_register_taking(r, cr);
+                break;
+              case ractor_select_action_yield:
+              case ractor_select_action_recv:
+                break;
+            }
+        }
+
+        // wait
+        RACTOR_LOCK(cr);
+        {
+            if (cr->wait.wakeup_status == wakeup_none) {
+                // re-check each action: an event may have arrived between the
+                // try-phase and taking the lock; if so, skip sleeping and retry.
+                for (i=0; i<alen; i++) {
+                    rb_ractor_t *r;
+
+                    switch (actions[i].type) {
+                      case ractor_select_action_take:
+                        r = RACTOR_PTR(actions[i].v);
+                        if (ractor_sleeping_by(r, wait_yielding)) {
+                            RUBY_DEBUG_LOG("wakeup_none, but r:%u is waiting for yielding", r->id);
+                            cr->wait.wakeup_status = wakeup_by_retry;
+                            goto skip_sleep;
+                        }
+                        break;
+                      case ractor_select_action_recv:
+                        if (cr->incoming_queue.cnt > 0) {
+                            RUBY_DEBUG_LOG("wakeup_none, but incoming_queue has %u messages", cr->incoming_queue.cnt);
+                            cr->wait.wakeup_status = wakeup_by_retry;
+                            goto skip_sleep;
+                        }
+                        break;
+                      case ractor_select_action_yield:
+                        if (cr->taking_ractors.cnt > 0) {
+                            RUBY_DEBUG_LOG("wakeup_none, but %u taking_ractors are waiting", cr->taking_ractors.cnt);
+                            cr->wait.wakeup_status = wakeup_by_retry;
+                            goto skip_sleep;
+                        }
+                        break;
+                    }
+                }
+
+                RUBY_DEBUG_LOG("sleep %s", wait_status_str(cr->wait.status));
+                ractor_sleep(ec, cr);
+                RUBY_DEBUG_LOG("awaken %s", wakeup_status_str(cr->wait.wakeup_status));
+            }
+            else {
+              skip_sleep:
+                RUBY_DEBUG_LOG("no need to sleep %s->%s",
+                               wait_status_str(cr->wait.status),
+                               wakeup_status_str(cr->wait.wakeup_status));
+                cr->wait.status = wait_none;
+            }
+        }
+        RACTOR_UNLOCK(cr);
+
+        // cleanup waiting: de-register cr from every take-target's waiting list
+        for (i=0; i<alen; i++) {
+            rb_ractor_t *r;
+            switch (actions[i].type) {
+              case ractor_select_action_take:
+                r = RACTOR_PTR(actions[i].v);
+                ractor_waiting_list_del(r, &r->taking_ractors, cr);
+                break;
+              case ractor_select_action_recv:
+              case ractor_select_action_yield:
+                break;
+            }
+        }
+
+        // check results
+        // NOTE(review): wakeup_status is read without holding cr's lock here —
+        // presumably safe because only cr itself consumes/clears it; confirm.
+        enum ractor_wakeup_status wakeup_status = cr->wait.wakeup_status;
+        cr->wait.wakeup_status = wakeup_none;
+
+        switch (wakeup_status) {
+          case wakeup_none:
+            // OK. something happens.
+            // retry loop.
+            break;
+          case wakeup_by_retry:
+            // Retry request.
+            break;
+          case wakeup_by_send:
+            // OK.
+            // retry loop and try_recv will succeed.
+            break;
+          case wakeup_by_yield:
+            // take was succeeded!
+            // cr.wait.taken_basket contains passed block
+            VM_ASSERT(cr->wait.taken_basket.type != basket_type_none);
+            *ret_r = cr->wait.taken_basket.sender;
+            VM_ASSERT(rb_ractor_p(*ret_r));
+            ret = ractor_basket_accept(&cr->wait.taken_basket);
+            goto cleanup;
+          case wakeup_by_take:
+            *ret_r = ID2SYM(rb_intern("yield"));
+            ret = Qnil;
+            goto cleanup;
+          case wakeup_by_close:
+            // OK.
+            // retry loop and will get CloseError.
+            break;
+          case wakeup_by_interrupt:
+            ret = Qundef;
+            goto cleanup;
+        }
+    }
+
+  cleanup:
+    RUBY_DEBUG_LOG("cleanup actions (%s)", wait_status_str(wait_status));
+
+    if (cr->wait.yielded_basket.type != basket_type_none) {
+        ractor_basket_clear(&cr->wait.yielded_basket);
+    }
+
+    VM_ASSERT(cr->wait.status == wait_none);
+    VM_ASSERT(cr->wait.wakeup_status == wakeup_none);
+    VM_ASSERT(cr->wait.taken_basket.type == basket_type_none);
+    VM_ASSERT(cr->wait.yielded_basket.type == basket_type_none);
+
+    // may raise (e.g. Ractor::ClosedError or interrupt exceptions)
+    RUBY_VM_CHECK_INTS(ec);
+
+    VM_ASSERT(ret != Qundef);
+    return ret;
+}
+
+// Ractor#yield / Ractor.yield: block until another ractor takes obj,
+// implemented as a select with only a yield action.  Returns Qnil.
+static VALUE
+ractor_yield(rb_execution_context_t *ec, rb_ractor_t *r, VALUE obj, VALUE move)
+{
+    VALUE ret_r;
+    ractor_select(ec, NULL, 0, obj, RTEST(move) ? true : false, &ret_r);
+    return Qnil;
+}
+
+// Ractor#take: block until r yields a value, implemented as a select
+// with a single take action on r.
+static VALUE
+ractor_take(rb_execution_context_t *ec, rb_ractor_t *r)
+{
+    VALUE ret_r;
+    VALUE v = ractor_select(ec, &r->self, 1, Qundef, false, &ret_r);
+    return v;
+}
+
+// Close r's incoming port.  Returns the previous closed state
+// (Qtrue if it was already closed, Qfalse otherwise).  A ractor
+// blocked receiving on the port is woken with wakeup_by_close.
+static VALUE
+ractor_close_incoming(rb_execution_context_t *ec, rb_ractor_t *r)
+{
+    VALUE prev;
+
+    RACTOR_LOCK(r);
+    {
+        if (!r->incoming_port_closed) {
+            prev = Qfalse;
+            r->incoming_port_closed = true;
+            if (ractor_wakeup(r, wait_recving, wakeup_by_close)) {
+                // a receiver can only be asleep if the queue was empty
+                VM_ASSERT(r->incoming_queue.cnt == 0);
+            }
+        }
+        else {
+            prev = Qtrue;
+        }
+    }
+    RACTOR_UNLOCK(r);
+    return prev;
+}
+
+// Close cr's outgoing port and wake every ractor waiting to take from
+// it (they will observe the closed port).  Returns the previous
+// closed state (Qtrue if already closed).
+static VALUE
+ractor_close_outgoing(rb_execution_context_t *ec, rb_ractor_t *cr)
+{
+    VALUE prev;
+
+    RACTOR_LOCK(cr);
+    {
+        if (!cr->outgoing_port_closed) {
+            prev = Qfalse;
+            cr->outgoing_port_closed = true;
+        }
+        else {
+            prev = Qtrue;
+        }
+
+        // wakeup all taking ractors
+        rb_ractor_t *taking_ractor;
+        while ((taking_ractor = ractor_waiting_list_shift(cr, &cr->taking_ractors)) != NULL) {
+            RACTOR_LOCK(taking_ractor);
+            ractor_wakeup(taking_ractor, wait_taking, wakeup_by_close);
+            RACTOR_UNLOCK(taking_ractor);
+        }
+    }
+    RACTOR_UNLOCK(cr);
+    return prev;
+}
+
+// creation/termination
+
+// Issue the next process-wide unique ractor id, serialized by the VM lock.
+static uint32_t
+ractor_next_id(void)
+{
+    uint32_t id;
+
+    RB_VM_LOCK();
+    {
+        id = ++ractor_last_id;
+    }
+    RB_VM_UNLOCK();
+
+    return id;
+}
+
+// Append r to vm->ractor.set and bump the count.  In multi-ractor mode
+// the caller must hold the VM lock (asserted below).
+static void
+vm_insert_ractor0(rb_vm_t *vm, rb_ractor_t *r)
+{
+    RUBY_DEBUG_LOG("r:%u ractor.cnt:%u++", r->id, vm->ractor.cnt);
+    VM_ASSERT(!rb_multi_ractor_p() || RB_VM_LOCKED_P());
+
+    list_add_tail(&vm->ractor.set, &r->vmlr_node);
+    vm->ractor.cnt++;
+}
+
+// Register a freshly created ractor with the VM.  The first ractor
+// becomes the (running) main ractor; registering a second ractor
+// switches the VM into multi-ractor mode and emits the experimental
+// warning.  New non-main ractors start in the blocking state until
+// their thread begins running.
+static void
+vm_insert_ractor(rb_vm_t *vm, rb_ractor_t *r)
+{
+    VM_ASSERT(ractor_status_p(r, ractor_created));
+
+    if (rb_multi_ractor_p()) {
+        RB_VM_LOCK();
+        {
+            vm_insert_ractor0(vm, r);
+            vm_ractor_blocking_cnt_inc(vm, r, __FILE__, __LINE__);
+        }
+        RB_VM_UNLOCK();
+    }
+    else {
+        // single-ractor mode: no other ractor can race us, no lock needed
+        vm_insert_ractor0(vm, r);
+
+        if (vm->ractor.cnt == 1) {
+            // main ractor
+            ractor_status_set(r, ractor_blocking);
+            ractor_status_set(r, ractor_running);
+        }
+        else {
+            vm_ractor_blocking_cnt_inc(vm, r, __FILE__, __LINE__);
+
+            // enable multi-ractor mode
+            ruby_multi_ractor = true;
+
+            if (rb_warning_category_enabled_p(RB_WARN_CATEGORY_EXPERIMENTAL)) {
+                rb_warn("Ractor is experimental, and the behavior may change in future versions of Ruby! Also there are many implementation issues.");
+            }
+        }
+    }
+}
+
+// Unregister a terminating (non-main) ractor from the VM.  If this
+// leaves only the main ractor and it is blocked in
+// rb_ractor_terminate_all(), signal terminate_cond so it can finish.
+static void
+vm_remove_ractor(rb_vm_t *vm, rb_ractor_t *cr)
+{
+    VM_ASSERT(ractor_status_p(cr, ractor_running));
+    VM_ASSERT(vm->ractor.cnt > 1);
+    VM_ASSERT(cr->threads.cnt == 1);
+
+    RB_VM_LOCK();
+    {
+        RUBY_DEBUG_LOG("ractor.cnt:%u-- terminate_waiting:%d",
+                       vm->ractor.cnt, vm->ractor.sync.terminate_waiting);
+
+        VM_ASSERT(vm->ractor.cnt > 0);
+        list_del(&cr->vmlr_node);
+
+        // cnt <= 2 means "about to become 1": only the main ractor remains
+        if (vm->ractor.cnt <= 2 && vm->ractor.sync.terminate_waiting) {
+            rb_native_cond_signal(&vm->ractor.sync.terminate_cond);
+        }
+        vm->ractor.cnt--;
+
+        ractor_status_set(cr, ractor_terminated);
+    }
+    RB_VM_UNLOCK();
+}
+
+// Allocate a GC-managed Ractor object (status: ractor_created).
+// Ractor objects are always shareable.
+static VALUE
+ractor_alloc(VALUE klass)
+{
+    rb_ractor_t *r;
+    VALUE rv = TypedData_Make_Struct(klass, rb_ractor_t, &ractor_data_type, r);
+    FL_SET_RAW(rv, RUBY_FL_SHAREABLE);
+    r->self = rv;
+    VM_ASSERT(ractor_status_p(r, ractor_created));
+    return rv;
+}
+
+// Allocate the main ractor structure during VM boot.  Uses
+// ruby_mimmalloc (not the GC heap) and zero-fills manually; aborts the
+// process on allocation failure.  Its `self` VALUE is attached later by
+// rb_ractor_main_setup().
+rb_ractor_t *
+rb_ractor_main_alloc(void)
+{
+    rb_ractor_t *r = ruby_mimmalloc(sizeof(rb_ractor_t));
+    if (r == NULL) {
+        fprintf(stderr, "[FATAL] failed to allocate memory for main ractor\n");
+        exit(EXIT_FAILURE);
+    }
+    MEMZERO(r, rb_ractor_t, 1);
+    r->id = ++ractor_last_id;
+    r->loc = Qnil;
+    r->name = Qnil;
+
+    return r;
+}
+
+void rb_gvl_init(rb_global_vm_lock_t *gvl); // thread_*.c
+
+// Reset r's per-ractor thread bookkeeping to "no threads".
+void
+rb_ractor_living_threads_init(rb_ractor_t *r)
+{
+    list_head_init(&r->threads.set);
+    r->threads.cnt = 0;
+    r->threads.blocking_cnt = 0;
+}
+
+// Common initialization shared by the main ractor and ractors created
+// via Ractor.new: queues, locks, condition variables, per-ractor GVL,
+// thread list, and the (possibly nil) name/location.
+static void
+ractor_init(rb_ractor_t *r, VALUE name, VALUE loc)
+{
+    ractor_queue_setup(&r->incoming_queue);
+    rb_native_mutex_initialize(&r->lock);
+    rb_native_cond_initialize(&r->wait.cond);
+    rb_native_cond_initialize(&r->barrier_wait_cond);
+
+    // thread management
+    rb_gvl_init(&r->threads.gvl);
+    rb_ractor_living_threads_init(r);
+
+    // naming
+    r->name = name;
+    r->loc = loc;
+}
+
+// Finish setting up the boot-time main ractor once rb_cRactor and the
+// main thread exist: wrap it in a Ruby object and register th.
+void
+rb_ractor_main_setup(rb_vm_t *vm, rb_ractor_t *r, rb_thread_t *th)
+{
+    r->self = TypedData_Wrap_Struct(rb_cRactor, &ractor_data_type, r);
+    FL_SET_RAW(r->self, RUBY_FL_SHAREABLE);
+    ractor_init(r, Qnil, Qnil);
+    r->threads.main = th;
+    rb_ractor_living_threads_insert(r, th);
+}
+
+// io.c
+VALUE rb_io_prep_stdin(void);
+VALUE rb_io_prep_stdout(void);
+VALUE rb_io_prep_stderr(void);
+
+// Implementation of Ractor.new (called from ractor.rb via
+// __builtin_ractor_create): allocate and initialize a ractor, give it
+// its own stdin/stdout/stderr IO objects, and start its thread with
+// the given args and isolated block.
+static VALUE
+ractor_create(rb_execution_context_t *ec, VALUE self, VALUE loc, VALUE name, VALUE args, VALUE block)
+{
+    VALUE rv = ractor_alloc(self);
+    rb_ractor_t *r = RACTOR_PTR(rv);
+    ractor_init(r, name, loc);
+
+    // can block here
+    r->id = ractor_next_id();
+    RUBY_DEBUG_LOG("r:%u", r->id);
+
+    r->r_stdin = rb_io_prep_stdin();
+    r->r_stdout = rb_io_prep_stdout();
+    r->r_stderr = rb_io_prep_stderr();
+
+    rb_thread_create_ractor(r, args, block);
+
+    RB_GC_GUARD(rv);
+    return rv;
+}
+
+// Yield the ractor's final value (or its terminating exception when
+// exc is true) on exit.  If a taker is already waiting the value is
+// handed over immediately; otherwise it is parked in
+// cr->wait.yielded_basket with status wait_yielding so a later take
+// can pick it up — the exiting ractor does not sleep here.
+static void
+ractor_atexit_yield(rb_execution_context_t *ec, rb_ractor_t *cr, VALUE v, bool exc)
+{
+    ASSERT_ractor_unlocking(cr);
+
+    struct rb_ractor_basket basket;
+    ractor_basket_setup(ec, &basket, v, Qfalse, exc);
+
+  retry:
+    if (ractor_try_yield(ec, cr, &basket)) {
+        // OK.
+    }
+    else {
+        bool retry = false;
+        RACTOR_LOCK(cr);
+        {
+            if (cr->taking_ractors.cnt == 0) {
+                cr->wait.yielded_basket = basket;
+
+                VM_ASSERT(cr->wait.status == wait_none);
+                cr->wait.status = wait_yielding;
+            }
+            else {
+                retry = true; // another ractor is waiting for the yield.
+            }
+        }
+        RACTOR_UNLOCK(cr);
+
+        if (retry) goto retry;
+    }
+}
+
+// Tear down the current ractor on exit: close both ports and clear
+// threads.main under the VM lock so terminate-interrupt delivery
+// cannot race with the teardown.
+void
+rb_ractor_teardown(rb_execution_context_t *ec)
+{
+    rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
+    ractor_close_incoming(ec, cr);
+    ractor_close_outgoing(ec, cr);
+
+    // sync with rb_ractor_terminate_interrupt_main_thread()
+    RB_VM_LOCK_ENTER();
+    {
+        VM_ASSERT(cr->threads.main != NULL);
+        cr->threads.main = NULL;
+    }
+    RB_VM_LOCK_LEAVE();
+}
+
+// Yield the block's return value as the ractor's final outgoing message.
+void
+rb_ractor_atexit(rb_execution_context_t *ec, VALUE result)
+{
+    rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
+    ractor_atexit_yield(ec, cr, result, false);
+}
+
+// Yield the terminating exception so takers raise Ractor::RemoteError.
+void
+rb_ractor_atexit_exception(rb_execution_context_t *ec)
+{
+    rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
+    ractor_atexit_yield(ec, cr, ec->errinfo, true);
+}
+
+// Receive the `len` startup arguments of a new ractor into ptr[].
+void
+rb_ractor_recv_parameters(rb_execution_context_t *ec, rb_ractor_t *r, int len, VALUE *ptr)
+{
+    for (int i=0; i<len; i++) {
+        ptr[i] = ractor_recv(ec, r);
+    }
+}
+
+// Send each element of args (Ractor.new arguments) to r's incoming queue.
+void
+rb_ractor_send_parameters(rb_execution_context_t *ec, rb_ractor_t *r, VALUE args)
+{
+    int len = RARRAY_LENINT(args);
+    for (int i=0; i<len; i++) {
+        ractor_send(ec, r, RARRAY_AREF(args, i), false);
+    }
+}
+
+// The Ruby-level Ractor object wrapping r.
+VALUE
+rb_ractor_self(const rb_ractor_t *r)
+{
+    return r->self;
+}
+
+// true iff the current thread belongs to the main ractor.
+MJIT_FUNC_EXPORTED int
+rb_ractor_main_p(void)
+{
+    rb_execution_context_t *ec = GET_EC();
+    return rb_ec_ractor_ptr(ec) == rb_ec_vm_ptr(ec)->ractor.main_ractor;
+}
+
+// Per-ractor GVL accessor (each ractor has its own GVL).
+rb_global_vm_lock_t *
+rb_ractor_gvl(rb_ractor_t *r)
+{
+    return &r->threads.gvl;
+}
+
+// Number of living threads belonging to r.
+int
+rb_ractor_living_thread_num(const rb_ractor_t *r)
+{
+    return r->threads.cnt;
+}
+
+// Build an Array of the live (runnable or stopped) Thread objects
+// belonging to r, snapshotted under r's lock.
+VALUE
+rb_ractor_thread_list(rb_ractor_t *r)
+{
+    VALUE ary = rb_ary_new();
+    rb_thread_t *th = 0;
+
+    RACTOR_LOCK(r);
+    list_for_each(&r->threads.set, th, lt_node) {
+        switch (th->status) {
+          case THREAD_RUNNABLE:
+          case THREAD_STOPPED:
+          case THREAD_STOPPED_FOREVER:
+            rb_ary_push(ary, th->self);
+            // fall through
+          default:
+            break;
+        }
+    }
+    RACTOR_UNLOCK(r);
+    return ary;
+}
+
+// Add th to r's thread list.  The first thread triggers registration
+// of the ractor itself with the VM (vm_insert_ractor).
+void
+rb_ractor_living_threads_insert(rb_ractor_t *r, rb_thread_t *th)
+{
+    VM_ASSERT(th != NULL);
+
+    RACTOR_LOCK(r);
+    {
+        RUBY_DEBUG_LOG("r(%d)->threads.cnt:%d++", r->id, r->threads.cnt);
+        list_add_tail(&r->threads.set, &th->lt_node);
+        r->threads.cnt++;
+    }
+    RACTOR_UNLOCK(r);
+
+    // first thread for a ractor
+    if (r->threads.cnt == 1) {
+        VM_ASSERT(ractor_status_p(r, ractor_created));
+        vm_insert_ractor(th->vm, r);
+    }
+}
+
+// Mark r blocking and bump the VM-wide blocking-ractor counter.
+// file/line are only for RUBY_DEBUG_LOG2.
+static void
+vm_ractor_blocking_cnt_inc(rb_vm_t *vm, rb_ractor_t *r, const char *file, int line)
+{
+    ractor_status_set(r, ractor_blocking);
+
+    RUBY_DEBUG_LOG2(file, line, "vm->ractor.blocking_cnt:%d++", vm->ractor.blocking_cnt);
+    vm->ractor.blocking_cnt++;
+    VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
+}
+
+// Public wrapper: requires the VM lock and that cr is the current ractor.
+void
+rb_vm_ractor_blocking_cnt_inc(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
+{
+    ASSERT_vm_locking();
+    VM_ASSERT(GET_RACTOR() == cr);
+    vm_ractor_blocking_cnt_inc(vm, cr, file, line);
+}
+
+// Inverse of the above: decrement the VM-wide blocking-ractor counter
+// and mark cr running again.  Requires the VM lock.
+void
+rb_vm_ractor_blocking_cnt_dec(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
+{
+    ASSERT_vm_locking();
+    VM_ASSERT(GET_RACTOR() == cr);
+
+    RUBY_DEBUG_LOG2(file, line, "vm->ractor.blocking_cnt:%d--", vm->ractor.blocking_cnt);
+    VM_ASSERT(vm->ractor.blocking_cnt > 0);
+    vm->ractor.blocking_cnt--;
+
+    ractor_status_set(cr, ractor_running);
+}
+
+// If, after this operation, every remaining thread of cr will be
+// blocking, transition the whole ractor running -> blocking at the VM
+// level.  remaind_thread_cnt ("remaining") is the thread count after
+// the caller's pending removal/blocking of one thread.
+static void
+ractor_check_blocking(rb_ractor_t *cr, unsigned int remaind_thread_cnt, const char *file, int line)
+{
+    VM_ASSERT(cr == GET_RACTOR());
+
+    RUBY_DEBUG_LOG2(file, line,
+                    "cr->threads.cnt:%u cr->threads.blocking_cnt:%u vm->ractor.cnt:%u vm->ractor.blocking_cnt:%u",
+                    cr->threads.cnt, cr->threads.blocking_cnt,
+                    GET_VM()->ractor.cnt, GET_VM()->ractor.blocking_cnt);
+
+    VM_ASSERT(cr->threads.cnt >= cr->threads.blocking_cnt + 1);
+
+    if (remaind_thread_cnt > 0 &&
+        // will be block
+        cr->threads.cnt == cr->threads.blocking_cnt + 1) {
+        // change ractor status: running -> blocking
+        rb_vm_t *vm = GET_VM();
+        ASSERT_vm_unlocking();
+
+        RB_VM_LOCK();
+        {
+            rb_vm_ractor_blocking_cnt_inc(vm, cr, file, line);
+        }
+        RB_VM_UNLOCK();
+    }
+}
+
+// Remove th from cr's thread list.  Removing the last thread
+// unregisters the ractor from the VM instead.
+void
+rb_ractor_living_threads_remove(rb_ractor_t *cr, rb_thread_t *th)
+{
+    VM_ASSERT(cr == GET_RACTOR());
+    RUBY_DEBUG_LOG("r->threads.cnt:%d--", cr->threads.cnt);
+    ractor_check_blocking(cr, cr->threads.cnt - 1, __FILE__, __LINE__);
+
+    if (cr->threads.cnt == 1) {
+        vm_remove_ractor(th->vm, cr);
+    }
+    else {
+        RACTOR_LOCK(cr);
+        {
+            list_del(&th->lt_node);
+            cr->threads.cnt--;
+        }
+        RACTOR_UNLOCK(cr);
+    }
+}
+
+// One of cr's threads is entering a blocking region.  May flip the
+// whole ractor to blocking (via ractor_check_blocking) when this is
+// the last non-blocking thread.
+void
+rb_ractor_blocking_threads_inc(rb_ractor_t *cr, const char *file, int line)
+{
+    RUBY_DEBUG_LOG2(file, line, "cr->threads.blocking_cnt:%d++", cr->threads.blocking_cnt);
+
+    VM_ASSERT(cr->threads.cnt > 0);
+    VM_ASSERT(cr == GET_RACTOR());
+
+    ractor_check_blocking(cr, cr->threads.cnt, __FILE__, __LINE__);
+    cr->threads.blocking_cnt++;
+}
+
+// A thread of cr is leaving a blocking region.  If every thread was
+// blocking, the ractor itself transitions back to running first.
+void
+rb_ractor_blocking_threads_dec(rb_ractor_t *cr, const char *file, int line)
+{
+    RUBY_DEBUG_LOG2(file, line,
+                    "r->threads.blocking_cnt:%d--, r->threads.cnt:%u",
+                    cr->threads.blocking_cnt, cr->threads.cnt);
+
+    VM_ASSERT(cr == GET_RACTOR());
+
+    if (cr->threads.cnt == cr->threads.blocking_cnt) {
+        rb_vm_t *vm = GET_VM();
+
+        RB_VM_LOCK_ENTER();
+        {
+            rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
+        }
+        RB_VM_LOCK_LEAVE();
+    }
+
+    cr->threads.blocking_cnt--;
+}
+
+// Ask another running ractor to stop at the VM-wide barrier by setting
+// the barrier-interrupt flag on its currently running EC.  Caller holds
+// the VM lock but must NOT hold r's lock.
+void
+rb_ractor_vm_barrier_interrupt_running_thread(rb_ractor_t *r)
+{
+    VM_ASSERT(r != GET_RACTOR());
+    ASSERT_ractor_unlocking(r);
+    ASSERT_vm_locking();
+
+    RACTOR_LOCK(r);
+    {
+        if (ractor_status_p(r, ractor_running)) {
+            rb_execution_context_t *ec = r->threads.running_ec;
+            if (ec) {
+                RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec);
+            }
+        }
+    }
+    RACTOR_UNLOCK(r);
+}
+
+// Ask another ractor to terminate by flagging and interrupting its
+// main thread.  threads.main == NULL means the ractor is already in
+// teardown (see rb_ractor_teardown); caller holds the VM lock.
+void
+rb_ractor_terminate_interrupt_main_thread(rb_ractor_t *r)
+{
+    VM_ASSERT(r != GET_RACTOR());
+    ASSERT_ractor_unlocking(r);
+    ASSERT_vm_locking();
+
+    rb_thread_t *main_th = r->threads.main;
+    if (main_th) {
+        if (main_th->status != THREAD_KILLED) {
+            RUBY_VM_SET_TERMINATE_INTERRUPT(main_th->ec);
+            rb_threadptr_interrupt(main_th);
+        }
+        else {
+            RUBY_DEBUG_LOG("killed (%p)", main_th);
+        }
+    }
+}
+
+void rb_thread_terminate_all(void); // thread.c
+
+// Send a terminate interrupt to every ractor except the main one.
+// Caller must hold the VM lock.
+static void
+ractor_terminal_interrupt_all(rb_vm_t *vm)
+{
+    if (vm->ractor.cnt > 1) {
+        // send terminate notification to all ractors
+        rb_ractor_t *r;
+        list_for_each(&vm->ractor.set, r, vmlr_node) {
+            if (r != vm->ractor.main_ractor) {
+                rb_ractor_terminate_interrupt_main_thread(r);
+            }
+        }
+    }
+}
+
+// Shut down every ractor at VM exit.  Only callable from the main
+// ractor's main thread.  Interrupts all other ractors, kills the main
+// ractor's own sub-threads, then waits (re-interrupting every second)
+// until only the main ractor remains.
+void
+rb_ractor_terminate_all(void)
+{
+    rb_vm_t *vm = GET_VM();
+    rb_ractor_t *cr = vm->ractor.main_ractor;
+
+    VM_ASSERT(cr == GET_RACTOR()); // only main-ractor's main-thread should kick it.
+
+    if (vm->ractor.cnt > 1) {
+        RB_VM_LOCK();
+        ractor_terminal_interrupt_all(vm); // kill all ractors
+        RB_VM_UNLOCK();
+    }
+    rb_thread_terminate_all(); // kill other threads in main-ractor and wait
+
+    RB_VM_LOCK();
+    {
+        while (vm->ractor.cnt > 1) {
+            RUBY_DEBUG_LOG("terminate_waiting:%d", vm->ractor.sync.terminate_waiting);
+            vm->ractor.sync.terminate_waiting = true;
+
+            // wait for 1sec
+            rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
+            rb_vm_cond_timedwait(vm, &vm->ractor.sync.terminate_cond, 1000 /* ms */);
+            rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
+
+            ractor_terminal_interrupt_all(vm);
+        }
+    }
+    RB_VM_UNLOCK();
+}
+
+// EC currently running in the main ractor (used e.g. by signal delivery).
+rb_execution_context_t *
+rb_vm_main_ractor_ec(rb_vm_t *vm)
+{
+    return vm->ractor.main_ractor->threads.running_ec;
+}
+
+#include "ractor.rbinc"
+
+// method_missing (and every overridden BasicObject method) for
+// Ractor::MovedObject: any call on a moved-away object raises.
+static VALUE
+ractor_moved_missing(int argc, VALUE *argv, VALUE self)
+{
+    rb_raise(rb_eRactorMovedError, "can not send any methods to a moved object");
+}
+
+// Define the Ractor class, its error hierarchy, and the frozen
+// Ractor::MovedObject placeholder class whose every method raises
+// Ractor::MovedError.
+void
+Init_Ractor(void)
+{
+    rb_cRactor = rb_define_class("Ractor", rb_cObject);
+    rb_eRactorError          = rb_define_class_under(rb_cRactor, "Error", rb_eRuntimeError);
+    rb_eRactorRemoteError    = rb_define_class_under(rb_cRactor, "RemoteError", rb_eRactorError);
+    rb_eRactorMovedError     = rb_define_class_under(rb_cRactor, "MovedError", rb_eRactorError);
+    // ClosedError < StopIteration so closed-port loops can terminate cleanly
+    rb_eRactorClosedError    = rb_define_class_under(rb_cRactor, "ClosedError", rb_eStopIteration);
+
+    rb_cRactorMovedObject = rb_define_class_under(rb_cRactor, "MovedObject", rb_cBasicObject);
+    rb_undef_alloc_func(rb_cRactorMovedObject);
+    rb_define_method(rb_cRactorMovedObject, "method_missing", ractor_moved_missing, -1);
+
+    // override methods defined in BasicObject
+    rb_define_method(rb_cRactorMovedObject, "__send__", ractor_moved_missing, -1);
+    rb_define_method(rb_cRactorMovedObject, "!", ractor_moved_missing, -1);
+    rb_define_method(rb_cRactorMovedObject, "==", ractor_moved_missing, -1);
+    rb_define_method(rb_cRactorMovedObject, "!=", ractor_moved_missing, -1);
+    rb_define_method(rb_cRactorMovedObject, "__id__", ractor_moved_missing, -1);
+    rb_define_method(rb_cRactorMovedObject, "equal?", ractor_moved_missing, -1);
+    rb_define_method(rb_cRactorMovedObject, "instance_eval", ractor_moved_missing, -1);
+    rb_define_method(rb_cRactorMovedObject, "instance_exec", ractor_moved_missing, -1);
+
+    rb_obj_freeze(rb_cRactorMovedObject);
+}
+
+// rb_hash_foreach callback: clear *shareable and stop iterating as soon
+// as an unshareable key or value is found.
+static int
+rb_ractor_shareable_p_hash_i(VALUE key, VALUE value, VALUE arg)
+{
+    // TODO: should we need to avoid recursion to prevent stack overflow?
+    if (!rb_ractor_shareable_p(key) || !rb_ractor_shareable_p(value)) {
+        bool *shareable = (bool*)arg;
+        *shareable = false;
+        return ST_STOP;
+    }
+    return ST_CONTINUE;
+}
+
+// Slow path of rb_ractor_shareable_p (see ractor_pub.h): decide
+// shareability per object type and memoize a positive answer by
+// setting RUBY_FL_SHAREABLE on the object.  Frozen containers are
+// shareable only if all of their elements are; anything with EXIVAR
+// (external instance variables) is conservatively unshareable.
+MJIT_FUNC_EXPORTED bool
+rb_ractor_shareable_p_continue(VALUE obj)
+{
+    switch (BUILTIN_TYPE(obj)) {
+      case T_CLASS:
+      case T_MODULE:
+      case T_ICLASS:
+        goto shareable;
+
+      case T_FLOAT:
+      case T_COMPLEX:
+      case T_RATIONAL:
+      case T_BIGNUM:
+      case T_SYMBOL:
+        VM_ASSERT(RB_OBJ_FROZEN_RAW(obj));
+        goto shareable;
+
+      case T_STRING:
+      case T_REGEXP:
+        if (RB_OBJ_FROZEN_RAW(obj) &&
+            !FL_TEST_RAW(obj, RUBY_FL_EXIVAR)) {
+            goto shareable;
+        }
+        return false;
+      case T_ARRAY:
+        if (!RB_OBJ_FROZEN_RAW(obj) ||
+            FL_TEST_RAW(obj, RUBY_FL_EXIVAR)) {
+            return false;
+        }
+        else {
+            // recursively check every element
+            for (int i = 0; i < RARRAY_LEN(obj); i++) {
+                if (!rb_ractor_shareable_p(rb_ary_entry(obj, i))) return false;
+            }
+            goto shareable;
+        }
+      case T_HASH:
+        if (!RB_OBJ_FROZEN_RAW(obj) ||
+            FL_TEST_RAW(obj, RUBY_FL_EXIVAR)) {
+            return false;
+        }
+        else {
+            bool shareable = true;
+            rb_hash_foreach(obj, rb_ractor_shareable_p_hash_i, (VALUE)&shareable);
+            if (shareable) {
+                goto shareable;
+            }
+            else {
+                return false;
+            }
+        }
+      default:
+        return false;
+    }
+  shareable:
+    // memoize: future checks hit the fast path in rb_ractor_shareable_p
+    FL_SET_RAW(obj, RUBY_FL_SHAREABLE);
+    return true;
+}
+
+// Debugging aid: print id and status of every non-main ractor to stderr.
+void
+rb_ractor_dump(void)
+{
+    rb_vm_t *vm = GET_VM();
+    rb_ractor_t *r;
+
+    list_for_each(&vm->ractor.set, r, vmlr_node) {
+        if (r != vm->ractor.main_ractor) {
+            fprintf(stderr, "r:%u (%s)\n", r->id, ractor_status_str(r->status_));
+        }
+    }
+}
+
+// $stdin accessor: the main ractor uses the global rb_stdin; every
+// other ractor uses its own private copy (set up in ractor_create).
+VALUE
+rb_ractor_stdin(void)
+{
+    if (rb_ractor_main_p()) {
+        return rb_stdin;
+    }
+    else {
+        rb_ractor_t *cr = GET_RACTOR();
+        return cr->r_stdin;
+    }
+}
+
+// $stdout accessor (per-ractor; see rb_ractor_stdin).
+VALUE
+rb_ractor_stdout(void)
+{
+    if (rb_ractor_main_p()) {
+        return rb_stdout;
+    }
+    else {
+        rb_ractor_t *cr = GET_RACTOR();
+        return cr->r_stdout;
+    }
+}
+
+// $stderr accessor (per-ractor; see rb_ractor_stdin).
+VALUE
+rb_ractor_stderr(void)
+{
+    if (rb_ractor_main_p()) {
+        return rb_stderr;
+    }
+    else {
+        rb_ractor_t *cr = GET_RACTOR();
+        return cr->r_stderr;
+    }
+}
+
+// $stdin= : non-main ractors need a write barrier since r_stdin is a
+// GC-visible slot of the Ractor object.
+void
+rb_ractor_stdin_set(VALUE in)
+{
+    if (rb_ractor_main_p()) {
+        rb_stdin = in;
+    }
+    else {
+        rb_ractor_t *cr = GET_RACTOR();
+        RB_OBJ_WRITE(cr->self, &cr->r_stdin, in);
+    }
+}
+
+// $stdout= (per-ractor; see rb_ractor_stdin_set).
+void
+rb_ractor_stdout_set(VALUE out)
+{
+    if (rb_ractor_main_p()) {
+        rb_stdout = out;
+    }
+    else {
+        rb_ractor_t *cr = GET_RACTOR();
+        RB_OBJ_WRITE(cr->self, &cr->r_stdout, out);
+    }
+}
+
+// $stderr= (per-ractor; see rb_ractor_stdin_set).
+void
+rb_ractor_stderr_set(VALUE err)
+{
+    if (rb_ractor_main_p()) {
+        rb_stderr = err;
+    }
+    else {
+        rb_ractor_t *cr = GET_RACTOR();
+        RB_OBJ_WRITE(cr->self, &cr->r_stderr, err);
+    }
+}
diff --git a/ractor.h b/ractor.h
new file mode 100644
index 0000000..de4d722
--- /dev/null
+++ b/ractor.h
@@ -0,0 +1,269 @@
+#include "ruby/ruby.h"
+#include "vm_core.h"
+#include "id_table.h"
+#include "vm_debug.h"
+#include "ractor_pub.h"
+
+#ifndef RACTOR_CHECK_MODE
+#define RACTOR_CHECK_MODE (0 || VM_CHECK_MODE || RUBY_DEBUG)
+#endif
+
+// How a message's payload crosses the ractor boundary.
+enum rb_ractor_basket_type {
+    basket_type_none,         // empty slot / already consumed
+    basket_type_shareable,    // shareable object, passed as-is
+    basket_type_copy_marshal, // deep copy via Marshal
+    basket_type_copy_custom,  // deep copy via a custom copier
+    basket_type_move,         // ownership moved; source becomes MovedObject
+    basket_type_exception,    // v is an exception to re-raise at the receiver
+};
+
+// One in-flight message: the payload and the sending Ractor.
+struct rb_ractor_basket {
+    enum rb_ractor_basket_type type;
+    VALUE v;
+    VALUE sender;
+};
+
+// Growable FIFO of baskets (a ractor's incoming queue).
+struct rb_ractor_queue {
+    struct rb_ractor_basket *baskets;
+    int cnt;
+    int size;
+};
+
+// Growable list of ractors blocked taking from some ractor.
+struct rb_ractor_waiting_list {
+    int cnt;
+    int size;
+    rb_ractor_t **ractors;
+};
+
+struct rb_ractor_struct {
+    // ractor lock: protects the communication fields below
+    rb_nativethread_lock_t lock;
+#if RACTOR_CHECK_MODE > 0
+    VALUE locked_by; // debug: Ractor currently holding `lock`
+#endif
+
+    // communication
+    struct rb_ractor_queue  incoming_queue;
+
+    bool incoming_port_closed;
+    bool outgoing_port_closed;
+
+    // ractors blocked in take() on this ractor's outgoing port
+    struct rb_ractor_waiting_list taking_ractors;
+
+    // what this ractor is currently blocked on, and why it was woken;
+    // see ractor_select()/ractor_sleep()/ractor_wakeup() in ractor.c
+    struct ractor_wait {
+        enum ractor_wait_status {
+            wait_none      = 0x00,
+            wait_recving   = 0x01,
+            wait_taking    = 0x02,
+            wait_yielding  = 0x04,
+        } status; // bitset: a select() can wait on several at once
+
+        enum ractor_wakeup_status {
+            wakeup_none,
+            wakeup_by_send,
+            wakeup_by_yield,
+            wakeup_by_take,
+            wakeup_by_close,
+            wakeup_by_interrupt,
+            wakeup_by_retry,
+        } wakeup_status;
+
+        struct rb_ractor_basket taken_basket;   // filled by a yielder on wakeup_by_yield
+        struct rb_ractor_basket yielded_basket; // value parked for a future taker
+
+        rb_nativethread_cond_t cond; // signalled by ractor_wakeup()
+    } wait;
+
+    // vm wide barrier synchronization
+    rb_nativethread_cond_t barrier_wait_cond;
+
+    // thread management
+    struct {
+        struct list_head set;       // all rb_thread_t of this ractor (lt_node)
+        unsigned int cnt;
+        unsigned int blocking_cnt;  // threads currently in blocking regions
+        unsigned int sleeper;
+        rb_global_vm_lock_t gvl;    // each ractor has its own GVL
+        rb_execution_context_t *running_ec;
+        rb_thread_t *main;          // NULL once teardown has started
+    } threads;
+    VALUE thgroup_default;
+
+    // identity
+    VALUE self;
+    uint32_t id;
+    VALUE name;
+    VALUE loc;
+
+    // created
+    //   | ready to run
+    // ====================== inserted to vm->ractor
+    //   v
+    // blocking <---+ all threads are blocking
+    //   |          |
+    //   v          |
+    // running -----+
+    //   | all threads are terminated.
+    // ====================== removed from vm->ractor
+    //   v
+    // terminated
+    //
+    // status is protected by VM lock (global state)
+
+    enum ractor_status {
+        ractor_created,
+        ractor_running,
+        ractor_blocking,
+        ractor_terminated,
+    } status_;
+
+    struct list_node vmlr_node; // link in vm->ractor.set
+
+
+    // per-ractor $stdin/$stdout/$stderr (main ractor uses the globals)
+    VALUE r_stdin;
+    VALUE r_stdout;
+    VALUE r_stderr;
+}; // rb_ractor_t is defined in vm_core.h
+
+rb_ractor_t *rb_ractor_main_alloc(void);
+void rb_ractor_main_setup(rb_vm_t *vm, rb_ractor_t *main_ractor, rb_thread_t *main_thread);
+VALUE rb_ractor_self(const rb_ractor_t *g);
+void rb_ractor_atexit(rb_execution_context_t *ec, VALUE result);
+void rb_ractor_atexit_exception(rb_execution_context_t *ec);
+void rb_ractor_teardown(rb_execution_context_t *ec);
+void rb_ractor_recv_parameters(rb_execution_context_t *ec, rb_ractor_t *g, int len, VALUE *ptr);
+void rb_ractor_send_parameters(rb_execution_context_t *ec, rb_ractor_t *g, VALUE args);
+
+VALUE rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc); // defined in thread.c
+
+rb_global_vm_lock_t *rb_ractor_gvl(rb_ractor_t *);
+int rb_ractor_living_thread_num(const rb_ractor_t *);
+VALUE rb_ractor_thread_list(rb_ractor_t *r);
+
+void rb_ractor_living_threads_init(rb_ractor_t *r);
+void rb_ractor_living_threads_insert(rb_ractor_t *r, rb_thread_t *th);
+void rb_ractor_living_threads_remove(rb_ractor_t *r, rb_thread_t *th);
+void rb_ractor_blocking_threads_inc(rb_ractor_t *r, const char *file, int line); // TODO: file, line only for RUBY_DEBUG_LOG
+void rb_ractor_blocking_threads_dec(rb_ractor_t *r, const char *file, int line); // TODO: file, line only for RUBY_DEBUG_LOG
+
+void rb_ractor_vm_barrier_interrupt_running_thread(rb_ractor_t *r);
+void rb_ractor_terminate_interrupt_main_thread(rb_ractor_t *r);
+void rb_ractor_terminate_all(void);
+
+// true iff r is in the given lifecycle status (see ractor_status diagram).
+static inline bool
+rb_ractor_status_p(rb_ractor_t *r, enum ractor_status status)
+{
+    return r->status_ == status;
+}
+
+// Bookkeeping of threads sleeping inside r (used for deadlock detection).
+static inline void
+rb_ractor_sleeper_threads_inc(rb_ractor_t *r)
+{
+    r->threads.sleeper++;
+}
+
+static inline void
+rb_ractor_sleeper_threads_dec(rb_ractor_t *r)
+{
+    r->threads.sleeper--;
+}
+
+static inline void
+rb_ractor_sleeper_threads_clear(rb_ractor_t *r)
+{
+    r->threads.sleeper = 0;
+}
+
+static inline int
+rb_ractor_sleeper_thread_num(rb_ractor_t *r)
+{
+    return r->threads.sleeper;
+}
+
+// Record that th is now the thread running inside cr (on a GVL switch
+// between threads of the same ractor).  Resets the incoming thread's
+// running-time slice; no-op when th is already the running thread.
+static inline void
+rb_ractor_thread_switch(rb_ractor_t *cr, rb_thread_t *th)
+{
+    if (cr->threads.running_ec != th->ec) {
+        if (0) fprintf(stderr, "rb_ractor_thread_switch ec:%p->%p\n",
+                       (void *)cr->threads.running_ec, (void *)th->ec);
+    }
+    else {
+        return;
+    }
+
+    if (cr->threads.running_ec != th->ec) {
+        th->running_time_us = 0;
+    }
+
+    cr->threads.running_ec = th->ec;
+
+    VM_ASSERT(cr == GET_RACTOR());
+}
+
+// Install ec as this native thread's current EC (thread-local) and as
+// cr's running EC.  Must be called with an EC different from the
+// current one (asserted).
+static inline void
+rb_ractor_set_current_ec(rb_ractor_t *cr, rb_execution_context_t *ec)
+{
+    native_tls_set(ruby_current_ec_key, ec);
+
+    if (cr->threads.running_ec != ec) {
+        if (0) fprintf(stderr, "rb_ractor_set_current_ec ec:%p->%p\n",
+                       (void *)cr->threads.running_ec, (void *)ec);
+    }
+    else {
+        VM_ASSERT(0); // should be different
+    }
+
+    cr->threads.running_ec = ec;
+}
+
+void rb_vm_ractor_blocking_cnt_inc(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line);
+void rb_vm_ractor_blocking_cnt_dec(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line);
+
+uint32_t rb_ractor_id(const rb_ractor_t *r);
+
+#if RACTOR_CHECK_MODE > 0
+// Debug-only ownership tracking: each unshareable object records the id
+// of the ractor it belongs to in the upper 32 bits of RBASIC flags.
+uint32_t rb_ractor_current_id(void);
+
+static inline void
+rb_ractor_setup_belonging_to(VALUE obj, uint32_t rid)
+{
+    // NOTE(review): assumes VALUE is 64-bit — `<< 32` would be UB on a
+    // 32-bit VALUE.  RACTOR_CHECK_MODE is a debug aid; confirm it is
+    // never enabled on 32-bit builds.
+    VALUE flags = RBASIC(obj)->flags & 0xffffffff; // 4B
+    RBASIC(obj)->flags = flags | ((VALUE)rid << 32);
+}
+
+// Tag obj as belonging to the current ractor.
+static inline void
+rb_ractor_setup_belonging(VALUE obj)
+{
+    rb_ractor_setup_belonging_to(obj, rb_ractor_current_id());
+}
+
+// Owning ractor id of obj; 0 means "shareable, owned by nobody".
+static inline uint32_t
+rb_ractor_belonging(VALUE obj)
+{
+    if (rb_ractor_shareable_p(obj)) {
+        return 0;
+    }
+    else {
+        return RBASIC(obj)->flags >> 32;
+    }
+}
+
+// Assert that obj may be touched by the current ractor; rb_bug on a
+// cross-ractor access.  Returns obj for drop-in wrapping of accesses.
+static inline VALUE
+rb_ractor_confirm_belonging(VALUE obj)
+{
+    uint32_t id = rb_ractor_belonging(obj);
+
+    if (id == 0) {
+        if (!rb_ractor_shareable_p(obj)) {
+            rp(obj);
+            rb_bug("id == 0 but not shareable");
+        }
+    }
+    else if (id != rb_ractor_current_id()) {
+        rb_bug("rb_ractor_confirm_belonging object-ractor id:%u, current-ractor id:%u", id, rb_ractor_current_id());
+    }
+    return obj;
+}
+#else
+#define rb_ractor_confirm_belonging(obj) obj
+#endif
diff --git a/ractor.rb b/ractor.rb
new file mode 100644
index 0000000..893a3f1
--- /dev/null
+++ b/ractor.rb
@@ -0,0 +1,162 @@
+class Ractor
+  # Create a new Ractor with args and a block.
+  # args are passed via incoming channel.
+  # A block (Proc) will be isolated (can't access outer variables)
+  #
+  # A ractor has default two channels:
+  # an incoming channel and an outgoing channel.
+  #
+  # Other ractors send objects to the ractor via the incoming channel and
+  # the ractor receives them.
+  # The ractor send objects via the outgoing channel and other ractors can
+  # receive them.
+  #
+  # The result of the block is sent via the outgoing channel,
+  # and other ractors can take it.
+  #
+  #   r = Ractor.new do
+  #     Ractor.recv # recv via r's mailbox => 1
+  #     Ractor.recv # recv via r's mailbox => 2
+  #     Ractor.yield 3 # yield a message (3) and wait for taking by another ractor.
+  #     'ok' # the return value will be yielded.
+  #          # and r's incoming/outgoing ports are closed automatically.
+  #   end
+  #   r.send 1 # send a message (1) into r's mailbox.
+  #   r <<   2 # << is an alias of `send`.
+  #   p r.take # take a message from r's outgoing port #=> 3
+  #   p r.take # => 'ok'
+  #   p r.take # raise Ractor::ClosedError
+  #
+  # other options:
+  #   name: Ractor's name
+  #
+  def self.new *args, name: nil, &block
+    b = block # TODO: builtin bug
+    raise ArgumentError, "must be called with a block" unless block
+    loc = caller_locations(1, 1).first
+    loc = "#{loc.path}:#{loc.lineno}"
+    __builtin_ractor_create(loc, name, args, b)
+  end
+
+  # return current Ractor
+  def self.current
+    __builtin_cexpr! %q{
+      rb_ec_ractor_ptr(ec)->self
+    }
+  end
+
+  # Number of ractors registered with the VM (running or blocking).
+  def self.count
+    __builtin_cexpr! %q{
+      ULONG2NUM(GET_VM()->ractor.cnt);
+    }
+  end
+
+  # Multiplex multiple Ractor communications.
+  #
+  #   r, obj = Ractor.select(r1, r2)
+  #   #=> wait for taking from r1 or r2
+  #   #   returned obj is a taken object from Ractor r
+  #
+  #   r, obj = Ractor.select(r1, r2, Ractor.current)
+  #   #=> wait for taking from r1 or r2
+  #   #   or recv from incoming queue
+  #   #   If recv succeeds, then obj is the received value
+  #   #   and r is :recv (Ractor.current)
+  #
+  #   r, obj = Ractor.select(r1, r2, Ractor.current, yield_value: obj)
+  #   #=> wait for taking from r1 or r2
+  #   #   or recv from incoming queue
+  #   #   or yield (Ractor.yield) obj
+  #   #   If yield succeeds, then obj is nil
+  #   #   and r is :yield
+  #
+  def self.select *ractors, yield_value: yield_unspecified = true, move: false
+    __builtin_cstmt! %q{
+      const VALUE *rs = RARRAY_CONST_PTR_TRANSIENT(ractors);
+      VALUE rv;
+      VALUE v = ractor_select(ec, rs, RARRAY_LENINT(ractors),
+                              yield_unspecified == Qtrue ? Qundef : yield_value,
+                              (bool)RTEST(move) ? true : false, &rv);
+      return rb_ary_new_from_args(2, rv, v);
+    }
+  end
+
+  # Receive an incoming message from Ractor's incoming queue.
+  def self.recv
+    __builtin_cexpr! %q{
+      ractor_recv(ec, rb_ec_ractor_ptr(ec))
+    }
+  end
+
+  # Private: receive on an explicit ractor (internal use only).
+  private def recv
+    __builtin_cexpr! %q{
+      // TODO: check current actor
+      ractor_recv(ec, RACTOR_PTR(self))
+    }
+  end
+
+  # Send a message to a Ractor's incoming queue.
+  #
+  # # Example:
+  #   r = Ractor.new do
+  #     p Ractor.recv #=> 'ok'
+  #   end
+  #   r.send 'ok' # send to r's incoming queue.
+  def send obj, move: false
+    __builtin_cexpr! %q{
+      ractor_send(ec, RACTOR_PTR(self), obj, move)
+    }
+  end
+
+  # yield a message to the ractor's outgoing port.
+  def self.yield obj, move: false
+    __builtin_cexpr! %q{
+      ractor_yield(ec, rb_ec_ractor_ptr(ec), obj, move)
+    }
+  end
+
+  # Take a message from ractor's outgoing port.
+  #
+  # Example:
+  #   r = Ractor.new{ 'ok' }
+  #   p r.take #=> 'ok'
+  def take
+    __builtin_cexpr! %q{
+      ractor_take(ec, RACTOR_PTR(self))
+    }
+  end
+
+  alias << send
+
+  def inspect
+    loc  = __builtin_cexpr! %q{ RACTOR_PTR(self)->loc }
+    name = __builtin_cexpr! %q{ RACTOR_PTR(self)->name }
+    id   = __builtin_cexpr! %q{ INT2FIX(RACTOR_PTR(self)->id) }
+    "#<Ractor:##{id}#{name ? ' '+name : ''}#{loc ? " " + loc : ''}>"
+  end
+
+  # The name given to Ractor.new, or nil.
+  def name
+    __builtin_cexpr! %q{ RACTOR_PTR(self)->name }
+  end
+
+  class RemoteError
+    attr_reader :ractor # the Ractor the original exception came from
+  end
+
+  # Close the incoming port; pending receivers get Ractor::ClosedError.
+  def close_incoming
+    __builtin_cexpr! %q{
+      ractor_close_incoming(ec, RACTOR_PTR(self));
+    }
+  end
+
+  # Close the outgoing port; pending takers get Ractor::ClosedError.
+  def close_outgoing
+    __builtin_cexpr! %q{
+      ractor_close_outgoing(ec, RACTOR_PTR(self));
+    }
+  end
+
+  # Close both ports.
+  def close
+    close_incoming
+    close_outgoing
+  end
+end
diff --git a/ractor_pub.h b/ractor_pub.h
new file mode 100644
index 0000000..5062782
--- /dev/null
+++ b/ractor_pub.h
@@ -0,0 +1,33 @@
+
+int rb_ractor_main_p(void);
+
+bool rb_ractor_shareable_p_continue(VALUE obj);
+
+#define RB_OBJ_SHAREABLE_P(obj) FL_TEST_RAW((obj), RUBY_FL_SHAREABLE)
+
+// TODO: deep frozen
+
+static inline bool
+rb_ractor_shareable_p(VALUE obj)
+{
+ if (SPECIAL_CONST_P(obj)) {
+ return true;
+ }
+ else if (RB_OBJ_SHAREABLE_P(obj)) {
+ return true;
+ }
+ else {
+ return rb_ractor_shareable_p_continue(obj);
+ }
+}
+
+RUBY_SYMBOL_EXPORT_BEGIN
+
+VALUE rb_ractor_stdin(void);
+VALUE rb_ractor_stdout(void);
+VALUE rb_ractor_stderr(void);
+void rb_ractor_stdin_set(VALUE);
+void rb_ractor_stdout_set(VALUE);
+void rb_ractor_stderr_set(VALUE);
+
+RUBY_SYMBOL_EXPORT_END
diff --git a/ruby_assert.h b/ruby_assert.h
index 5ccc615..ddd348c 100644
--- a/ruby_assert.h
+++ b/ruby_assert.h
@@ -8,7 +8,6 @@
* modify this file, provided that the conditions mentioned in the
* file COPYING are met. Consult the file for details.
*/
-
#include "ruby/assert.h"
#undef assert
#define assert RUBY_ASSERT_NDEBUG
diff --git a/signal.c b/signal.c
index 615ceb7..08de1bc 100644
--- a/signal.c
+++ b/signal.c
@@ -448,7 +448,7 @@ rb_f_kill(int argc, const VALUE *argv)
}
}
else {
- const rb_pid_t self = (GET_THREAD() == GET_VM()->main_thread) ? getpid() : -1;
+ const rb_pid_t self = (GET_THREAD() == GET_VM()->ractor.main_thread) ? getpid() : -1;
int wakeup = 0;
for (i=1; i<argc; i++) {
@@ -495,7 +495,7 @@ rb_f_kill(int argc, const VALUE *argv)
}
}
if (wakeup) {
- rb_threadptr_check_signal(GET_VM()->main_thread);
+ rb_threadptr_check_signal(GET_VM()->ractor.main_thread);
}
}
rb_thread_execute_interrupts(rb_thread_current());
diff --git a/thread.c b/thread.c
index 141df11..063d960 100644
--- a/thread.c
+++ b/thread.c
@@ -92,6 +92,9 @@
#include "ruby/thread_native.h"
#include "timev.h"
#include "vm_core.h"
+#include "ractor.h"
+#include "vm_debug.h"
+#include "vm_sync.h"
#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
@@ -133,7 +136,7 @@ static void sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
static void sleep_forever(rb_thread_t *th, unsigned int fl);
static void rb_thread_sleep_deadly_allow_spurious_wakeup(void);
static int rb_threadptr_dead(rb_thread_t *th);
-static void rb_check_deadlock(rb_vm_t *vm);
+static void rb_check_deadlock(rb_ractor_t *r);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
@@ -167,20 +170,13 @@ static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_regi
rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
-#define RB_GC_SAVE_MACHINE_CONTEXT(th) \
- do { \
- FLUSH_REGISTER_WINDOWS; \
- setjmp((th)->ec->machine.regs); \
- SET_MACHINE_STACK_END(&(th)->ec->machine.stack_end); \
- } while (0)
-
#define GVL_UNLOCK_BEGIN(th) do { \
RB_GC_SAVE_MACHINE_CONTEXT(th); \
- gvl_release(th->vm);
+ gvl_release(rb_ractor_gvl(th->ractor));
#define GVL_UNLOCK_END(th) \
- gvl_acquire(th->vm, th); \
- rb_thread_set_current(th); \
+ gvl_acquire(rb_ractor_gvl(th->ractor), th); \
+ rb_ractor_thread_switch(th->ractor, th); \
} while(0)
#ifdef __GNUC__
@@ -222,12 +218,6 @@ vm_check_ints_blocking(rb_execution_context_t *ec)
return rb_threadptr_execute_interrupts(th, 1);
}
-static int
-vm_living_thread_num(const rb_vm_t *vm)
-{
- return vm->living_thread_num;
-}
-
/*
* poll() is supported by many OSes, but so far Linux is the only
* one we know of that supports using poll() in all places select()
@@ -345,7 +335,7 @@ rb_thread_s_debug_set(VALUE self, VALUE val)
#endif
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start));
-static void timer_thread_function(void);
+static void timer_thread_function(rb_execution_context_t *ec);
void ruby_sigchld_handler(rb_vm_t *); /* signal.c */
static void
@@ -425,11 +415,13 @@ rb_thread_debug(
#include "thread_sync.c"
void
-rb_vm_gvl_destroy(rb_vm_t *vm)
+rb_vm_gvl_destroy(rb_global_vm_lock_t *gvl)
{
- gvl_release(vm);
- gvl_destroy(vm);
+ gvl_release(gvl);
+ gvl_destroy(gvl);
+
if (0) {
+ rb_vm_t *vm = GET_VM();
/* may be held by running threads */
rb_native_mutex_destroy(&vm->waitpid_lock);
rb_native_mutex_destroy(&vm->workqueue_lock);
@@ -498,6 +490,7 @@ static void
rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
{
rb_native_mutex_lock(&th->interrupt_lock);
+
if (trap) {
RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
}
@@ -526,12 +519,12 @@ threadptr_trap_interrupt(rb_thread_t *th)
}
static void
-terminate_all(rb_vm_t *vm, const rb_thread_t *main_thread)
+terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
{
rb_thread_t *th = 0;
- list_for_each(&vm->living_threads, th, vmlt_node) {
- if (th != main_thread) {
+ list_for_each(&r->threads.set, th, lt_node) {
+ if (th != main_thread) {
thread_debug("terminate_all: begin (thid: %"PRI_THREAD_ID", status: %s)\n",
thread_id_str(th), thread_status_name(th, TRUE));
rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
@@ -567,12 +560,12 @@ rb_thread_terminate_all(void)
{
rb_thread_t *volatile th = GET_THREAD(); /* main thread */
rb_execution_context_t * volatile ec = th->ec;
- rb_vm_t *volatile vm = th->vm;
+ rb_ractor_t *r = th->ractor;
volatile int sleeping = 0;
- if (vm->main_thread != th) {
- rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
- (void *)vm->main_thread, (void *)th);
+ if (r->threads.main != th) {
+ rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
+ (void *)r->threads.main, (void *)th);
}
/* unlock all locking mutexes */
@@ -582,11 +575,11 @@ rb_thread_terminate_all(void)
if (EC_EXEC_TAG() == TAG_NONE) {
retry:
thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
- terminate_all(vm, th);
+ terminate_all(th->ractor, th);
- while (vm_living_thread_num(vm) > 1) {
+ while (rb_ractor_living_thread_num(th->ractor) > 1) {
rb_hrtime_t rel = RB_HRTIME_PER_SEC;
- /*
+ /*
* Thread exiting routine in thread_start_func_2 notify
* me when the last sub-thread exit.
*/
@@ -669,26 +662,43 @@ rb_vm_proc_local_ep(VALUE proc)
}
}
-static void
-thread_do_start(rb_thread_t *th)
-{
- native_set_thread_name(th);
+// for ractor, defined in vm.c
+VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
+ int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
- if (th->invoke_type == thread_invoke_type_proc) {
- VALUE args = th->invoke_arg.proc.args;
- int args_len = (int)RARRAY_LEN(args);
- const VALUE *args_ptr;
- VALUE procval = th->invoke_arg.proc.proc;
- rb_proc_t *proc;
- GetProcPtr(procval, proc);
-
- th->ec->errinfo = Qnil;
- th->ec->root_lep = rb_vm_proc_local_ep(procval);
- th->ec->root_svar = Qfalse;
-
- EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
+static void
+thread_do_start_proc(rb_thread_t *th)
+{
+ VALUE args = th->invoke_arg.proc.args;
+ const VALUE *args_ptr;
+ int args_len;
+ VALUE procval = th->invoke_arg.proc.proc;
+ rb_proc_t *proc;
+ GetProcPtr(procval, proc);
+
+ th->ec->errinfo = Qnil;
+ th->ec->root_lep = rb_vm_proc_local_ep(procval);
+ th->ec->root_svar = Qfalse;
+
+ EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
+ vm_check_ints_blocking(th->ec);
+
+ if (th->invoke_type == thread_invoke_type_ractor_proc) {
+ VALUE self = rb_ractor_self(th->ractor);
+ VM_ASSERT(FIXNUM_P(args));
+ args_len = FIX2INT(args);
+ args_ptr = ALLOCA_N(VALUE, args_len);
+ rb_ractor_recv_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
vm_check_ints_blocking(th->ec);
+ // kick thread
+ th->value = rb_vm_invoke_proc_with_self(th->ec, proc, self,
+ args_len, args_ptr,
+ th->invoke_arg.proc.kw_splat,
+ VM_BLOCK_HANDLER_NONE);
+ }
+ else {
+ args_len = RARRAY_LENINT(args);
if (args_len < 8) {
/* free proc.args if the length is enough small */
args_ptr = ALLOCA_N(VALUE, args_len);
@@ -700,15 +710,36 @@ thread_do_start(rb_thread_t *th)
}
vm_check_ints_blocking(th->ec);
+
+ // kick thread
th->value = rb_vm_invoke_proc(th->ec, proc,
args_len, args_ptr,
th->invoke_arg.proc.kw_splat,
VM_BLOCK_HANDLER_NONE);
+ }
+
+ EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
- EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
+ if (th->invoke_type == thread_invoke_type_ractor_proc) {
+ rb_ractor_atexit(th->ec, th->value);
}
- else {
+}
+
+static void
+thread_do_start(rb_thread_t *th)
+{
+ native_set_thread_name(th);
+
+ switch (th->invoke_type) {
+ case thread_invoke_type_proc:
+ case thread_invoke_type_ractor_proc:
+ thread_do_start_proc(th);
+ break;
+ case thread_invoke_type_func:
th->value = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
+ break;
+ case thread_invoke_type_none:
+ rb_bug("unreachable");
}
VALUE scheduler = th->scheduler;
@@ -725,32 +756,40 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
STACK_GROW_DIR_DETECTION;
enum ruby_tag_type state;
rb_thread_list_t *join_list;
- rb_thread_t *main_th;
VALUE errinfo = Qnil;
size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
+ rb_thread_t *ractor_main_th = th->ractor->threads.main;
VALUE * vm_stack = NULL;
- if (th == th->vm->main_thread) {
- rb_bug("thread_start_func_2 must not be used for main thread");
+ VM_ASSERT(th != th->vm->ractor.main_thread);
+ thread_debug("thread start: %p\n", (void *)th);
+
+ // setup native thread
+ gvl_acquire(rb_ractor_gvl(th->ractor), th);
+ ruby_thread_set_native(th);
+
+ // setup ractor
+ if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
+ RB_VM_LOCK();
+ {
+ rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
+ }
+ RB_VM_UNLOCK();
}
- thread_debug("thread start: %p\n", (void *)th);
- VM_ASSERT((size * sizeof(VALUE)) <= th->ec->machine.stack_maxsize);
+ // This assertion is not passed on win32 env. Check it later.
+ // VM_ASSERT((size * sizeof(VALUE)) <= th->ec->machine.stack_maxsize);
+ // setup VM and machine stack
vm_stack = alloca(size * sizeof(VALUE));
VM_ASSERT(vm_stack);
- gvl_acquire(th->vm, th);
-
rb_ec_initialize_vm_stack(th->ec, vm_stack, size);
th->ec->machine.stack_start = STACK_DIR_UPPER(vm_stack + size, vm_stack);
th->ec->machine.stack_maxsize -= size * sizeof(VALUE);
- ruby_thread_set_native(th);
-
{
thread_debug("thread start (get lock): %p\n", (void *)th);
- rb_thread_set_current(th);
EC_PUSH_TAG(th->ec);
if ((state = EC_EXEC_TAG()) == TAG_NONE) {
@@ -758,14 +797,19 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
}
else {
errinfo = th->ec->errinfo;
- if (state == TAG_FATAL) {
+
+ if (state == TAG_FATAL) {
/* fatal error within this thread, need to stop whole script */
}
else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
/* exit on main_thread. */
}
else {
- if (th->report_on_exception) {
+ if (th->invoke_type == thread_invoke_type_ractor_proc) {
+ rb_ractor_atexit_exception(th->ec);
+ }
+
+ if (th->report_on_exception) {
VALUE mesg = rb_thread_to_s(th->self);
rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
rb_write_error_str(mesg);
@@ -782,16 +826,20 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
th->value = Qnil;
}
+ if (th->invoke_type == thread_invoke_type_ractor_proc) {
+ rb_ractor_teardown(th->ec);
+ }
+
th->status = THREAD_KILLED;
thread_debug("thread end: %p\n", (void *)th);
- main_th = th->vm->main_thread;
- if (main_th == th) {
+ if (th->vm->ractor.main_thread == th) {
ruby_stop(0);
}
- if (RB_TYPE_P(errinfo, T_OBJECT)) {
+
+ if (RB_TYPE_P(errinfo, T_OBJECT)) {
/* treat with normal error object */
- rb_threadptr_raise(main_th, 1, &errinfo);
+ rb_threadptr_raise(ractor_main_th, 1, &errinfo);
}
EC_POP_TAG();
@@ -803,11 +851,10 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
(void *)th, th->locking_mutex);
}
- /* delete self other than main thread from living_threads */
- rb_vm_living_threads_remove(th->vm, th);
- if (main_th->status == THREAD_KILLED && rb_thread_alone()) {
+ if (ractor_main_th->status == THREAD_KILLED &&
+ th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
/* I'm last thread. wake up main thread from rb_thread_terminate_all */
- rb_threadptr_interrupt(main_th);
+ rb_threadptr_interrupt(ractor_main_th);
}
/* wake up joining threads */
@@ -823,7 +870,7 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
}
rb_threadptr_unlock_all_locking_mutexes(th);
- rb_check_deadlock(th->vm);
+ rb_check_deadlock(th->ractor);
rb_fiber_close(th->ec->fiber_ptr);
}
@@ -831,15 +878,40 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
thread_cleanup_func(th, FALSE);
VM_ASSERT(th->ec->vm_stack == NULL);
- gvl_release(th->vm);
+ if (th->invoke_type == thread_invoke_type_ractor_proc) {
+ // after rb_ractor_living_threads_remove()
+ // GC will happen anytime and this ractor can be collected (and destroy GVL).
+ // So gvl_release() should be before it.
+ gvl_release(rb_ractor_gvl(th->ractor));
+ rb_ractor_living_threads_remove(th->ractor, th);
+ }
+ else {
+ rb_ractor_living_threads_remove(th->ractor, th);
+ gvl_release(rb_ractor_gvl(th->ractor));
+ }
return 0;
}
+struct thread_create_params {
+ enum thread_invoke_type type;
+
+ // for normal proc thread
+ VALUE args;
+ VALUE proc;
+
+ // for ractor
+ rb_ractor_t *g;
+
+ // for func
+ VALUE (*fn)(void *);
+};
+
static VALUE
-thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(void *))
+thread_create_core(VALUE thval, struct thread_create_params *params)
{
- rb_thread_t *th = rb_thread_ptr(thval), *current_th = GET_THREAD();
+ rb_execution_context_t *ec = GET_EC();
+ rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
int err;
if (OBJ_FROZEN(current_th->thgroup)) {
@@ -847,17 +919,35 @@ thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(void *))
"can't start a new thread (frozen ThreadGroup)");
}
- if (fn) {
- th->invoke_type = thread_invoke_type_func;
- th->invoke_arg.func.func = fn;
- th->invoke_arg.func.arg = (void *)args;
- }
- else {
- (void)RARRAY_LENINT(args);
+ switch (params->type) {
+ case thread_invoke_type_proc:
th->invoke_type = thread_invoke_type_proc;
- th->invoke_arg.proc.proc = rb_block_proc();
- th->invoke_arg.proc.args = args;
+ th->invoke_arg.proc.args = params->args;
+ th->invoke_arg.proc.proc = params->proc;
+ th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
+ break;
+
+ case thread_invoke_type_ractor_proc:
+#if RACTOR_CHECK_MODE > 0
+ rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
+#endif
+ th->invoke_type = thread_invoke_type_ractor_proc;
+ th->ractor = params->g;
+ th->ractor->threads.main = th;
+ th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
+ th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
+ rb_ractor_send_parameters(ec, params->g, params->args);
+ break;
+
+ case thread_invoke_type_func:
+ th->invoke_type = thread_invoke_type_func;
+ th->invoke_arg.func.func = params->fn;
+ th->invoke_arg.func.arg = (void *)params->args;
+ break;
+
+ default:
+ rb_bug("unreachable");
}
th->priority = current_th->priority;
@@ -870,13 +960,17 @@ thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(void *))
rb_native_mutex_initialize(&th->interrupt_lock);
+ RUBY_DEBUG_LOG("r:%u th:%p", th->ractor->id, th);
+
+ rb_ractor_living_threads_insert(th->ractor, th);
+
/* kick thread */
err = native_thread_create(th);
if (err) {
th->status = THREAD_KILLED;
- rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
+ rb_ractor_living_threads_remove(th->ractor, th);
+ rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
}
- rb_vm_living_threads_insert(th->vm, th);
return thval;
}
@@ -908,8 +1002,9 @@ thread_s_new(int argc, VALUE *argv, VALUE klass)
rb_thread_t *th;
VALUE thread = rb_thread_alloc(klass);
- if (GET_VM()->main_thread->status == THREAD_KILLED)
- rb_raise(rb_eThreadError, "can't alloc thread");
+ if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
+ rb_raise(rb_eThreadError, "can't alloc thread");
+ }
rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
th = rb_thread_ptr(thread);
@@ -933,7 +1028,12 @@ thread_s_new(int argc, VALUE *argv, VALUE klass)
static VALUE
thread_start(VALUE klass, VALUE args)
{
- return thread_create_core(rb_thread_alloc(klass), args, 0);
+ struct thread_create_params params = {
+ .type = thread_invoke_type_proc,
+ .args = args,
+ .proc = rb_block_proc(),
+ };
+ return thread_create_core(rb_thread_alloc(klass), &params);
}
static VALUE
@@ -968,14 +1068,36 @@ thread_initialize(VALUE thread, VALUE args)
}
}
else {
- return thread_create_core(thread, args, 0);
+ struct thread_create_params params = {
+ .type = thread_invoke_type_proc,
+ .args = args,
+ .proc = rb_block_proc(),
+ };
+ return thread_create_core(thread, &params);
}
}
VALUE
rb_thread_create(VALUE (*fn)(void *), void *arg)
{
- return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
+ struct thread_create_params params = {
+ .type = thread_invoke_type_func,
+ .fn = fn,
+ .args = (VALUE)arg,
+ };
+ return thread_create_core(rb_thread_alloc(rb_cThread), &params);
+}
+
+VALUE
+rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc)
+{
+ struct thread_create_params params = {
+ .type = thread_invoke_type_ractor_proc,
+ .g = g,
+ .args = args,
+ .proc = proc,
+ };
+ return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}
@@ -1019,10 +1141,10 @@ thread_join_sleep(VALUE arg)
while (target_th->status != THREAD_KILLED) {
if (!p->limit) {
th->status = THREAD_STOPPED_FOREVER;
- th->vm->sleeper++;
- rb_check_deadlock(th->vm);
+ rb_ractor_sleeper_threads_inc(th->ractor);
+ rb_check_deadlock(th->ractor);
native_sleep(th, 0);
- th->vm->sleeper--;
+ rb_ractor_sleeper_threads_dec(th->ractor);
}
else {
if (hrtime_update_expire(p->limit, end)) {
@@ -1050,7 +1172,7 @@ thread_join(rb_thread_t *target_th, rb_hrtime_t *rel)
if (th == target_th) {
rb_raise(rb_eThreadError, "Target thread must not be current thread");
}
- if (GET_VM()->main_thread == target_th) {
+ if (th->ractor->threads.main == target_th) {
rb_raise(rb_eThreadError, "Target thread must not be main thread");
}
@@ -1266,12 +1388,12 @@ sleep_forever(rb_thread_t *th, unsigned int fl)
RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
while (th->status == status) {
if (fl & SLEEP_DEADLOCKABLE) {
- th->vm->sleeper++;
- rb_check_deadlock(th->vm);
+ rb_ractor_sleeper_threads_inc(th->ractor);
+ rb_check_deadlock(th->ractor);
}
native_sleep(th, 0);
if (fl & SLEEP_DEADLOCKABLE) {
- th->vm->sleeper--;
+ rb_ractor_sleeper_threads_dec(th->ractor);
}
woke = vm_check_ints_blocking(th->ec);
if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
@@ -1417,8 +1539,8 @@ rb_thread_schedule_limits(uint32_t limits_us)
if (th->running_time_us >= limits_us) {
thread_debug("rb_thread_schedule/switch start\n");
RB_GC_SAVE_MACHINE_CONTEXT(th);
- gvl_yield(th->vm, th);
- rb_thread_set_current(th);
+ gvl_yield(rb_ractor_gvl(th->ractor), th);
+ rb_ractor_thread_switch(th->ractor, th);
thread_debug("rb_thread_schedule/switch done\n");
}
}
@@ -1441,9 +1563,10 @@ blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
th->blocking_region_buffer = region;
th->status = THREAD_STOPPED;
+ rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
thread_debug("enter blocking region (%p)\n", (void *)th);
RB_GC_SAVE_MACHINE_CONTEXT(th);
- gvl_release(th->vm);
+ gvl_release(rb_ractor_gvl(th->ractor));
return TRUE;
}
else {
@@ -1459,10 +1582,12 @@ blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
/* entry to ubf_list impossible at this point, so unregister is safe: */
unregister_ubf_list(th);
- gvl_acquire(th->vm, th);
- rb_thread_set_current(th);
+ gvl_acquire(rb_ractor_gvl(th->ractor), th);
+ rb_ractor_thread_switch(th->ractor, th);
+
thread_debug("leave blocking region (%p)\n", (void *)th);
th->blocking_region_buffer = 0;
+ rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
if (th->status == THREAD_STOPPED) {
th->status = region->prev_status;
}
@@ -1484,7 +1609,7 @@ rb_nogvl(void *(*func)(void *), void *data1,
ubf = ubf_select;
data2 = th;
}
- else if (ubf && vm_living_thread_num(th->vm) == 1) {
+ else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1) {
if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
th->vm->ubf_async_safe = 1;
}
@@ -1631,7 +1756,12 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
wfd.fd = fd;
wfd.th = rb_ec_thread_ptr(ec);
- list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &wfd.wfd_node);
+
+ RB_VM_LOCK_ENTER();
+ {
+ list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &wfd.wfd_node);
+ }
+ RB_VM_LOCK_LEAVE();
EC_PUSH_TAG(ec);
if ((state = EC_EXEC_TAG()) == TAG_NONE) {
@@ -1646,7 +1776,11 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
* must be deleted before jump
* this will delete either from waiting_fds or on-stack LIST_HEAD(busy)
*/
- list_del(&wfd.wfd_node);
+ RB_VM_LOCK_ENTER();
+ {
+ list_del(&wfd.wfd_node);
+ }
+ RB_VM_LOCK_LEAVE();
if (state) {
EC_JUMP_TAG(ec, state);
@@ -1700,7 +1834,7 @@ rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
* because this thread is not Ruby's thread.
* What should we do?
*/
-
+ bp();
fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
exit(EXIT_FAILURE);
}
@@ -2233,18 +2367,25 @@ rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
int timer_interrupt;
int pending_interrupt;
int trap_interrupt;
+ int terminate_interrupt;
timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
+ terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
+
+ if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
+ RB_VM_LOCK_ENTER();
+ RB_VM_LOCK_LEAVE();
+ }
if (postponed_job_interrupt) {
rb_postponed_job_flush(th->vm);
}
/* signal handling */
- if (trap_interrupt && (th == th->vm->main_thread)) {
+ if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
enum rb_thread_status prev_status = th->status;
int sigwait_fd = rb_sigwait_fd_get(th);
@@ -2273,7 +2414,7 @@ rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
else if (err == eKillSignal /* Thread#kill received */ ||
err == eTerminateSignal /* Terminate thread */ ||
err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
- rb_threadptr_to_kill(th);
+ terminate_interrupt = 1;
}
else {
if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
@@ -2288,7 +2429,11 @@ rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
}
}
- if (timer_interrupt) {
+ if (terminate_interrupt) {
+ rb_threadptr_to_kill(th);
+ }
+
+ if (timer_interrupt) {
uint32_t limits_us = TIME_QUANTUM_USEC;
if (th->priority > 0)
@@ -2356,7 +2501,7 @@ rb_threadptr_signal_raise(rb_thread_t *th, int sig)
argv[0] = rb_eSignal;
argv[1] = INT2FIX(sig);
- rb_threadptr_raise(th->vm->main_thread, 2, argv);
+ rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
}
void
@@ -2366,7 +2511,9 @@ rb_threadptr_signal_exit(rb_thread_t *th)
argv[0] = rb_eSystemExit;
argv[1] = rb_str_new2("exit");
- rb_threadptr_raise(th->vm->main_thread, 2, argv);
+
+ // TODO: check signal raise delivery
+ rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
}
int
@@ -2395,19 +2542,24 @@ rb_notify_fd_close(int fd, struct list_head *busy)
rb_vm_t *vm = GET_THREAD()->vm;
struct waiting_fd *wfd = 0, *next;
- list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
- if (wfd->fd == fd) {
- rb_thread_t *th = wfd->th;
- VALUE err;
+ RB_VM_LOCK_ENTER();
+ {
+ list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
+ if (wfd->fd == fd) {
+ rb_thread_t *th = wfd->th;
+ VALUE err;
- list_del(&wfd->wfd_node);
- list_add(busy, &wfd->wfd_node);
+ list_del(&wfd->wfd_node);
+ list_add(busy, &wfd->wfd_node);
- err = th->vm->special_exceptions[ruby_error_stream_closed];
- rb_threadptr_pending_interrupt_enque(th, err);
- rb_threadptr_interrupt(th);
- }
+ err = th->vm->special_exceptions[ruby_error_stream_closed];
+ rb_threadptr_pending_interrupt_enque(th, err);
+ rb_threadptr_interrupt(th);
+ }
+ }
}
+ RB_VM_LOCK_LEAVE();
+
return !list_empty(busy);
}
@@ -2479,7 +2631,7 @@ rb_thread_kill(VALUE thread)
if (th->to_kill || th->status == THREAD_KILLED) {
return thread;
}
- if (th == th->vm->main_thread) {
+ if (th == th->vm->ractor.main_thread) {
rb_exit(EXIT_SUCCESS);
}
@@ -2658,21 +2810,8 @@ thread_stop(VALUE _)
VALUE
rb_thread_list(void)
{
- VALUE ary = rb_ary_new();
- rb_vm_t *vm = GET_THREAD()->vm;
- rb_thread_t *th = 0;
-
- list_for_each(&vm->living_threads, th, vmlt_node) {
- switch (th->status) {
- case THREAD_RUNNABLE:
- case THREAD_STOPPED:
- case THREAD_STOPPED_FOREVER:
- rb_ary_push(ary, th->self);
- default:
- break;
- }
- }
- return ary;
+ // TODO
+ return rb_ractor_thread_list(GET_RACTOR());
}
/*
@@ -2725,7 +2864,7 @@ thread_s_current(VALUE klass)
VALUE
rb_thread_main(void)
{
- return GET_THREAD()->vm->main_thread->self;
+ return GET_RACTOR()->threads.main->self;
}
/*
@@ -3521,7 +3660,8 @@ thread_keys_i(ID key, VALUE value, void *ary)
int
rb_thread_alone(void)
{
- return vm_living_thread_num(GET_VM()) == 1;
+ // TODO
+ return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
}
/*
@@ -4248,7 +4388,13 @@ rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
wfd.th = GET_THREAD();
wfd.fd = fd;
- list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
+
+ RB_VM_LOCK_ENTER();
+ {
+ list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
+ }
+ RB_VM_LOCK_LEAVE();
+
EC_PUSH_TAG(wfd.th->ec);
if ((state = EC_EXEC_TAG()) == TAG_NONE) {
RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
@@ -4401,7 +4547,12 @@ rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
args.wfd.fd = fd;
args.wfd.th = GET_THREAD();
- list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
+ RB_VM_LOCK_ENTER();
+ {
+ list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
+ }
+ RB_VM_LOCK_LEAVE();
+
r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
if (r == -1)
errno = args.as.error;
@@ -4438,14 +4589,12 @@ rb_threadptr_check_signal(rb_thread_t *mth)
}
static void
-timer_thread_function(void)
+timer_thread_function(rb_execution_context_t *ec)
{
- volatile rb_execution_context_t *ec;
-
- /* for time slice */
- ec = ACCESS_ONCE(rb_execution_context_t *,
- ruby_current_execution_context_ptr);
- if (ec) RUBY_VM_SET_TIMER_INTERRUPT(ec);
+ // strictly speaking, accessing gvl->owner is not thread-safe
+ if (ec) {
+ RUBY_VM_SET_TIMER_INTERRUPT(ec);
+ }
}
static void
@@ -4516,11 +4665,13 @@ check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
ubf_wakeup_all_threads();
ruby_sigchld_handler(vm);
if (rb_signal_buff_size()) {
- if (th == vm->main_thread)
+ if (th == vm->ractor.main_thread) {
/* no need to lock + wakeup if already in main thread */
RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
- else
- threadptr_trap_interrupt(vm->main_thread);
+ }
+ else {
+ threadptr_trap_interrupt(vm->ractor.main_thread);
+ }
ret = TRUE; /* for SIGCHLD_LOSSY && rb_sigwait_sleep */
}
return ret;
@@ -4592,16 +4743,28 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r
{
rb_thread_t *i = 0;
rb_vm_t *vm = th->vm;
- vm->main_thread = th;
+ rb_ractor_t *r = th->ractor;
+ vm->ractor.main_ractor = r;
+ vm->ractor.main_thread = th;
+ r->threads.main = th;
+ r->status_ = ractor_created;
- gvl_atfork(th->vm);
+ gvl_atfork(rb_ractor_gvl(th->ractor));
ubf_list_atfork();
- list_for_each(&vm->living_threads, i, vmlt_node) {
- atfork(i, th);
+ // OK. Only this thread accesses:
+ list_for_each(&vm->ractor.set, r, vmlr_node) {
+ list_for_each(&r->threads.set, i, lt_node) {
+ atfork(i, th);
+ }
}
rb_vm_living_threads_init(vm);
- rb_vm_living_threads_insert(vm, th);
+
+ // threads
+ vm->ractor.cnt = 0;
+ rb_ractor_living_threads_init(th->ractor);
+ rb_ractor_living_threads_insert(th->ractor, th);
+
/* may be held by MJIT threads in parent */
rb_native_mutex_initialize(&vm->waitpid_lock);
@@ -4611,9 +4774,10 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r
rb_native_mutex_initialize(&th->interrupt_lock);
vm->fork_gen++;
-
- vm->sleeper = 0;
+ rb_ractor_sleeper_threads_clear(th->ractor);
rb_clear_coverages();
+
+ VM_ASSERT(vm->ractor.cnt == 1);
}
static void
@@ -4730,11 +4894,11 @@ static VALUE
thgroup_list(VALUE group)
{
VALUE ary = rb_ary_new();
- rb_vm_t *vm = GET_THREAD()->vm;
rb_thread_t *th = 0;
+ rb_ractor_t *r = GET_RACTOR();
- list_for_each(&vm->living_threads, th, vmlt_node) {
- if (th->thgroup == group) {
+ list_for_each(&r->threads.set, th, lt_node) {
+ if (th->thgroup == group) {
rb_ary_push(ary, th->self);
}
}
@@ -5364,7 +5528,7 @@ Init_Thread(void)
rb_define_method(cThGroup, "add", thgroup_add, 1);
{
- th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
+ th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
rb_define_const(cThGroup, "Default", th->thgroup);
}
@@ -5376,8 +5540,8 @@ Init_Thread(void)
/* main thread setting */
{
/* acquire global vm lock */
- gvl_init(th->vm);
- gvl_acquire(th->vm, th);
+ rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor);
+ gvl_acquire(gvl, th);
rb_native_mutex_initialize(&th->vm->waitpid_lock);
rb_native_mutex_initialize(&th->vm->workqueue_lock);
rb_native_mutex_initialize(&th->interrupt_lock);
@@ -5390,9 +5554,6 @@ Init_Thread(void)
rb_thread_create_timer_thread();
- /* suppress warnings on cygwin, mingw and mswin.*/
- (void)native_mutex_trylock;
-
Init_thread_sync();
}
@@ -5405,67 +5566,73 @@ ruby_native_thread_p(void)
}
static void
-debug_deadlock_check(rb_vm_t *vm, VALUE msg)
+debug_deadlock_check(rb_ractor_t *r, VALUE msg)
{
rb_thread_t *th = 0;
VALUE sep = rb_str_new_cstr("\n ");
rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
- vm_living_thread_num(vm), vm->sleeper, (void *)GET_THREAD(), (void *)vm->main_thread);
- list_for_each(&vm->living_threads, th, vmlt_node) {
- rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
- "native:%"PRI_THREAD_ID" int:%u",
- th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
- if (th->locking_mutex) {
- rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
- rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
- (void *)mutex->th, rb_mutex_num_waiting(mutex));
- }
- {
- rb_thread_list_t *list = th->join_list;
- while (list) {
- rb_str_catf(msg, "\n depended by: tb_thread_id:%p", (void *)list->th);
- list = list->next;
- }
- }
- rb_str_catf(msg, "\n ");
- rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
- rb_str_catf(msg, "\n");
+ rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
+ (void *)GET_THREAD(), (void *)r->threads.main);
+
+ list_for_each(&r->threads.set, th, lt_node) {
+ rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
+ "native:%"PRI_THREAD_ID" int:%u",
+ th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
+
+ if (th->locking_mutex) {
+ rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
+ rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
+ (void *)mutex->th, rb_mutex_num_waiting(mutex));
+ }
+
+ {
+ rb_thread_list_t *list = th->join_list;
+ while (list) {
+ rb_str_catf(msg, "\n depended by: tb_thread_id:%p", (void *)list->th);
+ list = list->next;
+ }
+ }
+ rb_str_catf(msg, "\n ");
+ rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
+ rb_str_catf(msg, "\n");
}
}
static void
-rb_check_deadlock(rb_vm_t *vm)
+rb_check_deadlock(rb_ractor_t *r)
{
int found = 0;
- rb_thread_t *th = 0;
+ rb_thread_t *th = NULL;
+ int sleeper_num = rb_ractor_sleeper_thread_num(r);
+ int ltnum = rb_ractor_living_thread_num(r);
- if (vm_living_thread_num(vm) > vm->sleeper) return;
- if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
+ if (ltnum > sleeper_num) return;
+ if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
if (patrol_thread && patrol_thread != GET_THREAD()) return;
- list_for_each(&vm->living_threads, th, vmlt_node) {
- if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
- found = 1;
- }
- else if (th->locking_mutex) {
- rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
+ list_for_each(&r->threads.set, th, lt_node) {
+ if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
+ found = 1;
+ }
+ else if (th->locking_mutex) {
+ rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
- if (mutex->th == th || (!mutex->th && !list_empty(&mutex->waitq))) {
- found = 1;
- }
- }
- if (found)
- break;
+ if (mutex->th == th || (!mutex->th && !list_empty(&mutex->waitq))) {
+ found = 1;
+ }
+ }
+ if (found)
+ break;
}
if (!found) {
VALUE argv[2];
argv[0] = rb_eFatal;
argv[1] = rb_str_new2("No live threads left. Deadlock?");
- debug_deadlock_check(vm, argv[1]);
- vm->sleeper--;
- rb_threadptr_raise(vm->main_thread, 2, argv);
+ debug_deadlock_check(r, argv[1]);
+ rb_ractor_sleeper_threads_dec(GET_RACTOR());
+ rb_threadptr_raise(r->threads.main, 2, argv);
}
}
diff --git a/thread_pthread.c b/thread_pthread.c
index ee2f7bc..427897c 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -122,26 +122,14 @@ static struct {
};
#endif
-void rb_native_mutex_lock(rb_nativethread_lock_t *lock);
-void rb_native_mutex_unlock(rb_nativethread_lock_t *lock);
-static int native_mutex_trylock(rb_nativethread_lock_t *lock);
-void rb_native_mutex_initialize(rb_nativethread_lock_t *lock);
-void rb_native_mutex_destroy(rb_nativethread_lock_t *lock);
-void rb_native_cond_signal(rb_nativethread_cond_t *cond);
-void rb_native_cond_broadcast(rb_nativethread_cond_t *cond);
-void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
-void rb_native_cond_initialize(rb_nativethread_cond_t *cond);
-void rb_native_cond_destroy(rb_nativethread_cond_t *cond);
-static void clear_thread_cache_altstack(void);
-static void ubf_wakeup_all_threads(void);
-static int ubf_threads_empty(void);
-static int native_cond_timedwait(rb_nativethread_cond_t *, pthread_mutex_t *,
- const rb_hrtime_t *abs);
static const rb_hrtime_t *sigwait_timeout(rb_thread_t *, int sigwait_fd,
const rb_hrtime_t *,
int *drained_p);
static void ubf_timer_disarm(void);
static void threadptr_trap_interrupt(rb_thread_t *);
+static void clear_thread_cache_altstack(void);
+static void ubf_wakeup_all_threads(void);
+static int ubf_threads_empty(void);
#define TIMER_THREAD_CREATED_P() (signal_self_pipe.owner_process == getpid())
@@ -180,17 +168,18 @@ static const void *const condattr_monotonic = NULL;
#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
static rb_hrtime_t native_cond_timeout(rb_nativethread_cond_t *, rb_hrtime_t);
+static int native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs);
/*
* Designate the next gvl.timer thread, favor the last thread in
* the waitq since it will be in waitq longest
*/
static int
-designate_timer_thread(rb_vm_t *vm)
+designate_timer_thread(rb_global_vm_lock_t *gvl)
{
native_thread_data_t *last;
- last = list_tail(&vm->gvl.waitq, native_thread_data_t, node.ubf);
+ last = list_tail(&gvl->waitq, native_thread_data_t, node.ubf);
if (last) {
rb_native_cond_signal(&last->cond.gvlq);
return TRUE;
@@ -203,29 +192,30 @@ designate_timer_thread(rb_vm_t *vm)
* periodically. Continue on old timeout if it expired.
*/
static void
-do_gvl_timer(rb_vm_t *vm, rb_thread_t *th)
+do_gvl_timer(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
static rb_hrtime_t abs;
native_thread_data_t *nd = &th->native_thread_data;
- vm->gvl.timer = th;
+ gvl->timer = th;
/* take over wakeups from UBF_TIMER */
ubf_timer_disarm();
- if (vm->gvl.timer_err == ETIMEDOUT) {
+ if (gvl->timer_err == ETIMEDOUT) {
abs = native_cond_timeout(&nd->cond.gvlq, TIME_QUANTUM_NSEC);
}
- vm->gvl.timer_err = native_cond_timedwait(&nd->cond.gvlq, &vm->gvl.lock, &abs);
+ gvl->timer_err = native_cond_timedwait(&nd->cond.gvlq, &gvl->lock, &abs);
ubf_wakeup_all_threads();
- ruby_sigchld_handler(vm);
+ ruby_sigchld_handler(GET_VM());
+
if (UNLIKELY(rb_signal_buff_size())) {
- if (th == vm->main_thread) {
+ if (th == GET_VM()->ractor.main_thread) {
RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
}
else {
- threadptr_trap_interrupt(vm->main_thread);
+ threadptr_trap_interrupt(GET_VM()->ractor.main_thread);
}
}
@@ -233,77 +223,77 @@ do_gvl_timer(rb_vm_t *vm, rb_thread_t *th)
* Timeslice. Warning: the process may fork while this
* thread is contending for GVL:
*/
- if (vm->gvl.owner) timer_thread_function();
- vm->gvl.timer = 0;
+ if (gvl->owner) timer_thread_function(gvl->owner->ec);
+ gvl->timer = 0;
}
static void
-gvl_acquire_common(rb_vm_t *vm, rb_thread_t *th)
+gvl_acquire_common(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
- if (vm->gvl.owner) {
+ if (gvl->owner) {
native_thread_data_t *nd = &th->native_thread_data;
VM_ASSERT(th->unblock.func == 0 &&
"we must not be in ubf_list and GVL waitq at the same time");
- list_add_tail(&vm->gvl.waitq, &nd->node.gvl);
+ list_add_tail(&gvl->waitq, &nd->node.gvl);
do {
- if (!vm->gvl.timer) {
- do_gvl_timer(vm, th);
+ if (!gvl->timer) {
+ do_gvl_timer(gvl, th);
}
else {
- rb_native_cond_wait(&nd->cond.gvlq, &vm->gvl.lock);
+ rb_native_cond_wait(&nd->cond.gvlq, &gvl->lock);
}
- } while (vm->gvl.owner);
+ } while (gvl->owner);
list_del_init(&nd->node.gvl);
- if (vm->gvl.need_yield) {
- vm->gvl.need_yield = 0;
- rb_native_cond_signal(&vm->gvl.switch_cond);
+ if (gvl->need_yield) {
+ gvl->need_yield = 0;
+ rb_native_cond_signal(&gvl->switch_cond);
}
}
else { /* reset timer if uncontended */
- vm->gvl.timer_err = ETIMEDOUT;
+ gvl->timer_err = ETIMEDOUT;
}
- vm->gvl.owner = th;
- if (!vm->gvl.timer) {
- if (!designate_timer_thread(vm) && !ubf_threads_empty()) {
+ gvl->owner = th;
+ if (!gvl->timer) {
+ if (!designate_timer_thread(gvl) && !ubf_threads_empty()) {
rb_thread_wakeup_timer_thread(-1);
}
}
}
static void
-gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
+gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
- rb_native_mutex_lock(&vm->gvl.lock);
- gvl_acquire_common(vm, th);
- rb_native_mutex_unlock(&vm->gvl.lock);
+ rb_native_mutex_lock(&gvl->lock);
+ gvl_acquire_common(gvl, th);
+ rb_native_mutex_unlock(&gvl->lock);
}
static const native_thread_data_t *
-gvl_release_common(rb_vm_t *vm)
+gvl_release_common(rb_global_vm_lock_t *gvl)
{
native_thread_data_t *next;
- vm->gvl.owner = 0;
- next = list_top(&vm->gvl.waitq, native_thread_data_t, node.ubf);
+ gvl->owner = 0;
+ next = list_top(&gvl->waitq, native_thread_data_t, node.ubf);
if (next) rb_native_cond_signal(&next->cond.gvlq);
return next;
}
static void
-gvl_release(rb_vm_t *vm)
+gvl_release(rb_global_vm_lock_t *gvl)
{
- rb_native_mutex_lock(&vm->gvl.lock);
- gvl_release_common(vm);
- rb_native_mutex_unlock(&vm->gvl.lock);
+ rb_native_mutex_lock(&gvl->lock);
+ gvl_release_common(gvl);
+ rb_native_mutex_unlock(&gvl->lock);
}
static void
-gvl_yield(rb_vm_t *vm, rb_thread_t *th)
+gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
const native_thread_data_t *next;
@@ -312,49 +302,49 @@ gvl_yield(rb_vm_t *vm, rb_thread_t *th)
* (perhaps looping in io_close_fptr) so we kick them:
*/
ubf_wakeup_all_threads();
- rb_native_mutex_lock(&vm->gvl.lock);
- next = gvl_release_common(vm);
+ rb_native_mutex_lock(&gvl->lock);
+ next = gvl_release_common(gvl);
/* An another thread is processing GVL yield. */
- if (UNLIKELY(vm->gvl.wait_yield)) {
- while (vm->gvl.wait_yield)
- rb_native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
+ if (UNLIKELY(gvl->wait_yield)) {
+ while (gvl->wait_yield)
+ rb_native_cond_wait(&gvl->switch_wait_cond, &gvl->lock);
}
else if (next) {
/* Wait until another thread task takes GVL. */
- vm->gvl.need_yield = 1;
- vm->gvl.wait_yield = 1;
- while (vm->gvl.need_yield)
- rb_native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
- vm->gvl.wait_yield = 0;
- rb_native_cond_broadcast(&vm->gvl.switch_wait_cond);
+ gvl->need_yield = 1;
+ gvl->wait_yield = 1;
+ while (gvl->need_yield)
+ rb_native_cond_wait(&gvl->switch_cond, &gvl->lock);
+ gvl->wait_yield = 0;
+ rb_native_cond_broadcast(&gvl->switch_wait_cond);
}
else {
- rb_native_mutex_unlock(&vm->gvl.lock);
+ rb_native_mutex_unlock(&gvl->lock);
native_thread_yield();
- rb_native_mutex_lock(&vm->gvl.lock);
- rb_native_cond_broadcast(&vm->gvl.switch_wait_cond);
+ rb_native_mutex_lock(&gvl->lock);
+ rb_native_cond_broadcast(&gvl->switch_wait_cond);
}
- gvl_acquire_common(vm, th);
- rb_native_mutex_unlock(&vm->gvl.lock);
+ gvl_acquire_common(gvl, th);
+ rb_native_mutex_unlock(&gvl->lock);
}
-static void
-gvl_init(rb_vm_t *vm)
+void
+rb_gvl_init(rb_global_vm_lock_t *gvl)
{
- rb_native_mutex_initialize(&vm->gvl.lock);
- rb_native_cond_initialize(&vm->gvl.switch_cond);
- rb_native_cond_initialize(&vm->gvl.switch_wait_cond);
- list_head_init(&vm->gvl.waitq);
- vm->gvl.owner = 0;
- vm->gvl.timer = 0;
- vm->gvl.timer_err = ETIMEDOUT;
- vm->gvl.need_yield = 0;
- vm->gvl.wait_yield = 0;
+ rb_native_mutex_initialize(&gvl->lock);
+ rb_native_cond_initialize(&gvl->switch_cond);
+ rb_native_cond_initialize(&gvl->switch_wait_cond);
+ list_head_init(&gvl->waitq);
+ gvl->owner = 0;
+ gvl->timer = 0;
+ gvl->timer_err = ETIMEDOUT;
+ gvl->need_yield = 0;
+ gvl->wait_yield = 0;
}
static void
-gvl_destroy(rb_vm_t *vm)
+gvl_destroy(rb_global_vm_lock_t *gvl)
{
/*
* only called once at VM shutdown (not atfork), another thread
@@ -362,9 +352,9 @@ gvl_destroy(rb_vm_t *vm)
* the end of thread_start_func_2
*/
if (0) {
- rb_native_cond_destroy(&vm->gvl.switch_wait_cond);
- rb_native_cond_destroy(&vm->gvl.switch_cond);
- rb_native_mutex_destroy(&vm->gvl.lock);
+ rb_native_cond_destroy(&gvl->switch_wait_cond);
+ rb_native_cond_destroy(&gvl->switch_cond);
+ rb_native_mutex_destroy(&gvl->lock);
}
clear_thread_cache_altstack();
}
@@ -372,11 +362,11 @@ gvl_destroy(rb_vm_t *vm)
#if defined(HAVE_WORKING_FORK)
static void thread_cache_reset(void);
static void
-gvl_atfork(rb_vm_t *vm)
+gvl_atfork(rb_global_vm_lock_t *gvl)
{
thread_cache_reset();
- gvl_init(vm);
- gvl_acquire(vm, GET_THREAD());
+ rb_gvl_init(gvl);
+ gvl_acquire(gvl, GET_THREAD());
}
#endif
@@ -415,8 +405,8 @@ rb_native_mutex_unlock(pthread_mutex_t *lock)
}
}
-static inline int
-native_mutex_trylock(pthread_mutex_t *lock)
+int
+rb_native_mutex_trylock(pthread_mutex_t *lock)
{
int r;
mutex_debug("trylock", lock);
@@ -513,8 +503,7 @@ rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
}
static int
-native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex,
- const rb_hrtime_t *abs)
+native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
{
int r;
struct timespec ts;
@@ -526,16 +515,24 @@ native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex,
* Let's hide it from arch generic code.
*/
do {
- r = pthread_cond_timedwait(cond, mutex, rb_hrtime2timespec(&ts, abs));
+ rb_hrtime2timespec(&ts, abs);
+ r = pthread_cond_timedwait(cond, mutex, &ts);
} while (r == EINTR);
if (r != 0 && r != ETIMEDOUT) {
- rb_bug_errno("pthread_cond_timedwait", r);
+ rb_bug_errno("pthread_cond_timedwait", r);
}
return r;
}
+void
+rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
+{
+ rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
+ native_cond_timedwait(cond, mutex, &hrmsec);
+}
+
static rb_hrtime_t
native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
{
@@ -570,6 +567,9 @@ ruby_thread_from_native(void)
static int
ruby_thread_set_native(rb_thread_t *th)
{
+ if (th && th->ec) {
+ rb_ractor_set_current_ec(th->ractor, th->ec);
+ }
return pthread_setspecific(ruby_native_thread_key, th) == 0;
}
@@ -587,8 +587,14 @@ Init_native_thread(rb_thread_t *th)
if (r) condattr_monotonic = NULL;
}
#endif
- pthread_key_create(&ruby_native_thread_key, 0);
+ if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
+ rb_bug("pthread_key_create failed (ruby_native_thread_key)");
+ }
+ if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
+ rb_bug("pthread_key_create failed (ruby_current_ec_key)");
+ }
th->thread_id = pthread_self();
+ ruby_thread_set_native(th);
fill_thread_id_str(th);
native_thread_init(th);
posix_signal(SIGVTALRM, null_func);
@@ -605,7 +611,6 @@ native_thread_init(rb_thread_t *th)
rb_native_cond_initialize(&nd->cond.gvlq);
if (&nd->cond.gvlq != &nd->cond.intr)
rb_native_cond_initialize(&nd->cond.intr);
- ruby_thread_set_native(th);
}
#ifndef USE_THREAD_CACHE
@@ -1116,7 +1121,7 @@ native_thread_create(rb_thread_t *th)
# endif
CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
- err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
+ err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
thread_debug("create: %p (%d)\n", (void *)th, err);
/* should be done in the created thread */
fill_thread_id_str(th);
@@ -1207,7 +1212,7 @@ native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
}
end = native_cond_timeout(cond, *rel);
- native_cond_timedwait(cond, lock, &end);
+ native_cond_timedwait(cond, lock, &end);
}
}
th->unblock.func = 0;
@@ -1277,7 +1282,7 @@ static void
ubf_select(void *ptr)
{
rb_thread_t *th = (rb_thread_t *)ptr;
- rb_vm_t *vm = th->vm;
+ rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor);
const rb_thread_t *cur = ruby_thread_from_native(); /* may be 0 */
register_ubf_list(th);
@@ -1292,17 +1297,17 @@ ubf_select(void *ptr)
* sigwait_th thread, otherwise we can deadlock with a thread
* in unblock_function_clear.
*/
- if (cur != vm->gvl.timer && cur != sigwait_th) {
+ if (cur != gvl->timer && cur != sigwait_th) {
/*
* Double-checked locking above was to prevent nested locking
* by the SAME thread. We use trylock here to prevent deadlocks
* between DIFFERENT threads
*/
- if (native_mutex_trylock(&vm->gvl.lock) == 0) {
- if (!vm->gvl.timer) {
+ if (rb_native_mutex_trylock(&gvl->lock) == 0) {
+ if (!gvl->timer) {
rb_thread_wakeup_timer_thread(-1);
}
- rb_native_mutex_unlock(&vm->gvl.lock);
+ rb_native_mutex_unlock(&gvl->lock);
}
}
@@ -1471,7 +1476,7 @@ rb_thread_wakeup_timer_thread(int sig)
* on heap for maximum safety (and startup/shutdown speed)
*/
if (!vm) return;
- mth = vm->main_thread;
+ mth = vm->ractor.main_thread;
if (!mth || system_working <= 0) return;
/* this relies on GC for grace period before cont_free */
@@ -2063,12 +2068,12 @@ ubf_ppoll_sleep(void *ignore)
*/
#define GVL_UNLOCK_BEGIN_YIELD(th) do { \
const native_thread_data_t *next; \
- rb_vm_t *vm = th->vm; \
+ rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor); \
RB_GC_SAVE_MACHINE_CONTEXT(th); \
- rb_native_mutex_lock(&vm->gvl.lock); \
- next = gvl_release_common(vm); \
- rb_native_mutex_unlock(&vm->gvl.lock); \
- if (!next && vm_living_thread_num(vm) > 1) { \
+ rb_native_mutex_lock(&gvl->lock); \
+ next = gvl_release_common(gvl); \
+ rb_native_mutex_unlock(&gvl->lock); \
+ if (!next && rb_ractor_living_thread_num(th->ractor) > 1) { \
native_thread_yield(); \
}
@@ -2117,6 +2122,7 @@ static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
int sigwait_fd = rb_sigwait_fd_get(th);
+ rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
if (sigwait_fd >= 0) {
rb_native_mutex_lock(&th->interrupt_lock);
@@ -2136,12 +2142,14 @@ native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
rb_sigwait_fd_put(th, sigwait_fd);
rb_sigwait_fd_migrate(th->vm);
}
- else if (th == th->vm->main_thread) { /* always able to handle signals */
+ else if (th == th->vm->ractor.main_thread) { /* always able to handle signals */
native_ppoll_sleep(th, rel);
}
else {
native_cond_sleep(th, rel);
}
+
+ rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
}
#if UBF_TIMER == UBF_TIMER_PTHREAD
@@ -2149,7 +2157,7 @@ static void *
timer_pthread_fn(void *p)
{
rb_vm_t *vm = p;
- pthread_t main_thread_id = vm->main_thread->thread_id;
+ pthread_t main_thread_id = vm->ractor.main_thread->thread_id;
struct pollfd pfd;
int timeout = -1;
int ccp;
diff --git a/thread_pthread.h b/thread_pthread.h
index 27b2210..d14857b 100644
--- a/thread_pthread.h
+++ b/thread_pthread.h
@@ -39,6 +39,18 @@ typedef struct native_thread_data_struct {
} cond;
} native_thread_data_t;
+void rb_native_mutex_lock(rb_nativethread_lock_t *lock);
+int rb_native_mutex_trylock(rb_nativethread_lock_t *lock);
+void rb_native_mutex_unlock(rb_nativethread_lock_t *lock);
+void rb_native_mutex_initialize(rb_nativethread_lock_t *lock);
+void rb_native_mutex_destroy(rb_nativethread_lock_t *lock);
+void rb_native_cond_signal(rb_nativethread_cond_t *cond);
+void rb_native_cond_broadcast(rb_nativethread_cond_t *cond);
+void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
+void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec);
+void rb_native_cond_initialize(rb_nativethread_cond_t *cond);
+void rb_native_cond_destroy(rb_nativethread_cond_t *cond);
+
#undef except
#undef try
#undef leave
@@ -71,4 +83,24 @@ typedef struct rb_global_vm_lock_struct {
int wait_yield;
} rb_global_vm_lock_t;
+typedef pthread_key_t native_tls_key_t;
+
+static inline void *
+native_tls_get(native_tls_key_t key)
+{
+ void *ptr = pthread_getspecific(key);
+ if (UNLIKELY(ptr == NULL)) {
+ rb_bug("pthread_getspecific returns NULL");
+ }
+ return ptr;
+}
+
+static inline void
+native_tls_set(native_tls_key_t key, void *ptr)
+{
+ if (UNLIKELY(pthread_setspecific(key, ptr) != 0)) {
+ rb_bug("pthread_setspecific error");
+ }
+}
+
#endif /* RUBY_THREAD_PTHREAD_H */
diff --git a/thread_sync.c b/thread_sync.c
index 3b8c536..deb3858 100644
--- a/thread_sync.c
+++ b/thread_sync.c
@@ -264,13 +264,13 @@ do_mutex_lock(VALUE self, int interruptible_p)
th->status = THREAD_STOPPED_FOREVER;
th->locking_mutex = self;
- th->vm->sleeper++;
+ rb_ractor_sleeper_threads_inc(th->ractor);
/*
* Carefully! while some contended threads are in native_sleep(),
- * vm->sleeper is unstable value. we have to avoid both deadlock
+ * ractor->sleeper is unstable value. we have to avoid both deadlock
* and busy loop.
*/
- if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
+ if ((rb_ractor_living_thread_num(th->ractor) == rb_ractor_sleeper_thread_num(th->ractor)) &&
!patrol_thread) {
timeout = &rel;
patrol_thread = th;
@@ -289,17 +289,18 @@ do_mutex_lock(VALUE self, int interruptible_p)
th->locking_mutex = Qfalse;
if (mutex->th && timeout && !RUBY_VM_INTERRUPTED(th->ec)) {
- rb_check_deadlock(th->vm);
+ rb_check_deadlock(th->ractor);
}
if (th->status == THREAD_STOPPED_FOREVER) {
th->status = prev_status;
}
- th->vm->sleeper--;
+ rb_ractor_sleeper_threads_dec(th->ractor);
if (interruptible_p) {
/* release mutex before checking for interrupts...as interrupt checking
* code might call rb_raise() */
if (mutex->th == th) mutex->th = 0;
+
RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may release mutex */
if (!mutex->th) {
mutex->th = th;
diff --git a/thread_win32.c b/thread_win32.c
index d8eb2b1..842a9ef 100644
--- a/thread_win32.c
+++ b/thread_win32.c
@@ -28,8 +28,6 @@
static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);
-void rb_native_mutex_lock(rb_nativethread_lock_t *lock);
-void rb_native_mutex_unlock(rb_nativethread_lock_t *lock);
static void
w32_error(const char *func)
@@ -97,38 +95,38 @@ w32_mutex_create(void)
#define GVL_DEBUG 0
static void
-gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
+gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
- w32_mutex_lock(vm->gvl.lock);
+ w32_mutex_lock(gvl->lock);
if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
}
static void
-gvl_release(rb_vm_t *vm)
+gvl_release(rb_global_vm_lock_t *gvl)
{
- ReleaseMutex(vm->gvl.lock);
+ ReleaseMutex(gvl->lock);
}
static void
-gvl_yield(rb_vm_t *vm, rb_thread_t *th)
+gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
- gvl_release(th->vm);
+ gvl_release(gvl);
native_thread_yield();
- gvl_acquire(vm, th);
+ gvl_acquire(gvl, th);
}
-static void
-gvl_init(rb_vm_t *vm)
+void
+rb_gvl_init(rb_global_vm_lock_t *gvl)
{
if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
- vm->gvl.lock = w32_mutex_create();
+ gvl->lock = w32_mutex_create();
}
static void
-gvl_destroy(rb_vm_t *vm)
+gvl_destroy(rb_global_vm_lock_t *gvl)
{
if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
- CloseHandle(vm->gvl.lock);
+ CloseHandle(gvl->lock);
}
static rb_thread_t *
@@ -140,13 +138,21 @@ ruby_thread_from_native(void)
static int
ruby_thread_set_native(rb_thread_t *th)
{
+ if (th && th->ec) {
+ rb_ractor_set_current_ec(th->ractor, th->ec);
+ }
return TlsSetValue(ruby_native_thread_key, th);
}
void
Init_native_thread(rb_thread_t *th)
{
- ruby_native_thread_key = TlsAlloc();
+ if ((ruby_current_ec_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
+ rb_bug("TlsAlloc() for ruby_current_ec_key fails");
+ }
+ if ((ruby_native_thread_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
+ rb_bug("TlsAlloc() for ruby_native_thread_key fails");
+ }
ruby_thread_set_native(th);
DuplicateHandle(GetCurrentProcess(),
GetCurrentThread(),
@@ -458,7 +464,6 @@ rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
native_cond_timedwait_ms(cond, mutex, INFINITE);
}
-#if 0
static unsigned long
abs_timespec_to_timeout_ms(const struct timespec *ts)
{
@@ -487,6 +492,19 @@ native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mute
return native_cond_timedwait_ms(cond, mutex, timeout_ms);
}
+static struct timespec native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel);
+
+void
+rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
+{
+ struct timespec rel = {
+ .tv_sec = msec / 1000,
+ .tv_nsec = (msec % 1000) * 1000 * 1000,
+ };
+ struct timespec ts = native_cond_timeout(cond, rel);
+ native_cond_timedwait(cond, mutex, &ts);
+}
+
static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
@@ -516,7 +534,6 @@ native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
return timeout;
}
-#endif
void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
@@ -694,9 +711,13 @@ timer_thread_func(void *dummy)
rb_w32_set_thread_description(GetCurrentThread(), L"ruby-timer-thread");
while (WaitForSingleObject(timer_thread.lock, TIME_QUANTUM_USEC/1000) ==
WAIT_TIMEOUT) {
- timer_thread_function();
+ rb_execution_context_t *running_ec = vm->ractor.main_ractor->threads.running_ec;
+
+ if (running_ec) {
+ timer_thread_function(running_ec);
+ }
ruby_sigchld_handler(vm); /* probably no-op */
- rb_threadptr_check_signal(vm->main_thread);
+ rb_threadptr_check_signal(vm->ractor.main_thread);
}
thread_debug("timer killed\n");
return 0;
diff --git a/thread_win32.h b/thread_win32.h
index 4fd5f8b..0d95731 100644
--- a/thread_win32.h
+++ b/thread_win32.h
@@ -32,4 +32,35 @@ typedef struct rb_global_vm_lock_struct {
HANDLE lock;
} rb_global_vm_lock_t;
+typedef DWORD native_tls_key_t; // TLS index
+
+static inline void *
+native_tls_get(native_tls_key_t key)
+{
+ void *ptr = TlsGetValue(key);
+ if (UNLIKELY(ptr == NULL)) {
+ rb_bug("TlsGetValue() returns NULL");
+ }
+ return ptr;
+}
+
+static inline void
+native_tls_set(native_tls_key_t key, void *ptr)
+{
+ if (UNLIKELY(TlsSetValue(key, ptr) == 0)) {
+ rb_bug("TlsSetValue() error");
+ }
+}
+
+void rb_native_mutex_lock(rb_nativethread_lock_t *lock);
+void rb_native_mutex_unlock(rb_nativethread_lock_t *lock);
+void rb_native_mutex_initialize(rb_nativethread_lock_t *lock);
+void rb_native_mutex_destroy(rb_nativethread_lock_t *lock);
+void rb_native_cond_signal(rb_nativethread_cond_t *cond);
+void rb_native_cond_broadcast(rb_nativethread_cond_t *cond);
+void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
+void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec);
+void rb_native_cond_initialize(rb_nativethread_cond_t *cond);
+void rb_native_cond_destroy(rb_nativethread_cond_t *cond);
+
#endif /* RUBY_THREAD_WIN32_H */
diff --git a/tool/ruby_vm/views/_mjit_compile_ivar.erb b/tool/ruby_vm/views/_mjit_compile_ivar.erb
index e50f4bf..7283d37 100644
--- a/tool/ruby_vm/views/_mjit_compile_ivar.erb
+++ b/tool/ruby_vm/views/_mjit_compile_ivar.erb
@@ -82,7 +82,7 @@
% # JIT: cache hit path of vm_getivar, or cancel JIT (recompile it without any ivar optimization)
fprintf(f, " struct gen_ivtbl *ivtbl;\n");
fprintf(f, " VALUE val;\n");
- fprintf(f, " if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && st_lookup(rb_ivar_generic_ivtbl(), (st_data_t)obj, (st_data_t *)&ivtbl) && index < ivtbl->numiv && (val = ivtbl->ivptr[index]) != Qundef)) {\n");
+ fprintf(f, " if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && st_lookup(rb_ivar_generic_ivtbl(obj), (st_data_t)obj, (st_data_t *)&ivtbl) && index < ivtbl->numiv && (val = ivtbl->ivptr[index]) != Qundef)) {\n");
fprintf(f, " stack[%d] = val;\n", b->stack_size);
fprintf(f, " }\n");
fprintf(f, " else {\n");
diff --git a/transient_heap.c b/transient_heap.c
index aa0d8de..809a237 100644
--- a/transient_heap.c
+++ b/transient_heap.c
@@ -20,6 +20,7 @@
#include "ruby_assert.h"
#include "transient_heap.h"
#include "vm_debug.h"
+#include "vm_sync.h"
#if USE_TRANSIENT_HEAP /* USE_TRANSIENT_HEAP */
/*
@@ -364,68 +365,76 @@ transient_heap_allocatable_header(struct transient_heap* theap, size_t size)
void *
rb_transient_heap_alloc(VALUE obj, size_t req_size)
{
- struct transient_heap* theap = transient_heap_get();
- size_t size = ROUND_UP(req_size + sizeof(struct transient_alloc_header), TRANSIENT_HEAP_ALLOC_ALIGN);
+ void *ret;
- TH_ASSERT(RB_TYPE_P(obj, T_ARRAY) ||
- RB_TYPE_P(obj, T_OBJECT) ||
- RB_TYPE_P(obj, T_STRUCT) ||
- RB_TYPE_P(obj, T_HASH)); /* supported types */
+ RB_VM_LOCK_ENTER();
+ {
+ struct transient_heap* theap = transient_heap_get();
+ size_t size = ROUND_UP(req_size + sizeof(struct transient_alloc_header), TRANSIENT_HEAP_ALLOC_ALIGN);
- if (size > TRANSIENT_HEAP_ALLOC_MAX) {
- if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [too big: %ld] %s\n", (long)size, rb_obj_info(obj));
- return NULL;
- }
+ TH_ASSERT(RB_TYPE_P(obj, T_ARRAY) ||
+ RB_TYPE_P(obj, T_OBJECT) ||
+ RB_TYPE_P(obj, T_STRUCT) ||
+ RB_TYPE_P(obj, T_HASH)); /* supported types */
+
+ if (size > TRANSIENT_HEAP_ALLOC_MAX) {
+ if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [too big: %ld] %s\n", (long)size, rb_obj_info(obj));
+ ret = NULL;
+ }
#if TRANSIENT_HEAP_DEBUG_DONT_PROMOTE == 0
- else if (RB_OBJ_PROMOTED_RAW(obj)) {
- if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [promoted object] %s\n", rb_obj_info(obj));
- return NULL;
- }
+ else if (RB_OBJ_PROMOTED_RAW(obj)) {
+ if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [promoted object] %s\n", rb_obj_info(obj));
+ ret = NULL;
+ }
#else
- else if (RBASIC_CLASS(obj) == 0) {
- if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [hidden object] %s\n", rb_obj_info(obj));
- return NULL;
- }
+ else if (RBASIC_CLASS(obj) == 0) {
+ if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [hidden object] %s\n", rb_obj_info(obj));
+ ret = NULL;
+ }
#endif
- else {
- struct transient_alloc_header *header = transient_heap_allocatable_header(theap, size);
- if (header) {
- void *ptr;
+ else {
+ struct transient_alloc_header *header = transient_heap_allocatable_header(theap, size);
+ if (header) {
+ void *ptr;
- /* header is poisoned to prevent buffer overflow, should
- * unpoison first... */
- asan_unpoison_memory_region(header, sizeof *header, true);
+ /* header is poisoned to prevent buffer overflow, should
+ * unpoison first... */
+ asan_unpoison_memory_region(header, sizeof *header, true);
- header->size = size;
- header->magic = TRANSIENT_HEAP_ALLOC_MAGIC;
- header->next_marked_index = TRANSIENT_HEAP_ALLOC_MARKING_FREE;
- header->obj = obj; /* TODO: can we eliminate it? */
+ header->size = size;
+ header->magic = TRANSIENT_HEAP_ALLOC_MAGIC;
+ header->next_marked_index = TRANSIENT_HEAP_ALLOC_MARKING_FREE;
+ header->obj = obj; /* TODO: can we eliminate it? */
- /* header is fixed; shall poison again */
- asan_poison_memory_region(header, sizeof *header);
- ptr = header + 1;
+ /* header is fixed; shall poison again */
+ asan_poison_memory_region(header, sizeof *header);
+ ptr = header + 1;
- theap->total_objects++; /* statistics */
+ theap->total_objects++; /* statistics */
#if TRANSIENT_HEAP_DEBUG_DONT_PROMOTE
- if (RB_OBJ_PROMOTED_RAW(obj)) {
- transient_heap_promote_add(theap, obj);
- }
+ if (RB_OBJ_PROMOTED_RAW(obj)) {
+ transient_heap_promote_add(theap, obj);
+ }
#endif
- if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: header:%p ptr:%p size:%d obj:%s\n", (void *)header, ptr, (int)size, rb_obj_info(obj));
+ if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: header:%p ptr:%p size:%d obj:%s\n", (void *)header, ptr, (int)size, rb_obj_info(obj));
- RB_DEBUG_COUNTER_INC(theap_alloc);
+ RB_DEBUG_COUNTER_INC(theap_alloc);
- /* ptr is set up; OK to unpoison. */
- asan_unpoison_memory_region(ptr, size - sizeof *header, true);
- return ptr;
- }
- else {
- if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [no enough space: %ld] %s\n", (long)size, rb_obj_info(obj));
- RB_DEBUG_COUNTER_INC(theap_alloc_fail);
- return NULL;
+ /* ptr is set up; OK to unpoison. */
+ asan_unpoison_memory_region(ptr, size - sizeof *header, true);
+ ret = ptr;
+ }
+ else {
+ if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: [no enough space: %ld] %s\n", (long)size, rb_obj_info(obj));
+ RB_DEBUG_COUNTER_INC(theap_alloc_fail);
+ ret = NULL;
+ }
}
}
+ RB_VM_LOCK_LEAVE();
+
+ return ret;
}
void
@@ -534,6 +543,8 @@ alloc_header_to_block(struct transient_heap *theap, struct transient_alloc_heade
void
rb_transient_heap_mark(VALUE obj, const void *ptr)
{
+ ASSERT_vm_locking();
+
struct transient_alloc_header *header = ptr_to_alloc_header(ptr);
asan_unpoison_memory_region(header, sizeof *header, false);
if (header->magic != TRANSIENT_HEAP_ALLOC_MAGIC) rb_bug("rb_transient_heap_mark: wrong header, %s (%p)", rb_obj_info(obj), ptr);
@@ -645,6 +656,8 @@ transient_heap_promote_add(struct transient_heap* theap, VALUE obj)
void
rb_transient_heap_promote(VALUE obj)
{
+ ASSERT_vm_locking();
+
if (transient_heap_ptr(obj, FALSE)) {
struct transient_heap* theap = transient_heap_get();
transient_heap_promote_add(theap, obj);
@@ -663,6 +676,8 @@ alloc_header(struct transient_heap_block* block, int index)
static void
transient_heap_reset(void)
{
+ ASSERT_vm_locking();
+
struct transient_heap* theap = transient_heap_get();
struct transient_heap_block* block;
@@ -759,53 +774,61 @@ transient_heap_update_status(struct transient_heap* theap, enum transient_heap_s
static void
transient_heap_evacuate(void *dmy)
{
- struct transient_heap* theap = transient_heap_get();
+ RB_VM_LOCK_ENTER();
+ {
+ struct transient_heap* theap = transient_heap_get();
- if (theap->status == transient_heap_marking) {
- if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, "!! transient_heap_evacuate: skip while transient_heap_marking\n");
- }
- else {
- VALUE gc_disabled = rb_gc_disable_no_rest();
- struct transient_heap_block* block;
-
- if (TRANSIENT_HEAP_DEBUG >= 1) {
- int i;
- fprintf(stderr, "!! transient_heap_evacuate start total_blocks:%d\n", theap->total_blocks);
- if (TRANSIENT_HEAP_DEBUG >= 4) {
- for (i=0; i<theap->promoted_objects_index; i++) fprintf(stderr, "%4d %s\n", i, rb_obj_info(theap->promoted_objects[i]));
- }
+ if (theap->status == transient_heap_marking) {
+ if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, "!! transient_heap_evacuate: skip while transient_heap_marking\n");
}
- if (TRANSIENT_HEAP_DEBUG >= 2) transient_heap_dump(theap);
+ else {
+ VALUE gc_disabled = rb_gc_disable_no_rest();
+ struct transient_heap_block* block;
- TH_ASSERT(theap->status == transient_heap_none);
- transient_heap_update_status(theap, transient_heap_escaping);
+ RUBY_DEBUG_LOG("start gc_disabled:%d", RTEST(gc_disabled));
- /* evacuate from marked blocks */
- block = theap->marked_blocks;
- while (block) {
- transient_heap_block_evacuate(theap, block);
- block = block->info.next_block;
- }
+ if (TRANSIENT_HEAP_DEBUG >= 1) {
+ int i;
+ fprintf(stderr, "!! transient_heap_evacuate start total_blocks:%d\n", theap->total_blocks);
+ if (TRANSIENT_HEAP_DEBUG >= 4) {
+ for (i=0; i<theap->promoted_objects_index; i++) fprintf(stderr, "%4d %s\n", i, rb_obj_info(theap->promoted_objects[i]));
+ }
+ }
+ if (TRANSIENT_HEAP_DEBUG >= 2) transient_heap_dump(theap);
+
+ TH_ASSERT(theap->status == transient_heap_none);
+ transient_heap_update_status(theap, transient_heap_escaping);
+
+ /* evacuate from marked blocks */
+ block = theap->marked_blocks;
+ while (block) {
+ transient_heap_block_evacuate(theap, block);
+ block = block->info.next_block;
+ }
- /* evacuate from using blocks
+ /* evacuate from using blocks
only affect incremental marking */
- block = theap->using_blocks;
- while (block) {
- transient_heap_block_evacuate(theap, block);
- block = block->info.next_block;
- }
+ block = theap->using_blocks;
+ while (block) {
+ transient_heap_block_evacuate(theap, block);
+ block = block->info.next_block;
+ }
- /* all objects in marked_objects are escaped. */
- transient_heap_reset();
+ /* all objects in marked_objects are escaped. */
+ transient_heap_reset();
- if (TRANSIENT_HEAP_DEBUG > 0) {
- fprintf(stderr, "!! transient_heap_evacuate end total_blocks:%d\n", theap->total_blocks);
- }
+ if (TRANSIENT_HEAP_DEBUG > 0) {
+ fprintf(stderr, "!! transient_heap_evacuate end total_blocks:%d\n", theap->total_blocks);
+ }
- transient_heap_verify(theap);
- transient_heap_update_status(theap, transient_heap_none);
- if (gc_disabled != Qtrue) rb_gc_enable();
+ transient_heap_verify(theap);
+ transient_heap_update_status(theap, transient_heap_none);
+
+ if (gc_disabled != Qtrue) rb_gc_enable();
+ RUBY_DEBUG_LOG("finish", 0);
+ }
}
+ RB_VM_LOCK_LEAVE();
}
static void
@@ -875,6 +898,8 @@ transient_heap_blocks_update_refs(struct transient_heap* theap, struct transient
void
rb_transient_heap_update_references(void)
{
+ ASSERT_vm_locking();
+
struct transient_heap* theap = transient_heap_get();
int i;
@@ -890,6 +915,7 @@ rb_transient_heap_update_references(void)
void
rb_transient_heap_start_marking(int full_marking)
{
+ ASSERT_vm_locking();
RUBY_DEBUG_LOG("full?:%d", full_marking);
struct transient_heap* theap = transient_heap_get();
@@ -940,6 +966,7 @@ rb_transient_heap_start_marking(int full_marking)
void
rb_transient_heap_finish_marking(void)
{
+ ASSERT_vm_locking();
RUBY_DEBUG_LOG("", 0);
struct transient_heap* theap = transient_heap_get();
diff --git a/variable.c b/variable.c
index 5dd286b..0689112 100644
--- a/variable.c
+++ b/variable.c
@@ -36,6 +36,7 @@
#include "transient_heap.h"
#include "variable.h"
#include "vm_core.h"
+#include "ractor_pub.h"
typedef void rb_gvar_compact_t(void *var);
@@ -46,7 +47,7 @@ static VALUE autoload_featuremap; /* feature => autoload_i */
static void check_before_mod_set(VALUE, ID, VALUE, const char *);
static void setup_const_entry(rb_const_entry_t *, VALUE, VALUE, rb_const_flag_t);
static VALUE rb_const_search(VALUE klass, ID id, int exclude, int recurse, int visibility);
-static st_table *generic_iv_tbl;
+static st_table *generic_iv_tbl_;
struct ivar_update {
union {
@@ -61,7 +62,7 @@ void
Init_var_tables(void)
{
rb_global_tbl = rb_id_table_create(0);
- generic_iv_tbl = st_init_numtable();
+ generic_iv_tbl_ = st_init_numtable();
autoload = rb_intern_const("__autoload__");
/* __classpath__: fully qualified class path */
classpath = rb_intern_const("__classpath__");
@@ -329,28 +330,37 @@ struct rb_global_variable {
struct rb_global_entry {
struct rb_global_variable *var;
ID id;
+ bool ractor_local;
};
-static struct rb_id_table *
-global_tbl(void)
-{
- return rb_global_tbl;
-}
-
static struct rb_global_entry*
rb_find_global_entry(ID id)
{
struct rb_global_entry *entry;
VALUE data;
- if (!rb_id_table_lookup(global_tbl(), id, &data)) {
- return NULL;
+ if (!rb_id_table_lookup(rb_global_tbl, id, &data)) {
+ entry = NULL;
+ }
+ else {
+ entry = (struct rb_global_entry *)data;
+ RUBY_ASSERT(entry != NULL);
+ }
+
+ if (UNLIKELY(!rb_ractor_main_p()) && (!entry || !entry->ractor_local)) {
+ rb_raise(rb_eRuntimeError, "can not access global variables %s from non-main Ractors", rb_id2name(id));
}
- entry = (struct rb_global_entry *)data;
- ASSUME(entry != NULL);
+
return entry;
}
+void
+rb_gvar_ractor_local(const char *name)
+{
+ struct rb_global_entry *entry = rb_find_global_entry(rb_intern(name));
+ entry->ractor_local = true;
+}
+
static void
rb_gvar_undef_compactor(void *var)
{
@@ -366,6 +376,7 @@ rb_global_entry(ID id)
var = ALLOC(struct rb_global_variable);
entry->id = id;
entry->var = var;
+ entry->ractor_local = false;
var->counter = 1;
var->data = 0;
var->getter = rb_gvar_undef_getter;
@@ -375,7 +386,7 @@ rb_global_entry(ID id)
var->block_trace = 0;
var->trace = 0;
- rb_id_table_insert(global_tbl(), id, (VALUE)entry);
+ rb_id_table_insert(rb_global_tbl, id, (VALUE)entry);
}
return entry;
}
@@ -502,8 +513,9 @@ update_global_entry(VALUE v, void *ignored)
void
rb_gc_update_global_tbl(void)
{
- if (rb_global_tbl)
+ if (rb_global_tbl) {
rb_id_table_foreach_values(rb_global_tbl, update_global_entry, 0);
+ }
}
static ID
@@ -646,18 +658,17 @@ rb_f_untrace_var(int argc, const VALUE *argv)
ID id;
struct rb_global_entry *entry;
struct trace_var *trace;
- VALUE data;
rb_scan_args(argc, argv, "11", &var, &cmd);
id = rb_check_id(&var);
if (!id) {
rb_name_error_str(var, "undefined global variable %"PRIsVALUE"", QUOTE(var));
}
- if (!rb_id_table_lookup(global_tbl(), id, &data)) {
+ if ((entry = rb_find_global_entry(id)) == NULL) {
rb_name_error(id, "undefined global variable %"PRIsVALUE"", QUOTE_ID(id));
}
- trace = (entry = (struct rb_global_entry *)data)->var->trace;
+ trace = entry->var->trace;
if (NIL_P(cmd)) {
VALUE ary = rb_ary_new();
@@ -801,7 +812,11 @@ rb_f_global_variables(void)
VALUE ary = rb_ary_new();
VALUE sym, backref = rb_backref_get();
- rb_id_table_foreach(global_tbl(), gvar_i, (void *)ary);
+ if (!rb_ractor_main_p()) {
+ rb_raise(rb_eRuntimeError, "can not access global variables from non-main Ractors");
+ }
+
+ rb_id_table_foreach(rb_global_tbl, gvar_i, (void *)ary);
if (!NIL_P(backref)) {
char buf[2];
int i, nmatch = rb_match_count(backref);
@@ -828,7 +843,11 @@ rb_alias_variable(ID name1, ID name2)
{
struct rb_global_entry *entry1, *entry2;
VALUE data1;
- struct rb_id_table *gtbl = global_tbl();
+ struct rb_id_table *gtbl = rb_global_tbl;
+
+ if (!rb_ractor_main_p()) {
+ rb_raise(rb_eRuntimeError, "can not access global variables from non-main Ractors");
+ }
entry2 = rb_global_entry(name2);
if (!rb_id_table_lookup(gtbl, name1, &data1)) {
@@ -859,30 +878,61 @@ rb_alias_variable(ID name1, ID name2)
entry1->var = entry2->var;
}
+static void
+IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(ID id)
+{
+ if (UNLIKELY(!rb_ractor_main_p())) {
+ if (rb_is_instance_id(id)) { // check only normal ivars
+ rb_raise(rb_eRuntimeError, "can not access instance variables of classes/modules from non-main Ractors");
+ }
+ }
+}
+
+#define CVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR() \
+ if (UNLIKELY(!rb_ractor_main_p())) { \
+ rb_raise(rb_eRuntimeError, "can not access class variables from non-main Ractors"); \
+ }
+
+static inline struct st_table *
+generic_ivtbl(VALUE obj, ID id, bool force_check_ractor)
+{
+ if ((force_check_ractor || rb_is_instance_id(id)) && // not internal ID
+ UNLIKELY(rb_ractor_shareable_p(obj) && !rb_ractor_main_p())) {
+ rb_raise(rb_eRuntimeError, "can not access instance variables of shareable objects from non-main Ractors");
+ }
+ return generic_iv_tbl_;
+}
+
+static inline struct st_table *
+generic_ivtbl_no_ractor_check(VALUE obj)
+{
+ return generic_ivtbl(obj, 0, false);
+}
+
+MJIT_FUNC_EXPORTED struct st_table *
+rb_ivar_generic_ivtbl(VALUE obj)
+{
+ return generic_ivtbl(obj, 0, true);
+}
+
static int
-gen_ivtbl_get(VALUE obj, struct gen_ivtbl **ivtbl)
+gen_ivtbl_get(VALUE obj, ID id, struct gen_ivtbl **ivtbl)
{
st_data_t data;
- if (st_lookup(generic_iv_tbl, (st_data_t)obj, &data)) {
+ if (st_lookup(generic_ivtbl(obj, id, false), (st_data_t)obj, &data)) {
*ivtbl = (struct gen_ivtbl *)data;
return 1;
}
return 0;
}
-MJIT_FUNC_EXPORTED struct st_table *
-rb_ivar_generic_ivtbl(void)
-{
- return generic_iv_tbl;
-}
-
static VALUE
generic_ivar_delete(VALUE obj, ID id, VALUE undef)
{
struct gen_ivtbl *ivtbl;
- if (gen_ivtbl_get(obj, &ivtbl)) {
+ if (gen_ivtbl_get(obj, id, &ivtbl)) {
st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
st_data_t index;
@@ -903,7 +953,7 @@ generic_ivar_get(VALUE obj, ID id, VALUE undef)
{
struct gen_ivtbl *ivtbl;
- if (gen_ivtbl_get(obj, &ivtbl)) {
+ if (gen_ivtbl_get(obj, id, &ivtbl)) {
st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
st_data_t index;
@@ -993,7 +1043,7 @@ generic_ivar_defined(VALUE obj, ID id)
if (!iv_index_tbl) return Qfalse;
if (!st_lookup(iv_index_tbl, (st_data_t)id, &index)) return Qfalse;
- if (!gen_ivtbl_get(obj, &ivtbl)) return Qfalse;
+ if (!gen_ivtbl_get(obj, id, &ivtbl)) return Qfalse;
if ((index < ivtbl->numiv) && (ivtbl->ivptr[index] != Qundef))
return Qtrue;
@@ -1011,7 +1061,7 @@ generic_ivar_remove(VALUE obj, ID id, VALUE *valp)
if (!iv_index_tbl) return 0;
if (!st_lookup(iv_index_tbl, key, &index)) return 0;
- if (!gen_ivtbl_get(obj, &ivtbl)) return 0;
+ if (!gen_ivtbl_get(obj, id, &ivtbl)) return 0;
if (index < ivtbl->numiv) {
if (ivtbl->ivptr[index] != Qundef) {
@@ -1038,7 +1088,7 @@ rb_mark_generic_ivar(VALUE obj)
{
struct gen_ivtbl *ivtbl;
- if (gen_ivtbl_get(obj, &ivtbl)) {
+ if (gen_ivtbl_get(obj, 0, &ivtbl)) {
gen_ivtbl_mark(ivtbl);
}
}
@@ -1049,8 +1099,8 @@ rb_mv_generic_ivar(VALUE rsrc, VALUE dst)
st_data_t key = (st_data_t)rsrc;
struct gen_ivtbl *ivtbl;
- if (st_delete(generic_iv_tbl, &key, (st_data_t *)&ivtbl))
- st_insert(generic_iv_tbl, (st_data_t)dst, (st_data_t)ivtbl);
+ if (st_delete(generic_ivtbl_no_ractor_check(rsrc), &key, (st_data_t *)&ivtbl))
+ st_insert(generic_ivtbl_no_ractor_check(dst), (st_data_t)dst, (st_data_t)ivtbl);
}
void
@@ -1059,7 +1109,7 @@ rb_free_generic_ivar(VALUE obj)
st_data_t key = (st_data_t)obj;
struct gen_ivtbl *ivtbl;
- if (st_delete(generic_iv_tbl, &key, (st_data_t *)&ivtbl))
+ if (st_delete(generic_ivtbl_no_ractor_check(obj), &key, (st_data_t *)&ivtbl))
xfree(ivtbl);
}
@@ -1068,7 +1118,7 @@ rb_generic_ivar_memsize(VALUE obj)
{
struct gen_ivtbl *ivtbl;
- if (gen_ivtbl_get(obj, &ivtbl))
+ if (gen_ivtbl_get(obj, 0, &ivtbl))
return gen_ivtbl_bytes(ivtbl->numiv);
return 0;
}
@@ -1111,6 +1161,7 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef)
break;
case T_CLASS:
case T_MODULE:
+ IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
if (RCLASS_IV_TBL(obj) &&
st_lookup(RCLASS_IV_TBL(obj), (st_data_t)id, &index))
return (VALUE)index;
@@ -1167,6 +1218,7 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef)
break;
case T_CLASS:
case T_MODULE:
+ IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
if (RCLASS_IV_TBL(obj) &&
st_delete(RCLASS_IV_TBL(obj), (st_data_t *)&id, &index))
return (VALUE)index;
@@ -1223,7 +1275,7 @@ generic_ivar_set(VALUE obj, ID id, VALUE val)
ivup.iv_extended = 0;
ivup.u.iv_index_tbl = iv_index_tbl_make(obj);
iv_index_tbl_extend(&ivup, id);
- st_update(generic_iv_tbl, (st_data_t)obj, generic_ivar_update,
+ st_update(generic_ivtbl(obj, id, false), (st_data_t)obj, generic_ivar_update,
(st_data_t)&ivup);
ivup.u.ivtbl->ivptr[ivup.index] = val;
@@ -1347,6 +1399,7 @@ ivar_set(VALUE obj, ID id, VALUE val)
break;
case T_CLASS:
case T_MODULE:
+ IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
if (!RCLASS_IV_TBL(obj)) RCLASS_IV_TBL(obj) = st_init_numtable();
rb_class_ivar_set(obj, id, val);
break;
@@ -1393,6 +1446,7 @@ rb_ivar_defined(VALUE obj, ID id)
break;
case T_CLASS:
case T_MODULE:
+ IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
if (RCLASS_IV_TBL(obj) && st_is_member(RCLASS_IV_TBL(obj), (st_data_t)id))
return Qtrue;
break;
@@ -1469,7 +1523,7 @@ gen_ivar_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg)
st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
if (!iv_index_tbl) return;
- if (!gen_ivtbl_get(obj, &data.ivtbl)) return;
+ if (!gen_ivtbl_get(obj, 0, &data.ivtbl)) return;
data.func = (int (*)(ID key, VALUE val, st_data_t arg))func;
data.arg = arg;
@@ -1513,14 +1567,14 @@ rb_copy_generic_ivar(VALUE clone, VALUE obj)
if (!FL_TEST(obj, FL_EXIVAR)) {
goto clear;
}
- if (gen_ivtbl_get(obj, &ivtbl)) {
+ if (gen_ivtbl_get(obj, 0, &ivtbl)) {
struct givar_copy c;
uint32_t i;
if (gen_ivtbl_count(ivtbl) == 0)
goto clear;
- if (gen_ivtbl_get(clone, &c.ivtbl)) {
+ if (gen_ivtbl_get(clone, 0, &c.ivtbl)) {
for (i = 0; i < c.ivtbl->numiv; i++)
c.ivtbl->ivptr[i] = Qundef;
}
@@ -1536,7 +1590,8 @@ rb_copy_generic_ivar(VALUE clone, VALUE obj)
* c.ivtbl may change in gen_ivar_copy due to realloc,
* no need to free
*/
- st_insert(generic_iv_tbl, (st_data_t)clone, (st_data_t)c.ivtbl);
+ generic_ivtbl_no_ractor_check(clone);
+ st_insert(generic_ivtbl_no_ractor_check(obj), (st_data_t)clone, (st_data_t)c.ivtbl);
}
return;
@@ -1557,6 +1612,7 @@ rb_ivar_foreach(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg)
break;
case T_CLASS:
case T_MODULE:
+ IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(0);
if (RCLASS_IV_TBL(obj)) {
st_foreach_safe(RCLASS_IV_TBL(obj), func, arg);
}
@@ -1599,7 +1655,7 @@ rb_ivar_count(VALUE obj)
if (FL_TEST(obj, FL_EXIVAR)) {
struct gen_ivtbl *ivtbl;
- if (gen_ivtbl_get(obj, &ivtbl)) {
+ if (gen_ivtbl_get(obj, 0, &ivtbl)) {
return gen_ivtbl_count(ivtbl);
}
}
@@ -1720,6 +1776,7 @@ rb_obj_remove_instance_variable(VALUE obj, VALUE name)
break;
case T_CLASS:
case T_MODULE:
+ IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
n = id;
if (RCLASS_IV_TBL(obj) && st_delete(RCLASS_IV_TBL(obj), &n, &v)) {
return (VALUE)v;
@@ -2383,7 +2440,14 @@ static VALUE
rb_const_get_0(VALUE klass, ID id, int exclude, int recurse, int visibility)
{
VALUE c = rb_const_search(klass, id, exclude, recurse, visibility);
- if (c != Qundef) return c;
+ if (c != Qundef) {
+ if (UNLIKELY(!rb_ractor_main_p())) {
+ if (!rb_ractor_shareable_p(c)) {
+ rb_raise(rb_eNameError, "can not access non-sharable objects in constant %"PRIsVALUE"::%s by non-main Ractor.", rb_class_path(klass), rb_id2name(id));
+ }
+ }
+ return c;
+ }
return rb_const_missing(klass, ID2SYM(id));
}
@@ -2824,6 +2888,10 @@ rb_const_set(VALUE klass, ID id, VALUE val)
QUOTE_ID(id));
}
+ if (!rb_ractor_shareable_p(val) && !rb_ractor_main_p()) {
+ rb_raise(rb_eNameError, "can not set constants with non-shareable objects by non-main Ractors");
+ }
+
check_before_mod_set(klass, id, val, "constant");
if (!tbl) {
RCLASS_CONST_TBL(klass) = tbl = rb_id_table_create(0);
@@ -3141,6 +3209,7 @@ cvar_overtaken(VALUE front, VALUE target, ID id)
}
#define CVAR_LOOKUP(v,r) do {\
+ CVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(); \
if (cvar_lookup_at(klass, id, (v))) {r;}\
CVAR_FOREACH_ANCESTORS(klass, v, r);\
} while(0)
diff --git a/variable.h b/variable.h
index ec62023..2f010b6 100644
--- a/variable.h
+++ b/variable.h
@@ -16,6 +16,6 @@ struct gen_ivtbl {
VALUE ivptr[FLEX_ARY_LEN];
};
-struct st_table *rb_ivar_generic_ivtbl(void);
+struct st_table *rb_ivar_generic_ivtbl(VALUE obj);
#endif /* RUBY_TOPLEVEL_VARIABLE_H */
diff --git a/vm.c b/vm.c
index 6b78f54..f2ce857 100644
--- a/vm.c
+++ b/vm.c
@@ -1,6 +1,6 @@
/**********************************************************************
- vm.c -
+ vm.c -
$Author$
@@ -34,6 +34,8 @@
#include "vm_debug.h"
#include "vm_exec.h"
#include "vm_insnhelper.h"
+#include "ractor.h"
+#include "vm_sync.h"
#include "builtin.h"
@@ -376,7 +378,7 @@ VALUE rb_block_param_proxy;
#define ruby_vm_redefined_flag GET_VM()->redefined_flag
VALUE ruby_vm_const_missing_count = 0;
rb_vm_t *ruby_current_vm_ptr = NULL;
-rb_execution_context_t *ruby_current_execution_context_ptr = NULL;
+native_tls_key_t ruby_current_ec_key;
rb_event_flag_t ruby_vm_event_flags;
rb_event_flag_t ruby_vm_event_enabled_global_flags;
@@ -398,6 +400,8 @@ static const struct rb_callcache vm_empty_cc = {
static void thread_free(void *ptr);
+//
+
void
rb_vm_inc_const_missing_count(void)
{
@@ -568,7 +572,6 @@ rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_
MJIT_FUNC_EXPORTED rb_control_frame_t *
rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
- if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) bp();
while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
if (VM_FRAME_RUBYFRAME_P(cfp)) {
return (rb_control_frame_t *)cfp;
@@ -944,6 +947,27 @@ rb_proc_dup(VALUE self)
return procval;
}
+VALUE
+rb_proc_isolate_bang(VALUE self)
+{
+ // check accesses
+ const rb_iseq_t *iseq = vm_proc_iseq(self);
+ if (iseq && iseq->body->access_outer_variables) {
+ rb_raise(rb_eArgError, "can not isolate a Proc because it accesses outer variables.");
+ }
+
+ rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
+ proc->is_isolated = TRUE;
+ return self;
+}
+
+VALUE
+rb_proc_isolate(VALUE self)
+{
+ VALUE dst = rb_proc_dup(self);
+ rb_proc_isolate_bang(dst);
+ return dst;
+}
MJIT_FUNC_EXPORTED VALUE
rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
@@ -1283,6 +1307,20 @@ rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
}
}
+VALUE
+rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
+ int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
+{
+ vm_block_handler_verify(passed_block_handler);
+
+ if (proc->is_from_method) {
+ return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
+ }
+ else {
+ return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
+ }
+}
+
/* special variable */
static rb_control_frame_t *
@@ -2257,17 +2295,9 @@ rb_vm_update_references(void *ptr)
{
if (ptr) {
rb_vm_t *vm = ptr;
- rb_thread_t *th = 0;
rb_gc_update_tbl_refs(vm->frozen_strings);
-
- list_for_each(&vm->living_threads, th, vmlt_node) {
- th->self = rb_gc_location(th->self);
- }
-
- vm->thgroup_default = rb_gc_location(vm->thgroup_default);
vm->mark_object_ary = rb_gc_location(vm->mark_object_ary);
-
vm->load_path = rb_gc_location(vm->load_path);
vm->load_path_snapshot = rb_gc_location(vm->load_path_snapshot);
@@ -2294,14 +2324,17 @@ rb_vm_mark(void *ptr)
RUBY_GC_INFO("-------------------------------------------------\n");
if (ptr) {
rb_vm_t *vm = ptr;
- rb_thread_t *th = 0;
+ rb_ractor_t *r;
long i, len;
const VALUE *obj_ary;
- list_for_each(&vm->living_threads, th, vmlt_node) {
- rb_gc_mark_movable(th->self);
- }
- rb_gc_mark_movable(vm->thgroup_default);
+ list_for_each(&vm->ractor.set, r, vmlr_node) {
+ // ractor.set only contains blocking or running ractors
+ VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
+ rb_ractor_status_p(r, ractor_running));
+ rb_gc_mark(rb_ractor_self(r));
+ }
+
rb_gc_mark_movable(vm->mark_object_ary);
len = RARRAY_LEN(vm->mark_object_ary);
@@ -2379,10 +2412,11 @@ ruby_vm_destruct(rb_vm_t *vm)
RUBY_FREE_ENTER("vm");
if (vm) {
- rb_thread_t *th = vm->main_thread;
+ rb_thread_t *th = vm->ractor.main_thread;
struct rb_objspace *objspace = vm->objspace;
- vm->main_thread = 0;
- if (th) {
+ vm->ractor.main_thread = NULL;
+
+ if (th) {
rb_fiber_reset_root_local_storage(th);
thread_free(th);
}
@@ -2397,7 +2431,6 @@ ruby_vm_destruct(rb_vm_t *vm)
st_free_table(vm->frozen_strings);
vm->frozen_strings = 0;
}
- rb_vm_gvl_destroy(vm);
RB_ALTSTACK_FREE(vm->main_altstack);
if (objspace) {
rb_objspace_free(objspace);
@@ -2416,7 +2449,8 @@ vm_memsize(const void *ptr)
const rb_vm_t *vmobj = ptr;
size_t size = sizeof(rb_vm_t);
- size += vmobj->living_thread_num * sizeof(rb_thread_t);
+ // TODO
+ // size += vmobj->ractor_num * sizeof(rb_ractor_t);
if (vmobj->defined_strings) {
size += DEFINED_EXPR * sizeof(VALUE);
@@ -2573,6 +2607,7 @@ rb_execution_context_mark(const rb_execution_context_t *ec)
rb_control_frame_t *cfp = ec->cfp;
rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
+ VM_ASSERT(sp == ec->cfp->sp);
rb_gc_mark_vm_stack_values((long)(sp - p), p);
while (cfp != limit_cfp) {
@@ -2640,6 +2675,7 @@ thread_mark(void *ptr)
/* mark ruby objects */
switch (th->invoke_type) {
case thread_invoke_type_proc:
+ case thread_invoke_type_ractor_proc:
RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.proc);
RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.args);
break;
@@ -2650,6 +2686,7 @@ thread_mark(void *ptr)
break;
}
+ rb_gc_mark(rb_ractor_self(th->ractor));
RUBY_MARK_UNLESS_NULL(th->thgroup);
RUBY_MARK_UNLESS_NULL(th->value);
RUBY_MARK_UNLESS_NULL(th->pending_interrupt_queue);
@@ -2685,8 +2722,8 @@ thread_free(void *ptr)
rb_threadptr_root_fiber_release(th);
- if (th->vm && th->vm->main_thread == th) {
- RUBY_GC_INFO("main thread\n");
+ if (th->vm && th->vm->ractor.main_thread == th) {
+ RUBY_GC_INFO("MRI main thread\n");
}
else {
ruby_xfree(ptr);
@@ -2815,15 +2852,17 @@ th_init(rb_thread_t *th, VALUE self)
static VALUE
ruby_thread_init(VALUE self)
{
- rb_thread_t *th = rb_thread_ptr(self);
- rb_vm_t *vm = GET_THREAD()->vm;
+ rb_thread_t *th = GET_THREAD();
+ rb_thread_t *targe_th = rb_thread_ptr(self);
+ rb_vm_t *vm = th->vm;
- th->vm = vm;
- th_init(th, self);
+ targe_th->vm = vm;
+ th_init(targe_th, self);
- th->top_wrapper = 0;
- th->top_self = rb_vm_top_self();
- th->ec->root_svar = Qfalse;
+ targe_th->top_wrapper = 0;
+ targe_th->top_self = rb_vm_top_self();
+ targe_th->ec->root_svar = Qfalse;
+ targe_th->ractor = th->ractor;
return self;
}
@@ -3341,23 +3380,21 @@ Init_VM(void)
VALUE filename = rb_fstring_lit("<main>");
const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
+ // Ractor setup
+ rb_ractor_main_setup(vm, th->ractor, th);
+
/* create vm object */
vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
/* create main thread */
- th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
-
- vm->main_thread = th;
- vm->running_thread = th;
+ th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
+ vm->ractor.main_thread = th;
+ vm->ractor.main_ractor = th->ractor;
th->vm = vm;
th->top_wrapper = 0;
th->top_self = rb_vm_top_self();
- rb_thread_set_current(th);
-
- rb_vm_living_threads_insert(vm, th);
-
- rb_gc_register_mark_object((VALUE)iseq);
+ rb_gc_register_mark_object((VALUE)iseq);
th->ec->cfp->iseq = iseq;
th->ec->cfp->pc = iseq->body->iseq_encoded;
th->ec->cfp->self = th->top_self;
@@ -3385,7 +3422,7 @@ Init_VM(void)
void
rb_vm_set_progname(VALUE filename)
{
- rb_thread_t *th = GET_VM()->main_thread;
+ rb_thread_t *th = GET_VM()->ractor.main_thread;
rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
--cfp;
@@ -3413,8 +3450,13 @@ Init_BareVM(void)
Init_native_thread(th);
th->vm = vm;
th_init(th, 0);
- rb_thread_set_current_raw(th);
+ vm->ractor.main_ractor = th->ractor = rb_ractor_main_alloc();
+ rb_ractor_set_current_ec(th->ractor, th->ec);
ruby_thread_init_stack(th);
+
+ rb_native_mutex_initialize(&vm->ractor.sync.lock);
+ rb_native_cond_initialize(&vm->ractor.sync.barrier_cond);
+ rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
}
void
diff --git a/vm_core.h b/vm_core.h
index bf04288..7071442 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -419,6 +419,7 @@ struct rb_iseq_constant_body {
char catch_except_p; /* If a frame of this ISeq may catch exception, set TRUE */
bool builtin_inline_p; // This ISeq's builtin func is safe to be inlined by MJIT
+ char access_outer_variables;
#if USE_MJIT
/* The following fields are MJIT related info. */
@@ -554,12 +555,30 @@ typedef const struct rb_builtin_function *RB_BUILTIN;
typedef struct rb_vm_struct {
VALUE self;
- rb_global_vm_lock_t gvl;
+ struct {
+ struct list_head set;
+ unsigned int cnt;
+ unsigned int blocking_cnt;
- struct rb_thread_struct *main_thread;
+ struct rb_ractor_struct *main_ractor;
+ struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
- /* persists across uncontended GVL release/acquire for time slice */
- const struct rb_thread_struct *running_thread;
+ struct {
+ // monitor
+ rb_nativethread_lock_t lock;
+ struct rb_ractor_struct *lock_owner;
+ unsigned int lock_rec;
+
+ // barrier
+ bool barrier_waiting;
+ unsigned int barrier_cnt;
+ rb_nativethread_cond_t barrier_cond;
+
+ // join at exit
+ rb_nativethread_cond_t terminate_cond;
+ bool terminate_waiting;
+ } sync;
+ } ractor;
#ifdef USE_SIGALTSTACK
void *main_altstack;
@@ -570,9 +589,6 @@ typedef struct rb_vm_struct {
struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
struct list_head waiting_fds; /* <=> struct waiting_fd */
- struct list_head living_threads;
- VALUE thgroup_default;
- int living_thread_num;
/* set in single-threaded processes only: */
volatile int ubf_async_safe;
@@ -580,9 +596,7 @@ typedef struct rb_vm_struct {
unsigned int running: 1;
unsigned int thread_abort_on_exception: 1;
unsigned int thread_report_on_exception: 1;
-
unsigned int safe_level_: 1;
- int sleeper;
/* object management */
VALUE mark_object_ary;
@@ -890,9 +904,12 @@ void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t
// @param ec the execution context to update.
void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
+typedef struct rb_ractor_struct rb_ractor_t;
+
typedef struct rb_thread_struct {
- struct list_node vmlt_node;
+ struct list_node lt_node; // managed by a ractor
VALUE self;
+ rb_ractor_t *ractor;
rb_vm_t *vm;
rb_execution_context_t *ec;
@@ -955,9 +972,10 @@ typedef struct rb_thread_struct {
} func;
} invoke_arg;
- enum {
+ enum thread_invoke_type {
thread_invoke_type_none = 0,
thread_invoke_type_proc,
+ thread_invoke_type_ractor_proc,
thread_invoke_type_func
} invoke_type;
@@ -1039,8 +1057,12 @@ typedef struct {
const struct rb_block block;
unsigned int is_from_method: 1; /* bool */
unsigned int is_lambda: 1; /* bool */
+ unsigned int is_isolated: 1; /* bool */
} rb_proc_t;
+VALUE rb_proc_isolate(VALUE self);
+VALUE rb_proc_isolate_bang(VALUE self);
+
typedef struct {
VALUE flags; /* imemo header */
rb_iseq_t *iseq;
@@ -1628,11 +1650,12 @@ VALUE rb_vm_env_local_variables(const rb_env_t *env);
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
-void rb_vm_gvl_destroy(rb_vm_t *vm);
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);
+void rb_gvl_destroy(rb_global_vm_lock_t *gvl);
+
void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
@@ -1645,22 +1668,7 @@ rb_vm_living_threads_init(rb_vm_t *vm)
list_head_init(&vm->waiting_pids);
list_head_init(&vm->workqueue);
list_head_init(&vm->waiting_grps);
- list_head_init(&vm->living_threads);
- vm->living_thread_num = 0;
-}
-
-static inline void
-rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th)
-{
- list_add_tail(&vm->living_threads, &th->vmlt_node);
- vm->living_thread_num++;
-}
-
-static inline void
-rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th)
-{
- list_del(&th->vmlt_node);
- vm->living_thread_num--;
+ list_head_init(&vm->ractor.set);
}
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
@@ -1700,20 +1708,24 @@ MJIT_STATIC const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_
VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
+rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
+
/* for thread */
#if RUBY_VM_THREAD_MODEL == 2
RUBY_SYMBOL_EXPORT_BEGIN
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
-RUBY_EXTERN rb_execution_context_t *ruby_current_execution_context_ptr;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
RUBY_EXTERN unsigned int ruby_vm_event_local_num;
+RUBY_EXTERN native_tls_key_t ruby_current_ec_key;
+
RUBY_SYMBOL_EXPORT_END
#define GET_VM() rb_current_vm()
+#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC() rb_current_execution_context()
@@ -1723,6 +1735,19 @@ rb_ec_thread_ptr(const rb_execution_context_t *ec)
return ec->thread_ptr;
}
+static inline rb_ractor_t *
+rb_ec_ractor_ptr(const rb_execution_context_t *ec)
+{
+ const rb_thread_t *th = rb_ec_thread_ptr(ec);
+ if (th) {
+ VM_ASSERT(th->ractor != NULL);
+ return th->ractor;
+ }
+ else {
+ return NULL;
+ }
+}
+
static inline rb_vm_t *
rb_ec_vm_ptr(const rb_execution_context_t *ec)
{
@@ -1738,7 +1763,9 @@ rb_ec_vm_ptr(const rb_execution_context_t *ec)
static inline rb_execution_context_t *
rb_current_execution_context(void)
{
- return ruby_current_execution_context_ptr;
+ rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
+ VM_ASSERT(ec != NULL);
+ return ec;
}
static inline rb_thread_t *
@@ -1748,31 +1775,25 @@ rb_current_thread(void)
return rb_ec_thread_ptr(ec);
}
+static inline rb_ractor_t *
+rb_current_ractor(void)
+{
+ const rb_execution_context_t *ec = GET_EC();
+ return rb_ec_ractor_ptr(ec);
+}
+
static inline rb_vm_t *
rb_current_vm(void)
{
+#if 0 // TODO: reconsider the assertions
VM_ASSERT(ruby_current_vm_ptr == NULL ||
ruby_current_execution_context_ptr == NULL ||
rb_ec_thread_ptr(GET_EC()) == NULL ||
rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
- return ruby_current_vm_ptr;
-}
-
-static inline void
-rb_thread_set_current_raw(const rb_thread_t *th)
-{
- ruby_current_execution_context_ptr = th->ec;
-}
+#endif
-static inline void
-rb_thread_set_current(rb_thread_t *th)
-{
- if (th->vm->running_thread != th) {
- th->running_time_us = 0;
- }
- rb_thread_set_current_raw(th);
- th->vm->running_thread = th;
+ return ruby_current_vm_ptr;
}
#else
@@ -1783,13 +1804,17 @@ enum {
TIMER_INTERRUPT_MASK = 0x01,
PENDING_INTERRUPT_MASK = 0x02,
POSTPONED_JOB_INTERRUPT_MASK = 0x04,
- TRAP_INTERRUPT_MASK = 0x08
+ TRAP_INTERRUPT_MASK = 0x08,
+ TERMINATE_INTERRUPT_MASK = 0x10,
+ VM_BARRIER_INTERRUPT_MASK = 0x20,
};
#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
+#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
+#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
(PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
#define RUBY_VM_INTERRUPTED_ANY(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask)
diff --git a/vm_dump.c b/vm_dump.c
index 5b65860..88c3207 100644
--- a/vm_dump.c
+++ b/vm_dump.c
@@ -35,6 +35,7 @@
#include "internal/vm.h"
#include "iseq.h"
#include "vm_core.h"
+#include "ractor.h"
#define MAX_POSBUF 128
@@ -1092,12 +1093,13 @@ const char *ruby_fill_thread_id_string(rb_nativethread_id_t thid, rb_thread_id_s
void
rb_vmdebug_stack_dump_all_threads(void)
{
- rb_vm_t *vm = GET_VM();
rb_thread_t *th = NULL;
+ rb_ractor_t *r = GET_RACTOR();
- list_for_each(&vm->living_threads, th, vmlt_node) {
+ // TODO: now it only shows current ractor
+ list_for_each(&r->threads.set, th, lt_node) {
#ifdef NON_SCALAR_THREAD_ID
- rb_thread_id_string_t buf;
+ rb_thread_id_string_t buf;
ruby_fill_thread_id_string(th->thread_id, buf);
fprintf(stderr, "th: %p, native_id: %s\n", th, buf);
#else
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 9dcd75c..428331a 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -985,7 +985,13 @@ vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_
if (is_defined) {
return 1;
}
- else {
+ else {
+ if (UNLIKELY(!rb_ractor_main_p())) {
+ if (!rb_ractor_shareable_p(val)) {
+ rb_raise(rb_eNameError,
+ "can not access non-sharable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
+ }
+ }
return val;
}
}
@@ -1084,7 +1090,7 @@ vm_getivar(VALUE obj, ID id, IVC ic, const struct rb_callcache *cc, int is_attr)
else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
struct gen_ivtbl *ivtbl;
- if (LIKELY(st_lookup(rb_ivar_generic_ivtbl(), (st_data_t)obj, (st_data_t *)&ivtbl)) &&
+ if (LIKELY(st_lookup(rb_ivar_generic_ivtbl(obj), (st_data_t)obj, (st_data_t *)&ivtbl)) &&
LIKELY(index < ivtbl->numiv)) {
val = ivtbl->ivptr[index];
}
@@ -1106,8 +1112,7 @@ vm_getivar(VALUE obj, ID id, IVC ic, const struct rb_callcache *cc, int is_attr)
}
else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
struct gen_ivtbl *ivtbl;
-
- if (LIKELY(st_lookup(rb_ivar_generic_ivtbl(), (st_data_t)obj, (st_data_t *)&ivtbl))) {
+ if (LIKELY(st_lookup(rb_ivar_generic_ivtbl(obj), (st_data_t)obj, (st_data_t *)&ivtbl))) {
numiv = ivtbl->numiv;
ivptr = ivtbl->ivptr;
iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
@@ -1634,26 +1639,30 @@ vm_search_cc(VALUE klass, const struct rb_callinfo *ci)
MJIT_FUNC_EXPORTED void
rb_vm_search_method_slowpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
{
- const struct rb_callcache *cc = vm_search_cc(klass, cd->ci);
+ RB_VM_LOCK_ENTER();
+ {
+ const struct rb_callcache *cc = vm_search_cc(klass, cd->ci);
- VM_ASSERT(cc);
- VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc);
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
- if (! cd_owner) {
- cd->cc = cc;
- }
- else if (cc == &vm_empty_cc) {
- cd->cc = cc;
- }
- else {
- VM_ASSERT(vm_cc_markable(cc));
- RB_OBJ_WRITE(cd_owner, &cd->cc, cc);
- }
+ if (! cd_owner) {
+ cd->cc = cc;
+ }
+ else if (cc == &vm_empty_cc) {
+ cd->cc = cc;
+ }
+ else {
+ VM_ASSERT(vm_cc_markable(cc));
+ RB_OBJ_WRITE(cd_owner, &cd->cc, cc);
+ }
- VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
- VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
- VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
- VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
+ VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
+ VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
+ VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
+ VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
+ }
+ RB_VM_LOCK_LEAVE();
}
#endif
@@ -4297,7 +4306,8 @@ vm_opt_newarray_min(rb_num_t num, const VALUE *ptr)
static int
vm_ic_hit_p(IC ic, const VALUE *reg_ep)
{
- if (ic->ic_serial == GET_GLOBAL_CONSTANT_STATE()) {
+ if (ic->ic_serial == GET_GLOBAL_CONSTANT_STATE() &&
+ rb_ractor_main_p()) {
return (ic->ic_cref == NULL || // no need to check CREF
ic->ic_cref == vm_get_cref(reg_ep));
}
@@ -5023,6 +5033,7 @@ Init_vm_stack_canary(void)
{
/* This has to be called _after_ our PRNG is properly set up. */
int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
+ vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
vm_stack_canary_was_born = true;
VM_ASSERT(n == 0);
diff --git a/vm_insnhelper.h b/vm_insnhelper.h
index 53099b2..40c3225 100644
--- a/vm_insnhelper.h
+++ b/vm_insnhelper.h
@@ -93,7 +93,7 @@ enum vm_regan_acttype {
#define SET_SP(x) (VM_REG_SP = (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define INC_SP(x) (VM_REG_SP += (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define DEC_SP(x) (VM_REG_SP -= (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
-#define SET_SV(x) (*GET_SP() = (x))
+#define SET_SV(x) (*GET_SP() = rb_ractor_confirm_belonging(x))
/* set current stack value as x */
/* instruction sequence C struct */
diff --git a/vm_sync.c b/vm_sync.c
new file mode 100644
index 0000000..e3104d1
--- /dev/null
+++ b/vm_sync.c
@@ -0,0 +1,250 @@
+#include "vm_core.h"
+#include "vm_sync.h"
+#include "ractor.h"
+#include "vm_debug.h"
+#include "gc.h"
+
+static bool vm_barrier_finish_p(rb_vm_t *vm);
+
+// True if the VM-wide lock is currently held by the calling ractor
+// (vm_lock_enter records the holder in vm->ractor.sync.lock_owner).
+static bool
+vm_locked(rb_vm_t *vm)
+{
+    return vm->ractor.sync.lock_owner == GET_RACTOR();
+}
+
+#if VM_CHECK_MODE > 0
+// Debug-build check: the caller must hold the VM lock.
+// In single-ractor mode no locking is required, so this is a no-op then.
+void
+ASSERT_vm_locking(void)
+{
+    if (rb_multi_ractor_p()) {
+        rb_vm_t *vm = GET_VM();
+        VM_ASSERT(vm_locked(vm));
+    }
+}
+#endif
+
+#if VM_CHECK_MODE > 0
+// Debug-build check: the caller must NOT hold the VM lock
+// (used to guard against recursive rb_vm_lock_body calls).
+void
+ASSERT_vm_unlocking(void)
+{
+    if (rb_multi_ractor_p()) {
+        rb_vm_t *vm = GET_VM();
+        VM_ASSERT(!vm_locked(vm));
+    }
+}
+#endif
+
+// Public predicate: does the current ractor hold the VM lock?
+bool
+rb_vm_locked_p(void)
+{
+    return vm_locked(GET_VM());
+}
+
+// Acquire (or re-enter) the VM-wide lock.
+//
+//   locked: true when the caller already holds the lock; in that case only
+//           the recursion counter is bumped.
+//   lev:    out-param receiving the new recursion depth; vm_lock_leave
+//           asserts the same value on release (detects unbalanced pairs).
+//
+// If a barrier is in progress (vm->ractor.sync.barrier_waiting), the newly
+// arriving ractor first cooperates with the barrier: it marks itself as
+// blocking, possibly wakes the barrier owner, and sleeps until barrier_cnt
+// changes (i.e. the barrier was released).
+static void
+vm_lock_enter(rb_vm_t *vm, bool locked, unsigned int *lev APPEND_LOCATION_ARGS)
+{
+    if (locked) {
+        ASSERT_vm_locking();
+    }
+    else {
+        rb_ractor_t *cr = GET_RACTOR();
+#if RACTOR_CHECK_MODE
+        // locking ractor and acquire VM lock will cause deadlock
+        VM_ASSERT(cr->locked_by != cr->self);
+#endif
+
+        // lock
+        rb_native_mutex_lock(&vm->ractor.sync.lock);
+        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
+        vm->ractor.sync.lock_owner = cr;
+
+        // barrier
+        while (vm->ractor.sync.barrier_waiting) {
+            unsigned int barrier_cnt = vm->ractor.sync.barrier_cnt;
+            rb_thread_t *th = GET_THREAD();
+            bool running;
+
+            // save the machine stack bounds so GC can scan this thread
+            // while it sleeps on the barrier
+            RB_GC_SAVE_MACHINE_CONTEXT(th);
+
+            if (rb_ractor_status_p(cr, ractor_running)) {
+                rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
+                running = true;
+            }
+            else {
+                running = false;
+            }
+            VM_ASSERT(rb_ractor_status_p(cr, ractor_blocking));
+
+            if (vm_barrier_finish_p(vm)) {
+                RUBY_DEBUG_LOG("wakeup barrier owner", 0);
+                rb_native_cond_signal(&vm->ractor.sync.barrier_cond);
+            }
+            else {
+                RUBY_DEBUG_LOG("wait for barrier finish", 0);
+            }
+
+            // wait for restart
+            while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
+                // clear lock_owner while asleep: the native cond wait
+                // releases the mutex, so we are not logically the owner
+                vm->ractor.sync.lock_owner = NULL;
+                rb_native_cond_wait(&cr->barrier_wait_cond, &vm->ractor.sync.lock);
+                VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
+                vm->ractor.sync.lock_owner = cr;
+            }
+
+            RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock", 0);
+
+            if (running) {
+                rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
+            }
+        }
+
+        VM_ASSERT(vm->ractor.sync.lock_rec == 0);
+        VM_ASSERT(vm->ractor.sync.lock_owner == cr);
+    }
+
+    vm->ractor.sync.lock_rec++;
+    *lev = vm->ractor.sync.lock_rec;
+
+    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%d", vm->ractor.sync.lock_rec, rb_ractor_id(vm->ractor.sync.lock_owner));
+}
+
+// Release one level of the VM lock. *lev must equal the depth returned by
+// the matching vm_lock_enter (asserted below). The native mutex is only
+// unlocked when the outermost level (rec == 0) is reached.
+static void
+vm_lock_leave(rb_vm_t *vm, unsigned int *lev APPEND_LOCATION_ARGS)
+{
+    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%d", vm->ractor.sync.lock_rec, rb_ractor_id(vm->ractor.sync.lock_owner));
+
+    ASSERT_vm_locking();
+    VM_ASSERT(vm->ractor.sync.lock_rec > 0);
+    VM_ASSERT(vm->ractor.sync.lock_rec == *lev);
+
+    vm->ractor.sync.lock_rec--;
+
+    if (vm->ractor.sync.lock_rec == 0) {
+        vm->ractor.sync.lock_owner = NULL;
+        rb_native_mutex_unlock(&vm->ractor.sync.lock);
+    }
+}
+
+// Re-entrant lock entry used by RB_VM_LOCK_ENTER(): acquires the lock if
+// not held, otherwise just bumps the recursion counter.
+void
+rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS)
+{
+    rb_vm_t *vm = GET_VM();
+    vm_lock_enter(vm, vm_locked(vm), lev APPEND_LOCATION_PARAMS);
+}
+
+// Counterpart of rb_vm_lock_enter_body(), used by RB_VM_LOCK_LEAVE().
+void
+rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS)
+{
+    vm_lock_leave(GET_VM(), lev APPEND_LOCATION_PARAMS);
+}
+
+// Non-recursive lock entry used by RB_VM_LOCK(): the caller must not
+// already hold the lock. Note the level slot passed here is lock_rec
+// itself, so the *lev bookkeeping is trivially consistent for this
+// non-nested lock/unlock pair.
+void
+rb_vm_lock_body(LOCATION_ARGS)
+{
+    rb_vm_t *vm = GET_VM();
+    ASSERT_vm_unlocking();
+    vm_lock_enter(vm, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
+}
+
+// Non-recursive unlock used by RB_VM_UNLOCK(): only valid at the
+// outermost level (lock_rec == 1), asserted below.
+void
+rb_vm_unlock_body(LOCATION_ARGS)
+{
+    rb_vm_t *vm = GET_VM();
+    ASSERT_vm_locking();
+    VM_ASSERT(vm->ractor.sync.lock_rec == 1);
+    vm_lock_leave(vm, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
+}
+
+// Wait on `cond` while holding the VM lock. The native cond wait releases
+// the underlying mutex, so the logical lock state (lock_rec/lock_owner) is
+// saved and cleared before the wait and restored afterwards.
+// msec == 0 means wait without timeout.
+static void
+vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
+{
+    ASSERT_vm_locking();
+    unsigned int lock_rec = vm->ractor.sync.lock_rec;
+    rb_ractor_t *cr = vm->ractor.sync.lock_owner;
+
+    vm->ractor.sync.lock_rec = 0;
+    vm->ractor.sync.lock_owner = NULL;
+    if (msec > 0) {
+        rb_native_cond_timedwait(cond, &vm->ractor.sync.lock, msec);
+    }
+    else {
+        rb_native_cond_wait(cond, &vm->ractor.sync.lock);
+    }
+    vm->ractor.sync.lock_rec = lock_rec;
+    vm->ractor.sync.lock_owner = cr;
+}
+
+// Untimed wait on `cond` under the VM lock (see vm_cond_wait).
+void
+rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond)
+{
+    vm_cond_wait(vm, cond, 0);
+}
+
+// Timed wait on `cond` under the VM lock; msec is the timeout in
+// milliseconds (0 degenerates to an untimed wait — see vm_cond_wait).
+void
+rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
+{
+    vm_cond_wait(vm, cond, msec);
+}
+
+// The barrier is complete when every living ractor has entered the
+// blocking state (blocking_cnt caught up with cnt).
+static bool
+vm_barrier_finish_p(rb_vm_t *vm)
+{
+    RUBY_DEBUG_LOG("cnt:%u living:%u blocking:%u",
+                   vm->ractor.sync.barrier_cnt,
+                   vm->ractor.cnt,
+                   vm->ractor.blocking_cnt);
+
+    VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
+    return vm->ractor.blocking_cnt == vm->ractor.cnt;
+}
+
+// Stop-the-world barrier: interrupt every other ractor, wait until all of
+// them report blocking, then release them. Caller must hold the VM lock.
+// In single-ractor mode there is nothing to synchronize with.
+void
+rb_vm_barrier(void)
+{
+    if (!rb_multi_ractor_p()) {
+        // no other ractors
+        return;
+    }
+    else {
+        rb_vm_t *vm = GET_VM();
+        VM_ASSERT(vm->ractor.sync.barrier_waiting == false);
+        ASSERT_vm_locking();
+
+        rb_ractor_t *cr = vm->ractor.sync.lock_owner;
+        VM_ASSERT(cr == GET_RACTOR());
+        VM_ASSERT(rb_ractor_status_p(cr, ractor_running));
+
+        // makes newly arriving lockers park in vm_lock_enter's barrier loop
+        vm->ractor.sync.barrier_waiting = true;
+
+        RUBY_DEBUG_LOG("barrier start. cnt:%u living:%u blocking:%u",
+                       vm->ractor.sync.barrier_cnt,
+                       vm->ractor.cnt,
+                       vm->ractor.blocking_cnt);
+
+        // count ourselves as blocking so vm_barrier_finish_p can reach cnt
+        rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
+
+        // send signal
+        rb_ractor_t *r;
+        list_for_each(&vm->ractor.set, r, vmlr_node) {
+            if (r != cr) {
+                rb_ractor_vm_barrier_interrupt_running_thread(r);
+            }
+        }
+
+        // wait
+        while (!vm_barrier_finish_p(vm)) {
+            rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_cond);
+        }
+
+        RUBY_DEBUG_LOG("cnt:%u barrier success", vm->ractor.sync.barrier_cnt);
+
+        rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
+
+        // bumping barrier_cnt is what releases the waiters parked in
+        // vm_lock_enter (they loop on barrier_cnt staying unchanged)
+        vm->ractor.sync.barrier_waiting = false;
+        vm->ractor.sync.barrier_cnt++;
+
+        list_for_each(&vm->ractor.set, r, vmlr_node) {
+            rb_native_cond_signal(&r->barrier_wait_cond);
+        }
+    }
+}
diff --git a/vm_sync.h b/vm_sync.h
new file mode 100644
index 0000000..e9cc274
--- /dev/null
+++ b/vm_sync.h
@@ -0,0 +1,96 @@
+
+#ifndef RUBY_VM_SYNC_H
+#define RUBY_VM_SYNC_H
+
+#include "vm_core.h"
+#include "vm_debug.h"
+
+#if USE_RUBY_DEBUG_LOG
+#define LOCATION_ARGS const char *file, int line
+#define LOCATION_PARAMS file, line
+#define APPEND_LOCATION_ARGS , const char *file, int line
+#define APPEND_LOCATION_PARAMS , file, line
+#else
+#define LOCATION_ARGS void
+#define LOCATION_PARAMS
+#define APPEND_LOCATION_ARGS
+#define APPEND_LOCATION_PARAMS
+#endif
+
+bool rb_vm_locked_p(void);
+void rb_vm_lock_body(LOCATION_ARGS);
+void rb_vm_unlock_body(LOCATION_ARGS);
+void rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS);
+void rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS);
+void rb_vm_barrier(void);
+void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
+void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
+
+extern bool ruby_multi_ractor;
+
+// True when the process is in multi-ractor mode. NOTE(review): the flag
+// appears to stay set even if ractor.cnt later returns to 1 (the else
+// branch allows cnt == 1) — presumably it is set once, elsewhere, when the
+// second ractor is created; confirm against ractor.c.
+static inline bool
+rb_multi_ractor_p(void)
+{
+    if (LIKELY(!ruby_multi_ractor)) {
+        // 0 on boot time.
+        VM_ASSERT(GET_VM()->ractor.cnt <= 1);
+        return false;
+    }
+    else {
+        // multi-ractor mode can run ractor.cnt == 1
+        return true;
+    }
+}
+
+// Take the VM lock (non-recursive); a no-op in single-ractor mode.
+static inline void
+rb_vm_lock(const char *file, int line)
+{
+    if (rb_multi_ractor_p()) {
+        rb_vm_lock_body(LOCATION_PARAMS);
+    }
+}
+
+// Release the VM lock taken by rb_vm_lock(); a no-op in single-ractor mode.
+static inline void
+rb_vm_unlock(const char *file, int line)
+{
+    if (rb_multi_ractor_p()) {
+        rb_vm_unlock_body(LOCATION_PARAMS);
+    }
+}
+
+// Recursive lock entry; *lev records the depth for the matching leave.
+// No-op in single-ractor mode.
+static inline void
+rb_vm_lock_enter(unsigned int *lev, const char *file, int line)
+{
+    if (rb_multi_ractor_p()) {
+        rb_vm_lock_enter_body(lev APPEND_LOCATION_PARAMS);
+    }
+}
+
+// Recursive lock exit paired with rb_vm_lock_enter(); no-op in
+// single-ractor mode.
+static inline void
+rb_vm_lock_leave(unsigned int *lev, const char *file, int line)
+{
+    if (rb_multi_ractor_p()) {
+        rb_vm_lock_leave_body(lev APPEND_LOCATION_PARAMS);
+    }
+}
+
+// Convenience wrappers that stamp call sites with __FILE__/__LINE__ for
+// the debug log.
+#define RB_VM_LOCKED_P() rb_vm_locked_p()
+
+#define RB_VM_LOCK() rb_vm_lock(__FILE__, __LINE__)
+#define RB_VM_UNLOCK() rb_vm_unlock(__FILE__, __LINE__)
+
+// NOTE(review): the trailing ';' inside these two macro bodies produces a
+// double semicolon (empty statement) when the macro is used with its own
+// ';' — harmless in statement position, but it breaks use in, e.g.,
+// `if (...) RB_VM_LOCK_ENTER_LEV(&lev); else ...`. Consider dropping it.
+#define RB_VM_LOCK_ENTER_LEV(levp) rb_vm_lock_enter(levp, __FILE__, __LINE__);
+#define RB_VM_LOCK_LEAVE_LEV(levp) rb_vm_lock_leave(levp, __FILE__, __LINE__);
+
+// ENTER opens a block scope declaring _lev and LEAVE closes it, so the two
+// macros must always be paired in the same function, like brackets.
+#define RB_VM_LOCK_ENTER() { unsigned int _lev; RB_VM_LOCK_ENTER_LEV(&_lev);
+#define RB_VM_LOCK_LEAVE() RB_VM_LOCK_LEAVE_LEV(&_lev); }
+
+#if VM_CHECK_MODE > 0
+void ASSERT_vm_locking(void);
+void ASSERT_vm_unlocking(void);
+#else
+#define ASSERT_vm_locking()
+#define ASSERT_vm_unlocking()
+#endif
+
+#endif // RUBY_VM_SYNC_H
diff --git a/vm_trace.c b/vm_trace.c
index 7b39779..50cdf5f 100644
--- a/vm_trace.c
+++ b/vm_trace.c
@@ -1653,7 +1653,8 @@ rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
list_add_tail(&vm->workqueue, &wq_job->jnode);
rb_nativethread_lock_unlock(&vm->workqueue_lock);
- RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
+ // TODO: current implementation affects only main ractor
+ RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(rb_vm_main_ractor_ec(vm));
return TRUE;
}