summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r-- test/ruby/test_thread.rb | 28
-rw-r--r-- thread_pthread.c         | 12
-rw-r--r-- thread_pthread_mn.c      |  9
3 files changed, 39 insertions(+), 10 deletions(-)
diff --git a/test/ruby/test_thread.rb b/test/ruby/test_thread.rb
index c08d41cb86..e4d6e1ad62 100644
--- a/test/ruby/test_thread.rb
+++ b/test/ruby/test_thread.rb
@@ -1649,4 +1649,32 @@ q.pop
end
end;
end
+
+ # [Bug #21926]
+ def test_thread_join_during_finalizers
+ assert_separately([], "#{<<~"begin;"}\n#{<<~'end;'}", timeout: 30)
+ begin;
+ require 'open3'
+
+ class ProcessWrapper
+ def initialize
+ @stdin, @stdout, @stderr, @wait_thread = Open3.popen3("cat") # hangs until we close our stdin side
+ ObjectSpace.define_finalizer(self, self.class.make_finalizer(@stdin, @stdout, @stderr, @wait_thread))
+ end
+
+ def self.make_finalizer(stdin, stdout, stderr, wait_thread)
+ proc do
+ stdin.close rescue nil
+ stdout.close rescue nil
+ stderr.close rescue nil
+ wait_thread.value
+ end
+ end
+ end
+
+ 50.times { ProcessWrapper.new }
+ GC.stress = true
+ 1000.times { Object.new }
+ end;
+ end
end
diff --git a/thread_pthread.c b/thread_pthread.c
index 9c7754067b..df2940d5cf 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -1099,23 +1099,21 @@ thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t
RB_VM_SAVE_MACHINE_CONTEXT(th);
- if (ubf_set(th, ubf_waiting, (void *)th)) {
- return;
- }
RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
thread_sched_lock(sched, th);
{
- if (!RUBY_VM_INTERRUPTED(th->ec)) {
+ // NOTE: there's a lock ordering inversion here with the ubf call, but it's benign.
+ if (ubf_set(th, ubf_waiting, (void *)th)) {
+ RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
+ }
+ else {
bool can_direct_transfer = !th_has_dedicated_nt(th);
// NOTE: th->status is set before and after this sleep outside of this function in `sleep_forever`
thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
thread_sched_wait_running_turn(sched, th, can_direct_transfer);
}
- else {
- RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
- }
}
thread_sched_unlock(sched, th);
diff --git a/thread_pthread_mn.c b/thread_pthread_mn.c
index 9651baf51f..0d774c67b5 100644
--- a/thread_pthread_mn.c
+++ b/thread_pthread_mn.c
@@ -67,12 +67,15 @@ thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd,
uint32_t event_serial = ++th->sched.event_serial; // overflow is okay
- if (ubf_set(th, ubf_event_waiting, (void *)th)) {
- return false;
- }
thread_sched_lock(sched, th);
{
+ // NOTE: there's a lock ordering inversion here with the ubf call, but it's benign.
+ if (ubf_set(th, ubf_event_waiting, (void *)th)) {
+ thread_sched_unlock(sched, th);
+ return false;
+ }
+
if (timer_thread_register_waiting(th, fd, events, rel, event_serial)) {
RUBY_DEBUG_LOG("wait fd:%d", fd);