From 054a412d540e7ed2de63d68da753f585ea6616c3 Mon Sep 17 00:00:00 2001 From: normal Date: Wed, 27 Jun 2018 03:14:30 +0000 Subject: hijack SIGCHLD handler for internal use Use a global SIGCHLD handler to guard all callers of rb_waitpid. To work safely with multi-threaded programs, we introduce a VM-wide waitpid_lock to be acquired BEFORE fork/vfork spawns the process. This is to be combined with the new ruby_waitpid_locked function used by mjit.c in a non-Ruby thread. Ruby-level SIGCHLD handlers registered with Signal.trap(:CHLD) continue to work as before and there should be no regressions in any existing use cases. Splitting the wait queues for PID > 0 and groups (PID <= 0) ensures we favor PID > 0 callers. The disabling of SIGCHLD in rb_f_system is no longer necessary, as we use deferred signal handling and no longer make ANY blocking waitpid syscalls in other threads which could "beat" the waitpid call made by rb_f_system. We prevent SIGCHLD from firing in normal Ruby Threads and only enable it in the timer-thread, to prevent spurious wakeups in test/-ext-/gvl/test_last_thread.rb with MJIT enabled. I've tried to guard as much of the code for RUBY_SIGCHLD==0 using C "if" statements rather than CPP "#if" so as to reduce the likelihood of portability problems as the compiler will see more code. We also work to suppress false-positives from Process.wait(-1, Process::WNOHANG) to quiet warnings from spec/ruby/core/process/wait2_spec.rb with MJIT enabled. Lastly, we must implement rb_grantpt for ext/pty. We need an MJIT-compatible way of supporting grantpt(3) which may spawn the `pt_chown' binary and call waitpid(2) on it. 
[ruby-core:87605] [Ruby trunk Bug#14867] git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63758 b2dd03c8-39d4-4d8f-98ff-823fe69b080e --- thread.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) (limited to 'thread.c') diff --git a/thread.c b/thread.c index 5a5e32379d..c58d3e0b9d 100644 --- a/thread.c +++ b/thread.c @@ -413,6 +413,10 @@ rb_vm_gvl_destroy(rb_vm_t *vm) gvl_release(vm); gvl_destroy(vm); rb_native_mutex_destroy(&vm->thread_destruct_lock); + if (0) { + /* may be held by running threads */ + rb_native_mutex_destroy(&vm->waitpid_lock); + } } void @@ -4131,6 +4135,9 @@ rb_gc_set_stack_end(VALUE **stack_end_p) #endif +/* signal.c */ +void ruby_sigchld_handler(rb_vm_t *); + /* * */ @@ -4163,6 +4170,7 @@ timer_thread_function(void *arg) rb_native_mutex_unlock(&vm->thread_destruct_lock); /* check signal */ + ruby_sigchld_handler(vm); rb_threadptr_check_signal(vm->main_thread); #if 0 @@ -4247,6 +4255,9 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r } rb_vm_living_threads_init(vm); rb_vm_living_threads_insert(vm, th); + + /* may be held by MJIT threads in parent */ + rb_native_mutex_initialize(&vm->waitpid_lock); vm->fork_gen++; vm->sleeper = 0; @@ -4999,6 +5010,7 @@ Init_Thread(void) gvl_init(th->vm); gvl_acquire(th->vm, th); rb_native_mutex_initialize(&th->vm->thread_destruct_lock); + rb_native_mutex_initialize(&th->vm->waitpid_lock); rb_native_mutex_initialize(&th->interrupt_lock); th->pending_interrupt_queue = rb_ary_tmp_new(0); @@ -5302,3 +5314,25 @@ rb_uninterruptible(VALUE (*b_proc)(ANYARGS), VALUE data) return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack); } + +#ifndef USE_NATIVE_SLEEP_COND +# define USE_NATIVE_SLEEP_COND (0) +#endif + +#if !USE_NATIVE_SLEEP_COND +rb_nativethread_cond_t * +rb_sleep_cond_get(const rb_execution_context_t *ec) +{ + rb_nativethread_cond_t *cond = ALLOC(rb_nativethread_cond_t); + rb_native_cond_initialize(cond); + + return cond; +} + 
+void +rb_sleep_cond_put(rb_nativethread_cond_t *cond) +{ + rb_native_cond_destroy(cond); + xfree(cond); +} +#endif /* !USE_NATIVE_SLEEP_COND */ -- cgit v1.2.3