summaryrefslogtreecommitdiff
path: root/thread_pthread.c
diff options
context:
space:
mode:
Diffstat (limited to 'thread_pthread.c')
-rw-r--r--  thread_pthread.c  90
1 files changed, 74 insertions, 16 deletions
diff --git a/thread_pthread.c b/thread_pthread.c
index 82b5e362cc..6c8ec35671 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -592,7 +592,7 @@ thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *c
}
if (add_th) {
- if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
+ while (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
RUBY_DEBUG_LOG("barrier-wait");
ractor_sched_barrier_join_signal_locked(vm);
@@ -605,6 +605,7 @@ thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *c
ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
vm->ractor.sched.running_cnt++;
sched->is_running = true;
+ VM_ASSERT(!vm->ractor.sched.barrier_waiting);
}
if (add_timeslice_th) {
@@ -1320,7 +1321,7 @@ rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_fu
{
// ractor lock of cr is acquired
// r is sleeping status
- rb_thread_t *th = rb_ec_thread_ptr(ec);
+ rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
struct rb_thread_sched *sched = TH_SCHED(th);
cr->sync.wait.waiting_thread = th; // TODO: multi-thread
@@ -1555,6 +1556,7 @@ thread_sched_atfork(struct rb_thread_sched *sched)
}
vm->ractor.sched.running_cnt = 0;
+ rb_native_mutex_initialize(&vm->ractor.sched.lock);
// rb_native_cond_destroy(&vm->ractor.sched.cond);
rb_native_cond_initialize(&vm->ractor.sched.cond);
rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
@@ -2569,10 +2571,7 @@ ubf_wakeup_thread(rb_thread_t *th)
{
RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);
- int r = pthread_kill(th->nt->thread_id, SIGVTALRM);
- if (r != 0) {
- rb_bug_errno("pthread_kill", r);
- }
+ pthread_kill(th->nt->thread_id, SIGVTALRM);
}
static void
@@ -2875,6 +2874,17 @@ static void timer_thread_wakeup_thread(rb_thread_t *th);
#include "thread_pthread_mn.c"
+static rb_thread_t *
+thread_sched_waiting_thread(struct rb_thread_sched_waiting *w)
+{
+ if (w) {
+ return (rb_thread_t *)((size_t)w - offsetof(rb_thread_t, sched.waiting_reason));
+ }
+ else {
+ return NULL;
+ }
+}
+
static int
timer_thread_set_timeout(rb_vm_t *vm)
{
@@ -2907,7 +2917,9 @@ timer_thread_set_timeout(rb_vm_t *vm)
if (vm->ractor.sched.timeslice_wait_inf) {
rb_native_mutex_lock(&timer_th.waiting_lock);
{
- rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
+ struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
+ rb_thread_t *th = thread_sched_waiting_thread(w);
+
if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
rb_hrtime_t now = rb_hrtime_now();
rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
@@ -2957,22 +2969,22 @@ timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
static rb_thread_t *
timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
{
- rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
+ struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
- if (th != NULL &&
- (th->sched.waiting_reason.flags & thread_sched_waiting_timeout) &&
- timer_thread_check_exceed(th->sched.waiting_reason.data.timeout, now)) {
+ if (w != NULL &&
+ (w->flags & thread_sched_waiting_timeout) &&
+ timer_thread_check_exceed(w->data.timeout, now)) {
- RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
+ RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(thread_sched_waiting_thread(w)));
// delete from waiting list
- ccan_list_del_init(&th->sched.waiting_reason.node);
+ ccan_list_del_init(&w->node);
// setup result
- th->sched.waiting_reason.flags = thread_sched_waiting_none;
- th->sched.waiting_reason.data.result = 0;
+ w->flags = thread_sched_waiting_none;
+ w->data.result = 0;
- return th;
+ return thread_sched_waiting_thread(w);
}
return NULL;
@@ -3334,6 +3346,52 @@ native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
RUBY_DEBUG_LOG("wakeup");
}
+// fork read-write lock (only for pthread)
+static pthread_rwlock_t rb_thread_fork_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
+
+void
+rb_thread_release_fork_lock(void)
+{
+ int r;
+ if ((r = pthread_rwlock_unlock(&rb_thread_fork_rw_lock))) {
+ rb_bug_errno("pthread_rwlock_unlock", r);
+ }
+}
+
+void
+rb_thread_reset_fork_lock(void)
+{
+ int r;
+ if ((r = pthread_rwlock_destroy(&rb_thread_fork_rw_lock))) {
+ rb_bug_errno("pthread_rwlock_destroy", r);
+ }
+
+ if ((r = pthread_rwlock_init(&rb_thread_fork_rw_lock, NULL))) {
+ rb_bug_errno("pthread_rwlock_init", r);
+ }
+}
+
+void *
+rb_thread_prevent_fork(void *(*func)(void *), void *data)
+{
+ int r;
+ if ((r = pthread_rwlock_rdlock(&rb_thread_fork_rw_lock))) {
+ rb_bug_errno("pthread_rwlock_rdlock", r);
+ }
+ void *result = func(data);
+ rb_thread_release_fork_lock();
+ return result;
+}
+
+void
+rb_thread_acquire_fork_lock(void)
+{
+ int r;
+ if ((r = pthread_rwlock_wrlock(&rb_thread_fork_rw_lock))) {
+ rb_bug_errno("pthread_rwlock_wrlock", r);
+ }
+}
+
// thread internal event hooks (only for pthread)
struct rb_internal_thread_event_hook {