summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authornagachika <nagachika@ruby-lang.org>2025-11-02 14:50:06 +0900
committernagachika <nagachika@ruby-lang.org>2025-11-02 14:50:06 +0900
commit609f957ebede1a062b1f34515382e4c306f77444 (patch)
tree837167a2cdfbc04df2497ba864f23b76dd763f5d
parent780c2b9853219b56a18c2114ffb43801780b346a (diff)
merge revision(s) ffc69eec0a5746d48ef3cf649639c67631a6a609, 0531fa4d6fea100f69f0bac9e03973fe49ecd570: [Backport #21560]
[PATCH] `struct rb_thread_sched_waiting` Introduce `struct rb_thread_sched_waiting` and `timer_th.waiting` can contain other than `rb_thread_t`. [PATCH] mn timer thread: force wakeups for timeouts
-rw-r--r--thread_pthread.c56
-rw-r--r--thread_pthread.h50
-rw-r--r--thread_pthread_mn.c44
-rw-r--r--version.h2
4 files changed, 88 insertions, 64 deletions
diff --git a/thread_pthread.c b/thread_pthread.c
index 305cbdbec1..1d26ce3d10 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -2821,6 +2821,17 @@ static void timer_thread_wakeup_thread(rb_thread_t *th);
#include "thread_pthread_mn.c"
+static rb_thread_t *
+thread_sched_waiting_thread(struct rb_thread_sched_waiting *w)
+{
+ if (w) {
+ return (rb_thread_t *)((size_t)w - offsetof(rb_thread_t, sched.waiting_reason));
+ }
+ else {
+ return NULL;
+ }
+}
+
static int
timer_thread_set_timeout(rb_vm_t *vm)
{
@@ -2850,22 +2861,29 @@ timer_thread_set_timeout(rb_vm_t *vm)
}
ractor_sched_unlock(vm, NULL);
- if (vm->ractor.sched.timeslice_wait_inf) {
- rb_native_mutex_lock(&timer_th.waiting_lock);
- {
- rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
- if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
- rb_hrtime_t now = rb_hrtime_now();
- rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
+ // Always check waiting threads to find minimum timeout
+ // even when scheduler has work (grq_cnt > 0)
+ rb_native_mutex_lock(&timer_th.waiting_lock);
+ {
+ struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
+ rb_thread_t *th = thread_sched_waiting_thread(w);
- RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);
+ if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
+ rb_hrtime_t now = rb_hrtime_now();
+ rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
- // TODO: overflow?
- timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); // ms
+ RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);
+
+ // TODO: overflow?
+ int thread_timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); // ms
+
+ // Use minimum of scheduler timeout and thread sleep timeout
+ if (timeout < 0 || thread_timeout < timeout) {
+ timeout = thread_timeout;
}
}
- rb_native_mutex_unlock(&timer_th.waiting_lock);
}
+ rb_native_mutex_unlock(&timer_th.waiting_lock);
RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);
@@ -2903,22 +2921,22 @@ timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
static rb_thread_t *
timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
{
- rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
+ struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
- if (th != NULL &&
- (th->sched.waiting_reason.flags & thread_sched_waiting_timeout) &&
- timer_thread_check_exceed(th->sched.waiting_reason.data.timeout, now)) {
+ if (w != NULL &&
+ (w->flags & thread_sched_waiting_timeout) &&
+ timer_thread_check_exceed(w->data.timeout, now)) {
RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(thread_sched_waiting_thread(w)));
// delete from waiting list
- ccan_list_del_init(&th->sched.waiting_reason.node);
+ ccan_list_del_init(&w->node);
// setup result
- th->sched.waiting_reason.flags = thread_sched_waiting_none;
- th->sched.waiting_reason.data.result = 0;
+ w->flags = thread_sched_waiting_none;
+ w->data.result = 0;
- return th;
+ return thread_sched_waiting_thread(w);
}
return NULL;
diff --git a/thread_pthread.h b/thread_pthread.h
index a10e788950..26a88ca933 100644
--- a/thread_pthread.h
+++ b/thread_pthread.h
@@ -17,6 +17,31 @@
#define RB_NATIVETHREAD_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
#define RB_NATIVETHREAD_COND_INIT PTHREAD_COND_INITIALIZER
+// this data should be protected by timer_th.waiting_lock
+struct rb_thread_sched_waiting {
+ enum thread_sched_waiting_flag {
+ thread_sched_waiting_none = 0x00,
+ thread_sched_waiting_timeout = 0x01,
+ thread_sched_waiting_io_read = 0x02,
+ thread_sched_waiting_io_write = 0x08,
+ thread_sched_waiting_io_force = 0x40, // ignore readable
+ } flags;
+
+ struct {
+ // should be compat with hrtime.h
+#ifdef MY_RUBY_BUILD_MAY_TIME_TRAVEL
+ int128_t timeout;
+#else
+ uint64_t timeout;
+#endif
+ int fd; // -1 for timeout only
+ int result;
+ } data;
+
+ // connected to timer_th.waiting
+ struct ccan_list_node node;
+};
+
// per-Thead scheduler helper data
struct rb_thread_sched_item {
struct {
@@ -38,30 +63,7 @@ struct rb_thread_sched_item {
struct ccan_list_node zombie_threads;
} node;
- // this data should be protected by timer_th.waiting_lock
- struct {
- enum thread_sched_waiting_flag {
- thread_sched_waiting_none = 0x00,
- thread_sched_waiting_timeout = 0x01,
- thread_sched_waiting_io_read = 0x02,
- thread_sched_waiting_io_write = 0x08,
- thread_sched_waiting_io_force = 0x40, // ignore readable
- } flags;
-
- struct {
- // should be compat with hrtime.h
-#ifdef MY_RUBY_BUILD_MAY_TIME_TRAVEL
- int128_t timeout;
-#else
- uint64_t timeout;
-#endif
- int fd; // -1 for timeout only
- int result;
- } data;
-
- // connected to timer_th.waiting
- struct ccan_list_node node;
- } waiting_reason;
+ struct rb_thread_sched_waiting waiting_reason;
bool finished;
bool malloc_stack;
diff --git a/thread_pthread_mn.c b/thread_pthread_mn.c
index c8c7d9f173..a70167ca29 100644
--- a/thread_pthread_mn.c
+++ b/thread_pthread_mn.c
@@ -533,15 +533,18 @@ static void
verify_waiting_list(void)
{
#if VM_CHECK_MODE > 0
- rb_thread_t *wth, *prev_wth = NULL;
- ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) {
+ struct rb_thread_sched_waiting *w, *prev_w = NULL;
+
+ // waiting list's timeout order should be [1, 2, 3, ..., 0, 0, 0]
+
+ ccan_list_for_each(&timer_th.waiting, w, node) {
// fprintf(stderr, "verify_waiting_list w:%p abs:%lu\n", (void *)w, (unsigned long)w->data.timeout);
- if (prev_wth) {
- rb_hrtime_t timeout = wth->sched.waiting_reason.data.timeout;
- rb_hrtime_t prev_timeout = prev_wth->sched.waiting_reason.data.timeout;
+ if (prev_w) {
+ rb_hrtime_t timeout = w->data.timeout;
+ rb_hrtime_t prev_timeout = prev_w->data.timeout;
VM_ASSERT(timeout == 0 || prev_timeout <= timeout);
}
- prev_wth = wth;
+ prev_w = w;
}
#endif
}
@@ -619,16 +622,17 @@ kqueue_unregister_waiting(int fd, enum thread_sched_waiting_flag flags)
static bool
kqueue_already_registered(int fd)
{
- rb_thread_t *wth, *found_wth = NULL;
- ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) {
+ struct rb_thread_sched_waiting *w, *found_w = NULL;
+
+ ccan_list_for_each(&timer_th.waiting, w, node) {
// Similar to EEXIST in epoll_ctl, but more strict because it checks fd rather than flags
// for simplicity
- if (wth->sched.waiting_reason.flags && wth->sched.waiting_reason.data.fd == fd) {
- found_wth = wth;
+ if (w->flags && w->data.fd == fd) {
+ found_w = w;
break;
}
}
- return found_wth != NULL;
+ return found_w != NULL;
}
#endif // HAVE_SYS_EVENT_H
@@ -773,20 +777,20 @@ timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting
VM_ASSERT(flags & thread_sched_waiting_timeout);
// insert th to sorted list (TODO: O(n))
- rb_thread_t *wth, *prev_wth = NULL;
+ struct rb_thread_sched_waiting *w, *prev_w = NULL;
- ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) {
- if ((wth->sched.waiting_reason.flags & thread_sched_waiting_timeout) &&
- wth->sched.waiting_reason.data.timeout < abs) {
- prev_wth = wth;
+ ccan_list_for_each(&timer_th.waiting, w, node) {
+ if ((w->flags & thread_sched_waiting_timeout) &&
+ w->data.timeout < abs) {
+ prev_w = w;
}
else {
break;
}
}
- if (prev_wth) {
- ccan_list_add_after(&timer_th.waiting, &prev_wth->sched.waiting_reason.node, &th->sched.waiting_reason.node);
+ if (prev_w) {
+ ccan_list_add_after(&timer_th.waiting, &prev_w->node, &th->sched.waiting_reason.node);
}
else {
ccan_list_add(&timer_th.waiting, &th->sched.waiting_reason.node);
@@ -794,8 +798,8 @@ timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting
verify_waiting_list();
- // update timeout seconds
- timer_thread_wakeup();
+ // update timeout seconds; force wake so timer thread notices short deadlines
+ timer_thread_wakeup_force();
}
}
else {
diff --git a/version.h b/version.h
index 346e87c202..388a15b796 100644
--- a/version.h
+++ b/version.h
@@ -11,7 +11,7 @@
# define RUBY_VERSION_MINOR RUBY_API_VERSION_MINOR
#define RUBY_VERSION_TEENY 10
#define RUBY_RELEASE_DATE RUBY_RELEASE_YEAR_STR"-"RUBY_RELEASE_MONTH_STR"-"RUBY_RELEASE_DAY_STR
-#define RUBY_PATCHLEVEL 185
+#define RUBY_PATCHLEVEL 186
#include "ruby/version.h"
#include "ruby/internal/abi.h"