diff options
author | JP Camara <jp@jpcamara.com> | 2023-12-06 20:01:14 -0500 |
---|---|---|
committer | Koichi Sasada <ko1@atdot.net> | 2023-12-20 16:23:38 +0900 |
commit | 8782e02138e6fe18b6c0dcc29bb877d6cdae57e5 (patch) | |
tree | b77e899dc4cd1ac14c496bc0fdcf034261451667 /thread.c | |
parent | 7ef90b3978dad057ad6360a94d2d64e8ca5e9c38 (diff) |
KQueue support for M:N threads
* Allows macOS users to use M:N threads (and technically FreeBSD, though it has not been verified on FreeBSD)
* Include sys/event.h header check for macros, and include sys/event.h when present
* Rename epoll_fd to more generic kq_fd (Kernel event Queue) for use by both epoll and kqueue
* MAP_STACK is not available on macOS so conditionally apply it to mmap flags
* Set fd to close on exec
* Log debug messages specific to kqueue and epoll on creation
* close_invalidate raises an error for the kqueue fd on child process fork. It's unclear right now if that's a bug, or if it's kqueue-specific behavior
Use kq with rb_thread_wait_for_single_fd
* Only platforms with `USE_POLL` (linux) had changes applied to take advantage of kernel event queues. The same changes needed to be applied to the `select` path so that kqueue could be used there as well
* Clean up kqueue specific code and make sure only flags that were actually set are removed (or an error is raised)
* Also handle kevent specific errnos, since most don't apply from epoll to kqueue
* Use the more platform standard close-on-exec approach of `fcntl` and `FD_CLOEXEC`. The io-event gem uses `ioctl`, but fcntl seems to be the recommended choice. It is also what Go, Bun, and Libuv use
* We're making changes in this file anyway - may as well fix a couple of spelling mistakes while here
Make sure FD_CLOEXEC carries over in dup
* Otherwise the kqueue descriptor should have FD_CLOEXEC, but doesn't and fails in assert_close_on_exec
Diffstat (limited to 'thread.c')
-rw-r--r-- | thread.c | 44 |
1 files changed, 32 insertions, 12 deletions
@@ -4265,6 +4265,27 @@ rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set); } +#ifdef RUBY_THREAD_PTHREAD_H + +static bool +thread_sched_wait_events_timeval(int fd, int events, struct timeval *timeout) +{ + rb_thread_t *th = GET_THREAD(); + rb_hrtime_t rel, *prel; + + if (timeout) { + rel = rb_timeval2hrtime(timeout); + prel = &rel; + } + else { + prel = NULL; + } + + return thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel); +} + +#endif + #ifdef USE_POLL /* The same with linux kernel. TODO: make platform independent definition. */ @@ -4294,18 +4315,8 @@ rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout) wfd.busy = NULL; #ifdef RUBY_THREAD_PTHREAD_H - if (!th->nt->dedicated) { - rb_hrtime_t rel, *prel; - - if (timeout) { - rel = rb_timeval2hrtime(timeout); - prel = &rel; - } - else { - prel = NULL; - } - - if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) { + if (!th_has_dedicated_nt(th)) { + if (thread_sched_wait_events_timeval(fd, events, timeout)) { return 0; // timeout } } @@ -4445,6 +4456,15 @@ rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout) int r; VALUE ptr = (VALUE)&args; +#ifdef RUBY_THREAD_PTHREAD_H + rb_thread_t *th = GET_THREAD(); + if (!th_has_dedicated_nt(th)) { + if (thread_sched_wait_events_timeval(fd, events, timeout)) { + return 0; // timeout + } + } +#endif + args.as.fd = fd; args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL; args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL; |