diff options
| author | Luke Gruber <luke.gruber@shopify.com> | 2025-12-12 14:47:43 -0500 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2025-12-12 14:47:43 -0500 |
| commit | 3add3db797c4216423fdaa4bef6e2ee3c7630303 (patch) | |
| tree | 449d300354e1aa957c0922cc1be9c6f25310d5ce | |
| parent | 5903ed7ba9ca60546aa0dd97e92b3d381b7918d3 (diff) | |
Fewer calls to `GET_EC()` and `GET_THREAD()` (#15506)
The changes are to `io.c` and `thread.c`.
I changed the API of two exported thread functions from `internal/thread.h` that
did not appear to be used by any C extensions:
* rb_thread_wait_for_single_fd
* rb_thread_io_wait
I didn't change the following exported internal function because it's
used in C extensions:
* rb_thread_fd_select
I added a comment to note that this function, although internal, is used
in C extensions.
| -rw-r--r-- | include/ruby/fiber/scheduler.h | 14 | ||||
| -rw-r--r-- | include/ruby/internal/intern/select.h | 2 | ||||
| -rw-r--r-- | internal/thread.h | 4 | ||||
| -rw-r--r-- | io.c | 41 | ||||
| -rw-r--r-- | scheduler.c | 11 | ||||
| -rw-r--r-- | thread.c | 29 |
6 files changed, 64 insertions(+), 37 deletions(-)
diff --git a/include/ruby/fiber/scheduler.h b/include/ruby/fiber/scheduler.h index 537a3a7bb2..4d764f68ae 100644 --- a/include/ruby/fiber/scheduler.h +++ b/include/ruby/fiber/scheduler.h @@ -27,6 +27,7 @@ RBIMPL_SYMBOL_EXPORT_BEGIN() #define RUBY_FIBER_SCHEDULER_VERSION 3 struct timeval; +struct rb_thread_struct; /** * Wrap a `ssize_t` and `int errno` into a single `VALUE`. This interface should @@ -118,7 +119,7 @@ VALUE rb_fiber_scheduler_current(void); /** * Identical to rb_fiber_scheduler_current(), except it queries for that of the - * passed thread instead of the implicit current one. + * passed thread value instead of the implicit current one. * * @param[in] thread Target thread. * @exception rb_eTypeError `thread` is not a thread. @@ -128,6 +129,17 @@ VALUE rb_fiber_scheduler_current(void); VALUE rb_fiber_scheduler_current_for_thread(VALUE thread); /** + * Identical to rb_fiber_scheduler_current_for_thread(), except it expects + * a threadptr instead of a thread value. + * + * @param[in] thread Target thread. + * @exception rb_eTypeError `thread` is not a thread. + * @retval RUBY_Qnil No scheduler is in effect in `thread`. + * @retval otherwise The scheduler that is in effect in `thread`. + */ +VALUE rb_fiber_scheduler_current_for_threadptr(struct rb_thread_struct *thread); + +/** * Converts the passed timeout to an expression that rb_fiber_scheduler_block() * etc. expects. * diff --git a/include/ruby/internal/intern/select.h b/include/ruby/internal/intern/select.h index 6ba84c6e63..ba75213618 100644 --- a/include/ruby/internal/intern/select.h +++ b/include/ruby/internal/intern/select.h @@ -72,6 +72,8 @@ struct timeval; * someone else, vastly varies among operating systems. You would better avoid * touching an fd from more than one threads. * + * NOTE: this function is used in native extensions, so change its API with care. 
+ * * @internal * * Although any file descriptors are possible here, it makes completely no diff --git a/internal/thread.h b/internal/thread.h index 21efeeebc0..ea891b4372 100644 --- a/internal/thread.h +++ b/internal/thread.h @@ -56,8 +56,8 @@ VALUE rb_mutex_owned_p(VALUE self); VALUE rb_exec_recursive_outer_mid(VALUE (*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h, ID mid); void ruby_mn_threads_params(void); -int rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout); -int rb_thread_wait_for_single_fd(int fd, int events, struct timeval * timeout); +int rb_thread_io_wait(struct rb_thread_struct *th, struct rb_io *io, int events, struct timeval * timeout); +int rb_thread_wait_for_single_fd(struct rb_thread_struct *th, int fd, int events, struct timeval * timeout); size_t rb_thread_io_close_interrupt(struct rb_io *); void rb_thread_io_close_wait(struct rb_io *); @@ -1291,7 +1291,8 @@ internal_writev_func(void *ptr) static ssize_t rb_io_read_memory(rb_io_t *fptr, void *buf, size_t count) { - VALUE scheduler = rb_fiber_scheduler_current(); + rb_thread_t *th = GET_THREAD(); + VALUE scheduler = rb_fiber_scheduler_current_for_threadptr(th); if (scheduler != Qnil) { VALUE result = rb_fiber_scheduler_io_read_memory(scheduler, fptr->self, buf, count, 0); @@ -1301,7 +1302,7 @@ rb_io_read_memory(rb_io_t *fptr, void *buf, size_t count) } struct io_internal_read_struct iis = { - .th = rb_thread_current(), + .th = th->self, .fptr = fptr, .nonblock = 0, .fd = fptr->fd, @@ -1324,7 +1325,8 @@ rb_io_read_memory(rb_io_t *fptr, void *buf, size_t count) static ssize_t rb_io_write_memory(rb_io_t *fptr, const void *buf, size_t count) { - VALUE scheduler = rb_fiber_scheduler_current(); + rb_thread_t *th = GET_THREAD(); + VALUE scheduler = rb_fiber_scheduler_current_for_threadptr(th); if (scheduler != Qnil) { VALUE result = rb_fiber_scheduler_io_write_memory(scheduler, fptr->self, buf, count, 0); @@ -1334,7 +1336,7 @@ rb_io_write_memory(rb_io_t *fptr, const void *buf, 
size_t count) } struct io_internal_write_struct iis = { - .th = rb_thread_current(), + .th = th->self, .fptr = fptr, .nonblock = 0, .fd = fptr->fd, @@ -1360,7 +1362,9 @@ rb_writev_internal(rb_io_t *fptr, const struct iovec *iov, int iovcnt) { if (!iovcnt) return 0; - VALUE scheduler = rb_fiber_scheduler_current(); + rb_thread_t *th = GET_THREAD(); + + VALUE scheduler = rb_fiber_scheduler_current_for_threadptr(th); if (scheduler != Qnil) { // This path assumes at least one `iov`: VALUE result = rb_fiber_scheduler_io_write_memory(scheduler, fptr->self, iov[0].iov_base, iov[0].iov_len, 0); @@ -1371,7 +1375,7 @@ rb_writev_internal(rb_io_t *fptr, const struct iovec *iov, int iovcnt) } struct io_internal_writev_struct iis = { - .th = rb_thread_current(), + .th = th->self, .fptr = fptr, .nonblock = 0, .fd = fptr->fd, @@ -1453,7 +1457,8 @@ io_fflush(rb_io_t *fptr) VALUE rb_io_wait(VALUE io, VALUE events, VALUE timeout) { - VALUE scheduler = rb_fiber_scheduler_current(); + rb_thread_t *th = GET_THREAD(); + VALUE scheduler = rb_fiber_scheduler_current_for_threadptr(th); if (scheduler != Qnil) { return rb_fiber_scheduler_io_wait(scheduler, io, events, timeout); @@ -1474,7 +1479,7 @@ rb_io_wait(VALUE io, VALUE events, VALUE timeout) tv = &tv_storage; } - int ready = rb_thread_io_wait(fptr, RB_NUM2INT(events), tv); + int ready = rb_thread_io_wait(th, fptr, RB_NUM2INT(events), tv); if (ready < 0) { rb_sys_fail(0); @@ -1498,17 +1503,15 @@ io_from_fd(int fd) } static int -io_wait_for_single_fd(int fd, int events, struct timeval *timeout) +io_wait_for_single_fd(int fd, int events, struct timeval *timeout, rb_thread_t *th, VALUE scheduler) { - VALUE scheduler = rb_fiber_scheduler_current(); - if (scheduler != Qnil) { return RTEST( rb_fiber_scheduler_io_wait(scheduler, io_from_fd(fd), RB_INT2NUM(events), rb_fiber_scheduler_make_timeout(timeout)) ); } - return rb_thread_wait_for_single_fd(fd, events, timeout); + return rb_thread_wait_for_single_fd(th, fd, events, timeout); } int @@ 
-1516,7 +1519,8 @@ rb_io_wait_readable(int f) { io_fd_check_closed(f); - VALUE scheduler = rb_fiber_scheduler_current(); + rb_thread_t *th = GET_THREAD(); + VALUE scheduler = rb_fiber_scheduler_current_for_threadptr(th); switch (errno) { case EINTR: @@ -1536,7 +1540,7 @@ rb_io_wait_readable(int f) ); } else { - io_wait_for_single_fd(f, RUBY_IO_READABLE, NULL); + io_wait_for_single_fd(f, RUBY_IO_READABLE, NULL, th, scheduler); } return TRUE; @@ -1550,7 +1554,8 @@ rb_io_wait_writable(int f) { io_fd_check_closed(f); - VALUE scheduler = rb_fiber_scheduler_current(); + rb_thread_t *th = GET_THREAD(); + VALUE scheduler = rb_fiber_scheduler_current_for_threadptr(th); switch (errno) { case EINTR: @@ -1579,7 +1584,7 @@ rb_io_wait_writable(int f) ); } else { - io_wait_for_single_fd(f, RUBY_IO_WRITABLE, NULL); + io_wait_for_single_fd(f, RUBY_IO_WRITABLE, NULL, th, scheduler); } return TRUE; @@ -1591,7 +1596,9 @@ rb_io_wait_writable(int f) int rb_wait_for_single_fd(int fd, int events, struct timeval *timeout) { - return io_wait_for_single_fd(fd, events, timeout); + rb_thread_t *th = GET_THREAD(); + VALUE scheduler = rb_fiber_scheduler_current_for_threadptr(th); + return io_wait_for_single_fd(fd, events, timeout, th, scheduler); } int diff --git a/scheduler.c b/scheduler.c index 63c22b55aa..592bdcd1ef 100644 --- a/scheduler.c +++ b/scheduler.c @@ -451,7 +451,7 @@ rb_fiber_scheduler_set(VALUE scheduler) } static VALUE -rb_fiber_scheduler_current_for_threadptr(rb_thread_t *thread) +fiber_scheduler_current_for_threadptr(rb_thread_t *thread) { RUBY_ASSERT(thread); @@ -467,13 +467,18 @@ VALUE rb_fiber_scheduler_current(void) { RUBY_ASSERT(ruby_thread_has_gvl_p()); - return rb_fiber_scheduler_current_for_threadptr(GET_THREAD()); + return fiber_scheduler_current_for_threadptr(GET_THREAD()); } // This function is allowed to be called without holding the GVL. 
VALUE rb_fiber_scheduler_current_for_thread(VALUE thread) { - return rb_fiber_scheduler_current_for_threadptr(rb_thread_ptr(thread)); + return fiber_scheduler_current_for_threadptr(rb_thread_ptr(thread)); +} + +VALUE rb_fiber_scheduler_current_for_threadptr(rb_thread_t *thread) +{ + return fiber_scheduler_current_for_threadptr(thread); } /* @@ -226,7 +226,7 @@ vm_check_ints_blocking(rb_execution_context_t *ec) // When a signal is received, we yield to the scheduler as soon as possible: if (result || RUBY_VM_INTERRUPTED(ec)) { - VALUE scheduler = rb_fiber_scheduler_current(); + VALUE scheduler = rb_fiber_scheduler_current_for_threadptr(th); if (scheduler != Qnil) { rb_fiber_scheduler_yield(scheduler); } @@ -1075,7 +1075,7 @@ thread_join_sleep(VALUE arg) } while (!thread_finished(target_th)) { - VALUE scheduler = rb_fiber_scheduler_current(); + VALUE scheduler = rb_fiber_scheduler_current_for_threadptr(th); if (!limit) { if (scheduler != Qnil) { @@ -1424,17 +1424,18 @@ rb_thread_sleep_deadly(void) static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end) { - VALUE scheduler = rb_fiber_scheduler_current(); + rb_thread_t *th = GET_THREAD(); + VALUE scheduler = rb_fiber_scheduler_current_for_threadptr(th); if (scheduler != Qnil) { rb_fiber_scheduler_block(scheduler, blocker, timeout); } else { RUBY_DEBUG_LOG("..."); if (end) { - sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK); + sleep_hrtime_until(th, end, SLEEP_SPURIOUS_CHECK); } else { - sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE); + sleep_forever(th, SLEEP_DEADLOCKABLE); } } } @@ -4601,7 +4602,7 @@ wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t n * returns a mask of events */ static int -thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout) +thread_io_wait(rb_thread_t *th, struct rb_io *io, int fd, int events, struct timeval *timeout) { struct pollfd fds[1] = {{ .fd = fd, @@ -4614,8 +4615,8 @@ 
thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout) enum ruby_tag_type state; volatile int lerrno; - rb_execution_context_t *ec = GET_EC(); - rb_thread_t *th = rb_ec_thread_ptr(ec); + RUBY_ASSERT(th); + rb_execution_context_t *ec = th->ec; if (io) { blocking_operation.ec = ec; @@ -4749,7 +4750,7 @@ init_set_fd(int fd, rb_fdset_t *fds) } static int -thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout) +thread_io_wait(rb_thread_t *th, struct rb_io *io, int fd, int events, struct timeval *timeout) { rb_fdset_t rfds, wfds, efds; struct select_args args; @@ -4758,7 +4759,7 @@ thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout) struct rb_io_blocking_operation blocking_operation; if (io) { args.io = io; - blocking_operation.ec = GET_EC(); + blocking_operation.ec = th->ec; rb_io_blocking_operation_enter(io, &blocking_operation); args.blocking_operation = &blocking_operation; } @@ -4783,15 +4784,15 @@ thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout) #endif /* ! USE_POLL */ int -rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout) +rb_thread_wait_for_single_fd(rb_thread_t *th, int fd, int events, struct timeval *timeout) { - return thread_io_wait(NULL, fd, events, timeout); + return thread_io_wait(th, NULL, fd, events, timeout); } int -rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout) +rb_thread_io_wait(rb_thread_t *th, struct rb_io *io, int events, struct timeval * timeout) { - return thread_io_wait(io, io->fd, events, timeout); + return thread_io_wait(th, io, io->fd, events, timeout); } /* |
