author    Samuel Williams <samuel.williams@oriontransfer.co.nz>  2020-11-08 16:17:04 +1300
committer Samuel Williams <samuel.williams@oriontransfer.co.nz>  2020-11-08 20:40:52 +1300
commit    c39984ec5cc6dc94eb0c0e4169e7160f71e89240 (patch)
tree      188b0990b63aaa9ed19442d0f6073a6f206bb899 /thread_sync.c
parent    f73135233b1437eb2cc6a5c8c732adca1dbd41f2 (diff)
Tidy up bookkeeping for `thread->keeping_mutexes`.
When a scheduler is present, it's entirely possible for `th->keeping_mutexes` to be updated while enumerating the waitq. Therefore it must be fetched only during the removal operation.
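For illustration only, here is a minimal, self-contained sketch of the pointer-to-pointer discipline the new helpers use. The `sketch_*` names are hypothetical stand-ins, not Ruby's real structures; the point is that removal re-reads the list head at removal time rather than relying on a cursor set up before waiters are woken.

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-ins only; not the real rb_mutex_t / rb_thread_t. */
typedef struct sketch_mutex {
    struct sketch_mutex *next_mutex;
} sketch_mutex_t;

typedef struct sketch_thread {
    sketch_mutex_t *keeping_mutexes;
} sketch_thread_t;

/* Push a mutex onto the head of the thread's list. */
static void
sketch_insert(sketch_thread_t *th, sketch_mutex_t *m)
{
    m->next_mutex = th->keeping_mutexes;
    th->keeping_mutexes = m;
}

/* Unlink a mutex by walking pointer-to-pointer. The head
 * (&th->keeping_mutexes) is fetched here, at removal time, so any
 * insertions that happened while waiters were being woken are seen. */
static void
sketch_remove(sketch_thread_t *th, sketch_mutex_t *m)
{
    sketch_mutex_t **cursor = &th->keeping_mutexes;
    while (*cursor && *cursor != m) {
        cursor = &(*cursor)->next_mutex;
    }
    if (*cursor) {
        *cursor = m->next_mutex;
        m->next_mutex = NULL;
    }
}

int
main(void)
{
    sketch_thread_t th = { NULL };
    sketch_mutex_t a = { NULL }, b = { NULL };

    sketch_insert(&th, &a);
    /* A scheduler wake-up could run here and push another mutex: */
    sketch_insert(&th, &b);

    /* Removing `a` still succeeds, because the head is re-read now. */
    sketch_remove(&th, &a);
    assert(th.keeping_mutexes == &b);
    assert(b.next_mutex == NULL);
    return 0;
}

Note also that the new walk is guarded: unlike the removed `while (*th_mutex != mutex)` loop, `thread_mutex_remove` checks `*keeping_mutexes` at each step and so tolerates the mutex being absent from the list instead of dereferencing NULL.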
Notes: Merged: https://github.com/ruby/ruby/pull/3743
Diffstat (limited to 'thread_sync.c')
-rw-r--r--  thread_sync.c  53
1 file changed, 35 insertions, 18 deletions
diff --git a/thread_sync.c b/thread_sync.c
index ef8cb5b853..86387ac2e7 100644
--- a/thread_sync.c
+++ b/thread_sync.c
@@ -193,14 +193,35 @@ rb_mutex_locked_p(VALUE self)
}
static void
+thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex) {
+ if (thread->keeping_mutexes) {
+ mutex->next_mutex = thread->keeping_mutexes;
+ }
+
+ thread->keeping_mutexes = mutex;
+}
+
+static void
+thread_mutex_remove(rb_thread_t *thread, rb_mutex_t *mutex) {
+ rb_mutex_t **keeping_mutexes = &thread->keeping_mutexes;
+
+ while (*keeping_mutexes && *keeping_mutexes != mutex) {
+ // Move to the next mutex in the list:
+ keeping_mutexes = &(*keeping_mutexes)->next_mutex;
+ }
+
+ if (*keeping_mutexes) {
+ *keeping_mutexes = mutex->next_mutex;
+ mutex->next_mutex = NULL;
+ }
+}
+
+static void
mutex_locked(rb_thread_t *th, VALUE self)
{
rb_mutex_t *mutex = mutex_ptr(self);
- if (th->keeping_mutexes) {
- mutex->next_mutex = th->keeping_mutexes;
- }
- th->keeping_mutexes = mutex;
+ thread_mutex_insert(th, mutex);
}
/*
@@ -392,18 +413,17 @@ rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
const char *err = NULL;
if (mutex->fiber == 0) {
- err = "Attempt to unlock a mutex which is not locked";
+ err = "Attempt to unlock a mutex which is not locked";
}
else if (mutex->fiber != fiber) {
- err = "Attempt to unlock a mutex which is locked by another thread/fiber";
+ err = "Attempt to unlock a mutex which is locked by another thread/fiber";
}
else {
- struct sync_waiter *cur = 0, *next;
- rb_mutex_t **th_mutex = &th->keeping_mutexes;
+ struct sync_waiter *cur = 0, *next;
- mutex->fiber = 0;
- list_for_each_safe(&mutex->waitq, cur, next, node) {
- list_del_init(&cur->node);
+ mutex->fiber = 0;
+ list_for_each_safe(&mutex->waitq, cur, next, node) {
+ list_del_init(&cur->node);
if (cur->th->scheduler != Qnil) {
rb_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
@@ -422,13 +442,10 @@ rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
continue;
}
}
- }
- found:
- while (*th_mutex != mutex) {
- th_mutex = &(*th_mutex)->next_mutex;
- }
- *th_mutex = mutex->next_mutex;
- mutex->next_mutex = NULL;
+ }
+
+ found:
+ thread_mutex_remove(th, mutex);
}
return err;