path: root/ruby_atomic.h
author     John Hawthorn <john@hawthorn.email>    2025-11-05 12:27:26 -0800
committer  John Hawthorn <john@hawthorn.email>    2025-11-20 14:06:33 -0800
commit     ff1d23eccba3ab37e77bf2d2222cad9d6f99a0ab (patch)
tree       46ca6947b1297af3264c831353fdd7e5d98d86e1 /ruby_atomic.h
parent     d1b11592af75a5eee9199951a0c330eb8caa2825 (diff)
Use a serial to keep track of Mutex-owning Fiber
Previously this held a pointer to the Fiber itself, which required marking it (marking was only implemented recently; prior to that it was buggy). Using a monotonically increasing integer instead allows us to avoid having a free function and keeps everything simpler.

My main motivations for making this change are that the root fiber lazily allocates self, which makes the write barrier implementation challenging to do correctly, and that I want to avoid sending Mutexes to the remembered set when they are locked by a short-lived Fiber.
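As an illustration of the approach (a minimal sketch; the struct, field, and function names below are hypothetical and not the identifiers used by the actual commit), the mutex records a plain integer serial instead of a Fiber pointer, so the GC never has to mark or clear the field and an ownership check is a simple integer comparison:

    /* Hypothetical sketch of serial-based ownership tracking. */
    #include <stdint.h>
    #include <stdbool.h>

    static uint64_t fiber_serial_counter = 0;   /* the real counter would be bumped atomically,
                                                  * e.g. with the ATOMIC_U64_FETCH_ADD added below */

    struct example_fiber {
        uint64_t serial;                         /* assigned once at creation, never reused */
    };

    struct example_mutex {
        uint64_t owner_serial;                   /* 0 = unlocked; holds no GC reference */
    };

    static void
    example_fiber_init(struct example_fiber *fiber)
    {
        fiber->serial = ++fiber_serial_counter;  /* non-atomic here only for brevity */
    }

    static void
    example_mutex_lock(struct example_mutex *mutex, const struct example_fiber *fiber)
    {
        mutex->owner_serial = fiber->serial;     /* plain integer store: no write barrier needed */
    }

    static bool
    example_mutex_owned_p(const struct example_mutex *mutex, const struct example_fiber *fiber)
    {
        return mutex->owner_serial == fiber->serial;
    }

Because serials are never reused, a stale owner_serial from a dead Fiber can never match a live one, which is what makes it safe to drop the pointer (and the marking it required) entirely.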
Diffstat (limited to 'ruby_atomic.h')
-rw-r--r--   ruby_atomic.h   23
1 file changed, 23 insertions, 0 deletions
diff --git a/ruby_atomic.h b/ruby_atomic.h
index ad53356f06..9eaa5a9651 100644
--- a/ruby_atomic.h
+++ b/ruby_atomic.h
@@ -63,4 +63,27 @@ rbimpl_atomic_u64_set_relaxed(volatile rbimpl_atomic_uint64_t *address, uint64_t
}
#define ATOMIC_U64_SET_RELAXED(var, val) rbimpl_atomic_u64_set_relaxed(&(var), val)
+static inline uint64_t
+rbimpl_atomic_u64_fetch_add(volatile rbimpl_atomic_uint64_t *ptr, uint64_t val)
+{
+#if defined(HAVE_GCC_ATOMIC_BUILTINS_64)
+    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
+#elif defined(_WIN32)
+    return InterlockedExchangeAdd64((volatile LONG64 *)ptr, val);
+#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
+    return atomic_add_64_nv(ptr, val) - val;
+#elif defined(HAVE_STDATOMIC_H)
+    return atomic_fetch_add_explicit((_Atomic uint64_t *)ptr, val, memory_order_seq_cst);
+#else
+    // Fallback using a mutex for platforms without 64-bit atomics
+    static rb_nativethread_lock_t lock = RB_NATIVETHREAD_LOCK_INIT;
+    rb_native_mutex_lock(&lock);
+    uint64_t old = *ptr;
+    *ptr = old + val;
+    rb_native_mutex_unlock(&lock);
+    return old;
+#endif
+}
+#define ATOMIC_U64_FETCH_ADD(var, val) rbimpl_atomic_u64_fetch_add(&(var), val)
+
#endif
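For context, a hedged usage sketch of the macro this patch adds (the counter name and helper function below are hypothetical; only ATOMIC_U64_FETCH_ADD and rbimpl_atomic_uint64_t come from the patch). The fetch-add returns the value the counter held before the addition, so a shared counter can hand out one unique serial per call:

    /* Hypothetical usage of ATOMIC_U64_FETCH_ADD from this patch. */
    static rbimpl_atomic_uint64_t fiber_serial_counter = 1;

    static uint64_t
    next_fiber_serial(void)
    {
        /* fetch-add returns the previous value, so concurrent callers
         * each observe a distinct serial */
        return ATOMIC_U64_FETCH_ADD(fiber_serial_counter, 1);
    }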