Diffstat (limited to 'include/ruby/atomic.h')
 include/ruby/atomic.h | 503 +++++++++++++++++++++++++++++++++---------------
 1 file changed, 379 insertions(+), 124 deletions(-)
diff --git a/include/ruby/atomic.h b/include/ruby/atomic.h
index 3eb80fbf7d..fcc48f532c 100644
--- a/include/ruby/atomic.h
+++ b/include/ruby/atomic.h
@@ -34,7 +34,7 @@
# include <sys/types.h> /* ssize_t */
#endif
-#if RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
+#if RBIMPL_COMPILER_IS(MSVC)
# pragma intrinsic(_InterlockedOr)
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
# include <atomic.h>
@@ -72,13 +72,40 @@ typedef unsigned int rb_atomic_t;
#elif defined(HAVE_GCC_SYNC_BUILTINS)
typedef unsigned int rb_atomic_t;
#elif defined(_WIN32)
+# include <winsock2.h> // to prevent macro redefinitions
+# include <windows.h> // for `LONG` and `Interlocked` functions
typedef LONG rb_atomic_t;
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
typedef unsigned int rb_atomic_t;
+#elif defined(HAVE_STDATOMIC_H)
+# include <stdatomic.h>
+typedef unsigned int rb_atomic_t;
#else
# error No atomic operation found
#endif
+/* Memory ordering constants */
+#if defined(HAVE_GCC_ATOMIC_BUILTINS)
+# define RBIMPL_ATOMIC_RELAXED __ATOMIC_RELAXED
+# define RBIMPL_ATOMIC_ACQUIRE __ATOMIC_ACQUIRE
+# define RBIMPL_ATOMIC_RELEASE __ATOMIC_RELEASE
+# define RBIMPL_ATOMIC_ACQ_REL __ATOMIC_ACQ_REL
+# define RBIMPL_ATOMIC_SEQ_CST __ATOMIC_SEQ_CST
+#elif defined(HAVE_STDATOMIC_H)
+# define RBIMPL_ATOMIC_RELAXED memory_order_relaxed
+# define RBIMPL_ATOMIC_ACQUIRE memory_order_acquire
+# define RBIMPL_ATOMIC_RELEASE memory_order_release
+# define RBIMPL_ATOMIC_ACQ_REL memory_order_acq_rel
+# define RBIMPL_ATOMIC_SEQ_CST memory_order_seq_cst
+#else
+/* Dummy values for unsupported platforms */
+# define RBIMPL_ATOMIC_RELAXED 0
+# define RBIMPL_ATOMIC_ACQUIRE 1
+# define RBIMPL_ATOMIC_RELEASE 2
+# define RBIMPL_ATOMIC_ACQ_REL 3
+# define RBIMPL_ATOMIC_SEQ_CST 4
+#endif
+
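These ordering constants exist so the internal `rbimpl_atomic_*` helpers can forward a caller-chosen memory order straight to the `__atomic_*` builtins or to the C11 `atomic_*_explicit` functions, while the public `RUBY_ATOMIC_*` macros below pin every operation to sequential consistency. As a minimal illustration of why weaker orders matter, here is a relaxed statistics counter in plain C11 (the `hits` counter is hypothetical, not part of this header):

#include <stdatomic.h>

static _Atomic unsigned int hits;

/* A relaxed increment suffices for a pure counter: the total matters,
 * but no other memory is published through it. */
static void
count_hit(void)
{
    atomic_fetch_add_explicit(&hits, 1, memory_order_relaxed);
}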
/**
* Atomically replaces the value pointed by `var` with the result of addition
* of `val` to the old value of `var`.
@@ -88,7 +115,7 @@ typedef unsigned int rb_atomic_t;
* @return What was stored in `var` before the addition.
* @post `var` holds `var + val`.
*/
-#define RUBY_ATOMIC_FETCH_ADD(var, val) rbimpl_atomic_fetch_add(&(var), (val))
+#define RUBY_ATOMIC_FETCH_ADD(var, val) rbimpl_atomic_fetch_add(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
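For instance, the macro can back a monotonically increasing ID generator; a sketch, with hypothetical names:

static rb_atomic_t next_id;

static rb_atomic_t
issue_id(void)
{
    /* The pre-increment value comes back, so concurrent callers
     * each observe a distinct ID. */
    return RUBY_ATOMIC_FETCH_ADD(next_id, 1);
}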
/**
* Atomically replaces the value pointed by `var` with the result of
@@ -99,7 +126,7 @@ typedef unsigned int rb_atomic_t;
* @return What was stored in `var` before the subtraction.
* @post `var` holds `var - val`.
*/
-#define RUBY_ATOMIC_FETCH_SUB(var, val) rbimpl_atomic_fetch_sub(&(var), (val))
+#define RUBY_ATOMIC_FETCH_SUB(var, val) rbimpl_atomic_fetch_sub(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
/**
* Atomically replaces the value pointed by `var` with the result of
@@ -111,7 +138,7 @@ typedef unsigned int rb_atomic_t;
* @post `var` holds `var | val`.
* @note For portability, this macro can return void.
*/
-#define RUBY_ATOMIC_OR(var, val) rbimpl_atomic_or(&(var), (val))
+#define RUBY_ATOMIC_OR(var, val) rbimpl_atomic_or(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
/**
* Atomically replaces the value pointed by `var` with `val`. This is just an
@@ -122,7 +149,7 @@ typedef unsigned int rb_atomic_t;
* @return What was stored in `var` before the assignment.
* @post `var` holds `val`.
*/
-#define RUBY_ATOMIC_EXCHANGE(var, val) rbimpl_atomic_exchange(&(var), (val))
+#define RUBY_ATOMIC_EXCHANGE(var, val) rbimpl_atomic_exchange(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
/**
* Atomic compare-and-swap. This stores `val` to `var` if and only if the
@@ -136,7 +163,16 @@ typedef unsigned int rb_atomic_t;
* @retval otherwise Something else is at `var`; not updated.
*/
#define RUBY_ATOMIC_CAS(var, oldval, newval) \
- rbimpl_atomic_cas(&(var), (oldval), (newval))
+ rbimpl_atomic_cas(&(var), (oldval), (newval), RBIMPL_ATOMIC_SEQ_CST, RBIMPL_ATOMIC_SEQ_CST)
+
+/**
+ * Atomic load. This loads `var` with an atomic intrinsic and returns
+ * its value.
+ *
+ * @param var A variable of ::rb_atomic_t.
+ * @return What was stored in `var`.
+ */
+#define RUBY_ATOMIC_LOAD(var) rbimpl_atomic_load(&(var), RBIMPL_ATOMIC_SEQ_CST)
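Combined with #RUBY_ATOMIC_CAS above, the load supports the usual read-modify-write retry loop; a sketch over a hypothetical `flags` word:

static rb_atomic_t flags;

static void
set_flag(rb_atomic_t mask)
{
    rb_atomic_t old = RUBY_ATOMIC_LOAD(flags);
    rb_atomic_t cur;

    /* RUBY_ATOMIC_CAS returns the previous value; retry until it
     * matches the value the update was computed from. */
    while ((cur = RUBY_ATOMIC_CAS(flags, old, old | mask)) != old) {
        old = cur;
    }
}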
/**
* Identical to #RUBY_ATOMIC_EXCHANGE, except for the return type.
@@ -146,7 +182,7 @@ typedef unsigned int rb_atomic_t;
* @return void
* @post `var` holds `val`.
*/
-#define RUBY_ATOMIC_SET(var, val) rbimpl_atomic_set(&(var), (val))
+#define RUBY_ATOMIC_SET(var, val) rbimpl_atomic_store(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
/**
* Identical to #RUBY_ATOMIC_FETCH_ADD, except for the return type.
@@ -156,7 +192,7 @@ typedef unsigned int rb_atomic_t;
* @return void
* @post `var` holds `var + val`.
*/
-#define RUBY_ATOMIC_ADD(var, val) rbimpl_atomic_add(&(var), (val))
+#define RUBY_ATOMIC_ADD(var, val) rbimpl_atomic_add(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
/**
* Identical to #RUBY_ATOMIC_FETCH_SUB, except for the return type.
@@ -166,7 +202,7 @@ typedef unsigned int rb_atomic_t;
* @return void
* @post `var` holds `var - val`.
*/
-#define RUBY_ATOMIC_SUB(var, val) rbimpl_atomic_sub(&(var), (val))
+#define RUBY_ATOMIC_SUB(var, val) rbimpl_atomic_sub(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
/**
* Atomically increments the value pointed by `var`.
@@ -175,7 +211,7 @@ typedef unsigned int rb_atomic_t;
* @return void
* @post `var` holds `var + 1`.
*/
-#define RUBY_ATOMIC_INC(var) rbimpl_atomic_inc(&(var))
+#define RUBY_ATOMIC_INC(var) rbimpl_atomic_inc(&(var), RBIMPL_ATOMIC_SEQ_CST)
/**
* Atomically decrements the value pointed by `var`.
@@ -184,7 +220,19 @@ typedef unsigned int rb_atomic_t;
* @return void
* @post `var` holds `var - 1`.
*/
-#define RUBY_ATOMIC_DEC(var) rbimpl_atomic_dec(&(var))
+#define RUBY_ATOMIC_DEC(var) rbimpl_atomic_dec(&(var), RBIMPL_ATOMIC_SEQ_CST)
+
+/**
+ * Identical to #RUBY_ATOMIC_FETCH_ADD, except it expects its arguments to be `size_t`.
+ * There are cases where ::rb_atomic_t is 32bit while `size_t` is 64bit. This
+ * should be used for size related operations to support such platforms.
+ *
+ * @param var A variable of `size_t`.
+ * @param val Value to add.
+ * @return What was stored in `var` before the addition.
+ * @post `var` holds `var + val`.
+ */
+#define RUBY_ATOMIC_SIZE_FETCH_ADD(var, val) rbimpl_atomic_size_fetch_add(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
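A typical use is byte accounting that can overflow 32 bits; `total_allocated` below is hypothetical:

static size_t total_allocated;

static void
note_allocation(size_t nbytes)
{
    /* A size_t-wide atomic add, safe on platforms where rb_atomic_t
     * is only 32bit. */
    RUBY_ATOMIC_SIZE_FETCH_ADD(total_allocated, nbytes);
}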
/**
* Identical to #RUBY_ATOMIC_INC, except it expects its argument to be `size_t`.
@@ -195,7 +243,7 @@ typedef unsigned int rb_atomic_t;
* @return void
* @post `var` holds `var + 1`.
*/
-#define RUBY_ATOMIC_SIZE_INC(var) rbimpl_atomic_size_inc(&(var))
+#define RUBY_ATOMIC_SIZE_INC(var) rbimpl_atomic_size_inc(&(var), RBIMPL_ATOMIC_SEQ_CST)
/**
* Identical to #RUBY_ATOMIC_DEC, except it expects its argument to be `size_t`.
@@ -206,7 +254,7 @@ typedef unsigned int rb_atomic_t;
* @return void
* @post `var` holds `var - 1`.
*/
-#define RUBY_ATOMIC_SIZE_DEC(var) rbimpl_atomic_size_dec(&(var))
+#define RUBY_ATOMIC_SIZE_DEC(var) rbimpl_atomic_size_dec(&(var), RBIMPL_ATOMIC_SEQ_CST)
/**
* Identical to #RUBY_ATOMIC_EXCHANGE, except it expects its arguments to be
@@ -220,7 +268,7 @@ typedef unsigned int rb_atomic_t;
* @post `var` holds `val`.
*/
#define RUBY_ATOMIC_SIZE_EXCHANGE(var, val) \
- rbimpl_atomic_size_exchange(&(var), (val))
+ rbimpl_atomic_size_exchange(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
/**
* Identical to #RUBY_ATOMIC_CAS, except it expects its arguments to be `size_t`.
@@ -234,7 +282,7 @@ typedef unsigned int rb_atomic_t;
* @retval otherwise Something else is at `var`; not updated.
*/
#define RUBY_ATOMIC_SIZE_CAS(var, oldval, newval) \
- rbimpl_atomic_size_cas(&(var), (oldval), (newval))
+ rbimpl_atomic_size_cas(&(var), (oldval), (newval), RBIMPL_ATOMIC_SEQ_CST, RBIMPL_ATOMIC_SEQ_CST)
/**
* Identical to #RUBY_ATOMIC_ADD, except it expects its arguments to be `size_t`.
@@ -246,7 +294,7 @@ typedef unsigned int rb_atomic_t;
* @return void
* @post `var` holds `var + val`.
*/
-#define RUBY_ATOMIC_SIZE_ADD(var, val) rbimpl_atomic_size_add(&(var), (val))
+#define RUBY_ATOMIC_SIZE_ADD(var, val) rbimpl_atomic_size_add(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
/**
* Identical to #RUBY_ATOMIC_SUB, except it expects its arguments to be `size_t`.
@@ -258,7 +306,7 @@ typedef unsigned int rb_atomic_t;
* @return void
* @post `var` holds `var - val`.
*/
-#define RUBY_ATOMIC_SIZE_SUB(var, val) rbimpl_atomic_size_sub(&(var), (val))
+#define RUBY_ATOMIC_SIZE_SUB(var, val) rbimpl_atomic_size_sub(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
/**
* Identical to #RUBY_ATOMIC_EXCHANGE, except it expects its arguments to be
@@ -277,7 +325,31 @@ typedef unsigned int rb_atomic_t;
* some pointers, most notably function pointers.
*/
#define RUBY_ATOMIC_PTR_EXCHANGE(var, val) \
- RBIMPL_CAST(rbimpl_atomic_ptr_exchange((void **)&(var), (void *)val))
+ RBIMPL_CAST(rbimpl_atomic_ptr_exchange((void **)&(var), (void *)val, RBIMPL_ATOMIC_SEQ_CST))
+
+/**
+ * Identical to #RUBY_ATOMIC_LOAD, except it expects its argument to be
+ * `void*`. There are cases where ::rb_atomic_t is 32bit while `void*` is
+ * 64bit. This should be used for pointer related operations to support
+ * such platforms.
+ *
+ * @param var A variable of `void*`
+ * @return The value of `var` (without tearing)
+ */
+#define RUBY_ATOMIC_PTR_LOAD(var) \
+ RBIMPL_CAST(rbimpl_atomic_ptr_load((void **)&(var), RBIMPL_ATOMIC_SEQ_CST))
+
+/**
+ * Identical to #RUBY_ATOMIC_SET, except it expects its arguments to be
+ * `void*`. There are cases where ::rb_atomic_t is 32bit while `void*` is
+ * 64bit. This should be used for pointer related operations to support such
+ * platforms.
+ *
+ * @param var A variable of `void*`.
+ * @param val Value to set.
+ * @post `var` holds `val`.
+ */
+#define RUBY_ATOMIC_PTR_SET(var, val) \
+ rbimpl_atomic_ptr_store((volatile void **)&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
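One way to use the pair: publish a fully initialized structure through a shared pointer and read it back without tearing. The `config` type and globals are hypothetical:

struct config { int verbose; };

static struct config *shared_config;

static void
publish(struct config *fresh)
{
    /* Seq-cst store; readers never observe a half-written pointer. */
    RUBY_ATOMIC_PTR_SET(shared_config, fresh);
}

static struct config *
snapshot(void)
{
    return RUBY_ATOMIC_PTR_LOAD(shared_config);
}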
/**
* Identical to #RUBY_ATOMIC_CAS, except it expects its arguments to be `void*`.
@@ -291,7 +363,20 @@ typedef unsigned int rb_atomic_t;
* @retval otherwise Something else is at `var`; not updated.
*/
#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval) \
- RBIMPL_CAST(rbimpl_atomic_ptr_cas((void **)&(var), (oldval), (newval)))
+ RBIMPL_CAST(rbimpl_atomic_ptr_cas((void **)&(var), (void *)(oldval), (void *)(newval), RBIMPL_ATOMIC_SEQ_CST, RBIMPL_ATOMIC_SEQ_CST))
+
+/**
+ * Identical to #RUBY_ATOMIC_SET, except it expects its arguments to be
+ * ::VALUE. There are cases where ::rb_atomic_t is 32bit while ::VALUE is
+ * 64bit. This should be used for pointer related operations to support such
+ * platforms.
+ *
+ * @param var A variable of ::VALUE.
+ * @param val Value to set.
+ * @post `var` holds `val`.
+ */
+#define RUBY_ATOMIC_VALUE_SET(var, val) \
+ rbimpl_atomic_value_store(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
/**
* Identical to #RUBY_ATOMIC_EXCHANGE, except it expects its arguments to be
@@ -305,7 +390,7 @@ typedef unsigned int rb_atomic_t;
* @post `var` holds `val`.
*/
#define RUBY_ATOMIC_VALUE_EXCHANGE(var, val) \
- rbimpl_atomic_value_exchange(&(var), (val))
+ rbimpl_atomic_value_exchange(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
/**
* Identical to #RUBY_ATOMIC_CAS, except it expects its arguments to be ::VALUE.
@@ -319,19 +404,20 @@ typedef unsigned int rb_atomic_t;
* @retval otherwise Something else is at `var`; not updated.
*/
#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval) \
- rbimpl_atomic_value_cas(&(var), (oldval), (newval))
+ rbimpl_atomic_value_cas(&(var), (oldval), (newval), RBIMPL_ATOMIC_SEQ_CST, RBIMPL_ATOMIC_SEQ_CST)
/** @cond INTERNAL_MACRO */
RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline rb_atomic_t
-rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
+rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
- return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
+ return __atomic_fetch_add(ptr, val, memory_order);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
return __sync_fetch_and_add(ptr, val);
@@ -348,6 +434,47 @@ rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
return atomic_add_int_nv(ptr, val) - val;
+#elif defined(HAVE_STDATOMIC_H)
+ return atomic_fetch_add_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
+
+#else
+# error Unsupported platform.
+#endif
+}
+
+/** @cond INTERNAL_MACRO */
+RBIMPL_ATTR_ARTIFICIAL()
+RBIMPL_ATTR_NOALIAS()
+RBIMPL_ATTR_NONNULL((1))
+static inline size_t
+rbimpl_atomic_size_fetch_add(volatile size_t *ptr, size_t val, int memory_order)
+{
+ (void)memory_order;
+#if 0
+
+#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
+ return __atomic_fetch_add(ptr, val, memory_order);
+
+#elif defined(HAVE_GCC_SYNC_BUILTINS)
+ return __sync_fetch_and_add(ptr, val);
+
+#elif defined(_WIN64)
+ return InterlockedExchangeAdd64(ptr, val);
+
+#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
+ /* Ditto for `atomic_add_int_nv`. */
+ RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
+ return atomic_add_long_nv(ptr, val) - val;
+
+#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
+ RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));
+
+ volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
+ return rbimpl_atomic_fetch_add(tmp, val, memory_order);
+
+#elif defined(HAVE_STDATOMIC_H)
+ return atomic_fetch_add_explicit((_Atomic volatile size_t *)ptr, val, memory_order);
+
#else
# error Unsupported platform.
#endif
@@ -357,8 +484,9 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
-rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
+rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
@@ -367,7 +495,7 @@ rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
* return value is not used, then compiles it into single `LOCK ADD`
* instruction.
*/
- __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);
+ __atomic_add_fetch(ptr, val, memory_order);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
__sync_add_and_fetch(ptr, val);
@@ -385,6 +513,9 @@ rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
atomic_add_int(ptr, val);
+#elif defined(HAVE_STDATOMIC_H)
+ atomic_fetch_add_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
+
#else
# error Unsupported platform.
#endif
@@ -394,17 +525,18 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
-rbimpl_atomic_size_add(volatile size_t *ptr, size_t val)
+rbimpl_atomic_size_add(volatile size_t *ptr, size_t val, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
- __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);
+ __atomic_add_fetch(ptr, val, memory_order);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
__sync_add_and_fetch(ptr, val);
-#elif defined(_WIN32) && defined(_M_AMD64)
+#elif defined(_WIN64)
/* Ditto for `InterlockedExchangeAdd`. */
InterlockedExchangeAdd64(ptr, val);
@@ -413,12 +545,17 @@ rbimpl_atomic_size_add(volatile size_t *ptr, size_t val)
RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
atomic_add_long(ptr, val);
-#else
+#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));
volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
- rbimpl_atomic_add(tmp, val);
+ rbimpl_atomic_add(tmp, val, memory_order);
+#elif defined(HAVE_STDATOMIC_H)
+ atomic_fetch_add_explicit((_Atomic volatile size_t *)ptr, val, memory_order);
+
+#else
+# error Unsupported platform.
#endif
}
@@ -426,12 +563,13 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
-rbimpl_atomic_inc(volatile rb_atomic_t *ptr)
+rbimpl_atomic_inc(volatile rb_atomic_t *ptr, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
- rbimpl_atomic_add(ptr, 1);
+ rbimpl_atomic_add(ptr, 1, memory_order);
#elif defined(_WIN32)
InterlockedIncrement(ptr);
@@ -439,9 +577,11 @@ rbimpl_atomic_inc(volatile rb_atomic_t *ptr)
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
atomic_inc_uint(ptr);
-#else
- rbimpl_atomic_add(ptr, 1);
+#elif defined(HAVE_STDATOMIC_H)
+ rbimpl_atomic_add(ptr, 1, memory_order);
+#else
+# error Unsupported platform.
#endif
}
@@ -449,22 +589,30 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
-rbimpl_atomic_size_inc(volatile size_t *ptr)
+rbimpl_atomic_size_inc(volatile size_t *ptr, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
- rbimpl_atomic_size_add(ptr, 1);
+ rbimpl_atomic_size_add(ptr, 1, memory_order);
-#elif defined(_WIN32) && defined(_M_AMD64)
+#elif defined(_WIN64)
InterlockedIncrement64(ptr);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
atomic_inc_ulong(ptr);
-#else
- rbimpl_atomic_size_add(ptr, 1);
+#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
+ RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
+
+ rbimpl_atomic_size_add(ptr, 1, memory_order);
+#elif defined(HAVE_STDATOMIC_H)
+ rbimpl_atomic_size_add(ptr, 1, memory_order);
+
+#else
+# error Unsupported platform.
#endif
}
@@ -472,12 +620,13 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline rb_atomic_t
-rbimpl_atomic_fetch_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
+rbimpl_atomic_fetch_sub(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
- return __atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST);
+ return __atomic_fetch_sub(ptr, val, memory_order);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
return __sync_fetch_and_sub(ptr, val);
@@ -492,6 +641,9 @@ rbimpl_atomic_fetch_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
return atomic_add_int_nv(ptr, neg * val) + val;
+#elif defined(HAVE_STDATOMIC_H)
+ return atomic_fetch_sub_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
+
#else
# error Unsupported platform.
#endif
@@ -501,12 +653,13 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
-rbimpl_atomic_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
+rbimpl_atomic_sub(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
- __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);
+ __atomic_sub_fetch(ptr, val, memory_order);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
__sync_sub_and_fetch(ptr, val);
@@ -519,6 +672,9 @@ rbimpl_atomic_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
atomic_add_int(ptr, neg * val);
+#elif defined(HAVE_STDATOMIC_H)
+ atomic_fetch_sub_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
+
#else
# error Unsupported platform.
#endif
@@ -528,17 +684,18 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
-rbimpl_atomic_size_sub(volatile size_t *ptr, size_t val)
+rbimpl_atomic_size_sub(volatile size_t *ptr, size_t val, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
- __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);
+ __atomic_sub_fetch(ptr, val, memory_order);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
__sync_sub_and_fetch(ptr, val);
-#elif defined(_WIN32) && defined(_M_AMD64)
+#elif defined(_WIN64)
const ssize_t neg = -1;
InterlockedExchangeAdd64(ptr, neg * val);
@@ -547,12 +704,17 @@ rbimpl_atomic_size_sub(volatile size_t *ptr, size_t val)
RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
atomic_add_long(ptr, neg * val);
-#else
+#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));
volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
- rbimpl_atomic_sub(tmp, val);
+ rbimpl_atomic_sub(tmp, val, memory_order);
+#elif defined(HAVE_STDATOMIC_H)
+ atomic_fetch_sub_explicit((_Atomic volatile size_t *)ptr, val, memory_order);
+
+#else
+# error Unsupported platform.
#endif
}
@@ -560,12 +722,13 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
-rbimpl_atomic_dec(volatile rb_atomic_t *ptr)
+rbimpl_atomic_dec(volatile rb_atomic_t *ptr, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
- rbimpl_atomic_sub(ptr, 1);
+ rbimpl_atomic_sub(ptr, 1, memory_order);
#elif defined(_WIN32)
InterlockedDecrement(ptr);
@@ -573,9 +736,11 @@ rbimpl_atomic_dec(volatile rb_atomic_t *ptr)
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
atomic_dec_uint(ptr);
-#else
- rbimpl_atomic_sub(ptr, 1);
+#elif defined(HAVE_STDATOMIC_H)
+ rbimpl_atomic_sub(ptr, 1, memory_order);
+#else
+# error Unsupported platform.
#endif
}
@@ -583,22 +748,30 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
-rbimpl_atomic_size_dec(volatile size_t *ptr)
+rbimpl_atomic_size_dec(volatile size_t *ptr, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
- rbimpl_atomic_size_sub(ptr, 1);
+ rbimpl_atomic_size_sub(ptr, 1, memory_order);
-#elif defined(_WIN32) && defined(_M_AMD64)
+#elif defined(_WIN64)
InterlockedDecrement64(ptr);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
atomic_dec_ulong(ptr);
-#else
- rbimpl_atomic_size_sub(ptr, 1);
+#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
+ RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
+
+ rbimpl_atomic_size_sub(ptr, 1, memory_order);
+#elif defined(HAVE_STDATOMIC_H)
+ rbimpl_atomic_size_sub(ptr, 1, memory_order);
+
+#else
+# error Unsupported platform.
#endif
}
@@ -606,59 +779,42 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
-rbimpl_atomic_or(volatile rb_atomic_t *ptr, rb_atomic_t val)
+rbimpl_atomic_or(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
- __atomic_or_fetch(ptr, val, __ATOMIC_SEQ_CST);
+ __atomic_or_fetch(ptr, val, memory_order);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
__sync_or_and_fetch(ptr, val);
-#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
+#elif RBIMPL_COMPILER_IS(MSVC)
_InterlockedOr(ptr, val);
-#elif defined(_WIN32) && defined(__GNUC__)
- /* This was for old MinGW. Maybe not needed any longer? */
- __asm__(
- "lock\n\t"
- "orl\t%1, %0"
- : "=m"(ptr)
- : "Ir"(val));
-
-#elif defined(_WIN32) && defined(_M_IX86)
- __asm mov eax, ptr;
- __asm mov ecx, val;
- __asm lock or [eax], ecx;
-
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
atomic_or_uint(ptr, val);
+#elif !defined(_WIN32) && defined(HAVE_STDATOMIC_H)
+ atomic_fetch_or_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
+
#else
# error Unsupported platform.
#endif
}
-/* Nobody uses this but for theoretical backwards compatibility... */
-#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
-static inline rb_atomic_t
-rb_w32_atomic_or(volatile rb_atomic_t *var, rb_atomic_t val)
-{
- return rbimpl_atomic_or(var, val);
-}
-#endif
-
RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline rb_atomic_t
-rbimpl_atomic_exchange(volatile rb_atomic_t *ptr, rb_atomic_t val)
+rbimpl_atomic_exchange(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
- return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
+ return __atomic_exchange_n(ptr, val, memory_order);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
return __sync_lock_test_and_set(ptr, val);
@@ -669,6 +825,9 @@ rbimpl_atomic_exchange(volatile rb_atomic_t *ptr, rb_atomic_t val)
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
return atomic_swap_uint(ptr, val);
+#elif defined(HAVE_STDATOMIC_H)
+ return atomic_exchange_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
+
#else
# error Unsupported platform.
#endif
@@ -678,29 +837,53 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline size_t
-rbimpl_atomic_size_exchange(volatile size_t *ptr, size_t val)
+rbimpl_atomic_size_exchange(volatile size_t *ptr, size_t val, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
- return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
+ return __atomic_exchange_n(ptr, val, memory_order);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
return __sync_lock_test_and_set(ptr, val);
-#elif defined(_WIN32) && defined(_M_AMD64)
+#elif defined(_WIN64)
return InterlockedExchange64(ptr, val);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
return atomic_swap_ulong(ptr, val);
-#else
+#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
- const rb_atomic_t ret = rbimpl_atomic_exchange(tmp, val);
+ const rb_atomic_t ret = rbimpl_atomic_exchange(tmp, val, memory_order);
return RBIMPL_CAST((size_t)ret);
+#elif defined(HAVE_STDATOMIC_H)
+ return atomic_exchange_explicit((_Atomic volatile size_t *)ptr, val, memory_order);
+
+#else
+# error Unsupported platform.
+#endif
+}
+
+RBIMPL_ATTR_ARTIFICIAL()
+RBIMPL_ATTR_NOALIAS()
+RBIMPL_ATTR_NONNULL((1))
+static inline void
+rbimpl_atomic_size_store(volatile size_t *ptr, size_t val, int memory_order)
+{
+ (void)memory_order;
+#if 0
+
+#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
+ __atomic_store_n(ptr, val, memory_order);
+
+#else
+ rbimpl_atomic_size_exchange(ptr, val, memory_order);
+
#endif
}
@@ -708,8 +891,9 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void *
-rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val)
+rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(InterlockedExchangePointer)
@@ -726,7 +910,7 @@ rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val)
const size_t sval = RBIMPL_CAST((size_t)val);
volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
- const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
+ const size_t sret = rbimpl_atomic_size_exchange(sptr, sval, memory_order);
return RBIMPL_CAST((void *)sret);
#endif
@@ -735,14 +919,27 @@ rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val)
RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
+static inline void
+rbimpl_atomic_ptr_store(volatile void **ptr, void *val, int memory_order)
+{
+ RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
+
+ const size_t sval = RBIMPL_CAST((size_t)val);
+ volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
+ rbimpl_atomic_size_store(sptr, sval, memory_order);
+}
+
+RBIMPL_ATTR_ARTIFICIAL()
+RBIMPL_ATTR_NOALIAS()
+RBIMPL_ATTR_NONNULL((1))
static inline VALUE
-rbimpl_atomic_value_exchange(volatile VALUE *ptr, VALUE val)
+rbimpl_atomic_value_exchange(volatile VALUE *ptr, VALUE val, int memory_order)
{
RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
const size_t sval = RBIMPL_CAST((size_t)val);
volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
- const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
+ const size_t sret = rbimpl_atomic_size_exchange(sptr, sval, memory_order);
return RBIMPL_CAST((VALUE)sret);
}
@@ -750,16 +947,46 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
-rbimpl_atomic_set(volatile rb_atomic_t *ptr, rb_atomic_t val)
+rbimpl_atomic_value_store(volatile VALUE *ptr, VALUE val, int memory_order)
+{
+ RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
+
+ const size_t sval = RBIMPL_CAST((size_t)val);
+ volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
+ rbimpl_atomic_size_store(sptr, sval, memory_order);
+}
+
+RBIMPL_ATTR_ARTIFICIAL()
+RBIMPL_ATTR_NOALIAS()
+RBIMPL_ATTR_NONNULL((1))
+static inline rb_atomic_t
+rbimpl_atomic_load(volatile rb_atomic_t *ptr, int memory_order)
{
+ (void)memory_order;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
- __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);
+ return __atomic_load_n(ptr, memory_order);
+#else
+ return rbimpl_atomic_fetch_add(ptr, 0, memory_order);
+#endif
+}
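The fallback branch reads by atomically adding zero: a read-modify-write that leaves the value unchanged yet returns what was there. The same idiom in plain C11, for illustration only:

#include <stdatomic.h>

static unsigned int
load_via_rmw(_Atomic unsigned int *p)
{
    /* fetch_add of 0 modifies nothing but still returns the
     * atomically observed current value. */
    return atomic_fetch_add_explicit(p, 0, memory_order_seq_cst);
}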
+
+RBIMPL_ATTR_ARTIFICIAL()
+RBIMPL_ATTR_NOALIAS()
+RBIMPL_ATTR_NONNULL((1))
+static inline void
+rbimpl_atomic_store(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
+{
+ (void)memory_order;
+#if 0
+
+#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
+ __atomic_store_n(ptr, val, memory_order);
#else
/* Maybe std::atomic<rb_atomic_t>::store can be faster? */
- rbimpl_atomic_exchange(ptr, val);
+ rbimpl_atomic_exchange(ptr, val, memory_order);
#endif
}
@@ -768,73 +995,73 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline rb_atomic_t
-rbimpl_atomic_cas(volatile rb_atomic_t *ptr, rb_atomic_t oldval, rb_atomic_t newval)
+rbimpl_atomic_cas(volatile rb_atomic_t *ptr, rb_atomic_t oldval, rb_atomic_t newval, int success_memorder, int failure_memorder)
{
+ (void)success_memorder;
+ (void)failure_memorder;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
__atomic_compare_exchange_n(
- ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ ptr, &oldval, newval, 0, success_memorder, failure_memorder);
return oldval;
#elif defined(HAVE_GCC_SYNC_BUILTINS)
return __sync_val_compare_and_swap(ptr, oldval, newval);
-#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
+#elif RBIMPL_COMPILER_IS(MSVC)
return InterlockedCompareExchange(ptr, newval, oldval);
-#elif defined(_WIN32)
- PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
- PVOID pold = RBIMPL_CAST((PVOID)oldval);
- PVOID pnew = RBIMPL_CAST((PVOID)newval);
- PVOID pret = InterlockedCompareExchange(pptr, pnew, pold);
- return RBIMPL_CAST((rb_atomic_t)pret);
-
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
return atomic_cas_uint(ptr, oldval, newval);
+#elif defined(HAVE_STDATOMIC_H)
+ atomic_compare_exchange_strong_explicit(
+ (_Atomic volatile rb_atomic_t *)ptr, &oldval, newval, success_memorder, failure_memorder);
+ return oldval;
+
#else
# error Unsupported platform.
#endif
}
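A note on the stdatomic branch: `atomic_compare_exchange_strong_explicit` leaves `oldval` untouched on success and overwrites it with the value actually found on failure, so `return oldval` reproduces the `__sync_val_compare_and_swap` convention of always returning the previous contents. Demonstrated in plain C11:

#include <stdatomic.h>
#include <stdio.h>

int
main(void)
{
    _Atomic unsigned int v = 5;
    unsigned int expected = 9;     /* deliberately wrong guess */

    /* Fails because v == 5, and rewrites expected to 5. */
    atomic_compare_exchange_strong(&v, &expected, 7);
    printf("%u\n", expected);      /* prints 5, the previous value */
    return 0;
}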
-/* Nobody uses this but for theoretical backwards compatibility... */
-#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
-static inline rb_atomic_t
-rb_w32_atomic_cas(volatile rb_atomic_t *var, rb_atomic_t oldval, rb_atomic_t newval)
-{
- return rbimpl_atomic_cas(var, oldval, newval);
-}
-#endif
-
RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline size_t
-rbimpl_atomic_size_cas(volatile size_t *ptr, size_t oldval, size_t newval)
+rbimpl_atomic_size_cas(volatile size_t *ptr, size_t oldval, size_t newval, int success_memorder, int failure_memorder)
{
+ (void)success_memorder;
+ (void)failure_memorder;
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
__atomic_compare_exchange_n(
- ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ ptr, &oldval, newval, 0, success_memorder, failure_memorder);
return oldval;
#elif defined(HAVE_GCC_SYNC_BUILTINS)
return __sync_val_compare_and_swap(ptr, oldval, newval);
-#elif defined(_WIN32) && defined(_M_AMD64)
+#elif defined(_WIN64)
return InterlockedCompareExchange64(ptr, newval, oldval);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
return atomic_cas_ulong(ptr, oldval, newval);
-#else
+#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
volatile rb_atomic_t *tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
- return rbimpl_atomic_cas(tmp, oldval, newval);
+ return rbimpl_atomic_cas(tmp, oldval, newval, success_memorder, failure_memorder);
+
+#elif defined(HAVE_STDATOMIC_H)
+ atomic_compare_exchange_strong_explicit(
+ (_Atomic volatile size_t *)ptr, &oldval, newval, success_memorder, failure_memorder);
+ return oldval;
+#else
+# error Unsupported platform.
#endif
}
@@ -842,8 +1069,10 @@ RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void *
-rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval)
+rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval, int success_memorder, int failure_memorder)
{
+ (void)success_memorder;
+ (void)failure_memorder;
#if 0
#elif defined(InterlockedExchangePointer)
@@ -866,7 +1095,7 @@ rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval)
const size_t snew = RBIMPL_CAST((size_t)newval);
const size_t sold = RBIMPL_CAST((size_t)oldval);
volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
- const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
+ const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew, success_memorder, failure_memorder);
return RBIMPL_CAST((void *)sret);
#endif
@@ -875,15 +1104,41 @@ rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval)
RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
+static inline void *
+rbimpl_atomic_ptr_load(void **ptr, int memory_order)
+{
+ (void)memory_order;
+#if 0
+
+#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
+ return __atomic_load_n(ptr, memory_order);
+#else
+ void *val = *ptr;
+ return rbimpl_atomic_ptr_cas(ptr, val, val, memory_order, memory_order);
+#endif
+}
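The pointer fallback reads with a compare-and-swap of a snapshot against itself: if the CAS succeeds nothing changes, and if it fails the out-parameter is refreshed with the pointer actually present, so either way the atomically observed value comes back. The shape of the idiom in plain C11:

#include <stdatomic.h>

static void *
ptr_load_via_cas(_Atomic(void *) *p)
{
    void *expected = atomic_load_explicit(p, memory_order_relaxed);

    /* Swap the value with itself; on failure `expected` is updated
     * to the pointer that was really there. */
    atomic_compare_exchange_strong_explicit(
        p, &expected, expected,
        memory_order_seq_cst, memory_order_seq_cst);
    return expected;
}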
+
+RBIMPL_ATTR_ARTIFICIAL()
+RBIMPL_ATTR_NOALIAS()
+RBIMPL_ATTR_NONNULL((1))
+static inline VALUE
+rbimpl_atomic_value_load(volatile VALUE *ptr, int memory_order)
+{
+ return RBIMPL_CAST((VALUE)rbimpl_atomic_ptr_load((void **)ptr, memory_order));
+}
+
+RBIMPL_ATTR_ARTIFICIAL()
+RBIMPL_ATTR_NOALIAS()
+RBIMPL_ATTR_NONNULL((1))
static inline VALUE
-rbimpl_atomic_value_cas(volatile VALUE *ptr, VALUE oldval, VALUE newval)
+rbimpl_atomic_value_cas(volatile VALUE *ptr, VALUE oldval, VALUE newval, int success_memorder, int failure_memorder)
{
RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
const size_t snew = RBIMPL_CAST((size_t)newval);
const size_t sold = RBIMPL_CAST((size_t)oldval);
volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
- const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
+ const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew, success_memorder, failure_memorder);
return RBIMPL_CAST((VALUE)sret);
}
/** @endcond */