author     Nicolai Hähnle <nicolai.haehnle@amd.com>   2016-10-04 16:06:31 +0200
committer  Nicolai Hähnle <nicolai.haehnle@amd.com>   2016-10-05 15:39:39 +0200
commit     8915f0c0de84fa593ca6c31518c1292f94b3bb7b (patch)
tree       2c719a8879546dc843ed07aa14375eff7291ce6c
parent     d51c1f9d51ef0e80873a9a32c48606cdce25a209 (diff)
util: use GCC atomic intrinsics with explicit memory model
This is motivated by the fact that p_atomic_read and p_atomic_set may somewhat surprisingly not do the right thing in the old version: while stores and loads are de facto atomic at least on x86, the compiler may apply re-ordering and speculation quite liberally. Basically, the old version uses the "relaxed" memory ordering.

The new version always uses acquire/release ordering. This is the strongest memory ordering that doesn't require additional fence instructions on x86. (The only stronger ordering is "sequentially consistent", which is usually more than you need anyway.)

I would feel more comfortable if p_atomic_set/read in the old implementation at least used volatile loads and stores, but I don't see a way to get there without typeof (which we cannot use here since the code is compiled with -std=c99).

Eventually, we should really just move to something that is based on the atomics in C11 / C++11.

Acked-by: Marek Olšák <marek.olsak@amd.com>
Reviewed-by: Emil Velikov <emil.velikov@collabora.com>
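As a rough illustration of the guarantee at stake (this sketch is not part of the patch; it assumes GCC >= 4.7 and POSIX threads), a release store on a flag publishes every earlier plain store to any thread whose acquire load observes the flag:

/* Illustration only -- minimal producer/consumer using the GCC __atomic
 * builtins with explicit memory model.
 */
#include <pthread.h>
#include <stdio.h>

static int data;   /* plain variable, published via the flag */
static int ready;  /* flag, accessed only through __atomic builtins */

static void *producer(void *arg)
{
   (void)arg;
   data = 42;                                     /* ordinary store */
   __atomic_store_n(&ready, 1, __ATOMIC_RELEASE); /* publish: earlier stores
                                                     may not sink below this */
   return NULL;
}

static void *consumer(void *arg)
{
   (void)arg;
   while (!__atomic_load_n(&ready, __ATOMIC_ACQUIRE))
      ; /* spin: later loads may not hoist above the acquire */
   printf("%d\n", data);                          /* guaranteed to print 42 */
   return NULL;
}

int main(void)
{
   pthread_t p, c;
   pthread_create(&p, NULL, producer, NULL);
   pthread_create(&c, NULL, consumer, NULL);
   pthread_join(p, NULL);
   pthread_join(c, NULL);
   return 0;
}

With the old plain-assignment p_atomic_set/read (effectively relaxed ordering), the compiler would be free to reorder the store to data past the store to ready, so the consumer could observe ready == 1 while data is still 0.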
-rw-r--r--   configure.ac          11
-rw-r--r--   src/util/u_atomic.h   21
2 files changed, 32 insertions, 0 deletions
diff --git a/configure.ac b/configure.ac
index 1bfac3b..421f4f3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -387,6 +387,17 @@ fi
AM_CONDITIONAL([SSE41_SUPPORTED], [test x$SSE41_SUPPORTED = x1])
AC_SUBST([SSE41_CFLAGS], $SSE41_CFLAGS)
+dnl Check for new-style atomic builtins
+AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+int main() {
+ int n;
+ return __atomic_load_n(&n, __ATOMIC_ACQUIRE);
+}]])], GCC_ATOMIC_BUILTINS_SUPPORTED=1)
+if test "x$GCC_ATOMIC_BUILTINS_SUPPORTED" = x1; then
+ DEFINES="$DEFINES -DUSE_GCC_ATOMIC_BUILTINS"
+fi
+AM_CONDITIONAL([GCC_ATOMIC_BUILTINS_SUPPORTED], [test x$GCC_ATOMIC_BUILTINS_SUPPORTED = x1])
+
dnl Check for Endianness
AC_C_BIGENDIAN(
little_endian=no,
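For reference, the configure probe above can be exercised by hand outside of autoconf (illustration only; the file name and compiler invocation are hypothetical, not part of the patch):

/* atomic_probe.c -- standalone copy of the configure check above.
 * Try:  gcc -std=c99 atomic_probe.c
 * If this compiles and links, the explicit-memory-model builtins
 * (GCC >= 4.7) are available, and configure adds
 * -DUSE_GCC_ATOMIC_BUILTINS to DEFINES.
 */
int main(void)
{
   int n = 0;
   return __atomic_load_n(&n, __ATOMIC_ACQUIRE);
}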
diff --git a/src/util/u_atomic.h b/src/util/u_atomic.h
index 8675903..2a5bbae 100644
--- a/src/util/u_atomic.h
+++ b/src/util/u_atomic.h
@@ -36,6 +36,20 @@
#define PIPE_ATOMIC "GCC Sync Intrinsics"
+#if defined(USE_GCC_ATOMIC_BUILTINS)
+
+/* The builtins with explicit memory model are available since GCC 4.7. */
+#define p_atomic_set(_v, _i) __atomic_store_n((_v), (_i), __ATOMIC_RELEASE)
+#define p_atomic_read(_v) __atomic_load_n((_v), __ATOMIC_ACQUIRE)
+#define p_atomic_dec_zero(v) (__atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL) == 0)
+#define p_atomic_inc(v) (void) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
+#define p_atomic_dec(v) (void) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
+#define p_atomic_add(v, i) (void) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
+#define p_atomic_inc_return(v) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
+#define p_atomic_dec_return(v) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
+
+#else
+
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_dec_zero(v) (__sync_sub_and_fetch((v), 1) == 0)
@@ -44,6 +58,13 @@
#define p_atomic_add(v, i) (void) __sync_add_and_fetch((v), (i))
#define p_atomic_inc_return(v) __sync_add_and_fetch((v), 1)
#define p_atomic_dec_return(v) __sync_sub_and_fetch((v), 1)
+
+#endif
+
+/* There is no __atomic_* compare and exchange that returns the current value.
+ * Also, GCC 5.4 seems unable to optimize a compound statement expression that
+ * uses an additional stack variable with __atomic_compare_exchange[_n].
+ */
#define p_atomic_cmpxchg(v, old, _new) \
__sync_val_compare_and_swap((v), (old), (_new))
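For comparison, this is the shape a p_atomic_cmpxchg built on the new builtins would take (illustration only, not part of the patch; it relies on the GNU statement-expression and __typeof__ extensions, and the name p_atomic_cmpxchg_atomic is hypothetical). __atomic_compare_exchange_n returns a bool and reports the observed value through its "expected" pointer, hence the temporary that the comment above says GCC 5.4 fails to optimize away:

/* Illustration only -- not how the patch defines p_atomic_cmpxchg. */
#define p_atomic_cmpxchg_atomic(v, old, _new) ({                   \
      __typeof__(*(v)) _old = (old);                               \
      /* On failure, _old is overwritten with the value found. */  \
      __atomic_compare_exchange_n((v), &_old, (_new), 0,           \
                                  __ATOMIC_ACQ_REL,                \
                                  __ATOMIC_ACQUIRE);               \
      _old;                                                        \
   })

On success _old still holds the comparand (which equaled the prior value), and on failure it holds the current value, matching the return convention of __sync_val_compare_and_swap; the extra stack slot is exactly the cost the patch avoids by keeping the __sync builtin here.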