@@ -16,6 +16,23 @@
#endif
#define HAVE_al8_fast (ATOMIC_REG_SIZE >= 8)
+/*
+ * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics
+ * that are supported by the host, e.g. s390x. We can force the pointer to
+ * have our known alignment with __builtin_assume_aligned; however, prior to
+ * GCC 13 that was only reliable with optimization enabled. See
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+ */
+#if defined(CONFIG_ATOMIC128_OPT)
+# if !defined(__OPTIMIZE__)
+# define ATTRIBUTE_ATOMIC128_OPT __attribute__((optimize("O1")))
+# endif
+# define CONFIG_ATOMIC128
+#endif
+#ifndef ATTRIBUTE_ATOMIC128_OPT
+# define ATTRIBUTE_ATOMIC128_OPT
+#endif
+
#if defined(CONFIG_ATOMIC128)
# define HAVE_al16_fast true
#else
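
As a concrete illustration of the problem the new comment block describes, consider a
minimal standalone file of the following shape (illustrative only; the function name and
harness are made up, not QEMU code). Built at -O0 on an affected host such as s390x with
GCC older than 13, the 16-byte load may still be emitted as a libatomic call despite the
__builtin_assume_aligned hint; adding optimize("O1") to the function, which is what
ATTRIBUTE_ATOMIC128_OPT supplies when __OPTIMIZE__ is not defined, lets the compiler
inline the atomic sequence.

    /* Illustrative sketch, not QEMU code; see the PR 107389 reference above. */
    __attribute__((optimize("O1")))  /* what ATTRIBUTE_ATOMIC128_OPT expands to at -O0 */
    static unsigned __int128 example_load_16(const void *pv)
    {
        /* Promise 16-byte alignment even if __alignof(unsigned __int128) < 16. */
        const unsigned __int128 *p = __builtin_assume_aligned(pv, 16);
        return __atomic_load_n(p, __ATOMIC_RELAXED);
    }

    int main(void)
    {
        static _Alignas(16) unsigned __int128 x;
        return (int)example_load_16(&x);
    }
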
@@ -136,7 +153,8 @@ static inline uint64_t load_atomic8(void *pv)
*
* Atomically load 16 aligned bytes from @pv.
*/
-static inline Int128 load_atomic16(void *pv)
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+load_atomic16(void *pv)
{
#ifdef CONFIG_ATOMIC128
__uint128_t *p = __builtin_assume_aligned(pv, 16);
@@ -340,7 +358,8 @@ static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
 * cross a 16-byte boundary then the access must be 16-byte atomic,
* otherwise the access must be 8-byte atomic.
*/
-static inline uint64_t load_atom_extract_al16_or_al8(void *pv, int s)
+static inline uint64_t ATTRIBUTE_ATOMIC128_OPT
+load_atom_extract_al16_or_al8(void *pv, int s)
{
#if defined(CONFIG_ATOMIC128)
uintptr_t pi = (uintptr_t)pv;
@@ -676,28 +695,24 @@ static inline void store_atomic8(void *pv, uint64_t val)
*
* Atomically store 16 aligned bytes to @pv.
*/
-static inline void store_atomic16(void *pv, Int128 val)
+static inline void ATTRIBUTE_ATOMIC128_OPT
+store_atomic16(void *pv, Int128Alias val)
{
#if defined(CONFIG_ATOMIC128)
__uint128_t *pu = __builtin_assume_aligned(pv, 16);
- Int128Alias new;
-
- new.s = val;
- qatomic_set__nocheck(pu, new.u);
+ qatomic_set__nocheck(pu, val.u);
#elif defined(CONFIG_CMPXCHG128)
__uint128_t *pu = __builtin_assume_aligned(pv, 16);
__uint128_t o;
- Int128Alias n;
/*
* Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
* defer to libatomic, so we must use __sync_val_compare_and_swap_16
* and accept the sequential consistency that comes with it.
*/
- n.s = val;
do {
o = *pu;
- } while (!__sync_bool_compare_and_swap_16(pu, o, n.u));
+ } while (!__sync_bool_compare_and_swap_16(pu, o, val.u));
#else
qemu_build_not_reached();
#endif
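
The prototype change from Int128 to Int128Alias is what allows store_atomic16() to read
val.u directly: Int128Alias, declared in include/qemu/int128.h, is essentially a union over
the abstract Int128 type and the host's native 128-bit integers, marked as a transparent
union so callers can keep passing plain Int128 values. A simplified sketch of the idea,
assuming a host with native __int128 support (the real QEMU definition differs in detail):

    /* Simplified sketch of the Int128Alias idea; not the verbatim QEMU type. */
    typedef __int128_t Int128;   /* with native 128-bit support, Int128 is the host type */

    typedef union {
        Int128      s;           /* the value viewed as Int128 */
        __int128_t  i;           /* ... as a signed host 128-bit integer */
        __uint128_t u;           /* ... as an unsigned host 128-bit integer */
    } Int128Alias __attribute__((transparent_union));

    /* Thanks to transparent_union, a caller may pass an Int128 argument and the
       callee picks whichever member matches the builtin it needs, e.g. v.u. */
    static __uint128_t example_as_unsigned(Int128Alias v) { return v.u; }
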
@@ -779,7 +794,8 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
*
* Atomically store @val to @p masked by @msk.
*/
-static void store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
+static void ATTRIBUTE_ATOMIC128_OPT
+store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
{
#if defined(CONFIG_ATOMIC128)
__uint128_t *pu, old, new;
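
The hunk above only shows the declarations of store_atom_insert_al16(); the contract in
its comment (atomically store @val to @p masked by @msk) amounts to a 16-byte
read-modify-write. A rough sketch of that shape, illustrative only and not the QEMU body:

    /* Rough sketch of a masked 16-byte store via compare-and-swap; illustrative only. */
    void example_insert_al16(unsigned __int128 *ps,
                             unsigned __int128 val, unsigned __int128 msk)
    {
        unsigned __int128 *pu = __builtin_assume_aligned(ps, 16);
        unsigned __int128 old = __atomic_load_n(pu, __ATOMIC_RELAXED);
        unsigned __int128 new;

        do {
            /* Keep the bits of old outside the mask, take the bits of val inside it. */
            new = (old & ~msk) | (val & msk);
        } while (!__atomic_compare_exchange_n(pu, &old, new, 1,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));
    }
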
diff --git a/meson.build b/meson.build
--- a/meson.build
+++ b/meson.build
@@ -2241,23 +2241,21 @@ config_host_data.set('HAVE_BROKEN_SIZE_MAX', not cc.compiles('''
return printf("%zu", SIZE_MAX);
}''', args: ['-Werror']))
-atomic_test = '''
+# See if 64-bit atomic operations are supported.
+# Note that without __atomic builtins, we can only
+# assume atomic loads/stores max at pointer size.
+config_host_data.set('CONFIG_ATOMIC64', cc.links('''
#include <stdint.h>
int main(void)
{
- @0@ x = 0, y = 0;
+ uint64_t x = 0, y = 0;
y = __atomic_load_n(&x, __ATOMIC_RELAXED);
__atomic_store_n(&x, y, __ATOMIC_RELAXED);
__atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
__atomic_exchange_n(&x, y, __ATOMIC_RELAXED);
__atomic_fetch_add(&x, y, __ATOMIC_RELAXED);
return 0;
- }'''
-
-# See if 64-bit atomic operations are supported.
-# Note that without __atomic builtins, we can only
-# assume atomic loads/stores max at pointer size.
-config_host_data.set('CONFIG_ATOMIC64', cc.links(atomic_test.format('uint64_t')))
+ }'''))
has_int128 = cc.links('''
__int128_t a;
@@ -2275,21 +2273,39 @@ if has_int128
# "do we have 128-bit atomics which are handled inline and specifically not
# via libatomic". The reason we can't use libatomic is documented in the
# comment starting "GCC is a house divided" in include/qemu/atomic128.h.
- has_atomic128 = cc.links(atomic_test.format('unsigned __int128'))
+ # We only care about these operations on 16-byte aligned pointers, so
+ # force 16-byte alignment of the pointer, which may be greater than
+ # __alignof(unsigned __int128) for the host.
+ atomic_test_128 = '''
+ int main(int ac, char **av) {
+      unsigned __int128 *p = __builtin_assume_aligned(av[ac - 1], 16);
+ p[1] = __atomic_load_n(&p[0], __ATOMIC_RELAXED);
+ __atomic_store_n(&p[2], p[3], __ATOMIC_RELAXED);
+ __atomic_compare_exchange_n(&p[4], &p[5], p[6], 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return 0;
+ }'''
+ has_atomic128 = cc.links(atomic_test_128)
config_host_data.set('CONFIG_ATOMIC128', has_atomic128)
if not has_atomic128
- has_cmpxchg128 = cc.links('''
- int main(void)
- {
- unsigned __int128 x = 0, y = 0;
- __sync_val_compare_and_swap_16(&x, y, x);
- return 0;
- }
- ''')
-    config_host_data.set('CONFIG_CMPXCHG128', has_cmpxchg128)
+    # Even with __builtin_assume_aligned, the above test may have failed
+    # without optimization enabled.  Try again with optimizations locally
+    # enabled for the function.  See
+    #   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+    has_atomic128_opt = cc.links('__attribute__((optimize("O1")))' + atomic_test_128)
+    config_host_data.set('CONFIG_ATOMIC128_OPT', has_atomic128_opt)
+ if not has_atomic128_opt
+ config_host_data.set('CONFIG_CMPXCHG128', cc.links('''
+ int main(void)
+ {
+ unsigned __int128 x = 0, y = 0;
+ __sync_val_compare_and_swap_16(&x, y, x);
+ return 0;
+ }
+ '''))
+ endif
endif
endif
There is an edge condition prior to gcc13 for which optimization is
required to generate 16-byte atomic sequences.  Detect this.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/ldst_atomicity.c.inc | 38 ++++++++++++++++++++++++++-------
 meson.build                    | 52 ++++++++++++++++++++++------------
 2 files changed, 61 insertions(+), 29 deletions(-)