@@ -87,33 +87,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
return ret;
}
-#if DATA_SIZE >= 16
-#if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_READ, retaddr);
- DATA_TYPE val;
-
- val = atomic16_read(haddr);
- ATOMIC_MMU_CLEANUP;
- atomic_trace_ld_post(env, addr, oi);
- return val;
-}
-
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_WRITE, retaddr);
-
- atomic16_set(haddr, val);
- ATOMIC_MMU_CLEANUP;
- atomic_trace_st_post(env, addr, oi);
-}
-#endif
-#else
+#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
@@ -188,7 +162,7 @@ GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#undef GEN_ATOMIC_HELPER_FN
-#endif /* DATA SIZE >= 16 */
+#endif /* DATA SIZE < 16 */
#undef END
@@ -220,34 +194,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
return BSWAP(ret);
}
-#if DATA_SIZE >= 16
-#if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_READ, retaddr);
- DATA_TYPE val;
-
- val = atomic16_read(haddr);
- ATOMIC_MMU_CLEANUP;
- atomic_trace_ld_post(env, addr, oi);
- return BSWAP(val);
-}
-
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_WRITE, retaddr);
-
- val = BSWAP(val);
- atomic16_set(haddr, val);
- ATOMIC_MMU_CLEANUP;
- atomic_trace_st_post(env, addr, oi);
-}
-#endif
-#else
+#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
@@ -326,7 +273,7 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD
#undef GEN_ATOMIC_HELPER_FN
-#endif /* DATA_SIZE >= 16 */
+#endif /* DATA_SIZE < 16 */
#undef END
#endif /* DATA_SIZE > 1 */
@@ -300,15 +300,6 @@ Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv,
MemOpIdx oi, uintptr_t retaddr);
-Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr);
-Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr);
-void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
- MemOpIdx oi, uintptr_t retaddr);
-void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
- MemOpIdx oi, uintptr_t retaddr);
-
#if defined(CONFIG_USER_ONLY)
extern __thread uintptr_t helper_retaddr;
@@ -19,20 +19,6 @@ static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
}
-#if HAVE_ATOMIC128
-static void atomic_trace_ld_post(CPUArchState *env, uint64_t addr,
- MemOpIdx oi)
-{
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-}
-
-static void atomic_trace_st_post(CPUArchState *env, uint64_t addr,
- MemOpIdx oi)
-{
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-#endif
-
/*
* Atomic helpers callable from TCG.
* These have a common interface and all defer to cpu_atomic_*
Atomic load/store of 128-bit quantities is now handled by cpu_{ld,st}16_mmu.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/atomic_template.h   | 61 +++--------------------------------
 include/exec/cpu_ldst.h       |  9 ------
 accel/tcg/atomic_common.c.inc | 14 --------
 3 files changed, 4 insertions(+), 80 deletions(-)
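
For reference, a minimal sketch (not part of this patch) of how a caller can obtain the same 16-byte atomic access through cpu_ld16_mmu/cpu_st16_mmu instead of the removed cpu_atomic_{ld,st}o_{le,be}_mmu entry points. The wrapper names below are illustrative assumptions; their signatures are modeled on the removed declarations, with endianness selected via MO_BE/MO_LE in the MemOpIdx rather than separate _be/_le helpers:

    /* Illustrative sketch only -- the example_* helper names are not from
     * this patch.  Assumed to live in target helper code where
     * CPUArchState and target_ulong are already available. */
    #include "exec/cpu_ldst.h"    /* cpu_ld16_mmu(), cpu_st16_mmu() */
    #include "exec/memop.h"       /* MO_128, MO_ALIGN, MO_BE */
    #include "exec/memopidx.h"    /* make_memop_idx() */
    #include "qemu/int128.h"      /* Int128 */

    /* Big-endian 16-byte atomic load; the little-endian variant only swaps
     * MO_BE for MO_LE.  MO_ALIGN plus the default MemOp atomicity
     * (MO_ATOM_IFALIGN in current QEMU) requests a single-copy atomic
     * 16-byte access when the host can provide one. */
    static Int128 example_ld16_atomic_be(CPUArchState *env, target_ulong addr,
                                         int mmu_idx, uintptr_t retaddr)
    {
        MemOpIdx oi = make_memop_idx(MO_128 | MO_ALIGN | MO_BE, mmu_idx);
        return cpu_ld16_mmu(env, addr, oi, retaddr);
    }

    /* Matching big-endian 16-byte atomic store. */
    static void example_st16_atomic_be(CPUArchState *env, target_ulong addr,
                                       Int128 val, int mmu_idx,
                                       uintptr_t retaddr)
    {
        MemOpIdx oi = make_memop_idx(MO_128 | MO_ALIGN | MO_BE, mmu_idx);
        cpu_st16_mmu(env, addr, val, oi, retaddr);
    }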