[v2,20/55] accel/tcg: Report unaligned atomics for user-only

Message ID 20210803041443.55452-21-richard.henderson@linaro.org
State Superseded
Series Unaligned access for user-only

Commit Message

Richard Henderson Aug. 3, 2021, 4:14 a.m. UTC
Use the newly exposed cpu_unaligned_access for atomic_mmu_lookup,
which has access to complete alignment info from the TCGMemOpIdx arg.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

---
 accel/tcg/user-exec.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

-- 
2.25.1
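
The guest alignment requirement checked by this patch is encoded as a
power of two: an access is misaligned when any of the low a_bits bits
of the address are set.  A minimal standalone sketch of that mask test
(is_misaligned is a hypothetical stand-in; in the patch itself the
values come from get_alignment_bits(get_memop(oi))):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the patch's inline check: an access at
 * `addr` violates an alignment requirement of (1 << a_bits) bytes
 * when any of the low a_bits of the address are set. */
static bool is_misaligned(uint64_t addr, unsigned a_bits)
{
    uint64_t mask = (UINT64_C(1) << a_bits) - 1;   /* a_bits = 3 -> 0x7 */
    return (addr & mask) != 0;
}

int main(void)
{
    /* 0x1004 is 4-byte aligned (a_bits = 2) but not 8-byte aligned. */
    return is_misaligned(0x1004, 2) || !is_misaligned(0x1004, 3);
}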

Comments

Alex Bennée Aug. 3, 2021, 3:54 p.m. UTC | #1
Richard Henderson <richard.henderson@linaro.org> writes:

> Use the newly exposed cpu_unaligned_access for atomic_mmu_lookup,
> which has access to complete alignment info from the TCGMemOpIdx arg.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

<snip>
> -    void *ret = g2h(env_cpu(env), addr);
> +
> +    ret = g2h(env_cpu(env), addr);
>      set_helper_retaddr(retaddr);
>      return ret;
>  }

-- 
Alex Bennée
Philippe Mathieu-Daudé Aug. 18, 2021, 8:51 a.m. UTC | #2
Hi Richard,

On 8/3/21 6:14 AM, Richard Henderson wrote:
> Use the newly exposed cpu_unaligned_access for atomic_mmu_lookup,
> which has access to complete alignment info from the TCGMemOpIdx arg.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  accel/tcg/user-exec.c | 14 +++++++++++++-
>  1 file changed, 13 insertions(+), 1 deletion(-)
> 
> diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
> index 90d1a2d327..5ad808a25a 100644
> --- a/accel/tcg/user-exec.c
> +++ b/accel/tcg/user-exec.c
> @@ -28,6 +28,7 @@
>  #include "qemu/atomic128.h"
>  #include "trace/trace-root.h"
>  #include "trace/mem.h"
> +#include "internal.h"
>  
>  #undef EAX
>  #undef ECX
> @@ -1230,11 +1231,22 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>                                 TCGMemOpIdx oi, int size, int prot,
>                                 uintptr_t retaddr)
>  {
> +    MemOp mop = get_memop(oi);
> +    int a_bits = get_alignment_bits(mop);
> +    void *ret;
> +
> +    /* Enforce guest required alignment.  */
> +    if (unlikely(addr & ((1 << a_bits) - 1))) {
> +        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
> +        cpu_unaligned_access(env_cpu(env), addr, t, get_mmuidx(oi), retaddr);
> +    }
> +
>      /* Enforce qemu required alignment.  */
>      if (unlikely(addr & (size - 1))) {
>          cpu_loop_exit_atomic(env_cpu(env), retaddr);
>      }
> -    void *ret = g2h(env_cpu(env), addr);
> +
> +    ret = g2h(env_cpu(env), addr);
>      set_helper_retaddr(retaddr);
>      return ret;

Can't we simply do:

       return g2h(env_cpu(env), addr);

?

>  }
>
Richard Henderson Aug. 18, 2021, 5:47 p.m. UTC | #3
On 8/17/21 10:51 PM, Philippe Mathieu-Daudé wrote:
>> -    void *ret = g2h(env_cpu(env), addr);
>> +
>> +    ret = g2h(env_cpu(env), addr);
>>       set_helper_retaddr(retaddr);
>>       return ret;
> 
> Can't we simply do:
> 
>         return g2h(env_cpu(env), addr);
> 
> ?

I think the idea was to narrow the range of instructions in which
helper_retaddr is set, because {set,clear}_helper_retaddr contain
barriers.  I didn't give it more thought this time around, just kept
the ordering.

r~
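
A rough sketch of the ordering concern Richard describes (the bodies
below are simplified assumptions for illustration; QEMU's real
helper_retaddr and set_helper_retaddr live in accel/tcg and use QEMU's
own barrier primitives).  Because the setter ends in a barrier, the
compiler cannot hoist later work above the store to helper_retaddr, so
translating the address first keeps the window in which helper_retaddr
is live as small as possible:

#include <stdint.h>

static __thread uintptr_t helper_retaddr;   /* simplified model */

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;
    /* Compiler barrier: nothing after the call can be reordered
     * before this store, so the "window" opens exactly here. */
    __asm__ volatile("" ::: "memory");
}

/* Hypothetical stand-in for g2h(): guest address -> host pointer. */
static void *translate(uint64_t addr)
{
    return (void *)(uintptr_t)addr;
}

/* Ordering kept by the patch: the translation runs before the
 * barrier, so helper_retaddr is set only for the return itself. */
static void *lookup_narrow(uint64_t addr, uintptr_t retaddr)
{
    void *ret = translate(addr);
    set_helper_retaddr(retaddr);
    return ret;
}

/* Philippe's suggested form: equally correct, but translate() now
 * executes inside the barrier-delimited window. */
static void *lookup_wide(uint64_t addr, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    return translate(addr);
}

int main(void)
{
    return lookup_narrow(0x1000, 1) != lookup_wide(0x1000, 2);
}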

Patch

diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 90d1a2d327..5ad808a25a 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -28,6 +28,7 @@
 #include "qemu/atomic128.h"
 #include "trace/trace-root.h"
 #include "trace/mem.h"
+#include "internal.h"
 
 #undef EAX
 #undef ECX
@@ -1230,11 +1231,22 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, int size, int prot,
                                uintptr_t retaddr)
 {
+    MemOp mop = get_memop(oi);
+    int a_bits = get_alignment_bits(mop);
+    void *ret;
+
+    /* Enforce guest required alignment.  */
+    if (unlikely(addr & ((1 << a_bits) - 1))) {
+        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
+        cpu_unaligned_access(env_cpu(env), addr, t, get_mmuidx(oi), retaddr);
+    }
+
     /* Enforce qemu required alignment.  */
     if (unlikely(addr & (size - 1))) {
         cpu_loop_exit_atomic(env_cpu(env), retaddr);
     }
-    void *ret = g2h(env_cpu(env), addr);
+
+    ret = g2h(env_cpu(env), addr);
     set_helper_retaddr(retaddr);
     return ret;
 }