@@ -2129,6 +2129,42 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         device = S1_attrs_are_device(result->cacheattrs.attrs);
     }
 
+    /*
+     * Enable alignment checks on Device memory.
+     *
+     * Per R_XCHFJ, the correct ordering for alignment, permission,
+     * and stage 2 faults is:
+     *    - Alignment fault caused by the memory type
+     *    - Permission fault
+     *    - A stage 2 fault on the memory access
+     * Perform the alignment check now, so that we recognize it in
+     * the correct order. Set TLB_CHECK_ALIGNED so that any subsequent
+     * softmmu TLB hit will also check the alignment; clear it along
+     * the non-device path so that tlb_fill_flags is consistent in the
+     * event of restart_atomic_update.
+     *
+     * In v7, for a CPU without the Virtualization Extensions this
+     * access is UNPREDICTABLE; we choose to make it take the alignment
+     * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
+     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
+     */
+    if (device) {
+        unsigned a_bits = memop_atomicity_bits(memop);
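+        /*
+         * a_bits is the log2 alignment required for this access's
+         * atomicity; an address with any of those low bits set takes
+         * the Alignment fault here, ahead of the permission check.
+         */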
+        if (address & ((1 << a_bits) - 1)) {
+            fi->type = ARMFault_Alignment;
+            goto do_fault;
+        }
+        result->f.tlb_fill_flags = TLB_CHECK_ALIGNED;
+    } else {
+        result->f.tlb_fill_flags = 0;
+    }
+
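+    /* Device accesses reach the permission check below only if aligned. */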
     if (!(result->f.prot & (1 << access_type))) {
         fi->type = ARMFault_Permission;
         goto do_fault;
@@ -2156,27 +2192,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     result->f.attrs.space = out_space;
     result->f.attrs.secure = arm_space_is_secure(out_space);
 
-    /*
-     * Enable alignment checks on Device memory.
-     *
-     * Per R_XCHFJ, this check is mis-ordered. The correct ordering
-     * for alignment, permission, and stage 2 faults should be:
-     *    - Alignment fault caused by the memory type
-     *    - Permission fault
-     *    - A stage 2 fault on the memory access
-     * but due to the way the TCG softmmu TLB operates, we will have
-     * implicitly done the permission check and the stage2 lookup in
-     * finding the TLB entry, so the alignment check cannot be done sooner.
-     *
-     * In v7, for a CPU without the Virtualization Extensions this
-     * access is UNPREDICTABLE; we choose to make it take the alignment
-     * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
-     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
-     */
-    if (device) {
-        result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
-    }
-
     /*
      * For FEAT_LPA2 and effective DS, the SH field in the attributes
      * was re-purposed for output address bits. The SH attribute in