@@ -47,3 +47,4 @@ stable hypervisors.
| ARM | Cortex-A53 | #819472 | ARM64_ERRATUM_819472 |
| ARM | Cortex-A57 | #852523 | N/A |
| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
+| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
@@ -142,6 +142,27 @@ config ARM64_ERRATUM_832075
If unsure, say Y.
+config ARM64_ERRATUM_834220
+ bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+ default y
+ depends on ARM_64
+ help
+ This option adds an alternative code sequence to work around ARM
+ erratum 834220 on Cortex-A57 parts up to r1p2.
+
+ Affected Cortex-A57 parts might report a Stage 2 translation
+	  fault as the result of a Stage 1 fault for a load crossing a
+ page boundary when there is a permission or device memory
+ alignment fault at Stage 1 and a translation fault at Stage 2.
+
+ The workaround is to verify that the Stage 1 translation
+ doesn't generate a fault before handling the Stage 2 fault.
+ Please note that this does not necessarily enable the workaround,
+ as it depends on the alternative framework, which will only patch
+ the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
endmenu
source "common/Kconfig"
@@ -49,6 +49,15 @@ static const struct arm_cpu_capabilities arm_errata[] = {
(1 << MIDR_VARIANT_SHIFT) | 2),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+ {
+ /* Cortex-A57 r0p0 - r1p2 */
+ .desc = "ARM erratum 834220",
+ .capability = ARM64_WORKAROUND_834220,
+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+ (1 << MIDR_VARIANT_SHIFT) | 2),
+ },
+#endif
{},
};
@@ -2388,12 +2388,13 @@ static inline bool hpfar_is_valid(bool s1ptw, uint8_t fsc)
* HPFAR is valid if one of the following cases are true:
* 1. the stage 2 fault happen during a stage 1 page table walk
* (the bit ESR_EL2.S1PTW is set)
- * 2. the fault was due to a translation fault
+ * 2. the fault was due to a translation fault and the processor
+ * does not carry erratum #834220
*
* Note that technically HPFAR is valid for other cases, but they
* are currently not supported by Xen.
*/
- return s1ptw || (fsc == FSC_FLT_TRANS);
+ return s1ptw || (fsc == FSC_FLT_TRANS && !check_workaround_834220());
}
static void do_trap_instr_abort_guest(struct cpu_user_regs *regs,
@@ -41,6 +41,7 @@ static inline bool_t check_workaround_##erratum(void) \
#endif
CHECK_WORKAROUND_HELPER(766422, ARM32_WORKAROUND_766422, CONFIG_ARM_32)
+CHECK_WORKAROUND_HELPER(834220, ARM64_WORKAROUND_834220, CONFIG_ARM_64)
#undef CHECK_WORKAROUND_HELPER
@@ -38,8 +38,9 @@
#define ARM64_WORKAROUND_CLEAN_CACHE 0
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
#define ARM32_WORKAROUND_766422 2
+#define ARM64_WORKAROUND_834220 3
-#define ARM_NCAPS 3
+#define ARM_NCAPS 4
#ifndef __ASSEMBLY__