@@ -244,7 +244,7 @@ cpu_init_done:
* Alignment checking enabled,
* MMU translation disabled (for now).
*/
- ldr r0, =(HSCTLR_BASE|SCTLR_A)
+ ldr r0, =(HSCTLR_BASE|SCTLR_Axx_ELx_A)
mcr CP32(r0, HSCTLR)
/*
@@ -369,7 +369,8 @@ virtphys_clash:
ldr r1, =paging /* Explicit vaddr, not RIP-relative */
mrc CP32(r0, HSCTLR)
- orr r0, r0, #(SCTLR_M|SCTLR_C) /* Enable MMU and D-cache */
+ /* Enable MMU and D-cache */
+ orr r0, r0, #(SCTLR_Axx_ELx_M|SCTLR_Axx_ELx_C)
dsb /* Flush PTE writes and finish reads */
mcr CP32(r0, HSCTLR) /* now paging is enabled */
isb /* Now, flush the icache */
@@ -514,8 +514,8 @@ virtphys_clash:
ldr x1, =paging /* Explicit vaddr, not RIP-relative */
mrs x0, SCTLR_EL2
- orr x0, x0, #SCTLR_M /* Enable MMU */
- orr x0, x0, #SCTLR_C /* Enable D-cache */
+ orr x0, x0, #SCTLR_Axx_ELx_M /* Enable MMU */
+ orr x0, x0, #SCTLR_Axx_ELx_C /* Enable D-cache */
dsb sy /* Flush PTE writes and finish reads */
msr SCTLR_EL2, x0 /* now paging is enabled */
isb /* Now, flush the icache */
@@ -612,7 +612,7 @@ bool guest_walk_tables(const struct vcpu *v, vaddr_t gva,
*perms = GV2M_READ;
/* If the MMU is disabled, there is no need to translate the gva. */
- if ( !(sctlr & SCTLR_M) )
+ if ( !(sctlr & SCTLR_Axx_ELx_M) )
{
*ipa = gva;
@@ -609,7 +609,7 @@ void __init remove_early_mappings(void)
*/
static void xen_pt_enforce_wnx(void)
{
- WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
+ WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_Axx_ELx_WXN, SCTLR_EL2);
/*
* The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
* before flushing the TLBs.
@@ -392,9 +392,9 @@ static void cpsr_switch_mode(struct cpu_user_regs *regs, int mode)
regs->cpsr |= PSR_IRQ_MASK;
if ( mode == PSR_MODE_ABT )
regs->cpsr |= PSR_ABT_MASK;
- if ( sctlr & SCTLR_TE )
+ if ( sctlr & SCTLR_A32_ELx_TE )
regs->cpsr |= PSR_THUMB;
- if ( sctlr & SCTLR_EE )
+ if ( sctlr & SCTLR_Axx_ELx_EE )
regs->cpsr |= PSR_BIG_ENDIAN;
}
@@ -402,7 +402,7 @@ static vaddr_t exception_handler32(vaddr_t offset)
{
uint32_t sctlr = READ_SYSREG32(SCTLR_EL1);
- if ( sctlr & SCTLR_V )
+ if ( sctlr & SCTLR_A32_EL1_V )
return 0xffff0000 + offset;
else /* always have security exceptions */
return READ_SYSREG(VBAR_EL1) + offset;
@@ -391,10 +391,12 @@ static inline int set_foreign_p2m_entry(struct domain *d, unsigned long gfn,
*/
static inline bool vcpu_has_cache_enabled(struct vcpu *v)
{
+ const uint32_t mask = SCTLR_Axx_ELx_C | SCTLR_Axx_ELx_M;
+
/* Only works with the current vCPU */
ASSERT(current == v);
- return (READ_SYSREG32(SCTLR_EL1) & (SCTLR_C|SCTLR_M)) == (SCTLR_C|SCTLR_M);
+ return (READ_SYSREG32(SCTLR_EL1) & mask) == mask;
}
#endif /* _XEN_P2M_H */
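For illustration only, not part of the patch: the same mask idiom generalises to any place that has to test the M and C bits together, for example a hypothetical helper operating on an SCTLR value that has already been read:

    /* Hypothetical helper, shown only to illustrate the mask idiom. */
    static inline bool sctlr_mmu_and_dcache_enabled(uint32_t sctlr)
    {
        const uint32_t mask = SCTLR_Axx_ELx_C | SCTLR_Axx_ELx_M;

        return (sctlr & mask) == mask;
    }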
@@ -117,26 +117,23 @@
#define TTBCR_PD1 (_AC(1,U)<<5)
/* SCTLR System Control Register. */
-/* HSCTLR is a subset of this. */
-#define SCTLR_TE (_AC(1,U)<<30)
-#define SCTLR_AFE (_AC(1,U)<<29)
-#define SCTLR_TRE (_AC(1,U)<<28)
-#define SCTLR_NMFI (_AC(1,U)<<27)
-#define SCTLR_EE (_AC(1,U)<<25)
-#define SCTLR_VE (_AC(1,U)<<24)
-#define SCTLR_U (_AC(1,U)<<22)
-#define SCTLR_FI (_AC(1,U)<<21)
-#define SCTLR_WXN (_AC(1,U)<<19)
-#define SCTLR_HA (_AC(1,U)<<17)
-#define SCTLR_RR (_AC(1,U)<<14)
-#define SCTLR_V (_AC(1,U)<<13)
-#define SCTLR_I (_AC(1,U)<<12)
-#define SCTLR_Z (_AC(1,U)<<11)
-#define SCTLR_SW (_AC(1,U)<<10)
-#define SCTLR_B (_AC(1,U)<<7)
-#define SCTLR_C (_AC(1,U)<<2)
-#define SCTLR_A (_AC(1,U)<<1)
-#define SCTLR_M (_AC(1,U)<<0)
+
+/* Bits specific to SCTLR_EL1 for Arm32 */
+
+#define SCTLR_A32_EL1_V (_AC(1,U)<<13)
+
+/* Common bits for SCTLR_ELx for Arm32 */
+
+#define SCTLR_A32_ELx_TE (_AC(1,U)<<30)
+#define SCTLR_A32_ELx_FI (_AC(1,U)<<21)
+
+/* Common bits for SCTLR_ELx on all architectures */
+#define SCTLR_Axx_ELx_EE (_AC(1,U)<<25)
+#define SCTLR_Axx_ELx_WXN (_AC(1,U)<<19)
+#define SCTLR_Axx_ELx_I (_AC(1,U)<<12)
+#define SCTLR_Axx_ELx_C (_AC(1,U)<<2)
+#define SCTLR_Axx_ELx_A (_AC(1,U)<<1)
+#define SCTLR_Axx_ELx_M (_AC(1,U)<<0)
#define HSCTLR_BASE _AC(0x30c51878,U)
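A minimal sketch of how the new naming reads at a call site (not part of the patch; the helper names below are hypothetical, while READ_SYSREG32 and the SCTLR_EL1/SCTLR_EL2 register names already appear above): the Axx_ELx bits apply to SCTLR at any exception level on either architecture, whereas the A32_* bits are only meaningful for AArch32 state.

    /* Hypothetical helpers illustrating the Axx_ELx vs A32_* split. */
    static inline bool el2_wxn_enabled(void)
    {
        /* WXN is common to all architectures and exception levels. */
        return READ_SYSREG32(SCTLR_EL2) & SCTLR_Axx_ELx_WXN;
    }

    static inline bool guest_uses_hivecs(void)
    {
        /* V (high vectors) only exists for AArch32 EL1. */
        return READ_SYSREG32(SCTLR_EL1) & SCTLR_A32_EL1_V;
    }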