@@ -7,6 +7,7 @@
#include <xen/vm_event.h>
#include <xen/monitor.h>
#include <xen/iocap.h>
+#include <xen/xmalloc.h>
#include <public/vm_event.h>
#include <asm/flushtlb.h>
#include <asm/gic.h>
@@ -14,15 +15,23 @@
#include <asm/hardirq.h>
#include <asm/page.h>
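+/* Number of VMIDs representable with an 8-bit or a 16-bit VMID field */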
+#define MAX_VMID_8_BIT (1UL << 8)
+#define MAX_VMID_16_BIT (1UL << 16)
+
+#define INVALID_VMID 0 /* VMID 0 is reserved */
+
#ifdef CONFIG_ARM_64
static unsigned int __read_mostly p2m_root_order;
static unsigned int __read_mostly p2m_root_level;
#define P2M_ROOT_ORDER p2m_root_order
#define P2M_ROOT_LEVEL p2m_root_level
+static unsigned int __read_mostly max_vmid = MAX_VMID_8_BIT;
+#define MAX_VMID max_vmid
#else
/* First level P2M is alway 2 consecutive pages */
#define P2M_ROOT_LEVEL 1
#define P2M_ROOT_ORDER 1
+#define MAX_VMID MAX_VMID_8_BIT
#endif
#define P2M_ROOT_PAGES (1<<P2M_ROOT_ORDER)
@@ -1219,7 +1228,7 @@ static int p2m_alloc_table(struct domain *d)
p2m->root = page;
- p2m->vttbr = page_to_maddr(p2m->root) | ((uint64_t)p2m->vmid & 0xff) << 48;
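+ /* The VMID occupies VTTBR_EL2[63:48] and may now be up to 16 bits wide. */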
+ p2m->vttbr = page_to_maddr(p2m->root) | ((uint64_t)p2m->vmid << 48);
/*
* Make sure that all TLBs corresponding to the new VMID are flushed
@@ -1230,20 +1239,32 @@ static int p2m_alloc_table(struct domain *d)
return 0;
}
-#define MAX_VMID 256
-#define INVALID_VMID 0 /* VMID 0 is reserved */
static spinlock_t vmid_alloc_lock = SPIN_LOCK_UNLOCKED;
/*
- * VTTBR_EL2 VMID field is 8 bits. Using a bitmap here limits us to
- * 256 concurrent domains.
+ * The VTTBR_EL2 VMID field is 8 or 16 bits wide; AArch64 optionally
+ * supports 16-bit VMIDs. Using a bitmap here limits us to 256 or 65536
+ * (on AArch64) concurrent domains. The bitmap is allocated dynamically
+ * depending on whether 8-bit or 16-bit VMIDs are supported.
*/
-static DECLARE_BITMAP(vmid_mask, MAX_VMID);
+static unsigned long *vmid_mask;
-void p2m_vmid_allocator_init(void)
+int p2m_vmid_allocator_init(void)
{
- set_bit(INVALID_VMID, vmid_mask);
+ int ret = 0;
+
+ /* Allocate the VMID bitmap based on MAX_VMID. */
+ vmid_mask = xzalloc_array(unsigned long, BITS_TO_LONGS(MAX_VMID));
+
+ if ( vmid_mask )
+ set_bit(INVALID_VMID, vmid_mask);
+ else
+ ret = -ENOMEM;
+
+ return ret;
}
static int p2m_alloc_vmid(struct domain *d)
@@ -1632,20 +1653,36 @@ void __init setup_virt_paging(void)
unsigned int cpu;
unsigned int pa_range = 0x10; /* Larger than any possible value */
+ unsigned int vmid_8_bit_flag = 0;
for_each_online_cpu ( cpu )
{
const struct cpuinfo_arm *info = &cpu_data[cpu];
if ( info->mm64.pa_range < pa_range )
pa_range = info->mm64.pa_range;
+
+ /* Set a flag if the current CPU does not support 16-bit VMIDs. */
+ if ( info->mm64.vmid_bits != MM64_VMID_16_BITS_SUPPORT )
+ vmid_8_bit_flag = 1;
}
+ /* If the flag is not set, all online CPUs support 16-bit VMIDs. */
+ if ( !vmid_8_bit_flag )
+ max_vmid = MAX_VMID_16_BIT;
+
/* pa_range is 4 bits, but the defined encodings are only 3 bits */
if ( pa_range&0x8 || !pa_range_info[pa_range].pabits )
panic("Unknown encoding of ID_AA64MMFR0_EL1.PARange %x\n", pa_range);
val |= VTCR_PS(pa_range);
val |= VTCR_TG0_4K;
+
+ /* Set the VS bit only if 16-bit VMIDs are supported. */
+ if ( MAX_VMID == MAX_VMID_16_BIT )
+ val |= VTCR_VS;
val |= VTCR_SL0(pa_range_info[pa_range].sl0);
val |= VTCR_T0SZ(pa_range_info[pa_range].t0sz);
@@ -1660,7 +1697,8 @@ void __init setup_virt_paging(void)
printk("P2M: %d levels with order-%d root, VTCR 0x%lx\n",
4 - P2M_ROOT_LEVEL, P2M_ROOT_ORDER, val);
- p2m_vmid_allocator_init();
+ if ( p2m_vmid_allocator_init() != 0 )
+ panic("Could not allocate VMID bitmap space");
/* It is not allowed to concatenate a level zero root */
BUG_ON( P2M_ROOT_LEVEL == 0 && P2M_ROOT_ORDER > 0 );
@@ -30,7 +30,7 @@ struct p2m_domain {
struct page_info *root;
/* Current VMID in use */
- uint8_t vmid;
+ uint16_t vmid;
/* Current Translation Table Base Register for the p2m */
uint64_t vttbr;
@@ -215,6 +215,8 @@
#define VTCR_PS(x) ((x)<<16)
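+/* VTCR_EL2.VS: selects 16-bit VMIDs when set, 8-bit VMIDs when clear */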
+#define VTCR_VS (_AC(0x1,UL)<<19)
+
#endif
#define VTCR_RES1 (_AC(1,UL)<<31)
@@ -269,6 +271,11 @@
/* FSR long format */
#define FSRL_STATUS_DEBUG (_AC(0x22,UL)<<0)
+#ifdef CONFIG_ARM_64
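+/* ID_AA64MMFR1_EL1.VMIDBits: 0x0 => 8-bit VMIDs, 0x2 => 16-bit VMIDs */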
+#define MM64_VMID_8_BITS_SUPPORT 0x0
+#define MM64_VMID_16_BITS_SUPPORT 0x2
+#endif
+
#ifndef __ASSEMBLY__
struct cpuinfo_arm {
@@ -337,7 +344,16 @@ struct cpuinfo_arm {
unsigned long tgranule_64K:4;
unsigned long tgranule_4K:4;
unsigned long __res0:32;
- };
+
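+ /* ID_AA64MMFR1_EL1 */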
+ unsigned long hafdbs:4;
+ unsigned long vmid_bits:4;
+ unsigned long vh:4;
+ unsigned long hpds:4;
+ unsigned long lo:4;
+ unsigned long pan:4;
+ unsigned long __res1:8;
+ unsigned long __res2:32;
+ };
} mm64;
struct {
The VMID space is increased from 8 bits to 16 bits in the ARMv8.1 revision. This allows more than 256 VMs to be supported by Xen.

This change adds support for 16-bit VMIDs in Xen based on whether the architecture supports it.

Signed-off-by: Bhupinder Thakur <bhupinder.thakur@linaro.org>
---
 xen/arch/arm/p2m.c              | 56 ++++++++++++++++++++++++++++++++++-------
 xen/include/asm-arm/p2m.h       |  2 +-
 xen/include/asm-arm/processor.h | 18 ++++++++++++-
 3 files changed, 65 insertions(+), 11 deletions(-)