@@ -922,6 +922,8 @@ struct ArchCPU {
/* CPU has memory protection unit */
bool has_mpu;
+ /* CPU has MTE enabled in KVM mode */
+ bool kvm_mte;
/* PMSAv7 MPU number of supported regions */
uint32_t pmsav7_dregion;
/* PMSAv8 MPU number of supported hyp regions */
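Note: kvm_mte is per-CPU state. It is set by kvm_arm_enable_mte() (added below) and consulted in arm_cpu_realizefn() to decide whether the MTE field of ID_AA64PFR1 remains visible to the guest.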
@@ -188,6 +188,13 @@ bool kvm_arm_pmu_supported(void);
*/
bool kvm_arm_sve_supported(void);
+/**
+ * kvm_arm_mte_supported:
+ *
+ * Returns: true if KVM can enable MTE, and false otherwise.
+ */
+bool kvm_arm_mte_supported(void);
+
/**
* kvm_arm_get_max_vm_ipa_size:
* @ms: Machine state handle
@@ -214,6 +221,8 @@ void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa);
int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
+/**
+ * kvm_arm_enable_mte:
+ * @cpuobj: CPU object to enable MTE for
+ * @errp: pointer to Error*, for error reporting
+ *
+ * Enable the MTE capability on the VM (once, on the first call) and
+ * mark the CPU as having MTE enabled.
+ */
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp);
+
#else
/*
@@ -235,6 +244,11 @@ static inline bool kvm_arm_sve_supported(void)
return false;
}
+static inline bool kvm_arm_mte_supported(void)
+{
+ return false;
+}
+
/*
* These functions should never actually be called without KVM support.
*/
@@ -283,6 +297,11 @@ static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
g_assert_not_reached();
}
+static inline void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
+{
+ g_assert_not_reached();
+}
+
#endif
#endif
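A note on the !CONFIG_KVM stubs above: kvm_arm_mte_supported() compiles to a plain `return false`, while kvm_arm_enable_mte() asserts, because it must never be reached without KVM. Callers are expected to guard the call, as in this minimal sketch (assuming the usual QEMU pattern where kvm_enabled() is the constant 0 without CONFIG_KVM, so the compiler eliminates the branch and the stub only satisfies the linker):

    if (vms->mte && kvm_enabled()) {
        /* Dead code without CONFIG_KVM; never reaches the assert stub. */
        kvm_arm_enable_mte(cpuobj, &error_abort);
    }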
@@ -2213,7 +2213,7 @@ static void machvirt_init(MachineState *machine)
exit(1);
}
- if (vms->mte && (kvm_enabled() || hvf_enabled())) {
+ if (vms->mte && hvf_enabled()) {
error_report("mach-virt: %s does not support providing "
"MTE to the guest CPU",
current_accel_name());
@@ -2283,39 +2283,51 @@ static void machvirt_init(MachineState *machine)
}
if (vms->mte) {
- /* Create the memory region only once, but link to all cpus. */
- if (!tag_sysmem) {
- /*
- * The property exists only if MemTag is supported.
- * If it is, we must allocate the ram to back that up.
- */
- if (!object_property_find(cpuobj, "tag-memory")) {
- error_report("MTE requested, but not supported "
- "by the guest CPU");
+ if (tcg_enabled()) {
+ /* Create the memory region only once, but link to all cpus. */
+ if (!tag_sysmem) {
+ /*
+ * The property exists only if MemTag is supported.
+ * If it is, we must allocate the ram to back that up.
+ */
+ if (!object_property_find(cpuobj, "tag-memory")) {
+ error_report("MTE requested, but not supported "
+ "by the guest CPU");
+ exit(1);
+ }
+
+ tag_sysmem = g_new(MemoryRegion, 1);
+ memory_region_init(tag_sysmem, OBJECT(machine),
+ "tag-memory", UINT64_MAX / 32);
+
+ if (vms->secure) {
+ secure_tag_sysmem = g_new(MemoryRegion, 1);
+ memory_region_init(secure_tag_sysmem, OBJECT(machine),
+ "secure-tag-memory",
+ UINT64_MAX / 32);
+
+ /* As with ram, secure-tag takes precedence over tag. */
+ memory_region_add_subregion_overlap(secure_tag_sysmem,
+ 0, tag_sysmem, -1);
+ }
+ }
+
+ object_property_set_link(cpuobj, "tag-memory",
+ OBJECT(tag_sysmem), &error_abort);
+ if (vms->secure) {
+ object_property_set_link(cpuobj, "secure-tag-memory",
+ OBJECT(secure_tag_sysmem),
+ &error_abort);
+ }
+ } else if (kvm_enabled()) {
+ if (!kvm_arm_mte_supported()) {
+ error_report("MTE requested, but not supported by KVM");
exit(1);
}
-
- tag_sysmem = g_new(MemoryRegion, 1);
- memory_region_init(tag_sysmem, OBJECT(machine),
- "tag-memory", UINT64_MAX / 32);
-
- if (vms->secure) {
- secure_tag_sysmem = g_new(MemoryRegion, 1);
- memory_region_init(secure_tag_sysmem, OBJECT(machine),
- "secure-tag-memory", UINT64_MAX / 32);
-
- /* As with ram, secure-tag takes precedence over tag. */
- memory_region_add_subregion_overlap(secure_tag_sysmem, 0,
- tag_sysmem, -1);
- }
- }
-
- object_property_set_link(cpuobj, "tag-memory", OBJECT(tag_sysmem),
- &error_abort);
- if (vms->secure) {
- object_property_set_link(cpuobj, "secure-tag-memory",
- OBJECT(secure_tag_sysmem),
- &error_abort);
+ kvm_arm_enable_mte(cpuobj, &error_abort);
+ } else {
+ error_report("MTE requested, but not supported ");
+ exit(1);
}
}
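With this in place, MTE for a KVM guest is requested the same way as for TCG, e.g. (assuming an MTE-capable host kernel that exposes KVM_CAP_ARM_MTE):

    qemu-system-aarch64 -accel kvm -cpu host -machine virt,mte=on ...

On TCG the tag memory regions are created as before; on KVM the allocation tags live in host memory managed by the kernel, so no tag-memory region is linked to the CPUs.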
@@ -2390,14 +2390,22 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
#ifndef CONFIG_USER_ONLY
/*
- * If we do not have tag-memory provided by the machine,
- * reduce MTE support to instructions enabled at EL0.
+ * If we run with TCG and do not have tag-memory provided by
+ * the machine, then reduce MTE support to instructions enabled at EL0.
* This matches Cortex-A710 BROADCASTMTE input being LOW.
*/
- if (cpu->tag_memory == NULL) {
+ if (tcg_enabled() && cpu->tag_memory == NULL) {
cpu->isar.id_aa64pfr1 =
FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
}
+
+ /*
+ * If MTE is supported by the host but should not be enabled
+ * on the guest (i.e. mte=off), clear the guest's MTE bits.
+ */
+ if (kvm_enabled() && !cpu->kvm_mte) {
+ cpu->isar.id_aa64pfr1 =
+ FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+ }
#endif
}
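One detail worth spelling out about the hunk above: FIELD_DP64() does not modify its argument, it returns the updated value, so the result must be assigned back to cpu->isar.id_aa64pfr1. A self-contained sketch of the pattern (the FIELD() layout is repeated here purely for illustration; the real definition lives in target/arm/cpu.h):

    #include <stdint.h>
    #include "hw/registerfields.h"

    /* Assumed for illustration: ID_AA64PFR1_EL1.MTE occupies bits [11:8]. */
    FIELD(ID_AA64PFR1, MTE, 8, 4)

    static uint64_t clear_mte(uint64_t pfr1)
    {
        /* Deposits 0 into the MTE field and returns the new value. */
        return FIELD_DP64(pfr1, ID_AA64PFR1, MTE, 0);
    }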
@@ -39,6 +39,7 @@
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
#include "target/arm/gtimer.h"
+#include "migration/blocker.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
@@ -119,6 +120,21 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
if (vmfd < 0) {
goto err;
}
+
+ /*
+ * The MTE capability must be enabled by the VMM before creating
+ * any VCPUs in order to allow the MTE bits of the ID_AA64PFR1
+ * register to be probed correctly, as they are masked if MTE
+ * is not enabled.
+ */
+ if (kvm_arm_mte_supported()) {
+ KVMState kvm_state;
+
+ kvm_state.fd = kvmfd;
+ kvm_state.vmfd = vmfd;
+ kvm_vm_enable_cap(&kvm_state, KVM_CAP_ARM_MTE, 0);
+ }
+
cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
if (cpufd < 0) {
goto err;
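The stack-allocated KVMState works here because kvm_vm_enable_cap() only needs the file descriptors to issue the ioctl. Roughly, the call above boils down to this sketch (not the actual helper):

    struct kvm_enable_cap cap = {
        .cap = KVM_CAP_ARM_MTE,
        /* No flags or args are needed for this capability. */
    };
    /* Issued on the VM fd: KVM_CAP_ARM_MTE is a per-VM capability. */
    ioctl(vmfd, KVM_ENABLE_CAP, &cap);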
@@ -1793,6 +1809,11 @@ bool kvm_arm_sve_supported(void)
return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}
+bool kvm_arm_mte_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
+}
+
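For reference, kvm_check_extension() issues KVM_CHECK_EXTENSION on the /dev/kvm system fd; assuming the standard KVM ABI (a positive return value means the capability is present), the helper above amounts to:

    bool mte = ioctl(kvm_state->fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_MTE) > 0;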
QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
@@ -2417,3 +2438,40 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
}
return 0;
}
+
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
+{
+ static bool tried_to_enable;
+ static bool succeeded_to_enable;
+ Error *mte_migration_blocker = NULL;
+ ARMCPU *cpu = ARM_CPU(cpuobj);
+ int ret;
+
+ if (!tried_to_enable) {
+ /*
+ * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make
+ * sense), and we only want a single migration blocker as well.
+ */
+ tried_to_enable = true;
+
+ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0);
+ if (ret) {
+ error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE");
+ return;
+ }
+
+ /* TODO: Add migration support with MTE enabled */
+ error_setg(&mte_migration_blocker,
+ "Live migration disabled due to MTE enabled");
+ if (migrate_add_blocker(&mte_migration_blocker, errp)) {
+ error_free(mte_migration_blocker);
+ return;
+ }
+
+ succeeded_to_enable = true;
+ }
+
+ if (succeeded_to_enable) {
+ cpu->kvm_mte = true;
+ }
+}
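machvirt_init() passes &error_abort, since a failure there is fatal anyway. A hypothetical caller that wanted to degrade gracefully could instead use a local Error, e.g. (sketch):

    Error *local_err = NULL;

    kvm_arm_enable_mte(cpuobj, &local_err);
    if (local_err) {
        /* e.g. continue without MTE rather than aborting */
        warn_report_err(local_err);
    }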