@@ -184,8 +184,9 @@ struct kvm_vcpu_events {
 		__u8 serror_pending;
 		__u8 serror_has_esr;
 		__u8 ext_dabt_pending;
+		__u8 ext_iabt_pending;
 		/* Align it to 8 bytes */
-		__u8 pad[5];
+		__u8 pad[4];
 		__u64 serror_esr;
 	} exception;
 	__u32 reserved[12];
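Note that the new flag takes one byte out of the existing padding, so the userspace-visible layout does not change: serror_esr keeps its 8-byte alignment and the struct keeps its size. A build-time sketch of that invariant, assuming a <linux/kvm.h> that already contains this change:

#include <stddef.h>
#include <linux/kvm.h>

/* ext_iabt_pending reuses one byte of pad[], so the UAPI layout is
 * unchanged: serror_esr stays 8-byte aligned and the struct does not
 * grow. This only compiles against headers carrying this patch. */
_Static_assert(offsetof(struct kvm_vcpu_events, exception.serror_esr) % 8 == 0,
	       "serror_esr must stay 8-byte aligned");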
@@ -319,6 +319,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
 	case KVM_CAP_ARM_NISV_TO_USER:
 	case KVM_CAP_ARM_INJECT_EXT_DABT:
+	case KVM_CAP_ARM_INJECT_EXT_IABT:
 	case KVM_CAP_SET_GUEST_DEBUG:
 	case KVM_CAP_VCPU_ATTRIBUTES:
 	case KVM_CAP_PTP_KVM:
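Before using the new event, userspace is expected to probe for the capability. A minimal sketch against the system fd; the fallback define uses the value assigned later in this patch, for builds against older headers:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef KVM_CAP_ARM_INJECT_EXT_IABT
#define KVM_CAP_ARM_INJECT_EXT_IABT 241	/* value from this patch */
#endif

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* KVM_CHECK_EXTENSION returns > 0 when the capability exists. */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_INJECT_EXT_IABT) > 0)
		printf("external instruction abort injection supported\n");
	else
		printf("not supported\n");
	return 0;
}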
@@ -825,9 +825,9 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
 		events->exception.serror_esr = vcpu_get_vsesr(vcpu);
 
 	/*
-	 * We never return a pending ext_dabt here because we deliver it to
-	 * the virtual CPU directly when setting the event and it's no longer
-	 * 'pending' at this point.
+	 * We never return a pending ext_dabt or ext_iabt here because we
+	 * deliver it to the virtual CPU directly when setting the event
+	 * and it's no longer 'pending' at this point.
 	 */
 
 	return 0;
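A consequence is that KVM_GET_VCPU_EVENTS never reports ext_dabt_pending or ext_iabt_pending as set, so a VMM has nothing to save or restore for them across migration. A minimal read-back sketch, assuming vcpu_fd is an open vCPU file descriptor on a patched kernel:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* After KVM_SET_VCPU_EVENTS has queued an external abort, the abort is
 * delivered to the vCPU immediately, so a read-back reports it clear. */
static int ext_abt_reads_back_clear(int vcpu_fd)
{
	struct kvm_vcpu_events ev;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev) < 0)
		return -1;

	/* Both are expected to be 0 here, per the comment above. */
	return ev.exception.ext_dabt_pending == 0 &&
	       ev.exception.ext_iabt_pending == 0;
}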
@@ -839,6 +839,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 	bool serror_pending = events->exception.serror_pending;
 	bool has_esr = events->exception.serror_has_esr;
 	bool ext_dabt_pending = events->exception.ext_dabt_pending;
+	bool ext_iabt_pending = events->exception.ext_iabt_pending;
 
 	if (serror_pending && has_esr) {
 		if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
@@ -852,8 +853,14 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 		kvm_inject_vabt(vcpu);
 	}
 
+	/* DABT and IABT cannot happen at the same time. */
+	if (ext_dabt_pending && ext_iabt_pending)
+		return -EINVAL;
+
 	if (ext_dabt_pending)
 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+	else if (ext_iabt_pending)
+		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 
 	return 0;
 }
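From userspace, injecting the new abort is a single KVM_SET_VCPU_EVENTS call. A minimal sketch, assuming vcpu_fd is an open vCPU file descriptor and a kernel plus headers carrying this patch; the helper name is hypothetical:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: inject an external instruction abort into a
 * vCPU, e.g. in response to an execution fault on failing memory. */
static int inject_ext_iabt(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	/* Setting ext_dabt_pending and ext_iabt_pending together would
	 * be rejected with -EINVAL, per the check above. */
	events.exception.ext_iabt_pending = 1;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}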
@@ -941,6 +941,7 @@ struct kvm_enable_cap {
 #define KVM_CAP_X86_GUEST_MODE 238
 #define KVM_CAP_ARM_WRITABLE_IMP_ID_REGS 239
 #define KVM_CAP_ARM_SEA_TO_USER 240
+#define KVM_CAP_ARM_INJECT_EXT_IABT 241
 
 struct kvm_irq_routing_irqchip {
 	__u32 irqchip;