@@ -84,6 +84,8 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);
void kvm_host_pmu_init(struct arm_pmu *pmu);
+bool kvm_pmu_emul_overflow_status(struct kvm_vcpu *vcpu);
+bool kvm_pmu_part_overflow_status(struct kvm_vcpu *vcpu);
#define kvm_vcpu_has_pmu(vcpu) \
(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
@@ -268,7 +268,7 @@ void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
* counter where the values of the global enable control, PMOVSSET_EL0[n], and
* PMINTENSET_EL1[n] are all 1.
*/
-bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
+bool kvm_pmu_emul_overflow_status(struct kvm_vcpu *vcpu)
{
u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
@@ -405,7 +405,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
ARMV8_PMUV3_PERFCTR_CHAIN);
- if (kvm_pmu_overflow_status(vcpu)) {
+ if (kvm_pmu_emul_overflow_status(vcpu)) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
if (!in_nmi())
@@ -260,7 +260,7 @@ void kvm_pmu_load(struct kvm_vcpu *vcpu)
write_pmcr(val);
/*
- * Loading these registers is tricky because of
+ * Loading these registers is more intricate because:
* 1. Applying only the bits for guest counters (indicated by mask)
* 2. Setting and clearing are different registers
*/
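
As an illustration of the two points in that comment, here is a minimal sketch of the load pattern. This is hedged, not code from the patch: write_pmcntenset() and write_pmcntenclr() are assumed accessor names following the read_pmintenset()/write_pmcr() convention used elsewhere in this series.

	u64 mask = kvm_pmu_guest_counter_mask(pmu);
	u64 val = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	/*
	 * PMCNTENSET_EL0/PMCNTENCLR_EL0 are write-1-to-set and
	 * write-1-to-clear, so the saved value cannot be written back
	 * as-is: set the guest bits that should be 1, clear the guest
	 * bits that should be 0, and never touch host-owned counters.
	 */
	write_pmcntenset(val & mask);
	write_pmcntenclr(~val & mask);
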
@@ -334,5 +334,25 @@ void kvm_pmu_handle_guest_irq(u64 govf)
if (!vcpu)
return;
- __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= govf;
+ __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, govf);
+}
+
+/**
+ * kvm_pmu_part_overflow_status() - Determine if any guest counters have overflowed
+ * @vcpu: Pointer to struct kvm_vcpu
+ *
+ * Determine if any guest counters have overflowed and therefore an
+ * IRQ needs to be injected into the guest.
+ *
+ * Return: True if there was an overflow, false otherwise
+ */
+bool kvm_pmu_part_overflow_status(struct kvm_vcpu *vcpu)
+{
+ struct arm_pmu *pmu = vcpu->kvm->arch.arm_pmu;
+ u64 mask = kvm_pmu_guest_counter_mask(pmu);
+ u64 pmovs = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
+ u64 pmint = read_pmintenset();
+ u64 pmcr = read_pmcr();
+
+ return (pmcr & ARMV8_PMU_PMCR_E) && (mask & pmovs & pmint);
}
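
A worked example of the predicate above, with illustrative values only (not code from this patch): suppose the guest owns counters 0-3, counters 2 and 5 have overflowed, and only counter 2's interrupt is enabled.

	mask  = GENMASK(3, 0);		/* guest-owned counters */
	pmovs = BIT(2) | BIT(5);	/* counter 5 is host-owned */
	pmint = BIT(2);

	/*
	 * mask & pmovs & pmint == BIT(2), so if PMCR_EL0.E is set the
	 * function returns true and an interrupt is injected; the
	 * overflow on host-owned counter 5 is ignored.
	 */
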
@@ -407,7 +407,11 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
bool overflow;
- overflow = kvm_pmu_overflow_status(vcpu);
+ if (kvm_vcpu_pmu_is_partitioned(vcpu))
+ overflow = kvm_pmu_part_overflow_status(vcpu);
+ else
+ overflow = kvm_pmu_emul_overflow_status(vcpu);
+
if (pmu->irq_level == overflow)
return;
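
For review context: when overflow and pmu->irq_level differ, the remainder of kvm_pmu_update_state() (unchanged by this patch, sketched here from memory as an approximation rather than quoted) latches the new level and forwards it to the in-kernel irqchip:

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
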
@@ -683,6 +687,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
return -EBUSY;
kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
+
vcpu->arch.pmu.irq_num = irq;
return 0;
}
When we re-enter the VM after handling a PMU interrupt, determine
whether any of the guest's counters overflowed and, if so, inject an
interrupt into the guest.

Signed-off-by: Colton Lewis <coltonlewis@google.com>
---
 arch/arm64/include/asm/kvm_pmu.h |  2 ++
 arch/arm64/kvm/pmu-emul.c        |  4 ++--
 arch/arm64/kvm/pmu-part.c        | 24 ++++++++++++++++++++++--
 arch/arm64/kvm/pmu.c             |  7 ++++++-
 4 files changed, 32 insertions(+), 5 deletions(-)