@@ -464,6 +464,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
val &= ~ARMV8_PMCR_MASK;
val |= p->regval & ARMV8_PMCR_MASK;
vcpu_sys_reg(vcpu, r->reg) = val;
+ kvm_pmu_handle_pmcr(vcpu, val);
} else {
/* PMCR.P & PMCR.C are RAZ */
val = vcpu_sys_reg(vcpu, r->reg)
@@ -41,6 +41,7 @@ void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
u64 select_idx);
#else
@@ -59,6 +60,7 @@ void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
+void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} /* no-op stub for the #else (PMU-less) branch; NOTE(review): sibling stubs here lack static inline too — confirm the header really omits it */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
u64 select_idx) {}
#endif
@@ -189,6 +189,48 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
}
}
+/**
+ * kvm_pmu_handle_pmcr - apply the side effects of a guest PMCR_EL0 write
+ * @vcpu: The vcpu pointer
+ * @val: the PMCR value written by the guest; the E, C, P and LC bits are acted on here
+ */
+void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc;
+ u64 mask;
+ int i;
+
+ mask = kvm_pmu_valid_counter_mask(vcpu); /* mask of counters implemented for this vcpu */
+ if (val & ARMV8_PMCR_E) { /* PMCR.E set: start only the counters the guest enabled in PMCNTENSET */
+ kvm_pmu_enable_counter(vcpu,
+ vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
+ } else { /* PMCR.E clear: stop every implemented counter */
+ kvm_pmu_disable_counter(vcpu, mask);
+ }
+
+ if (val & ARMV8_PMCR_C) { /* PMCR.C: reset the cycle counter to zero */
+ pmc = &pmu->pmc[ARMV8_CYCLE_IDX];
+ if (pmc->perf_event)
+ local64_set(&pmc->perf_event->count, 0); /* NOTE(review): writes perf internals directly — confirm vs. a perf API / set_counter_value helper */
+ vcpu_sys_reg(vcpu, PMCCNTR_EL0) = 0; /* clear the architectural shadow too */
+ }
+
+ if (val & ARMV8_PMCR_P) { /* PMCR.P: reset event counters; cycle counter deliberately excluded (handled by PMCR.C) */
+ for (i = 0; i < ARMV8_CYCLE_IDX; i++) {
+ pmc = &pmu->pmc[i];
+ if (pmc->perf_event)
+ local64_set(&pmc->perf_event->count, 0); /* same direct perf-internal write as above */
+ vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = 0;
+ }
+ }
+
+ if (val & ARMV8_PMCR_LC) { /* PMCR.LC: widen cycle counter wrap mask to full 64 bits */
+ pmc = &pmu->pmc[ARMV8_CYCLE_IDX];
+ pmc->bitmask = 0xffffffffffffffffUL;
+ }
+}
+
static inline bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu,
u64 select_idx)
{