
[v10,10/21] KVM: ARM64: Add access handler for PMCNTENSET and PMCNTENCLR register

Message ID 1453866709-20324-11-git-send-email-zhaoshenglong@huawei.com
State Superseded
Headers show

Commit Message

Shannon Zhao Jan. 27, 2016, 3:51 a.m. UTC
From: Shannon Zhao <shannon.zhao@linaro.org>


Since the reset value of PMCNTENSET and PMCNTENCLR is UNKNOWN, use
reset_unknown as their reset handler. Add a handler to emulate writing
to the PMCNTENSET or PMCNTENCLR register.

When writing to PMCNTENSET, call perf_event_enable to enable the perf
event. When writing to PMCNTENCLR, call perf_event_disable to disable
the perf event.

Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>

---
 arch/arm64/kvm/sys_regs.c | 35 +++++++++++++++++++++++---
 include/kvm/arm_pmu.h     |  9 +++++++
 virt/kvm/arm/pmu.c        | 63 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 103 insertions(+), 4 deletions(-)

-- 
2.0.4




Comments

Andrew Jones Jan. 28, 2016, 6:08 p.m. UTC | #1
On Wed, Jan 27, 2016 at 11:51:38AM +0800, Shannon Zhao wrote:
> From: Shannon Zhao <shannon.zhao@linaro.org>
> 
> Since the reset value of PMCNTENSET and PMCNTENCLR is UNKNOWN, use
> reset_unknown for its reset handler. Add a handler to emulate writing
> PMCNTENSET or PMCNTENCLR register.
> 
> When writing to PMCNTENSET, call perf_event_enable to enable the perf
> event. When writing to PMCNTENCLR, call perf_event_disable to disable
> the perf event.
> 
> Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
> ---
>  arch/arm64/kvm/sys_regs.c | 35 +++++++++++++++++++++++---
>  include/kvm/arm_pmu.h     |  9 +++++++
>  virt/kvm/arm/pmu.c        | 63 +++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 103 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 6a50262..d43a9a4 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -603,6 +603,33 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
>  	return true;
>  }
>  
> +static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> +			   const struct sys_reg_desc *r)
> +{
> +	u64 val, mask;
> +
> +	if (!kvm_arm_pmu_v3_ready(vcpu))
> +		return trap_raz_wi(vcpu, p, r);
> +
> +	mask = kvm_pmu_valid_counter_mask(vcpu);
> +	if (p->is_write) {
> +		val = p->regval & mask;
> +		if (r->Op2 & 0x1) {
> +			/* accessing PMCNTENSET_EL0 */
> +			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
> +			kvm_pmu_enable_counter(vcpu, val);
> +		} else {
> +			/* accessing PMCNTENCLR_EL0 */
> +			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
> +			kvm_pmu_disable_counter(vcpu, val);
> +		}
> +	} else {
> +		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
> +	}
> +
> +	return true;
> +}
> +
>  /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
>  #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
>  	/* DBGBVRn_EL1 */						\
> @@ -804,10 +831,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
>  	  access_pmcr, reset_pmcr, },
>  	/* PMCNTENSET_EL0 */
>  	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
> -	  trap_raz_wi },
> +	  access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
>  	/* PMCNTENCLR_EL0 */
>  	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
> -	  trap_raz_wi },
> +	  access_pmcnten, NULL, PMCNTENSET_EL0 },

I don't think the reg field is needed, as the reset handler isn't
defined and the access handler doesn't use it. Oh, and shouldn't it be
PMCNTENCLR_EL0 anyway?

>  	/* PMOVSCLR_EL0 */
>  	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
>  	  trap_raz_wi },
> @@ -1149,8 +1176,8 @@ static const struct sys_reg_desc cp15_regs[] = {
>  
>  	/* PMU */
>  	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
> -	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
> -	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
> +	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
> +	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
>  	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
>  	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
>  	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index ee4b15c..a7e5485 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -37,6 +37,9 @@ struct kvm_pmu {
>  
>  #define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
>  u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
> +u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
> +void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
> +void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
>  void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
>  				    u64 select_idx);
>  #else
> @@ -49,6 +52,12 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
>  {
>  	return 0;
>  }
> +static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
> +{
> +	return 0;
> +}
> +static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
> +static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
>  static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
>  						  u64 data, u64 select_idx) {}
>  #endif
> diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
> index 673ec55..0873977 100644
> --- a/virt/kvm/arm/pmu.c
> +++ b/virt/kvm/arm/pmu.c
> @@ -68,6 +68,69 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
>  	}
>  }
>  
> +u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
> +{
> +	u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMCR_N_SHIFT;
> +
> +	val &= ARMV8_PMCR_N_MASK;
> +	return GENMASK(val - 1, 0) | BIT(ARMV8_CYCLE_IDX);

val can be zero if PMCR.N is zero (meaning only the cycle counter is
implemented). We should confirm it's not zero before calling GENMASK.

> +}
> +
> +/**
> + * kvm_pmu_enable_counter - enable selected PMU counter
> + * @vcpu: The vcpu pointer
> + * @val: the value guest writes to PMCNTENSET register
> + *
> + * Call perf_event_enable to start counting the perf event
> + */
> +void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
> +{
> +	int i;
> +	struct kvm_pmu *pmu = &vcpu->arch.pmu;
> +	struct kvm_pmc *pmc;
> +
> +	if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMCR_E) || !val)
> +		return;
> +
> +	for (i = 0; i < ARMV8_MAX_COUNTERS; i++) {
> +		if (!(val & BIT(i)))
> +			continue;
> +
> +		pmc = &pmu->pmc[i];
> +		if (pmc->perf_event) {
> +			perf_event_enable(pmc->perf_event);
> +			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
> +				kvm_debug("fail to enable perf event\n");
> +		}
> +	}
> +}
> +
> +/**
> + * kvm_pmu_disable_counter - disable selected PMU counter
> + * @vcpu: The vcpu pointer
> + * @val: the value guest writes to PMCNTENCLR register
> + *
> + * Call perf_event_disable to stop counting the perf event
> + */
> +void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
> +{
> +	int i;
> +	struct kvm_pmu *pmu = &vcpu->arch.pmu;
> +	struct kvm_pmc *pmc;
> +
> +	if (!val)
> +		return;
> +
> +	for (i = 0; i < ARMV8_MAX_COUNTERS; i++) {
> +		if (!(val & BIT(i)))
> +			continue;
> +
> +		pmc = &pmu->pmc[i];
> +		if (pmc->perf_event)
> +			perf_event_disable(pmc->perf_event);
> +	}
> +}
> +
>  static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
>  {
>  	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMCR_E) &&
> -- 
> 2.0.4

Andrew Jones Jan. 28, 2016, 6:12 p.m. UTC | #2
On Thu, Jan 28, 2016 at 07:08:56PM +0100, Andrew Jones wrote:
> On Wed, Jan 27, 2016 at 11:51:38AM +0800, Shannon Zhao wrote:
> > @@ -804,10 +831,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
> >  	  access_pmcr, reset_pmcr, },
> >  	/* PMCNTENSET_EL0 */
> >  	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
> > -	  trap_raz_wi },
> > +	  access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
> >  	/* PMCNTENCLR_EL0 */
> >  	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
> > -	  trap_raz_wi },
> > +	  access_pmcnten, NULL, PMCNTENSET_EL0 },
> 
> I don't think the reg field is needed, as the reset handler isn't
> defined and the access handler doesn't use it. Oh, and shouldn't it be
> PMCNTENCLR_EL0 anyway?

eh.. nevermind. Of course we just have the one sys_reg for both set/clr...


Patch

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 6a50262..d43a9a4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -603,6 +603,33 @@  static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	u64 val, mask;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	mask = kvm_pmu_valid_counter_mask(vcpu);
+	if (p->is_write) {
+		val = p->regval & mask;
+		if (r->Op2 & 0x1) {
+			/* accessing PMCNTENSET_EL0 */
+			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+			kvm_pmu_enable_counter(vcpu, val);
+		} else {
+			/* accessing PMCNTENCLR_EL0 */
+			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+			kvm_pmu_disable_counter(vcpu, val);
+		}
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
+	}
+
+	return true;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	/* DBGBVRn_EL1 */						\
@@ -804,10 +831,10 @@  static const struct sys_reg_desc sys_reg_descs[] = {
 	  access_pmcr, reset_pmcr, },
 	/* PMCNTENSET_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
-	  trap_raz_wi },
+	  access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
 	/* PMCNTENCLR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
-	  trap_raz_wi },
+	  access_pmcnten, NULL, PMCNTENSET_EL0 },
 	/* PMOVSCLR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
 	  trap_raz_wi },
@@ -1149,8 +1176,8 @@  static const struct sys_reg_desc cp15_regs[] = {
 
 	/* PMU */
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index ee4b15c..a7e5485 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -37,6 +37,9 @@  struct kvm_pmu {
 
 #define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
+u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 				    u64 select_idx);
 #else
@@ -49,6 +52,12 @@  static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 {
 	return 0;
 }
+static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
 						  u64 data, u64 select_idx) {}
 #endif
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 673ec55..0873977 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -68,6 +68,69 @@  static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 	}
 }
 
+u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+{
+	u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMCR_N_SHIFT;
+
+	val &= ARMV8_PMCR_N_MASK;
+	return GENMASK(val - 1, 0) | BIT(ARMV8_CYCLE_IDX);
+}
+
+/**
+ * kvm_pmu_enable_counter - enable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENSET register
+ *
+ * Call perf_event_enable to start counting the perf event
+ */
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
+{
+	int i;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc;
+
+	if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMCR_E) || !val)
+		return;
+
+	for (i = 0; i < ARMV8_MAX_COUNTERS; i++) {
+		if (!(val & BIT(i)))
+			continue;
+
+		pmc = &pmu->pmc[i];
+		if (pmc->perf_event) {
+			perf_event_enable(pmc->perf_event);
+			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+				kvm_debug("fail to enable perf event\n");
+		}
+	}
+}
+
+/**
+ * kvm_pmu_disable_counter - disable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENCLR register
+ *
+ * Call perf_event_disable to stop counting the perf event
+ */
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
+{
+	int i;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc;
+
+	if (!val)
+		return;
+
+	for (i = 0; i < ARMV8_MAX_COUNTERS; i++) {
+		if (!(val & BIT(i)))
+			continue;
+
+		pmc = &pmu->pmc[i];
+		if (pmc->perf_event)
+			perf_event_disable(pmc->perf_event);
+	}
+}
+
 static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
 {
 	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMCR_E) &&