diff mbox

[v10,08/21] KVM: ARM64: Add access handler for event type register

Message ID 1453866709-20324-9-git-send-email-zhaoshenglong@huawei.com
State Superseded
Headers show

Commit Message

Shannon Zhao Jan. 27, 2016, 3:51 a.m. UTC
From: Shannon Zhao <shannon.zhao@linaro.org>


These kinds of registers include PMEVTYPERn, PMCCFILTR and PMXEVTYPER,
which is mapped to PMEVTYPERn or PMCCFILTR.

The access handler translates all aarch32 register offsets to aarch64
ones and uses vcpu_sys_reg() to access their values to avoid taking care
of big endian.

When writing to these registers, create a perf_event for the selected
event type.

Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>

---
 arch/arm64/kvm/sys_regs.c | 140 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 138 insertions(+), 2 deletions(-)

-- 
2.0.4



_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

Comments

Andrew Jones Jan. 28, 2016, 8:11 p.m. UTC | #1
On Wed, Jan 27, 2016 at 11:51:36AM +0800, Shannon Zhao wrote:
> From: Shannon Zhao <shannon.zhao@linaro.org>

> 

> These kind of registers include PMEVTYPERn, PMCCFILTR and PMXEVTYPER

> which is mapped to PMEVTYPERn or PMCCFILTR.

> 

> The access handler translates all aarch32 register offsets to aarch64

> ones and uses vcpu_sys_reg() to access their values to avoid taking care

> of big endian.

> 

> When writing to these registers, create a perf_event for the selected

> event type.

> 

> Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>

> ---

>  arch/arm64/kvm/sys_regs.c | 140 +++++++++++++++++++++++++++++++++++++++++++++-

>  1 file changed, 138 insertions(+), 2 deletions(-)

> 

> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c

> index 06257e2..298ae94 100644

> --- a/arch/arm64/kvm/sys_regs.c

> +++ b/arch/arm64/kvm/sys_regs.c

> @@ -513,6 +513,54 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

>  	return true;

>  }

>  

> +static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)

> +{

> +	u64 pmcr, val;

> +

> +	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);

> +	val = (pmcr >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

> +	if (idx >= val && idx != ARMV8_CYCLE_IDX)

> +		return false;

> +

> +	return true;

> +}

> +

> +static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

> +			       const struct sys_reg_desc *r)

> +{

> +	u64 idx, reg;

> +

> +	if (!kvm_arm_pmu_v3_ready(vcpu))

> +		return trap_raz_wi(vcpu, p, r);

> +

> +	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {

> +		/* PMXEVTYPER_EL0 */

> +		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_COUNTER_MASK;

> +		reg = PMEVTYPER0_EL0 + idx;

> +	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {

> +		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

> +		if (idx == ARMV8_CYCLE_IDX)

> +			reg = PMCCFILTR_EL0;

> +		else

> +			/* PMEVTYPERn_EL0 */

> +			reg = PMEVTYPER0_EL0 + idx;

> +	} else {

> +		BUG();

> +	}

> +

> +	if (!pmu_counter_idx_valid(vcpu, idx))

> +		return false;

> +

> +	if (p->is_write) {

> +		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);

> +		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_EVTYPE_MASK;

> +	} else {

> +		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_EVTYPE_MASK;


Related to my comment in 5/21. Why should we need to mask it here when
reading it, since it was masked on writing?

> +	}

> +

> +	return true;

> +}

> +

>  /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */

>  #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\

>  	/* DBGBVRn_EL1 */						\

> @@ -528,6 +576,13 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

>  	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\

>  	  trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }

>  

> +/* Macro to expand the PMEVTYPERn_EL0 register */

> +#define PMU_PMEVTYPER_EL0(n)						\

> +	/* PMEVTYPERn_EL0 */						\

> +	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\

> +	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\

> +	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

> +

>  /*

>   * Architected system registers.

>   * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2

> @@ -724,7 +779,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {

>  	  trap_raz_wi },

>  	/* PMXEVTYPER_EL0 */

>  	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),

> -	  trap_raz_wi },

> +	  access_pmu_evtyper },

>  	/* PMXEVCNTR_EL0 */

>  	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),

>  	  trap_raz_wi },

> @@ -742,6 +797,45 @@ static const struct sys_reg_desc sys_reg_descs[] = {

>  	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),

>  	  NULL, reset_unknown, TPIDRRO_EL0 },

>  

> +	/* PMEVTYPERn_EL0 */

> +	PMU_PMEVTYPER_EL0(0),

> +	PMU_PMEVTYPER_EL0(1),

> +	PMU_PMEVTYPER_EL0(2),

> +	PMU_PMEVTYPER_EL0(3),

> +	PMU_PMEVTYPER_EL0(4),

> +	PMU_PMEVTYPER_EL0(5),

> +	PMU_PMEVTYPER_EL0(6),

> +	PMU_PMEVTYPER_EL0(7),

> +	PMU_PMEVTYPER_EL0(8),

> +	PMU_PMEVTYPER_EL0(9),

> +	PMU_PMEVTYPER_EL0(10),

> +	PMU_PMEVTYPER_EL0(11),

> +	PMU_PMEVTYPER_EL0(12),

> +	PMU_PMEVTYPER_EL0(13),

> +	PMU_PMEVTYPER_EL0(14),

> +	PMU_PMEVTYPER_EL0(15),

> +	PMU_PMEVTYPER_EL0(16),

> +	PMU_PMEVTYPER_EL0(17),

> +	PMU_PMEVTYPER_EL0(18),

> +	PMU_PMEVTYPER_EL0(19),

> +	PMU_PMEVTYPER_EL0(20),

> +	PMU_PMEVTYPER_EL0(21),

> +	PMU_PMEVTYPER_EL0(22),

> +	PMU_PMEVTYPER_EL0(23),

> +	PMU_PMEVTYPER_EL0(24),

> +	PMU_PMEVTYPER_EL0(25),

> +	PMU_PMEVTYPER_EL0(26),

> +	PMU_PMEVTYPER_EL0(27),

> +	PMU_PMEVTYPER_EL0(28),

> +	PMU_PMEVTYPER_EL0(29),

> +	PMU_PMEVTYPER_EL0(30),

> +	/* PMCCFILTR_EL0

> +	 * This register resets as unknown in 64bit mode while it resets as zero

> +	 * in 32bit mode. Here we choose to reset it as zero for consistency.

> +	 */

> +	{ Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),

> +	  access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

> +

>  	/* DACR32_EL2 */

>  	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),

>  	  NULL, reset_unknown, DACR32_EL2 },

> @@ -931,6 +1025,13 @@ static const struct sys_reg_desc cp14_64_regs[] = {

>  	{ Op1( 0), CRm( 2), .access = trap_raz_wi },

>  };

>  

> +/* Macro to expand the PMEVTYPERn register */

> +#define PMU_PMEVTYPER(n)						\

> +	/* PMEVTYPERn */						\

> +	{ Op1(0), CRn(0b1110),						\

> +	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\

> +	  access_pmu_evtyper }

> +

>  /*

>   * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,

>   * depending on the way they are accessed (as a 32bit or a 64bit

> @@ -967,7 +1068,7 @@ static const struct sys_reg_desc cp15_regs[] = {

>  	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },

>  	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },

>  	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },

> -	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },

> +	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },

>  	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },

>  	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },

>  	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },

> @@ -982,6 +1083,41 @@ static const struct sys_reg_desc cp15_regs[] = {

>  	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },

>  

>  	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

> +

> +	/* PMEVTYPERn */

> +	PMU_PMEVTYPER(0),

> +	PMU_PMEVTYPER(1),

> +	PMU_PMEVTYPER(2),

> +	PMU_PMEVTYPER(3),

> +	PMU_PMEVTYPER(4),

> +	PMU_PMEVTYPER(5),

> +	PMU_PMEVTYPER(6),

> +	PMU_PMEVTYPER(7),

> +	PMU_PMEVTYPER(8),

> +	PMU_PMEVTYPER(9),

> +	PMU_PMEVTYPER(10),

> +	PMU_PMEVTYPER(11),

> +	PMU_PMEVTYPER(12),

> +	PMU_PMEVTYPER(13),

> +	PMU_PMEVTYPER(14),

> +	PMU_PMEVTYPER(15),

> +	PMU_PMEVTYPER(16),

> +	PMU_PMEVTYPER(17),

> +	PMU_PMEVTYPER(18),

> +	PMU_PMEVTYPER(19),

> +	PMU_PMEVTYPER(20),

> +	PMU_PMEVTYPER(21),

> +	PMU_PMEVTYPER(22),

> +	PMU_PMEVTYPER(23),

> +	PMU_PMEVTYPER(24),

> +	PMU_PMEVTYPER(25),

> +	PMU_PMEVTYPER(26),

> +	PMU_PMEVTYPER(27),

> +	PMU_PMEVTYPER(28),

> +	PMU_PMEVTYPER(29),

> +	PMU_PMEVTYPER(30),

> +	/* PMCCFILTR */

> +	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },

>  };

>  

>  static const struct sys_reg_desc cp15_64_regs[] = {

> -- 

> 2.0.4

> 

> 

> --

> To unsubscribe from this list: send the line "unsubscribe kvm" in

> the body of a message to majordomo@vger.kernel.org

> More majordomo info at  http://vger.kernel.org/majordomo-info.html


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
Andrew Jones Jan. 29, 2016, 11:25 a.m. UTC | #2
On Fri, Jan 29, 2016 at 09:42:00AM +0800, Shannon Zhao wrote:
> 

> 

> On 2016/1/29 4:11, Andrew Jones wrote:

> > On Wed, Jan 27, 2016 at 11:51:36AM +0800, Shannon Zhao wrote:

> >> > From: Shannon Zhao <shannon.zhao@linaro.org>

> >> > 

> >> > These kind of registers include PMEVTYPERn, PMCCFILTR and PMXEVTYPER

> >> > which is mapped to PMEVTYPERn or PMCCFILTR.

> >> > 

> >> > The access handler translates all aarch32 register offsets to aarch64

> >> > ones and uses vcpu_sys_reg() to access their values to avoid taking care

> >> > of big endian.

> >> > 

> >> > When writing to these registers, create a perf_event for the selected

> >> > event type.

> >> > 

> >> > Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>

> >> > ---

> >> >  arch/arm64/kvm/sys_regs.c | 140 +++++++++++++++++++++++++++++++++++++++++++++-

> >> >  1 file changed, 138 insertions(+), 2 deletions(-)

> >> > 

> >> > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c

> >> > index 06257e2..298ae94 100644

> >> > --- a/arch/arm64/kvm/sys_regs.c

> >> > +++ b/arch/arm64/kvm/sys_regs.c

> >> > @@ -513,6 +513,54 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

> >> >  	return true;

> >> >  }

> >> >  

> >> > +static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)

> >> > +{

> >> > +	u64 pmcr, val;

> >> > +

> >> > +	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);

> >> > +	val = (pmcr >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

> >> > +	if (idx >= val && idx != ARMV8_CYCLE_IDX)

> >> > +		return false;

> >> > +

> >> > +	return true;

> >> > +}

> >> > +

> >> > +static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

> >> > +			       const struct sys_reg_desc *r)

> >> > +{

> >> > +	u64 idx, reg;

> >> > +

> >> > +	if (!kvm_arm_pmu_v3_ready(vcpu))

> >> > +		return trap_raz_wi(vcpu, p, r);

> >> > +

> >> > +	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {

> >> > +		/* PMXEVTYPER_EL0 */

> >> > +		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_COUNTER_MASK;

> >> > +		reg = PMEVTYPER0_EL0 + idx;

> >> > +	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {

> >> > +		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

> >> > +		if (idx == ARMV8_CYCLE_IDX)

> >> > +			reg = PMCCFILTR_EL0;

> >> > +		else

> >> > +			/* PMEVTYPERn_EL0 */

> >> > +			reg = PMEVTYPER0_EL0 + idx;

> >> > +	} else {

> >> > +		BUG();

> >> > +	}

> >> > +

> >> > +	if (!pmu_counter_idx_valid(vcpu, idx))

> >> > +		return false;

> >> > +

> >> > +	if (p->is_write) {

> >> > +		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);

> >> > +		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_EVTYPE_MASK;

> >> > +	} else {

> >> > +		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_EVTYPE_MASK;

> > Related to my comment in 5/21. Why should we need to mask it here when

> > reading it, since it was masked on writing?

> > 

> But what if guest reads this register before writing to it?


Oh, I see. The need comes from the use of the reset_unknown reset function.
It might be nice to have a reset_unknown_mask function that uses r->val
as the mask, as there are many registers that have RES0/1 and/or RO fields.

Thanks,
drew

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
diff mbox

Patch

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 06257e2..298ae94 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -513,6 +513,54 @@  static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }
 
+static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
+{
+	u64 pmcr, val;
+
+	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
+	val = (pmcr >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
+	if (idx >= val && idx != ARMV8_CYCLE_IDX)
+		return false;
+
+	return true;
+}
+
+static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			       const struct sys_reg_desc *r)
+{
+	u64 idx, reg;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
+		/* PMXEVTYPER_EL0 */
+		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_COUNTER_MASK;
+		reg = PMEVTYPER0_EL0 + idx;
+	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
+		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+		if (idx == ARMV8_CYCLE_IDX)
+			reg = PMCCFILTR_EL0;
+		else
+			/* PMEVTYPERn_EL0 */
+			reg = PMEVTYPER0_EL0 + idx;
+	} else {
+		BUG();
+	}
+
+	if (!pmu_counter_idx_valid(vcpu, idx))
+		return false;
+
+	if (p->is_write) {
+		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
+		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_EVTYPE_MASK;
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_EVTYPE_MASK;
+	}
+
+	return true;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	/* DBGBVRn_EL1 */						\
@@ -528,6 +576,13 @@  static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
 	  trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
 
+/* Macro to expand the PMEVTYPERn_EL0 register */
+#define PMU_PMEVTYPER_EL0(n)						\
+	/* PMEVTYPERn_EL0 */						\
+	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
+	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
+	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -724,7 +779,7 @@  static const struct sys_reg_desc sys_reg_descs[] = {
 	  trap_raz_wi },
 	/* PMXEVTYPER_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
-	  trap_raz_wi },
+	  access_pmu_evtyper },
 	/* PMXEVCNTR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
 	  trap_raz_wi },
@@ -742,6 +797,45 @@  static const struct sys_reg_desc sys_reg_descs[] = {
 	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
 	  NULL, reset_unknown, TPIDRRO_EL0 },
 
+	/* PMEVTYPERn_EL0 */
+	PMU_PMEVTYPER_EL0(0),
+	PMU_PMEVTYPER_EL0(1),
+	PMU_PMEVTYPER_EL0(2),
+	PMU_PMEVTYPER_EL0(3),
+	PMU_PMEVTYPER_EL0(4),
+	PMU_PMEVTYPER_EL0(5),
+	PMU_PMEVTYPER_EL0(6),
+	PMU_PMEVTYPER_EL0(7),
+	PMU_PMEVTYPER_EL0(8),
+	PMU_PMEVTYPER_EL0(9),
+	PMU_PMEVTYPER_EL0(10),
+	PMU_PMEVTYPER_EL0(11),
+	PMU_PMEVTYPER_EL0(12),
+	PMU_PMEVTYPER_EL0(13),
+	PMU_PMEVTYPER_EL0(14),
+	PMU_PMEVTYPER_EL0(15),
+	PMU_PMEVTYPER_EL0(16),
+	PMU_PMEVTYPER_EL0(17),
+	PMU_PMEVTYPER_EL0(18),
+	PMU_PMEVTYPER_EL0(19),
+	PMU_PMEVTYPER_EL0(20),
+	PMU_PMEVTYPER_EL0(21),
+	PMU_PMEVTYPER_EL0(22),
+	PMU_PMEVTYPER_EL0(23),
+	PMU_PMEVTYPER_EL0(24),
+	PMU_PMEVTYPER_EL0(25),
+	PMU_PMEVTYPER_EL0(26),
+	PMU_PMEVTYPER_EL0(27),
+	PMU_PMEVTYPER_EL0(28),
+	PMU_PMEVTYPER_EL0(29),
+	PMU_PMEVTYPER_EL0(30),
+	/* PMCCFILTR_EL0
+	 * This register resets as unknown in 64bit mode while it resets as zero
+	 * in 32bit mode. Here we choose to reset it as zero for consistency.
+	 */
+	{ Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
+	  access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+
 	/* DACR32_EL2 */
 	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
 	  NULL, reset_unknown, DACR32_EL2 },
@@ -931,6 +1025,13 @@  static const struct sys_reg_desc cp14_64_regs[] = {
 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
 };
 
+/* Macro to expand the PMEVTYPERn register */
+#define PMU_PMEVTYPER(n)						\
+	/* PMEVTYPERn */						\
+	{ Op1(0), CRn(0b1110),						\
+	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
+	  access_pmu_evtyper }
+
 /*
  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
  * depending on the way they are accessed (as a 32bit or a 64bit
@@ -967,7 +1068,7 @@  static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
 	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
 	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
@@ -982,6 +1083,41 @@  static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
 
 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+
+	/* PMEVTYPERn */
+	PMU_PMEVTYPER(0),
+	PMU_PMEVTYPER(1),
+	PMU_PMEVTYPER(2),
+	PMU_PMEVTYPER(3),
+	PMU_PMEVTYPER(4),
+	PMU_PMEVTYPER(5),
+	PMU_PMEVTYPER(6),
+	PMU_PMEVTYPER(7),
+	PMU_PMEVTYPER(8),
+	PMU_PMEVTYPER(9),
+	PMU_PMEVTYPER(10),
+	PMU_PMEVTYPER(11),
+	PMU_PMEVTYPER(12),
+	PMU_PMEVTYPER(13),
+	PMU_PMEVTYPER(14),
+	PMU_PMEVTYPER(15),
+	PMU_PMEVTYPER(16),
+	PMU_PMEVTYPER(17),
+	PMU_PMEVTYPER(18),
+	PMU_PMEVTYPER(19),
+	PMU_PMEVTYPER(20),
+	PMU_PMEVTYPER(21),
+	PMU_PMEVTYPER(22),
+	PMU_PMEVTYPER(23),
+	PMU_PMEVTYPER(24),
+	PMU_PMEVTYPER(25),
+	PMU_PMEVTYPER(26),
+	PMU_PMEVTYPER(27),
+	PMU_PMEVTYPER(28),
+	PMU_PMEVTYPER(29),
+	PMU_PMEVTYPER(30),
+	/* PMCCFILTR */
+	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
 };
 
 static const struct sys_reg_desc cp15_64_regs[] = {