@@ -1332,6 +1332,7 @@ FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)
+#define MDCR_HLP (1U << 26) /* MDCR_EL2 */
#define MDCR_SCCD (1U << 23) /* MDCR_EL3 */
#define MDCR_HCCD (1U << 23) /* MDCR_EL2 */
#define MDCR_EPMAD (1U << 21)
@@ -1256,6 +1256,7 @@ enum MVEECIState {
/* Definitions for the PMU registers */
#define PMCRN_MASK 0xf800
#define PMCRN_SHIFT 11
+#define PMCRLP 0x80
#define PMCRLC 0x40
#define PMCRDP 0x20
#define PMCRX 0x10
@@ -1267,7 +1268,7 @@ enum MVEECIState {
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
-#define PMCR_WRITABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
+#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
#define PMXEVTYPER_P 0x80000000
#define PMXEVTYPER_U 0x40000000
@@ -1084,7 +1084,8 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
 * We use these to decide whether we need to wrap a write to MDCR_EL2
 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
 */
-#define MDCR_EL2_PMU_ENABLE_BITS (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD)
+#define MDCR_EL2_PMU_ENABLE_BITS \
+    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
@@ -1193,6 +1194,32 @@ static bool pmccntr_clockdiv_enabled(CPUARMState *env)
    return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
}
+static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
+{
+    /* Return true if the specified event counter is configured to be 64 bit */
+
+    /* This isn't intended to be used with the cycle counter */
+    assert(counter < 31);
+
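+    /* Before FEAT_PMUv3p5, event counters are always 32 bits wide */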
+    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+        return false;
+    }
+
+    if (arm_feature(env, ARM_FEATURE_EL2)) {
+        /*
+         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
+         * current security state, so we don't use arm_mdcr_el2_eff() here.
+         */
+        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
+        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
+
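+        /*
+         * HPMN splits the counters: those with index >= HPMN belong to
+         * EL2 and take their width from MDCR_EL2.HLP; the others fall
+         * through to PMCR.LP below.
+         */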
+        if (hpmn != 0 && counter >= hpmn) {
+            return hlp;
+        }
+    }
+    return env->cp15.c9_pmcr & PMCRLP;
+}
+
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
@@ -1269,9 +1296,11 @@ static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
    }

    if (pmu_counter_enabled(env, counter)) {
-        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
+        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
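+        /*
+         * Overflow is a 1->0 transition of the counter's top bit:
+         * bit 63 for a 64-bit counter, bit 31 for a 32-bit one.
+         */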
+        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
+            1ULL << 63 : 1ULL << 31;

-        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
+        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
@@ -1286,9 +1315,13 @@ static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
-        uint64_t delta = UINT32_MAX -
-            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
-        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
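+        /*
+         * In 2's-complement arithmetic -(c + 1) is UINT64_MAX - c, i.e.
+         * the number of counts left before the counter reads all-ones.
+         */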
+        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
+        int64_t overflow_in;
+
+        if (!pmevcntr_is_64_bit(env, counter)) {
+            delta = (uint32_t)delta;
+        }
+        overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at;
@@ -1375,6 +1408,8 @@ static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
+    uint64_t overflow_mask, new_pmswinc;
+
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
@@ -1388,9 +1423,12 @@ static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
-            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
-            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
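+            /* Again, watch for a 1->0 transition of the counter's top bit */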
+            overflow_mask = pmevcntr_is_64_bit(env, i) ?
+                1ULL << 63 : 1ULL << 31;
+
+            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }
@@ -1597,6 +1635,10 @@ static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
+    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+        /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+        value &= MAKE_64BIT_MASK(0, 32);
+    }
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
@@ -1616,6 +1658,10 @@ static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
+        if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+            /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+            ret &= MAKE_64BIT_MASK(0, 32);
+        }
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR