
perf/smmuv3: Enable HiSilicon Erratum 162001900 quirk for HIP08/09

Message ID 20230809100654.32036-1-yangyicong@huawei.com
State New
Series perf/smmuv3: Enable HiSilicon Erratum 162001900 quirk for HIP08/09

Commit Message

Yicong Yang Aug. 9, 2023, 10:06 a.m. UTC
From: Yicong Yang <yangyicong@hisilicon.com>

Some HiSilicon SMMU PMCG implementations suffer from erratum 162001900:
the PMU disable control sometimes fails to stop the counters. This can
lead to erroneous or inaccurate data, since the counters may still be
counting the events of the last perf session when a new session enables
them.

Work around this by hardening the global disable path: before disabling
the PMU, write an invalid event type (0xffff) to each used counter to
forcibly stop it. Correspondingly, restore each event's type and filter
on pmu::pmu_enable().

Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
---
 Documentation/arch/arm64/silicon-errata.rst |  3 ++
 drivers/acpi/arm64/iort.c                   |  5 ++-
 drivers/perf/arm_smmuv3_pmu.c               | 49 ++++++++++++++++++++-
 include/linux/acpi_iort.h                   |  1 +
 4 files changed, 56 insertions(+), 2 deletions(-)

Comments

Will Deacon Aug. 11, 2023, 11:17 a.m. UTC | #1
On Wed, Aug 09, 2023 at 06:06:54PM +0800, Yicong Yang wrote:
> diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
> index 25a269d431e4..b854b67b81fc 100644
> --- a/drivers/perf/arm_smmuv3_pmu.c
> +++ b/drivers/perf/arm_smmuv3_pmu.c
> @@ -115,6 +115,7 @@
>  #define SMMU_PMCG_PA_SHIFT              12
>  
>  #define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)
> +#define SMMU_PMCG_HARDEN_DISABLE        BIT(1)
>  
>  static int cpuhp_state_num;
>  
> @@ -150,6 +151,22 @@ SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
>  SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
>  SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
>  
> +static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
> +				       struct perf_event *event, int idx);
> +
> +static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
> +	unsigned int idx;
> +
> +	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
> +		smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
> +
> +	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
> +	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
> +	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);

Can you tail-call smmu_pmu_enable() instead of duplicating it here?
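
Something along these lines would avoid the duplication (untested sketch;
it assumes smmu_pmu_enable() keeps its current form and is visible at this
point, e.g. via a forward declaration or by moving this helper below it):

static void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
	unsigned int idx;

	/* Re-apply the event type/filter that the disable quirk overwrote. */
	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
		smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);

	/* Reuse the common enable path instead of open-coding it. */
	smmu_pmu_enable(pmu);
}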

> +static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
> +{
> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
> +	unsigned int idx;
> +
> +	/*
> +	 * The global disable of the PMU sometimes fails to stop the
> +	 * counters. Harden this by writing an invalid event type to each
> +	 * used counter to forcibly stop counting.
> +	 */
> +	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
> +		writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
> +
> +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
> +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);

Same thing here, but with smmu_pmu_disable()
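
Likewise for the disable side (again an untested sketch, reusing the
common helper for the final register writes):

static void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
	unsigned int idx;

	/*
	 * The global disable sometimes fails to stop the counters, so first
	 * point each used counter at an invalid event type (0xffff) to force
	 * it to stop counting.
	 */
	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
		writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));

	/* Then fall through to the common disable path. */
	smmu_pmu_disable(pmu);
}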

Will
Yicong Yang Aug. 14, 2023, 8:53 a.m. UTC | #2
On 2023/8/11 19:17, Will Deacon wrote:
> On Wed, Aug 09, 2023 at 06:06:54PM +0800, Yicong Yang wrote:
>> diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
>> index 25a269d431e4..b854b67b81fc 100644
>> --- a/drivers/perf/arm_smmuv3_pmu.c
>> +++ b/drivers/perf/arm_smmuv3_pmu.c
>> @@ -115,6 +115,7 @@
>>  #define SMMU_PMCG_PA_SHIFT              12
>>  
>>  #define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)
>> +#define SMMU_PMCG_HARDEN_DISABLE        BIT(1)
>>  
>>  static int cpuhp_state_num;
>>  
>> @@ -150,6 +151,22 @@ SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
>>  SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
>>  SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
>>  
>> +static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
>> +				       struct perf_event *event, int idx);
>> +
>> +static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
>> +{
>> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
>> +	unsigned int idx;
>> +
>> +	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
>> +		smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
>> +
>> +	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
>> +	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
>> +	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
> 
> Can you tail-call smmu_pmu_enable() instead of duplicating it here?
> 
>> +static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
>> +{
>> +	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
>> +	unsigned int idx;
>> +
>> +	/*
>> +	 * The global disable of the PMU sometimes fails to stop the
>> +	 * counters. Harden this by writing an invalid event type to each
>> +	 * used counter to forcibly stop counting.
>> +	 */
>> +	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
>> +		writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
>> +
>> +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
>> +	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
> 
> Same thing here, but with smmu_pmu_disable()
> 

Sure, will tail-call smmu_pmu_{enable, disable} in both cases to avoid the duplication.
Thanks for the comments.

Thanks.

Patch

diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index 496cdca5cb99..d54626cfcbda 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -195,6 +195,9 @@  stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Hisilicon      | Hip08 SMMU PMCG | #162001800      | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| Hisilicon      | Hip08 SMMU PMCG | #162001900      | N/A                         |
+|                | Hip09 SMMU PMCG |                 |                             |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 +----------------+-----------------+-----------------+-----------------------------+
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 56d887323ae5..6496ff5a6ba2 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1708,7 +1708,10 @@  static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
 static struct acpi_platform_list pmcg_plat_info[] __initdata = {
 	/* HiSilicon Hip08 Platform */
 	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
-	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
+	 "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
+	/* HiSilicon Hip09 Platform */
+	{"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
 	{ }
 };
 
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 25a269d431e4..b854b67b81fc 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -115,6 +115,7 @@ 
 #define SMMU_PMCG_PA_SHIFT              12
 
 #define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)
+#define SMMU_PMCG_HARDEN_DISABLE        BIT(1)
 
 static int cpuhp_state_num;
 
@@ -150,6 +151,22 @@  SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
 SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
 SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
 
+static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
+				       struct perf_event *event, int idx);
+
+static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
+{
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+	unsigned int idx;
+
+	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
+		smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
+
+	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
+	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
+	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
+}
+
 static inline void smmu_pmu_enable(struct pmu *pmu)
 {
 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
@@ -159,6 +176,23 @@  static inline void smmu_pmu_enable(struct pmu *pmu)
 	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
 }
 
+static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
+{
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+	unsigned int idx;
+
+	/*
+	 * The global disable of the PMU sometimes fails to stop the
+	 * counters. Harden this by writing an invalid event type to each
+	 * used counter to forcibly stop counting.
+	 */
+	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
+		writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
+
+	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
+	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
+}
+
 static inline void smmu_pmu_disable(struct pmu *pmu)
 {
 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
@@ -765,7 +799,10 @@  static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
 	switch (model) {
 	case IORT_SMMU_V3_PMCG_HISI_HIP08:
 		/* HiSilicon Erratum 162001800 */
-		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
+		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
+		break;
+	case IORT_SMMU_V3_PMCG_HISI_HIP09:
+		smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
 		break;
 	}
 
@@ -890,6 +927,16 @@  static int smmu_pmu_probe(struct platform_device *pdev)
 	if (!dev->of_node)
 		smmu_pmu_get_acpi_options(smmu_pmu);
 
+	/*
+	 * On platforms that suffer from this quirk, the PMU disable sometimes
+	 * fails to stop the counters. This leads to inaccurate or erroneous
+	 * counts, so forcibly stop the counters via the quirk handlers.
+	 */
+	if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
+		smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
+		smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
+	}
+
 	/* Pick one CPU to be the preferred one to use */
 	smmu_pmu->on_cpu = raw_smp_processor_id();
 	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index ee7cb6aaff71..1cb65592c95d 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -21,6 +21,7 @@ 
  */
 #define IORT_SMMU_V3_PMCG_GENERIC        0x00000000 /* Generic SMMUv3 PMCG */
 #define IORT_SMMU_V3_PMCG_HISI_HIP08     0x00000001 /* HiSilicon HIP08 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP09     0x00000002 /* HiSilicon HIP09 PMCG */
 
 int iort_register_domain_token(int trans_id, phys_addr_t base,
 			       struct fwnode_handle *fw_node);