diff mbox series

[RFC] arm64: Enable perf events based hard lockup detector

Message ID 1589373048-25932-1-git-send-email-sumit.garg@linaro.org
State New
Headers show
Series [RFC] arm64: Enable perf events based hard lockup detector | expand

Commit Message

Sumit Garg May 13, 2020, 12:30 p.m. UTC
With the recent feature added to enable perf events to use pseudo NMIs
as interrupts on platforms which support GICv3 or later, it's now
possible to enable the hard lockup detector (or NMI watchdog) on arm64
platforms. So enable the corresponding support.

One thing to note here is that normally the lockup detector is
initialized just after the early initcalls, but the PMU on arm64 comes
up much later as a device_initcall(). So we need to re-initialize lockup
detection once the PMU has been initialized.

Signed-off-by: Sumit Garg <sumit.garg@linaro.org>

---

This patch is dependent on perf NMI patch-set [1].

[1] https://patchwork.kernel.org/cover/11047407/

 arch/arm64/Kconfig             |  2 ++
 arch/arm64/kernel/perf_event.c | 32 ++++++++++++++++++++++++++++++--
 drivers/perf/arm_pmu.c         | 11 +++++++++++
 include/linux/perf/arm_pmu.h   |  2 ++
 4 files changed, 45 insertions(+), 2 deletions(-)

-- 
2.7.4

Comments

Sumit Garg May 15, 2020, 7:35 a.m. UTC | #1
Hi Daniel,

On Wed, 13 May 2020 at 18:01, Sumit Garg <sumit.garg@linaro.org> wrote:
>

> With the recent feature added to enable perf events to use pseudo NMIs

> as interrupts on platforms which support GICv3 or later, its now been

> possible to enable hard lockup detector (or NMI watchdog) on arm64

> platforms. So enable corresponding support.

>

> One thing to note here is that normally lockup detector is initialized

> just after the early initcalls but PMU on arm64 comes up much later as

> device_initcall(). So we need to re-initialize lockup detection once

> PMU has been initialized.

>

> Signed-off-by: Sumit Garg <sumit.garg@linaro.org>

> ---

>


Do you have any major feedback on the approach used here or is it fine
to post this patch as an RFC in upstream?

-Sumit

> This patch is dependent on perf NMI patch-set [1].

>

> [1] https://patchwork.kernel.org/cover/11047407/

>

>  arch/arm64/Kconfig             |  2 ++

>  arch/arm64/kernel/perf_event.c | 32 ++++++++++++++++++++++++++++++--

>  drivers/perf/arm_pmu.c         | 11 +++++++++++

>  include/linux/perf/arm_pmu.h   |  2 ++

>  4 files changed, 45 insertions(+), 2 deletions(-)

>

> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig

> index 40fb05d..36f75c2 100644

> --- a/arch/arm64/Kconfig

> +++ b/arch/arm64/Kconfig

> @@ -160,6 +160,8 @@ config ARM64

>         select HAVE_NMI

>         select HAVE_PATA_PLATFORM

>         select HAVE_PERF_EVENTS

> +       select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI

> +       select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI

>         select HAVE_PERF_REGS

>         select HAVE_PERF_USER_STACK_DUMP

>         select HAVE_REGS_AND_STACK_ACCESS_API

> diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c

> index 3ad5c8f..df57360 100644

> --- a/arch/arm64/kernel/perf_event.c

> +++ b/arch/arm64/kernel/perf_event.c

> @@ -20,6 +20,8 @@

>  #include <linux/perf/arm_pmu.h>

>  #include <linux/platform_device.h>

>  #include <linux/smp.h>

> +#include <linux/nmi.h>

> +#include <linux/cpufreq.h>

>

>  /* ARMv8 Cortex-A53 specific event types. */

>  #define ARMV8_A53_PERFCTR_PREF_LINEFILL                                0xC2

> @@ -1190,10 +1192,21 @@ static struct platform_driver armv8_pmu_driver = {

>

>  static int __init armv8_pmu_driver_init(void)

>  {

> +       int ret;

> +

>         if (acpi_disabled)

> -               return platform_driver_register(&armv8_pmu_driver);

> +               ret = platform_driver_register(&armv8_pmu_driver);

>         else

> -               return arm_pmu_acpi_probe(armv8_pmuv3_init);

> +               ret = arm_pmu_acpi_probe(armv8_pmuv3_init);

> +

> +       /*

> +        * Try to re-initialize lockup detector after PMU init in

> +        * case PMU events are triggered via NMIs.

> +        */

> +       if (arm_pmu_irq_is_nmi())

> +               lockup_detector_init();

> +

> +       return ret;

>  }

>  device_initcall(armv8_pmu_driver_init)

>

> @@ -1225,3 +1238,18 @@ void arch_perf_update_userpage(struct perf_event *event,

>         userpg->time_shift = (u16)shift;

>         userpg->time_offset = -now;

>  }

> +

> +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF

> +#define SAFE_MAX_CPU_FREQ      4000000000UL // 4 GHz

> +u64 hw_nmi_get_sample_period(int watchdog_thresh)

> +{

> +       unsigned int cpu = smp_processor_id();

> +       unsigned int max_cpu_freq;

> +

> +       max_cpu_freq = cpufreq_get_hw_max_freq(cpu);

> +       if (max_cpu_freq)

> +               return (u64)max_cpu_freq * 1000 * watchdog_thresh;

> +       else

> +               return (u64)SAFE_MAX_CPU_FREQ * watchdog_thresh;

> +}

> +#endif

> diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c

> index f96cfc4..691dfc9 100644

> --- a/drivers/perf/arm_pmu.c

> +++ b/drivers/perf/arm_pmu.c

> @@ -718,6 +718,17 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)

>         return per_cpu(hw_events->irq, cpu);

>  }

>

> +bool arm_pmu_irq_is_nmi(void)

> +{

> +       const struct pmu_irq_ops *irq_ops;

> +

> +       irq_ops = per_cpu(cpu_irq_ops, smp_processor_id());

> +       if (irq_ops == &pmunmi_ops || irq_ops == &percpu_pmunmi_ops)

> +               return true;

> +       else

> +               return false;

> +}

> +

>  /*

>   * PMU hardware loses all context when a CPU goes offline.

>   * When a CPU is hotplugged back in, since some hardware registers are

> diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h

> index d9b8b76..a71f029 100644

> --- a/include/linux/perf/arm_pmu.h

> +++ b/include/linux/perf/arm_pmu.h

> @@ -155,6 +155,8 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);

>  static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }

>  #endif

>

> +bool arm_pmu_irq_is_nmi(void);

> +

>  /* Internal functions only for core arm_pmu code */

>  struct arm_pmu *armpmu_alloc(void);

>  struct arm_pmu *armpmu_alloc_atomic(void);

> --

> 2.7.4

>
Daniel Thompson May 15, 2020, 8:26 a.m. UTC | #2
On Fri, May 15, 2020 at 01:05:46PM +0530, Sumit Garg wrote:
> Hi Daniel,

> 

> On Wed, 13 May 2020 at 18:01, Sumit Garg <sumit.garg@linaro.org> wrote:

> >

> > With the recent feature added to enable perf events to use pseudo NMIs

> > as interrupts on platforms which support GICv3 or later, its now been

> > possible to enable hard lockup detector (or NMI watchdog) on arm64

> > platforms. So enable corresponding support.

> >

> > One thing to note here is that normally lockup detector is initialized

> > just after the early initcalls but PMU on arm64 comes up much later as

> > device_initcall(). So we need to re-initialize lockup detection once

> > PMU has been initialized.

> >

> > Signed-off-by: Sumit Garg <sumit.garg@linaro.org>

> > ---

> >

> 

> Do you have any major feedback on the approach used here or is it fine

> to post this patch as an RFC in upstream?


To be honest I hadn't realized this wasn't already Cc:ed to the list...


Daniel.


> 

> -Sumit

> 

> > This patch is dependent on perf NMI patch-set [1].

> >

> > [1] https://patchwork.kernel.org/cover/11047407/

> >

> >  arch/arm64/Kconfig             |  2 ++

> >  arch/arm64/kernel/perf_event.c | 32 ++++++++++++++++++++++++++++++--

> >  drivers/perf/arm_pmu.c         | 11 +++++++++++

> >  include/linux/perf/arm_pmu.h   |  2 ++

> >  4 files changed, 45 insertions(+), 2 deletions(-)

> >

> > diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig

> > index 40fb05d..36f75c2 100644

> > --- a/arch/arm64/Kconfig

> > +++ b/arch/arm64/Kconfig

> > @@ -160,6 +160,8 @@ config ARM64

> >         select HAVE_NMI

> >         select HAVE_PATA_PLATFORM

> >         select HAVE_PERF_EVENTS

> > +       select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI

> > +       select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI

> >         select HAVE_PERF_REGS

> >         select HAVE_PERF_USER_STACK_DUMP

> >         select HAVE_REGS_AND_STACK_ACCESS_API

> > diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c

> > index 3ad5c8f..df57360 100644

> > --- a/arch/arm64/kernel/perf_event.c

> > +++ b/arch/arm64/kernel/perf_event.c

> > @@ -20,6 +20,8 @@

> >  #include <linux/perf/arm_pmu.h>

> >  #include <linux/platform_device.h>

> >  #include <linux/smp.h>

> > +#include <linux/nmi.h>

> > +#include <linux/cpufreq.h>

> >

> >  /* ARMv8 Cortex-A53 specific event types. */

> >  #define ARMV8_A53_PERFCTR_PREF_LINEFILL                                0xC2

> > @@ -1190,10 +1192,21 @@ static struct platform_driver armv8_pmu_driver = {

> >

> >  static int __init armv8_pmu_driver_init(void)

> >  {

> > +       int ret;

> > +

> >         if (acpi_disabled)

> > -               return platform_driver_register(&armv8_pmu_driver);

> > +               ret = platform_driver_register(&armv8_pmu_driver);

> >         else

> > -               return arm_pmu_acpi_probe(armv8_pmuv3_init);

> > +               ret = arm_pmu_acpi_probe(armv8_pmuv3_init);

> > +

> > +       /*

> > +        * Try to re-initialize lockup detector after PMU init in

> > +        * case PMU events are triggered via NMIs.

> > +        */

> > +       if (arm_pmu_irq_is_nmi())

> > +               lockup_detector_init();

> > +

> > +       return ret;

> >  }

> >  device_initcall(armv8_pmu_driver_init)

> >

> > @@ -1225,3 +1238,18 @@ void arch_perf_update_userpage(struct perf_event *event,

> >         userpg->time_shift = (u16)shift;

> >         userpg->time_offset = -now;

> >  }

> > +

> > +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF

> > +#define SAFE_MAX_CPU_FREQ      4000000000UL // 4 GHz

> > +u64 hw_nmi_get_sample_period(int watchdog_thresh)

> > +{

> > +       unsigned int cpu = smp_processor_id();

> > +       unsigned int max_cpu_freq;

> > +

> > +       max_cpu_freq = cpufreq_get_hw_max_freq(cpu);

> > +       if (max_cpu_freq)

> > +               return (u64)max_cpu_freq * 1000 * watchdog_thresh;

> > +       else

> > +               return (u64)SAFE_MAX_CPU_FREQ * watchdog_thresh;

> > +}

> > +#endif

> > diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c

> > index f96cfc4..691dfc9 100644

> > --- a/drivers/perf/arm_pmu.c

> > +++ b/drivers/perf/arm_pmu.c

> > @@ -718,6 +718,17 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)

> >         return per_cpu(hw_events->irq, cpu);

> >  }

> >

> > +bool arm_pmu_irq_is_nmi(void)

> > +{

> > +       const struct pmu_irq_ops *irq_ops;

> > +

> > +       irq_ops = per_cpu(cpu_irq_ops, smp_processor_id());

> > +       if (irq_ops == &pmunmi_ops || irq_ops == &percpu_pmunmi_ops)

> > +               return true;

> > +       else

> > +               return false;

> > +}

> > +

> >  /*

> >   * PMU hardware loses all context when a CPU goes offline.

> >   * When a CPU is hotplugged back in, since some hardware registers are

> > diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h

> > index d9b8b76..a71f029 100644

> > --- a/include/linux/perf/arm_pmu.h

> > +++ b/include/linux/perf/arm_pmu.h

> > @@ -155,6 +155,8 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);

> >  static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }

> >  #endif

> >

> > +bool arm_pmu_irq_is_nmi(void);

> > +

> >  /* Internal functions only for core arm_pmu code */

> >  struct arm_pmu *armpmu_alloc(void);

> >  struct arm_pmu *armpmu_alloc_atomic(void);

> > --

> > 2.7.4

> >
Sumit Garg May 15, 2020, 8:56 a.m. UTC | #3
On Fri, 15 May 2020 at 13:56, Daniel Thompson
<daniel.thompson@linaro.org> wrote:
>

> On Fri, May 15, 2020 at 01:05:46PM +0530, Sumit Garg wrote:

> > Hi Daniel,

> >

> > On Wed, 13 May 2020 at 18:01, Sumit Garg <sumit.garg@linaro.org> wrote:

> > >

> > > With the recent feature added to enable perf events to use pseudo NMIs

> > > as interrupts on platforms which support GICv3 or later, its now been

> > > possible to enable hard lockup detector (or NMI watchdog) on arm64

> > > platforms. So enable corresponding support.

> > >

> > > One thing to note here is that normally lockup detector is initialized

> > > just after the early initcalls but PMU on arm64 comes up much later as

> > > device_initcall(). So we need to re-initialize lockup detection once

> > > PMU has been initialized.

> > >

> > > Signed-off-by: Sumit Garg <sumit.garg@linaro.org>

> > > ---

> > >

> >

> > Do you have any major feedback on the approach used here or is it fine

> > to post this patch as an RFC in upstream?

>

> To be honest I hadn't realized this wasn't already Cc:ed to the list...

>


Thanks, posted this patch in upstream now.

-Sumit

>

> Daniel.

>

>

> >

> > -Sumit

> >

> > > This patch is dependent on perf NMI patch-set [1].

> > >

> > > [1] https://patchwork.kernel.org/cover/11047407/

> > >

> > >  arch/arm64/Kconfig             |  2 ++

> > >  arch/arm64/kernel/perf_event.c | 32 ++++++++++++++++++++++++++++++--

> > >  drivers/perf/arm_pmu.c         | 11 +++++++++++

> > >  include/linux/perf/arm_pmu.h   |  2 ++

> > >  4 files changed, 45 insertions(+), 2 deletions(-)

> > >

> > > diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig

> > > index 40fb05d..36f75c2 100644

> > > --- a/arch/arm64/Kconfig

> > > +++ b/arch/arm64/Kconfig

> > > @@ -160,6 +160,8 @@ config ARM64

> > >         select HAVE_NMI

> > >         select HAVE_PATA_PLATFORM

> > >         select HAVE_PERF_EVENTS

> > > +       select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI

> > > +       select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI

> > >         select HAVE_PERF_REGS

> > >         select HAVE_PERF_USER_STACK_DUMP

> > >         select HAVE_REGS_AND_STACK_ACCESS_API

> > > diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c

> > > index 3ad5c8f..df57360 100644

> > > --- a/arch/arm64/kernel/perf_event.c

> > > +++ b/arch/arm64/kernel/perf_event.c

> > > @@ -20,6 +20,8 @@

> > >  #include <linux/perf/arm_pmu.h>

> > >  #include <linux/platform_device.h>

> > >  #include <linux/smp.h>

> > > +#include <linux/nmi.h>

> > > +#include <linux/cpufreq.h>

> > >

> > >  /* ARMv8 Cortex-A53 specific event types. */

> > >  #define ARMV8_A53_PERFCTR_PREF_LINEFILL                                0xC2

> > > @@ -1190,10 +1192,21 @@ static struct platform_driver armv8_pmu_driver = {

> > >

> > >  static int __init armv8_pmu_driver_init(void)

> > >  {

> > > +       int ret;

> > > +

> > >         if (acpi_disabled)

> > > -               return platform_driver_register(&armv8_pmu_driver);

> > > +               ret = platform_driver_register(&armv8_pmu_driver);

> > >         else

> > > -               return arm_pmu_acpi_probe(armv8_pmuv3_init);

> > > +               ret = arm_pmu_acpi_probe(armv8_pmuv3_init);

> > > +

> > > +       /*

> > > +        * Try to re-initialize lockup detector after PMU init in

> > > +        * case PMU events are triggered via NMIs.

> > > +        */

> > > +       if (arm_pmu_irq_is_nmi())

> > > +               lockup_detector_init();

> > > +

> > > +       return ret;

> > >  }

> > >  device_initcall(armv8_pmu_driver_init)

> > >

> > > @@ -1225,3 +1238,18 @@ void arch_perf_update_userpage(struct perf_event *event,

> > >         userpg->time_shift = (u16)shift;

> > >         userpg->time_offset = -now;

> > >  }

> > > +

> > > +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF

> > > +#define SAFE_MAX_CPU_FREQ      4000000000UL // 4 GHz

> > > +u64 hw_nmi_get_sample_period(int watchdog_thresh)

> > > +{

> > > +       unsigned int cpu = smp_processor_id();

> > > +       unsigned int max_cpu_freq;

> > > +

> > > +       max_cpu_freq = cpufreq_get_hw_max_freq(cpu);

> > > +       if (max_cpu_freq)

> > > +               return (u64)max_cpu_freq * 1000 * watchdog_thresh;

> > > +       else

> > > +               return (u64)SAFE_MAX_CPU_FREQ * watchdog_thresh;

> > > +}

> > > +#endif

> > > diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c

> > > index f96cfc4..691dfc9 100644

> > > --- a/drivers/perf/arm_pmu.c

> > > +++ b/drivers/perf/arm_pmu.c

> > > @@ -718,6 +718,17 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)

> > >         return per_cpu(hw_events->irq, cpu);

> > >  }

> > >

> > > +bool arm_pmu_irq_is_nmi(void)

> > > +{

> > > +       const struct pmu_irq_ops *irq_ops;

> > > +

> > > +       irq_ops = per_cpu(cpu_irq_ops, smp_processor_id());

> > > +       if (irq_ops == &pmunmi_ops || irq_ops == &percpu_pmunmi_ops)

> > > +               return true;

> > > +       else

> > > +               return false;

> > > +}

> > > +

> > >  /*

> > >   * PMU hardware loses all context when a CPU goes offline.

> > >   * When a CPU is hotplugged back in, since some hardware registers are

> > > diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h

> > > index d9b8b76..a71f029 100644

> > > --- a/include/linux/perf/arm_pmu.h

> > > +++ b/include/linux/perf/arm_pmu.h

> > > @@ -155,6 +155,8 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);

> > >  static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }

> > >  #endif

> > >

> > > +bool arm_pmu_irq_is_nmi(void);

> > > +

> > >  /* Internal functions only for core arm_pmu code */

> > >  struct arm_pmu *armpmu_alloc(void);

> > >  struct arm_pmu *armpmu_alloc_atomic(void);

> > > --

> > > 2.7.4

> > >
diff mbox series

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 40fb05d..36f75c2 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -160,6 +160,8 @@  config ARM64
 	select HAVE_NMI
 	select HAVE_PATA_PLATFORM
 	select HAVE_PERF_EVENTS
+	select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI
+	select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 3ad5c8f..df57360 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -20,6 +20,8 @@ 
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 #include <linux/smp.h>
+#include <linux/nmi.h>
+#include <linux/cpufreq.h>
 
 /* ARMv8 Cortex-A53 specific event types. */
 #define ARMV8_A53_PERFCTR_PREF_LINEFILL				0xC2
@@ -1190,10 +1192,21 @@  static struct platform_driver armv8_pmu_driver = {
 
 static int __init armv8_pmu_driver_init(void)
 {
+	int ret;
+
 	if (acpi_disabled)
-		return platform_driver_register(&armv8_pmu_driver);
+		ret = platform_driver_register(&armv8_pmu_driver);
 	else
-		return arm_pmu_acpi_probe(armv8_pmuv3_init);
+		ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
+
+	/*
+	 * Try to re-initialize lockup detector after PMU init in
+	 * case PMU events are triggered via NMIs.
+	 */
+	if (arm_pmu_irq_is_nmi())
+		lockup_detector_init();
+
+	return ret;
 }
 device_initcall(armv8_pmu_driver_init)
 
@@ -1225,3 +1238,18 @@  void arch_perf_update_userpage(struct perf_event *event,
 	userpg->time_shift = (u16)shift;
 	userpg->time_offset = -now;
 }
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
+#define SAFE_MAX_CPU_FREQ	4000000000UL // 4 GHz
+u64 hw_nmi_get_sample_period(int watchdog_thresh)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned int max_cpu_freq;
+
+	max_cpu_freq = cpufreq_get_hw_max_freq(cpu);
+	if (max_cpu_freq)
+		return (u64)max_cpu_freq * 1000 * watchdog_thresh;
+	else
+		return (u64)SAFE_MAX_CPU_FREQ * watchdog_thresh;
+}
+#endif
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index f96cfc4..691dfc9 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -718,6 +718,17 @@  static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
 	return per_cpu(hw_events->irq, cpu);
 }
 
+bool arm_pmu_irq_is_nmi(void)
+{
+	const struct pmu_irq_ops *irq_ops;
+
+	irq_ops = per_cpu(cpu_irq_ops, smp_processor_id());
+	if (irq_ops == &pmunmi_ops || irq_ops == &percpu_pmunmi_ops)
+		return true;
+	else
+		return false;
+}
+
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index d9b8b76..a71f029 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -155,6 +155,8 @@  int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif
 
+bool arm_pmu_irq_is_nmi(void);
+
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);