[3.17-rc4,v7,4/6] irqchip: gic: Add support for IPI FIQ

Message ID 1410970218-28847-5-git-send-email-daniel.thompson@linaro.org
State New

Commit Message

Daniel Thompson Sept. 17, 2014, 4:10 p.m. UTC
This patch provides support for arm's newly added IPI FIQ. It works
by placing all interrupt sources *except* the IPI FIQ in group 1 and
then flipping a configuration bit in the GIC so that group 1
interrupts use IRQ and group 0 interrupts use FIQ.

All GIC hardware except GICv1-without-TrustZone provides a means to
group exceptions into group 0 and group 1. However, this hardware
functionality is unavailable to the kernel when a secure monitor is
present, because access to the grouping registers is prohibited
outside the "secure world" (a restriction that allows grouping to be
used to route hardware peripheral interrupts into the secure world).
When grouping is not available we can rely on the GIC's RAZ/WI
semantics and avoid conditional code.
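To illustrate the RAZ/WI fall-back described above (a sketch only, not
part of the diff below): once the init code has tried to move
interrupts into group 1, a kernel that is permitted to use grouping
reads non-zero bits back from IGROUPR0, while a kernel running behind
a secure monitor (or on a GICv1 without TrustZone) reads back zero,
the writes are simply ignored and IPIs keep arriving as ordinary IRQs.
The helper name below is illustrative and does not exist in the patch:

	/* Illustration only: has our attempt to use grouping taken effect? */
	static bool gic_grouping_usable(void __iomem *dist_base)
	{
		/* RAZ/WI: reads back as zero when grouping is inaccessible */
		return readl_relaxed(dist_base + GIC_DIST_IGROUP + 0) != 0;
	}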

gic_raise_softirq() also needs changing to make it safe to call from
FIQ. This is due to its use by wake_up_klogd() during console_unlock().

Tested on Freescale i.MX6 (quad A9, GICv1-with-TrustZone running in
secure mode).

Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jason Cooper <jason@lakedaemon.net>
---
 arch/arm/kernel/traps.c         |   5 +-
 drivers/irqchip/irq-gic.c       | 162 ++++++++++++++++++++++++++++++++++++++--
 include/linux/irqchip/arm-gic.h |   3 +
 3 files changed, 162 insertions(+), 8 deletions(-)

Comments

Russell King - ARM Linux Sept. 17, 2014, 6:51 p.m. UTC | #1
On Wed, Sep 17, 2014 at 09:10:16AM -0700, Daniel Thompson wrote:
> This patch provides support for arm's newly added IPI FIQ. It works
> by placing all interrupt sources *except* IPI FIQ in group 1 and
> then flips a configuration bit in the GIC such that group 1
> interrupts use IRQ and group 0 interrupts use FIQ.
> 
> All GIC hardware except GICv1-without-TrustZone support provides a means
> to group exceptions into group 0 and group 1. However the hardware
> functionality is unavailable to the kernel when a secure monitor is
> present because access to the grouping registers are prohibited outside
> "secure world" (a feature that allows grouping to be used to allow
> hardware peripherals to send interrupts into the secure world). However
> when grouping is not available we can rely on the GIC's RAZ/WI semantics
> and avoid conditional code.

I've been chasing a bug with this on the Versatile Express CT9x4.  It
seems that the GIC there is a GICv1, with secure extensions.  It seems
to support interrupt grouping.

However, setting SPIs to group 1, with the control registers enabling
both group 0 and group 1 (such that both groups are treated as IRQs)
results in no SPIs being delivered to the kernel.  In other words,
setting GIC_DIST_IGROUP for SPIs, and setting both GIC_CPU_CTRL and
GIC_DIST_CTRL to 3.
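In code terms the experiment amounts to roughly the following (a
sketch of the description above, not the literal hack I'm carrying):

	/* sketch only: put every SPI in group 1 ... */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, dist_base + GIC_DIST_IGROUP + i * 4 / 32);

	/* ... and enable both groups at the distributor and CPU interface */
	writel_relaxed(3, dist_base + GIC_DIST_CTRL);	/* EnableGrp0 | EnableGrp1 */
	writel_relaxed(3, cpu_base + GIC_CPU_CTRL);	/* EnableGrp0 | EnableGrp1, FIQEn clear */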

This is rather worrying, because we seem to have a GIC which for all
intents and purposes appears to be compatible with what we want to do,
appears to conform with the GIC architecture specifications, but doesn't
actually work.

I suspect that running the Versatile Express CT9x4 in non-secure mode
wouldn't work (because in non-secure mode, the GIC only allows access
to group 1 interrupts.)

I've added Will and Mark to this to see whether they have any comment.

> gic_raise_softirq() also needs changing to make it safe to call from
> FIQ. This is due to its use by wake_up_klogd() during console_unlock().
> 
> Tested on Freescale i.MX6 (quad A9, GICv1-with-TrustZone running in
> secure mode).
> 
> Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
> Cc: Russell King <linux@arm.linux.org.uk>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Jason Cooper <jason@lakedaemon.net>
Daniel Thompson Sept. 17, 2014, 8:12 p.m. UTC | #2
On 17/09/14 11:51, Russell King - ARM Linux wrote:
> On Wed, Sep 17, 2014 at 09:10:16AM -0700, Daniel Thompson wrote:
>> This patch provides support for arm's newly added IPI FIQ. It works
>> by placing all interrupt sources *except* IPI FIQ in group 1 and
>> then flips a configuration bit in the GIC such that group 1
>> interrupts use IRQ and group 0 interrupts use FIQ.
>>
>> All GIC hardware except GICv1-without-TrustZone support provides a means
>> to group exceptions into group 0 and group 1. However the hardware
>> functionality is unavailable to the kernel when a secure monitor is
>> present because access to the grouping registers are prohibited outside
>> "secure world" (a feature that allows grouping to be used to allow
>> hardware peripherals to send interrupts into the secure world). However
>> when grouping is not available we can rely on the GIC's RAZ/WI semantics
>> and avoid conditional code.
> 
> I've been chasing a bug with this on the Versatile Express CT9x4.  It
> seems that the GIC there is a GICv1, with secure extensions.  It seems
> to support interrupt grouping.
> 
> However, setting SPIs to group 1, with the control registers enabling
> both group 0 and group 1 (such that both groups are treated as IRQs)
> results in no SPIs being delivered to the kernel.  In other words,
> setting GIC_DIST_IGROUP for SPIs, and setting both GIC_CPU_CTRL and
> GIC_DIST_CTRL to 3.

I may have missed something but this sounds like the expected behaviour
to me.

Without AckCtl set (GIC_CPU_CTRL bit 2) it is not possible to
acknowledge group 1 interrupts from secure mode. Thus, in the
circumstances described above, I would expect the system to wedge in
the interrupt handler because the IRQ is raised but the software is
unable to acknowledge it.

I think there may also be problems with leaving CBPR unset. Leaving
CBPR unset certainly causes a change of behaviour compared to an
all-group-0 setup.
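For reference, this is my understanding of the secure-view
GIC_CPU_CTRL bits we are juggling here (an illustrative decode for the
discussion; the names are made up and should be double-checked against
the TRM):

	#define GICC_CTLR_ENABLE_GRP0	(1 << 0)
	#define GICC_CTLR_ENABLE_GRP1	(1 << 1)
	#define GICC_CTLR_ACKCTL	(1 << 2)  /* secure INTACK reads may ack group 1 */
	#define GICC_CTLR_FIQEN		(1 << 3)  /* signal group 0 interrupts as FIQ */
	#define GICC_CTLR_CBPR		(1 << 4)  /* one binary point register for both groups */

so the 0x1f written by the patch sets all five of the above.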


Daniel.


Russell King - ARM Linux Sept. 17, 2014, 9:07 p.m. UTC | #3
On Wed, Sep 17, 2014 at 01:12:23PM -0700, Daniel Thompson wrote:
> On 17/09/14 11:51, Russell King - ARM Linux wrote:
> > On Wed, Sep 17, 2014 at 09:10:16AM -0700, Daniel Thompson wrote:
> >> This patch provides support for arm's newly added IPI FIQ. It works
> >> by placing all interrupt sources *except* IPI FIQ in group 1 and
> >> then flips a configuration bit in the GIC such that group 1
> >> interrupts use IRQ and group 0 interrupts use FIQ.
> >>
> >> All GIC hardware except GICv1-without-TrustZone support provides a means
> >> to group exceptions into group 0 and group 1. However the hardware
> >> functionality is unavailable to the kernel when a secure monitor is
> >> present because access to the grouping registers are prohibited outside
> >> "secure world" (a feature that allows grouping to be used to allow
> >> hardware peripherals to send interrupts into the secure world). However
> >> when grouping is not available we can rely on the GIC's RAZ/WI semantics
> >> and avoid conditional code.
> > 
> > I've been chasing a bug with this on the Versatile Express CT9x4.  It
> > seems that the GIC there is a GICv1, with secure extensions.  It seems
> > to support interrupt grouping.
> > 
> > However, setting SPIs to group 1, with the control registers enabling
> > both group 0 and group 1 (such that both groups are treated as IRQs)
> > results in no SPIs being delivered to the kernel.  In other words,
> > setting GIC_DIST_IGROUP for SPIs, and setting both GIC_CPU_CTRL and
> > GIC_DIST_CTRL to 3.
> 
> I may have missed something but this sounds like the expected behaviour
> to me.
> 
> Without AckCtl set (GIC_CPU_CTRL bit 2) then it is not possible to
> acknowledge group 1 interrupts from secure mode. Thus in the
> circumstances described above I would expect the system to wedge in the
> interrupt handler because IRQ is raised but the software is not able to
> acknowledge it.

Setting AckCtl (0x07) results in no change in behaviour - still crashes
when it tries to calibrate the timer.

> I think there may also problems with leaving CBPR unset. Leaving CBPR
> unset certainly causes a change of behaviour compared to an all group0
> setup.

Also tried setting that (0x17), but still the same.
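So, decoding against the usual secure-view bit layout, what I have
tried so far is:

	0x03 = EnableGrp0 | EnableGrp1
	0x07 = EnableGrp0 | EnableGrp1 | AckCtl
	0x17 = EnableGrp0 | EnableGrp1 | AckCtl | CBPR

all with GIC_DIST_CTRL set to 3, and the result is the same in each
case.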

Russell King - ARM Linux Sept. 18, 2014, 8:17 a.m. UTC | #4
On Wed, Sep 17, 2014 at 09:10:16AM -0700, Daniel Thompson wrote:
> @@ -604,8 +731,19 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
>  {
>  	int cpu;
>  	unsigned long flags, map = 0;
> +	unsigned long softint;
>  
> -	raw_spin_lock_irqsave(&irq_controller_lock, flags);
> +	/*
> +	 * The locking in this function ensures we don't use stale cpu mappings
> +	 * and thus we never route an IPI to the wrong physical core during a
> +	 * big.LITTLE switch. The switch code takes both of these locks meaning
> +	 * we can choose whichever lock is safe to use from our current calling
> +	 * context.
> +	 */
> +	if (in_nmi())
> +		raw_spin_lock(&fiq_safe_migration_lock);
> +	else
> +		raw_spin_lock_irqsave(&irq_controller_lock, flags);

BTW, I see this code is still here...
Daniel Thompson Sept. 18, 2014, 9:20 p.m. UTC | #5
On 18/09/14 01:17, Russell King - ARM Linux wrote:
> On Wed, Sep 17, 2014 at 09:10:16AM -0700, Daniel Thompson wrote:
>> @@ -604,8 +731,19 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
>>  {
>>  	int cpu;
>>  	unsigned long flags, map = 0;
>> +	unsigned long softint;
>>  
>> -	raw_spin_lock_irqsave(&irq_controller_lock, flags);
>> +	/*
>> +	 * The locking in this function ensures we don't use stale cpu mappings
>> +	 * and thus we never route an IPI to the wrong physical core during a
>> +	 * big.LITTLE switch. The switch code takes both of these locks meaning
>> +	 * we can choose whichever lock is safe to use from our current calling
>> +	 * context.
>> +	 */
>> +	if (in_nmi())
>> +		raw_spin_lock(&fiq_safe_migration_lock);
>> +	else
>> +		raw_spin_lock_irqsave(&irq_controller_lock, flags);
> 
> BTW, I see this code is still here...

Quite so.

I'm afraid I haven't yet re-written it to use r/w locks (as proposed in
mails from the weekend) but I had to respin the default FIQ handler
patch to fix the CONFIG_FIQ build problem I introduced.
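
For what it's worth, the rough shape I have in mind is below. It is an
untested sketch with a made-up lock name, so please treat it as an
illustration of the idea rather than the respin itself:

	/*
	 * IPI senders take the lock for reading; gic_migrate_target() takes
	 * it for writing with both IRQ and FIQ masked, so a sender can never
	 * observe a half-updated gic_cpu_map[] and a reader running from
	 * FIQ/NMI context can never deadlock against a writer on its own CPU.
	 */
	static DEFINE_RWLOCK(cpu_map_migration_lock);

	static inline void gic_map_lock(unsigned long *flags)
	{
		read_lock_irqsave(&cpu_map_migration_lock, *flags);
	}

	static inline void gic_map_unlock(unsigned long flags)
	{
		read_unlock_irqrestore(&cpu_map_migration_lock, flags);
	}

gic_raise_softirq() would then use the helpers unconditionally (no
more in_nmi() special case) and gic_migrate_target() would wrap the
gic_cpu_map[] update in write_lock()/write_unlock().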
Marc Zyngier Sept. 18, 2014, 9:46 p.m. UTC | #6
On Wed, Sep 17 2014 at 07:51:38 PM, Russell King - ARM Linux <linux@arm.linux.org.uk> wrote:

Hi Russell,

> On Wed, Sep 17, 2014 at 09:10:16AM -0700, Daniel Thompson wrote:
>> This patch provides support for arm's newly added IPI FIQ. It works
>> by placing all interrupt sources *except* IPI FIQ in group 1 and
>> then flips a configuration bit in the GIC such that group 1
>> interrupts use IRQ and group 0 interrupts use FIQ.
>>
>> All GIC hardware except GICv1-without-TrustZone support provides a means
>> to group exceptions into group 0 and group 1. However the hardware
>> functionality is unavailable to the kernel when a secure monitor is
>> present because access to the grouping registers are prohibited outside
>> "secure world" (a feature that allows grouping to be used to allow
>> hardware peripherals to send interrupts into the secure world). However
>> when grouping is not available we can rely on the GIC's RAZ/WI semantics
>> and avoid conditional code.
>
> I've been chasing a bug with this on the Versatile Express CT9x4.  It
> seems that the GIC there is a GICv1, with secure extensions.  It seems
> to support interrupt grouping.
>
> However, setting SPIs to group 1, with the control registers enabling
> both group 0 and group 1 (such that both groups are treated as IRQs)
> results in no SPIs being delivered to the kernel.  In other words,
> setting GIC_DIST_IGROUP for SPIs, and setting both GIC_CPU_CTRL and
> GIC_DIST_CTRL to 3.
>
> This is rather worrying, because we seem to have a GIC which for all
> intents and purposes appears to be compatible with what we want to do,
> appears to conform with the GIC architecture specifications, but doesn't
> actually work.
>
> I suspect that running the Versatile Express CT9x4 in non-secure mode
> wouldn't work (because in non-secure mode, the GIC only allows access
> to group 1 interrupts.)
>
> I've added Will and Mark to this to see whether they have any comment.

I'm rather far away from my VE-A9 board (and won't be able to get back
to it for another two weeks), so this is all a shot in the dark...

Can you have a look at the GICC_AIAR register (located at GICC_IAR +
0x14)? It *shouldn't* exist on this HW, assuming this is a real
GICv1. But what you describe makes me think of something like this.
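
Something as simple as the hack below, dropped into gic_cpu_init(),
should tell us. Debug only, of course - note that a read of GICC_AIAR
can acknowledge a pending group 1 interrupt, so it mustn't be left in:

	/* GICC_AIAR sits at GICC_IAR + 0x14, i.e. offset 0x20 into the CPU interface */
	pr_info("GIC: GICC_AIAR reads back as 0x%08x\n",
		readl_relaxed(base + GIC_CPU_INTACK + 0x14));

Whether it reads back as zero, as a spurious ID (1023), or as something
else entirely should give a hint as to what this GIC really is.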

Thanks,

	M.

Patch

diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 439138d..92c4ea1 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -26,6 +26,7 @@ 
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/irq.h>
+#include <linux/irqchip/arm-gic.h>
 
 #include <linux/atomic.h>
 #include <asm/cacheflush.h>
@@ -479,7 +480,9 @@  asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
 
 	nmi_enter();
 
-	/* nop. FIQ handlers for special arch/arm features can be added here. */
+#ifdef CONFIG_ARM_GIC
+	gic_handle_fiq_ipi();
+#endif
 
 	nmi_exit();
 
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4b959e6..05b5d62 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -39,8 +39,10 @@ 
 #include <linux/slab.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqchip/arm-gic.h>
+#include <linux/ratelimit.h>
 
 #include <asm/cputype.h>
+#include <asm/fiq.h>
 #include <asm/irq.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
@@ -48,6 +50,10 @@ 
 #include "irq-gic-common.h"
 #include "irqchip.h"
 
+#ifndef SMP_IPI_FIQ_MASK
+#define SMP_IPI_FIQ_MASK 0
+#endif
+
 union gic_base {
 	void __iomem *common_base;
 	void __percpu * __iomem *percpu_base;
@@ -71,6 +77,8 @@  struct gic_chip_data {
 };
 
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+/* A fiq-safe spinlock must only be locked when the FIQ is masked */
+static DEFINE_RAW_SPINLOCK(fiq_safe_migration_lock);
 
 /*
  * The GIC mapping of CPU interfaces does not necessarily match
@@ -325,6 +333,94 @@  static struct irq_chip gic_chip = {
 	.irq_set_wake		= gic_set_wake,
 };
 
+/*
+ * Shift an interrupt between Group 0 and Group 1.
+ *
+ * In addition to changing the group we also modify the priority to
+ * match what "ARM strongly recommends" for a system where no Group 1
+ * interrupt must ever preempt a Group 0 interrupt.
+ *
+ * It is safe to call this function on systems which do not support
+ * grouping (it will have no effect).
+ */
+static void gic_set_group_irq(void __iomem *base, unsigned int hwirq,
+				int group)
+{
+	unsigned int grp_reg = hwirq / 32 * 4;
+	u32 grp_mask = BIT(hwirq % 32);
+	u32 grp_val;
+
+	unsigned int pri_reg = (hwirq / 4) * 4;
+	u32 pri_mask = BIT(7 + ((hwirq % 4) * 8));
+	u32 pri_val;
+
+	/*
+	 * Systems which do not support grouping will have no bits
+	 * set in IGROUP[0] (and all systems which do will have set bits).
+	 */
+	grp_val = readl_relaxed(base + GIC_DIST_IGROUP + 0);
+	if (!grp_val)
+		return;
+
+	raw_spin_lock(&irq_controller_lock);
+
+	grp_val = readl_relaxed(base + GIC_DIST_IGROUP + grp_reg);
+	pri_val = readl_relaxed(base + GIC_DIST_PRI + pri_reg);
+
+	if (group) {
+		grp_val |= grp_mask;
+		pri_val |= pri_mask;
+	} else {
+		grp_val &= ~grp_mask;
+		pri_val &= ~pri_mask;
+	}
+
+	writel_relaxed(grp_val, base + GIC_DIST_IGROUP + grp_reg);
+	writel_relaxed(pri_val, base + GIC_DIST_PRI + pri_reg);
+
+	raw_spin_unlock(&irq_controller_lock);
+}
+
+/*
+ * Test which group an interrupt belongs to.
+ *
+ * Returns 0 if the controller does not support grouping.
+ */
+static int gic_get_group_irq(void __iomem *base, unsigned int hwirq)
+{
+	unsigned int grp_reg = hwirq / 32 * 4;
+	u32 grp_val;
+
+	grp_val = readl_relaxed(base + GIC_DIST_IGROUP + grp_reg);
+
+	return (grp_val >> (hwirq % 32)) & 1;
+}
+
+/*
+ * Fully acknowledge (both ack and eoi) any outstanding FIQ-based IPI,
+ * otherwise do nothing.
+ */
+void gic_handle_fiq_ipi(void)
+{
+	struct gic_chip_data *gic = &gic_data[0];
+	void __iomem *cpu_base = gic_data_cpu_base(gic);
+	unsigned long irqstat, irqnr;
+
+	if (WARN_ON(!in_nmi()))
+		return;
+
+	while ((1u << readl_relaxed(cpu_base + GIC_CPU_HIGHPRI)) &
+	       SMP_IPI_FIQ_MASK) {
+		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+		writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+
+		irqnr = irqstat & GICC_IAR_INT_ID_MASK;
+		WARN_RATELIMIT(irqnr > 16,
+			       "Unexpected irqnr %lu (bad prioritization?)\n",
+			       irqnr);
+	}
+}
+
 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
 {
 	if (gic_nr >= MAX_GIC_NR)
@@ -373,7 +469,18 @@  static void __init gic_dist_init(struct gic_chip_data *gic)
 
 	gic_dist_config(base, gic_irqs, NULL);
 
-	writel_relaxed(1, base + GIC_DIST_CTRL);
+	/*
+	 * Set all global interrupts to be group 1 (this register is
+	 * RAZ/WI if not accessible in current mode)
+	 */
+	for (i = 32; i < gic_irqs; i += 32)
+		writel_relaxed(0xffffffff, base + GIC_DIST_IGROUP + i * 4 / 32);
+
+	/*
+	 * Set EnableGrp1/EnableGrp0 (bit 1 and 0) or EnableGrp (bit 0 only,
+	 * bit 1 ignored)
+	 */
+	writel_relaxed(3, base + GIC_DIST_CTRL);
 }
 
 static void gic_cpu_init(struct gic_chip_data *gic)
@@ -382,6 +489,7 @@  static void gic_cpu_init(struct gic_chip_data *gic)
 	void __iomem *base = gic_data_cpu_base(gic);
 	unsigned int cpu_mask, cpu = smp_processor_id();
 	int i;
+	unsigned long secure_irqs, secure_irq;
 
 	/*
 	 * Get what the GIC says our CPU mask is.
@@ -400,8 +508,27 @@  static void gic_cpu_init(struct gic_chip_data *gic)
 
 	gic_cpu_config(dist_base, NULL);
 
+	/*
+	 * Set any PPI and SGI interrupts not set in SMP_IPI_FIQ_MASK
+	 * to be group 1 (this register is RAZ/WI if not accessible)
+	 */
+	writel_relaxed(~SMP_IPI_FIQ_MASK, dist_base + GIC_DIST_IGROUP + 0);
+
+	/*
+	 * Update the priority of any resulting group0 interrupts.
+	 */
+	secure_irqs = ~readl_relaxed(dist_base + GIC_DIST_IGROUP + 0);
+	for_each_set_bit(secure_irq, &secure_irqs, 16)
+		gic_set_group_irq(dist_base, secure_irq, 0);
+
 	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
-	writel_relaxed(1, base + GIC_CPU_CTRL);
+
+	/* The bottom most bit will be set for all GIC variants (and is
+	 * called Enable or EnableGrp0 depending on operating mode). The
+	 * remaining four bits (CBPR, FIQEn, AckCtl and EnableGrp1) are
+	 * RAZ/WI if not accessible.
+	 */
+	writel_relaxed(0x1f, base + GIC_CPU_CTRL);
 }
 
 void gic_cpu_if_down(void)
@@ -485,7 +612,7 @@  static void gic_dist_restore(unsigned int gic_nr)
 		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
 			dist_base + GIC_DIST_ENABLE_SET + i * 4);
 
-	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
+	writel_relaxed(3, dist_base + GIC_DIST_CTRL);
 }
 
 static void gic_cpu_save(unsigned int gic_nr)
@@ -542,7 +669,7 @@  static void gic_cpu_restore(unsigned int gic_nr)
 		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
 
 	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
-	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
+	writel_relaxed(0x1f, cpu_base + GIC_CPU_CTRL);
 }
 
 static int gic_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
@@ -604,8 +731,19 @@  static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
 	int cpu;
 	unsigned long flags, map = 0;
+	unsigned long softint;
 
-	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+	/*
+	 * The locking in this function ensures we don't use stale cpu mappings
+	 * and thus we never route an IPI to the wrong physical core during a
+	 * big.LITTLE switch. The switch code takes both of these locks meaning
+	 * we can choose whichever lock is safe to use from our current calling
+	 * context.
+	 */
+	if (in_nmi())
+		raw_spin_lock(&fiq_safe_migration_lock);
+	else
+		raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
@@ -618,7 +756,15 @@  static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	dmb(ishst);
 
 	/* this always happens on GIC0 */
-	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+	softint = map << 16 | irq;
+	if (gic_get_group_irq(gic_data_dist_base(&gic_data[0]), irq))
+		softint |= 0x8000;
+	writel_relaxed(softint,
+		       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+	if (in_nmi())
+		raw_spin_unlock(&fiq_safe_migration_lock);
+	else
 
 	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 }
@@ -668,7 +814,7 @@  int gic_get_cpu_id(unsigned int cpu)
  * Migrate all peripheral interrupts with a target matching the current CPU
  * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
  * is also updated.  Targets to other CPU interfaces are unchanged.
- * This must be called with IRQs locally disabled.
+ * This must be called with IRQ and FIQ locally disabled.
  */
 void gic_migrate_target(unsigned int new_cpu_id)
 {
@@ -690,6 +836,7 @@  void gic_migrate_target(unsigned int new_cpu_id)
 	ror_val = (cur_cpu_id - new_cpu_id) & 31;
 
 	raw_spin_lock(&irq_controller_lock);
+	raw_spin_lock(&fiq_safe_migration_lock);
 
 	/* Update the target interface for this logical CPU */
 	gic_cpu_map[cpu] = 1 << new_cpu_id;
@@ -709,6 +856,7 @@  void gic_migrate_target(unsigned int new_cpu_id)
 		}
 	}
 
+	raw_spin_unlock(&fiq_safe_migration_lock);
 	raw_spin_unlock(&irq_controller_lock);
 
 	/*
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 45e2d8c..52a5676 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -101,5 +101,8 @@  static inline void __init register_routable_domain_ops
 {
 	gic_routable_irq_domain_ops = ops;
 }
+
+void gic_handle_fiq_ipi(void);
+
 #endif /* __ASSEMBLY */
 #endif