@@ -1281,4 +1281,18 @@ config DEBUG_SET_MODULE_RONX
against certain classes of kernel exploits.
If in doubt, say "N".
+config ARM_BACKTRACE_TRIGGERING
+ bool "Support non-maskable backtrace triggering"
+ depends on SMP
+ select FIQ
+ help
+ Say Y here to include support for non-maskable backtrace
+ triggering. This is used to extract a backtrace from all
+ CPUs of an unresponsive system. Backtrace requests can be
+ issued by the lockup and hung-task detectors, by spinlock
+ debugging, or via magic SysRq.
+
+ This option is only effective when the kernel is run on a
+ platform capable of generating FIQs.
+
endmenu
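
For context, here is a simplified sketch of the magic-SysRq 'l' handler, one of the consumers named in the help text above, modeled on drivers/tty/sysrq.c of this era. It is abridged: the real handler also schedules a workqueue-based dump when no architecture backtrace hook is available.

#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/smp.h>

/* Abridged sketch of the SysRq 'l' ("show all CPUs") handler. */
static void sysrq_handle_showallcpus(int key)
{
	/* trigger_all_cpu_backtrace() returns false when the
	 * architecture provides no backtrace hook. */
	if (!trigger_all_cpu_backtrace())
		pr_info("CPU%d: no all-CPU backtrace support\n",
			smp_processor_id());
}
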
@@ -35,6 +35,11 @@ extern void (*handle_arch_irq)(struct pt_regs *);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
+#ifdef CONFIG_ARM_BACKTRACE_TRIGGERING
+void arch_trigger_all_cpu_backtrace(bool include_self);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
#endif
#endif
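
The self-referential #define above is deliberate: it lets generic code detect the arch hook with the preprocessor. The generic wrappers in include/linux/nmi.h of this era look roughly like the sketch below (abridged).

#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(true);
	return true;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(false);
	return true;
}
#else
/* No arch hook: callers fall back to dumping only themselves. */
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
#endif
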
@@ -90,6 +90,10 @@ extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
extern int register_ipi_completion(struct completion *completion, int cpu);
+#ifdef CONFIG_ARM_BACKTRACE_TRIGGERING
+extern void ipi_cpu_backtrace(struct pt_regs *regs);
+#endif
+
struct smp_operations {
#ifdef CONFIG_SMP
/*
@@ -554,6 +554,63 @@ static void ipi_complete(unsigned int cpu)
complete(per_cpu(cpu_completion, cpu));
}
+#ifdef CONFIG_ARM_BACKTRACE_TRIGGERING
+/*
+ * For reliability, we're prepared to waste bits here: a static,
+ * CONFIG_NR_CPUS-sized bitmap needs no allocation on what may be
+ * a crash path.
+ */
+static DECLARE_BITMAP(backtrace_mask, CONFIG_NR_CPUS) __read_mostly;
+
+void ipi_cpu_backtrace(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+ /*
+ * Use a raw arch_spinlock_t rather than spinlock_t: this runs
+ * in FIQ (NMI-like) context, where ordinary spinlocks are not
+ * safe. The lock only keeps the register dumps of different
+ * CPUs from interleaving.
+ */
+ static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+ arch_spin_lock(&lock);
+ pr_warn("FIQ backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ arch_spin_unlock(&lock);
+ cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+ }
+}
+
+void arch_trigger_all_cpu_backtrace(bool include_self)
+{
+ static unsigned long backtrace_flag;
+ int i, cpu = get_cpu();
+
+ if (test_and_set_bit(0, &backtrace_flag)) {
+ /*
+ * If a trigger_all_cpu_backtrace() is already in progress
+ * (backtrace_flag is set), don't emit a second, duplicate
+ * set of CPU dumps.
+ */
+ put_cpu();
+ return;
+ }
+
+ cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+ if (!include_self)
+ cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+
+ if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+ pr_info("Sending FIQ to %s CPUs:\n",
+ (include_self ? "all" : "other"));
+ smp_cross_call(to_cpumask(backtrace_mask), IPI_FIQ);
+ }
+
+ /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(to_cpumask(backtrace_mask)))
+ break;
+
+ mdelay(1);
+ }
+
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_atomic();
+ put_cpu();
+}
+#endif
+
/*
* Main handler for inter-processor interrupts
*/
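
A design note on the hunk above: the requester polls backtrace_mask with mdelay() rather than sleeping on completions because the target CPUs answer from FIQ (NMI-like) context, where they cannot take sleeping locks or signal completions; each CPU clears its own bit after dumping, so a wedged CPU costs at most the 10 second timeout. A hypothetical detector-style caller (names here are illustrative, not actual watchdog code) would use the allbutself variant:

#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/smp.h>

/* Hypothetical lockup-report helper: dump the stuck CPU locally,
 * then pull FIQ backtraces from all the other CPUs. */
static void report_lockup(struct pt_regs *regs)
{
	pr_emerg("CPU%d appears stuck:\n", smp_processor_id());
	show_regs(regs);

	/* include_self == false: this CPU was already dumped above. */
	if (!trigger_allbutself_cpu_backtrace())
		pr_emerg("no all-CPU backtrace support\n");
}
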
@@ -623,6 +680,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
case IPI_FIQ:
BUILD_BUG_ON(SMP_IPI_FIQ_MASK != BIT(IPI_FIQ));
pr_warn("CPU%u: IPI FIQ delivered via IRQ vector\n", cpu);
+#ifdef CONFIG_ARM_BACKTRACE_TRIGGERING
+ /*
+ * Degrade gracefully: if the platform could not deliver the IPI
+ * as a true FIQ, still produce this CPU's backtrace from the
+ * (maskable) IRQ path.
+ */
+ ipi_cpu_backtrace(regs);
+#endif
break;
default:
@@ -484,6 +484,9 @@ asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
#ifdef CONFIG_ARM_GIC
gic_handle_fiq_ipi();
#endif
+#ifdef CONFIG_ARM_BACKTRACE_TRIGGERING
+ /* Dump this CPU's registers if an all-CPU backtrace was requested. */
+ ipi_cpu_backtrace(regs);
+#endif
nmi_exit();
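
For orientation, with the hunk above applied, handle_fiq_as_nmi() looks roughly as follows. The surrounding lines are reconstructed from the diff context and abridged; details may differ.

asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	nmi_enter();

#ifdef CONFIG_ARM_GIC
	gic_handle_fiq_ipi();		/* acknowledge the FIQ-mode IPI at the GIC */
#endif
#ifdef CONFIG_ARM_BACKTRACE_TRIGGERING
	ipi_cpu_backtrace(regs);	/* dump this CPU if a backtrace was requested */
#endif

	nmi_exit();

	set_irq_regs(old_regs);
}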