@@ -35,6 +35,11 @@ extern void (*handle_arch_irq)(struct pt_regs *);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
+#ifdef CONFIG_SMP
+extern void arch_trigger_all_cpu_backtrace(bool);
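+/*
+ * Defining the macro back to itself lets generic code (the #ifdef
+ * tests in <linux/nmi.h>) detect that this architecture provides
+ * its own implementation.
+ */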
+#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x)
+#endif
+
#endif
#endif
@@ -72,8 +72,12 @@ enum ipi_msg_type {
IPI_CPU_STOP,
IPI_IRQ_WORK,
IPI_COMPLETION,
+ IPI_CPU_BACKTRACE,
};
+/*
+ * Bits are set in this mask for every CPU that still has to dump its
+ * backtrace, and cleared again by that CPU once it has done so.
+ * For reliability, we're prepared to waste bits here.
+ */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+
static DECLARE_COMPLETION(cpu_running);
static struct smp_operations smp_ops;
@@ -535,6 +539,21 @@ static void ipi_cpu_stop(unsigned int cpu)
cpu_relax();
}
+static void ipi_cpu_backtrace(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+ static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
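+ /* Serialise with the other CPUs so their register dumps don't interleave. */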
+ arch_spin_lock(&lock);
+ pr_warn("FIQ backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ arch_spin_unlock(&lock);
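+ /* Clearing our bit tells the initiating CPU that this dump is done. */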
+ cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+ }
+}
+
static DEFINE_PER_CPU(struct completion *, cpu_completion);
int register_ipi_completion(struct completion *completion, int cpu)
@@ -614,6 +633,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
+ case IPI_CPU_BACKTRACE:
+ irq_enter();
+ ipi_cpu_backtrace(regs);
+ irq_exit();
+ break;
+
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
cpu, ipinr);
@@ -708,3 +733,40 @@ static int __init register_cpufreq_notifier(void)
core_initcall(register_cpufreq_notifier);
#endif
+
+void arch_trigger_all_cpu_backtrace(bool include_self)
+{
+ static unsigned long backtrace_flag;
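+ /* get_cpu() disables preemption, so "cpu" stays valid until put_cpu(). */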
+ int i, cpu = get_cpu();
+
+ if (test_and_set_bit(0, &backtrace_flag)) {
+ /*
+ * If another trigger_all_cpu_backtrace() is already in progress
+ * (backtrace_flag is set), don't dump the CPU state a second time.
+ */
+ put_cpu();
+ return;
+ }
+
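+ /* Mark every online CPU as owing a backtrace, optionally excluding ourselves. */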
+ cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+ if (!include_self)
+ cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+
+ if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+ pr_info("Sending FIQ to %s CPUs:\n",
+ (include_self ? "all" : "other"));
+ smp_cross_call(to_cpumask(backtrace_mask), IPI_CPU_BACKTRACE);
+ }
+
+ /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(to_cpumask(backtrace_mask)))
+ break;
+
+ mdelay(1);
+ }
+
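+ /* Let the next caller in, whether or not every CPU responded in time. */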
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_atomic();
+ put_cpu();
+}