@@ -348,9 +348,8 @@ EXPORT_SYMBOL(arch_local_irq_restore);
* already the case when ppc_md.power_save is called). The function
* will return whether to enter power save or just return.
*
- * In the former case, it will have notified lockdep of interrupts
- * being re-enabled and generally sanitized the lazy irq state,
- * and in the latter case it will leave with interrupts hard
+ * In the former case, it will have generally sanitized the lazy irq
+ * state, and in the latter case it will leave with interrupts hard
* disabled and marked as such, so the local_irq_enable() call
* in arch_cpu_idle() will properly re-enable everything.
*/
@@ -370,9 +369,6 @@ __cpuidle bool prep_irq_for_idle(void)
if (lazy_irq_pending())
 		return false;
 
- /* Tell lockdep we are about to re-enable */
- trace_hardirqs_on();
-
/*
* Mark interrupts as soft-enabled and clear the
* PACA_IRQ_HARD_DIS from the pending mask since we
Since commit a01353cf1896 ("cpuidle: Fix ct_idle_*() usage"), the
cpuidle entry code calls trace_hardirqs_on() (actually
trace_hardirqs_on_prepare()) in ct_cpuidle_enter() before calling into
the cpuidle driver. The call in prep_irq_for_idle() is therefore
redundant, so drop it, along with the comment text that referred to it.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/kernel/irq_64.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)
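
For readers unfamiliar with the ordering described above, here is a
minimal userspace sketch (not kernel code) of the call sequence after
commit a01353cf1896: the cpuidle core notifies lockdep in
ct_cpuidle_enter() before the driver's enter path eventually reaches
prep_irq_for_idle(), which is why the latter no longer needs its own
trace_hardirqs_on(). The function bodies are illustrative stubs, not
the real implementations, and the driver path is simplified to a single
cpuidle_driver_enter() stand-in.

#include <stdbool.h>
#include <stdio.h>

/* Stub: in the kernel this tells lockdep irqs are about to be enabled. */
static void trace_hardirqs_on_prepare(void)
{
	printf("lockdep: hardirqs about to be enabled\n");
}

/*
 * Stub: after this patch, prep_irq_for_idle() only sanitizes the lazy
 * irq state; the lockdep notification already happened in
 * ct_cpuidle_enter() below.
 */
static bool prep_irq_for_idle(void)
{
	printf("powerpc: lazy irq state sanitized\n");
	return true;
}

/* Stub for the cpuidle driver's enter path (hypothetical stand-in). */
static void cpuidle_driver_enter(void)
{
	if (prep_irq_for_idle())
		printf("entering low power state\n");
}

/* Stub: since a01353cf1896 the core notifies lockdep before the driver. */
static void ct_cpuidle_enter(void)
{
	trace_hardirqs_on_prepare();
	cpuidle_driver_enter();
}

int main(void)
{
	ct_cpuidle_enter();
	return 0;
}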