===================================================================
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -360,6 +360,16 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
 
+#ifdef CONFIG_PREEMPT_RT
+# define cpu_lock_irqsave(cpu, flags)		\
+	local_lock_irqsave_on(pa_lock, flags, cpu)
+# define cpu_unlock_irqrestore(cpu, flags)	\
+	local_unlock_irqrestore_on(pa_lock, flags, cpu)
+#else
+# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
+#endif
+
 int page_group_by_mobility_disabled __read_mostly;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -2852,7 +2862,7 @@ static void drain_pages_zone(unsigned in
 	LIST_HEAD(dst);
 	int count;
 
-	local_lock_irqsave(pa_lock, flags);
+	cpu_lock_irqsave(cpu, flags);
 	pset = per_cpu_ptr(zone->pageset, cpu);
 
 	pcp = &pset->pcp;
@@ -2860,7 +2870,7 @@ static void drain_pages_zone(unsigned in
 	if (count)
 		isolate_pcp_pages(count, pcp, &dst);
 
-	local_unlock_irqrestore(pa_lock, flags);
+	cpu_unlock_irqrestore(cpu, flags);
 
 	if (count)
 		free_pcppages_bulk(zone, &dst, false);
@@ -2898,6 +2908,7 @@ void drain_local_pages(struct zone *zone
 		drain_pages(cpu);
 }
 
+#ifndef CONFIG_PREEMPT_RT
 static void drain_local_pages_wq(struct work_struct *work)
 {
 	struct pcpu_drain *drain;
@@ -2915,6 +2926,7 @@ static void drain_local_pages_wq(struct
 	drain_local_pages(drain->zone);
 	migrate_enable();
 }
+#endif
 
 /*
  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
@@ -2982,6 +2994,7 @@ void drain_all_pages(struct zone *zone)
 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
 	}
 
+#ifndef CONFIG_PREEMPT_RT
 	for_each_cpu(cpu, &cpus_with_pcps) {
 		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
 
@@ -2991,6 +3004,10 @@ void drain_all_pages(struct zone *zone)
 	}
 	for_each_cpu(cpu, &cpus_with_pcps)
 		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
+#else
+	for_each_cpu(cpu, &cpus_with_pcps)
+		drain_pages(cpu);
+#endif
 
 	mutex_unlock(&pcpu_drain_mutex);
 }
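
Taken together, the two #ifndef CONFIG_PREEMPT_RT blocks above let an RT kernel
drain remote per-CPU page lists in place, under the victim CPU's pa_lock,
instead of bouncing a work item to every CPU: local_lock_irqsave_on() can
acquire the sleeping per-CPU lock on behalf of another CPU, which a plain
local_irq_save() cannot do. What follows is a minimal userspace sketch of that
remote-drain pattern, not kernel code; the pcp array, drain_cpu() and the
pthread mutexes are illustrative stand-ins for the pageset, drain_pages_zone()
and pa_lock.

	#include <pthread.h>
	#include <stdio.h>

	#define NCPU 4

	/*
	 * Stand-in for a per-CPU pageset: a count of cached pages guarded
	 * by a per-"CPU" lock that any thread may take, mimicking the RT
	 * behaviour of local_lock_irqsave_on(pa_lock, flags, cpu).
	 */
	static struct pcp {
		pthread_mutex_t lock;
		int count;
	} pcp[NCPU];

	/* RT-style remote drain: lock the victim CPU's list from anywhere. */
	static int drain_cpu(int cpu)
	{
		int freed;

		pthread_mutex_lock(&pcp[cpu].lock);	/* cpu_lock_irqsave(cpu, flags) */
		freed = pcp[cpu].count;
		pcp[cpu].count = 0;
		pthread_mutex_unlock(&pcp[cpu].lock);	/* cpu_unlock_irqrestore(cpu, flags) */
		return freed;
	}

	int main(void)
	{
		int cpu, total = 0;

		for (cpu = 0; cpu < NCPU; cpu++) {
			pthread_mutex_init(&pcp[cpu].lock, NULL);
			pcp[cpu].count = 10 * (cpu + 1);	/* pretend pages were cached */
		}

		/* The CONFIG_PREEMPT_RT branch of drain_all_pages(): a plain loop. */
		for (cpu = 0; cpu < NCPU; cpu++)
			total += drain_cpu(cpu);

		printf("drained %d pages\n", total);	/* 10+20+30+40 = 100 */
		return 0;
	}

The non-RT kernel keeps the drain_local_pages_wq() workqueue path because
local_irq_save() only protects the lists from the CPU that owns them, so each
CPU has to drain its own.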