[net-next] net: try to avoid unneeded backlog flush

Message ID 0d64ac9b321104d58270822c204845ccb31368f8.1599747321.git.pabeni@redhat.com
State New

Commit Message

Paolo Abeni Sept. 10, 2020, 2:20 p.m. UTC
flush_all_backlogs() may cause a deadlock on systems
running processes with a FIFO scheduling policy.

This is critical in -RT scenarios, where user-space
specifically ensures that no network activity is scheduled
on the CPU running the mentioned FIFO process, yet the
flush still gets stuck.

This commit tries to address the problem by checking the
backlog status on the remote CPUs before scheduling the
flush operation. If the backlog is empty, we can skip it.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/core/dev.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 46 insertions(+), 4 deletions(-)

Comments

Eric Dumazet Sept. 10, 2020, 2:36 p.m. UTC | #1
On Thu, Sep 10, 2020 at 4:21 PM Paolo Abeni <pabeni@redhat.com> wrote:
>
> flush_all_backlogs() may cause a deadlock on systems
> running processes with a FIFO scheduling policy.
>
> This is critical in -RT scenarios, where user-space
> specifically ensures that no network activity is scheduled
> on the CPU running the mentioned FIFO process, yet the
> flush still gets stuck.
>
> This commit tries to address the problem by checking the
> backlog status on the remote CPUs before scheduling the
> flush operation. If the backlog is empty, we can skip it.

If it is not empty, the problem you want to fix is still there?

>
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
> ---
>  net/core/dev.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 46 insertions(+), 4 deletions(-)
>
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 152ad3b578de..fdef40bf4b88 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -5621,17 +5621,59 @@ static void flush_backlog(struct work_struct *work)
>         local_bh_enable();
>  }
>
> +static bool flush_required(int cpu)
> +{
> +#if IS_ENABLED(CONFIG_RPS)
> +       struct softnet_data *sd = &per_cpu(softnet_data, cpu);
> +       bool do_flush;
> +
> +       local_irq_disable();
> +       rps_lock(sd);
> +
> +       /* as insertion into process_queue happens with the rps lock held,
> +        * process_queue access may race only with dequeue
> +        */
> +       do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
> +                  !skb_queue_empty_lockless(&sd->process_queue);
> +       rps_unlock(sd);
> +       local_irq_enable();
> +
> +       return do_flush;
> +#endif
> +       /* without RPS we can't safely check input_pkt_queue: during a
> +        * concurrent remote skb_queue_splice() we can detect as empty both
> +        * input_pkt_queue and process_queue even if the latter could end-up
> +        * containing a lot of packets.
> +        */
> +       return true;
> +}
> +
>  static void flush_all_backlogs(void)
>  {
> +       static cpumask_t flush_cpus  = { CPU_BITS_NONE };
>         unsigned int cpu;
>
> +       /* since we are under rtnl lock protection we can use static data
> +        * for the cpumask and avoid allocating on stack the possibly
> +        * large mask
> +        */
> +       ASSERT_RTNL();
> +

OK, but you only set bits in this bitmask.

You probably want to clear it here, rather than rely on the one-time
CPU_BITS_NONE initializer.
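
A minimal sketch of that adjustment (not part of the posted patch; it keeps
the rest of the function as submitted and only resets the static mask on
every invocation, which is safe because ASSERT_RTNL() guarantees a single
caller at a time):

static void flush_all_backlogs(void)
{
	static cpumask_t flush_cpus;
	unsigned int cpu;

	/* static data is fine under the rtnl lock, but bits set by a
	 * previous invocation must not leak into this one
	 */
	ASSERT_RTNL();
	cpumask_clear(&flush_cpus);

	get_online_cpus();

	for_each_online_cpu(cpu) {
		if (flush_required(cpu)) {
			queue_work_on(cpu, system_highpri_wq,
				      per_cpu_ptr(&flush_works, cpu));
			cpumask_set_cpu(cpu, &flush_cpus);
		}
	}

	/* cpus left out of the mask had an empty backlog; in-flight packets
	 * are handled by synchronize_net() in rollback_registered_many()
	 */
	for_each_cpu(cpu, &flush_cpus)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	put_online_cpus();
}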

>         get_online_cpus();
>
> -       for_each_online_cpu(cpu)
> -               queue_work_on(cpu, system_highpri_wq,
> -                             per_cpu_ptr(&flush_works, cpu));
> +       for_each_online_cpu(cpu) {
> +               if (flush_required(cpu)) {
> +                       queue_work_on(cpu, system_highpri_wq,
> +                                     per_cpu_ptr(&flush_works, cpu));
> +                       cpumask_set_cpu(cpu, &flush_cpus);
> +               }
> +       }
>
> -       for_each_online_cpu(cpu)
> +       /* we can have in flight packet[s] on the cpus we are not flushing,
> +        * synchronize_net() in rollback_registered_many() will take care of
> +        * them
> +        */
> +       for_each_cpu(cpu, &flush_cpus)
>                 flush_work(per_cpu_ptr(&flush_works, cpu));
>
>         put_online_cpus();
> --
> 2.26.2
>

Patch

diff --git a/net/core/dev.c b/net/core/dev.c
index 152ad3b578de..fdef40bf4b88 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5621,17 +5621,59 @@  static void flush_backlog(struct work_struct *work)
 	local_bh_enable();
 }
 
+static bool flush_required(int cpu)
+{
+#if IS_ENABLED(CONFIG_RPS)
+	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
+	bool do_flush;
+
+	local_irq_disable();
+	rps_lock(sd);
+
+	/* as insertion into process_queue happens with the rps lock held,
+	 * process_queue access may race only with dequeue
+	 */
+	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
+		   !skb_queue_empty_lockless(&sd->process_queue);
+	rps_unlock(sd);
+	local_irq_enable();
+
+	return do_flush;
+#endif
+	/* without RPS we can't safely check input_pkt_queue: during a
+	 * concurrent remote skb_queue_splice() we can detect as empty both
+	 * input_pkt_queue and process_queue even if the latter could end-up
+	 * containing a lot of packets.
+	 */
+	return true;
+}
+
 static void flush_all_backlogs(void)
 {
+	static cpumask_t flush_cpus  = { CPU_BITS_NONE };
 	unsigned int cpu;
 
+	/* since we are under rtnl lock protection we can use static data
+	 * for the cpumask and avoid allocating on stack the possibly
+	 * large mask
+	 */
+	ASSERT_RTNL();
+
 	get_online_cpus();
 
-	for_each_online_cpu(cpu)
-		queue_work_on(cpu, system_highpri_wq,
-			      per_cpu_ptr(&flush_works, cpu));
+	for_each_online_cpu(cpu) {
+		if (flush_required(cpu)) {
+			queue_work_on(cpu, system_highpri_wq,
+				      per_cpu_ptr(&flush_works, cpu));
+			cpumask_set_cpu(cpu, &flush_cpus);
+		}
+	}
 
-	for_each_online_cpu(cpu)
+	/* we can have in flight packet[s] on the cpus we are not flushing,
+	 * synchronize_net() in rollback_registered_many() will take care of
+	 * them
+	 */
+	for_each_cpu(cpu, &flush_cpus)
 		flush_work(per_cpu_ptr(&flush_works, cpu));
 
 	put_online_cpus();
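
The comment in flush_all_backlogs() documents a design choice: the mask is
static because the function always runs under the rtnl lock (hence the
ASSERT_RTNL()), and a cpumask_t can be large on high NR_CPUS configurations,
so it should not live on the stack. A hypothetical alternative, sketched here
only to illustrate the trade-off and not part of this patch, would use the
dynamic cpumask helpers and fall back to flushing every online CPU if the
allocation fails:

static void flush_all_backlogs(void)
{
	cpumask_var_t flush_cpus;
	unsigned int cpu;
	bool have_mask;

	ASSERT_RTNL();

	/* zalloc_cpumask_var() only really allocates when
	 * CONFIG_CPUMASK_OFFSTACK is set; otherwise the mask sits on the
	 * stack and is simply zeroed
	 */
	have_mask = zalloc_cpumask_var(&flush_cpus, GFP_KERNEL);

	get_online_cpus();

	for_each_online_cpu(cpu) {
		if (!have_mask || flush_required(cpu)) {
			queue_work_on(cpu, system_highpri_wq,
				      per_cpu_ptr(&flush_works, cpu));
			if (have_mask)
				cpumask_set_cpu(cpu, flush_cpus);
		}
	}

	for_each_cpu(cpu, have_mask ? flush_cpus : cpu_online_mask)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	put_online_cpus();

	if (have_mask)
		free_cpumask_var(flush_cpus);
}

The static variant avoids both the allocation and the failure handling, at
the cost of keeping the NR_CPUS-sized mask in static storage permanently.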