
[net-next,v9,1/3] net: extract napi poll functionality to __napi_poll()

Message ID 20210129181812.256216-2-weiwan@google.com
State Superseded
Series implement kthread based napi poll

Commit Message

Wei Wang Jan. 29, 2021, 6:18 p.m. UTC
From: Felix Fietkau <nbd@nbd.name> 

This commit introduces a new function, __napi_poll(), which contains the
main logic of the existing napi_poll() function and will be called by
other functions in later commits.
The idea and implementation are by Felix Fietkau <nbd@nbd.name> and were
originally proposed as part of a patch moving napi work to workqueue
context.
By itself, this commit is a pure code restructure.

Signed-off-by: Felix Fietkau <nbd@nbd.name> 
Signed-off-by: Wei Wang <weiwan@google.com>
---
 net/core/dev.c | 35 +++++++++++++++++++++++++----------
 1 file changed, 25 insertions(+), 10 deletions(-)

Comments

Alexander Duyck Feb. 3, 2021, 5 p.m. UTC | #1
On Fri, Jan 29, 2021 at 10:20 AM Wei Wang <weiwan@google.com> wrote:
>
> From: Felix Fietkau <nbd@nbd.name>
>
> This commit introduces a new function __napi_poll() which does the main
> logic of the existing napi_poll() function, and will be called by other
> functions in later commits.
> This idea and implementation is done by Felix Fietkau <nbd@nbd.name> and
> is proposed as part of the patch to move napi work to work_queue
> context.
> This commit by itself is a code restructure.
>
> Signed-off-by: Felix Fietkau <nbd@nbd.name>
> Signed-off-by: Wei Wang <weiwan@google.com>
> ---
>  net/core/dev.c | 35 +++++++++++++++++++++++++----------
>  1 file changed, 25 insertions(+), 10 deletions(-)
>
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 0332f2e8f7da..7d23bff03864 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -6768,15 +6768,10 @@ void __netif_napi_del(struct napi_struct *napi)
>  }
>  EXPORT_SYMBOL(__netif_napi_del);
>
> -static int napi_poll(struct napi_struct *n, struct list_head *repoll)
> +static int __napi_poll(struct napi_struct *n, bool *repoll)
>  {
> -       void *have;
>         int work, weight;
>
> -       list_del_init(&n->poll_list);
> -
> -       have = netpoll_poll_lock(n);
> -
>         weight = n->weight;
>
>         /* This NAPI_STATE_SCHED test is for avoiding a race
> @@ -6796,7 +6791,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
>                             n->poll, work, weight);
>
>         if (likely(work < weight))
> -               goto out_unlock;
> +               return work;
>
>         /* Drivers must not modify the NAPI state if they
>          * consume the entire weight.  In such cases this code
> @@ -6805,7 +6800,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
>          */
>         if (unlikely(napi_disable_pending(n))) {
>                 napi_complete(n);
> -               goto out_unlock;
> +               return work;
>         }
>
>         /* The NAPI context has more processing work, but busy-polling
> @@ -6818,7 +6813,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
>                          */
>                         napi_schedule(n);
>                 }
> -               goto out_unlock;
> +               return work;
>         }
>
>         if (n->gro_bitmask) {
> @@ -6836,9 +6831,29 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
>         if (unlikely(!list_empty(&n->poll_list))) {
>                 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
>                              n->dev ? n->dev->name : "backlog");
> -               goto out_unlock;
> +               return work;
>         }
>
> +       *repoll = true;
> +
> +       return work;
> +}
> +
> +static int napi_poll(struct napi_struct *n, struct list_head *repoll)
> +{
> +       bool do_repoll = false;
> +       void *have;
> +       int work;
> +
> +       list_del_init(&n->poll_list);
> +
> +       have = netpoll_poll_lock(n);
> +
> +       work = __napi_poll(n, &do_repoll);
> +
> +       if (!do_repoll)
> +               goto out_unlock;
> +
>         list_add_tail(&n->poll_list, repoll);
>
>  out_unlock:

Instead of using the out_unlock label, why don't you only do the
list_add_tail if do_repoll is true? It will allow you to drop a few
lines of noise. Otherwise this looks good to me.

Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>

Wei Wang Feb. 3, 2021, 5:57 p.m. UTC | #2
On Wed, Feb 3, 2021 at 9:00 AM Alexander Duyck
<alexander.duyck@gmail.com> wrote:
>

> On Fri, Jan 29, 2021 at 10:20 AM Wei Wang <weiwan@google.com> wrote:
> > [...]
>
> Instead of using the out_unlock label, why don't you only do the
> list_add_tail if do_repoll is true? It will allow you to drop a few
> lines of noise. Otherwise this looks good to me.
>

Ack.

> Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>

Thanks for the review.
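
For readers following the thread, here is a minimal sketch of what napi_poll() could look like with the suggestion acked above applied: do the list_add_tail() only when do_repoll is true and drop the out_unlock label. This is an illustration only, not the code of a later revision of the series; the netpoll_poll_unlock(have) tail and final return are assumed from the pre-existing end of napi_poll(), which is not visible in the hunks above.

/* Sketch of the simplified wrapper (assumes the existing
 * netpoll_poll_unlock(have) tail of napi_poll()).
 */
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	bool do_repoll = false;
	void *have;
	int work;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	work = __napi_poll(n, &do_repoll);

	/* Re-queue only when __napi_poll() consumed the full weight
	 * and asked to be polled again; no label needed.
	 */
	if (do_repoll)
		list_add_tail(&n->poll_list, repoll);

	netpoll_poll_unlock(have);

	return work;
}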

Patch

diff --git a/net/core/dev.c b/net/core/dev.c
index 0332f2e8f7da..7d23bff03864 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6768,15 +6768,10 @@  void __netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(__netif_napi_del);
 
-static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+static int __napi_poll(struct napi_struct *n, bool *repoll)
 {
-	void *have;
 	int work, weight;
 
-	list_del_init(&n->poll_list);
-
-	have = netpoll_poll_lock(n);
-
 	weight = n->weight;
 
 	/* This NAPI_STATE_SCHED test is for avoiding a race
@@ -6796,7 +6791,7 @@  static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 			    n->poll, work, weight);
 
 	if (likely(work < weight))
-		goto out_unlock;
+		return work;
 
 	/* Drivers must not modify the NAPI state if they
 	 * consume the entire weight.  In such cases this code
@@ -6805,7 +6800,7 @@  static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 	 */
 	if (unlikely(napi_disable_pending(n))) {
 		napi_complete(n);
-		goto out_unlock;
+		return work;
 	}
 
 	/* The NAPI context has more processing work, but busy-polling
@@ -6818,7 +6813,7 @@  static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 			 */
 			napi_schedule(n);
 		}
-		goto out_unlock;
+		return work;
 	}
 
 	if (n->gro_bitmask) {
@@ -6836,9 +6831,29 @@  static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 	if (unlikely(!list_empty(&n->poll_list))) {
 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
 			     n->dev ? n->dev->name : "backlog");
-		goto out_unlock;
+		return work;
 	}
 
+	*repoll = true;
+
+	return work;
+}
+
+static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+{
+	bool do_repoll = false;
+	void *have;
+	int work;
+
+	list_del_init(&n->poll_list);
+
+	have = netpoll_poll_lock(n);
+
+	work = __napi_poll(n, &do_repoll);
+
+	if (!do_repoll)
+		goto out_unlock;
+
 	list_add_tail(&n->poll_list, repoll);
 
 out_unlock:
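
As context for why __napi_poll() is split out ("will be called by other functions in later commits"), the rest of this series adds a kthread based NAPI poll mode. The fragment below is only a rough, hypothetical sketch of how such a kernel-thread poller could reuse __napi_poll(); the function name, wakeup handling, and timing here are assumptions for illustration and do not reproduce patches 2/3 or 3/3.

/* Illustrative sketch only: a simplified kthread poll loop reusing
 * __napi_poll(). Real code needs proper wakeup and NAPI state handling.
 */
static int napi_thread_fn(void *data)
{
	struct napi_struct *napi = data;
	bool repoll;

	while (!kthread_should_stop()) {
		repoll = false;

		/* __napi_poll() expects BH context, as in net_rx_action(). */
		local_bh_disable();
		__napi_poll(napi, &repoll);
		local_bh_enable();

		if (!repoll)
			/* Sleep briefly until the NAPI is scheduled again. */
			schedule_timeout_interruptible(1);
	}
	return 0;
}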