[net-next,v4,06/10] virtio-net: unify the code for recycling the xmit ptr

Message ID 20210413031523.73507-7-xuanzhuo@linux.alibaba.com
State New
Series virtio-net support xdp socket zero copy xmit

Commit Message

Xuan Zhuo April 13, 2021, 3:15 a.m. UTC
There are currently two buffer types, "skb" and "xdp frame", to handle
when recycling old xmit buffers.

The two code paths are almost identical but implemented independently,
which makes adding new buffer types inconvenient. Extract the common
logic into a helper function and call it from both places to reclaim
the old xmit pointers.

Rename free_old_xmit_skbs() to free_old_xmit().

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 86 ++++++++++++++++++----------------------
 1 file changed, 38 insertions(+), 48 deletions(-)
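
For context, the unification works because the driver already tags
xdp_frame pointers before queueing them on the virtqueue, so skbs and
frames can be told apart again at completion time. A minimal sketch of
that existing scheme (ptr_to_xdp() is visible in the diff context below;
the companion helpers are reproduced from the same file):

	#define VIRTIO_XDP_FLAG	BIT(0)

	static bool is_xdp_frame(void *ptr)
	{
		return (unsigned long)ptr & VIRTIO_XDP_FLAG;
	}

	static void *xdp_to_ptr(struct xdp_frame *ptr)
	{
		/* pointers are at least word aligned, so bit 0 is
		 * free to carry the type tag
		 */
		return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
	}

	static struct xdp_frame *ptr_to_xdp(void *ptr)
	{
		return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
	}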

Comments

Jason Wang April 14, 2021, 3:32 a.m. UTC | #1
On 2021/4/13 11:15 AM, Xuan Zhuo wrote:

[...]

> +	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> +		if (likely(!is_xdp_frame(ptr))) {

So I think we need to drop the likely here because it could be called by
the skb path.

Otherwise looks good.

Thanks
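
For readers following the review: likely() and unlikely() are static
branch-prediction hints, so a helper shared by the skb and XDP xmit
paths should not hard-code a bias that only one caller satisfies. For
context, the macros compile down to __builtin_expect(), as defined in
include/linux/compiler.h:

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

With the hint dropped, the branch in __free_old_xmit() is left unbiased,
which is the safe choice when the mix of buffer types depends on the
caller: start_xmit() completes mostly skbs, virtnet_xdp_xmit() mostly
xdp_frames.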

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 52653e234a20..f3752b254965 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -264,6 +264,30 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
 	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
 }
 
+static void __free_old_xmit(struct send_queue *sq, bool in_napi,
+			    struct virtnet_sq_stats *stats)
+{
+	unsigned int len;
+	void *ptr;
+
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		if (likely(!is_xdp_frame(ptr))) {
+			struct sk_buff *skb = ptr;
+
+			pr_debug("Sent skb %p\n", skb);
+
+			stats->bytes += skb->len;
+			napi_consume_skb(skb, in_napi);
+		} else {
+			struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+			stats->bytes += frame->len;
+			xdp_return_frame(frame);
+		}
+		stats->packets++;
+	}
+}
+
 /* Converting between virtqueue no. and kernel tx/rx queue no.
  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
  */
@@ -525,15 +549,12 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 			    int n, struct xdp_frame **frames, u32 flags)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtnet_sq_stats stats = {};
 	struct receive_queue *rq = vi->rq;
 	struct bpf_prog *xdp_prog;
 	struct send_queue *sq;
-	unsigned int len;
-	int packets = 0;
-	int bytes = 0;
 	int nxmit = 0;
 	int kicks = 0;
-	void *ptr;
 	int ret;
 	int i;
 
@@ -552,20 +573,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	}
 
 	/* Free up any pending old buffers before queueing new ones. */
-	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		if (likely(is_xdp_frame(ptr))) {
-			struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-			bytes += frame->len;
-			xdp_return_frame(frame);
-		} else {
-			struct sk_buff *skb = ptr;
-
-			bytes += skb->len;
-			napi_consume_skb(skb, false);
-		}
-		packets++;
-	}
+	__free_old_xmit(sq, false, &stats);
 
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
@@ -582,8 +590,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	}
 out:
 	u64_stats_update_begin(&sq->stats.syncp);
-	sq->stats.bytes += bytes;
-	sq->stats.packets += packets;
+	sq->stats.bytes += stats.bytes;
+	sq->stats.packets += stats.packets;
 	sq->stats.xdp_tx += n;
 	sq->stats.xdp_tx_drops += n - nxmit;
 	sq->stats.kicks += kicks;
@@ -1406,39 +1414,21 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 	return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+static void free_old_xmit(struct send_queue *sq, bool in_napi)
 {
-	unsigned int len;
-	unsigned int packets = 0;
-	unsigned int bytes = 0;
-	void *ptr;
-
-	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		if (likely(!is_xdp_frame(ptr))) {
-			struct sk_buff *skb = ptr;
-
-			pr_debug("Sent skb %p\n", skb);
+	struct virtnet_sq_stats stats = {};
 
-			bytes += skb->len;
-			napi_consume_skb(skb, in_napi);
-		} else {
-			struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-			bytes += frame->len;
-			xdp_return_frame(frame);
-		}
-		packets++;
-	}
+	__free_old_xmit(sq, in_napi, &stats);
 
 	/* Avoid overhead when no packets have been processed
 	 * happens when called speculatively from start_xmit.
 	 */
-	if (!packets)
+	if (!stats.packets)
 		return;
 
 	u64_stats_update_begin(&sq->stats.syncp);
-	sq->stats.bytes += bytes;
-	sq->stats.packets += packets;
+	sq->stats.bytes += stats.bytes;
+	sq->stats.packets += stats.packets;
 	u64_stats_update_end(&sq->stats.syncp);
 }
 
@@ -1463,7 +1453,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
 		return;
 
 	if (__netif_tx_trylock(txq)) {
-		free_old_xmit_skbs(sq, true);
+		free_old_xmit(sq, true);
 		__netif_tx_unlock(txq);
 	}
 
@@ -1548,7 +1538,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
-	free_old_xmit_skbs(sq, true);
+	free_old_xmit(sq, true);
 	__netif_tx_unlock(txq);
 
 	virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1617,7 +1607,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	bool use_napi = sq->napi.weight;
 
 	/* Free up any pending old buffers before queueing new ones. */
-	free_old_xmit_skbs(sq, false);
+	free_old_xmit(sq, false);
 
 	if (use_napi && kick)
 		virtqueue_enable_cb_delayed(sq->vq);
@@ -1661,7 +1651,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (!use_napi &&
 		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 			/* More just got used, free them then recheck. */
-			free_old_xmit_skbs(sq, false);
+			free_old_xmit(sq, false);
 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 				netif_start_subqueue(dev, qnum);
 				virtqueue_disable_cb(sq->vq);
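
The payoff of this refactor shows up later in the series: supporting a
new completion type means adding one branch to __free_old_xmit() rather
than duplicating yet another free loop. A hypothetical sketch of such an
extension (is_xsk_frame() and the completion call are placeholders, not
code from this series):

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (is_xdp_frame(ptr)) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			stats->bytes += frame->len;
			xdp_return_frame(frame);
		} else if (is_xsk_frame(ptr)) {	/* hypothetical helper */
			/* complete the AF_XDP tx descriptor here */
		} else {
			struct sk_buff *skb = ptr;

			stats->bytes += skb->len;
			napi_consume_skb(skb, in_napi);
		}
		stats->packets++;
	}

A design note on the stats argument: each caller accumulates into a
caller-owned struct virtnet_sq_stats and folds the totals into sq->stats
under a single u64_stats_update_begin()/end() pair, keeping the
seqcount-protected section short.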