diff mbox series

[net-next,v3,5/8] virtio-net: xsk zero copy xmit support xsk unaligned mode

Message ID 20210331071139.15473-6-xuanzhuo@linux.alibaba.com
State New
Headers show
Series virtio-net support xdp socket zero copy xmit | expand

Commit Message

Xuan Zhuo March 31, 2021, 7:11 a.m. UTC
In xsk unaligned mode, the frame pointed to by desc may span two
consecutive pages, but not more than two pages.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)

Comments

Jason Wang April 6, 2021, 6:55 a.m. UTC | #1
On 2021/3/31 3:11 PM, Xuan Zhuo wrote:
> In xsk unaligned mode, the frame pointed to by desc may span two

> consecutive pages, but not more than two pages.

>

> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>

> Reviewed-by: Dust Li <dust.li@linux.alibaba.com>



I'd squash this patch into patch 4.


> ---

>   drivers/net/virtio_net.c | 30 ++++++++++++++++++++++++------

>   1 file changed, 24 insertions(+), 6 deletions(-)

>

> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c

> index c8a317a93ef7..259fafcf6028 100644

> --- a/drivers/net/virtio_net.c

> +++ b/drivers/net/virtio_net.c

> @@ -2562,24 +2562,42 @@ static void virtnet_xsk_check_space(struct send_queue *sq)

>   static int virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,

>   			    struct xdp_desc *desc)

>   {

> +	u32 offset, n, i, copy, copied;



Let's use a better name since we don't actually copy anything here.


>   	struct virtnet_info *vi;

>   	struct page *page;

>   	void *data;

> -	u32 offset;

> +	int err, m;

>   	u64 addr;

> -	int err;

>   

>   	vi = sq->vq->vdev->priv;

>   	addr = desc->addr;

> +

>   	data = xsk_buff_raw_get_data(pool, addr);

> +

>   	offset = offset_in_page(data);

> +	m = desc->len - (PAGE_SIZE - offset);

> +	/* xsk unaligned mode, desc will use two page */

> +	if (m > 0)

> +		n = 3;

> +	else

> +		n = 2;

>   

> -	sg_init_table(sq->sg, 2);

> +	sg_init_table(sq->sg, n);

>   	sg_set_buf(sq->sg, &xsk_hdr, vi->hdr_len);

> -	page = xsk_buff_xdp_get_page(pool, addr);

> -	sg_set_page(sq->sg + 1, page, desc->len, offset);

>   

> -	err = virtqueue_add_outbuf(sq->vq, sq->sg, 2, NULL, GFP_ATOMIC);

> +	copied = 0;

> +	for (i = 1; i < n; ++i) {

> +		copy = min_t(int, desc->len - copied, PAGE_SIZE - offset);

> +

> +		page = xsk_buff_xdp_get_page(pool, addr + copied);

> +

> +		sg_set_page(sq->sg + i, page, copy, offset);

> +		copied += copy;

> +		if (offset)

> +			offset = 0;

> +	}



Can we simplify the code by using a while loop here? Then I think we don't
need to determine the value of n.

Thanks


> +

> +	err = virtqueue_add_outbuf(sq->vq, sq->sg, n, NULL, GFP_ATOMIC);

>   	if (unlikely(err))

>   		sq->xsk.last_desc = *desc;

>
diff mbox series

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c8a317a93ef7..259fafcf6028 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2562,24 +2562,42 @@  static void virtnet_xsk_check_space(struct send_queue *sq)
 static int virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
 			    struct xdp_desc *desc)
 {
+	u32 offset, n, i, copy, copied;
 	struct virtnet_info *vi;
 	struct page *page;
 	void *data;
-	u32 offset;
+	int err, m;
 	u64 addr;
-	int err;
 
 	vi = sq->vq->vdev->priv;
 	addr = desc->addr;
+
 	data = xsk_buff_raw_get_data(pool, addr);
+
 	offset = offset_in_page(data);
+	m = desc->len - (PAGE_SIZE - offset);
+	/* xsk unaligned mode, desc will use two page */
+	if (m > 0)
+		n = 3;
+	else
+		n = 2;
 
-	sg_init_table(sq->sg, 2);
+	sg_init_table(sq->sg, n);
 	sg_set_buf(sq->sg, &xsk_hdr, vi->hdr_len);
-	page = xsk_buff_xdp_get_page(pool, addr);
-	sg_set_page(sq->sg + 1, page, desc->len, offset);
 
-	err = virtqueue_add_outbuf(sq->vq, sq->sg, 2, NULL, GFP_ATOMIC);
+	copied = 0;
+	for (i = 1; i < n; ++i) {
+		copy = min_t(int, desc->len - copied, PAGE_SIZE - offset);
+
+		page = xsk_buff_xdp_get_page(pool, addr + copied);
+
+		sg_set_page(sq->sg + i, page, copy, offset);
+		copied += copy;
+		if (offset)
+			offset = 0;
+	}
+
+	err = virtqueue_add_outbuf(sq->vq, sq->sg, n, NULL, GFP_ATOMIC);
 	if (unlikely(err))
 		sq->xsk.last_desc = *desc;