[V4,net-next,6/9] net: ena: use xdp_frame in XDP TX flow

Message ID 1607083875-32134-7-git-send-email-akiyano@amazon.com
State New
Series XDP Redirect implementation for ENA driver

Commit Message

Kiyanovski, Arthur Dec. 4, 2020, 12:11 p.m. UTC
From: Arthur Kiyanovski <akiyano@amazon.com>

Rename the ena_xdp_xmit_buff() function to ena_xdp_xmit_frame() and pass
it an xdp_frame struct instead of xdp_buff.
This change lays the ground for XDP redirect implementation which uses
xdp_frames when 'xmit'ing packets.

Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 46 ++++++++++----------
 1 file changed, 23 insertions(+), 23 deletions(-)
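
For context, an xdp_frame is the send-side counterpart of an xdp_buff:
the conversion stores the frame metadata in the packet's own headroom, so
the frame stays valid after the RX NAPI context ends. Roughly, as of the
v5.10 era (paraphrased from include/net/xdp.h; exact fields may differ
across kernel versions):

	struct xdp_frame {
		void *data;
		u16 len;
		u16 headroom;
		u32 metasize:8;
		u32 frame_sz:24;
		/* valid on a remote CPU, unlike xdp_buff's rxq pointer */
		struct xdp_mem_info mem;
		struct net_device *dev_rx;	/* used by cpumap */
	};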

Comments

Maciej Fijalkowski Dec. 6, 2020, 8:10 p.m. UTC | #1
On Fri, Dec 04, 2020 at 02:11:12PM +0200, akiyano@amazon.com wrote:
> From: Arthur Kiyanovski <akiyano@amazon.com>
> 
> Rename the ena_xdp_xmit_buff() function to ena_xdp_xmit_frame() and pass
> it an xdp_frame struct instead of xdp_buff.
> This change lays the ground for XDP redirect implementation which uses
> xdp_frames when 'xmit'ing packets.
> 
> Signed-off-by: Shay Agroskin <shayagr@amazon.com>
> Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
> ---
>  drivers/net/ethernet/amazon/ena/ena_netdev.c | 46 ++++++++++----------
>  1 file changed, 23 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
> index 222bb576e30e..cbb07548409a 100644
> --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
> +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
> @@ -233,18 +233,18 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
>  	return ret;
>  }
>  
> -static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
> -			       struct ena_tx_buffer *tx_info,
> -			       struct xdp_buff *xdp,
> -			       void **push_hdr,
> -			       u32 *push_len)
> +static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
> +				struct ena_tx_buffer *tx_info,
> +				struct xdp_frame *xdpf,
> +				void **push_hdr,
> +				u32 *push_len)
>  {
>  	struct ena_adapter *adapter = xdp_ring->adapter;
>  	struct ena_com_buf *ena_buf;
>  	dma_addr_t dma = 0;
>  	u32 size;
>  
> -	tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
> +	tx_info->xdpf = xdpf;
>  	size = tx_info->xdpf->len;
>  	ena_buf = tx_info->bufs;
>  
> @@ -281,29 +281,31 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
>  	return -EINVAL;
>  }
>  
> -static int ena_xdp_xmit_buff(struct net_device *dev,
> -			     struct xdp_buff *xdp,
> -			     int qid,
> -			     struct ena_rx_buffer *rx_info)
> +static int ena_xdp_xmit_frame(struct net_device *dev,
> +			      struct xdp_frame *xdpf,
> +			      int qid)
>  {
>  	struct ena_adapter *adapter = netdev_priv(dev);
>  	struct ena_com_tx_ctx ena_tx_ctx = {};
>  	struct ena_tx_buffer *tx_info;
>  	struct ena_ring *xdp_ring;
> +	struct page *rx_buff_page;
>  	u16 next_to_use, req_id;
>  	int rc;
>  	void *push_hdr;
>  	u32 push_len;
>  
> +	rx_buff_page = virt_to_page(xdpf->data);
> +
>  	xdp_ring = &adapter->tx_ring[qid];
>  	next_to_use = xdp_ring->next_to_use;
>  	req_id = xdp_ring->free_ids[next_to_use];
>  	tx_info = &xdp_ring->tx_buffer_info[req_id];
>  	tx_info->num_of_bufs = 0;
> -	page_ref_inc(rx_info->page);
> -	tx_info->xdp_rx_page = rx_info->page;
> +	page_ref_inc(rx_buff_page);
> +	tx_info->xdp_rx_page = rx_buff_page;
>  
> -	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
> +	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
>  	if (unlikely(rc))
>  		goto error_drop_packet;
>  
> @@ -318,7 +320,7 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
>  			     tx_info,
>  			     &ena_tx_ctx,
>  			     next_to_use,
> -			     xdp->data_end - xdp->data);
> +			     xdpf->len);
>  	if (rc)
>  		goto error_unmap_dma;
>  	/* trigger the dma engine. ena_com_write_sq_doorbell()
> @@ -337,12 +339,11 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
>  	return NETDEV_TX_OK;
>  }
>  
> -static int ena_xdp_execute(struct ena_ring *rx_ring,
> -			   struct xdp_buff *xdp,
> -			   struct ena_rx_buffer *rx_info)
> +static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
>  {
>  	struct bpf_prog *xdp_prog;
>  	u32 verdict = XDP_PASS;
> +	struct xdp_frame *xdpf;
>  	u64 *xdp_stat;
>  
>  	rcu_read_lock();
> @@ -354,10 +355,9 @@ static int ena_xdp_execute(struct ena_ring *rx_ring,
>  	verdict = bpf_prog_run_xdp(xdp_prog, xdp);
>  
>  	if (verdict == XDP_TX) {
> -		ena_xdp_xmit_buff(rx_ring->netdev,
> -				  xdp,
> -				  rx_ring->qid + rx_ring->adapter->num_io_queues,
> -				  rx_info);
> +		xdpf = xdp_convert_buff_to_frame(xdp);

Similar to Jakub's comment on another patch, xdp_convert_buff_to_frame can
return NULL and from what I can tell you never check that in
ena_xdp_xmit_frame.
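
A minimal sketch of the check Maciej is asking for, applied to the hunk
above (the xdp_aborted stat and the trace_xdp_exception() accounting are
assumptions for illustration, not part of the patch):

	if (verdict == XDP_TX) {
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			/* Conversion failed: no room for the xdp_frame
			 * metadata, or an unsupported memory type.
			 * Account it as aborted (assumed stat name).
			 */
			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
			verdict = XDP_ABORTED;
		} else {
			ena_xdp_xmit_frame(rx_ring->netdev, xdpf,
					   rx_ring->qid +
					   rx_ring->adapter->num_io_queues);
			xdp_stat = &rx_ring->rx_stats.xdp_tx;
		}
	}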

> +		ena_xdp_xmit_frame(rx_ring->netdev, xdpf,
> +				   rx_ring->qid + rx_ring->adapter->num_io_queues);
>  
>  		xdp_stat = &rx_ring->rx_stats.xdp_tx;
>  	} else if (unlikely(verdict == XDP_ABORTED)) {
> @@ -1521,7 +1521,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
>  	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
>  		return XDP_DROP;
>  
> -	ret = ena_xdp_execute(rx_ring, xdp, rx_info);
> +	ret = ena_xdp_execute(rx_ring, xdp);
>  
>  	/* The xdp program might expand the headers */
>  	if (ret == XDP_PASS) {
> @@ -1600,7 +1600,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
>  		if (unlikely(!skb)) {
>  			/* The page might not actually be freed here since the
>  			 * page reference count is incremented in
> -			 * ena_xdp_xmit_buff(), and it will be decreased only
> +			 * ena_xdp_xmit_frame(), and it will be decreased only
>  			 * when send completion was received from the device
>  			 */
>  			if (xdp_verdict == XDP_TX)
> -- 
> 2.23.3
>

Shay Agroskin Dec. 7, 2020, 7:16 p.m. UTC | #2
Maciej Fijalkowski <maciej.fijalkowski@intel.com> writes:

> On Fri, Dec 04, 2020 at 02:11:12PM +0200, akiyano@amazon.com wrote:
>> From: Arthur Kiyanovski <akiyano@amazon.com>
>> 
>> Rename the ena_xdp_xmit_buff() function to ena_xdp_xmit_frame() and pass
>> it an xdp_frame struct instead of xdp_buff.
>> This change lays the ground for XDP redirect implementation which uses
>> xdp_frames when 'xmit'ing packets.
>> 
>> Signed-off-by: Shay Agroskin <shayagr@amazon.com>
>> Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
>> ---
>>  drivers/net/ethernet/amazon/ena/ena_netdev.c | 46 ++++++++++----------
>>  1 file changed, 23 insertions(+), 23 deletions(-)
>> 
>> diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
>> index 222bb576e30e..cbb07548409a 100644
>> --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
>> +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
>> @@ -233,18 +233,18 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
>>  	return ret;
>>  }
>>  
>>  ...
>>  	if (verdict == XDP_TX) {
>> -		ena_xdp_xmit_buff(rx_ring->netdev,
>> -				  xdp,
>> -				  rx_ring->qid + rx_ring->adapter->num_io_queues,
>> -				  rx_info);
>> +		xdpf = xdp_convert_buff_to_frame(xdp);
>
> Similar to Jakub's comment on another patch, xdp_convert_buff_to_frame can
> return NULL and from what I can tell you never check that in
> ena_xdp_xmit_frame.
>

Hi, thanks for reviewing the code (:

Going over xdp_convert_buff_to_frame(), it seems (to me) that the
function fails only if either:
- we're using an AF_XDP socket, or
- the driver failed to leave enough room for the xdp_frame and
  skb_shared_info structs.

The first isn't supported by ENA, and the second doesn't seem possible,
since the driver leaves enough space on the RX page, and
bpf_xdp_adjust_head()/bpf_xdp_adjust_tail() make sure enough space is
left on the page for the structs (a rough sketch of the helper follows
below).

Nevertheless, the correct approach is to check the return value of 
the function. I'll add it in the next patchset. Thanks

>> +		ena_xdp_xmit_frame(rx_ring->netdev, xdpf,
>> +				   rx_ring->qid + rx_ring->adapter->num_io_queues);
>>  
>>  		xdp_stat = &rx_ring->rx_stats.xdp_tx;
>>  	} else if (unlikely(verdict == XDP_ABORTED)) {
>> @@ -1521,7 +1521,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
>>  	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
>>  		return XDP_DROP;
>>  
>> -	ret = ena_xdp_execute(rx_ring, xdp, rx_info);
>> +	ret = ena_xdp_execute(rx_ring, xdp);
>>  
>>  	/* The xdp program might expand the headers */
>> ...
>>  			 */
>>  			if (xdp_verdict == XDP_TX)
>> -- 
>> 2.23.3
>>
Patch

diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 222bb576e30e..cbb07548409a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -233,18 +233,18 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
 	return ret;
 }
 
-static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
-			       struct ena_tx_buffer *tx_info,
-			       struct xdp_buff *xdp,
-			       void **push_hdr,
-			       u32 *push_len)
+static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
+				struct ena_tx_buffer *tx_info,
+				struct xdp_frame *xdpf,
+				void **push_hdr,
+				u32 *push_len)
 {
 	struct ena_adapter *adapter = xdp_ring->adapter;
 	struct ena_com_buf *ena_buf;
 	dma_addr_t dma = 0;
 	u32 size;
 
-	tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
+	tx_info->xdpf = xdpf;
 	size = tx_info->xdpf->len;
 	ena_buf = tx_info->bufs;
 
@@ -281,29 +281,31 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
 	return -EINVAL;
 }
 
-static int ena_xdp_xmit_buff(struct net_device *dev,
-			     struct xdp_buff *xdp,
-			     int qid,
-			     struct ena_rx_buffer *rx_info)
+static int ena_xdp_xmit_frame(struct net_device *dev,
+			      struct xdp_frame *xdpf,
+			      int qid)
 {
 	struct ena_adapter *adapter = netdev_priv(dev);
 	struct ena_com_tx_ctx ena_tx_ctx = {};
 	struct ena_tx_buffer *tx_info;
 	struct ena_ring *xdp_ring;
+	struct page *rx_buff_page;
 	u16 next_to_use, req_id;
 	int rc;
 	void *push_hdr;
 	u32 push_len;
 
+	rx_buff_page = virt_to_page(xdpf->data);
+
 	xdp_ring = &adapter->tx_ring[qid];
 	next_to_use = xdp_ring->next_to_use;
 	req_id = xdp_ring->free_ids[next_to_use];
 	tx_info = &xdp_ring->tx_buffer_info[req_id];
 	tx_info->num_of_bufs = 0;
-	page_ref_inc(rx_info->page);
-	tx_info->xdp_rx_page = rx_info->page;
+	page_ref_inc(rx_buff_page);
+	tx_info->xdp_rx_page = rx_buff_page;
 
-	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
+	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
 	if (unlikely(rc))
 		goto error_drop_packet;
 
@@ -318,7 +320,7 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
 			     tx_info,
 			     &ena_tx_ctx,
 			     next_to_use,
-			     xdp->data_end - xdp->data);
+			     xdpf->len);
 	if (rc)
 		goto error_unmap_dma;
 	/* trigger the dma engine. ena_com_write_sq_doorbell()
@@ -337,12 +339,11 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
 	return NETDEV_TX_OK;
 }
 
-static int ena_xdp_execute(struct ena_ring *rx_ring,
-			   struct xdp_buff *xdp,
-			   struct ena_rx_buffer *rx_info)
+static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 {
 	struct bpf_prog *xdp_prog;
 	u32 verdict = XDP_PASS;
+	struct xdp_frame *xdpf;
 	u64 *xdp_stat;
 
 	rcu_read_lock();
@@ -354,10 +355,9 @@ static int ena_xdp_execute(struct ena_ring *rx_ring,
 	verdict = bpf_prog_run_xdp(xdp_prog, xdp);
 
 	if (verdict == XDP_TX) {
-		ena_xdp_xmit_buff(rx_ring->netdev,
-				  xdp,
-				  rx_ring->qid + rx_ring->adapter->num_io_queues,
-				  rx_info);
+		xdpf = xdp_convert_buff_to_frame(xdp);
+		ena_xdp_xmit_frame(rx_ring->netdev, xdpf,
+				   rx_ring->qid + rx_ring->adapter->num_io_queues);
 
 		xdp_stat = &rx_ring->rx_stats.xdp_tx;
 	} else if (unlikely(verdict == XDP_ABORTED)) {
@@ -1521,7 +1521,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
 		return XDP_DROP;
 
-	ret = ena_xdp_execute(rx_ring, xdp, rx_info);
+	ret = ena_xdp_execute(rx_ring, xdp);
 
 	/* The xdp program might expand the headers */
 	if (ret == XDP_PASS) {
@@ -1600,7 +1600,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		if (unlikely(!skb)) {
 			/* The page might not actually be freed here since the
 			 * page reference count is incremented in
-			 * ena_xdp_xmit_buff(), and it will be decreased only
+			 * ena_xdp_xmit_frame(), and it will be decreased only
 			 * when send completion was received from the device
 			 */
 			if (xdp_verdict == XDP_TX)
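
With ena_xdp_xmit_frame() taking an xdp_frame, a follow-up .ndo_xdp_xmit
handler for XDP_REDIRECT can reduce to a loop over the frame array. A
minimal sketch under the contract of this era (the driver frees frames it
could not send and returns the number sent); it assumes
ena_xdp_xmit_frame() is reworked to report failures instead of always
returning NETDEV_TX_OK, and it elides ring locking and doorbell batching:

	static int ena_xdp_xmit(struct net_device *dev, int n,
				struct xdp_frame **frames, u32 flags)
	{
		struct ena_adapter *adapter = netdev_priv(dev);
		int qid, i, nxmit = 0;

		if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
			return -EINVAL;

		/* XDP TX rings sit right after the regular IO queues */
		qid = smp_processor_id() % adapter->num_io_queues +
		      adapter->num_io_queues;

		for (i = 0; i < n; i++) {
			if (ena_xdp_xmit_frame(dev, frames[i], qid))
				break;
			nxmit++;
		}

		/* frames not handed to the HW must be freed here */
		for (; i < n; i++)
			xdp_return_frame_rx_napi(frames[i]);

		return nxmit;
	}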