virtio-net: support XDP_TX when not more queues

Message ID 81abae33fc8dbec37ef0061ff6f6fd696b484a3e.1610523188.git.xuanzhuo@linux.alibaba.com
State New

Commit Message

Xuan Zhuo Jan. 13, 2021, 8:08 a.m. UTC
The number of queues implemented by many virtio backends is limited,
and some machines have a large number of CPUs. In this case, it is
often impossible to allocate a separate queue for XDP_TX.

This patch allows XDP_TX to run by reusing an existing SQ, with
__netif_tx_lock() held, when there are not enough queues.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 47 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 35 insertions(+), 12 deletions(-)
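
The queue-selection rule above boils down to a small piece of
arithmetic. Below is a minimal, hypothetical user-space demo of that
rule (the helper and harness are invented for illustration; the names
mirror the driver): with enough queue pairs each CPU owns a dedicated
XDP SQ, otherwise an existing SQ is shared and must be locked.

    /* Hypothetical demo of the patch's queue-selection arithmetic.
     * Build with: cc -o demo demo.c */
    #include <stdio.h>

    static unsigned int pick_sq(unsigned int curr_queue_pairs,
                                unsigned int xdp_queue_pairs,
                                unsigned int nr_cpu_ids,
                                unsigned int cpu, int *need_lock)
    {
            if (curr_queue_pairs > nr_cpu_ids) {
                    /* Enough queues: every CPU owns one of the XDP SQs. */
                    *need_lock = 0;
                    return curr_queue_pairs - xdp_queue_pairs + cpu;
            }
            /* Too few queues: share an existing SQ under the TXQ lock. */
            *need_lock = 1;
            return cpu % curr_queue_pairs;
    }

    int main(void)
    {
            unsigned int qp;
            int lock;

            /* 4 CPUs, 8 queue pairs (4 regular + 4 XDP): lock-free. */
            qp = pick_sq(8, 4, 4, 1, &lock);
            printf("dedicated: cpu 1 -> sq %u, lock=%d\n", qp, lock);

            /* 8 CPUs, only 4 queue pairs (xdp_qp forced to 0): shared. */
            qp = pick_sq(4, 0, 8, 5, &lock);
            printf("shared:    cpu 5 -> sq %u, lock=%d\n", qp, lock);
            return 0;
    }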

Comments

Jakub Kicinski Jan. 16, 2021, 1:47 a.m. UTC | #1
On Wed, 13 Jan 2021 16:08:57 +0800 Xuan Zhuo wrote:
> The number of queues implemented by many virtio backends is limited,
> and some machines have a large number of CPUs. In this case, it is
> often impossible to allocate a separate queue for XDP_TX.
> 
> This patch allows XDP_TX to run by reusing an existing SQ, with
> __netif_tx_lock() held, when there are not enough queues.
> 
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Reviewed-by: Dust Li <dust.li@linux.alibaba.com>

Since reviews are not coming in, let me share some of mine.

nit: please put [PATCH net-next] not [PATCH netdev]

> -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
> +static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi)
>  {
>  	unsigned int qp;
> +	struct netdev_queue *txq;

nit: please order variable declaration lines longest to shortest

> +
> +	if (vi->curr_queue_pairs > nr_cpu_ids) {
> +		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
> +	} else {
> +		qp = smp_processor_id() % vi->curr_queue_pairs;
> +		txq = netdev_get_tx_queue(vi->dev, qp);
> +		__netif_tx_lock(txq, raw_smp_processor_id());
> +	}
>  
> -	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
>  	return &vi->sq[qp];
>  }
>  
> +static void virtnet_put_xdp_sq(struct virtnet_info *vi)
> +{
> +	unsigned int qp;
> +	struct netdev_queue *txq;

nit: longest to shortest

> +
> +	if (vi->curr_queue_pairs <= nr_cpu_ids) {
> +		qp = smp_processor_id() % vi->curr_queue_pairs;

Feels a little wasteful to do the modulo calculation twice per packet.

> +		txq = netdev_get_tx_queue(vi->dev, qp);
> +		__netif_tx_unlock(txq);
> +	}
> +}
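
One way to avoid recomputing the modulo on the put side would be to
have the get helper hand back the locked txq; a rough, untested sketch
of that alternative (a hypothetical refactor, not the posted patch):

    /* Hypothetical refactor: return the locked txq so the caller can
     * release it without redoing the qp calculation. */
    static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi,
                                                 struct netdev_queue **txq)
    {
            unsigned int qp;

            *txq = NULL;
            if (vi->curr_queue_pairs > nr_cpu_ids) {
                    qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
                         smp_processor_id();
            } else {
                    qp = smp_processor_id() % vi->curr_queue_pairs;
                    *txq = netdev_get_tx_queue(vi->dev, qp);
                    __netif_tx_lock(*txq, raw_smp_processor_id());
            }

            return &vi->sq[qp];
    }

    static void virtnet_put_xdp_sq(struct netdev_queue *txq)
    {
            if (txq)
                    __netif_tx_unlock(txq);
    }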


> +	vi->xdp_enabled = false;
>  	if (prog) {
>  		for (i = 0; i < vi->max_queue_pairs; i++) {
>  			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
>  			if (i == 0 && !old_prog)
>  				virtnet_clear_guest_offloads(vi);
>  		}
> +		vi->xdp_enabled = true;

is xdp_enabled really needed? can't we do the headroom calculation
based on the program pointer being not NULL? Either way xdp_enabled 
should not temporarily switch true -> false -> true when program 
is swapped.
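
For reference, a headroom check keyed off the program pointer rather
than a flag might look like the untested sketch below (this assumes
the caller is in an RCU read-side section, as the receive path is; it
illustrates the suggested alternative, not the posted code):

    static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
    {
            /* Reserve XDP headroom iff a program is attached on rq 0;
             * avoids keeping a separate xdp_enabled flag in sync. */
            return rcu_dereference(vi->rq[0].xdp_prog) ?
                   VIRTIO_XDP_HEADROOM : 0;
    }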

>  	}
>  
>  	for (i = 0; i < vi->max_queue_pairs; i++) {
Michael S. Tsirkin Feb. 10, 2021, 9:40 p.m. UTC | #2
On Wed, Jan 13, 2021 at 04:08:57PM +0800, Xuan Zhuo wrote:
> The number of queues implemented by many virtio backends is limited,
> and some machines have a large number of CPUs. In this case, it is
> often impossible to allocate a separate queue for XDP_TX.
> 
> This patch allows XDP_TX to run by reusing an existing SQ, with
> __netif_tx_lock() held, when there are not enough queues.
> 
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Reviewed-by: Dust Li <dust.li@linux.alibaba.com>

I'd like to get some advice on whether this is ok from some
XDP experts - previously my understanding was that it is
preferable to disable XDP for such devices than to use
locks on the XDP fast path.

> ---
>  drivers/net/virtio_net.c | 47 +++++++++++++++++++++++++++++++++++------------
>  1 file changed, 35 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index ba8e637..7a3b2a7 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -195,6 +195,9 @@ struct virtnet_info {
>  	/* # of XDP queue pairs currently used by the driver */
>  	u16 xdp_queue_pairs;
>  
> +	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
> +	bool xdp_enabled;
> +
>  	/* I like... big packets and I cannot lie! */
>  	bool big_packets;
>  
> @@ -481,14 +484,34 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
>  	return 0;
>  }
>  
> -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
> +static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi)
>  {
>  	unsigned int qp;
> +	struct netdev_queue *txq;
> +
> +	if (vi->curr_queue_pairs > nr_cpu_ids) {
> +		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
> +	} else {
> +		qp = smp_processor_id() % vi->curr_queue_pairs;
> +		txq = netdev_get_tx_queue(vi->dev, qp);
> +		__netif_tx_lock(txq, raw_smp_processor_id());
> +	}
>  
> -	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
>  	return &vi->sq[qp];
>  }
>  
> +static void virtnet_put_xdp_sq(struct virtnet_info *vi)
> +{
> +	unsigned int qp;
> +	struct netdev_queue *txq;
> +
> +	if (vi->curr_queue_pairs <= nr_cpu_ids) {
> +		qp = smp_processor_id() % vi->curr_queue_pairs;
> +		txq = netdev_get_tx_queue(vi->dev, qp);
> +		__netif_tx_unlock(txq);
> +	}
> +}
> +
>  static int virtnet_xdp_xmit(struct net_device *dev,
>  			    int n, struct xdp_frame **frames, u32 flags)
>  {
> @@ -512,7 +535,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>  	if (!xdp_prog)
>  		return -ENXIO;
>  
> -	sq = virtnet_xdp_sq(vi);
> +	sq = virtnet_get_xdp_sq(vi);
>  
>  	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
>  		ret = -EINVAL;
> @@ -560,12 +583,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>  	sq->stats.kicks += kicks;
>  	u64_stats_update_end(&sq->stats.syncp);
>  
> +	virtnet_put_xdp_sq(vi);
>  	return ret;
>  }
>  
>  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
>  {
> -	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
> +	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
>  }
>  
>  /* We copy the packet for XDP in the following cases:
> @@ -1457,12 +1481,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
>  		xdp_do_flush();
>  
>  	if (xdp_xmit & VIRTIO_XDP_TX) {
> -		sq = virtnet_xdp_sq(vi);
> +		sq = virtnet_get_xdp_sq(vi);
>  		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
>  			u64_stats_update_begin(&sq->stats.syncp);
>  			sq->stats.kicks++;
>  			u64_stats_update_end(&sq->stats.syncp);
>  		}
> +		virtnet_put_xdp_sq(vi);
>  	}
>  
>  	return received;
> @@ -2416,12 +2441,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
>  		xdp_qp = nr_cpu_ids;
>  
>  	/* XDP requires extra queues for XDP_TX */
> -	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
> -		NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
> -		netdev_warn(dev, "request %i queues but max is %i\n",
> -			    curr_qp + xdp_qp, vi->max_queue_pairs);
> -		return -ENOMEM;
> -	}
> +	if (curr_qp + xdp_qp > vi->max_queue_pairs)
> +		xdp_qp = 0;
>  
>  	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
>  	if (!prog && !old_prog)
> @@ -2453,12 +2474,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
>  	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
>  	vi->xdp_queue_pairs = xdp_qp;
>  
> +	vi->xdp_enabled = false;
>  	if (prog) {
>  		for (i = 0; i < vi->max_queue_pairs; i++) {
>  			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
>  			if (i == 0 && !old_prog)
>  				virtnet_clear_guest_offloads(vi);
>  		}
> +		vi->xdp_enabled = true;
>  	}
>  
>  	for (i = 0; i < vi->max_queue_pairs; i++) {
> @@ -2526,7 +2549,7 @@ static int virtnet_set_features(struct net_device *dev,
>  	int err;
>  
>  	if ((dev->features ^ features) & NETIF_F_LRO) {
> -		if (vi->xdp_queue_pairs)
> +		if (vi->xdp_enabled)
>  			return -EBUSY;
>  
>  		if (features & NETIF_F_LRO)
> -- 
> 1.8.3.1
Jesper Dangaard Brouer Feb. 11, 2021, 11 a.m. UTC | #3
On Wed, 10 Feb 2021 16:40:41 -0500
"Michael S. Tsirkin" <mst@redhat.com> wrote:

> On Wed, Jan 13, 2021 at 04:08:57PM +0800, Xuan Zhuo wrote:
> > The number of queues implemented by many virtio backends is limited,
> > and some machines have a large number of CPUs. In this case, it is
> > often impossible to allocate a separate queue for XDP_TX.
> > 
> > This patch allows XDP_TX to run by reusing an existing SQ, with
> > __netif_tx_lock() held, when there are not enough queues.

I'm a little puzzled about the choice of using the netdevice TXQ
lock __netif_tx_lock() / __netif_tx_unlock().
Can you explain more about this choice?

> > 
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
> 
> I'd like to get some advice on whether this is ok from some
> XDP experts - previously my understanding was that it is
> preferable to disable XDP for such devices than to use
> locks on the XDP fast path.


I think it is acceptable, because ndo_xdp_xmit / virtnet_xdp_xmit
takes a bulk of packets (currently 16), so the lock cost is amortized
across the whole bulk.

Some drivers already do this.
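
Concretely, in the posted patch the lock/unlock brackets the whole
bulk rather than each frame; roughly (simplified from the patch's
virtnet_xdp_xmit(), error handling and stats omitted):

    sq = virtnet_get_xdp_sq(vi);    /* may take __netif_tx_lock() */
    for (i = 0; i < n; i++) {
            if (__virtnet_xdp_xmit_one(vi, sq, frames[i]))
                    break;          /* simplified error handling */
    }
    virtnet_put_xdp_sq(vi);         /* releases the lock if taken */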

It would have been nice if we could set a feature flag that allows
users to see that this driver uses locking in the XDP transmit
(ndo_xdp_xmit) function call... but it seems like a pipe dream :-P

Code related to the locking:

> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index ba8e637..7a3b2a7 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
[...]
> > @@ -481,14 +484,34 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
> >  	return 0;
> >  }
> >  
> > -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
> > +static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi)
> >  {
> >  	unsigned int qp;
> > +	struct netdev_queue *txq;
> > +
> > +	if (vi->curr_queue_pairs > nr_cpu_ids) {
> > +		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
> > +	} else {
> > +		qp = smp_processor_id() % vi->curr_queue_pairs;
> > +		txq = netdev_get_tx_queue(vi->dev, qp);
> > +		__netif_tx_lock(txq, raw_smp_processor_id());
> > +	}
> >  
> > -	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
> >  	return &vi->sq[qp];
> >  }
> >  
> > +static void virtnet_put_xdp_sq(struct virtnet_info *vi)
> > +{
> > +	unsigned int qp;
> > +	struct netdev_queue *txq;
> > +
> > +	if (vi->curr_queue_pairs <= nr_cpu_ids) {
> > +		qp = smp_processor_id() % vi->curr_queue_pairs;
> > +		txq = netdev_get_tx_queue(vi->dev, qp);
> > +		__netif_tx_unlock(txq);
> > +	}
> > +}
> > +
> >  static int virtnet_xdp_xmit(struct net_device *dev,
> >  			    int n, struct xdp_frame **frames, u32 flags)
> >  {
> > @@ -512,7 +535,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> >  	if (!xdp_prog)
> >  		return -ENXIO;
> >  
> > -	sq = virtnet_xdp_sq(vi);
> > +	sq = virtnet_get_xdp_sq(vi);
> >  
> >  	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
> >  		ret = -EINVAL;
> > @@ -560,12 +583,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> >  	sq->stats.kicks += kicks;
> >  	u64_stats_update_end(&sq->stats.syncp);
> >  
> > +	virtnet_put_xdp_sq(vi);
> >  	return ret;
> >  }


-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer
Jason Wang Feb. 18, 2021, 2:18 a.m. UTC | #4
On 2021/2/11 5:40 AM, Michael S. Tsirkin wrote:
> On Wed, Jan 13, 2021 at 04:08:57PM +0800, Xuan Zhuo wrote:
>> The number of queues implemented by many virtio backends is limited,
>> and some machines have a large number of CPUs. In this case, it is
>> often impossible to allocate a separate queue for XDP_TX.
>>
>> This patch allows XDP_TX to run by reusing an existing SQ, with
>> __netif_tx_lock() held, when there are not enough queues.
>>
>> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
>> Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
> I'd like to get some advice on whether this is ok from some
> XDP experts - previously my understanding was that it is
> preferable to disable XDP for such devices than to use
> locks on the XDP fast path.

I think this is acceptable on devices where changing the number of
queues is not easy. For virtio-net, it probably requires a lot of
changes on the management side.

Another example is TUN, which uses a TX lock for XDP.

Thanks


>
>> ---
>>   drivers/net/virtio_net.c | 47 +++++++++++++++++++++++++++++++++++------------
>>   1 file changed, 35 insertions(+), 12 deletions(-)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index ba8e637..7a3b2a7 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -195,6 +195,9 @@ struct virtnet_info {
>>   	/* # of XDP queue pairs currently used by the driver */
>>   	u16 xdp_queue_pairs;
>>   
>> +	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
>> +	bool xdp_enabled;
>> +
>>   	/* I like... big packets and I cannot lie! */
>>   	bool big_packets;
>>   
>> @@ -481,14 +484,34 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
>>   	return 0;
>>   }
>>   
>> -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
>> +static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi)
>>   {
>>   	unsigned int qp;
>> +	struct netdev_queue *txq;
>> +
>> +	if (vi->curr_queue_pairs > nr_cpu_ids) {
>> +		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
>> +	} else {
>> +		qp = smp_processor_id() % vi->curr_queue_pairs;
>> +		txq = netdev_get_tx_queue(vi->dev, qp);
>> +		__netif_tx_lock(txq, raw_smp_processor_id());
>> +	}
>>   
>> -	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
>>   	return &vi->sq[qp];
>>   }
>>   
>> +static void virtnet_put_xdp_sq(struct virtnet_info *vi)
>> +{
>> +	unsigned int qp;
>> +	struct netdev_queue *txq;
>> +
>> +	if (vi->curr_queue_pairs <= nr_cpu_ids) {
>> +		qp = smp_processor_id() % vi->curr_queue_pairs;
>> +		txq = netdev_get_tx_queue(vi->dev, qp);
>> +		__netif_tx_unlock(txq);
>> +	}
>> +}
>> +
>>   static int virtnet_xdp_xmit(struct net_device *dev,
>>   			    int n, struct xdp_frame **frames, u32 flags)
>>   {
>> @@ -512,7 +535,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>>   	if (!xdp_prog)
>>   		return -ENXIO;
>>   
>> -	sq = virtnet_xdp_sq(vi);
>> +	sq = virtnet_get_xdp_sq(vi);
>>   
>>   	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
>>   		ret = -EINVAL;
>> @@ -560,12 +583,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>>   	sq->stats.kicks += kicks;
>>   	u64_stats_update_end(&sq->stats.syncp);
>>   
>> +	virtnet_put_xdp_sq(vi);
>>   	return ret;
>>   }
>>   
>>   static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
>>   {
>> -	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
>> +	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
>>   }
>>   
>>   /* We copy the packet for XDP in the following cases:
>> @@ -1457,12 +1481,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
>>   		xdp_do_flush();
>>   
>>   	if (xdp_xmit & VIRTIO_XDP_TX) {
>> -		sq = virtnet_xdp_sq(vi);
>> +		sq = virtnet_get_xdp_sq(vi);
>>   		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
>>   			u64_stats_update_begin(&sq->stats.syncp);
>>   			sq->stats.kicks++;
>>   			u64_stats_update_end(&sq->stats.syncp);
>>   		}
>> +		virtnet_put_xdp_sq(vi);
>>   	}
>>   
>>   	return received;
>> @@ -2416,12 +2441,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
>>   		xdp_qp = nr_cpu_ids;
>>   
>>   	/* XDP requires extra queues for XDP_TX */
>> -	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
>> -		NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
>> -		netdev_warn(dev, "request %i queues but max is %i\n",
>> -			    curr_qp + xdp_qp, vi->max_queue_pairs);
>> -		return -ENOMEM;
>> -	}
>> +	if (curr_qp + xdp_qp > vi->max_queue_pairs)
>> +		xdp_qp = 0;
>>   
>>   	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
>>   	if (!prog && !old_prog)
>> @@ -2453,12 +2474,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
>>   	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
>>   	vi->xdp_queue_pairs = xdp_qp;
>>   
>> +	vi->xdp_enabled = false;
>>   	if (prog) {
>>   		for (i = 0; i < vi->max_queue_pairs; i++) {
>>   			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
>>   			if (i == 0 && !old_prog)
>>   				virtnet_clear_guest_offloads(vi);
>>   		}
>> +		vi->xdp_enabled = true;
>>   	}
>>   
>>   	for (i = 0; i < vi->max_queue_pairs; i++) {
>> @@ -2526,7 +2549,7 @@ static int virtnet_set_features(struct net_device *dev,
>>   	int err;
>>   
>>   	if ((dev->features ^ features) & NETIF_F_LRO) {
>> -		if (vi->xdp_queue_pairs)
>> +		if (vi->xdp_enabled)
>>   			return -EBUSY;
>>   
>>   		if (features & NETIF_F_LRO)
>> -- 
>> 1.8.3.1

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ba8e637..7a3b2a7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -195,6 +195,9 @@ struct virtnet_info {
 	/* # of XDP queue pairs currently used by the driver */
 	u16 xdp_queue_pairs;
 
+	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
+	bool xdp_enabled;
+
 	/* I like... big packets and I cannot lie! */
 	bool big_packets;
 
@@ -481,14 +484,34 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 	return 0;
 }
 
-static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
+static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi)
 {
 	unsigned int qp;
+	struct netdev_queue *txq;
+
+	if (vi->curr_queue_pairs > nr_cpu_ids) {
+		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
+	} else {
+		qp = smp_processor_id() % vi->curr_queue_pairs;
+		txq = netdev_get_tx_queue(vi->dev, qp);
+		__netif_tx_lock(txq, raw_smp_processor_id());
+	}
 
-	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
 	return &vi->sq[qp];
 }
 
+static void virtnet_put_xdp_sq(struct virtnet_info *vi)
+{
+	unsigned int qp;
+	struct netdev_queue *txq;
+
+	if (vi->curr_queue_pairs <= nr_cpu_ids) {
+		qp = smp_processor_id() % vi->curr_queue_pairs;
+		txq = netdev_get_tx_queue(vi->dev, qp);
+		__netif_tx_unlock(txq);
+	}
+}
+
 static int virtnet_xdp_xmit(struct net_device *dev,
 			    int n, struct xdp_frame **frames, u32 flags)
 {
@@ -512,7 +535,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	if (!xdp_prog)
 		return -ENXIO;
 
-	sq = virtnet_xdp_sq(vi);
+	sq = virtnet_get_xdp_sq(vi);
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
 		ret = -EINVAL;
@@ -560,12 +583,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	sq->stats.kicks += kicks;
 	u64_stats_update_end(&sq->stats.syncp);
 
+	virtnet_put_xdp_sq(vi);
 	return ret;
 }
 
 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
 {
-	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
+	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
 }
 
 /* We copy the packet for XDP in the following cases:
@@ -1457,12 +1481,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 		xdp_do_flush();
 
 	if (xdp_xmit & VIRTIO_XDP_TX) {
-		sq = virtnet_xdp_sq(vi);
+		sq = virtnet_get_xdp_sq(vi);
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
 			u64_stats_update_begin(&sq->stats.syncp);
 			sq->stats.kicks++;
 			u64_stats_update_end(&sq->stats.syncp);
 		}
+		virtnet_put_xdp_sq(vi);
 	}
 
 	return received;
@@ -2416,12 +2441,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		xdp_qp = nr_cpu_ids;
 
 	/* XDP requires extra queues for XDP_TX */
-	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
-		NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
-		netdev_warn(dev, "request %i queues but max is %i\n",
-			    curr_qp + xdp_qp, vi->max_queue_pairs);
-		return -ENOMEM;
-	}
+	if (curr_qp + xdp_qp > vi->max_queue_pairs)
+		xdp_qp = 0;
 
 	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
 	if (!prog && !old_prog)
@@ -2453,12 +2474,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
 	vi->xdp_queue_pairs = xdp_qp;
 
+	vi->xdp_enabled = false;
 	if (prog) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
 			if (i == 0 && !old_prog)
 				virtnet_clear_guest_offloads(vi);
 		}
+		vi->xdp_enabled = true;
 	}
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -2526,7 +2549,7 @@ static int virtnet_set_features(struct net_device *dev,
 	int err;
 
 	if ((dev->features ^ features) & NETIF_F_LRO) {
-		if (vi->xdp_queue_pairs)
+		if (vi->xdp_enabled)
 			return -EBUSY;
 
 		if (features & NETIF_F_LRO)