
[5.11,468/601] mt76: reduce q->lock hold time

Message ID 20210512144843.256900301@linuxfoundation.org
State New

Commit Message

Greg Kroah-Hartman May 12, 2021, 2:49 p.m. UTC
From: Felix Fietkau <nbd@nbd.name>

[ Upstream commit 2fbcdb4386dda0a911b5485b33468540716251f8 ]

Instead of holding the queue lock for the duration of an entire station
schedule run, which can block competing tasks for a significant amount of
time, only hold it while scheduling one batch of packets for one station.
This improves responsiveness under load.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/wireless/mediatek/mt76/tx.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
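
To make the scope change described in the commit message concrete, below is a
minimal, hedged sketch of the locking pattern before and after the patch. It is
not the mt76 driver code: the types and helpers (sketch_queue, sketch_sta,
sched_one_batch) are hypothetical names used only to illustrate taking the
queue lock per station batch instead of across the whole scheduling loop.

/* Illustrative sketch only -- not the actual mt76 code. */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>

struct sketch_queue {
	spinlock_t lock;		/* stands in for q->lock */
	struct list_head stations;
	bool stopped;
};

struct sketch_sta {
	struct list_head list;
};

/* Hypothetical per-station work done while holding the queue lock. */
static int sched_one_batch(struct sketch_queue *q, struct sketch_sta *sta)
{
	/* Placeholder: the real driver queues one burst of frames here. */
	return 0;
}

static int sketch_schedule(struct sketch_queue *q)
{
	struct sketch_sta *sta;
	int sent = 0;

	/*
	 * Before the patch: spin_lock_bh(&q->lock) was taken here and held
	 * until the loop finished, blocking other users of the queue.
	 */
	list_for_each_entry(sta, &q->stations, list) {
		/* After the patch: the lock covers only one station's batch. */
		spin_lock_bh(&q->lock);
		if (!q->stopped)
			sent += sched_one_batch(q, sta);
		spin_unlock_bh(&q->lock);
	}

	return sent;
}

The actual patch below follows the same idea inside mt76_txq_schedule_list():
the lock/unlock pair moves from around the while loop to around each
mt76_txq_send_burst() call.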

Patch

diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 25627e70bdad..d5953223d7cf 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -454,7 +454,6 @@  mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 	struct mt76_wcid *wcid;
 	int ret = 0;
 
-	spin_lock_bh(&q->lock);
 	while (1) {
 		if (test_bit(MT76_STATE_PM, &phy->state) ||
 		    test_bit(MT76_RESET, &phy->state)) {
@@ -464,14 +463,9 @@  mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 
 		if (dev->queue_ops->tx_cleanup &&
 		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
-			spin_unlock_bh(&q->lock);
 			dev->queue_ops->tx_cleanup(dev, q, false);
-			spin_lock_bh(&q->lock);
 		}
 
-		if (mt76_txq_stopped(q))
-			break;
-
 		txq = ieee80211_next_txq(phy->hw, qid);
 		if (!txq)
 			break;
@@ -481,6 +475,8 @@  mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
 			continue;
 
+		spin_lock_bh(&q->lock);
+
 		if (mtxq->send_bar && mtxq->aggr) {
 			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
 			struct ieee80211_sta *sta = txq->sta;
@@ -494,10 +490,13 @@  mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 			spin_lock_bh(&q->lock);
 		}
 
-		ret += mt76_txq_send_burst(phy, q, mtxq);
+		if (!mt76_txq_stopped(q))
+			ret += mt76_txq_send_burst(phy, q, mtxq);
+
+		spin_unlock_bh(&q->lock);
+
 		ieee80211_return_txq(phy->hw, txq, false);
 	}
-	spin_unlock_bh(&q->lock);
 
 	return ret;
 }