@@ -849,6 +849,60 @@ void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel)
rtw89_hci_tx_kick_off(rtwdev, ch_dma);
}
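+
+/* Lifetime of a tx wait object (sketch; PCI is the only HCI backend wired
+ * up in this patch):
+ *
+ *   rtw89_core_tx_kick_off_and_wait()     TX completion path
+ *   ---------------------------------    ----------------------------
+ *   alloc 'wait', set skb_data->wait
+ *   kick off DMA              ------->   rtw89_pci_tx_status()
+ *   wait_for_completion_timeout() <----  complete(), queue_work()
+ *   atomic_inc(&wait->wait_done)
+ *                                        worker polls 'wait_done',
+ *                                        then kfree(wait)
+ */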
+static void rtw89_core_free_tx_wait_work(struct work_struct *work)
+{
+ struct rtw89_tx_wait_info *wait =
+ container_of(work, struct rtw89_tx_wait_info, work);
+ struct rtw89_dev *rtwdev = wait->rtwdev;
+ int done, ret;
+
+ /* Wait for rtw89_core_tx_kick_off_and_wait() to increase 'wait_done',
+ * which guarantees it no longer uses 'wait'. The polling period (1 ms)
+ * is far longer than the window between wait_for_completion_timeout()
+ * and atomic_inc() in rtw89_core_tx_kick_off_and_wait().
+ */
+ ret = read_poll_timeout(atomic_read, done, done, 1000, 100000, false,
+ &wait->wait_done);
+
+ if (ret) {
+ rtw89_err(rtwdev, "tx wait timed out; leak 'wait' to avoid use-after-free\n");
+ return;
+ }
+
+ kfree(wait);
+}
+
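+/* Kick off TX of @skb (already queued via rtw89_core_tx_write()) and wait up
+ * to @timeout ms for its completion. Returns 0 on success, -ETIMEDOUT if no
+ * completion arrived in time, or -EAGAIN if the hardware reported TX failure.
+ * If the wait object cannot be allocated, the frame is still sent and 0 is
+ * returned (best effort).
+ *
+ * Hypothetical caller sketch (identifiers illustrative only):
+ *
+ *   rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
+ *   ret = rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel, 100);
+ */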
+int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ int qsel, unsigned int timeout)
+{
+ struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
+ struct rtw89_tx_wait_info *wait;
+ unsigned long time_left;
+ int ret = 0;
+
+ wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ rtw89_core_tx_kick_off(rtwdev, qsel);
+ return 0;
+ }
+
+ init_completion(&wait->completion);
+ INIT_WORK(&wait->work, rtw89_core_free_tx_wait_work);
+ wait->rtwdev = rtwdev;
+
+ /* publish only after 'wait' is fully initialized */
+ skb_data->wait = wait;
+
+ rtw89_core_tx_kick_off(rtwdev, qsel);
+ time_left = wait_for_completion_timeout(&wait->completion,
+ msecs_to_jiffies(timeout));
+ if (time_left == 0)
+ ret = -ETIMEDOUT;
+ else if (!wait->tx_done)
+ ret = -EAGAIN;
+
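+ /* signal the gc worker that it may free 'wait' */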
+ atomic_inc(&wait->wait_done);
+
+ return ret;
+}
+
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
struct sk_buff *skb, bool fwdl)
{
@@ -3192,6 +3246,9 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
if (!rtwdev->txq_wq)
return -ENOMEM;
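+ /* the gc worker frees memory, so let it make progress under memory pressure */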
+ rtwdev->gc_wq = alloc_workqueue("rtw89_gc_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ if (!rtwdev->gc_wq) {
+ destroy_workqueue(rtwdev->txq_wq);
+ return -ENOMEM;
+ }
spin_lock_init(&rtwdev->ba_lock);
spin_lock_init(&rtwdev->rpwm_lock);
mutex_init(&rtwdev->mutex);
@@ -3233,6 +3290,7 @@ void rtw89_core_deinit(struct rtw89_dev *rtwdev)
rtw89_fw_free_all_early_h2c(rtwdev);
destroy_workqueue(rtwdev->txq_wq);
+ destroy_workqueue(rtwdev->gc_wq);
mutex_destroy(&rtwdev->rf_mutex);
mutex_destroy(&rtwdev->mutex);
}
@@ -2599,6 +2599,19 @@ struct rtw89_phy_rate_pattern {
bool enable;
};
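+/* Tracks one skb whose TX completion a caller waits for. Freed by the gc_wq
+ * worker once both the completion path and the waiter are done with it
+ * (intentionally leaked if that handoff times out).
+ */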
+struct rtw89_tx_wait_info {
+ struct rtw89_dev *rtwdev;
+ struct completion completion;
+ struct work_struct work;
+ atomic_t wait_done;
+ bool tx_done;
+};
+
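+/* Driver per-skb data, overlaid on ieee80211_tx_info::status.status_driver_data.
+ * hci_priv is the HCI layer's private area (e.g. struct rtw89_pci_tx_data for
+ * PCI); the combined size must stay within status_driver_data.
+ */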
+struct rtw89_tx_skb_data {
+ struct rtw89_tx_wait_info *wait;
+ u8 hci_priv[];
+};
+
#define RTW89_P2P_MAX_NOA_NUM 2
struct rtw89_vif {
@@ -3937,6 +3950,7 @@ struct rtw89_dev {
/* used to protect rf read write */
struct mutex rf_mutex;
struct workqueue_struct *txq_wq;
+ struct workqueue_struct *gc_wq;
struct work_struct txq_work;
struct delayed_work txq_reinvoke_work;
/* used to protect ba_list and forbid_ba_list */
@@ -4148,6 +4162,14 @@ static inline void rtw89_hci_clear(struct rtw89_dev *rtwdev, struct pci_dev *pde
rtwdev->hci.ops->clear(rtwdev, pdev);
}
+static inline
+struct rtw89_tx_skb_data *RTW89_TX_SKB_CB(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ return (struct rtw89_tx_skb_data *)info->status.status_driver_data;
+}
+
static inline u8 rtw89_read8(struct rtw89_dev *rtwdev, u32 addr)
{
return rtwdev->hci.ops->read8(rtwdev, addr);
@@ -4791,11 +4813,22 @@ static inline struct sk_buff *rtw89_alloc_skb_for_rx(struct rtw89_dev *rtwdev,
return dev_alloc_skb(length);
}
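+/* Called from the TX completion path: record the result, wake any waiter,
+ * then hand 'wait' over to the gc_wq worker, which frees it once the waiter
+ * has bumped 'wait_done'.
+ */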
+static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_wait_info *wait,
+ bool tx_done)
+{
+ wait->tx_done = tx_done;
+ complete(&wait->completion);
+ queue_work(rtwdev->gc_wq, &wait->work);
+}
+
int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
struct sk_buff *skb, bool fwdl);
void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel);
+int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ int qsel, unsigned int timeout);
void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
@@ -364,6 +364,8 @@ static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring,
struct sk_buff *skb, u8 tx_status)
{
+ struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
+ struct rtw89_tx_wait_info *wait = skb_data->wait;
struct ieee80211_tx_info *info;
info = IEEE80211_SKB_CB(skb);
@@ -394,6 +396,8 @@ static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
}
}
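+ /* wake a waiter in rtw89_core_tx_kick_off_and_wait(), if any */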
+ if (wait)
+ rtw89_core_tx_wait_complete(rtwdev, wait, tx_status == RTW89_TX_DONE);
ieee80211_tx_status_ni(rtwdev->hw, skb);
}
@@ -1203,6 +1207,7 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
struct pci_dev *pdev = rtwpci->pdev;
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
+ struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
bool en_wd_info = desc_info->en_wd_info;
u32 txwd_len;
u32 txwp_len;
@@ -1218,6 +1223,7 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
}
tx_data->dma = dma;
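+ /* no waiter by default; rtw89_core_tx_kick_off_and_wait() sets one */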
+ skb_data->wait = NULL;
txwp_len = sizeof(*txwp_info);
txwd_len = chip->txwd_body_size;
@@ -1004,9 +1004,9 @@ rtw89_pci_rxbd_increase(struct rtw89_pci_rx_ring *rx_ring, u32 cnt)
static inline struct rtw89_pci_tx_data *RTW89_PCI_TX_SKB_CB(struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtw89_tx_skb_data *data = RTW89_TX_SKB_CB(skb);
- return (struct rtw89_pci_tx_data *)info->status.status_driver_data;
+ return (struct rtw89_pci_tx_data *)data->hci_priv;
}
static inline struct rtw89_pci_tx_bd_32 *