@@ -432,6 +432,7 @@ enum {
HCI_WIDEBAND_SPEECH_ENABLED,
HCI_EVENT_FILTER_CONFIGURED,
HCI_PA_SYNC,
+ HCI_SCO_FLOWCTL,

HCI_DUT_MODE,
HCI_VENDOR_DIAG,
@@ -1528,6 +1529,11 @@ struct hci_rp_read_tx_power {
__s8 tx_power;
} __packed;

+#define HCI_OP_WRITE_SYNC_FLOWCTL 0x0c2f
+struct hci_cp_write_sync_flowctl {
+ __u8 enable;
+} __packed;
+
#define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46
struct hci_rp_read_page_scan_type {
__u8 status;
@@ -1857,6 +1857,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD)
#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF)
#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK)
+#define lmp_sco_capable(dev) ((dev)->features[0][1] & LMP_SCO)
#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO)
#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR))
@@ -3564,11 +3564,25 @@ static void hci_sched_sco(struct hci_dev *hdev)
BT_DBG("skb %p len %d", skb, skb->len);
hci_send_frame(hdev, skb);

+ hdev->sco_cnt--;
conn->sent++;
if (conn->sent == ~0)
conn->sent = 0;
}
}
+
+ /* Restore sco_cnt if flow control has not been enabled as
+ * HCI_EV_NUM_COMP_PKTS won't be generated.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) {
+ hdev->sco_cnt = hdev->sco_pkts;
+
+ /* As flow control is disabled force tx_work to run if there are
+ * still packets left in the queue.
+ */
+ if (!skb_queue_empty(&conn->data_q))
+ queue_work(hdev->workqueue, &hdev->tx_work);
+ }
}

static void hci_sched_esco(struct hci_dev *hdev)
@@ -3588,11 +3602,25 @@ static void hci_sched_esco(struct hci_dev *hdev)
BT_DBG("skb %p len %d", skb, skb->len);
hci_send_frame(hdev, skb);

+ hdev->sco_cnt--;
conn->sent++;
if (conn->sent == ~0)
conn->sent = 0;
}
}
+
+ /* Restore sco_cnt if flow control has not been enabled as
+ * HCI_EV_NUM_COMP_PKTS won't be generated.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) {
+ hdev->sco_cnt = hdev->sco_pkts;
+
+ /* As flow control is disabled force tx_work to run if there are
+ * still packets left in the queue.
+ */
+ if (!skb_queue_empty(&conn->data_q))
+ queue_work(hdev->workqueue, &hdev->tx_work);
+ }
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
@@ -3766,6 +3766,27 @@ static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
sizeof(param), &param, HCI_CMD_TIMEOUT);
}

+/* Enable SCO flow control if supported */
+static int hci_write_sync_flowctl_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_sync_flowctl cp;
+ int err;
+
+ /* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */
+ if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = 0x01;
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (!err)
+ hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL);
+
+ return err;
+}
+
/* BR Controller init stage 2 command sequence */
static const struct hci_init_stage br_init2[] = {
/* HCI_OP_READ_BUFFER_SIZE */
@@ -3784,6 +3805,8 @@ static const struct hci_init_stage br_init2[] = {
HCI_INIT(hci_clear_event_filter_sync),
/* HCI_OP_WRITE_CA_TIMEOUT */
HCI_INIT(hci_write_ca_timeout_sync),
+ /* HCI_OP_WRITE_SYNC_FLOWCTL */
+ HCI_INIT(hci_write_sync_flowctl_sync),
{}
};
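
For reference, a minimal sketch (not part of the patch) of what the HCI_OP_WRITE_SYNC_FLOWCTL command added above looks like on the wire, assuming a UART (H4) transport with its 0x01 command packet indicator:

/* Write Synchronous Flow Control Enable, opcode 0x0c2f (OGF 0x03, OCF 0x02f),
 * carrying a single one-byte Enable parameter set to 0x01, matching
 * struct hci_cp_write_sync_flowctl above.
 */
#include <stdint.h>

static const uint8_t write_sync_flowctl_cmd[] = {
	0x01,       /* H4 packet type: HCI command */
	0x2f, 0x0c, /* opcode 0x0c2f, little endian */
	0x01,       /* parameter total length */
	0x01,       /* Synchronous_Flow_Control_Enable = enabled */
};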