@@ -302,6 +302,17 @@ struct amp_assoc {
#define HCI_MAX_PAGES 3
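+/* Callbacks for queued command work: func runs from the cmd_sync work queue
+ * with the request lock held; destroy, if provided, is called afterwards with
+ * func's return value, or with -ECANCELED if the entry is flushed before it
+ * runs, and should release @data.
+ */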
+typedef int (*cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
+typedef void (*cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
+ int err);
+
+struct cmd_sync_work_entry {
+ struct list_head list;
+ cmd_sync_work_func_t func;
+ void *data;
+ cmd_sync_work_destroy_t destroy;
+};
+
struct hci_dev {
struct list_head list;
struct mutex lock;
@@ -463,6 +474,9 @@ struct hci_dev {
struct work_struct power_on;
struct delayed_work power_off;
struct work_struct error_reset;
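+ /* Work item, pending-entry list and lock used by hci_cmd_sync_queue() */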
+ struct work_struct cmd_sync_work;
+ struct list_head cmd_sync_work_list;
+ struct mutex cmd_sync_work_lock;
__u16 discov_timeout;
struct delayed_work discov_off;
@@ -1701,6 +1715,9 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
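+/* Usage sketch (names are illustrative, not part of this patch): foo_sync
+ * runs with the request lock held and may issue synchronous HCI commands;
+ * foo_destroy releases the context whether foo_sync ran or was cancelled:
+ *
+ *   static int foo_sync(struct hci_dev *hdev, void *data)
+ *   {
+ *           return 0;
+ *   }
+ *
+ *   static void foo_destroy(struct hci_dev *hdev, void *data, int err)
+ *   {
+ *           kfree(data);
+ *   }
+ *
+ *   err = hci_cmd_sync_queue(hdev, foo_sync, data, foo_destroy);
+ */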
+int hci_cmd_sync_queue(struct hci_dev *hdev, cmd_sync_work_func_t func,
+ void *data, cmd_sync_work_destroy_t destroy);
+
u32 hci_conn_get_phy(struct hci_conn *conn);
/* ----- HCI Sockets ----- */
@@ -2329,6 +2329,81 @@ static void hci_error_reset(struct work_struct *work)
hci_dev_do_open(hdev);
}
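+/* Work callback: pop one queued entry per invocation and run it with the
+ * request lock held, handing the result to the entry's destroy callback.
+ */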
+static void hci_cmd_sync_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
+ struct cmd_sync_work_entry *entry;
+ cmd_sync_work_func_t func;
+ cmd_sync_work_destroy_t destroy;
+ void *data;
+
+ bt_dev_dbg(hdev, "");
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
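+ /* The list may already be empty if an earlier run drained it */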
+ entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
+ struct cmd_sync_work_entry, list);
+ if (entry) {
+ list_del(&entry->list);
+ func = entry->func;
+ data = entry->data;
+ destroy = entry->destroy;
+ kfree(entry);
+ } else {
+ func = NULL;
+ data = NULL;
+ destroy = NULL;
+ }
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
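+ /* Run outside cmd_sync_work_lock but under the request lock, so the
+ * callback can safely issue synchronous HCI commands.
+ */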
+ if (func) {
+ int err;
+
+ hci_req_sync_lock(hdev);
+
+ err = func(hdev, data);
+
+ if (destroy)
+ destroy(hdev, data, err);
+
+ hci_req_sync_unlock(hdev);
+ }
+}
+
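+/* Queue func for execution on hdev's request workqueue. On success, ownership
+ * of @data passes to the entry until destroy (if provided) is called with the
+ * result; returns -ENOMEM if the entry cannot be allocated.
+ */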
+int hci_cmd_sync_queue(struct hci_dev *hdev, cmd_sync_work_func_t func,
+ void *data, cmd_sync_work_destroy_t destroy)
+{
+ struct cmd_sync_work_entry *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->func = func;
+ entry->data = data;
+ entry->destroy = destroy;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
+
+ return 0;
+}
+
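+/* Flush entries that never ran, invoking their destroy callbacks with
+ * -ECANCELED so any per-entry data is released.
+ */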
+static void hci_cmd_sync_clear(struct hci_dev *hdev)
+{
+ struct cmd_sync_work_entry *entry, *tmp;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+ if (entry->destroy)
+ entry->destroy(hdev, entry->data, -ECANCELED);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+}
+
void hci_uuids_clear(struct hci_dev *hdev)
{
struct bt_uuid *uuid, *tmp;
@@ -3845,6 +3920,10 @@ struct hci_dev *hci_alloc_dev(void)
INIT_WORK(&hdev->error_reset, hci_error_reset);
INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
+ INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
+ INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
+ mutex_init(&hdev->cmd_sync_work_lock);
+
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
skb_queue_head_init(&hdev->rx_q);
@@ -4005,6 +4084,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
cancel_work_sync(&hdev->power_on);
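+ /* Stop the worker first, then cancel whatever is still queued */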
+ cancel_work_sync(&hdev->cmd_sync_work);
+ hci_cmd_sync_clear(hdev);
+
if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
hci_suspend_clear_tasks(hdev);
unregister_pm_notifier(&hdev->suspend_notifier);