@@ -519,6 +519,9 @@ struct hci_dev {
struct work_struct cmd_sync_cancel_work;
struct work_struct reenable_adv_work;
+ bool cmd_sync_locked;
+ struct hci_conn *cmd_sync_conn;
+
__u16 discov_timeout;
struct delayed_work discov_off;
@@ -1400,6 +1403,9 @@ void hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
+int hci_conn_sync_queue(struct hci_conn *conn, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+
struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
void hci_chan_list_flush(struct hci_conn *conn);
@@ -46,6 +46,9 @@ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
+void hci_cmd_sync_dev_lock(struct hci_dev *hdev);
+void hci_cmd_sync_dev_unlock(struct hci_dev *hdev);
+
int hci_update_eir_sync(struct hci_dev *hdev);
int hci_update_class_sync(struct hci_dev *hdev);
@@ -2889,3 +2889,73 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
return hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
NULL);
}
+
+struct hci_conn_sync_work_entry {
+ struct hci_conn *conn;
+ hci_cmd_sync_work_func_t func;
+ void *data;
+ hci_cmd_sync_work_destroy_t destroy;
+};
+
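+/* Run a queued per-connection callback with hci_dev_lock held and
+ * hdev->cmd_sync_conn set, or fail with -ENODEV if the connection
+ * was deleted while the work was queued.
+ */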
+static int hci_conn_sync_work_run(struct hci_dev *hdev, void *data)
+{
+ struct hci_conn_sync_work_entry *entry = data;
+ int err;
+
+ hci_cmd_sync_dev_lock(hdev);
+ hdev->cmd_sync_conn = entry->conn;
+
+ if (hci_conn_is_alive(hdev, entry->conn))
+ err = entry->func(hdev, entry->data);
+ else
+ err = -ENODEV;
+
+ hdev->cmd_sync_conn = NULL;
+ hci_cmd_sync_dev_unlock(hdev);
+
+ return err;
+}
+
+static void hci_conn_sync_work_destroy(struct hci_dev *hdev, void *data,
+ int err)
+{
+ struct hci_conn_sync_work_entry *entry = data;
+
+ if (entry->destroy)
+ entry->destroy(hdev, entry->data, err);
+ hci_conn_put(entry->conn);
+ kfree(entry);
+}
+
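+/* Queue func to run in hci_sync context with hci_dev_lock held and
+ * conn guaranteed to be alive on entry. A reference on conn is held
+ * until the work entry is destroyed.
+ */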
+int hci_conn_sync_queue(struct hci_conn *conn, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_conn_sync_work_entry *entry;
+ int err;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->func = func;
+ entry->data = data;
+ entry->destroy = destroy;
+ entry->conn = hci_conn_get(conn);
+
+ err = hci_cmd_sync_queue(conn->hdev, hci_conn_sync_work_run, entry,
+ hci_conn_sync_work_destroy);
+ if (err) {
+ hci_conn_put(conn);
+ kfree(entry);
+ }
+
+ return err;
+}
@@ -164,10 +164,16 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
if (err < 0)
return ERR_PTR(err);
+ if (hdev->cmd_sync_locked)
+ hci_dev_unlock(hdev);
+
err = wait_event_interruptible_timeout(hdev->req_wait_q,
hdev->req_status != HCI_REQ_PEND,
timeout);
+ if (hdev->cmd_sync_locked)
+ hci_dev_lock(hdev);
+
if (err == -ERESTARTSYS)
return ERR_PTR(-EINTR);
@@ -185,6 +191,11 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
break;
}
+ if (hdev->cmd_sync_conn) {
+ if (!hci_conn_is_alive(hdev, hdev->cmd_sync_conn))
+ err = -ENODEV;
+ }
+
hdev->req_status = 0;
hdev->req_result = 0;
skb = hdev->req_skb;
@@ -740,6 +751,30 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
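+/* Take hci_dev_lock inside a hci_sync callback, marking it so that
+ * __hci_cmd_sync_sk() releases and reacquires it around event waits.
+ * Must be called with hdev->req_lock held.
+ */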
+void hci_cmd_sync_dev_lock(struct hci_dev *hdev)
+{
+ lockdep_assert_held(&hdev->req_lock);
+
+ hci_dev_lock(hdev);
+
+ WARN_ON_ONCE(hdev->cmd_sync_locked);
+ hdev->cmd_sync_locked = true;
+}
+
+void hci_cmd_sync_dev_unlock(struct hci_dev *hdev)
+{
+ lockdep_assert_held(&hdev->req_lock);
+
+ WARN_ON_ONCE(!hdev->cmd_sync_locked);
+ hdev->cmd_sync_locked = false;
+
+ hci_dev_unlock(hdev);
+}
+
int hci_update_eir_sync(struct hci_dev *hdev)
{
struct hci_cp_write_eir cp;
Operations on a given hci_conn in hci_sync are hard to write safely,
because the conn may be deleted or modified unexpectedly, either
concurrently or while waiting for events. Holding hci_dev_lock in the
sync callbacks is also inconvenient, since it has to be released
manually before entering the event waits.

Add convenience utilities to make this easier:

Add hci_cmd_sync_dev_lock/unlock, for writing hci_sync callbacks that
should hold hci_dev_lock. The lock is still released and reacquired
around request waits, so callbacks using these can assume that state
changes protected by hci_dev_lock occur only while waiting for events.
(Many of the callbacks already assume this.)

Add hci_conn_sync_queue, whose callback on entry holds hci_dev_lock
with the hci_conn guaranteed not yet deleted. If the conn was deleted
while the sync command was queued, the destroy routine is called with
err == -ENODEV. Similarly, synchronous commands issued from the
callback fail with -ENODEV if the conn is deleted during the wait.

Signed-off-by: Pauli Virtanen <pav@iki.fi>
---
 include/net/bluetooth/hci_core.h |  6 +++
 include/net/bluetooth/hci_sync.h |  3 +
 net/bluetooth/hci_conn.c         | 70 ++++++++++++++++++++++++++++++++
 net/bluetooth/hci_sync.c         | 35 ++++++++++++++++
 4 files changed, 114 insertions(+)
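As an illustration of the intended usage (not part of this patch:
le_set_data_len_sync and le_set_data_len below are made-up names, and
the conn is taken from hdev->cmd_sync_conn, which
hci_conn_sync_work_run sets before invoking the callback), a
per-connection callback could look like:

static int le_set_data_len_sync(struct hci_dev *hdev, void *data)
{
	/* hci_dev_lock is held here and conn is known to be alive */
	struct hci_conn *conn = hdev->cmd_sync_conn;
	struct hci_cp_le_set_data_len cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.tx_len = cpu_to_le16(0x00fb);
	cp.tx_time = cpu_to_le16(0x0848);

	/* The lock is dropped and reacquired around the event wait;
	 * the command fails with -ENODEV if conn is deleted meanwhile.
	 */
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DATA_LEN,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int le_set_data_len(struct hci_conn *conn)
{
	/* The destroy callback (NULL here) is called with -ENODEV if
	 * conn is deleted before the callback gets to run.
	 */
	return hci_conn_sync_queue(conn, le_set_data_len_sync, NULL, NULL);
}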