
[v7,02/33] xhci: add helper to stop endpoint and wait for completion

Message ID 20230921214843.18450-3-quic_wcheng@quicinc.com
State New
Series Introduce QC USB SND audio offloading support

Commit Message

Wesley Cheng Sept. 21, 2023, 9:48 p.m. UTC
From: Mathias Nyman <mathias.nyman@linux.intel.com>

Expose xhci_stop_endpoint_sync() which is a synchronous variant of
xhci_queue_stop_endpoint().  This is useful for client drivers that are
using the secondary interrupters, and need to stop/clean up the current
session.  The stop endpoint command handler will also take care of cleaning
up the ring.

Modifications to repurpose the new API into existing stop endpoint
sequences were implemented by Wesley Cheng.

Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Co-developed-by: Wesley Cheng <quic_wcheng@quicinc.com>
Signed-off-by: Wesley Cheng <quic_wcheng@quicinc.com>
---
 drivers/usb/host/xhci-hub.c | 29 +++---------------
 drivers/usb/host/xhci.c     | 60 +++++++++++++++++++++++++++----------
 drivers/usb/host/xhci.h     |  2 ++
 3 files changed, 50 insertions(+), 41 deletions(-)
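
For readers outside the xhci tree, here is a minimal sketch of how a client
driver using a secondary interrupter might call the new helper; the
example_offload_stop_stream() wrapper and its name are purely illustrative --
only xhci_stop_endpoint_sync() and its signature come from this patch.

/*
 * Illustrative caller only -- not part of this patch.  A client driver
 * that owns a secondary interrupter tears down its session by stopping
 * the endpoint synchronously; the Stop Endpoint command handler also
 * cleans up the transfer ring.
 */
static int example_offload_stop_stream(struct xhci_hcd *xhci,
				       struct xhci_virt_ep *ep)
{
	int ret;

	/* Queue the Stop Endpoint command and wait for it to complete. */
	ret = xhci_stop_endpoint_sync(xhci, ep, 0);
	if (ret < 0)
		return ret;

	return 0;
}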

Comments

Mathias Nyman Sept. 28, 2023, 1:31 p.m. UTC | #1
On 22.9.2023 0.48, Wesley Cheng wrote:
> From: Mathias Nyman <mathias.nyman@linux.intel.com>
> 
> Expose xhci_stop_endpoint_sync() which is a synchronous variant of
> xhci_queue_stop_endpoint().  This is useful for client drivers that are
> using the secondary interrupters, and need to stop/clean up the current
> session.  The stop endpoint command handler will also take care of cleaning
> up the ring.
> 
> Modifications to repurpose the new API into existing stop endpoint
> sequences was implemented by Wesley Cheng.
> 
> Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
> Co-developed-by: Wesley Cheng <quic_wcheng@quicinc.com>
> Signed-off-by: Wesley Cheng <quic_wcheng@quicinc.com>
> ---
>   drivers/usb/host/xhci-hub.c | 29 +++---------------
>   drivers/usb/host/xhci.c     | 60 +++++++++++++++++++++++++++----------
>   drivers/usb/host/xhci.h     |  2 ++
>   3 files changed, 50 insertions(+), 41 deletions(-)
> 
> diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
> index 0054d02239e2..2f7309bdc922 100644
> --- a/drivers/usb/host/xhci-hub.c
> +++ b/drivers/usb/host/xhci-hub.c
> @@ -489,7 +489,6 @@ EXPORT_SYMBOL_GPL(xhci_find_slot_id_by_port);
>   static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
>   {
>   	struct xhci_virt_device *virt_dev;
> -	struct xhci_command *cmd;
>   	unsigned long flags;
>   	int ret;
>   	int i;
> @@ -501,10 +500,6 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
>   
>   	trace_xhci_stop_device(virt_dev);
>   
> -	cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
> -	if (!cmd)
> -		return -ENOMEM;
> -
>   	spin_lock_irqsave(&xhci->lock, flags);
>   	for (i = LAST_EP_INDEX; i > 0; i--) {
>   		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
> @@ -521,7 +516,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
>   			if (!command) {
>   				spin_unlock_irqrestore(&xhci->lock, flags);
>   				ret = -ENOMEM;
> -				goto cmd_cleanup;
> +				goto out;
>   			}
>   
>   			ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
> @@ -529,30 +524,14 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
>   			if (ret) {
>   				spin_unlock_irqrestore(&xhci->lock, flags);
>   				xhci_free_command(xhci, command);
> -				goto cmd_cleanup;
> +				goto out;
>   			}
>   		}
>   	}
> -	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
> -	if (ret) {
> -		spin_unlock_irqrestore(&xhci->lock, flags);
> -		goto cmd_cleanup;
> -	}
> -
> -	xhci_ring_cmd_db(xhci);
>   	spin_unlock_irqrestore(&xhci->lock, flags);
> +	ret = xhci_stop_endpoint_sync(xhci, &virt_dev->eps[0], suspend);

I didn't take this new xhci_stop_endpoint_sync() helper into use as it causes an extra
xhci spinlock release and reacquire here.

Also, the memory allocation flags differ: GFP_NOIO is turned into GFP_KERNEL after this change.

>   
> -	/* Wait for last stop endpoint command to finish */
> -	wait_for_completion(cmd->completion);
> -
> -	if (cmd->status == COMP_COMMAND_ABORTED ||
> -	    cmd->status == COMP_COMMAND_RING_STOPPED) {
> -		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
> -		ret = -ETIME;
> -	}
> -
> -cmd_cleanup:
> -	xhci_free_command(xhci, cmd);
> +out:
>   	return ret;
>   }
>   
> diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
> index 3fd2b58ee1d3..163d533d6200 100644
> --- a/drivers/usb/host/xhci.c
> +++ b/drivers/usb/host/xhci.c
> @@ -2758,6 +2758,46 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
>   	return -ENOMEM;
>   }
>   
> +/*
> + * Synchronous XHCI stop endpoint helper.  Issues the stop endpoint command and
> + * waits for the command completion before returning.
> + */
> +int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend)
> +{
> +	struct xhci_command *command;
> +	unsigned long flags;
> +	int ret;
> +
> +	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
> +	if (!command)
> +		return -ENOMEM;
> +
> +	spin_lock_irqsave(&xhci->lock, flags);
> +	ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
> +				       ep->ep_index, suspend);
> +	if (ret < 0) {
> +		spin_unlock_irqrestore(&xhci->lock, flags);
> +		goto out;
> +	}
> +
> +	xhci_ring_cmd_db(xhci);
> +	spin_unlock_irqrestore(&xhci->lock, flags);
> +
> +	ret = wait_for_completion_timeout(command->completion, msecs_to_jiffies(3000));
> +	if (!ret)
> +		xhci_warn(xhci, "%s: Unable to stop endpoint.\n",
> +				__func__);
> +
> +	if (command->status == COMP_COMMAND_ABORTED ||
> +	    command->status == COMP_COMMAND_RING_STOPPED) {
> +		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
> +		ret = -ETIME;
> +	}
> +out:
> +	xhci_free_command(xhci, command);
> +
> +	return ret;
> +}
>   
>   /* Issue a configure endpoint command or evaluate context command
>    * and wait for it to finish.
> @@ -3078,7 +3118,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
>   	struct xhci_virt_device *vdev;
>   	struct xhci_virt_ep *ep;
>   	struct xhci_input_control_ctx *ctrl_ctx;
> -	struct xhci_command *stop_cmd, *cfg_cmd;
> +	struct xhci_command *cfg_cmd;
>   	unsigned int ep_index;
>   	unsigned long flags;
>   	u32 ep_flag;
> @@ -3118,10 +3158,6 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
>   	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
>   		return;
>   
> -	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
> -	if (!stop_cmd)
> -		return;
> -
>   	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
>   	if (!cfg_cmd)
>   		goto cleanup;
> @@ -3144,23 +3180,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
>   		goto cleanup;
>   	}
>   
> -	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
> -					ep_index, 0);
> +	spin_unlock_irqrestore(&xhci->lock, flags);
> +

Same here, extra unlock -> lock, and GFP flags differ.


> +	err = xhci_stop_endpoint_sync(xhci, ep, 0);

Thanks
Mathias
Wesley Cheng Sept. 28, 2023, 10:10 p.m. UTC | #2
Hi Mathias,

On 9/28/2023 6:31 AM, Mathias Nyman wrote:
> On 22.9.2023 0.48, Wesley Cheng wrote:
>> From: Mathias Nyman <mathias.nyman@linux.intel.com>
>>
>> Expose xhci_stop_endpoint_sync() which is a synchronous variant of
>> xhci_queue_stop_endpoint().  This is useful for client drivers that are
>> using the secondary interrupters, and need to stop/clean up the current
>> session.  The stop endpoint command handler will also take care of 
>> cleaning
>> up the ring.
>>
>> Modifications to repurpose the new API into existing stop endpoint
>> sequences was implemented by Wesley Cheng.
>>
>> Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
>> Co-developed-by: Wesley Cheng <quic_wcheng@quicinc.com>
>> Signed-off-by: Wesley Cheng <quic_wcheng@quicinc.com>
>> ---
>>   drivers/usb/host/xhci-hub.c | 29 +++---------------
>>   drivers/usb/host/xhci.c     | 60 +++++++++++++++++++++++++++----------
>>   drivers/usb/host/xhci.h     |  2 ++
>>   3 files changed, 50 insertions(+), 41 deletions(-)
>>
>> diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
>> index 0054d02239e2..2f7309bdc922 100644
>> --- a/drivers/usb/host/xhci-hub.c
>> +++ b/drivers/usb/host/xhci-hub.c
>> @@ -489,7 +489,6 @@ EXPORT_SYMBOL_GPL(xhci_find_slot_id_by_port);
>>   static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int 
>> suspend)
>>   {
>>       struct xhci_virt_device *virt_dev;
>> -    struct xhci_command *cmd;
>>       unsigned long flags;
>>       int ret;
>>       int i;
>> @@ -501,10 +500,6 @@ static int xhci_stop_device(struct xhci_hcd 
>> *xhci, int slot_id, int suspend)
>>       trace_xhci_stop_device(virt_dev);
>> -    cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
>> -    if (!cmd)
>> -        return -ENOMEM;
>> -
>>       spin_lock_irqsave(&xhci->lock, flags);
>>       for (i = LAST_EP_INDEX; i > 0; i--) {
>>           if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
>> @@ -521,7 +516,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, 
>> int slot_id, int suspend)
>>               if (!command) {
>>                   spin_unlock_irqrestore(&xhci->lock, flags);
>>                   ret = -ENOMEM;
>> -                goto cmd_cleanup;
>> +                goto out;
>>               }
>>               ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
>> @@ -529,30 +524,14 @@ static int xhci_stop_device(struct xhci_hcd 
>> *xhci, int slot_id, int suspend)
>>               if (ret) {
>>                   spin_unlock_irqrestore(&xhci->lock, flags);
>>                   xhci_free_command(xhci, command);
>> -                goto cmd_cleanup;
>> +                goto out;
>>               }
>>           }
>>       }
>> -    ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
>> -    if (ret) {
>> -        spin_unlock_irqrestore(&xhci->lock, flags);
>> -        goto cmd_cleanup;
>> -    }
>> -
>> -    xhci_ring_cmd_db(xhci);
>>       spin_unlock_irqrestore(&xhci->lock, flags);
>> +    ret = xhci_stop_endpoint_sync(xhci, &virt_dev->eps[0], suspend);
> 
> I didn't take this new xhci_stop_endpoint_sync() helper into use as it 
> causes an extra
> xhci spinlock release and reacquire here.
> 
> Also the memory allocation flags differ, GFP_NOIO is turned into 
> GFP_KERNEL after this change.
> 

Thanks for the review.  I agree with the points made.  I wasn't sure if 
the extra unlock/lock would cause issues if we've already queued the 
stop ep for the other eps used by the device.

I think addressing the flags might be straightforward; we can just pass 
them in as an argument.  At least for this change in particular, is the 
concern that another xHCI command could be queued before the stop 
endpoint command is?
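
Something along those lines might look like the sketch below.  This is only
an illustration of passing the allocation flags in as an argument, not the
code in this patch:

/*
 * Sketch only -- not the posted patch.  Same flow as the helper above,
 * but the allocation flags become a parameter so callers such as
 * xhci_stop_device() can keep GFP_NOIO.
 */
int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
			    int suspend, gfp_t gfp_flags)
{
	struct xhci_command *command;
	unsigned long flags;
	int ret;

	command = xhci_alloc_command(xhci, true, gfp_flags);
	if (!command)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
				       ep->ep_index, suspend);
	if (ret < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto out;
	}

	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the stop endpoint command to finish. */
	wait_for_completion(command->completion);

	if (command->status == COMP_COMMAND_ABORTED ||
	    command->status == COMP_COMMAND_RING_STOPPED) {
		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
		ret = -ETIME;
	}
out:
	xhci_free_command(xhci, command);

	return ret;
}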

>> -    /* Wait for last stop endpoint command to finish */
>> -    wait_for_completion(cmd->completion);
>> -
>> -    if (cmd->status == COMP_COMMAND_ABORTED ||
>> -        cmd->status == COMP_COMMAND_RING_STOPPED) {
>> -        xhci_warn(xhci, "Timeout while waiting for stop endpoint 
>> command\n");
>> -        ret = -ETIME;
>> -    }
>> -
>> -cmd_cleanup:
>> -    xhci_free_command(xhci, cmd);
>> +out:
>>       return ret;
>>   }
>> diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
>> index 3fd2b58ee1d3..163d533d6200 100644
>> --- a/drivers/usb/host/xhci.c
>> +++ b/drivers/usb/host/xhci.c
>> @@ -2758,6 +2758,46 @@ static int xhci_reserve_bandwidth(struct 
>> xhci_hcd *xhci,
>>       return -ENOMEM;
>>   }
>> +/*
>> + * Synchronous XHCI stop endpoint helper.  Issues the stop endpoint 
>> command and
>> + * waits for the command completion before returning.
>> + */
>> +int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct 
>> xhci_virt_ep *ep, int suspend)
>> +{
>> +    struct xhci_command *command;
>> +    unsigned long flags;
>> +    int ret;
>> +
>> +    command = xhci_alloc_command(xhci, true, GFP_KERNEL);
>> +    if (!command)
>> +        return -ENOMEM;
>> +
>> +    spin_lock_irqsave(&xhci->lock, flags);
>> +    ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
>> +                       ep->ep_index, suspend);
>> +    if (ret < 0) {
>> +        spin_unlock_irqrestore(&xhci->lock, flags);
>> +        goto out;
>> +    }
>> +
>> +    xhci_ring_cmd_db(xhci);
>> +    spin_unlock_irqrestore(&xhci->lock, flags);
>> +
>> +    ret = wait_for_completion_timeout(command->completion, 
>> msecs_to_jiffies(3000));
>> +    if (!ret)
>> +        xhci_warn(xhci, "%s: Unable to stop endpoint.\n",
>> +                __func__);
>> +
>> +    if (command->status == COMP_COMMAND_ABORTED ||
>> +        command->status == COMP_COMMAND_RING_STOPPED) {
>> +        xhci_warn(xhci, "Timeout while waiting for stop endpoint 
>> command\n");
>> +        ret = -ETIME;
>> +    }
>> +out:
>> +    xhci_free_command(xhci, command);
>> +
>> +    return ret;
>> +}
>>   /* Issue a configure endpoint command or evaluate context command
>>    * and wait for it to finish.
>> @@ -3078,7 +3118,7 @@ static void xhci_endpoint_reset(struct usb_hcd 
>> *hcd,
>>       struct xhci_virt_device *vdev;
>>       struct xhci_virt_ep *ep;
>>       struct xhci_input_control_ctx *ctrl_ctx;
>> -    struct xhci_command *stop_cmd, *cfg_cmd;
>> +    struct xhci_command *cfg_cmd;
>>       unsigned int ep_index;
>>       unsigned long flags;
>>       u32 ep_flag;
>> @@ -3118,10 +3158,6 @@ static void xhci_endpoint_reset(struct usb_hcd 
>> *hcd,
>>       if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
>>           return;
>> -    stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
>> -    if (!stop_cmd)
>> -        return;
>> -
>>       cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
>>       if (!cfg_cmd)
>>           goto cleanup;
>> @@ -3144,23 +3180,16 @@ static void xhci_endpoint_reset(struct usb_hcd 
>> *hcd,
>>           goto cleanup;
>>       }
>> -    err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
>> -                    ep_index, 0);
>> +    spin_unlock_irqrestore(&xhci->lock, flags);
>> +
> 
> Same here, extra unlock -> lock, and GFP flags differ.
> 
> 

My intention here (GFP flags aside) was that the locking is mainly 
needed for setting the EP state flag -- EP_SOFT_CLEAR_TOGGLE.  Once that 
flag is set, queuing of new TDs is blocked.  That seems to be why there 
is a check like this afterwards:

if (!list_empty(&ep->ring->td_list)) {

So I believed that releasing the lock here would be OK, because by that 
point the flag is already set and nothing can be added to the td_list.
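
Put differently, the ordering being relied on is roughly the following; this
is a simplified illustration of the locking argument above, not a verbatim
excerpt from xhci_endpoint_reset():

/*
 * Simplified illustration only.  The flag is set under the xhci lock,
 * the lock is dropped for the synchronous stop, and the flag is cleared
 * again under the lock during cleanup.
 */
static int example_soft_reset_ordering(struct xhci_hcd *xhci,
				       struct xhci_virt_ep *ep)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&xhci->lock, flags);
	/* While this flag is set, no new TDs can be queued on the endpoint. */
	ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* The lock is dropped here, but the flag above is still set. */
	err = xhci_stop_endpoint_sync(xhci, ep, 0);

	spin_lock_irqsave(&xhci->lock, flags);
	ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
	spin_unlock_irqrestore(&xhci->lock, flags);

	return err;
}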

Thanks
Wesley Cheng

Patch

diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0054d02239e2..2f7309bdc922 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -489,7 +489,6 @@  EXPORT_SYMBOL_GPL(xhci_find_slot_id_by_port);
 static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 {
 	struct xhci_virt_device *virt_dev;
-	struct xhci_command *cmd;
 	unsigned long flags;
 	int ret;
 	int i;
@@ -501,10 +500,6 @@  static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 
 	trace_xhci_stop_device(virt_dev);
 
-	cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
-	if (!cmd)
-		return -ENOMEM;
-
 	spin_lock_irqsave(&xhci->lock, flags);
 	for (i = LAST_EP_INDEX; i > 0; i--) {
 		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
@@ -521,7 +516,7 @@  static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 			if (!command) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
 				ret = -ENOMEM;
-				goto cmd_cleanup;
+				goto out;
 			}
 
 			ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
@@ -529,30 +524,14 @@  static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 			if (ret) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
 				xhci_free_command(xhci, command);
-				goto cmd_cleanup;
+				goto out;
 			}
 		}
 	}
-	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
-	if (ret) {
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		goto cmd_cleanup;
-	}
-
-	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
+	ret = xhci_stop_endpoint_sync(xhci, &virt_dev->eps[0], suspend);
 
-	/* Wait for last stop endpoint command to finish */
-	wait_for_completion(cmd->completion);
-
-	if (cmd->status == COMP_COMMAND_ABORTED ||
-	    cmd->status == COMP_COMMAND_RING_STOPPED) {
-		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
-		ret = -ETIME;
-	}
-
-cmd_cleanup:
-	xhci_free_command(xhci, cmd);
+out:
 	return ret;
 }
 
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3fd2b58ee1d3..163d533d6200 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2758,6 +2758,46 @@  static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
 	return -ENOMEM;
 }
 
+/*
+ * Synchronous XHCI stop endpoint helper.  Issues the stop endpoint command and
+ * waits for the command completion before returning.
+ */
+int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend)
+{
+	struct xhci_command *command;
+	unsigned long flags;
+	int ret;
+
+	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+	if (!command)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
+				       ep->ep_index, suspend);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		goto out;
+	}
+
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	ret = wait_for_completion_timeout(command->completion, msecs_to_jiffies(3000));
+	if (!ret)
+		xhci_warn(xhci, "%s: Unable to stop endpoint.\n",
+				__func__);
+
+	if (command->status == COMP_COMMAND_ABORTED ||
+	    command->status == COMP_COMMAND_RING_STOPPED) {
+		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
+		ret = -ETIME;
+	}
+out:
+	xhci_free_command(xhci, command);
+
+	return ret;
+}
 
 /* Issue a configure endpoint command or evaluate context command
  * and wait for it to finish.
@@ -3078,7 +3118,7 @@  static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	struct xhci_virt_device *vdev;
 	struct xhci_virt_ep *ep;
 	struct xhci_input_control_ctx *ctrl_ctx;
-	struct xhci_command *stop_cmd, *cfg_cmd;
+	struct xhci_command *cfg_cmd;
 	unsigned int ep_index;
 	unsigned long flags;
 	u32 ep_flag;
@@ -3118,10 +3158,6 @@  static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
 		return;
 
-	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
-	if (!stop_cmd)
-		return;
-
 	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
 	if (!cfg_cmd)
 		goto cleanup;
@@ -3144,23 +3180,16 @@  static void xhci_endpoint_reset(struct usb_hcd *hcd,
 		goto cleanup;
 	}
 
-	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
-					ep_index, 0);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	err = xhci_stop_endpoint_sync(xhci, ep, 0);
 	if (err < 0) {
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		xhci_free_command(xhci, cfg_cmd);
 		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
 				__func__, err);
 		goto cleanup;
 	}
 
-	xhci_ring_cmd_db(xhci);
-	spin_unlock_irqrestore(&xhci->lock, flags);
-
-	wait_for_completion(stop_cmd->completion);
-
 	spin_lock_irqsave(&xhci->lock, flags);
-
 	/* config ep command clears toggle if add and drop ep flags are set */
 	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
 	if (!ctrl_ctx) {
@@ -3192,7 +3221,6 @@  static void xhci_endpoint_reset(struct usb_hcd *hcd,
 
 	xhci_free_command(xhci, cfg_cmd);
 cleanup:
-	xhci_free_command(xhci, stop_cmd);
 	spin_lock_irqsave(&xhci->lock, flags);
 	if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
 		ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index d706a27ec0a3..898b6434dc27 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2125,6 +2125,8 @@  void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
 void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
 unsigned int count_trbs(u64 addr, u64 len);
+int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+			    int suspend);
 
 /* xHCI roothub code */
 void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,