@@ -535,8 +535,13 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
cmd->status == COMP_COMMAND_RING_STOPPED) {
xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
ret = -ETIME;
+ goto cmd_cleanup;
}
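+	/*
+	 * Let the vendor driver synchronize its copy of the device context
+	 * for this slot now that the stop endpoint command has completed
+	 * (a no-op when no hook is registered).
+	 */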
+ ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
+ if (ret)
+ xhci_warn(xhci, "Sync device context failed, ret=%d\n", ret);
+
cmd_cleanup:
xhci_free_command(xhci, cmd);
return ret;
@@ -367,6 +367,54 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
return 0;
}
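+
+/*
+ * Thin dispatch helpers for the vendor hooks below: each one checks that
+ * vendor ops are registered and that the specific callback is implemented,
+ * and otherwise falls back to a harmless default so the core code paths
+ * keep working when no vendor driver is present.
+ */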
+static void xhci_vendor_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->free_container_ctx)
+ ops->free_container_ctx(xhci, ctx);
+}
+
+static void xhci_vendor_alloc_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ int type, gfp_t flags)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->alloc_container_ctx)
+ ops->alloc_container_ctx(xhci, ctx, type, flags);
+}
+
+static struct xhci_ring *xhci_vendor_alloc_transfer_ring(struct xhci_hcd *xhci,
+ u32 endpoint_type, enum xhci_ring_type ring_type,
+ unsigned int max_packet, gfp_t mem_flags)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->alloc_transfer_ring)
+ return ops->alloc_transfer_ring(xhci, endpoint_type, ring_type,
+ max_packet, mem_flags);
+	return NULL;
+}
+
+void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
+ struct xhci_ring *ring, unsigned int ep_index)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->free_transfer_ring)
+ ops->free_transfer_ring(xhci, ring, ep_index);
+}
+
+bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, unsigned int ep_index)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->is_usb_offload_enabled)
+ return ops->is_usb_offload_enabled(xhci, virt_dev, ep_index);
+ return false;
+}
+
/*
* Create a new ring with zero or more segments.
*
@@ -419,7 +467,11 @@ void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index)
{
- xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+ if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index))
+ xhci_vendor_free_transfer_ring(xhci, virt_dev->eps[ep_index].ring, ep_index);
+ else
+ xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+
virt_dev->eps[ep_index].ring = NULL;
}
@@ -477,6 +529,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
{
struct xhci_container_ctx *ctx;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
return NULL;
@@ -490,7 +543,12 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
if (type == XHCI_CTX_TYPE_INPUT)
ctx->size += CTX_SIZE(xhci->hcc_params);
- ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
+ if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
+ (ops && ops->alloc_container_ctx))
+ xhci_vendor_alloc_container_ctx(xhci, ctx, type, flags);
+ else
+ ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
+
if (!ctx->bytes) {
kfree(ctx);
return NULL;
@@ -501,9 +559,16 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
if (!ctx)
return;
- dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+ if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
+ (ops && ops->free_container_ctx))
+ xhci_vendor_free_container_ctx(xhci, ctx);
+ else
+ dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+
kfree(ctx);
}
@@ -896,7 +961,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
for (i = 0; i < 31; i++) {
if (dev->eps[i].ring)
- xhci_ring_free(xhci, dev->eps[i].ring);
+ xhci_free_endpoint_ring(xhci, dev, i);
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
@@ -1494,8 +1559,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
mult = 0;
/* Set up the endpoint ring */
- virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
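+	/*
+	 * With USB offload enabled, isochronous endpoints take their transfer
+	 * ring from the vendor hook; all other endpoint types keep the default
+	 * ring allocation.
+	 */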
+ if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index) &&
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ virt_dev->eps[ep_index].new_ring =
+ xhci_vendor_alloc_transfer_ring(xhci, endpoint_type, ring_type,
+ max_packet, mem_flags);
+ } else {
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ }
+
if (!virt_dev->eps[ep_index].new_ring)
return -ENOMEM;
@@ -1843,6 +1916,24 @@ void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
}
EXPORT_SYMBOL_GPL(xhci_free_erst);
+static struct xhci_device_context_array *xhci_vendor_alloc_dcbaa(
+ struct xhci_hcd *xhci, gfp_t flags)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->alloc_dcbaa)
+ return ops->alloc_dcbaa(xhci, flags);
+	return NULL;
+}
+
+static void xhci_vendor_free_dcbaa(struct xhci_hcd *xhci)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->free_dcbaa)
+ ops->free_dcbaa(xhci);
+}
+
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
@@ -1894,9 +1985,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed medium stream array pool");
- if (xhci->dcbaa)
- dma_free_coherent(dev, sizeof(*xhci->dcbaa),
- xhci->dcbaa, xhci->dcbaa->dma);
+ if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
+ xhci_vendor_free_dcbaa(xhci);
+ } else {
+ if (xhci->dcbaa)
+ dma_free_coherent(dev, sizeof(*xhci->dcbaa),
+ xhci->dcbaa, xhci->dcbaa->dma);
+ }
xhci->dcbaa = NULL;
scratchpad_free(xhci);
@@ -2433,15 +2528,21 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* xHCI section 5.4.6 - Device Context array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
- xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
- flags);
- if (!xhci->dcbaa)
- goto fail;
- xhci->dcbaa->dma = dma;
+ if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
+ xhci->dcbaa = xhci_vendor_alloc_dcbaa(xhci, flags);
+ if (!xhci->dcbaa)
+ goto fail;
+ } else {
+ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
+ flags);
+ if (!xhci->dcbaa)
+ goto fail;
+ xhci->dcbaa->dma = dma;
+ }
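+	/*
+	 * On either path dcbaa->dma is expected to hold the DMA address of the
+	 * array (the vendor allocator fills it in on the offload path), so the
+	 * trace and the DCBAAP write below use it instead of the local 'dma'.
+	 */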
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
- xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
+ xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
@@ -173,6 +173,41 @@ static const struct of_device_id usb_xhci_of_match[] = {
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif
+static struct xhci_plat_priv_overwrite xhci_plat_vendor_overwrite;
+
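+/*
+ * A vendor offload driver calls this (typically from its module init) to
+ * install its hooks.  Registration must happen before xhci_plat_probe() runs
+ * for the hooks to be picked up by xhci_vendor_init().
+ */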
+int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops)
+{
+ if (vendor_ops == NULL)
+ return -EINVAL;
+
+ xhci_plat_vendor_overwrite.vendor_ops = vendor_ops;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_plat_register_vendor_ops);
+
+static int xhci_vendor_init(struct xhci_hcd *xhci)
+{
+ struct xhci_vendor_ops *ops = NULL;
+
+ if (xhci_plat_vendor_overwrite.vendor_ops)
+ ops = xhci->vendor_ops = xhci_plat_vendor_overwrite.vendor_ops;
+
+ if (ops && ops->vendor_init)
+ return ops->vendor_init(xhci);
+ return 0;
+}
+
+static void xhci_vendor_cleanup(struct xhci_hcd *xhci)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->vendor_cleanup)
+ ops->vendor_cleanup(xhci);
+
+ xhci->vendor_ops = NULL;
+}
+
static int xhci_plat_probe(struct platform_device *pdev)
{
const struct xhci_plat_priv *priv_match;
@@ -321,6 +356,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto put_usb3_hcd;
}
+ ret = xhci_vendor_init(xhci);
+ if (ret)
+ goto disable_usb_phy;
+
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
xhci->shared_hcd->tpl_support = hcd->tpl_support;
if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
@@ -393,8 +432,10 @@ static int xhci_plat_remove(struct platform_device *dev)
usb_phy_shutdown(hcd->usb_phy);
usb_remove_hcd(hcd);
- usb_put_hcd(shared_hcd);
+ xhci_vendor_cleanup(xhci);
+
+ usb_put_hcd(shared_hcd);
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
usb_put_hcd(hcd);
@@ -13,6 +13,7 @@
struct xhci_plat_priv {
const char *firmware_name;
unsigned long long quirks;
+ struct xhci_vendor_data *vendor_data;
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
int (*suspend_quirk)(struct usb_hcd *);
@@ -21,4 +22,11 @@ struct xhci_plat_priv {
#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
#define xhci_to_priv(x) ((struct xhci_plat_priv *)(x)->priv)
+
+struct xhci_plat_priv_overwrite {
+ struct xhci_vendor_ops *vendor_ops;
+};
+
+int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops);
+
#endif /* _XHCI_PLAT_H */
@@ -3075,6 +3075,15 @@ void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
}
EXPORT_SYMBOL_GPL(xhci_update_erst_dequeue);
+static irqreturn_t xhci_vendor_queue_irq_work(struct xhci_hcd *xhci)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->queue_irq_work)
+ return ops->queue_irq_work(xhci);
+ return IRQ_NONE;
+}
+
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
@@ -3108,6 +3117,10 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
goto out;
}
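+	/*
+	 * Give the vendor driver a chance to claim the interrupt; if it queues
+	 * its own work and reports IRQ_HANDLED, skip the normal event ring
+	 * processing below.
+	 */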
+ ret = xhci_vendor_queue_irq_work(xhci);
+ if (ret == IRQ_HANDLED)
+ goto out;
+
/*
* Clear the op reg interrupt status first,
* so we can receive interrupts from other MSI-X interrupters.
@@ -1632,6 +1632,11 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
return -ENODEV;
}
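+	/*
+	 * URBs for endpoints owned by the vendor offload driver are not queued
+	 * on the normal transfer rings, so reject them here.
+	 */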
+ if (xhci_vendor_usb_offload_skip_urb(xhci, urb)) {
+ xhci_dbg(xhci, "skip urb for usb offload\n");
+ return -EOPNOTSUPP;
+ }
+
if (usb_endpoint_xfer_isoc(&urb->ep->desc))
num_tds = urb->number_of_packets;
else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
@@ -2975,6 +2980,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
xhci_finish_resource_reservation(xhci, ctrl_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
}
+ if (ret)
+ goto failed;
+
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
+ if (ret)
+		xhci_warn(xhci, "Sync device context failed, ret=%d\n", ret);
+
+failed:
return ret;
}
@@ -3118,7 +3131,11 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
for (i = 0; i < 31; i++) {
if (virt_dev->eps[i].new_ring) {
xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
- xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
+ if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, i))
+ xhci_vendor_free_transfer_ring(xhci, virt_dev->eps[i].new_ring, i);
+ else
+ xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
+
virt_dev->eps[i].new_ring = NULL;
}
}
@@ -3279,6 +3296,13 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
wait_for_completion(stop_cmd->completion);
+ err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
+ if (err) {
+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d\n",
+ __func__, err);
+ goto cleanup;
+ }
+
spin_lock_irqsave(&xhci->lock, flags);
/* config ep command clears toggle if add and drop ep flags are set */
@@ -3310,6 +3334,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
wait_for_completion(cfg_cmd->completion);
+ err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
+ if (err)
+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d\n",
+ __func__, err);
+
xhci_free_command(xhci, cfg_cmd);
cleanup:
xhci_free_command(xhci, stop_cmd);
@@ -3855,6 +3884,13 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
/* Wait for the Reset Device command to finish */
wait_for_completion(reset_device_cmd->completion);
+ ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
+ if (ret) {
+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d\n",
+ __func__, ret);
+ goto command_cleanup;
+ }
+
/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
* unless we tried to reset a slot ID that wasn't enabled,
* or the device wasn't in the addressed or configured state.
@@ -4100,6 +4136,14 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
goto disable_slot;
}
+
+ ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
+ if (ret) {
+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d\n",
+ __func__, ret);
+ goto disable_slot;
+ }
+
vdev = xhci->devs[slot_id];
slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
trace_xhci_alloc_dev(slot_ctx);
@@ -4230,6 +4274,13 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
wait_for_completion(command->completion);
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
+ if (ret) {
+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d\n",
+ __func__, ret);
+ goto out;
+ }
+
/* FIXME: From section 4.3.4: "Software shall be responsible for timing
* the SetAddress() "recovery interval" required by USB and aborting the
* command on a timeout.
@@ -4382,6 +4433,14 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
return -ENOMEM;
}
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -4409,6 +4468,30 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
return ret;
}
+struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci)
+{
+ return xhci->vendor_ops;
+}
+EXPORT_SYMBOL_GPL(xhci_vendor_get_ops);
+
+int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->sync_dev_ctx)
+ return ops->sync_dev_ctx(xhci, slot_id);
+ return 0;
+}
+
+bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->usb_offload_skip_urb)
+ return ops->usb_offload_skip_urb(xhci, urb);
+ return false;
+}
+
#ifdef CONFIG_PM
/* BESL to HIRD Encoding array for USB2 LPM */
@@ -5133,6 +5216,15 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
return -ENOMEM;
}
+ ret = xhci_vendor_sync_dev_ctx(xhci, hdev->slot_id);
+ if (ret) {
+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d\n",
+ __func__, ret);
+ xhci_free_command(xhci, config_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+ }
+
xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
@@ -1929,6 +1929,9 @@ struct xhci_hcd {
struct list_head regset_list;
void *dbc;
+
+ struct xhci_vendor_ops *vendor_ops;
+
/* platform-specific data -- must come last */
unsigned long priv[] __aligned(sizeof(s64));
};
@@ -2207,6 +2210,53 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
urb->stream_id);
}
+/**
+ * struct xhci_vendor_ops - function callbacks for vendor-specific operations
+ * @vendor_init: called during vendor initialization
+ * @vendor_cleanup: called during vendor cleanup
+ * @is_usb_offload_enabled: called to check whether USB offload is enabled
+ * @queue_irq_work: called to queue vendor-specific IRQ work
+ * @alloc_dcbaa: called to allocate a vendor-specific DCBAA
+ * @free_dcbaa: called to free the vendor-specific DCBAA
+ * @alloc_transfer_ring: called when a remote (vendor-managed) transfer ring is required
+ * @free_transfer_ring: called to free a vendor-specific transfer ring
+ * @sync_dev_ctx: called when the device context for a slot needs to be synchronized
+ * @usb_offload_skip_urb: called to check whether a URB should bypass the normal enqueue path
+ * @alloc_container_ctx: called to allocate a vendor-specific container context
+ * @free_container_ctx: called to free a vendor-specific container context
+ */
+struct xhci_vendor_ops {
+ int (*vendor_init)(struct xhci_hcd *xhci);
+ void (*vendor_cleanup)(struct xhci_hcd *xhci);
+ bool (*is_usb_offload_enabled)(struct xhci_hcd *xhci,
+ struct xhci_virt_device *vdev,
+ unsigned int ep_index);
+ irqreturn_t (*queue_irq_work)(struct xhci_hcd *xhci);
+
+ struct xhci_device_context_array *(*alloc_dcbaa)(struct xhci_hcd *xhci,
+ gfp_t flags);
+ void (*free_dcbaa)(struct xhci_hcd *xhci);
+
+ struct xhci_ring *(*alloc_transfer_ring)(struct xhci_hcd *xhci,
+ u32 endpoint_type, enum xhci_ring_type ring_type,
+ unsigned int max_packet, gfp_t mem_flags);
+ void (*free_transfer_ring)(struct xhci_hcd *xhci,
+ struct xhci_ring *ring, unsigned int ep_index);
+ int (*sync_dev_ctx)(struct xhci_hcd *xhci, unsigned int slot_id);
+ bool (*usb_offload_skip_urb)(struct xhci_hcd *xhci, struct urb *urb);
+ void (*alloc_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ int type, gfp_t flags);
+ void (*free_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+};
+
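+/*
+ * Illustrative sketch (not part of this patch): a vendor offload driver fills
+ * in only the hooks it needs and registers them through the xhci-plat glue
+ * before the controller probes.  The acme_* names below are placeholders.
+ *
+ *	static struct xhci_vendor_ops acme_xhci_ops = {
+ *		.vendor_init            = acme_vendor_init,
+ *		.vendor_cleanup         = acme_vendor_cleanup,
+ *		.is_usb_offload_enabled = acme_is_offload_enabled,
+ *		.sync_dev_ctx           = acme_sync_dev_ctx,
+ *	};
+ *
+ *	static int __init acme_offload_init(void)
+ *	{
+ *		return xhci_plat_register_vendor_ops(&acme_xhci_ops);
+ *	}
+ *
+ * Hooks left NULL fall back to the default xHCI behaviour, since every
+ * xhci_vendor_*() wrapper checks the callback pointer before calling it.
+ */
+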
+struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci);
+
+int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id);
+bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb);
+void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
+ struct xhci_ring *ring, unsigned int ep_index);
+bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, unsigned int ep_index);
+
/*
* TODO: As per spec Isochronous IDT transmissions are supported. We bypass
* them anyways as we where unable to find a device that matches the