--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -361,6 +361,38 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
return 0;
}
+static struct xhci_ring *xhci_vendor_alloc_transfer_ring(struct xhci_hcd *xhci,
+ u32 endpoint_type, enum xhci_ring_type ring_type,
+ unsigned int max_packet, gfp_t mem_flags)
+{
+ struct xhci_offload_ops *ops = xhci_offload_get_ops(xhci);
+
+ if (ops && ops->alloc_transfer_ring)
+ return ops->alloc_transfer_ring(xhci, endpoint_type, ring_type,
+ max_packet, mem_flags);
+ return NULL;
+}
+
+static void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, unsigned int ep_index)
+{
+ struct xhci_offload_ops *ops = xhci_offload_get_ops(xhci);
+
+ if (ops && ops->free_transfer_ring)
+ ops->free_transfer_ring(xhci, virt_dev, ep_index);
+}
+
+static bool xhci_vendor_is_offload_enabled(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, unsigned int ep_index)
+{
+ struct xhci_offload_ops *ops = xhci_offload_get_ops(xhci);
+
+ if (ops && ops->is_offload_enabled)
+ return ops->is_offload_enabled(xhci, virt_dev, ep_index);
+
+ return false;
+}
+
/*
* Create a new ring with zero or more segments.
*
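These wrappers follow the usual optional-ops pattern: every callback is guarded by a NULL check, so a vendor implementation may provide only the hooks it needs. As a sketch only (vnd_share_ring() and the vnd_ prefix are hypothetical, not part of this patch), a vendor ->alloc_transfer_ring() could reuse the stock ring allocator and then publish the ring to its offload coprocessor:

static struct xhci_ring *vnd_alloc_transfer_ring(struct xhci_hcd *xhci,
		u32 endpoint_type, enum xhci_ring_type ring_type,
		unsigned int max_packet, gfp_t mem_flags)
{
	struct xhci_ring *ring;

	/* Reuse the stock allocator, then hand the ring over to the
	 * offload coprocessor; vnd_share_ring() is a placeholder. */
	ring = xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
	if (!ring)
		return NULL;

	if (vnd_share_ring(xhci, ring)) {
		xhci_ring_free(xhci, ring);
		return NULL;
	}
	return ring;
}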
@@ -412,7 +444,11 @@ void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index)
{
- xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+ if (xhci_vendor_is_offload_enabled(xhci, virt_dev, ep_index))
+ xhci_vendor_free_transfer_ring(xhci, virt_dev, ep_index);
+ else
+ xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+
virt_dev->eps[ep_index].ring = NULL;
}
@@ -885,7 +921,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
for (i = 0; i < 31; i++) {
if (dev->eps[i].ring)
- xhci_ring_free(xhci, dev->eps[i].ring);
+ xhci_free_endpoint_ring(xhci, dev, i);
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
@@ -1487,8 +1523,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
mult = 0;
/* Set up the endpoint ring */
- virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ if (xhci_vendor_is_offload_enabled(xhci, virt_dev, ep_index) &&
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ virt_dev->eps[ep_index].new_ring =
+ xhci_vendor_alloc_transfer_ring(xhci, endpoint_type, ring_type,
+ max_packet, mem_flags);
+ } else {
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ }
+
if (!virt_dev->eps[ep_index].new_ring)
return -ENOMEM;
@@ -1832,6 +1876,23 @@ void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
erst->entries = NULL;
}
+static void xhci_vendor_alloc_dcbaa(
+ struct xhci_hcd *xhci, gfp_t flags)
+{
+ struct xhci_offload_ops *ops = xhci_offload_get_ops(xhci);
+
+ if (ops && ops->alloc_dcbaa)
+ ops->alloc_dcbaa(xhci, flags);
+}
+
+static void xhci_vendor_free_dcbaa(struct xhci_hcd *xhci)
+{
+ struct xhci_offload_ops *ops = xhci_offload_get_ops(xhci);
+
+ if (ops && ops->free_dcbaa)
+ ops->free_dcbaa(xhci);
+}
+
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
@@ -1883,9 +1944,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed medium stream array pool");
- if (xhci->dcbaa)
- dma_free_coherent(dev, sizeof(*xhci->dcbaa),
- xhci->dcbaa, xhci->dcbaa->dma);
+ if (xhci_vendor_is_offload_enabled(xhci, NULL, 0)) {
+ xhci_vendor_free_dcbaa(xhci);
+ } else {
+ if (xhci->dcbaa)
+ dma_free_coherent(dev, sizeof(*xhci->dcbaa),
+ xhci->dcbaa, xhci->dcbaa->dma);
+ }
xhci->dcbaa = NULL;
scratchpad_free(xhci);
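Note the (xhci, NULL, 0) arguments above: for DCBAA decisions the query is controller-wide rather than per-endpoint, so implementations must tolerate a NULL virt_dev. A minimal sketch honouring that convention (vnd_fw_ready() and vnd_ep_is_offloaded() are assumed helpers, not part of this patch):

static bool vnd_is_offload_enabled(struct xhci_hcd *xhci,
		struct xhci_virt_device *vdev, unsigned int ep_index)
{
	if (!vnd_fw_ready(xhci))	/* hypothetical global check */
		return false;

	/* NULL vdev means "is offload enabled at all?" (DCBAA paths). */
	if (!vdev)
		return true;

	return vnd_ep_is_offloaded(vdev, ep_index);	/* hypothetical */
}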
@@ -2422,15 +2487,21 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* xHCI section 5.4.6 - Device Context array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
- xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
- flags);
- if (!xhci->dcbaa)
- goto fail;
- xhci->dcbaa->dma = dma;
+ if (xhci_vendor_is_offload_enabled(xhci, NULL, 0)) {
+ xhci_vendor_alloc_dcbaa(xhci, flags);
+ if (!xhci->dcbaa)
+ goto fail;
+ } else {
+ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
+ flags);
+ if (!xhci->dcbaa)
+ goto fail;
+ xhci->dcbaa->dma = dma;
+ }
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
- xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
+ xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
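The xhci_mem_init() hunk above implies a contract for ->alloc_dcbaa(): on success it must leave both xhci->dcbaa and xhci->dcbaa->dma valid, because the caller only NULL-checks xhci->dcbaa and then writes xhci->dcbaa->dma to the DCBAA pointer register. A minimal sketch that meets the contract, with plain dma_alloc_coherent() standing in for vendor-specific placement:

static void vnd_alloc_dcbaa(struct xhci_hcd *xhci, gfp_t flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	dma_addr_t dma;

	/* A real implementation would allocate from memory that the
	 * offload coprocessor can also address. */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa),
					 &dma, flags);
	if (xhci->dcbaa)
		xhci->dcbaa->dma = dma;
}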
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -173,6 +173,23 @@ static const struct of_device_id usb_xhci_of_match[] = {
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif
+static int xhci_vendor_init(struct xhci_hcd *xhci)
+{
+ struct xhci_offload_ops *ops = xhci_offload_get_ops(xhci);
+
+ if (ops && ops->offload_init)
+ return ops->offload_init(xhci);
+ return 0;
+}
+
+static void xhci_vendor_cleanup(struct xhci_hcd *xhci)
+{
+ struct xhci_offload_ops *ops = xhci_offload_get_ops(xhci);
+
+ if (ops && ops->offload_cleanup)
+ ops->offload_cleanup(xhci);
+}
+
static int xhci_plat_probe(struct platform_device *pdev)
{
const struct xhci_plat_priv *priv_match;
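The next two hunks call these wrappers at probe and remove time; a trivial vendor pair might look like this (vnd_fw_attach()/vnd_fw_detach() are placeholders for whatever handshake the offload firmware needs):

static int vnd_offload_init(struct xhci_hcd *xhci)
{
	/* e.g. map shared memory and attach the offload firmware */
	return vnd_fw_attach(xhci_to_hcd(xhci)->self.sysdev);
}

static void vnd_offload_cleanup(struct xhci_hcd *xhci)
{
	vnd_fw_detach(xhci_to_hcd(xhci)->self.sysdev);
}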
@@ -317,6 +334,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto disable_clk;
}
+ ret = xhci_vendor_init(xhci);
+ if (ret)
+ goto disable_usb_phy;
+
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
@@ -410,6 +431,8 @@ static int xhci_plat_remove(struct platform_device *dev)
if (shared_hcd)
usb_put_hcd(shared_hcd);
+ xhci_vendor_cleanup(xhci);
+
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
usb_put_hcd(hcd);
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -13,6 +13,7 @@
struct xhci_plat_priv {
const char *firmware_name;
unsigned long long quirks;
+ struct xhci_offload_ops *offload_ops;
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
int (*suspend_quirk)(struct usb_hcd *);
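With the new field in place, a vendor glue driver hands its callbacks to the core through its xhci_plat_priv; xhci_offload_get_ops() recovers them via xhci_to_priv() in xhci.c below. A hypothetical wiring, in which every vnd_* symbol is an assumption for illustration:

static struct xhci_offload_ops vnd_offload_ops = {
	.offload_init		= vnd_offload_init,
	.offload_cleanup	= vnd_offload_cleanup,
	.is_offload_enabled	= vnd_is_offload_enabled,
	.alloc_dcbaa		= vnd_alloc_dcbaa,
	.free_dcbaa		= vnd_free_dcbaa,
	.alloc_transfer_ring	= vnd_alloc_transfer_ring,
	.free_transfer_ring	= vnd_free_transfer_ring,
	.usb_offload_skip_urb	= vnd_skip_urb,
};

static const struct xhci_plat_priv xhci_plat_vnd = {
	.offload_ops = &vnd_offload_ops,
};

Because every wrapper checks the individual function pointer, any of these members may be left NULL.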
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -22,6 +22,7 @@
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"
+#include "xhci-plat.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
@@ -1669,6 +1670,11 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
return -ENODEV;
}
+ if (xhci_vendor_usb_offload_skip_urb(xhci, urb)) {
+ xhci_dbg(xhci, "skip urb for usb offload\n");
+ return -EOPNOTSUPP;
+ }
+
if (usb_endpoint_xfer_isoc(&urb->ep->desc))
num_tds = urb->number_of_packets;
else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
@@ -4441,6 +4447,21 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
return ret;
}
+struct xhci_offload_ops *xhci_offload_get_ops(struct xhci_hcd *xhci)
+{
+ return xhci_to_priv(xhci)->offload_ops;
+}
+EXPORT_SYMBOL_GPL(xhci_offload_get_ops);
+
+bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb)
+{
+ struct xhci_offload_ops *ops = xhci_offload_get_ops(xhci);
+
+ if (ops && ops->usb_offload_skip_urb)
+ return ops->usb_offload_skip_urb(xhci, urb);
+ return false;
+}
+
#ifdef CONFIG_PM
/* BESL to HIRD Encoding array for USB2 LPM */
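For completeness, a sketch of the vendor side of ->usb_offload_skip_urb(): returning true makes xhci_urb_enqueue() fail with -EOPNOTSUPP, so a vendor would typically reject only traffic its coprocessor already owns (vnd_ep_is_offloaded() is hypothetical):

static bool vnd_skip_urb(struct xhci_hcd *xhci, struct urb *urb)
{
	struct xhci_virt_device *vdev;
	unsigned int ep_index;

	if (!urb->dev->slot_id)
		return false;

	vdev = xhci->devs[urb->dev->slot_id];
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	return usb_endpoint_xfer_isoc(&urb->ep->desc) &&
	       vnd_ep_is_offloaded(vdev, ep_index);	/* hypothetical */
}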
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2229,6 +2229,37 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
urb->stream_id);
}
+/**
+ * struct xhci_offload_ops - function callbacks for offload-specific operations
+ * @offload_init: called to set up vendor offload support at probe time
+ * @offload_cleanup: called to tear down vendor offload support at remove time
+ * @is_offload_enabled: called to check whether offload is enabled; a NULL @vdev queries the controller-wide (DCBAA) state
+ * @alloc_dcbaa: called to allocate an offload-specific DCBAA; must set xhci->dcbaa (and its dma member) on success
+ * @free_dcbaa: called to free the offload-specific DCBAA
+ * @alloc_transfer_ring: called when a vendor-managed transfer ring is required
+ * @free_transfer_ring: called to free a vendor-managed transfer ring
+ * @usb_offload_skip_urb: called to check whether an URB must be skipped by the host driver
+ */
+struct xhci_offload_ops {
+ int (*offload_init)(struct xhci_hcd *xhci);
+ void (*offload_cleanup)(struct xhci_hcd *xhci);
+ bool (*is_offload_enabled)(struct xhci_hcd *xhci,
+ struct xhci_virt_device *vdev,
+ unsigned int ep_index);
+ void (*alloc_dcbaa)(struct xhci_hcd *xhci, gfp_t flags);
+ void (*free_dcbaa)(struct xhci_hcd *xhci);
+
+ struct xhci_ring *(*alloc_transfer_ring)(struct xhci_hcd *xhci,
+ u32 endpoint_type, enum xhci_ring_type ring_type,
+ unsigned int max_packet, gfp_t mem_flags);
+ void (*free_transfer_ring)(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, unsigned int ep_index);
+ bool (*usb_offload_skip_urb)(struct xhci_hcd *xhci, struct urb *urb);
+};
+
+struct xhci_offload_ops *xhci_offload_get_ops(struct xhci_hcd *xhci);
+bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb);
+
/*
* TODO: As per spec Isochronous IDT transmissions are supported. We bypass
* them anyways as we where unable to find a device that matches the