@@ -631,6 +631,46 @@ static void xhci_debugfs_create_ports(struct xhci_hcd *xhci,
}
}
+static int xhci_port_bw_show(struct seq_file *s, void *unused)
+{
+ struct xhci_hcd *xhci = s->private;
+ unsigned int num_ports;
+ unsigned int i;
+ int ret;
+ u8 bw_table[MAX_HC_PORTS] = {0};
+
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+ /* get roothub port bandwidth */
+ ret = xhci_get_port_bandwidth(xhci, bw_table);
+ if (ret)
+ return ret;
+
+ /* print all roothub ports available bandwidth */
+ for (i = 1; i <= num_ports; i++)
+ seq_printf(s, "port[%d] available bw: %d%%.\n", i, bw_table[i]);
+
+ return 0;
+}
+
+static int bw_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xhci_port_bw_show, inode->i_private);
+}
+
+static const struct file_operations bw_fops = {
+ .open = bw_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xhci_debugfs_create_bandwidth(struct xhci_hcd *xhci,
+ struct dentry *parent)
+{
+ debugfs_create_file("port_bandwidth", 0644, parent, xhci, &bw_fops);
+}
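+
+/*
+ * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug (the
+ * controller directory name is platform dependent and the output values
+ * below are illustrative only):
+ *
+ *   # cat /sys/kernel/debug/usb/xhci/<controller>/port_bandwidth
+ *   port[1] available bw: 100%.
+ *   port[2] available bw: 80%.
+ */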
+
void xhci_debugfs_init(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
@@ -681,6 +721,8 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root);
xhci_debugfs_create_ports(xhci, xhci->debugfs_root);
+
+ xhci_debugfs_create_bandwidth(xhci, xhci->debugfs_root);
}
void xhci_debugfs_exit(struct xhci_hcd *xhci)
@@ -1867,6 +1867,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
case TRB_NEC_GET_FW:
xhci_handle_cmd_nec_get_fw(xhci, event);
break;
+ case TRB_GET_BW:
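+ /*
+ * No command-specific handling is needed here; the generic command
+ * completion code below wakes the waiter in xhci_get_port_bandwidth().
+ */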
+ break;
default:
/* Skip over unknown commands on the event ring */
xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
@@ -4414,6 +4416,18 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
command_must_succeed);
}
+/*
+ * Queue a Get Port Bandwidth command TRB. A slot_id of 0 requests the
+ * available bandwidth of the root hub ports (xHCI rev1.2, section 4.6.15).
+ */
+int xhci_queue_get_rh_port_bw(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
+ u8 dev_speed, u32 slot_id, bool command_must_succeed)
+{
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_GET_BW) | DEV_SPEED_FOR_TRB(dev_speed) |
+ SLOT_ID_FOR_TRB(slot_id),
+ command_must_succeed);
+}
+
/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
@@ -3088,6 +3088,80 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
+/*
+ * Get the available bandwidth of the ports under the xHCI root hub, both
+ * USB 2.0 and USB 3.0 ports, by issuing one Get Port Bandwidth command per
+ * speed and merging the results into bw_table.
+ */
+int xhci_get_port_bandwidth(struct xhci_hcd *xhci, u8 *bw_table)
+{
+ unsigned int num_ports;
+ unsigned int i;
+ struct xhci_command *cmd;
+ dma_addr_t dma_handle;
+ void *dma_buf;
+ int ret;
+ unsigned long flags;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+ cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ dma_buf = dma_alloc_coherent(dev, xhci->page_size, &dma_handle,
+ GFP_KERNEL);
+ if (!dma_buf) {
+ xhci_free_command(xhci, cmd);
+ return -ENOMEM;
+ }
+
+ /* Get root hub USB 3.0 port bandwidth; see xHCI rev1.2 spec, section 4.6.15 */
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_get_rh_port_bw(xhci, cmd, dma_handle, USB_SPEED_SUPER,
+ 0, false);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto out;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ wait_for_completion(cmd->completion);
+
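+ /*
+ * Ports that do not operate at the queried speed report 0 here, so
+ * copy only nonzero entries; the USB 2.0 query below fills in the
+ * remaining ports.
+ */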
+ /* Byte 0 of the Port Bandwidth Context is reserved (xHCI rev1.2, section 6.2.6) */
+ for (i = 1; i <= num_ports; i++) {
+ if (((u8 *)dma_buf)[i])
+ bw_table[i] = ((u8 *)dma_buf)[i];
+ }
+
+ /* Get root hub USB 2.0 port bandwidth; see xHCI rev1.2 spec, section 4.6.15 */
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_get_rh_port_bw(xhci, cmd, dma_handle, USB_SPEED_HIGH,
+ 0, false);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto out;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ wait_for_completion(cmd->completion);
+
+ /* Byte 0 of the Port Bandwidth Context is reserved (xHCI rev1.2, section 6.2.6) */
+ for (i = 1; i <= num_ports; i++) {
+ if (((u8 *)dma_buf)[i])
+ bw_table[i] = ((u8 *)dma_buf)[i];
+ }
+
+out:
+ dma_free_coherent(dev, xhci->page_size, dma_buf, dma_handle);
+ xhci_free_command(xhci, cmd);
+
+ return ret;
+}
+
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
@@ -999,6 +999,9 @@ enum xhci_setup_dev {
/* bits 16:23 are the virtual function ID */
/* bits 24:31 are the slot ID */
+/* bits 16:19 are the dev speed (Get Port Bandwidth command TRB) */
+#define DEV_SPEED_FOR_TRB(p) ((p) << 16)
+
/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
#define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23)
#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23)
@@ -1907,6 +1910,10 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id,
bool command_must_succeed);
+int xhci_queue_get_rh_port_bw(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
+ u8 dev_speed, u32 slot_id, bool command_must_succeed);
+int xhci_get_port_bandwidth(struct xhci_hcd *xhci, u8 *bw_table);
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,