@@ -208,6 +208,20 @@
#define MHI_RSCTRE_DATA_DWORD1 cpu_to_le32(FIELD_PREP(GENMASK(23, 16), \
MHI_PKT_TYPE_COALESCING))
+/* MHI Bandwidth scaling offsets */
+#define BW_SCALE_CFG_OFFSET (0x04)
+#define BW_SCALE_CFG_CHAN_DB_ID_SHIFT (25)
+#define BW_SCALE_CFG_ENABLED_MASK BIT(24)
+#define BW_SCALE_CFG_ENABLED_SHIFT (24)
+#define BW_SCALE_CFG_ER_ID_SHIFT (19)
+
+#define BW_SCALE_CAP_ID (3)
+
+/* TRE dwords are __le32 on the wire; convert before extracting the sequence */
+#define MHI_TRE_GET_EV_BW_REQ_SEQ(tre) ((le32_to_cpu((tre)->dword[0]) >> 8) & 0xFF)
+
+/* Doorbell payload: status nibble in bits 11:8, request sequence in bits 7:0 */
+#define MHI_BW_SCALE_RESULT(status, seq) (((status) & 0xF) << 8 | \
+					  ((seq) & 0xFF))
+#define MHI_BW_SCALE_NACK 0xF
+
enum mhi_pkt_type {
MHI_PKT_TYPE_INVALID = 0x0,
MHI_PKT_TYPE_NOOP_CMD = 0x1,
@@ -496,10 +496,56 @@ static int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capab
return -ENXIO;
}
+/* to be used only if a single event ring with the type is present */
+static int mhi_get_er_index(struct mhi_controller *mhi_cntrl,
+			    enum mhi_er_data_type type)
+{
+	int i;
+
+	/* return the first (and only expected) event ring matching the type */
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++) {
+		struct mhi_event *ev = &mhi_cntrl->mhi_event[i];
+
+		if (ev->data_type == type)
+			return ev->er_index;
+	}
+
+	return -ENOENT;
+}
+
+/*
+ * Advertise host support for bandwidth scaling by programming the BW scale
+ * capability register with the doorbell channel ID and the index of the
+ * event ring the device should target with BW_REQ events.
+ *
+ * Returns 0 on success, negative errno if the capability or event ring is
+ * absent, or if @bw_scale_db does not fit the DB_CHAN_ID field.
+ */
+static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl,
+			     int bw_scale_db)
+{
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	u32 bw_cfg_offset, val;
+	int ret, er_index;
+
+	/* device does not support BW scale if the capability is absent */
+	ret = mhi_get_capability_offset(mhi_cntrl, BW_SCALE_CAP_ID,
+					&bw_cfg_offset);
+	if (ret)
+		return ret;
+
+	/* No ER configured to support BW scale */
+	er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_BW_SCALE);
+	if (er_index < 0)
+		return er_index;
+
+	/* DB_CHAN_ID occupies bits 31:25; a wider ID would corrupt the cfg */
+	if (bw_scale_db >= BIT(32 - BW_SCALE_CFG_CHAN_DB_ID_SHIFT))
+		return -EINVAL;
+
+	bw_cfg_offset += BW_SCALE_CFG_OFFSET;
+
+	/* advertise host support */
+	val = ((u32)bw_scale_db << BW_SCALE_CFG_CHAN_DB_ID_SHIFT) |
+	      BW_SCALE_CFG_ENABLED_MASK |
+	      ((u32)er_index << BW_SCALE_CFG_ER_ID_SHIFT);
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset, val);
+
+	dev_info(dev, "Bandwidth scaling setup complete. Event ring:%d\n",
+		 er_index);
+
+	return 0;
+}
+
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
u32 val;
- int i, ret;
+ int i, ret, doorbell;
struct mhi_chan *mhi_chan;
struct mhi_event *mhi_event;
void __iomem *base = mhi_cntrl->regs;
@@ -633,6 +679,16 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
return ret;
}
+	/*
+	 * BW scale doorbell is optional; only read 'doorbell' after it has
+	 * been assigned (it is declared uninitialized above, so checking it
+	 * when get_misc_doorbell is absent would be an uninitialized read).
+	 */
+	if (mhi_cntrl->get_misc_doorbell) {
+		doorbell = mhi_cntrl->get_misc_doorbell(mhi_cntrl, MHI_ER_BW_SCALE);
+		if (doorbell > 0) {
+			ret = mhi_init_bw_scale(mhi_cntrl, doorbell);
+			if (!ret)
+				/*
+				 * NOTE(review): 'val' holds the last doorbell
+				 * offset read earlier in this function —
+				 * confirm it is CHDBOFF, since 'doorbell' is
+				 * a channel doorbell ID.
+				 */
+				mhi_cntrl->bw_scale_db = base + val + (8 * doorbell);
+			else
+				dev_warn(dev, "BW scale setup failure\n");
+		}
+	}
return 0;
}
@@ -778,6 +834,9 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
case MHI_ER_CTRL:
mhi_event->process_event = mhi_process_ctrl_ev_ring;
break;
+ case MHI_ER_BW_SCALE:
+ mhi_event->process_event = mhi_process_bw_scale_ev_ring;
+ break;
default:
dev_err(dev, "Event Ring type not supported\n");
goto error_ev_cfg;
@@ -1012,9 +1071,12 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mhi_event->mhi_cntrl = mhi_cntrl;
spin_lock_init(&mhi_event->lock);
+ mutex_init(&mhi_event->mutex);
if (mhi_event->data_type == MHI_ER_CTRL)
tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
(ulong)mhi_event);
+ else if (mhi_event->data_type == MHI_ER_BW_SCALE)
+ INIT_WORK(&mhi_event->work, mhi_process_ev_work);
else
tasklet_init(&mhi_event->task, mhi_ev_task,
(ulong)mhi_event);
@@ -241,6 +241,8 @@ struct mhi_event {
struct mhi_ring ring;
struct db_cfg db_cfg;
struct tasklet_struct task;
+ struct work_struct work;
+ struct mutex mutex;
spinlock_t lock;
int (*process_event)(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event,
@@ -403,7 +405,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
-
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
@@ -419,5 +422,5 @@ void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info);
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info);
-
+void mhi_process_ev_work(struct work_struct *work);
#endif /* _MHI_INT_H */
@@ -472,7 +472,10 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
if (mhi_dev)
mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
} else {
- tasklet_schedule(&mhi_event->task);
+ if (mhi_event->data_type == MHI_ER_BW_SCALE)
+ queue_work(mhi_cntrl->hiprio_wq, &mhi_event->work);
+ else
+ tasklet_schedule(&mhi_event->task);
}
return IRQ_HANDLED;
@@ -1049,6 +1052,103 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
return count;
}
+/*
+ * Dedicated bw scale event ring processing.
+ *
+ * Reads the latest BW_REQ event from the ring, forwards the requested link
+ * speed/width to the controller driver's bw_scale() callback, and reports
+ * the result (ACK or NACK plus the request sequence) to the device via the
+ * BW scale doorbell.
+ */
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+				 struct mhi_event *mhi_event, u32 event_quota)
+{
+	struct mhi_event_ctxt *er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
+	u32 response = MHI_BW_SCALE_NACK;
+	struct mhi_ring_element *dev_rp;
+	struct mhi_link_info link_info;
+	int ret = -EINVAL;
+
+	/* bw_scale is an optional callback; nothing to do without it */
+	if (!mhi_cntrl->bw_scale)
+		return -EINVAL;
+
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+		ret = -EIO;
+		goto exit_bw_scale_process;
+	}
+
+	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+		goto exit_bw_scale_process;
+
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(dev,
+			"Event ring rp points outside of the event ring\n");
+		ret = -EIO;
+		goto exit_bw_scale_process;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
+
+	/* if rp points to base, we need to wrap it around */
+	if (dev_rp == ev_ring->base)
+		dev_rp = ev_ring->base + ev_ring->len;
+	dev_rp--;
+
+	/*
+	 * Only the most recent request matters: fast forward to the currently
+	 * processed element and recycle the event ring.
+	 */
+	ev_ring->rp = dev_rp;
+	ev_ring->wp = dev_rp - 1;
+	if (ev_ring->wp < ev_ring->base)
+		ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
+	mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+
+	if (WARN_ON(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_BW_REQ_EVENT)) {
+		dev_err(dev, "!BW SCALE REQ event\n");
+		goto exit_bw_scale_process;
+	}
+
+	link_info.target_link_speed = MHI_TRE_GET_EV_LINKSPEED(dev_rp);
+	link_info.target_link_width = MHI_TRE_GET_EV_LINKWIDTH(dev_rp);
+	link_info.sequence_num = MHI_TRE_GET_EV_BW_REQ_SEQ(dev_rp);
+
+	/* per-event message: debug level to avoid flooding the kernel log */
+	dev_dbg(dev, "Received BW_REQ with seq:%d link speed:0x%x width:0x%x\n",
+		link_info.sequence_num,
+		link_info.target_link_speed,
+		link_info.target_link_width);
+
+	/* bring host and device out of suspended states */
+	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
+	if (ret)
+		goto exit_bw_scale_process;
+
+	mhi_cntrl->runtime_get(mhi_cntrl);
+
+	/* controller driver performs the actual bw switch; NACK on failure */
+	ret = mhi_cntrl->bw_scale(mhi_cntrl, &link_info);
+	if (!ret)
+		response = 0;
+
+	response = MHI_BW_SCALE_RESULT(response, link_info.sequence_num);
+
+	write_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0, response);
+	write_unlock_bh(&mhi_cntrl->pm_lock);
+
+	mhi_cntrl->runtime_put(mhi_cntrl);
+	mhi_device_put(mhi_cntrl->mhi_dev);
+
+exit_bw_scale_process:
+	dev_dbg(dev, "exit er_index:%u ret:%d\n", mhi_event->er_index, ret);
+
+	return ret;
+}
+
+/* workqueue handler for event rings whose processing may sleep */
+void mhi_process_ev_work(struct work_struct *work)
+{
+	struct mhi_event *ev = container_of(work, struct mhi_event, work);
+	struct mhi_controller *mhi_cntrl = ev->mhi_cntrl;
+
+	/* bail out early if register access is no longer permitted */
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+		return;
+
+	/* serialize processing of this ring against concurrent workers */
+	mutex_lock(&ev->mutex);
+	ev->process_event(mhi_cntrl, ev, U32_MAX);
+	mutex_unlock(&ev->mutex);
+}
+
void mhi_ev_task(unsigned long data)
{
struct mhi_event *mhi_event = (struct mhi_event *)data;
@@ -523,7 +523,10 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
if (mhi_event->offload_ev)
continue;
disable_irq(mhi_cntrl->irq[mhi_event->irq]);
- tasklet_kill(&mhi_event->task);
+ if (mhi_event->data_type == MHI_ER_BW_SCALE)
+ cancel_work_sync(&mhi_event->work);
+ else
+ tasklet_kill(&mhi_event->task);
}
/* Release lock and wait for all pending threads to complete */
@@ -670,7 +673,10 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
continue;
- tasklet_kill(&mhi_event->task);
+ if (mhi_event->data_type == MHI_ER_BW_SCALE)
+ cancel_work_sync(&mhi_event->work);
+ else
+ tasklet_kill(&mhi_event->task);
}
/* Release lock and wait for all pending threads to complete */
@@ -102,10 +102,12 @@ struct image_info {
* struct mhi_link_info - BW requirement
* target_link_speed - Link speed as defined by TLS bits in LinkControl reg
* target_link_width - Link width as defined by NLW bits in LinkStatus reg
+ * sequence_num - used by device to track bw requests sent to host
*/
struct mhi_link_info {
unsigned int target_link_speed;
unsigned int target_link_width;
+ int sequence_num;
};
/**
@@ -183,10 +185,12 @@ enum mhi_ch_ee_mask {
* enum mhi_er_data_type - Event ring data types
* @MHI_ER_DATA: Only client data over this ring
* @MHI_ER_CTRL: MHI control data and client data
+ * @MHI_ER_BW_SCALE: MHI controller bandwidth scale functionality
*/
enum mhi_er_data_type {
MHI_ER_DATA,
MHI_ER_CTRL,
+ MHI_ER_BW_SCALE,
};
/**
@@ -299,6 +303,7 @@ struct mhi_controller_config {
* @bhi: Points to base of MHI BHI register space
* @bhie: Points to base of MHI BHIe register space
* @wake_db: MHI WAKE doorbell register address
+ * @bw_scale_db: MHI BW_SCALE doorbell register address
* @iova_start: IOMMU starting address for data (required)
* @iova_stop: IOMMU stop address for data (required)
* @fw_image: Firmware image name for normal booting (optional)
@@ -355,6 +360,8 @@ struct mhi_controller_config {
* @write_reg: Write a MHI register via the physical link (required)
* @reset: Controller specific reset function (optional)
* @edl_trigger: CB function to trigger EDL mode (optional)
+ * @get_misc_doorbell: function to get the doorbell used for MISC features like BW scale etc (optional)
+ * @bw_scale: CB function for passing BW scale info (optional)
* @buffer_len: Bounce buffer length
* @index: Index of the MHI controller instance
* @bounce_buf: Use of bounce buffer
@@ -376,6 +383,7 @@ struct mhi_controller {
void __iomem *bhi;
void __iomem *bhie;
void __iomem *wake_db;
+ void __iomem *bw_scale_db;
dma_addr_t iova_start;
dma_addr_t iova_stop;
@@ -440,6 +448,11 @@ struct mhi_controller {
void (*reset)(struct mhi_controller *mhi_cntrl);
int (*edl_trigger)(struct mhi_controller *mhi_cntrl);
+ int (*get_misc_doorbell)(struct mhi_controller *mhi_cntrl,
+ enum mhi_er_data_type type);
+ int (*bw_scale)(struct mhi_controller *mhi_cntrl,
+ struct mhi_link_info *link_info);
+
size_t buffer_len;
int index;
bool bounce_buf;
As per MHI spec sec 14, MHI supports bandwidth scaling to reduce power consumption. MHI bandwidth scaling is advertised in devices that contain the bandwidth scaling capability registers. If enabled, the device aggregates bandwidth requirements and sends them to the host in the form of an event. After the host performs the bandwidth switch, it sends an acknowledgment by ringing a doorbell. If the host supports bandwidth scaling events, then it must set the BW_CFG.ENABLED bit, set BW_CFG.DB_CHAN_ID to the channel ID of the doorbell that will be used by the host to communicate the bandwidth scaling status, and set BW_CFG.ER_INDEX to the index of the event ring to which the device should send bandwidth scaling requests, all in the bandwidth scaling capability register. As part of MMIO init, check whether the bw scale capability is present; if present, advertise that the host supports bw scale by setting all the required fields. The MHI layer will only forward the bw scaling request to the controller driver; it is the responsibility of the controller driver to do the actual bw scaling and then pass the status to MHI. MHI will respond back to the device based upon the status of the bw scale received. Add a new get_misc_doorbell() to get a doorbell for misc capabilities, to use the doorbell with mhi events like MHI BW scale etc. Use a workqueue & mutex for the bw scale events, as pci_set_target_speed(), which will be called by the mhi controller driver, can sleep. Signed-off-by: Krishna Chaitanya Chundru <krishna.chundru@oss.qualcomm.com> --- drivers/bus/mhi/common.h | 14 ++++++ drivers/bus/mhi/host/init.c | 64 ++++++++++++++++++++++++- drivers/bus/mhi/host/internal.h | 7 ++- drivers/bus/mhi/host/main.c | 102 +++++++++++++++++++++++++++++++++++++++- drivers/bus/mhi/host/pm.c | 10 +++- include/linux/mhi.h | 13 +++++ 6 files changed, 204 insertions(+), 6 deletions(-)