@@ -208,6 +208,22 @@
#define MHI_RSCTRE_DATA_DWORD1 cpu_to_le32(FIELD_PREP(GENMASK(23, 16), \
MHI_PKT_TYPE_COALESCING))
+/* MHI Bandwidth scaling offsets */
+#define MHI_BW_SCALE_CFG_OFFSET 0x4
+#define MHI_BW_SCALE_CAP_ID (3)
+
+#define MHI_BW_SCALE_ENABLE(bw_scale_db, er_index) (FIELD_PREP(GENMASK(31, 25), \
+						    bw_scale_db) | \
+						    FIELD_PREP(GENMASK(23, 19), er_index) | \
+						    BIT(24))
+
+#define MHI_TRE_GET_EV_BW_REQ_SEQ(tre) FIELD_GET(GENMASK(15, 8), (MHI_TRE_GET_DWORD(tre, 0)))
+#define MHI_BW_SCALE_DB_ID(er_index) FIELD_PREP(GENMASK(31, 25), er_index)
+
+#define MHI_BW_SCALE_RESULT(status, seq) (FIELD_PREP(GENMASK(11, 8), status) | \
+					  FIELD_PREP(GENMASK(7, 0), seq))
+#define MHI_BW_SCALE_NACK 0xF
+
enum mhi_pkt_type {
MHI_PKT_TYPE_INVALID = 0x0,
MHI_PKT_TYPE_NOOP_CMD = 0x1,
@@ -496,10 +496,53 @@ static int mhi_find_capability(struct mhi_controller *mhi_cntrl, u32 capability,
return -ENXIO;
}
+static int mhi_get_er_index(struct mhi_controller *mhi_cntrl,
+			    enum mhi_er_data_type type)
+{
+	struct mhi_event *mhi_event;
+	int i;
+
+	/* Return the index of the first event ring configured for @type */
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++) {
+		mhi_event = &mhi_cntrl->mhi_event[i];
+		if (mhi_event->data_type == type)
+			return mhi_event->er_index;
+	}
+	return -ENOENT;
+}
+
+static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl,
+			     int bw_scale_db)
+{
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	int ret, er_index;
+	u32 bw_cfg_offset, val;
+
+	ret = mhi_find_capability(mhi_cntrl, MHI_BW_SCALE_CAP_ID, &bw_cfg_offset);
+	if (ret)
+		return ret;
+
+	er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_BW_SCALE);
+	if (er_index < 0)
+		return er_index;
+
+	bw_cfg_offset += MHI_BW_SCALE_CFG_OFFSET;
+
+	/* Advertise host support */
+	val = MHI_BW_SCALE_ENABLE(bw_scale_db, er_index);
+
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset, val);
+
+	dev_dbg(dev, "Bandwidth scaling setup complete with event ring: %d\n",
+		er_index);
+
+	return 0;
+}
+
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
u32 val;
- int i, ret;
+ int i, ret, doorbell = 0;
struct mhi_chan *mhi_chan;
struct mhi_event *mhi_event;
void __iomem *base = mhi_cntrl->regs;
@@ -633,6 +676,16 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
return ret;
}
+ if (mhi_cntrl->get_misc_doorbell)
+ doorbell = mhi_cntrl->get_misc_doorbell(mhi_cntrl, MHI_ER_BW_SCALE);
+
+ if (doorbell > 0) {
+ ret = mhi_init_bw_scale(mhi_cntrl, doorbell);
+ if (!ret)
+ mhi_cntrl->bw_scale_db = base + val + (8 * doorbell);
+ else
+ dev_warn(dev, "Failed to setup bandwidth scaling: %d\n", ret);
+ }
return 0;
}
@@ -778,6 +831,9 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
case MHI_ER_CTRL:
mhi_event->process_event = mhi_process_ctrl_ev_ring;
break;
+ case MHI_ER_BW_SCALE:
+ mhi_event->process_event = mhi_process_bw_scale_ev_ring;
+ break;
default:
dev_err(dev, "Event Ring type not supported\n");
goto error_ev_cfg;
@@ -1012,9 +1068,12 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mhi_event->mhi_cntrl = mhi_cntrl;
spin_lock_init(&mhi_event->lock);
+ mutex_init(&mhi_event->mutex);
if (mhi_event->data_type == MHI_ER_CTRL)
tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
(ulong)mhi_event);
+ else if (mhi_event->data_type == MHI_ER_BW_SCALE)
+ INIT_WORK(&mhi_event->work, mhi_process_ev_work);
else
tasklet_init(&mhi_event->task, mhi_ev_task,
(ulong)mhi_event);
@@ -248,6 +248,8 @@ struct mhi_event {
struct mhi_ring ring;
struct db_cfg db_cfg;
struct tasklet_struct task;
+ struct work_struct work;
+ struct mutex mutex; /* lock for synchronization */
spinlock_t lock;
int (*process_event)(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event,
@@ -410,7 +412,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
-
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
@@ -426,5 +429,5 @@ void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info);
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info);
-
+void mhi_process_ev_work(struct work_struct *work);
#endif /* _MHI_INT_H */
@@ -472,7 +472,10 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
if (mhi_dev)
mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
} else {
- tasklet_schedule(&mhi_event->task);
+ if (mhi_event->data_type == MHI_ER_BW_SCALE)
+ queue_work(mhi_cntrl->hiprio_wq, &mhi_event->work);
+ else
+ tasklet_schedule(&mhi_event->task);
}
return IRQ_HANDLED;
@@ -1049,6 +1052,99 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
return count;
}
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+				 struct mhi_event *mhi_event, u32 event_quota)
+{
+	struct mhi_event_ctxt *er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
+	u32 response = MHI_BW_SCALE_NACK;
+	struct mhi_ring_element *dev_rp;
+	struct mhi_link_info link_info;
+	int ret = -EINVAL;
+
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+		ret = -EIO;
+		goto exit_bw_scale;
+	}
+
+	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+		goto exit_bw_scale;
+
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(dev, "Event ring rp points outside of the event ring\n");
+		ret = -EIO;
+		goto exit_bw_scale;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
+
+	/* If rp points to base, we need to wrap it around */
+	if (dev_rp == ev_ring->base)
+		dev_rp = ev_ring->base + ev_ring->len;
+	dev_rp--;
+
+	/* Fast forward to currently processed element and recycle er */
+	ev_ring->rp = dev_rp;
+	ev_ring->wp = dev_rp - 1;
+	if (ev_ring->wp < ev_ring->base)
+		ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
+	mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+
+	if (WARN_ON(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_BW_REQ_EVENT)) {
+		dev_err(dev, "!BW SCALE REQ event\n");
+		goto exit_bw_scale;
+	}
+
+	link_info.target_link_speed = MHI_TRE_GET_EV_LINKSPEED(dev_rp);
+	link_info.target_link_width = MHI_TRE_GET_EV_LINKWIDTH(dev_rp);
+	link_info.sequence_num = MHI_TRE_GET_EV_BW_REQ_SEQ(dev_rp);
+
+	dev_dbg(dev, "Received BW_REQ with seq:%d link speed:0x%x width:0x%x\n",
+		link_info.sequence_num, link_info.target_link_speed,
+		link_info.target_link_width);
+
+	/* Bring host and device out of suspended states */
+	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
+	if (ret)
+		goto exit_bw_scale;
+
+	mhi_cntrl->runtime_get(mhi_cntrl);
+
+	/* bw_scale callback is optional; NACK the request if not provided */
+	ret = mhi_cntrl->bw_scale ?
+	      mhi_cntrl->bw_scale(mhi_cntrl, &link_info) : -EINVAL;
+	if (!ret)
+		response = 0;
+
+	response = MHI_BW_SCALE_RESULT(response, link_info.sequence_num);
+
+	write_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0, response);
+	write_unlock_bh(&mhi_cntrl->pm_lock);
+
+	mhi_cntrl->runtime_put(mhi_cntrl);
+	mhi_device_put(mhi_cntrl->mhi_dev);
+
+exit_bw_scale:
+	return ret;
+}
+
+void mhi_process_ev_work(struct work_struct *work)
+{
+	struct mhi_event *mhi_event = container_of(work, struct mhi_event, work);
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+
+	/* Bail out if MHI is no longer in a state where events may be touched */
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+		return;
+
+	/* Serialize with any other processing of this event ring */
+	guard(mutex)(&mhi_event->mutex);
+	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+}
+
void mhi_ev_task(unsigned long data)
{
struct mhi_event *mhi_event = (struct mhi_event *)data;
@@ -523,7 +523,10 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
if (mhi_event->offload_ev)
continue;
disable_irq(mhi_cntrl->irq[mhi_event->irq]);
- tasklet_kill(&mhi_event->task);
+ if (mhi_event->data_type == MHI_ER_BW_SCALE)
+ cancel_work_sync(&mhi_event->work);
+ else
+ tasklet_kill(&mhi_event->task);
}
/* Release lock and wait for all pending threads to complete */
@@ -670,7 +673,10 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
continue;
- tasklet_kill(&mhi_event->task);
+ if (mhi_event->data_type == MHI_ER_BW_SCALE)
+ cancel_work_sync(&mhi_event->work);
+ else
+ tasklet_kill(&mhi_event->task);
}
/* Release lock and wait for all pending threads to complete */
@@ -102,10 +102,12 @@ struct image_info {
* struct mhi_link_info - BW requirement
* target_link_speed - Link speed as defined by TLS bits in LinkControl reg
* target_link_width - Link width as defined by NLW bits in LinkStatus reg
+ * sequence_num - used by device to track bw requests sent to host
*/
struct mhi_link_info {
unsigned int target_link_speed;
unsigned int target_link_width;
+ int sequence_num;
};
/**
@@ -183,10 +185,12 @@ enum mhi_ch_ee_mask {
* enum mhi_er_data_type - Event ring data types
* @MHI_ER_DATA: Only client data over this ring
* @MHI_ER_CTRL: MHI control data and client data
+ * @MHI_ER_BW_SCALE: MHI controller bandwidth scale functionality
*/
enum mhi_er_data_type {
MHI_ER_DATA,
MHI_ER_CTRL,
+ MHI_ER_BW_SCALE,
};
/**
@@ -299,6 +303,7 @@ struct mhi_controller_config {
* @bhi: Points to base of MHI BHI register space
* @bhie: Points to base of MHI BHIe register space
* @wake_db: MHI WAKE doorbell register address
+ * @bw_scale_db: MHI BW_SCALE doorbell register address
* @iova_start: IOMMU starting address for data (required)
* @iova_stop: IOMMU stop address for data (required)
* @fw_image: Firmware image name for normal booting (optional)
@@ -355,6 +360,8 @@ struct mhi_controller_config {
* @write_reg: Write a MHI register via the physical link (required)
* @reset: Controller specific reset function (optional)
* @edl_trigger: CB function to trigger EDL mode (optional)
+ * @get_misc_doorbell: function to get doorbell used for MISC feature like BW scale etc (optional)
+ * @bw_scale: CB function for passing BW scale info (optional)
* @buffer_len: Bounce buffer length
* @index: Index of the MHI controller instance
* @bounce_buf: Use of bounce buffer
@@ -376,6 +383,7 @@ struct mhi_controller {
void __iomem *bhi;
void __iomem *bhie;
void __iomem *wake_db;
+ void __iomem *bw_scale_db;
dma_addr_t iova_start;
dma_addr_t iova_stop;
@@ -440,6 +448,11 @@ struct mhi_controller {
void (*reset)(struct mhi_controller *mhi_cntrl);
int (*edl_trigger)(struct mhi_controller *mhi_cntrl);
+ int (*get_misc_doorbell)(struct mhi_controller *mhi_cntrl,
+ enum mhi_er_data_type type);
+ int (*bw_scale)(struct mhi_controller *mhi_cntrl,
+ struct mhi_link_info *link_info);
+
size_t buffer_len;
int index;
bool bounce_buf;