--- a/drivers/bus/mhi/ep/internal.h
+++ b/drivers/bus/mhi/ep/internal.h
@@ -242,6 +242,8 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl);
int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl);
int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl);
void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl);

/* MHI EP memory management functions */
int mhi_ep_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, size_t size,
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -1105,6 +1105,64 @@ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);

+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+	struct mhi_ep_chan *mhi_chan;
+	u32 tmp;
+	int i;
+
+	for (i = 0; i < mhi_cntrl->max_chan; i++) {
+		mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+		if (!mhi_chan->mhi_dev)
+			continue;
+
+		mutex_lock(&mhi_chan->lock);
+		/* Skip if the channel is not currently running */
+		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
+			mutex_unlock(&mhi_chan->lock);
+			continue;
+		}
+
+		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
+		/* Set channel state to SUSPENDED */
+		tmp &= ~CHAN_CTX_CHSTATE_MASK;
+		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
+		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+		mutex_unlock(&mhi_chan->lock);
+	}
+}
+
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+	struct mhi_ep_chan *mhi_chan;
+	u32 tmp;
+	int i;
+
+	for (i = 0; i < mhi_cntrl->max_chan; i++) {
+		mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+		if (!mhi_chan->mhi_dev)
+			continue;
+
+		mutex_lock(&mhi_chan->lock);
+		/* Skip if the channel is not currently suspended */
+		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
+			mutex_unlock(&mhi_chan->lock);
+			continue;
+		}
+
+		dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
+		/* Set channel state to RUNNING */
+		tmp &= ~CHAN_CTX_CHSTATE_MASK;
+		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+		mutex_unlock(&mhi_chan->lock);
+	}
+}
+
static void mhi_ep_release_device(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
--- a/drivers/bus/mhi/ep/sm.c
+++ b/drivers/bus/mhi/ep/sm.c
@@ -88,8 +88,11 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
	enum mhi_state old_state;
	int ret;

+	/* If MHI is in M3, resume suspended channels */
	spin_lock_bh(&mhi_cntrl->state_lock);
	old_state = mhi_cntrl->mhi_state;
+	if (old_state == MHI_STATE_M3)
+		mhi_ep_resume_channels(mhi_cntrl);

	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	if (ret) {
@@ -135,6 +138,7 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
	}
	spin_unlock_bh(&mhi_cntrl->state_lock);

+	mhi_ep_suspend_channels(mhi_cntrl);
	/* Signal host that the device moved to M3 */
	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
Add support for suspending and resuming the channels in the MHI endpoint
stack. The channels will be moved to the SUSPENDED state during the M3
state transition and resumed back to the RUNNING state during the M0
transition.

Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
 drivers/bus/mhi/ep/internal.h |  2 ++
 drivers/bus/mhi/ep/main.c     | 58 +++++++++++++++++++++++++++++++++++
 drivers/bus/mhi/ep/sm.c       |  4 +++
 3 files changed, 64 insertions(+)
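As a quick illustration of the read-modify-write pattern applied to the
cached channel context above, here is a minimal, self-contained userspace
sketch. The CHSTATE_MASK value, the CH_STATE_* constants, and the
FIELD_GET()/FIELD_PREP() stand-ins are assumptions made only so the snippet
compiles on its own; the real code uses CHAN_CTX_CHSTATE_MASK from
drivers/bus/mhi/ep/internal.h and the helpers from <linux/bitfield.h>:

/*
 * Hypothetical userspace illustration -- not the MHI endpoint code.
 * Mask/state values are made-up stand-ins, and FIELD_GET()/FIELD_PREP()
 * are reimplemented minimally using a GCC/Clang builtin.
 */
#include <stdint.h>
#include <stdio.h>

#define CHSTATE_MASK		0x000000FFU
#define CH_STATE_RUNNING	0x2U
#define CH_STATE_SUSPENDED	0x3U

#define FIELD_SHIFT(mask)	__builtin_ctz(mask)
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> FIELD_SHIFT(mask))
#define FIELD_PREP(mask, val)	(((val) << FIELD_SHIFT(mask)) & (mask))

int main(void)
{
	/* Fake cached channel context: RUNNING state plus unrelated config bits */
	uint32_t chcfg = 0x00ABCD00U | FIELD_PREP(CHSTATE_MASK, CH_STATE_RUNNING);

	/* Suspend path: only touch channels that are currently RUNNING */
	if (FIELD_GET(CHSTATE_MASK, chcfg) == CH_STATE_RUNNING) {
		/* Clear the state field, then set it to SUSPENDED */
		chcfg &= ~CHSTATE_MASK;
		chcfg |= FIELD_PREP(CHSTATE_MASK, CH_STATE_SUSPENDED);
	}

	printf("state: 0x%x, other bits preserved: 0x%08x\n",
	       (unsigned)FIELD_GET(CHSTATE_MASK, chcfg),
	       (unsigned)(chcfg & ~CHSTATE_MASK));
	return 0;
}

The sketch deliberately skips the le32_to_cpu()/cpu_to_le32() conversions
seen in the patch; those are needed in the kernel because ch_ctx_cache holds
the channel context in the little-endian layout shared with the host, while
this toy value never leaves native byte order.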