
[v2,1/2] bus: mhi: core: Fix MHI runtime_pm behavior

Message ID 1617700315-12492-1-git-send-email-loic.poulain@linaro.org
State Accepted
Commit 4547a749be997eb12ea7edcf361ec2a5329f7aec
Series [v2,1/2] bus: mhi: core: Fix MHI runtime_pm behavior

Commit Message

Loic Poulain April 6, 2021, 9:11 a.m. UTC
This change ensures that a PM reference is always taken during packet
queueing and released either once queueing has completed (RX) or once
the buffer has been consumed (TX). This guarantees that the runtime
status of the underlying MHI controller (e.g. the last_busy timestamp)
is properly updated, and prevents suspend from being triggered while
TX packets are in flight or before the RX ring update has completed.

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>

---
 v2: mhi_reset_data_chan: move put under existing DMA_TO_DEVICE if block

 drivers/bus/mhi/core/main.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

-- 
2.7.4
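
[Editor's note: the fix relies on the controller driver's runtime_get()/
runtime_put() callbacks rather than calling runtime PM directly, so the
exact behavior is controller-specific. As a rough, hypothetical sketch
(not part of this patch), a controller driver would typically wire these
callbacks to the standard runtime PM helpers, which is also what keeps
the last_busy timestamp mentioned above accurate; the my_mhi_* names are
illustrative.]

#include <linux/mhi.h>
#include <linux/pm_runtime.h>

static void my_mhi_runtime_get(struct mhi_controller *mhi_cntrl)
{
	/* Async usage-count increment; resumes the device if suspended */
	pm_runtime_get(mhi_cntrl->cntrl_dev);
}

static void my_mhi_runtime_put(struct mhi_controller *mhi_cntrl)
{
	/* Refresh last_busy so autosuspend is pushed back, then drop the ref */
	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
	pm_runtime_put_autosuspend(mhi_cntrl->cntrl_dev);
}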

Comments

Manivannan Sadhasivam April 7, 2021, 5:05 a.m. UTC | #1
On Tue, Apr 06, 2021 at 11:11:54AM +0200, Loic Poulain wrote:
> This change ensures that a PM reference is always taken during packet
> queueing and released either once queueing has completed (RX) or once
> the buffer has been consumed (TX). This guarantees that the runtime
> status of the underlying MHI controller (e.g. the last_busy timestamp)
> is properly updated, and prevents suspend from being triggered while
> TX packets are in flight or before the RX ring update has completed.
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>

Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>


Thanks,
Mani

Manivannan Sadhasivam April 7, 2021, 6:50 a.m. UTC | #2
On Tue, Apr 06, 2021 at 11:11:54AM +0200, Loic Poulain wrote:
> This change ensures that a PM reference is always taken during packet
> queueing and released either once queueing has completed (RX) or once
> the buffer has been consumed (TX). This guarantees that the runtime
> status of the underlying MHI controller (e.g. the last_busy timestamp)
> is properly updated, and prevents suspend from being triggered while
> TX packets are in flight or before the RX ring update has completed.
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>

Applied to mhi-next!

Thanks,
Mani


Patch

diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index c780234..6e72239 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -584,8 +584,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 			/* notify client */
 			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 
-			if (mhi_chan->dir == DMA_TO_DEVICE)
+			if (mhi_chan->dir == DMA_TO_DEVICE) {
 				atomic_dec(&mhi_cntrl->pending_pkts);
+				/* Release the reference got from mhi_queue() */
+				mhi_cntrl->runtime_put(mhi_cntrl);
+			}
 
 			/*
 			 * Recycle the buffer if buffer is pre-allocated,
@@ -1021,9 +1024,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (unlikely(ret))
 		goto exit_unlock;
 
-	/* trigger M3 exit if necessary */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
+	/* Packet is queued, take a usage ref to exit M3 if necessary
+	 * for host->device buffer, balanced put is done on buffer completion
+	 * for device->host buffer, balanced put is after ringing the DB
+	 */
+	mhi_cntrl->runtime_get(mhi_cntrl);
 
 	/* Assert dev_wake (to exit/prevent M1/M2)*/
 	mhi_cntrl->wake_toggle(mhi_cntrl);
@@ -1034,6 +1039,9 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
 		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 
+	if (dir == DMA_FROM_DEVICE)
+		mhi_cntrl->runtime_put(mhi_cntrl);
+
 exit_unlock:
 	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
 
@@ -1416,8 +1424,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
 	while (tre_ring->rp != tre_ring->wp) {
 		struct mhi_buf_info *buf_info = buf_ring->rp;
 
-		if (mhi_chan->dir == DMA_TO_DEVICE)
+		if (mhi_chan->dir == DMA_TO_DEVICE) {
 			atomic_dec(&mhi_cntrl->pending_pkts);
+			/* Release the reference got from mhi_queue() */
+			mhi_cntrl->runtime_put(mhi_cntrl);
+		}
 
 		if (!buf_info->pre_mapped)
 			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
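
[Editor's note: a hedged client-side sketch of what the change guarantees
(the my_* names are hypothetical; mhi_queue_skb(), struct mhi_result and
the ul_xfer_cb callback are the regular in-tree MHI client API). The
runtime PM reference taken inside mhi_queue() for a DMA_TO_DEVICE buffer
is only dropped after the completion callback below has run, so the
controller cannot runtime-suspend while the TX buffer is in flight; for
DMA_FROM_DEVICE buffers the balancing put happens inside mhi_queue()
itself, right after the doorbell is rung.]

#include <linux/mhi.h>
#include <linux/skbuff.h>

static int my_send(struct mhi_device *mhi_dev, struct sk_buff *skb)
{
	/* Queue on the UL (host->device) channel; this takes the PM reference */
	return mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
}

/* ul_xfer_cb in struct mhi_driver: the MHI core decrements pending_pkts and
 * drops the PM reference right after this callback returns.
 */
static void my_ul_callback(struct mhi_device *mhi_dev, struct mhi_result *result)
{
	consume_skb(result->buf_addr);	/* cb_buf passed back by the core */
}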