
[v2] mhi: core: Factorize mhi queuing

Message ID 1610124750-11950-1-git-send-email-loic.poulain@linaro.org
State Superseded
Series [v2] mhi: core: Factorize mhi queuing

Commit Message

Loic Poulain Jan. 8, 2021, 4:52 p.m. UTC
Instead of duplicating the queuing procedure in mhi_queue_dma(),
mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
as a common helper.

Note that the unified mhi_queue() aligns pm_lock locking with the
mhi_queue_buf() behavior, taking the lock with the irqsave variant
(vs _bh for the former queue_skb and queue_dma versions).
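
As an illustration only (not part of this patch; the client helper name
is hypothetical): since pm_lock is now taken with read_lock_irqsave()
rather than read_lock_bh(), the exported queue helpers no longer depend
on being able to re-enable bottom halves on unlock, so a call like the
following can sit on a path where interrupts are already disabled
(assuming the rest of the client path is IRQ-safe):

    #include <linux/mhi.h>
    #include <linux/skbuff.h>

    /* Hypothetical client TX helper; mhi_queue_skb() now funnels into
     * the common mhi_queue() path shown in the diff below.
     */
    static int client_xmit(struct mhi_device *mhi_dev, struct sk_buff *skb)
    {
            return mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len,
                                 MHI_EOT);
    }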

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>

---
 v2: re-integrate pre_alloc check (Mani)
     comment about _bh to _irqsave unification     

 drivers/bus/mhi/core/main.c | 161 +++++++++++---------------------------------
 1 file changed, 40 insertions(+), 121 deletions(-)

-- 
2.7.4

Comments

Manivannan Sadhasivam Jan. 21, 2021, 11:19 a.m. UTC | #1
On Fri, Jan 08, 2021 at 05:52:30PM +0100, Loic Poulain wrote:
> Instead of duplicating the queuing procedure in mhi_queue_dma(),
> mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
> as a common helper.
>
> Note that the unified mhi_queue() aligns pm_lock locking with the
> mhi_queue_buf() behavior, taking the lock with the irqsave variant
> (vs _bh for the former queue_skb and queue_dma versions).
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>


This looks much cleaner!

Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>


Thanks,
Mani

Manivannan Sadhasivam Jan. 21, 2021, 11:23 a.m. UTC | #2
On Fri, Jan 08, 2021 at 05:52:30PM +0100, Loic Poulain wrote:
> Instead of duplicating the queuing procedure in mhi_queue_dma(),
> mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
> as a common helper.
>
> Note that the unified mhi_queue() aligns pm_lock locking with the
> mhi_queue_buf() behavior, taking the lock with the irqsave variant
> (vs _bh for the former queue_skb and queue_dma versions).
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>


Applied to mhi-next!

Thanks,
Mani


Patch

diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index effe94f..2d9157ce 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -969,118 +969,81 @@  static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
 	return (tmp == ring->rp);
 }
 
-int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
-		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+		     enum dma_data_direction dir, enum mhi_flags mflags)
 {
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
 							     mhi_dev->dl_chan;
 	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
-	struct mhi_buf_info buf_info = { };
+	unsigned long flags;
 	int ret;
 
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
+	if (unlikely(mhi_chan->pre_alloc))
 		return -EINVAL;
 
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
-
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
 		return -EIO;
+
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+	if (unlikely(ret)) {
+		ret = -ENOMEM;
+		goto exit_unlock;
 	}
 
-	/* we're in M3 or transitioning to M3 */
+	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+	if (unlikely(ret))
+		goto exit_unlock;
+
+	/* trigger M3 exit if necessary */
 	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
 		mhi_trigger_resume(mhi_cntrl);
 
-	/* Toggle wake to exit out of M2 */
+	/* Assert dev_wake (to exit/prevent M1/M2)*/
 	mhi_cntrl->wake_toggle(mhi_cntrl);
 
-	buf_info.v_addr = skb->data;
-	buf_info.cb_buf = skb;
-	buf_info.len = len;
-
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
 	if (mhi_chan->dir == DMA_TO_DEVICE)
 		atomic_inc(&mhi_cntrl->pending_pkts);
 
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
+	if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+		ret = -EIO;
+		goto exit_unlock;
 	}
 
-	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 
-	return 0;
+exit_unlock:
+	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
-		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
-							     mhi_dev->dl_chan;
-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
 	struct mhi_buf_info buf_info = { };
-	int ret;
-
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
-		return -EINVAL;
-
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
-
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		dev_err(dev, "MHI is not in activate state, PM state: %s\n",
-			to_mhi_pm_state_str(mhi_cntrl->pm_state));
-		read_unlock_bh(&mhi_cntrl->pm_lock);
 
-		return -EIO;
-	}
+	buf_info.v_addr = skb->data;
+	buf_info.cb_buf = skb;
+	buf_info.len = len;
 
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+	struct mhi_buf_info buf_info = { };
 
 	buf_info.p_addr = mhi_buf->dma_addr;
 	buf_info.cb_buf = mhi_buf;
 	buf_info.pre_mapped = true;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
-	}
-
-	read_unlock_bh(&mhi_cntrl->pm_lock);
-
-	return 0;
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_dma);
 
@@ -1134,57 +1097,13 @@  int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
 		  void *buf, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
-							     mhi_dev->dl_chan;
-	struct mhi_ring *tre_ring;
 	struct mhi_buf_info buf_info = { };
-	unsigned long flags;
-	int ret;
-
-	/*
-	 * this check here only as a guard, it's always
-	 * possible mhi can enter error while executing rest of function,
-	 * which is not fatal so we do not need to hold pm_lock
-	 */
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
-		return -EIO;
-
-	tre_ring = &mhi_chan->tre_ring;
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
 
 	buf_info.v_addr = buf;
 	buf_info.cb_buf = buf;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret))
-		return ret;
-
-	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
-
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		unsigned long flags;
-
-		read_lock_irqsave(&mhi_chan->lock, flags);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_irqrestore(&mhi_chan->lock, flags);
-	}
-
-	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
-
-	return 0;
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_buf);