
mhi: core: Factorize mhi queuing

Message ID 1607599892-6229-1-git-send-email-loic.poulain@linaro.org
State Superseded
Series mhi: core: Factorize mhi queuing

Commit Message

Loic Poulain Dec. 10, 2020, 11:31 a.m. UTC
Instead of duplicating the queuing procedure in mhi_queue_dma(),
mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
as a common helper.

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>

---
 drivers/bus/mhi/core/main.c | 160 +++++++++++---------------------------------
 1 file changed, 38 insertions(+), 122 deletions(-)

-- 
2.7.4
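
Condensed from the full patch at the bottom of this page, the resulting
shape is that each exported entry point only fills a struct mhi_buf_info
and delegates to the new common helper; mhi_queue_buf() is shown here,
and mhi_queue_skb()/mhi_queue_dma() follow the same pattern:

/* New common helper: PM-state and ring-full checks, TRE generation, wake
 * handling and doorbell ring, all under pm_lock (full body in the diff below).
 */
static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
		     enum dma_data_direction dir, enum mhi_flags mflags);

int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_buf_info buf_info = { };

	buf_info.v_addr = buf;
	buf_info.cb_buf = buf;
	buf_info.len = len;

	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);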

Comments

Hemant Kumar Jan. 7, 2021, 2:45 a.m. UTC | #1
Hi Loic,

On 12/10/20 3:31 AM, Loic Poulain wrote:
> Instead of duplicating the queuing procedure in mhi_queue_dma(),
> mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
> as a common helper.
> 
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
> ---
>   drivers/bus/mhi/core/main.c | 160 +++++++++++---------------------------------
>   1 file changed, 38 insertions(+), 122 deletions(-)
> 

> diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
> index 3871ef0..4fa4c88 100644
> --- a/drivers/bus/mhi/core/main.c
> +++ b/drivers/bus/mhi/core/main.c
> @@ -963,118 +963,78 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
>   	return (tmp == ring->rp);
>   }
>   
> -int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
> -		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
> +static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
> +		     enum dma_data_direction dir, enum mhi_flags mflags)
>   {
>   	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
>   	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
>   							     mhi_dev->dl_chan;
>   	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
> -	struct mhi_buf_info buf_info = { };
> +	unsigned long flags;
>   	int ret;
>   
> -	/* If MHI host pre-allocates buffers then client drivers cannot queue */
> -	if (mhi_chan->pre_alloc)
Are we getting rid of auto_queue ?
> -		return -EINVAL;
> +	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
> +		return -EIO;
>   
> -	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
> -		return -ENOMEM;
> +	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
This is what mhi_queue_buf() used, while the rest (_skb, _dma) used
read_lock_bh(). I see that you are now making it re-entrant, as you
identified before :) So the irq variant disables interrupts on the core
this code runs on and disables preemption, whereas the bh variant stays
preemptible. Is that the reason you chose irq instead of bh?
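
For reference, a minimal sketch of the two rwlock flavors on pm_lock,
using the standard kernel rwlock API (illustrative only, not from the
patch):

	unsigned long flags;

	/* _bh flavor (what mhi_queue_skb()/_dma() used): disables softirq
	 * (bottom-half) processing on the local CPU while the lock is held;
	 * not usable from hard-IRQ context.
	 */
	read_lock_bh(&mhi_cntrl->pm_lock);
	/* ... queue work ... */
	read_unlock_bh(&mhi_cntrl->pm_lock);

	/* _irqsave flavor (what mhi_queue_buf() used, now the common path):
	 * disables local interrupts and saves/restores their previous state,
	 * so the section may be entered from any context, including hard IRQ.
	 */
	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
	/* ... queue work ... */
	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);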
[..]

Thanks,
Hemant
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
Manivannan Sadhasivam Jan. 8, 2021, 12:07 p.m. UTC | #2
On Thu, Dec 10, 2020 at 12:31:32PM +0100, Loic Poulain wrote:
> Instead of duplicating the queuing procedure in mhi_queue_dma(),
> mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
> as a common helper.
> 

While doing the factoring, please don't make any additional changes. If
you want to make them, use incremental patches.

Thanks,
Mani

[..]
Manivannan Sadhasivam Jan. 8, 2021, 3:18 p.m. UTC | #3
On Fri, Jan 08, 2021 at 04:10:11PM +0100, Loic Poulain wrote:
> Hi Mani,
> 
> On Fri, 8 Jan 2021 at 13:07, Manivannan Sadhasivam <
> manivannan.sadhasivam@linaro.org> wrote:
> 
> > On Thu, Dec 10, 2020 at 12:31:32PM +0100, Loic Poulain wrote:
> > > Instead of duplicating the queuing procedure in mhi_queue_dma(),
> > > mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
> > > as a common helper.
> > >
> >
> > While doing the factoring, please don't make any additional changes. If
> > you want to make them, use incremental patches.
> >
> 
> Well, I've not really made additional changes; I've just aligned all
> mhi_queue*() to use the _irq variant for locking. That was already what
> mhi_queue_buf() used; mhi_queue_dma() and mhi_queue_skb() now use it as
> well (instead of _bh), as a consequence of the factoring. This has been
> tested carefully.
> 


And you have removed the "pre_alloc" check... Either mention all the
changes in the commit message or do them incrementally.
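
For reference, the guard that was dropped from the old entry points read
as follows (taken from the removed hunks); if it is still wanted, it
could sit at the top of the new mhi_queue() helper:

	/* If MHI host pre-allocates buffers then client drivers cannot queue */
	if (mhi_chan->pre_alloc)
		return -EINVAL;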

Thanks,
Mani

> Regards,
> Loic

Patch

diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 3871ef0..4fa4c88 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -963,118 +963,78 @@  static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
 	return (tmp == ring->rp);
 }
 
-int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
-		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+		     enum dma_data_direction dir, enum mhi_flags mflags)
 {
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
 							     mhi_dev->dl_chan;
 	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
-	struct mhi_buf_info buf_info = { };
+	unsigned long flags;
 	int ret;
 
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
-		return -EINVAL;
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+		return -EIO;
 
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
 
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return -EIO;
+	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+	if (unlikely(ret)) {
+		ret = -ENOMEM;
+		goto exit_unlock;
 	}
 
-	/* we're in M3 or transitioning to M3 */
+	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+	if (unlikely(ret))
+		goto exit_unlock;
+
+	/* trigger M3 exit if necessary */
 	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
 		mhi_trigger_resume(mhi_cntrl);
 
-	/* Toggle wake to exit out of M2 */
+	/* Assert dev_wake (to exit/prevent M1/M2)*/
 	mhi_cntrl->wake_toggle(mhi_cntrl);
 
-	buf_info.v_addr = skb->data;
-	buf_info.cb_buf = skb;
-	buf_info.len = len;
-
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
 	if (mhi_chan->dir == DMA_TO_DEVICE)
 		atomic_inc(&mhi_cntrl->pending_pkts);
 
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
+	if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+		ret = -EIO;
+		goto exit_unlock;
 	}
 
-	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 
-	return 0;
+exit_unlock:
+	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
-		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
-							     mhi_dev->dl_chan;
-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
 	struct mhi_buf_info buf_info = { };
-	int ret;
 
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
-		return -EINVAL;
-
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
-
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		dev_err(dev, "MHI is not in activate state, PM state: %s\n",
-			to_mhi_pm_state_str(mhi_cntrl->pm_state));
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-
-		return -EIO;
-	}
+	buf_info.v_addr = skb->data;
+	buf_info.cb_buf = skb;
+	buf_info.len = len;
 
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+	struct mhi_buf_info buf_info = { };
 
 	buf_info.p_addr = mhi_buf->dma_addr;
 	buf_info.cb_buf = mhi_buf;
 	buf_info.pre_mapped = true;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
-	}
-
-	read_unlock_bh(&mhi_cntrl->pm_lock);
-
-	return 0;
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_dma);
 
@@ -1128,57 +1088,13 @@  int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
 		  void *buf, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
-							     mhi_dev->dl_chan;
-	struct mhi_ring *tre_ring;
 	struct mhi_buf_info buf_info = { };
-	unsigned long flags;
-	int ret;
-
-	/*
-	 * this check here only as a guard, it's always
-	 * possible mhi can enter error while executing rest of function,
-	 * which is not fatal so we do not need to hold pm_lock
-	 */
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
-		return -EIO;
-
-	tre_ring = &mhi_chan->tre_ring;
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
 
 	buf_info.v_addr = buf;
 	buf_info.cb_buf = buf;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret))
-		return ret;
-
-	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
-
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		unsigned long flags;
-
-		read_lock_irqsave(&mhi_chan->lock, flags);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_irqrestore(&mhi_chan->lock, flags);
-	}
-
-	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
-
-	return 0;
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_buf);