Message ID | 1699341365-47737-5-git-send-email-quic_qianyu@quicinc.com |
---|---|
State | Superseded |
Series | bus: mhi: host: Add lock to avoid race when ringing channel DB |
On Tue, Nov 07, 2023 at 03:16:05PM +0800, Qiang Yu wrote:
> From: Hemant Kumar <quic_hemantk@quicinc.com>
>
> Take irqsave lock after TRE is generated to avoid deadlock due to core
> getting interrupts enabled as local_bh_enable must not be called with
> irqs disabled based on upstream patch.
>

You still didn't address any of the comments provided by Jeff in v2.

- Mani

> Signed-off-by: Hemant Kumar <quic_hemantk@quicinc.com>
> Signed-off-by: Lazarus Motha <quic_lmotha@quicinc.com>
> Signed-off-by: Qiang Yu <quic_qianyu@quicinc.com>
> ---
>  drivers/bus/mhi/host/main.c | 13 +++++--------
>  1 file changed, 5 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
> index b137d54..93b5110 100644
> --- a/drivers/bus/mhi/host/main.c
> +++ b/drivers/bus/mhi/host/main.c
> @@ -1129,17 +1129,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
>  	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
>  		return -EIO;
>
> -	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
> -
>  	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
> -	if (unlikely(ret)) {
> -		ret = -EAGAIN;
> -		goto exit_unlock;
> -	}
> +	if (unlikely(ret))
> +		return -EAGAIN;
>
>  	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
>  	if (unlikely(ret))
> -		goto exit_unlock;
> +		return ret;
> +
> +	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
>
>  	/* Packet is queued, take a usage ref to exit M3 if necessary
>  	 * for host->device buffer, balanced put is done on buffer completion
> @@ -1159,7 +1157,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
>  	if (dir == DMA_FROM_DEVICE)
>  		mhi_cntrl->runtime_put(mhi_cntrl);
>
> -exit_unlock:
>  	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
>
>  	return ret;
> --
> 2.7.4
>
>
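For context on the deadlock the commit message refers to: the rest of this series adds locking around channel doorbell/write-pointer updates, so mhi_gen_tre() presumably ends up taking mhi_chan->lock with write_lock_bh()/write_unlock_bh(). Under the old ordering, that _bh unlock ran inside the pm_lock irqsave section. A rough sketch of the two orderings, as a call-nesting diagram only; the mhi_chan->lock usage inside mhi_gen_tre() is assumed from the series description, not shown in this patch:

/*
 * Old ordering (before this patch) - call nesting, driver details elided:
 *
 *   mhi_queue()
 *     read_lock_irqsave(&mhi_cntrl->pm_lock, flags)      <- hard IRQs off
 *     mhi_gen_tre()
 *       write_lock_bh(&mhi_chan->lock)                   <- assumed, per this series
 *       ... update ring WP, generate TRE ...
 *       write_unlock_bh(&mhi_chan->lock)                 <- __local_bh_enable_ip() with IRQs off
 *     read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags)
 *
 * New ordering (this patch):
 *
 *   mhi_queue()
 *     mhi_gen_tre()                                      <- _bh lock/unlock with IRQs still on
 *     read_lock_irqsave(&mhi_cntrl->pm_lock, flags)
 *     ... wake toggle, doorbell, runtime PM get/put ...
 *     read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags)
 */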
On 11/10/2023 12:29 AM, Manivannan Sadhasivam wrote:
> On Tue, Nov 07, 2023 at 03:16:05PM +0800, Qiang Yu wrote:
>> From: Hemant Kumar <quic_hemantk@quicinc.com>
>>
>> Take irqsave lock after TRE is generated to avoid deadlock due to core
>> getting interrupts enabled as local_bh_enable must not be called with
>> irqs disabled based on upstream patch.
>>
> You still didn't address any of the comments provided by Jeff in v2.
>
> - Mani

Hi Mani, thanks for the review. Sorry for missing this part. I will change the commit message to the following:

If CONFIG_TRACE_IRQFLAGS is enabled, IRQs will be enabled once
__local_bh_enable_ip() is called as part of write_unlock_bh(). Hence,
take the irqsave lock only after the TRE is generated, so that
write_unlock_bh() is never run while the irqsave lock is held.

>> Signed-off-by: Hemant Kumar <quic_hemantk@quicinc.com>
>> Signed-off-by: Lazarus Motha <quic_lmotha@quicinc.com>
>> Signed-off-by: Qiang Yu <quic_qianyu@quicinc.com>
>> ---
>>  drivers/bus/mhi/host/main.c | 13 +++++--------
>>  1 file changed, 5 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
>> index b137d54..93b5110 100644
>> --- a/drivers/bus/mhi/host/main.c
>> +++ b/drivers/bus/mhi/host/main.c
>> @@ -1129,17 +1129,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
>>  	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
>>  		return -EIO;
>>
>> -	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
>> -
>>  	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
>> -	if (unlikely(ret)) {
>> -		ret = -EAGAIN;
>> -		goto exit_unlock;
>> -	}
>> +	if (unlikely(ret))
>> +		return -EAGAIN;
>>
>>  	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
>>  	if (unlikely(ret))
>> -		goto exit_unlock;
>> +		return ret;
>> +
>> +	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
>>
>>  	/* Packet is queued, take a usage ref to exit M3 if necessary
>>  	 * for host->device buffer, balanced put is done on buffer completion
>> @@ -1159,7 +1157,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
>>  	if (dir == DMA_FROM_DEVICE)
>>  		mhi_cntrl->runtime_put(mhi_cntrl);
>>
>> -exit_unlock:
>>  	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
>>
>>  	return ret;
>> --
>> 2.7.4
>>
>>
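The constraint described in the proposed commit message is not MHI-specific: any *_unlock_bh() ends in __local_bh_enable_ip(), which expects hard IRQs to be enabled and, with CONFIG_TRACE_IRQFLAGS, can itself re-enable them, defeating the earlier irqsave. Below is a minimal sketch of the anti-pattern and the fix; the lock names and functions (outer_lock, inner_lock, queue_broken, queue_fixed) are illustrative only and not taken from the driver:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(outer_lock);	/* plays the role of pm_lock */
static DEFINE_RWLOCK(inner_lock);	/* plays the role of mhi_chan->lock */

/*
 * Broken ordering: write_unlock_bh() -> __local_bh_enable_ip() runs while
 * hard IRQs are disabled by the outer irqsave lock, and with
 * CONFIG_TRACE_IRQFLAGS it can re-enable IRQs behind the caller's back.
 */
static void queue_broken(void)
{
	unsigned long flags;

	read_lock_irqsave(&outer_lock, flags);
	write_lock_bh(&inner_lock);
	/* ... update write pointer, generate TRE ... */
	write_unlock_bh(&inner_lock);
	read_unlock_irqrestore(&outer_lock, flags);
}

/*
 * Fixed ordering: finish the _bh critical section first, then take the
 * irqsave lock only for the part that actually needs IRQs disabled.
 */
static void queue_fixed(void)
{
	unsigned long flags;

	write_lock_bh(&inner_lock);
	/* ... update write pointer, generate TRE ... */
	write_unlock_bh(&inner_lock);

	read_lock_irqsave(&outer_lock, flags);
	/* ... wake toggle, doorbell, runtime PM get/put ... */
	read_unlock_irqrestore(&outer_lock, flags);
}

This is the same reordering the patch applies in mhi_queue(): mhi_gen_tre() and its _bh critical section complete first, and pm_lock is taken with read_lock_irqsave() only around the runtime-PM and doorbell bookkeeping that follows.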
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index b137d54..93b5110 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -1129,17 +1129,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
 		return -EIO;
 
-	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
 	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
-	if (unlikely(ret)) {
-		ret = -EAGAIN;
-		goto exit_unlock;
-	}
+	if (unlikely(ret))
+		return -EAGAIN;
 
 	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
 	if (unlikely(ret))
-		goto exit_unlock;
+		return ret;
+
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
 
 	/* Packet is queued, take a usage ref to exit M3 if necessary
 	 * for host->device buffer, balanced put is done on buffer completion
@@ -1159,7 +1157,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (dir == DMA_FROM_DEVICE)
 		mhi_cntrl->runtime_put(mhi_cntrl);
 
-exit_unlock:
 	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
 
 	return ret;