Message ID | 20210407094617.770495-1-jonathanh@nvidia.com
---|---
State | New
Series | mmc: sdhci-tegra: Add required callbacks to set/clear CQE_EN bit
On Wed, 7 Apr 2021 at 11:46, Jon Hunter <jonathanh@nvidia.com> wrote:
>
> From: Aniruddha Tvs Rao <anrao@nvidia.com>
>
> CMD8 is not supported with Command Queue Enabled. Add required callback
> to clear CQE_EN and CQE_INTR fields in the host controller register
> before sending CMD8. Add corresponding callback in the CQHCI resume path
> to re-enable CQE_EN and CQE_INTR fields.
>
> Reported-by: Kamal Mostafa <kamal@canonical.com>
> Tested-by: Kamal Mostafa <kamal@canonical.com>
> Signed-off-by: Aniruddha Tvs Rao <anrao@nvidia.com>
> Signed-off-by: Jon Hunter <jonathanh@nvidia.com>

Applied for next, thanks!

Please tell, if you think this needs to be tagged for stable kernels
as well (and if so, perhaps we can set a fixes tag as well?).

Kind regards
Uffe

> ---
>  drivers/mmc/host/sdhci-tegra.c | 32 ++++++++++++++++++++++++++++++++
>  1 file changed, 32 insertions(+)
>
> diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
> index c61f797a853f..387ce9cdbd7c 100644
> --- a/drivers/mmc/host/sdhci-tegra.c
> +++ b/drivers/mmc/host/sdhci-tegra.c
> @@ -119,6 +119,10 @@
>  /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
>  #define SDHCI_TEGRA_CQE_BASE_ADDR	0xF000
>
> +#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
> +					 SDHCI_TRNS_BLK_CNT_EN | \
> +					 SDHCI_TRNS_DMA)
> +
>  struct sdhci_tegra_soc_data {
>  	const struct sdhci_pltfm_data *pdata;
>  	u64 dma_mask;
> @@ -1156,6 +1160,7 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
>  static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
>  {
>  	struct mmc_host *mmc = cq_host->mmc;
> +	struct sdhci_host *host = mmc_priv(mmc);
>  	u8 ctrl;
>  	ktime_t timeout;
>  	bool timed_out;
> @@ -1170,6 +1175,7 @@ static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
>  	 */
>  	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
>  	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
> +		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
>  		sdhci_cqe_enable(mmc);
>  		writel(val, cq_host->mmio + reg);
>  		timeout = ktime_add_us(ktime_get(), 50);
> @@ -1205,6 +1211,7 @@ static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
>  static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
>  {
>  	struct cqhci_host *cq_host = mmc->cqe_private;
> +	struct sdhci_host *host = mmc_priv(mmc);
>  	u32 val;
>
>  	/*
> @@ -1218,6 +1225,7 @@ static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
>  	if (val & CQHCI_ENABLE)
>  		cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
>  			     CQHCI_CFG);
> +	sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
>  	sdhci_cqe_enable(mmc);
>  	if (val & CQHCI_ENABLE)
>  		cqhci_writel(cq_host, val, CQHCI_CFG);
> @@ -1281,12 +1289,36 @@ static void tegra_sdhci_set_timeout(struct sdhci_host *host,
>  	__sdhci_set_timeout(host, cmd);
>  }
>
> +static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
> +{
> +	struct cqhci_host *cq_host = mmc->cqe_private;
> +	u32 reg;
> +
> +	reg = cqhci_readl(cq_host, CQHCI_CFG);
> +	reg |= CQHCI_ENABLE;
> +	cqhci_writel(cq_host, reg, CQHCI_CFG);
> +}
> +
> +static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
> +{
> +	struct cqhci_host *cq_host = mmc->cqe_private;
> +	struct sdhci_host *host = mmc_priv(mmc);
> +	u32 reg;
> +
> +	reg = cqhci_readl(cq_host, CQHCI_CFG);
> +	reg &= ~CQHCI_ENABLE;
> +	cqhci_writel(cq_host, reg, CQHCI_CFG);
> +	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
> +}
> +
>  static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
>  	.write_l = tegra_cqhci_writel,
>  	.enable = sdhci_tegra_cqe_enable,
>  	.disable = sdhci_cqe_disable,
>  	.dumpregs = sdhci_tegra_dumpregs,
>  	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
> +	.pre_enable = sdhci_tegra_cqe_pre_enable,
> +	.post_disable = sdhci_tegra_cqe_post_disable,
>  };
>
>  static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
> --
> 2.25.1
>
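For context when reading the patch above, here is a minimal sketch of the sequence the new callbacks are meant to support, assuming the CQHCI core invokes ->post_disable once the engine has been turned off (so CMD8 can be issued with CQE_EN/CQE_INTR cleared) and ->pre_enable just before re-enabling CQE on resume. The function name is hypothetical, the CMD8 step is shown only as a comment, and the authoritative call sites live in the CQHCI core, not here:

```c
#include <linux/mmc/host.h>
#include "cqhci.h"

/*
 * Illustrative only: the ordering around CMD8 that the new Tegra
 * callbacks rely on. The helper name and the placement of the hooks
 * are assumptions for this sketch, not the mmc/cqhci core itself.
 */
static void cqe_cmd8_window_sketch(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	/*
	 * ->post_disable (sdhci_tegra_cqe_post_disable) clears
	 * CQHCI_ENABLE and zeroes SDHCI_TRANSFER_MODE, so CQE_EN and
	 * CQE_INTR are off before the controller sees CMD8.
	 */
	if (cq_host->ops->post_disable)
		cq_host->ops->post_disable(mmc);

	/* ... the mmc core issues CMD8 (SEND_EXT_CSD) here ... */

	/*
	 * On the CQHCI resume path, ->pre_enable
	 * (sdhci_tegra_cqe_pre_enable) sets CQHCI_ENABLE again before
	 * ->enable (sdhci_tegra_cqe_enable) restores the CQE transfer
	 * mode and re-enables CQE_EN/CQE_INTR.
	 */
	if (cq_host->ops->pre_enable)
		cq_host->ops->pre_enable(mmc);
	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);
}
```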
On 12/04/2021 08:52, Ulf Hansson wrote:
> On Wed, 7 Apr 2021 at 11:46, Jon Hunter <jonathanh@nvidia.com> wrote:
>>
>> From: Aniruddha Tvs Rao <anrao@nvidia.com>
>>
>> CMD8 is not supported with Command Queue Enabled. Add required callback
>> to clear CQE_EN and CQE_INTR fields in the host controller register
>> before sending CMD8. Add corresponding callback in the CQHCI resume path
>> to re-enable CQE_EN and CQE_INTR fields.
>>
>> Reported-by: Kamal Mostafa <kamal@canonical.com>
>> Tested-by: Kamal Mostafa <kamal@canonical.com>
>> Signed-off-by: Aniruddha Tvs Rao <anrao@nvidia.com>
>> Signed-off-by: Jon Hunter <jonathanh@nvidia.com>
>
> Applied for next, thanks!
>
> Please tell, if you think this needs to be tagged for stable kernels
> as well (and if so, perhaps we can set a fixes tag as well?).

Thanks. We were talking about that. I believe that the callbacks were
only added in v5.10 and so that is the earliest we could apply. The most
logical fixes tag would be ...

Fixes: 3c4019f97978 ("mmc: tegra: HW Command Queue Support for Tegra SDMMC")

However, this does come before the support for the command queue callbacks.

I would like to get this into the v5.10 stable branch and I can either
send Greg a direct request once merged or we can tag for stable. Let me
know what you think.

Thanks
Jon

--
nvpublic
On Mon, 12 Apr 2021 at 11:35, Jon Hunter <jonathanh@nvidia.com> wrote:
>
>
> On 12/04/2021 08:52, Ulf Hansson wrote:
> > On Wed, 7 Apr 2021 at 11:46, Jon Hunter <jonathanh@nvidia.com> wrote:
> >>
> >> From: Aniruddha Tvs Rao <anrao@nvidia.com>
> >>
> >> CMD8 is not supported with Command Queue Enabled. Add required callback
> >> to clear CQE_EN and CQE_INTR fields in the host controller register
> >> before sending CMD8. Add corresponding callback in the CQHCI resume path
> >> to re-enable CQE_EN and CQE_INTR fields.
> >>
> >> Reported-by: Kamal Mostafa <kamal@canonical.com>
> >> Tested-by: Kamal Mostafa <kamal@canonical.com>
> >> Signed-off-by: Aniruddha Tvs Rao <anrao@nvidia.com>
> >> Signed-off-by: Jon Hunter <jonathanh@nvidia.com>
> >
> > Applied for next, thanks!
> >
> > Please tell, if you think this needs to be tagged for stable kernels
> > as well (and if so, perhaps we can set a fixes tag as well?).
>
> Thanks. We were talking about that. I believe that the callbacks were
> only added in v5.10 and so that is the earliest we could apply. The most
> logical fixes tag would be ...
>
> Fixes: 3c4019f97978 ("mmc: tegra: HW Command Queue Support for Tegra SDMMC")
>
> However, this does come before the support for the command queue callbacks.
>
> I would like to get this into the v5.10 stable branch and I can either
> send Greg a direct request once merged or we can tag for stable. Let me
> know what you think.

Let me amend the change and add a stable tag like below:

Cc: stable@vger.kernel.org # v5.10+

Kind regards
Uffe
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index c61f797a853f..387ce9cdbd7c 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -119,6 +119,10 @@
 /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
 #define SDHCI_TEGRA_CQE_BASE_ADDR	0xF000
 
+#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
+					 SDHCI_TRNS_BLK_CNT_EN | \
+					 SDHCI_TRNS_DMA)
+
 struct sdhci_tegra_soc_data {
 	const struct sdhci_pltfm_data *pdata;
 	u64 dma_mask;
@@ -1156,6 +1160,7 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
 static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
 {
 	struct mmc_host *mmc = cq_host->mmc;
+	struct sdhci_host *host = mmc_priv(mmc);
 	u8 ctrl;
 	ktime_t timeout;
 	bool timed_out;
@@ -1170,6 +1175,7 @@ static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
 	 */
 	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
 	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
+		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
 		sdhci_cqe_enable(mmc);
 		writel(val, cq_host->mmio + reg);
 		timeout = ktime_add_us(ktime_get(), 50);
@@ -1205,6 +1211,7 @@ static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
 static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
 {
 	struct cqhci_host *cq_host = mmc->cqe_private;
+	struct sdhci_host *host = mmc_priv(mmc);
 	u32 val;
 
 	/*
@@ -1218,6 +1225,7 @@ static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
 	if (val & CQHCI_ENABLE)
 		cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
 			     CQHCI_CFG);
+	sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
 	sdhci_cqe_enable(mmc);
 	if (val & CQHCI_ENABLE)
 		cqhci_writel(cq_host, val, CQHCI_CFG);
@@ -1281,12 +1289,36 @@ static void tegra_sdhci_set_timeout(struct sdhci_host *host,
 	__sdhci_set_timeout(host, cmd);
 }
 
+static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
+{
+	struct cqhci_host *cq_host = mmc->cqe_private;
+	u32 reg;
+
+	reg = cqhci_readl(cq_host, CQHCI_CFG);
+	reg |= CQHCI_ENABLE;
+	cqhci_writel(cq_host, reg, CQHCI_CFG);
+}
+
+static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
+{
+	struct cqhci_host *cq_host = mmc->cqe_private;
+	struct sdhci_host *host = mmc_priv(mmc);
+	u32 reg;
+
+	reg = cqhci_readl(cq_host, CQHCI_CFG);
+	reg &= ~CQHCI_ENABLE;
+	cqhci_writel(cq_host, reg, CQHCI_CFG);
+	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
+}
+
 static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
 	.write_l = tegra_cqhci_writel,
 	.enable = sdhci_tegra_cqe_enable,
 	.disable = sdhci_cqe_disable,
 	.dumpregs = sdhci_tegra_dumpregs,
 	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
+	.pre_enable = sdhci_tegra_cqe_pre_enable,
+	.post_disable = sdhci_tegra_cqe_post_disable,
 };
 
 static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
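For anyone adapting this to a similar sdhci-based driver, here is a condensed sketch of how these cqhci_host_ops get tied into the MMC stack at add_host time. It loosely follows sdhci-tegra's own add_host path, but the function name is hypothetical, error unwinding (sdhci_cleanup_host) and the Tegra-specific hardware-CQ gating are left out, and details may differ from the in-tree driver:

```c
#include <linux/mmc/host.h>
#include "sdhci-pltfm.h"
#include "cqhci.h"

/*
 * Condensed, illustrative wiring of a CQHCI engine behind an SDHCI
 * controller. On Tegra the CQE registers sit inside the SDHCI window
 * at SDHCI_TEGRA_CQE_BASE_ADDR (0xF000), so the cqhci_host is allocated
 * here and pointed at that offset rather than at a separate resource.
 */
static int sdhci_foo_add_host_with_cqe(struct sdhci_host *host)
{
	struct cqhci_host *cq_host;
	bool dma64;
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	/* Advertise CQE (and direct commands) to the MMC core. */
	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	cq_host = devm_kzalloc(mmc_dev(host->mmc), sizeof(*cq_host),
			       GFP_KERNEL);
	if (!cq_host)
		return -ENOMEM;

	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
	/* Hook in the callbacks from the patch, including the new
	 * pre_enable/post_disable pair. */
	cq_host->ops = &sdhci_tegra_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		return ret;

	return __sdhci_add_host(host);
}
```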