
[v2,2/2] drm/omap: partial workaround for DRA7xx DMM errata i878

Message ID 20180322134206.22857-3-peter.ujfalusi@ti.com
State New
Series [v2,1/2] dt-bindings: arm: omap: dmm: Document new compatible for DRA7xx family

Commit Message

Peter Ujfalusi March 22, 2018, 1:42 p.m. UTC
From: Tomi Valkeinen <tomi.valkeinen@ti.com>


Errata i878 says that the MPU should not be used to access RAM and the DMM
at the same time. As it's not possible to prevent the MPU from accessing
RAM, we need to access the DMM via a proxy.

This patch changes the DMM driver to access DMM registers via sDMA. Instead
of doing a normal readl/writel call to read/write a register, we use sDMA
to copy 4 bytes from/to the DMM registers.

This patch provides only a partial workaround for i878, as not only are DMM
register reads/writes affected, but also accesses to DMM-mapped buffers
(usually framebuffers).

Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>

---
 drivers/gpu/drm/omapdrm/omap_dmm_priv.h  |   8 ++
 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 153 ++++++++++++++++++++++++++++++-
 2 files changed, 159 insertions(+), 2 deletions(-)

-- 
Peter

Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki


Comments

Peter Ujfalusi March 23, 2018, 8:32 a.m. UTC | #1
On 2018-03-22 15:42, Peter Ujfalusi wrote:
> From: Tomi Valkeinen <tomi.valkeinen@ti.com>
>
> Errata i878 says that MPU should not be used to access RAM and DMM at
> the same time. As it's not possible to prevent MPU accessing RAM, we
> need to access DMM via a proxy.
>
> This patch changes DMM driver to access DMM registers via sDMA. Instead
> of doing a normal readl/writel call to read/write a register, we use
> sDMA to copy 4 bytes from/to the DMM registers.
>
> This patch provides only a partial workaround for i878, as not only DMM
> register reads/writes are affected, but also accesses to the DMM mapped
> buffers (framebuffers, usually).
>
> Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>

I have failed to add:
Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>


- Péter

Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
Tomi Valkeinen March 29, 2018, 10:18 a.m. UTC | #2
On 22/03/18 15:42, Peter Ujfalusi wrote:
> From: Tomi Valkeinen <tomi.valkeinen@ti.com>
>
> Errata i878 says that MPU should not be used to access RAM and DMM at
> the same time. As it's not possible to prevent MPU accessing RAM, we
> need to access DMM via a proxy.
>
> This patch changes DMM driver to access DMM registers via sDMA. Instead
> of doing a normal readl/writel call to read/write a register, we use
> sDMA to copy 4 bytes from/to the DMM registers.
>
> This patch provides only a partial workaround for i878, as not only DMM
> register reads/writes are affected, but also accesses to the DMM mapped
> buffers (framebuffers, usually).
>
> Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
> ---
>  drivers/gpu/drm/omapdrm/omap_dmm_priv.h  |   8 ++
>  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 153 ++++++++++++++++++++++++++++++-
>  2 files changed, 159 insertions(+), 2 deletions(-)
>
> +	dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
> +	if (!dmm->wa_dma_chan) {
> +		dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);

This and the other free below should use sizeof(u32) as the alloc does.
And I guess device_prep_dma_memcpy() too. Perhaps a #define would be
best here. DMM_REG_SIZE? I can do this change when applying, if you agree.
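
A minimal sketch of what that change could look like, assuming the define does
end up being named DMM_REG_SIZE (the actual respin may of course differ):

#define DMM_REG_SIZE	sizeof(u32)	/* size of one 32-bit DMM register */

-	tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
+	tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src,
+					     DMM_REG_SIZE, 0);

-	dmm->wa_dma_data = dma_alloc_coherent(dmm->dev,  sizeof(u32),
+	dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, DMM_REG_SIZE,
 					      &dmm->wa_dma_handle, GFP_KERNEL);

-		dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
+		dma_free_coherent(dmm->dev, DMM_REG_SIZE, dmm->wa_dma_data,
+				  dmm->wa_dma_handle);

The dma_free_coherent() call in dmm_workaround_uninit() would change the same
way.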

 Tomi

-- 
Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
Peter Ujfalusi March 29, 2018, 12:11 p.m. UTC | #3
On 2018-03-29 13:18, Tomi Valkeinen wrote:
> On 22/03/18 15:42, Peter Ujfalusi wrote:
>> From: Tomi Valkeinen <tomi.valkeinen@ti.com>
>>
>> Errata i878 says that MPU should not be used to access RAM and DMM at
>> the same time. As it's not possible to prevent MPU accessing RAM, we
>> need to access DMM via a proxy.
>>
>> This patch changes DMM driver to access DMM registers via sDMA. Instead
>> of doing a normal readl/writel call to read/write a register, we use
>> sDMA to copy 4 bytes from/to the DMM registers.
>>
>> This patch provides only a partial workaround for i878, as not only DMM
>> register reads/writes are affected, but also accesses to the DMM mapped
>> buffers (framebuffers, usually).
>>
>> Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
>> ---
>>  drivers/gpu/drm/omapdrm/omap_dmm_priv.h  |   8 ++
>>  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 153 ++++++++++++++++++++++++++++++-
>>  2 files changed, 159 insertions(+), 2 deletions(-)
>>
>> +	dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
>> +	if (!dmm->wa_dma_chan) {
>> +		dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
>
> This and the other free below should use sizeof(u32) as the alloc does.
> And I guess device_prep_dma_memcpy() too. Perhaps a #define would be
> best here. DMM_REG_SIZE? I can do this change when applying, if you agree.


Oh, there were others; I had changed it only for dma_alloc_coherent().
I'll wait a couple of days for other comments and will resend with a
#define.

>
>  Tomi
>


- Péter

Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
Laurent Pinchart April 3, 2018, 9:11 p.m. UTC | #4
Hi Peter,

Thank you for the patch.

On Thursday, 22 March 2018 15:42:06 EEST Peter Ujfalusi wrote:
> From: Tomi Valkeinen <tomi.valkeinen@ti.com>
>
> Errata i878 says that MPU should not be used to access RAM and DMM at
> the same time. As it's not possible to prevent MPU accessing RAM, we
> need to access DMM via a proxy.
>
> This patch changes DMM driver to access DMM registers via sDMA. Instead
> of doing a normal readl/writel call to read/write a register, we use
> sDMA to copy 4 bytes from/to the DMM registers.
>
> This patch provides only a partial workaround for i878, as not only DMM
> register reads/writes are affected, but also accesses to the DMM mapped
> buffers (framebuffers, usually).


I assume access to DMM-mapped buffers to be way more frequent than access to 
the DMM registers. If that's the case, this partial workaround should only 
slightly lower the probability of system lock-up. Do you have plans to 
implement a workaround that will fix the problem completely ?

> +	dma_async_issue_pending(dmm->wa_dma_chan);
> +	status = dma_sync_wait(dmm->wa_dma_chan, cookie);

dma_sync_wait() has a 5s timeout. You're calling this function with a spinlock 
held. The end result might be slightly better than a complete system lock as 
caused by the bug described in i878, but only slightly.

Unless I'm mistaken the reason you can't sleep here is because of the need to 
access registers in the interrupt handler. Could we use threaded IRQs to solve 
this ?
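
A minimal sketch of the threaded-IRQ idea, assuming the existing
omap_dmm_irq_handler() could be moved as-is to the thread context (that is an
assumption, not something verified here):

	/*
	 * Sketch only: with a threaded handler the DMM register accesses done
	 * from the IRQ path would run in process context and could sleep, so
	 * dmm_read()/dmm_write() would not need to busy-wait under a spinlock.
	 */
	ret = request_threaded_irq(omap_dmm->irq, NULL, omap_dmm_irq_handler,
				   IRQF_ONESHOT, "omap_dmm_irq_handler",
				   omap_dmm);
	if (ret) {
		dev_err(&dev->dev, "failed to request threaded IRQ\n");
		goto fail;
	}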


-- 
Regards,

Laurent Pinchart



Tomi Valkeinen April 4, 2018, 7:37 a.m. UTC | #5
On 04/04/18 00:11, Laurent Pinchart wrote:

> I assume access to DMM-mapped buffers to be way more frequent than access to
> the DMM registers. If that's the case, this partial workaround should only
> slightly lower the probability of system lock-up. Do you have plans to
> implement a workaround that will fix the problem completely ?


CPU only accesses memory via DMM when using TILER 2D buffers, which are
not officially supported. For non-2D, the pages are mapped directly to
the CPU without DMM in between.

 Tomi

-- 
Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
Laurent Pinchart April 4, 2018, 9:51 a.m. UTC | #6
Hi Tomi,

On Wednesday, 4 April 2018 10:37:05 EEST Tomi Valkeinen wrote:
> On 04/04/18 00:11, Laurent Pinchart wrote:
> > I assume access to DMM-mapped buffers to be way more frequent than access
> > to the DMM registers. If that's the case, this partial workaround should
> > only slightly lower the probability of system lock-up. Do you have plans
> > to implement a workaround that will fix the problem completely ?
>
> CPU only accesses memory via DMM when using TILER 2D buffers, which are
> not officially supported. For non-2D, the pages are mapped directly to
> the CPU without DMM in between.


What is the DMM used for with non-2D then ? Does it need to be setup at all ?

-- 
Regards,

Laurent Pinchart



Laurent Pinchart April 4, 2018, 10:28 a.m. UTC | #7
Hi Tomi,

On Wednesday, 4 April 2018 13:02:04 EEST Tomi Valkeinen wrote:
> On 04/04/18 12:51, Laurent Pinchart wrote:
> > On Wednesday, 4 April 2018 10:37:05 EEST Tomi Valkeinen wrote:
> >> On 04/04/18 00:11, Laurent Pinchart wrote:
> >>> I assume access to DMM-mapped buffers to be way more frequent than
> >>> access to the DMM registers. If that's the case, this partial workaround
> >>> should only slightly lower the probability of system lock-up. Do you
> >>> have plans to implement a workaround that will fix the problem
> >>> completely ?
> >>
> >> CPU only accesses memory via DMM when using TILER 2D buffers, which are
> >> not officially supported. For non-2D, the pages are mapped directly to
> >> the CPU without DMM in between.
> >
> > What is the DMM used for with non-2D then ? Does it need to be setup at
> > all ?
>
> It creates a contiguous view of memory for IPs without IOMMUs, like DSS.


OK, got it. In that case the CPU accesses don't need to go through the DMM, 
only the device accesses do, as the CPU will go through the MMU. Sorry for the 
noise.

-- 
Regards,

Laurent Pinchart



Tomi Valkeinen April 4, 2018, 10:33 a.m. UTC | #8
On 04/04/18 13:28, Laurent Pinchart wrote:
> Hi Tomi,
>
> On Wednesday, 4 April 2018 13:02:04 EEST Tomi Valkeinen wrote:
>> On 04/04/18 12:51, Laurent Pinchart wrote:
>>> On Wednesday, 4 April 2018 10:37:05 EEST Tomi Valkeinen wrote:
>>>> On 04/04/18 00:11, Laurent Pinchart wrote:
>>>>> I assume access to DMM-mapped buffers to be way more frequent than
>>>>> access to the DMM registers. If that's the case, this partial workaround
>>>>> should only slightly lower the probability of system lock-up. Do you
>>>>> have plans to implement a workaround that will fix the problem
>>>>> completely ?
>>>>
>>>> CPU only accesses memory via DMM when using TILER 2D buffers, which are
>>>> not officially supported. For non-2D, the pages are mapped directly to
>>>> the CPU without DMM in between.
>>>
>>> What is the DMM used for with non-2D then ? Does it need to be setup at
>>> all ?
>>
>> It creates a contiguous view of memory for IPs without IOMMUs, like DSS.
>
> OK, got it. In that case the CPU accesses don't need to go through the DMM,
> only the device accesses do, as the CPU will go through the MMU. Sorry for
> the noise.


Slightly related, just thinking out loud:

This is the first part of the work-around. The other part would be to
make TILER 2D available to the CPU via some kind of indirect access.
TILER 2D memory is mapped in a custom way to the CPU even now (if I
recall right, only two pages are mapped at once, with a custom DMM
mapping for those).

I think sDMA would be the choice there too, allocating two pages as a
"cache" and using sDMA to fill and flush those pages.

I haven't spent any time on that, as TILER 2D has other issues and is
not very usable.

 Tomi

-- 
Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
Tomi Valkeinen April 4, 2018, 10:50 a.m. UTC | #9
On 04/04/18 00:11, Laurent Pinchart wrote:

>> +	dma_async_issue_pending(dmm->wa_dma_chan);
>> +	status = dma_sync_wait(dmm->wa_dma_chan, cookie);
>
> dma_sync_wait() has a 5s timeout. You're calling this function with a spinlock
> held. The end result might be slightly better than a complete system lock as
> caused by the bug described in i878, but only slightly.


When does the timeout trigger? I presume it only happens when things are
badly broken on the HW or driver level, and when things work normally,
the wait is very short.

> Unless I'm mistaken the reason you can't sleep here is because of the need to
> access registers in the interrupt handler. Could we use threaded IRQs to solve
> this ?


Yes, I think that's the reason. Probably we could use threaded IRQs.

Also, I'm not sure if this is a big issue. If the dma_sync_wait
times out, things are already rather broken. Then again, any wait in an
irq context is not that nice. But if the wait is just a few loops long,
it's not really even a wait...

 Tomi

-- 
Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
Laurent Pinchart April 4, 2018, 11:08 a.m. UTC | #10
Hi Tomi,

On Wednesday, 4 April 2018 13:50:43 EEST Tomi Valkeinen wrote:
> On 04/04/18 00:11, Laurent Pinchart wrote:
> >> +	dma_async_issue_pending(dmm->wa_dma_chan);
> >> +	status = dma_sync_wait(dmm->wa_dma_chan, cookie);
> >
> > dma_sync_wait() has a 5s timeout. You're calling this function with a
> > spinlock held. The end result might be slightly better than a complete
> > system lock as caused by the bug described in i878, but only slightly.
>
> When does the timeout trigger? I presume it only happens when things are
> badly broken on the HW or driver level, and when things work normally,
> the wait is very short.


It shouldn't happen when things go right, and I indeed expect the transfer to 
complete quite fast. I would however like to get real numbers there; we should
measure how long the transfer typically takes.
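
For reference, a rough way to get such numbers would be to wrap the copy with a
ktime pair (an illustrative sketch only, using linux/ktime.h):

	ktime_t start = ktime_get();

	r = dmm_dma_copy(dmm, src, dst);

	/* Log how long one proxied register access took, in microseconds. */
	dev_dbg(dmm->dev, "i878 wa sDMA copy took %lld us\n",
		ktime_us_delta(ktime_get(), start));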

> > Unless I'm mistaken the reason you can't sleep here is because of the need
> > to access registers in the interrupt handler. Could we use threaded IRQs
> > to solve this ?
>
> Yes, I think that's the reason. Probably we could use threaded IRQs.
>
> Also, I'm not sure if this is a big issue. If the dma_sync_wait
> timeouts, things are already rather broken. Then again, any wait in an
> irq context is not that nice. But if the wait is just a few loops long,
> it's not really even a wait...


-- 
Regards,

Laurent Pinchart



Laurent Pinchart April 4, 2018, 11:17 a.m. UTC | #11
Hi Tomi,

On Wednesday, 4 April 2018 13:33:02 EEST Tomi Valkeinen wrote:
> On 04/04/18 13:28, Laurent Pinchart wrote:
> > On Wednesday, 4 April 2018 13:02:04 EEST Tomi Valkeinen wrote:
> >> On 04/04/18 12:51, Laurent Pinchart wrote:
> >>> On Wednesday, 4 April 2018 10:37:05 EEST Tomi Valkeinen wrote:
> >>>> On 04/04/18 00:11, Laurent Pinchart wrote:
> >>>>> I assume access to DMM-mapped buffers to be way more frequent than
> >>>>> access to the DMM registers. If that's the case, this partial
> >>>>> workaround should only slightly lower the probability of system lock-
> >>>>> up. Do you have plans to implement a workaround that will fix the
> >>>>> problem completely ?
> >>>>
> >>>> CPU only accesses memory via DMM when using TILER 2D buffers, which are
> >>>> not officially supported. For non-2D, the pages are mapped directly to
> >>>> the CPU without DMM in between.
> >>>
> >>> What is the DMM used for with non-2D then ? Does it need to be setup at
> >>> all ?
> >>
> >> It creates a contiguous view of memory for IPs without IOMMUs, like DSS.
> >
> > OK, got it. In that case the CPU accesses don't need to go through the
> > DMM, only the device accesses do, as the CPU will go through the MMU.
> > Sorry for the noise.
>
> Slightly related, just thinking out loud:
>
> This is the first part of the work-around. The other part would be to
> make TILER 2D available to the CPU via some kind of indirect access.
> TILER 2D memory is mapped in a custom way to the CPU even now (if I
> recall right, only two pages are mapped at once, with a custom DMM
> mapping for those).
>
> I think sDMA would be the choice there too, allocating two pages as a
> "cache" and using sDMA to fill and flush those pages.


Thinking out loud too, I suppose we would trigger the sDMA to flush the page 
out when it has to be evicted from the usergart, through the same mechanism we 
use to evict the TILER 2D mapping now. If the sDMA is fast enough it could 
complete before the CPU fills the next page, and we wouldn't have any 
noticeable delay (there would be extra memory bandwidth consumption though).

However, when faulting a page in, we would also need to use sDMA to read the 
data, right ? That sDMA transfer could only be triggered at the time of the 
page fault, so every access resulting in a fault would be delayed by a page-
sized sDMA transfer. I wonder if the resulting performances would be 
acceptable.

> I haven't spent any time on that, as TILER 2D has other issues and is
> not very usable.


Maybe we should just not use it then ;-)

-- 
Regards,

Laurent Pinchart




Patch

diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index c2785cc98dc9..9ce9d1d7039a 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -155,10 +155,12 @@  struct refill_engine {
 
 struct dmm_platform_data {
 	u32 cpu_cache_flags;
+	bool errata_i878_wa;
 };
 
 struct dmm {
 	struct device *dev;
+	dma_addr_t phys_base;
 	void __iomem *base;
 	int irq;
 
@@ -189,6 +191,12 @@  struct dmm {
 	struct list_head alloc_head;
 
 	const struct dmm_platform_data *plat_data;
+
+	bool dmm_workaround;
+	spinlock_t wa_lock;
+	u32 *wa_dma_data;
+	dma_addr_t wa_dma_handle;
+	struct dma_chan *wa_dma_chan;
 };
 
 #endif
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index e84871e74615..27c67bc36203 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -18,6 +18,7 @@ 
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -79,14 +80,138 @@  static const u32 reg[][4] = {
 			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
 };
 
+static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
+{
+	struct dma_device *dma_dev = dmm->wa_dma_chan->device;
+	struct dma_async_tx_descriptor *tx;
+	enum dma_status status;
+	dma_cookie_t cookie;
+
+	tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
+	if (!tx) {
+		dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
+		return -EIO;
+	}
+
+	cookie = tx->tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
+		return -EIO;
+	}
+
+	dma_async_issue_pending(dmm->wa_dma_chan);
+	status = dma_sync_wait(dmm->wa_dma_chan, cookie);
+	if (status != DMA_COMPLETE)
+		dev_err(dmm->dev, "i878 wa DMA copy failure\n");
+
+	dmaengine_terminate_all(dmm->wa_dma_chan);
+	return 0;
+}
+
+static u32 dmm_read_wa(struct dmm *dmm, u32 reg)
+{
+	dma_addr_t src, dst;
+	int r;
+
+	src = dmm->phys_base + reg;
+	dst = dmm->wa_dma_handle;
+
+	r = dmm_dma_copy(dmm, src, dst);
+	if (r) {
+		dev_err(dmm->dev, "sDMA read transfer timeout\n");
+		return readl(dmm->base + reg);
+	}
+
+	/*
+	 * As per i878 workaround, the DMA is used to access the DMM registers.
+	 * Make sure that the readl is not moved by the compiler or the CPU
+	 * earlier than the DMA finished writing the value to memory.
+	 */
+	rmb();
+	return readl(dmm->wa_dma_data);
+}
+
+static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
+{
+	dma_addr_t src, dst;
+	int r;
+
+	writel(val, dmm->wa_dma_data);
+	/*
+	 * As per i878 workaround, the DMA is used to access the DMM registers.
+	 * Make sure that the writel is not moved by the compiler or the CPU, so
+	 * the data will be in place before we start the DMA to do the actual
+	 * register write.
+	 */
+	wmb();
+
+	src = dmm->wa_dma_handle;
+	dst = dmm->phys_base + reg;
+
+	r = dmm_dma_copy(dmm, src, dst);
+	if (r) {
+		dev_err(dmm->dev, "sDMA write transfer timeout\n");
+		writel(val, dmm->base + reg);
+	}
+}
+
 static u32 dmm_read(struct dmm *dmm, u32 reg)
 {
-	return readl(dmm->base + reg);
+	if (dmm->dmm_workaround) {
+		u32 v;
+		unsigned long flags;
+
+		spin_lock_irqsave(&dmm->wa_lock, flags);
+		v = dmm_read_wa(dmm, reg);
+		spin_unlock_irqrestore(&dmm->wa_lock, flags);
+
+		return v;
+	} else {
+		return readl(dmm->base + reg);
+	}
 }
 
 static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
 {
-	writel(val, dmm->base + reg);
+	if (dmm->dmm_workaround) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dmm->wa_lock, flags);
+		dmm_write_wa(dmm, val, reg);
+		spin_unlock_irqrestore(&dmm->wa_lock, flags);
+	} else {
+		writel(val, dmm->base + reg);
+	}
+}
+
+static int dmm_workaround_init(struct dmm *dmm)
+{
+	dma_cap_mask_t mask;
+
+	spin_lock_init(&dmm->wa_lock);
+
+	dmm->wa_dma_data = dma_alloc_coherent(dmm->dev,  sizeof(u32),
+					      &dmm->wa_dma_handle, GFP_KERNEL);
+	if (!dmm->wa_dma_data)
+		return -ENOMEM;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
+	if (!dmm->wa_dma_chan) {
+		dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void dmm_workaround_uninit(struct dmm *dmm)
+{
+	dma_release_channel(dmm->wa_dma_chan);
+
+	dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
 }
 
 /* simple allocator to grab next 16 byte aligned memory from txn */
@@ -632,6 +757,9 @@  static int omap_dmm_remove(struct platform_device *dev)
 		if (omap_dmm->dummy_page)
 			__free_page(omap_dmm->dummy_page);
 
+		if (omap_dmm->dmm_workaround)
+			dmm_workaround_uninit(omap_dmm);
+
 		if (omap_dmm->irq > 0)
 			free_irq(omap_dmm->irq, omap_dmm);
 
@@ -680,6 +808,7 @@  static int omap_dmm_probe(struct platform_device *dev)
 		goto fail;
 	}
 
+	omap_dmm->phys_base = mem->start;
 	omap_dmm->base = ioremap(mem->start, SZ_2K);
 
 	if (!omap_dmm->base) {
@@ -695,6 +824,17 @@  static int omap_dmm_probe(struct platform_device *dev)
 
 	omap_dmm->dev = &dev->dev;
 
+	if (omap_dmm->plat_data->errata_i878_wa) {
+		if (!dmm_workaround_init(omap_dmm)) {
+			omap_dmm->dmm_workaround = true;
+			dev_info(&dev->dev,
+				"workaround for errata i878 in use\n");
+		} else {
+			dev_warn(&dev->dev,
+				 "failed to initialize work-around for i878\n");
+		}
+	}
+
 	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
 	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
 	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
@@ -1058,6 +1198,11 @@  static const struct dmm_platform_data dmm_omap5_platform_data = {
 	.cpu_cache_flags = OMAP_BO_UNCACHED,
 };
 
+static const struct dmm_platform_data dmm_dra7_platform_data = {
+	.cpu_cache_flags = OMAP_BO_UNCACHED,
+	.errata_i878_wa = true,
+};
+
 static const struct of_device_id dmm_of_match[] = {
 	{
 		.compatible = "ti,omap4-dmm",
@@ -1067,6 +1212,10 @@  static const struct of_device_id dmm_of_match[] = {
 		.compatible = "ti,omap5-dmm",
 		.data = &dmm_omap5_platform_data,
 	},
+	{
+		.compatible = "ti,dra7-dmm",
+		.data = &dmm_dra7_platform_data,
+	},
 	{},
 };
 #endif