
[22/22] mtd: rawnand: Use dma_alloc_noncoherent() for dma buffer

Message ID 20220219005221.634-23-bhe@redhat.com
State New
Headers show
Series Don't use kmalloc() with GFP_DMA

Commit Message

Baoquan He Feb. 19, 2022, 12:52 a.m. UTC
Use dma_alloc_noncoherent() instead of directly allocating a buffer
with kmalloc() and GFP_DMA. The DMA API will allocate the buffer
according to the device's addressing limitations.

[ 42.hyeyoo@gmail.com: Use dma_alloc_noncoherent() instead of
  __get_free_page() and update the changelog.

  As it does not allocate high-order buffers, allocate the buffer
  when needed and free it after the DMA transfer. ]

Signed-off-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Miquel Raynal <miquel.raynal@bootlin.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Vignesh Raghavendra <vigneshr@ti.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: christian.koenig@amd.com
Cc: linux-mtd@lists.infradead.org

---
 drivers/mtd/nand/raw/marvell_nand.c | 55 ++++++++++++++++++-----------
 1 file changed, 34 insertions(+), 21 deletions(-)
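
For reference, a minimal sketch of the allocation pattern the patch switches
to (the helper names and the "dev", "len" and "dir" parameters are
placeholders, not part of the driver): dma_alloc_noncoherent() hands back both
a CPU virtual address and a dma_addr_t that already honours the device's DMA
mask, so no GFP_DMA hint and no separate dma_map_*() call are needed.

#include <linux/dma-mapping.h>

/*
 * Allocate a buffer sized for one transfer; the returned pointer is a
 * normal CPU address, *handle is the device-visible address constrained
 * by dev's DMA mask.
 */
static void *nand_dma_buf_get(struct device *dev, size_t len,
			      dma_addr_t *handle,
			      enum dma_data_direction dir)
{
	/*
	 * The patch below uses GFP_ATOMIC because it allocates in the
	 * I/O path; GFP_KERNEL is fine when sleeping is allowed.
	 */
	return dma_alloc_noncoherent(dev, len, handle, dir, GFP_KERNEL);
}

static void nand_dma_buf_put(struct device *dev, size_t len, void *buf,
			     dma_addr_t handle,
			     enum dma_data_direction dir)
{
	dma_free_noncoherent(dev, len, buf, handle, dir);
}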

Comments

Hyeonggon Yoo Feb. 19, 2022, 11:18 a.m. UTC | #1
On Sat, Feb 19, 2022 at 08:19:00AM +0100, Christoph Hellwig wrote:
> On Sat, Feb 19, 2022 at 08:52:21AM +0800, Baoquan He wrote:
> > Use dma_alloc_noncoherent() instead of directly allocating a buffer
> > with kmalloc() and GFP_DMA. The DMA API will allocate the buffer
> > according to the device's addressing limitations.
> 
> I think it would be better to still allocate the buffer at allocation
> time and then just transfer ownership using dma_sync_single* in the I/O
> path to avoid the GFP_ATOMIC allocation.

This driver allocates the buffer at initialization and maps it for
DMA_TO_DEVICE or DMA_FROM_DEVICE when processing I/O.

But after converting this driver to use dma_alloc_noncoherent(), remapping
a dma_alloc_noncoherent()-ed buffer would be strange, so I made it allocate
the buffer in the I/O path instead.

At this point I think we need an API that allocates based on an address
bit mask (like dma_alloc_noncoherent()) but does not map the buffer to a
DMA address. __get_free_pages()/kmalloc() with GFP_DMA has been so
confusing..

Hmm.. for this specific case, what about allocating two buffers, one for
DMA_TO_DEVICE and one for DMA_FROM_DEVICE, at initialization time?

Thanks,
Hyeonggon
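
For illustration, a rough sketch of the approach suggested above, assuming the
buffer was obtained once with dma_alloc_noncoherent() in
marvell_nfc_init_dma() and its handle kept in a hypothetical nfc->dma_handle
field (which does not exist in the current driver): ownership of the
non-coherent buffer is transferred with dma_sync_single_*() around each
transfer, so nothing is allocated in the I/O path.

/*
 * Hypothetical helper; nfc->dma_handle stands for the dma_addr_t
 * returned by dma_alloc_noncoherent() at init time.
 */
static int marvell_nfc_xfer_preallocated(struct marvell_nfc *nfc,
					 unsigned int dma_len,
					 enum dma_data_direction direction)
{
	/* Hand the buffer over to the device before the transfer. */
	dma_sync_single_for_device(nfc->dev, nfc->dma_handle,
				   dma_len, direction);

	/* ... prepare, submit and wait for the dmaengine descriptor ... */

	/* Take the buffer back so the CPU sees the DMA'd data. */
	dma_sync_single_for_cpu(nfc->dev, nfc->dma_handle,
				dma_len, direction);
	return 0;
}

The two-buffer idea mentioned above would follow the same pattern, just with
one preallocated buffer (and handle) per direction.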

Patch

diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 2455a581fd70..c0b64a7e50af 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -860,26 +860,45 @@  static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
 	struct dma_async_tx_descriptor *tx;
 	struct scatterlist sg;
 	dma_cookie_t cookie;
-	int ret;
+	dma_addr_t dma_handle;
+	int ret = 0;
 
 	marvell_nfc_enable_dma(nfc);
+
+	/*
+	 * DMA must act on length multiple of 32 and this length may be
+	 * bigger than the destination buffer. Use this buffer instead
+	 * for DMA transfers and then copy the desired amount of data to
+	 * the provided buffer.
+	 */
+	nfc->dma_buf = dma_alloc_noncoherent(nfc->dev, MAX_CHUNK_SIZE,
+						&dma_handle,
+						direction,
+						GFP_ATOMIC);
+	if (!nfc->dma_buf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+
 	/* Prepare the DMA transfer */
-	sg_init_one(&sg, nfc->dma_buf, dma_len);
-	dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
-	tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
+	tx = dmaengine_prep_slave_single(nfc->dma_chan, dma_handle, dma_len,
 				     direction == DMA_FROM_DEVICE ?
 				     DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
 				     DMA_PREP_INTERRUPT);
 	if (!tx) {
 		dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
-		return -ENXIO;
+		ret = -ENXIO;
+		goto free;
 	}
 
 	/* Do the task and wait for it to finish */
 	cookie = dmaengine_submit(tx);
 	ret = dma_submit_error(cookie);
-	if (ret)
-		return -EIO;
+	if (ret) {
+		ret = -EIO;
+		goto free;
+	}
 
 	dma_async_issue_pending(nfc->dma_chan);
 	ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
@@ -889,10 +908,16 @@  static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
 		dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
 			dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
 		dmaengine_terminate_all(nfc->dma_chan);
-		return -ETIMEDOUT;
+		ret = -ETIMEDOUT;
+		goto free;
 	}
 
-	return 0;
+free:
+	dma_free_noncoherent(nfc->dev, MAX_CHUNK_SIZE, nfc->dma_buf,
+			     dma_handle, direction);
+
+out:
+	return ret;
 }
 
 static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
@@ -2814,18 +2839,6 @@  static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
 		goto release_channel;
 	}
 
-	/*
-	 * DMA must act on length multiple of 32 and this length may be
-	 * bigger than the destination buffer. Use this buffer instead
-	 * for DMA transfers and then copy the desired amount of data to
-	 * the provided buffer.
-	 */
-	nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
-	if (!nfc->dma_buf) {
-		ret = -ENOMEM;
-		goto release_channel;
-	}
-
 	nfc->use_dma = true;
 
 	return 0;