
[4.17.y] Revert "iommu/intel-iommu: Enable CONFIG_DMA_DIRECT_OPS=y and clean up intel_{alloc,free}_coherent()"

Message ID 20180726130205.1973-1-Jason@zx2c4.com
State New

Commit Message

Jason A. Donenfeld July 26, 2018, 1:02 p.m. UTC
From: Christoph Hellwig <hch@lst.de>

commit 7ec916f82c48dcfc115eee2e3e0e6d400e310fc5 upstream.

This commit may cause a smaller-than-required DMA mask to be used for
some allocations, which apparently leads to intermittent module load
failures for iwlwifi.

This reverts commit d657c5c73ca987214a6f9436e435b34fc60f332a.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Fabio Coatti <fabio.coatti@gmail.com>
Tested-by: Fabio Coatti <fabio.coatti@gmail.com>

---
Backporting this and submitting it to stable@, because without it,
ordinary WiFi is broken on a fairly vanilla ThinkPad P50 on all 4.17
kernels.
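
For illustration, the driver-side pattern that a shrunken coherent DMA
mask breaks looks roughly like the sketch below. The probe routine is
hypothetical; dma_set_mask_and_coherent(), dma_alloc_coherent() and
dma_free_coherent() are the actual kernel APIs involved, and
iwlwifi-class hardware negotiates a mask wider than 32 bits:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Hypothetical probe path: negotiate DMA addressing, then grab a
	 * coherent descriptor ring. If the allocator honors a narrower
	 * mask than the one negotiated here, the allocation (or the
	 * driver's checks on the returned address) can fail, which is
	 * how the iwlwifi module load failures manifested. */
	static int example_probe(struct pci_dev *pdev)
	{
		struct device *dev = &pdev->dev;
		dma_addr_t ring_dma;
		void *ring;

		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)) &&
		    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
			return -ENODEV;

		ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
					  GFP_KERNEL);
		if (!ring)
			return -ENOMEM;

		/* ... hardware setup would use ring/ring_dma here ... */

		dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
		return 0;
	}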

 drivers/iommu/Kconfig       |  1 -
 drivers/iommu/intel-iommu.c | 62 +++++++++++++++++++++++++++----------
 2 files changed, 46 insertions(+), 17 deletions(-)

-- 
2.18.0

Comments

Greg KH July 26, 2018, 1:58 p.m. UTC | #1
On Thu, Jul 26, 2018 at 03:02:05PM +0200, Jason A. Donenfeld wrote:
> From: Christoph Hellwig <hch@lst.de>
>
> commit 7ec916f82c48dcfc115eee2e3e0e6d400e310fc5 upstream.
>
> This commit may cause a smaller-than-required DMA mask to be used for
> some allocations, which apparently leads to intermittent module load
> failures for iwlwifi.
>
> This reverts commit d657c5c73ca987214a6f9436e435b34fc60f332a.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> Reported-by: Fabio Coatti <fabio.coatti@gmail.com>
> Tested-by: Fabio Coatti <fabio.coatti@gmail.com>
> ---
> Backporting this and submitting it to stable@, because without it,
> ordinary WiFi is broken on a fairly vanilla ThinkPad P50 on all 4.17
> kernels.

Thanks, now queued up.

greg k-h

Patch

diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b38798cc5288..f3a21343e636 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -142,7 +142,6 @@ config DMAR_TABLE
 config INTEL_IOMMU
 	bool "Support for Intel IOMMU using DMA Remapping Devices"
 	depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
-	select DMA_DIRECT_OPS
 	select IOMMU_API
 	select IOMMU_IOVA
 	select DMAR_TABLE
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 749d8f235346..6392a4964fc5 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -31,7 +31,6 @@
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/cpu.h>
@@ -3709,30 +3708,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  unsigned long attrs)
 {
-	void *vaddr;
+	struct page *page = NULL;
+	int order;
 
-	vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-	if (iommu_no_mapping(dev) || !vaddr)
-		return vaddr;
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
 
-	*dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
-			PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
-			dev->coherent_dma_mask);
-	if (!*dma_handle)
-		goto out_free_pages;
-	return vaddr;
+	if (!iommu_no_mapping(dev))
+		flags &= ~(GFP_DMA | GFP_DMA32);
+	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+			flags |= GFP_DMA;
+		else
+			flags |= GFP_DMA32;
+	}
+
+	if (gfpflags_allow_blocking(flags)) {
+		unsigned int count = size >> PAGE_SHIFT;
+
+		page = dma_alloc_from_contiguous(dev, count, order, flags);
+		if (page && iommu_no_mapping(dev) &&
+		    page_to_phys(page) + size > dev->coherent_dma_mask) {
+			dma_release_from_contiguous(dev, page, count);
+			page = NULL;
+		}
+	}
+
+	if (!page)
+		page = alloc_pages(flags, order);
+	if (!page)
+		return NULL;
+	memset(page_address(page), 0, size);
+
+	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+					 DMA_BIDIRECTIONAL,
+					 dev->coherent_dma_mask);
+	if (*dma_handle)
+		return page_address(page);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 
-out_free_pages:
-	dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
 	return NULL;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_handle, unsigned long attrs)
 {
-	if (!iommu_no_mapping(dev))
-		intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
-	dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+	int order;
+	struct page *page = virt_to_page(vaddr);
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	intel_unmap(dev, dma_handle, size);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
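
The heart of the restored allocation path above is its GFP zone
selection. Condensed into a standalone helper for readability: the
decision tree is identical to the intel_alloc_coherent() hunk, and
iommu_no_mapping() and dma_get_required_mask() are the functions the
patch itself calls; only the helper name is invented:

	/* When the device is translated by the IOMMU, any physical page
	 * can be mapped into the device's DMA view, so the low-memory
	 * zone restrictions are dropped. In passthrough mode the page
	 * itself must satisfy the device's coherent mask. */
	static gfp_t intel_coherent_gfp(struct device *dev, gfp_t flags)
	{
		if (!iommu_no_mapping(dev)) {
			flags &= ~(GFP_DMA | GFP_DMA32);
		} else if (dev->coherent_dma_mask <
			   dma_get_required_mask(dev)) {
			if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
				flags |= GFP_DMA;   /* ZONE_DMA */
			else
				flags |= GFP_DMA32; /* below 4 GiB */
		}
		return flags;
	}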