[16/30, for 5.4] xtensa: use the generic uncached segment support

Message ID: 20200925161916.204667-17-pgonda@google.com
State: New
Series: Backport unencrypted non-blocking DMA allocations

Commit Message

Peter Gonda Sept. 25, 2020, 4:19 p.m. UTC
From: Christoph Hellwig <hch@lst.de>

commit 0f665b9e2a6d4cc963e6cd349d40320ed5281f95 upstream.

Switch xtensa over to use the generic uncached segment support, and thus
the generic implementations of dma_alloc_* and dma_free_*, thereby also
gaining support for mmapping DMA memory.  The non-working nommu DMA
support has been disabled, but could be re-enabled easily if platforms
that actually have an uncached segment show up.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
Tested-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Peter Gonda <pgonda@google.com>
---
 arch/xtensa/Kconfig                |   6 +-
 arch/xtensa/include/asm/platform.h |  27 -------
 arch/xtensa/kernel/Makefile        |   3 +-
 arch/xtensa/kernel/pci-dma.c       | 121 +++--------------------------
 4 files changed, 18 insertions(+), 139 deletions(-)
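
For context: with ARCH_HAS_UNCACHED_SEGMENT selected, the common
dma-direct code performs the cache maintenance and the switch to the
uncached alias itself, so the architecture only has to supply the small
hooks added below. Paraphrasing the 5.4-era allocation path in
kernel/dma/direct.c (a simplified sketch, not the verbatim upstream
code):

	/* after a suitable struct page has been allocated */
	ret = page_address(page);
	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_prep_coherent(page, size);	/* xtensa: __invalidate_dcache_range() */
		ret = uncached_kernel_address(ret);	/* xtensa: cached KSEG -> bypass KSEG */
	}
	return ret;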

Patch

diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 8352037322df..d3a5891eff2e 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -3,8 +3,10 @@  config XTENSA
 	def_bool y
 	select ARCH_32BIT_OFF_T
 	select ARCH_HAS_BINFMT_FLAT if !MMU
-	select ARCH_HAS_SYNC_DMA_FOR_CPU
-	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+	select ARCH_HAS_DMA_PREP_COHERENT if MMU
+	select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
+	select ARCH_HAS_UNCACHED_SEGMENT if MMU
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index 913826dfa838..f2c48522c5a1 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -65,31 +65,4 @@  extern void platform_calibrate_ccount (void);
  */
 void cpu_reset(void) __attribute__((noreturn));
 
-/*
- * Memory caching is platform-dependent in noMMU xtensa configurations.
- * The following set of functions should be implemented in platform code
- * in order to enable coherent DMA memory operations when CONFIG_MMU is not
- * enabled. Default implementations do nothing and issue a warning.
- */
-
-/*
- * Check whether p points to a cached memory.
- */
-bool platform_vaddr_cached(const void *p);
-
-/*
- * Check whether p points to an uncached memory.
- */
-bool platform_vaddr_uncached(const void *p);
-
-/*
- * Return pointer to an uncached view of the cached sddress p.
- */
-void *platform_vaddr_to_uncached(void *p);
-
-/*
- * Return pointer to a cached view of the uncached sddress p.
- */
-void *platform_vaddr_to_cached(void *p);
-
 #endif	/* _XTENSA_PLATFORM_H */
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index 6f629027ac7d..d4082c6a121b 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -5,10 +5,11 @@ 
 
 extra-y := head.o vmlinux.lds
 
-obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
+obj-y := align.o coprocessor.o entry.o irq.o platform.o process.o \
 	 ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
 	 vectors.o
 
+obj-$(CONFIG_MMU) += pci-dma.o
 obj-$(CONFIG_PCI) += pci.o
 obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
 obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 154979d62b73..1c82e21de4f6 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -81,122 +81,25 @@  void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 	}
 }
 
-#ifdef CONFIG_MMU
-bool platform_vaddr_cached(const void *p)
-{
-	unsigned long addr = (unsigned long)p;
-
-	return addr >= XCHAL_KSEG_CACHED_VADDR &&
-	       addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
-}
-
-bool platform_vaddr_uncached(const void *p)
-{
-	unsigned long addr = (unsigned long)p;
-
-	return addr >= XCHAL_KSEG_BYPASS_VADDR &&
-	       addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
-}
-
-void *platform_vaddr_to_uncached(void *p)
-{
-	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-}
-
-void *platform_vaddr_to_cached(void *p)
-{
-	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-}
-#else
-bool __attribute__((weak)) platform_vaddr_cached(const void *p)
-{
-	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
-	return true;
-}
-
-bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
-{
-	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
-	return false;
-}
-
-void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
-	return p;
-}
-
-void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
-{
-	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
-	return p;
+	__invalidate_dcache_range((unsigned long)page_address(page), size);
 }
-#endif
 
 /*
- * Note: We assume that the full memory space is always mapped to 'kseg'
- *	 Otherwise we have to use page attributes (not implemented).
+ * Memory caching is platform-dependent in noMMU xtensa configurations.
+ * The following two functions should be implemented in platform code
+ * in order to enable coherent DMA memory operations when CONFIG_MMU is not
+ * enabled.
  */
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-		gfp_t flag, unsigned long attrs)
-{
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct page *page = NULL;
-
-	/* ignore region speicifiers */
-
-	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-		flag |= GFP_DMA;
-
-	if (gfpflags_allow_blocking(flag))
-		page = dma_alloc_from_contiguous(dev, count, get_order(size),
-						 flag & __GFP_NOWARN);
-
-	if (!page)
-		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
-
-	if (!page)
-		return NULL;
-
-	*handle = phys_to_dma(dev, page_to_phys(page));
-
 #ifdef CONFIG_MMU
-	if (PageHighMem(page)) {
-		void *p;
-
-		p = dma_common_contiguous_remap(page, size,
-						pgprot_noncached(PAGE_KERNEL),
-						__builtin_return_address(0));
-		if (!p) {
-			if (!dma_release_from_contiguous(dev, page, count))
-				__free_pages(page, get_order(size));
-		}
-		return p;
-	}
-#endif
-	BUG_ON(!platform_vaddr_cached(page_address(page)));
-	__invalidate_dcache_range((unsigned long)page_address(page), size);
-	return platform_vaddr_to_uncached(page_address(page));
+void *uncached_kernel_address(void *p)
+{
+	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
 }
 
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
+void *cached_kernel_address(void *p)
 {
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct page *page;
-
-	if (platform_vaddr_uncached(vaddr)) {
-		page = virt_to_page(platform_vaddr_to_cached(vaddr));
-	} else {
-#ifdef CONFIG_MMU
-		dma_common_free_remap(vaddr, size);
-#endif
-		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
-	}
-
-	if (!dma_release_from_contiguous(dev, page, count))
-		__free_pages(page, get_order(size));
+	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
 }
+#endif /* CONFIG_MMU */
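
With CONFIG_MMU the two hooks above are pure address arithmetic between
the cached and bypass KSEG aliases of the same physical memory. A noMMU
platform with a fixed uncached mirror of RAM would provide the same pair
from its platform code. A minimal hypothetical sketch (the PLAT_*_BASE
constants are illustrative, board-specific values, not real kernel
symbols):

	/* hypothetical noMMU platform code: RAM is visible cached at
	 * one base address and uncached at a fixed mirror address */
	#define PLAT_CACHED_BASE	0x60000000UL
	#define PLAT_UNCACHED_BASE	0x68000000UL

	void *uncached_kernel_address(void *p)
	{
		return p + (PLAT_UNCACHED_BASE - PLAT_CACHED_BASE);
	}

	void *cached_kernel_address(void *p)
	{
		return p - (PLAT_UNCACHED_BASE - PLAT_CACHED_BASE);
	}

Here the fixed offset plays the role of XCHAL_KSEG_BYPASS_VADDR -
XCHAL_KSEG_CACHED_VADDR in the MMU variant above.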