@@ -19897,6 +19897,14 @@ S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
F: drivers/staging/
+STANDALONE CACHE CONTROLLER DRIVERS
+M: Conor Dooley <conor@kernel.org>
+L: linux-riscv@lists.infradead.org
+S: Maintained
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F: drivers/cache/
+F: include/cache/
+
STARFIRE/DURALAN NETWORK DRIVER
M: Ion Badulescu <ionut@badula.org>
S: Odd Fixes
@@ -15,6 +15,8 @@ source "drivers/base/Kconfig"
source "drivers/bus/Kconfig"
+source "drivers/cache/Kconfig"
+
source "drivers/connector/Kconfig"
source "drivers/firmware/Kconfig"
@@ -11,6 +11,7 @@ ifdef building_out_of_srctree
MAKEFLAGS += --include-dir=$(srctree)
endif
+obj-y += cache/
obj-y += irqchip/
obj-y += bus/
new file mode 100644
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Cache Drivers"
+
+config AX45MP_L2_CACHE
+ bool "Andes Technology AX45MP L2 Cache controller"
+ depends on RISCV && RISCV_DMA_NONCOHERENT
+ help
+ Support for the L2 cache controller on Andes Technology AX45MP platforms.
+
+endmenu
new file mode 100644
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
new file mode 100644
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * non-coherent cache functions for Andes AX45MP
+ *
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+
+#include <linux/cacheflush.h>
+#include <linux/cacheinfo.h>
+#include <linux/dma-direction.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <asm/dma-noncoherent.h>
+
+/* L2 cache registers */
+#define AX45MP_L2C_REG_CTL_OFFSET 0x8
+
+#define AX45MP_L2C_REG_C0_CMD_OFFSET 0x40
+#define AX45MP_L2C_REG_C0_ACC_OFFSET 0x48
+#define AX45MP_L2C_REG_STATUS_OFFSET 0x80
+
+/* D-cache operation */
+#define AX45MP_CCTL_L1D_VA_INVAL 0 /* Invalidate an L1 cache entry */
+#define AX45MP_CCTL_L1D_VA_WB 1 /* Write-back an L1 cache entry */
+
+/* L2 CCTL status */
+#define AX45MP_CCTL_L2_STATUS_IDLE 0
+
+/* L2 CCTL status cores mask */
+#define AX45MP_CCTL_L2_STATUS_C0_MASK 0xf
+
+/* L2 cache operation */
+#define AX45MP_CCTL_L2_PA_INVAL 0x8 /* Invalidate an L2 cache entry */
+#define AX45MP_CCTL_L2_PA_WB 0x9 /* Write-back an L2 cache entry */
+
+#define AX45MP_L2C_REG_PER_CORE_OFFSET 0x10
+#define AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET 4
+
+#define AX45MP_L2C_REG_CN_CMD_OFFSET(n) \
+ (AX45MP_L2C_REG_C0_CMD_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
+#define AX45MP_L2C_REG_CN_ACC_OFFSET(n) \
+ (AX45MP_L2C_REG_C0_ACC_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
+#define AX45MP_CCTL_L2_STATUS_CN_MASK(n) \
+ (AX45MP_CCTL_L2_STATUS_C0_MASK << ((n) * AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET))
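+/* e.g. hart 1: CMD at 0x40 + 0x10 = 0x50, ACC at 0x58, status field in bits [7:4] */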
+
+#define AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM 0x80b
+#define AX45MP_CCTL_REG_UCCTLCOMMAND_NUM 0x80c
+
+#define AX45MP_CACHE_LINE_SIZE 64
+
+struct ax45mp_priv {
+ void __iomem *l2c_base;
+ u32 ax45mp_cache_line_size;
+};
+
+static struct ax45mp_priv *ax45mp_priv;
+
+/* L2 Cache operations */
+static inline u32 ax45mp_cpu_l2c_get_cctl_status(void)
+{
+ return readl(ax45mp_priv->l2c_base + AX45MP_L2C_REG_STATUS_OFFSET);
+}
+
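+/*
+ * Callers must run with IRQs disabled (the dma_wb/dma_inval range helpers
+ * below take care of this), which also makes smp_processor_id() safe to
+ * use here; the logical CPU number is assumed to map 1:1 onto the
+ * per-core CCTL register banks.
+ */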
+static void ax45mp_cpu_cache_operation(unsigned long start, unsigned long end,
+ unsigned long line_size, unsigned int l1_op,
+ unsigned int l2_op)
+{
+ void __iomem *base = ax45mp_priv->l2c_base;
+ int mhartid = smp_processor_id();
+ unsigned long pa;
+
+ while (end > start) {
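+		/* L1: operate on the line by virtual address via the uCCTL CSRs */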
+ csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
+ csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, l1_op);
+
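+		/* L2: operate on the same line by physical address via MMIO */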
+ pa = virt_to_phys((void *)start);
+ writel(pa, base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid));
+ writel(l2_op, base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid));
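+		/* Busy-wait until this hart's L2 CCTL command completes */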
+ while ((ax45mp_cpu_l2c_get_cctl_status() &
+ AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
+ AX45MP_CCTL_L2_STATUS_IDLE)
+ ;
+
+ start += line_size;
+ }
+}
+
+/* Write-back L1 and L2 cache entry */
+static inline void ax45mp_cpu_dcache_wb_range(unsigned long start, unsigned long end,
+ unsigned long line_size)
+{
+ ax45mp_cpu_cache_operation(start, end, line_size,
+ AX45MP_CCTL_L1D_VA_WB,
+ AX45MP_CCTL_L2_PA_WB);
+}
+
+/* Invalidate the L1 and L2 cache entry */
+static inline void ax45mp_cpu_dcache_inval_range(unsigned long start, unsigned long end,
+ unsigned long line_size)
+{
+ ax45mp_cpu_cache_operation(start, end, line_size,
+ AX45MP_CCTL_L1D_VA_INVAL,
+ AX45MP_CCTL_L2_PA_INVAL);
+}
+
+static void ax45mp_cpu_dma_inval_range(unsigned long vaddr, unsigned long size)
+{
+ char cache_buf[2][AX45MP_CACHE_LINE_SIZE];
+ unsigned long start = vaddr;
+ unsigned long end = start + size;
+ unsigned long old_start = start;
+ unsigned long old_end = end;
+ unsigned long line_size;
+ unsigned long flags;
+
+ if (unlikely(start == end))
+ return;
+
+ line_size = ax45mp_priv->ax45mp_cache_line_size;
+
+ memset(&cache_buf, 0x0, sizeof(cache_buf));
+ start = start & (~(line_size - 1));
+ end = ((end + line_size - 1) & (~(line_size - 1)));
+
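+	/*
+	 * If the range is not cache-line aligned, the first and last lines
+	 * also hold data adjacent to the buffer; save those lines so the
+	 * bytes outside the range can be restored after the invalidate
+	 * discards them.
+	 */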
+ local_irq_save(flags);
+ if (unlikely(start != old_start))
+ memcpy(&cache_buf[0][0], (void *)start, line_size);
+
+ if (unlikely(end != old_end))
+ memcpy(&cache_buf[1][0], (void *)(old_end & (~(line_size - 1))), line_size);
+
+ ax45mp_cpu_dcache_inval_range(start, end, line_size);
+
+ if (unlikely(start != old_start))
+ memcpy((void *)start, &cache_buf[0][0], (old_start & (line_size - 1)));
+
+	if (unlikely(end != old_end))
+		memcpy((void *)old_end,
+		       &cache_buf[1][old_end & (line_size - 1)],
+		       end - old_end);
+
+	local_irq_restore(flags);
+}
+
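+/*
+ * Write-back is non-destructive, so partially covered lines need no
+ * save/restore here, unlike the invalidate path above.
+ */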
+static void ax45mp_cpu_dma_wb_range(unsigned long vaddr, unsigned long size)
+{
+ unsigned long start = vaddr;
+ unsigned long end = start + size;
+ unsigned long line_size;
+ unsigned long flags;
+
+ line_size = ax45mp_priv->ax45mp_cache_line_size;
+ local_irq_save(flags);
+ start = start & (~(line_size - 1));
+ ax45mp_cpu_dcache_wb_range(start, end, line_size);
+ local_irq_restore(flags);
+}
+
+static void ax45mp_cpu_dma_flush_range(unsigned long vaddr, unsigned long size)
+{
+ ax45mp_cpu_dma_wb_range(vaddr, size);
+ ax45mp_cpu_dma_inval_range(vaddr, size);
+}
+
+static void ax45mp_get_l2_line_size(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv->ax45mp_cache_line_size);
+	if (ret) {
+		dev_warn(dev, "Failed to get cache-line-size, defaulting to 64 bytes\n");
+		ax45mp_priv->ax45mp_cache_line_size = AX45MP_CACHE_LINE_SIZE;
+	}
+
+	if (ax45mp_priv->ax45mp_cache_line_size != AX45MP_CACHE_LINE_SIZE) {
+		dev_warn(dev, "Expected cache-line-size of 64 bytes (found %u), defaulting to 64 bytes\n",
+			 ax45mp_priv->ax45mp_cache_line_size);
+		ax45mp_priv->ax45mp_cache_line_size = AX45MP_CACHE_LINE_SIZE;
+	}
+}
+
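+/*
+ * These callbacks back the arch DMA sync hooks (see
+ * <asm/dma-noncoherent.h>): clean before a buffer is handed to a device,
+ * invalidate before the CPU reads back DMA'd data, and flush for
+ * bidirectional transfers.
+ */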
+static const struct riscv_cache_ops ax45mp_cmo_ops = {
+ .clean_range = &ax45mp_cpu_dma_wb_range,
+ .inv_range = &ax45mp_cpu_dma_inval_range,
+ .flush_range = &ax45mp_cpu_dma_flush_range,
+};
+
+static int ax45mp_l2c_probe(struct platform_device *pdev)
+{
+	/*
+	 * If IOCP is present on the Andes AX45MP core, riscv_cbom_block_size
+	 * is guaranteed to be 0, so it can be relied upon here: when it is 0
+	 * there are no CMOs to handle in software, so return success
+	 * immediately. Only when it is set does the probe continue.
+	 */
+ if (!riscv_cbom_block_size)
+ return 0;
+
+ ax45mp_priv = devm_kzalloc(&pdev->dev, sizeof(*ax45mp_priv), GFP_KERNEL);
+ if (!ax45mp_priv)
+ return -ENOMEM;
+
+ ax45mp_priv->l2c_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ax45mp_priv->l2c_base))
+ return PTR_ERR(ax45mp_priv->l2c_base);
+
+ ax45mp_get_l2_line_size(pdev);
+
+ riscv_noncoherent_register_cache_ops(&ax45mp_cmo_ops);
+
+ return 0;
+}
+
+static const struct of_device_id ax45mp_cache_ids[] = {
+ { .compatible = "andestech,ax45mp-cache" },
+ { /* sentinel */ }
+};
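+
+/*
+ * A minimal, hypothetical device tree node this driver would bind to
+ * (the unit address, reg values and cache geometry are illustrative
+ * only; cache-line-size must be 64 to match the driver's expectation):
+ *
+ *	cache-controller@2010000 {
+ *		compatible = "andestech,ax45mp-cache", "cache";
+ *		reg = <0x0 0x2010000 0x0 0x1000>;
+ *		cache-line-size = <64>;
+ *		cache-level = <2>;
+ *		cache-unified;
+ *	};
+ */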
+
+static struct platform_driver ax45mp_l2c_driver = {
+ .driver = {
+ .name = "ax45mp-l2c",
+ .of_match_table = ax45mp_cache_ids,
+ },
+ .probe = ax45mp_l2c_probe,
+};
+
+static int __init ax45mp_cache_init(void)
+{
+ return platform_driver_register(&ax45mp_l2c_driver);
+}
+arch_initcall(ax45mp_cache_init);