[v3,4/6] arm: add early_ioremap support

Message ID 1389325833-16535-5-git-send-email-msalter@redhat.com
State New

Commit Message

Mark Salter Jan. 10, 2014, 3:50 a.m. UTC
This patch uses the generic early_ioremap code to implement
early_ioremap for ARM. The ARM-specific bits come mostly from
an earlier patch from Leif Lindholm <leif.lindholm@linaro.org>
here:

  https://lkml.org/lkml/2013/10/3/279

Signed-off-by: Mark Salter <msalter@redhat.com>
Tested-by: Leif Lindholm <leif.lindholm@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
CC: linux-arm-kernel@lists.infradead.org
CC: Russell King <linux@arm.linux.org.uk>
CC: Catalin Marinas <catalin.marinas@arm.com>
CC: Will Deacon <will.deacon@arm.com>
CC: Arnd Bergmann <arnd@arndb.de>
---
 arch/arm/Kconfig              | 11 +++++
 arch/arm/include/asm/Kbuild   |  1 +
 arch/arm/include/asm/fixmap.h | 18 +++++++++
 arch/arm/include/asm/io.h     |  1 +
 arch/arm/kernel/setup.c       |  3 ++
 arch/arm/mm/Makefile          |  1 +
 arch/arm/mm/early_ioremap.c   | 93 +++++++++++++++++++++++++++++++++++++++++++
 arch/arm/mm/mmu.c             |  2 +
 8 files changed, 130 insertions(+)
 create mode 100644 arch/arm/mm/early_ioremap.c

Comments

Mark Salter Jan. 10, 2014, 1:50 p.m. UTC | #1
On Fri, 2014-01-10 at 11:34 +0000, Russell King - ARM Linux wrote:
> On Thu, Jan 09, 2014 at 10:50:31PM -0500, Mark Salter wrote:
> > +#define FIXMAP_PAGE_NORMAL (L_PTE_MT_WRITEBACK | L_PTE_YOUNG | L_PTE_PRESENT)
> > +#define FIXMAP_PAGE_IO (L_PTE_MT_DEV_NONSHARED | L_PTE_YOUNG | L_PTE_PRESENT)
> 
> Doesn't this also want L_PTE_XN for at least IO mappings - maybe for the
> normal too if no code is expected to be run from it.

Yes in both cases. I also need to fix the arm64 mem mapping for the same reason.
Thanks!
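
For illustration, the fix amounts to OR-ing L_PTE_XN into both definitions;
a sketch of the expected change, not the patch as posted:

	/* Sketch only: L_PTE_XN makes the fixmap mappings execute-never,
	 * addressing the comment above. */
	#define FIXMAP_PAGE_NORMAL \
		(L_PTE_MT_WRITEBACK | L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN)
	#define FIXMAP_PAGE_IO \
		(L_PTE_MT_DEV_NONSHARED | L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN)
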
Catalin Marinas Jan. 10, 2014, 4:58 p.m. UTC | #2
On Fri, Jan 10, 2014 at 04:11:03PM +0000, Rob Herring wrote:
> On Thu, Jan 9, 2014 at 9:50 PM, Mark Salter <msalter@redhat.com> wrote:
> > This patch uses the generic early_ioremap code to implement
> > early_ioremap for ARM. The ARM-specific bits come mostly from
> > an earlier patch from Leif Lindholm <leif.lindholm@linaro.org>
> > here:
> >
> >   https://lkml.org/lkml/2013/10/3/279
> 
> I think this will be useful to implement an arm64 style earlycon for
> arm as the current DEBUG_LL doesn't really work with multi-platform
> kernels. I started on this and quickly realized I needed the fixmap
> support.

It would be even better if you can make it more generic
(drivers/tty/serial/) and keep DEBUG_LL as a specialised case for
arch/arm (when !multi-platform).
Mark Salter Jan. 10, 2014, 8:51 p.m. UTC | #3
On Fri, 2014-01-10 at 10:11 -0600, Rob Herring wrote:
> On Thu, Jan 9, 2014 at 9:50 PM, Mark Salter <msalter@redhat.com> wrote:
> > +config EARLY_IOREMAP
> > +       depends on MMU
> 
> Is it possible to implement a !MMU version of early_ioremap that
> simply returns the phys address rather than have this dependency?

I don't think that would be too hard to do.
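
For illustration, a !MMU fallback could be as simple as handing back the
physical address. A hypothetical sketch, assuming the prototypes from the
generic early_ioremap header; nothing like this is in the posted series:

	/* No-MMU stubs (hypothetical): with no page tables there is nothing
	 * to map, so return the physical address and make unmap a no-op. */
	static inline void __iomem *
	early_ioremap(resource_size_t phys_addr, unsigned long size)
	{
		return (void __iomem *)(unsigned long)phys_addr;
	}

	static inline void early_iounmap(void __iomem *addr, unsigned long size)
	{
	}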

> 
> > +       bool "Provide early_ioremap() support for kernel initialization."
> > +       select GENERIC_EARLY_IOREMAP
> > +       help
> > +         Provide a mechanism for kernel initialisation code to temporarily
> > +         map, in a highmem-agnostic way, memory pages in before ioremap()
> > +         and friends are available (before paging_init() has run). It uses
> > +         the same virtual memory range as kmap so all early mappings must
> > +         be unapped before paging_init() is called.
> > +
> >  config SECCOMP
> >         bool
> >         prompt "Enable seccomp to safely compute untrusted bytecode"
> > diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
> > index c38b58c..49ec506 100644
> > --- a/arch/arm/include/asm/Kbuild
> > +++ b/arch/arm/include/asm/Kbuild
> > @@ -4,6 +4,7 @@ generic-y += auxvec.h
> >  generic-y += bitsperlong.h
> >  generic-y += cputime.h
> >  generic-y += current.h
> > +generic-y += early_ioremap.h
> >  generic-y += emergency-restart.h
> >  generic-y += errno.h
> >  generic-y += exec.h
> > diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
> > index 68ea615..e92b7a4 100644
> > --- a/arch/arm/include/asm/fixmap.h
> > +++ b/arch/arm/include/asm/fixmap.h
> > @@ -21,8 +21,26 @@ enum fixed_addresses {
> >         FIX_KMAP_BEGIN,
> >         FIX_KMAP_END = (FIXADDR_TOP - FIXADDR_START) >> PAGE_SHIFT,
> >         __end_of_fixed_addresses
> > +/*
> > + * 224 temporary boot-time mappings, used by early_ioremap(),
> > + * before ioremap() is functional.
> > + *
> > + * (P)re-using the FIXADDR region, which is used for highmem
> > + * later on, and statically aligned to 1MB.
> > + */
> > +#define NR_FIX_BTMAPS          32
> > +#define FIX_BTMAPS_SLOTS       7
> > +#define TOTAL_FIX_BTMAPS       (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
> > +#define FIX_BTMAP_END          FIX_KMAP_BEGIN
> > +#define FIX_BTMAP_BEGIN                (FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1)
> >  };
> >
> > +#define FIXMAP_PAGE_NORMAL (L_PTE_MT_WRITEBACK | L_PTE_YOUNG | L_PTE_PRESENT)
> > +#define FIXMAP_PAGE_IO (L_PTE_MT_DEV_NONSHARED | L_PTE_YOUNG | L_PTE_PRESENT)
> > +
> > +extern void __early_set_fixmap(enum fixed_addresses idx,
> > +                              phys_addr_t phys, pgprot_t flags);
> > +
> >  #include <asm-generic/fixmap.h>
> >
> >  #endif
> > diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
> > index fbeb39c..6b2cc53 100644
> > --- a/arch/arm/include/asm/io.h
> > +++ b/arch/arm/include/asm/io.h
> > @@ -28,6 +28,7 @@
> >  #include <asm/byteorder.h>
> >  #include <asm/memory.h>
> >  #include <asm-generic/pci_iomap.h>
> > +#include <asm/early_ioremap.h>
> >  #include <xen/xen.h>
> >
> >  /*
> > diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
> > index 987a7f5..038fb75 100644
> > --- a/arch/arm/kernel/setup.c
> > +++ b/arch/arm/kernel/setup.c
> > @@ -36,6 +36,7 @@
> >  #include <asm/cpu.h>
> >  #include <asm/cputype.h>
> >  #include <asm/elf.h>
> > +#include <asm/io.h>
> 
> Use linux/io.h?

Yes.

> 
> >  #include <asm/procinfo.h>
> >  #include <asm/psci.h>
> >  #include <asm/sections.h>
> > @@ -887,6 +888,8 @@ void __init setup_arch(char **cmdline_p)
> >
> >         parse_early_param();
> >
> > +       early_ioremap_init();
> > +
> 
> This call would need to be before parse_early_param for the earlycon to work.

Yes, like arm64 does it.
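
For illustration, that reorder amounts to hoisting the call in setup_arch();
a sketch against this v3, not a posted respin:

+	/* map fixmaps before early params so an earlycon can use early_ioremap() */
+	early_ioremap_init();
+
 	parse_early_param();
 
-	early_ioremap_init();
-
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);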


Mark Salter Jan. 10, 2014, 8:52 p.m. UTC | #4
On Fri, 2014-01-10 at 12:40 -0800, Stephen Boyd wrote:
> Just some minor nitpicks
> 
> On 01/09/14 19:50, Mark Salter wrote:
> > diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
> > index c1f1a7e..78a79a6a 100644
> > --- a/arch/arm/Kconfig
> > +++ b/arch/arm/Kconfig
> > @@ -1842,6 +1842,17 @@ config UACCESS_WITH_MEMCPY
> >  	  However, if the CPU data cache is using a write-allocate mode,
> >  	  this option is unlikely to provide any performance gain.
> >  
> > +config EARLY_IOREMAP
> > +	depends on MMU
> > +	bool "Provide early_ioremap() support for kernel initialization."
> 
> Please drop the full stop.
> 
> > +	select GENERIC_EARLY_IOREMAP
> > +	help
> > +	  Provide a mechanism for kernel initialisation code to temporarily
> > +	  map, in a highmem-agnostic way, memory pages in before ioremap()
> > +	  and friends are available (before paging_init() has run). It uses
> > +	  the same virtual memory range as kmap so all early mappings must
> > +	  be unapped before paging_init() is called.
> 
> s/unapped/unmapped/
> 

Will do. Thanks!
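
With both nitpicks folded in, the Kconfig entry would read:

config EARLY_IOREMAP
	depends on MMU
	bool "Provide early_ioremap() support for kernel initialization"
	select GENERIC_EARLY_IOREMAP
	help
	  Provide a mechanism for kernel initialisation code to temporarily
	  map, in a highmem-agnostic way, memory pages in before ioremap()
	  and friends are available (before paging_init() has run). It uses
	  the same virtual memory range as kmap so all early mappings must
	  be unmapped before paging_init() is called.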


Mark Salter Jan. 16, 2014, 2:13 a.m. UTC | #5
On Wed, 2014-01-15 at 16:32 -0800, Laura Abbott wrote:
> On 1/9/2014 7:50 PM, Mark Salter wrote:
> ....
> > diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
> > index 987a7f5..038fb75 100644
> > --- a/arch/arm/kernel/setup.c
> > +++ b/arch/arm/kernel/setup.c
> > @@ -36,6 +36,7 @@
> >   #include <asm/cpu.h>
> >   #include <asm/cputype.h>
> >   #include <asm/elf.h>
> > +#include <asm/io.h>
> >   #include <asm/procinfo.h>
> >   #include <asm/psci.h>
> >   #include <asm/sections.h>
> > @@ -887,6 +888,8 @@ void __init setup_arch(char **cmdline_p)
> >
> >   	parse_early_param();
> >
> > +	early_ioremap_init();
> > +
> >   	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
> >
> >   	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
> 
> Any chance we could do this even earlier, say right after 
> setup_processor? It would be nice to be able to do this while FDT 
> scanning (I have a use case in mind, potential patches are in the works)

It looks like that would work...
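
For illustration, moving it up would put the call right after the CPU setup,
roughly like this (a sketch assuming the 3.13-era setup_arch() ordering; the
exact neighbouring calls would need checking against the tree):

	setup_processor();
	early_ioremap_init();	/* early fixmaps usable during FDT scanning */
	mdesc = setup_machine_fdt(__atags_pointer);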


Patch

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c1f1a7e..78a79a6a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1842,6 +1842,17 @@  config UACCESS_WITH_MEMCPY
 	  However, if the CPU data cache is using a write-allocate mode,
 	  this option is unlikely to provide any performance gain.
 
+config EARLY_IOREMAP
+	depends on MMU
+	bool "Provide early_ioremap() support for kernel initialization."
+	select GENERIC_EARLY_IOREMAP
+	help
+	  Provide a mechanism for kernel initialisation code to temporarily
+	  map, in a highmem-agnostic way, memory pages in before ioremap()
+	  and friends are available (before paging_init() has run). It uses
+	  the same virtual memory range as kmap so all early mappings must
+	  be unapped before paging_init() is called.
+
 config SECCOMP
 	bool
 	prompt "Enable seccomp to safely compute untrusted bytecode"
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index c38b58c..49ec506 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -4,6 +4,7 @@  generic-y += auxvec.h
 generic-y += bitsperlong.h
 generic-y += cputime.h
 generic-y += current.h
+generic-y += early_ioremap.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 68ea615..e92b7a4 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -21,8 +21,26 @@  enum fixed_addresses {
 	FIX_KMAP_BEGIN,
 	FIX_KMAP_END = (FIXADDR_TOP - FIXADDR_START) >> PAGE_SHIFT,
 	__end_of_fixed_addresses
+/*
+ * 224 temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+ *
+ * (P)re-using the FIXADDR region, which is used for highmem
+ * later on, and statically aligned to 1MB.
+ */
+#define NR_FIX_BTMAPS		32
+#define FIX_BTMAPS_SLOTS	7
+#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+#define FIX_BTMAP_END		FIX_KMAP_BEGIN
+#define FIX_BTMAP_BEGIN		(FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1)
 };
 
+#define FIXMAP_PAGE_NORMAL (L_PTE_MT_WRITEBACK | L_PTE_YOUNG | L_PTE_PRESENT)
+#define FIXMAP_PAGE_IO (L_PTE_MT_DEV_NONSHARED | L_PTE_YOUNG | L_PTE_PRESENT)
+
+extern void __early_set_fixmap(enum fixed_addresses idx,
+			       phys_addr_t phys, pgprot_t flags);
+
 #include <asm-generic/fixmap.h>
 
 #endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index fbeb39c..6b2cc53 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -28,6 +28,7 @@ 
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
+#include <asm/early_ioremap.h>
 #include <xen/xen.h>
 
 /*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 987a7f5..038fb75 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -36,6 +36,7 @@ 
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
+#include <asm/io.h>
 #include <asm/procinfo.h>
 #include <asm/psci.h>
 #include <asm/sections.h>
@@ -887,6 +888,8 @@  void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
+	early_ioremap_init();
+
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
 
 	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index ecfe6e5..fea855e 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -15,6 +15,7 @@  endif
 obj-$(CONFIG_MODULES)		+= proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
+obj-$(CONFIG_EARLY_IOREMAP)	+= early_ioremap.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 
diff --git a/arch/arm/mm/early_ioremap.c b/arch/arm/mm/early_ioremap.c
new file mode 100644
index 0000000..c3e2bf2
--- /dev/null
+++ b/arch/arm/mm/early_ioremap.c
@@ -0,0 +1,93 @@ 
+/*
+ * early_ioremap() support for ARM
+ *
+ * Based on existing support in arch/x86/mm/ioremap.c
+ *
+ * Restrictions: currently only functional before paging_init()
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/fixmap.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include <asm/mach/map.h>
+
+static pte_t bm_pte[PTRS_PER_PTE] __aligned(PTE_HWTABLE_SIZE) __initdata;
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+	unsigned int index = pgd_index(addr);
+	pgd_t *pgd = cpu_get_pgd() + index;
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
+}
+
+static inline pte_t * __init early_ioremap_pte(unsigned long addr)
+{
+	return &bm_pte[pte_index(addr)];
+}
+
+void __init early_ioremap_init(void)
+{
+	pmd_t *pmd;
+
+	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+
+	pmd_populate_kernel(NULL, pmd, bm_pte);
+
+	/*
+	 * Make sure we don't span multiple pmds.
+	 */
+	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+		WARN_ON(1);
+		pr_warn("pmd %p != %p\n",
+			pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
+		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+			fix_to_virt(FIX_BTMAP_BEGIN));
+		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
+			fix_to_virt(FIX_BTMAP_END));
+		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
+		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
+	}
+
+	early_ioremap_setup();
+}
+
+void __init __early_set_fixmap(enum fixed_addresses idx,
+			       phys_addr_t phys, pgprot_t flags)
+{
+	unsigned long addr = __fix_to_virt(idx);
+	pte_t *pte;
+	u64 desc;
+
+	if (idx > FIX_KMAP_END) {
+		BUG();
+		return;
+	}
+	pte = early_ioremap_pte(addr);
+
+	if (pgprot_val(flags))
+		set_pte_at(NULL, 0xfff00000, pte,
+			   pfn_pte(phys >> PAGE_SHIFT, flags));
+	else
+		pte_clear(NULL, addr, pte);
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+	desc = *pte;
+}
+
+void __init
+early_ioremap_shutdown(void)
+{
+	pmd_t *pmd;
+	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+	pmd_clear(pmd);
+}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 580ef2d..bef59b9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -34,6 +34,7 @@ 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/pci.h>
+#include <asm/early_ioremap.h>
 
 #include "mm.h"
 #include "tcm.h"
@@ -1405,6 +1406,7 @@  void __init paging_init(const struct machine_desc *mdesc)
 {
 	void *zero_page;
 
+	early_ioremap_reset();
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();