[Xen-devel,1/2] xen/arm: mm: Set-up page permission for Xen mappings earlier on

Message ID 20181129113836.2853-2-julien.grall@arm.com
State Superseded
Series xen/arm: mm: Boot fixes

Commit Message

Julien Grall Nov. 29, 2018, 11:38 a.m. UTC
The Xen mapping is first created using 2MB pages and then shattered into
4KB pages for fine-grained permissions. However, it is not safe to break
down a superpage without going through an intermediate step invalidating
the entry.

As we are changing Xen's own mappings, we cannot go through the intermediate
step. The only solution is to create the Xen mapping using 4KB entries
directly. As Xen should always access the mappings according to the runtime
permissions, it is then possible to set up the permissions while creating
the mapping.

We are still playing with fire as there are still some break-before-make
issues in setup_pagetables (i.e. the switch between the 2 sets of
page-tables). But it should be slightly better than the current state.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Reported-by: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
Reported-by: Jan-Peter Larsson <Jan-Peter.Larsson@arm.com>

---
    I had a few reports on new platforms where Xen reliably stalls as soon as
    SCTLR.WXN is turned on. This likely happens because we do not comply
    with Break-Before-Make when setting up the permissions, as we
    break down a superpage into 4KB mappings.
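
    For reference, below is a minimal sketch of the Break-Before-Make
    sequence referred to above. The lpae_t layout and helpers are
    simplified stand-ins for Xen's real definitions, not the actual API:

    /* Simplified stand-ins for Xen's lpae_t, write_pte() and TLB flush. */
    typedef struct { struct { unsigned int valid:1; } pt; } lpae_t;
    static void write_pte(lpae_t *p, lpae_t pte) { *p = pte; /* + barrier */ }
    static void flush_tlb_entry(unsigned long va) { (void)va; /* TLBI; DSB; ISB */ }

    /*
     * To safely replace a live (super)page entry, it must first be
     * invalidated and the TLB flushed before the replacement is written.
     * Skipping the "break" step risks the MMU holding two conflicting
     * translations for the same VA.
     */
    static void bbm_replace_entry(lpae_t *entry, lpae_t new_pte, unsigned long va)
    {
        lpae_t invalid = *entry;

        invalid.pt.valid = 0;       /* 1. Break: invalidate the live entry */
        write_pte(entry, invalid);

        flush_tlb_entry(va);        /* 2. Flush stale translations */

        write_pte(entry, new_pte);  /* 3. Make: install the new entry */
    }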
---
 xen/arch/arm/mm.c | 49 ++++++++++++++++++++++---------------------------
 1 file changed, 22 insertions(+), 27 deletions(-)

Comments

Shameerali Kolothum Thodi Nov. 29, 2018, 4:51 p.m. UTC | #1
Hi Julien,

> -----Original Message-----
> From: Xen-devel [mailto:xen-devel-bounces@lists.xenproject.org] On Behalf Of
> Julien Grall
> Sent: 29 November 2018 11:39
> To: julien.grall@arm.com; xen-devel@lists.xenproject.org
> Cc: Stefano Stabellini <sstabellini@kernel.org>
> Subject: [Xen-devel] [PATCH 1/2] xen/arm: mm: Set-up page permission for Xen
> mappings earlier on
>
> [...]
>
>     I had a few reports on new platforms where Xen reliably stalls as soon as
>     SCTLR.WXN is turned on. This likely happens because we do not comply
>     with Break-Before-Make when setting up the permissions, as we
>     break down a superpage into 4KB mappings.


Thanks for this. I can confirm that one of our platforms, on which we observed
the boot stall just after SCTLR.WXN is turned on, can now boot with this
patch.

FWIW:
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>


Cheers,
Shameer

Patch

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 987fcb9162..2556e57a99 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -649,11 +649,31 @@  void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
     }
 #endif
 
+    /* Break up the Xen mapping into 4k pages and protect them separately. */
+    for ( i = 0; i < LPAE_ENTRIES; i++ )
+    {
+        mfn_t mfn = mfn_add(maddr_to_mfn(xen_paddr), i);
+        unsigned long va = XEN_VIRT_START + (i << PAGE_SHIFT);
+
+        if ( !is_kernel(va) )
+            break;
+        pte = mfn_to_xen_entry(mfn, MT_NORMAL);
+        pte.pt.table = 1; /* 4k mappings always have this bit set */
+        if ( is_kernel_text(va) || is_kernel_inittext(va) )
+        {
+            pte.pt.xn = 0;
+            pte.pt.ro = 1;
+        }
+        if ( is_kernel_rodata(va) )
+            pte.pt.ro = 1;
+        xen_xenmap[i] = pte;
+    }
+
     /* Initialise xen second level entries ... */
     /* ... Xen's text etc */
 
-    pte = mfn_to_xen_entry(maddr_to_mfn(xen_paddr), MT_NORMAL);
-    pte.pt.xn = 0;/* Contains our text mapping! */
+    pte = pte_of_xenaddr((vaddr_t)xen_xenmap);
+    pte.pt.table = 1;
     xen_second[second_table_offset(XEN_VIRT_START)] = pte;
 
     /* ... Fixmap */
@@ -693,31 +713,6 @@  void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
     clear_table(boot_second);
     clear_table(boot_third);
 
-    /* Break up the Xen mapping into 4k pages and protect them separately. */
-    for ( i = 0; i < LPAE_ENTRIES; i++ )
-    {
-        mfn_t mfn = mfn_add(maddr_to_mfn(xen_paddr), i);
-        unsigned long va = XEN_VIRT_START + (i << PAGE_SHIFT);
-        if ( !is_kernel(va) )
-            break;
-        pte = mfn_to_xen_entry(mfn, MT_NORMAL);
-        pte.pt.table = 1; /* 4k mappings always have this bit set */
-        if ( is_kernel_text(va) || is_kernel_inittext(va) )
-        {
-            pte.pt.xn = 0;
-            pte.pt.ro = 1;
-        }
-        if ( is_kernel_rodata(va) )
-            pte.pt.ro = 1;
-        write_pte(xen_xenmap + i, pte);
-        /* No flush required here as page table is not hooked in yet. */
-    }
-
-    pte = pte_of_xenaddr((vaddr_t)xen_xenmap);
-    pte.pt.table = 1;
-    write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte);
-    /* TLBFLUSH and ISB would be needed here, but wait until we set WXN */
-
     /* From now on, no mapping may be both writable and executable. */
     WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
     /* Flush everything after setting WXN bit. */