[v5,1/2] target/s390x: Implement the MVPG condition-code-option bit

Message ID 20210311161747.129834-2-david@redhat.com
State New
Series [v5,1/2] target/s390x: Implement the MVPG condition-code-option bit

Commit Message

David Hildenbrand March 11, 2021, 4:17 p.m. UTC
From: Richard Henderson <richard.henderson@linaro.org>


If the CCO bit is set, MVPG should not generate an exception on a
page-translation fault, but instead report the fault via the condition code.

Create a new helper, access_prepare_nf, which can use probe_access_flags
in non-faulting mode, and then handle watchpoints.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

[thuth: Added logic to still inject protection exceptions]
Signed-off-by: Thomas Huth <thuth@redhat.com>

[david: Look at env->tlb_fill_exc to determine if there was an exception]
Signed-off-by: David Hildenbrand <david@redhat.com>

---
 target/s390x/cpu.h         |   5 ++
 target/s390x/excp_helper.c |   3 +
 target/s390x/mem_helper.c  | 124 ++++++++++++++++++++++++++++++-------
 3 files changed, 110 insertions(+), 22 deletions(-)

-- 
2.29.2

Comments

Richard Henderson March 11, 2021, 5:02 p.m. UTC | #1
On 3/11/21 10:17 AM, David Hildenbrand wrote:
> +#if !defined(CONFIG_USER_ONLY)
> +    if (env->tlb_fill_exc) {
> +        return env->tlb_fill_exc;
> +    }
> +#else
> +    if (!haddr1) {
> +        env->__excp_addr = vaddr1;
> +        return PGM_ADDRESSING;
> +    }
> +#endif


For user-only, we can rely on TLB_INVALID_MASK, and check once for both pages.
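
As a point of reference (not code from this series), the user-only variant
Richard describes could look roughly like the sketch below, using the names
from this patch. It relies on the nonfault probe_access_flags() setting
TLB_INVALID_MASK in the returned flags when a page is unmapped in the
user-only build:

    /* Sketch only: probe both pages, then test the accumulated flags once. */
    flags = probe_access_flags(env, vaddr1, access_type, mmu_idx,
                               true, &haddr1, ra);
    if (size2) {
        flags |= probe_access_flags(env, vaddr2, access_type, mmu_idx,
                                    true, &haddr2, ra);
    }
    if (flags & TLB_INVALID_MASK) {
        /* At least one of the two pages is not mapped. */
        return PGM_ADDRESSING;
    }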


> @@ -858,13 +925,26 @@ uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
>       /*
>        * TODO:
>        * - Access key handling
> -     * - CC-option with surpression of page-translation exceptions
>        * - Store r1/r2 register identifiers at real location 162
>        */
> -    srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
> -                          ra);
> -    desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
> -                           ra);
> +    exc = access_prepare_nf(&srca, env, cco, r2, TARGET_PAGE_SIZE,
> +                            MMU_DATA_LOAD, mmu_idx, ra);
> +    if (exc) {
> +        return 2;
> +    }
> +    exc = access_prepare_nf(&desta, env, cco, r1, TARGET_PAGE_SIZE,
> +                            MMU_DATA_STORE, mmu_idx, ra);
> +    if (exc) {
> +#if !defined(CONFIG_USER_ONLY)
> +        if (exc == PGM_PROTECTION) {
> +            stq_phys(env_cpu(env)->as,
> +                     env->psa + offsetof(LowCore, trans_exc_code),
> +                     env->tlb_fill_tec);
> +            tcg_s390_program_interrupt(env, PGM_PROTECTION, ra);
> +        }
> +#endif
> +        return 1;
> +    }
>       access_memmove(env, &desta, &srca, ra);
>       return 0; /* data moved */
>   }


If we're going to have an ifdef at all here, it should be around the entire 
helper -- this is a privileged operation.


r~
David Hildenbrand March 11, 2021, 5:12 p.m. UTC | #2
On 11.03.21 18:02, Richard Henderson wrote:
> On 3/11/21 10:17 AM, David Hildenbrand wrote:
>> +#if !defined(CONFIG_USER_ONLY)
>> +    if (env->tlb_fill_exc) {
>> +        return env->tlb_fill_exc;
>> +    }
>> +#else
>> +    if (!haddr1) {
>> +        env->__excp_addr = vaddr1;
>> +        return PGM_ADDRESSING;
>> +    }
>> +#endif
> 
> For user-only, we can rely on TLB_INVALID_MASK, and check once for both pages.


Then, I cannot set the proper vaddr1 vs. vaddr2 (see patch #2).


>> @@ -858,13 +925,26 @@ uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
>>        /*
>>         * TODO:
>>         * - Access key handling
>> -     * - CC-option with surpression of page-translation exceptions
>>         * - Store r1/r2 register identifiers at real location 162
>>         */
>> -    srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
>> -                          ra);
>> -    desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
>> -                           ra);
>> +    exc = access_prepare_nf(&srca, env, cco, r2, TARGET_PAGE_SIZE,
>> +                            MMU_DATA_LOAD, mmu_idx, ra);
>> +    if (exc) {
>> +        return 2;
>> +    }
>> +    exc = access_prepare_nf(&desta, env, cco, r1, TARGET_PAGE_SIZE,
>> +                            MMU_DATA_STORE, mmu_idx, ra);
>> +    if (exc) {
>> +#if !defined(CONFIG_USER_ONLY)
>> +        if (exc == PGM_PROTECTION) {
>> +            stq_phys(env_cpu(env)->as,
>> +                     env->psa + offsetof(LowCore, trans_exc_code),
>> +                     env->tlb_fill_tec);
>> +            tcg_s390_program_interrupt(env, PGM_PROTECTION, ra);
>> +        }
>> +#endif
>> +        return 1;
>> +    }
>>        access_memmove(env, &desta, &srca, ra);
>>        return 0; /* data moved */
>>    }
> 
> If we're going to have an ifdef at all here, it should be around the entire
> helper -- this is a privileged operation.



"Privileged operation (access key specified, and selected PSW-key-mask bit
is zero in the problem state)"

Without an access key in GR0, we're using the PSW key - which should 
always work, no?

What am I missing?

-- 
Thanks,

David / dhildenb
Richard Henderson March 11, 2021, 5:26 p.m. UTC | #3
On 3/11/21 11:12 AM, David Hildenbrand wrote:
> Without an access key in GR0, we're using the PSW key - which should always
> work, no?
> 
> What am I missing?


Nothing.  Insufficiently close reading on my part.


r~
Richard Henderson March 11, 2021, 5:52 p.m. UTC | #4
On 3/11/21 10:17 AM, David Hildenbrand wrote:
> +#if !defined(CONFIG_USER_ONLY)
> +    /*
> +     * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
> +     * to detect if there was an exception during tlb_fill().
> +     */
> +    env->tlb_fill_exc = 0;
> +#endif
> +    flags = probe_access_flags(env, vaddr1, access_type, mmu_idx,
> +                               nofault, &haddr1, ra);
> +#if !defined(CONFIG_USER_ONLY)
> +    if (env->tlb_fill_exc) {
> +        return env->tlb_fill_exc;
> +    }
> +#else
> +    if (!haddr1) {
> +        env->__excp_addr = vaddr1;
> +        return PGM_ADDRESSING;
> +    }
> +#endif


The assumption of PGM_ADDRESSING is incorrect here -- it could still be 
PGM_PROTECTION, depending on how the page is mapped.

I guess this should be done like

#ifdef CONFIG_USER_ONLY
     flags = page_get_flags(vaddr1);
     if (!flags & (access_type == MMU_DATA_LOAD
                   ? PAGE_READ : PAGE_WRITE)) {
         env->__excp_addr = vaddr1;
         if (nofault) {
             return (flags & PAGE_VALID
                 ? PGM_PROTECTION : PGM_ADDRESSING);
         }
         raise exception.
     }
     haddr1 = g2h(vaddr1);
#else
     env->tlb_fill_exc = 0;
     flags = probe_access_flags(...);
     if (env->tlb_fill_exc) {
         return env->tlb_fill_exc;
     }
#endif

which is pretty ugly, but no worse than what you have above.


r~
David Hildenbrand March 11, 2021, 6:16 p.m. UTC | #5
On 11.03.21 18:52, Richard Henderson wrote:
> On 3/11/21 10:17 AM, David Hildenbrand wrote:
>> +#if !defined(CONFIG_USER_ONLY)
>> +    /*
>> +     * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
>> +     * to detect if there was an exception during tlb_fill().
>> +     */
>> +    env->tlb_fill_exc = 0;
>> +#endif
>> +    flags = probe_access_flags(env, vaddr1, access_type, mmu_idx,
>> +                               nofault, &haddr1, ra);
>> +#if !defined(CONFIG_USER_ONLY)
>> +    if (env->tlb_fill_exc) {
>> +        return env->tlb_fill_exc;
>> +    }
>> +#else
>> +    if (!haddr1) {
>> +        env->__excp_addr = vaddr1;
>> +        return PGM_ADDRESSING;
>> +    }
>> +#endif
> 
> The assumption of PGM_ADDRESSING is incorrect here -- it could still be
> PGM_PROTECTION, depending on how the page is mapped.


Interesting, I was only looking at the s390x tlb_fill() implementation. 
But I assume these checks are performed in common code.

> I guess this should be done like
> 
> #ifdef CONFIG_USER_ONLY
>      flags = page_get_flags(vaddr1);
>      if (!flags & (access_type == MMU_DATA_LOAD
>                    ? PAGE_READ : PAGE_WRITE)) {
>          env->__excp_addr = vaddr1;
>          if (nofault) {
>              return (flags & PAGE_VALID
>                  ? PGM_PROTECTION : PGM_ADDRESSING);
>          }
>          raise exception.
>      }
>      haddr1 = g2h(vaddr1);
> #else
>      env->tlb_fill_exc = 0;
>      flags = probe_access_flags(...);
>      if (env->tlb_fill_exc) {
>          return env->tlb_fill_exc;
>      }
> #endif
> 
> which is pretty ugly, but no worse than what you have above.


Thanks, maybe I can factor that out in a nice way. I guess we could do 
the access via probe_access_flags() and only on error do the 
page_get_flags()?


-- 
Thanks,

David / dhildenb
Richard Henderson March 11, 2021, 6:24 p.m. UTC | #6
On 3/11/21 12:16 PM, David Hildenbrand wrote:
> On 11.03.21 18:52, Richard Henderson wrote:
>> On 3/11/21 10:17 AM, David Hildenbrand wrote:
>>> +#if !defined(CONFIG_USER_ONLY)
>>> +    /*
>>> +     * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
>>> +     * to detect if there was an exception during tlb_fill().
>>> +     */
>>> +    env->tlb_fill_exc = 0;
>>> +#endif
>>> +    flags = probe_access_flags(env, vaddr1, access_type, mmu_idx,
>>> +                               nofault, &haddr1, ra);
>>> +#if !defined(CONFIG_USER_ONLY)
>>> +    if (env->tlb_fill_exc) {
>>> +        return env->tlb_fill_exc;
>>> +    }
>>> +#else
>>> +    if (!haddr1) {
>>> +        env->__excp_addr = vaddr1;
>>> +        return PGM_ADDRESSING;
>>> +    }
>>> +#endif
>>
>> The assumption of PGM_ADDRESSING is incorrect here -- it could still be
>> PGM_PROTECTION, depending on how the page is mapped.
> 
> Interesting, I was only looking at the s390x tlb_fill() implementation. But I
> assume these checks are performed in common code.


Actually, no.  It's a common bug in our linux-user targets, where we don't fill 
in the SIGSEGV si_code correctly.

See e.g. 8db94ab4e5d.

> Thanks, maybe I can factor that out in a nice way. I guess we could do the
> access via probe_access_flags() and only on error do the page_get_flags()?


Yes, we could do that.  It's certainly better for !nofault, which is the 
common-case user of this function.


r~
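
Sketching the factoring David proposes (user-only path; an illustration
under the thread's assumptions, not code from this series): do the nonfault
probe first, and consult page_get_flags() only when it fails, to pick
between PGM_PROTECTION and PGM_ADDRESSING:

    flags = probe_access_flags(env, vaddr1, access_type, mmu_idx,
                               nofault, &haddr1, ra);
    if (flags & TLB_INVALID_MASK) {
        /* Only reachable with nofault=true; otherwise the probe raised. */
        int pflags = page_get_flags(vaddr1);

        env->__excp_addr = vaddr1;
        return (pflags & PAGE_VALID) ? PGM_PROTECTION : PGM_ADDRESSING;
    }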

Patch

diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index 60d434d5ed..468b4430f3 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -114,6 +114,11 @@  struct CPUS390XState {
 
     uint64_t diag318_info;
 
+#if !defined(CONFIG_USER_ONLY)
+    uint64_t tlb_fill_tec;   /* translation exception code during tlb_fill */
+    int tlb_fill_exc;        /* exception number seen during tlb_fill */
+#endif
+
     /* Fields up to this point are cleared by a CPU reset */
     struct {} end_reset_fields;
 
diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c
index ce16af394b..c48cd6b46f 100644
--- a/target/s390x/excp_helper.c
+++ b/target/s390x/excp_helper.c
@@ -164,6 +164,9 @@  bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
         tec = 0; /* unused */
     }
 
+    env->tlb_fill_exc = excp;
+    env->tlb_fill_tec = tec;
+
     if (!excp) {
         qemu_log_mask(CPU_LOG_MMU,
                       "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index 25cfede806..ebb55884c9 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -130,28 +130,93 @@  typedef struct S390Access {
     int mmu_idx;
 } S390Access;
 
-static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
-                                 MMUAccessType access_type, int mmu_idx,
-                                 uintptr_t ra)
+/*
+ * With nofault=1, return the PGM_ exception that would have been injected
+ * into the guest; return 0 if no exception was detected.
+ *
+ * For !CONFIG_USER_ONLY, the TEC is stored to env->tlb_fill_tec.
+ * For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr.
+ */
+static int access_prepare_nf(S390Access *access, CPUS390XState *env,
+                             bool nofault, vaddr vaddr1, int size,
+                             MMUAccessType access_type,
+                             int mmu_idx, uintptr_t ra)
 {
-    S390Access access = {
-        .vaddr1 = vaddr,
-        .size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK)),
-        .mmu_idx = mmu_idx,
-    };
+    void *haddr1, *haddr2 = NULL;
+    int size1, size2;
+    vaddr vaddr2 = 0;
+    int flags;
+
+    assert(size > 0 && size <= 4096);
 
-    g_assert(size > 0 && size <= 4096);
-    access.haddr1 = probe_access(env, access.vaddr1, access.size1, access_type,
-                                 mmu_idx, ra);
+    size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK));
+    size2 = size - size1;
 
-    if (unlikely(access.size1 != size)) {
+#if !defined(CONFIG_USER_ONLY)
+    /*
+     * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
+     * to detect if there was an exception during tlb_fill().
+     */
+    env->tlb_fill_exc = 0;
+#endif
+    flags = probe_access_flags(env, vaddr1, access_type, mmu_idx,
+                               nofault, &haddr1, ra);
+#if !defined(CONFIG_USER_ONLY)
+    if (env->tlb_fill_exc) {
+        return env->tlb_fill_exc;
+    }
+#else
+    if (!haddr1) {
+        env->__excp_addr = vaddr1;
+        return PGM_ADDRESSING;
+    }
+#endif
+    if (unlikely(size2)) {
         /* The access crosses page boundaries. */
-        access.vaddr2 = wrap_address(env, vaddr + access.size1);
-        access.size2 = size - access.size1;
-        access.haddr2 = probe_access(env, access.vaddr2, access.size2,
-                                     access_type, mmu_idx, ra);
+        vaddr2 = wrap_address(env, vaddr1 + size1);
+        flags |= probe_access_flags(env, vaddr2, access_type, mmu_idx,
+                                    nofault, &haddr2, ra);
+#if !defined(CONFIG_USER_ONLY)
+        if (env->tlb_fill_exc) {
+            return env->tlb_fill_exc;
+        }
+#else
+        if (!haddr2) {
+            env->__excp_addr = vaddr2;
+            return PGM_ADDRESSING;
+        }
+#endif
     }
-    return access;
+
+    if (unlikely(flags & TLB_WATCHPOINT)) {
+        /* S390 does not presently use transaction attributes. */
+        cpu_check_watchpoint(env_cpu(env), vaddr1, size,
+                             MEMTXATTRS_UNSPECIFIED,
+                             (access_type == MMU_DATA_STORE
+                              ? BP_MEM_WRITE : BP_MEM_READ), ra);
+    }
+
+    *access = (S390Access) {
+        .vaddr1 = vaddr1,
+        .vaddr2 = vaddr2,
+        .haddr1 = haddr1,
+        .haddr2 = haddr2,
+        .size1 = size1,
+        .size2 = size2,
+        .mmu_idx = mmu_idx
+    };
+    return 0;
+}
+
+static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
+                                 MMUAccessType access_type, int mmu_idx,
+                                 uintptr_t ra)
+{
+    S390Access ret;
+    int exc = access_prepare_nf(&ret, env, false, vaddr, size,
+                                access_type, mmu_idx, ra);
+    assert(!exc);
+    return ret;
 }
 
 /* Helper to handle memset on a single page. */
@@ -845,8 +910,10 @@  uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
     const int mmu_idx = cpu_mmu_index(env, false);
     const bool f = extract64(r0, 11, 1);
     const bool s = extract64(r0, 10, 1);
+    const bool cco = extract64(r0, 8, 1);
     uintptr_t ra = GETPC();
     S390Access srca, desta;
+    int exc;
 
     if ((f && s) || extract64(r0, 12, 4)) {
         tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
@@ -858,13 +925,26 @@  uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
     /*
      * TODO:
      * - Access key handling
-     * - CC-option with surpression of page-translation exceptions
      * - Store r1/r2 register identifiers at real location 162
      */
-    srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
-                          ra);
-    desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
-                           ra);
+    exc = access_prepare_nf(&srca, env, cco, r2, TARGET_PAGE_SIZE,
+                            MMU_DATA_LOAD, mmu_idx, ra);
+    if (exc) {
+        return 2;
+    }
+    exc = access_prepare_nf(&desta, env, cco, r1, TARGET_PAGE_SIZE,
+                            MMU_DATA_STORE, mmu_idx, ra);
+    if (exc) {
+#if !defined(CONFIG_USER_ONLY)
+        if (exc == PGM_PROTECTION) {
+            stq_phys(env_cpu(env)->as,
+                     env->psa + offsetof(LowCore, trans_exc_code),
+                     env->tlb_fill_tec);
+            tcg_s390_program_interrupt(env, PGM_PROTECTION, ra);
+        }
+#endif
+        return 1;
+    }
     access_memmove(env, &desta, &srca, ra);
     return 0; /* data moved */
 }