diff mbox series

[16/20] target/arm: Fix sve_zip_p vs odd vector lengths

Message ID 20200815013145.539409-17-richard.henderson@linaro.org
State Superseded
Headers show
Series target/arm: SVE2 preparatory patches | expand

Commit Message

Richard Henderson Aug. 15, 2020, 1:31 a.m. UTC
Wrote too much with low-half zip (zip1) when vl % 512 != 0.

Adjust all of the x + (y << s) to x | (y << s) as a style fix.

Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

---
 target/arm/sve_helper.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

-- 
2.25.1

Comments

Peter Maydell Aug. 25, 2020, 1:49 p.m. UTC | #1
On Sat, 15 Aug 2020 at 02:32, Richard Henderson
<richard.henderson@linaro.org> wrote:
>

> Wrote too much with low-half zip (zip1) with vl % 512 != 0.

>

> Adjust all of the x + (y << s) to x | (y << s) as a style fix.

>

> Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com>

> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

> ---

>  target/arm/sve_helper.c | 25 ++++++++++++++-----------

>  1 file changed, 14 insertions(+), 11 deletions(-)

>

> diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c

> index fcb46f150f..b8651ae173 100644

> --- a/target/arm/sve_helper.c

> +++ b/target/arm/sve_helper.c

> @@ -1870,6 +1870,7 @@ void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)

>      intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2;

>      int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2);

>      intptr_t high = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1);

> +    int esize = 1 << esz;

>      uint64_t *d = vd;

>      intptr_t i;

>

> @@ -1882,33 +1883,35 @@ void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)

>          mm = extract64(mm, high * half, half);

>          nn = expand_bits(nn, esz);

>          mm = expand_bits(mm, esz);

> -        d[0] = nn + (mm << (1 << esz));

> +        d[0] = nn | (mm << esize);

>      } else {

> -        ARMPredicateReg tmp_n, tmp_m;

> +        ARMPredicateReg tmp;

>

>          /* We produce output faster than we consume input.

>             Therefore we must be mindful of possible overlap.  */

> -        if ((vn - vd) < (uintptr_t)oprsz) {

> -            vn = memcpy(&tmp_n, vn, oprsz);

> -        }

> -        if ((vm - vd) < (uintptr_t)oprsz) {

> -            vm = memcpy(&tmp_m, vm, oprsz);

> +        if (vd == vn) {

> +            vn = memcpy(&tmp, vn, oprsz);

> +            if (vd == vm) {

> +                vm = vn;

> +            }

> +        } else if (vd == vm) {

> +            vm = memcpy(&tmp, vm, oprsz);


Why is it OK to only check vd==vn etc rather than checking for
overlap the way the old code did ? The commit message doesn't
mention this.

thanks
-- PMM
Richard Henderson Aug. 28, 2020, 7:26 p.m. UTC | #2
On 8/25/20 6:49 AM, Peter Maydell wrote:
> On Sat, 15 Aug 2020 at 02:32, Richard Henderson

> <richard.henderson@linaro.org> wrote:

>>

>> Wrote too much with low-half zip (zip1) with vl % 512 != 0.

>>

>> Adjust all of the x + (y << s) to x | (y << s) as a style fix.

>>

>> Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com>

>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

>> ---

>>  target/arm/sve_helper.c | 25 ++++++++++++++-----------

>>  1 file changed, 14 insertions(+), 11 deletions(-)

>>

>> diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c

>> index fcb46f150f..b8651ae173 100644

>> --- a/target/arm/sve_helper.c

>> +++ b/target/arm/sve_helper.c

>> @@ -1870,6 +1870,7 @@ void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)

>>      intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2;

>>      int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2);

>>      intptr_t high = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1);

>> +    int esize = 1 << esz;

>>      uint64_t *d = vd;

>>      intptr_t i;

>>

>> @@ -1882,33 +1883,35 @@ void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)

>>          mm = extract64(mm, high * half, half);

>>          nn = expand_bits(nn, esz);

>>          mm = expand_bits(mm, esz);

>> -        d[0] = nn + (mm << (1 << esz));

>> +        d[0] = nn | (mm << esize);

>>      } else {

>> -        ARMPredicateReg tmp_n, tmp_m;

>> +        ARMPredicateReg tmp;

>>

>>          /* We produce output faster than we consume input.

>>             Therefore we must be mindful of possible overlap.  */

>> -        if ((vn - vd) < (uintptr_t)oprsz) {

>> -            vn = memcpy(&tmp_n, vn, oprsz);

>> -        }

>> -        if ((vm - vd) < (uintptr_t)oprsz) {

>> -            vm = memcpy(&tmp_m, vm, oprsz);

>> +        if (vd == vn) {

>> +            vn = memcpy(&tmp, vn, oprsz);

>> +            if (vd == vm) {

>> +                vm = vn;

>> +            }

>> +        } else if (vd == vm) {

>> +            vm = memcpy(&tmp, vm, oprsz);

> 

> Why is it OK to only check vd==vn etc rather than checking for

> overlap the way the old code did ? The commit message doesn't

> mention this.


We only ever pass pred_full_reg_offset, so there will only ever be exact
overlap.  I can either split this out as a separate change or simply add it to
the patch description.


r~
Peter Maydell Aug. 28, 2020, 11:01 p.m. UTC | #3
On Fri, 28 Aug 2020 at 20:26, Richard Henderson
<richard.henderson@linaro.org> wrote:
>

> On 8/25/20 6:49 AM, Peter Maydell wrote:

> > Why is it OK to only check vd==vn etc rather than checking for

> > overlap the way the old code did ? The commit message doesn't

> > mention this.

>

> We only ever pass pred_full_reg_offset, so there will only ever be exact

> overlap.  I can either split this out as a separate change or simply add it to

> the patch description.


Whichever you prefer, I guess.

thanks
-- PMM
diff mbox series

Patch

diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index fcb46f150f..b8651ae173 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -1870,6 +1870,7 @@  void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
     intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2;
     int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2);
     intptr_t high = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1);
+    int esize = 1 << esz;
     uint64_t *d = vd;
     intptr_t i;
 
@@ -1882,33 +1883,35 @@  void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
         mm = extract64(mm, high * half, half);
         nn = expand_bits(nn, esz);
         mm = expand_bits(mm, esz);
-        d[0] = nn + (mm << (1 << esz));
+        d[0] = nn | (mm << esize);
     } else {
-        ARMPredicateReg tmp_n, tmp_m;
+        ARMPredicateReg tmp;
 
         /* We produce output faster than we consume input.
            Therefore we must be mindful of possible overlap.  */
-        if ((vn - vd) < (uintptr_t)oprsz) {
-            vn = memcpy(&tmp_n, vn, oprsz);
-        }
-        if ((vm - vd) < (uintptr_t)oprsz) {
-            vm = memcpy(&tmp_m, vm, oprsz);
+        if (vd == vn) {
+            vn = memcpy(&tmp, vn, oprsz);
+            if (vd == vm) {
+                vm = vn;
+            }
+        } else if (vd == vm) {
+            vm = memcpy(&tmp, vm, oprsz);
         }
         if (high) {
             high = oprsz >> 1;
         }
 
-        if ((high & 3) == 0) {
+        if ((oprsz & 7) == 0) {
             uint32_t *n = vn, *m = vm;
             high >>= 2;
 
-            for (i = 0; i < DIV_ROUND_UP(oprsz, 8); i++) {
+            for (i = 0; i < oprsz / 8; i++) {
                 uint64_t nn = n[H4(high + i)];
                 uint64_t mm = m[H4(high + i)];
 
                 nn = expand_bits(nn, esz);
                 mm = expand_bits(mm, esz);
-                d[i] = nn + (mm << (1 << esz));
+                d[i] = nn | (mm << esize);
             }
         } else {
             uint8_t *n = vn, *m = vm;
@@ -1920,7 +1923,7 @@  void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
 
                 nn = expand_bits(nn, esz);
                 mm = expand_bits(mm, esz);
-                d16[H2(i)] = nn + (mm << (1 << esz));
+                d16[H2(i)] = nn | (mm << esize);
             }
         }
     }