@@ -14,6 +14,7 @@
#include "vec.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
+#include "crypto/clmul.h"
static bool s390_vec_is_zero(const S390Vector *v)
{
@@ -179,7 +180,6 @@ static uint##TBITS##_t galois_multiply##BITS(uint##TBITS##_t a, \
} \
return res; \
}
-DEF_GALOIS_MULTIPLY(8, 16)
DEF_GALOIS_MULTIPLY(16, 32)
DEF_GALOIS_MULTIPLY(32, 64)
@@ -203,6 +203,34 @@ static S390Vector galois_multiply64(uint64_t a, uint64_t b)
return res;
}
+/*
+ * There is no carry across the two doublewords, so their order does
+ * not matter. Nor is there partial overlap between registers.
+ */
+static inline uint64_t do_gfma8(uint64_t n, uint64_t m, uint64_t a)
+{
+ return clmul_8x4_even(n, m) ^ clmul_8x4_odd(n, m) ^ a;
+}
+
+void HELPER(gvec_vgfm8)(void *v1, const void *v2, const void *v3, uint32_t d)
+{
+ uint64_t *q1 = v1;
+ const uint64_t *q2 = v2, *q3 = v3;
+
+ q1[0] = do_gfma8(q2[0], q3[0], 0);
+ q1[1] = do_gfma8(q2[1], q3[1], 0);
+}
+
+void HELPER(gvec_vgfma8)(void *v1, const void *v2, const void *v3,
+ const void *v4, uint32_t desc)
+{
+ uint64_t *q1 = v1;
+ const uint64_t *q2 = v2, *q3 = v3, *q4 = v4;
+
+ q1[0] = do_gfma8(q2[0], q3[0], q4[0]);
+ q1[1] = do_gfma8(q2[1], q3[1], q4[1]);
+}
+
#define DEF_VGFM(BITS, TBITS) \
void HELPER(gvec_vgfm##BITS)(void *v1, const void *v2, const void *v3, \
uint32_t desc) \
@@ -220,7 +248,6 @@ void HELPER(gvec_vgfm##BITS)(void *v1, const void *v2, const void *v3, \
s390_vec_write_element##TBITS(v1, i, d); \
} \
}
-DEF_VGFM(8, 16)
DEF_VGFM(16, 32)
DEF_VGFM(32, 64)
@@ -257,7 +284,6 @@ void HELPER(gvec_vgfma##BITS)(void *v1, const void *v2, const void *v3, \
s390_vec_write_element##TBITS(v1, i, d); \
} \
}
-DEF_VGFMA(8, 16)
DEF_VGFMA(16, 32)
DEF_VGFMA(32, 64)
Use generic routines for 8-bit carry-less multiply.
Remove our local version of galois_multiply8.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/tcg/vec_int_helper.c | 32 +++++++++++++++++++++++++++++---
 1 file changed, 29 insertions(+), 3 deletions(-)
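
A note for readers who have not yet looked at the "crypto/clmul.h" helpers:
do_gfma8() above assumes that clmul_8x4_even() carry-lessly multiplies the
even-numbered byte (the low byte of each 16-bit lane) of its two 64-bit
operands, that clmul_8x4_odd() does the same for the odd-numbered bytes, and
that each returns four 16-bit products aligned to their lanes, so
even ^ odd ^ accumulator yields the halfword results of VGFM/VGFMA with byte
elements. The standalone C sketch below only illustrates those assumed
semantics; it is not the code from crypto/clmul.c or from this patch, and the
sketch_* names are made up for this note.

/*
 * Illustrative sketch only: models the semantics assumed above, not the
 * actual crypto/clmul.c implementation.  All sketch_* names are hypothetical.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Carry-less (GF(2)[x]) multiply of two bytes, giving a 16-bit product. */
static uint16_t sketch_clmul_8x8(uint8_t a, uint8_t b)
{
    uint16_t r = 0;

    for (int i = 0; i < 8; i++) {
        if (b & (1 << i)) {
            r ^= (uint16_t)a << i;
        }
    }
    return r;
}

/* Even bytes: multiply the low byte of each 16-bit lane of n and m. */
static uint64_t sketch_clmul_8x4_even(uint64_t n, uint64_t m)
{
    uint64_t r = 0;

    for (int i = 0; i < 64; i += 16) {
        uint8_t nn = n >> i, mm = m >> i;

        r |= (uint64_t)sketch_clmul_8x8(nn, mm) << i;
    }
    return r;
}

/* Odd bytes: multiply the high byte of each 16-bit lane of n and m. */
static uint64_t sketch_clmul_8x4_odd(uint64_t n, uint64_t m)
{
    return sketch_clmul_8x4_even(n >> 8, m >> 8);
}

/* Mirrors do_gfma8() from the patch, on top of the sketch helpers. */
static uint64_t sketch_gfma8(uint64_t n, uint64_t m, uint64_t a)
{
    return sketch_clmul_8x4_even(n, m) ^ sketch_clmul_8x4_odd(n, m) ^ a;
}

int main(void)
{
    /*
     * Per halfword: (0x03 x 0x01) ^ (0x02 x 0x01) over GF(2) = 0x0001,
     * so this prints 0001000100010001.
     */
    printf("%016" PRIx64 "\n",
           sketch_gfma8(0x0302030203020302ULL, 0x0101010101010101ULL, 0));
    return 0;
}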