
[v3,01/19] crypto: Add generic 8-bit carry-less multiply routines

Message ID 20230821161854.419893-2-richard.henderson@linaro.org
State Superseded
Series crypto: Provide clmul.h and host accel

Commit Message

Richard Henderson Aug. 21, 2023, 4:18 p.m. UTC
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/crypto/clmul.h | 41 +++++++++++++++++++++++++++++
 crypto/clmul.c         | 60 ++++++++++++++++++++++++++++++++++++++++++
 crypto/meson.build     |  9 ++++---
 3 files changed, 107 insertions(+), 3 deletions(-)
 create mode 100644 include/crypto/clmul.h
 create mode 100644 crypto/clmul.c
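
For context: a carry-less (GF(2) polynomial) multiply combines its partial
products with XOR instead of addition, so no carries propagate between bit
positions. A minimal scalar 8x8->16 reference, shown here for illustration
only (it is not part of this patch):

    #include <stdint.h>

    /* Reference 8x8->16 carry-less multiply: XOR together the copies
     * of m shifted by each set bit position of n. */
    static uint16_t clmul_8x8_ref(uint8_t n, uint8_t m)
    {
        uint16_t r = 0;
        for (int i = 0; i < 8; i++) {
            if (n & (1u << i)) {
                r ^= (uint16_t)m << i;
            }
        }
        return r;
    }

clmul_8x8_low keeps only the low byte of each such product, one per byte
lane; the _8x4_ variants keep the full 16-bit product for four lanes.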

Comments

Ard Biesheuvel Sept. 10, 2023, 12:27 p.m. UTC | #1
On Mon, 21 Aug 2023 at 18:18, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>

Patch
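
A note on the implementation technique (an editorial sketch, not part of
the patch itself): clmul_8x8_low computes all eight byte lanes at once,
SWAR style. Iteration i broadcasts bit i of every byte of n into a
0x00/0xff per-byte mask, XORs the masked copy of m into the accumulator,
then shifts m left within each lane (the & 0xfefefefefefefefe mask keeps
bits from crossing byte boundaries). The broadcast itself is a multiply:
each selected bit has value 0 or 1 within its byte, and 0x01 * 0xff ==
0xff cannot carry into the neighbouring byte. One step in isolation:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Bit 0 of each byte of n, broadcast across that byte. */
        uint64_t n = 0x0302010003020100ull;
        uint64_t mask = (n & 0x0101010101010101ull) * 0xff;
        assert(mask == 0xff00ff00ff00ff00ull);
        return 0;
    }

The 16-bit variant (clmul_8x4_even_int) plays the same trick with
0x0001000100010001 and 0xffff, and needs no mask on m <<= 1 because an
8-bit operand shifted at most 7 times stays within its 16-bit lane.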

diff --git a/include/crypto/clmul.h b/include/crypto/clmul.h
new file mode 100644
index 0000000000..153b5e3057
--- /dev/null
+++ b/include/crypto/clmul.h
@@ -0,0 +1,41 @@ 
+/*
+ * Carry-less multiply operations.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef CRYPTO_CLMUL_H
+#define CRYPTO_CLMUL_H
+
+/**
+ * clmul_8x8_low:
+ *
+ * Perform eight 8x8->8 carry-less multiplies.
+ */
+uint64_t clmul_8x8_low(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_even:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ * The odd bytes of the inputs are ignored.
+ */
+uint64_t clmul_8x4_even(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_odd:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ * The even bytes of the inputs are ignored.
+ */
+uint64_t clmul_8x4_odd(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_packed:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ */
+uint64_t clmul_8x4_packed(uint32_t, uint32_t);
+
+#endif /* CRYPTO_CLMUL_H */
diff --git a/crypto/clmul.c b/crypto/clmul.c
new file mode 100644
index 0000000000..82d873fee5
--- /dev/null
+++ b/crypto/clmul.c
@@ -0,0 +1,60 @@ 
+/*
+ * Carry-less multiply operations.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#include "qemu/osdep.h"
+#include "crypto/clmul.h"
+
+uint64_t clmul_8x8_low(uint64_t n, uint64_t m)
+{
+    uint64_t r = 0;
+
+    for (int i = 0; i < 8; ++i) {
+        uint64_t mask = (n & 0x0101010101010101ull) * 0xff;
+        r ^= m & mask;
+        m = (m << 1) & 0xfefefefefefefefeull;
+        n >>= 1;
+    }
+    return r;
+}
+
+static uint64_t clmul_8x4_even_int(uint64_t n, uint64_t m)
+{
+    uint64_t r = 0;
+
+    for (int i = 0; i < 8; ++i) {
+        uint64_t mask = (n & 0x0001000100010001ull) * 0xffff;
+        r ^= m & mask;
+        n >>= 1;
+        m <<= 1;
+    }
+    return r;
+}
+
+uint64_t clmul_8x4_even(uint64_t n, uint64_t m)
+{
+    n &= 0x00ff00ff00ff00ffull;
+    m &= 0x00ff00ff00ff00ffull;
+    return clmul_8x4_even_int(n, m);
+}
+
+uint64_t clmul_8x4_odd(uint64_t n, uint64_t m)
+{
+    return clmul_8x4_even(n >> 8, m >> 8);
+}
+
+static uint64_t unpack_8_to_16(uint64_t x)
+{
+    return  (x & 0x000000ff)
+         | ((x & 0x0000ff00) << 8)
+         | ((x & 0x00ff0000) << 16)
+         | ((x & 0xff000000) << 24);
+}
+
+uint64_t clmul_8x4_packed(uint32_t n, uint32_t m)
+{
+    return clmul_8x4_even_int(unpack_8_to_16(n), unpack_8_to_16(m));
+}
diff --git a/crypto/meson.build b/crypto/meson.build
index 5f03a30d34..9ac1a89802 100644
--- a/crypto/meson.build
+++ b/crypto/meson.build
@@ -48,9 +48,12 @@ if have_afalg
 endif
 crypto_ss.add(when: gnutls, if_true: files('tls-cipher-suites.c'))
 
-util_ss.add(files('sm4.c'))
-util_ss.add(files('aes.c'))
-util_ss.add(files('init.c'))
+util_ss.add(files(
+  'aes.c',
+  'clmul.c',
+  'init.c',
+  'sm4.c',
+))
 if gnutls.found()
   util_ss.add(gnutls)
 endif
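
Finally, a hypothetical self-check (again, not part of the patch) showing
how a caller might exercise clmul_8x4_even: each 16-bit lane of the result
is the full 8x8->16 product of the even bytes of the corresponding input
lanes.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include "crypto/clmul.h"   /* the header added by this patch */

    /* Scalar reference, as above. */
    static uint16_t clmul_8x8_ref(uint8_t n, uint8_t m)
    {
        uint16_t r = 0;
        for (int i = 0; i < 8; i++) {
            if (n & (1u << i)) {
                r ^= (uint16_t)m << i;
            }
        }
        return r;
    }

    int main(void)
    {
        uint64_t n = 0x123456789abcdef0ull;
        uint64_t m = 0x0fedcba987654321ull;
        uint64_t r = clmul_8x4_even(n, m);

        for (int lane = 0; lane < 4; lane++) {
            uint8_t nb = n >> (16 * lane);   /* even byte of each lane */
            uint8_t mb = m >> (16 * lane);
            assert((uint16_t)(r >> (16 * lane)) == clmul_8x8_ref(nb, mb));
        }
        printf("clmul_8x4_even matches the scalar reference\n");
        return 0;
    }

clmul_8x4_odd and clmul_8x4_packed reduce to the same kernel: the odd
variant shifts the odd bytes down by 8 first, and the packed variant
widens four adjacent bytes into 16-bit lanes with unpack_8_to_16.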