
[v3,2/4] crypto: aria: do not use magic number offsets of aria_ctx

Message ID 20221106143627.30920-3-ap420073@gmail.com
State Superseded
Series crypto: aria: implement aria-avx2 and aria-avx512

Commit Message

Taehee Yoo Nov. 6, 2022, 2:36 p.m. UTC
The aria-avx assembly code accesses members of struct aria_ctx using
magic number offsets. If the layout of struct aria_ctx is changed
carelessly, aria-avx will stop working.
So, we need to ensure that members of aria_ctx are accessed with
correct offset values, not with magic numbers.

This patch adds ARIA_CTX_enc_key, ARIA_CTX_dec_key, and ARIA_CTX_rounds
to asm-offsets.c so that correct offset definitions are generated.
The aria-avx assembly code can then access the members of aria_ctx
safely through these definitions.
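
For illustration, the generated <asm/asm-offsets.h> will then carry
definitions roughly like the following (the values are computed by the
build from offsetof() and simply happen to match the old magic numbers;
shown here for reference only):

  #define ARIA_CTX_enc_key 0   /* offsetof(struct aria_ctx, enc_key) */
  #define ARIA_CTX_dec_key 272 /* offsetof(struct aria_ctx, dec_key) */
  #define ARIA_CTX_rounds 544  /* offsetof(struct aria_ctx, rounds) */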

Signed-off-by: Taehee Yoo <ap420073@gmail.com>
---

v3:
 - Patch introduced.

 arch/x86/crypto/aria-aesni-avx-asm_64.S | 26 +++++++++++--------------
 arch/x86/kernel/asm-offsets.c           | 11 +++++++++++
 2 files changed, 22 insertions(+), 15 deletions(-)

Comments

Elliott, Robert (Servers) Nov. 10, 2022, 3:55 a.m. UTC | #1
> -----Original Message-----
> From: Taehee Yoo <ap420073@gmail.com>
> Sent: Sunday, November 6, 2022 8:36 AM
> To: linux-crypto@vger.kernel.org; herbert@gondor.apana.org.au;
> davem@davemloft.net; tglx@linutronix.de; mingo@redhat.com; bp@alien8.de;
> dave.hansen@linux.intel.com; hpa@zytor.com;
> kirill.shutemov@linux.intel.com; richard@nod.at; viro@zeniv.linux.org.uk;
> sathyanarayanan.kuppuswamy@linux.intel.com; jpoimboe@kernel.org; Elliott,
> Robert (Servers) <elliott@hpe.com>; x86@kernel.org; jussi.kivilinna@iki.fi
> Cc: ap420073@gmail.com
> Subject: [PATCH v3 2/4] crypto: aria: do not use magic number offsets of
> aria_ctx
> 
> The aria-avx assembly code accesses members of struct aria_ctx using
> magic number offsets. If the layout of struct aria_ctx is changed
> carelessly, aria-avx will stop working.
> So, we need to ensure that members of aria_ctx are accessed with
> correct offset values, not with magic numbers.
> 
> This patch adds ARIA_CTX_enc_key, ARIA_CTX_dec_key, and ARIA_CTX_rounds
> to asm-offsets.c so that correct offset definitions are generated.
> The aria-avx assembly code can then access the members of aria_ctx
> safely through these definitions.
> 
> diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S
...
> 
>  #include <linux/linkage.h>
>  #include <asm/frame.h>
> -
> -/* struct aria_ctx: */
> -#define enc_key 0
> -#define dec_key 272
> -#define rounds 544

That structure also has a key_length field after the rounds field.
aria_set_key() sets it, but no function ever seems to use it.
Perhaps that field should be removed?
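
For reference, the context structure in include/crypto/aria.h looks
roughly like this (a sketch from memory; please double-check it against
the tree):

  struct aria_ctx {
          u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; /* 272 bytes */
          u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; /* 272 bytes */
          int rounds;                   /* old magic offset 544 */
          unsigned int key_length;      /* set by aria_set_key(), apparently never read */
  };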

> +#include <asm/asm-offsets.h>

That makes the offsets flexible, which is good.

The assembly code also implicitly assumes the size of each of those fields
(e.g., enc_key is 272 bytes, dec_key is 272 bytes, and rounds is 4 bytes).
A BUILD_BUG_ON confirming those assumptions might be worthwhile.
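
Something along these lines in the glue code's init function might do
it (an untested sketch; sizeof_field() is from <linux/stddef.h>, and
the expected sizes are the ones the assembly assumes today):

  /* Untested sketch: assert the aria_ctx field sizes the assembly relies on. */
  BUILD_BUG_ON(sizeof_field(struct aria_ctx, enc_key) != 272);
  BUILD_BUG_ON(sizeof_field(struct aria_ctx, dec_key) != 272);
  BUILD_BUG_ON(sizeof_field(struct aria_ctx, rounds) != 4);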

> diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
> index cb50589a7102..32192a91c65b 100644
> --- a/arch/x86/kernel/asm-offsets.c
> +++ b/arch/x86/kernel/asm-offsets.c
> @@ -7,6 +7,7 @@
>  #define COMPILE_OFFSETS
> 
>  #include <linux/crypto.h>
> +#include <crypto/aria.h>

Is it safe to include .h files for a large number of crypto modules
> in one C file? It seems like they could easily introduce naming conflicts.

However, this set does seem to compile cleanly:

+// no .h for aegis, but ctx is simple
+#include <crypto/aes.h>
+#include <crypto/aria.h>
+#include <crypto/blake2s.h>
+#include <crypto/blowfish.h>
+// no .h for camellia in crypto; it is in arch/x86/crypto
+#include <crypto/cast5.h>
+#include <crypto/cast6.h>
+#include <crypto/chacha.h>
+// no .h for crc32c, crc32, crct10dif, but no ctx structure to worry about
+#include <crypto/curve25519.h>
+#include <crypto/des.h>
+#include <crypto/ghash.h>
+#include <crypto/nhpoly1305.h>
+#include <crypto/poly1305.h>
+#include <crypto/polyval.h>
+#include <crypto/serpent.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/sm3.h>
+#include <crypto/twofish.h>
Taehee Yoo Nov. 11, 2022, 6:45 a.m. UTC | #2
Hi Elliott,
Thank you so much for your review!

On 2022. 11. 10. 12:55 PM, Elliott, Robert (Servers) wrote:
 >
 >
 >> -----Original Message-----
 >> From: Taehee Yoo <ap420073@gmail.com>
 >> Sent: Sunday, November 6, 2022 8:36 AM
 >> To: linux-crypto@vger.kernel.org; herbert@gondor.apana.org.au;
 >> davem@davemloft.net; tglx@linutronix.de; mingo@redhat.com; bp@alien8.de;
 >> dave.hansen@linux.intel.com; hpa@zytor.com;
 >> kirill.shutemov@linux.intel.com; richard@nod.at; viro@zeniv.linux.org.uk;
 >> sathyanarayanan.kuppuswamy@linux.intel.com; jpoimboe@kernel.org; Elliott,
 >> Robert (Servers) <elliott@hpe.com>; x86@kernel.org; jussi.kivilinna@iki.fi
 >> Cc: ap420073@gmail.com
 >> Subject: [PATCH v3 2/4] crypto: aria: do not use magic number offsets of
 >> aria_ctx
 >>
 >> The aria-avx assembly code accesses members of struct aria_ctx using
 >> magic number offsets. If the layout of struct aria_ctx is changed
 >> carelessly, aria-avx will stop working.
 >> So, we need to ensure that members of aria_ctx are accessed with
 >> correct offset values, not with magic numbers.
 >>
 >> This patch adds ARIA_CTX_enc_key, ARIA_CTX_dec_key, and ARIA_CTX_rounds
 >> to asm-offsets.c so that correct offset definitions are generated.
 >> The aria-avx assembly code can then access the members of aria_ctx
 >> safely through these definitions.
 >>
 >> diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S
 > ...
 >>
 >>   #include <linux/linkage.h>
 >>   #include <asm/frame.h>
 >> -
 >> -/* struct aria_ctx: */
 >> -#define enc_key 0
 >> -#define dec_key 272
 >> -#define rounds 544
 >
 > That structure also has a key_length field after the rounds field.
 > aria_set_key() sets it, but no function ever seems to use it.
 > Perhaps that field should be removed?

Okay, I will remove that field in a separate patch.

 >
 >> +#include <asm/asm-offsets.h>
 >
 > That makes the offsets flexible, which is good.
 >
 > The assembly code also implicitly assumes the size of each of those fields
 > (e.g., enc_key is 272 bytes, dec_key is 272 bytes, and rounds is 4 bytes).
 > A BUILD_BUG_ON confirming those assumptions might be worthwhile.

You're right,
I didn't consider the size of the fields.
I will add BUILD_BUG_ON() to check the sizes.

 >
 >> diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
 >> index cb50589a7102..32192a91c65b 100644
 >> --- a/arch/x86/kernel/asm-offsets.c
 >> +++ b/arch/x86/kernel/asm-offsets.c
 >> @@ -7,6 +7,7 @@
 >>   #define COMPILE_OFFSETS
 >>
 >>   #include <linux/crypto.h>
 >> +#include <crypto/aria.h>
 >
 > Is it safe to include .h files for a large number of crypto modules
 > in one C file? It seems like they could easily introduce naming conflicts.
 >
 > However, this set does seem to compile cleanly:
 >

Thanks for this check.
Sorry, I'm not sure about the side effects of including a large number
of headers in one .c file.

 > +// no .h for aegis, but ctx is simple
 > +#include <crypto/aes.h>
 > +#include <crypto/aria.h>
 > +#include <crypto/blake2s.h>
 > +#include <crypto/blowfish.h>
 > +// no .h for camellia in crypto; it is in arch/x86/crypto
 > +#include <crypto/cast5.h>
 > +#include <crypto/cast6.h>
 > +#include <crypto/chacha.h>
 > +// no .h for crc32c, crc32, crct10dif, but no ctx structure to worry about
 > +#include <crypto/curve25519.h>
 > +#include <crypto/des.h>
 > +#include <crypto/ghash.h>
 > +#include <crypto/nhpoly1305.h>
 > +#include <crypto/poly1305.h>
 > +#include <crypto/polyval.h>
 > +#include <crypto/serpent.h>
 > +#include <crypto/sha1.h>
 > +#include <crypto/sha2.h>
 > +#include <crypto/sm3.h>
 > +#include <crypto/twofish.h>
 >

Thanks a lot!
Taehee Yoo

Patch

diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S
index c75fd7d015ed..e47e7e54e08f 100644
--- a/arch/x86/crypto/aria-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/aria-aesni-avx-asm_64.S
@@ -8,11 +8,7 @@ 
 
 #include <linux/linkage.h>
 #include <asm/frame.h>
-
-/* struct aria_ctx: */
-#define enc_key 0
-#define dec_key 272
-#define rounds 544
+#include <asm/asm-offsets.h>
 
 /* register macros */
 #define CTX %rdi
@@ -873,7 +869,7 @@  SYM_FUNC_START_LOCAL(__aria_aesni_avx_crypt_16way)
 	aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15,
 		%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
 		%rax, %r9, 10);
-	cmpl $12, rounds(CTX);
+	cmpl $12, ARIA_CTX_rounds(CTX);
 	jne .Laria_192;
 	aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
 		%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -886,7 +882,7 @@  SYM_FUNC_START_LOCAL(__aria_aesni_avx_crypt_16way)
 	aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15,
 		%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
 		%rax, %r9, 12);
-	cmpl $14, rounds(CTX);
+	cmpl $14, ARIA_CTX_rounds(CTX);
 	jne .Laria_256;
 	aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
 		%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -922,7 +918,7 @@  SYM_FUNC_START(aria_aesni_avx_encrypt_16way)
 
 	FRAME_BEGIN
 
-	leaq enc_key(CTX), %r9;
+	leaq ARIA_CTX_enc_key(CTX), %r9;
 
 	inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
 		     %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -947,7 +943,7 @@  SYM_FUNC_START(aria_aesni_avx_decrypt_16way)
 
 	FRAME_BEGIN
 
-	leaq dec_key(CTX), %r9;
+	leaq ARIA_CTX_dec_key(CTX), %r9;
 
 	inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
 		     %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -1055,7 +1051,7 @@  SYM_FUNC_START(aria_aesni_avx_ctr_crypt_16way)
 	leaq (%rdx), %r11;
 	leaq (%rcx), %rsi;
 	leaq (%rcx), %rdx;
-	leaq enc_key(CTX), %r9;
+	leaq ARIA_CTX_enc_key(CTX), %r9;
 
 	call __aria_aesni_avx_crypt_16way;
 
@@ -1156,7 +1152,7 @@  SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way)
 		     %xmm0, %xmm1, %xmm2, %xmm3,
 		     %xmm4, %xmm5, %xmm6, %xmm7,
 		     %rax, %r9, 10);
-	cmpl $12, rounds(CTX);
+	cmpl $12, ARIA_CTX_rounds(CTX);
 	jne .Laria_gfni_192;
 	aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
 		%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -1173,7 +1169,7 @@  SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way)
 		     %xmm0, %xmm1, %xmm2, %xmm3,
 		     %xmm4, %xmm5, %xmm6, %xmm7,
 		     %rax, %r9, 12);
-	cmpl $14, rounds(CTX);
+	cmpl $14, ARIA_CTX_rounds(CTX);
 	jne .Laria_gfni_256;
 	aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2,
 		     %xmm4, %xmm5, %xmm6, %xmm7,
@@ -1217,7 +1213,7 @@  SYM_FUNC_START(aria_aesni_avx_gfni_encrypt_16way)
 
 	FRAME_BEGIN
 
-	leaq enc_key(CTX), %r9;
+	leaq ARIA_CTX_enc_key(CTX), %r9;
 
 	inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
 		     %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -1242,7 +1238,7 @@  SYM_FUNC_START(aria_aesni_avx_gfni_decrypt_16way)
 
 	FRAME_BEGIN
 
-	leaq dec_key(CTX), %r9;
+	leaq ARIA_CTX_dec_key(CTX), %r9;
 
 	inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
 		     %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -1274,7 +1270,7 @@  SYM_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way)
 	leaq (%rdx), %r11;
 	leaq (%rcx), %rsi;
 	leaq (%rcx), %rdx;
-	leaq enc_key(CTX), %r9;
+	leaq ARIA_CTX_enc_key(CTX), %r9;
 
 	call __aria_aesni_avx_gfni_crypt_16way;
 
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index cb50589a7102..32192a91c65b 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -7,6 +7,7 @@ 
 #define COMPILE_OFFSETS
 
 #include <linux/crypto.h>
+#include <crypto/aria.h>
 #include <linux/sched.h>
 #include <linux/stddef.h>
 #include <linux/hardirq.h>
@@ -109,6 +110,16 @@  static void __used common(void)
 	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
 	OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
 
+#if defined(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64) ||  \
+	defined(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64_MODULE)
+
+	/* Offset for fields in aria_ctx */
+	BLANK();
+	OFFSET(ARIA_CTX_enc_key, aria_ctx, enc_key);
+	OFFSET(ARIA_CTX_dec_key, aria_ctx, dec_key);
+	OFFSET(ARIA_CTX_rounds, aria_ctx, rounds);
+#endif
+
 	if (IS_ENABLED(CONFIG_KVM_INTEL)) {
 		BLANK();
 		OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);