[v2,3/3] crypto: arm64/aes-ce-gcm - don't reload key schedule if avoidable

Message ID 20180730210642.25180-4-ard.biesheuvel@linaro.org
State New
Series crypto/arm64: aes-ce-gcm - switch to 2-way aggregation

Commit Message

Ard Biesheuvel July 30, 2018, 9:06 p.m. UTC
Squeeze out another 5% of performance by minimizing the number
of invocations of kernel_neon_begin()/kernel_neon_end() on the
common path, which also allows some reloads of the key schedule
to be optimized away.

The resulting code runs at 2.3 cycles per byte on a Cortex-A53.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>

---
Raw numbers after the patch.
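
For readability, this is roughly what the encrypt loop in gcm_encrypt() looks
like with the patch applied (a simplified sketch reconstructed from the hunks
below; the initial counter-block/tag setup, the sub-block tail handling and
the non-SIMD fallback are left out):

    err = skcipher_walk_aead_encrypt(&walk, req, false);

    if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
            u32 const *rk = NULL;

            kernel_neon_begin();
            /* ... encrypt the initial counter blocks here; this also
             * leaves the round keys loaded in NEON registers ... */

            do {
                    int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;

                    /* on the first pass we are still inside the NEON
                     * section opened above, so don't re-enter it */
                    if (rk)
                            kernel_neon_begin();

                    /* rk == NULL tells the asm to keep using the key
                     * schedule that is already loaded */
                    pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
                                      walk.src.virt.addr, ctx->h2, iv,
                                      rk, nrounds, ks);
                    kernel_neon_end();

                    err = skcipher_walk_done(&walk,
                                    walk.nbytes % (2 * AES_BLOCK_SIZE));

                    /* later passes re-enter NEON, so pass the real
                     * pointer and let the asm reload the key schedule */
                    rk = ctx->aes_key.key_enc;
            } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
    }

On the asm side, the entry point only runs load_round_keys when it is handed a
non-NULL key pointer (the new "cbnz x6, 4f" below).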

 arch/arm64/crypto/ghash-ce-core.S |  9 ++-
 arch/arm64/crypto/ghash-ce-glue.c | 81 +++++++++++---------
 2 files changed, 49 insertions(+), 41 deletions(-)

-- 
2.18.0

testing speed of gcm(aes) (gcm-aes-ce) encryption
test  0 (128 bit key,   16 byte blocks): 365343 operations in 1 seconds (  5845488 bytes)
test  1 (128 bit key,   64 byte blocks): 504620 operations in 1 seconds ( 32295680 bytes)
test  2 (128 bit key,  256 byte blocks): 418881 operations in 1 seconds (107233536 bytes)
test  3 (128 bit key,  512 byte blocks): 343166 operations in 1 seconds (175700992 bytes)
test  4 (128 bit key, 1024 byte blocks): 252229 operations in 1 seconds (258282496 bytes)
test  5 (128 bit key, 2048 byte blocks): 164862 operations in 1 seconds (337637376 bytes)
test  6 (128 bit key, 4096 byte blocks):  98274 operations in 1 seconds (402530304 bytes)
test  7 (128 bit key, 8192 byte blocks):  52530 operations in 1 seconds (430325760 bytes)
test  8 (192 bit key,   16 byte blocks): 343221 operations in 1 seconds (  5491536 bytes)
test  9 (192 bit key,   64 byte blocks): 495929 operations in 1 seconds ( 31739456 bytes)
test 10 (192 bit key,  256 byte blocks): 404755 operations in 1 seconds (103617280 bytes)
test 11 (192 bit key,  512 byte blocks): 326728 operations in 1 seconds (167284736 bytes)
test 12 (192 bit key, 1024 byte blocks): 235987 operations in 1 seconds (241650688 bytes)
test 13 (192 bit key, 2048 byte blocks): 151724 operations in 1 seconds (310730752 bytes)
test 14 (192 bit key, 4096 byte blocks):  89285 operations in 1 seconds (365711360 bytes)
test 15 (192 bit key, 8192 byte blocks):  47432 operations in 1 seconds (388562944 bytes)
test 16 (256 bit key,   16 byte blocks): 323574 operations in 1 seconds (  5177184 bytes)
test 17 (256 bit key,   64 byte blocks): 489854 operations in 1 seconds ( 31350656 bytes)
test 18 (256 bit key,  256 byte blocks): 396979 operations in 1 seconds (101626624 bytes)
test 19 (256 bit key,  512 byte blocks): 317923 operations in 1 seconds (162776576 bytes)
test 20 (256 bit key, 1024 byte blocks): 211440 operations in 1 seconds (216514560 bytes)
test 21 (256 bit key, 2048 byte blocks): 145407 operations in 1 seconds (297793536 bytes)
test 22 (256 bit key, 4096 byte blocks):  85050 operations in 1 seconds (348364800 bytes)
test 23 (256 bit key, 8192 byte blocks):  45068 operations in 1 seconds (369197056 bytes)
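
Note: the 2.3 cycles per byte figure quoted above follows from the largest
128-bit-key block size if the Cortex-A53 test system is clocked at roughly
1 GHz (the exact clock is not stated here, so treat this as an
approximation): 10^9 cycles/s / 430325760 bytes/s ~= 2.3 cycles per byte.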
Patch

diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index f7281e7a592f..913e49932ae6 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -1,7 +1,7 @@ 
 /*
  * Accelerated GHASH implementation with ARMv8 PMULL instructions.
  *
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -332,8 +332,6 @@  ENDPROC(pmull_ghash_update_p8)
 	ld1		{XL.2d}, [x1]
 	ldr		x8, [x5, #8]			// load lower counter
 
-	load_round_keys	w7, x6
-
 	movi		MASK.16b, #0xe1
 	trn1		SHASH2.2d, SHASH.2d, HH.2d
 	trn2		T1.2d, SHASH.2d, HH.2d
@@ -346,6 +344,8 @@  CPU_LE(	rev		x8, x8		)
 	ld1		{KS0.16b-KS1.16b}, [x10]
 	.endif
 
+	cbnz		x6, 4f
+
 0:	ld1		{INP0.16b-INP1.16b}, [x3], #32
 
 	rev		x9, x8
@@ -471,6 +471,9 @@  CPU_LE(	rev		x8, x8		)
 	enc_round	KS0, v20
 	enc_round	KS1, v20
 	b		1b
+
+4:	load_round_keys	w7, x6
+	b		0b
 	.endm
 
 	/*
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index c41ac62c90e9..88e3d93fa7c7 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -1,7 +1,7 @@ 
 /*
  * Accelerated GHASH implementation with ARMv8 PMULL instructions.
  *
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -374,37 +374,39 @@  static int gcm_encrypt(struct aead_request *req)
 	memcpy(iv, req->iv, GCM_IV_SIZE);
 	put_unaligned_be32(1, iv + GCM_IV_SIZE);
 
-	if (likely(may_use_simd())) {
-		kernel_neon_begin();
+	err = skcipher_walk_aead_encrypt(&walk, req, false);
 
+	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+		u32 const *rk = NULL;
+
+		kernel_neon_begin();
 		pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 		pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
 		put_unaligned_be32(3, iv + GCM_IV_SIZE);
 		pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
 		put_unaligned_be32(4, iv + GCM_IV_SIZE);
-		kernel_neon_end();
-
-		err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-		while (walk.nbytes >= 2 * AES_BLOCK_SIZE) {
+		do {
 			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 
-			kernel_neon_begin();
+			if (rk)
+				kernel_neon_begin();
+
 			pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
 					  walk.src.virt.addr, ctx->h2, iv,
-					  ctx->aes_key.key_enc, nrounds, ks);
+					  rk, nrounds, ks);
 			kernel_neon_end();
 
 			err = skcipher_walk_done(&walk,
 					walk.nbytes % (2 * AES_BLOCK_SIZE));
-		}
+
+			rk = ctx->aes_key.key_enc;
+		} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
 	} else {
 		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		err = skcipher_walk_aead_encrypt(&walk, req, false);
-
 		while (walk.nbytes >= AES_BLOCK_SIZE) {
 			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 			u8 *dst = walk.dst.virt.addr;
@@ -486,50 +488,53 @@  static int gcm_decrypt(struct aead_request *req)
 	memcpy(iv, req->iv, GCM_IV_SIZE);
 	put_unaligned_be32(1, iv + GCM_IV_SIZE);
 
-	if (likely(may_use_simd())) {
+	err = skcipher_walk_aead_decrypt(&walk, req, false);
+
+	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+		u32 const *rk = NULL;
+
 		kernel_neon_begin();
 		pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
-		kernel_neon_end();
 
-		err = skcipher_walk_aead_decrypt(&walk, req, false);
-
-		while (walk.nbytes >= 2 * AES_BLOCK_SIZE) {
+		do {
 			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
+			int rem = walk.total - blocks * AES_BLOCK_SIZE;
+
+			if (rk)
+				kernel_neon_begin();
 
-			kernel_neon_begin();
 			pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
 					  walk.src.virt.addr, ctx->h2, iv,
-					  ctx->aes_key.key_enc, nrounds);
-			kernel_neon_end();
+					  rk, nrounds);
 
-			err = skcipher_walk_done(&walk,
-					walk.nbytes % (2 * AES_BLOCK_SIZE));
-		}
+			/* check if this is the final iteration of the loop */
+			if (rem < (2 * AES_BLOCK_SIZE)) {
+				u8 *iv2 = iv + AES_BLOCK_SIZE;
 
-		if (walk.nbytes) {
-			u8 *iv2 = iv + AES_BLOCK_SIZE;
+				if (rem > AES_BLOCK_SIZE) {
+					memcpy(iv2, iv, AES_BLOCK_SIZE);
+					crypto_inc(iv2, AES_BLOCK_SIZE);
+				}
 
-			if (walk.nbytes > AES_BLOCK_SIZE) {
-				memcpy(iv2, iv, AES_BLOCK_SIZE);
-				crypto_inc(iv2, AES_BLOCK_SIZE);
-			}
+				pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);
 
-			kernel_neon_begin();
-			pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc,
-						nrounds);
+				if (rem > AES_BLOCK_SIZE)
+					pmull_gcm_encrypt_block(iv2, iv2, NULL,
+								nrounds);
+			}
 
-			if (walk.nbytes > AES_BLOCK_SIZE)
-				pmull_gcm_encrypt_block(iv2, iv2, NULL,
-							nrounds);
 			kernel_neon_end();
-		}
+
+			err = skcipher_walk_done(&walk,
+					walk.nbytes % (2 * AES_BLOCK_SIZE));
+
+			rk = ctx->aes_key.key_enc;
+		} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
 	} else {
 		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		err = skcipher_walk_aead_decrypt(&walk, req, false);
-
 		while (walk.nbytes >= AES_BLOCK_SIZE) {
 			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 			u8 *dst = walk.dst.virt.addr;