
[RFC,v2,08/11] crypto: x86/aes-ni - Improve error handling

Message ID 20210514201508.27967-9-chang.seok.bae@intel.com
State New

Commit Message

Chang S. Bae May 14, 2021, 8:15 p.m. UTC
Some error cases in the glue code can be silently ignored and thus not
handled correctly, because a later call may overwrite a previously
returned error code. Make sure each error code is preserved rather than
overwritten.
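
For illustration, a minimal, self-contained sketch of the pattern (the
process_all() and do_step() names are hypothetical, not kernel code):
with a plain assignment, a later call that succeeds overwrites an
earlier failure, while OR-ing the return value keeps err nonzero once
any call has failed. Since skcipher_walk_done() returns 0 or a negative
errno, a nonzero err always indicates failure.

#include <errno.h>

/*
 * Hypothetical stand-in for a per-iteration call such as
 * skcipher_walk_done(): succeeds except on step 1.
 */
static int do_step(int i)
{
	return (i == 1) ? -EINVAL : 0;
}

int process_all(void)
{
	int err = 0;
	int i;

	for (i = 0; i < 3; i++) {
		/*
		 * "err = do_step(i)" would let the success of step 2
		 * overwrite the -EINVAL from step 1.  "err |=" keeps
		 * err nonzero once any step has failed.
		 */
		err |= do_step(i);
	}

	return err;	/* -EINVAL here, not 0 */
}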

Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Cc: x86@kernel.org
Cc: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
---
Changes from RFC v1:
* Added as a new patch, in preparation for addressing Ard's feedback.
---
 arch/x86/crypto/aesni-intel_glue.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

Patch

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 2144e54a6c89..685943f0e5a3 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -291,7 +291,7 @@ static int ecb_encrypt(struct skcipher_request *req)
 			      nbytes & AES_BLOCK_MASK);
 		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = skcipher_walk_done(&walk, nbytes);
+		err |= skcipher_walk_done(&walk, nbytes);
 	}
 
 	return err;
@@ -313,7 +313,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 			      nbytes & AES_BLOCK_MASK);
 		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = skcipher_walk_done(&walk, nbytes);
+		err |= skcipher_walk_done(&walk, nbytes);
 	}
 
 	return err;
@@ -335,7 +335,7 @@ static int cbc_encrypt(struct skcipher_request *req)
 			      nbytes & AES_BLOCK_MASK, walk.iv);
 		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = skcipher_walk_done(&walk, nbytes);
+		err |= skcipher_walk_done(&walk, nbytes);
 	}
 
 	return err;
@@ -357,7 +357,7 @@ static int cbc_decrypt(struct skcipher_request *req)
 			      nbytes & AES_BLOCK_MASK, walk.iv);
 		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = skcipher_walk_done(&walk, nbytes);
+		err |= skcipher_walk_done(&walk, nbytes);
 	}
 
 	return err;
@@ -522,7 +522,7 @@ static int ctr_crypt(struct skcipher_request *req)
 			nbytes = 0;
 		}
 		kernel_fpu_end();
-		err = skcipher_walk_done(&walk, nbytes);
+		err |= skcipher_walk_done(&walk, nbytes);
 	}
 	return err;
 }
@@ -691,7 +691,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 		}
 		kernel_fpu_end();
 
-		err = skcipher_walk_done(&walk, 0);
+		err |= skcipher_walk_done(&walk, 0);
 	}
 
 	if (err)
@@ -862,7 +862,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 					   blocks * AES_BLOCK_SIZE, req->iv);
 		req = &subreq;
-		err = skcipher_walk_virt(&walk, req, false);
+		err |= skcipher_walk_virt(&walk, req, false);
 	} else {
 		tail = 0;
 	}
@@ -888,7 +888,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 					  nbytes, walk.iv);
 		kernel_fpu_end();
 
-		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+		err |= skcipher_walk_done(&walk, walk.nbytes - nbytes);
 
 		if (walk.nbytes > 0)
 			kernel_fpu_begin();
@@ -905,7 +905,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
 					   req->iv);
 
-		err = skcipher_walk_virt(&walk, &subreq, false);
+		err |= skcipher_walk_virt(&walk, &subreq, false);
 		if (err)
 			return err;