diff mbox series

crypto: ccree: fix iv copying for small buffers

Message ID 1528361927-4172-1-git-send-email-gilad@benyossef.com
State New
Headers show
Series crypto: ccree: fix iv copying for small buffers | expand

Commit Message

Gilad Ben-Yossef June 7, 2018, 8:58 a.m. UTC
We are copying our last cipher block into the request for use as IV as
required by the Crypto API, but we failed to correctly handle the case
where the buffer we are working on is smaller than a block. Fix it by
calculating how much we need to copy based on the buffer size.

CC: stable@vger.kernel.org
Fixes: 63ee04c8b491 ("crypto: ccree - add skcipher support")
Reported-by: Hadar Gat <hadar.gat@arm.com>
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>

---
 drivers/crypto/ccree/cc_cipher.c | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)

-- 
2.7.4

Comments

Harsh Jain June 8, 2018, 7:30 a.m. UTC | #1
On Thu, Jun 7, 2018 at 2:32 PM, Gilad Ben-Yossef <gilad@benyossef.com> wrote:
> Hi,

>

> On Thu, Jun 7, 2018 at 11:58 AM, Gilad Ben-Yossef <gilad@benyossef.com> wrote:

>> We are copying our last cipher block into the request for use as IV as

>> required by the Crypto API but we failed to handle correctly the case the

>> buffer we are working on is smaller than a block. Fix it by calculating

>> how much we need to copy based on buffer size.

>>

>

> I'd be really happy to get a review on this patch - not so much what

> it is doing but

> rather the rational behind it - how is a tfm provider supposed to

> handle copying the

> last block of ciphertext into the request structure if the ciphertext

> size is less than a

> block?

>

> I opted for simply copying whatever ciphertext was available and

> zeroing the rest

> but frankly I'm not sure this is the right thing.


Hi Gilad,

The requirement to copy the IV back differs between the various modes. You
can check the software templates (cbc, ctr, ...) for reference. The basic
idea is to update the IV such that the user can use the same context to
continue encryption.

e.g. CBC mode uses the last encrypted block as the IV for the current block.
That's why we have to copy the last 16 bytes to req->info.

        Note: For in-place decryption we save the last block (the IV for the
next operation) in a local buffer to avoid it getting overwritten by plaintext.

For ctr mode: the driver should increment the counter (the last 4 bytes of
the IV) based on the number of blocks processed (1 block implies 1 counter
increment).


You can use the following kcapi command to compare the results with sw
crypto. The "-s" switch will reuse the same context for the "-d" number of
iterations:

./kcapi -x 1 -e -s  -d 4 -c "cbc(aes)" -k
8d7dd9b0170ce0b5f2f8e1aa768e01e91da8bfc67fd486d081b28254c99eb423 -i
7fbc02ebf5b93322329df9bfccb635af -p 48981da18e4bb9ef7e2e3162d16b1910

>

> Any feedback is appreciated.

>

> Thanks!

> Gilad

>

>

>> CC: stable@vger.kernel.org

>> Fixes: 63ee04c8b491 ("crypto: ccree - add skcipher support")

>> Reported by: Hadar Gat <hadar.gat@arm.com>

>> Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>

>> ---

>>  drivers/crypto/ccree/cc_cipher.c | 30 ++++++++++++++++++++++++------

>>  1 file changed, 24 insertions(+), 6 deletions(-)

>>

>> diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c

>> index d2810c1..a07547f 100644

>> --- a/drivers/crypto/ccree/cc_cipher.c

>> +++ b/drivers/crypto/ccree/cc_cipher.c

>> @@ -616,9 +616,18 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)

>>                 memcpy(req->iv, req_ctx->backup_info, ivsize);

>>                 kzfree(req_ctx->backup_info);

>>         } else if (!err) {

>> -               scatterwalk_map_and_copy(req->iv, req->dst,

>> -                                        (req->cryptlen - ivsize),

>> -                                        ivsize, 0);

>> +               unsigned int len;

>> +

>> +               if (req->cryptlen > ivsize) {

>> +                       len = req->cryptlen - ivsize;

>> +               } else {

>> +                       memset(req->iv, 0, ivsize);

>> +                       len = 0;

>> +                       ivsize = req->cryptlen;

>> +

>> +               }

>> +

>> +               scatterwalk_map_and_copy(req->iv, req->dst, len, ivsize, 0);


Copying the last block is required for CBC mode only, and the CBC input
buffer is always a multiple of 16 (the block size).
The above change is not required for CBC and will not work with modes like ctr.
>>         }

>>

>>         skcipher_request_complete(req, err);

>> @@ -755,17 +764,26 @@ static int cc_cipher_decrypt(struct skcipher_request *req)

>>         struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);

>>         unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);

>>         gfp_t flags = cc_gfp_flags(&req->base);

>> +       unsigned int len;

>>

>>         /*

>>          * Allocate and save the last IV sized bytes of the source, which will

>>          * be lost in case of in-place decryption and might be needed for CTS.

>>          */

>> -       req_ctx->backup_info = kmalloc(ivsize, flags);

>> +       req_ctx->backup_info = kzalloc(ivsize, flags);

>>         if (!req_ctx->backup_info)

>>                 return -ENOMEM;

>>

>> -       scatterwalk_map_and_copy(req_ctx->backup_info, req->src,

>> -                                (req->cryptlen - ivsize), ivsize, 0);

>> +

>> +       if (req->cryptlen > ivsize) {

>> +               len = req->cryptlen - ivsize;

>> +       } else {

>> +               len = 0;

>> +               ivsize = req->cryptlen;

>> +       }

>> +

>> +       scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len, ivsize,

>> +                                0);

>>         req_ctx->is_giv = false;

>>

>>         return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);

>> --

>> 2.7.4

>>

>

>

>

> --

> Gilad Ben-Yossef

> Chief Coffee Drinker

>

> "If you take a class in large-scale robotics, can you end up in a

> situation where the homework eats your dog?"

>  -- Jean-Baptiste Queru
Herbert Xu June 19, 2018, 2:27 p.m. UTC | #2
On Sun, Jun 17, 2018 at 01:06:42PM +0300, Gilad Ben-Yossef wrote:
>

> It was ctr(aes). I wrongly assumed that we are supposed to unconditionally

> copy

> the cipher-text block post operation and let the caller do with it what it

> wants and so the

> code now does that for all cipher operations unconditionally.


For CTR it doesn't matter whether the last block is less than a
block, you should still increment the counter.

> So what is a good description of what we are supposed to provide in that

> field post operation?

> The next IV? but as you stated, that is not necessarily useful for all

> ciphers.


When in doubt, please refer to the generic implementation.  If
that is still unclear or if it seems wrong, please post to the
list.

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
Herbert Xu June 22, 2018, 6:53 a.m. UTC | #3
On Thu, Jun 21, 2018 at 04:35:44PM +0300, Gilad Ben-Yossef wrote:
> 

> What about OFB? unless I've missed something there is no generic

> implementation... ?


In general we shouldn't add hardware drivers for algorithms that
do not have a generic implementation.  Sometimes they slip through
though.

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
diff mbox series

Patch

diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d2810c1..a07547f 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -616,9 +616,18 @@  static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
 		memcpy(req->iv, req_ctx->backup_info, ivsize);
 		kzfree(req_ctx->backup_info);
 	} else if (!err) {
-		scatterwalk_map_and_copy(req->iv, req->dst,
-					 (req->cryptlen - ivsize),
-					 ivsize, 0);
+		unsigned int len;
+
+		if (req->cryptlen > ivsize) {
+			len = req->cryptlen - ivsize;
+		} else {
+			memset(req->iv, 0, ivsize);
+			len = 0;
+			ivsize = req->cryptlen;
+
+		}
+
+		scatterwalk_map_and_copy(req->iv, req->dst, len, ivsize, 0);
 	}
 
 	skcipher_request_complete(req, err);
@@ -755,17 +764,26 @@  static int cc_cipher_decrypt(struct skcipher_request *req)
 	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
 	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
 	gfp_t flags = cc_gfp_flags(&req->base);
+	unsigned int len;
 
 	/*
 	 * Allocate and save the last IV sized bytes of the source, which will
 	 * be lost in case of in-place decryption and might be needed for CTS.
 	 */
-	req_ctx->backup_info = kmalloc(ivsize, flags);
+	req_ctx->backup_info = kzalloc(ivsize, flags);
 	if (!req_ctx->backup_info)
 		return -ENOMEM;
 
-	scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
-				 (req->cryptlen - ivsize), ivsize, 0);
+
+	if (req->cryptlen > ivsize) {
+		len = req->cryptlen - ivsize;
+	} else {
+		len = 0;
+		ivsize = req->cryptlen;
+	}
+
+	scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len, ivsize,
+				 0);
 	req_ctx->is_giv = false;
 
 	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);