@@ -13,8 +13,61 @@
#ifndef _CRYPTO_CTR_H
#define _CRYPTO_CTR_H
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE 8
#define CTR_RFC3686_BLOCK_SIZE 16
+#define CTR_HELPER_MAX_BLOCK_SIZE 16
+
+static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req,
+					   void (*fn)(struct crypto_skcipher *,
+						      const u8 *, u8 *))
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	int blocksize = crypto_skcipher_blocksize(tfm);
+	u8 buf[CTR_HELPER_MAX_BLOCK_SIZE];
+	struct skcipher_walk walk;
+	int err;
+
+	/* verify some assumptions that help us keep the code simple */
+	if (WARN_ON_ONCE(!is_power_of_2(blocksize) ||
+			 blocksize > CTR_HELPER_MAX_BLOCK_SIZE))
+		return -EINVAL;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	while (walk.nbytes > 0) {
+		u8 *dst = walk.dst.virt.addr;
+		u8 *src = walk.src.virt.addr;
+		int nbytes = walk.nbytes;
+		int tail = 0;
+
+		if (nbytes < walk.total) {
+			nbytes = round_down(nbytes, blocksize);
+			tail = walk.nbytes & (blocksize - 1);
+		}
+
+		do {
+			int bsize = min(nbytes, blocksize);
+
+			fn(tfm, walk.iv, buf);
+
+			crypto_xor_cpy(dst, src, buf, bsize);
+			crypto_inc(walk.iv, blocksize);
+
+			dst += bsize;
+			src += bsize;
+			nbytes -= bsize;
+		} while (nbytes > 0);
+
+		err = skcipher_walk_done(&walk, tail);
+	}
+	return err;
+}
+
#endif /* _CRYPTO_CTR_H */
Add a static inline helper modeled after crypto_cbc_encrypt_walk() that
can be reused for SIMD algorithms that need to implement a non-SIMD
fallback for performing CTR encryption.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 include/crypto/ctr.h | 53 ++++++++++++++++++++
 1 file changed, 53 insertions(+)
--
2.20.1
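
For illustration only, here is a minimal sketch (not part of this patch) of
how a SIMD cipher driver might wire up crypto_ctr_encrypt_walk() as its
non-SIMD fallback path. The driver-side names (struct my_aes_ctx,
my_scalar_aes_encrypt_block(), my_simd_ctr_encrypt()) are hypothetical
placeholders, and it assumes crypto_simd_usable() from
<crypto/internal/simd.h> is available to decide when the SIMD unit must not
be touched:

#include <crypto/ctr.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>

/* hypothetical scalar key schedule and single-block encrypt primitive */
struct my_aes_ctx {
	u32 key_enc[60];
	int rounds;
};
void my_scalar_aes_encrypt_block(const struct my_aes_ctx *ctx, u8 *dst,
				 const u8 *src);
int my_simd_ctr_encrypt(struct skcipher_request *req);

/* produce one keystream block by encrypting the current counter value */
static void my_ctr_keystream_block(struct crypto_skcipher *tfm,
				   const u8 *src, u8 *dst)
{
	const struct my_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	my_scalar_aes_encrypt_block(ctx, dst, src);
}

static int my_ctr_encrypt(struct skcipher_request *req)
{
	if (!crypto_simd_usable())
		/* scalar fallback using the new helper */
		return crypto_ctr_encrypt_walk(req, my_ctr_keystream_block);

	return my_simd_ctr_encrypt(req);
}

Since fn() only ever produces one block of keystream at a time, the fallback
needs no CTR-specific logic of its own; the counter increment and
partial-final-block handling stay in the shared helper.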