[RFC] crypto: use kmap_local() not kmap_atomic()

Message ID 20221213161310.2205802-1-ardb@kernel.org
State Accepted
Commit aa9695157f65c55e5c85a1c194859d3c03e68018
Series [RFC] crypto: use kmap_local() not kmap_atomic()

Commit Message

Ard Biesheuvel Dec. 13, 2022, 4:13 p.m. UTC
kmap_atomic() is used to create short-lived mappings of pages that may
not be accessible via the kernel direct map. This is only needed on
32-bit architectures that implement CONFIG_HIGHMEM, but it can be used
on 64-bit and other architectures too, where the returned mapping is
simply the kernel direct address of the page.
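
For illustration (not part of the patch), the classic kmap_atomic()
pattern looks roughly like this; copy_from_page_atomic() is a
hypothetical helper, not code from the tree:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper (illustration only): copy @len bytes out of a
 * possibly-highmem page with the old API. kmap_atomic() implicitly
 * disables preemption, so nothing between map and unmap may sleep. */
static void copy_from_page_atomic(struct page *pg, void *dst, size_t len)
{
	void *src = kmap_atomic(pg);

	memcpy(dst, src, len);
	kunmap_atomic(src);
}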

However, kmap_atomic() does not support migration on CONFIG_HIGHMEM
configurations, due to the use of per-CPU kmap slots, and so it disables
preemption on all architectures, not just the 32-bit ones. This implies
that all scatterwalk-based crypto routines essentially execute with
preemption disabled all the time, which is less than ideal.
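
The same hypothetical helper written against the kmap_local() API stays
preemptible; on !CONFIG_HIGHMEM kernels kmap_local_page() boils down to
page_address(), and on CONFIG_HIGHMEM only CPU migration is prevented:

/* Hypothetical helper (illustration only): same copy as above, but the
 * task remains preemptible while the local mapping is held. */
static void copy_from_page_local(struct page *pg, void *dst, size_t len)
{
	void *src = kmap_local_page(pg);

	memcpy(dst, src, len);
	kunmap_local(src);
}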

So let's switch scatterwalk_map/_unmap and the shash/ahash routines to
kmap_local() instead, which serves a similar purpose, but without the
resulting impact on preemption on architectures that have no need for
CONFIG_HIGHMEM.
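
One caveat the conversion relies on: kmap_local() mappings are stacked
per task, so code that maps a source and a destination page at the same
time (as the cipher walks do) must unmap them in reverse order. A
hypothetical sketch, with do_crypt() standing in for the real cipher
step:

/* Hypothetical sketch (illustration only): two simultaneous local
 * mappings must be released in reverse (stack) order. */
static void crypt_one_page(struct page *dst_pg, struct page *src_pg,
			   unsigned int len)
{
	void *src = kmap_local_page(src_pg);
	void *dst = kmap_local_page(dst_pg);

	do_crypt(dst, src, len);	/* hypothetical cipher step */

	kunmap_local(dst);		/* innermost mapping first */
	kunmap_local(src);
}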

Cc: Eric Biggers <ebiggers@kernel.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "Elliott, Robert (Servers)" <elliott@hpe.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 crypto/ahash.c               | 4 ++--
 crypto/shash.c               | 4 ++--
 include/crypto/scatterwalk.h | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

Comments

Herbert Xu Dec. 30, 2022, 3:11 p.m. UTC | #1
On Tue, Dec 13, 2022 at 05:13:10PM +0100, Ard Biesheuvel wrote:
> kmap_atomic() is used to create short-lived mappings of pages that may
> not be accessible via the kernel direct map. This is only needed on
> 32-bit architectures that implement CONFIG_HIGHMEM, but it can be used
> on 64-bit and other architectures too, where the returned mapping is
> simply the kernel direct address of the page.
> 
> However, kmap_atomic() does not support migration on CONFIG_HIGHMEM
> configurations, due to the use of per-CPU kmap slots, and so it disables
> preemption on all architectures, not just the 32-bit ones. This implies
> that all scatterwalk-based crypto routines essentially execute with
> preemption disabled all the time, which is less than ideal.
> 
> So let's switch scatterwalk_map/_unmap and the shash/ahash routines to
> kmap_local() instead, which serves a similar purpose, but without the
> resulting impact on preemption on architectures that have no need for
> CONFIG_HIGHMEM.
> 
> Cc: Eric Biggers <ebiggers@kernel.org>
> Cc: Herbert Xu <herbert@gondor.apana.org.au>
> Cc: "Elliott, Robert (Servers)" <elliott@hpe.com>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
>  crypto/ahash.c               | 4 ++--
>  crypto/shash.c               | 4 ++--
>  include/crypto/scatterwalk.h | 4 ++--
>  3 files changed, 6 insertions(+), 6 deletions(-)

Patch applied.  Thanks.

Patch

diff --git a/crypto/ahash.c b/crypto/ahash.c
index c2ca631a111fc7fd..4b089f1b770f2a60 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -45,7 +45,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
 	unsigned int nbytes = min(walk->entrylen,
 				  ((unsigned int)(PAGE_SIZE)) - offset);
 
-	walk->data = kmap_atomic(walk->pg);
+	walk->data = kmap_local_page(walk->pg);
 	walk->data += offset;
 
 	if (offset & alignmask) {
@@ -95,7 +95,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 		}
 	}
 
-	kunmap_atomic(walk->data);
+	kunmap_local(walk->data);
 	crypto_yield(walk->flags);
 
 	if (err)
diff --git a/crypto/shash.c b/crypto/shash.c
index 4c88e63b3350fc7f..0c7f91b1917e3691 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -330,10 +330,10 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 	     nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
 		void *data;
 
-		data = kmap_atomic(sg_page(sg));
+		data = kmap_local_page(sg_page(sg));
 		err = crypto_shash_digest(desc, data + offset, nbytes,
 					  req->result);
-		kunmap_atomic(data);
+		kunmap_local(data);
 	} else
 		err = crypto_shash_init(desc) ?:
 		      shash_ahash_finup(req, desc);
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index ccdb05f68a75c3a2..9cdf31c756eeacba 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -53,7 +53,7 @@ static inline struct page *scatterwalk_page(struct scatter_walk *walk)
 
 static inline void scatterwalk_unmap(void *vaddr)
 {
-	kunmap_atomic(vaddr);
+	kunmap_local(vaddr);
 }
 
 static inline void scatterwalk_start(struct scatter_walk *walk,
@@ -65,7 +65,7 @@ static inline void scatterwalk_start(struct scatter_walk *walk,
 
 static inline void *scatterwalk_map(struct scatter_walk *walk)
 {
-	return kmap_atomic(scatterwalk_page(walk)) +
+	return kmap_local_page(scatterwalk_page(walk)) +
 	       offset_in_page(walk->offset);
 }