[v4,1/2] crypto: scatterwalk - Use nth_page instead of doing it by hand

Message ID 96553040a4b37d8b54b9959e859fc057889dfdac.1741922689.git.herbert@gondor.apana.org.au
State Accepted
Commit e9ed7aff2554176cac0c49907e14d55679d67f8a

Commit Message

Herbert Xu March 14, 2025, 3:27 a.m. UTC
Curiously, the Crypto API scatterwalk incremented pages by hand
rather than using nth_page.  Possibly because scatterwalk predates
nth_page (the following commit is from the history tree):

	commit 3957f2b34960d85b63e814262a8be7d5ad91444d
	Author: James Morris <jmorris@intercode.com.au>
	Date:   Sun Feb 2 07:35:32 2003 -0800

	    [CRYPTO]: in/out scatterlist support for ciphers.

Fix this by using nth_page.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 include/crypto/scatterwalk.h | 30 ++++++++++++++++++++----------
 1 file changed, 20 insertions(+), 10 deletions(-)
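
For background: plain pointer arithmetic on struct page is only safe when
the memmap is virtually contiguous.  With CONFIG_SPARSEMEM and without
CONFIG_SPARSEMEM_VMEMMAP, struct pages are only contiguous within a memory
section, so base_page + n may not reach the struct page of the n-th
following page frame; nth_page goes through the pfn instead.  At the time
of this patch, include/linux/mm.h defined it roughly as:

	#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
	#define nth_page(page, n)  pfn_to_page(page_to_pfn((page)) + (n))
	#else
	#define nth_page(page, n)  ((page) + (n))
	#endif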

Comments

Eric Biggers March 16, 2025, 3:31 a.m. UTC | #1
On Fri, Mar 14, 2025 at 11:27:20AM +0800, Herbert Xu wrote:
> Curiously, the Crypto API scatterwalk incremented pages by hand
> rather than using nth_page.  Possibly because scatterwalk predates
> nth_page (the following commit is from the history tree):
> 
> 	commit 3957f2b34960d85b63e814262a8be7d5ad91444d
> 	Author: James Morris <jmorris@intercode.com.au>
> 	Date:   Sun Feb 2 07:35:32 2003 -0800
> 
> 	    [CRYPTO]: in/out scatterlist support for ciphers.
> 
> Fix this by using nth_page.

As I said on v2, this needs an explanation of what it's actually fixing.

> @@ -189,14 +195,18 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk,
>  	 * reliably optimized out or not.
>  	 */
>  	if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
> -		struct page *base_page, *start_page, *end_page, *page;
> +		struct page *base_page;
> +		unsigned int offset;
> +		int start, end, i;
>  
>  		base_page = sg_page(walk->sg);
> -		start_page = base_page + (walk->offset >> PAGE_SHIFT);
> -		end_page = base_page + ((walk->offset + nbytes +
> -					 PAGE_SIZE - 1) >> PAGE_SHIFT);
> -		for (page = start_page; page < end_page; page++)
> -			flush_dcache_page(page);
> +		offset = walk->offset;
> +		start = offset >> PAGE_SHIFT;
> +		end = start + (nbytes >> PAGE_SHIFT);
> +		end += (offset_in_page(offset) + offset_in_page(nbytes) +
> +			PAGE_SIZE - 1) >> PAGE_SHIFT;

The change to how the end page index is calculated is unrelated to the nth_page
fix, and it makes the code slower and harder to understand.  My original code
just rounded the new offset up to a page boundary to get the end page index.
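
In nth_page terms, that simpler calculation would presumably look
something like this (a sketch, not code from the thread):

	/* round the end of the written region up to a page boundary */
	end = (offset + nbytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = start; i < end; i++)
		flush_dcache_page(nth_page(base_page, i));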

- Eric
Herbert Xu March 16, 2025, 4:28 a.m. UTC | #2
On Sat, Mar 15, 2025 at 08:31:41PM -0700, Eric Biggers wrote:
>
> > @@ -189,14 +195,18 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk,
> >  	 * reliably optimized out or not.
> >  	 */
> >  	if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
> > -		struct page *base_page, *start_page, *end_page, *page;
> > +		struct page *base_page;
> > +		unsigned int offset;
> > +		int start, end, i;
> >  
> >  		base_page = sg_page(walk->sg);
> > -		start_page = base_page + (walk->offset >> PAGE_SHIFT);
> > -		end_page = base_page + ((walk->offset + nbytes +
> > -					 PAGE_SIZE - 1) >> PAGE_SHIFT);
> > -		for (page = start_page; page < end_page; page++)
> > -			flush_dcache_page(page);
> > +		offset = walk->offset;
> > +		start = offset >> PAGE_SHIFT;
> > +		end = start + (nbytes >> PAGE_SHIFT);
> > +		end += (offset_in_page(offset) + offset_in_page(nbytes) +
> > +			PAGE_SIZE - 1) >> PAGE_SHIFT;
> 
> The change to how the end page index is calculated is unrelated to the nth_page
> fix, and it makes the code slower and harder to understand.  My original code
> just rounded the new offset up to a page boundary to get the end page index.

The original code is open to overflows in the addition.  The new
version is not.
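
As a concrete sketch of the difference (illustrative values; assumes
PAGE_SIZE == 4096 and 32-bit unsigned int, which is what walk->offset and
nbytes are):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1u << PAGE_SHIFT)
	#define offset_in_page(p) ((unsigned int)(p) & (PAGE_SIZE - 1))

	int main(void)
	{
		unsigned int offset = 0x100, nbytes = 0xffffff00;

		/* old: offset + nbytes wraps at 2^32, so end comes out
		 * as 0 and no pages get flushed */
		unsigned int end_old =
			(offset + nbytes + PAGE_SIZE - 1) >> PAGE_SHIFT;

		/* new: splitting into whole pages plus sub-page
		 * remainders keeps every addition far below UINT_MAX */
		unsigned int start = offset >> PAGE_SHIFT;
		unsigned int end_new = start + (nbytes >> PAGE_SHIFT);

		end_new += (offset_in_page(offset) + offset_in_page(nbytes) +
			    PAGE_SIZE - 1) >> PAGE_SHIFT;

		/* prints: old end = 0, new end = 0x100000 */
		printf("old end = %#x, new end = %#x\n", end_old, end_new);
		return 0;
	}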

Cheers,
Eric Biggers March 16, 2025, 4:42 a.m. UTC | #3
On Sun, Mar 16, 2025 at 12:28:24PM +0800, Herbert Xu wrote:
> On Sat, Mar 15, 2025 at 08:31:41PM -0700, Eric Biggers wrote:
> >
> > > @@ -189,14 +195,18 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk,
> > >  	 * reliably optimized out or not.
> > >  	 */
> > >  	if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
> > > -		struct page *base_page, *start_page, *end_page, *page;
> > > +		struct page *base_page;
> > > +		unsigned int offset;
> > > +		int start, end, i;
> > >  
> > >  		base_page = sg_page(walk->sg);
> > > -		start_page = base_page + (walk->offset >> PAGE_SHIFT);
> > > -		end_page = base_page + ((walk->offset + nbytes +
> > > -					 PAGE_SIZE - 1) >> PAGE_SHIFT);
> > > -		for (page = start_page; page < end_page; page++)
> > > -			flush_dcache_page(page);
> > > +		offset = walk->offset;
> > > +		start = offset >> PAGE_SHIFT;
> > > +		end = start + (nbytes >> PAGE_SHIFT);
> > > +		end += (offset_in_page(offset) + offset_in_page(nbytes) +
> > > +			PAGE_SIZE - 1) >> PAGE_SHIFT;
> > 
> > The change to how the end page index is calculated is unrelated to the nth_page
> > fix, and it makes the code slower and harder to understand.  My original code
> > just rounded the new offset up to a page boundary to get the end page index.
> 
> The original code is open to overflows in the addition.  The new
> version is not.

If you think you are fixing a separate issue, you need to say so.

But I also don't think it's worth worrying about lengths so close to UINT_MAX.
No one is using them, they have no testing, and there are likely to be other
overflows like this one too.

- Eric

Patch

diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index b7e617ae4442..94a8585f26b2 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -100,11 +100,15 @@ static inline void scatterwalk_get_sglist(struct scatter_walk *walk,
 static inline void scatterwalk_map(struct scatter_walk *walk)
 {
 	struct page *base_page = sg_page(walk->sg);
+	unsigned int offset = walk->offset;
+	void *addr;
 
 	if (IS_ENABLED(CONFIG_HIGHMEM)) {
-		walk->__addr = kmap_local_page(base_page +
-					       (walk->offset >> PAGE_SHIFT)) +
-			       offset_in_page(walk->offset);
+		struct page *page;
+
+		page = nth_page(base_page, offset >> PAGE_SHIFT);
+		offset = offset_in_page(offset);
+		addr = kmap_local_page(page) + offset;
 	} else {
 		/*
 		 * When !HIGHMEM we allow the walker to return segments that
@@ -117,8 +121,10 @@ static inline void scatterwalk_map(struct scatter_walk *walk)
 		 * in the direct map, but this makes it clearer what is really
 		 * going on.
 		 */
-		walk->__addr = page_address(base_page) + walk->offset;
+		addr = page_address(base_page) + offset;
 	}
+
+	walk->__addr = addr;
 }
 
 /**
@@ -189,14 +195,18 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk,
 	 * reliably optimized out or not.
 	 */
 	if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
-		struct page *base_page, *start_page, *end_page, *page;
+		struct page *base_page;
+		unsigned int offset;
+		int start, end, i;
 
 		base_page = sg_page(walk->sg);
-		start_page = base_page + (walk->offset >> PAGE_SHIFT);
-		end_page = base_page + ((walk->offset + nbytes +
-					 PAGE_SIZE - 1) >> PAGE_SHIFT);
-		for (page = start_page; page < end_page; page++)
-			flush_dcache_page(page);
+		offset = walk->offset;
+		start = offset >> PAGE_SHIFT;
+		end = start + (nbytes >> PAGE_SHIFT);
+		end += (offset_in_page(offset) + offset_in_page(nbytes) +
+			PAGE_SIZE - 1) >> PAGE_SHIFT;
+		for (i = start; i < end; i++)
+			flush_dcache_page(nth_page(base_page, i));
 	}
 	scatterwalk_advance(walk, nbytes);
 }