
[bpf,v2] xsk: fix broken Tx ring validation

Message ID 20210618075805.14412-1-magnus.karlsson@gmail.com
State New
Series [bpf,v2] xsk: fix broken Tx ring validation

Commit Message

Magnus Karlsson June 18, 2021, 7:58 a.m. UTC
From: Magnus Karlsson <magnus.karlsson@intel.com>

Fix broken Tx ring validation for AF_XDP. The commit under the Fixes
tag fixed an off-by-one error in the validation but introduced
another error. Descriptors are now let through even if they straddle a
chunk boundary, which they are not allowed to do in aligned mode.
Worse, they are let through even if they straddle the end of the umem
itself, tricking the kernel into reading data outside the allowed umem
region, which might or might not be mapped at all.

Fix this by reintroducing the old code, but subtracting one from the
length to fix the off-by-one error that the original patch was
addressing. The test chunk != chunk_end makes sure packets do not
straddle chunk boundaries. Note that packets of zero length are
allowed by the interface, hence the test that the length is
non-zero.
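
For illustration only (not part of the patch), here is a minimal
user-space sketch of the aligned-mode check. CHUNK_SIZE and chunk_of()
are hypothetical stand-ins for pool->chunk_size and
xp_aligned_extract_addr(); aligned mode requires a power-of-two chunk
size, so extracting the chunk base is a simple mask:

/* Minimal user-space sketch, for illustration only. CHUNK_SIZE and
 * chunk_of() are stand-ins for pool->chunk_size and
 * xp_aligned_extract_addr(); aligned mode uses power-of-two chunks,
 * so the chunk base is just a mask of the address.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_SIZE 2048ULL	/* example chunk size */

static uint64_t chunk_of(uint64_t addr)
{
	return addr & ~(CHUNK_SIZE - 1);	/* base of the chunk holding addr */
}

static bool valid_aligned_desc(uint64_t addr, uint32_t len)
{
	/* Zero-length packets are allowed, so only test non-empty ones. */
	if (len && chunk_of(addr) != chunk_of(addr + len - 1))
		return false;	/* first and last byte land in different chunks */
	return true;
}

int main(void)
{
	/* Exactly fills one chunk: accepted (prints 1). */
	printf("%d\n", valid_aligned_desc(0, CHUNK_SIZE));
	/* Last byte spills into the next chunk: rejected (prints 0). */
	printf("%d\n", valid_aligned_desc(CHUNK_SIZE - 1, 2));
	/* len <= chunk_size alone is not enough: this also straddles (prints 0). */
	printf("%d\n", valid_aligned_desc(CHUNK_SIZE / 2, CHUNK_SIZE));
	return 0;
}

Comparing chunk_of(addr + len) instead of chunk_of(addr + len - 1)
would wrongly reject the first, exactly chunk-filling descriptor (the
original off-by-one), while checking only len > chunk_size would
wrongly accept the other two straddling descriptors.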

v1 -> v2:
* Improved commit message

Fixes: ac31565c2193 ("xsk: Fix for xp_aligned_validate_desc() when len == chunk_size")
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
---
 net/xdp/xsk_queue.h | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)


base-commit: da5ac772cfe2a03058b0accfac03fad60c46c24d

Comments

Björn Töpel June 18, 2021, 1:33 p.m. UTC | #1
On Fri, 18 Jun 2021 at 09:58, Magnus Karlsson <magnus.karlsson@gmail.com> wrote:
>
> From: Magnus Karlsson <magnus.karlsson@intel.com>
>
> Fix broken Tx ring validation for AF_XDP. The commit under the Fixes
> tag fixed an off-by-one error in the validation but introduced
> another error. Descriptors are now let through even if they straddle a
> chunk boundary, which they are not allowed to do in aligned mode.
> Worse, they are let through even if they straddle the end of the umem
> itself, tricking the kernel into reading data outside the allowed umem
> region, which might or might not be mapped at all.
>
> Fix this by reintroducing the old code, but subtracting one from the
> length to fix the off-by-one error that the original patch was
> addressing. The test chunk != chunk_end makes sure packets do not
> straddle chunk boundaries. Note that packets of zero length are
> allowed by the interface, hence the test that the length is
> non-zero.
>
> v1 -> v2:
> * Improved commit message
>
> Fixes: ac31565c2193 ("xsk: Fix for xp_aligned_validate_desc() when len == chunk_size")
> Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>

Acked-by: Björn Töpel <bjorn@kernel.org>

> ---
>  net/xdp/xsk_queue.h | 11 +++++++----
>  1 file changed, 7 insertions(+), 4 deletions(-)
>
> diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
> index 9d2a89d793c0..9ae13cccfb28 100644
> --- a/net/xdp/xsk_queue.h
> +++ b/net/xdp/xsk_queue.h
> @@ -128,12 +128,15 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
>  static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
>                                             struct xdp_desc *desc)
>  {
> -       u64 chunk;
> -
> -       if (desc->len > pool->chunk_size)
> -               return false;
> +       u64 chunk, chunk_end;
>
>         chunk = xp_aligned_extract_addr(pool, desc->addr);
> +       if (likely(desc->len)) {
> +               chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
> +               if (chunk != chunk_end)
> +                       return false;
> +       }
> +
>         if (chunk >= pool->addrs_cnt)
>                 return false;
>
>
> base-commit: da5ac772cfe2a03058b0accfac03fad60c46c24d
> --
> 2.29.0
>

Patch

diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 9d2a89d793c0..9ae13cccfb28 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -128,12 +128,15 @@  static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
 static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
 					    struct xdp_desc *desc)
 {
-	u64 chunk;
-
-	if (desc->len > pool->chunk_size)
-		return false;
+	u64 chunk, chunk_end;
 
 	chunk = xp_aligned_extract_addr(pool, desc->addr);
+	if (likely(desc->len)) {
+		chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
+		if (chunk != chunk_end)
+			return false;
+	}
+
 	if (chunk >= pool->addrs_cnt)
 		return false;