[net-next,v5] skb_expand_head() adjust skb->truesize incorrectly

Message ID 053307fc-cc3d-68f5-1994-fe447428b1f2@virtuozzo.com
State New

Commit Message

Vasily Averin Sept. 2, 2021, 11:12 a.m. UTC
Christoph Paasch reports [1] an incorrect skb->truesize
after a skb_expand_head() call in ip6_xmit().
This may happen for two reasons:
- skb_set_owner_w() for the newly cloned skb is called too early,
before pskb_expand_head(), where truesize is adjusted for the (!skb->sk) case.
- pskb_expand_head() does not adjust truesize in the (skb->sk) case.
In that case sk->sk_wmem_alloc should be adjusted too.

[1] https://lkml.org/lkml/2021/8/20/1082

Fixes: f1260ff15a71 ("skbuff: introduce skb_expand_head()")
Reported-by: Christoph Paasch <christoph.paasch@gmail.com>
Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
---
v5: fixed else condition, thanks to Eric
    reworked update of expanded skb,
    added corresponding comments
v4: decided to use is_skb_wmem() after the pskb_expand_head() call,
    fixed 'return (EXPRESSION);' in is_skb_wmem() according to Eric Dumazet
v3: removed __pskb_expand_head(),
    added the is_skb_wmem() helper for skbs with wmem-compatible destructors;
    there are two ways to use it:
     - before pskb_expand_head(), to create skb clones
     - after a successful pskb_expand_head(), to change the owner of the expanded skb.
v2: based on patch version from Eric Dumazet,
    added __pskb_expand_head() function, which can be forced
    to adjust skb->truesize and sk->sk_wmem_alloc.
---
 include/net/sock.h |  1 +
 net/core/skbuff.c  | 63 ++++++++++++++++++++++++++++++++++++++++++++++--------
 net/core/sock.c    |  8 +++++++
 3 files changed, 63 insertions(+), 9 deletions(-)
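
To make the second point of the commit message concrete: the wmem
destructors release skb->truesize from sk->sk_wmem_alloc, so whenever
truesize grows, the socket has to be charged by the same delta, otherwise
the release becomes larger than the charge. A minimal user-space sketch
of that arithmetic (illustrative only; the variables stand in for the
kernel fields and the sizes are arbitrary):

	#include <stdio.h>

	int main(void)
	{
		unsigned int sk_wmem_alloc = 1;	/* sockets start with a charge of 1 */
		unsigned int truesize = 768;	/* charged at skb_set_owner_w() time */

		sk_wmem_alloc += truesize;	/* charge taken before the expand */
		truesize += 1024;		/* head grew; only truesize updated */
		sk_wmem_alloc -= truesize;	/* destructor releases the new, larger
						 * truesize and underflows the counter */

		printf("sk_wmem_alloc = %u\n", sk_wmem_alloc);	/* wraps to a huge value */
		return 0;
	}

This is why the hunk below adds the delta to both sk->sk_wmem_alloc and
skb->truesize.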

Patch

diff --git a/include/net/sock.h b/include/net/sock.h
index 95b2577..173d58c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1695,6 +1695,7 @@  struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 			     gfp_t priority);
 void __sock_wfree(struct sk_buff *skb);
 void sock_wfree(struct sk_buff *skb);
+bool is_skb_wmem(const struct sk_buff *skb);
 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
 			     gfp_t priority);
 void skb_orphan_partial(struct sk_buff *skb);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f931176..29bb92e7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1804,28 +1804,73 @@  struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
 {
 	int delta = headroom - skb_headroom(skb);
+	int osize = skb_end_offset(skb);
+	struct sk_buff *oskb = NULL;
+	struct sock *sk = skb->sk;
 
 	if (WARN_ONCE(delta <= 0,
 		      "%s is expecting an increase in the headroom", __func__))
 		return skb;
 
-	/* pskb_expand_head() might crash, if skb is shared */
+	delta = SKB_DATA_ALIGN(delta);
+	/* pskb_expand_head() might crash if skb is shared.
+	 * Also we should clone skb if its destructor does
+	 * not adjust skb->truesize and sk->sk_wmem_alloc
+	 */
 	if (skb_shared(skb)) {
 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 
-		if (likely(nskb)) {
-			if (skb->sk)
-				skb_set_owner_w(nskb, skb->sk);
-			consume_skb(skb);
-		} else {
+		if (unlikely(!nskb)) {
 			kfree_skb(skb);
+			return NULL;
 		}
+		oskb = skb;
 		skb = nskb;
 	}
-	if (skb &&
-	    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+	if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) {
 		kfree_skb(skb);
-		skb = NULL;
+		kfree_skb(oskb);
+		return NULL;
+	}
+	if (oskb) {
+		if (sk)
+			skb_set_owner_w(skb, sk);
+		consume_skb(oskb);
+	} else if (sk && skb->destructor != sock_edemux) {
+		bool ref, set_owner;
+
+		ref = false; set_owner = false;
+		delta = osize - skb_end_offset(skb);
+		/* skb_set_owner_w() calls the current skb destructor.
+		 * It can decrease sk_wmem_alloc to 0 and release sk.
+		 * To prevent this we increase sk_wmem_alloc earlier.
+		 * Other kinds of destructors can release the last sk_refcnt,
+		 * so it would be impossible to call sock_hold() for a !fullsock;
+		 * take an extra sk_refcnt to prevent it.
+		 * Otherwise just increase the truesize of the expanded skb.
+		 */
+		refcount_add(delta, &sk->sk_wmem_alloc);
+		if (!is_skb_wmem(skb)) {
+			set_owner = true;
+			if (!sk_fullsock(sk) && IS_ENABLED(CONFIG_INET)) {
+				/* skb_set_owner_w can set sock_edemux */
+				ref = refcount_inc_not_zero(&sk->sk_refcnt);
+				if (!ref) {
+					set_owner = false;
+					WARN_ON(refcount_sub_and_test(delta, &sk->sk_wmem_alloc));
+				}
+			}
+		}
+		if (set_owner)
+			skb_set_owner_w(skb, sk);
+#ifdef CONFIG_INET
+		if (skb->destructor == sock_edemux) {
+			WARN_ON(refcount_sub_and_test(delta, &sk->sk_wmem_alloc));
+			if (ref)
+				WARN_ON(refcount_dec_and_test(&sk->sk_refcnt));
+		}
+#endif
+		skb->truesize += delta;
 	}
 	return skb;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 950f1e7..6cbda43 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2227,6 +2227,14 @@  void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 }
 EXPORT_SYMBOL(skb_set_owner_w);
 
+bool is_skb_wmem(const struct sk_buff *skb)
+{
+	return skb->destructor == sock_wfree ||
+	       skb->destructor == __sock_wfree ||
+	       (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree);
+}
+EXPORT_SYMBOL(is_skb_wmem);
+
 static bool can_skb_orphan_partial(const struct sk_buff *skb)
 {
 #ifdef CONFIG_TLS_DEVICE
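
For context, the ip6_xmit() path mentioned in the report grows the
headroom roughly as in the sketch below (simplified and hypothetical;
example_xmit() and the dev_queue_xmit() call are placeholders, not the
exact upstream caller). skb->sk is normally already set at this point,
which is the (skb->sk) case the fix targets:

	#include <linux/errno.h>
	#include <linux/ipv6.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Sketch only, not part of this patch. */
	static int example_xmit(struct net_device *dev, struct sk_buff *skb)
	{
		unsigned int head_room = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr);

		if (unlikely(head_room > skb_headroom(skb))) {
			/* skb->sk is typically set here, so skb->truesize and
			 * sk->sk_wmem_alloc must stay consistent after the expand.
			 */
			skb = skb_expand_head(skb, head_room);
			if (!skb)
				return -ENOBUFS;
		}
		return dev_queue_xmit(skb);	/* stand-in for the real output path */
	}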