
[v5,net-next,04/11] skbuff: simplify __alloc_skb() a bit

Message ID: 20210211185220.9753-5-alobakin@pm.me
State: New
Series: skbuff: introduce skbuff_heads bulking and reusing

Commit Message

Alexander Lobakin Feb. 11, 2021, 6:53 p.m. UTC
Use unlikely() annotations for the skbuff_head and data allocations, similarly
to the two other allocation functions, and remove the now-redundant goto.

Signed-off-by: Alexander Lobakin <alobakin@pm.me>
---
 net/core/skbuff.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

Patch

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c7d184e11547..88566de26cd1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -339,8 +339,8 @@  struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 	/* Get the HEAD */
 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
-	if (!skb)
-		goto out;
+	if (unlikely(!skb))
+		return NULL;
 	prefetchw(skb);
 
 	/* We do our best to align skb_shared_info on a separate cache
@@ -351,7 +351,7 @@  struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	size = SKB_DATA_ALIGN(size);
 	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
-	if (!data)
+	if (unlikely(!data))
 		goto nodata;
 	/* kmalloc(size) might give us more room than requested.
 	 * Put skb_shared_info exactly at the end of allocated zone,
@@ -395,12 +395,11 @@  struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 	skb_set_kcov_handle(skb, kcov_common_handle());
 
-out:
 	return skb;
+
 nodata:
 	kmem_cache_free(cache, skb);
-	skb = NULL;
-	goto out;
+	return NULL;
 }
 EXPORT_SYMBOL(__alloc_skb);
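
For illustration, here is a minimal, self-contained sketch (not the kernel
sources) of the pattern the patch applies: annotate allocation-failure
branches with unlikely() and return early instead of jumping to a shared
label. The names my_alloc_obj and struct obj are hypothetical; in the kernel,
likely()/unlikely() are wrappers around __builtin_expect(), which is
approximated the same way here.

	#include <stdlib.h>
	#include <string.h>

	/* Hint to the compiler that the condition is expected to be false,
	 * so the error path is laid out as the cold branch. */
	#define unlikely(x) __builtin_expect(!!(x), 0)

	struct obj {
		void	*data;
		size_t	len;
	};

	static struct obj *my_alloc_obj(size_t len)
	{
		struct obj *o = malloc(sizeof(*o));

		if (unlikely(!o))	/* allocation failure is the cold path */
			return NULL;	/* early return, no "goto out" needed */

		o->data = malloc(len);
		if (unlikely(!o->data)) {
			free(o);	/* single cleanup step, so undo it inline */
			return NULL;
		}

		o->len = len;
		memset(o->data, 0, len);
		return o;
	}

With only one failure path needing cleanup (freeing the head after the data
allocation fails), a direct return keeps the success path straight-line, which
is what the patch does to __alloc_skb() by dropping the "out:" label.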