
[v5,net-next,09/11] skbuff: allow to optionally use NAPI cache from __alloc_skb()

Message ID 20210211185220.9753-10-alobakin@pm.me
State New
Series skbuff: introduce skbuff_heads bulking and reusing

Commit Message

Alexander Lobakin Feb. 11, 2021, 6:54 p.m. UTC
Reuse the old and forgotten SKB_ALLOC_NAPI flag to add an option to
take an skbuff_head from the NAPI cache instead of allocating it in
place inside __alloc_skb().
This implies that the function is called from softirq or BH-disabled
context, not for allocating a clone and not for a remote NUMA node.

Signed-off-by: Alexander Lobakin <alobakin@pm.me>
---
 net/core/skbuff.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
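For illustration only, here is a minimal sketch of how a caller running in
NAPI/BH-disabled context could request a cached head once this option is in
place. The real call sites (e.g. __napi_alloc_skb()) are converted in later
patches of the series; `len` and GFP_ATOMIC below are placeholders, not code
from this patch.

/* Sketch: in softirq/BH-disabled context, pass SKB_ALLOC_NAPI so
 * __alloc_skb() pops the skbuff_head from the per-CPU NAPI cache
 * instead of calling kmem_cache_alloc_node() directly.
 */
struct sk_buff *skb;

skb = __alloc_skb(len, GFP_ATOMIC, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
		  NUMA_NO_NODE);
if (unlikely(!skb))
	return NULL;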

Patch

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9e1a8ded4acc..a0b457ae87c2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -397,15 +397,20 @@  struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	struct sk_buff *skb;
 	u8 *data;
 	bool pfmemalloc;
+	bool clone;
 
-	cache = (flags & SKB_ALLOC_FCLONE)
-		? skbuff_fclone_cache : skbuff_head_cache;
+	clone = !!(flags & SKB_ALLOC_FCLONE);
+	cache = clone ? skbuff_fclone_cache : skbuff_head_cache;
 
 	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
 		gfp_mask |= __GFP_MEMALLOC;
 
 	/* Get the HEAD */
-	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
+	if ((flags & SKB_ALLOC_NAPI) && !clone &&
+	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
+		skb = napi_skb_cache_get();
+	else
+		skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 	if (unlikely(!skb))
 		return NULL;
 	prefetchw(skb);
@@ -436,7 +441,7 @@  struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	__build_skb_around(skb, data, 0);
 	skb->pfmemalloc = pfmemalloc;
 
-	if (flags & SKB_ALLOC_FCLONE) {
+	if (clone) {
 		struct sk_buff_fclones *fclones;
 
 		fclones = container_of(skb, struct sk_buff_fclones, skb1);
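For context, napi_skb_cache_get() is introduced by an earlier patch in this
series and is not part of the diff above. The following is only a rough
sketch of the idea, not the code of that patch: pop one head from the
per-CPU napi_alloc_cache, bulk-refilling it from skbuff_head_cache when it
runs empty. The skb_cache/skb_count fields and the NAPI_SKB_CACHE_BULK
refill count are assumed from those earlier patches.

/* Sketch of the helper used above (defined earlier in the series):
 * take one skbuff_head from the per-CPU NAPI cache, refilling the
 * cache in bulk from skbuff_head_cache when it runs empty.
 */
static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	if (unlikely(!nc->skb_count))
		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
						      GFP_ATOMIC,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
	if (unlikely(!nc->skb_count))
		return NULL;

	return nc->skb_cache[--nc->skb_count];
}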