[net-next] skbuff: pass the result of data ksize to __build_skb_around

Message ID 1631169238-37867-1-git-send-email-lirongqing@baidu.com
State New
Series [net-next] skbuff: pass the result of data ksize to __build_skb_around

Commit Message

Li RongQing Sept. 9, 2021, 6:33 a.m. UTC
Avoid calling ksize() again in __build_skb_around() by passing
the ksize() result for the data buffer down to __build_skb_around().

An nginx stress test shows this change reduces ksize() CPU usage
and gives a small performance boost.
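For reference, a minimal sketch of why the second call disappears; it
assumes (as the patch implies) that __build_skb_around() falls back to
ksize(data) only when the size it is handed is 0, and it is not the
verbatim kernel source:

static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	/* Reuse the caller's ksize() result when one was passed in;
	 * only call ksize() here when frag_size is 0 (sketch of the
	 * assumed fallback, not the exact upstream code).
	 */
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* ... set up skb->head/data/tail/end and skb_shared_info ... */
}

With __alloc_skb() now passing osize, the non-zero branch is taken and
the duplicate ksize() call is skipped.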

Signed-off-by: Li RongQing <lirongqing@baidu.com>
---
 net/core/skbuff.c |    8 +++++---
 1 files changed, 5 insertions(+), 3 deletions(-)
Patch

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9240af2..1e0a930 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -397,8 +397,9 @@  struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 {
 	struct kmem_cache *cache;
 	struct sk_buff *skb;
-	u8 *data;
+	unsigned int osize;
 	bool pfmemalloc;
+	u8 *data;
 
 	cache = (flags & SKB_ALLOC_FCLONE)
 		? skbuff_fclone_cache : skbuff_head_cache;
@@ -430,7 +431,8 @@  struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * Put skb_shared_info exactly at the end of allocated zone,
 	 * to allow max possible filling before reallocation.
 	 */
-	size = SKB_WITH_OVERHEAD(ksize(data));
+	osize = ksize(data);
+	size = SKB_WITH_OVERHEAD(osize);
 	prefetchw(data + size);
 
 	/*
@@ -439,7 +441,7 @@  struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
-	__build_skb_around(skb, data, 0);
+	__build_skb_around(skb, data, osize);
 	skb->pfmemalloc = pfmemalloc;
 
 	if (flags & SKB_ALLOC_FCLONE) {