diff mbox series

[net-next,1/5] skbuff: rename fields of struct napi_alloc_cache to be more intuitive

Message ID 20210111182801.12609-1-alobakin@pm.me
State New
Headers show
Series [net-next,1/5] skbuff: rename fields of struct napi_alloc_cache to be more intuitive | expand

Commit Message

Alexander Lobakin Jan. 11, 2021, 6:28 p.m. UTC
The skb_cache and skb_count fields are used to store skbuff_heads queued
for freeing, so that they can be flushed in bulk, and aren't related to
the allocation path. Give them more obvious names to improve code
understanding and allow expanding this struct with more
allocation-related elements.

Misc: indent struct napi_alloc_cache declaration for better reading.

Signed-off-by: Alexander Lobakin <alobakin@pm.me>
---
 net/core/skbuff.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

Comments

Jonathan Lemon Jan. 11, 2021, 6:49 p.m. UTC | #1
On Mon, Jan 11, 2021 at 06:28:21PM +0000, Alexander Lobakin wrote:
> skb_cache and skb_count fields are used to store skbuff_heads queued
> for freeing to flush them by bulks, and aren't related to allocation
> path. Give them more obvious names to improve code understanding and
> allow to expand this struct with more allocation-related elements.

I don't think prefixing these with flush_ is the correct approach;
flush is just an operation on the structure, not a property of the
structure itself.  It especially becomes confusing in the later 
patches when the cache is used on the allocation path.
Alexander Lobakin Jan. 11, 2021, 9:03 p.m. UTC | #2
From: Jonathan Lemon <jonathan.lemon@gmail.com>
Date: Mon, 11 Jan 2021 10:49:45 -0800

> On Mon, Jan 11, 2021 at 06:28:21PM +0000, Alexander Lobakin wrote:
>> skb_cache and skb_count fields are used to store skbuff_heads queued
>> for freeing to flush them by bulks, and aren't related to allocation
>> path. Give them more obvious names to improve code understanding and
>> allow to expand this struct with more allocation-related elements.
>
> I don't think prefixing these with flush_ is the correct approach;
> flush is just an operation on the structure, not a property of the
> structure itself.  It especially becomes confusing in the later
> patches when the cache is used on the allocation path.

Agree, but didn't come up with anything more fitting. Any suggestions
maybe?

> --
> Jonathan

Thanks,
Al
diff mbox series

Patch

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7626a33cce59..17ae5e90f103 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -366,9 +366,9 @@  EXPORT_SYMBOL(build_skb_around);
 #define NAPI_SKB_CACHE_SIZE	64
 
 struct napi_alloc_cache {
-	struct page_frag_cache page;
-	unsigned int skb_count;
-	void *skb_cache[NAPI_SKB_CACHE_SIZE];
+	struct page_frag_cache	page;
+	u32			flush_skb_count;
+	void			*flush_skb_cache[NAPI_SKB_CACHE_SIZE];
 };
 
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
@@ -860,11 +860,11 @@  void __kfree_skb_flush(void)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-	/* flush skb_cache if containing objects */
-	if (nc->skb_count) {
-		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
-				     nc->skb_cache);
-		nc->skb_count = 0;
+	/* flush flush_skb_cache if containing objects */
+	if (nc->flush_skb_count) {
+		kmem_cache_free_bulk(skbuff_head_cache, nc->flush_skb_count,
+				     nc->flush_skb_cache);
+		nc->flush_skb_count = 0;
 	}
 }
 
@@ -876,18 +876,18 @@  static inline void _kfree_skb_defer(struct sk_buff *skb)
 	skb_release_all(skb);
 
 	/* record skb to CPU local list */
-	nc->skb_cache[nc->skb_count++] = skb;
+	nc->flush_skb_cache[nc->flush_skb_count++] = skb;
 
 #ifdef CONFIG_SLUB
 	/* SLUB writes into objects when freeing */
 	prefetchw(skb);
 #endif
 
-	/* flush skb_cache if it is filled */
-	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
+	/* flush flush_skb_cache if it is filled */
+	if (unlikely(nc->flush_skb_count == NAPI_SKB_CACHE_SIZE)) {
 		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
-				     nc->skb_cache);
-		nc->skb_count = 0;
+				     nc->flush_skb_cache);
+		nc->flush_skb_count = 0;
 	}
 }
 void __kfree_skb_defer(struct sk_buff *skb)