diff mbox series

[RFC,5/7] sock: support refilling pfrag from pfrag_pool

Message ID 1629257542-36145-6-git-send-email-linyunsheng@huawei.com
State New
Headers show
Series add socket to netdev page frag recycling support | expand

Commit Message

Yunsheng Lin Aug. 18, 2021, 3:32 a.m. UTC
A previous patch in this series added a pfrag pool built on top of the
page pool, so support refilling the pfrag from the new pfrag pool for
TCPv4.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 include/net/sock.h |  1 +
 net/core/sock.c    |  9 +++++++++
 net/ipv4/tcp.c     | 34 ++++++++++++++++++++++++++--------
 3 files changed, 36 insertions(+), 8 deletions(-)
diff mbox series

Patch

diff --git a/include/net/sock.h b/include/net/sock.h
index 6e76145..af40084 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -455,6 +455,7 @@  struct sock {
 	unsigned long		sk_pacing_rate; /* bytes per second */
 	unsigned long		sk_max_pacing_rate;
 	struct page_frag	sk_frag;
+	struct pfrag_pool	*sk_frag_pool;
 	netdev_features_t	sk_route_caps;
 	netdev_features_t	sk_route_nocaps;
 	netdev_features_t	sk_route_forced_caps;
diff --git a/net/core/sock.c b/net/core/sock.c
index aada649..53152c9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -140,6 +140,7 @@ 
 #include <net/busy_poll.h>
 
 #include <linux/ethtool.h>
+#include <net/pfrag_pool.h>
 
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
@@ -1934,6 +1935,11 @@  static void __sk_destruct(struct rcu_head *head)
 		put_page(sk->sk_frag.page);
 		sk->sk_frag.page = NULL;
 	}
+	if (sk->sk_frag_pool) {
+		pfrag_pool_flush(sk->sk_frag_pool);
+		kfree(sk->sk_frag_pool);
+		sk->sk_frag_pool = NULL;
+	}
 
 	if (sk->sk_peer_cred)
 		put_cred(sk->sk_peer_cred);
@@ -3134,6 +3140,9 @@  void sock_init_data(struct socket *sock, struct sock *sk)
 
 	sk->sk_frag.page	=	NULL;
 	sk->sk_frag.offset	=	0;
+
+	sk->sk_frag_pool = kzalloc(sizeof(*sk->sk_frag_pool), sk->sk_allocation);
+
 	sk->sk_peek_off		=	-1;
 
 	sk->sk_peer_pid 	=	NULL;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f931def..992dcbc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -280,6 +280,7 @@ 
 #include <linux/uaccess.h>
 #include <asm/ioctls.h>
 #include <net/busy_poll.h>
+#include <net/pfrag_pool.h>
 
 /* Track pending CMSGs. */
 enum {
@@ -1337,12 +1338,20 @@  int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 			if (err)
 				goto do_fault;
 		} else if (!zc) {
-			bool merge = true;
+			bool merge = true, pfrag_pool = true;
 			int i = skb_shinfo(skb)->nr_frags;
-			struct page_frag *pfrag = sk_page_frag(sk);
+			struct page_frag *pfrag;
 
-			if (!sk_page_frag_refill(sk, pfrag))
-				goto wait_for_space;
+			pfrag_pool_updata_napi(sk->sk_frag_pool,
+					       READ_ONCE(sk->sk_napi_id));
+			pfrag = pfrag_pool_refill(sk->sk_frag_pool, sk->sk_allocation);
+			if (!pfrag) {
+				pfrag = sk_page_frag(sk);
+				if (!sk_page_frag_refill(sk, pfrag))
+					goto wait_for_space;
+
+				pfrag_pool = false;
+			}
 
 			if (!skb_can_coalesce(skb, i, pfrag->page,
 					      pfrag->offset)) {
@@ -1369,11 +1378,20 @@  int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 			if (merge) {
 				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 			} else {
-				skb_fill_page_desc(skb, i, pfrag->page,
-						   pfrag->offset, copy);
-				page_ref_inc(pfrag->page);
+				if (pfrag_pool) {
+					skb_fill_pp_page_desc(skb, i, pfrag->page,
+							      pfrag->offset, copy);
+				} else {
+					page_ref_inc(pfrag->page);
+					skb_fill_page_desc(skb, i, pfrag->page,
+							   pfrag->offset, copy);
+				}
 			}
-			pfrag->offset += copy;
+
+			if (pfrag_pool)
+				pfrag_pool_commit(sk->sk_frag_pool, copy, merge);
+			else
+				pfrag->offset += copy;
 		} else {
 			if (!sk_wmem_schedule(sk, copy))
 				goto wait_for_space;