[net-next,13/13] sock: introduce tracepoint trace_sk_data_ready()

Message ID 20210805185750.4522-14-xiyou.wangcong@gmail.com
State New
Series net: add more tracepoints to TCP/IP stack

Commit Message

Cong Wang Aug. 5, 2021, 6:57 p.m. UTC
From: Qitao Xu <qitao.xu@bytedance.com>

Tracepoint trace_sk_data_ready() is introduced to trace an skb at the
exit of the socket layer on the RX side, right before the socket's
sk_data_ready() callback is invoked. For now it is only wired up for
TCP and UDP.
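
As an illustration only (not part of this patch), a probe could be
attached to the new tracepoint from built-in kernel code roughly as
sketched below. The probe and init function names are made up; a
loadable module would additionally need the tracepoint exported with
EXPORT_TRACEPOINT_SYMBOL(), which this series does not add.

/*
 * Hypothetical consumer of the new tracepoint -- a minimal sketch,
 * not part of this patch.
 */
#include <linux/init.h>
#include <linux/printk.h>
#include <net/sock.h>
#include <trace/events/sock.h>

/* Runs each time trace_sk_data_ready() fires; the first argument is
 * the private data pointer passed at registration time.
 */
static void probe_sk_data_ready(void *data, const struct sock *sk,
				const struct sk_buff *skb)
{
	pr_debug("sk_data_ready: sk=%px skb=%px\n", sk, skb);
}

static int __init sk_data_ready_probe_init(void)
{
	return register_trace_sk_data_ready(probe_sk_data_ready, NULL);
}
late_initcall(sk_data_ready_probe_init);

Once applied, the event should also show up through the normal tracing
interfaces, e.g. under events/sock/sk_data_ready in tracefs.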

Reviewed-by: Cong Wang <cong.wang@bytedance.com>
Signed-off-by: Qitao Xu <qitao.xu@bytedance.com>
---
 include/trace/events/sock.h | 19 +++++++++++++++++++
 net/ipv4/tcp_input.c        |  8 +++++++-
 net/ipv4/udp.c              |  5 ++++-
 3 files changed, 30 insertions(+), 2 deletions(-)

Patch

diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 12c315782766..860d8b0f02c5 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -261,6 +261,25 @@  TRACE_EVENT(inet_sk_error_report,
 		  __entry->error)
 );
 
+TRACE_EVENT(sk_data_ready,
+
+	TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+
+	TP_ARGS(sk, skb),
+
+	TP_STRUCT__entry(
+		__field(const void *, skaddr)
+		__field(const void *, skbaddr)
+	),
+
+	TP_fast_assign(
+		__entry->skaddr = sk;
+		__entry->skbaddr = skb;
+	),
+
+	TP_printk("skaddr=%px, skbaddr=%px", __entry->skaddr, __entry->skbaddr)
+);
+
 #endif /* _TRACE_SOCK_H */
 
 /* This part must be outside protection */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3f7bd7ae7d7a..16edb9d37529 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -77,6 +77,7 @@ 
 #include <asm/unaligned.h>
 #include <linux/errqueue.h>
 #include <trace/events/tcp.h>
+#include <trace/events/sock.h>
 #include <linux/jump_label_ratelimit.h>
 #include <net/busy_poll.h>
 #include <net/mptcp.h>
@@ -5034,6 +5035,8 @@  static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
 		tcp_fast_path_check(sk);
 
+		if (!sock_flag(sk, SOCK_DEAD))
+			trace_sk_data_ready(sk, skb);
 		if (eaten > 0)
 			kfree_skb_partial(skb, fragstolen);
 		if (!sock_flag(sk, SOCK_DEAD))
@@ -5601,8 +5604,10 @@  static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
 			if (skb_copy_bits(skb, ptr, &tmp, 1))
 				BUG();
 			tp->urg_data = TCP_URG_VALID | tmp;
-			if (!sock_flag(sk, SOCK_DEAD))
+			if (!sock_flag(sk, SOCK_DEAD)) {
+				trace_sk_data_ready(sk, skb);
 				sk->sk_data_ready(sk);
+			}
 		}
 	}
 }
@@ -5894,6 +5899,7 @@  void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
 
 			__tcp_ack_snd_check(sk, 0);
 no_ack:
+			trace_sk_data_ready(sk, skb);
 			if (eaten)
 				kfree_skb_partial(skb, fragstolen);
 			tcp_data_ready(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4751a8f9acff..b58cc943a862 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -108,6 +108,7 @@ 
 #include <linux/static_key.h>
 #include <linux/btf_ids.h>
 #include <trace/events/skb.h>
+#include <trace/events/sock.h>
 #include <net/busy_poll.h>
 #include "udp_impl.h"
 #include <net/sock_reuseport.h>
@@ -1579,8 +1580,10 @@  int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	__skb_queue_tail(list, skb);
 	spin_unlock(&list->lock);
 
-	if (!sock_flag(sk, SOCK_DEAD))
+	if (!sock_flag(sk, SOCK_DEAD)) {
+		trace_sk_data_ready(sk, skb);
 		sk->sk_data_ready(sk);
+	}
 
 	busylock_release(busy);
 	return 0;