
[bpf-next,v2,5/5] selftest/bpf: Use global variables instead of maps for test_tcpbpf_kern

Message ID 160417035730.2823.6697632421519908152.stgit@localhost.localdomain
State New
Series selftests/bpf: Migrate test_tcpbpf_user to be a part of test_progs framework

Commit Message

Alexander Duyck Oct. 31, 2020, 6:52 p.m. UTC
From: Alexander Duyck <alexanderduyck@fb.com>

Use global variables instead of global_map and sockopt_results_map to track
test data. Doing this greatly simplifies the code as there is no need to
take the extra steps of updating the maps or looking up elements.

Suggested-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
---
 .../testing/selftests/bpf/prog_tests/tcpbpf_user.c |   53 ++++--------
 .../testing/selftests/bpf/progs/test_tcpbpf_kern.c |   86 +++-----------------
 tools/testing/selftests/bpf/test_tcpbpf.h          |    2 
 3 files changed, 31 insertions(+), 110 deletions(-)
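
For context, the pattern this patch moves to looks roughly like the sketch below (a condensed
illustration reusing names from the diffs that follow; setup, error handling and the rest of
the callback are elided):

	/* BPF side (test_tcpbpf_kern.c): a plain global, no map plumbing */
	struct tcpbpf_globals global = { 0 };

	SEC("sockops")
	int bpf_testcb(struct bpf_sock_ops *skops)
	{
		/* direct store into the global; replaces update_event_map() */
		global.event_map |= (1 << skops->op);
		/* ... rest of the callback ... */
		return 1;
	}

	/* user space (tcpbpf_user.c): the skeleton mmaps .bss, so the struct is read directly */
	struct test_tcpbpf_kern *skel = test_tcpbpf_kern__open_and_load();
	/* ... attach to the cgroup and generate the TCP traffic ... */
	run_test(&skel->bss->global);	/* no bpf_map_lookup_elem() round trip */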

Comments

Martin KaFai Lau Nov. 3, 2020, 1:25 a.m. UTC | #1
On Sat, Oct 31, 2020 at 11:52:37AM -0700, Alexander Duyck wrote:
[ ... ]

> +struct tcpbpf_globals global = { 0 };
>  int _version SEC("version") = 1;
>  
>  SEC("sockops")
> @@ -105,29 +72,15 @@ int bpf_testcb(struct bpf_sock_ops *skops)
>  
>  	op = (int) skops->op;
>  
> -	update_event_map(op);
> +	global.event_map |= (1 << op);
>  
>  	switch (op) {
>  	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
>  		/* Test failure to set largest cb flag (assumes not defined) */
> -		bad_call_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
> +		global.bad_cb_test_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
>  		/* Set callback */
> -		good_call_rv = bpf_sock_ops_cb_flags_set(skops,
> +		global.good_cb_test_rv = bpf_sock_ops_cb_flags_set(skops,
>  						 BPF_SOCK_OPS_STATE_CB_FLAG);
> -		/* Update results */
> -		{
> -			__u32 key = 0;
> -			struct tcpbpf_globals g, *gp;
> -
> -			gp = bpf_map_lookup_elem(&global_map, &key);
> -			if (!gp)
> -				break;
> -			g = *gp;
> -			g.bad_cb_test_rv = bad_call_rv;
> -			g.good_cb_test_rv = good_call_rv;
> -			bpf_map_update_elem(&global_map, &key, &g,
> -					    BPF_ANY);
> -		}
>  		break;
>  	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
>  		skops->sk_txhash = 0x12345f;
> @@ -143,10 +96,8 @@ int bpf_testcb(struct bpf_sock_ops *skops)
>  
>  				thdr = (struct tcphdr *)(header + offset);
>  				v = thdr->syn;
> -				__u32 key = 1;
>  
> -				bpf_map_update_elem(&sockopt_results, &key, &v,
> -						    BPF_ANY);
> +				global.tcp_saved_syn = v;
>  			}
>  		}
>  		break;
> @@ -156,25 +107,16 @@ int bpf_testcb(struct bpf_sock_ops *skops)
>  		break;
>  	case BPF_SOCK_OPS_STATE_CB:
>  		if (skops->args[1] == BPF_TCP_CLOSE) {
> -			__u32 key = 0;
> -			struct tcpbpf_globals g, *gp;
> -
> -			gp = bpf_map_lookup_elem(&global_map, &key);
> -			if (!gp)
> -				break;
> -			g = *gp;
>  			if (skops->args[0] == BPF_TCP_LISTEN) {
> -				g.num_listen++;
> +				global.num_listen++;
>  			} else {
> -				g.total_retrans = skops->total_retrans;
> -				g.data_segs_in = skops->data_segs_in;
> -				g.data_segs_out = skops->data_segs_out;
> -				g.bytes_received = skops->bytes_received;
> -				g.bytes_acked = skops->bytes_acked;
> +				global.total_retrans = skops->total_retrans;
> +				global.data_segs_in = skops->data_segs_in;
> +				global.data_segs_out = skops->data_segs_out;
> +				global.bytes_received = skops->bytes_received;
> +				global.bytes_acked = skops->bytes_acked;
>  			}
> -			g.num_close_events++;
> -			bpf_map_update_elem(&global_map, &key, &g,
> -					    BPF_ANY);

It is interesting that there is no race in the original "g.num_close_events++"
followed by the bpf_map_update_elem().  It seems quite fragile though.

> +			global.num_close_events++;

There is __sync_fetch_and_add().

Not sure about the global.event_map though; maybe use an individual
variable for each _CB.  Thoughts?

>  		}
>  		break;
>  	case BPF_SOCK_OPS_TCP_LISTEN_CB:
> @@ -182,9 +124,7 @@ int bpf_testcb(struct bpf_sock_ops *skops)
>  		v = bpf_setsockopt(skops, IPPROTO_TCP, TCP_SAVE_SYN,
>  				   &save_syn, sizeof(save_syn));
>  		/* Update global map w/ result of setsock opt */
> -		__u32 key = 0;
> -
> -		bpf_map_update_elem(&sockopt_results, &key, &v, BPF_ANY);
> +		global.tcp_save_syn = v;
>  		break;
>  	default:
>  		rv = -1;
> diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h
> index 6220b95cbd02..0ed33521cbbb 100644
> --- a/tools/testing/selftests/bpf/test_tcpbpf.h
> +++ b/tools/testing/selftests/bpf/test_tcpbpf.h
> @@ -14,5 +14,7 @@ struct tcpbpf_globals {
>  	__u64 bytes_acked;
>  	__u32 num_listen;
>  	__u32 num_close_events;
> +	__u32 tcp_save_syn;
> +	__u32 tcp_saved_syn;
>  };
>  #endif
> 
>
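
For reference, the atomic alternative Martin points to would look roughly like this in the
BPF program (a sketch only; as the follow-ups below conclude, the plain increment is fine
here because the test drives and closes all sockets from a single thread):

	case BPF_SOCK_OPS_STATE_CB:
		if (skops->args[1] == BPF_TCP_CLOSE) {
			/* ... */
			/* atomic alternative to global.num_close_events++ */
			__sync_fetch_and_add(&global.num_close_events, 1);
		}
		break;
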
Alexander Duyck Nov. 3, 2020, 3:42 p.m. UTC | #2
On Mon, Nov 2, 2020 at 5:26 PM Martin KaFai Lau <kafai@fb.com> wrote:
>
> On Sat, Oct 31, 2020 at 11:52:37AM -0700, Alexander Duyck wrote:
> [ ... ]
>
> > +struct tcpbpf_globals global = { 0 };
> >  int _version SEC("version") = 1;
> >
> >  SEC("sockops")
> > @@ -105,29 +72,15 @@ int bpf_testcb(struct bpf_sock_ops *skops)
> >
> >       op = (int) skops->op;
> >
> > -     update_event_map(op);
> > +     global.event_map |= (1 << op);
> >
> >       switch (op) {
> >       case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
> >               /* Test failure to set largest cb flag (assumes not defined) */
> > -             bad_call_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
> > +             global.bad_cb_test_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
> >               /* Set callback */
> > -             good_call_rv = bpf_sock_ops_cb_flags_set(skops,
> > +             global.good_cb_test_rv = bpf_sock_ops_cb_flags_set(skops,
> >                                                BPF_SOCK_OPS_STATE_CB_FLAG);
> > -             /* Update results */
> > -             {
> > -                     __u32 key = 0;
> > -                     struct tcpbpf_globals g, *gp;
> > -
> > -                     gp = bpf_map_lookup_elem(&global_map, &key);
> > -                     if (!gp)
> > -                             break;
> > -                     g = *gp;
> > -                     g.bad_cb_test_rv = bad_call_rv;
> > -                     g.good_cb_test_rv = good_call_rv;
> > -                     bpf_map_update_elem(&global_map, &key, &g,
> > -                                         BPF_ANY);
> > -             }
> >               break;
> >       case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
> >               skops->sk_txhash = 0x12345f;
> > @@ -143,10 +96,8 @@ int bpf_testcb(struct bpf_sock_ops *skops)
> >
> >                               thdr = (struct tcphdr *)(header + offset);
> >                               v = thdr->syn;
> > -                             __u32 key = 1;
> >
> > -                             bpf_map_update_elem(&sockopt_results, &key, &v,
> > -                                                 BPF_ANY);
> > +                             global.tcp_saved_syn = v;
> >                       }
> >               }
> >               break;
> > @@ -156,25 +107,16 @@ int bpf_testcb(struct bpf_sock_ops *skops)
> >               break;
> >       case BPF_SOCK_OPS_STATE_CB:
> >               if (skops->args[1] == BPF_TCP_CLOSE) {
> > -                     __u32 key = 0;
> > -                     struct tcpbpf_globals g, *gp;
> > -
> > -                     gp = bpf_map_lookup_elem(&global_map, &key);
> > -                     if (!gp)
> > -                             break;
> > -                     g = *gp;
> >                       if (skops->args[0] == BPF_TCP_LISTEN) {
> > -                             g.num_listen++;
> > +                             global.num_listen++;
> >                       } else {
> > -                             g.total_retrans = skops->total_retrans;
> > -                             g.data_segs_in = skops->data_segs_in;
> > -                             g.data_segs_out = skops->data_segs_out;
> > -                             g.bytes_received = skops->bytes_received;
> > -                             g.bytes_acked = skops->bytes_acked;
> > +                             global.total_retrans = skops->total_retrans;
> > +                             global.data_segs_in = skops->data_segs_in;
> > +                             global.data_segs_out = skops->data_segs_out;
> > +                             global.bytes_received = skops->bytes_received;
> > +                             global.bytes_acked = skops->bytes_acked;
> >                       }
> > -                     g.num_close_events++;
> > -                     bpf_map_update_elem(&global_map, &key, &g,
> > -                                         BPF_ANY);
> It is interesting that there is no race in the original "g.num_close_events++"
> followed by the bpf_map_update_elem().  It seems quite fragile though.

How would it race with the current code though? At this point we are
controlling the sockets in a single thread. As such the close events
should already be serialized, shouldn't they? This may have been a
problem with the old code, but even then it was only two sockets, so I
don't think there was much risk of them racing against each other
since the two sockets were linked anyway.

> > +                     global.num_close_events++;
> There is __sync_fetch_and_add().
>
> Not sure about the global.event_map though; maybe use an individual
> variable for each _CB.  Thoughts?

I think this may be overkill for what we actually need. Since we are
closing the sockets in a single-threaded application, there isn't much
risk of the sockets all racing against each other in the close, is
there?
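
For context, run_test() in the patch below drives everything from a single thread; roughly
(names from the diff, ordering and data exchange simplified):

	int listen_fd = -1, cli_fd = -1, accept_fd = -1;

	/* set up the listener, connect cli_fd, accept_fd = accept(listen_fd, ...),
	 * exchange data ... then tear everything down from this same thread:
	 */
	close(cli_fd);
	close(accept_fd);
	close(listen_fd);	/* listener + both ends => the 3 close events verify_result() expects */
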
Martin KaFai Lau Nov. 3, 2020, 6:12 p.m. UTC | #3
On Tue, Nov 03, 2020 at 07:42:46AM -0800, Alexander Duyck wrote:
> On Mon, Nov 2, 2020 at 5:26 PM Martin KaFai Lau <kafai@fb.com> wrote:
> >
> > On Sat, Oct 31, 2020 at 11:52:37AM -0700, Alexander Duyck wrote:
> > [ ... ]
> >
> > > +struct tcpbpf_globals global = { 0 };
> > >  int _version SEC("version") = 1;
> > >
> > >  SEC("sockops")
> > > @@ -105,29 +72,15 @@ int bpf_testcb(struct bpf_sock_ops *skops)
> > >
> > >       op = (int) skops->op;
> > >
> > > -     update_event_map(op);
> > > +     global.event_map |= (1 << op);
> > >
> > >       switch (op) {
> > >       case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
> > >               /* Test failure to set largest cb flag (assumes not defined) */
> > > -             bad_call_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
> > > +             global.bad_cb_test_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
> > >               /* Set callback */
> > > -             good_call_rv = bpf_sock_ops_cb_flags_set(skops,
> > > +             global.good_cb_test_rv = bpf_sock_ops_cb_flags_set(skops,
> > >                                                BPF_SOCK_OPS_STATE_CB_FLAG);
> > > -             /* Update results */
> > > -             {
> > > -                     __u32 key = 0;
> > > -                     struct tcpbpf_globals g, *gp;
> > > -
> > > -                     gp = bpf_map_lookup_elem(&global_map, &key);
> > > -                     if (!gp)
> > > -                             break;
> > > -                     g = *gp;
> > > -                     g.bad_cb_test_rv = bad_call_rv;
> > > -                     g.good_cb_test_rv = good_call_rv;
> > > -                     bpf_map_update_elem(&global_map, &key, &g,
> > > -                                         BPF_ANY);
> > > -             }
> > >               break;
> > >       case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
> > >               skops->sk_txhash = 0x12345f;
> > > @@ -143,10 +96,8 @@ int bpf_testcb(struct bpf_sock_ops *skops)
> > >
> > >                               thdr = (struct tcphdr *)(header + offset);
> > >                               v = thdr->syn;
> > > -                             __u32 key = 1;
> > >
> > > -                             bpf_map_update_elem(&sockopt_results, &key, &v,
> > > -                                                 BPF_ANY);
> > > +                             global.tcp_saved_syn = v;
> > >                       }
> > >               }
> > >               break;
> > > @@ -156,25 +107,16 @@ int bpf_testcb(struct bpf_sock_ops *skops)
> > >               break;
> > >       case BPF_SOCK_OPS_STATE_CB:
> > >               if (skops->args[1] == BPF_TCP_CLOSE) {
> > > -                     __u32 key = 0;
> > > -                     struct tcpbpf_globals g, *gp;
> > > -
> > > -                     gp = bpf_map_lookup_elem(&global_map, &key);
> > > -                     if (!gp)
> > > -                             break;
> > > -                     g = *gp;
> > >                       if (skops->args[0] == BPF_TCP_LISTEN) {
> > > -                             g.num_listen++;
> > > +                             global.num_listen++;
> > >                       } else {
> > > -                             g.total_retrans = skops->total_retrans;
> > > -                             g.data_segs_in = skops->data_segs_in;
> > > -                             g.data_segs_out = skops->data_segs_out;
> > > -                             g.bytes_received = skops->bytes_received;
> > > -                             g.bytes_acked = skops->bytes_acked;
> > > +                             global.total_retrans = skops->total_retrans;
> > > +                             global.data_segs_in = skops->data_segs_in;
> > > +                             global.data_segs_out = skops->data_segs_out;
> > > +                             global.bytes_received = skops->bytes_received;
> > > +                             global.bytes_acked = skops->bytes_acked;
> > >                       }
> > > -                     g.num_close_events++;
> > > -                     bpf_map_update_elem(&global_map, &key, &g,
> > > -                                         BPF_ANY);
> > It is interesting that there is no race in the original "g.num_close_events++"
> > followed by the bpf_map_update_elem().  It seems quite fragile though.
>
> How would it race with the current code though? At this point we are
> controlling the sockets in a single thread. As such the close events
> should already be serialized, shouldn't they? This may have been a
> problem with the old code, but even then it was only two sockets, so I
> don't think there was much risk of them racing against each other
> since the two sockets were linked anyway.
>
> > > +                     global.num_close_events++;
> > There is __sync_fetch_and_add().
> >
> > Not sure about the global.event_map though; maybe use an individual
> > variable for each _CB.  Thoughts?
>
> I think this may be overkill for what we actually need. Since we are
> closing the sockets in a single-threaded application, there isn't much
> risk of the sockets all racing against each other in the close, is
> there?

Makes sense.

Acked-by: Martin KaFai Lau <kafai@fb.com>
Andrii Nakryiko Nov. 3, 2020, 6:40 p.m. UTC | #4
On Sat, Oct 31, 2020 at 11:52 AM Alexander Duyck
<alexander.duyck@gmail.com> wrote:
>
> From: Alexander Duyck <alexanderduyck@fb.com>
>
> Use global variables instead of global_map and sockopt_results_map to track
> test data. Doing this greatly simplifies the code as there is no need to
> take the extra steps of updating the maps or looking up elements.
>
> Suggested-by: Martin KaFai Lau <kafai@fb.com>
> Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
> ---
>  .../testing/selftests/bpf/prog_tests/tcpbpf_user.c |   53 ++++--------
>  .../testing/selftests/bpf/progs/test_tcpbpf_kern.c |   86 +++-----------------
>  tools/testing/selftests/bpf/test_tcpbpf.h          |    2
>  3 files changed, 31 insertions(+), 110 deletions(-)

[...]

> -}
> -
> +struct tcpbpf_globals global = { 0 };

nit: = {0} notation is misleading; just = {} is equivalent and IMO better.

But don't bother if it's the only change you need to do for the next version.

>  int _version SEC("version") = 1;
>

[...]
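
Concretely, the suggested initializer change would be (illustrative only):

	-struct tcpbpf_globals global = { 0 };
	+struct tcpbpf_globals global = {};

Both forms zero-initialize every member of struct tcpbpf_globals; the empty-brace form just
avoids the impression that only the first field is being set.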

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c b/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c
index c7a61b0d616a..bceca6fce09a 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c
@@ -11,7 +11,7 @@ 
 
 static __u32 duration;
 
-static void verify_result(int map_fd, int sock_map_fd)
+static void verify_result(struct tcpbpf_globals *result)
 {
 	__u32 expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) |
 				 (1 << BPF_SOCK_OPS_RWND_INIT) |
@@ -21,48 +21,31 @@  static void verify_result(int map_fd, int sock_map_fd)
 				 (1 << BPF_SOCK_OPS_NEEDS_ECN) |
 				 (1 << BPF_SOCK_OPS_STATE_CB) |
 				 (1 << BPF_SOCK_OPS_TCP_LISTEN_CB));
-	struct tcpbpf_globals result = { 0 };
-	__u32 key = 0;
-	int res;
-	int rv;
-
-	rv = bpf_map_lookup_elem(map_fd, &key, &result);
-	if (CHECK(rv, "bpf_map_lookup_elem(map_fd)", "err:%d errno:%d",
-		  rv, errno))
-		return;
 
 	/* check global map */
-	CHECK(expected_events != result.event_map, "event_map",
+	CHECK(expected_events != result->event_map, "event_map",
 	      "unexpected event_map: actual %#" PRIx32" != expected %#" PRIx32 "\n",
-	      result.event_map, expected_events);
+	      result->event_map, expected_events);
 
-	ASSERT_EQ(result.bytes_received, 501, "bytes_received");
-	ASSERT_EQ(result.bytes_acked, 1002, "bytes_acked");
-	ASSERT_EQ(result.data_segs_in, 1, "data_segs_in");
-	ASSERT_EQ(result.data_segs_out, 1, "data_segs_out");
-	ASSERT_EQ(result.bad_cb_test_rv, 0x80, "bad_cb_test_rv");
-	ASSERT_EQ(result.good_cb_test_rv, 0, "good_cb_test_rv");
-	ASSERT_EQ(result.num_listen, 1, "num_listen");
+	ASSERT_EQ(result->bytes_received, 501, "bytes_received");
+	ASSERT_EQ(result->bytes_acked, 1002, "bytes_acked");
+	ASSERT_EQ(result->data_segs_in, 1, "data_segs_in");
+	ASSERT_EQ(result->data_segs_out, 1, "data_segs_out");
+	ASSERT_EQ(result->bad_cb_test_rv, 0x80, "bad_cb_test_rv");
+	ASSERT_EQ(result->good_cb_test_rv, 0, "good_cb_test_rv");
+	ASSERT_EQ(result->num_listen, 1, "num_listen");
 
 	/* 3 comes from one listening socket + both ends of the connection */
-	ASSERT_EQ(result.num_close_events, 3, "num_close_events");
+	ASSERT_EQ(result->num_close_events, 3, "num_close_events");
 
 	/* check setsockopt for SAVE_SYN */
-	key = 0;
-	rv = bpf_map_lookup_elem(sock_map_fd, &key, &res);
-	CHECK(rv, "bpf_map_lookup_elem(sock_map_fd)", "err:%d errno:%d",
-	      rv, errno);
-	ASSERT_EQ(res, 0, "bpf_setsockopt(TCP_SAVE_SYN)");
+	ASSERT_EQ(result->tcp_save_syn, 0, "tcp_save_syn");
 
 	/* check getsockopt for SAVED_SYN */
-	key = 1;
-	rv = bpf_map_lookup_elem(sock_map_fd, &key, &res);
-	CHECK(rv, "bpf_map_lookup_elem(sock_map_fd)", "err:%d errno:%d",
-	      rv, errno);
-	ASSERT_EQ(res, 1, "bpf_getsockopt(TCP_SAVED_SYN)");
+	ASSERT_EQ(result->tcp_saved_syn, 1, "tcp_saved_syn");
 }
 
-static void run_test(int map_fd, int sock_map_fd)
+static void run_test(struct tcpbpf_globals *result)
 {
 	int listen_fd = -1, cli_fd = -1, accept_fd = -1;
 	char buf[1000];
@@ -129,13 +112,12 @@  static void run_test(int map_fd, int sock_map_fd)
 		close(listen_fd);
 
 	if (!err)
-		verify_result(map_fd, sock_map_fd);
+		verify_result(result);
 }
 
 void test_tcpbpf_user(void)
 {
 	struct test_tcpbpf_kern *skel;
-	int map_fd, sock_map_fd;
 	int cg_fd = -1;
 
 	skel = test_tcpbpf_kern__open_and_load();
@@ -147,14 +129,11 @@  void test_tcpbpf_user(void)
 		  "cg_fd:%d errno:%d", cg_fd, errno))
 		goto cleanup_skel;
 
-	map_fd = bpf_map__fd(skel->maps.global_map);
-	sock_map_fd = bpf_map__fd(skel->maps.sockopt_results);
-
 	skel->links.bpf_testcb = bpf_program__attach_cgroup(skel->progs.bpf_testcb, cg_fd);
 	if (ASSERT_OK_PTR(skel->links.bpf_testcb, "attach_cgroup(bpf_testcb)"))
 		goto cleanup_namespace;
 
-	run_test(map_fd, sock_map_fd);
+	run_test(&skel->bss->global);
 
 cleanup_namespace:
 	if (cg_fd != -1)
diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
index 3e6912e4df3d..c0250142e1f6 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
@@ -14,40 +14,7 @@ 
 #include <bpf/bpf_endian.h>
 #include "test_tcpbpf.h"
 
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 4);
-	__type(key, __u32);
-	__type(value, struct tcpbpf_globals);
-} global_map SEC(".maps");
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 2);
-	__type(key, __u32);
-	__type(value, int);
-} sockopt_results SEC(".maps");
-
-static inline void update_event_map(int event)
-{
-	__u32 key = 0;
-	struct tcpbpf_globals g, *gp;
-
-	gp = bpf_map_lookup_elem(&global_map, &key);
-	if (gp == NULL) {
-		struct tcpbpf_globals g = {0};
-
-		g.event_map |= (1 << event);
-		bpf_map_update_elem(&global_map, &key, &g,
-			    BPF_ANY);
-	} else {
-		g = *gp;
-		g.event_map |= (1 << event);
-		bpf_map_update_elem(&global_map, &key, &g,
-			    BPF_ANY);
-	}
-}
-
+struct tcpbpf_globals global = { 0 };
 int _version SEC("version") = 1;
 
 SEC("sockops")
@@ -105,29 +72,15 @@  int bpf_testcb(struct bpf_sock_ops *skops)
 
 	op = (int) skops->op;
 
-	update_event_map(op);
+	global.event_map |= (1 << op);
 
 	switch (op) {
 	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 		/* Test failure to set largest cb flag (assumes not defined) */
-		bad_call_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
+		global.bad_cb_test_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
 		/* Set callback */
-		good_call_rv = bpf_sock_ops_cb_flags_set(skops,
+		global.good_cb_test_rv = bpf_sock_ops_cb_flags_set(skops,
 						 BPF_SOCK_OPS_STATE_CB_FLAG);
-		/* Update results */
-		{
-			__u32 key = 0;
-			struct tcpbpf_globals g, *gp;
-
-			gp = bpf_map_lookup_elem(&global_map, &key);
-			if (!gp)
-				break;
-			g = *gp;
-			g.bad_cb_test_rv = bad_call_rv;
-			g.good_cb_test_rv = good_call_rv;
-			bpf_map_update_elem(&global_map, &key, &g,
-					    BPF_ANY);
-		}
 		break;
 	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
 		skops->sk_txhash = 0x12345f;
@@ -143,10 +96,8 @@  int bpf_testcb(struct bpf_sock_ops *skops)
 
 				thdr = (struct tcphdr *)(header + offset);
 				v = thdr->syn;
-				__u32 key = 1;
 
-				bpf_map_update_elem(&sockopt_results, &key, &v,
-						    BPF_ANY);
+				global.tcp_saved_syn = v;
 			}
 		}
 		break;
@@ -156,25 +107,16 @@  int bpf_testcb(struct bpf_sock_ops *skops)
 		break;
 	case BPF_SOCK_OPS_STATE_CB:
 		if (skops->args[1] == BPF_TCP_CLOSE) {
-			__u32 key = 0;
-			struct tcpbpf_globals g, *gp;
-
-			gp = bpf_map_lookup_elem(&global_map, &key);
-			if (!gp)
-				break;
-			g = *gp;
 			if (skops->args[0] == BPF_TCP_LISTEN) {
-				g.num_listen++;
+				global.num_listen++;
 			} else {
-				g.total_retrans = skops->total_retrans;
-				g.data_segs_in = skops->data_segs_in;
-				g.data_segs_out = skops->data_segs_out;
-				g.bytes_received = skops->bytes_received;
-				g.bytes_acked = skops->bytes_acked;
+				global.total_retrans = skops->total_retrans;
+				global.data_segs_in = skops->data_segs_in;
+				global.data_segs_out = skops->data_segs_out;
+				global.bytes_received = skops->bytes_received;
+				global.bytes_acked = skops->bytes_acked;
 			}
-			g.num_close_events++;
-			bpf_map_update_elem(&global_map, &key, &g,
-					    BPF_ANY);
+			global.num_close_events++;
 		}
 		break;
 	case BPF_SOCK_OPS_TCP_LISTEN_CB:
@@ -182,9 +124,7 @@  int bpf_testcb(struct bpf_sock_ops *skops)
 		v = bpf_setsockopt(skops, IPPROTO_TCP, TCP_SAVE_SYN,
 				   &save_syn, sizeof(save_syn));
 		/* Update global map w/ result of setsock opt */
-		__u32 key = 0;
-
-		bpf_map_update_elem(&sockopt_results, &key, &v, BPF_ANY);
+		global.tcp_save_syn = v;
 		break;
 	default:
 		rv = -1;
diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h
index 6220b95cbd02..0ed33521cbbb 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf.h
+++ b/tools/testing/selftests/bpf/test_tcpbpf.h
@@ -14,5 +14,7 @@  struct tcpbpf_globals {
 	__u64 bytes_acked;
 	__u32 num_listen;
 	__u32 num_close_events;
+	__u32 tcp_save_syn;
+	__u32 tcp_saved_syn;
 };
 #endif