perflib: deprecate bpf_map__resize in favor of bpf_map__set_max_entries

Message ID 20210815103610.27887-1-falakreyaz@gmail.com
State New
Series perflib: deprecate bpf_map__resize in favor of bpf_map__set_max_entries

Commit Message

Muhammad Falak R Wani Aug. 15, 2021, 10:36 a.m. UTC
As a part of libbpf 1.0 plan[0], this patch deprecates use of
bpf_map__resize in favour of bpf_map__set_max_entries.

Reference: https://github.com/libbpf/libbpf/issues/304
[0]: https://github.com/libbpf/libbpf/wiki/Libbpf:-the-road-to-v1.0#libbpfh-high-level-apis
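
Both helpers set max_entries on a not-yet-loaded map, so the conversion
is mechanical. A sketch of the pattern (with "nents" standing in for
whatever size the call site computes; both calls return 0 on success and
a negative error otherwise):

	/* deprecated: */
	bpf_map__resize(skel->maps.events, nents);
	/* replacement: */
	bpf_map__set_max_entries(skel->maps.events, nents);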

Signed-off-by: Muhammad Falak R Wani <falakreyaz@gmail.com>
---
 tools/perf/util/bpf_counter.c        | 8 ++++----
 tools/perf/util/bpf_counter_cgroup.c | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

Comments

Andrii Nakryiko Aug. 16, 2021, 7:28 p.m. UTC | #1
On Sun, Aug 15, 2021 at 3:36 AM Muhammad Falak R Wani
<falakreyaz@gmail.com> wrote:
>
> As a part of libbpf 1.0 plan[0], this patch deprecates use of
> bpf_map__resize in favour of bpf_map__set_max_entries.
>
> Reference: https://github.com/libbpf/libbpf/issues/304
> [0]: https://github.com/libbpf/libbpf/wiki/Libbpf:-the-road-to-v1.0#libbpfh-high-level-apis
>
> Signed-off-by: Muhammad Falak R Wani <falakreyaz@gmail.com>
> ---

All looks good, there is an opportunity to simplify the code a bit (see below).

Arnaldo, I assume you'll take this through your tree or you'd like us
to take it through bpf-next?

Acked-by: Andrii Nakryiko <andrii@kernel.org>

>  tools/perf/util/bpf_counter.c        | 8 ++++----
>  tools/perf/util/bpf_counter_cgroup.c | 8 ++++----
>  2 files changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
> index ba0f20853651..ced2dac31dcf 100644
> --- a/tools/perf/util/bpf_counter.c
> +++ b/tools/perf/util/bpf_counter.c
> @@ -127,9 +127,9 @@ static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
>
>         skel->rodata->num_cpu = evsel__nr_cpus(evsel);
>
> -       bpf_map__resize(skel->maps.events, evsel__nr_cpus(evsel));
> -       bpf_map__resize(skel->maps.fentry_readings, 1);
> -       bpf_map__resize(skel->maps.accum_readings, 1);
> +       bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel));
> +       bpf_map__set_max_entries(skel->maps.fentry_readings, 1);
> +       bpf_map__set_max_entries(skel->maps.accum_readings, 1);
>
>         prog_name = bpf_target_prog_name(prog_fd);
>         if (!prog_name) {
> @@ -399,7 +399,7 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
>                 return -1;
>         }
>
> -       bpf_map__resize(skel->maps.events, libbpf_num_possible_cpus());
> +       bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus());

If you set max_entries to 0 (or just skip specifying it) for events
map in util/bpf_skel/bperf_cgroup.bpf.c, you won't need to resize it,
libbpf will automatically size it to number of possible CPUs.
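
A minimal sketch of such a declaration (the exact field layout in the
perf skeletons is assumed here; for a BPF_MAP_TYPE_PERF_EVENT_ARRAY,
libbpf fills in max_entries with the number of possible CPUs when it is
left at 0):

	/* in the .bpf.c skeleton source, after the usual
	 * vmlinux.h/bpf_helpers.h includes */
	struct {
		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(int));
		/* no __uint(max_entries, ...): libbpf sizes the map to
		 * libbpf_num_possible_cpus() at load time */
	} events SEC(".maps");

The corresponding bpf_map__set_max_entries() call in userspace could
then be dropped entirely.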

>         err = bperf_leader_bpf__load(skel);
>         if (err) {
>                 pr_err("Failed to load leader skeleton\n");
> diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
> index 89aa5e71db1a..cbc6c2bca488 100644
> --- a/tools/perf/util/bpf_counter_cgroup.c
> +++ b/tools/perf/util/bpf_counter_cgroup.c
> @@ -65,14 +65,14 @@ static int bperf_load_program(struct evlist *evlist)
>
>         /* we need one copy of events per cpu for reading */
>         map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
> -       bpf_map__resize(skel->maps.events, map_size);
> -       bpf_map__resize(skel->maps.cgrp_idx, nr_cgroups);
> +       bpf_map__set_max_entries(skel->maps.events, map_size);
> +       bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
>         /* previous result is saved in a per-cpu array */
>         map_size = evlist->core.nr_entries / nr_cgroups;
> -       bpf_map__resize(skel->maps.prev_readings, map_size);
> +       bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
>         /* cgroup result needs all events (per-cpu) */
>         map_size = evlist->core.nr_entries;
> -       bpf_map__resize(skel->maps.cgrp_readings, map_size);
> +       bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
>
>         set_max_rlimit();
>
> --
> 2.17.1
>

Arnaldo Carvalho de Melo Sept. 15, 2021, 8:56 p.m. UTC | #2
On Mon, Aug 16, 2021 at 12:28:14PM -0700, Andrii Nakryiko wrote:
> On Sun, Aug 15, 2021 at 3:36 AM Muhammad Falak R Wani
> <falakreyaz@gmail.com> wrote:
> >
> > As a part of libbpf 1.0 plan[0], this patch deprecates use of
> > bpf_map__resize in favour of bpf_map__set_max_entries.
> >
> > Reference: https://github.com/libbpf/libbpf/issues/304
> > [0]: https://github.com/libbpf/libbpf/wiki/Libbpf:-the-road-to-v1.0#libbpfh-high-level-apis
> >
> > Signed-off-by: Muhammad Falak R Wani <falakreyaz@gmail.com>
> > ---
>
> All looks good, there is an opportunity to simplify the code a bit (see below).
>
> Arnaldo, I assume you'll take this through your tree or you'd like us

Yeah, I'll take the opportunity to try to improve that detection of
libbpf version, etc.

- Arnaldo

> to take it through bpf-next?
>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>
>
> >  tools/perf/util/bpf_counter.c        | 8 ++++----
> >  tools/perf/util/bpf_counter_cgroup.c | 8 ++++----
> >  2 files changed, 8 insertions(+), 8 deletions(-)
> >
> > diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
> > index ba0f20853651..ced2dac31dcf 100644
> > --- a/tools/perf/util/bpf_counter.c
> > +++ b/tools/perf/util/bpf_counter.c
> > @@ -127,9 +127,9 @@ static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
> >
> >         skel->rodata->num_cpu = evsel__nr_cpus(evsel);
> >
> > -       bpf_map__resize(skel->maps.events, evsel__nr_cpus(evsel));
> > -       bpf_map__resize(skel->maps.fentry_readings, 1);
> > -       bpf_map__resize(skel->maps.accum_readings, 1);
> > +       bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel));
> > +       bpf_map__set_max_entries(skel->maps.fentry_readings, 1);
> > +       bpf_map__set_max_entries(skel->maps.accum_readings, 1);
> >
> >         prog_name = bpf_target_prog_name(prog_fd);
> >         if (!prog_name) {
> > @@ -399,7 +399,7 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
> >                 return -1;
> >         }
> >
> > -       bpf_map__resize(skel->maps.events, libbpf_num_possible_cpus());
> > +       bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus());
>
> If you set max_entries to 0 (or just skip specifying it) for events
> map in util/bpf_skel/bperf_cgroup.bpf.c, you won't need to resize it,
> libbpf will automatically size it to number of possible CPUs.
>
> >         err = bperf_leader_bpf__load(skel);
> >         if (err) {
> >                 pr_err("Failed to load leader skeleton\n");
> > diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
> > index 89aa5e71db1a..cbc6c2bca488 100644
> > --- a/tools/perf/util/bpf_counter_cgroup.c
> > +++ b/tools/perf/util/bpf_counter_cgroup.c
> > @@ -65,14 +65,14 @@ static int bperf_load_program(struct evlist *evlist)
> >
> >         /* we need one copy of events per cpu for reading */
> >         map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
> > -       bpf_map__resize(skel->maps.events, map_size);
> > -       bpf_map__resize(skel->maps.cgrp_idx, nr_cgroups);
> > +       bpf_map__set_max_entries(skel->maps.events, map_size);
> > +       bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
> >         /* previous result is saved in a per-cpu array */
> >         map_size = evlist->core.nr_entries / nr_cgroups;
> > -       bpf_map__resize(skel->maps.prev_readings, map_size);
> > +       bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
> >         /* cgroup result needs all events (per-cpu) */
> >         map_size = evlist->core.nr_entries;
> > -       bpf_map__resize(skel->maps.cgrp_readings, map_size);
> > +       bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
> >
> >         set_max_rlimit();
> >
> > --
> > 2.17.1
> >

-- 

- Arnaldo

Patch

diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index ba0f20853651..ced2dac31dcf 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -127,9 +127,9 @@ static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
 
 	skel->rodata->num_cpu = evsel__nr_cpus(evsel);
 
-	bpf_map__resize(skel->maps.events, evsel__nr_cpus(evsel));
-	bpf_map__resize(skel->maps.fentry_readings, 1);
-	bpf_map__resize(skel->maps.accum_readings, 1);
+	bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel));
+	bpf_map__set_max_entries(skel->maps.fentry_readings, 1);
+	bpf_map__set_max_entries(skel->maps.accum_readings, 1);
 
 	prog_name = bpf_target_prog_name(prog_fd);
 	if (!prog_name) {
@@ -399,7 +399,7 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
 		return -1;
 	}
 
-	bpf_map__resize(skel->maps.events, libbpf_num_possible_cpus());
+	bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus());
 	err = bperf_leader_bpf__load(skel);
 	if (err) {
 		pr_err("Failed to load leader skeleton\n");
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index 89aa5e71db1a..cbc6c2bca488 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -65,14 +65,14 @@ static int bperf_load_program(struct evlist *evlist)
 
 	/* we need one copy of events per cpu for reading */
 	map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
-	bpf_map__resize(skel->maps.events, map_size);
-	bpf_map__resize(skel->maps.cgrp_idx, nr_cgroups);
+	bpf_map__set_max_entries(skel->maps.events, map_size);
+	bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
 	/* previous result is saved in a per-cpu array */
 	map_size = evlist->core.nr_entries / nr_cgroups;
-	bpf_map__resize(skel->maps.prev_readings, map_size);
+	bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
 	/* cgroup result needs all events (per-cpu) */
 	map_size = evlist->core.nr_entries;
-	bpf_map__resize(skel->maps.cgrp_readings, map_size);
+	bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
 
 	set_max_rlimit();