@@ -237,6 +237,14 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}
+/*
+ * Mirror of create_perf_stat_counter(): close the evsel the same way it
+ * was opened (per-cpu when a target CPU list was given, per-thread
+ * otherwise), so we never touch FDs that were never allocated.
+ */
+static void close_perf_stat_counter(struct perf_evsel *evsel)
+{
+	if (target__has_cpu(&target))
+		perf_evsel__close_per_cpu(evsel, perf_evsel__cpus(evsel));
+	else
+		perf_evsel__close_per_thread(evsel, evsel_list->threads);
+}
+
/*
* Does the counter have nsecs as a unit?
*/
@@ -686,7 +694,9 @@ static int __run_perf_stat(int argc, const char **argv)
* group leaders.
*/
read_counters();
- perf_evlist__close(evsel_list);
+
+ evlist__for_each_entry(evsel_list, counter)
+ close_perf_stat_counter(counter);
return WEXITSTATUS(status);
}
@@ -1655,6 +1655,18 @@ void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
perf_evsel__free_fd(evsel);
}
+void perf_evsel__close_per_cpu(struct perf_evsel *evsel,
+ struct cpu_map *cpus)
+{
+ perf_evsel__close(evsel, cpus->nr, 1);
+}
+
+void perf_evsel__close_per_thread(struct perf_evsel *evsel,
+ struct thread_map *threads)
+{
+ perf_evsel__close(evsel, 1, threads->nr);
+}
+
static struct {
struct cpu_map map;
int cpus[1];
@@ -252,6 +252,10 @@ int perf_evsel__open_per_thread(struct perf_evsel *evsel,
struct thread_map *threads);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
struct thread_map *threads);
+void perf_evsel__close_per_cpu(struct perf_evsel *evsel,
+ struct cpu_map *cpus);
+void perf_evsel__close_per_thread(struct perf_evsel *evsel,
+ struct thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
struct perf_sample; In create_perf_stat_counter(), we open events on a potentially per-cpu or per-thread basis, depending on whether we have a target CPU list. The number of FDs we allocate depends on whether we're in per-thread or per-cpu mode. Subsequently, we close events using perf_evlist__close(), which assumes that if an evsel has a cpu map, we've opened events per-cpu-per-thread for the evsel, and each of the FDs needs to be closed. If we'd actually opened events purely per-thread, this can result in erroneously treating unallocated memory as FDs, which we attempt to close, then set to -1. This has been observed to corrupt the data structures used by the libc memory allocator, resulting in segfaults when closing events. To fix this, this patch adds a close_perf_stat_counter() function that matches the logic in create_perf_stat_counter(), ensuring that we open and close the same number of counters. Helpers are added for closing an evsel per-thread or per-cpu. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: linux-kernel@vger.kernel.org --- tools/perf/builtin-stat.c | 12 +++++++++++- tools/perf/util/evsel.c | 12 ++++++++++++ tools/perf/util/evsel.h | 4 ++++ 3 files changed, 27 insertions(+), 1 deletion(-) -- 1.9.1
In create_perf_stat_counter(), we open events on a potentially per-cpu or per-thread basis, depending on if we have a target CPU list. The number of FDs we allocate depends on whether we're in per-thread or per-cpu mode. Subsequently, we close events using perf_evlist__close(), which assumes that if an evsel has a cpu map, we've opened events per-cpu-per-thread for the evsel, and each of the FDs needs to be closed. If we'd actually opened events purely per-thread, this can result in erroneously treating unallocated memory as FDs, which we attempt to close, then set to -1. This has been observed to corrupt the datastructures used by the libc memory allocator, resulting in segfaults when closing events. To fix this, this patch adds a close_perf_stat_counter() function that matches the logic in create_perf_stat_counter(), ensuring that we open and close the same number of counters. Helpers are added for closing an evsel per-thread or per-cpu. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: linux-kernel@vger.kernel.org --- tools/perf/builtin-stat.c | 12 +++++++++++- tools/perf/util/evsel.c | 12 ++++++++++++ tools/perf/util/evsel.h | 4 ++++ 3 files changed, 27 insertions(+), 1 deletion(-) -- 1.9.1