
[bpf-next,1/4] libbpf: make btf_dump work with modifiable BTF

Message ID 20200929232843.1249318-2-andriin@fb.com
Series libbpf: add raw BTF type dumping

Commit Message

Andrii Nakryiko Sept. 29, 2020, 11:28 p.m. UTC
Ensure that btf_dump can accommodate new BTF types being appended to the BTF
instance after struct btf_dump was created. This came up during an attempt to
use btf_dump for raw type dumping in selftests, but given that the changes are
not extensive, it's good not to have any gotchas in API usage, so I decided to
support such a use case in general.

Signed-off-by: Andrii Nakryiko <andriin@fb.com>
---
 tools/lib/bpf/btf.c             | 17 ++++++++
 tools/lib/bpf/btf_dump.c        | 69 ++++++++++++++++++++++-----------
 tools/lib/bpf/libbpf_internal.h |  1 +
 3 files changed, 65 insertions(+), 22 deletions(-)
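
For reference, a minimal usage sketch (not part of the patch) of the scenario
this enables: BTF types appended to a struct btf *after* btf_dump__new() and
then dumped. It assumes the recently added BTF writing APIs (btf__new_empty(),
btf__add_int(), btf__add_struct(), btf__add_field()) and a system-installed
libbpf header; error handling is omitted for brevity.

#include <stdio.h>
#include <stdarg.h>
#include <linux/btf.h>
#include <bpf/btf.h>

static void print_cb(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

int main(void)
{
	struct btf_dump_opts opts = { .ctx = NULL };
	struct btf *btf = btf__new_empty();
	struct btf_dump *d = btf_dump__new(btf, NULL, &opts, print_cb);
	int int_id, struct_id;

	/* these types are added after btf_dump__new(), so btf_dump has to
	 * grow its per-type state on the fly to cover them
	 */
	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	struct_id = btf__add_struct(btf, "s", 4);
	btf__add_field(btf, "x", int_id, 0, 0);

	btf_dump__dump_type(d, struct_id);	/* emits the definition of struct s */

	btf_dump__free(d);
	btf__free(btf);
	return 0;
}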

Patch

diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index e1dbd766c698..df4fd9132079 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -146,6 +146,23 @@  void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
 	return new_data + cur_cnt * elem_sz;
 }
 
+/* Ensure given dynamically allocated memory region has enough allocated space
+ * to accommodate *need_cnt* elements of size *elem_sz* bytes each
+ */
+int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
+{
+	void *p;
+
+	if (need_cnt <= *cap_cnt)
+		return 0;
+
+	p = btf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
+	if (!p)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
 {
 	__u32 *p;
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 91310e528a3a..2f9d685bd522 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -60,11 +60,14 @@  struct btf_dump {
 	struct btf_dump_opts opts;
 	int ptr_sz;
 	bool strip_mods;
+	int last_id;
 
 	/* per-type auxiliary state */
 	struct btf_dump_type_aux_state *type_states;
+	size_t type_states_cap;
 	/* per-type optional cached unique name, must be freed, if present */
 	const char **cached_names;
+	size_t cached_names_cap;
 
 	/* topo-sorted list of dependent type definitions */
 	__u32 *emit_queue;
@@ -113,6 +116,7 @@  static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
 }
 
 static int btf_dump_mark_referenced(struct btf_dump *d);
+static int btf_dump_resize(struct btf_dump *d);
 
 struct btf_dump *btf_dump__new(const struct btf *btf,
 			       const struct btf_ext *btf_ext,
@@ -144,25 +148,8 @@  struct btf_dump *btf_dump__new(const struct btf *btf,
 		d->ident_names = NULL;
 		goto err;
 	}
-	d->type_states = calloc(1 + btf__get_nr_types(d->btf),
-				sizeof(d->type_states[0]));
-	if (!d->type_states) {
-		err = -ENOMEM;
-		goto err;
-	}
-	d->cached_names = calloc(1 + btf__get_nr_types(d->btf),
-				 sizeof(d->cached_names[0]));
-	if (!d->cached_names) {
-		err = -ENOMEM;
-		goto err;
-	}
 
-	/* VOID is special */
-	d->type_states[0].order_state = ORDERED;
-	d->type_states[0].emit_state = EMITTED;
-
-	/* eagerly determine referenced types for anon enums */
-	err = btf_dump_mark_referenced(d);
+	err = btf_dump_resize(d);
 	if (err)
 		goto err;
 
@@ -172,9 +159,38 @@  struct btf_dump *btf_dump__new(const struct btf *btf,
 	return ERR_PTR(err);
 }
 
+static int btf_dump_resize(struct btf_dump *d)
+{
+	int err, last_id = btf__get_nr_types(d->btf);
+
+	if (last_id <= d->last_id)
+		return 0;
+
+	if (btf_ensure_mem((void **)&d->type_states, &d->type_states_cap,
+			   sizeof(*d->type_states), last_id + 1))
+		return -ENOMEM;
+	if (btf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap,
+			   sizeof(*d->cached_names), last_id + 1))
+		return -ENOMEM;
+
+	if (d->last_id == 0) {
+		/* VOID is special */
+		d->type_states[0].order_state = ORDERED;
+		d->type_states[0].emit_state = EMITTED;
+	}
+
+	/* eagerly determine referenced types for anon enums */
+	err = btf_dump_mark_referenced(d);
+	if (err)
+		return err;
+
+	d->last_id = last_id;
+	return 0;
+}
+
 void btf_dump__free(struct btf_dump *d)
 {
-	int i, cnt;
+	int i;
 
 	if (IS_ERR_OR_NULL(d))
 		return;
@@ -182,7 +198,7 @@  void btf_dump__free(struct btf_dump *d)
 	free(d->type_states);
 	if (d->cached_names) {
 		/* any set cached name is owned by us and should be freed */
-		for (i = 0, cnt = btf__get_nr_types(d->btf); i <= cnt; i++) {
+		for (i = 0; i <= d->last_id; i++) {
 			if (d->cached_names[i])
 				free((void *)d->cached_names[i]);
 		}
@@ -222,6 +238,10 @@  int btf_dump__dump_type(struct btf_dump *d, __u32 id)
 	if (id > btf__get_nr_types(d->btf))
 		return -EINVAL;
 
+	err = btf_dump_resize(d);
+	if (err)
+		return err;
+
 	d->emit_queue_cnt = 0;
 	err = btf_dump_order_type(d, id, false);
 	if (err < 0)
@@ -251,7 +271,7 @@  static int btf_dump_mark_referenced(struct btf_dump *d)
 	const struct btf_type *t;
 	__u16 vlen;
 
-	for (i = 1; i <= n; i++) {
+	for (i = d->last_id + 1; i <= n; i++) {
 		t = btf__type_by_id(d->btf, i);
 		vlen = btf_vlen(t);
 
@@ -306,6 +326,7 @@  static int btf_dump_mark_referenced(struct btf_dump *d)
 	}
 	return 0;
 }
+
 static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
 {
 	__u32 *new_queue;
@@ -1049,11 +1070,15 @@  int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
 			     const struct btf_dump_emit_type_decl_opts *opts)
 {
 	const char *fname;
-	int lvl;
+	int lvl, err;
 
 	if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
 		return -EINVAL;
 
+	err = btf_dump_resize(d);
+	if (err)
+		return err;
+
 	fname = OPTS_GET(opts, field_name, "");
 	lvl = OPTS_GET(opts, indent_level, 0);
 	d->strip_mods = OPTS_GET(opts, strip_mods, false);
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index eed5b624a784..d99bc847bf84 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -107,6 +107,7 @@  static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
 
 void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
 		  size_t cur_cnt, size_t max_cnt, size_t add_cnt);
+int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
 
 static inline bool libbpf_validate_opts(const char *opts,
 					size_t opts_sz, size_t user_sz,