diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -262,11 +262,28 @@ static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
return -EINVAL;
}

+/* Called from syscall */
+static int queue_map_copy_value(struct bpf_map *map, void *key, void *value)
+{
+ (void)key;
+
+ return queue_map_peek_elem(map, value);
+}
+
+/* Called from syscall */
+static int stack_map_copy_value(struct bpf_map *map, void *key, void *value)
+{
+ (void)key;
+
+ return stack_map_peek_elem(map, value);
+}
+
const struct bpf_map_ops queue_map_ops = {
.map_alloc_check = queue_stack_map_alloc_check,
.map_alloc = queue_stack_map_alloc,
.map_free = queue_stack_map_free,
.map_lookup_elem = queue_stack_map_lookup_elem,
+ .map_copy_value = queue_map_copy_value,
.map_update_elem = queue_stack_map_update_elem,
.map_delete_elem = queue_stack_map_delete_elem,
.map_push_elem = queue_stack_map_push_elem,
@@ -280,6 +297,7 @@ const struct bpf_map_ops stack_map_ops = {
.map_alloc = queue_stack_map_alloc,
.map_free = queue_stack_map_free,
.map_lookup_elem = queue_stack_map_lookup_elem,
+ .map_copy_value = stack_map_copy_value,
.map_update_elem = queue_stack_map_update_elem,
.map_delete_elem = queue_stack_map_delete_elem,
.map_push_elem = queue_stack_map_push_elem,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -218,10 +218,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
return bpf_map_offload_lookup_elem(map, key, value);

bpf_disable_instrumentation();
- if (map->map_type == BPF_MAP_TYPE_QUEUE ||
- map->map_type == BPF_MAP_TYPE_STACK) {
- err = map->ops->map_peek_elem(map, value);
- } else if (map->ops->map_copy_value) {
+ if (map->ops->map_copy_value) {
err = map->ops->map_copy_value(map, key, value);
} else {
rcu_read_lock();
Migrate BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK to map_copy_value by
introducing small wrappers that discard the (unused) key argument.

Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
---
 kernel/bpf/queue_stack_maps.c | 18 ++++++++++++++++++
 kernel/bpf/syscall.c          |  5 +----
 2 files changed, 19 insertions(+), 4 deletions(-)
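
Not part of the patch: a minimal userspace sketch of the path this change
affects. A BPF_MAP_LOOKUP_ELEM syscall on a queue (or stack) map ends up in
bpf_map_copy_value(), which with this series dispatches to the new
map_copy_value callback instead of special-casing the two map types. The
program below uses libbpf; the map name, sizes and values are illustrative
only.

#include <stdio.h>
#include <bpf/bpf.h>

int main(void)
{
	__u32 val = 42;
	/* Queue maps are keyless, so key_size must be 0. */
	int fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, "example_queue",
				0, sizeof(__u32), 16, NULL);
	if (fd < 0) {
		perror("bpf_map_create");
		return 1;
	}

	/* BPF_MAP_UPDATE_ELEM acts as a push for queue maps. */
	if (bpf_map_update_elem(fd, NULL, &val, 0))
		perror("push");

	/*
	 * BPF_MAP_LOOKUP_ELEM peeks at the head without removing it.
	 * In the kernel this reaches bpf_map_copy_value(), which with
	 * this patch calls queue_map_copy_value() and ignores the
	 * (NULL) key.
	 */
	val = 0;
	if (bpf_map_lookup_elem(fd, NULL, &val))
		perror("peek");
	else
		printf("head of queue: %u\n", val);

	return 0;
}

Build with something like "cc example.c -lbpf"; creating maps needs CAP_BPF
(or root).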