--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -16,7 +16,7 @@
#define ARRAY_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
- BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
+ BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP | BPF_F_NO_CHARGE)
static void bpf_array_free_percpu(struct bpf_array *array)
{
--- a/kernel/bpf/bloom_filter.c
+++ b/kernel/bpf/bloom_filter.c
@@ -8,8 +8,8 @@
#include <linux/jhash.h>
#include <linux/random.h>
-#define BLOOM_CREATE_FLAG_MASK \
- (BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)
+#define BLOOM_CREATE_FLAG_MASK (BPF_F_NUMA_NODE | \
+ BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK | BPF_F_NO_CHARGE)
struct bpf_bloom_filter {
struct bpf_map map;
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -15,7 +15,8 @@
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
-#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
+#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK \
+ (BPF_F_NO_PREALLOC | BPF_F_CLONE | BPF_F_NO_CHARGE)
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -21,6 +21,8 @@ enum bpf_struct_ops_state {
refcount_t refcnt; \
enum bpf_struct_ops_state state
+#define STRUCT_OPS_CREATE_FLAG_MASK (BPF_F_NO_CHARGE)
+
struct bpf_struct_ops_value {
BPF_STRUCT_OPS_COMMON_VALUE;
char data[] ____cacheline_aligned_in_smp;
@@ -556,7 +558,8 @@ static void bpf_struct_ops_map_free(struct bpf_map *map)
static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
- attr->map_flags || !attr->btf_vmlinux_value_type_id)
+ attr->map_flags & ~STRUCT_OPS_CREATE_FLAG_MASK ||
+ !attr->btf_vmlinux_value_type_id)
return -EINVAL;
return 0;
}
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -39,6 +39,9 @@
*/
#define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
+
+#define CPU_MAP_CREATE_FLAG_MASK (BPF_F_NUMA_NODE | BPF_F_NO_CHARGE)
+
struct bpf_cpu_map_entry;
struct bpf_cpu_map;
@@ -93,7 +96,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
if (attr->max_entries == 0 || attr->key_size != 4 ||
(value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
- attr->map_flags & ~BPF_F_NUMA_NODE)
+ attr->map_flags & ~CPU_MAP_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
cmap = kzalloc(sizeof(*cmap), GFP_USER | __GFP_ACCOUNT);
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -50,7 +50,7 @@
#include <trace/events/xdp.h>
#define DEV_CREATE_FLAG_MASK \
- (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | BPF_F_NO_CHARGE)
struct xdp_dev_bulk_queue {
struct xdp_frame *q[DEV_MAP_BULK_SIZE];
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -16,7 +16,7 @@
#define HTAB_CREATE_FLAG_MASK \
(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
- BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
+ BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED | BPF_F_NO_CHARGE)
#define BATCH_OPS(_name) \
.map_lookup_batch = \
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -15,7 +15,7 @@
#include "../cgroup/cgroup-internal.h"
#define LOCAL_STORAGE_CREATE_FLAG_MASK \
- (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
+ (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK | BPF_F_NO_CHARGE)
struct bpf_cgroup_storage_map {
struct bpf_map map;
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -537,7 +537,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
#define LPM_KEY_SIZE_MIN LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
#define LPM_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE | \
- BPF_F_ACCESS_MASK)
+ BPF_F_ACCESS_MASK | BPF_F_NO_CHARGE)
static struct bpf_map *trie_alloc(union bpf_attr *attr)
{
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -11,7 +11,7 @@
#include "percpu_freelist.h"
#define QUEUE_STACK_CREATE_FLAG_MASK \
- (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
+ (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK | BPF_F_NO_CHARGE)
struct bpf_queue_stack {
struct bpf_map map;
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -11,7 +11,7 @@
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>
-#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)
+#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE | BPF_F_NO_CHARGE)
/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
#define RINGBUF_PGOFF \
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -14,7 +14,7 @@
#define STACK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | \
- BPF_F_STACK_BUILD_ID)
+ BPF_F_STACK_BUILD_ID | BPF_F_NO_CHARGE)
struct stack_map_bucket {
struct pcpu_freelist_node fnode;
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -22,7 +22,7 @@ struct bpf_stab {
};
#define SOCK_CREATE_FLAG_MASK \
- (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | BPF_F_NO_CHARGE)
static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
struct bpf_prog *old, u32 which);
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -12,6 +12,9 @@
#include "xsk.h"
+#define XSK_MAP_CREATE_FLAG_MASK \
+ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | BPF_F_NO_CHARGE)
+
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
struct xdp_sock __rcu **map_entry)
{
@@ -68,7 +71,7 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size != 4 ||
- attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
+ attr->map_flags & ~XSK_MAP_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
numa_node = bpf_map_attr_numa_node(attr);
Many map types define a create flag mask and reject any invalid map flag
that falls outside it. Add BPF_F_NO_CHARGE to all of these masks so the
new flag is accepted at map creation.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 kernel/bpf/arraymap.c          | 2 +-
 kernel/bpf/bloom_filter.c      | 4 ++--
 kernel/bpf/bpf_local_storage.c | 3 ++-
 kernel/bpf/bpf_struct_ops.c    | 5 ++++-
 kernel/bpf/cpumap.c            | 5 ++++-
 kernel/bpf/devmap.c            | 2 +-
 kernel/bpf/hashtab.c           | 2 +-
 kernel/bpf/local_storage.c     | 2 +-
 kernel/bpf/lpm_trie.c          | 2 +-
 kernel/bpf/queue_stack_maps.c  | 2 +-
 kernel/bpf/ringbuf.c           | 2 +-
 kernel/bpf/stackmap.c          | 2 +-
 net/core/sock_map.c            | 2 +-
 net/xdp/xskmap.c               | 5 ++++-
 14 files changed, 25 insertions(+), 15 deletions(-)
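
For context, every mask touched above feeds the same check in the map
type's alloc or alloc_check path: any map_flags bit outside the mask
fails map creation with -EINVAL, which is why BPF_F_NO_CHARGE has to be
OR'ed into each mask before any map type will accept it. A minimal
sketch of that pattern follows; EXAMPLE_CREATE_FLAG_MASK and
example_map_alloc_check are hypothetical names, not part of this patch,
and BPF_F_NO_CHARGE is the new flag introduced by this series.

#include <linux/bpf.h>
#include <linux/errno.h>

/* Hypothetical map type's create mask, extended with the new flag the
 * same way each real mask is extended above.
 */
#define EXAMPLE_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK | BPF_F_NO_CHARGE)

static int example_map_alloc_check(union bpf_attr *attr)
{
	/* Reject any map_flags bit this map type does not understand. */
	if (attr->map_flags & ~EXAMPLE_CREATE_FLAG_MASK)
		return -EINVAL;

	return 0;
}

Without the mask updates, userspace setting BPF_F_NO_CHARGE on any of
these map types would keep getting -EINVAL even once the flag itself is
supported.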