about summary refs log tree commit diff stats
path: root/kernel/bpf/syscall.c
diff options
context:
space:
mode:
authorMartin KaFai Lau <kafai@fb.com>2017-06-05 15:15:47 -0400
committerDavid S. Miller <davem@davemloft.net>2017-06-06 15:41:22 -0400
commitf3f1c054c288bb6e503005e6d73611151ed20e91 (patch)
treeae79e214e4c8ea0d144f69ac8f0bf85b726752c0 /kernel/bpf/syscall.c
parentdc4bb0e2356149aee4cdae061936f3bbdd45595c (diff)
bpf: Introduce bpf_map ID
This patch generates a unique ID for each created bpf_map. The approach is similar to the earlier patch for bpf_prog ID. It is worth noting that the bpf_map's ID and bpf_prog's ID are in two independent ID spaces and both have the same valid range: [1, INT_MAX). Signed-off-by: Martin KaFai Lau <kafai@fb.com> Acked-by: Alexei Starovoitov <ast@fb.com> Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--kernel/bpf/syscall.c34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2a1b32b470f1..4c3075b5d840 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -27,6 +27,8 @@
27DEFINE_PER_CPU(int, bpf_prog_active); 27DEFINE_PER_CPU(int, bpf_prog_active);
28static DEFINE_IDR(prog_idr); 28static DEFINE_IDR(prog_idr);
29static DEFINE_SPINLOCK(prog_idr_lock); 29static DEFINE_SPINLOCK(prog_idr_lock);
30static DEFINE_IDR(map_idr);
31static DEFINE_SPINLOCK(map_idr_lock);
30 32
31int sysctl_unprivileged_bpf_disabled __read_mostly; 33int sysctl_unprivileged_bpf_disabled __read_mostly;
32 34
@@ -117,6 +119,29 @@ static void bpf_map_uncharge_memlock(struct bpf_map *map)
117 free_uid(user); 119 free_uid(user);
118} 120}
119 121
122static int bpf_map_alloc_id(struct bpf_map *map)
123{
124 int id;
125
126 spin_lock_bh(&map_idr_lock);
127 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
128 if (id > 0)
129 map->id = id;
130 spin_unlock_bh(&map_idr_lock);
131
132 if (WARN_ON_ONCE(!id))
133 return -ENOSPC;
134
135 return id > 0 ? 0 : id;
136}
137
138static void bpf_map_free_id(struct bpf_map *map)
139{
140 spin_lock_bh(&map_idr_lock);
141 idr_remove(&map_idr, map->id);
142 spin_unlock_bh(&map_idr_lock);
143}
144
120/* called from workqueue */ 145/* called from workqueue */
121static void bpf_map_free_deferred(struct work_struct *work) 146static void bpf_map_free_deferred(struct work_struct *work)
122{ 147{
@@ -141,6 +166,7 @@ static void bpf_map_put_uref(struct bpf_map *map)
141void bpf_map_put(struct bpf_map *map) 166void bpf_map_put(struct bpf_map *map)
142{ 167{
143 if (atomic_dec_and_test(&map->refcnt)) { 168 if (atomic_dec_and_test(&map->refcnt)) {
169 bpf_map_free_id(map);
144 INIT_WORK(&map->work, bpf_map_free_deferred); 170 INIT_WORK(&map->work, bpf_map_free_deferred);
145 schedule_work(&map->work); 171 schedule_work(&map->work);
146 } 172 }
@@ -239,14 +265,20 @@ static int map_create(union bpf_attr *attr)
239 if (err) 265 if (err)
240 goto free_map_nouncharge; 266 goto free_map_nouncharge;
241 267
268 err = bpf_map_alloc_id(map);
269 if (err)
270 goto free_map;
271
242 err = bpf_map_new_fd(map); 272 err = bpf_map_new_fd(map);
243 if (err < 0) 273 if (err < 0)
244 /* failed to allocate fd */ 274 /* failed to allocate fd */
245 goto free_map; 275 goto free_id;
246 276
247 trace_bpf_map_create(map, err); 277 trace_bpf_map_create(map, err);
248 return err; 278 return err;
249 279
280free_id:
281 bpf_map_free_id(map);
250free_map: 282free_map:
251 bpf_map_uncharge_memlock(map); 283 bpf_map_uncharge_memlock(map);
252free_map_nouncharge: 284free_map_nouncharge: