Diffstat (limited to 'kernel/bpf/core.c')
-rw-r--r--   kernel/bpf/core.c   223
1 file changed, 223 insertions(+), 0 deletions(-)
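
The patch below registers JITed eBPF program images with kallsyms: each image's address range is tracked in a latch tree for lockless address lookup, and the symbol name is derived from the program tag as "bpf_prog_" followed by the tag in hex (see bpf_get_prog_name() in the diff). As an illustration only, here is a minimal userspace C sketch of that naming scheme; the tag value is made up and the helper is not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the kernel's bpf_get_prog_name(): "bpf_prog_" plus the 8-byte
 * program tag rendered as lowercase hex. The tag below is a made-up example.
 */
int main(void)
{
	const uint8_t tag[8] = { 0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03, 0x04 };
	char sym[sizeof("bpf_prog_") + sizeof(tag) * 2];
	char *p = sym;

	p += sprintf(p, "bpf_prog_");
	for (size_t i = 0; i < sizeof(tag); i++)
		p += sprintf(p, "%02x", tag[i]);

	/* Prints "bpf_prog_deadbeef01020304", the form that such symbols
	 * take in kallsyms output once programs are registered this way.
	 */
	printf("%s\n", sym);
	return 0;
}
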
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 2831ba1e71c1..f45827e205d3 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -28,6 +28,9 @@
 #include <linux/moduleloader.h>
 #include <linux/bpf.h>
 #include <linux/frame.h>
+#include <linux/rbtree_latch.h>
+#include <linux/kallsyms.h>
+#include <linux/rcupdate.h>
 
 #include <asm/unaligned.h>
 
@@ -95,6 +98,8 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 	fp->aux = aux;
 	fp->aux->prog = fp;
 
+	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
+
 	return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
@@ -290,6 +295,206 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 }
 
 #ifdef CONFIG_BPF_JIT
+static __always_inline void
+bpf_get_prog_addr_region(const struct bpf_prog *prog,
+			 unsigned long *symbol_start,
+			 unsigned long *symbol_end)
+{
+	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
+	unsigned long addr = (unsigned long)hdr;
+
+	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
+
+	*symbol_start = addr;
+	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
+}
+
+static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
+{
+	BUILD_BUG_ON(sizeof("bpf_prog_") +
+		     sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN);
+
+	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
+	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
+	*sym = 0;
+}
+
+static __always_inline unsigned long
+bpf_get_prog_addr_start(struct latch_tree_node *n)
+{
+	unsigned long symbol_start, symbol_end;
+	const struct bpf_prog_aux *aux;
+
+	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
+	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+
+	return symbol_start;
+}
+
+static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
+					  struct latch_tree_node *b)
+{
+	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
+}
+
+static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
+{
+	unsigned long val = (unsigned long)key;
+	unsigned long symbol_start, symbol_end;
+	const struct bpf_prog_aux *aux;
+
+	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
+	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+
+	if (val < symbol_start)
+		return -1;
+	if (val >= symbol_end)
+		return  1;
+
+	return 0;
+}
+
+static const struct latch_tree_ops bpf_tree_ops = {
+	.less	= bpf_tree_less,
+	.comp	= bpf_tree_comp,
+};
+
+static DEFINE_SPINLOCK(bpf_lock);
+static LIST_HEAD(bpf_kallsyms);
+static struct latch_tree_root bpf_tree __cacheline_aligned;
+
+int bpf_jit_kallsyms __read_mostly;
+
+static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
+{
+	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
+	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
+	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+}
+
+static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
+{
+	if (list_empty(&aux->ksym_lnode))
+		return;
+
+	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+	list_del_rcu(&aux->ksym_lnode);
+}
+
+static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
+{
+	return fp->jited && !bpf_prog_was_classic(fp);
+}
+
+static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
+{
+	return list_empty(&fp->aux->ksym_lnode) ||
+	       fp->aux->ksym_lnode.prev == LIST_POISON2;
+}
+
+void bpf_prog_kallsyms_add(struct bpf_prog *fp)
+{
+	unsigned long flags;
+
+	if (!bpf_prog_kallsyms_candidate(fp) ||
+	    !capable(CAP_SYS_ADMIN))
+		return;
+
+	spin_lock_irqsave(&bpf_lock, flags);
+	bpf_prog_ksym_node_add(fp->aux);
+	spin_unlock_irqrestore(&bpf_lock, flags);
+}
+
+void bpf_prog_kallsyms_del(struct bpf_prog *fp)
+{
+	unsigned long flags;
+
+	if (!bpf_prog_kallsyms_candidate(fp))
+		return;
+
+	spin_lock_irqsave(&bpf_lock, flags);
+	bpf_prog_ksym_node_del(fp->aux);
+	spin_unlock_irqrestore(&bpf_lock, flags);
+}
+
+static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
+{
+	struct latch_tree_node *n;
+
+	if (!bpf_jit_kallsyms_enabled())
+		return NULL;
+
+	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
+	return n ?
+	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
+	       NULL;
+}
+
+const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+				 unsigned long *off, char *sym)
+{
+	unsigned long symbol_start, symbol_end;
+	struct bpf_prog *prog;
+	char *ret = NULL;
+
+	rcu_read_lock();
+	prog = bpf_prog_kallsyms_find(addr);
+	if (prog) {
+		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
+		bpf_get_prog_name(prog, sym);
+
+		ret = sym;
+		if (size)
+			*size = symbol_end - symbol_start;
+		if (off)
+			*off  = addr - symbol_start;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+bool is_bpf_text_address(unsigned long addr)
+{
+	bool ret;
+
+	rcu_read_lock();
+	ret = bpf_prog_kallsyms_find(addr) != NULL;
+	rcu_read_unlock();
+
+	return ret;
+}
+
+int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+		    char *sym)
+{
+	unsigned long symbol_start, symbol_end;
+	struct bpf_prog_aux *aux;
+	unsigned int it = 0;
+	int ret = -ERANGE;
+
+	if (!bpf_jit_kallsyms_enabled())
+		return ret;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
+		if (it++ != symnum)
+			continue;
+
+		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+		bpf_get_prog_name(aux->prog, sym);
+
+		*value = symbol_start;
+		*type  = BPF_SYM_ELF_TYPE;
+
+		ret = 0;
+		break;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 		     unsigned int alignment,
@@ -326,6 +531,24 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 	module_memfree(hdr);
 }
 
+/* This symbol is only overridden by archs that have different
+ * requirements than the usual eBPF JITs, f.e. when they only
+ * implement cBPF JIT, do not set images read-only, etc.
+ */
+void __weak bpf_jit_free(struct bpf_prog *fp)
+{
+	if (fp->jited) {
+		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
+
+		bpf_jit_binary_unlock_ro(hdr);
+		bpf_jit_binary_free(hdr);
+
+		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
+	}
+
+	bpf_prog_unlock_free(fp);
+}
+
 int bpf_jit_harden __read_mostly;
 
 static int bpf_jit_blind_insn(const struct bpf_insn *from,