 Documentation/sysctl/net.txt      |  12 +
 arch/arm64/net/bpf_jit_comp.c     |  15 -
 arch/powerpc/net/bpf_jit_comp64.c |   1 +
 arch/s390/net/bpf_jit_comp.c      |  18 -
 arch/x86/net/bpf_jit_comp.c       |  15 -
 include/linux/bpf.h               |   4 +
 include/linux/filter.h            | 112 +++++++++-
 kernel/bpf/core.c                 | 223 +++++++++++++++++++-
 kernel/bpf/syscall.c              |   2 +
 kernel/extable.c                  |   9 +-
 kernel/kallsyms.c                 |  61 ++++--
 net/Kconfig                       |   3 +-
 net/core/sysctl_net_core.c        |   7 +
 13 files changed, 419 insertions(+), 63 deletions(-)
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index b80fbd4e5575..2ebabc93014a 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -54,6 +54,18 @@ Values :
 	1 - enable JIT hardening for unprivileged users only
 	2 - enable JIT hardening for all users
 
+bpf_jit_kallsyms
+----------------
+
+When Berkeley Packet Filter Just in Time compiler is enabled, then compiled
+images are unknown addresses to the kernel, meaning they neither show up in
+traces nor in /proc/kallsyms. This enables export of these addresses, which
+can be used for debugging/tracing. If bpf_jit_harden is enabled, this feature
+is disabled.
+Values :
+	0 - disable JIT kallsyms export (default value)
+	1 - enable JIT kallsyms export for privileged users only
+
 dev_weight
 --------------
 
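
A minimal userspace sketch (not part of the patch): with bpf_jit_enable=1 and
bpf_jit_kallsyms=1 set by an admin, a privileged reader can list the exported
JIT images by scanning /proc/kallsyms for the bpf_prog_ prefix; per the
BPF_SYM_ELF_TYPE definition added to include/linux/filter.h below, they show
up as lowercase 't' (text) symbols.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, " t bpf_prog_"))	/* JITed BPF images */
			fputs(line, stdout);
	fclose(f);
	return 0;
}
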
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c444408d5a8c..05d12104d270 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -910,18 +910,3 @@ out:
 					   tmp : orig_prog);
 	return prog;
 }
-
-void bpf_jit_free(struct bpf_prog *prog)
-{
-	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
-	struct bpf_binary_header *header = (void *)addr;
-
-	if (!prog->jited)
-		goto free_filter;
-
-	set_memory_rw(addr, header->pages);
-	bpf_jit_binary_free(header);
-
-free_filter:
-	bpf_prog_unlock_free(prog);
-}
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index f9ebd02260da..c34166ef76fc 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -1064,6 +1064,7 @@ out:
 	return fp;
 }
 
+/* Overriding bpf_jit_free() as we don't set images read-only. */
 void bpf_jit_free(struct bpf_prog *fp)
 {
 	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 6454efd22e63..f1d0e62ec1dd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1339,21 +1339,3 @@ out:
 					   tmp : orig_fp);
 	return fp;
 }
-
-/*
- * Free eBPF program
- */
-void bpf_jit_free(struct bpf_prog *fp)
-{
-	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-	struct bpf_binary_header *header = (void *)addr;
-
-	if (!fp->jited)
-		goto free_filter;
-
-	set_memory_rw(addr, header->pages);
-	bpf_jit_binary_free(header);
-
-free_filter:
-	bpf_prog_unlock_free(fp);
-}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 26123d0ae13a..18a62e208826 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1180,18 +1180,3 @@ out:
 					   tmp : orig_prog);
 	return prog;
 }
-
-void bpf_jit_free(struct bpf_prog *fp)
-{
-	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-	struct bpf_binary_header *header = (void *)addr;
-
-	if (!fp->jited)
-		goto free_filter;
-
-	set_memory_rw(addr, header->pages);
-	bpf_jit_binary_free(header);
-
-free_filter:
-	bpf_prog_unlock_free(fp);
-}
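
The arch hunks above can drop (or, for powerpc, keep and annotate) their
private bpf_jit_free() because kernel/bpf/core.c below gains a generic
__weak definition. A standalone sketch of that override pattern, with a
made-up function name (do_free) standing in for bpf_jit_free():

#include <stdio.h>

/* generic fallback, analogous to the new __weak bpf_jit_free() */
__attribute__((weak)) void do_free(void)
{
	puts("generic free");
}

/*
 * An arch file that defines a strong do_free(), as powerpc keeps doing
 * for bpf_jit_free(), silently replaces the weak one at link time; with
 * no arch definition, the weak fallback runs.
 */
int main(void)
{
	do_free();
	return 0;
}
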
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 57d60dc5b600..909fc033173a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -8,10 +8,12 @@
 #define _LINUX_BPF_H 1
 
 #include <uapi/linux/bpf.h>
+
 #include <linux/workqueue.h>
 #include <linux/file.h>
 #include <linux/percpu.h>
 #include <linux/err.h>
+#include <linux/rbtree_latch.h>
 
 struct perf_event;
 struct bpf_map;
@@ -177,6 +179,8 @@ struct bpf_prog_aux {
 	atomic_t refcnt;
 	u32 used_map_cnt;
 	u32 max_ctx_offset;
+	struct latch_tree_node ksym_tnode;
+	struct list_head ksym_lnode;
 	const struct bpf_verifier_ops *ops;
 	struct bpf_map **used_maps;
 	struct bpf_prog *prog;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c7a70e0cc3a0..0c1cc9143cb2 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -54,6 +54,12 @@ struct bpf_prog_aux;
 #define BPF_REG_AX		MAX_BPF_REG
 #define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)
 
+/* As per nm, we expose JITed images as text (code) section for
+ * kallsyms. That way, tools like perf can find it to match
+ * addresses.
+ */
+#define BPF_SYM_ELF_TYPE	't'
+
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK	512
 
@@ -555,6 +561,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
 	set_memory_rw((unsigned long)fp, fp->pages);
 }
+
+static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+{
+	set_memory_rw((unsigned long)hdr, hdr->pages);
+}
 #else
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
@@ -563,8 +574,21 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
 }
+
+static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+{
+}
 #endif /* CONFIG_DEBUG_SET_MODULE_RONX */
 
+static inline struct bpf_binary_header *
+bpf_jit_binary_hdr(const struct bpf_prog *fp)
+{
+	unsigned long real_start = (unsigned long)fp->bpf_func;
+	unsigned long addr = real_start & PAGE_MASK;
+
+	return (void *)addr;
+}
+
 int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
 static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 {
@@ -617,6 +641,7 @@ void bpf_warn_invalid_xdp_action(u32 act);
 #ifdef CONFIG_BPF_JIT
 extern int bpf_jit_enable;
 extern int bpf_jit_harden;
+extern int bpf_jit_kallsyms;
 
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
 
@@ -651,6 +676,11 @@ static inline bool bpf_jit_is_ebpf(void)
 # endif
 }
 
+static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
+{
+	return fp->jited && bpf_jit_is_ebpf();
+}
+
 static inline bool bpf_jit_blinding_enabled(void)
 {
 	/* These are the prerequisites, should someone ever have the
@@ -668,11 +698,91 @@ static inline bool bpf_jit_blinding_enabled(void)
 
 	return true;
 }
-#else
+
+static inline bool bpf_jit_kallsyms_enabled(void)
+{
+	/* There are a couple of corner cases where kallsyms should
+	 * not be enabled f.e. on hardening.
+	 */
+	if (bpf_jit_harden)
+		return false;
+	if (!bpf_jit_kallsyms)
+		return false;
+	if (bpf_jit_kallsyms == 1)
+		return true;
+
+	return false;
+}
+
+const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+				 unsigned long *off, char *sym);
+bool is_bpf_text_address(unsigned long addr);
+int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+		    char *sym);
+
+static inline const char *
+bpf_address_lookup(unsigned long addr, unsigned long *size,
+		   unsigned long *off, char **modname, char *sym)
+{
+	const char *ret = __bpf_address_lookup(addr, size, off, sym);
+
+	if (ret && modname)
+		*modname = NULL;
+	return ret;
+}
+
+void bpf_prog_kallsyms_add(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del(struct bpf_prog *fp);
+
+#else /* CONFIG_BPF_JIT */
+
+static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
+{
+	return false;
+}
+
 static inline void bpf_jit_free(struct bpf_prog *fp)
 {
 	bpf_prog_unlock_free(fp);
 }
+
+static inline bool bpf_jit_kallsyms_enabled(void)
+{
+	return false;
+}
+
+static inline const char *
+__bpf_address_lookup(unsigned long addr, unsigned long *size,
+		     unsigned long *off, char *sym)
+{
+	return NULL;
+}
+
+static inline bool is_bpf_text_address(unsigned long addr)
+{
+	return false;
+}
+
+static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
+				  char *type, char *sym)
+{
+	return -ERANGE;
+}
+
+static inline const char *
+bpf_address_lookup(unsigned long addr, unsigned long *size,
+		   unsigned long *off, char **modname, char *sym)
+{
+	return NULL;
+}
+
+static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
+{
+}
+
+static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
+{
+}
 #endif /* CONFIG_BPF_JIT */
 
 #define BPF_ANC		BIT(15)
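
A standalone sketch of the layout bpf_jit_binary_hdr() above relies on:
bpf_jit_binary_alloc() places struct bpf_binary_header at the start of a
page-aligned allocation with the (randomized) image start kept inside the
first page, so masking the image address with PAGE_MASK recovers the header.
The address used here is hypothetical:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	/* hypothetical bpf_func address inside a JIT image */
	unsigned long bpf_func = 0xa0282590UL;
	unsigned long hdr = bpf_func & PAGE_MASK;	/* header start */

	printf("header at 0x%lx, image offset 0x%lx\n",
	       hdr, bpf_func - hdr);
	return 0;
}
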
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 2831ba1e71c1..f45827e205d3 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -28,6 +28,9 @@
 #include <linux/moduleloader.h>
 #include <linux/bpf.h>
 #include <linux/frame.h>
+#include <linux/rbtree_latch.h>
+#include <linux/kallsyms.h>
+#include <linux/rcupdate.h>
 
 #include <asm/unaligned.h>
 
@@ -95,6 +98,8 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 	fp->aux = aux;
 	fp->aux->prog = fp;
 
+	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
+
 	return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
@@ -290,6 +295,206 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 }
 
 #ifdef CONFIG_BPF_JIT
+static __always_inline void
+bpf_get_prog_addr_region(const struct bpf_prog *prog,
+			 unsigned long *symbol_start,
+			 unsigned long *symbol_end)
+{
+	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
+	unsigned long addr = (unsigned long)hdr;
+
+	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
+
+	*symbol_start = addr;
+	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
+}
+
+static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
+{
+	BUILD_BUG_ON(sizeof("bpf_prog_") +
+		     sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN);
+
+	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
+	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
+	*sym = 0;
+}
+
+static __always_inline unsigned long
+bpf_get_prog_addr_start(struct latch_tree_node *n)
+{
+	unsigned long symbol_start, symbol_end;
+	const struct bpf_prog_aux *aux;
+
+	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
+	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+
+	return symbol_start;
+}
+
+static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
+					  struct latch_tree_node *b)
+{
+	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
+}
+
+static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
+{
+	unsigned long val = (unsigned long)key;
+	unsigned long symbol_start, symbol_end;
+	const struct bpf_prog_aux *aux;
+
+	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
+	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+
+	if (val < symbol_start)
+		return -1;
+	if (val >= symbol_end)
+		return  1;
+
+	return 0;
+}
+
+static const struct latch_tree_ops bpf_tree_ops = {
+	.less	= bpf_tree_less,
+	.comp	= bpf_tree_comp,
+};
+
+static DEFINE_SPINLOCK(bpf_lock);
+static LIST_HEAD(bpf_kallsyms);
+static struct latch_tree_root bpf_tree __cacheline_aligned;
+
+int bpf_jit_kallsyms __read_mostly;
+
+static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
+{
+	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
+	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
+	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+}
+
+static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
+{
+	if (list_empty(&aux->ksym_lnode))
+		return;
+
+	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+	list_del_rcu(&aux->ksym_lnode);
+}
+
+static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
+{
+	return fp->jited && !bpf_prog_was_classic(fp);
+}
+
+static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
+{
+	return list_empty(&fp->aux->ksym_lnode) ||
+	       fp->aux->ksym_lnode.prev == LIST_POISON2;
+}
+
+void bpf_prog_kallsyms_add(struct bpf_prog *fp)
+{
+	unsigned long flags;
+
+	if (!bpf_prog_kallsyms_candidate(fp) ||
+	    !capable(CAP_SYS_ADMIN))
+		return;
+
+	spin_lock_irqsave(&bpf_lock, flags);
+	bpf_prog_ksym_node_add(fp->aux);
+	spin_unlock_irqrestore(&bpf_lock, flags);
+}
+
+void bpf_prog_kallsyms_del(struct bpf_prog *fp)
+{
+	unsigned long flags;
+
+	if (!bpf_prog_kallsyms_candidate(fp))
+		return;
+
+	spin_lock_irqsave(&bpf_lock, flags);
+	bpf_prog_ksym_node_del(fp->aux);
+	spin_unlock_irqrestore(&bpf_lock, flags);
+}
+
+static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
+{
+	struct latch_tree_node *n;
+
+	if (!bpf_jit_kallsyms_enabled())
+		return NULL;
+
+	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
+	return n ?
+	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
+	       NULL;
+}
+
+const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+				 unsigned long *off, char *sym)
+{
+	unsigned long symbol_start, symbol_end;
+	struct bpf_prog *prog;
+	char *ret = NULL;
+
+	rcu_read_lock();
+	prog = bpf_prog_kallsyms_find(addr);
+	if (prog) {
+		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
+		bpf_get_prog_name(prog, sym);
+
+		ret = sym;
+		if (size)
+			*size = symbol_end - symbol_start;
+		if (off)
+			*off  = addr - symbol_start;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+bool is_bpf_text_address(unsigned long addr)
+{
+	bool ret;
+
+	rcu_read_lock();
+	ret = bpf_prog_kallsyms_find(addr) != NULL;
+	rcu_read_unlock();
+
+	return ret;
+}
+
+int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+		    char *sym)
+{
+	unsigned long symbol_start, symbol_end;
+	struct bpf_prog_aux *aux;
+	unsigned int it = 0;
+	int ret = -ERANGE;
+
+	if (!bpf_jit_kallsyms_enabled())
+		return ret;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
+		if (it++ != symnum)
+			continue;
+
+		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+		bpf_get_prog_name(aux->prog, sym);
+
+		*value = symbol_start;
+		*type  = BPF_SYM_ELF_TYPE;
+
+		ret = 0;
+		break;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 		     unsigned int alignment,
@@ -326,6 +531,24 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 	module_memfree(hdr);
 }
 
+/* This symbol is only overridden by archs that have different
+ * requirements than the usual eBPF JITs, f.e. when they only
+ * implement cBPF JIT, do not set images read-only, etc.
+ */
+void __weak bpf_jit_free(struct bpf_prog *fp)
+{
+	if (fp->jited) {
+		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
+
+		bpf_jit_binary_unlock_ro(hdr);
+		bpf_jit_binary_free(hdr);
+
+		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
+	}
+
+	bpf_prog_unlock_free(fp);
+}
+
 int bpf_jit_harden __read_mostly;
 
 static int bpf_jit_blind_insn(const struct bpf_insn *from,
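
A userspace sketch of the naming scheme implemented by bpf_get_prog_name()
above: the symbol is "bpf_prog_" plus the hex-encoded 8-byte program tag.
The tag bytes below are made up for illustration:

#include <stdio.h>

int main(void)
{
	const unsigned char tag[8] = { 0x1c, 0x4a, 0x5f, 0x71,
				       0x2b, 0x6c, 0x96, 0xa4 };
	char sym[32];	/* "bpf_prog_" + 16 hex digits + NUL */
	int i, n;

	n = snprintf(sym, sizeof(sym), "bpf_prog_");
	for (i = 0; i < 8; i++)
		n += snprintf(sym + n, sizeof(sym) - n, "%02x", tag[i]);
	printf("%s\n", sym);	/* prints bpf_prog_1c4a5f712b6c96a4 */
	return 0;
}
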
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index f74ca17af64a..461eb1e66a0f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -707,6 +707,7 @@ void bpf_prog_put(struct bpf_prog *prog)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
 		trace_bpf_prog_put_rcu(prog);
+		bpf_prog_kallsyms_del(prog);
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 	}
 }
@@ -903,6 +904,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 		/* failed to allocate fd */
 		goto free_used_maps;
 
+	bpf_prog_kallsyms_add(prog);
 	trace_bpf_prog_load(prog, err);
 	return err;
 
diff --git a/kernel/extable.c b/kernel/extable.c
index e3beec4a2339..bd82117ad424 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/init.h>
+#include <linux/filter.h>
 
 #include <asm/sections.h>
 #include <linux/uaccess.h>
@@ -104,6 +105,8 @@ int __kernel_text_address(unsigned long addr)
 		return 1;
 	if (is_ftrace_trampoline(addr))
 		return 1;
+	if (is_bpf_text_address(addr))
+		return 1;
 	/*
 	 * There might be init symbols in saved stacktraces.
 	 * Give those symbols a chance to be printed in
@@ -123,7 +126,11 @@ int kernel_text_address(unsigned long addr)
 		return 1;
 	if (is_module_text_address(addr))
 		return 1;
-	return is_ftrace_trampoline(addr);
+	if (is_ftrace_trampoline(addr))
+		return 1;
+	if (is_bpf_text_address(addr))
+		return 1;
+	return 0;
 }
 
 /*
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index fafd1a3ef0da..6a3b249a2ae1 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/ctype.h>
 #include <linux/slab.h>
+#include <linux/filter.h>
 #include <linux/compiler.h>
 
 #include <asm/sections.h>
@@ -300,10 +301,11 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
 			       unsigned long *offset)
 {
 	char namebuf[KSYM_NAME_LEN];
+
 	if (is_ksym_addr(addr))
 		return !!get_symbol_pos(addr, symbolsize, offset);
-
-	return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf);
+	return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
+	       !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
 }
 
 /*
@@ -318,6 +320,8 @@ const char *kallsyms_lookup(unsigned long addr,
 			    unsigned long *offset,
 			    char **modname, char *namebuf)
 {
+	const char *ret;
+
 	namebuf[KSYM_NAME_LEN - 1] = 0;
 	namebuf[0] = 0;
 
@@ -333,9 +337,13 @@ const char *kallsyms_lookup(unsigned long addr,
 		return namebuf;
 	}
 
-	/* See if it's in a module. */
-	return module_address_lookup(addr, symbolsize, offset, modname,
-				     namebuf);
+	/* See if it's in a module or a BPF JITed image. */
+	ret = module_address_lookup(addr, symbolsize, offset,
+				    modname, namebuf);
+	if (!ret)
+		ret = bpf_address_lookup(addr, symbolsize,
+					 offset, modname, namebuf);
+	return ret;
 }
 
 int lookup_symbol_name(unsigned long addr, char *symname)
@@ -471,6 +479,7 @@ EXPORT_SYMBOL(__print_symbol);
 /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
 struct kallsym_iter {
 	loff_t pos;
+	loff_t pos_mod_end;
 	unsigned long value;
 	unsigned int nameoff; /* If iterating in core kernel symbols. */
 	char type;
@@ -481,13 +490,27 @@ struct kallsym_iter {
 
 static int get_ksymbol_mod(struct kallsym_iter *iter)
 {
-	if (module_get_kallsym(iter->pos - kallsyms_num_syms, &iter->value,
-			       &iter->type, iter->name, iter->module_name,
-			       &iter->exported) < 0)
+	int ret = module_get_kallsym(iter->pos - kallsyms_num_syms,
+				     &iter->value, &iter->type,
+				     iter->name, iter->module_name,
+				     &iter->exported);
+	if (ret < 0) {
+		iter->pos_mod_end = iter->pos;
 		return 0;
+	}
+
 	return 1;
 }
 
+static int get_ksymbol_bpf(struct kallsym_iter *iter)
+{
+	iter->module_name[0] = '\0';
+	iter->exported = 0;
+	return bpf_get_kallsym(iter->pos - iter->pos_mod_end,
+			       &iter->value, &iter->type,
+			       iter->name) < 0 ? 0 : 1;
+}
+
 /* Returns space to next name. */
 static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
 {
@@ -508,16 +531,30 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
 	iter->name[0] = '\0';
 	iter->nameoff = get_symbol_offset(new_pos);
 	iter->pos = new_pos;
+	if (new_pos == 0)
+		iter->pos_mod_end = 0;
+}
+
+static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
+{
+	iter->pos = pos;
+
+	if (iter->pos_mod_end > 0 &&
+	    iter->pos_mod_end < iter->pos)
+		return get_ksymbol_bpf(iter);
+
+	if (!get_ksymbol_mod(iter))
+		return get_ksymbol_bpf(iter);
+
+	return 1;
 }
 
 /* Returns false if pos at or past end of file. */
 static int update_iter(struct kallsym_iter *iter, loff_t pos)
 {
 	/* Module symbols can be accessed randomly. */
-	if (pos >= kallsyms_num_syms) {
-		iter->pos = pos;
-		return get_ksymbol_mod(iter);
-	}
+	if (pos >= kallsyms_num_syms)
+		return update_iter_mod(iter, pos);
 
 	/* If we're not on the desired position, reset to new position. */
 	if (pos != iter->pos)
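
A toy model (plain C, not kernel code) of the iteration order the kallsyms
changes above produce: core symbols first, then module symbols, then, once
module_get_kallsym() is exhausted, BPF symbols indexed relative to the
remembered pos_mod_end. All symbol names here are made up:

#include <stdio.h>

static const char *core_syms[] = { "start_kernel", "schedule" };
static const char *mod_syms[]  = { "some_module_fn" };
static const char *bpf_syms[]  = { "bpf_prog_1c4a5f712b6c96a4" };

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const char *get_sym(unsigned long pos, unsigned long *pos_mod_end)
{
	if (pos < ARRAY_SIZE(core_syms))
		return core_syms[pos];
	pos -= ARRAY_SIZE(core_syms);	/* module-relative position */
	if (pos < ARRAY_SIZE(mod_syms))
		return mod_syms[pos];
	if (!*pos_mod_end)		/* modules ran out: remember where */
		*pos_mod_end = pos;
	pos -= *pos_mod_end;		/* BPF-relative, as in get_ksymbol_bpf() */
	if (pos < ARRAY_SIZE(bpf_syms))
		return bpf_syms[pos];
	return NULL;			/* end of file */
}

int main(void)
{
	unsigned long pos, pos_mod_end = 0;
	const char *sym;

	for (pos = 0; (sym = get_sym(pos, &pos_mod_end)); pos++)
		puts(sym);
	return 0;
}
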
diff --git a/net/Kconfig b/net/Kconfig
index f19c0c3b9589..102f781a0131 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -297,7 +297,8 @@ config BPF_JIT
 
 	  Note, admin should enable this feature changing:
 	  /proc/sys/net/core/bpf_jit_enable
-	  /proc/sys/net/core/bpf_jit_harden (optional)
+	  /proc/sys/net/core/bpf_jit_harden   (optional)
+	  /proc/sys/net/core/bpf_jit_kallsyms (optional)
 
 config NET_FLOW_LIMIT
 	bool
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index eaa72eb0399c..4ead336e14ea 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -334,6 +334,13 @@ static struct ctl_table net_core_table[] = {
 		.mode		= 0600,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "bpf_jit_kallsyms",
+		.data		= &bpf_jit_kallsyms,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec,
+	},
 # endif
 #endif
 	{
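
Finally, a minimal sketch of flipping the new knob from userspace (assumes
root, CONFIG_BPF_JIT, and the 0600 mode set above); it is equivalent to
running "sysctl -w net.core.bpf_jit_kallsyms=1":

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/core/bpf_jit_kallsyms", "w");

	if (!f)
		return 1;
	fputs("1\n", f);
	return fclose(f) == 0 ? 0 : 1;
}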