Diffstat (limited to 'kernel/bpf')
-rw-r--r--	kernel/bpf/core.c	80
1 file changed, 78 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 7f0dbcbb34af..b54bb2c2e494 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -22,6 +22,7 @@
  */
 #include <linux/filter.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 #include <asm/unaligned.h>
 
 /* Registers */
@@ -63,6 +64,67 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
 	return NULL;
 }
 
+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
+{
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
+			  gfp_extra_flags;
+	struct bpf_work_struct *ws;
+	struct bpf_prog *fp;
+
+	size = round_up(size, PAGE_SIZE);
+	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
+	if (fp == NULL)
+		return NULL;
+
+	ws = kmalloc(sizeof(*ws), GFP_KERNEL | gfp_extra_flags);
+	if (ws == NULL) {
+		vfree(fp);
+		return NULL;
+	}
+
+	fp->pages = size / PAGE_SIZE;
+	fp->work = ws;
+
+	return fp;
+}
+EXPORT_SYMBOL_GPL(bpf_prog_alloc);
+
+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
+				  gfp_t gfp_extra_flags)
+{
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
+			  gfp_extra_flags;
+	struct bpf_prog *fp;
+
+	BUG_ON(fp_old == NULL);
+
+	size = round_up(size, PAGE_SIZE);
+	if (size <= fp_old->pages * PAGE_SIZE)
+		return fp_old;
+
+	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
+	if (fp != NULL) {
+		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
+		fp->pages = size / PAGE_SIZE;
+
+		/* We keep fp->work from fp_old around in the new
+		 * reallocated structure.
+		 */
+		fp_old->work = NULL;
+		__bpf_prog_free(fp_old);
+	}
+
+	return fp;
+}
+EXPORT_SYMBOL_GPL(bpf_prog_realloc);
+
+void __bpf_prog_free(struct bpf_prog *fp)
+{
+	kfree(fp->work);
+	vfree(fp);
+}
+EXPORT_SYMBOL_GPL(__bpf_prog_free);
+
 /* Base function for offset calculation. Needs to go into .text section,
  * therefore keeping it non-static as well; will also be used by JITs
  * anyway later on, so do not let the compiler omit it.
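
[note] The helpers above split an eBPF program into two allocations: the image itself comes from __vmalloc() in whole-page granularity, so it can later be flipped read-only page by page, while the bookkeeping needed for deferred freeing sits in a small, separately kmalloc()'ed struct bpf_work_struct that stays writable. The structure's definition is not part of this file's diff (it would live in include/linux/filter.h), so the following is only a sketch inferred from how the code in this patch uses it; treat it as an assumption rather than the patch's actual declaration:

	/* Assumed shape of struct bpf_work_struct, inferred from its
	 * users in this diff; not taken from the patch itself.
	 */
	struct bpf_work_struct {
		struct bpf_prog *prog;	/* program whose image gets freed */
		struct work_struct work;	/* queued by bpf_prog_free() */
	};

Keeping this struct outside the vmalloc'ed area is what allows bpf_prog_free() further down to queue work even after the program image has been made read-only.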
@@ -523,12 +585,26 @@ void bpf_prog_select_runtime(struct bpf_prog *fp)
 
 	/* Probe if internal BPF can be JITed */
 	bpf_int_jit_compile(fp);
+	/* Lock whole bpf_prog as read-only */
+	bpf_prog_lock_ro(fp);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
 
-/* free internal BPF program */
+static void bpf_prog_free_deferred(struct work_struct *work)
+{
+	struct bpf_work_struct *ws;
+
+	ws = container_of(work, struct bpf_work_struct, work);
+	bpf_jit_free(ws->prog);
+}
+
+/* Free internal BPF program */
 void bpf_prog_free(struct bpf_prog *fp)
 {
-	bpf_jit_free(fp);
+	struct bpf_work_struct *ws = fp->work;
+
+	INIT_WORK(&ws->work, bpf_prog_free_deferred);
+	ws->prog = fp;
+	schedule_work(&ws->work);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_free);
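
[note] Taken together, the hunks change the bpf_prog lifecycle: allocation rounds up to whole pages, bpf_prog_select_runtime() locks the image read-only after JIT probing, and freeing is pushed to a workqueue, the idea being that the image has to be made writable again before vfree(), which is not safe in every context bpf_prog_free() may be called from. A minimal, hypothetical caller might look as follows; example_load(), insns and prog_len are illustrative only, and bpf_prog_size() is assumed to be the existing sizing helper from include/linux/filter.h:

	#include <linux/filter.h>
	#include <linux/string.h>

	/* Hypothetical sketch, not part of this patch: driving the new
	 * allocation helpers for an already-verified instruction array.
	 */
	static struct bpf_prog *example_load(const struct bpf_insn *insns,
					     unsigned int prog_len)
	{
		struct bpf_prog *fp;

		fp = bpf_prog_alloc(bpf_prog_size(prog_len), 0);
		if (fp == NULL)
			return NULL;

		fp->len = prog_len;
		memcpy(fp->insnsi, insns, prog_len * sizeof(*insns));

		/* Picks JIT or interpreter; with this patch it also marks
		 * the whole image read-only via bpf_prog_lock_ro().
		 */
		bpf_prog_select_runtime(fp);

		return fp;	/* released later via bpf_prog_free(), deferred */
	}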