 include/linux/bpf.h    | 13
 include/linux/filter.h | 14
 kernel/bpf/core.c      | 43
 kernel/bpf/syscall.c   | 38
 kernel/bpf/verifier.c  | 17
 5 files changed, 93 insertions(+), 32 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8796ff03f472..f74ae68086dc 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
-void bpf_prog_calc_digest(struct bpf_prog *fp);
+int bpf_prog_calc_digest(struct bpf_prog *fp);
 
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
 
@@ -238,6 +238,8 @@ struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
 void bpf_prog_sub(struct bpf_prog *prog, int i);
 struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
+int __bpf_prog_charge(struct user_struct *user, u32 pages);
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
@@ -318,6 +320,15 @@ static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
+
+static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
+{
+	return 0;
+}
+
+static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+{
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 /* verifier prototypes for helper functions called from eBPF programs */
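
With the stubs above, callers need no CONFIG_BPF_SYSCALL guards: charging degrades to a no-op success when the BPF syscall is compiled out. A minimal user-space sketch of the charge/rollback discipline the pair is meant for (the accounting struct and names here are illustrative stand-ins, not kernel API; the kernel variant further down in kernel/bpf/syscall.c uses atomics):

#include <errno.h>
#include <stdlib.h>

struct accounting { long locked; long limit; };

static int charge(struct accounting *a, long pages)
{
	if (a->locked + pages > a->limit)
		return -EPERM;		/* RLIMIT_MEMLOCK analogue exceeded */
	a->locked += pages;
	return 0;
}

static void uncharge(struct accounting *a, long pages)
{
	a->locked -= pages;
}

static void *grow(struct accounting *a, void *old, long old_pages, long new_pages)
{
	long delta = new_pages - old_pages;
	void *mem;

	if (charge(a, delta))		/* account before allocating */
		return NULL;
	mem = realloc(old, new_pages * 4096);
	if (!mem)
		uncharge(a, delta);	/* roll back on allocation failure */
	return mem;
}

This mirrors the flow bpf_prog_realloc() adopts below: charge the page delta first, then undo the charge on any later failure path.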
diff --git a/include/linux/filter.h b/include/linux/filter.h
index af8a1804cac6..702314253797 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -57,9 +57,6 @@ struct bpf_prog_aux;
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK	512
 
-/* Maximum BPF program size in bytes. */
-#define MAX_BPF_SIZE	(BPF_MAXINSNS * sizeof(struct bpf_insn))
-
 /* Helper macros for filter block array initializers. */
 
 /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -517,6 +514,17 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
 	return BPF_PROG_RUN(prog, xdp);
 }
 
+static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
+{
+	return prog->len * sizeof(struct bpf_insn);
+}
+
+static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
+{
+	return round_up(bpf_prog_insn_size(prog) +
+			sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
+}
+
 static inline unsigned int bpf_prog_size(unsigned int proglen)
 {
 	return max(sizeof(struct bpf_prog),
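
bpf_prog_digest_scratch_size() sizes the buffer so SHA-1 padding always fits: the instruction image, one 0x80 terminator byte, and an 8-byte bit count, rounded up to the 64-byte SHA-1 block size (SHA_MESSAGE_BYTES). A compilable sketch of the arithmetic with the constants written out (8-byte instructions assumed, matching sizeof(struct bpf_insn)):

#include <stdint.h>
#include <stdio.h>

#define SHA_MESSAGE_BYTES	64	/* SHA-1 block size */
#define BPF_INSN_BYTES		8	/* sizeof(struct bpf_insn) */

static uint32_t digest_scratch_size(uint32_t insn_cnt)
{
	uint32_t raw = insn_cnt * BPF_INSN_BYTES	/* instruction image */
		     + sizeof(uint64_t)			/* 64-bit length field */
		     + 1;				/* 0x80 terminator */

	/* round_up(raw, SHA_MESSAGE_BYTES) */
	return (raw + SHA_MESSAGE_BYTES - 1) & ~(SHA_MESSAGE_BYTES - 1u);
}

int main(void)
{
	/* 7 insns: 56 + 9 = 65 bytes minimum, rounded up to 128 */
	printf("%u\n", digest_scratch_size(7));
	/* 8 insns: 64 + 9 = 73 bytes minimum, also 128 */
	printf("%u\n", digest_scratch_size(8));
	return 0;
}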
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 83e0d153b0b4..1eb4f1303756 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -105,19 +105,29 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
 			  gfp_extra_flags;
 	struct bpf_prog *fp;
+	u32 pages, delta;
+	int ret;
 
 	BUG_ON(fp_old == NULL);
 
 	size = round_up(size, PAGE_SIZE);
-	if (size <= fp_old->pages * PAGE_SIZE)
+	pages = size / PAGE_SIZE;
+	if (pages <= fp_old->pages)
 		return fp_old;
 
+	delta = pages - fp_old->pages;
+	ret = __bpf_prog_charge(fp_old->aux->user, delta);
+	if (ret)
+		return NULL;
+
 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
-	if (fp != NULL) {
+	if (fp == NULL) {
+		__bpf_prog_uncharge(fp_old->aux->user, delta);
+	} else {
 		kmemcheck_annotate_bitfield(fp, meta);
 
 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
-		fp->pages = size / PAGE_SIZE;
+		fp->pages = pages;
 		fp->aux->prog = fp;
 
 		/* We keep fp->aux from fp_old around in the new
@@ -136,28 +146,29 @@ void __bpf_prog_free(struct bpf_prog *fp)
 	vfree(fp);
 }
 
-#define SHA_BPF_RAW_SIZE \
-	round_up(MAX_BPF_SIZE + sizeof(__be64) + 1, SHA_MESSAGE_BYTES)
-
-/* Called under verifier mutex. */
-void bpf_prog_calc_digest(struct bpf_prog *fp)
+int bpf_prog_calc_digest(struct bpf_prog *fp)
 {
 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
-	static u32 ws[SHA_WORKSPACE_WORDS];
-	static u8 raw[SHA_BPF_RAW_SIZE];
-	struct bpf_insn *dst = (void *)raw;
+	u32 raw_size = bpf_prog_digest_scratch_size(fp);
+	u32 ws[SHA_WORKSPACE_WORDS];
 	u32 i, bsize, psize, blocks;
+	struct bpf_insn *dst;
 	bool was_ld_map;
-	u8 *todo = raw;
+	u8 *raw, *todo;
 	__be32 *result;
 	__be64 *bits;
 
+	raw = vmalloc(raw_size);
+	if (!raw)
+		return -ENOMEM;
+
 	sha_init(fp->digest);
 	memset(ws, 0, sizeof(ws));
 
 	/* We need to take out the map fd for the digest calculation
 	 * since they are unstable from user space side.
 	 */
+	dst = (void *)raw;
 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
 		dst[i] = fp->insnsi[i];
 		if (!was_ld_map &&
@@ -177,12 +188,13 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
 		}
 	}
 
-	psize = fp->len * sizeof(struct bpf_insn);
-	memset(&raw[psize], 0, sizeof(raw) - psize);
+	psize = bpf_prog_insn_size(fp);
+	memset(&raw[psize], 0, raw_size - psize);
 	raw[psize++] = 0x80;
 
 	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
 	blocks = bsize / SHA_MESSAGE_BYTES;
+	todo   = raw;
 	if (bsize - psize >= sizeof(__be64)) {
 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
 	} else {
@@ -199,6 +211,9 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
 	result = (__force __be32 *)fp->digest;
 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
 		result[i] = cpu_to_be32(fp->digest[i]);
+
+	vfree(raw);
+	return 0;
 }
 
 static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
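
The padding above follows the standard SHA-1 message layout: a 0x80 terminator byte, zero fill, then the pre-padding length in bits as a big-endian 64-bit value in the last 8 bytes of the final 64-byte block; if the terminator leaves fewer than 8 spare bytes, the count spills into one extra block. A user-space sketch of just that tail handling, assuming a buffer sized like bpf_prog_digest_scratch_size():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SHA_MESSAGE_BYTES 64	/* SHA-1 block size */

/* Pads raw[] in place and returns the number of 64-byte blocks to hash. */
static uint32_t sha1_pad(uint8_t *raw, uint32_t psize, uint32_t raw_size)
{
	uint64_t bits = (uint64_t)psize * 8;	/* length before padding */
	uint32_t bsize, blocks;

	memset(raw + psize, 0, raw_size - psize);
	raw[psize++] = 0x80;			/* mandatory terminator */

	bsize  = (psize + SHA_MESSAGE_BYTES - 1) & ~(SHA_MESSAGE_BYTES - 1u);
	blocks = bsize / SHA_MESSAGE_BYTES;
	if (bsize - psize < sizeof(uint64_t)) {	/* no room for the count */
		bsize += SHA_MESSAGE_BYTES;
		blocks++;
	}
	for (int i = 0; i < 8; i++)		/* big-endian bit count */
		raw[bsize - 1 - i] = (uint8_t)(bits >> (8 * i));
	return blocks;
}

int main(void)
{
	uint8_t raw[128] = { 0 };

	/* 56 payload bytes + 0x80 leave only 7 spare bytes in block 0,
	 * so the bit count spills into a second block: prints 2. */
	printf("%u\n", sha1_pad(raw, 56, sizeof(raw)));
	return 0;
}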
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4819ec9d95f6..e89acea22ecf 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -615,19 +615,39 @@ static void free_used_maps(struct bpf_prog_aux *aux)
 	kfree(aux->used_maps);
 }
 
+int __bpf_prog_charge(struct user_struct *user, u32 pages)
+{
+	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	unsigned long user_bufs;
+
+	if (user) {
+		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
+		if (user_bufs > memlock_limit) {
+			atomic_long_sub(pages, &user->locked_vm);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+{
+	if (user)
+		atomic_long_sub(pages, &user->locked_vm);
+}
+
 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
 {
 	struct user_struct *user = get_current_user();
-	unsigned long memlock_limit;
-
-	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	int ret;
 
-	atomic_long_add(prog->pages, &user->locked_vm);
-	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
-		atomic_long_sub(prog->pages, &user->locked_vm);
+	ret = __bpf_prog_charge(user, prog->pages);
+	if (ret) {
 		free_uid(user);
-		return -EPERM;
+		return ret;
 	}
+
 	prog->aux->user = user;
 	return 0;
 }
@@ -636,7 +656,7 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
 {
 	struct user_struct *user = prog->aux->user;
 
-	atomic_long_sub(prog->pages, &user->locked_vm);
+	__bpf_prog_uncharge(user, prog->pages);
 	free_uid(user);
 }
 
@@ -811,7 +831,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 
 	err = -EFAULT;
 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
-			   prog->len * sizeof(struct bpf_insn)) != 0)
+			   bpf_prog_insn_size(prog)) != 0)
 		goto free_prog;
 
 	prog->orig_prog = NULL;
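
Note that __bpf_prog_charge() checks the value returned by atomic_long_add_return() instead of re-reading the counter as the old inline code did, so each caller tests the counter state that includes exactly its own addition. A compilable C11 sketch of that pattern (names are illustrative):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_long locked_vm;

static int charge(long pages, long limit)
{
	/* fetch_add returns the prior value; add pages for our new total */
	long now = atomic_fetch_add(&locked_vm, pages) + pages;

	if (now > limit) {
		atomic_fetch_sub(&locked_vm, pages);	/* back out our add */
		return -EPERM;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", charge(4, 16));			/* 0: under limit */
	printf("%d\n", charge(16, 16));			/* -EPERM: over limit */
	printf("%ld\n", atomic_load(&locked_vm));	/* 4: charge rolled back */
	return 0;
}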
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 81e267bc4640..83ed2f8f6f22 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -462,14 +462,19 @@ static void init_reg_state(struct bpf_reg_state *regs)
 	regs[BPF_REG_1].type = PTR_TO_CTX;
 }
 
-static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
+static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
 {
-	BUG_ON(regno >= MAX_BPF_REG);
 	regs[regno].type = UNKNOWN_VALUE;
 	regs[regno].id = 0;
 	regs[regno].imm = 0;
 }
 
+static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
+{
+	BUG_ON(regno >= MAX_BPF_REG);
+	__mark_reg_unknown_value(regs, regno);
+}
+
 static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
 {
 	regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
@@ -1976,7 +1981,7 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
 		 */
 		reg->id = 0;
 		if (type == UNKNOWN_VALUE)
-			mark_reg_unknown_value(regs, regno);
+			__mark_reg_unknown_value(regs, regno);
 	}
 }
 
@@ -2931,6 +2936,10 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
 	int insn_cnt = env->prog->len;
 	int i, j, err;
 
+	err = bpf_prog_calc_digest(env->prog);
+	if (err)
+		return err;
+
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		if (BPF_CLASS(insn->code) == BPF_LDX &&
 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
@@ -3178,8 +3187,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 		log_level = 0;
 	}
 
-	bpf_prog_calc_digest(env->prog);
-
 	ret = replace_map_fd_with_map_ptr(env);
 	if (ret < 0)
 		goto skip_full_check;
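
Moving the call into replace_map_fd_with_map_ptr() keeps the required ordering — the digest must be taken while ld_imm64 map loads still carry raw fds, before they are rewritten into kernel map pointers — while giving the now-fallible bpf_prog_calc_digest() an error path for its -ENOMEM. The fd blanking the digest relies on (per the "take out the map fd" comment in kernel/bpf/core.c above) can be sketched in plain C like this; struct insn is a trimmed, hypothetical stand-in for struct bpf_insn, and the second-half check is simplified:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct insn {
	uint8_t	code;
	uint8_t	src_reg;
	int32_t	imm;
};

#define BPF_LD_IMM64		0x18	/* BPF_LD | BPF_IMM | BPF_DW */
#define BPF_PSEUDO_MAP_FD	1	/* src_reg tag: imm holds a map fd */

/* Copy insns for hashing, zeroing the per-process map fd carried in
 * the imm of a ld_imm64 map load and in the pseudo insn after it. */
static void sanitize_for_digest(struct insn *dst, const struct insn *src,
				uint32_t len)
{
	bool was_ld_map = false;

	for (uint32_t i = 0; i < len; i++) {
		dst[i] = src[i];
		if (!was_ld_map && src[i].code == BPF_LD_IMM64 &&
		    src[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map && src[i].code == 0) {
			was_ld_map = false;
			dst[i].imm = 0;	/* second half of the 16-byte insn */
		} else {
			was_ld_map = false;
		}
	}
}

int main(void)
{
	struct insn prog[2] = {
		{ BPF_LD_IMM64, BPF_PSEUDO_MAP_FD, 5 },	/* map fd 5 */
		{ 0, 0, 0 },				/* upper 32 bits */
	};
	struct insn out[2];

	sanitize_for_digest(out, prog, 2);
	printf("%d %d\n", out[0].imm, out[1].imm);	/* 0 0 */
	return 0;
}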