diff options
author | Jakub Kicinski <jakub.kicinski@netronome.com> | 2016-09-21 06:43:57 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-09-21 19:50:02 -0400 |
commit | 58e2af8b3a6b587e4ac8414343581da4349d3c0f (patch) | |
tree | c508cab9f1c8c2790cff8a35cad626f4402cee30 | |
parent | 3df126f35f88dc76eea33769f85a3c3bb8ce6c6b (diff) |
bpf: expose internal verifier structures
Move verifier's internal structures to a header file and
prefix their names with bpf_ to avoid potential namespace
conflicts. Those structures will soon be used by external
analyzers.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/linux/bpf_verifier.h | 79 | ||||
-rw-r--r-- | kernel/bpf/verifier.c | 266 |
2 files changed, 182 insertions, 163 deletions
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h new file mode 100644 index 000000000000..9457a22fc6e0 --- /dev/null +++ b/include/linux/bpf_verifier.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or | ||
4 | * modify it under the terms of version 2 of the GNU General Public | ||
5 | * License as published by the Free Software Foundation. | ||
6 | */ | ||
7 | #ifndef _LINUX_BPF_VERIFIER_H | ||
8 | #define _LINUX_BPF_VERIFIER_H 1 | ||
9 | |||
10 | #include <linux/bpf.h> /* for enum bpf_reg_type */ | ||
11 | #include <linux/filter.h> /* for MAX_BPF_STACK */ | ||
12 | |||
13 | struct bpf_reg_state { | ||
14 | enum bpf_reg_type type; | ||
15 | union { | ||
16 | /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ | ||
17 | s64 imm; | ||
18 | |||
19 | /* valid when type == PTR_TO_PACKET* */ | ||
20 | struct { | ||
21 | u32 id; | ||
22 | u16 off; | ||
23 | u16 range; | ||
24 | }; | ||
25 | |||
26 | /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | | ||
27 | * PTR_TO_MAP_VALUE_OR_NULL | ||
28 | */ | ||
29 | struct bpf_map *map_ptr; | ||
30 | }; | ||
31 | }; | ||
32 | |||
33 | enum bpf_stack_slot_type { | ||
34 | STACK_INVALID, /* nothing was stored in this stack slot */ | ||
35 | STACK_SPILL, /* register spilled into stack */ | ||
36 | STACK_MISC /* BPF program wrote some data into this slot */ | ||
37 | }; | ||
38 | |||
39 | #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ | ||
40 | |||
41 | /* state of the program: | ||
42 | * type of all registers and stack info | ||
43 | */ | ||
44 | struct bpf_verifier_state { | ||
45 | struct bpf_reg_state regs[MAX_BPF_REG]; | ||
46 | u8 stack_slot_type[MAX_BPF_STACK]; | ||
47 | struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE]; | ||
48 | }; | ||
49 | |||
50 | /* linked list of verifier states used to prune search */ | ||
51 | struct bpf_verifier_state_list { | ||
52 | struct bpf_verifier_state state; | ||
53 | struct bpf_verifier_state_list *next; | ||
54 | }; | ||
55 | |||
56 | struct bpf_insn_aux_data { | ||
57 | enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ | ||
58 | }; | ||
59 | |||
60 | #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ | ||
61 | |||
62 | /* single container for all structs | ||
63 | * one verifier_env per bpf_check() call | ||
64 | */ | ||
65 | struct bpf_verifier_env { | ||
66 | struct bpf_prog *prog; /* eBPF program being verified */ | ||
67 | struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ | ||
68 | int stack_size; /* number of states to be processed */ | ||
69 | struct bpf_verifier_state cur_state; /* current verifier state */ | ||
70 | struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ | ||
71 | struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ | ||
72 | u32 used_map_cnt; /* number of used maps */ | ||
73 | u32 id_gen; /* used to generate unique reg IDs */ | ||
74 | bool allow_ptr_leaks; | ||
75 | bool seen_direct_write; | ||
76 | struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ | ||
77 | }; | ||
78 | |||
79 | #endif /* _LINUX_BPF_VERIFIER_H */ | ||
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a9542d89f293..dca2b9b1d02e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/bpf.h> | 16 | #include <linux/bpf.h> |
17 | #include <linux/bpf_verifier.h> | ||
17 | #include <linux/filter.h> | 18 | #include <linux/filter.h> |
18 | #include <net/netlink.h> | 19 | #include <net/netlink.h> |
19 | #include <linux/file.h> | 20 | #include <linux/file.h> |
@@ -126,82 +127,16 @@ | |||
126 | * are set to NOT_INIT to indicate that they are no longer readable. | 127 | * are set to NOT_INIT to indicate that they are no longer readable. |
127 | */ | 128 | */ |
128 | 129 | ||
129 | struct reg_state { | ||
130 | enum bpf_reg_type type; | ||
131 | union { | ||
132 | /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ | ||
133 | s64 imm; | ||
134 | |||
135 | /* valid when type == PTR_TO_PACKET* */ | ||
136 | struct { | ||
137 | u32 id; | ||
138 | u16 off; | ||
139 | u16 range; | ||
140 | }; | ||
141 | |||
142 | /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | | ||
143 | * PTR_TO_MAP_VALUE_OR_NULL | ||
144 | */ | ||
145 | struct bpf_map *map_ptr; | ||
146 | }; | ||
147 | }; | ||
148 | |||
149 | enum bpf_stack_slot_type { | ||
150 | STACK_INVALID, /* nothing was stored in this stack slot */ | ||
151 | STACK_SPILL, /* register spilled into stack */ | ||
152 | STACK_MISC /* BPF program wrote some data into this slot */ | ||
153 | }; | ||
154 | |||
155 | #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ | ||
156 | |||
157 | /* state of the program: | ||
158 | * type of all registers and stack info | ||
159 | */ | ||
160 | struct verifier_state { | ||
161 | struct reg_state regs[MAX_BPF_REG]; | ||
162 | u8 stack_slot_type[MAX_BPF_STACK]; | ||
163 | struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE]; | ||
164 | }; | ||
165 | |||
166 | /* linked list of verifier states used to prune search */ | ||
167 | struct verifier_state_list { | ||
168 | struct verifier_state state; | ||
169 | struct verifier_state_list *next; | ||
170 | }; | ||
171 | |||
172 | /* verifier_state + insn_idx are pushed to stack when branch is encountered */ | 130 | /* verifier_state + insn_idx are pushed to stack when branch is encountered */ |
173 | struct verifier_stack_elem { | 131 | struct bpf_verifier_stack_elem { |
174 | /* verifer state is 'st' | 132 | /* verifer state is 'st' |
175 | * before processing instruction 'insn_idx' | 133 | * before processing instruction 'insn_idx' |
176 | * and after processing instruction 'prev_insn_idx' | 134 | * and after processing instruction 'prev_insn_idx' |
177 | */ | 135 | */ |
178 | struct verifier_state st; | 136 | struct bpf_verifier_state st; |
179 | int insn_idx; | 137 | int insn_idx; |
180 | int prev_insn_idx; | 138 | int prev_insn_idx; |
181 | struct verifier_stack_elem *next; | 139 | struct bpf_verifier_stack_elem *next; |
182 | }; | ||
183 | |||
184 | struct bpf_insn_aux_data { | ||
185 | enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ | ||
186 | }; | ||
187 | |||
188 | #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ | ||
189 | |||
190 | /* single container for all structs | ||
191 | * one verifier_env per bpf_check() call | ||
192 | */ | ||
193 | struct verifier_env { | ||
194 | struct bpf_prog *prog; /* eBPF program being verified */ | ||
195 | struct verifier_stack_elem *head; /* stack of verifier states to be processed */ | ||
196 | int stack_size; /* number of states to be processed */ | ||
197 | struct verifier_state cur_state; /* current verifier state */ | ||
198 | struct verifier_state_list **explored_states; /* search pruning optimization */ | ||
199 | struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ | ||
200 | u32 used_map_cnt; /* number of used maps */ | ||
201 | u32 id_gen; /* used to generate unique reg IDs */ | ||
202 | bool allow_ptr_leaks; | ||
203 | bool seen_direct_write; | ||
204 | struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ | ||
205 | }; | 140 | }; |
206 | 141 | ||
207 | #define BPF_COMPLEXITY_LIMIT_INSNS 65536 | 142 | #define BPF_COMPLEXITY_LIMIT_INSNS 65536 |
@@ -254,9 +189,9 @@ static const char * const reg_type_str[] = { | |||
254 | [PTR_TO_PACKET_END] = "pkt_end", | 189 | [PTR_TO_PACKET_END] = "pkt_end", |
255 | }; | 190 | }; |
256 | 191 | ||
257 | static void print_verifier_state(struct verifier_state *state) | 192 | static void print_verifier_state(struct bpf_verifier_state *state) |
258 | { | 193 | { |
259 | struct reg_state *reg; | 194 | struct bpf_reg_state *reg; |
260 | enum bpf_reg_type t; | 195 | enum bpf_reg_type t; |
261 | int i; | 196 | int i; |
262 | 197 | ||
@@ -432,9 +367,9 @@ static void print_bpf_insn(struct bpf_insn *insn) | |||
432 | } | 367 | } |
433 | } | 368 | } |
434 | 369 | ||
435 | static int pop_stack(struct verifier_env *env, int *prev_insn_idx) | 370 | static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx) |
436 | { | 371 | { |
437 | struct verifier_stack_elem *elem; | 372 | struct bpf_verifier_stack_elem *elem; |
438 | int insn_idx; | 373 | int insn_idx; |
439 | 374 | ||
440 | if (env->head == NULL) | 375 | if (env->head == NULL) |
@@ -451,12 +386,12 @@ static int pop_stack(struct verifier_env *env, int *prev_insn_idx) | |||
451 | return insn_idx; | 386 | return insn_idx; |
452 | } | 387 | } |
453 | 388 | ||
454 | static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx, | 389 | static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, |
455 | int prev_insn_idx) | 390 | int insn_idx, int prev_insn_idx) |
456 | { | 391 | { |
457 | struct verifier_stack_elem *elem; | 392 | struct bpf_verifier_stack_elem *elem; |
458 | 393 | ||
459 | elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL); | 394 | elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); |
460 | if (!elem) | 395 | if (!elem) |
461 | goto err; | 396 | goto err; |
462 | 397 | ||
@@ -482,7 +417,7 @@ static const int caller_saved[CALLER_SAVED_REGS] = { | |||
482 | BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 | 417 | BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 |
483 | }; | 418 | }; |
484 | 419 | ||
485 | static void init_reg_state(struct reg_state *regs) | 420 | static void init_reg_state(struct bpf_reg_state *regs) |
486 | { | 421 | { |
487 | int i; | 422 | int i; |
488 | 423 | ||
@@ -498,7 +433,7 @@ static void init_reg_state(struct reg_state *regs) | |||
498 | regs[BPF_REG_1].type = PTR_TO_CTX; | 433 | regs[BPF_REG_1].type = PTR_TO_CTX; |
499 | } | 434 | } |
500 | 435 | ||
501 | static void mark_reg_unknown_value(struct reg_state *regs, u32 regno) | 436 | static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) |
502 | { | 437 | { |
503 | BUG_ON(regno >= MAX_BPF_REG); | 438 | BUG_ON(regno >= MAX_BPF_REG); |
504 | regs[regno].type = UNKNOWN_VALUE; | 439 | regs[regno].type = UNKNOWN_VALUE; |
@@ -511,7 +446,7 @@ enum reg_arg_type { | |||
511 | DST_OP_NO_MARK /* same as above, check only, don't mark */ | 446 | DST_OP_NO_MARK /* same as above, check only, don't mark */ |
512 | }; | 447 | }; |
513 | 448 | ||
514 | static int check_reg_arg(struct reg_state *regs, u32 regno, | 449 | static int check_reg_arg(struct bpf_reg_state *regs, u32 regno, |
515 | enum reg_arg_type t) | 450 | enum reg_arg_type t) |
516 | { | 451 | { |
517 | if (regno >= MAX_BPF_REG) { | 452 | if (regno >= MAX_BPF_REG) { |
@@ -571,8 +506,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type) | |||
571 | /* check_stack_read/write functions track spill/fill of registers, | 506 | /* check_stack_read/write functions track spill/fill of registers, |
572 | * stack boundary and alignment are checked in check_mem_access() | 507 | * stack boundary and alignment are checked in check_mem_access() |
573 | */ | 508 | */ |
574 | static int check_stack_write(struct verifier_state *state, int off, int size, | 509 | static int check_stack_write(struct bpf_verifier_state *state, int off, |
575 | int value_regno) | 510 | int size, int value_regno) |
576 | { | 511 | { |
577 | int i; | 512 | int i; |
578 | /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, | 513 | /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, |
@@ -597,7 +532,7 @@ static int check_stack_write(struct verifier_state *state, int off, int size, | |||
597 | } else { | 532 | } else { |
598 | /* regular write of data into stack */ | 533 | /* regular write of data into stack */ |
599 | state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = | 534 | state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = |
600 | (struct reg_state) {}; | 535 | (struct bpf_reg_state) {}; |
601 | 536 | ||
602 | for (i = 0; i < size; i++) | 537 | for (i = 0; i < size; i++) |
603 | state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; | 538 | state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; |
@@ -605,7 +540,7 @@ static int check_stack_write(struct verifier_state *state, int off, int size, | |||
605 | return 0; | 540 | return 0; |
606 | } | 541 | } |
607 | 542 | ||
608 | static int check_stack_read(struct verifier_state *state, int off, int size, | 543 | static int check_stack_read(struct bpf_verifier_state *state, int off, int size, |
609 | int value_regno) | 544 | int value_regno) |
610 | { | 545 | { |
611 | u8 *slot_type; | 546 | u8 *slot_type; |
@@ -646,7 +581,7 @@ static int check_stack_read(struct verifier_state *state, int off, int size, | |||
646 | } | 581 | } |
647 | 582 | ||
648 | /* check read/write into map element returned by bpf_map_lookup_elem() */ | 583 | /* check read/write into map element returned by bpf_map_lookup_elem() */ |
649 | static int check_map_access(struct verifier_env *env, u32 regno, int off, | 584 | static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, |
650 | int size) | 585 | int size) |
651 | { | 586 | { |
652 | struct bpf_map *map = env->cur_state.regs[regno].map_ptr; | 587 | struct bpf_map *map = env->cur_state.regs[regno].map_ptr; |
@@ -661,7 +596,7 @@ static int check_map_access(struct verifier_env *env, u32 regno, int off, | |||
661 | 596 | ||
662 | #define MAX_PACKET_OFF 0xffff | 597 | #define MAX_PACKET_OFF 0xffff |
663 | 598 | ||
664 | static bool may_access_direct_pkt_data(struct verifier_env *env, | 599 | static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, |
665 | const struct bpf_call_arg_meta *meta) | 600 | const struct bpf_call_arg_meta *meta) |
666 | { | 601 | { |
667 | switch (env->prog->type) { | 602 | switch (env->prog->type) { |
@@ -678,11 +613,11 @@ static bool may_access_direct_pkt_data(struct verifier_env *env, | |||
678 | } | 613 | } |
679 | } | 614 | } |
680 | 615 | ||
681 | static int check_packet_access(struct verifier_env *env, u32 regno, int off, | 616 | static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, |
682 | int size) | 617 | int size) |
683 | { | 618 | { |
684 | struct reg_state *regs = env->cur_state.regs; | 619 | struct bpf_reg_state *regs = env->cur_state.regs; |
685 | struct reg_state *reg = ®s[regno]; | 620 | struct bpf_reg_state *reg = ®s[regno]; |
686 | 621 | ||
687 | off += reg->off; | 622 | off += reg->off; |
688 | if (off < 0 || size <= 0 || off + size > reg->range) { | 623 | if (off < 0 || size <= 0 || off + size > reg->range) { |
@@ -694,7 +629,7 @@ static int check_packet_access(struct verifier_env *env, u32 regno, int off, | |||
694 | } | 629 | } |
695 | 630 | ||
696 | /* check access to 'struct bpf_context' fields */ | 631 | /* check access to 'struct bpf_context' fields */ |
697 | static int check_ctx_access(struct verifier_env *env, int off, int size, | 632 | static int check_ctx_access(struct bpf_verifier_env *env, int off, int size, |
698 | enum bpf_access_type t, enum bpf_reg_type *reg_type) | 633 | enum bpf_access_type t, enum bpf_reg_type *reg_type) |
699 | { | 634 | { |
700 | if (env->prog->aux->ops->is_valid_access && | 635 | if (env->prog->aux->ops->is_valid_access && |
@@ -709,7 +644,7 @@ static int check_ctx_access(struct verifier_env *env, int off, int size, | |||
709 | return -EACCES; | 644 | return -EACCES; |
710 | } | 645 | } |
711 | 646 | ||
712 | static bool is_pointer_value(struct verifier_env *env, int regno) | 647 | static bool is_pointer_value(struct bpf_verifier_env *env, int regno) |
713 | { | 648 | { |
714 | if (env->allow_ptr_leaks) | 649 | if (env->allow_ptr_leaks) |
715 | return false; | 650 | return false; |
@@ -723,12 +658,13 @@ static bool is_pointer_value(struct verifier_env *env, int regno) | |||
723 | } | 658 | } |
724 | } | 659 | } |
725 | 660 | ||
726 | static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg, | 661 | static int check_ptr_alignment(struct bpf_verifier_env *env, |
727 | int off, int size) | 662 | struct bpf_reg_state *reg, int off, int size) |
728 | { | 663 | { |
729 | if (reg->type != PTR_TO_PACKET) { | 664 | if (reg->type != PTR_TO_PACKET) { |
730 | if (off % size != 0) { | 665 | if (off % size != 0) { |
731 | verbose("misaligned access off %d size %d\n", off, size); | 666 | verbose("misaligned access off %d size %d\n", |
667 | off, size); | ||
732 | return -EACCES; | 668 | return -EACCES; |
733 | } else { | 669 | } else { |
734 | return 0; | 670 | return 0; |
@@ -769,12 +705,12 @@ static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg, | |||
769 | * if t==write && value_regno==-1, some unknown value is stored into memory | 705 | * if t==write && value_regno==-1, some unknown value is stored into memory |
770 | * if t==read && value_regno==-1, don't care what we read from memory | 706 | * if t==read && value_regno==-1, don't care what we read from memory |
771 | */ | 707 | */ |
772 | static int check_mem_access(struct verifier_env *env, u32 regno, int off, | 708 | static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, |
773 | int bpf_size, enum bpf_access_type t, | 709 | int bpf_size, enum bpf_access_type t, |
774 | int value_regno) | 710 | int value_regno) |
775 | { | 711 | { |
776 | struct verifier_state *state = &env->cur_state; | 712 | struct bpf_verifier_state *state = &env->cur_state; |
777 | struct reg_state *reg = &state->regs[regno]; | 713 | struct bpf_reg_state *reg = &state->regs[regno]; |
778 | int size, err = 0; | 714 | int size, err = 0; |
779 | 715 | ||
780 | if (reg->type == PTR_TO_STACK) | 716 | if (reg->type == PTR_TO_STACK) |
@@ -860,9 +796,9 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off, | |||
860 | return err; | 796 | return err; |
861 | } | 797 | } |
862 | 798 | ||
863 | static int check_xadd(struct verifier_env *env, struct bpf_insn *insn) | 799 | static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn) |
864 | { | 800 | { |
865 | struct reg_state *regs = env->cur_state.regs; | 801 | struct bpf_reg_state *regs = env->cur_state.regs; |
866 | int err; | 802 | int err; |
867 | 803 | ||
868 | if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || | 804 | if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || |
@@ -896,12 +832,12 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn) | |||
896 | * bytes from that pointer, make sure that it's within stack boundary | 832 | * bytes from that pointer, make sure that it's within stack boundary |
897 | * and all elements of stack are initialized | 833 | * and all elements of stack are initialized |
898 | */ | 834 | */ |
899 | static int check_stack_boundary(struct verifier_env *env, int regno, | 835 | static int check_stack_boundary(struct bpf_verifier_env *env, int regno, |
900 | int access_size, bool zero_size_allowed, | 836 | int access_size, bool zero_size_allowed, |
901 | struct bpf_call_arg_meta *meta) | 837 | struct bpf_call_arg_meta *meta) |
902 | { | 838 | { |
903 | struct verifier_state *state = &env->cur_state; | 839 | struct bpf_verifier_state *state = &env->cur_state; |
904 | struct reg_state *regs = state->regs; | 840 | struct bpf_reg_state *regs = state->regs; |
905 | int off, i; | 841 | int off, i; |
906 | 842 | ||
907 | if (regs[regno].type != PTR_TO_STACK) { | 843 | if (regs[regno].type != PTR_TO_STACK) { |
@@ -940,11 +876,11 @@ static int check_stack_boundary(struct verifier_env *env, int regno, | |||
940 | return 0; | 876 | return 0; |
941 | } | 877 | } |
942 | 878 | ||
943 | static int check_func_arg(struct verifier_env *env, u32 regno, | 879 | static int check_func_arg(struct bpf_verifier_env *env, u32 regno, |
944 | enum bpf_arg_type arg_type, | 880 | enum bpf_arg_type arg_type, |
945 | struct bpf_call_arg_meta *meta) | 881 | struct bpf_call_arg_meta *meta) |
946 | { | 882 | { |
947 | struct reg_state *regs = env->cur_state.regs, *reg = ®s[regno]; | 883 | struct bpf_reg_state *regs = env->cur_state.regs, *reg = ®s[regno]; |
948 | enum bpf_reg_type expected_type, type = reg->type; | 884 | enum bpf_reg_type expected_type, type = reg->type; |
949 | int err = 0; | 885 | int err = 0; |
950 | 886 | ||
@@ -1149,10 +1085,10 @@ static int check_raw_mode(const struct bpf_func_proto *fn) | |||
1149 | return count > 1 ? -EINVAL : 0; | 1085 | return count > 1 ? -EINVAL : 0; |
1150 | } | 1086 | } |
1151 | 1087 | ||
1152 | static void clear_all_pkt_pointers(struct verifier_env *env) | 1088 | static void clear_all_pkt_pointers(struct bpf_verifier_env *env) |
1153 | { | 1089 | { |
1154 | struct verifier_state *state = &env->cur_state; | 1090 | struct bpf_verifier_state *state = &env->cur_state; |
1155 | struct reg_state *regs = state->regs, *reg; | 1091 | struct bpf_reg_state *regs = state->regs, *reg; |
1156 | int i; | 1092 | int i; |
1157 | 1093 | ||
1158 | for (i = 0; i < MAX_BPF_REG; i++) | 1094 | for (i = 0; i < MAX_BPF_REG; i++) |
@@ -1172,12 +1108,12 @@ static void clear_all_pkt_pointers(struct verifier_env *env) | |||
1172 | } | 1108 | } |
1173 | } | 1109 | } |
1174 | 1110 | ||
1175 | static int check_call(struct verifier_env *env, int func_id) | 1111 | static int check_call(struct bpf_verifier_env *env, int func_id) |
1176 | { | 1112 | { |
1177 | struct verifier_state *state = &env->cur_state; | 1113 | struct bpf_verifier_state *state = &env->cur_state; |
1178 | const struct bpf_func_proto *fn = NULL; | 1114 | const struct bpf_func_proto *fn = NULL; |
1179 | struct reg_state *regs = state->regs; | 1115 | struct bpf_reg_state *regs = state->regs; |
1180 | struct reg_state *reg; | 1116 | struct bpf_reg_state *reg; |
1181 | struct bpf_call_arg_meta meta; | 1117 | struct bpf_call_arg_meta meta; |
1182 | bool changes_data; | 1118 | bool changes_data; |
1183 | int i, err; | 1119 | int i, err; |
@@ -1280,12 +1216,13 @@ static int check_call(struct verifier_env *env, int func_id) | |||
1280 | return 0; | 1216 | return 0; |
1281 | } | 1217 | } |
1282 | 1218 | ||
1283 | static int check_packet_ptr_add(struct verifier_env *env, struct bpf_insn *insn) | 1219 | static int check_packet_ptr_add(struct bpf_verifier_env *env, |
1220 | struct bpf_insn *insn) | ||
1284 | { | 1221 | { |
1285 | struct reg_state *regs = env->cur_state.regs; | 1222 | struct bpf_reg_state *regs = env->cur_state.regs; |
1286 | struct reg_state *dst_reg = ®s[insn->dst_reg]; | 1223 | struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; |
1287 | struct reg_state *src_reg = ®s[insn->src_reg]; | 1224 | struct bpf_reg_state *src_reg = ®s[insn->src_reg]; |
1288 | struct reg_state tmp_reg; | 1225 | struct bpf_reg_state tmp_reg; |
1289 | s32 imm; | 1226 | s32 imm; |
1290 | 1227 | ||
1291 | if (BPF_SRC(insn->code) == BPF_K) { | 1228 | if (BPF_SRC(insn->code) == BPF_K) { |
@@ -1353,10 +1290,10 @@ add_imm: | |||
1353 | return 0; | 1290 | return 0; |
1354 | } | 1291 | } |
1355 | 1292 | ||
1356 | static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn) | 1293 | static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn) |
1357 | { | 1294 | { |
1358 | struct reg_state *regs = env->cur_state.regs; | 1295 | struct bpf_reg_state *regs = env->cur_state.regs; |
1359 | struct reg_state *dst_reg = ®s[insn->dst_reg]; | 1296 | struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; |
1360 | u8 opcode = BPF_OP(insn->code); | 1297 | u8 opcode = BPF_OP(insn->code); |
1361 | s64 imm_log2; | 1298 | s64 imm_log2; |
1362 | 1299 | ||
@@ -1366,7 +1303,7 @@ static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn) | |||
1366 | */ | 1303 | */ |
1367 | 1304 | ||
1368 | if (BPF_SRC(insn->code) == BPF_X) { | 1305 | if (BPF_SRC(insn->code) == BPF_X) { |
1369 | struct reg_state *src_reg = ®s[insn->src_reg]; | 1306 | struct bpf_reg_state *src_reg = ®s[insn->src_reg]; |
1370 | 1307 | ||
1371 | if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 && | 1308 | if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 && |
1372 | dst_reg->imm && opcode == BPF_ADD) { | 1309 | dst_reg->imm && opcode == BPF_ADD) { |
@@ -1455,11 +1392,12 @@ static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn) | |||
1455 | return 0; | 1392 | return 0; |
1456 | } | 1393 | } |
1457 | 1394 | ||
1458 | static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn) | 1395 | static int evaluate_reg_imm_alu(struct bpf_verifier_env *env, |
1396 | struct bpf_insn *insn) | ||
1459 | { | 1397 | { |
1460 | struct reg_state *regs = env->cur_state.regs; | 1398 | struct bpf_reg_state *regs = env->cur_state.regs; |
1461 | struct reg_state *dst_reg = ®s[insn->dst_reg]; | 1399 | struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; |
1462 | struct reg_state *src_reg = ®s[insn->src_reg]; | 1400 | struct bpf_reg_state *src_reg = ®s[insn->src_reg]; |
1463 | u8 opcode = BPF_OP(insn->code); | 1401 | u8 opcode = BPF_OP(insn->code); |
1464 | 1402 | ||
1465 | /* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn. | 1403 | /* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn. |
@@ -1476,9 +1414,9 @@ static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn) | |||
1476 | } | 1414 | } |
1477 | 1415 | ||
1478 | /* check validity of 32-bit and 64-bit arithmetic operations */ | 1416 | /* check validity of 32-bit and 64-bit arithmetic operations */ |
1479 | static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) | 1417 | static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) |
1480 | { | 1418 | { |
1481 | struct reg_state *regs = env->cur_state.regs, *dst_reg; | 1419 | struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; |
1482 | u8 opcode = BPF_OP(insn->code); | 1420 | u8 opcode = BPF_OP(insn->code); |
1483 | int err; | 1421 | int err; |
1484 | 1422 | ||
@@ -1652,10 +1590,10 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) | |||
1652 | return 0; | 1590 | return 0; |
1653 | } | 1591 | } |
1654 | 1592 | ||
1655 | static void find_good_pkt_pointers(struct verifier_state *state, | 1593 | static void find_good_pkt_pointers(struct bpf_verifier_state *state, |
1656 | const struct reg_state *dst_reg) | 1594 | struct bpf_reg_state *dst_reg) |
1657 | { | 1595 | { |
1658 | struct reg_state *regs = state->regs, *reg; | 1596 | struct bpf_reg_state *regs = state->regs, *reg; |
1659 | int i; | 1597 | int i; |
1660 | 1598 | ||
1661 | /* LLVM can generate two kind of checks: | 1599 | /* LLVM can generate two kind of checks: |
@@ -1701,11 +1639,11 @@ static void find_good_pkt_pointers(struct verifier_state *state, | |||
1701 | } | 1639 | } |
1702 | } | 1640 | } |
1703 | 1641 | ||
1704 | static int check_cond_jmp_op(struct verifier_env *env, | 1642 | static int check_cond_jmp_op(struct bpf_verifier_env *env, |
1705 | struct bpf_insn *insn, int *insn_idx) | 1643 | struct bpf_insn *insn, int *insn_idx) |
1706 | { | 1644 | { |
1707 | struct verifier_state *other_branch, *this_branch = &env->cur_state; | 1645 | struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state; |
1708 | struct reg_state *regs = this_branch->regs, *dst_reg; | 1646 | struct bpf_reg_state *regs = this_branch->regs, *dst_reg; |
1709 | u8 opcode = BPF_OP(insn->code); | 1647 | u8 opcode = BPF_OP(insn->code); |
1710 | int err; | 1648 | int err; |
1711 | 1649 | ||
@@ -1767,7 +1705,7 @@ static int check_cond_jmp_op(struct verifier_env *env, | |||
1767 | if (!other_branch) | 1705 | if (!other_branch) |
1768 | return -EFAULT; | 1706 | return -EFAULT; |
1769 | 1707 | ||
1770 | /* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */ | 1708 | /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ |
1771 | if (BPF_SRC(insn->code) == BPF_K && | 1709 | if (BPF_SRC(insn->code) == BPF_K && |
1772 | insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && | 1710 | insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && |
1773 | dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { | 1711 | dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { |
@@ -1809,9 +1747,9 @@ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) | |||
1809 | } | 1747 | } |
1810 | 1748 | ||
1811 | /* verify BPF_LD_IMM64 instruction */ | 1749 | /* verify BPF_LD_IMM64 instruction */ |
1812 | static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn) | 1750 | static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) |
1813 | { | 1751 | { |
1814 | struct reg_state *regs = env->cur_state.regs; | 1752 | struct bpf_reg_state *regs = env->cur_state.regs; |
1815 | int err; | 1753 | int err; |
1816 | 1754 | ||
1817 | if (BPF_SIZE(insn->code) != BPF_DW) { | 1755 | if (BPF_SIZE(insn->code) != BPF_DW) { |
@@ -1866,11 +1804,11 @@ static bool may_access_skb(enum bpf_prog_type type) | |||
1866 | * Output: | 1804 | * Output: |
1867 | * R0 - 8/16/32-bit skb data converted to cpu endianness | 1805 | * R0 - 8/16/32-bit skb data converted to cpu endianness |
1868 | */ | 1806 | */ |
1869 | static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn) | 1807 | static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) |
1870 | { | 1808 | { |
1871 | struct reg_state *regs = env->cur_state.regs; | 1809 | struct bpf_reg_state *regs = env->cur_state.regs; |
1872 | u8 mode = BPF_MODE(insn->code); | 1810 | u8 mode = BPF_MODE(insn->code); |
1873 | struct reg_state *reg; | 1811 | struct bpf_reg_state *reg; |
1874 | int i, err; | 1812 | int i, err; |
1875 | 1813 | ||
1876 | if (!may_access_skb(env->prog->type)) { | 1814 | if (!may_access_skb(env->prog->type)) { |
@@ -1956,7 +1894,7 @@ enum { | |||
1956 | BRANCH = 2, | 1894 | BRANCH = 2, |
1957 | }; | 1895 | }; |
1958 | 1896 | ||
1959 | #define STATE_LIST_MARK ((struct verifier_state_list *) -1L) | 1897 | #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) |
1960 | 1898 | ||
1961 | static int *insn_stack; /* stack of insns to process */ | 1899 | static int *insn_stack; /* stack of insns to process */ |
1962 | static int cur_stack; /* current stack index */ | 1900 | static int cur_stack; /* current stack index */ |
@@ -1967,7 +1905,7 @@ static int *insn_state; | |||
1967 | * w - next instruction | 1905 | * w - next instruction |
1968 | * e - edge | 1906 | * e - edge |
1969 | */ | 1907 | */ |
1970 | static int push_insn(int t, int w, int e, struct verifier_env *env) | 1908 | static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) |
1971 | { | 1909 | { |
1972 | if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) | 1910 | if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) |
1973 | return 0; | 1911 | return 0; |
@@ -2008,7 +1946,7 @@ static int push_insn(int t, int w, int e, struct verifier_env *env) | |||
2008 | /* non-recursive depth-first-search to detect loops in BPF program | 1946 | /* non-recursive depth-first-search to detect loops in BPF program |
2009 | * loop == back-edge in directed graph | 1947 | * loop == back-edge in directed graph |
2010 | */ | 1948 | */ |
2011 | static int check_cfg(struct verifier_env *env) | 1949 | static int check_cfg(struct bpf_verifier_env *env) |
2012 | { | 1950 | { |
2013 | struct bpf_insn *insns = env->prog->insnsi; | 1951 | struct bpf_insn *insns = env->prog->insnsi; |
2014 | int insn_cnt = env->prog->len; | 1952 | int insn_cnt = env->prog->len; |
@@ -2117,7 +2055,8 @@ err_free: | |||
2117 | /* the following conditions reduce the number of explored insns | 2055 | /* the following conditions reduce the number of explored insns |
2118 | * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet | 2056 | * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet |
2119 | */ | 2057 | */ |
2120 | static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur) | 2058 | static bool compare_ptrs_to_packet(struct bpf_reg_state *old, |
2059 | struct bpf_reg_state *cur) | ||
2121 | { | 2060 | { |
2122 | if (old->id != cur->id) | 2061 | if (old->id != cur->id) |
2123 | return false; | 2062 | return false; |
@@ -2192,9 +2131,10 @@ static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur) | |||
2192 | * whereas register type in current state is meaningful, it means that | 2131 | * whereas register type in current state is meaningful, it means that |
2193 | * the current state will reach 'bpf_exit' instruction safely | 2132 | * the current state will reach 'bpf_exit' instruction safely |
2194 | */ | 2133 | */ |
2195 | static bool states_equal(struct verifier_state *old, struct verifier_state *cur) | 2134 | static bool states_equal(struct bpf_verifier_state *old, |
2135 | struct bpf_verifier_state *cur) | ||
2196 | { | 2136 | { |
2197 | struct reg_state *rold, *rcur; | 2137 | struct bpf_reg_state *rold, *rcur; |
2198 | int i; | 2138 | int i; |
2199 | 2139 | ||
2200 | for (i = 0; i < MAX_BPF_REG; i++) { | 2140 | for (i = 0; i < MAX_BPF_REG; i++) { |
@@ -2234,9 +2174,9 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur) | |||
2234 | * the same, check that stored pointers types | 2174 | * the same, check that stored pointers types |
2235 | * are the same as well. | 2175 | * are the same as well. |
2236 | * Ex: explored safe path could have stored | 2176 | * Ex: explored safe path could have stored |
2237 | * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8} | 2177 | * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -8} |
2238 | * but current path has stored: | 2178 | * but current path has stored: |
2239 | * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16} | 2179 | * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -16} |
2240 | * such verifier states are not equivalent. | 2180 | * such verifier states are not equivalent. |
2241 | * return false to continue verification of this path | 2181 | * return false to continue verification of this path |
2242 | */ | 2182 | */ |
@@ -2247,10 +2187,10 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur) | |||
2247 | return true; | 2187 | return true; |
2248 | } | 2188 | } |
2249 | 2189 | ||
2250 | static int is_state_visited(struct verifier_env *env, int insn_idx) | 2190 | static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) |
2251 | { | 2191 | { |
2252 | struct verifier_state_list *new_sl; | 2192 | struct bpf_verifier_state_list *new_sl; |
2253 | struct verifier_state_list *sl; | 2193 | struct bpf_verifier_state_list *sl; |
2254 | 2194 | ||
2255 | sl = env->explored_states[insn_idx]; | 2195 | sl = env->explored_states[insn_idx]; |
2256 | if (!sl) | 2196 | if (!sl) |
@@ -2274,7 +2214,7 @@ static int is_state_visited(struct verifier_env *env, int insn_idx) | |||
2274 | * it will be rejected. Since there are no loops, we won't be | 2214 | * it will be rejected. Since there are no loops, we won't be |
2275 | * seeing this 'insn_idx' instruction again on the way to bpf_exit | 2215 | * seeing this 'insn_idx' instruction again on the way to bpf_exit |
2276 | */ | 2216 | */ |
2277 | new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER); | 2217 | new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER); |
2278 | if (!new_sl) | 2218 | if (!new_sl) |
2279 | return -ENOMEM; | 2219 | return -ENOMEM; |
2280 | 2220 | ||
@@ -2285,11 +2225,11 @@ static int is_state_visited(struct verifier_env *env, int insn_idx) | |||
2285 | return 0; | 2225 | return 0; |
2286 | } | 2226 | } |
2287 | 2227 | ||
2288 | static int do_check(struct verifier_env *env) | 2228 | static int do_check(struct bpf_verifier_env *env) |
2289 | { | 2229 | { |
2290 | struct verifier_state *state = &env->cur_state; | 2230 | struct bpf_verifier_state *state = &env->cur_state; |
2291 | struct bpf_insn *insns = env->prog->insnsi; | 2231 | struct bpf_insn *insns = env->prog->insnsi; |
2292 | struct reg_state *regs = state->regs; | 2232 | struct bpf_reg_state *regs = state->regs; |
2293 | int insn_cnt = env->prog->len; | 2233 | int insn_cnt = env->prog->len; |
2294 | int insn_idx, prev_insn_idx = 0; | 2234 | int insn_idx, prev_insn_idx = 0; |
2295 | int insn_processed = 0; | 2235 | int insn_processed = 0; |
@@ -2572,7 +2512,7 @@ static int check_map_prog_compatibility(struct bpf_map *map, | |||
2572 | /* look for pseudo eBPF instructions that access map FDs and | 2512 | /* look for pseudo eBPF instructions that access map FDs and |
2573 | * replace them with actual map pointers | 2513 | * replace them with actual map pointers |
2574 | */ | 2514 | */ |
2575 | static int replace_map_fd_with_map_ptr(struct verifier_env *env) | 2515 | static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) |
2576 | { | 2516 | { |
2577 | struct bpf_insn *insn = env->prog->insnsi; | 2517 | struct bpf_insn *insn = env->prog->insnsi; |
2578 | int insn_cnt = env->prog->len; | 2518 | int insn_cnt = env->prog->len; |
@@ -2669,7 +2609,7 @@ next_insn: | |||
2669 | } | 2609 | } |
2670 | 2610 | ||
2671 | /* drop refcnt of maps used by the rejected program */ | 2611 | /* drop refcnt of maps used by the rejected program */ |
2672 | static void release_maps(struct verifier_env *env) | 2612 | static void release_maps(struct bpf_verifier_env *env) |
2673 | { | 2613 | { |
2674 | int i; | 2614 | int i; |
2675 | 2615 | ||
@@ -2678,7 +2618,7 @@ static void release_maps(struct verifier_env *env) | |||
2678 | } | 2618 | } |
2679 | 2619 | ||
2680 | /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ | 2620 | /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ |
2681 | static void convert_pseudo_ld_imm64(struct verifier_env *env) | 2621 | static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) |
2682 | { | 2622 | { |
2683 | struct bpf_insn *insn = env->prog->insnsi; | 2623 | struct bpf_insn *insn = env->prog->insnsi; |
2684 | int insn_cnt = env->prog->len; | 2624 | int insn_cnt = env->prog->len; |
@@ -2692,7 +2632,7 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env) | |||
2692 | /* convert load instructions that access fields of 'struct __sk_buff' | 2632 | /* convert load instructions that access fields of 'struct __sk_buff' |
2693 | * into sequence of instructions that access fields of 'struct sk_buff' | 2633 | * into sequence of instructions that access fields of 'struct sk_buff' |
2694 | */ | 2634 | */ |
2695 | static int convert_ctx_accesses(struct verifier_env *env) | 2635 | static int convert_ctx_accesses(struct bpf_verifier_env *env) |
2696 | { | 2636 | { |
2697 | const struct bpf_verifier_ops *ops = env->prog->aux->ops; | 2637 | const struct bpf_verifier_ops *ops = env->prog->aux->ops; |
2698 | const int insn_cnt = env->prog->len; | 2638 | const int insn_cnt = env->prog->len; |
@@ -2757,9 +2697,9 @@ static int convert_ctx_accesses(struct verifier_env *env) | |||
2757 | return 0; | 2697 | return 0; |
2758 | } | 2698 | } |
2759 | 2699 | ||
2760 | static void free_states(struct verifier_env *env) | 2700 | static void free_states(struct bpf_verifier_env *env) |
2761 | { | 2701 | { |
2762 | struct verifier_state_list *sl, *sln; | 2702 | struct bpf_verifier_state_list *sl, *sln; |
2763 | int i; | 2703 | int i; |
2764 | 2704 | ||
2765 | if (!env->explored_states) | 2705 | if (!env->explored_states) |
@@ -2782,16 +2722,16 @@ static void free_states(struct verifier_env *env) | |||
2782 | int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) | 2722 | int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) |
2783 | { | 2723 | { |
2784 | char __user *log_ubuf = NULL; | 2724 | char __user *log_ubuf = NULL; |
2785 | struct verifier_env *env; | 2725 | struct bpf_verifier_env *env; |
2786 | int ret = -EINVAL; | 2726 | int ret = -EINVAL; |
2787 | 2727 | ||
2788 | if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS) | 2728 | if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS) |
2789 | return -E2BIG; | 2729 | return -E2BIG; |
2790 | 2730 | ||
2791 | /* 'struct verifier_env' can be global, but since it's not small, | 2731 | /* 'struct bpf_verifier_env' can be global, but since it's not small, |
2792 | * allocate/free it every time bpf_check() is called | 2732 | * allocate/free it every time bpf_check() is called |
2793 | */ | 2733 | */ |
2794 | env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL); | 2734 | env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); |
2795 | if (!env) | 2735 | if (!env) |
2796 | return -ENOMEM; | 2736 | return -ENOMEM; |
2797 | 2737 | ||
@@ -2833,7 +2773,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) | |||
2833 | goto skip_full_check; | 2773 | goto skip_full_check; |
2834 | 2774 | ||
2835 | env->explored_states = kcalloc(env->prog->len, | 2775 | env->explored_states = kcalloc(env->prog->len, |
2836 | sizeof(struct verifier_state_list *), | 2776 | sizeof(struct bpf_verifier_state_list *), |
2837 | GFP_USER); | 2777 | GFP_USER); |
2838 | ret = -ENOMEM; | 2778 | ret = -ENOMEM; |
2839 | if (!env->explored_states) | 2779 | if (!env->explored_states) |