author    David S. Miller <davem@davemloft.net>  2018-05-25 15:37:41 -0400
committer David S. Miller <davem@davemloft.net>  2018-05-25 15:37:41 -0400
commit    d2f30f5172603bacaf34f0fdb021c25ad1915b05 (patch)
tree      a113e2dca477fa059d1e9fd2fd6cb010909df506
parent    24e4b075d899e5376dfa39fecd1dbc12bddc1e98 (diff)
parent    c93552c443ebc63b14e26e46d2e76941c88e0d71 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2018-05-24

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a bug in the original fix to prevent out of bounds speculation when
   multiple tail call maps from different branches or calls end up at the
   same tail call helper invocation, from Daniel.

2) Two selftest fixes, one in reuseport_bpf_numa where the test is skipped
   in case of missing numa support, and another one to update the kernel
   config to properly support the xdp_meta.sh test, from Anders.

...

Would be great if you have a chance to merge net into net-next after that.
The verifier fix would be needed later as a dependency in bpf-next for
upcoming work there. When you do the merge there's a trivial conflict on
BPF side with 849fa50662fb ("bpf/verifier: refine retval R0 state for
bpf_get_stack helper"): resolution is to keep both functions,
do_refine_retval_range() and record_func_map().
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
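For context on change 1): the pattern the verifier fix has to handle is a single bpf_tail_call() call site that different verification paths reach with different map pointers. A minimal sketch of that shape, not from this series; the map names, section names, and the selftests-style bpf_helpers.h header are illustrative assumptions, and whether the compiler actually keeps one call site depends on optimization:

  #include <linux/bpf.h>
  #include "bpf_helpers.h"        /* selftests-style SEC() and bpf_tail_call() */

  /* Two distinct prog-array maps; names are illustrative. */
  struct bpf_map_def SEC("maps") jmp_a = {
          .type           = BPF_MAP_TYPE_PROG_ARRAY,
          .key_size       = sizeof(__u32),
          .value_size     = sizeof(__u32),
          .max_entries    = 4,
  };

  struct bpf_map_def SEC("maps") jmp_b = {
          .type           = BPF_MAP_TYPE_PROG_ARRAY,
          .key_size       = sizeof(__u32),
          .value_size     = sizeof(__u32),
          .max_entries    = 8,
  };

  SEC("classifier")
  int tc_prog(struct __sk_buff *skb)
  {
          /* One call site, two possible maps: each verification path sees
           * a different constant map pointer at the same instruction.
           */
          void *map = skb->len > 128 ? (void *)&jmp_a : (void *)&jmp_b;

          bpf_tail_call(skb, map, 0);
          return 0;
  }

Before this fix, the map pointer recorded per instruction could be silently overwritten when a second map reached the same call site, so the speculation-masking instructions could be emitted for the wrong map or skipped entirely.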
 include/linux/bpf_verifier.h                     |  2
 kernel/bpf/verifier.c                            | 86
 tools/testing/selftests/bpf/config               |  2
 tools/testing/selftests/net/reuseport_bpf_numa.c |  4
 4 files changed, 70 insertions(+), 24 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7e61c395fddf..52fb077d3c45 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -142,7 +142,7 @@ struct bpf_verifier_state_list {
 struct bpf_insn_aux_data {
         union {
                 enum bpf_reg_type ptr_type;     /* pointer type for load/store insns */
-                struct bpf_map *map_ptr;        /* pointer for call insn into lookup_elem */
+                unsigned long map_state;        /* pointer/poison value for maps */
                 s32 call_imm;                   /* saved imm field of call insn */
         };
         int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5dd1dcb902bf..dcebf3f7365c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -156,7 +156,29 @@ struct bpf_verifier_stack_elem {
 #define BPF_COMPLEXITY_LIMIT_INSNS      131072
 #define BPF_COMPLEXITY_LIMIT_STACK      1024

-#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
+#define BPF_MAP_PTR_UNPRIV      1UL
+#define BPF_MAP_PTR_POISON      ((void *)((0xeB9FUL << 1) +     \
+                                          POISON_POINTER_DELTA))
+#define BPF_MAP_PTR(X)          ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
+
+static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
+{
+        return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
+}
+
+static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
+{
+        return aux->map_state & BPF_MAP_PTR_UNPRIV;
+}
+
+static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
+                              const struct bpf_map *map, bool unpriv)
+{
+        BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
+        unpriv |= bpf_map_ptr_unpriv(aux);
+        aux->map_state = (unsigned long)map |
+                         (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
+}

 struct bpf_call_arg_meta {
         struct bpf_map *map_ptr;
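The helpers above pack two facts into the single insn_aux_data word: the map pointer itself and, in bit 0 (which pointer alignment keeps clear, as the BUILD_BUG_ON asserts for the poison value), whether any program path reaching the instruction was unprivileged. A standalone userspace sketch of the same low-bit pointer-tagging idiom; names (tag_ptr, PTR_FLAG) are illustrative, and it assumes the pointee is at least 2-byte aligned:

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  #define PTR_FLAG 1UL /* stored in bit 0, which alignment keeps free */

  struct map { int max_entries; };

  /* Pack a pointer plus one boolean flag into a single word. */
  static uintptr_t tag_ptr(struct map *p, int flag)
  {
          assert(((uintptr_t)p & PTR_FLAG) == 0); /* bit 0 must be clear */
          return (uintptr_t)p | (flag ? PTR_FLAG : 0);
  }

  static struct map *untag_ptr(uintptr_t v)
  {
          return (struct map *)(v & ~PTR_FLAG);  /* mask the flag back off */
  }

  static int tag_flag(uintptr_t v)
  {
          return v & PTR_FLAG;
  }

  int main(void)
  {
          struct map m = { .max_entries = 4 };
          uintptr_t state = tag_ptr(&m, 1);

          printf("map=%p flag=%d entries=%d\n",
                 (void *)untag_ptr(state), tag_flag(state),
                 untag_ptr(state)->max_entries);
          return 0;
  }

Sticky OR-ing of the flag in bpf_map_ptr_store() mirrors the verifier's need: once any path is unprivileged, the instruction must be treated as unprivileged for good.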
@@ -2333,6 +2355,29 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
         return 0;
 }

+static int
+record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
+                int func_id, int insn_idx)
+{
+        struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
+
+        if (func_id != BPF_FUNC_tail_call &&
+            func_id != BPF_FUNC_map_lookup_elem)
+                return 0;
+        if (meta->map_ptr == NULL) {
+                verbose(env, "kernel subsystem misconfigured verifier\n");
+                return -EINVAL;
+        }
+
+        if (!BPF_MAP_PTR(aux->map_state))
+                bpf_map_ptr_store(aux, meta->map_ptr,
+                                  meta->map_ptr->unpriv_array);
+        else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
+                bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
+                                  meta->map_ptr->unpriv_array);
+        return 0;
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 {
         const struct bpf_func_proto *fn = NULL;
@@ -2387,13 +2432,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
         err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
         if (err)
                 return err;
-        if (func_id == BPF_FUNC_tail_call) {
-                if (meta.map_ptr == NULL) {
-                        verbose(env, "verifier bug\n");
-                        return -EINVAL;
-                }
-                env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
-        }
         err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
         if (err)
                 return err;
@@ -2404,6 +2442,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
         if (err)
                 return err;

+        err = record_func_map(env, &meta, func_id, insn_idx);
+        if (err)
+                return err;
+
         /* Mark slots with STACK_MISC in case of raw mode, stack offset
          * is inferred from register state.
          */
@@ -2428,8 +2470,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
         } else if (fn->ret_type == RET_VOID) {
                 regs[BPF_REG_0].type = NOT_INIT;
         } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
-                struct bpf_insn_aux_data *insn_aux;
-
                 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
                 /* There is no offset yet applied, variable or fixed */
                 mark_reg_known_zero(env, regs, BPF_REG_0);
@@ -2445,11 +2485,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                 }
                 regs[BPF_REG_0].map_ptr = meta.map_ptr;
                 regs[BPF_REG_0].id = ++env->id_gen;
-                insn_aux = &env->insn_aux_data[insn_idx];
-                if (!insn_aux->map_ptr)
-                        insn_aux->map_ptr = meta.map_ptr;
-                else if (insn_aux->map_ptr != meta.map_ptr)
-                        insn_aux->map_ptr = BPF_MAP_PTR_POISON;
         } else {
                 verbose(env, "unknown return type %d of func %s#%d\n",
                         fn->ret_type, func_id_name(func_id), func_id);
@@ -5417,6 +5452,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
         struct bpf_insn *insn = prog->insnsi;
         const struct bpf_func_proto *fn;
         const int insn_cnt = prog->len;
+        struct bpf_insn_aux_data *aux;
         struct bpf_insn insn_buf[16];
         struct bpf_prog *new_prog;
         struct bpf_map *map_ptr;
@@ -5491,19 +5527,22 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                         insn->imm = 0;
                         insn->code = BPF_JMP | BPF_TAIL_CALL;

+                        aux = &env->insn_aux_data[i + delta];
+                        if (!bpf_map_ptr_unpriv(aux))
+                                continue;
+
                         /* instead of changing every JIT dealing with tail_call
                          * emit two extra insns:
                          * if (index >= max_entries) goto out;
                          * index &= array->index_mask;
                          * to avoid out-of-bounds cpu speculation
                          */
-                        map_ptr = env->insn_aux_data[i + delta].map_ptr;
-                        if (map_ptr == BPF_MAP_PTR_POISON) {
+                        if (bpf_map_ptr_poisoned(aux)) {
                                 verbose(env, "tail_call abusing map_ptr\n");
                                 return -EINVAL;
                         }
-                        if (!map_ptr->unpriv_array)
-                                continue;
+
+                        map_ptr = BPF_MAP_PTR(aux->map_state);
                         insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
                                                   map_ptr->max_entries, 2);
                         insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
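The two insn_buf entries above patch in the classic bounds-check-plus-mask sequence before the tail call, so that even a mispredicted index >= max_entries branch cannot drive an out-of-bounds speculative load. A rough C sketch of the emitted logic, not the actual JIT output; it assumes index_mask is the prog-array's next-power-of-two-of-max_entries minus one:

  #include <stdint.h>

  /* Sketch of the two instructions the verifier emits ahead of a tail
   * call when any unprivileged program reaches it.
   */
  static inline int64_t masked_tail_call_index(uint32_t index,
                                               uint32_t max_entries,
                                               uint32_t index_mask)
  {
          if (index >= max_entries)  /* insn_buf[0]: BPF_JGE skips the call */
                  return -1;         /* out: the tail call never happens */
          index &= index_mask;       /* insn_buf[1]: BPF_AND bounds the index
                                      * even on a speculative path */
          return index;
  }

The unconditional AND is what makes the sequence speculation-safe: the mask applies regardless of how the preceding branch is predicted.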
@@ -5527,9 +5566,12 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                  */
                 if (prog->jit_requested && BITS_PER_LONG == 64 &&
                     insn->imm == BPF_FUNC_map_lookup_elem) {
-                        map_ptr = env->insn_aux_data[i + delta].map_ptr;
-                        if (map_ptr == BPF_MAP_PTR_POISON ||
-                            !map_ptr->ops->map_gen_lookup)
+                        aux = &env->insn_aux_data[i + delta];
+                        if (bpf_map_ptr_poisoned(aux))
+                                goto patch_call_imm;
+
+                        map_ptr = BPF_MAP_PTR(aux->map_state);
+                        if (!map_ptr->ops->map_gen_lookup)
                                 goto patch_call_imm;

                         cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 983dd25d49f4..1eefe211a4a8 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -5,3 +5,5 @@ CONFIG_BPF_EVENTS=y
 CONFIG_TEST_BPF=m
 CONFIG_CGROUP_BPF=y
 CONFIG_NETDEVSIM=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_SCH_INGRESS=y
diff --git a/tools/testing/selftests/net/reuseport_bpf_numa.c b/tools/testing/selftests/net/reuseport_bpf_numa.c
index 365c32e84189..c9f478b40996 100644
--- a/tools/testing/selftests/net/reuseport_bpf_numa.c
+++ b/tools/testing/selftests/net/reuseport_bpf_numa.c
@@ -23,6 +23,8 @@
 #include <unistd.h>
 #include <numa.h>

+#include "../kselftest.h"
+
 static const int PORT = 8888;

 static void build_rcv_group(int *rcv_fd, size_t len, int family, int proto)
@@ -229,7 +231,7 @@ int main(void)
         int *rcv_fd, nodes;

         if (numa_available() < 0)
-                error(1, errno, "no numa api support");
+                ksft_exit_skip("no numa api support\n");

         nodes = numa_max_node() + 1;
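The reuseport_bpf_numa change swaps a hard error(1, errno, ...) for ksft_exit_skip() from tools/testing/selftests/kselftest.h, which prints the message and exits with the kselftest skip code so harnesses report the run as skipped rather than failed. A minimal sketch of the same pattern for any capability-gated selftest; feature_available() is a hypothetical stand-in, and the include path assumes the test sits one directory below kselftest.h as in the selftests tree:

  #include <stdbool.h>

  #include "../kselftest.h"  /* ksft_exit_skip(), ksft_exit_pass() */

  /* Hypothetical stand-in for any runtime capability probe. */
  static bool feature_available(void)
  {
          return false;  /* pretend the capability is missing */
  }

  int main(void)
  {
          if (!feature_available())
                  ksft_exit_skip("feature not supported\n");

          /* ... actual test body would run here ... */
          return ksft_exit_pass();
  }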
235 237