-rw-r--r--   include/linux/filter.h                      |  10
-rw-r--r--   kernel/bpf/arraymap.c                       |   1
-rw-r--r--   kernel/bpf/lpm_trie.c                       |   1
-rw-r--r--   kernel/bpf/stackmap.c                       |   1
-rw-r--r--   kernel/bpf/verifier.c                       |  56
-rw-r--r--   net/core/filter.c                           |   1
-rw-r--r--   tools/include/linux/filter.h                |  10
-rw-r--r--   tools/testing/selftests/bpf/test_verifier.c | 239
8 files changed, 285 insertions, 34 deletions
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 56197f82af45..62d948f80730 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -272,6 +272,16 @@ struct bpf_prog_aux;
 		.off   = OFF,					\
 		.imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)						\
+	((struct bpf_insn) {					\
+		.code  = BPF_JMP | BPF_JA,			\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)					\
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 5e00b2333c26..172dc8ee0e3b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -86,6 +86,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array->map.key_size = attr->key_size;
 	array->map.value_size = attr->value_size;
 	array->map.max_entries = attr->max_entries;
+	array->map.map_flags = attr->map_flags;
 	array->elem_size = elem_size;
 
 	if (!percpu)
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 39cfafd895b8..b09185f0f17d 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -432,6 +432,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 	trie->map.key_size = attr->key_size;
 	trie->map.value_size = attr->value_size;
 	trie->map.max_entries = attr->max_entries;
+	trie->map.map_flags = attr->map_flags;
 	trie->data_size = attr->key_size -
 			  offsetof(struct bpf_lpm_trie_key, data);
 	trie->max_prefixlen = trie->data_size * 8;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 4dfd6f2ec2f9..31147d730abf 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -88,6 +88,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	smap->map.key_size = attr->key_size;
 	smap->map.value_size = value_size;
 	smap->map.max_entries = attr->max_entries;
+	smap->map.map_flags = attr->map_flags;
 	smap->n_buckets = n_buckets;
 	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c72cd41f5b8b..339c8a1371de 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -463,19 +463,22 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
 };
 
+static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno)
+{
+	BUG_ON(regno >= MAX_BPF_REG);
+
+	memset(&regs[regno], 0, sizeof(regs[regno]));
+	regs[regno].type = NOT_INIT;
+	regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
+	regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
+}
+
 static void init_reg_state(struct bpf_reg_state *regs)
 {
 	int i;
 
-	for (i = 0; i < MAX_BPF_REG; i++) {
-		regs[i].type = NOT_INIT;
-		regs[i].imm = 0;
-		regs[i].min_value = BPF_REGISTER_MIN_RANGE;
-		regs[i].max_value = BPF_REGISTER_MAX_RANGE;
-		regs[i].min_align = 0;
-		regs[i].aux_off = 0;
-		regs[i].aux_off_align = 0;
-	}
+	for (i = 0; i < MAX_BPF_REG; i++)
+		mark_reg_not_init(regs, i);
 
 	/* frame pointer */
 	regs[BPF_REG_FP].type = FRAME_PTR;
@@ -843,9 +846,6 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 {
 	bool strict = env->strict_alignment;
 
-	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
-		strict = true;
-
 	switch (reg->type) {
 	case PTR_TO_PACKET:
 		return check_pkt_ptr_alignment(reg, off, size, strict);
@@ -1349,7 +1349,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	struct bpf_verifier_state *state = &env->cur_state;
 	const struct bpf_func_proto *fn = NULL;
 	struct bpf_reg_state *regs = state->regs;
-	struct bpf_reg_state *reg;
 	struct bpf_call_arg_meta meta;
 	bool changes_data;
 	int i, err;
@@ -1416,11 +1415,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	}
 
 	/* reset caller saved regs */
-	for (i = 0; i < CALLER_SAVED_REGS; i++) {
-		reg = regs + caller_saved[i];
-		reg->type = NOT_INIT;
-		reg->imm = 0;
-	}
+	for (i = 0; i < CALLER_SAVED_REGS; i++)
+		mark_reg_not_init(regs, caller_saved[i]);
 
 	/* update return register */
 	if (fn->ret_type == RET_INTEGER) {
@@ -2448,7 +2444,6 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 {
 	struct bpf_reg_state *regs = env->cur_state.regs;
 	u8 mode = BPF_MODE(insn->code);
-	struct bpf_reg_state *reg;
 	int i, err;
 
 	if (!may_access_skb(env->prog->type)) {
@@ -2481,11 +2476,8 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	}
 
 	/* reset caller saved regs to unreadable */
-	for (i = 0; i < CALLER_SAVED_REGS; i++) {
-		reg = regs + caller_saved[i];
-		reg->type = NOT_INIT;
-		reg->imm = 0;
-	}
+	for (i = 0; i < CALLER_SAVED_REGS; i++)
+		mark_reg_not_init(regs, caller_saved[i]);
 
 	/* mark destination R0 register as readable, since it contains
 	 * the value fetched from the packet
@@ -2696,7 +2688,8 @@ err_free:
 /* the following conditions reduce the number of explored insns
  * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
  */
-static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
+static bool compare_ptrs_to_packet(struct bpf_verifier_env *env,
+				   struct bpf_reg_state *old,
 				   struct bpf_reg_state *cur)
 {
 	if (old->id != cur->id)
@@ -2739,7 +2732,7 @@ static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
 	 * 'if (R4 > data_end)' and all further insn were already good with r=20,
 	 * so they will be good with r=30 and we can prune the search.
 	 */
-	if (old->off <= cur->off &&
+	if (!env->strict_alignment && old->off <= cur->off &&
 	    old->off >= old->range && cur->off >= cur->range)
 		return true;
 
@@ -2810,7 +2803,7 @@ static bool states_equal(struct bpf_verifier_env *env,
 			continue;
 
 		if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
-		    compare_ptrs_to_packet(rold, rcur))
+		    compare_ptrs_to_packet(env, rold, rcur))
 			continue;
 
 		return false;
@@ -3588,10 +3581,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	} else {
 		log_level = 0;
 	}
-	if (attr->prog_flags & BPF_F_STRICT_ALIGNMENT)
+
+	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
 		env->strict_alignment = true;
-	else
-		env->strict_alignment = false;
 
 	ret = replace_map_fd_with_map_ptr(env);
 	if (ret < 0)
@@ -3697,7 +3690,10 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
 	mutex_lock(&bpf_verifier_lock);
 
 	log_level = 0;
+
 	env->strict_alignment = false;
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+		env->strict_alignment = true;
 
 	env->explored_states = kcalloc(env->prog->len,
 				       sizeof(struct bpf_verifier_state_list *),
diff --git a/net/core/filter.c b/net/core/filter.c
index a253a6197e6b..a6bb95fa87b2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2281,6 +2281,7 @@ bool bpf_helper_changes_pkt_data(void *func)
 	       func == bpf_skb_change_head ||
 	       func == bpf_skb_change_tail ||
 	       func == bpf_skb_pull_data ||
+	       func == bpf_clone_redirect ||
 	       func == bpf_l3_csum_replace ||
 	       func == bpf_l4_csum_replace ||
 	       func == bpf_xdp_adjust_head)
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index 390d7c9685fd..4ce25d43e8e3 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -208,6 +208,16 @@
 		.off   = OFF,					\
 		.imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)						\
+	((struct bpf_insn) {					\
+		.code  = BPF_JMP | BPF_JA,			\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)					\
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 3773562056da..cabb19b1e371 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -49,6 +49,7 @@
 #define MAX_NR_MAPS	4
 
 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
+#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
 
 struct bpf_test {
 	const char *descr;
@@ -2615,6 +2616,30 @@ static struct bpf_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
 	{
+		"direct packet access: test17 (pruning, alignment)",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_A(-6),
+		},
+		.errstr = "misaligned packet access off 2+15+-4 size 4",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+	},
+	{
 		"helper access to packet: test1, valid packet_ptr range",
 		.insns = {
 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -3341,6 +3366,70 @@ static struct bpf_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
 	},
 	{
+		"alu ops on ptr_to_map_value_or_null, 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS
+	},
+	{
+		"alu ops on ptr_to_map_value_or_null, 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS
+	},
+	{
+		"alu ops on ptr_to_map_value_or_null, 3",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS
+	},
+	{
 		"invalid memory access with multiple map_lookup_elem calls",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_1, 10),
@@ -4937,7 +5026,149 @@ static struct bpf_test tests[] = {
 		.fixup_map_in_map = { 3 },
 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
 		.result = REJECT,
-	}
+	},
+	{
+		"ld_abs: check calling conv, r1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R1 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R2 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R3 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R4 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R5 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r7",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_7, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"ld_ind: check calling conv, r1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R1 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R2 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_3, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R3 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_4, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R4 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R5 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r7",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_7, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -5059,9 +5290,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 
 	do_test_fixup(test, prog, map_fds);
 
-	fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
-				   prog, prog_len, "GPL", 0, bpf_vlog,
-				   sizeof(bpf_vlog));
+	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
+				     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
+				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog));
 
 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
 		       test->result_unpriv : test->result;
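
Usage note: the F_LOAD_WITH_STRICT_ALIGNMENT flag above ends up requesting BPF_F_STRICT_ALIGNMENT at program-load time, which the verifier change latches into env->strict_alignment. Below is a minimal standalone sketch (not part of the patch; it only assumes the BPF_PROG_LOAD UAPI and the BPF_F_STRICT_ALIGNMENT flag from this kernel series) showing roughly what such a load looks like through the raw bpf(2) syscall:

/* Sketch only: load "r0 = 0; exit" with strict alignment checking requested.
 * Needs uapi headers that define BPF_F_STRICT_ALIGNMENT and usually root.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
	char log_buf[4096] = { 0 };
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type  = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns      = (__u64)(unsigned long)insns;
	attr.insn_cnt   = sizeof(insns) / sizeof(insns[0]);
	attr.license    = (__u64)(unsigned long)"GPL";
	attr.log_buf    = (__u64)(unsigned long)log_buf;
	attr.log_size   = sizeof(log_buf);
	attr.log_level  = 1;
	/* The flag the verifier now copies into env->strict_alignment. */
	attr.prog_flags = BPF_F_STRICT_ALIGNMENT;

	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd < 0) {
		fprintf(stderr, "load failed, verifier log:\n%s\n", log_buf);
		return 1;
	}
	close(fd);
	return 0;
}

With the flag set, a program like the "direct packet access: test17 (pruning, alignment)" selftest is expected to be rejected with the misaligned-access error even on architectures with CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.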