aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@kernel.org>2017-12-15 12:19:36 -0500
committerAlexei Starovoitov <ast@kernel.org>2017-12-15 12:19:37 -0500
commitc1b08ebe5003ae291470cb6e26923628ab19606f (patch)
treed6536ec187e003bb6a7c87cc8acdef20e16bf6c2
parent2d17d8d79e77ff3f1b35b87522fc72fa562260ff (diff)
parent87ab8194303e73af2898e9e1c8b3b9bcfe91e7a9 (diff)
Merge branch 'bpf-jit-fixes'
Daniel Borkmann says: ==================== Two fixes that deal with buggy usage of bpf_helper_changes_pkt_data() in the sense that they also reload cached skb data when there's no skb context but an xdp one, for example. Also a fix where skb meta data is reloaded out of the wrong register on helper call; the rest is test cases and making sure on the verifier side that there's always the guarantee that ctx sits in r1. Thanks! ==================== Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c6
-rw-r--r--arch/s390/net/bpf_jit_comp.c11
-rw-r--r--arch/sparc/net/bpf_jit_comp_64.c6
-rw-r--r--kernel/bpf/verifier.c6
-rw-r--r--lib/test_bpf.c43
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c24
6 files changed, 86 insertions, 10 deletions
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 46d74e81aff1..d183b4801bdb 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -763,7 +763,8 @@ emit_clear:
763 func = (u8 *) __bpf_call_base + imm; 763 func = (u8 *) __bpf_call_base + imm;
764 764
765 /* Save skb pointer if we need to re-cache skb data */ 765 /* Save skb pointer if we need to re-cache skb data */
766 if (bpf_helper_changes_pkt_data(func)) 766 if ((ctx->seen & SEEN_SKB) &&
767 bpf_helper_changes_pkt_data(func))
767 PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx)); 768 PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
768 769
769 bpf_jit_emit_func_call(image, ctx, (u64)func); 770 bpf_jit_emit_func_call(image, ctx, (u64)func);
@@ -772,7 +773,8 @@ emit_clear:
772 PPC_MR(b2p[BPF_REG_0], 3); 773 PPC_MR(b2p[BPF_REG_0], 3);
773 774
774 /* refresh skb cache */ 775 /* refresh skb cache */
775 if (bpf_helper_changes_pkt_data(func)) { 776 if ((ctx->seen & SEEN_SKB) &&
777 bpf_helper_changes_pkt_data(func)) {
776 /* reload skb pointer to r3 */ 778 /* reload skb pointer to r3 */
777 PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx)); 779 PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
778 bpf_jit_emit_skb_loads(image, ctx); 780 bpf_jit_emit_skb_loads(image, ctx);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e81c16838b90..9557d8b516df 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -55,8 +55,7 @@ struct bpf_jit {
55#define SEEN_LITERAL 8 /* code uses literals */ 55#define SEEN_LITERAL 8 /* code uses literals */
56#define SEEN_FUNC 16 /* calls C functions */ 56#define SEEN_FUNC 16 /* calls C functions */
57#define SEEN_TAIL_CALL 32 /* code uses tail calls */ 57#define SEEN_TAIL_CALL 32 /* code uses tail calls */
58#define SEEN_SKB_CHANGE 64 /* code changes skb data */ 58#define SEEN_REG_AX 64 /* code uses constant blinding */
59#define SEEN_REG_AX 128 /* code uses constant blinding */
60#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB) 59#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
61 60
62/* 61/*
@@ -448,12 +447,12 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
448 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, 447 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
449 REG_15, 152); 448 REG_15, 152);
450 } 449 }
451 if (jit->seen & SEEN_SKB) 450 if (jit->seen & SEEN_SKB) {
452 emit_load_skb_data_hlen(jit); 451 emit_load_skb_data_hlen(jit);
453 if (jit->seen & SEEN_SKB_CHANGE)
454 /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ 452 /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
455 EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15, 453 EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
456 STK_OFF_SKBP); 454 STK_OFF_SKBP);
455 }
457} 456}
458 457
459/* 458/*
@@ -983,8 +982,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
983 EMIT2(0x0d00, REG_14, REG_W1); 982 EMIT2(0x0d00, REG_14, REG_W1);
984 /* lgr %b0,%r2: load return value into %b0 */ 983 /* lgr %b0,%r2: load return value into %b0 */
985 EMIT4(0xb9040000, BPF_REG_0, REG_2); 984 EMIT4(0xb9040000, BPF_REG_0, REG_2);
986 if (bpf_helper_changes_pkt_data((void *)func)) { 985 if ((jit->seen & SEEN_SKB) &&
987 jit->seen |= SEEN_SKB_CHANGE; 986 bpf_helper_changes_pkt_data((void *)func)) {
988 /* lg %b1,ST_OFF_SKBP(%r15) */ 987 /* lg %b1,ST_OFF_SKBP(%r15) */
989 EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0, 988 EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
990 REG_15, STK_OFF_SKBP); 989 REG_15, STK_OFF_SKBP);
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 5765e7e711f7..ff5f9cb3039a 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1245,14 +1245,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1245 u8 *func = ((u8 *)__bpf_call_base) + imm; 1245 u8 *func = ((u8 *)__bpf_call_base) + imm;
1246 1246
1247 ctx->saw_call = true; 1247 ctx->saw_call = true;
1248 if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
1249 emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx);
1248 1250
1249 emit_call((u32 *)func, ctx); 1251 emit_call((u32 *)func, ctx);
1250 emit_nop(ctx); 1252 emit_nop(ctx);
1251 1253
1252 emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx); 1254 emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
1253 1255
1254 if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind) 1256 if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
1255 load_skb_regs(ctx, bpf2sparc[BPF_REG_6]); 1257 load_skb_regs(ctx, L7);
1256 break; 1258 break;
1257 } 1259 }
1258 1260
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d4593571c404..e39b01317b6f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1674,7 +1674,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1674 return -EINVAL; 1674 return -EINVAL;
1675 } 1675 }
1676 1676
1677 /* With LD_ABS/IND some JITs save/restore skb from r1. */
1677 changes_data = bpf_helper_changes_pkt_data(fn->func); 1678 changes_data = bpf_helper_changes_pkt_data(fn->func);
1679 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
1680 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
1681 func_id_name(func_id), func_id);
1682 return -EINVAL;
1683 }
1678 1684
1679 memset(&meta, 0, sizeof(meta)); 1685 memset(&meta, 0, sizeof(meta));
1680 meta.pkt_access = fn->pkt_access; 1686 meta.pkt_access = fn->pkt_access;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index aa8812ae6776..9e9748089270 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -435,6 +435,41 @@ loop:
435 return 0; 435 return 0;
436} 436}
437 437
438static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
439{
440 struct bpf_insn *insn;
441
442 insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
443 if (!insn)
444 return -ENOMEM;
445
446 /* Due to func address being non-const, we need to
447 * assemble this here.
448 */
449 insn[0] = BPF_MOV64_REG(R6, R1);
450 insn[1] = BPF_LD_ABS(BPF_B, 0);
451 insn[2] = BPF_LD_ABS(BPF_H, 0);
452 insn[3] = BPF_LD_ABS(BPF_W, 0);
453 insn[4] = BPF_MOV64_REG(R7, R6);
454 insn[5] = BPF_MOV64_IMM(R6, 0);
455 insn[6] = BPF_MOV64_REG(R1, R7);
456 insn[7] = BPF_MOV64_IMM(R2, 1);
457 insn[8] = BPF_MOV64_IMM(R3, 2);
458 insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
459 bpf_skb_vlan_push_proto.func - __bpf_call_base);
460 insn[10] = BPF_MOV64_REG(R6, R7);
461 insn[11] = BPF_LD_ABS(BPF_B, 0);
462 insn[12] = BPF_LD_ABS(BPF_H, 0);
463 insn[13] = BPF_LD_ABS(BPF_W, 0);
464 insn[14] = BPF_MOV64_IMM(R0, 42);
465 insn[15] = BPF_EXIT_INSN();
466
467 self->u.ptr.insns = insn;
468 self->u.ptr.len = 16;
469
470 return 0;
471}
472
438static int bpf_fill_jump_around_ld_abs(struct bpf_test *self) 473static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
439{ 474{
440 unsigned int len = BPF_MAXINSNS; 475 unsigned int len = BPF_MAXINSNS;
@@ -6066,6 +6101,14 @@ static struct bpf_test tests[] = {
6066 {}, 6101 {},
6067 { {0x1, 0x42 } }, 6102 { {0x1, 0x42 } },
6068 }, 6103 },
6104 {
6105 "LD_ABS with helper changing skb data",
6106 { },
6107 INTERNAL,
6108 { 0x34 },
6109 { { ETH_HLEN, 42 } },
6110 .fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
6111 },
6069}; 6112};
6070 6113
6071static struct net_device dev; 6114static struct net_device dev;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 3c64f30cf63c..b03ecfd7185b 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -6117,6 +6117,30 @@ static struct bpf_test tests[] = {
6117 .result = ACCEPT, 6117 .result = ACCEPT,
6118 }, 6118 },
6119 { 6119 {
6120 "ld_abs: tests on r6 and skb data reload helper",
6121 .insns = {
6122 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6123 BPF_LD_ABS(BPF_B, 0),
6124 BPF_LD_ABS(BPF_H, 0),
6125 BPF_LD_ABS(BPF_W, 0),
6126 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
6127 BPF_MOV64_IMM(BPF_REG_6, 0),
6128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
6129 BPF_MOV64_IMM(BPF_REG_2, 1),
6130 BPF_MOV64_IMM(BPF_REG_3, 2),
6131 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6132 BPF_FUNC_skb_vlan_push),
6133 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
6134 BPF_LD_ABS(BPF_B, 0),
6135 BPF_LD_ABS(BPF_H, 0),
6136 BPF_LD_ABS(BPF_W, 0),
6137 BPF_MOV64_IMM(BPF_REG_0, 42),
6138 BPF_EXIT_INSN(),
6139 },
6140 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6141 .result = ACCEPT,
6142 },
6143 {
6120 "ld_ind: check calling conv, r1", 6144 "ld_ind: check calling conv, r1",
6121 .insns = { 6145 .insns = {
6122 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6146 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),