author    Alexei Starovoitov <ast@kernel.org>  2018-07-19 19:08:07 -0400
committer Alexei Starovoitov <ast@kernel.org>  2018-07-19 19:08:07 -0400
commit    bb3928673955b423db270982ff7dad19a3e22d9c
tree      edddd22c72b270f167cd13371e29b95c97f661a3
parent    f39f28ff82c14b4f628973d9bba835195a71d437
parent    fa47a16b04ad896ea9c10d4fa0caf47dcf47cd00
Merge branch 'ppc-fix'
Daniel Borkmann says:
====================
This set adds a ppc64 JIT fix for xadd as well as a missing test
case for verifying whether xadd messes with src/dst reg. Thanks!
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c            | 29
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c  | 40
2 files changed, 45 insertions, 24 deletions
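
For context, BPF_STX | BPF_XADD is BPF's atomic add to memory: *(u32 *)(dst_reg + off) += src_reg for BPF_W and *(u64 *)(dst_reg + off) += src_reg for BPF_DW (the /* *(u64 *)(dst + off) += src */ comment in the JIT below). It is purely a side effect on memory; no BPF register, in particular not r0, may be modified by it. A minimal C sketch of the required semantics for the doubleword case (the helper name and the compiler builtin are illustrative, not kernel code):

#include <stdint.h>

/* Intended effect of BPF_STX | BPF_XADD | BPF_DW: atomically add src
 * into *(u64 *)(dst + off).  No BPF register may change, and r0 in
 * particular must keep whatever value the program put there.
 */
static inline void bpf_xadd_dw(uint8_t *dst, int16_t off, uint64_t src)
{
	__atomic_fetch_add((uint64_t *)(dst + off), src, __ATOMIC_RELAXED);
}

The ppc64 JIT implements this with a lwarx/stwcx. (ldarx/stdcx. for the doubleword case) sequence, which is where the fix below applies.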
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 380cbf9a40d9..c0a9bcd28356 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -286,6 +286,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		u64 imm64;
 		u8 *func;
 		u32 true_cond;
+		u32 tmp_idx;
 
 		/*
 		 * addrs[] maps a BPF bytecode address into a real offset from
@@ -637,11 +638,7 @@ emit_clear:
 		case BPF_STX | BPF_XADD | BPF_W:
 			/* Get EA into TMP_REG_1 */
 			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not word-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
+			tmp_idx = ctx->idx * 4;
 			/* load value from memory into TMP_REG_2 */
 			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 			/* add value from src_reg into this */
@@ -649,32 +646,16 @@ emit_clear:
 			/* store result back */
 			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
 			/* we're done if this succeeded */
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-			/* otherwise, let's try once more */
-			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			/* exit if the store was not successful */
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			break;
 		/* *(u64 *)(dst + off) += src */
 		case BPF_STX | BPF_XADD | BPF_DW:
 			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not doubleword-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
-			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+			tmp_idx = ctx->idx * 4;
 			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
 			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			break;
 
 		/*
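
In the removed sequences above, a misaligned effective address, or a second failed stwcx./stdcx., ran into PPC_LI(b2p[BPF_REG_0], 0) followed by a branch to exit_addr, i.e. the program silently terminated with r0 = 0 even though BPF_XADD must not touch r0 at all. The new code records the byte offset of the lwarx/ldarx in tmp_idx and, when the store-conditional fails (COND_NE), simply branches back there, turning the emission into an ordinary load-reserve/store-conditional retry loop. A rough C equivalent of that control flow (the function name and the GCC builtins are illustrative only, not the emitted code):

#include <stdint.h>

/* Word-sized xadd as a retry loop: reload on a failed store-conditional
 * and try again until it sticks, never touching any other register.
 */
static void xadd_w_loop(uint32_t *ea, uint32_t src)
{
	uint32_t old = __atomic_load_n(ea, __ATOMIC_RELAXED);	/* lwarx */

	/* stwcx. failed: 'old' is refreshed, recompute and retry */
	while (!__atomic_compare_exchange_n(ea, &old, old + src,
					    1 /* weak */,
					    __ATOMIC_RELAXED,
					    __ATOMIC_RELAXED))
		;
}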
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index f5f7bcc96046..41106d9d5cc7 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -12005,6 +12005,46 @@ static struct bpf_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_XDP,
 	},
 	{
+		"xadd/w check whether src/dst got mangled, 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.retval = 3,
+	},
+	{
+		"xadd/w check whether src/dst got mangled, 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.retval = 3,
+	},
+	{
 		"bpf_get_stack return R0 within range",
 		.insns = {
 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
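
The two new tests exercise exactly the fixed path: r0 starts at 1 and is copied to r6, r10 (the frame pointer) is copied to r7, 1 is stored to the stack slot at fp-8, and the slot is then xadd-ed twice with r0. If afterwards r6 != r0 or r7 != r10 (i.e. xadd mangled src or dst), the program returns 42; otherwise it returns the slot value, so the expected retval is 1 + 1 + 1 = 3. A small host-side C model of that arithmetic (the variable names are made up for illustration; the real cases run as part of tools/testing/selftests/bpf/test_verifier):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t r0 = 1, slot;

	slot = r0;		/* BPF_STX_MEM: initial store of 1   */
	slot += r0;		/* BPF_STX_XADD: first atomic add    */
	slot += r0;		/* BPF_STX_XADD: second atomic add   */
	assert(slot == 3);	/* matches .retval = 3 in the tests  */
	return 0;
}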