authorJann Horn <jannh@google.com>2017-12-18 23:12:01 -0500
committerDaniel Borkmann <daniel@iogearbox.net>2017-12-20 20:15:41 -0500
commit2255f8d520b0a318fc6d387d0940854b2f522a7f (patch)
tree117e4805d7777db771898351ebdc71994371d9e3
parentbb7f0f989ca7de1153bd128a40a71709e339fa03 (diff)
selftests/bpf: add tests for recent bugfixes
These tests should cover the following cases:

 - MOV with both zero-extended and sign-extended immediates
 - implicit truncation of register contents via ALU32/MOV32
 - implicit 32-bit truncation of ALU32 output
 - oversized register source operand for ALU32 shift
 - right-shift of a number that could be positive or negative
 - map access where adding the operation size to the offset causes signed
   32-bit overflow
 - direct stack access at a ~4GiB offset

Also remove the F_LOAD_WITH_STRICT_ALIGNMENT flag from a bunch of tests
that should fail independent of what flags userspace passes.

Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
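For context on the first two cases above: a BPF instruction's immediate is a signed 32-bit field, so BPF_MOV64_IMM(reg, 0xffffffff) sign-extends to 0xffff'ffff'ffff'ffff in the 64-bit register, while BPF_MOV32_IMM (like any ALU32 result) zero-extends to 0x0000'0000'ffff'ffff. A minimal userspace C sketch of that distinction, illustrative only and not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The new tests encode the immediate 0xffffffff, i.e. -1 as an s32. */
	int32_t imm = -1;

	/* BPF_MOV64_IMM: the s32 immediate is sign-extended into 64 bits. */
	uint64_t mov64 = (uint64_t)(int64_t)imm;	/* 0xffffffffffffffff */

	/* BPF_MOV32_IMM / any ALU32 result: the upper 32 bits are zeroed. */
	uint64_t mov32 = (uint32_t)imm;			/* 0x00000000ffffffff */

	printf("sign-extended MOV64: %#llx\n", (unsigned long long)mov64);
	printf("zero-extended MOV32: %#llx\n", (unsigned long long)mov32);
	return 0;
}

Shifting each value right by 32, as the first two new tests do, yields 0xffffffff in the sign-extended case but 0 in the zero-extended one, which is why only the zero-extended program is safe to accept.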
-rw-r--r--	tools/testing/selftests/bpf/test_verifier.c	549
1 file changed, 533 insertions(+), 16 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index b03ecfd7185b..961c1426fbf2 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -606,7 +606,6 @@ static struct bpf_test tests[] = {
 		},
 		.errstr = "misaligned stack access",
 		.result = REJECT,
-		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
 	},
 	{
 		"invalid map_fd for function call",
@@ -1797,7 +1796,6 @@ static struct bpf_test tests[] = {
 		},
 		.result = REJECT,
 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
-		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
 	},
 	{
 		"PTR_TO_STACK store/load - bad alignment on reg",
@@ -1810,7 +1808,6 @@ static struct bpf_test tests[] = {
 		},
 		.result = REJECT,
 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
-		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
 	},
 	{
 		"PTR_TO_STACK store/load - out of bounds low",
@@ -6324,7 +6321,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6348,7 +6345,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6374,7 +6371,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R8 invalid mem access 'inv'",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6399,7 +6396,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R8 invalid mem access 'inv'",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6447,7 +6444,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6518,7 +6515,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6569,7 +6566,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6596,7 +6593,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6622,7 +6619,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6651,7 +6648,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6681,7 +6678,7 @@ static struct bpf_test tests[] = {
 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
 		},
 		.fixup_map1 = { 4 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6709,8 +6706,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr_unpriv = "R0 pointer comparison prohibited",
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 		.result_unpriv = REJECT,
 	},
@@ -6766,6 +6762,462 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 	},
 	{
+		"bounds check based on zero-extended MOV",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			/* r2 = 0x0000'0000'ffff'ffff */
+			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+			/* r2 = 0 */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+			/* no-op */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+			/* access at offset 0 */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = ACCEPT
+	},
+	{
+		"bounds check based on sign-extended MOV. test1",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			/* r2 = 0xffff'ffff'ffff'ffff */
+			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+			/* r2 = 0xffff'ffff */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+			/* r0 = <oob pointer> */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+			/* access to OOB pointer */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "map_value pointer and 4294967295",
+		.result = REJECT
+	},
+	{
+		"bounds check based on sign-extended MOV. test2",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			/* r2 = 0xffff'ffff'ffff'ffff */
+			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+			/* r2 = 0xfff'ffff */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+			/* r0 = <oob pointer> */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+			/* access to OOB pointer */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "R0 min value is outside of the array range",
+		.result = REJECT
+	},
+	{
+		"bounds check based on reg_off + var_off + insn_off. test1",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "value_size=8 off=1073741825",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"bounds check based on reg_off + var_off + insn_off. test2",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "value 1073741823",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"bounds check after truncation of non-boundary-crossing range",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			/* r1 = [0x00, 0xff] */
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
+			/* r2 = 0x10'0000'0000 */
+			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+			/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+			/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+			/* r1 = [0x00, 0xff] */
+			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+			/* r1 = 0 */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+			/* no-op */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* access at offset 0 */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = ACCEPT
+	},
+	{
+		"bounds check after truncation of boundary-crossing range (1)",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			/* r1 = [0x00, 0xff] */
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0xffff'ff80, 0xffff'ffff] or
+			 * [0x0000'0000, 0x0000'007f]
+			 */
+			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0x00, 0xff] or
+			 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+			 */
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = 0 or
+			 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+			 */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+			/* no-op or OOB pointer computation */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* potentially OOB access */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		/* not actually fully unbounded, but the bound is very high */
+		.errstr = "R0 unbounded memory access",
+		.result = REJECT
+	},
+	{
+		"bounds check after truncation of boundary-crossing range (2)",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			/* r1 = [0x00, 0xff] */
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0xffff'ff80, 0xffff'ffff] or
+			 * [0x0000'0000, 0x0000'007f]
+			 * difference to previous test: truncation via MOV32
+			 * instead of ALU32.
+			 */
+			BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0x00, 0xff] or
+			 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+			 */
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = 0 or
+			 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+			 */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+			/* no-op or OOB pointer computation */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* potentially OOB access */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		/* not actually fully unbounded, but the bound is very high */
+		.errstr = "R0 unbounded memory access",
+		.result = REJECT
+	},
+	{
+		"bounds check after wrapping 32-bit addition",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+			/* r1 = 0x7fff'ffff */
+			BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+			/* r1 = 0xffff'fffe */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+			/* r1 = 0 */
+			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+			/* no-op */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* access at offset 0 */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = ACCEPT
+	},
+	{
+		"bounds check after shift with oversized count operand",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_MOV64_IMM(BPF_REG_2, 32),
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			/* r1 = (u32)1 << (u32)32 = ? */
+			BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+			/* r1 = [0x0000, 0xffff] */
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+			/* computes unknown pointer, potentially OOB */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* potentially OOB access */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "R0 max value is outside of the array range",
+		.result = REJECT
+	},
+	{
+		"bounds check after right shift of maybe-negative number",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			/* r1 = [0x00, 0xff] */
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			/* r1 = [-0x01, 0xfe] */
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+			/* r1 = 0 or 0xff'ffff'ffff'ffff */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+			/* r1 = 0 or 0xffff'ffff'ffff */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+			/* computes unknown pointer, potentially OOB */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* potentially OOB access */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "R0 unbounded memory access",
+		.result = REJECT
+	},
+	{
+		"bounds check map access with off+size signed 32bit overflow. test1",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_JMP_A(0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "map_value pointer and 2147483646",
+		.result = REJECT
+	},
+	{
+		"bounds check map access with off+size signed 32bit overflow. test2",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_JMP_A(0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "pointer offset 1073741822",
+		.result = REJECT
+	},
+	{
+		"bounds check map access with off+size signed 32bit overflow. test3",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+			BPF_JMP_A(0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "pointer offset -1073741822",
+		.result = REJECT
+	},
+	{
+		"bounds check map access with off+size signed 32bit overflow. test4",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_1, 1000000),
+			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+			BPF_JMP_A(0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "map_value pointer and 1000000000000",
+		.result = REJECT
+	},
+	{
+		"pointer/scalar confusion in state equality check (way 1)",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_JMP_A(1),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+			BPF_JMP_A(0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 leaks addr as return value"
+	},
+	{
+		"pointer/scalar confusion in state equality check (way 2)",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+			BPF_JMP_A(1),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 leaks addr as return value"
+	},
+	{
 		"variable-offset ctx access",
 		.insns = {
 			/* Get an unknown value */
@@ -6807,6 +7259,71 @@ static struct bpf_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_LWT_IN,
 	},
 	{
+		"indirect variable-offset stack access",
+		.insns = {
+			/* Fill the top 8 bytes of the stack */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			/* Get an unknown value */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+			/* Make it small and 4-byte aligned */
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+			/* add it to fp. We now have either fp-4 or fp-8, but
+			 * we don't know which
+			 */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+			/* dereference it indirectly */
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 5 },
+		.errstr = "variable stack read R2",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_LWT_IN,
+	},
+	{
+		"direct stack access with 32-bit wraparound. test1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+			BPF_MOV32_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_EXIT_INSN()
+		},
+		.errstr = "fp pointer and 2147483647",
+		.result = REJECT
+	},
+	{
+		"direct stack access with 32-bit wraparound. test2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+			BPF_MOV32_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_EXIT_INSN()
+		},
+		.errstr = "fp pointer and 1073741823",
+		.result = REJECT
+	},
+	{
+		"direct stack access with 32-bit wraparound. test3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+			BPF_MOV32_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_EXIT_INSN()
+		},
+		.errstr = "fp pointer offset 1073741822",
+		.result = REJECT
+	},
+	{
 		"liveness pruning and write screening",
 		.insns = {
 			/* Get an unknown value */