author     Alexei Starovoitov <ast@kernel.org>	2019-01-06 00:32:39 -0500
committer  Alexei Starovoitov <ast@kernel.org>	2019-01-06 00:32:39 -0500
commit     97274b6126193cca2b820579f5d758589a2badc2 (patch)
tree       576851fb11b2246f784da4ef3716fd441037ceaf
parent     466f89e9ec8c6868131c2d2ba9cd5f536879c42a (diff)
parent     1cbbcfbbd56efd994d643428c69467fe3c8ab672 (diff)
Merge branch 'reject-ptr-scalar-mix'
Daniel Borkmann says:
====================
Follow-up fix to 979d63d50c0c ("bpf: prevent out of bounds speculation
on pointer arithmetic") in order to reject a corner case for sanitation
when ptr / scalars are mixed in the same alu op.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
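
The rejected corner case is a single ALU instruction that two program paths funnel into, with a map value pointer in the destination register on one path and a plain scalar on the other. Condensed from the first selftest added below (map setup and lookup omitted), the pattern looks roughly like this:

  /* r0 = map value pointer from map_lookup_elem(), r1 = byte loaded from the value */
  BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
  /* branch A: r2 = value pointer, r3 = 0 */
  BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  BPF_MOV64_IMM(BPF_REG_3, 0),
  BPF_JMP_A(2),
  /* branch B: r2 and r3 are plain scalars */
  BPF_MOV64_IMM(BPF_REG_2, 0),
  BPF_MOV64_IMM(BPF_REG_3, 0x100000),
  /* shared instruction: ptr += scalar on path A, scalar += scalar on path B */
  BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),

With this fix, the shared BPF_ADD records conflicting sanitation state for the two paths, so unprivileged programs are rejected with -EACCES ("R2 tried to add from different pointers or scalars"), while privileged loads (.result = ACCEPT) are unaffected.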
-rw-r--r--   include/linux/bpf_verifier.h                 |   1
-rw-r--r--   kernel/bpf/verifier.c                        |  61
-rw-r--r--   tools/testing/selftests/bpf/test_verifier.c  | 120
3 files changed, 169 insertions(+), 13 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 27b74947cd2b..573cca00a0e6 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -172,6 +172,7 @@ struct bpf_verifier_state_list {
 #define BPF_ALU_SANITIZE_SRC		1U
 #define BPF_ALU_SANITIZE_DST		2U
 #define BPF_ALU_NEG_VALUE		(1U << 2)
+#define BPF_ALU_NON_POINTER		(1U << 3)
 #define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
 					 BPF_ALU_SANITIZE_DST)
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f6bc62a9ee8e..56674a7c3778 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3103,6 +3103,40 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
 	}
 }
 
+static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
+				    const struct bpf_insn *insn)
+{
+	return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
+}
+
+static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
+				       u32 alu_state, u32 alu_limit)
+{
+	/* If we arrived here from different branches with different
+	 * state or limits to sanitize, then this won't work.
+	 */
+	if (aux->alu_state &&
+	    (aux->alu_state != alu_state ||
+	     aux->alu_limit != alu_limit))
+		return -EACCES;
+
+	/* Corresponding fixup done in fixup_bpf_calls(). */
+	aux->alu_state = alu_state;
+	aux->alu_limit = alu_limit;
+	return 0;
+}
+
+static int sanitize_val_alu(struct bpf_verifier_env *env,
+			    struct bpf_insn *insn)
+{
+	struct bpf_insn_aux_data *aux = cur_aux(env);
+
+	if (can_skip_alu_sanitation(env, insn))
+		return 0;
+
+	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+}
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 			    struct bpf_insn *insn,
 			    const struct bpf_reg_state *ptr_reg,
@@ -3117,7 +3151,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 	struct bpf_reg_state tmp;
 	bool ret;
 
-	if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
+	if (can_skip_alu_sanitation(env, insn))
 		return 0;
 
 	/* We already marked aux for masking from non-speculative
@@ -3133,19 +3167,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 
 	if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
 		return 0;
-
-	/* If we arrived here from different branches with different
-	 * limits to sanitize, then this won't work.
-	 */
-	if (aux->alu_state &&
-	    (aux->alu_state != alu_state ||
-	     aux->alu_limit != alu_limit))
+	if (update_alu_sanitation_state(aux, alu_state, alu_limit))
 		return -EACCES;
-
-	/* Corresponding fixup done in fixup_bpf_calls(). */
-	aux->alu_state = alu_state;
-	aux->alu_limit = alu_limit;
-
 do_sim:
 	/* Simulate and find potential out-of-bounds access under
 	 * speculative execution from truncation as a result of
@@ -3418,6 +3441,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	s64 smin_val, smax_val;
 	u64 umin_val, umax_val;
 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+	u32 dst = insn->dst_reg;
+	int ret;
 
 	if (insn_bitness == 32) {
 		/* Relevant for 32-bit RSH: Information can propagate towards
@@ -3452,6 +3477,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 
 	switch (opcode) {
 	case BPF_ADD:
+		ret = sanitize_val_alu(env, insn);
+		if (ret < 0) {
+			verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
+			return ret;
+		}
 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
 			dst_reg->smin_value = S64_MIN;
@@ -3471,6 +3501,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
 		break;
 	case BPF_SUB:
+		ret = sanitize_val_alu(env, insn);
+		if (ret < 0) {
+			verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
+			return ret;
+		}
 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
 			/* Overflow possible, we know nothing */
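
For illustration only, here is a minimal, self-contained userspace sketch (hypothetical, not kernel code) that mirrors the update_alu_sanitation_state() helper factored out above. It shows why the same instruction, reached with one (state, limit) pair from the first verified path and a different pair from the second, is rejected with -EACCES; the struct, the helper's callers, and the limit value 47 are stand-ins chosen for the demo:

/* Hypothetical userspace sketch, not kernel code: mirrors the
 * update_alu_sanitation_state() logic from the hunk above.
 */
#include <errno.h>
#include <stdio.h>

#define BPF_ALU_SANITIZE_SRC	1U
#define BPF_ALU_NON_POINTER	(1U << 3)

/* Stand-in for the alu_state/alu_limit fields of struct bpf_insn_aux_data. */
struct insn_aux {
	unsigned int alu_state;
	unsigned int alu_limit;
};

static int update_alu_sanitation_state(struct insn_aux *aux,
				       unsigned int alu_state,
				       unsigned int alu_limit)
{
	/* A second path reaching the same instruction must agree on both
	 * the sanitation state and the masking limit.
	 */
	if (aux->alu_state &&
	    (aux->alu_state != alu_state || aux->alu_limit != alu_limit))
		return -EACCES;

	aux->alu_state = alu_state;
	aux->alu_limit = alu_limit;
	return 0;
}

int main(void)
{
	struct insn_aux aux = { 0, 0 };

	/* Path A: the shared ADD saw a pointer; 47 is an arbitrary example limit. */
	printf("path A: %d\n", update_alu_sanitation_state(&aux, BPF_ALU_SANITIZE_SRC, 47));
	/* Path B: the same ADD saw only scalars -> mismatch, returns -EACCES (-13). */
	printf("path B: %d\n", update_alu_sanitation_state(&aux, BPF_ALU_NON_POINTER, 0));
	return 0;
}

In the real verifier, path A's state comes from sanitize_ptr_alu() with a limit derived via retrieve_ptr_limit(), while path B's comes from the new sanitize_val_alu() with BPF_ALU_NON_POINTER, so a pointer path and a scalar path can never agree and the program is rejected.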
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 10d44446e801..2fd90d456892 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -6934,6 +6934,126 @@ static struct bpf_test tests[] = {
 		.retval = 1,
 	},
 	{
+		"map access: mixing value pointer and scalar, 1",
+		.insns = {
+			// load map value pointer into r0 and r2
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			// load some number from the map into r1
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
+			// branch A
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_JMP_A(2),
+			// branch B
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+			// common instruction
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+			// branch A
+			BPF_JMP_A(4),
+			// branch B
+			BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+			// verifier follows fall-through
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			// fake-dead code; targeted from branch A to
+			// prevent dead code sanitization
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 1 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R2 tried to add from different pointers or scalars",
+		.retval = 0,
+	},
+	{
+		"map access: mixing value pointer and scalar, 2",
+		.insns = {
+			// load map value pointer into r0 and r2
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			// load some number from the map into r1
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+			// branch A
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+			BPF_JMP_A(2),
+			// branch B
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			// common instruction
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+			// branch A
+			BPF_JMP_A(4),
+			// branch B
+			BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+			// verifier follows fall-through
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			// fake-dead code; targeted from branch A to
+			// prevent dead code sanitization
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 1 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R2 tried to add from different maps or paths",
+		.retval = 0,
+	},
+	{
+		"sanitation: alu with different scalars",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+			BPF_JMP_A(2),
+			BPF_MOV64_IMM(BPF_REG_2, 42),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100001),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 1 },
+		.result = ACCEPT,
+		.retval = 0x100000,
+	},
+	{
 		"map access: value_ptr += known scalar, upper oob arith, test 1",
 		.insns = {
 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),