about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Alexei Starovoitov <ast@kernel.org>	2018-05-15 12:27:05 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2018-05-19 14:44:24 -0400
commitaf86ca4e3088fe5eacf2f7e58c01fa68ca067672 (patch)
treed01711e5fe7b1674c2929c473ead8047001ba886
parent240da953fcc6a9008c92fae5b1f727ee5ed167ab (diff)
bpf: Prevent memory disambiguation attack
Detect code patterns where malicious 'speculative store bypass' can be used
and sanitize such patterns.

 39: (bf) r3 = r10
 40: (07) r3 += -216
 41: (79) r8 = *(u64 *)(r7 +0)   // slow read
 42: (7a) *(u64 *)(r10 -72) = 0  // verifier inserts this instruction
 43: (7b) *(u64 *)(r8 +0) = r3   // this store becomes slow due to r8
 44: (79) r1 = *(u64 *)(r6 +0)   // cpu speculatively executes this load
 45: (71) r2 = *(u8 *)(r1 +0)    // speculatively arbitrary 'load byte'
                                 // is now sanitized

Above code after x86 JIT becomes:

 e5: mov    %rbp,%rdx
 e8: add    $0xffffffffffffff28,%rdx
 ef: mov    0x0(%r13),%r14
 f3: movq   $0x0,-0x48(%rbp)
 fb: mov    %rdx,0x0(%r14)
 ff: mov    0x0(%rbx),%rdi
103: movzbq 0x0(%rdi),%rsi

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--include/linux/bpf_verifier.h1
-rw-r--r--kernel/bpf/verifier.c59
2 files changed, 57 insertions, 3 deletions
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7e61c395fddf..65cfc2f59db9 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -146,6 +146,7 @@ struct bpf_insn_aux_data {
146 s32 call_imm; /* saved imm field of call insn */ 146 s32 call_imm; /* saved imm field of call insn */
147 }; 147 };
148 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ 148 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
149 int sanitize_stack_off; /* stack slot to be cleared */
149 bool seen; /* this insn was processed by the verifier */ 150 bool seen; /* this insn was processed by the verifier */
150}; 151};
151 152
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5dd1dcb902bf..2ce967a63ede 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -978,7 +978,7 @@ static bool register_is_null(struct bpf_reg_state *reg)
978 */ 978 */
979static int check_stack_write(struct bpf_verifier_env *env, 979static int check_stack_write(struct bpf_verifier_env *env,
980 struct bpf_func_state *state, /* func where register points to */ 980 struct bpf_func_state *state, /* func where register points to */
981 int off, int size, int value_regno) 981 int off, int size, int value_regno, int insn_idx)
982{ 982{
983 struct bpf_func_state *cur; /* state of the current function */ 983 struct bpf_func_state *cur; /* state of the current function */
984 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 984 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
@@ -1017,8 +1017,33 @@ static int check_stack_write(struct bpf_verifier_env *env,
1017 state->stack[spi].spilled_ptr = cur->regs[value_regno]; 1017 state->stack[spi].spilled_ptr = cur->regs[value_regno];
1018 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 1018 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1019 1019
1020 for (i = 0; i < BPF_REG_SIZE; i++) 1020 for (i = 0; i < BPF_REG_SIZE; i++) {
1021 if (state->stack[spi].slot_type[i] == STACK_MISC &&
1022 !env->allow_ptr_leaks) {
1023 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
1024 int soff = (-spi - 1) * BPF_REG_SIZE;
1025
1026 /* detected reuse of integer stack slot with a pointer
1027 * which means either llvm is reusing stack slot or
1028 * an attacker is trying to exploit CVE-2018-3639
1029 * (speculative store bypass)
1030 * Have to sanitize that slot with preemptive
1031 * store of zero.
1032 */
1033 if (*poff && *poff != soff) {
1034 /* disallow programs where single insn stores
1035 * into two different stack slots, since verifier
1036 * cannot sanitize them
1037 */
1038 verbose(env,
1039 "insn %d cannot access two stack slots fp%d and fp%d",
1040 insn_idx, *poff, soff);
1041 return -EINVAL;
1042 }
1043 *poff = soff;
1044 }
1021 state->stack[spi].slot_type[i] = STACK_SPILL; 1045 state->stack[spi].slot_type[i] = STACK_SPILL;
1046 }
1022 } else { 1047 } else {
1023 u8 type = STACK_MISC; 1048 u8 type = STACK_MISC;
1024 1049
@@ -1694,7 +1719,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1694 1719
1695 if (t == BPF_WRITE) 1720 if (t == BPF_WRITE)
1696 err = check_stack_write(env, state, off, size, 1721 err = check_stack_write(env, state, off, size,
1697 value_regno); 1722 value_regno, insn_idx);
1698 else 1723 else
1699 err = check_stack_read(env, state, off, size, 1724 err = check_stack_read(env, state, off, size,
1700 value_regno); 1725 value_regno);
@@ -5169,6 +5194,34 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
5169 else 5194 else
5170 continue; 5195 continue;
5171 5196
5197 if (type == BPF_WRITE &&
5198 env->insn_aux_data[i + delta].sanitize_stack_off) {
5199 struct bpf_insn patch[] = {
5200 /* Sanitize suspicious stack slot with zero.
5201 * There are no memory dependencies for this store,
5202 * since it's only using frame pointer and immediate
5203 * constant of zero
5204 */
5205 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
5206 env->insn_aux_data[i + delta].sanitize_stack_off,
5207 0),
5208 /* the original STX instruction will immediately
5209 * overwrite the same stack slot with appropriate value
5210 */
5211 *insn,
5212 };
5213
5214 cnt = ARRAY_SIZE(patch);
5215 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
5216 if (!new_prog)
5217 return -ENOMEM;
5218
5219 delta += cnt - 1;
5220 env->prog = new_prog;
5221 insn = new_prog->insnsi + i + delta;
5222 continue;
5223 }
5224
5172 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) 5225 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
5173 continue; 5226 continue;
5174 5227