author      Peter Maydell <peter.maydell@linaro.org>    2018-05-22 12:11:20 -0400
committer   Will Deacon <will.deacon@arm.com>           2018-05-22 12:14:20 -0400
commit      cc19846079a70abcfd91b5a0791a5f17d69458a5 (patch)
tree        691577dde2aa215a0794f710b10d626f5bd8d656
parent      255845fc43a3aaf806852a1d3bc89bff1411ebe3 (diff)
arm64: fault: Don't leak data in ESR context for user fault on kernel VA
If userspace faults on a kernel address, handing them the raw ESR value on the sigframe as part of the delivered signal can leak data useful to attackers who are using information about the underlying hardware fault type (e.g. translation vs permission) as a mechanism to defeat KASLR. However there are also legitimate uses for the information provided in the ESR -- notably the GCC and LLVM sanitizers use this to report whether wild pointer accesses by the application are reads or writes (since a wild write is a more serious bug than a wild read), so we don't want to drop the ESR information entirely.

For faulting addresses in the kernel, sanitize the ESR. We choose to present userspace with the illusion that there is nothing mapped in the kernel's part of the address space at all, by reporting all faults as level 0 translation faults taken to EL1.

These fields are safe to pass through to userspace as they depend only on the instruction that userspace used to provoke the fault:
 EC IL (always)
 ISV CM WNR (for all data aborts)

All the other fields in ESR except DFSC are architecturally RES0 for an L0 translation fault taken to EL1, so can be zeroed out without confusing userspace.

The illusion is not entirely perfect, as there is a tiny wrinkle where we will report an alignment fault that was not due to the memory type (for instance a LDREX to an unaligned address) as a translation fault, whereas if you do this on real unmapped memory the alignment fault takes precedence. This is not likely to trip anybody up in practice, as the only users we know of for the ESR information who care about the behaviour for kernel addresses only really want to know about the WnR bit.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
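For context on the legitimate use described above, here is a minimal userspace sketch, not part of this patch: a SIGSEGV handler that walks the arm64 sigframe records for the ESR and uses the WnR bit to tell a wild read from a wild write. The struct layouts and the ESR_MAGIC value mirror the uapi <asm/sigcontext.h>; the names ctx_hdr/esr_rec and the faulting address used in main() are purely illustrative, and the bit positions assumed here (EC at ESR[31:26], WnR at ISS bit 6) are architectural.

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#ifndef ESR_MAGIC
#define ESR_MAGIC 0x45535201            /* value from uapi asm/sigcontext.h */
#endif

struct ctx_hdr { uint32_t magic; uint32_t size; };      /* mirrors _aarch64_ctx */
struct esr_rec { struct ctx_hdr head; uint64_t esr; };  /* mirrors esr_context  */

static void segv_handler(int sig, siginfo_t *si, void *uc_raw)
{
        ucontext_t *uc = uc_raw;
        struct ctx_hdr *ctx = (struct ctx_hdr *)uc->uc_mcontext.__reserved;
        uint64_t esr = 0;

        (void)sig;
        /* The sigframe record list ends with a header whose magic is 0. */
        while (ctx->magic != 0) {
                if (ctx->magic == ESR_MAGIC) {
                        esr = ((struct esr_rec *)ctx)->esr;
                        break;
                }
                ctx = (struct ctx_hdr *)((uint8_t *)ctx + ctx->size);
        }

        /* EC 0x24 = data abort from a lower EL; WnR is ISS bit 6.
         * (fprintf in a signal handler is for illustration only.) */
        if (((esr >> 26) & 0x3f) == 0x24)
                fprintf(stderr, "wild %s at %p (esr=0x%llx)\n",
                        (esr & (1u << 6)) ? "write" : "read",
                        si->si_addr, (unsigned long long)esr);
        _Exit(1);
}

int main(void)
{
        struct sigaction sa = { .sa_sigaction = segv_handler,
                                .sa_flags = SA_SIGINFO };

        sigaction(SIGSEGV, &sa, NULL);
        *(volatile int *)0xffff000000000000ULL = 1;     /* fault on a kernel VA */
        return 0;
}

With this patch applied, a handler like this still sees EC and WnR for a fault on a kernel address, but the DFSC always reads as a level 0 translation fault, so nothing about the kernel's actual mappings is revealed.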
-rw-r--r--    arch/arm64/mm/fault.c    51
1 files changed, 51 insertions, 0 deletions
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 4165485e8b6e..2af3dd89bcdb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -293,6 +293,57 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
 static void __do_user_fault(struct siginfo *info, unsigned int esr)
 {
         current->thread.fault_address = (unsigned long)info->si_addr;
+
+        /*
+         * If the faulting address is in the kernel, we must sanitize the ESR.
+         * From userspace's point of view, kernel-only mappings don't exist
+         * at all, so we report them as level 0 translation faults.
+         * (This is not quite the way that "no mapping there at all" behaves:
+         * an alignment fault not caused by the memory type would take
+         * precedence over translation fault for a real access to empty
+         * space. Unfortunately we can't easily distinguish "alignment fault
+         * not caused by memory type" from "alignment fault caused by memory
+         * type", so we ignore this wrinkle and just return the translation
+         * fault.)
+         */
+        if (current->thread.fault_address >= TASK_SIZE) {
+                switch (ESR_ELx_EC(esr)) {
+                case ESR_ELx_EC_DABT_LOW:
+                        /*
+                         * These bits provide only information about the
+                         * faulting instruction, which userspace knows already.
+                         * We explicitly clear bits which are architecturally
+                         * RES0 in case they are given meanings in future.
+                         * We always report the ESR as if the fault was taken
+                         * to EL1 and so ISV and the bits in ISS[23:14] are
+                         * clear. (In fact it always will be a fault to EL1.)
+                         */
+                        esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
+                               ESR_ELx_CM | ESR_ELx_WNR;
+                        esr |= ESR_ELx_FSC_FAULT;
+                        break;
+                case ESR_ELx_EC_IABT_LOW:
+                        /*
+                         * Claim a level 0 translation fault.
+                         * All other bits are architecturally RES0 for faults
+                         * reported with that DFSC value, so we clear them.
+                         */
+                        esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
+                        esr |= ESR_ELx_FSC_FAULT;
+                        break;
+                default:
+                        /*
+                         * This should never happen (entry.S only brings us
+                         * into this code for insn and data aborts from a lower
+                         * exception level). Fail safe by not providing an ESR
+                         * context record at all.
+                         */
+                        WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
+                        esr = 0;
+                        break;
+                }
+        }
+
         current->thread.fault_code = esr;
         arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current);
 }
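As a worked example of the masking above (a standalone sketch, not part of the patch: the ESR_ELx_* values are copied here from arch/arm64/include/asm/esr.h, and the sample DFSC of a level 1 permission fault is made up for illustration):

/* Standalone sketch: mirrors the masking done in __do_user_fault() above.
 * Constant values copied from arch/arm64/include/asm/esr.h. */
#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_MASK   (0x3fu << 26)  /* EC, bits [31:26] */
#define ESR_ELx_IL        (1u << 25)     /* instruction length */
#define ESR_ELx_CM        (1u << 8)      /* cache maintenance */
#define ESR_ELx_WNR       (1u << 6)      /* write-not-read */
#define ESR_ELx_FSC_FAULT 0x04u          /* level 0 translation fault */

int main(void)
{
        /* Sample raw ESR: EC=0x24 (data abort from EL0), IL=1, WnR=1,
         * DFSC=0x0d (level 1 permission fault), i.e. a write that hit
         * an existing kernel mapping. */
        uint32_t raw = (0x24u << 26) | ESR_ELx_IL | ESR_ELx_WNR | 0x0du;
        uint32_t sanitized = raw & (ESR_ELx_EC_MASK | ESR_ELx_IL |
                                    ESR_ELx_CM | ESR_ELx_WNR);

        sanitized |= ESR_ELx_FSC_FAULT;
        /* Prints: raw=0x9200004d sanitized=0x92000044 */
        printf("raw=0x%08x sanitized=0x%08x\n", raw, sanitized);
        return 0;
}

The sanitized value keeps EC, IL and WnR but replaces the permission-fault DFSC, which would have revealed that a kernel mapping exists, with the level 0 translation fault code.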