author     David S. Miller <davem@davemloft.net>   2009-02-03 01:08:15 -0500
committer  David S. Miller <davem@davemloft.net>   2009-02-03 01:08:15 -0500
commit     eeabac7386ca13bfe1a58afeb04326a9e1a3a20e (patch)
tree       9e15ca81917df4c980b29c54091f909bdc868138 /arch/sparc
parent     802c64b310e5b9dfda6cb50b850b962ed96a9e81 (diff)
sparc64: Validate kernel generated fault addresses on sparc64.
In order to handle all of the cases of address calculation overflow properly, we run sparc 32-bit processes in "address masking" mode when running on a 64-bit kernel. Address masking mode zeros out the top 32 bits of the address calculated for every load and store instruction.

However, when we're in privileged mode we have to run with that address masking mode disabled, even when accessing userspace from the kernel. To "simulate" the address masking mode we clear the top bits by hand for 32-bit processes in the fault handler.

It is the responsibility of code in the compat layer to properly zero extend addresses used to access userspace. If this isn't followed properly we can get into a fault loop.

Say that the user address is 0xf0000000, but for whatever reason the kernel code sign extends this to 64 bits, and then the kernel tries to access the result. In such a case we'll fault on address 0xfffffffff0000000, but the fault handler will process that fault as if it were to address 0xf0000000. We'll loop faulting forever because the fault never gets satisfied.

So add a check specifically for this case: when the kernel is faulting on a user address access and the addresses don't match up. This code path is sufficiently slow, and this bug is sufficiently painful to diagnose, that this kind of bug check is warranted.

Signed-off-by: David S. Miller <davem@davemloft.net>
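For illustration only (this sketch is not part of the patch): a minimal userspace C program for a 64-bit target, showing how a sign extension instead of a zero extension turns the 32-bit user address above into the mismatched 64-bit fault address:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t uaddr = 0xf0000000u;		/* 32-bit user address */

	/* Correct: zero extension keeps the upper 32 bits clear. */
	uint64_t zero_ext = (uint64_t)uaddr;

	/* Buggy: routing the value through a signed 32-bit type first
	 * sign extends it, smearing bit 31 across the upper half.
	 */
	uint64_t sign_ext = (uint64_t)(int32_t)uaddr;

	printf("zero extended: 0x%016" PRIx64 "\n", zero_ext);	/* 0x00000000f0000000 */
	printf("sign extended: 0x%016" PRIx64 "\n", sign_ext);	/* 0xfffffffff0000000 */
	return 0;
}

The kernel faults on the sign-extended address, but the handler masks the fault address back down to 0xf0000000 for the 32-bit task, so the two never agree and the fault loops.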
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/mm/fault_64.c  |  25  ++++++++++++++++++-------
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index a9e474bf6385..1a786abdada3 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
+#include <linux/percpu.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -244,8 +245,14 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	    (fault_code & FAULT_CODE_DTLB))
 		BUG();
 
+	if (test_thread_flag(TIF_32BIT)) {
+		if (!(regs->tstate & TSTATE_PRIV))
+			regs->tpc &= 0xffffffff;
+		address &= 0xffffffff;
+	}
+
 	if (regs->tstate & TSTATE_PRIV) {
-		unsigned long tpc = regs->tpc;
+		unsigned long eaddr, tpc = regs->tpc;
 
 		/* Sanity check the PC. */
 		if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
@@ -255,6 +262,16 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			bad_kernel_pc(regs, address);
 			return;
 		}
+
+		insn = get_fault_insn(regs, insn);
+		eaddr = compute_effective_address(regs, insn, 0);
+		if (WARN_ON_ONCE((eaddr & PAGE_MASK) != (address & PAGE_MASK))) {
+			printk(KERN_ERR "FAULT: Mismatch kernel fault "
+			       "address: addr[%lx] eaddr[%lx] TPC[%lx]\n",
+			       address, eaddr, tpc);
+			show_regs(regs);
+			goto handle_kernel_fault;
+		}
 	}
 
 	/*
@@ -264,12 +281,6 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	if (in_atomic() || !mm)
 		goto intr_or_no_mm;
 
-	if (test_thread_flag(TIF_32BIT)) {
-		if (!(regs->tstate & TSTATE_PRIV))
-			regs->tpc &= 0xffffffff;
-		address &= 0xffffffff;
-	}
-
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		if ((regs->tstate & TSTATE_PRIV) &&
 		    !search_exception_tables(regs->tpc)) {
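To make the new check concrete, here is a minimal standalone sketch; the function name is hypothetical and PAGE_MASK is defined the way the kernel derives it from sparc64's 8K pages, while the real patch performs this comparison inline under WARN_ON_ONCE:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT	13			/* sparc64 uses 8K pages */
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Hypothetical standalone rendering of the added check: the fault
 * address reported to the handler and the effective address recomputed
 * from the faulting instruction must fall in the same page; if they
 * don't, the kernel generated a bogus (e.g. sign-extended) userspace
 * address and retrying the fault would loop forever.
 */
bool kernel_fault_address_matches(uint64_t address, uint64_t eaddr)
{
	return (eaddr & PAGE_MASK) == (address & PAGE_MASK);
}

In the looping scenario from the commit message, the masked fault address (0xf0000000) and the recomputed effective address (0xfffffffff0000000) land in different pages, so the check fires and the handler logs the mismatch and jumps to handle_kernel_fault instead of retrying forever.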