about summary refs log tree commit diff stats
path: root/arch/x86/mm/fault_32.c
diff options
context:
space:
mode:
authorHarvey Harrison <harvey.harrison@gmail.com>2008-01-30 07:33:12 -0500
committerIngo Molnar <mingo@elte.hu>2008-01-30 07:33:12 -0500
commit608566b4edda5079c7812c2108a89c0fcf2894bb (patch)
tree4926ecc581c767eee053a2d74f84b832d9ea4948 /arch/x86/mm/fault_32.c
parentf2857ce92023409df1544737d5b3499b4630a183 (diff)
x86: do_page_fault small unification
Copy the prefetch of map_sem from X86_64 and move the check notify_page_fault (soon to be kprobe_handle_fault) out of the unlikely if() statement. This makes the X86_32|64 pagefault handlers closer to each other. Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/fault_32.c')
-rw-r--r--arch/x86/mm/fault_32.c16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index 36cb67e02b04..52c13d2e011e 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -295,13 +295,18 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
295 */ 295 */
296 trace_hardirqs_fixup(); 296 trace_hardirqs_fixup();
297 297
298 tsk = current;
299 mm = tsk->mm;
300 prefetchw(&mm->mmap_sem);
301
298 /* get the address */ 302 /* get the address */
299 address = read_cr2(); 303 address = read_cr2();
300 304
301 tsk = current;
302
303 si_code = SEGV_MAPERR; 305 si_code = SEGV_MAPERR;
304 306
307 if (notify_page_fault(regs))
308 return;
309
305 /* 310 /*
306 * We fault-in kernel-space virtual memory on-demand. The 311 * We fault-in kernel-space virtual memory on-demand. The
307 * 'reference' page table is init_mm.pgd. 312 * 'reference' page table is init_mm.pgd.
@@ -319,8 +324,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
319 if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && 324 if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
320 vmalloc_fault(address) >= 0) 325 vmalloc_fault(address) >= 0)
321 return; 326 return;
322 if (notify_page_fault(regs))
323 return;
324 /* 327 /*
325 * Don't take the mm semaphore here. If we fixup a prefetch 328 * Don't take the mm semaphore here. If we fixup a prefetch
326 * fault we could otherwise deadlock. 329 * fault we could otherwise deadlock.
@@ -328,16 +331,11 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
328 goto bad_area_nosemaphore; 331 goto bad_area_nosemaphore;
329 } 332 }
330 333
331 if (notify_page_fault(regs))
332 return;
333
334 /* It's safe to allow irq's after cr2 has been saved and the vmalloc 334 /* It's safe to allow irq's after cr2 has been saved and the vmalloc
335 fault has been handled. */ 335 fault has been handled. */
336 if (regs->flags & (X86_EFLAGS_IF|VM_MASK)) 336 if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
337 local_irq_enable(); 337 local_irq_enable();
338 338
339 mm = tsk->mm;
340
341 /* 339 /*
342 * If we're in an interrupt, have no user context or are running in an 340 * If we're in an interrupt, have no user context or are running in an
343 * atomic region then we must not take the fault. 341 * atomic region then we must not take the fault.