author		Harvey Harrison <harvey.harrison@gmail.com>	2008-02-04 10:47:56 -0500
committer	Ingo Molnar <mingo@elte.hu>			2008-02-04 10:47:56 -0500
commit		cf89ec924da5b76cbff293a1b378f312c7161411 (patch)
tree		83e88f6694fa639bdf0327359b78d22a34432b80 /arch/x86/mm/fault.c
parent		6118f76fb7408bad7631345cc41a5f0efc49ce3e (diff)
x86: reduce ifdef sections in fault.c
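Both halves of do_page_fault() open-coded the same kernel-address handling: the 32-bit path checked TASK_SIZE while the 64-bit path checked TASK_SIZE64 and additionally repeated the vmalloc range test before calling vmalloc_fault(). Moving the range test into vmalloc_fault() itself leaves the two paths differing only in the address limit, so a single #ifdef/#else/#endif around the if-header suffices and the duplicated 64-bit block can go. A minimal sketch of the resulting shape (identifiers as in fault.c; unrelated code elided):

	static int vmalloc_fault(unsigned long address)
	{
		/* Make sure we are in vmalloc area */
		if (!(address >= VMALLOC_START && address < VMALLOC_END))
			return -1;
		/* ... existing mapping-sync logic ... */
	}

	/* In do_page_fault(), only the address limit differs: */
#ifdef CONFIG_X86_32
	if (unlikely(address >= TASK_SIZE)) {
#else
	if (unlikely(address >= TASK_SIZE64)) {
#endif
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;
		/* ... shared spurious-fault and bad_area_nosemaphore handling ... */
	}

The spurious-fault and prefetch-deadlock handling deleted from the 64-bit branch appears not to be lost: the equivalent checks already live in the now-shared block, whose tail ("goto bad_area_nosemaphore;") is visible as context in the third hunk below.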
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--	arch/x86/mm/fault.c | 31 +++++++++----------------------
1 file changed, 9 insertions(+), 22 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e4440d0abf81..3fff490254a9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -508,6 +508,10 @@ static int vmalloc_fault(unsigned long address)
 	pmd_t *pmd, *pmd_ref;
 	pte_t *pte, *pte_ref;
 
+	/* Make sure we are in vmalloc area */
+	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+		return -1;
+
 	/* Copy kernel mappings over when needed. This can also
 	   happen within a race in page table update. In the later
 	   case just flush. */
@@ -603,6 +607,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 */
 #ifdef CONFIG_X86_32
 	if (unlikely(address >= TASK_SIZE)) {
+#else
+	if (unlikely(address >= TASK_SIZE64)) {
+#endif
 		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
 		    vmalloc_fault(address) >= 0)
 			return;
@@ -618,6 +625,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		goto bad_area_nosemaphore;
 	}
 
+
+#ifdef CONFIG_X86_32
 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
 	   fault has been handled. */
 	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
@@ -630,28 +639,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 #else /* CONFIG_X86_64 */
-	if (unlikely(address >= TASK_SIZE64)) {
-		/*
-		 * Don't check for the module range here: its PML4
-		 * is always initialized because it's shared with the main
-		 * kernel text. Only vmalloc may need PML4 syncups.
-		 */
-		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
-			if (vmalloc_fault(address) >= 0)
-				return;
-		}
-
-		/* Can handle a stale RO->RW TLB */
-		if (spurious_fault(address, error_code))
-			return;
-
-		/*
-		 * Don't take the mm semaphore here. If we fixup a prefetch
-		 * fault we could otherwise deadlock.
-		 */
-		goto bad_area_nosemaphore;
-	}
 	if (likely(regs->flags & X86_EFLAGS_IF))
 		local_irq_enable();
 
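A note on the construct added in the second hunk: the #ifdef/#else/#endif selects which of the two if-header lines the compiler ever sees, while the brace-delimited body after the #endif is shared by both configurations. A small self-contained illustration (the macro names reuse the kernel's spellings, but the values and the program itself are made up for this demo):

	#include <stdio.h>

	#define CONFIG_X86_32				/* remove to take the 64-bit branch */
	#define TASK_SIZE	0xc0000000ULL		/* demo value, not the kernel's */
	#define TASK_SIZE64	0x800000000000ULL	/* demo value, not the kernel's */

	int main(void)
	{
		unsigned long long address = 0xdeadbeefULL;

	#ifdef CONFIG_X86_32
		if (address >= TASK_SIZE) {		/* header used on 32-bit */
	#else
		if (address >= TASK_SIZE64) {		/* header used on 64-bit */
	#endif
			/* one shared body, compiled exactly once */
			puts("address is in the kernel range");
		}
		return 0;
	}

Compiled as-is it prints the message, since 0xdeadbeef >= 0xc0000000; deleting the CONFIG_X86_32 define switches the comparison to TASK_SIZE64 without touching the shared body.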