Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--	arch/x86/mm/fault.c	67
1 file changed, 24 insertions(+), 43 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 455f3fe67b42..31e8730fa246 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -35,6 +35,7 @@
 #include <asm/tlbflush.h>
 #include <asm/proto.h>
 #include <asm-generic/sections.h>
+#include <asm/traps.h>
 
 /*
  * Page fault error code bits
@@ -357,8 +358,6 @@ static int is_errata100(struct pt_regs *regs, unsigned long address)
 	return 0;
 }
 
-void do_invalid_op(struct pt_regs *, unsigned long);
-
 static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
 {
 #ifdef CONFIG_X86_F00F_BUG
@@ -593,11 +592,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	unsigned long flags;
 #endif
 
-	/*
-	 * We can fault from pretty much anywhere, with unknown IRQ state.
-	 */
-	trace_hardirqs_fixup();
-
 	tsk = current;
 	mm = tsk->mm;
 	prefetchw(&mm->mmap_sem);
@@ -646,24 +640,23 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	}
 
 
-#ifdef CONFIG_X86_32
-	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
-	   fault has been handled. */
-	if (regs->flags & (X86_EFLAGS_IF | X86_VM_MASK))
-		local_irq_enable();
-
 	/*
-	 * If we're in an interrupt, have no user context or are running in an
-	 * atomic region then we must not take the fault.
+	 * It's safe to allow irq's after cr2 has been saved and the
+	 * vmalloc fault has been handled.
+	 *
+	 * User-mode registers count as a user access even for any
+	 * potential system fault or CPU buglet.
 	 */
-	if (in_atomic() || !mm)
-		goto bad_area_nosemaphore;
-#else /* CONFIG_X86_64 */
-	if (likely(regs->flags & X86_EFLAGS_IF))
+	if (user_mode_vm(regs)) {
+		local_irq_enable();
+		error_code |= PF_USER;
+	} else if (regs->flags & X86_EFLAGS_IF)
 		local_irq_enable();
 
+#ifdef CONFIG_X86_64
 	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(address, regs, error_code);
+#endif
 
 	/*
 	 * If we're in an interrupt, have no user context or are running in an
@@ -672,15 +665,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(in_atomic() || !mm))
 		goto bad_area_nosemaphore;
 
-	/*
-	 * User-mode registers count as a user access even for any
-	 * potential system fault or CPU buglet.
-	 */
-	if (user_mode_vm(regs))
-		error_code |= PF_USER;
 again:
-#endif
-	/* When running in the kernel we expect faults to occur only to
+	/*
+	 * When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
 	 * kernel and should generate an OOPS. Unfortunately, in the case of an
 	 * erroneous fault occurring in a code path which already holds mmap_sem
@@ -743,9 +730,6 @@ good_area:
 		goto bad_area;
 	}
 
-#ifdef CONFIG_X86_32
-survive:
-#endif
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -880,12 +864,11 @@ out_of_memory:
 	up_read(&mm->mmap_sem);
 	if (is_global_init(tsk)) {
 		yield();
-#ifdef CONFIG_X86_32
-		down_read(&mm->mmap_sem);
-		goto survive;
-#else
+		/*
+		 * Re-lookup the vma - in theory the vma tree might
+		 * have changed:
+		 */
 		goto again;
-#endif
 	}
 
 	printk("VM: killing process %s\n", tsk->comm);
@@ -915,15 +898,15 @@ LIST_HEAD(pgd_list);
 
 void vmalloc_sync_all(void)
 {
-#ifdef CONFIG_X86_32
-	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
+#ifdef CONFIG_X86_32
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
+	for (address = VMALLOC_START & PMD_MASK;
+	     address >= TASK_SIZE && address < FIXADDR_TOP;
+	     address += PMD_SIZE) {
 		unsigned long flags;
 		struct page *page;
 
@@ -936,10 +919,8 @@ void vmalloc_sync_all(void)
 		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #else /* CONFIG_X86_64 */
-	unsigned long start = VMALLOC_START & PGDIR_MASK;
-	unsigned long address;
-
-	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
+	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
+	     address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
 		unsigned long flags;
 		struct page *page;