Diffstat (limited to 'arch/x86/mm/fault.c')
 arch/x86/mm/fault.c | 97 +++++++++---------------------
 1 file changed, 30 insertions(+), 67 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8bcb6f40ccb6..d0f5fce77d95 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -55,11 +55,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
 	int ret = 0;
 
 	/* kprobe_running() needs smp_processor_id() */
-#ifdef CONFIG_X86_32
 	if (!user_mode_vm(regs)) {
-#else
-	if (!user_mode(regs)) {
-#endif
 		preempt_disable();
 		if (kprobe_running() && kprobe_fault_handler(regs, 14))
 			ret = 1;
@@ -396,11 +392,7 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 		printk(KERN_CONT "NULL pointer dereference");
 	else
 		printk(KERN_CONT "paging request");
-#ifdef CONFIG_X86_32
-	printk(KERN_CONT " at %08lx\n", address);
-#else
-	printk(KERN_CONT " at %016lx\n", address);
-#endif
+	printk(KERN_CONT " at %p\n", (void *) address);
 	printk(KERN_ALERT "IP:");
 	printk_address(regs->ip, 1);
 	dump_pagetable(address);
@@ -800,14 +792,10 @@ bad_area_nosemaphore:
 	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
 	    printk_ratelimit()) {
 		printk(
-#ifdef CONFIG_X86_32
-		"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
-#else
-		"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
-#endif
+		"%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
-		tsk->comm, task_pid_nr(tsk), address, regs->ip,
-		regs->sp, error_code);
+		tsk->comm, task_pid_nr(tsk), address,
+		(void *) regs->ip, (void *) regs->sp, error_code);
 		print_vma_addr(" in ", regs->ip);
 		printk("\n");
 	}
@@ -915,14 +903,7 @@ LIST_HEAD(pgd_list);
 void vmalloc_sync_all(void)
 {
 #ifdef CONFIG_X86_32
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = TASK_SIZE;
+	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
 	if (SHARED_KERNEL_PMD)
@@ -930,56 +911,38 @@ void vmalloc_sync_all(void)
 
 	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
 	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			unsigned long flags;
-			struct page *page;
-
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				if (!vmalloc_sync_one(page_address(page),
-						      address))
-					break;
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			if (!page)
-				set_bit(pgd_index(address), insync);
+		unsigned long flags;
+		struct page *page;
+
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			if (!vmalloc_sync_one(page_address(page),
+					      address))
+				break;
 		}
-		if (address == start && test_bit(pgd_index(address), insync))
-			start = address + PGDIR_SIZE;
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #else /* CONFIG_X86_64 */
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = VMALLOC_START & PGDIR_MASK;
+	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
 	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			const pgd_t *pgd_ref = pgd_offset_k(address);
-			unsigned long flags;
-			struct page *page;
-
-			if (pgd_none(*pgd_ref))
-				continue;
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				pgd_t *pgd;
-				pgd = (pgd_t *)page_address(page) + pgd_index(address);
-				if (pgd_none(*pgd))
-					set_pgd(pgd, *pgd_ref);
-				else
-					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			set_bit(pgd_index(address), insync);
+		const pgd_t *pgd_ref = pgd_offset_k(address);
+		unsigned long flags;
+		struct page *page;
+
+		if (pgd_none(*pgd_ref))
+			continue;
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+			else
+				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 		}
-		if (address == start)
-			start = address + PGDIR_SIZE;
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #endif
 }
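
The two printk changes above share one idea: printk()'s "%p" conversion zero-pads a pointer to 2 * sizeof(void *) hex digits, so a single format string replaces the per-width "%08lx"/"%016lx" pair and the #ifdef CONFIG_X86_32 around it. A minimal userspace sketch of that padding rule, with plain printf() standing in for printk() (the helper name is made up for the example; this is an illustration, not kernel code):

#include <stdio.h>

/*
 * Emulate the kernel's zero-padded "%p": the field width follows the
 * word size, so the same call prints "at deadbeef" on a 32-bit build
 * and "at 00000000deadbeef" on a 64-bit build.
 */
static void show_fault_addr(unsigned long address)
{
	printf("BUG: unable to handle kernel paging request at %0*lx\n",
	       (int)(2 * sizeof(void *)), address);
}

int main(void)
{
	show_fault_addr(0xdeadbeefUL);
	return 0;
}

The vmalloc_sync_all() hunks make a simplicity-over-speed trade in the same spirit: the insync bitmap and the static start cursor were pure optimizations (as their deleted comment noted), and removing them leaves a straightforward walk of pgd_list under pgd_lock for every PGDIR_SIZE step of the range.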