Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--	arch/x86/mm/fault.c	115
1 file changed, 48 insertions(+), 67 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index fd7e1798c75a..455f3fe67b42 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -10,6 +10,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/mmiotrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -49,17 +50,23 @@
 #define PF_RSVD		(1<<3)
 #define PF_INSTR	(1<<4)
 
+static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
+{
+#ifdef CONFIG_MMIOTRACE_HOOKS
+	if (unlikely(is_kmmio_active()))
+		if (kmmio_handler(regs, addr) == 1)
+			return -1;
+#endif
+	return 0;
+}
+
 static inline int notify_page_fault(struct pt_regs *regs)
 {
 #ifdef CONFIG_KPROBES
 	int ret = 0;
 
 	/* kprobe_running() needs smp_processor_id() */
-#ifdef CONFIG_X86_32
 	if (!user_mode_vm(regs)) {
-#else
-	if (!user_mode(regs)) {
-#endif
 		preempt_disable();
 		if (kprobe_running() && kprobe_fault_handler(regs, 14))
 			ret = 1;
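
Aside (not part of the patch): because the body of kmmio_fault() is guarded
by CONFIG_MMIOTRACE_HOOKS, the function collapses to a bare "return 0;" when
the option is off, so the compiler can drop the call at the fault-path call
site entirely. A minimal user-space sketch of the same compile-away hook
pattern follows; every name in it is a hypothetical stand-in, not kernel API.

#include <stdio.h>

/* Toggle to mimic CONFIG_MMIOTRACE_HOOKS being set or unset. */
#define DEMO_HOOKS 1

static int demo_active;				/* stands in for is_kmmio_active() */

static int demo_handler(unsigned long addr)	/* stands in for kmmio_handler() */
{
	printf("hook claimed fault at %#lx\n", addr);
	return 1;				/* 1 means "handled" */
}

/* Same shape as kmmio_fault(): nonzero return means the hook ate the fault. */
static inline int demo_fault(unsigned long addr)
{
#if DEMO_HOOKS
	if (demo_active)
		if (demo_handler(addr) == 1)
			return -1;
#endif
	return 0;	/* hooks compiled out or tracer inactive: fall through */
}

int main(void)
{
	demo_active = 1;
	if (demo_fault(0xdeadbeefUL))
		printf("fault consumed by hook\n");
	return 0;
}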
@@ -396,11 +403,7 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 		printk(KERN_CONT "NULL pointer dereference");
 	else
 		printk(KERN_CONT "paging request");
-#ifdef CONFIG_X86_32
-	printk(KERN_CONT " at %08lx\n", address);
-#else
-	printk(KERN_CONT " at %016lx\n", address);
-#endif
+	printk(KERN_CONT " at %p\n", (void *) address);
 	printk(KERN_ALERT "IP:");
 	printk_address(regs->ip, 1);
 	dump_pagetable(address);
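
Aside (not part of the patch): the unified format works because the kernel's
printk zero-pads "%p" to the native pointer width, which is exactly what the
removed "%08lx"/"%016lx" pair spelled out by hand; the value just has to be
cast to a pointer type. A small stand-alone C sketch of the same
width-dispatch idea (user-space printf renders "%p" differently, so this
pads explicitly):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Zero-pad an address to the native pointer width: 8 hex digits on a
 * 32-bit build, 16 on a 64-bit build, mirroring the kernel's %p. */
static void print_addr(uintptr_t addr)
{
	int width = (int)(2 * sizeof(void *));

	printf("at %0*" PRIxPTR "\n", width, addr);
}

int main(void)
{
	int probe;

	print_addr((uintptr_t)&probe);
	return 0;
}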
@@ -497,6 +500,11 @@ static int vmalloc_fault(unsigned long address)
 	unsigned long pgd_paddr;
 	pmd_t *pmd_k;
 	pte_t *pte_k;
+
+	/* Make sure we are in vmalloc area */
+	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+		return -1;
+
 	/*
 	 * Synchronize this task's top level page-table
 	 * with the 'reference' page table.
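
Aside (not part of the patch): the added guard lets vmalloc_fault() be
called with any faulting address and reject everything outside the half-open
range [VMALLOC_START, VMALLOC_END) itself. A tiny sketch of the same check
with made-up bounds, since the real constants are arch- and config-specific:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for VMALLOC_START and VMALLOC_END. */
#define DEMO_VMALLOC_START	0xf0000000UL
#define DEMO_VMALLOC_END	0xff000000UL

/* Mirrors the added check: true only inside the half-open vmalloc range. */
static bool in_vmalloc_range(unsigned long addr)
{
	return addr >= DEMO_VMALLOC_START && addr < DEMO_VMALLOC_END;
}

int main(void)
{
	printf("%d\n", in_vmalloc_range(0xf1234567UL));	/* 1: inside */
	printf("%d\n", in_vmalloc_range(0x12345678UL));	/* 0: outside */
	return 0;
}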
@@ -601,6 +609,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 	if (notify_page_fault(regs))
 		return;
+	if (unlikely(kmmio_fault(regs, address)))
+		return;
 
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
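
Aside (not part of the patch): with this hunk the early exits in
do_page_fault() run in a fixed order: kprobes gets first refusal via
notify_page_fault(), then mmiotrace via kmmio_fault(), and only then does
normal fault handling proceed. A compact user-space sketch of that
early-return chain; the hook names and addresses are hypothetical:

#include <stdio.h>

/* Each hook returns nonzero if it fully handled the fault. */
static int kprobes_hook(unsigned long addr)	{ (void)addr; return 0; }
static int mmiotrace_hook(unsigned long addr)	{ return addr == 0x1000; }

static void handle_fault(unsigned long addr)
{
	if (kprobes_hook(addr))		/* first refusal: kprobes */
		return;
	if (mmiotrace_hook(addr))	/* second: mmiotrace */
		return;
	printf("normal fault handling for %#lx\n", addr);
}

int main(void)
{
	handle_fault(0x1000);	/* consumed by the mmiotrace stand-in */
	handle_fault(0x2000);	/* falls through to normal handling */
	return 0;
}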
@@ -795,14 +805,10 @@ bad_area_nosemaphore:
 	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
 	    printk_ratelimit()) {
 		printk(
-#ifdef CONFIG_X86_32
-		"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
-#else
-		"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
-#endif
+		"%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
-		tsk->comm, task_pid_nr(tsk), address, regs->ip,
-		regs->sp, error_code);
+		tsk->comm, task_pid_nr(tsk), address,
+		(void *) regs->ip, (void *) regs->sp, error_code);
 		print_vma_addr(" in ", regs->ip);
 		printk("\n");
 	}
@@ -910,14 +916,7 @@ LIST_HEAD(pgd_list);
 void vmalloc_sync_all(void)
 {
 #ifdef CONFIG_X86_32
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = TASK_SIZE;
+	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
 	if (SHARED_KERNEL_PMD)
@@ -925,56 +924,38 @@ void vmalloc_sync_all(void)
 
 	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
 	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			unsigned long flags;
-			struct page *page;
-
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				if (!vmalloc_sync_one(page_address(page),
-								address))
-					break;
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			if (!page)
-				set_bit(pgd_index(address), insync);
+		unsigned long flags;
+		struct page *page;
+
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			if (!vmalloc_sync_one(page_address(page),
+					      address))
+				break;
 		}
-		if (address == start && test_bit(pgd_index(address), insync))
-			start = address + PGDIR_SIZE;
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #else /* CONFIG_X86_64 */
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = VMALLOC_START & PGDIR_MASK;
+	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
 	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			const pgd_t *pgd_ref = pgd_offset_k(address);
-			unsigned long flags;
-			struct page *page;
-
-			if (pgd_none(*pgd_ref))
-				continue;
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				pgd_t *pgd;
-				pgd = (pgd_t *)page_address(page) + pgd_index(address);
-				if (pgd_none(*pgd))
-					set_pgd(pgd, *pgd_ref);
-				else
-					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			set_bit(pgd_index(address), insync);
+		const pgd_t *pgd_ref = pgd_offset_k(address);
+		unsigned long flags;
+		struct page *page;
+
+		if (pgd_none(*pgd_ref))
+			continue;
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+			else
+				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 		}
-		if (address == start)
-			start = address + PGDIR_SIZE;
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #endif
 }