author     Ingo Molnar <mingo@elte.hu>   2008-10-15 07:46:29 -0400
committer  Ingo Molnar <mingo@elte.hu>   2008-10-15 07:46:29 -0400
commit     b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (patch)
tree       53ccb1c2c14751fe69cf93102e76e97021f6df07 /arch/x86/mm/fault.c
parent     4f962d4d65923d7b722192e729840cfb79af0a5a (diff)
parent     278429cff8809958d25415ba0ed32b59866ab1a8 (diff)
Merge branch 'linus' into stackprotector
Conflicts:
arch/x86/kernel/Makefile
include/asm-x86/pda.h
Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--   arch/x86/mm/fault.c   128
1 file changed, 48 insertions, 80 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 0c5dcee23bb1..d18ea136d8a6 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -10,6 +10,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/mmiotrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -35,6 +36,7 @@
 #include <asm/tlbflush.h>
 #include <asm/proto.h>
 #include <asm-generic/sections.h>
+#include <asm/traps.h>
 
 /*
  * Page fault error code bits
@@ -50,17 +52,23 @@
 #define PF_RSVD	(1<<3)
 #define PF_INSTR	(1<<4)
 
+static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
+{
+#ifdef CONFIG_MMIOTRACE_HOOKS
+	if (unlikely(is_kmmio_active()))
+		if (kmmio_handler(regs, addr) == 1)
+			return -1;
+#endif
+	return 0;
+}
+
 static inline int notify_page_fault(struct pt_regs *regs)
 {
 #ifdef CONFIG_KPROBES
 	int ret = 0;
 
 	/* kprobe_running() needs smp_processor_id() */
-#ifdef CONFIG_X86_32
 	if (!user_mode_vm(regs)) {
-#else
-	if (!user_mode(regs)) {
-#endif
 		preempt_disable();
 		if (kprobe_running() && kprobe_fault_handler(regs, 14))
 			ret = 1;
@@ -351,8 +359,6 @@ static int is_errata100(struct pt_regs *regs, unsigned long address)
 	return 0;
 }
 
-void do_invalid_op(struct pt_regs *, unsigned long);
-
 static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
 {
 #ifdef CONFIG_X86_F00F_BUG
@@ -397,11 +403,7 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 		printk(KERN_CONT "NULL pointer dereference");
 	else
 		printk(KERN_CONT "paging request");
-#ifdef CONFIG_X86_32
-	printk(KERN_CONT " at %08lx\n", address);
-#else
-	printk(KERN_CONT " at %016lx\n", address);
-#endif
+	printk(KERN_CONT " at %p\n", (void *) address);
 	printk(KERN_ALERT "IP:");
 	printk_address(regs->ip, 1);
 	dump_pagetable(address);
@@ -593,11 +595,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	unsigned long flags;
 #endif
 
-	/*
-	 * We can fault from pretty much anywhere, with unknown IRQ state.
-	 */
-	trace_hardirqs_fixup();
-
 	tsk = current;
 	mm = tsk->mm;
 	prefetchw(&mm->mmap_sem);
@@ -609,6 +606,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 	if (notify_page_fault(regs))
 		return;
+	if (unlikely(kmmio_fault(regs, address)))
+		return;
 
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
@@ -803,14 +802,10 @@ bad_area_nosemaphore:
 	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
 	    printk_ratelimit()) {
 		printk(
-#ifdef CONFIG_X86_32
-		"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
-#else
-		"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
-#endif
+		"%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
-		tsk->comm, task_pid_nr(tsk), address, regs->ip,
-		regs->sp, error_code);
+		tsk->comm, task_pid_nr(tsk), address,
+		(void *) regs->ip, (void *) regs->sp, error_code);
 		print_vma_addr(" in ", regs->ip);
 		printk("\n");
 	}
@@ -921,72 +916,45 @@ LIST_HEAD(pgd_list);
 
 void vmalloc_sync_all(void)
 {
-#ifdef CONFIG_X86_32
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = TASK_SIZE;
 	unsigned long address;
 
+#ifdef CONFIG_X86_32
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			unsigned long flags;
-			struct page *page;
-
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				if (!vmalloc_sync_one(page_address(page),
-						      address))
-					break;
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			if (!page)
-				set_bit(pgd_index(address), insync);
-		}
-		if (address == start && test_bit(pgd_index(address), insync))
-			start = address + PGDIR_SIZE;
+	for (address = VMALLOC_START & PMD_MASK;
+	     address >= TASK_SIZE && address < FIXADDR_TOP;
+	     address += PMD_SIZE) {
+		unsigned long flags;
+		struct page *page;
+
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			if (!vmalloc_sync_one(page_address(page),
+					      address))
+				break;
+		}
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #else /* CONFIG_X86_64 */
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = VMALLOC_START & PGDIR_MASK;
-	unsigned long address;
-
-	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			const pgd_t *pgd_ref = pgd_offset_k(address);
-			unsigned long flags;
-			struct page *page;
-
-			if (pgd_none(*pgd_ref))
-				continue;
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				pgd_t *pgd;
-				pgd = (pgd_t *)page_address(page) + pgd_index(address);
-				if (pgd_none(*pgd))
-					set_pgd(pgd, *pgd_ref);
-				else
-					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			set_bit(pgd_index(address), insync);
-		}
-		if (address == start)
-			start = address + PGDIR_SIZE;
+	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
+	     address += PGDIR_SIZE) {
+		const pgd_t *pgd_ref = pgd_offset_k(address);
+		unsigned long flags;
+		struct page *page;
+
+		if (pgd_none(*pgd_ref))
+			continue;
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+			else
+				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+		}
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #endif
 }
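
Note: the 32-bit loop in the last hunk relies on vmalloc_sync_one() to copy the kernel's page-table entry covering each vmalloc address into every process page directory on pgd_list. That helper is not part of this diff; the sketch below is only an approximation of how such a helper is typically shaped in fault.c of this era, and its details may differ from the tree at this commit.

/*
 * Rough sketch (not taken from this diff): propagate init_mm's pmd entry
 * for 'address' into the given process page directory, returning NULL when
 * the kernel side has nothing mapped there yet.
 */
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned int index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	/* Copy the kernel mapping into this pgd if it is still missing. */
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

The 64-bit half of the hunk does not use this helper at all: it syncs at pgd granularity by copying *pgd_ref entries directly, as shown above.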