| author | Thomas Gleixner <tglx@linutronix.de> | 2009-03-23 09:50:03 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2009-03-23 16:20:20 -0400 |
| commit | 80c5520811d3805adcb15c570ea5e2d489fa5d0b (patch) | |
| tree | ae797a7f4af39f80e77526533d06ac23b439f0ab /arch/x86/mm/fault.c | |
| parent | b3e3b302cf6dc8d60b67f0e84d1fa5648889c038 (diff) | |
| parent | 8c083f081d0014057901c68a0a3e0f8ca7ac8d23 (diff) | |
Merge branch 'cpus4096' into irq/threaded
Conflicts:
arch/parisc/kernel/irq.c
kernel/irq/handle.c
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/fault.c')
| -rw-r--r-- | arch/x86/mm/fault.c | 1333 |
1 file changed, 772 insertions, 561 deletions
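
One of the most visible changes in the diff below is that the old `PF_*` `#define`s become an `enum x86_pf_error_code`. As a quick orientation, here is a minimal standalone sketch (not part of the commit) that decodes those bits the way the refactored comment block documents them; the enum values are copied from the diff, while `decode_pf_error()` and its test values are illustrative only.

```c
#include <stdio.h>

/* Bit values copied from the refactored arch/x86/mm/fault.c: */
enum x86_pf_error_code {
	PF_PROT  = 1 << 0,	/* 0: no page found   1: protection fault */
	PF_WRITE = 1 << 1,	/* 0: read access     1: write access */
	PF_USER  = 1 << 2,	/* 0: kernel-mode     1: user-mode access */
	PF_RSVD  = 1 << 3,	/* 1: use of reserved bit detected */
	PF_INSTR = 1 << 4,	/* 1: fault was an instruction fetch */
};

/* Hypothetical helper: print a human-readable view of an error code. */
static void decode_pf_error(unsigned long error_code)
{
	printf("%#05lx: %s, %s, %s%s%s\n", error_code,
	       (error_code & PF_PROT)  ? "protection fault" : "page not present",
	       (error_code & PF_WRITE) ? "write"            : "read",
	       (error_code & PF_USER)  ? "user mode"        : "kernel mode",
	       (error_code & PF_RSVD)  ? ", reserved bit set"  : "",
	       (error_code & PF_INSTR) ? ", instruction fetch" : "");
}

int main(void)
{
	decode_pf_error(PF_USER | PF_WRITE);		/* user write, not present */
	decode_pf_error(PF_USER | PF_PROT | PF_INSTR);	/* user NX violation */
	return 0;
}
```

This bit layout is also why the rewritten handler can test `error_code & (PF_RSVD|PF_USER|PF_PROT)` before even considering a vmalloc fault: only a kernel-mode, non-protection, non-reserved fault can be a missing vmalloc mapping.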
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c76ef1d701c9..a03b7279efa0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1,73 +1,79 @@
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 1995 Linus Torvalds | 2 | * Copyright (C) 1995 Linus Torvalds |
| 3 | * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs. | 3 | * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. |
| 4 | * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar | ||
| 4 | */ | 5 | */ |
| 5 | |||
| 6 | #include <linux/signal.h> | ||
| 7 | #include <linux/sched.h> | ||
| 8 | #include <linux/kernel.h> | ||
| 9 | #include <linux/errno.h> | ||
| 10 | #include <linux/string.h> | ||
| 11 | #include <linux/types.h> | ||
| 12 | #include <linux/ptrace.h> | ||
| 13 | #include <linux/mmiotrace.h> | ||
| 14 | #include <linux/mman.h> | ||
| 15 | #include <linux/mm.h> | ||
| 16 | #include <linux/smp.h> | ||
| 17 | #include <linux/interrupt.h> | 6 | #include <linux/interrupt.h> |
| 18 | #include <linux/init.h> | 7 | #include <linux/mmiotrace.h> |
| 19 | #include <linux/tty.h> | 8 | #include <linux/bootmem.h> |
| 20 | #include <linux/vt_kern.h> /* For unblank_screen() */ | ||
| 21 | #include <linux/compiler.h> | 9 | #include <linux/compiler.h> |
| 22 | #include <linux/highmem.h> | 10 | #include <linux/highmem.h> |
| 23 | #include <linux/bootmem.h> /* for max_low_pfn */ | ||
| 24 | #include <linux/vmalloc.h> | ||
| 25 | #include <linux/module.h> | ||
| 26 | #include <linux/kprobes.h> | 11 | #include <linux/kprobes.h> |
| 27 | #include <linux/uaccess.h> | 12 | #include <linux/uaccess.h> |
| 13 | #include <linux/vmalloc.h> | ||
| 14 | #include <linux/vt_kern.h> | ||
| 15 | #include <linux/signal.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/ptrace.h> | ||
| 18 | #include <linux/string.h> | ||
| 19 | #include <linux/module.h> | ||
| 28 | #include <linux/kdebug.h> | 20 | #include <linux/kdebug.h> |
| 21 | #include <linux/errno.h> | ||
| 22 | #include <linux/magic.h> | ||
| 23 | #include <linux/sched.h> | ||
| 24 | #include <linux/types.h> | ||
| 25 | #include <linux/init.h> | ||
| 26 | #include <linux/mman.h> | ||
| 27 | #include <linux/tty.h> | ||
| 28 | #include <linux/smp.h> | ||
| 29 | #include <linux/mm.h> | ||
| 30 | |||
| 31 | #include <asm-generic/sections.h> | ||
| 29 | 32 | ||
| 30 | #include <asm/system.h> | ||
| 31 | #include <asm/desc.h> | ||
| 32 | #include <asm/segment.h> | ||
| 33 | #include <asm/pgalloc.h> | ||
| 34 | #include <asm/smp.h> | ||
| 35 | #include <asm/tlbflush.h> | 33 | #include <asm/tlbflush.h> |
| 34 | #include <asm/pgalloc.h> | ||
| 35 | #include <asm/segment.h> | ||
| 36 | #include <asm/system.h> | ||
| 36 | #include <asm/proto.h> | 37 | #include <asm/proto.h> |
| 37 | #include <asm-generic/sections.h> | ||
| 38 | #include <asm/traps.h> | 38 | #include <asm/traps.h> |
| 39 | #include <asm/desc.h> | ||
| 39 | 40 | ||
| 40 | /* | 41 | /* |
| 41 | * Page fault error code bits | 42 | * Page fault error code bits: |
| 42 | * bit 0 == 0 means no page found, 1 means protection fault | 43 | * |
| 43 | * bit 1 == 0 means read, 1 means write | 44 | * bit 0 == 0: no page found 1: protection fault |
| 44 | * bit 2 == 0 means kernel, 1 means user-mode | 45 | * bit 1 == 0: read access 1: write access |
| 45 | * bit 3 == 1 means use of reserved bit detected | 46 | * bit 2 == 0: kernel-mode access 1: user-mode access |
| 46 | * bit 4 == 1 means fault was an instruction fetch | 47 | * bit 3 == 1: use of reserved bit detected |
| 48 | * bit 4 == 1: fault was an instruction fetch | ||
| 47 | */ | 49 | */ |
| 48 | #define PF_PROT (1<<0) | 50 | enum x86_pf_error_code { |
| 49 | #define PF_WRITE (1<<1) | 51 | |
| 50 | #define PF_USER (1<<2) | 52 | PF_PROT = 1 << 0, |
| 51 | #define PF_RSVD (1<<3) | 53 | PF_WRITE = 1 << 1, |
| 52 | #define PF_INSTR (1<<4) | 54 | PF_USER = 1 << 2, |
| 55 | PF_RSVD = 1 << 3, | ||
| 56 | PF_INSTR = 1 << 4, | ||
| 57 | }; | ||
| 53 | 58 | ||
| 59 | /* | ||
| 60 | * Returns 0 if mmiotrace is disabled, or if the fault is not | ||
| 61 | * handled by mmiotrace: | ||
| 62 | */ | ||
| 54 | static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) | 63 | static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) |
| 55 | { | 64 | { |
| 56 | #ifdef CONFIG_MMIOTRACE | ||
| 57 | if (unlikely(is_kmmio_active())) | 65 | if (unlikely(is_kmmio_active())) |
| 58 | if (kmmio_handler(regs, addr) == 1) | 66 | if (kmmio_handler(regs, addr) == 1) |
| 59 | return -1; | 67 | return -1; |
| 60 | #endif | ||
| 61 | return 0; | 68 | return 0; |
| 62 | } | 69 | } |
| 63 | 70 | ||
| 64 | static inline int notify_page_fault(struct pt_regs *regs) | 71 | static inline int notify_page_fault(struct pt_regs *regs) |
| 65 | { | 72 | { |
| 66 | #ifdef CONFIG_KPROBES | ||
| 67 | int ret = 0; | 73 | int ret = 0; |
| 68 | 74 | ||
| 69 | /* kprobe_running() needs smp_processor_id() */ | 75 | /* kprobe_running() needs smp_processor_id() */ |
| 70 | if (!user_mode_vm(regs)) { | 76 | if (kprobes_built_in() && !user_mode_vm(regs)) { |
| 71 | preempt_disable(); | 77 | preempt_disable(); |
| 72 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) | 78 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) |
| 73 | ret = 1; | 79 | ret = 1; |
@@ -75,29 +81,76 @@ static inline int notify_page_fault(struct pt_regs *regs)
| 75 | } | 81 | } |
| 76 | 82 | ||
| 77 | return ret; | 83 | return ret; |
| 78 | #else | ||
| 79 | return 0; | ||
| 80 | #endif | ||
| 81 | } | 84 | } |
| 82 | 85 | ||
| 83 | /* | 86 | /* |
| 84 | * X86_32 | 87 | * Prefetch quirks: |
| 85 | * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. | 88 | * |
| 86 | * Check that here and ignore it. | 89 | * 32-bit mode: |
| 90 | * | ||
| 91 | * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. | ||
| 92 | * Check that here and ignore it. | ||
| 93 | * | ||
| 94 | * 64-bit mode: | ||
| 87 | * | 95 | * |
| 88 | * X86_64 | 96 | * Sometimes the CPU reports invalid exceptions on prefetch. |
| 89 | * Sometimes the CPU reports invalid exceptions on prefetch. | 97 | * Check that here and ignore it. |
| 90 | * Check that here and ignore it. | ||
| 91 | * | 98 | * |
| 92 | * Opcode checker based on code by Richard Brunner | 99 | * Opcode checker based on code by Richard Brunner. |
| 93 | */ | 100 | */ |
| 94 | static int is_prefetch(struct pt_regs *regs, unsigned long addr, | 101 | static inline int |
| 95 | unsigned long error_code) | 102 | check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, |
| 103 | unsigned char opcode, int *prefetch) | ||
| 96 | { | 104 | { |
| 105 | unsigned char instr_hi = opcode & 0xf0; | ||
| 106 | unsigned char instr_lo = opcode & 0x0f; | ||
| 107 | |||
| 108 | switch (instr_hi) { | ||
| 109 | case 0x20: | ||
| 110 | case 0x30: | ||
| 111 | /* | ||
| 112 | * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. | ||
| 113 | * In X86_64 long mode, the CPU will signal invalid | ||
| 114 | * opcode if some of these prefixes are present so | ||
| 115 | * X86_64 will never get here anyway | ||
| 116 | */ | ||
| 117 | return ((instr_lo & 7) == 0x6); | ||
| 118 | #ifdef CONFIG_X86_64 | ||
| 119 | case 0x40: | ||
| 120 | /* | ||
| 121 | * In AMD64 long mode 0x40..0x4F are valid REX prefixes | ||
| 122 | * Need to figure out under what instruction mode the | ||
| 123 | * instruction was issued. Could check the LDT for lm, | ||
| 124 | * but for now it's good enough to assume that long | ||
| 125 | * mode only uses well known segments or kernel. | ||
| 126 | */ | ||
| 127 | return (!user_mode(regs)) || (regs->cs == __USER_CS); | ||
| 128 | #endif | ||
| 129 | case 0x60: | ||
| 130 | /* 0x64 thru 0x67 are valid prefixes in all modes. */ | ||
| 131 | return (instr_lo & 0xC) == 0x4; | ||
| 132 | case 0xF0: | ||
| 133 | /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ | ||
| 134 | return !instr_lo || (instr_lo>>1) == 1; | ||
| 135 | case 0x00: | ||
| 136 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ | ||
| 137 | if (probe_kernel_address(instr, opcode)) | ||
| 138 | return 0; | ||
| 139 | |||
| 140 | *prefetch = (instr_lo == 0xF) && | ||
| 141 | (opcode == 0x0D || opcode == 0x18); | ||
| 142 | return 0; | ||
| 143 | default: | ||
| 144 | return 0; | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | static int | ||
| 149 | is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) | ||
| 150 | { | ||
| 151 | unsigned char *max_instr; | ||
| 97 | unsigned char *instr; | 152 | unsigned char *instr; |
| 98 | int scan_more = 1; | ||
| 99 | int prefetch = 0; | 153 | int prefetch = 0; |
| 100 | unsigned char *max_instr; | ||
| 101 | 154 | ||
| 102 | /* | 155 | /* |
| 103 | * If it was a exec (instruction fetch) fault on NX page, then | 156 | * If it was a exec (instruction fetch) fault on NX page, then |
@@ -106,106 +159,170 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr,
| 106 | if (error_code & PF_INSTR) | 159 | if (error_code & PF_INSTR) |
| 107 | return 0; | 160 | return 0; |
| 108 | 161 | ||
| 109 | instr = (unsigned char *)convert_ip_to_linear(current, regs); | 162 | instr = (void *)convert_ip_to_linear(current, regs); |
| 110 | max_instr = instr + 15; | 163 | max_instr = instr + 15; |
| 111 | 164 | ||
| 112 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) | 165 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) |
| 113 | return 0; | 166 | return 0; |
| 114 | 167 | ||
| 115 | while (scan_more && instr < max_instr) { | 168 | while (instr < max_instr) { |
| 116 | unsigned char opcode; | 169 | unsigned char opcode; |
| 117 | unsigned char instr_hi; | ||
| 118 | unsigned char instr_lo; | ||
| 119 | 170 | ||
| 120 | if (probe_kernel_address(instr, opcode)) | 171 | if (probe_kernel_address(instr, opcode)) |
| 121 | break; | 172 | break; |
| 122 | 173 | ||
| 123 | instr_hi = opcode & 0xf0; | ||
| 124 | instr_lo = opcode & 0x0f; | ||
| 125 | instr++; | 174 | instr++; |
| 126 | 175 | ||
| 127 | switch (instr_hi) { | 176 | if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) |
| 128 | case 0x20: | ||
| 129 | case 0x30: | ||
| 130 | /* | ||
| 131 | * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. | ||
| 132 | * In X86_64 long mode, the CPU will signal invalid | ||
| 133 | * opcode if some of these prefixes are present so | ||
| 134 | * X86_64 will never get here anyway | ||
| 135 | */ | ||
| 136 | scan_more = ((instr_lo & 7) == 0x6); | ||
| 137 | break; | 177 | break; |
| 138 | #ifdef CONFIG_X86_64 | ||
| 139 | case 0x40: | ||
| 140 | /* | ||
| 141 | * In AMD64 long mode 0x40..0x4F are valid REX prefixes | ||
| 142 | * Need to figure out under what instruction mode the | ||
| 143 | * instruction was issued. Could check the LDT for lm, | ||
| 144 | * but for now it's good enough to assume that long | ||
| 145 | * mode only uses well known segments or kernel. | ||
| 146 | */ | ||
| 147 | scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS); | ||
| 148 | break; | ||
| 149 | #endif | ||
| 150 | case 0x60: | ||
| 151 | /* 0x64 thru 0x67 are valid prefixes in all modes. */ | ||
| 152 | scan_more = (instr_lo & 0xC) == 0x4; | ||
| 153 | break; | ||
| 154 | case 0xF0: | ||
| 155 | /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ | ||
| 156 | scan_more = !instr_lo || (instr_lo>>1) == 1; | ||
| 157 | break; | ||
| 158 | case 0x00: | ||
| 159 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ | ||
| 160 | scan_more = 0; | ||
| 161 | |||
| 162 | if (probe_kernel_address(instr, opcode)) | ||
| 163 | break; | ||
| 164 | prefetch = (instr_lo == 0xF) && | ||
| 165 | (opcode == 0x0D || opcode == 0x18); | ||
| 166 | break; | ||
| 167 | default: | ||
| 168 | scan_more = 0; | ||
| 169 | break; | ||
| 170 | } | ||
| 171 | } | 178 | } |
| 172 | return prefetch; | 179 | return prefetch; |
| 173 | } | 180 | } |
| 174 | 181 | ||
| 175 | static void force_sig_info_fault(int si_signo, int si_code, | 182 | static void |
| 176 | unsigned long address, struct task_struct *tsk) | 183 | force_sig_info_fault(int si_signo, int si_code, unsigned long address, |
| 184 | struct task_struct *tsk) | ||
| 177 | { | 185 | { |
| 178 | siginfo_t info; | 186 | siginfo_t info; |
| 179 | 187 | ||
| 180 | info.si_signo = si_signo; | 188 | info.si_signo = si_signo; |
| 181 | info.si_errno = 0; | 189 | info.si_errno = 0; |
| 182 | info.si_code = si_code; | 190 | info.si_code = si_code; |
| 183 | info.si_addr = (void __user *)address; | 191 | info.si_addr = (void __user *)address; |
| 192 | |||
| 184 | force_sig_info(si_signo, &info, tsk); | 193 | force_sig_info(si_signo, &info, tsk); |
| 185 | } | 194 | } |
| 186 | 195 | ||
| 187 | #ifdef CONFIG_X86_64 | 196 | DEFINE_SPINLOCK(pgd_lock); |
| 188 | static int bad_address(void *p) | 197 | LIST_HEAD(pgd_list); |
| 198 | |||
| 199 | #ifdef CONFIG_X86_32 | ||
| 200 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | ||
| 189 | { | 201 | { |
| 190 | unsigned long dummy; | 202 | unsigned index = pgd_index(address); |
| 191 | return probe_kernel_address((unsigned long *)p, dummy); | 203 | pgd_t *pgd_k; |
| 204 | pud_t *pud, *pud_k; | ||
| 205 | pmd_t *pmd, *pmd_k; | ||
| 206 | |||
| 207 | pgd += index; | ||
| 208 | pgd_k = init_mm.pgd + index; | ||
| 209 | |||
| 210 | if (!pgd_present(*pgd_k)) | ||
| 211 | return NULL; | ||
| 212 | |||
| 213 | /* | ||
| 214 | * set_pgd(pgd, *pgd_k); here would be useless on PAE | ||
| 215 | * and redundant with the set_pmd() on non-PAE. As would | ||
| 216 | * set_pud. | ||
| 217 | */ | ||
| 218 | pud = pud_offset(pgd, address); | ||
| 219 | pud_k = pud_offset(pgd_k, address); | ||
| 220 | if (!pud_present(*pud_k)) | ||
| 221 | return NULL; | ||
| 222 | |||
| 223 | pmd = pmd_offset(pud, address); | ||
| 224 | pmd_k = pmd_offset(pud_k, address); | ||
| 225 | if (!pmd_present(*pmd_k)) | ||
| 226 | return NULL; | ||
| 227 | |||
| 228 | if (!pmd_present(*pmd)) { | ||
| 229 | set_pmd(pmd, *pmd_k); | ||
| 230 | arch_flush_lazy_mmu_mode(); | ||
| 231 | } else { | ||
| 232 | BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); | ||
| 233 | } | ||
| 234 | |||
| 235 | return pmd_k; | ||
| 236 | } | ||
| 237 | |||
| 238 | void vmalloc_sync_all(void) | ||
| 239 | { | ||
| 240 | unsigned long address; | ||
| 241 | |||
| 242 | if (SHARED_KERNEL_PMD) | ||
| 243 | return; | ||
| 244 | |||
| 245 | for (address = VMALLOC_START & PMD_MASK; | ||
| 246 | address >= TASK_SIZE && address < FIXADDR_TOP; | ||
| 247 | address += PMD_SIZE) { | ||
| 248 | |||
| 249 | unsigned long flags; | ||
| 250 | struct page *page; | ||
| 251 | |||
| 252 | spin_lock_irqsave(&pgd_lock, flags); | ||
| 253 | list_for_each_entry(page, &pgd_list, lru) { | ||
| 254 | if (!vmalloc_sync_one(page_address(page), address)) | ||
| 255 | break; | ||
| 256 | } | ||
| 257 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
| 258 | } | ||
| 259 | } | ||
| 260 | |||
| 261 | /* | ||
| 262 | * 32-bit: | ||
| 263 | * | ||
| 264 | * Handle a fault on the vmalloc or module mapping area | ||
| 265 | */ | ||
| 266 | static noinline int vmalloc_fault(unsigned long address) | ||
| 267 | { | ||
| 268 | unsigned long pgd_paddr; | ||
| 269 | pmd_t *pmd_k; | ||
| 270 | pte_t *pte_k; | ||
| 271 | |||
| 272 | /* Make sure we are in vmalloc area: */ | ||
| 273 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | ||
| 274 | return -1; | ||
| 275 | |||
| 276 | /* | ||
| 277 | * Synchronize this task's top level page-table | ||
| 278 | * with the 'reference' page table. | ||
| 279 | * | ||
| 280 | * Do _not_ use "current" here. We might be inside | ||
| 281 | * an interrupt in the middle of a task switch.. | ||
| 282 | */ | ||
| 283 | pgd_paddr = read_cr3(); | ||
| 284 | pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); | ||
| 285 | if (!pmd_k) | ||
| 286 | return -1; | ||
| 287 | |||
| 288 | pte_k = pte_offset_kernel(pmd_k, address); | ||
| 289 | if (!pte_present(*pte_k)) | ||
| 290 | return -1; | ||
| 291 | |||
| 292 | return 0; | ||
| 293 | } | ||
| 294 | |||
| 295 | /* | ||
| 296 | * Did it hit the DOS screen memory VA from vm86 mode? | ||
| 297 | */ | ||
| 298 | static inline void | ||
| 299 | check_v8086_mode(struct pt_regs *regs, unsigned long address, | ||
| 300 | struct task_struct *tsk) | ||
| 301 | { | ||
| 302 | unsigned long bit; | ||
| 303 | |||
| 304 | if (!v8086_mode(regs)) | ||
| 305 | return; | ||
| 306 | |||
| 307 | bit = (address - 0xA0000) >> PAGE_SHIFT; | ||
| 308 | if (bit < 32) | ||
| 309 | tsk->thread.screen_bitmap |= 1 << bit; | ||
| 192 | } | 310 | } |
| 193 | #endif | ||
| 194 | 311 | ||
| 195 | static void dump_pagetable(unsigned long address) | 312 | static void dump_pagetable(unsigned long address) |
| 196 | { | 313 | { |
| 197 | #ifdef CONFIG_X86_32 | ||
| 198 | __typeof__(pte_val(__pte(0))) page; | 314 | __typeof__(pte_val(__pte(0))) page; |
| 199 | 315 | ||
| 200 | page = read_cr3(); | 316 | page = read_cr3(); |
| 201 | page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; | 317 | page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; |
| 318 | |||
| 202 | #ifdef CONFIG_X86_PAE | 319 | #ifdef CONFIG_X86_PAE |
| 203 | printk("*pdpt = %016Lx ", page); | 320 | printk("*pdpt = %016Lx ", page); |
| 204 | if ((page >> PAGE_SHIFT) < max_low_pfn | 321 | if ((page >> PAGE_SHIFT) < max_low_pfn |
| 205 | && page & _PAGE_PRESENT) { | 322 | && page & _PAGE_PRESENT) { |
| 206 | page &= PAGE_MASK; | 323 | page &= PAGE_MASK; |
| 207 | page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) | 324 | page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) |
| 208 | & (PTRS_PER_PMD - 1)]; | 325 | & (PTRS_PER_PMD - 1)]; |
| 209 | printk(KERN_CONT "*pde = %016Lx ", page); | 326 | printk(KERN_CONT "*pde = %016Lx ", page); |
| 210 | page &= ~_PAGE_NX; | 327 | page &= ~_PAGE_NX; |
| 211 | } | 328 | } |
@@ -217,19 +334,145 @@ static void dump_pagetable(unsigned long address)
| 217 | * We must not directly access the pte in the highpte | 334 | * We must not directly access the pte in the highpte |
| 218 | * case if the page table is located in highmem. | 335 | * case if the page table is located in highmem. |
| 219 | * And let's rather not kmap-atomic the pte, just in case | 336 | * And let's rather not kmap-atomic the pte, just in case |
| 220 | * it's allocated already. | 337 | * it's allocated already: |
| 221 | */ | 338 | */ |
| 222 | if ((page >> PAGE_SHIFT) < max_low_pfn | 339 | if ((page >> PAGE_SHIFT) < max_low_pfn |
| 223 | && (page & _PAGE_PRESENT) | 340 | && (page & _PAGE_PRESENT) |
| 224 | && !(page & _PAGE_PSE)) { | 341 | && !(page & _PAGE_PSE)) { |
| 342 | |||
| 225 | page &= PAGE_MASK; | 343 | page &= PAGE_MASK; |
| 226 | page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) | 344 | page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) |
| 227 | & (PTRS_PER_PTE - 1)]; | 345 | & (PTRS_PER_PTE - 1)]; |
| 228 | printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page); | 346 | printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page); |
| 229 | } | 347 | } |
| 230 | 348 | ||
| 231 | printk("\n"); | 349 | printk("\n"); |
| 232 | #else /* CONFIG_X86_64 */ | 350 | } |
| 351 | |||
| 352 | #else /* CONFIG_X86_64: */ | ||
| 353 | |||
| 354 | void vmalloc_sync_all(void) | ||
| 355 | { | ||
| 356 | unsigned long address; | ||
| 357 | |||
| 358 | for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; | ||
| 359 | address += PGDIR_SIZE) { | ||
| 360 | |||
| 361 | const pgd_t *pgd_ref = pgd_offset_k(address); | ||
| 362 | unsigned long flags; | ||
| 363 | struct page *page; | ||
| 364 | |||
| 365 | if (pgd_none(*pgd_ref)) | ||
| 366 | continue; | ||
| 367 | |||
| 368 | spin_lock_irqsave(&pgd_lock, flags); | ||
| 369 | list_for_each_entry(page, &pgd_list, lru) { | ||
| 370 | pgd_t *pgd; | ||
| 371 | pgd = (pgd_t *)page_address(page) + pgd_index(address); | ||
| 372 | if (pgd_none(*pgd)) | ||
| 373 | set_pgd(pgd, *pgd_ref); | ||
| 374 | else | ||
| 375 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | ||
| 376 | } | ||
| 377 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
| 378 | } | ||
| 379 | } | ||
| 380 | |||
| 381 | /* | ||
| 382 | * 64-bit: | ||
| 383 | * | ||
| 384 | * Handle a fault on the vmalloc area | ||
| 385 | * | ||
| 386 | * This assumes no large pages in there. | ||
| 387 | */ | ||
| 388 | static noinline int vmalloc_fault(unsigned long address) | ||
| 389 | { | ||
| 390 | pgd_t *pgd, *pgd_ref; | ||
| 391 | pud_t *pud, *pud_ref; | ||
| 392 | pmd_t *pmd, *pmd_ref; | ||
| 393 | pte_t *pte, *pte_ref; | ||
| 394 | |||
| 395 | /* Make sure we are in vmalloc area: */ | ||
| 396 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | ||
| 397 | return -1; | ||
| 398 | |||
| 399 | /* | ||
| 400 | * Copy kernel mappings over when needed. This can also | ||
| 401 | * happen within a race in page table update. In the later | ||
| 402 | * case just flush: | ||
| 403 | */ | ||
| 404 | pgd = pgd_offset(current->active_mm, address); | ||
| 405 | pgd_ref = pgd_offset_k(address); | ||
| 406 | if (pgd_none(*pgd_ref)) | ||
| 407 | return -1; | ||
| 408 | |||
| 409 | if (pgd_none(*pgd)) | ||
| 410 | set_pgd(pgd, *pgd_ref); | ||
| 411 | else | ||
| 412 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | ||
| 413 | |||
| 414 | /* | ||
| 415 | * Below here mismatches are bugs because these lower tables | ||
| 416 | * are shared: | ||
| 417 | */ | ||
| 418 | |||
| 419 | pud = pud_offset(pgd, address); | ||
| 420 | pud_ref = pud_offset(pgd_ref, address); | ||
| 421 | if (pud_none(*pud_ref)) | ||
| 422 | return -1; | ||
| 423 | |||
| 424 | if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) | ||
| 425 | BUG(); | ||
| 426 | |||
| 427 | pmd = pmd_offset(pud, address); | ||
| 428 | pmd_ref = pmd_offset(pud_ref, address); | ||
| 429 | if (pmd_none(*pmd_ref)) | ||
| 430 | return -1; | ||
| 431 | |||
| 432 | if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) | ||
| 433 | BUG(); | ||
| 434 | |||
| 435 | pte_ref = pte_offset_kernel(pmd_ref, address); | ||
| 436 | if (!pte_present(*pte_ref)) | ||
| 437 | return -1; | ||
| 438 | |||
| 439 | pte = pte_offset_kernel(pmd, address); | ||
| 440 | |||
| 441 | /* | ||
| 442 | * Don't use pte_page here, because the mappings can point | ||
| 443 | * outside mem_map, and the NUMA hash lookup cannot handle | ||
| 444 | * that: | ||
| 445 | */ | ||
| 446 | if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) | ||
| 447 | BUG(); | ||
| 448 | |||
| 449 | return 0; | ||
| 450 | } | ||
| 451 | |||
| 452 | static const char errata93_warning[] = | ||
| 453 | KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n" | ||
| 454 | KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n" | ||
| 455 | KERN_ERR "******* Please consider a BIOS update.\n" | ||
| 456 | KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; | ||
| 457 | |||
| 458 | /* | ||
| 459 | * No vm86 mode in 64-bit mode: | ||
| 460 | */ | ||
| 461 | static inline void | ||
| 462 | check_v8086_mode(struct pt_regs *regs, unsigned long address, | ||
| 463 | struct task_struct *tsk) | ||
| 464 | { | ||
| 465 | } | ||
| 466 | |||
| 467 | static int bad_address(void *p) | ||
| 468 | { | ||
| 469 | unsigned long dummy; | ||
| 470 | |||
| 471 | return probe_kernel_address((unsigned long *)p, dummy); | ||
| 472 | } | ||
| 473 | |||
| 474 | static void dump_pagetable(unsigned long address) | ||
| 475 | { | ||
| 233 | pgd_t *pgd; | 476 | pgd_t *pgd; |
| 234 | pud_t *pud; | 477 | pud_t *pud; |
| 235 | pmd_t *pmd; | 478 | pmd_t *pmd; |
@@ -238,102 +481,77 @@ static void dump_pagetable(unsigned long address)
| 238 | pgd = (pgd_t *)read_cr3(); | 481 | pgd = (pgd_t *)read_cr3(); |
| 239 | 482 | ||
| 240 | pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); | 483 | pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); |
| 484 | |||
| 241 | pgd += pgd_index(address); | 485 | pgd += pgd_index(address); |
| 242 | if (bad_address(pgd)) goto bad; | 486 | if (bad_address(pgd)) |
| 487 | goto bad; | ||
| 488 | |||
| 243 | printk("PGD %lx ", pgd_val(*pgd)); | 489 | printk("PGD %lx ", pgd_val(*pgd)); |
| 244 | if (!pgd_present(*pgd)) goto ret; | 490 | |
| 491 | if (!pgd_present(*pgd)) | ||
| 492 | goto out; | ||
| 245 | 493 | ||
| 246 | pud = pud_offset(pgd, address); | 494 | pud = pud_offset(pgd, address); |
| 247 | if (bad_address(pud)) goto bad; | 495 | if (bad_address(pud)) |
| 496 | goto bad; | ||
| 497 | |||
| 248 | printk("PUD %lx ", pud_val(*pud)); | 498 | printk("PUD %lx ", pud_val(*pud)); |
| 249 | if (!pud_present(*pud) || pud_large(*pud)) | 499 | if (!pud_present(*pud) || pud_large(*pud)) |
| 250 | goto ret; | 500 | goto out; |
| 251 | 501 | ||
| 252 | pmd = pmd_offset(pud, address); | 502 | pmd = pmd_offset(pud, address); |
| 253 | if (bad_address(pmd)) goto bad; | 503 | if (bad_address(pmd)) |
| 504 | goto bad; | ||
| 505 | |||
| 254 | printk("PMD %lx ", pmd_val(*pmd)); | 506 | printk("PMD %lx ", pmd_val(*pmd)); |
| 255 | if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret; | 507 | if (!pmd_present(*pmd) || pmd_large(*pmd)) |
| 508 | goto out; | ||
| 256 | 509 | ||
| 257 | pte = pte_offset_kernel(pmd, address); | 510 | pte = pte_offset_kernel(pmd, address); |
| 258 | if (bad_address(pte)) goto bad; | 511 | if (bad_address(pte)) |
| 512 | goto bad; | ||
| 513 | |||
| 259 | printk("PTE %lx", pte_val(*pte)); | 514 | printk("PTE %lx", pte_val(*pte)); |
| 260 | ret: | 515 | out: |
| 261 | printk("\n"); | 516 | printk("\n"); |
| 262 | return; | 517 | return; |
| 263 | bad: | 518 | bad: |
| 264 | printk("BAD\n"); | 519 | printk("BAD\n"); |
| 265 | #endif | ||
| 266 | } | ||
| 267 | |||
| 268 | #ifdef CONFIG_X86_32 | ||
| 269 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | ||
| 270 | { | ||
| 271 | unsigned index = pgd_index(address); | ||
| 272 | pgd_t *pgd_k; | ||
| 273 | pud_t *pud, *pud_k; | ||
| 274 | pmd_t *pmd, *pmd_k; | ||
| 275 | |||
| 276 | pgd += index; | ||
| 277 | pgd_k = init_mm.pgd + index; | ||
| 278 | |||
| 279 | if (!pgd_present(*pgd_k)) | ||
| 280 | return NULL; | ||
| 281 | |||
| 282 | /* | ||
| 283 | * set_pgd(pgd, *pgd_k); here would be useless on PAE | ||
| 284 | * and redundant with the set_pmd() on non-PAE. As would | ||
| 285 | * set_pud. | ||
| 286 | */ | ||
| 287 | |||
| 288 | pud = pud_offset(pgd, address); | ||
| 289 | pud_k = pud_offset(pgd_k, address); | ||
| 290 | if (!pud_present(*pud_k)) | ||
| 291 | return NULL; | ||
| 292 | |||
| 293 | pmd = pmd_offset(pud, address); | ||
| 294 | pmd_k = pmd_offset(pud_k, address); | ||
| 295 | if (!pmd_present(*pmd_k)) | ||
| 296 | return NULL; | ||
| 297 | if (!pmd_present(*pmd)) { | ||
| 298 | set_pmd(pmd, *pmd_k); | ||
| 299 | arch_flush_lazy_mmu_mode(); | ||
| 300 | } else | ||
| 301 | BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); | ||
| 302 | return pmd_k; | ||
| 303 | } | 520 | } |
| 304 | #endif | ||
| 305 | 521 | ||
| 306 | #ifdef CONFIG_X86_64 | 522 | #endif /* CONFIG_X86_64 */ |
| 307 | static const char errata93_warning[] = | ||
| 308 | KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n" | ||
| 309 | KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n" | ||
| 310 | KERN_ERR "******* Please consider a BIOS update.\n" | ||
| 311 | KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; | ||
| 312 | #endif | ||
| 313 | 523 | ||
| 314 | /* Workaround for K8 erratum #93 & buggy BIOS. | 524 | /* |
| 315 | BIOS SMM functions are required to use a specific workaround | 525 | * Workaround for K8 erratum #93 & buggy BIOS. |
| 316 | to avoid corruption of the 64bit RIP register on C stepping K8. | 526 | * |
| 317 | A lot of BIOS that didn't get tested properly miss this. | 527 | * BIOS SMM functions are required to use a specific workaround |
| 318 | The OS sees this as a page fault with the upper 32bits of RIP cleared. | 528 | * to avoid corruption of the 64bit RIP register on C stepping K8. |
| 319 | Try to work around it here. | 529 | * |
| 320 | Note we only handle faults in kernel here. | 530 | * A lot of BIOS that didn't get tested properly miss this. |
| 321 | Does nothing for X86_32 | 531 | * |
| 532 | * The OS sees this as a page fault with the upper 32bits of RIP cleared. | ||
| 533 | * Try to work around it here. | ||
| 534 | * | ||
| 535 | * Note we only handle faults in kernel here. | ||
| 536 | * Does nothing on 32-bit. | ||
| 322 | */ | 537 | */ |
| 323 | static int is_errata93(struct pt_regs *regs, unsigned long address) | 538 | static int is_errata93(struct pt_regs *regs, unsigned long address) |
| 324 | { | 539 | { |
| 325 | #ifdef CONFIG_X86_64 | 540 | #ifdef CONFIG_X86_64 |
| 326 | static int warned; | 541 | static int once; |
| 542 | |||
| 327 | if (address != regs->ip) | 543 | if (address != regs->ip) |
| 328 | return 0; | 544 | return 0; |
| 545 | |||
| 329 | if ((address >> 32) != 0) | 546 | if ((address >> 32) != 0) |
| 330 | return 0; | 547 | return 0; |
| 548 | |||
| 331 | address |= 0xffffffffUL << 32; | 549 | address |= 0xffffffffUL << 32; |
| 332 | if ((address >= (u64)_stext && address <= (u64)_etext) || | 550 | if ((address >= (u64)_stext && address <= (u64)_etext) || |
| 333 | (address >= MODULES_VADDR && address <= MODULES_END)) { | 551 | (address >= MODULES_VADDR && address <= MODULES_END)) { |
| 334 | if (!warned) { | 552 | if (!once) { |
| 335 | printk(errata93_warning); | 553 | printk(errata93_warning); |
| 336 | warned = 1; | 554 | once = 1; |
| 337 | } | 555 | } |
| 338 | regs->ip = address; | 556 | regs->ip = address; |
| 339 | return 1; | 557 | return 1; |
@@ -343,16 +561,17 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
| 343 | } | 561 | } |
| 344 | 562 | ||
| 345 | /* | 563 | /* |
| 346 | * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal | 564 | * Work around K8 erratum #100 K8 in compat mode occasionally jumps |
| 347 | * addresses >4GB. We catch this in the page fault handler because these | 565 | * to illegal addresses >4GB. |
| 348 | * addresses are not reachable. Just detect this case and return. Any code | 566 | * |
| 567 | * We catch this in the page fault handler because these addresses | ||
| 568 | * are not reachable. Just detect this case and return. Any code | ||
| 349 | * segment in LDT is compatibility mode. | 569 | * segment in LDT is compatibility mode. |
| 350 | */ | 570 | */ |
| 351 | static int is_errata100(struct pt_regs *regs, unsigned long address) | 571 | static int is_errata100(struct pt_regs *regs, unsigned long address) |
| 352 | { | 572 | { |
| 353 | #ifdef CONFIG_X86_64 | 573 | #ifdef CONFIG_X86_64 |
| 354 | if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && | 574 | if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) |
| 355 | (address >> 32)) | ||
| 356 | return 1; | 575 | return 1; |
| 357 | #endif | 576 | #endif |
| 358 | return 0; | 577 | return 0; |
@@ -362,8 +581,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
| 362 | { | 581 | { |
| 363 | #ifdef CONFIG_X86_F00F_BUG | 582 | #ifdef CONFIG_X86_F00F_BUG |
| 364 | unsigned long nr; | 583 | unsigned long nr; |
| 584 | |||
| 365 | /* | 585 | /* |
| 366 | * Pentium F0 0F C7 C8 bug workaround. | 586 | * Pentium F0 0F C7 C8 bug workaround: |
| 367 | */ | 587 | */ |
| 368 | if (boot_cpu_data.f00f_bug) { | 588 | if (boot_cpu_data.f00f_bug) { |
| 369 | nr = (address - idt_descr.address) >> 3; | 589 | nr = (address - idt_descr.address) >> 3; |
@@ -377,62 +597,277 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
| 377 | return 0; | 597 | return 0; |
| 378 | } | 598 | } |
| 379 | 599 | ||
| 380 | static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, | 600 | static const char nx_warning[] = KERN_CRIT |
| 381 | unsigned long address) | 601 | "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; |
| 602 | |||
| 603 | static void | ||
| 604 | show_fault_oops(struct pt_regs *regs, unsigned long error_code, | ||
| 605 | unsigned long address) | ||
| 382 | { | 606 | { |
| 383 | #ifdef CONFIG_X86_32 | ||
| 384 | if (!oops_may_print()) | 607 | if (!oops_may_print()) |
| 385 | return; | 608 | return; |
| 386 | #endif | ||
| 387 | 609 | ||
| 388 | #ifdef CONFIG_X86_PAE | ||
| 389 | if (error_code & PF_INSTR) { | 610 | if (error_code & PF_INSTR) { |
| 390 | unsigned int level; | 611 | unsigned int level; |
| 612 | |||
| 391 | pte_t *pte = lookup_address(address, &level); | 613 | pte_t *pte = lookup_address(address, &level); |
| 392 | 614 | ||
| 393 | if (pte && pte_present(*pte) && !pte_exec(*pte)) | 615 | if (pte && pte_present(*pte) && !pte_exec(*pte)) |
| 394 | printk(KERN_CRIT "kernel tried to execute " | 616 | printk(nx_warning, current_uid()); |
| 395 | "NX-protected page - exploit attempt? " | ||
| 396 | "(uid: %d)\n", current_uid()); | ||
| 397 | } | 617 | } |
| 398 | #endif | ||
| 399 | 618 | ||
| 400 | printk(KERN_ALERT "BUG: unable to handle kernel "); | 619 | printk(KERN_ALERT "BUG: unable to handle kernel "); |
| 401 | if (address < PAGE_SIZE) | 620 | if (address < PAGE_SIZE) |
| 402 | printk(KERN_CONT "NULL pointer dereference"); | 621 | printk(KERN_CONT "NULL pointer dereference"); |
| 403 | else | 622 | else |
| 404 | printk(KERN_CONT "paging request"); | 623 | printk(KERN_CONT "paging request"); |
| 624 | |||
| 405 | printk(KERN_CONT " at %p\n", (void *) address); | 625 | printk(KERN_CONT " at %p\n", (void *) address); |
| 406 | printk(KERN_ALERT "IP:"); | 626 | printk(KERN_ALERT "IP:"); |
| 407 | printk_address(regs->ip, 1); | 627 | printk_address(regs->ip, 1); |
| 628 | |||
| 408 | dump_pagetable(address); | 629 | dump_pagetable(address); |
| 409 | } | 630 | } |
| 410 | 631 | ||
| 411 | #ifdef CONFIG_X86_64 | 632 | static noinline void |
| 412 | static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs, | 633 | pgtable_bad(struct pt_regs *regs, unsigned long error_code, |
| 413 | unsigned long error_code) | 634 | unsigned long address) |
| 414 | { | 635 | { |
| 415 | unsigned long flags = oops_begin(); | ||
| 416 | int sig = SIGKILL; | ||
| 417 | struct task_struct *tsk; | 636 | struct task_struct *tsk; |
| 637 | unsigned long flags; | ||
| 638 | int sig; | ||
| 639 | |||
| 640 | flags = oops_begin(); | ||
| 641 | tsk = current; | ||
| 642 | sig = SIGKILL; | ||
| 418 | 643 | ||
| 419 | printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", | 644 | printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", |
| 420 | current->comm, address); | 645 | tsk->comm, address); |
| 421 | dump_pagetable(address); | 646 | dump_pagetable(address); |
| 422 | tsk = current; | 647 | |
| 423 | tsk->thread.cr2 = address; | 648 | tsk->thread.cr2 = address; |
| 424 | tsk->thread.trap_no = 14; | 649 | tsk->thread.trap_no = 14; |
| 425 | tsk->thread.error_code = error_code; | 650 | tsk->thread.error_code = error_code; |
| 651 | |||
| 426 | if (__die("Bad pagetable", regs, error_code)) | 652 | if (__die("Bad pagetable", regs, error_code)) |
| 427 | sig = 0; | 653 | sig = 0; |
| 654 | |||
| 428 | oops_end(flags, regs, sig); | 655 | oops_end(flags, regs, sig); |
| 429 | } | 656 | } |
| 430 | #endif | 657 | |
| 658 | static noinline void | ||
| 659 | no_context(struct pt_regs *regs, unsigned long error_code, | ||
| 660 | unsigned long address) | ||
| 661 | { | ||
| 662 | struct task_struct *tsk = current; | ||
| 663 | unsigned long *stackend; | ||
| 664 | unsigned long flags; | ||
| 665 | int sig; | ||
| 666 | |||
| 667 | /* Are we prepared to handle this kernel fault? */ | ||
| 668 | if (fixup_exception(regs)) | ||
| 669 | return; | ||
| 670 | |||
| 671 | /* | ||
| 672 | * 32-bit: | ||
| 673 | * | ||
| 674 | * Valid to do another page fault here, because if this fault | ||
| 675 | * had been triggered by is_prefetch fixup_exception would have | ||
| 676 | * handled it. | ||
| 677 | * | ||
| 678 | * 64-bit: | ||
| 679 | * | ||
| 680 | * Hall of shame of CPU/BIOS bugs. | ||
| 681 | */ | ||
| 682 | if (is_prefetch(regs, error_code, address)) | ||
| 683 | return; | ||
| 684 | |||
| 685 | if (is_errata93(regs, address)) | ||
| 686 | return; | ||
| 687 | |||
| 688 | /* | ||
| 689 | * Oops. The kernel tried to access some bad page. We'll have to | ||
| 690 | * terminate things with extreme prejudice: | ||
| 691 | */ | ||
| 692 | flags = oops_begin(); | ||
| 693 | |||
| 694 | show_fault_oops(regs, error_code, address); | ||
| 695 | |||
| 696 | stackend = end_of_stack(tsk); | ||
| 697 | if (*stackend != STACK_END_MAGIC) | ||
| 698 | printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); | ||
| 699 | |||
| 700 | tsk->thread.cr2 = address; | ||
| 701 | tsk->thread.trap_no = 14; | ||
| 702 | tsk->thread.error_code = error_code; | ||
| 703 | |||
| 704 | sig = SIGKILL; | ||
| 705 | if (__die("Oops", regs, error_code)) | ||
| 706 | sig = 0; | ||
| 707 | |||
| 708 | /* Executive summary in case the body of the oops scrolled away */ | ||
| 709 | printk(KERN_EMERG "CR2: %016lx\n", address); | ||
| 710 | |||
| 711 | oops_end(flags, regs, sig); | ||
| 712 | } | ||
| 713 | |||
| 714 | /* | ||
| 715 | * Print out info about fatal segfaults, if the show_unhandled_signals | ||
| 716 | * sysctl is set: | ||
| 717 | */ | ||
| 718 | static inline void | ||
| 719 | show_signal_msg(struct pt_regs *regs, unsigned long error_code, | ||
| 720 | unsigned long address, struct task_struct *tsk) | ||
| 721 | { | ||
| 722 | if (!unhandled_signal(tsk, SIGSEGV)) | ||
| 723 | return; | ||
| 724 | |||
| 725 | if (!printk_ratelimit()) | ||
| 726 | return; | ||
| 727 | |||
| 728 | printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", | ||
| 729 | task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, | ||
| 730 | tsk->comm, task_pid_nr(tsk), address, | ||
| 731 | (void *)regs->ip, (void *)regs->sp, error_code); | ||
| 732 | |||
| 733 | print_vma_addr(KERN_CONT " in ", regs->ip); | ||
| 734 | |||
| 735 | printk(KERN_CONT "\n"); | ||
| 736 | } | ||
| 737 | |||
| 738 | static void | ||
| 739 | __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | ||
| 740 | unsigned long address, int si_code) | ||
| 741 | { | ||
| 742 | struct task_struct *tsk = current; | ||
| 743 | |||
| 744 | /* User mode accesses just cause a SIGSEGV */ | ||
| 745 | if (error_code & PF_USER) { | ||
| 746 | /* | ||
| 747 | * It's possible to have interrupts off here: | ||
| 748 | */ | ||
| 749 | local_irq_enable(); | ||
| 750 | |||
| 751 | /* | ||
| 752 | * Valid to do another page fault here because this one came | ||
| 753 | * from user space: | ||
| 754 | */ | ||
| 755 | if (is_prefetch(regs, error_code, address)) | ||
| 756 | return; | ||
| 757 | |||
| 758 | if (is_errata100(regs, address)) | ||
| 759 | return; | ||
| 760 | |||
| 761 | if (unlikely(show_unhandled_signals)) | ||
| 762 | show_signal_msg(regs, error_code, address, tsk); | ||
| 763 | |||
| 764 | /* Kernel addresses are always protection faults: */ | ||
| 765 | tsk->thread.cr2 = address; | ||
| 766 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); | ||
| 767 | tsk->thread.trap_no = 14; | ||
| 768 | |||
| 769 | force_sig_info_fault(SIGSEGV, si_code, address, tsk); | ||
| 770 | |||
| 771 | return; | ||
| 772 | } | ||
| 773 | |||
| 774 | if (is_f00f_bug(regs, address)) | ||
| 775 | return; | ||
| 776 | |||
| 777 | no_context(regs, error_code, address); | ||
| 778 | } | ||
| 779 | |||
| 780 | static noinline void | ||
| 781 | bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | ||
| 782 | unsigned long address) | ||
| 783 | { | ||
| 784 | __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); | ||
| 785 | } | ||
| 786 | |||
| 787 | static void | ||
| 788 | __bad_area(struct pt_regs *regs, unsigned long error_code, | ||
| 789 | unsigned long address, int si_code) | ||
| 790 | { | ||
| 791 | struct mm_struct *mm = current->mm; | ||
| 792 | |||
| 793 | /* | ||
| 794 | * Something tried to access memory that isn't in our memory map.. | ||
| 795 | * Fix it, but check if it's kernel or user first.. | ||
| 796 | */ | ||
| 797 | up_read(&mm->mmap_sem); | ||
| 798 | |||
| 799 | __bad_area_nosemaphore(regs, error_code, address, si_code); | ||
| 800 | } | ||
| 801 | |||
| 802 | static noinline void | ||
| 803 | bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) | ||
| 804 | { | ||
| 805 | __bad_area(regs, error_code, address, SEGV_MAPERR); | ||
| 806 | } | ||
| 807 | |||
| 808 | static noinline void | ||
| 809 | bad_area_access_error(struct pt_regs *regs, unsigned long error_code, | ||
| 810 | unsigned long address) | ||
| 811 | { | ||
| 812 | __bad_area(regs, error_code, address, SEGV_ACCERR); | ||
| 813 | } | ||
| 814 | |||
| 815 | /* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */ | ||
| 816 | static void | ||
| 817 | out_of_memory(struct pt_regs *regs, unsigned long error_code, | ||
| 818 | unsigned long address) | ||
| 819 | { | ||
| 820 | /* | ||
| 821 | * We ran out of memory, call the OOM killer, and return the userspace | ||
| 822 | * (which will retry the fault, or kill us if we got oom-killed): | ||
| 823 | */ | ||
| 824 | up_read(¤t->mm->mmap_sem); | ||
| 825 | |||
| 826 | pagefault_out_of_memory(); | ||
| 827 | } | ||
| 828 | |||
| 829 | static void | ||
| 830 | do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address) | ||
| 831 | { | ||
| 832 | struct task_struct *tsk = current; | ||
| 833 | struct mm_struct *mm = tsk->mm; | ||
| 834 | |||
| 835 | up_read(&mm->mmap_sem); | ||
| 836 | |||
| 837 | /* Kernel mode? Handle exceptions or die: */ | ||
| 838 | if (!(error_code & PF_USER)) | ||
| 839 | no_context(regs, error_code, address); | ||
| 840 | |||
| 841 | /* User-space => ok to do another page fault: */ | ||
| 842 | if (is_prefetch(regs, error_code, address)) | ||
| 843 | return; | ||
| 844 | |||
| 845 | tsk->thread.cr2 = address; | ||
| 846 | tsk->thread.error_code = error_code; | ||
| 847 | tsk->thread.trap_no = 14; | ||
| 848 | |||
| 849 | force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); | ||
| 850 | } | ||
| 851 | |||
| 852 | static noinline void | ||
| 853 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, | ||
| 854 | unsigned long address, unsigned int fault) | ||
| 855 | { | ||
| 856 | if (fault & VM_FAULT_OOM) { | ||
| 857 | out_of_memory(regs, error_code, address); | ||
| 858 | } else { | ||
| 859 | if (fault & VM_FAULT_SIGBUS) | ||
| 860 | do_sigbus(regs, error_code, address); | ||
| 861 | else | ||
| 862 | BUG(); | ||
| 863 | } | ||
| 864 | } | ||
| 431 | 865 | ||
| 432 | static int spurious_fault_check(unsigned long error_code, pte_t *pte) | 866 | static int spurious_fault_check(unsigned long error_code, pte_t *pte) |
| 433 | { | 867 | { |
| 434 | if ((error_code & PF_WRITE) && !pte_write(*pte)) | 868 | if ((error_code & PF_WRITE) && !pte_write(*pte)) |
| 435 | return 0; | 869 | return 0; |
| 870 | |||
| 436 | if ((error_code & PF_INSTR) && !pte_exec(*pte)) | 871 | if ((error_code & PF_INSTR) && !pte_exec(*pte)) |
| 437 | return 0; | 872 | return 0; |
| 438 | 873 | ||
@@ -440,21 +875,25 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
| 440 | } | 875 | } |
| 441 | 876 | ||
| 442 | /* | 877 | /* |
| 443 | * Handle a spurious fault caused by a stale TLB entry. This allows | 878 | * Handle a spurious fault caused by a stale TLB entry. |
| 444 | * us to lazily refresh the TLB when increasing the permissions of a | 879 | * |
| 445 | * kernel page (RO -> RW or NX -> X). Doing it eagerly is very | 880 | * This allows us to lazily refresh the TLB when increasing the |
| 446 | * expensive since that implies doing a full cross-processor TLB | 881 | * permissions of a kernel page (RO -> RW or NX -> X). Doing it |
| 447 | * flush, even if no stale TLB entries exist on other processors. | 882 | * eagerly is very expensive since that implies doing a full |
| 883 | * cross-processor TLB flush, even if no stale TLB entries exist | ||
| 884 | * on other processors. | ||
| 885 | * | ||
| 448 | * There are no security implications to leaving a stale TLB when | 886 | * There are no security implications to leaving a stale TLB when |
| 449 | * increasing the permissions on a page. | 887 | * increasing the permissions on a page. |
| 450 | */ | 888 | */ |
| 451 | static int spurious_fault(unsigned long address, | 889 | static noinline int |
| 452 | unsigned long error_code) | 890 | spurious_fault(unsigned long error_code, unsigned long address) |
| 453 | { | 891 | { |
| 454 | pgd_t *pgd; | 892 | pgd_t *pgd; |
| 455 | pud_t *pud; | 893 | pud_t *pud; |
| 456 | pmd_t *pmd; | 894 | pmd_t *pmd; |
| 457 | pte_t *pte; | 895 | pte_t *pte; |
| 896 | int ret; | ||
| 458 | 897 | ||
| 459 | /* Reserved-bit violation or user access to kernel space? */ | 898 | /* Reserved-bit violation or user access to kernel space? */ |
| 460 | if (error_code & (PF_USER | PF_RSVD)) | 899 | if (error_code & (PF_USER | PF_RSVD)) |
@@ -482,127 +921,71 @@ static int spurious_fault(unsigned long address,
| 482 | if (!pte_present(*pte)) | 921 | if (!pte_present(*pte)) |
| 483 | return 0; | 922 | return 0; |
| 484 | 923 | ||
| 485 | return spurious_fault_check(error_code, pte); | 924 | ret = spurious_fault_check(error_code, pte); |
| 486 | } | 925 | if (!ret) |
| 487 | 926 | return 0; | |
| 488 | /* | ||
| 489 | * X86_32 | ||
| 490 | * Handle a fault on the vmalloc or module mapping area | ||
| 491 | * | ||
| 492 | * X86_64 | ||
| 493 | * Handle a fault on the vmalloc area | ||
| 494 | * | ||
| 495 | * This assumes no large pages in there. | ||
| 496 | */ | ||
| 497 | static int vmalloc_fault(unsigned long address) | ||
| 498 | { | ||
| 499 | #ifdef CONFIG_X86_32 | ||
| 500 | unsigned long pgd_paddr; | ||
| 501 | pmd_t *pmd_k; | ||
| 502 | pte_t *pte_k; | ||
| 503 | |||
| 504 | /* Make sure we are in vmalloc area */ | ||
| 505 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | ||
| 506 | return -1; | ||
| 507 | 927 | ||
| 508 | /* | 928 | /* |
| 509 | * Synchronize this task's top level page-table | 929 | * Make sure we have permissions in PMD. |
| 510 | * with the 'reference' page table. | 930 | * If not, then there's a bug in the page tables: |
| 511 | * | ||
| 512 | * Do _not_ use "current" here. We might be inside | ||
| 513 | * an interrupt in the middle of a task switch.. | ||
| 514 | */ | 931 | */ |
| 515 | pgd_paddr = read_cr3(); | 932 | ret = spurious_fault_check(error_code, (pte_t *) pmd); |
| 516 | pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); | 933 | WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); |
| 517 | if (!pmd_k) | ||
| 518 | return -1; | ||
| 519 | pte_k = pte_offset_kernel(pmd_k, address); | ||
| 520 | if (!pte_present(*pte_k)) | ||
| 521 | return -1; | ||
| 522 | return 0; | ||
| 523 | #else | ||
| 524 | pgd_t *pgd, *pgd_ref; | ||
| 525 | pud_t *pud, *pud_ref; | ||
| 526 | pmd_t *pmd, *pmd_ref; | ||
| 527 | pte_t *pte, *pte_ref; | ||
| 528 | 934 | ||
| 529 | /* Make sure we are in vmalloc area */ | 935 | return ret; |
| 530 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | 936 | } |
| 531 | return -1; | ||
| 532 | 937 | ||
| 533 | /* Copy kernel mappings over when needed. This can also | 938 | int show_unhandled_signals = 1; |
| 534 | happen within a race in page table update. In the later | ||
| 535 | case just flush. */ | ||
| 536 | 939 | ||
| 537 | pgd = pgd_offset(current->active_mm, address); | 940 | static inline int |
| 538 | pgd_ref = pgd_offset_k(address); | 941 | access_error(unsigned long error_code, int write, struct vm_area_struct *vma) |
| 539 | if (pgd_none(*pgd_ref)) | 942 | { |
| 540 | return -1; | 943 | if (write) { |
| 541 | if (pgd_none(*pgd)) | 944 | /* write, present and write, not present: */ |
| 542 | set_pgd(pgd, *pgd_ref); | 945 | if (unlikely(!(vma->vm_flags & VM_WRITE))) |
| 543 | else | 946 | return 1; |
| 544 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | 947 | return 0; |
| 948 | } | ||
| 545 | 949 | ||
| 546 | /* Below here mismatches are bugs because these lower tables | 950 | /* read, present: */ |
| 547 | are shared */ | 951 | if (unlikely(error_code & PF_PROT)) |
| 952 | return 1; | ||
| 953 | |||
| 954 | /* read, not present: */ | ||
| 955 | if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) | ||
| 956 | return 1; | ||
| 548 | 957 | ||
| 549 | pud = pud_offset(pgd, address); | ||
| 550 | pud_ref = pud_offset(pgd_ref, address); | ||
| 551 | if (pud_none(*pud_ref)) | ||
| 552 | return -1; | ||
| 553 | if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) | ||
| 554 | BUG(); | ||
| 555 | pmd = pmd_offset(pud, address); | ||
| 556 | pmd_ref = pmd_offset(pud_ref, address); | ||
| 557 | if (pmd_none(*pmd_ref)) | ||
| 558 | return -1; | ||
| 559 | if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) | ||
| 560 | BUG(); | ||
| 561 | pte_ref = pte_offset_kernel(pmd_ref, address); | ||
| 562 | if (!pte_present(*pte_ref)) | ||
| 563 | return -1; | ||
| 564 | pte = pte_offset_kernel(pmd, address); | ||
| 565 | /* Don't use pte_page here, because the mappings can point | ||
| 566 | outside mem_map, and the NUMA hash lookup cannot handle | ||
| 567 | that. */ | ||
| 568 | if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) | ||
| 569 | BUG(); | ||
| 570 | return 0; | 958 | return 0; |
| 571 | #endif | ||
| 572 | } | 959 | } |
| 573 | 960 | ||
| 574 | int show_unhandled_signals = 1; | 961 | static int fault_in_kernel_space(unsigned long address) |
| 962 | { | ||
| 963 | return address >= TASK_SIZE_MAX; | ||
| 964 | } | ||
| 575 | 965 | ||
| 576 | /* | 966 | /* |
| 577 | * This routine handles page faults. It determines the address, | 967 | * This routine handles page faults. It determines the address, |
| 578 | * and the problem, and then passes it off to one of the appropriate | 968 | * and the problem, and then passes it off to one of the appropriate |
| 579 | * routines. | 969 | * routines. |
| 580 | */ | 970 | */ |
| 581 | #ifdef CONFIG_X86_64 | 971 | dotraplinkage void __kprobes |
| 582 | asmlinkage | 972 | do_page_fault(struct pt_regs *regs, unsigned long error_code) |
| 583 | #endif | ||
| 584 | void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | ||
| 585 | { | 973 | { |
| 586 | struct task_struct *tsk; | ||
| 587 | struct mm_struct *mm; | ||
| 588 | struct vm_area_struct *vma; | 974 | struct vm_area_struct *vma; |
| 975 | struct task_struct *tsk; | ||
| 589 | unsigned long address; | 976 | unsigned long address; |
| 590 | int write, si_code; | 977 | struct mm_struct *mm; |
| 978 | int write; | ||
| 591 | int fault; | 979 | int fault; |
| 592 | #ifdef CONFIG_X86_64 | ||
| 593 | unsigned long flags; | ||
| 594 | int sig; | ||
| 595 | #endif | ||
| 596 | 980 | ||
| 597 | tsk = current; | 981 | tsk = current; |
| 598 | mm = tsk->mm; | 982 | mm = tsk->mm; |
| 983 | |||
| 599 | prefetchw(&mm->mmap_sem); | 984 | prefetchw(&mm->mmap_sem); |
| 600 | 985 | ||
| 601 | /* get the address */ | 986 | /* Get the faulting address: */ |
| 602 | address = read_cr2(); | 987 | address = read_cr2(); |
| 603 | 988 | ||
| 604 | si_code = SEGV_MAPERR; | ||
| 605 | |||
| 606 | if (unlikely(kmmio_fault(regs, address))) | 989 | if (unlikely(kmmio_fault(regs, address))) |
| 607 | return; | 990 | return; |
| 608 | 991 | ||
@@ -619,319 +1002,147 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
| 619 | * (error_code & 4) == 0, and that the fault was not a | 1002 | * (error_code & 4) == 0, and that the fault was not a |
| 620 | * protection error (error_code & 9) == 0. | 1003 | * protection error (error_code & 9) == 0. |
| 621 | */ | 1004 | */ |
| 622 | #ifdef CONFIG_X86_32 | 1005 | if (unlikely(fault_in_kernel_space(address))) { |
| 623 | if (unlikely(address >= TASK_SIZE)) { | ||
| 624 | #else | ||
| 625 | if (unlikely(address >= TASK_SIZE64)) { | ||
| 626 | #endif | ||
| 627 | if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && | 1006 | if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && |
| 628 | vmalloc_fault(address) >= 0) | 1007 | vmalloc_fault(address) >= 0) |
| 629 | return; | 1008 | return; |
| 630 | 1009 | ||
| 631 | /* Can handle a stale RO->RW TLB */ | 1010 | /* Can handle a stale RO->RW TLB: */ |
| 632 | if (spurious_fault(address, error_code)) | 1011 | if (spurious_fault(error_code, address)) |
| 633 | return; | 1012 | return; |
| 634 | 1013 | ||
| 635 | /* kprobes don't want to hook the spurious faults. */ | 1014 | /* kprobes don't want to hook the spurious faults: */ |
| 636 | if (notify_page_fault(regs)) | 1015 | if (notify_page_fault(regs)) |
| 637 | return; | 1016 | return; |
| 638 | /* | 1017 | /* |
| 639 | * Don't take the mm semaphore here. If we fixup a prefetch | 1018 | * Don't take the mm semaphore here. If we fixup a prefetch |
| 640 | * fault we could otherwise deadlock. | 1019 | * fault we could otherwise deadlock: |
| 641 | */ | 1020 | */ |
| 642 | goto bad_area_nosemaphore; | 1021 | bad_area_nosemaphore(regs, error_code, address); |
| 643 | } | ||
| 644 | 1022 | ||
| 645 | /* kprobes don't want to hook the spurious faults. */ | ||
| 646 | if (notify_page_fault(regs)) | ||
| 647 | return; | 1023 | return; |
| 1024 | } | ||
| 648 | 1025 | ||
| 1026 | /* kprobes don't want to hook the spurious faults: */ | ||
| 1027 | if (unlikely(notify_page_fault(regs))) | ||
| 1028 | return; | ||
| 649 | /* | 1029 | /* |
| 650 | * It's safe to allow irq's after cr2 has been saved and the | 1030 | * It's safe to allow irq's after cr2 has been saved and the |
| 651 | * vmalloc fault has been handled. | 1031 | * vmalloc fault has been handled. |
| 652 | * | 1032 | * |
| 653 | * User-mode registers count as a user access even for any | 1033 | * User-mode registers count as a user access even for any |
| 654 | * potential system fault or CPU buglet. | 1034 | * potential system fault or CPU buglet: |
| 655 | */ | 1035 | */ |
| 656 | if (user_mode_vm(regs)) { | 1036 | if (user_mode_vm(regs)) { |
| 657 | local_irq_enable(); | 1037 | local_irq_enable(); |
| 658 | error_code |= PF_USER; | 1038 | error_code |= PF_USER; |
| 659 | } else if (regs->flags & X86_EFLAGS_IF) | 1039 | } else { |
| 660 | local_irq_enable(); | 1040 | if (regs->flags & X86_EFLAGS_IF) |
| 1041 | local_irq_enable(); | ||
| 1042 | } | ||
| 661 | 1043 | ||
| 662 | #ifdef CONFIG_X86_64 | ||
| 663 | if (unlikely(error_code & PF_RSVD)) | 1044 | if (unlikely(error_code & PF_RSVD)) |
| 664 | pgtable_bad(address, regs, error_code); | 1045 | pgtable_bad(regs, error_code, address); |
| 665 | #endif | ||
| 666 | 1046 | ||
| 667 | /* | 1047 | /* |
| 668 | * If we're in an interrupt, have no user context or are running in an | 1048 | * If we're in an interrupt, have no user context or are running |
| 669 | * atomic region then we must not take the fault. | 1049 | * in an atomic region then we must not take the fault: |
| 670 | */ | 1050 | */ |
| 671 | if (unlikely(in_atomic() || !mm)) | 1051 | if (unlikely(in_atomic() || !mm)) { |
| 672 | goto bad_area_nosemaphore; | 1052 | bad_area_nosemaphore(regs, error_code, address); |
| 1053 | return; | ||
| 1054 | } | ||
| 673 | 1055 | ||
| 674 | /* | 1056 | /* |
| 675 | * When running in the kernel we expect faults to occur only to | 1057 | * When running in the kernel we expect faults to occur only to |
| 676 | * addresses in user space. All other faults represent errors in the | 1058 | * addresses in user space. All other faults represent errors in |
| 677 | * kernel and should generate an OOPS. Unfortunately, in the case of an | 1059 | * the kernel and should generate an OOPS. Unfortunately, in the |
| 678 | * erroneous fault occurring in a code path which already holds mmap_sem | 1060 | * case of an erroneous fault occurring in a code path which already |
| 679 | * we will deadlock attempting to validate the fault against the | 1061 | * holds mmap_sem we will deadlock attempting to validate the fault |
| 680 | * address space. Luckily the kernel only validly references user | 1062 | * against the address space. Luckily the kernel only validly |
| 681 | * space from well defined areas of code, which are listed in the | 1063 | * references user space from well defined areas of code, which are |
| 682 | * exceptions table. | 1064 | * listed in the exceptions table. |
| 683 | * | 1065 | * |
| 684 | * As the vast majority of faults will be valid we will only perform | 1066 | * As the vast majority of faults will be valid we will only perform |
| 685 | * the source reference check when there is a possibility of a deadlock. | 1067 | * the source reference check when there is a possibility of a |
| 686 | * Attempt to lock the address space, if we cannot we then validate the | 1068 | * deadlock. Attempt to lock the address space, if we cannot we then |
| 687 | * source. If this is invalid we can skip the address space check, | 1069 | * validate the source. If this is invalid we can skip the address |
| 688 | * thus avoiding the deadlock. | 1070 | * space check, thus avoiding the deadlock: |
| 689 | */ | 1071 | */ |
| 690 | if (!down_read_trylock(&mm->mmap_sem)) { | 1072 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) { |
| 691 | if ((error_code & PF_USER) == 0 && | 1073 | if ((error_code & PF_USER) == 0 && |
| 692 | !search_exception_tables(regs->ip)) | 1074 | !search_exception_tables(regs->ip)) { |
| 693 | goto bad_area_nosemaphore; | 1075 | bad_area_nosemaphore(regs, error_code, address); |
| 1076 | return; | ||
| 1077 | } | ||
| 694 | down_read(&mm->mmap_sem); | 1078 | down_read(&mm->mmap_sem); |
| 1079 | } else { | ||
| 1080 | /* | ||
| 1081 | * The above down_read_trylock() might have succeeded in | ||
| 1082 | * which case we'll have missed the might_sleep() from | ||
| 1083 | * down_read(): | ||
| 1084 | */ | ||
| 1085 | might_sleep(); | ||
| 695 | } | 1086 | } |
| 696 | 1087 | ||
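
Editor's note: the pattern in this hunk is general: try the lock, and only fall back to a blocking acquire once the caller has been validated as one that cannot already hold it. A userspace analogue, with a pthread rwlock standing in for mmap_sem and a stub standing in for search_exception_tables() (all names below are illustrative assumptions, not kernel API):

```c
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stub standing in for search_exception_tables(regs->ip): in the
 * kernel this whitelists code regions that are legitimate fault
 * sites and therefore known not to hold the lock. */
static bool caller_is_whitelisted(void *ip)
{
	(void)ip;
	return false;
}

static bool lock_for_fault(void *ip, bool user_fault)
{
	if (pthread_rwlock_tryrdlock(&map_lock) == 0)
		return true;		/* fast path: uncontended */

	/*
	 * Contended.  A kernel-mode caller outside the whitelist may
	 * itself be the holder of the lock; blocking here would
	 * self-deadlock, so fail the lookup instead.
	 */
	if (!user_fault && !caller_is_whitelisted(ip))
		return false;		/* i.e. bad_area_nosemaphore() */

	pthread_rwlock_rdlock(&map_lock);	/* safe to block now */
	return true;
}

int main(void)
{
	/* Uncontended: the trylock fast path succeeds. */
	return lock_for_fault((void *)0, true) ? 0 : 1;
}
```

The added `might_sleep()` in the else branch is worth noting too: when the trylock succeeds, it preserves the debugging check that a blocking `down_read()` would otherwise have performed.
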
| 697 | vma = find_vma(mm, address); | 1088 | vma = find_vma(mm, address); |
| 698 | if (!vma) | 1089 | if (unlikely(!vma)) { |
| 699 | goto bad_area; | 1090 | bad_area(regs, error_code, address); |
| 700 | if (vma->vm_start <= address) | 1091 | return; |
| 1092 | } | ||
| 1093 | if (likely(vma->vm_start <= address)) | ||
| 701 | goto good_area; | 1094 | goto good_area; |
| 702 | if (!(vma->vm_flags & VM_GROWSDOWN)) | 1095 | if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { |
| 703 | goto bad_area; | 1096 | bad_area(regs, error_code, address); |
| 1097 | return; | ||
| 1098 | } | ||
| 704 | if (error_code & PF_USER) { | 1099 | if (error_code & PF_USER) { |
| 705 | /* | 1100 | /* |
| 706 | * Accessing the stack below %sp is always a bug. | 1101 | * Accessing the stack below %sp is always a bug. |
| 707 | * The large cushion allows instructions like enter | 1102 | * The large cushion allows instructions like enter |
| 708 | * and pusha to work. ("enter $65535,$31" pushes | 1103 | * and pusha to work. ("enter $65535, $31" pushes |
| 709 | * 32 pointers and then decrements %sp by 65535.) | 1104 | * 32 pointers and then decrements %sp by 65535.) |
| 710 | */ | 1105 | */ |
| 711 | if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp) | 1106 | if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { |
| 712 | goto bad_area; | 1107 | bad_area(regs, error_code, address); |
| 1108 | return; | ||
| 1109 | } | ||
| 713 | } | 1110 | } |
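
Editor's note: the cushion constant is easier to sanity-check with the arithmetic written out. `enter $65535, $31` decrements %sp by 65535 and pushes 32 frame pointers, so the largest legitimate access below the pre-fault %sp is just under 65536 + 32 * sizeof(unsigned long) bytes. A runnable check (LP64 sizes assumed):

```c
#include <stdio.h>

int main(void)
{
	/* 65536 rounds up the "enter $65535, $31" displacement of
	 * 65535; 32 * sizeof(unsigned long) covers the 32 pushed
	 * pointers: 65536 + 256 = 65792 bytes on x86-64. */
	unsigned long cushion = 65536 + 32 * sizeof(unsigned long);

	printf("stack cushion: %lu bytes\n", cushion);
	return 0;
}
```
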
| 714 | if (expand_stack(vma, address)) | 1111 | if (unlikely(expand_stack(vma, address))) { |
| 715 | goto bad_area; | 1112 | bad_area(regs, error_code, address); |
| 716 | /* | 1113 | return; |
| 717 | * Ok, we have a good vm_area for this memory access, so | 1114 | } |
| 718 | * we can handle it.. | 1115 | |
| 719 | */ | 1116 | /* |
| 1117 | * Ok, we have a good vm_area for this memory access, so | ||
| 1118 | * we can handle it.. | ||
| 1119 | */ | ||
| 720 | good_area: | 1120 | good_area: |
| 721 | si_code = SEGV_ACCERR; | 1121 | write = error_code & PF_WRITE; |
| 722 | write = 0; | 1122 | |
| 723 | switch (error_code & (PF_PROT|PF_WRITE)) { | 1123 | if (unlikely(access_error(error_code, write, vma))) { |
| 724 | default: /* 3: write, present */ | 1124 | bad_area_access_error(regs, error_code, address); |
| 725 | /* fall through */ | 1125 | return; |
| 726 | case PF_WRITE: /* write, not present */ | ||
| 727 | if (!(vma->vm_flags & VM_WRITE)) | ||
| 728 | goto bad_area; | ||
| 729 | write++; | ||
| 730 | break; | ||
| 731 | case PF_PROT: /* read, present */ | ||
| 732 | goto bad_area; | ||
| 733 | case 0: /* read, not present */ | ||
| 734 | if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) | ||
| 735 | goto bad_area; | ||
| 736 | } | 1126 | } |
| 737 | 1127 | ||
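
Editor's note: the removed switch and the new `access_error()` call encode the same permission matrix. Reconstructed from the switch it replaces, the helper's shape is plausibly the following (the real definition lands earlier in this patch, so treat this as a sketch):

```c
static int access_error(unsigned long error_code, int write,
			struct vm_area_struct *vma)
{
	if (write) {
		/* Write, present or not: the vma must be writable. */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* Read, present (PF_PROT set): always a permission error. */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* Read, not present: readable, executable or writable is ok. */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}
```
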
| 738 | /* | 1128 | /* |
| 739 | * If for any reason at all we couldn't handle the fault, | 1129 | * If for any reason at all we couldn't handle the fault, |
| 740 | * make sure we exit gracefully rather than endlessly redo | 1130 | * make sure we exit gracefully rather than endlessly redo |
| 741 | * the fault. | 1131 | * the fault: |
| 742 | */ | 1132 | */ |
| 743 | fault = handle_mm_fault(mm, vma, address, write); | 1133 | fault = handle_mm_fault(mm, vma, address, write); |
| 1134 | |||
| 744 | if (unlikely(fault & VM_FAULT_ERROR)) { | 1135 | if (unlikely(fault & VM_FAULT_ERROR)) { |
| 745 | if (fault & VM_FAULT_OOM) | 1136 | mm_fault_error(regs, error_code, address, fault); |
| 746 | goto out_of_memory; | 1137 | return; |
| 747 | else if (fault & VM_FAULT_SIGBUS) | ||
| 748 | goto do_sigbus; | ||
| 749 | BUG(); | ||
| 750 | } | 1138 | } |
| 1139 | |||
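
Editor's note: `mm_fault_error()` replaces the two goto targets (`out_of_memory`, `do_sigbus`) that this patch deletes further down. From those deleted bodies, its dispatch must look roughly like this sketch (the real helper, and its exact signature, are defined earlier in the patch):

```c
static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fault & VM_FAULT_OOM)
		out_of_memory(regs, error_code, address);
	else if (fault & VM_FAULT_SIGBUS)
		do_sigbus(regs, error_code, address);
	else
		BUG();
}
```
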
| 751 | if (fault & VM_FAULT_MAJOR) | 1140 | if (fault & VM_FAULT_MAJOR) |
| 752 | tsk->maj_flt++; | 1141 | tsk->maj_flt++; |
| 753 | else | 1142 | else |
| 754 | tsk->min_flt++; | 1143 | tsk->min_flt++; |
| 755 | 1144 | ||
| 756 | #ifdef CONFIG_X86_32 | 1145 | check_v8086_mode(regs, address, tsk); |
| 757 | /* | ||
| 758 | * Did it hit the DOS screen memory VA from vm86 mode? | ||
| 759 | */ | ||
| 760 | if (v8086_mode(regs)) { | ||
| 761 | unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT; | ||
| 762 | if (bit < 32) | ||
| 763 | tsk->thread.screen_bitmap |= 1 << bit; | ||
| 764 | } | ||
| 765 | #endif | ||
| 766 | up_read(&mm->mmap_sem); | ||
| 767 | return; | ||
| 768 | 1146 | ||
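
Editor's note: `check_v8086_mode()` absorbs the `#ifdef CONFIG_X86_32` block deleted in this hunk. Reconstructed from that block, the helper records which DOS screen-memory page (virtual 0xA0000 upward) a vm86 task touched, and compiles to nothing on 64-bit:

```c
#ifdef CONFIG_X86_32
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	/* Did it hit the DOS screen memory VA from vm86 mode? */
	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}
#else
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}
#endif
```
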
| 769 | /* | ||
| 770 | * Something tried to access memory that isn't in our memory map.. | ||
| 771 | * Fix it, but check if it's kernel or user first.. | ||
| 772 | */ | ||
| 773 | bad_area: | ||
| 774 | up_read(&mm->mmap_sem); | 1147 | up_read(&mm->mmap_sem); |
| 775 | |||
| 776 | bad_area_nosemaphore: | ||
| 777 | /* User mode accesses just cause a SIGSEGV */ | ||
| 778 | if (error_code & PF_USER) { | ||
| 779 | /* | ||
| 780 | * It's possible to have interrupts off here. | ||
| 781 | */ | ||
| 782 | local_irq_enable(); | ||
| 783 | |||
| 784 | /* | ||
| 785 | * Valid to do another page fault here because this one came | ||
| 786 | * from user space. | ||
| 787 | */ | ||
| 788 | if (is_prefetch(regs, address, error_code)) | ||
| 789 | return; | ||
| 790 | |||
| 791 | if (is_errata100(regs, address)) | ||
| 792 | return; | ||
| 793 | |||
| 794 | if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && | ||
| 795 | printk_ratelimit()) { | ||
| 796 | printk( | ||
| 797 | "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", | ||
| 798 | task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, | ||
| 799 | tsk->comm, task_pid_nr(tsk), address, | ||
| 800 | (void *) regs->ip, (void *) regs->sp, error_code); | ||
| 801 | print_vma_addr(" in ", regs->ip); | ||
| 802 | printk("\n"); | ||
| 803 | } | ||
| 804 | |||
| 805 | tsk->thread.cr2 = address; | ||
| 806 | /* Kernel addresses are always protection faults */ | ||
| 807 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); | ||
| 808 | tsk->thread.trap_no = 14; | ||
| 809 | force_sig_info_fault(SIGSEGV, si_code, address, tsk); | ||
| 810 | return; | ||
| 811 | } | ||
| 812 | |||
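
Editor's note: one line in the deleted block above deserves a gloss: `error_code | (address >= TASK_SIZE)` relies on the C comparison evaluating to 0 or 1, and on bit 0 of the x86 page-fault error code meaning "protection violation", so a user fault on a kernel address is always reported to the task as a protection fault. A runnable demonstration (the TASK_SIZE value below is an i386 3G/1G-split assumption):

```c
#include <stdio.h>

#define PF_PROT   (1UL << 0)		/* bit 0 of the x86 error code */
#define TASK_SIZE 0xC0000000UL		/* assumed i386 3G/1G split */

int main(void)
{
	unsigned long error_code = 0;		/* read, not-present fault */
	unsigned long address	 = 0xC0100000UL; /* a kernel address */

	error_code |= (address >= TASK_SIZE);	/* comparison is 0 or 1 */

	printf("reported error_code = %#lx (PF_PROT %s)\n",
	       error_code, (error_code & PF_PROT) ? "set" : "clear");
	return 0;
}
```
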
| 813 | if (is_f00f_bug(regs, address)) | ||
| 814 | return; | ||
| 815 | |||
| 816 | no_context: | ||
| 817 | /* Are we prepared to handle this kernel fault? */ | ||
| 818 | if (fixup_exception(regs)) | ||
| 819 | return; | ||
| 820 | |||
| 821 | /* | ||
| 822 | * X86_32 | ||
| 823 | * Valid to do another page fault here, because if this fault | ||
| 824 | * had been triggered by is_prefetch fixup_exception would have | ||
| 825 | * handled it. | ||
| 826 | * | ||
| 827 | * X86_64 | ||
| 828 | * Hall of shame of CPU/BIOS bugs. | ||
| 829 | */ | ||
| 830 | if (is_prefetch(regs, address, error_code)) | ||
| 831 | return; | ||
| 832 | |||
| 833 | if (is_errata93(regs, address)) | ||
| 834 | return; | ||
| 835 | |||
| 836 | /* | ||
| 837 | * Oops. The kernel tried to access some bad page. We'll have to | ||
| 838 | * terminate things with extreme prejudice. | ||
| 839 | */ | ||
| 840 | #ifdef CONFIG_X86_32 | ||
| 841 | bust_spinlocks(1); | ||
| 842 | #else | ||
| 843 | flags = oops_begin(); | ||
| 844 | #endif | ||
| 845 | |||
| 846 | show_fault_oops(regs, error_code, address); | ||
| 847 | |||
| 848 | tsk->thread.cr2 = address; | ||
| 849 | tsk->thread.trap_no = 14; | ||
| 850 | tsk->thread.error_code = error_code; | ||
| 851 | |||
| 852 | #ifdef CONFIG_X86_32 | ||
| 853 | die("Oops", regs, error_code); | ||
| 854 | bust_spinlocks(0); | ||
| 855 | do_exit(SIGKILL); | ||
| 856 | #else | ||
| 857 | sig = SIGKILL; | ||
| 858 | if (__die("Oops", regs, error_code)) | ||
| 859 | sig = 0; | ||
| 860 | /* Executive summary in case the body of the oops scrolled away */ | ||
| 861 | printk(KERN_EMERG "CR2: %016lx\n", address); | ||
| 862 | oops_end(flags, regs, sig); | ||
| 863 | #endif | ||
| 864 | |||
| 865 | out_of_memory: | ||
| 866 | /* | ||
| 867 | * We ran out of memory, call the OOM killer, and return to userspace | ||
| 868 | * (which will retry the fault, or kill us if we got oom-killed). | ||
| 869 | */ | ||
| 870 | up_read(&mm->mmap_sem); | ||
| 871 | pagefault_out_of_memory(); | ||
| 872 | return; | ||
| 873 | |||
| 874 | do_sigbus: | ||
| 875 | up_read(&mm->mmap_sem); | ||
| 876 | |||
| 877 | /* Kernel mode? Handle exceptions or die */ | ||
| 878 | if (!(error_code & PF_USER)) | ||
| 879 | goto no_context; | ||
| 880 | #ifdef CONFIG_X86_32 | ||
| 881 | /* User space => ok to do another page fault */ | ||
| 882 | if (is_prefetch(regs, address, error_code)) | ||
| 883 | return; | ||
| 884 | #endif | ||
| 885 | tsk->thread.cr2 = address; | ||
| 886 | tsk->thread.error_code = error_code; | ||
| 887 | tsk->thread.trap_no = 14; | ||
| 888 | force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); | ||
| 889 | } | ||
| 890 | |||
| 891 | DEFINE_SPINLOCK(pgd_lock); | ||
| 892 | LIST_HEAD(pgd_list); | ||
| 893 | |||
| 894 | void vmalloc_sync_all(void) | ||
| 895 | { | ||
| 896 | unsigned long address; | ||
| 897 | |||
| 898 | #ifdef CONFIG_X86_32 | ||
| 899 | if (SHARED_KERNEL_PMD) | ||
| 900 | return; | ||
| 901 | |||
| 902 | for (address = VMALLOC_START & PMD_MASK; | ||
| 903 | address >= TASK_SIZE && address < FIXADDR_TOP; | ||
| 904 | address += PMD_SIZE) { | ||
| 905 | unsigned long flags; | ||
| 906 | struct page *page; | ||
| 907 | |||
| 908 | spin_lock_irqsave(&pgd_lock, flags); | ||
| 909 | list_for_each_entry(page, &pgd_list, lru) { | ||
| 910 | if (!vmalloc_sync_one(page_address(page), | ||
| 911 | address)) | ||
| 912 | break; | ||
| 913 | } | ||
| 914 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
| 915 | } | ||
| 916 | #else /* CONFIG_X86_64 */ | ||
| 917 | for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; | ||
| 918 | address += PGDIR_SIZE) { | ||
| 919 | const pgd_t *pgd_ref = pgd_offset_k(address); | ||
| 920 | unsigned long flags; | ||
| 921 | struct page *page; | ||
| 922 | |||
| 923 | if (pgd_none(*pgd_ref)) | ||
| 924 | continue; | ||
| 925 | spin_lock_irqsave(&pgd_lock, flags); | ||
| 926 | list_for_each_entry(page, &pgd_list, lru) { | ||
| 927 | pgd_t *pgd; | ||
| 928 | pgd = (pgd_t *)page_address(page) + pgd_index(address); | ||
| 929 | if (pgd_none(*pgd)) | ||
| 930 | set_pgd(pgd, *pgd_ref); | ||
| 931 | else | ||
| 932 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | ||
| 933 | } | ||
| 934 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
| 935 | } | ||
| 936 | #endif | ||
| 937 | } | 1148 | } |
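
Editor's note: on the 64-bit side, the loop above walks the vmalloc area one top-level (PGD) entry at a time and copies any entry missing from a process's page directory from the reference kernel tables. The bounds arithmetic can be checked in isolation; the constants below are era-appropriate x86-64 values and are assumptions of this sketch, not part of the patch:

```c
#include <stdio.h>

#define PGDIR_SHIFT	39			/* 4-level paging */
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* 512 GB per pgd entry */
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
#define VMALLOC_START	0xffffc20000000000UL	/* assumed, circa 2.6.29 */
#define VMALLOC_END	0xffffe1ffffffffffUL

int main(void)
{
	unsigned long address;
	unsigned int n = 0;

	/* Same bounds as the CONFIG_X86_64 loop in vmalloc_sync_all(): */
	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
	     address += PGDIR_SIZE)
		n++;

	printf("%u pgd entries span the vmalloc area\n", n);
	return 0;
}
```
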
