author     Harvey Harrison <harvey.harrison@gmail.com>   2008-01-30 07:34:11 -0500
committer  Ingo Molnar <mingo@elte.hu>                   2008-01-30 07:34:11 -0500
commit     c61e211d9989e4c112d3d58db12ad58f9016a3c8 (patch)
tree       17ac1e9220b9becda9bee059b4aa0bb129a56e1f /arch
parent     f8c2ee224d8397364835204c6c0130d08c2e644c (diff)
x86: unify fault_32|64.c
Unify includes in moved fault.c.  Modify Makefiles to pick up unified file.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
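
The unified fault.c keeps the 32-bit and 64-bit paths in a single translation unit and selects between them with CONFIG_X86_32 / CONFIG_X86_PAE preprocessor guards, as the is_prefetch() hunk below shows. Purely as an illustration of that build-time selection pattern, here is a minimal standalone sketch; the PF_INSTR bit value comes from the fault code itself, while the stub function, the nx_enabled stand-in and the -D build flags are hypothetical, not kernel APIs:

/* unify_sketch.c - one source file, two build variants:
 *   cc -DCONFIG_X86_32 unify_sketch.c   ->  32-bit-style path
 *   cc unify_sketch.c                   ->  64-bit-style path
 */
#include <stdio.h>

#define PF_INSTR (1 << 4)	/* bit 4: fault was an instruction fetch */

#ifdef CONFIG_X86_32
static int nx_enabled = 1;	/* stand-in for the kernel's nx_enabled flag */
#endif

/* Stand-in for the shared is_prefetch() decision: an exec fault is never a
 * prefetch, but the 32-bit variant only rejects it when NX is in use.
 * Returning 1 here stands in for "fall through to the opcode scan". */
static int is_prefetch_stub(unsigned long error_code)
{
#ifdef CONFIG_X86_32
	if (nx_enabled && (error_code & PF_INSTR))
		return 0;
	return 1;
#else
	if (error_code & PF_INSTR)
		return 0;
	return 1;
#endif
}

int main(void)
{
#ifdef CONFIG_X86_32
	puts("built the CONFIG_X86_32 variant");
#else
	puts("built the 64-bit variant");
#endif
	printf("exec fault scanned as prefetch? %d\n",
	       is_prefetch_stub(PF_INSTR));
	return 0;
}
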
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/mm/Makefile_32                                      2
-rw-r--r--  arch/x86/mm/Makefile_64                                      2
-rw-r--r--  arch/x86/mm/fault.c (renamed from arch/x86/mm/fault_64.c)   21
-rw-r--r--  arch/x86/mm/fault_32.c                                     949
4 files changed, 14 insertions, 960 deletions
diff --git a/arch/x86/mm/Makefile_32 b/arch/x86/mm/Makefile_32
index ffa6d46a1e73..c36ae88bb543 100644
--- a/arch/x86/mm/Makefile_32
+++ b/arch/x86/mm/Makefile_32
@@ -2,7 +2,7 @@
 # Makefile for the linux i386-specific parts of the memory manager.
 #
 
-obj-y	:= init_32.o pgtable_32.o fault_32.o ioremap.o extable.o pageattr.o mmap.o
+obj-y	:= init_32.o pgtable_32.o fault.o ioremap.o extable.o pageattr.o mmap.o
 
 obj-$(CONFIG_NUMA) += discontig_32.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/x86/mm/Makefile_64 b/arch/x86/mm/Makefile_64
index 27a090c86e9b..688c8c28ac8f 100644
--- a/arch/x86/mm/Makefile_64
+++ b/arch/x86/mm/Makefile_64
@@ -2,7 +2,7 @@
 # Makefile for the linux x86_64-specific parts of the memory manager.
 #
 
-obj-y	:= init_64.o fault_64.o ioremap.o extable.o pageattr.o mmap.o
+obj-y	:= init_64.o fault.o ioremap.o extable.o pageattr.o mmap.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_NUMA) += numa_64.o
 obj-$(CONFIG_K8_NUMA) += k8topology_64.o
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault.c
index 0902719388bc..14a0c6e541de 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault.c
@@ -18,6 +18,8 @@
 #include <linux/tty.h>
 #include <linux/vt_kern.h>		/* For unblank_screen() */
 #include <linux/compiler.h>
+#include <linux/highmem.h>
+#include <linux/bootmem.h>		/* for max_low_pfn */
 #include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <linux/kprobes.h>
@@ -25,6 +27,8 @@
 #include <linux/kdebug.h>
 
 #include <asm/system.h>
+#include <asm/desc.h>
+#include <asm/segment.h>
 #include <asm/pgalloc.h>
 #include <asm/smp.h>
 #include <asm/tlbflush.h>
@@ -88,16 +92,15 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr,
 	unsigned char *max_instr;
 
 #ifdef CONFIG_X86_32
-	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-		     boot_cpu_data.x86 >= 6)) {
-		/* Catch an obscure case of prefetch inside an NX page. */
-		if (nx_enabled && (error_code & PF_INSTR))
-			return 0;
-	} else {
+# ifdef CONFIG_X86_PAE
+	/* If it was a exec fault on NX page, ignore */
+	if (nx_enabled && (error_code & PF_INSTR))
 		return 0;
-	}
-#else
-	/* If it was a exec fault ignore */
+# else
+	return 0;
+# endif
+#else /* CONFIG_X86_64 */
+	/* If it was a exec fault on NX page, ignore */
 	if (error_code & PF_INSTR)
 		return 0;
 #endif
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
deleted file mode 100644
index 4da4625c6968..000000000000
--- a/arch/x86/mm/fault_32.c
+++ /dev/null
@@ -1,949 +0,0 @@
1/*
2 * Copyright (C) 1995 Linus Torvalds
3 */
4
5#include <linux/signal.h>
6#include <linux/sched.h>
7#include <linux/kernel.h>
8#include <linux/errno.h>
9#include <linux/string.h>
10#include <linux/types.h>
11#include <linux/ptrace.h>
12#include <linux/mman.h>
13#include <linux/mm.h>
14#include <linux/smp.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/tty.h>
18#include <linux/vt_kern.h> /* For unblank_screen() */
19#include <linux/highmem.h>
20#include <linux/bootmem.h> /* for max_low_pfn */
21#include <linux/vmalloc.h>
22#include <linux/module.h>
23#include <linux/kprobes.h>
24#include <linux/uaccess.h>
25#include <linux/kdebug.h>
26
27#include <asm/system.h>
28#include <asm/desc.h>
29#include <asm/segment.h>
30
31/*
32 * Page fault error code bits
33 * bit 0 == 0 means no page found, 1 means protection fault
34 * bit 1 == 0 means read, 1 means write
35 * bit 2 == 0 means kernel, 1 means user-mode
36 * bit 3 == 1 means use of reserved bit detected
37 * bit 4 == 1 means fault was an instruction fetch
38 */
39#define PF_PROT (1<<0)
40#define PF_WRITE (1<<1)
41#define PF_USER (1<<2)
42#define PF_RSVD (1<<3)
43#define PF_INSTR (1<<4)
44
45static inline int notify_page_fault(struct pt_regs *regs)
46{
47#ifdef CONFIG_KPROBES
48 int ret = 0;
49
50 /* kprobe_running() needs smp_processor_id() */
51#ifdef CONFIG_X86_32
52 if (!user_mode_vm(regs)) {
53#else
54 if (!user_mode(regs)) {
55#endif
56 preempt_disable();
57 if (kprobe_running() && kprobe_fault_handler(regs, 14))
58 ret = 1;
59 preempt_enable();
60 }
61
62 return ret;
63#else
64 return 0;
65#endif
66}
67
68/*
69 * X86_32
70 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
71 * Check that here and ignore it.
72 *
73 * X86_64
74 * Sometimes the CPU reports invalid exceptions on prefetch.
75 * Check that here and ignore it.
76 *
77 * Opcode checker based on code by Richard Brunner
78 */
79static int is_prefetch(struct pt_regs *regs, unsigned long addr,
80 unsigned long error_code)
81{
82 unsigned char *instr;
83 int scan_more = 1;
84 int prefetch = 0;
85 unsigned char *max_instr;
86
87#ifdef CONFIG_X86_32
88 if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
89 boot_cpu_data.x86 >= 6)) {
90 /* Catch an obscure case of prefetch inside an NX page. */
91 if (nx_enabled && (error_code & PF_INSTR))
92 return 0;
93 } else {
94 return 0;
95 }
96#else
97 /* If it was a exec fault ignore */
98 if (error_code & PF_INSTR)
99 return 0;
100#endif
101
102 instr = (unsigned char *)convert_ip_to_linear(current, regs);
103 max_instr = instr + 15;
104
105 if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
106 return 0;
107
108 while (scan_more && instr < max_instr) {
109 unsigned char opcode;
110 unsigned char instr_hi;
111 unsigned char instr_lo;
112
113 if (probe_kernel_address(instr, opcode))
114 break;
115
116 instr_hi = opcode & 0xf0;
117 instr_lo = opcode & 0x0f;
118 instr++;
119
120 switch (instr_hi) {
121 case 0x20:
122 case 0x30:
123 /*
124 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
125 * In X86_64 long mode, the CPU will signal invalid
126 * opcode if some of these prefixes are present so
127 * X86_64 will never get here anyway
128 */
129 scan_more = ((instr_lo & 7) == 0x6);
130 break;
131#ifdef CONFIG_X86_64
132 case 0x40:
133 /*
134 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
135 * Need to figure out under what instruction mode the
136 * instruction was issued. Could check the LDT for lm,
137 * but for now it's good enough to assume that long
138 * mode only uses well known segments or kernel.
139 */
140 scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
141 break;
142#endif
143 case 0x60:
144 /* 0x64 thru 0x67 are valid prefixes in all modes. */
145 scan_more = (instr_lo & 0xC) == 0x4;
146 break;
147 case 0xF0:
148 /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
149 scan_more = !instr_lo || (instr_lo>>1) == 1;
150 break;
151 case 0x00:
152 /* Prefetch instruction is 0x0F0D or 0x0F18 */
153 scan_more = 0;
154
155 if (probe_kernel_address(instr, opcode))
156 break;
157 prefetch = (instr_lo == 0xF) &&
158 (opcode == 0x0D || opcode == 0x18);
159 break;
160 default:
161 scan_more = 0;
162 break;
163 }
164 }
165 return prefetch;
166}
167
168static void force_sig_info_fault(int si_signo, int si_code,
169 unsigned long address, struct task_struct *tsk)
170{
171 siginfo_t info;
172
173 info.si_signo = si_signo;
174 info.si_errno = 0;
175 info.si_code = si_code;
176 info.si_addr = (void __user *)address;
177 force_sig_info(si_signo, &info, tsk);
178}
179
180#ifdef CONFIG_X86_64
181static int bad_address(void *p)
182{
183 unsigned long dummy;
184 return probe_kernel_address((unsigned long *)p, dummy);
185}
186#endif
187
188void dump_pagetable(unsigned long address)
189{
190#ifdef CONFIG_X86_32
191 __typeof__(pte_val(__pte(0))) page;
192
193 page = read_cr3();
194 page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
195#ifdef CONFIG_X86_PAE
196 printk("*pdpt = %016Lx ", page);
197 if ((page >> PAGE_SHIFT) < max_low_pfn
198 && page & _PAGE_PRESENT) {
199 page &= PAGE_MASK;
200 page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
201 & (PTRS_PER_PMD - 1)];
202 printk(KERN_CONT "*pde = %016Lx ", page);
203 page &= ~_PAGE_NX;
204 }
205#else
206 printk("*pde = %08lx ", page);
207#endif
208
209 /*
210 * We must not directly access the pte in the highpte
211 * case if the page table is located in highmem.
212 * And let's rather not kmap-atomic the pte, just in case
213 * it's allocated already.
214 */
215 if ((page >> PAGE_SHIFT) < max_low_pfn
216 && (page & _PAGE_PRESENT)
217 && !(page & _PAGE_PSE)) {
218 page &= PAGE_MASK;
219 page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
220 & (PTRS_PER_PTE - 1)];
221 printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
222 }
223
224 printk("\n");
225#else /* CONFIG_X86_64 */
226 pgd_t *pgd;
227 pud_t *pud;
228 pmd_t *pmd;
229 pte_t *pte;
230
231 pgd = (pgd_t *)read_cr3();
232
233 pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
234 pgd += pgd_index(address);
235 if (bad_address(pgd)) goto bad;
236 printk("PGD %lx ", pgd_val(*pgd));
237 if (!pgd_present(*pgd)) goto ret;
238
239 pud = pud_offset(pgd, address);
240 if (bad_address(pud)) goto bad;
241 printk("PUD %lx ", pud_val(*pud));
242 if (!pud_present(*pud)) goto ret;
243
244 pmd = pmd_offset(pud, address);
245 if (bad_address(pmd)) goto bad;
246 printk("PMD %lx ", pmd_val(*pmd));
247 if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
248
249 pte = pte_offset_kernel(pmd, address);
250 if (bad_address(pte)) goto bad;
251 printk("PTE %lx", pte_val(*pte));
252ret:
253 printk("\n");
254 return;
255bad:
256 printk("BAD\n");
257#endif
258}
259
260#ifdef CONFIG_X86_32
261static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
262{
263 unsigned index = pgd_index(address);
264 pgd_t *pgd_k;
265 pud_t *pud, *pud_k;
266 pmd_t *pmd, *pmd_k;
267
268 pgd += index;
269 pgd_k = init_mm.pgd + index;
270
271 if (!pgd_present(*pgd_k))
272 return NULL;
273
274 /*
275 * set_pgd(pgd, *pgd_k); here would be useless on PAE
276 * and redundant with the set_pmd() on non-PAE. As would
277 * set_pud.
278 */
279
280 pud = pud_offset(pgd, address);
281 pud_k = pud_offset(pgd_k, address);
282 if (!pud_present(*pud_k))
283 return NULL;
284
285 pmd = pmd_offset(pud, address);
286 pmd_k = pmd_offset(pud_k, address);
287 if (!pmd_present(*pmd_k))
288 return NULL;
289 if (!pmd_present(*pmd)) {
290 set_pmd(pmd, *pmd_k);
291 arch_flush_lazy_mmu_mode();
292 } else
293 BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
294 return pmd_k;
295}
296#endif
297
298#ifdef CONFIG_X86_64
299static const char errata93_warning[] =
300KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
301KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
302KERN_ERR "******* Please consider a BIOS update.\n"
303KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
304#endif
305
306/* Workaround for K8 erratum #93 & buggy BIOS.
307 BIOS SMM functions are required to use a specific workaround
308 to avoid corruption of the 64bit RIP register on C stepping K8.
309 A lot of BIOS that didn't get tested properly miss this.
310 The OS sees this as a page fault with the upper 32bits of RIP cleared.
311 Try to work around it here.
312 Note we only handle faults in kernel here.
313 Does nothing for X86_32
314 */
315static int is_errata93(struct pt_regs *regs, unsigned long address)
316{
317#ifdef CONFIG_X86_64
318 static int warned;
319 if (address != regs->ip)
320 return 0;
321 if ((address >> 32) != 0)
322 return 0;
323 address |= 0xffffffffUL << 32;
324 if ((address >= (u64)_stext && address <= (u64)_etext) ||
325 (address >= MODULES_VADDR && address <= MODULES_END)) {
326 if (!warned) {
327 printk(errata93_warning);
328 warned = 1;
329 }
330 regs->ip = address;
331 return 1;
332 }
333#endif
334 return 0;
335}
336
337/*
338 * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal
339 * addresses >4GB. We catch this in the page fault handler because these
340 * addresses are not reachable. Just detect this case and return. Any code
341 * segment in LDT is compatibility mode.
342 */
343static int is_errata100(struct pt_regs *regs, unsigned long address)
344{
345#ifdef CONFIG_X86_64
346 if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
347 (address >> 32))
348 return 1;
349#endif
350 return 0;
351}
352
353void do_invalid_op(struct pt_regs *, unsigned long);
354
355static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
356{
357#ifdef CONFIG_X86_F00F_BUG
358 unsigned long nr;
359 /*
360 * Pentium F0 0F C7 C8 bug workaround.
361 */
362 if (boot_cpu_data.f00f_bug) {
363 nr = (address - idt_descr.address) >> 3;
364
365 if (nr == 6) {
366 do_invalid_op(regs, 0);
367 return 1;
368 }
369 }
370#endif
371 return 0;
372}
373
374static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
375 unsigned long address)
376{
377#ifdef CONFIG_X86_32
378 if (!oops_may_print())
379 return;
380
381#ifdef CONFIG_X86_PAE
382 if (error_code & PF_INSTR) {
383 int level;
384 pte_t *pte = lookup_address(address, &level);
385
386 if (pte && pte_present(*pte) && !pte_exec(*pte))
387 printk(KERN_CRIT "kernel tried to execute "
388 "NX-protected page - exploit attempt? "
389 "(uid: %d)\n", current->uid);
390 }
391#endif
392 printk(KERN_ALERT "BUG: unable to handle kernel ");
393 if (address < PAGE_SIZE)
394 printk(KERN_CONT "NULL pointer dereference");
395 else
396 printk(KERN_CONT "paging request");
397 printk(KERN_CONT " at %08lx\n", address);
398
399 printk(KERN_ALERT "IP:");
400 printk_address(regs->ip, 1);
401 dump_pagetable(address);
402#else /* CONFIG_X86_64 */
403 printk(KERN_ALERT "BUG: unable to handle kernel ");
404 if (address < PAGE_SIZE)
405 printk(KERN_CONT "NULL pointer dereference");
406 else
407 printk(KERN_CONT "paging request");
408 printk(KERN_CONT " at %016lx\n", address);
409
410 printk(KERN_ALERT "IP:");
411 printk_address(regs->ip, 1);
412 dump_pagetable(address);
413#endif
414}
415
416#ifdef CONFIG_X86_64
417static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
418 unsigned long error_code)
419{
420 unsigned long flags = oops_begin();
421 struct task_struct *tsk;
422
423 printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
424 current->comm, address);
425 dump_pagetable(address);
426 tsk = current;
427 tsk->thread.cr2 = address;
428 tsk->thread.trap_no = 14;
429 tsk->thread.error_code = error_code;
430 if (__die("Bad pagetable", regs, error_code))
431 regs = NULL;
432 oops_end(flags, regs, SIGKILL);
433}
434#endif
435
436/*
437 * X86_32
438 * Handle a fault on the vmalloc or module mapping area
439 *
440 * X86_64
441 * Handle a fault on the vmalloc area
442 *
443 * This assumes no large pages in there.
444 */
445static int vmalloc_fault(unsigned long address)
446{
447#ifdef CONFIG_X86_32
448 unsigned long pgd_paddr;
449 pmd_t *pmd_k;
450 pte_t *pte_k;
451 /*
452 * Synchronize this task's top level page-table
453 * with the 'reference' page table.
454 *
455 * Do _not_ use "current" here. We might be inside
456 * an interrupt in the middle of a task switch..
457 */
458 pgd_paddr = read_cr3();
459 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
460 if (!pmd_k)
461 return -1;
462 pte_k = pte_offset_kernel(pmd_k, address);
463 if (!pte_present(*pte_k))
464 return -1;
465 return 0;
466#else
467 pgd_t *pgd, *pgd_ref;
468 pud_t *pud, *pud_ref;
469 pmd_t *pmd, *pmd_ref;
470 pte_t *pte, *pte_ref;
471
472 /* Copy kernel mappings over when needed. This can also
473 happen within a race in page table update. In the later
474 case just flush. */
475
476 pgd = pgd_offset(current->mm ?: &init_mm, address);
477 pgd_ref = pgd_offset_k(address);
478 if (pgd_none(*pgd_ref))
479 return -1;
480 if (pgd_none(*pgd))
481 set_pgd(pgd, *pgd_ref);
482 else
483 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
484
485 /* Below here mismatches are bugs because these lower tables
486 are shared */
487
488 pud = pud_offset(pgd, address);
489 pud_ref = pud_offset(pgd_ref, address);
490 if (pud_none(*pud_ref))
491 return -1;
492 if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
493 BUG();
494 pmd = pmd_offset(pud, address);
495 pmd_ref = pmd_offset(pud_ref, address);
496 if (pmd_none(*pmd_ref))
497 return -1;
498 if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
499 BUG();
500 pte_ref = pte_offset_kernel(pmd_ref, address);
501 if (!pte_present(*pte_ref))
502 return -1;
503 pte = pte_offset_kernel(pmd, address);
504 /* Don't use pte_page here, because the mappings can point
505 outside mem_map, and the NUMA hash lookup cannot handle
506 that. */
507 if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
508 BUG();
509 return 0;
510#endif
511}
512
513int show_unhandled_signals = 1;
514
515/*
516 * This routine handles page faults. It determines the address,
517 * and the problem, and then passes it off to one of the appropriate
518 * routines.
519 */
520#ifdef CONFIG_X86_64
521asmlinkage
522#endif
523void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
524{
525 struct task_struct *tsk;
526 struct mm_struct *mm;
527 struct vm_area_struct *vma;
528 unsigned long address;
529 int write, si_code;
530 int fault;
531#ifdef CONFIG_X86_64
532 unsigned long flags;
533#endif
534
535 /*
536 * We can fault from pretty much anywhere, with unknown IRQ state.
537 */
538 trace_hardirqs_fixup();
539
540 tsk = current;
541 mm = tsk->mm;
542 prefetchw(&mm->mmap_sem);
543
544 /* get the address */
545 address = read_cr2();
546
547 si_code = SEGV_MAPERR;
548
549 if (notify_page_fault(regs))
550 return;
551
552 /*
553 * We fault-in kernel-space virtual memory on-demand. The
554 * 'reference' page table is init_mm.pgd.
555 *
556 * NOTE! We MUST NOT take any locks for this case. We may
557 * be in an interrupt or a critical region, and should
558 * only copy the information from the master page table,
559 * nothing more.
560 *
561 * This verifies that the fault happens in kernel space
562 * (error_code & 4) == 0, and that the fault was not a
563 * protection error (error_code & 9) == 0.
564 */
565#ifdef CONFIG_X86_32
566 if (unlikely(address >= TASK_SIZE)) {
567 if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
568 vmalloc_fault(address) >= 0)
569 return;
570 /*
571 * Don't take the mm semaphore here. If we fixup a prefetch
572 * fault we could otherwise deadlock.
573 */
574 goto bad_area_nosemaphore;
575 }
576
577 /* It's safe to allow irq's after cr2 has been saved and the vmalloc
578 fault has been handled. */
579 if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
580 local_irq_enable();
581
582 /*
583 * If we're in an interrupt, have no user context or are running in an
584 * atomic region then we must not take the fault.
585 */
586 if (in_atomic() || !mm)
587 goto bad_area_nosemaphore;
588#else /* CONFIG_X86_64 */
589 if (unlikely(address >= TASK_SIZE64)) {
590 /*
591 * Don't check for the module range here: its PML4
592 * is always initialized because it's shared with the main
593 * kernel text. Only vmalloc may need PML4 syncups.
594 */
595 if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
596 ((address >= VMALLOC_START && address < VMALLOC_END))) {
597 if (vmalloc_fault(address) >= 0)
598 return;
599 }
600 /*
601 * Don't take the mm semaphore here. If we fixup a prefetch
602 * fault we could otherwise deadlock.
603 */
604 goto bad_area_nosemaphore;
605 }
606 if (likely(regs->flags & X86_EFLAGS_IF))
607 local_irq_enable();
608
609 if (unlikely(error_code & PF_RSVD))
610 pgtable_bad(address, regs, error_code);
611
612 /*
613 * If we're in an interrupt, have no user context or are running in an
614 * atomic region then we must not take the fault.
615 */
616 if (unlikely(in_atomic() || !mm))
617 goto bad_area_nosemaphore;
618
619 /*
620 * User-mode registers count as a user access even for any
621 * potential system fault or CPU buglet.
622 */
623 if (user_mode_vm(regs))
624 error_code |= PF_USER;
625again:
626#endif
627 /* When running in the kernel we expect faults to occur only to
628 * addresses in user space. All other faults represent errors in the
629 * kernel and should generate an OOPS. Unfortunately, in the case of an
630 * erroneous fault occurring in a code path which already holds mmap_sem
631 * we will deadlock attempting to validate the fault against the
632 * address space. Luckily the kernel only validly references user
633 * space from well defined areas of code, which are listed in the
634 * exceptions table.
635 *
636 * As the vast majority of faults will be valid we will only perform
637 * the source reference check when there is a possibility of a deadlock.
638 * Attempt to lock the address space, if we cannot we then validate the
639 * source. If this is invalid we can skip the address space check,
640 * thus avoiding the deadlock.
641 */
642 if (!down_read_trylock(&mm->mmap_sem)) {
643 if ((error_code & PF_USER) == 0 &&
644 !search_exception_tables(regs->ip))
645 goto bad_area_nosemaphore;
646 down_read(&mm->mmap_sem);
647 }
648
649 vma = find_vma(mm, address);
650 if (!vma)
651 goto bad_area;
652#ifdef CONFIG_X86_32
653 if (vma->vm_start <= address)
654#else
655 if (likely(vma->vm_start <= address))
656#endif
657 goto good_area;
658 if (!(vma->vm_flags & VM_GROWSDOWN))
659 goto bad_area;
660 if (error_code & PF_USER) {
661 /*
662 * Accessing the stack below %sp is always a bug.
663 * The large cushion allows instructions like enter
664 * and pusha to work. ("enter $65535,$31" pushes
665 * 32 pointers and then decrements %sp by 65535.)
666 */
667 if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
668 goto bad_area;
669 }
670 if (expand_stack(vma, address))
671 goto bad_area;
672/*
673 * Ok, we have a good vm_area for this memory access, so
674 * we can handle it..
675 */
676good_area:
677 si_code = SEGV_ACCERR;
678 write = 0;
679 switch (error_code & (PF_PROT|PF_WRITE)) {
680 default: /* 3: write, present */
681 /* fall through */
682 case PF_WRITE: /* write, not present */
683 if (!(vma->vm_flags & VM_WRITE))
684 goto bad_area;
685 write++;
686 break;
687 case PF_PROT: /* read, present */
688 goto bad_area;
689 case 0: /* read, not present */
690 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
691 goto bad_area;
692 }
693
694#ifdef CONFIG_X86_32
695survive:
696#endif
697 /*
698 * If for any reason at all we couldn't handle the fault,
699 * make sure we exit gracefully rather than endlessly redo
700 * the fault.
701 */
702 fault = handle_mm_fault(mm, vma, address, write);
703 if (unlikely(fault & VM_FAULT_ERROR)) {
704 if (fault & VM_FAULT_OOM)
705 goto out_of_memory;
706 else if (fault & VM_FAULT_SIGBUS)
707 goto do_sigbus;
708 BUG();
709 }
710 if (fault & VM_FAULT_MAJOR)
711 tsk->maj_flt++;
712 else
713 tsk->min_flt++;
714
715#ifdef CONFIG_X86_32
716 /*
717 * Did it hit the DOS screen memory VA from vm86 mode?
718 */
719 if (v8086_mode(regs)) {
720 unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
721 if (bit < 32)
722 tsk->thread.screen_bitmap |= 1 << bit;
723 }
724#endif
725 up_read(&mm->mmap_sem);
726 return;
727
728/*
729 * Something tried to access memory that isn't in our memory map..
730 * Fix it, but check if it's kernel or user first..
731 */
732bad_area:
733 up_read(&mm->mmap_sem);
734
735bad_area_nosemaphore:
736 /* User mode accesses just cause a SIGSEGV */
737 if (error_code & PF_USER) {
738 /*
739 * It's possible to have interrupts off here.
740 */
741 local_irq_enable();
742
743 /*
744 * Valid to do another page fault here because this one came
745 * from user space.
746 */
747 if (is_prefetch(regs, address, error_code))
748 return;
749
750 if (is_errata100(regs, address))
751 return;
752
753 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
754 printk_ratelimit()) {
755 printk(
756#ifdef CONFIG_X86_32
757 "%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
758#else
759 "%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
760#endif
761 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
762 tsk->comm, task_pid_nr(tsk), address, regs->ip,
763 regs->sp, error_code);
764 print_vma_addr(" in ", regs->ip);
765 printk("\n");
766 }
767
768 tsk->thread.cr2 = address;
769 /* Kernel addresses are always protection faults */
770 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
771 tsk->thread.trap_no = 14;
772 force_sig_info_fault(SIGSEGV, si_code, address, tsk);
773 return;
774 }
775
776 if (is_f00f_bug(regs, address))
777 return;
778
779no_context:
780 /* Are we prepared to handle this kernel fault? */
781 if (fixup_exception(regs))
782 return;
783
784 /*
785 * X86_32
786 * Valid to do another page fault here, because if this fault
787 * had been triggered by is_prefetch fixup_exception would have
788 * handled it.
789 *
790 * X86_64
791 * Hall of shame of CPU/BIOS bugs.
792 */
793 if (is_prefetch(regs, address, error_code))
794 return;
795
796 if (is_errata93(regs, address))
797 return;
798
799/*
800 * Oops. The kernel tried to access some bad page. We'll have to
801 * terminate things with extreme prejudice.
802 */
803#ifdef CONFIG_X86_32
804 bust_spinlocks(1);
805
806 show_fault_oops(regs, error_code, address);
807
808 tsk->thread.cr2 = address;
809 tsk->thread.trap_no = 14;
810 tsk->thread.error_code = error_code;
811 die("Oops", regs, error_code);
812 bust_spinlocks(0);
813 do_exit(SIGKILL);
814#else /* CONFIG_X86_64 */
815 flags = oops_begin();
816
817 show_fault_oops(regs, error_code, address);
818
819 tsk->thread.cr2 = address;
820 tsk->thread.trap_no = 14;
821 tsk->thread.error_code = error_code;
822 if (__die("Oops", regs, error_code))
823 regs = NULL;
824 /* Executive summary in case the body of the oops scrolled away */
825 printk(KERN_EMERG "CR2: %016lx\n", address);
826 oops_end(flags, regs, SIGKILL);
827#endif
828
829/*
830 * We ran out of memory, or some other thing happened to us that made
831 * us unable to handle the page fault gracefully.
832 */
833out_of_memory:
834 up_read(&mm->mmap_sem);
835#ifdef CONFIG_X86_32
836 if (is_global_init(tsk)) {
837 yield();
838 down_read(&mm->mmap_sem);
839 goto survive;
840 }
841#else
842 if (is_global_init(current)) {
843 yield();
844 goto again;
845 }
846#endif
847 printk("VM: killing process %s\n", tsk->comm);
848 if (error_code & PF_USER)
849 do_group_exit(SIGKILL);
850 goto no_context;
851
852do_sigbus:
853 up_read(&mm->mmap_sem);
854
855 /* Kernel mode? Handle exceptions or die */
856 if (!(error_code & PF_USER))
857 goto no_context;
858#ifdef CONFIG_X86_32
859 /* User space => ok to do another page fault */
860 if (is_prefetch(regs, address, error_code))
861 return;
862#endif
863 tsk->thread.cr2 = address;
864 tsk->thread.error_code = error_code;
865 tsk->thread.trap_no = 14;
866 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
867}
868
869#ifdef CONFIG_X86_64
870DEFINE_SPINLOCK(pgd_lock);
871LIST_HEAD(pgd_list);
872#endif
873
874void vmalloc_sync_all(void)
875{
876#ifdef CONFIG_X86_32
877 /*
878 * Note that races in the updates of insync and start aren't
879 * problematic: insync can only get set bits added, and updates to
880 * start are only improving performance (without affecting correctness
881 * if undone).
882 */
883 static DECLARE_BITMAP(insync, PTRS_PER_PGD);
884 static unsigned long start = TASK_SIZE;
885 unsigned long address;
886
887 if (SHARED_KERNEL_PMD)
888 return;
889
890 BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
891 for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
892 if (!test_bit(pgd_index(address), insync)) {
893 unsigned long flags;
894 struct page *page;
895
896 spin_lock_irqsave(&pgd_lock, flags);
897 for (page = pgd_list; page; page =
898 (struct page *)page->index)
899 if (!vmalloc_sync_one(page_address(page),
900 address)) {
901 BUG_ON(page != pgd_list);
902 break;
903 }
904 spin_unlock_irqrestore(&pgd_lock, flags);
905 if (!page)
906 set_bit(pgd_index(address), insync);
907 }
908 if (address == start && test_bit(pgd_index(address), insync))
909 start = address + PGDIR_SIZE;
910 }
911#else /* CONFIG_X86_64 */
912 /*
913 * Note that races in the updates of insync and start aren't
914 * problematic: insync can only get set bits added, and updates to
915 * start are only improving performance (without affecting correctness
916 * if undone).
917 */
918 static DECLARE_BITMAP(insync, PTRS_PER_PGD);
919 static unsigned long start = VMALLOC_START & PGDIR_MASK;
920 unsigned long address;
921
922 for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
923 if (!test_bit(pgd_index(address), insync)) {
924 const pgd_t *pgd_ref = pgd_offset_k(address);
925 struct page *page;
926
927 if (pgd_none(*pgd_ref))
928 continue;
929 spin_lock(&pgd_lock);
930 list_for_each_entry(page, &pgd_list, lru) {
931 pgd_t *pgd;
932 pgd = (pgd_t *)page_address(page) + pgd_index(address);
933 if (pgd_none(*pgd))
934 set_pgd(pgd, *pgd_ref);
935 else
936 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
937 }
938 spin_unlock(&pgd_lock);
939 set_bit(pgd_index(address), insync);
940 }
941 if (address == start)
942 start = address + PGDIR_SIZE;
943 }
944 /* Check that there is no need to do the same for the modules area. */
945 BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
946 BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
947 (__START_KERNEL & PGDIR_MASK)));
948#endif
949}