path: root/arch/x86/mm/fault.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2008-02-04 12:16:03 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-02-04 12:16:03 -0500
commit    d2fc0bacd5c438cb459fdf531eff00ab18422a00
tree      d0ea52e4d2ad2fac12e19eaf6891c6af98353cfc  /arch/x86/mm/fault.c
parent    93890b71a34f9490673a6edd56b61c2124215e46
parent    795d45b22c079946332bf3825afefe5a981a97b6
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (78 commits)
  x86: fix RTC lockdep warning: potential hardirq recursion
  x86: cpa, micro-optimization
  x86: cpa, clean up code flow
  x86: cpa, eliminate CPA_ enum
  x86: cpa, cleanups
  x86: implement gbpages support in change_page_attr()
  x86: support gbpages in pagetable dump
  x86: add gbpages support to lookup_address
  x86: add pgtable accessor functions for gbpages
  x86: add PUD_PAGE_SIZE
  x86: add feature macros for the gbpages cpuid bit
  x86: switch direct mapping setup over to set_pte
  x86: fix page-present check in cpa_flush_range
  x86: remove cpa warning
  x86: remove now unused clear_kernel_mapping
  x86: switch pci-gart over to using set_memory_np() instead of clear_kernel_mapping()
  x86: cpa selftest, skip non present entries
  x86: CPA fix pagetable split
  x86: rename LARGE_PAGE_SIZE to PMD_PAGE_SIZE
  x86: cpa, fix lookup_address
  ...
Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--  arch/x86/mm/fault.c  34
1 file changed, 11 insertions, 23 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e4440d0abf8..ad8b9733d6b 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -240,7 +240,8 @@ void dump_pagetable(unsigned long address)
 	pud = pud_offset(pgd, address);
 	if (bad_address(pud)) goto bad;
 	printk("PUD %lx ", pud_val(*pud));
-	if (!pud_present(*pud)) goto ret;
+	if (!pud_present(*pud) || pud_large(*pud))
+		goto ret;
 
 	pmd = pmd_offset(pud, address);
 	if (bad_address(pmd)) goto bad;
@@ -508,6 +509,10 @@ static int vmalloc_fault(unsigned long address)
 	pmd_t *pmd, *pmd_ref;
 	pte_t *pte, *pte_ref;
 
+	/* Make sure we are in vmalloc area */
+	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+		return -1;
+
 	/* Copy kernel mappings over when needed. This can also
 	   happen within a race in page table update. In the later
 	   case just flush. */
@@ -603,6 +608,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 */
 #ifdef CONFIG_X86_32
 	if (unlikely(address >= TASK_SIZE)) {
+#else
+	if (unlikely(address >= TASK_SIZE64)) {
+#endif
 		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
 		    vmalloc_fault(address) >= 0)
 			return;
@@ -618,6 +626,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		goto bad_area_nosemaphore;
 	}
 
+
+#ifdef CONFIG_X86_32
 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
 	   fault has been handled. */
 	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
@@ -630,28 +640,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 #else /* CONFIG_X86_64 */
-	if (unlikely(address >= TASK_SIZE64)) {
-		/*
-		 * Don't check for the module range here: its PML4
-		 * is always initialized because it's shared with the main
-		 * kernel text. Only vmalloc may need PML4 syncups.
-		 */
-		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
-			if (vmalloc_fault(address) >= 0)
-				return;
-		}
-
-		/* Can handle a stale RO->RW TLB */
-		if (spurious_fault(address, error_code))
-			return;
-
-		/*
-		 * Don't take the mm semaphore here. If we fixup a prefetch
-		 * fault we could otherwise deadlock.
-		 */
-		goto bad_area_nosemaphore;
-	}
 	if (likely(regs->flags & X86_EFLAGS_IF))
 		local_irq_enable();
 
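For orientation, here is a rough sketch of what the early kernel-address check in do_page_fault() looks like once the hunks above are applied. It is assembled only from lines visible in this diff; context the diff omits is elided, so treat it as an approximation of the post-merge code rather than a verbatim excerpt:

#ifdef CONFIG_X86_32
	if (unlikely(address >= TASK_SIZE)) {
#else
	if (unlikely(address >= TASK_SIZE64)) {
#endif
		/*
		 * Kernel-address fault: try the vmalloc fixup first.
		 * vmalloc_fault() now performs the VMALLOC_START/VMALLOC_END
		 * range check itself (second hunk), so the call site no
		 * longer needs one.
		 */
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;

		/* ... remaining handling not shown in this diff ... */

		goto bad_area_nosemaphore;
	}

The net effect is that the 64-bit-only block removed in the last hunk no longer duplicates this logic: 32-bit and 64-bit share one early check, and the vmalloc range test lives inside vmalloc_fault().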