Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--  arch/x86/mm/fault.c | 34 +++++++++++-----------------------
1 file changed, 11 insertions(+), 23 deletions(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e4440d0abf81..ad8b9733d6b3 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -240,7 +240,8 @@ void dump_pagetable(unsigned long address)
 	pud = pud_offset(pgd, address);
 	if (bad_address(pud)) goto bad;
 	printk("PUD %lx ", pud_val(*pud));
-	if (!pud_present(*pud)) goto ret;
+	if (!pud_present(*pud) || pud_large(*pud))
+		goto ret;
 
 	pmd = pmd_offset(pud, address);
 	if (bad_address(pmd)) goto bad;
@@ -508,6 +509,10 @@ static int vmalloc_fault(unsigned long address)
 	pmd_t *pmd, *pmd_ref;
 	pte_t *pte, *pte_ref;
 
+	/* Make sure we are in vmalloc area */
+	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+		return -1;
+
 	/* Copy kernel mappings over when needed. This can also
 	   happen within a race in page table update. In the later
 	   case just flush. */
@@ -603,6 +608,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 */
 #ifdef CONFIG_X86_32
 	if (unlikely(address >= TASK_SIZE)) {
+#else
+	if (unlikely(address >= TASK_SIZE64)) {
+#endif
 		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
 		    vmalloc_fault(address) >= 0)
 			return;
@@ -618,6 +626,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		goto bad_area_nosemaphore;
 	}
 
+
+#ifdef CONFIG_X86_32
 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
 	   fault has been handled. */
 	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
@@ -630,28 +640,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 #else /* CONFIG_X86_64 */
-	if (unlikely(address >= TASK_SIZE64)) {
-		/*
-		 * Don't check for the module range here: its PML4
-		 * is always initialized because it's shared with the main
-		 * kernel text. Only vmalloc may need PML4 syncups.
-		 */
-		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
-			if (vmalloc_fault(address) >= 0)
-				return;
-		}
-
-		/* Can handle a stale RO->RW TLB */
-		if (spurious_fault(address, error_code))
-			return;
-
-		/*
-		 * Don't take the mm semaphore here. If we fixup a prefetch
-		 * fault we could otherwise deadlock.
-		 */
-		goto bad_area_nosemaphore;
-	}
 	if (likely(regs->flags & X86_EFLAGS_IF))
 		local_irq_enable();
 
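For orientation, a condensed sketch of the control flow this patch leaves behind: the VMALLOC_START..VMALLOC_END range check moves out of the 64-bit branch of do_page_fault() and into vmalloc_fault() itself, so the 32-bit and 64-bit kernels share one kernel-address fast path that differs only in the boundary constant. This is an illustration, not code from the commit; sync_kernel_mappings() is a hypothetical stand-in for the page-table copying the real vmalloc_fault() goes on to do.

/*
 * Condensed, illustrative sketch of the post-patch flow; not part of
 * the commit.  sync_kernel_mappings() is a hypothetical stand-in for
 * the pgd/pud/pmd/pte copying that the real vmalloc_fault() performs.
 */
static int vmalloc_fault(unsigned long address)
{
	/* The callee now rejects non-vmalloc addresses itself ... */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	return sync_kernel_mappings(address);
}

void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = read_cr2();

	/* ... so both arches can share one kernel-address fast path,
	   differing only in the user/kernel boundary constant: */
#ifdef CONFIG_X86_32
	if (unlikely(address >= TASK_SIZE)) {
#else
	if (unlikely(address >= TASK_SIZE64)) {
#endif
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;
		/* fall through to the usual bad-address handling */
	}
	/* ... normal user-address fault handling continues here ... */
}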