Diffstat (limited to 'arch/x86/mm')
 -rw-r--r--  arch/x86/mm/fault.c     |  8
 -rw-r--r--  arch/x86/mm/numa_64.c   |  2
 -rw-r--r--  arch/x86/mm/pageattr.c  | 15
 3 files changed, 12 insertions, 13 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 90dfae511a41..c76ef1d701c9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -603,8 +603,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 	si_code = SEGV_MAPERR;
 
-	if (notify_page_fault(regs))
-		return;
 	if (unlikely(kmmio_fault(regs, address)))
 		return;
 
@@ -634,6 +632,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		if (spurious_fault(address, error_code))
 			return;
 
+		/* kprobes don't want to hook the spurious faults. */
+		if (notify_page_fault(regs))
+			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
 		 * fault we could otherwise deadlock.
@@ -641,6 +642,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		goto bad_area_nosemaphore;
 	}
 
+	/* kprobes don't want to hook the spurious faults. */
+	if (notify_page_fault(regs))
+		return;
 
 	/*
 	 * It's safe to allow irq's after cr2 has been saved and the
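
Net effect of the fault.c change: notify_page_fault() (the kprobes hook) is no longer the first check in do_page_fault(); it now runs only after the kmmio and spurious-fault fast paths have had a chance to resolve the fault, so kprobes never sees spurious faults. Below is a minimal user-space sketch of that ordering principle only; handle_fault, is_spurious and notifier_hooked are invented stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel-side checks. */
static bool is_spurious(unsigned long addr)
{
	return addr == 0xdead;
}

static bool notifier_hooked(unsigned long addr)
{
	printf("notifier saw fault at %#lx\n", addr);
	return false;	/* notifier did not consume the fault */
}

/* Post-patch ordering: resolve spurious faults first, then let the
 * notifier (kprobes in the kernel case) have a look. */
static void handle_fault(unsigned long addr)
{
	if (is_spurious(addr))
		return;			/* never reaches the notifier */
	if (notifier_hooked(addr))
		return;
	printf("real fault handling for %#lx\n", addr);
}

int main(void)
{
	handle_fault(0xdead);	/* spurious: notifier not called */
	handle_fault(0xbeef);	/* real fault: notifier called first */
	return 0;
}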
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 71a14f89f89e..f3516da035d1 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -145,7 +145,7 @@ int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
 	return shift;
 }
 
-int early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	return phys_to_nid(pfn << PAGE_SHIFT);
 }
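
The renamed helper itself is unchanged: it converts a page frame number to a physical address (pfn << PAGE_SHIFT) and asks the NUMA layer which node owns that address. A small user-space model of that arithmetic follows; phys_to_nid_stub and the one-node-per-GiB layout are made up for illustration and are not kernel interfaces.

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

/* Hypothetical memnode map: pretend each 1 GiB of physical
 * address space belongs to a different node. */
static int phys_to_nid_stub(unsigned long long phys)
{
	return (int)(phys >> 30);
}

/* Mirrors the shape of the patched helper: pfn -> phys -> node. */
static int early_pfn_to_nid_stub(unsigned long pfn)
{
	return phys_to_nid_stub((unsigned long long)pfn << PAGE_SHIFT);
}

int main(void)
{
	/* pfn 0x80000 -> phys 0x80000000 (2 GiB) -> node 2 in this model */
	printf("node = %d\n", early_pfn_to_nid_stub(0x80000));
	return 0;
}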
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8ca0d8566fc8..7be47d1a97e4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -508,18 +508,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 #endif
 
 	/*
-	 * Install the new, split up pagetable. Important details here:
+	 * Install the new, split up pagetable.
 	 *
-	 * On Intel the NX bit of all levels must be cleared to make a
-	 * page executable. See section 4.13.2 of Intel 64 and IA-32
-	 * Architectures Software Developer's Manual).
-	 *
-	 * Mark the entry present. The current mapping might be
-	 * set to not present, which we preserved above.
+	 * We use the standard kernel pagetable protections for the new
+	 * pagetable protections, the actual ptes set above control the
+	 * primary protection behavior:
 	 */
-	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
-	pgprot_val(ref_prot) |= _PAGE_PRESENT;
-	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
+	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
 	base = NULL;
 
 out_unlock:
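
The pageattr.c change makes split_large_page() install the intermediate entry with the standard kernel pagetable protections (_KERNPG_TABLE: present, writable, no NX) instead of deriving them from the old huge PTE, letting the leaf PTEs carry the restrictive bits. This works because the effective protection of a mapping is the most restrictive one across the paging levels (the removed comment's NX remark is one instance of this). Below is a simplified, runnable model of that combination rule; the struct and helpers are invented for illustration, and real x86 semantics also involve CR0.WP, SMEP and friends.

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of x86 paging: a mapping is writable only if every
 * level allows writes, and executable only if no level sets NX. */
struct level { bool present, rw, nx; };

static void effective(struct level pmd, struct level pte)
{
	bool present = pmd.present && pte.present;
	bool rw = pmd.rw && pte.rw;
	bool nx = pmd.nx || pte.nx;
	printf("present=%d rw=%d nx=%d\n", present, rw, nx);
}

int main(void)
{
	/* Intermediate entry with permissive "kernel pagetable" bits
	 * (present + writable, no NX), as the patch installs... */
	struct level pmd = { true, true, false };

	/* ...so the leaf PTEs alone decide the final protections. */
	struct level ro_nx_pte = { true, false, true };
	struct level rw_x_pte  = { true, true,  false };

	effective(pmd, ro_nx_pte);	/* read-only, non-executable */
	effective(pmd, rw_x_pte);	/* writable, executable */
	return 0;
}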