diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-25 18:34:18 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-25 18:34:18 -0400 |
commit | 0f657262d5f99ad86b9a63fb5dcd29036c2ed916 (patch) | |
tree | 54b83052c019bc1dff662cb1b38cbff59d901535 /arch/x86/mm | |
parent | 425dbc6db34dbd679cab1a17135c5910b271a03d (diff) | |
parent | 55920d31f1e3fea06702c74271dd56c4fc9b70ca (diff) |
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:
"Various x86 low level modifications:
- preparatory work to support virtually mapped kernel stacks (Andy
Lutomirski)
- support for 64-bit __get_user() on 32-bit kernels (Benjamin
LaHaise)
- (involved) workaround for Knights Landing CPU erratum (Dave Hansen)
- MPX enhancements (Dave Hansen)
- mremap() extension to allow remapping of the special VDSO vma, for
purposes of user level context save/restore (Dmitry Safonov)
- hweight and entry code cleanups (Borislav Petkov)
- bitops code generation optimizations and cleanups with modern GCC
(H. Peter Anvin)
- syscall entry code optimizations (Paolo Bonzini)"
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (43 commits)
x86/mm/cpa: Add missing comment in populate_pgd()
x86/mm/cpa: Fix populate_pgd(): Stop trying to deallocate failed PUDs
x86/syscalls: Add compat_sys_preadv64v2/compat_sys_pwritev64v2
x86/smp: Remove unnecessary initialization of thread_info::cpu
x86/smp: Remove stack_smp_processor_id()
x86/uaccess: Move thread_info::addr_limit to thread_struct
x86/dumpstack: Rename thread_struct::sig_on_uaccess_error to sig_on_uaccess_err
x86/uaccess: Move thread_info::uaccess_err and thread_info::sig_on_uaccess_err to thread_struct
x86/dumpstack: When OOPSing, rewind the stack before do_exit()
x86/mm/64: In vmalloc_fault(), use CR3 instead of current->active_mm
x86/dumpstack/64: Handle faults when printing the "Stack: " part of an OOPS
x86/dumpstack: Try harder to get a call trace on stack overflow
x86/mm: Remove kernel_unmap_pages_in_pgd() and efi_cleanup_page_tables()
x86/mm/cpa: In populate_pgd(), don't set the PGD entry until it's populated
x86/mm/hotplug: Don't remove PGD entries in remove_pagetable()
x86/mm: Use pte_none() to test for empty PTE
x86/mm: Disallow running with 32-bit PTEs to work around erratum
x86/mm: Ignore A/D bits in pte/pmd/pud_none()
x86/mm: Move swap offset/type up in PTE to work around erratum
x86/entry: Inline enter_from_user_mode()
...
Diffstat (limited to 'arch/x86/mm')
-rw-r--r-- | arch/x86/mm/extable.c | 2 | ||||
-rw-r--r-- | arch/x86/mm/fault.c | 4 | ||||
-rw-r--r-- | arch/x86/mm/init_64.c | 37 | ||||
-rw-r--r-- | arch/x86/mm/pageattr.c | 37 | ||||
-rw-r--r-- | arch/x86/mm/pat.c | 5 | ||||
-rw-r--r-- | arch/x86/mm/pgtable_32.c | 2 |
6 files changed, 17 insertions, 70 deletions
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 4bb53b89f3c5..0f90cc218d04 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c | |||
@@ -37,7 +37,7 @@ bool ex_handler_ext(const struct exception_table_entry *fixup, | |||
37 | struct pt_regs *regs, int trapnr) | 37 | struct pt_regs *regs, int trapnr) |
38 | { | 38 | { |
39 | /* Special hack for uaccess_err */ | 39 | /* Special hack for uaccess_err */ |
40 | current_thread_info()->uaccess_err = 1; | 40 | current->thread.uaccess_err = 1; |
41 | regs->ip = ex_fixup_addr(fixup); | 41 | regs->ip = ex_fixup_addr(fixup); |
42 | return true; | 42 | return true; |
43 | } | 43 | } |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 7d1fa7cd2374..d22161ab941d 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -439,7 +439,7 @@ static noinline int vmalloc_fault(unsigned long address) | |||
439 | * happen within a race in page table update. In the later | 439 | * happen within a race in page table update. In the later |
440 | * case just flush: | 440 | * case just flush: |
441 | */ | 441 | */ |
442 | pgd = pgd_offset(current->active_mm, address); | 442 | pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address); |
443 | pgd_ref = pgd_offset_k(address); | 443 | pgd_ref = pgd_offset_k(address); |
444 | if (pgd_none(*pgd_ref)) | 444 | if (pgd_none(*pgd_ref)) |
445 | return -1; | 445 | return -1; |
@@ -737,7 +737,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
737 | * In this case we need to make sure we're not recursively | 737 | * In this case we need to make sure we're not recursively |
738 | * faulting through the emulate_vsyscall() logic. | 738 | * faulting through the emulate_vsyscall() logic. |
739 | */ | 739 | */ |
740 | if (current_thread_info()->sig_on_uaccess_error && signal) { | 740 | if (current->thread.sig_on_uaccess_err && signal) { |
741 | tsk->thread.trap_nr = X86_TRAP_PF; | 741 | tsk->thread.trap_nr = X86_TRAP_PF; |
742 | tsk->thread.error_code = error_code | PF_USER; | 742 | tsk->thread.error_code = error_code | PF_USER; |
743 | tsk->thread.cr2 = address; | 743 | tsk->thread.cr2 = address; |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index bce2e5d9edd4..e14f87057c3f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -354,7 +354,7 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end, | |||
354 | * pagetable pages as RO. So assume someone who pre-setup | 354 | * pagetable pages as RO. So assume someone who pre-setup |
355 | * these mappings are more intelligent. | 355 | * these mappings are more intelligent. |
356 | */ | 356 | */ |
357 | if (pte_val(*pte)) { | 357 | if (!pte_none(*pte)) { |
358 | if (!after_bootmem) | 358 | if (!after_bootmem) |
359 | pages++; | 359 | pages++; |
360 | continue; | 360 | continue; |
@@ -396,7 +396,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, | |||
396 | continue; | 396 | continue; |
397 | } | 397 | } |
398 | 398 | ||
399 | if (pmd_val(*pmd)) { | 399 | if (!pmd_none(*pmd)) { |
400 | if (!pmd_large(*pmd)) { | 400 | if (!pmd_large(*pmd)) { |
401 | spin_lock(&init_mm.page_table_lock); | 401 | spin_lock(&init_mm.page_table_lock); |
402 | pte = (pte_t *)pmd_page_vaddr(*pmd); | 402 | pte = (pte_t *)pmd_page_vaddr(*pmd); |
@@ -470,7 +470,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, | |||
470 | continue; | 470 | continue; |
471 | } | 471 | } |
472 | 472 | ||
473 | if (pud_val(*pud)) { | 473 | if (!pud_none(*pud)) { |
474 | if (!pud_large(*pud)) { | 474 | if (!pud_large(*pud)) { |
475 | pmd = pmd_offset(pud, 0); | 475 | pmd = pmd_offset(pud, 0); |
476 | last_map_addr = phys_pmd_init(pmd, addr, end, | 476 | last_map_addr = phys_pmd_init(pmd, addr, end, |
@@ -673,7 +673,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) | |||
673 | 673 | ||
674 | for (i = 0; i < PTRS_PER_PTE; i++) { | 674 | for (i = 0; i < PTRS_PER_PTE; i++) { |
675 | pte = pte_start + i; | 675 | pte = pte_start + i; |
676 | if (pte_val(*pte)) | 676 | if (!pte_none(*pte)) |
677 | return; | 677 | return; |
678 | } | 678 | } |
679 | 679 | ||
@@ -691,7 +691,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud) | |||
691 | 691 | ||
692 | for (i = 0; i < PTRS_PER_PMD; i++) { | 692 | for (i = 0; i < PTRS_PER_PMD; i++) { |
693 | pmd = pmd_start + i; | 693 | pmd = pmd_start + i; |
694 | if (pmd_val(*pmd)) | 694 | if (!pmd_none(*pmd)) |
695 | return; | 695 | return; |
696 | } | 696 | } |
697 | 697 | ||
@@ -702,27 +702,6 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud) | |||
702 | spin_unlock(&init_mm.page_table_lock); | 702 | spin_unlock(&init_mm.page_table_lock); |
703 | } | 703 | } |
704 | 704 | ||
705 | /* Return true if pgd is changed, otherwise return false. */ | ||
706 | static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd) | ||
707 | { | ||
708 | pud_t *pud; | ||
709 | int i; | ||
710 | |||
711 | for (i = 0; i < PTRS_PER_PUD; i++) { | ||
712 | pud = pud_start + i; | ||
713 | if (pud_val(*pud)) | ||
714 | return false; | ||
715 | } | ||
716 | |||
717 | /* free a pud table */ | ||
718 | free_pagetable(pgd_page(*pgd), 0); | ||
719 | spin_lock(&init_mm.page_table_lock); | ||
720 | pgd_clear(pgd); | ||
721 | spin_unlock(&init_mm.page_table_lock); | ||
722 | |||
723 | return true; | ||
724 | } | ||
725 | |||
726 | static void __meminit | 705 | static void __meminit |
727 | remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, | 706 | remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, |
728 | bool direct) | 707 | bool direct) |
@@ -913,7 +892,6 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct) | |||
913 | unsigned long addr; | 892 | unsigned long addr; |
914 | pgd_t *pgd; | 893 | pgd_t *pgd; |
915 | pud_t *pud; | 894 | pud_t *pud; |
916 | bool pgd_changed = false; | ||
917 | 895 | ||
918 | for (addr = start; addr < end; addr = next) { | 896 | for (addr = start; addr < end; addr = next) { |
919 | next = pgd_addr_end(addr, end); | 897 | next = pgd_addr_end(addr, end); |
@@ -924,13 +902,8 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct) | |||
924 | 902 | ||
925 | pud = (pud_t *)pgd_page_vaddr(*pgd); | 903 | pud = (pud_t *)pgd_page_vaddr(*pgd); |
926 | remove_pud_table(pud, addr, next, direct); | 904 | remove_pud_table(pud, addr, next, direct); |
927 | if (free_pud_table(pud, pgd)) | ||
928 | pgd_changed = true; | ||
929 | } | 905 | } |
930 | 906 | ||
931 | if (pgd_changed) | ||
932 | sync_global_pgds(start, end - 1, 1); | ||
933 | |||
934 | flush_tlb_all(); | 907 | flush_tlb_all(); |
935 | } | 908 | } |
936 | 909 | ||
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 7a1f7bbf4105..47870a534877 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -746,18 +746,6 @@ static bool try_to_free_pmd_page(pmd_t *pmd) | |||
746 | return true; | 746 | return true; |
747 | } | 747 | } |
748 | 748 | ||
749 | static bool try_to_free_pud_page(pud_t *pud) | ||
750 | { | ||
751 | int i; | ||
752 | |||
753 | for (i = 0; i < PTRS_PER_PUD; i++) | ||
754 | if (!pud_none(pud[i])) | ||
755 | return false; | ||
756 | |||
757 | free_page((unsigned long)pud); | ||
758 | return true; | ||
759 | } | ||
760 | |||
761 | static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) | 749 | static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) |
762 | { | 750 | { |
763 | pte_t *pte = pte_offset_kernel(pmd, start); | 751 | pte_t *pte = pte_offset_kernel(pmd, start); |
@@ -871,16 +859,6 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end) | |||
871 | */ | 859 | */ |
872 | } | 860 | } |
873 | 861 | ||
874 | static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end) | ||
875 | { | ||
876 | pgd_t *pgd_entry = root + pgd_index(addr); | ||
877 | |||
878 | unmap_pud_range(pgd_entry, addr, end); | ||
879 | |||
880 | if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry))) | ||
881 | pgd_clear(pgd_entry); | ||
882 | } | ||
883 | |||
884 | static int alloc_pte_page(pmd_t *pmd) | 862 | static int alloc_pte_page(pmd_t *pmd) |
885 | { | 863 | { |
886 | pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); | 864 | pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); |
@@ -1113,7 +1091,12 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr) | |||
1113 | 1091 | ||
1114 | ret = populate_pud(cpa, addr, pgd_entry, pgprot); | 1092 | ret = populate_pud(cpa, addr, pgd_entry, pgprot); |
1115 | if (ret < 0) { | 1093 | if (ret < 0) { |
1116 | unmap_pgd_range(cpa->pgd, addr, | 1094 | /* |
1095 | * Leave the PUD page in place in case some other CPU or thread | ||
1096 | * already found it, but remove any useless entries we just | ||
1097 | * added to it. | ||
1098 | */ | ||
1099 | unmap_pud_range(pgd_entry, addr, | ||
1117 | addr + (cpa->numpages << PAGE_SHIFT)); | 1100 | addr + (cpa->numpages << PAGE_SHIFT)); |
1118 | return ret; | 1101 | return ret; |
1119 | } | 1102 | } |
@@ -1185,7 +1168,7 @@ repeat: | |||
1185 | return __cpa_process_fault(cpa, address, primary); | 1168 | return __cpa_process_fault(cpa, address, primary); |
1186 | 1169 | ||
1187 | old_pte = *kpte; | 1170 | old_pte = *kpte; |
1188 | if (!pte_val(old_pte)) | 1171 | if (pte_none(old_pte)) |
1189 | return __cpa_process_fault(cpa, address, primary); | 1172 | return __cpa_process_fault(cpa, address, primary); |
1190 | 1173 | ||
1191 | if (level == PG_LEVEL_4K) { | 1174 | if (level == PG_LEVEL_4K) { |
@@ -1991,12 +1974,6 @@ out: | |||
1991 | return retval; | 1974 | return retval; |
1992 | } | 1975 | } |
1993 | 1976 | ||
1994 | void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address, | ||
1995 | unsigned numpages) | ||
1996 | { | ||
1997 | unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT)); | ||
1998 | } | ||
1999 | |||
2000 | /* | 1977 | /* |
2001 | * The testcases use internal knowledge of the implementation that shouldn't | 1978 | * The testcases use internal knowledge of the implementation that shouldn't |
2002 | * be exposed to the rest of the kernel. Include these directly here. | 1979 | * be exposed to the rest of the kernel. Include these directly here. |
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index fb0604f11eec..db00e3e2f3dc 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -755,11 +755,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) | |||
755 | return 1; | 755 | return 1; |
756 | 756 | ||
757 | while (cursor < to) { | 757 | while (cursor < to) { |
758 | if (!devmem_is_allowed(pfn)) { | 758 | if (!devmem_is_allowed(pfn)) |
759 | pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n", | ||
760 | current->comm, from, to - 1); | ||
761 | return 0; | 759 | return 0; |
762 | } | ||
763 | cursor += PAGE_SIZE; | 760 | cursor += PAGE_SIZE; |
764 | pfn++; | 761 | pfn++; |
765 | } | 762 | } |
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 75cc0978d45d..e67ae0e6c59d 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c | |||
@@ -47,7 +47,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) | |||
47 | return; | 47 | return; |
48 | } | 48 | } |
49 | pte = pte_offset_kernel(pmd, vaddr); | 49 | pte = pte_offset_kernel(pmd, vaddr); |
50 | if (pte_val(pteval)) | 50 | if (!pte_none(pteval)) |
51 | set_pte_at(&init_mm, vaddr, pte, pteval); | 51 | set_pte_at(&init_mm, vaddr, pte, pteval); |
52 | else | 52 | else |
53 | pte_clear(&init_mm, vaddr, pte); | 53 | pte_clear(&init_mm, vaddr, pte); |