diff options
author | Dave Hansen <dave.hansen@linux.intel.com> | 2018-04-20 18:20:28 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2018-04-25 05:02:51 -0400 |
commit | 316d097c4cd4e7f2ef50c40cff2db266593c4ec4 (patch) | |
tree | 0cbc41b328c814ddbfb51d9eb8e265f2a9cdb0e6 | |
parent | b7c21bc56fbedf4a61b628c6b11e0d7048746cc1 (diff) |
x86/pti: Filter at vma->vm_page_prot population
commit ce9962bf7e22bb3891655c349faff618922d4a73
0day reported warnings at boot on 32-bit systems without NX support:
attempted to set unsupported pgprot: 8000000000000025 bits: 8000000000000000 supported: 7fffffffffffffff
WARNING: CPU: 0 PID: 1 at
arch/x86/include/asm/pgtable.h:540 handle_mm_fault+0xfc1/0xfe0:
check_pgprot at arch/x86/include/asm/pgtable.h:535
(inlined by) pfn_pte at arch/x86/include/asm/pgtable.h:549
(inlined by) do_anonymous_page at mm/memory.c:3169
(inlined by) handle_pte_fault at mm/memory.c:3961
(inlined by) __handle_mm_fault at mm/memory.c:4087
(inlined by) handle_mm_fault at mm/memory.c:4124
The problem is that due to the recent commit which removed auto-massaging
of page protections, filtering page permissions at PTE creation time is no
longer done, so vma->vm_page_prot is passed unfiltered to PTE creation.
Filter the page protections before they are installed in vma->vm_page_prot.
Fixes: fb43d6cb91 ("x86/mm: Do not auto-massage page protections")
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@google.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Nadav Amit <namit@vmware.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Link: https://lkml.kernel.org/r/20180420222028.99D72858@viggo.jf.intel.com
-rw-r--r-- | arch/x86/Kconfig | 4 | ||||
-rw-r--r-- | arch/x86/include/asm/pgtable.h | 5 | ||||
-rw-r--r-- | mm/mmap.c | 11 |
3 files changed, 19 insertions, 1 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 00fcf81f2c56..c07f492b871a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -52,6 +52,7 @@ config X86 | |||
52 | select ARCH_HAS_DEVMEM_IS_ALLOWED | 52 | select ARCH_HAS_DEVMEM_IS_ALLOWED |
53 | select ARCH_HAS_ELF_RANDOMIZE | 53 | select ARCH_HAS_ELF_RANDOMIZE |
54 | select ARCH_HAS_FAST_MULTIPLIER | 54 | select ARCH_HAS_FAST_MULTIPLIER |
55 | select ARCH_HAS_FILTER_PGPROT | ||
55 | select ARCH_HAS_FORTIFY_SOURCE | 56 | select ARCH_HAS_FORTIFY_SOURCE |
56 | select ARCH_HAS_GCOV_PROFILE_ALL | 57 | select ARCH_HAS_GCOV_PROFILE_ALL |
57 | select ARCH_HAS_KCOV if X86_64 | 58 | select ARCH_HAS_KCOV if X86_64 |
@@ -273,6 +274,9 @@ config ARCH_HAS_CPU_RELAX | |||
273 | config ARCH_HAS_CACHE_LINE_SIZE | 274 | config ARCH_HAS_CACHE_LINE_SIZE |
274 | def_bool y | 275 | def_bool y |
275 | 276 | ||
277 | config ARCH_HAS_FILTER_PGPROT | ||
278 | def_bool y | ||
279 | |||
276 | config HAVE_SETUP_PER_CPU_AREA | 280 | config HAVE_SETUP_PER_CPU_AREA |
277 | def_bool y | 281 | def_bool y |
278 | 282 | ||
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5f49b4ff0c24..f1633de5a675 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -601,6 +601,11 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) | |||
601 | 601 | ||
602 | #define canon_pgprot(p) __pgprot(massage_pgprot(p)) | 602 | #define canon_pgprot(p) __pgprot(massage_pgprot(p)) |
603 | 603 | ||
604 | static inline pgprot_t arch_filter_pgprot(pgprot_t prot) | ||
605 | { | ||
606 | return canon_pgprot(prot); | ||
607 | } | ||
608 | |||
604 | static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, | 609 | static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, |
605 | enum page_cache_mode pcm, | 610 | enum page_cache_mode pcm, |
606 | enum page_cache_mode new_pcm) | 611 | enum page_cache_mode new_pcm) |
@@ -100,11 +100,20 @@ pgprot_t protection_map[16] __ro_after_init = { | |||
100 | __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 | 100 | __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 |
101 | }; | 101 | }; |
102 | 102 | ||
103 | #ifndef CONFIG_ARCH_HAS_FILTER_PGPROT | ||
104 | static inline pgprot_t arch_filter_pgprot(pgprot_t prot) | ||
105 | { | ||
106 | return prot; | ||
107 | } | ||
108 | #endif | ||
109 | |||
103 | pgprot_t vm_get_page_prot(unsigned long vm_flags) | 110 | pgprot_t vm_get_page_prot(unsigned long vm_flags) |
104 | { | 111 | { |
105 | return __pgprot(pgprot_val(protection_map[vm_flags & | 112 | pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags & |
106 | (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | | 113 | (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | |
107 | pgprot_val(arch_vm_get_page_prot(vm_flags))); | 114 | pgprot_val(arch_vm_get_page_prot(vm_flags))); |
115 | |||
116 | return arch_filter_pgprot(ret); | ||
108 | } | 117 | } |
109 | EXPORT_SYMBOL(vm_get_page_prot); | 118 | EXPORT_SYMBOL(vm_get_page_prot); |
110 | 119 | ||