author     Andrey Ryabinin <aryabinin@virtuozzo.com>   2017-11-15 20:36:35 -0500
committer  Ingo Molnar <mingo@kernel.org>              2017-12-17 07:57:26 -0500
commit     2aeb07365bcd489620f71390a7d2031cd4dfb83e (patch)
tree       c003ea482b02c2197ef86fe26d48fd6790409438
parent     3382290ed2d5e275429cef510ab21889d3ccd164 (diff)
x86/mm/kasan: Don't use vmemmap_populate() to initialize shadow
[ Note, this is a Git cherry-pick of the following commit:

    d17a1d97dc20: ("x86/mm/kasan: don't use vmemmap_populate() to initialize shadow")

  ... for easier x86 PTI code testing and back-porting. ]

The KASAN shadow is currently mapped using vmemmap_populate() since that
provides a semi-convenient way to map pages into init_top_pgt. However,
since that no longer zeroes the mapped pages, it is not suitable for KASAN,
which requires zeroed shadow memory.

Add kasan_populate_shadow() interface and use it instead of
vmemmap_populate(). Besides, this allows us to take advantage of gigantic
pages and use them to populate the shadow, which should save us some memory
wasted on page tables and reduce TLB pressure.

Link: http://lkml.kernel.org/r/20171103185147.2688-2-pasha.tatashin@oracle.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Bob Picco <bob.picco@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--   arch/x86/Kconfig               2
-rw-r--r--   arch/x86/mm/kasan_init_64.c  143
2 files changed, 137 insertions, 8 deletions
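
The new kasan_populate_*() helpers in this patch all follow the kernel's standard page-table walk pattern: each level clamps its sub-range with a *_addr_end() helper and hands the clamped range to the next level down. Below is a minimal user-space sketch of that clamping, not part of the patch; PMD_SHIFT mirrors the x86-64 value (2 MiB pmds) and pmd_addr_end() here is a simplified stand-in for the kernel macro of the same name.

/* Illustration only -- not part of the patch. */
#include <stdio.h>

#define PMD_SHIFT 21
#define PMD_SIZE  (1UL << PMD_SHIFT)
#define PMD_MASK  (~(PMD_SIZE - 1))

/* Next 2 MiB boundary after addr, or end, whichever comes first. */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

        return boundary - 1 < end - 1 ? boundary : end;
}

int main(void)
{
        unsigned long addr = 0x1ff000UL;   /* deliberately misaligned start */
        unsigned long end  = 0x600000UL;
        unsigned long next;

        do {
                next = pmd_addr_end(addr, end);
                /* a populate helper would map [addr, next) here */
                printf("populate [%#lx, %#lx)\n", addr, next);
        } while (addr = next, addr != end);

        return 0;
}

The first and last chunks come out smaller than PMD_SIZE, which is exactly why kasan_populate_pmd() below only attempts pmd_set_huge() when the sub-range is a full, aligned PMD_SIZE and otherwise falls back to 4 KiB pages.
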
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4ae940a0ed3b..665eba1b6103 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -108,7 +108,7 @@ config X86
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
 	select HAVE_ARCH_JUMP_LABEL
-	select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
+	select HAVE_ARCH_KASAN if X86_64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_KMEMCHECK
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 2b60dc6e64b1..99dfed6dfef8 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -4,12 +4,14 @@
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
 #include <linux/vmalloc.h>
 
 #include <asm/e820/types.h>
+#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -18,7 +20,134 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static int __init map_range(struct range *range)
+static __init void *early_alloc(size_t size, int nid)
+{
+        return memblock_virt_alloc_try_nid_nopanic(size, size,
+                __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
+                                      unsigned long end, int nid)
+{
+        pte_t *pte;
+
+        if (pmd_none(*pmd)) {
+                void *p;
+
+                if (boot_cpu_has(X86_FEATURE_PSE) &&
+                    ((end - addr) == PMD_SIZE) &&
+                    IS_ALIGNED(addr, PMD_SIZE)) {
+                        p = early_alloc(PMD_SIZE, nid);
+                        if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
+                                return;
+                        else if (p)
+                                memblock_free(__pa(p), PMD_SIZE);
+                }
+
+                p = early_alloc(PAGE_SIZE, nid);
+                pmd_populate_kernel(&init_mm, pmd, p);
+        }
+
+        pte = pte_offset_kernel(pmd, addr);
+        do {
+                pte_t entry;
+                void *p;
+
+                if (!pte_none(*pte))
+                        continue;
+
+                p = early_alloc(PAGE_SIZE, nid);
+                entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
+                set_pte_at(&init_mm, addr, pte, entry);
+        } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
+                                      unsigned long end, int nid)
+{
+        pmd_t *pmd;
+        unsigned long next;
+
+        if (pud_none(*pud)) {
+                void *p;
+
+                if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
+                    ((end - addr) == PUD_SIZE) &&
+                    IS_ALIGNED(addr, PUD_SIZE)) {
+                        p = early_alloc(PUD_SIZE, nid);
+                        if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
+                                return;
+                        else if (p)
+                                memblock_free(__pa(p), PUD_SIZE);
+                }
+
+                p = early_alloc(PAGE_SIZE, nid);
+                pud_populate(&init_mm, pud, p);
+        }
+
+        pmd = pmd_offset(pud, addr);
+        do {
+                next = pmd_addr_end(addr, end);
+                if (!pmd_large(*pmd))
+                        kasan_populate_pmd(pmd, addr, next, nid);
+        } while (pmd++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
+                                      unsigned long end, int nid)
+{
+        pud_t *pud;
+        unsigned long next;
+
+        if (p4d_none(*p4d)) {
+                void *p = early_alloc(PAGE_SIZE, nid);
+
+                p4d_populate(&init_mm, p4d, p);
+        }
+
+        pud = pud_offset(p4d, addr);
+        do {
+                next = pud_addr_end(addr, end);
+                if (!pud_large(*pud))
+                        kasan_populate_pud(pud, addr, next, nid);
+        } while (pud++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
+                                      unsigned long end, int nid)
+{
+        void *p;
+        p4d_t *p4d;
+        unsigned long next;
+
+        if (pgd_none(*pgd)) {
+                p = early_alloc(PAGE_SIZE, nid);
+                pgd_populate(&init_mm, pgd, p);
+        }
+
+        p4d = p4d_offset(pgd, addr);
+        do {
+                next = p4d_addr_end(addr, end);
+                kasan_populate_p4d(p4d, addr, next, nid);
+        } while (p4d++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
+                                         int nid)
+{
+        pgd_t *pgd;
+        unsigned long next;
+
+        addr = addr & PAGE_MASK;
+        end = round_up(end, PAGE_SIZE);
+        pgd = pgd_offset_k(addr);
+        do {
+                next = pgd_addr_end(addr, end);
+                kasan_populate_pgd(pgd, addr, next, nid);
+        } while (pgd++, addr = next, addr != end);
+}
+
+static void __init map_range(struct range *range)
 {
         unsigned long start;
         unsigned long end;
@@ -26,7 +155,7 @@ static int __init map_range(struct range *range)
         start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
         end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
 
-        return vmemmap_populate(start, end, NUMA_NO_NODE);
+        kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
 }
 
 static void __init clear_pgds(unsigned long start,
@@ -189,16 +318,16 @@ void __init kasan_init(void)
                 if (pfn_mapped[i].end == 0)
                         break;
 
-                if (map_range(&pfn_mapped[i]))
-                        panic("kasan: unable to allocate shadow!");
+                map_range(&pfn_mapped[i]);
         }
 
         kasan_populate_zero_shadow(
                 kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                 kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
-        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
-                        (unsigned long)kasan_mem_to_shadow(_end),
-                        NUMA_NO_NODE);
+        kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
+                              (unsigned long)kasan_mem_to_shadow(_end),
+                              early_pfn_to_nid(__pa(_stext)));
 
         kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                         (void *)KASAN_SHADOW_END);