Diffstat (limited to 'arch/arm64/mm')
 arch/arm64/mm/init.c |   1 +
 arch/arm64/mm/mm.h   |   2 ++
 arch/arm64/mm/mmu.c  | 211 +++++++++++++++++++++++++++++++++++++++-----
 3 files changed, 190 insertions(+), 24 deletions(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 11c7b701b681..43cccb5101c0 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -344,6 +344,7 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
+	fixup_init();
 	free_initmem_default(0);
 	free_alternatives_memory();
 }
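
Note: the ordering in this hunk matters. fixup_init() (added to mmu.c below) remaps the init region with non-executable permissions before free_initmem_default() hands those pages back to the allocator. A sketch of the resulting function, assembled from the hunk above:

	void free_initmem(void)
	{
		fixup_init();			/* init area -> PAGE_KERNEL (no exec) */
		free_initmem_default(0);	/* then release the pages */
		free_alternatives_memory();
	}
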
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
index 50c3351df9c7..ef47d99b5cbc 100644
--- a/arch/arm64/mm/mm.h
+++ b/arch/arm64/mm/mm.h
@@ -1 +1,3 @@
 extern void __init bootmem_init(void);
+
+void fixup_init(void);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e57c170a91f3..91d55b6efd8a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -26,6 +26,7 @@
 #include <linux/memblock.h>
 #include <linux/fs.h>
 #include <linux/io.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cputype.h>
 #include <asm/fixmap.h>
@@ -59,21 +60,43 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 static void __init *early_alloc(unsigned long sz)
 {
 	void *ptr = __va(memblock_alloc(sz, sz));
+	BUG_ON(!ptr);
 	memset(ptr, 0, sz);
 	return ptr;
 }
 
-static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+/*
+ * remap a PMD into pages
+ */
+static void split_pmd(pmd_t *pmd, pte_t *pte)
+{
+	unsigned long pfn = pmd_pfn(*pmd);
+	int i = 0;
+
+	do {
+		/*
+		 * Need to have the least restrictive permissions available
+		 * permissions will be fixed up later
+		 */
+		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+		pfn++;
+	} while (pte++, i++, i < PTRS_PER_PTE);
+}
+
+static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, unsigned long pfn,
-				  pgprot_t prot)
+				  pgprot_t prot,
+				  void *(*alloc)(unsigned long size))
 {
 	pte_t *pte;
 
-	if (pmd_none(*pmd)) {
-		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+	if (pmd_none(*pmd) || pmd_bad(*pmd)) {
+		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
+		if (pmd_sect(*pmd))
+			split_pmd(pmd, pte);
 		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
+		flush_tlb_all();
 	}
-	BUG_ON(pmd_bad(*pmd));
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
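
Note: split_pmd() rewrites one 2MB PMD section as PTRS_PER_PTE individual page entries, deliberately using the most permissive protection (PAGE_KERNEL_EXEC) so nothing faults before permissions are tightened later. The comma expressions in the loop condition can be hard to read; the loop is equivalent to this sketch:

	/* equivalent form of the split_pmd() loop */
	for (i = 0; i < PTRS_PER_PTE; i++, pte++, pfn++)
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
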
@@ -82,9 +105,22 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
+void split_pud(pud_t *old_pud, pmd_t *pmd)
+{
+	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
+	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
+	int i = 0;
+
+	do {
+		set_pmd(pmd, __pmd(addr | prot));
+		addr += PMD_SIZE;
+	} while (pmd++, i++, i < PTRS_PER_PMD);
+}
+
+static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 			   unsigned long addr, unsigned long end,
-			   phys_addr_t phys, pgprot_t prot)
+			   phys_addr_t phys, pgprot_t prot,
+			   void *(*alloc)(unsigned long size))
 {
 	pmd_t *pmd;
 	unsigned long next;
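
Note: in split_pud(), the XOR is doing attribute extraction. A 1GB block descriptor is the gigabyte-aligned output address ORed with attribute bits, so XORing the descriptor with its own address (recovered via pud_pfn() << PAGE_SHIFT) leaves only the attributes, which are then applied to each new 2MB entry. A worked example with made-up values (illustration only, not kernel code):

	unsigned long desc  = 0x40000000UL | 0x711UL;	/* addr | attrs      */
	unsigned long addr  = 0x40000000UL;		/* 1GB-aligned       */
	unsigned long attrs = desc ^ addr;		/* 0x711, attrs only */
	/* new entries: (addr + 0*PMD_SIZE) | attrs, (addr + 1*PMD_SIZE) | attrs, ... */
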
@@ -93,8 +129,16 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 	 * Check for initial section mappings in the pgd/pud and remove them.
 	 */
 	if (pud_none(*pud) || pud_bad(*pud)) {
-		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
+		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
+		if (pud_sect(*pud)) {
+			/*
+			 * need to have the 1G of mappings continue to be
+			 * present
+			 */
+			split_pud(pud, pmd);
+		}
 		pud_populate(mm, pud, pmd);
+		flush_tlb_all();
 	}
 
 	pmd = pmd_offset(pud, addr);
@@ -113,21 +157,34 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 			flush_tlb_all();
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot);
+				       prot, alloc);
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
 }
 
-static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
+static inline bool use_1G_block(unsigned long addr, unsigned long next,
+			unsigned long phys)
+{
+	if (PAGE_SHIFT != 12)
+		return false;
+
+	if (((addr | next | phys) & ~PUD_MASK) != 0)
+		return false;
+
+	return true;
+}
+
+static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 			   unsigned long addr, unsigned long end,
-			   phys_addr_t phys, pgprot_t prot)
+			   phys_addr_t phys, pgprot_t prot,
+			   void *(*alloc)(unsigned long size))
 {
 	pud_t *pud;
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
+		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
 		pgd_populate(mm, pgd, pud);
 	}
 	BUG_ON(pgd_bad(*pgd));
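
Note: use_1G_block() folds the old two-line condition into a helper. With a 4K granule (PAGE_SHIFT == 12), PUD_SIZE is 1GB, and ORing addr, next and phys before masking with ~PUD_MASK yields zero only if all three are 1GB-aligned, i.e. the chunk is a whole, naturally aligned gigabyte on both the virtual and physical side. A standalone sketch of the same test (constants assumed for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	#define PUD_SIZE	(1ULL << 30)	/* 1GB with a 4K granule */
	#define PUD_MASK	(~(PUD_SIZE - 1))

	/* true iff addr, next and phys all have their low 30 bits clear */
	static bool spans_aligned_1g(uint64_t addr, uint64_t next, uint64_t phys)
	{
		return ((addr | next | phys) & ~PUD_MASK) == 0;
	}
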
@@ -139,8 +196,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if ((PAGE_SHIFT == 12) &&
-		    ((addr | next | phys) & ~PUD_MASK) == 0) {
+		if (use_1G_block(addr, next, phys)) {
 			pud_t old_pud = *pud;
 			set_pud(pud, __pud(phys |
 					pgprot_val(mk_sect_prot(prot))));
@@ -158,7 +214,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 				flush_tlb_all();
 			}
 		} else {
-			alloc_init_pmd(mm, pud, addr, next, phys, prot);
+			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
 		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
@@ -168,9 +224,10 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
  * Create the page directory entries and any necessary page tables for the
  * mapping specified by 'md'.
  */
-static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
+static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
 				    phys_addr_t phys, unsigned long virt,
-				    phys_addr_t size, pgprot_t prot)
+				    phys_addr_t size, pgprot_t prot,
+				    void *(*alloc)(unsigned long size))
 {
 	unsigned long addr, length, end, next;
 
@@ -180,13 +237,23 @@ static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
 	end = addr + length;
 	do {
 		next = pgd_addr_end(addr, end);
-		alloc_init_pud(mm, pgd, addr, next, phys, prot);
+		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
 
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
-				  phys_addr_t size)
+static void *late_alloc(unsigned long size)
+{
+	void *ptr;
+
+	BUG_ON(size > PAGE_SIZE);
+	ptr = (void *)__get_free_page(PGALLOC_GFP);
+	BUG_ON(!ptr);
+	return ptr;
+}
+
+static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+				  phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
 		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
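
Note: the alloc callback is the heart of this refactoring. early_alloc() draws page-table memory from memblock before the page allocator is up; late_alloc() uses __get_free_page() once it is. create_mapping() becomes __ref because a function that lives past boot now legitimately references the __init early_alloc(). The two flavours, side by side (sketch):

	/* boot time: memblock-backed page tables */
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
			 phys, virt, size, prot, early_alloc);

	/* after boot (permission fixups): buddy-allocator-backed tables */
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
			 phys, virt, size, prot, late_alloc);
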
@@ -194,15 +261,71 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 		return;
 	}
 	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
-			 size, PAGE_KERNEL_EXEC);
+			 size, prot, early_alloc);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
 			       pgprot_t prot)
 {
-	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot);
+	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
+				early_alloc);
+}
+
+static void create_mapping_late(phys_addr_t phys, unsigned long virt,
+				  phys_addr_t size, pgprot_t prot)
+{
+	if (virt < VMALLOC_START) {
+		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+			&phys, virt);
+		return;
+	}
+
+	return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
+				phys, virt, size, prot, late_alloc);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+	/*
+	 * Set up the executable regions using the existing section mappings
+	 * for now. This will get more fine grained later once all memory
+	 * is mapped
+	 */
+	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+
+	if (end < kernel_x_start) {
+		create_mapping(start, __phys_to_virt(start),
+			end - start, PAGE_KERNEL);
+	} else if (start >= kernel_x_end) {
+		create_mapping(start, __phys_to_virt(start),
+			end - start, PAGE_KERNEL);
+	} else {
+		if (start < kernel_x_start)
+			create_mapping(start, __phys_to_virt(start),
+				kernel_x_start - start,
+				PAGE_KERNEL);
+		create_mapping(kernel_x_start,
+				__phys_to_virt(kernel_x_start),
+				kernel_x_end - kernel_x_start,
+				PAGE_KERNEL_EXEC);
+		if (kernel_x_end < end)
+			create_mapping(kernel_x_end,
+				__phys_to_virt(kernel_x_end),
+				end - kernel_x_end,
+				PAGE_KERNEL);
+	}
+
+}
+#else
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+	create_mapping(start, __phys_to_virt(start), end - start,
+			PAGE_KERNEL_EXEC);
 }
+#endif
 
 static void __init map_mem(void)
 {
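
Note: under CONFIG_DEBUG_RODATA, __map_memblock() carves each memblock region into up to three mappings around the executable window [kernel_x_start, kernel_x_end), roughly:

	/*
	 *  region:  |-------------------------------------------|
	 *  kernel:           |== _stext ... __init_end ==|
	 *  mapping: | PAGE_KERNEL |  PAGE_KERNEL_EXEC  | PAGE_KERNEL |
	 *
	 * The executable window is rounded out to SECTION_SIZE here and
	 * trimmed back by fixup_executable() once memory is fully mapped.
	 */
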
@@ -248,14 +371,53 @@ static void __init map_mem(void)
 		memblock_set_current_limit(limit);
 	}
 #endif
-
-		create_mapping(start, __phys_to_virt(start), end - start);
+		__map_memblock(start, end);
 	}
 
 	/* Limit no longer required. */
 	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 }
 
+void __init fixup_executable(void)
+{
+#ifdef CONFIG_DEBUG_RODATA
+	/* now that we are actually fully mapped, make the start/end more fine grained */
+	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+		unsigned long aligned_start = round_down(__pa(_stext),
+							SECTION_SIZE);
+
+		create_mapping(aligned_start, __phys_to_virt(aligned_start),
+				__pa(_stext) - aligned_start,
+				PAGE_KERNEL);
+	}
+
+	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+		unsigned long aligned_end = round_up(__pa(__init_end),
+							SECTION_SIZE);
+		create_mapping(__pa(__init_end), (unsigned long)__init_end,
+				aligned_end - __pa(__init_end),
+				PAGE_KERNEL);
+	}
+#endif
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+	create_mapping_late(__pa(_stext), (unsigned long)_stext,
+				(unsigned long)_etext - (unsigned long)_stext,
+				PAGE_KERNEL_EXEC | PTE_RDONLY);
+
+}
+#endif
+
+void fixup_init(void)
+{
+	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
+			(unsigned long)__init_end - (unsigned long)__init_begin,
+			PAGE_KERNEL);
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps and sets up the zero page.
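
Note: taken together, the new hooks give the kernel image a permission lifecycle; roughly (call order only, a sketch):

	/*
	 * paging_init()
	 *   map_mem();            coarse mappings, kernel text window RWX
	 *   fixup_executable();   strip exec from the SECTION_SIZE padding
	 * ...
	 * mark_rodata_ro();       _stext.._etext -> read-only, executable
	 * free_initmem()
	 *   fixup_init();         init area -> writable, non-executable
	 */
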
@@ -265,6 +427,7 @@ void __init paging_init(void)
 	void *zero_page;
 
 	map_mem();
+	fixup_executable();
 
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a