aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm64/mm
diff options
context:
space:
mode:
authorArd Biesheuvel <ard.biesheuvel@linaro.org>2016-06-29 08:51:26 -0400
committerCatalin Marinas <catalin.marinas@arm.com>2016-07-01 06:56:26 -0400
commit53e1b32910a3bc94d9f122321442b79b314219f8 (patch)
tree3c229eca9be5bb61f266c3bde9a2144e37342b7b /arch/arm64/mm
parent7dd01aef055792260287c6708daf75aac3918f66 (diff)
arm64: mm: add param to force create_pgd_mapping() to use page mappings
Add a bool parameter 'allow_block_mappings' to create_pgd_mapping() and the various helper functions that it descends into, to give the caller control over whether block entries may be used to create the mapping. The UEFI runtime mapping routines will use this to avoid creating block entries that would need to be split up into page entries when applying the permissions listed in the Memory Attributes firmware table. This also replaces the block_mappings_allowed() helper function that was added for DEBUG_PAGEALLOC functionality, but the resulting code is functionally equivalent (given that debug_pagealloc does not operate on EFI page table entries anyway). Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--arch/arm64/mm/mmu.c67
1 files changed, 27 insertions, 40 deletions
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0c95d6ec873d..a289d66121b6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -155,29 +155,10 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
155 } while (pmd++, i++, i < PTRS_PER_PMD); 155 } while (pmd++, i++, i < PTRS_PER_PMD);
156} 156}
157 157
158#ifdef CONFIG_DEBUG_PAGEALLOC
159static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
160{
161
162 /*
163 * If debug_page_alloc is enabled we must map the linear map
164 * using pages. However, other mappings created by
165 * create_mapping_noalloc must use sections in some cases. Allow
166 * sections to be used in those cases, where no pgtable_alloc
167 * function is provided.
168 */
169 return !pgtable_alloc || !debug_pagealloc_enabled();
170}
171#else
172static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
173{
174 return true;
175}
176#endif
177
178static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end, 158static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
179 phys_addr_t phys, pgprot_t prot, 159 phys_addr_t phys, pgprot_t prot,
180 phys_addr_t (*pgtable_alloc)(void)) 160 phys_addr_t (*pgtable_alloc)(void),
161 bool allow_block_mappings)
181{ 162{
182 pmd_t *pmd; 163 pmd_t *pmd;
183 unsigned long next; 164 unsigned long next;
@@ -208,7 +189,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
208 next = pmd_addr_end(addr, end); 189 next = pmd_addr_end(addr, end);
209 /* try section mapping first */ 190 /* try section mapping first */
210 if (((addr | next | phys) & ~SECTION_MASK) == 0 && 191 if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
211 block_mappings_allowed(pgtable_alloc)) { 192 allow_block_mappings) {
212 pmd_t old_pmd =*pmd; 193 pmd_t old_pmd =*pmd;
213 pmd_set_huge(pmd, phys, prot); 194 pmd_set_huge(pmd, phys, prot);
214 /* 195 /*
@@ -247,7 +228,8 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
247 228
248static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, 229static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
249 phys_addr_t phys, pgprot_t prot, 230 phys_addr_t phys, pgprot_t prot,
250 phys_addr_t (*pgtable_alloc)(void)) 231 phys_addr_t (*pgtable_alloc)(void),
232 bool allow_block_mappings)
251{ 233{
252 pud_t *pud; 234 pud_t *pud;
253 unsigned long next; 235 unsigned long next;
@@ -267,8 +249,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
267 /* 249 /*
268 * For 4K granule only, attempt to put down a 1GB block 250 * For 4K granule only, attempt to put down a 1GB block
269 */ 251 */
270 if (use_1G_block(addr, next, phys) && 252 if (use_1G_block(addr, next, phys) && allow_block_mappings) {
271 block_mappings_allowed(pgtable_alloc)) {
272 pud_t old_pud = *pud; 253 pud_t old_pud = *pud;
273 pud_set_huge(pud, phys, prot); 254 pud_set_huge(pud, phys, prot);
274 255
@@ -289,7 +270,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
289 } 270 }
290 } else { 271 } else {
291 alloc_init_pmd(pud, addr, next, phys, prot, 272 alloc_init_pmd(pud, addr, next, phys, prot,
292 pgtable_alloc); 273 pgtable_alloc, allow_block_mappings);
293 } 274 }
294 phys += next - addr; 275 phys += next - addr;
295 } while (pud++, addr = next, addr != end); 276 } while (pud++, addr = next, addr != end);
@@ -303,7 +284,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
303 */ 284 */
304static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt, 285static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
305 phys_addr_t size, pgprot_t prot, 286 phys_addr_t size, pgprot_t prot,
306 phys_addr_t (*pgtable_alloc)(void)) 287 phys_addr_t (*pgtable_alloc)(void),
288 bool allow_block_mappings)
307{ 289{
308 unsigned long addr, length, end, next; 290 unsigned long addr, length, end, next;
309 291
@@ -321,7 +303,8 @@ static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
321 end = addr + length; 303 end = addr + length;
322 do { 304 do {
323 next = pgd_addr_end(addr, end); 305 next = pgd_addr_end(addr, end);
324 alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc); 306 alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
307 allow_block_mappings);
325 phys += next - addr; 308 phys += next - addr;
326 } while (pgd++, addr = next, addr != end); 309 } while (pgd++, addr = next, addr != end);
327} 310}
@@ -339,9 +322,11 @@ static phys_addr_t late_pgtable_alloc(void)
339static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, 322static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
340 unsigned long virt, phys_addr_t size, 323 unsigned long virt, phys_addr_t size,
341 pgprot_t prot, 324 pgprot_t prot,
342 phys_addr_t (*alloc)(void)) 325 phys_addr_t (*alloc)(void),
326 bool allow_block_mappings)
343{ 327{
344 init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc); 328 init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc,
329 allow_block_mappings);
345} 330}
346 331
347/* 332/*
@@ -357,16 +342,15 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
357 &phys, virt); 342 &phys, virt);
358 return; 343 return;
359 } 344 }
360 __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, 345 __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);
361 NULL);
362} 346}
363 347
364void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, 348void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
365 unsigned long virt, phys_addr_t size, 349 unsigned long virt, phys_addr_t size,
366 pgprot_t prot) 350 pgprot_t prot, bool allow_block_mappings)
367{ 351{
368 __create_pgd_mapping(mm->pgd, phys, virt, size, prot, 352 __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
369 late_pgtable_alloc); 353 late_pgtable_alloc, allow_block_mappings);
370} 354}
371 355
372static void create_mapping_late(phys_addr_t phys, unsigned long virt, 356static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -379,7 +363,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
379 } 363 }
380 364
381 __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, 365 __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
382 late_pgtable_alloc); 366 late_pgtable_alloc, !debug_pagealloc_enabled());
383} 367}
384 368
385static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end) 369static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
@@ -396,7 +380,8 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
396 if (end < kernel_start || start >= kernel_end) { 380 if (end < kernel_start || start >= kernel_end) {
397 __create_pgd_mapping(pgd, start, __phys_to_virt(start), 381 __create_pgd_mapping(pgd, start, __phys_to_virt(start),
398 end - start, PAGE_KERNEL, 382 end - start, PAGE_KERNEL,
399 early_pgtable_alloc); 383 early_pgtable_alloc,
384 !debug_pagealloc_enabled());
400 return; 385 return;
401 } 386 }
402 387
@@ -408,12 +393,14 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
408 __create_pgd_mapping(pgd, start, 393 __create_pgd_mapping(pgd, start,
409 __phys_to_virt(start), 394 __phys_to_virt(start),
410 kernel_start - start, PAGE_KERNEL, 395 kernel_start - start, PAGE_KERNEL,
411 early_pgtable_alloc); 396 early_pgtable_alloc,
397 !debug_pagealloc_enabled());
412 if (kernel_end < end) 398 if (kernel_end < end)
413 __create_pgd_mapping(pgd, kernel_end, 399 __create_pgd_mapping(pgd, kernel_end,
414 __phys_to_virt(kernel_end), 400 __phys_to_virt(kernel_end),
415 end - kernel_end, PAGE_KERNEL, 401 end - kernel_end, PAGE_KERNEL,
416 early_pgtable_alloc); 402 early_pgtable_alloc,
403 !debug_pagealloc_enabled());
417 404
418 /* 405 /*
419 * Map the linear alias of the [_text, __init_begin) interval as 406 * Map the linear alias of the [_text, __init_begin) interval as
@@ -423,7 +410,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
423 */ 410 */
424 __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start), 411 __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
425 kernel_end - kernel_start, PAGE_KERNEL_RO, 412 kernel_end - kernel_start, PAGE_KERNEL_RO,
426 early_pgtable_alloc); 413 early_pgtable_alloc, !debug_pagealloc_enabled());
427} 414}
428 415
429static void __init map_mem(pgd_t *pgd) 416static void __init map_mem(pgd_t *pgd)
@@ -480,7 +467,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
480 BUG_ON(!PAGE_ALIGNED(size)); 467 BUG_ON(!PAGE_ALIGNED(size));
481 468
482 __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot, 469 __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
483 early_pgtable_alloc); 470 early_pgtable_alloc, !debug_pagealloc_enabled());
484 471
485 vma->addr = va_start; 472 vma->addr = va_start;
486 vma->phys_addr = pa_start; 473 vma->phys_addr = pa_start;