aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm64/mm
diff options
context:
space:
mode:
authorArd Biesheuvel <ard.biesheuvel@linaro.org>2016-02-16 07:52:40 -0500
committerCatalin Marinas <catalin.marinas@arm.com>2016-02-18 13:16:44 -0500
commitf9040773b7bbbd9e98eb6184a263512a7cfc133f (patch)
tree8af5e77510e3774356f737a8674954e776001e93 /arch/arm64/mm
parenta0bf9776cd0be4490d4675d4108e13379849fc7f (diff)
arm64: move kernel image to base of vmalloc area
This moves the module area to right before the vmalloc area, and moves the kernel image to the base of the vmalloc area. This is an intermediate step towards implementing KASLR, which allows the kernel image to be located anywhere in the vmalloc area. Since other subsystems such as hibernate may still need to refer to the kernel text or data segments via their linear addresses, both are mapped in the linear region as well. The linear alias of the text region is mapped read-only/non-executable to prevent inadvertent modification or execution. Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--arch/arm64/mm/dump.c12
-rw-r--r--arch/arm64/mm/init.c23
-rw-r--r--arch/arm64/mm/kasan_init.c27
-rw-r--r--arch/arm64/mm/mmu.c110
4 files changed, 119 insertions, 53 deletions
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 0841b2bf0e6a..6be918478f85 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -35,7 +35,9 @@ struct addr_marker {
35}; 35};
36 36
37enum address_markers_idx { 37enum address_markers_idx {
38 VMALLOC_START_NR = 0, 38 MODULES_START_NR = 0,
39 MODULES_END_NR,
40 VMALLOC_START_NR,
39 VMALLOC_END_NR, 41 VMALLOC_END_NR,
40#ifdef CONFIG_SPARSEMEM_VMEMMAP 42#ifdef CONFIG_SPARSEMEM_VMEMMAP
41 VMEMMAP_START_NR, 43 VMEMMAP_START_NR,
@@ -45,12 +47,12 @@ enum address_markers_idx {
45 FIXADDR_END_NR, 47 FIXADDR_END_NR,
46 PCI_START_NR, 48 PCI_START_NR,
47 PCI_END_NR, 49 PCI_END_NR,
48 MODULES_START_NR,
49 MODULES_END_NR,
50 KERNEL_SPACE_NR, 50 KERNEL_SPACE_NR,
51}; 51};
52 52
53static struct addr_marker address_markers[] = { 53static struct addr_marker address_markers[] = {
54 { MODULES_VADDR, "Modules start" },
55 { MODULES_END, "Modules end" },
54 { VMALLOC_START, "vmalloc() Area" }, 56 { VMALLOC_START, "vmalloc() Area" },
55 { VMALLOC_END, "vmalloc() End" }, 57 { VMALLOC_END, "vmalloc() End" },
56#ifdef CONFIG_SPARSEMEM_VMEMMAP 58#ifdef CONFIG_SPARSEMEM_VMEMMAP
@@ -61,9 +63,7 @@ static struct addr_marker address_markers[] = {
61 { FIXADDR_TOP, "Fixmap end" }, 63 { FIXADDR_TOP, "Fixmap end" },
62 { PCI_IO_START, "PCI I/O start" }, 64 { PCI_IO_START, "PCI I/O start" },
63 { PCI_IO_END, "PCI I/O end" }, 65 { PCI_IO_END, "PCI I/O end" },
64 { MODULES_VADDR, "Modules start" }, 66 { PAGE_OFFSET, "Linear Mapping" },
65 { MODULES_END, "Modules end" },
66 { PAGE_OFFSET, "Kernel Mapping" },
67 { -1, NULL }, 67 { -1, NULL },
68}; 68};
69 69
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f3b061e67bfe..1d627cd8121c 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -36,6 +36,7 @@
36#include <linux/swiotlb.h> 36#include <linux/swiotlb.h>
37 37
38#include <asm/fixmap.h> 38#include <asm/fixmap.h>
39#include <asm/kasan.h>
39#include <asm/memory.h> 40#include <asm/memory.h>
40#include <asm/sections.h> 41#include <asm/sections.h>
41#include <asm/setup.h> 42#include <asm/setup.h>
@@ -302,22 +303,26 @@ void __init mem_init(void)
302#ifdef CONFIG_KASAN 303#ifdef CONFIG_KASAN
303 " kasan : 0x%16lx - 0x%16lx (%6ld GB)\n" 304 " kasan : 0x%16lx - 0x%16lx (%6ld GB)\n"
304#endif 305#endif
306 " modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
305 " vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n" 307 " vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n"
308 " .init : 0x%p" " - 0x%p" " (%6ld KB)\n"
309 " .text : 0x%p" " - 0x%p" " (%6ld KB)\n"
310 " .data : 0x%p" " - 0x%p" " (%6ld KB)\n"
306#ifdef CONFIG_SPARSEMEM_VMEMMAP 311#ifdef CONFIG_SPARSEMEM_VMEMMAP
307 " vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n" 312 " vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n"
308 " 0x%16lx - 0x%16lx (%6ld MB actual)\n" 313 " 0x%16lx - 0x%16lx (%6ld MB actual)\n"
309#endif 314#endif
310 " fixed : 0x%16lx - 0x%16lx (%6ld KB)\n" 315 " fixed : 0x%16lx - 0x%16lx (%6ld KB)\n"
311 " PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n" 316 " PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n"
312 " modules : 0x%16lx - 0x%16lx (%6ld MB)\n" 317 " memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
313 " memory : 0x%16lx - 0x%16lx (%6ld MB)\n"
314 " .init : 0x%p" " - 0x%p" " (%6ld KB)\n"
315 " .text : 0x%p" " - 0x%p" " (%6ld KB)\n"
316 " .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
317#ifdef CONFIG_KASAN 318#ifdef CONFIG_KASAN
318 MLG(KASAN_SHADOW_START, KASAN_SHADOW_END), 319 MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
319#endif 320#endif
321 MLM(MODULES_VADDR, MODULES_END),
320 MLG(VMALLOC_START, VMALLOC_END), 322 MLG(VMALLOC_START, VMALLOC_END),
323 MLK_ROUNDUP(__init_begin, __init_end),
324 MLK_ROUNDUP(_text, _etext),
325 MLK_ROUNDUP(_sdata, _edata),
321#ifdef CONFIG_SPARSEMEM_VMEMMAP 326#ifdef CONFIG_SPARSEMEM_VMEMMAP
322 MLG((unsigned long)vmemmap, 327 MLG((unsigned long)vmemmap,
323 (unsigned long)vmemmap + VMEMMAP_SIZE), 328 (unsigned long)vmemmap + VMEMMAP_SIZE),
@@ -326,11 +331,7 @@ void __init mem_init(void)
326#endif 331#endif
327 MLK(FIXADDR_START, FIXADDR_TOP), 332 MLK(FIXADDR_START, FIXADDR_TOP),
328 MLM(PCI_IO_START, PCI_IO_END), 333 MLM(PCI_IO_START, PCI_IO_END),
329 MLM(MODULES_VADDR, MODULES_END), 334 MLM(PAGE_OFFSET, (unsigned long)high_memory));
330 MLM(PAGE_OFFSET, (unsigned long)high_memory),
331 MLK_ROUNDUP(__init_begin, __init_end),
332 MLK_ROUNDUP(_text, _etext),
333 MLK_ROUNDUP(_sdata, _edata));
334 335
335#undef MLK 336#undef MLK
336#undef MLM 337#undef MLM
@@ -358,8 +359,8 @@ void __init mem_init(void)
358 359
359void free_initmem(void) 360void free_initmem(void)
360{ 361{
361 fixup_init();
362 free_initmem_default(0); 362 free_initmem_default(0);
363 fixup_init();
363} 364}
364 365
365#ifdef CONFIG_BLK_DEV_INITRD 366#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index cc569a38bc76..7f10cc91fa8a 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -17,9 +17,11 @@
17#include <linux/start_kernel.h> 17#include <linux/start_kernel.h>
18 18
19#include <asm/mmu_context.h> 19#include <asm/mmu_context.h>
20#include <asm/kernel-pgtable.h>
20#include <asm/page.h> 21#include <asm/page.h>
21#include <asm/pgalloc.h> 22#include <asm/pgalloc.h>
22#include <asm/pgtable.h> 23#include <asm/pgtable.h>
24#include <asm/sections.h>
23#include <asm/tlbflush.h> 25#include <asm/tlbflush.h>
24 26
25static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE); 27static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
@@ -33,7 +35,7 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
33 if (pmd_none(*pmd)) 35 if (pmd_none(*pmd))
34 pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); 36 pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
35 37
36 pte = pte_offset_kernel(pmd, addr); 38 pte = pte_offset_kimg(pmd, addr);
37 do { 39 do {
38 next = addr + PAGE_SIZE; 40 next = addr + PAGE_SIZE;
39 set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page), 41 set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
@@ -51,7 +53,7 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
51 if (pud_none(*pud)) 53 if (pud_none(*pud))
52 pud_populate(&init_mm, pud, kasan_zero_pmd); 54 pud_populate(&init_mm, pud, kasan_zero_pmd);
53 55
54 pmd = pmd_offset(pud, addr); 56 pmd = pmd_offset_kimg(pud, addr);
55 do { 57 do {
56 next = pmd_addr_end(addr, end); 58 next = pmd_addr_end(addr, end);
57 kasan_early_pte_populate(pmd, addr, next); 59 kasan_early_pte_populate(pmd, addr, next);
@@ -68,7 +70,7 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
68 if (pgd_none(*pgd)) 70 if (pgd_none(*pgd))
69 pgd_populate(&init_mm, pgd, kasan_zero_pud); 71 pgd_populate(&init_mm, pgd, kasan_zero_pud);
70 72
71 pud = pud_offset(pgd, addr); 73 pud = pud_offset_kimg(pgd, addr);
72 do { 74 do {
73 next = pud_addr_end(addr, end); 75 next = pud_addr_end(addr, end);
74 kasan_early_pmd_populate(pud, addr, next); 76 kasan_early_pmd_populate(pud, addr, next);
@@ -126,9 +128,13 @@ static void __init clear_pgds(unsigned long start,
126 128
127void __init kasan_init(void) 129void __init kasan_init(void)
128{ 130{
131 u64 kimg_shadow_start, kimg_shadow_end;
129 struct memblock_region *reg; 132 struct memblock_region *reg;
130 int i; 133 int i;
131 134
135 kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
136 kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
137
132 /* 138 /*
133 * We are going to perform proper setup of shadow memory. 139 * We are going to perform proper setup of shadow memory.
134 * At first we should unmap early shadow (clear_pgds() call bellow). 140 * At first we should unmap early shadow (clear_pgds() call bellow).
@@ -142,8 +148,23 @@ void __init kasan_init(void)
142 148
143 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); 149 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
144 150
151 vmemmap_populate(kimg_shadow_start, kimg_shadow_end, NUMA_NO_NODE);
152
153 /*
154 * vmemmap_populate() has populated the shadow region that covers the
155 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
156 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
157 * kasan_populate_zero_shadow() from replacing the PMD block mappings
158 * with PMD table mappings at the edges of the shadow region for the
159 * kernel image.
160 */
161 if (ARM64_SWAPPER_USES_SECTION_MAPS)
162 kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
163
145 kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, 164 kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
146 kasan_mem_to_shadow((void *)MODULES_VADDR)); 165 kasan_mem_to_shadow((void *)MODULES_VADDR));
166 kasan_populate_zero_shadow((void *)kimg_shadow_end,
167 kasan_mem_to_shadow((void *)PAGE_OFFSET));
147 168
148 for_each_memblock(memory, reg) { 169 for_each_memblock(memory, reg) {
149 void *start = (void *)__phys_to_virt(reg->base); 170 void *start = (void *)__phys_to_virt(reg->base);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d2d05585a357..1d9aea4adc37 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -53,6 +53,10 @@ u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
53unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; 53unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
54EXPORT_SYMBOL(empty_zero_page); 54EXPORT_SYMBOL(empty_zero_page);
55 55
56static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
57static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
58static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
59
56pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 60pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
57 unsigned long size, pgprot_t vma_prot) 61 unsigned long size, pgprot_t vma_prot)
58{ 62{
@@ -380,16 +384,15 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
380 384
381static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end) 385static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
382{ 386{
383
384 unsigned long kernel_start = __pa(_stext); 387 unsigned long kernel_start = __pa(_stext);
385 unsigned long kernel_end = __pa(_end); 388 unsigned long kernel_end = __pa(_etext);
386 389
387 /* 390 /*
388 * The kernel itself is mapped at page granularity. Map all other 391 * Take care not to create a writable alias for the
389 * memory, making sure we don't overwrite the existing kernel mappings. 392 * read-only text and rodata sections of the kernel image.
390 */ 393 */
391 394
392 /* No overlap with the kernel. */ 395 /* No overlap with the kernel text */
393 if (end < kernel_start || start >= kernel_end) { 396 if (end < kernel_start || start >= kernel_end) {
394 __create_pgd_mapping(pgd, start, __phys_to_virt(start), 397 __create_pgd_mapping(pgd, start, __phys_to_virt(start),
395 end - start, PAGE_KERNEL, 398 end - start, PAGE_KERNEL,
@@ -398,8 +401,8 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
398 } 401 }
399 402
400 /* 403 /*
401 * This block overlaps the kernel mapping. Map the portion(s) which 404 * This block overlaps the kernel text mapping.
402 * don't overlap. 405 * Map the portion(s) which don't overlap.
403 */ 406 */
404 if (start < kernel_start) 407 if (start < kernel_start)
405 __create_pgd_mapping(pgd, start, 408 __create_pgd_mapping(pgd, start,
@@ -411,6 +414,16 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
411 __phys_to_virt(kernel_end), 414 __phys_to_virt(kernel_end),
412 end - kernel_end, PAGE_KERNEL, 415 end - kernel_end, PAGE_KERNEL,
413 early_pgtable_alloc); 416 early_pgtable_alloc);
417
418 /*
419 * Map the linear alias of the [_stext, _etext) interval as
420 * read-only/non-executable. This makes the contents of the
421 * region accessible to subsystems such as hibernate, but
422 * protects it from inadvertent modification or execution.
423 */
424 __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
425 kernel_end - kernel_start, PAGE_KERNEL_RO,
426 early_pgtable_alloc);
414} 427}
415 428
416static void __init map_mem(pgd_t *pgd) 429static void __init map_mem(pgd_t *pgd)
@@ -431,25 +444,28 @@ static void __init map_mem(pgd_t *pgd)
431 } 444 }
432} 445}
433 446
434#ifdef CONFIG_DEBUG_RODATA
435void mark_rodata_ro(void) 447void mark_rodata_ro(void)
436{ 448{
449 if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
450 return;
451
437 create_mapping_late(__pa(_stext), (unsigned long)_stext, 452 create_mapping_late(__pa(_stext), (unsigned long)_stext,
438 (unsigned long)_etext - (unsigned long)_stext, 453 (unsigned long)_etext - (unsigned long)_stext,
439 PAGE_KERNEL_ROX); 454 PAGE_KERNEL_ROX);
440
441} 455}
442#endif
443 456
444void fixup_init(void) 457void fixup_init(void)
445{ 458{
446 create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin, 459 /*
447 (unsigned long)__init_end - (unsigned long)__init_begin, 460 * Unmap the __init region but leave the VM area in place. This
448 PAGE_KERNEL); 461 * prevents the region from being reused for kernel modules, which
462 * is not supported by kallsyms.
463 */
464 unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
449} 465}
450 466
451static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end, 467static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
452 pgprot_t prot) 468 pgprot_t prot, struct vm_struct *vma)
453{ 469{
454 phys_addr_t pa_start = __pa(va_start); 470 phys_addr_t pa_start = __pa(va_start);
455 unsigned long size = va_end - va_start; 471 unsigned long size = va_end - va_start;
@@ -459,6 +475,14 @@ static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
459 475
460 __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot, 476 __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
461 early_pgtable_alloc); 477 early_pgtable_alloc);
478
479 vma->addr = va_start;
480 vma->phys_addr = pa_start;
481 vma->size = size;
482 vma->flags = VM_MAP;
483 vma->caller = __builtin_return_address(0);
484
485 vm_area_add_early(vma);
462} 486}
463 487
464/* 488/*
@@ -466,17 +490,35 @@ static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
466 */ 490 */
467static void __init map_kernel(pgd_t *pgd) 491static void __init map_kernel(pgd_t *pgd)
468{ 492{
493 static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data;
469 494
470 map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC); 495 map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
471 map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC); 496 map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
472 map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL); 497 &vmlinux_init);
498 map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
473 499
474 /* 500 if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
475 * The fixmap falls in a separate pgd to the kernel, and doesn't live 501 /*
476 * in the carveout for the swapper_pg_dir. We can simply re-use the 502 * The fixmap falls in a separate pgd to the kernel, and doesn't
477 * existing dir for the fixmap. 503 * live in the carveout for the swapper_pg_dir. We can simply
478 */ 504 * re-use the existing dir for the fixmap.
479 set_pgd(pgd_offset_raw(pgd, FIXADDR_START), *pgd_offset_k(FIXADDR_START)); 505 */
506 set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
507 *pgd_offset_k(FIXADDR_START));
508 } else if (CONFIG_PGTABLE_LEVELS > 3) {
509 /*
510 * The fixmap shares its top level pgd entry with the kernel
511 * mapping. This can really only occur when we are running
512 * with 16k/4 levels, so we can simply reuse the pud level
513 * entry instead.
514 */
515 BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
516 set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
517 __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
518 pud_clear_fixmap();
519 } else {
520 BUG();
521 }
480 522
481 kasan_copy_shadow(pgd); 523 kasan_copy_shadow(pgd);
482} 524}
@@ -602,14 +644,6 @@ void vmemmap_free(unsigned long start, unsigned long end)
602} 644}
603#endif /* CONFIG_SPARSEMEM_VMEMMAP */ 645#endif /* CONFIG_SPARSEMEM_VMEMMAP */
604 646
605static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
606#if CONFIG_PGTABLE_LEVELS > 2
607static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
608#endif
609#if CONFIG_PGTABLE_LEVELS > 3
610static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
611#endif
612
613static inline pud_t * fixmap_pud(unsigned long addr) 647static inline pud_t * fixmap_pud(unsigned long addr)
614{ 648{
615 pgd_t *pgd = pgd_offset_k(addr); 649 pgd_t *pgd = pgd_offset_k(addr);
@@ -641,8 +675,18 @@ void __init early_fixmap_init(void)
641 unsigned long addr = FIXADDR_START; 675 unsigned long addr = FIXADDR_START;
642 676
643 pgd = pgd_offset_k(addr); 677 pgd = pgd_offset_k(addr);
644 pgd_populate(&init_mm, pgd, bm_pud); 678 if (CONFIG_PGTABLE_LEVELS > 3 && !pgd_none(*pgd)) {
645 pud = fixmap_pud(addr); 679 /*
680 * We only end up here if the kernel mapping and the fixmap
681 * share the top level pgd entry, which should only happen on
682 * 16k/4 levels configurations.
683 */
684 BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
685 pud = pud_offset_kimg(pgd, addr);
686 } else {
687 pgd_populate(&init_mm, pgd, bm_pud);
688 pud = fixmap_pud(addr);
689 }
646 pud_populate(&init_mm, pud, bm_pmd); 690 pud_populate(&init_mm, pud, bm_pmd);
647 pmd = fixmap_pmd(addr); 691 pmd = fixmap_pmd(addr);
648 pmd_populate_kernel(&init_mm, pmd, bm_pte); 692 pmd_populate_kernel(&init_mm, pmd, bm_pte);