aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJungseok Lee <jays.lee@samsung.com>2014-05-12 05:40:51 -0400
committerCatalin Marinas <catalin.marinas@arm.com>2014-07-23 10:27:40 -0400
commitc79b954bf6c006f2d3dd9d01f231abeead13a410 (patch)
treeb7d4375c12d2b61554af0e5eb7596b22c60c643e
parent57e0139041a978c0cfa4d2366c96ea3418e7d553 (diff)
arm64: mm: Implement 4 levels of translation tables
This patch implements 4 levels of translation tables since 3 levels
of page tables with 4KB pages cannot support 40-bit physical address
space described in [1] due to the following issue.

It is a restriction that kernel logical memory map with 4KB + 3 levels
(0xffffffc000000000-0xffffffffffffffff) cannot cover RAM region from
544GB to 1024GB in [1]. Specifically, ARM64 kernel fails to create
mapping for this region in map_mem function since __phys_to_virt for
this region reaches to address overflow.

If SoC design follows the document, [1], over 32GB RAM would be placed
from 544GB. Even 64GB system is supposed to use the region from 544GB
to 576GB for only 32GB RAM. Naturally, it would reach to enable 4 levels
of page tables to avoid hacking __virt_to_phys and __phys_to_virt.

However, it is recommended 4 levels of page table should be only enabled
if memory map is too sparse or there is about 512GB RAM.

References
----------
[1]: Principles of ARM Memory Maps, White Paper, Issue C

Signed-off-by: Jungseok Lee <jays.lee@samsung.com>
Reviewed-by: Sungjinn Chung <sungjinn.chung@samsung.com>
Acked-by: Kukjin Kim <kgene.kim@samsung.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Steve Capper <steve.capper@linaro.org>
[catalin.marinas@arm.com: MEMBLOCK_INITIAL_LIMIT removed, same as PUD_SIZE]
[catalin.marinas@arm.com: early_ioremap_init() updated for 4 levels]
[catalin.marinas@arm.com: 48-bit VA depends on BROKEN until KVM is fixed]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Jungseok Lee <jungseoklee85@gmail.com>
-rw-r--r--arch/arm64/Kconfig8
-rw-r--r--arch/arm64/include/asm/page.h13
-rw-r--r--arch/arm64/include/asm/pgalloc.h20
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h6
-rw-r--r--arch/arm64/include/asm/pgtable.h40
-rw-r--r--arch/arm64/include/asm/tlb.h9
-rw-r--r--arch/arm64/kernel/head.S42
-rw-r--r--arch/arm64/kernel/traps.c5
-rw-r--r--arch/arm64/mm/fault.c1
-rw-r--r--arch/arm64/mm/ioremap.c6
-rw-r--r--arch/arm64/mm/mmu.c14
11 files changed, 147 insertions, 17 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 94ba45b198ff..cf07cc7295bb 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -195,12 +195,17 @@ config ARM64_VA_BITS_42
195 bool "42-bit" 195 bool "42-bit"
196 depends on ARM64_64K_PAGES 196 depends on ARM64_64K_PAGES
197 197
198config ARM64_VA_BITS_48
199 bool "48-bit"
200 depends on BROKEN
201
198endchoice 202endchoice
199 203
200config ARM64_VA_BITS 204config ARM64_VA_BITS
201 int 205 int
202 default 39 if ARM64_VA_BITS_39 206 default 39 if ARM64_VA_BITS_39
203 default 42 if ARM64_VA_BITS_42 207 default 42 if ARM64_VA_BITS_42
208 default 48 if ARM64_VA_BITS_48
204 209
205config ARM64_2_LEVELS 210config ARM64_2_LEVELS
206 def_bool y if ARM64_64K_PAGES && ARM64_VA_BITS_42 211 def_bool y if ARM64_64K_PAGES && ARM64_VA_BITS_42
@@ -208,6 +213,9 @@ config ARM64_2_LEVELS
208config ARM64_3_LEVELS 213config ARM64_3_LEVELS
209 def_bool y if ARM64_4K_PAGES && ARM64_VA_BITS_39 214 def_bool y if ARM64_4K_PAGES && ARM64_VA_BITS_39
210 215
216config ARM64_4_LEVELS
217 def_bool y if ARM64_4K_PAGES && ARM64_VA_BITS_48
218
211config CPU_BIG_ENDIAN 219config CPU_BIG_ENDIAN
212 bool "Build big-endian kernel" 220 bool "Build big-endian kernel"
213 help 221 help
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 1cbde2773c4f..cf9afa0366b6 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -33,19 +33,26 @@
33 33
34/* 34/*
35 * The idmap and swapper page tables need some space reserved in the kernel 35 * The idmap and swapper page tables need some space reserved in the kernel
36 * image. Both require a pgd and a next level table to (section) map the 36 * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
37 * kernel. The the swapper also maps the FDT (see __create_page_tables for 37 * map the kernel. The swapper also maps the FDT (see __create_page_tables for
38 * more information). 38 * more information).
39 */ 39 */
40#ifdef CONFIG_ARM64_4_LEVELS
41#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
42#define IDMAP_DIR_SIZE (3 * PAGE_SIZE)
43#else
40#define SWAPPER_DIR_SIZE (2 * PAGE_SIZE) 44#define SWAPPER_DIR_SIZE (2 * PAGE_SIZE)
41#define IDMAP_DIR_SIZE (2 * PAGE_SIZE) 45#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
46#endif
42 47
43#ifndef __ASSEMBLY__ 48#ifndef __ASSEMBLY__
44 49
45#ifdef CONFIG_ARM64_2_LEVELS 50#ifdef CONFIG_ARM64_2_LEVELS
46#include <asm/pgtable-2level-types.h> 51#include <asm/pgtable-2level-types.h>
47#else 52#elif defined(CONFIG_ARM64_3_LEVELS)
48#include <asm/pgtable-3level-types.h> 53#include <asm/pgtable-3level-types.h>
54#else
55#include <asm/pgtable-4level-types.h>
49#endif 56#endif
50 57
51extern void __cpu_clear_user_page(void *p, unsigned long user); 58extern void __cpu_clear_user_page(void *p, unsigned long user);
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 48298376e46a..7deb5750a945 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -46,6 +46,26 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
46 46
47#endif /* CONFIG_ARM64_2_LEVELS */ 47#endif /* CONFIG_ARM64_2_LEVELS */
48 48
49#ifdef CONFIG_ARM64_4_LEVELS
50
51static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
52{
53 return (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
54}
55
56static inline void pud_free(struct mm_struct *mm, pud_t *pud)
57{
58 BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
59 free_page((unsigned long)pud);
60}
61
62static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
63{
64 set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
65}
66
67#endif /* CONFIG_ARM64_4_LEVELS */
68
49extern pgd_t *pgd_alloc(struct mm_struct *mm); 69extern pgd_t *pgd_alloc(struct mm_struct *mm);
50extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 70extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
51 71
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index c7c603b489b8..fddcc3efa569 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -18,8 +18,10 @@
18 18
19#ifdef CONFIG_ARM64_2_LEVELS 19#ifdef CONFIG_ARM64_2_LEVELS
20#include <asm/pgtable-2level-hwdef.h> 20#include <asm/pgtable-2level-hwdef.h>
21#else 21#elif defined(CONFIG_ARM64_3_LEVELS)
22#include <asm/pgtable-3level-hwdef.h> 22#include <asm/pgtable-3level-hwdef.h>
23#else
24#include <asm/pgtable-4level-hwdef.h>
23#endif 25#endif
24 26
25/* 27/*
@@ -27,7 +29,7 @@
27 * 29 *
28 * Level 1 descriptor (PUD). 30 * Level 1 descriptor (PUD).
29 */ 31 */
30 32#define PUD_TYPE_TABLE (_AT(pudval_t, 3) << 0)
31#define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1) 33#define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1)
32#define PUD_TYPE_MASK (_AT(pgdval_t, 3) << 0) 34#define PUD_TYPE_MASK (_AT(pgdval_t, 3) << 0)
33#define PUD_TYPE_SECT (_AT(pgdval_t, 1) << 0) 35#define PUD_TYPE_SECT (_AT(pgdval_t, 1) << 0)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 6d5854972a77..d9b23efdaded 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -35,7 +35,11 @@
35 * VMALLOC and SPARSEMEM_VMEMMAP ranges. 35 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
36 */ 36 */
37#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS) 37#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS)
38#ifndef CONFIG_ARM64_4_LEVELS
38#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K) 39#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K)
40#else
41#define VMALLOC_END (PAGE_OFFSET - UL(0x40000000000) - SZ_64K)
42#endif
39 43
40#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) 44#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
41 45
@@ -44,12 +48,16 @@
44#ifndef __ASSEMBLY__ 48#ifndef __ASSEMBLY__
45extern void __pte_error(const char *file, int line, unsigned long val); 49extern void __pte_error(const char *file, int line, unsigned long val);
46extern void __pmd_error(const char *file, int line, unsigned long val); 50extern void __pmd_error(const char *file, int line, unsigned long val);
51extern void __pud_error(const char *file, int line, unsigned long val);
47extern void __pgd_error(const char *file, int line, unsigned long val); 52extern void __pgd_error(const char *file, int line, unsigned long val);
48 53
49#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) 54#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
50#ifndef CONFIG_ARM64_2_LEVELS 55#ifndef CONFIG_ARM64_2_LEVELS
51#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) 56#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
52#endif 57#endif
58#ifdef CONFIG_ARM64_4_LEVELS
59#define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud))
60#endif
53#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) 61#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
54 62
55#ifdef CONFIG_SMP 63#ifdef CONFIG_SMP
@@ -347,6 +355,30 @@ static inline pmd_t *pud_page_vaddr(pud_t pud)
347 355
348#endif /* CONFIG_ARM64_2_LEVELS */ 356#endif /* CONFIG_ARM64_2_LEVELS */
349 357
358#ifdef CONFIG_ARM64_4_LEVELS
359
360#define pgd_none(pgd) (!pgd_val(pgd))
361#define pgd_bad(pgd) (!(pgd_val(pgd) & 2))
362#define pgd_present(pgd) (pgd_val(pgd))
363
364static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
365{
366 *pgdp = pgd;
367 dsb(ishst);
368}
369
370static inline void pgd_clear(pgd_t *pgdp)
371{
372 set_pgd(pgdp, __pgd(0));
373}
374
375static inline pud_t *pgd_page_vaddr(pgd_t pgd)
376{
377 return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
378}
379
380#endif /* CONFIG_ARM64_4_LEVELS */
381
350/* to find an entry in a page-table-directory */ 382/* to find an entry in a page-table-directory */
351#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) 383#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
352 384
@@ -355,6 +387,14 @@ static inline pmd_t *pud_page_vaddr(pud_t pud)
355/* to find an entry in a kernel page-table-directory */ 387/* to find an entry in a kernel page-table-directory */
356#define pgd_offset_k(addr) pgd_offset(&init_mm, addr) 388#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
357 389
390#ifdef CONFIG_ARM64_4_LEVELS
391#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
392static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
393{
394 return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
395}
396#endif
397
358/* Find an entry in the second-level page table.. */ 398/* Find an entry in the second-level page table.. */
359#ifndef CONFIG_ARM64_2_LEVELS 399#ifndef CONFIG_ARM64_2_LEVELS
360#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) 400#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index bc19101edaeb..49dc8f03362f 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -100,6 +100,15 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
100} 100}
101#endif 101#endif
102 102
103#ifdef CONFIG_ARM64_4_LEVELS
104static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
105 unsigned long addr)
106{
107 tlb_add_flush(tlb, addr);
108 tlb_remove_page(tlb, virt_to_page(pudp));
109}
110#endif
111
103static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, 112static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
104 unsigned long address) 113 unsigned long address)
105{ 114{
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index fa3b7fb8a77a..847b99daad79 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -476,16 +476,42 @@ ENDPROC(__calc_phys_offset)
476 .quad PAGE_OFFSET 476 .quad PAGE_OFFSET
477 477
478/* 478/*
479 * Macro to populate the PGD for the corresponding block entry in the next 479 * Macro to populate the PUD for the corresponding block entry in the next
480 * level (tbl) for the given virtual address. 480 * level (tbl) for the given virtual address in case of 4 levels.
481 * 481 *
482 * Preserves: pgd, tbl, virt 482 * Preserves: pgd, virt
483 * Corrupts: tmp1, tmp2 483 * Corrupts: tbl, tmp1, tmp2
484 * Returns: pud
484 */ 485 */
485 .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2 486 .macro create_pud_entry, pgd, tbl, virt, pud, tmp1, tmp2
487#ifdef CONFIG_ARM64_4_LEVELS
488 add \tbl, \tbl, #PAGE_SIZE // bump tbl 1 page up.
489 // to make room for pud
490 add \pud, \pgd, #PAGE_SIZE // pgd points to pud which
491 // follows pgd
492 lsr \tmp1, \virt, #PUD_SHIFT
493 and \tmp1, \tmp1, #PTRS_PER_PUD - 1 // PUD index
494 orr \tmp2, \tbl, #3 // PUD entry table type
495 str \tmp2, [\pud, \tmp1, lsl #3]
496#else
497 mov \pud, \tbl
498#endif
499 .endm
500
501/*
502 * Macro to populate the PGD (and possibily PUD) for the corresponding
503 * block entry in the next level (tbl) for the given virtual address.
504 *
505 * Preserves: pgd, virt
506 * Corrupts: tmp1, tmp2, tmp3
507 * Returns: tbl -> page where block mappings can be placed
508 * (changed to make room for pud with 4 levels, preserved otherwise)
509 */
510 .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2, tmp3
511 create_pud_entry \pgd, \tbl, \virt, \tmp3, \tmp1, \tmp2
486 lsr \tmp1, \virt, #PGDIR_SHIFT 512 lsr \tmp1, \virt, #PGDIR_SHIFT
487 and \tmp1, \tmp1, #PTRS_PER_PGD - 1 // PGD index 513 and \tmp1, \tmp1, #PTRS_PER_PGD - 1 // PGD index
488 orr \tmp2, \tbl, #3 // PGD entry table type 514 orr \tmp2, \tmp3, #3 // PGD entry table type
489 str \tmp2, [\pgd, \tmp1, lsl #3] 515 str \tmp2, [\pgd, \tmp1, lsl #3]
490 .endm 516 .endm
491 517
@@ -550,7 +576,7 @@ __create_page_tables:
550 add x0, x25, #PAGE_SIZE // section table address 576 add x0, x25, #PAGE_SIZE // section table address
551 ldr x3, =KERNEL_START 577 ldr x3, =KERNEL_START
552 add x3, x3, x28 // __pa(KERNEL_START) 578 add x3, x3, x28 // __pa(KERNEL_START)
553 create_pgd_entry x25, x0, x3, x5, x6 579 create_pgd_entry x25, x0, x3, x1, x5, x6
554 ldr x6, =KERNEL_END 580 ldr x6, =KERNEL_END
555 mov x5, x3 // __pa(KERNEL_START) 581 mov x5, x3 // __pa(KERNEL_START)
556 add x6, x6, x28 // __pa(KERNEL_END) 582 add x6, x6, x28 // __pa(KERNEL_END)
@@ -561,7 +587,7 @@ __create_page_tables:
561 */ 587 */
562 add x0, x26, #PAGE_SIZE // section table address 588 add x0, x26, #PAGE_SIZE // section table address
563 mov x5, #PAGE_OFFSET 589 mov x5, #PAGE_OFFSET
564 create_pgd_entry x26, x0, x5, x3, x6 590 create_pgd_entry x26, x0, x5, x1, x3, x6
565 ldr x6, =KERNEL_END 591 ldr x6, =KERNEL_END
566 mov x3, x24 // phys offset 592 mov x3, x24 // phys offset
567 create_block_map x0, x7, x3, x5, x6 593 create_block_map x0, x7, x3, x5, x6
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 506f7814e305..02cd3f023e9a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -339,6 +339,11 @@ void __pmd_error(const char *file, int line, unsigned long val)
339 pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val); 339 pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
340} 340}
341 341
342void __pud_error(const char *file, int line, unsigned long val)
343{
344 pr_crit("%s:%d: bad pud %016lx.\n", file, line, val);
345}
346
342void __pgd_error(const char *file, int line, unsigned long val) 347void __pgd_error(const char *file, int line, unsigned long val)
343{ 348{
344 pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val); 349 pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index bcc965e2cce1..41cb6d3d6075 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -62,6 +62,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
62 break; 62 break;
63 63
64 pud = pud_offset(pgd, addr); 64 pud = pud_offset(pgd, addr);
65 printk(", *pud=%016llx", pud_val(*pud));
65 if (pud_none(*pud) || pud_bad(*pud)) 66 if (pud_none(*pud) || pud_bad(*pud))
66 break; 67 break;
67 68
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 69000efa015e..d5e969e7b576 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -104,9 +104,12 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
104EXPORT_SYMBOL(ioremap_cache); 104EXPORT_SYMBOL(ioremap_cache);
105 105
106static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; 106static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
107#ifndef CONFIG_ARM64_64K_PAGES 107#ifndef CONFIG_ARM64_2_LEVELS
108static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss; 108static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
109#endif 109#endif
110#ifdef CONFIG_ARM64_4_LEVELS
111static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
112#endif
110 113
111static inline pud_t * __init early_ioremap_pud(unsigned long addr) 114static inline pud_t * __init early_ioremap_pud(unsigned long addr)
112{ 115{
@@ -144,6 +147,7 @@ void __init early_ioremap_init(void)
144 unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN); 147 unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);
145 148
146 pgd = pgd_offset_k(addr); 149 pgd = pgd_offset_k(addr);
150 pgd_populate(&init_mm, pgd, bm_pud);
147 pud = pud_offset(pgd, addr); 151 pud = pud_offset(pgd, addr);
148 pud_populate(&init_mm, pud, bm_pmd); 152 pud_populate(&init_mm, pud, bm_pmd);
149 pmd = pmd_offset(pud, addr); 153 pmd = pmd_offset(pud, addr);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c43f1dd19489..c55567283cde 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -32,6 +32,7 @@
32#include <asm/setup.h> 32#include <asm/setup.h>
33#include <asm/sizes.h> 33#include <asm/sizes.h>
34#include <asm/tlb.h> 34#include <asm/tlb.h>
35#include <asm/memblock.h>
35#include <asm/mmu_context.h> 36#include <asm/mmu_context.h>
36 37
37#include "mm.h" 38#include "mm.h"
@@ -204,9 +205,16 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
204 unsigned long end, unsigned long phys, 205 unsigned long end, unsigned long phys,
205 int map_io) 206 int map_io)
206{ 207{
207 pud_t *pud = pud_offset(pgd, addr); 208 pud_t *pud;
208 unsigned long next; 209 unsigned long next;
209 210
211 if (pgd_none(*pgd)) {
212 pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
213 pgd_populate(&init_mm, pgd, pud);
214 }
215 BUG_ON(pgd_bad(*pgd));
216
217 pud = pud_offset(pgd, addr);
210 do { 218 do {
211 next = pud_addr_end(addr, end); 219 next = pud_addr_end(addr, end);
212 220
@@ -290,10 +298,10 @@ static void __init map_mem(void)
290 * memory addressable from the initial direct kernel mapping. 298 * memory addressable from the initial direct kernel mapping.
291 * 299 *
292 * The initial direct kernel mapping, located at swapper_pg_dir, 300 * The initial direct kernel mapping, located at swapper_pg_dir,
293 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be 301 * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be
294 * aligned to 2MB as per Documentation/arm64/booting.txt). 302 * aligned to 2MB as per Documentation/arm64/booting.txt).
295 */ 303 */
296 limit = PHYS_OFFSET + PGDIR_SIZE; 304 limit = PHYS_OFFSET + PUD_SIZE;
297 memblock_set_current_limit(limit); 305 memblock_set_current_limit(limit);
298 306
299 /* map all the memory banks */ 307 /* map all the memory banks */