author     Catalin Marinas <catalin.marinas@arm.com>  2011-11-22 12:30:29 -0500
committer  Catalin Marinas <catalin.marinas@arm.com>  2011-12-08 05:30:39 -0500
commit     da02877987e6e173ebba137d4e1e155e1f1151cd
tree       8035bb1fb7def068ed2fd13d5d11ec5857c7d338 /arch/arm/mm
parent     dcfdae04bd92e8a2ea155db0e21e3bddc09e0a89
ARM: LPAE: Page table maintenance for the 3-level format
This patch modifies the pgd/pmd/pte manipulation functions to support
the 3-level page table format. Since there is no need for an 'ext'
argument to cpu_set_pte_ext(), this patch conditionally defines a
different prototype for this function when CONFIG_ARM_LPAE is enabled.
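
A rough sketch of that conditional prototype, assuming the usual declaration
style under arch/arm/include/asm (this header is not part of the arch/arm/mm
diff shown below, so treat it as an illustration rather than the patch's exact
code):

    /* Illustrative sketch, not taken from this diff: the 64-bit LPAE
     * descriptors have room for the Linux software bits, so the 'ext'
     * argument used with the classic 2-level format is not needed. */
    #ifdef CONFIG_ARM_LPAE
    extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
    #else
    extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
    #endif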
The patch also introduces the L_PGD_SWAPPER flag to mark pgd entries
pointing to pmd tables pre-allocated in the swapper_pg_dir and avoid
trying to free them at run-time. This flag is 0 with the classic page
table format.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
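
For reference, L_PGD_SWAPPER is defined in the LPAE page table headers rather
than in arch/arm/mm, so its definition does not appear in the diff below; a
sketch of the idea, with the exact bit position an assumption here:

    /* Sketch only: a software bit in an LPAE pgd entry marking pmd tables
     * pre-allocated in swapper_pg_dir (bit 55 is an assumption). */
    #define L_PGD_SWAPPER	(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */

The pgd_free() loop added below tests this bit and skips such entries instead
of freeing their pmd tables.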
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/ioremap.c |  8
-rw-r--r--  arch/arm/mm/pgd.c     | 51
2 files changed, 52 insertions, 7 deletions
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index c3fa40da3b75..d1f78bacb015 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -64,7 +64,7 @@ void __check_kvm_seq(struct mm_struct *mm)
 	} while (seq != init_mm.context.kvm_seq);
 }
 
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 /*
  * Section support is unsafe on SMP - If you iounmap and ioremap a region,
  * the other CPUs will not see this change until their next context switch.
@@ -202,11 +202,13 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	unsigned long addr;
 	struct vm_struct * area;
 
+#ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
 		return NULL;
+#endif
 
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
@@ -228,7 +230,7 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 		return NULL;
 	addr = (unsigned long)area->addr;
 
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
@@ -320,7 +322,7 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	struct vm_struct **p, *tmp;
 
 	/*
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index b2027c154b2a..a3e78ccabd65 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
+#include <linux/slab.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
@@ -17,6 +18,14 @@
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_LPAE
+#define __pgd_alloc()	kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
+#define __pgd_free(pgd)	kfree(pgd)
+#else
+#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
+#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
+#endif
+
 /*
  * need to get a 16k page for level 1
  */
@@ -27,7 +36,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	pmd_t *new_pmd, *init_pmd;
 	pte_t *new_pte, *init_pte;
 
-	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
+	new_pgd = __pgd_alloc();
 	if (!new_pgd)
 		goto no_pgd;
 
@@ -42,10 +51,25 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Allocate PMD table for modules and pkmap mappings.
+	 */
+	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
+			    MODULES_VADDR);
+	if (!new_pud)
+		goto no_pud;
+
+	new_pmd = pmd_alloc(mm, new_pud, 0);
+	if (!new_pmd)
+		goto no_pmd;
+#endif
+
 	if (!vectors_high()) {
 		/*
 		 * On ARM, first page must always be allocated since it
-		 * contains the machine vectors.
+		 * contains the machine vectors. The vectors are always high
+		 * with LPAE.
 		 */
 		new_pud = pud_alloc(mm, new_pgd, 0);
 		if (!new_pud)
@@ -74,7 +98,7 @@ no_pte:
 no_pmd:
 	pud_free(mm, new_pud);
 no_pud:
-	free_pages((unsigned long)new_pgd, 2);
+	__pgd_free(new_pgd);
 no_pgd:
 	return NULL;
 }
@@ -111,5 +135,24 @@ no_pud:
 	pgd_clear(pgd);
 	pud_free(mm, pud);
 no_pgd:
-	free_pages((unsigned long) pgd_base, 2);
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Free modules/pkmap or identity pmd tables.
+	 */
+	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		if (pgd_val(*pgd) & L_PGD_SWAPPER)
+			continue;
+		pud = pud_offset(pgd, 0);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		pmd = pmd_offset(pud, 0);
+		pud_clear(pud);
+		pmd_free(mm, pmd);
+		pgd_clear(pgd);
+		pud_free(mm, pud);
+	}
+#endif
+	__pgd_free(pgd_base);
 }