author    Ard Biesheuvel <ard.biesheuvel@linaro.org>  2014-10-20 09:42:07 -0400
committer Ard Biesheuvel <ard.biesheuvel@linaro.org>  2015-01-12 03:16:52 -0500
commit    8ce837cee8f51fb0eacb32c85461ea2f0fafc9f8 (patch)
tree      14bb68cb91e5890156381969a1f1f9ca87cd2047 /arch/arm64
parent    e1e1fddae74b72d0415965821ad00fe39aac6f13 (diff)
arm64/mm: add create_pgd_mapping() to create private page tables
For UEFI, we need to install the memory mappings used for Runtime Services
in a dedicated set of page tables. Add create_pgd_mapping(), which allows
us to allocate and install those page table entries early.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Leif Lindholm <leif.lindholm@linaro.org>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
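As a rough illustration of how a caller might use the new interface (a minimal
sketch only: efi_mm and map_runtime_region() are illustrative placeholders and
are not part of this patch):

/* Sketch -- efi_mm and map_runtime_region() are illustrative only. */
#include <linux/types.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>

extern struct mm_struct efi_mm;		/* private mm holding the runtime mappings */

static void __init map_runtime_region(phys_addr_t phys, unsigned long virt,
				      phys_addr_t size, bool device)
{
	pgprot_t prot = device ? __pgprot(PROT_DEVICE_nGnRE)
			       : PAGE_KERNEL_EXEC;

	/* Install the mapping in efi_mm's page tables rather than init_mm's. */
	create_pgd_mapping(&efi_mm, phys, virt, size, prot);
}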
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/mmu.h      3
-rw-r--r--  arch/arm64/include/asm/pgtable.h  5
-rw-r--r--  arch/arm64/mm/mmu.c              43
3 files changed, 30 insertions(+), 21 deletions(-)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index c2f006c48bdb..5fd40c43be80 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -33,5 +33,8 @@ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
 /* create an identity mapping for memory (or io if map_io is true) */
 extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
+extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+			       unsigned long virt, phys_addr_t size,
+			       pgprot_t prot);
 
 #endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 210d632aa5ad..59079248529d 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -264,6 +264,11 @@ static inline pmd_t pte_pmd(pte_t pte)
 	return __pmd(pte_val(pte));
 }
 
+static inline pgprot_t mk_sect_prot(pgprot_t prot)
+{
+	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
+}
+
 /*
  * THP definitions.
  */
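The helper added above lets a page-level pgprot_t be reused for pmd/pud block
("section") entries: clearing PTE_TABLE_BIT changes the descriptor type from
table/page to block while keeping the memory attributes. A small illustration
(variable names are placeholders, not from the patch):

	pgprot_t page_prot = PAGE_KERNEL_EXEC;		/* page-granular attributes */
	pgprot_t sect_prot = mk_sect_prot(page_prot);	/* same attributes, block type */

	/* As the mmu.c changes below do when a section mapping is possible: */
	set_pmd(pmd, __pmd(phys | pgprot_val(sect_prot)));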
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 7d5dfe2d3de0..3f3d5aa4a8b1 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -158,20 +158,10 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
 static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 				  unsigned long addr, unsigned long end,
-				  phys_addr_t phys, int map_io)
+				  phys_addr_t phys, pgprot_t prot)
 {
 	pmd_t *pmd;
 	unsigned long next;
-	pmdval_t prot_sect;
-	pgprot_t prot_pte;
-
-	if (map_io) {
-		prot_sect = PROT_SECT_DEVICE_nGnRE;
-		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
-	} else {
-		prot_sect = PROT_SECT_NORMAL_EXEC;
-		prot_pte = PAGE_KERNEL_EXEC;
-	}
 
 	/*
 	 * Check for initial section mappings in the pgd/pud and remove them.
@@ -187,7 +177,8 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
 			pmd_t old_pmd =*pmd;
-			set_pmd(pmd, __pmd(phys | prot_sect));
+			set_pmd(pmd, __pmd(phys |
+					   pgprot_val(mk_sect_prot(prot))));
 			/*
 			 * Check for previous table entries created during
 			 * boot (__create_page_tables) and flush them.
@@ -196,7 +187,7 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 			flush_tlb_all();
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot_pte);
+				       prot);
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
@@ -204,7 +195,7 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 
 static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 				  unsigned long addr, unsigned long end,
-				  phys_addr_t phys, int map_io)
+				  phys_addr_t phys, pgprot_t prot)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -222,10 +213,11 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (!map_io && (PAGE_SHIFT == 12) &&
+		if ((PAGE_SHIFT == 12) &&
 		    ((addr | next | phys) & ~PUD_MASK) == 0) {
 			pud_t old_pud = *pud;
-			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+			set_pud(pud, __pud(phys |
+					   pgprot_val(mk_sect_prot(prot))));
 
 			/*
 			 * If we have an old value for a pud, it will
@@ -240,7 +232,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 				flush_tlb_all();
 			}
 		} else {
-			alloc_init_pmd(mm, pud, addr, next, phys, map_io);
+			alloc_init_pmd(mm, pud, addr, next, phys, prot);
 		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
@@ -252,7 +244,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
  */
 static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
 				    phys_addr_t phys, unsigned long virt,
-				    phys_addr_t size, int map_io)
+				    phys_addr_t size, pgprot_t prot)
 {
 	unsigned long addr, length, end, next;
 
@@ -262,7 +254,7 @@ static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
 	end = addr + length;
 	do {
 		next = pgd_addr_end(addr, end);
-		alloc_init_pud(mm, pgd, addr, next, phys, map_io);
+		alloc_init_pud(mm, pgd, addr, next, phys, prot);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
@@ -276,7 +268,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 		return;
 	}
 	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
-			 size, 0);
+			 size, PAGE_KERNEL_EXEC);
 }
 
 void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
@@ -286,7 +278,16 @@ void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
 		return;
 	}
 	__create_mapping(&init_mm, &idmap_pg_dir[pgd_index(addr)],
-			 addr, addr, size, map_io);
+			 addr, addr, size,
+			 map_io ? __pgprot(PROT_DEVICE_nGnRE)
+				: PAGE_KERNEL_EXEC);
+}
+
+void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+			       unsigned long virt, phys_addr_t size,
+			       pgprot_t prot)
+{
+	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot);
 }
 
 static void __init map_mem(void)