author		Catalin Marinas <catalin.marinas@arm.com>	2015-01-13 17:42:31 -0500
committer	Catalin Marinas <catalin.marinas@arm.com>	2015-01-13 17:42:31 -0500
commit		c26a535b747a56298000c42cdd669514456dfc2d (patch)
tree		1456199ed7f9038ffc664cebb31bad9e0c35414d /arch/arm64
parent		eaa27f34e91a14cdceed26ed6c6793ec1d186115 (diff)
parent		9679be103108926cfe9e6fd2f6829cefa77e47b0 (diff)
Merge tag 'for-3.20' of http://git.linaro.org/people/ard.biesheuvel/linux-arm into upstream
UEFI updates for arm64

This series consists of a reimplementation of the virtual remapping of UEFI
Runtime Services in a way that is stable across kexec, including the required
preparatory refactoring and other work to set the stage, and some cleaning up
afterwards to remove boot services memory and identity map handling that has
now become redundant.

* tag 'for-3.20' of http://git.linaro.org/people/ard.biesheuvel/linux-arm:
  arm64/efi: remove idmap manipulations from UEFI code
  arm64/efi: remove free_boot_services() and friends
  arm64/efi: move SetVirtualAddressMap() to UEFI stub
  arm64/efi: set EFI_ALLOC_ALIGN to 64 KB
  efi: efistub: allow allocation alignment larger than EFI_PAGE_SIZE
  efi: split off remapping code from efi_config_init()
  arm64/mm: add create_pgd_mapping() to create private page tables
  arm64/mm: add explicit struct_mm argument to __create_mapping()
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/efi.h		38
-rw-r--r--	arch/arm64/include/asm/mmu.h		5
-rw-r--r--	arch/arm64/include/asm/pgtable.h	5
-rw-r--r--	arch/arm64/kernel/efi.c			369
-rw-r--r--	arch/arm64/kernel/setup.c		2
-rw-r--r--	arch/arm64/mm/mmu.c			60
6 files changed, 189 insertions, 290 deletions
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index a34fd3b12e2b..7baf2cc04e1e 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -6,29 +6,35 @@
 
 #ifdef CONFIG_EFI
 extern void efi_init(void);
-extern void efi_idmap_init(void);
+extern void efi_virtmap_init(void);
 #else
 #define efi_init()
-#define efi_idmap_init()
+#define efi_virtmap_init()
 #endif
 
 #define efi_call_virt(f, ...)					\
 ({								\
-	efi_##f##_t *__f = efi.systab->runtime->f;		\
+	efi_##f##_t *__f;					\
 	efi_status_t __s;					\
 								\
 	kernel_neon_begin();					\
+	efi_virtmap_load();					\
+	__f = efi.systab->runtime->f;				\
 	__s = __f(__VA_ARGS__);					\
+	efi_virtmap_unload();					\
 	kernel_neon_end();					\
 	__s;							\
 })
 
 #define __efi_call_virt(f, ...)				\
 ({								\
-	efi_##f##_t *__f = efi.systab->runtime->f;		\
+	efi_##f##_t *__f;					\
 								\
 	kernel_neon_begin();					\
+	efi_virtmap_load();					\
+	__f = efi.systab->runtime->f;				\
 	__f(__VA_ARGS__);					\
+	efi_virtmap_unload();					\
 	kernel_neon_end();					\
 })
 
@@ -44,4 +50,28 @@ extern void efi_idmap_init(void);
 
 #define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
 
+#define EFI_ALLOC_ALIGN		SZ_64K
+
+/*
+ * On ARM systems, virtually remapped UEFI runtime services are set up in three
+ * distinct stages:
+ * - The stub retrieves the final version of the memory map from UEFI, populates
+ *   the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime
+ *   service to communicate the new mapping to the firmware (Note that the new
+ *   mapping is not live at this time)
+ * - During early boot, the page tables are allocated and populated based on the
+ *   virt_addr fields in the memory map, but only if all descriptors with the
+ *   EFI_MEMORY_RUNTIME attribute have a non-zero value for virt_addr. If this
+ *   succeeds, the EFI_VIRTMAP flag is set to indicate that the virtual mappings
+ *   have been installed successfully.
+ * - During an early initcall(), the UEFI Runtime Services are enabled and the
+ *   EFI_RUNTIME_SERVICES bit set if some conditions are met, i.e., we need a
+ *   non-early mapping of the UEFI system table, and we need to have the virtmap
+ *   installed.
+ */
+#define EFI_VIRTMAP		EFI_ARCH_1
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
 #endif /* _ASM_EFI_H */
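
Note on the macro change above: once the stub has called SetVirtualAddressMap(), the function pointers held in the firmware's runtime services table are virtual addresses that only resolve while the private EFI page tables are active, which appears to be why __f is now loaded after efi_virtmap_load(). A rough sketch of what efi_call_virt(get_time, tm, tc) amounts to after expansion (paraphrased for illustration; example_get_time and the argument names are invented, not part of the patch):

	/* Illustrative expansion of efi_call_virt(get_time, tm, tc). */
	efi_status_t example_get_time(efi_time_t *tm, efi_time_cap_t *tc)
	{
		efi_get_time_t *__f;
		efi_status_t __s;

		kernel_neon_begin();		/* firmware may clobber FP/SIMD state */
		efi_virtmap_load();		/* switch to the private EFI page tables */
		__f = efi.systab->runtime->get_time; /* runtime table reachable only now */
		__s = __f(tm, tc);
		efi_virtmap_unload();		/* restore the previous mm */
		kernel_neon_end();
		return __s;
	}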
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index c2f006c48bdb..3d311761e3c2 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -31,7 +31,8 @@ extern void paging_init(void);
 extern void setup_mm_for_reboot(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
-/* create an identity mapping for memory (or io if map_io is true) */
-extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
+extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+			       unsigned long virt, phys_addr_t size,
+			       pgprot_t prot);
 
 #endif
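
The replacement create_pgd_mapping() drops the idmap-specific restrictions and lets the caller choose the target mm, virtual address and attributes. A hypothetical call (all values invented for illustration) mapping a 64 KB firmware region into a private mm might look like:

	static void __init example_private_map(struct mm_struct *mm)
	{
		phys_addr_t phys = 0x09010000;		   /* made-up physical base */
		unsigned long virt = 0xffffffc000200000UL; /* made-up virtual base */

		create_pgd_mapping(mm, phys, virt, SZ_64K, PAGE_KERNEL);
	}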
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 210d632aa5ad..59079248529d 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -264,6 +264,11 @@ static inline pmd_t pte_pmd(pte_t pte)
 	return __pmd(pte_val(pte));
 }
 
+static inline pgprot_t mk_sect_prot(pgprot_t prot)
+{
+	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
+}
+
 /*
  * THP definitions.
  */
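
mk_sect_prot() works because, in the ARMv8 descriptor format, bit 1 of a pmd/pud entry (PTE_TABLE_BIT) distinguishes a table pointer (set) from a block/section mapping (clear), while the remaining attribute bits line up; clearing that bit therefore turns a page-level pgprot into its section-level equivalent. A minimal sketch of the intended use, mirroring the mmu.c hunk further below:

	/* Install a 2 MB block entry carrying the attributes of 'prot'. */
	static void example_section_map(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
	{
		set_pmd(pmd, __pmd(phys | pgprot_val(mk_sect_prot(prot))));
	}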
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2bb4347d0edf..a98415b5979c 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,25 +11,31 @@
  *
  */
 
+#include <linux/atomic.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/memblock.h>
+#include <linux/mm_types.h>
 #include <linux/bootmem.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/efi.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
 
 struct efi_memory_map memmap;
 
-static efi_runtime_services_t *runtime;
-
 static u64 efi_system_table;
 
 static int uefi_debug __initdata;
@@ -48,30 +54,33 @@ static int __init is_normal_ram(efi_memory_desc_t *md)
 	return 0;
 }
 
-static void __init efi_setup_idmap(void)
+/*
+ * Translate a EFI virtual address into a physical address: this is necessary,
+ * as some data members of the EFI system table are virtually remapped after
+ * SetVirtualAddressMap() has been called.
+ */
+static phys_addr_t efi_to_phys(unsigned long addr)
 {
-	struct memblock_region *r;
 	efi_memory_desc_t *md;
-	u64 paddr, npages, size;
-
-	for_each_memblock(memory, r)
-		create_id_mapping(r->base, r->size, 0);
 
-	/* map runtime io spaces */
 	for_each_efi_memory_desc(&memmap, md) {
-		if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
 			continue;
-		paddr = md->phys_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&paddr, &npages);
-		size = npages << PAGE_SHIFT;
-		create_id_mapping(paddr, size, 1);
+		if (md->virt_addr == 0)
+			/* no virtual mapping has been installed by the stub */
+			break;
+		if (md->virt_addr <= addr &&
+		    (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+			return md->phys_addr + addr - md->virt_addr;
 	}
+	return addr;
 }
 
 static int __init uefi_init(void)
 {
 	efi_char16_t *c16;
+	void *config_tables;
+	u64 table_size;
 	char vendor[100] = "unknown";
 	int i, retval;
 
@@ -99,7 +108,7 @@ static int __init uefi_init(void)
 		efi.systab->hdr.revision & 0xffff);
 
 	/* Show what we know for posterity */
-	c16 = early_memremap(efi.systab->fw_vendor,
+	c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
 			     sizeof(vendor));
 	if (c16) {
 		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
@@ -112,8 +121,14 @@ static int __init uefi_init(void)
 		efi.systab->hdr.revision >> 16,
 		efi.systab->hdr.revision & 0xffff, vendor);
 
-	retval = efi_config_init(NULL);
+	table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
+	config_tables = early_memremap(efi_to_phys(efi.systab->tables),
+				       table_size);
+
+	retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
+					 sizeof(efi_config_table_64_t), NULL);
 
+	early_memunmap(config_tables, table_size);
 out:
 	early_memunmap(efi.systab, sizeof(efi_system_table_t));
 	return retval;
@@ -163,9 +178,7 @@ static __init void reserve_regions(void)
 		if (is_normal_ram(md))
 			early_init_dt_add_memory_arch(paddr, size);
 
-		if (is_reserve_region(md) ||
-		    md->type == EFI_BOOT_SERVICES_CODE ||
-		    md->type == EFI_BOOT_SERVICES_DATA) {
+		if (is_reserve_region(md)) {
 			memblock_reserve(paddr, size);
 			if (uefi_debug)
 				pr_cont("*");
@@ -178,123 +191,6 @@ static __init void reserve_regions(void)
 	set_bit(EFI_MEMMAP, &efi.flags);
 }
 
-
-static u64 __init free_one_region(u64 start, u64 end)
-{
-	u64 size = end - start;
-
-	if (uefi_debug)
-		pr_info("  EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);
-
-	free_bootmem_late(start, size);
-	return size;
-}
-
-static u64 __init free_region(u64 start, u64 end)
-{
-	u64 map_start, map_end, total = 0;
-
-	if (end <= start)
-		return total;
-
-	map_start = (u64)memmap.phys_map;
-	map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
-	map_start &= PAGE_MASK;
-
-	if (start < map_end && end > map_start) {
-		/* region overlaps UEFI memmap */
-		if (start < map_start)
-			total += free_one_region(start, map_start);
-
-		if (map_end < end)
-			total += free_one_region(map_end, end);
-	} else
-		total += free_one_region(start, end);
-
-	return total;
-}
-
-static void __init free_boot_services(void)
-{
-	u64 total_freed = 0;
-	u64 keep_end, free_start, free_end;
-	efi_memory_desc_t *md;
-
-	/*
-	 * If kernel uses larger pages than UEFI, we have to be careful
-	 * not to inadvertantly free memory we want to keep if there is
-	 * overlap at the kernel page size alignment. We do not want to
-	 * free is_reserve_region() memory nor the UEFI memmap itself.
-	 *
-	 * The memory map is sorted, so we keep track of the end of
-	 * any previous region we want to keep, remember any region
-	 * we want to free and defer freeing it until we encounter
-	 * the next region we want to keep. This way, before freeing
-	 * it, we can clip it as needed to avoid freeing memory we
-	 * want to keep for UEFI.
-	 */
-
-	keep_end = 0;
-	free_start = 0;
-
-	for_each_efi_memory_desc(&memmap, md) {
-		u64 paddr, npages, size;
-
-		if (is_reserve_region(md)) {
-			/*
-			 * We don't want to free any memory from this region.
-			 */
-			if (free_start) {
-				/* adjust free_end then free region */
-				if (free_end > md->phys_addr)
-					free_end -= PAGE_SIZE;
-				total_freed += free_region(free_start, free_end);
-				free_start = 0;
-			}
-			keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-			continue;
-		}
-
-		if (md->type != EFI_BOOT_SERVICES_CODE &&
-		    md->type != EFI_BOOT_SERVICES_DATA) {
-			/* no need to free this region */
-			continue;
-		}
-
-		/*
-		 * We want to free memory from this region.
-		 */
-		paddr = md->phys_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&paddr, &npages);
-		size = npages << PAGE_SHIFT;
-
-		if (free_start) {
-			if (paddr <= free_end)
-				free_end = paddr + size;
-			else {
-				total_freed += free_region(free_start, free_end);
-				free_start = paddr;
-				free_end = paddr + size;
-			}
-		} else {
-			free_start = paddr;
-			free_end = paddr + size;
-		}
-		if (free_start < keep_end) {
-			free_start += PAGE_SIZE;
-			if (free_start >= free_end)
-				free_start = 0;
-		}
-	}
-	if (free_start)
-		total_freed += free_region(free_start, free_end);
-
-	if (total_freed)
-		pr_info("Freed 0x%llx bytes of EFI boot services memory",
-			total_freed);
-}
-
 void __init efi_init(void)
 {
 	struct efi_fdt_params params;
@@ -319,61 +215,14 @@ void __init efi_init(void)
 	reserve_regions();
 }
 
-void __init efi_idmap_init(void)
-{
-	if (!efi_enabled(EFI_BOOT))
-		return;
-
-	/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
-	efi_setup_idmap();
-	early_memunmap(memmap.map, memmap.map_end - memmap.map);
-}
-
-static int __init remap_region(efi_memory_desc_t *md, void **new)
-{
-	u64 paddr, vaddr, npages, size;
-
-	paddr = md->phys_addr;
-	npages = md->num_pages;
-	memrange_efi_to_native(&paddr, &npages);
-	size = npages << PAGE_SHIFT;
-
-	if (is_normal_ram(md))
-		vaddr = (__force u64)ioremap_cache(paddr, size);
-	else
-		vaddr = (__force u64)ioremap(paddr, size);
-
-	if (!vaddr) {
-		pr_err("Unable to remap 0x%llx pages @ %p\n",
-		       npages, (void *)paddr);
-		return 0;
-	}
-
-	/* adjust for any rounding when EFI and system pagesize differs */
-	md->virt_addr = vaddr + (md->phys_addr - paddr);
-
-	if (uefi_debug)
-		pr_info("  EFI remap 0x%012llx => %p\n",
-			md->phys_addr, (void *)md->virt_addr);
-
-	memcpy(*new, md, memmap.desc_size);
-	*new += memmap.desc_size;
-
-	return 1;
-}
-
 /*
- * Switch UEFI from an identity map to a kernel virtual map
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
  */
-static int __init arm64_enter_virtual_mode(void)
+static int __init arm64_enable_runtime_services(void)
 {
-	efi_memory_desc_t *md;
-	phys_addr_t virtmap_phys;
-	void *virtmap, *virt_md;
-	efi_status_t status;
 	u64 mapsize;
-	int count = 0;
-	unsigned long flags;
 
 	if (!efi_enabled(EFI_BOOT)) {
 		pr_info("EFI services will not be available.\n");
@@ -395,81 +244,28 @@ static int __init arm64_enter_virtual_mode(void)
 
 	efi.memmap = &memmap;
 
-	/* Map the runtime regions */
-	virtmap = kmalloc(mapsize, GFP_KERNEL);
-	if (!virtmap) {
-		pr_err("Failed to allocate EFI virtual memmap\n");
-		return -1;
-	}
-	virtmap_phys = virt_to_phys(virtmap);
-	virt_md = virtmap;
-
-	for_each_efi_memory_desc(&memmap, md) {
-		if (!(md->attribute & EFI_MEMORY_RUNTIME))
-			continue;
-		if (!remap_region(md, &virt_md))
-			goto err_unmap;
-		++count;
-	}
-
-	efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
+	efi.systab = (__force void *)ioremap_cache(efi_system_table,
+						   sizeof(efi_system_table_t));
 	if (!efi.systab) {
-		/*
-		 * If we have no virtual mapping for the System Table at this
-		 * point, the memory map doesn't cover the physical offset where
-		 * it resides. This means the System Table will be inaccessible
-		 * to Runtime Services themselves once the virtual mapping is
-		 * installed.
-		 */
-		pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
-		goto err_unmap;
+		pr_err("Failed to remap EFI System Table\n");
+		return -1;
 	}
 	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
-	local_irq_save(flags);
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
-
-	/* Call SetVirtualAddressMap with the physical address of the map */
-	runtime = efi.systab->runtime;
-	efi.set_virtual_address_map = runtime->set_virtual_address_map;
-
-	status = efi.set_virtual_address_map(count * memmap.desc_size,
-					     memmap.desc_size,
-					     memmap.desc_version,
-					     (efi_memory_desc_t *)virtmap_phys);
-	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
-	local_irq_restore(flags);
-
-	kfree(virtmap);
-
-	free_boot_services();
-
-	if (status != EFI_SUCCESS) {
-		pr_err("Failed to set EFI virtual address map! [%lx]\n",
-		       status);
+	if (!efi_enabled(EFI_VIRTMAP)) {
+		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
 		return -1;
 	}
 
 	/* Set up runtime services function pointers */
-	runtime = efi.systab->runtime;
 	efi_native_runtime_setup();
 	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
 	efi.runtime_version = efi.systab->hdr.revision;
 
 	return 0;
-
-err_unmap:
-	/* unmap all mappings that succeeded: there are 'count' of those */
-	for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
-		md = virt_md;
-		iounmap((__force void __iomem *)md->virt_addr);
-	}
-	kfree(virtmap);
-	return -1;
 }
-early_initcall(arm64_enter_virtual_mode);
+early_initcall(arm64_enable_runtime_services);
 
 static int __init arm64_dmi_init(void)
 {
@@ -484,3 +280,80 @@ static int __init arm64_dmi_init(void)
 	return 0;
 }
 core_initcall(arm64_dmi_init);
+
+static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+
+static struct mm_struct efi_mm = {
+	.mm_rb			= RB_ROOT,
+	.pgd			= efi_pgd,
+	.mm_users		= ATOMIC_INIT(2),
+	.mm_count		= ATOMIC_INIT(1),
+	.mmap_sem		= __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
+	INIT_MM_CONTEXT(efi_mm)
+};
+
+static void efi_set_pgd(struct mm_struct *mm)
+{
+	cpu_switch_mm(mm->pgd, mm);
+	flush_tlb_all();
+	if (icache_is_aivivt())
+		__flush_icache_all();
+}
+
+void efi_virtmap_load(void)
+{
+	preempt_disable();
+	efi_set_pgd(&efi_mm);
+}
+
+void efi_virtmap_unload(void)
+{
+	efi_set_pgd(current->active_mm);
+	preempt_enable();
+}
+
+void __init efi_virtmap_init(void)
+{
+	efi_memory_desc_t *md;
+
+	if (!efi_enabled(EFI_BOOT))
+		return;
+
+	for_each_efi_memory_desc(&memmap, md) {
+		u64 paddr, npages, size;
+		pgprot_t prot;
+
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (WARN(md->virt_addr == 0,
+			 "UEFI virtual mapping incomplete or missing -- no entry found for 0x%llx\n",
+			 md->phys_addr))
+			return;
+
+		paddr = md->phys_addr;
+		npages = md->num_pages;
+		memrange_efi_to_native(&paddr, &npages);
+		size = npages << PAGE_SHIFT;
+
+		pr_info("  EFI remap 0x%016llx => %p\n",
+			md->phys_addr, (void *)md->virt_addr);
+
+		/*
+		 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+		 * executable, everything else can be mapped with the XN bits
+		 * set.
+		 */
+		if (!is_normal_ram(md))
+			prot = __pgprot(PROT_DEVICE_nGnRE);
+		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+			prot = PAGE_KERNEL_EXEC;
+		else
+			prot = PAGE_KERNEL;
+
+		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+	}
+	set_bit(EFI_VIRTMAP, &efi.flags);
+	early_memunmap(memmap.map, memmap.map_end - memmap.map);
+}
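
The efi_to_phys() helper added above simply inverts the virtual offset the stub recorded in each descriptor. A self-contained model of that lookup (plain C outside the kernel; struct efi_md and model_efi_to_phys are invented stand-ins for the kernel types):

	#include <stdint.h>
	#include <stddef.h>

	#define EFI_PAGE_SHIFT		12		/* UEFI pages are always 4 KB */
	#define EFI_MEMORY_RUNTIME	(1ULL << 63)

	struct efi_md {			/* reduced stand-in for efi_memory_desc_t */
		uint64_t phys_addr;
		uint64_t virt_addr;
		uint64_t num_pages;
		uint64_t attribute;
	};

	/* Find the RUNTIME descriptor covering addr and undo its virtual offset;
	 * fall back to returning addr unchanged, as the kernel helper does. */
	static uint64_t model_efi_to_phys(const struct efi_md *map, size_t n,
					  uint64_t addr)
	{
		for (size_t i = 0; i < n; i++) {
			const struct efi_md *md = &map[i];

			if (!(md->attribute & EFI_MEMORY_RUNTIME))
				continue;
			if (md->virt_addr == 0)	/* stub installed no mapping */
				break;
			if (md->virt_addr <= addr &&
			    addr - md->virt_addr < (md->num_pages << EFI_PAGE_SHIFT))
				return md->phys_addr + (addr - md->virt_addr);
		}
		return addr;
	}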
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 20fe2932ad0c..199d1b7809d7 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -401,7 +401,7 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
 	request_standard_resources();
 
-	efi_idmap_init();
+	efi_virtmap_init();
 	early_ioremap_reset();
 
 	unflatten_device_tree();
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6032f3e3056a..328638548871 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -156,29 +156,19 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
-				  unsigned long end, phys_addr_t phys,
-				  int map_io)
+static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
+				  unsigned long addr, unsigned long end,
+				  phys_addr_t phys, pgprot_t prot)
 {
 	pmd_t *pmd;
 	unsigned long next;
-	pmdval_t prot_sect;
-	pgprot_t prot_pte;
-
-	if (map_io) {
-		prot_sect = PROT_SECT_DEVICE_nGnRE;
-		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
-	} else {
-		prot_sect = PROT_SECT_NORMAL_EXEC;
-		prot_pte = PAGE_KERNEL_EXEC;
-	}
 
 	/*
 	 * Check for initial section mappings in the pgd/pud and remove them.
 	 */
 	if (pud_none(*pud) || pud_bad(*pud)) {
 		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
-		pud_populate(&init_mm, pud, pmd);
+		pud_populate(mm, pud, pmd);
 	}
 
 	pmd = pmd_offset(pud, addr);
@@ -187,7 +177,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
 			pmd_t old_pmd =*pmd;
-			set_pmd(pmd, __pmd(phys | prot_sect));
+			set_pmd(pmd, __pmd(phys |
+					   pgprot_val(mk_sect_prot(prot))));
 			/*
 			 * Check for previous table entries created during
 			 * boot (__create_page_tables) and flush them.
@@ -196,22 +187,22 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 			flush_tlb_all();
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot_pte);
+				       prot);
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
 }
 
-static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-				  unsigned long end, phys_addr_t phys,
-				  int map_io)
+static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
+				  unsigned long addr, unsigned long end,
+				  phys_addr_t phys, pgprot_t prot)
 {
 	pud_t *pud;
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
 		pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
-		pgd_populate(&init_mm, pgd, pud);
+		pgd_populate(mm, pgd, pud);
 	}
 	BUG_ON(pgd_bad(*pgd));
 
@@ -222,10 +213,11 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (!map_io && (PAGE_SHIFT == 12) &&
+		if ((PAGE_SHIFT == 12) &&
 		    ((addr | next | phys) & ~PUD_MASK) == 0) {
 			pud_t old_pud = *pud;
-			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+			set_pud(pud, __pud(phys |
+					   pgprot_val(mk_sect_prot(prot))));
 
 			/*
 			 * If we have an old value for a pud, it will
@@ -240,7 +232,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 				flush_tlb_all();
 			}
 		} else {
-			alloc_init_pmd(pud, addr, next, phys, map_io);
+			alloc_init_pmd(mm, pud, addr, next, phys, prot);
 		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
@@ -250,9 +242,9 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
  * Create the page directory entries and any necessary page tables for the
  * mapping specified by 'md'.
  */
-static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
-				    unsigned long virt, phys_addr_t size,
-				    int map_io)
+static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
+				    phys_addr_t phys, unsigned long virt,
+				    phys_addr_t size, pgprot_t prot)
 {
 	unsigned long addr, length, end, next;
 
@@ -262,7 +254,7 @@ static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
 	end = addr + length;
 	do {
 		next = pgd_addr_end(addr, end);
-		alloc_init_pud(pgd, addr, next, phys, map_io);
+		alloc_init_pud(mm, pgd, addr, next, phys, prot);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
@@ -275,17 +267,15 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 			&phys, virt);
 		return;
 	}
-	__create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
+	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
+			 size, PAGE_KERNEL_EXEC);
 }
 
-void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
+void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+			       unsigned long virt, phys_addr_t size,
+			       pgprot_t prot)
 {
-	if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
-		pr_warn("BUG: not creating id mapping for %pa\n", &addr);
-		return;
-	}
-	__create_mapping(&idmap_pg_dir[pgd_index(addr)],
-			 addr, addr, size, map_io);
+	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot);
 }
 
 static void __init map_mem(void)
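
A closing note on the section-mapping test that the refactored alloc_init_pmd() keeps: a block entry is used only when the virtual start, the end of the current span and the physical start are all 2 MB aligned. A standalone model of that check (names and macros local to this sketch, not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	#define SECTION_SHIFT	21			/* 2 MB with a 4 KB granule */
	#define SECTION_SIZE	(1ULL << SECTION_SHIFT)
	#define SECTION_MASK	(~(SECTION_SIZE - 1))

	/* True when addr..next can be covered by one 2 MB block at phys. */
	static bool can_use_section(uint64_t addr, uint64_t next, uint64_t phys)
	{
		return ((addr | next | phys) & ~SECTION_MASK) == 0;
	}

For example, addr 0xffffffc000200000, next 0xffffffc000400000 and phys 0x80200000 pass the test, while moving phys up by 4 KB leaves low bits set in the OR and forces page-level ptes instead.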