author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2014-10-20 10:27:26 -0400
committer	Ard Biesheuvel <ard.biesheuvel@linaro.org>	2015-01-12 11:29:12 -0500
commit		f3cdfd239da56a4cea75a2920dc326f0f45f67e3 (patch)
tree		71eef379af8b2aac7232d21bdcbb4ae390ae9ba5 /arch/arm64
parent		1bd0abb0c924a8b28c6466cdd6bb34ea053541dc (diff)
arm64/efi: move SetVirtualAddressMap() to UEFI stub
In order to support kexec, the kernel needs to be able to deal with the
state of the UEFI firmware after SetVirtualAddressMap() has been called.
To avoid having separate code paths for non-kexec and kexec, let's move
the call to SetVirtualAddressMap() to the stub: this will guarantee us
that it will only be called once (since the stub is not executed during
kexec), and ensures that the UEFI state is identical between kexec and
normal boot.

This implies that the layout of the virtual mapping needs to be created
by the stub as well. All regions are rounded up to a naturally aligned
multiple of 64 KB (for compatibility with 64k pages kernels) and recorded
in the UEFI memory map. The kernel proper reads those values and installs
the mappings in a dedicated set of page tables that are swapped in during
UEFI Runtime Services calls.

Acked-by: Leif Lindholm <leif.lindholm@linaro.org>
Acked-by: Matt Fleming <matt.fleming@intel.com>
Tested-by: Leif Lindholm <leif.lindholm@linaro.org>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/efi.h	 34
-rw-r--r--	arch/arm64/kernel/efi.c		230
-rw-r--r--	arch/arm64/kernel/setup.c	  1
3 files changed, 160 insertions, 105 deletions
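To illustrate the layout step described in the commit message, here is a minimal sketch (not part of this patch) of how a stub could assign 64 KB aligned virtual addresses to the runtime regions before calling SetVirtualAddressMap(). The function name layout_runtime_regions() and the virt_base parameter are hypothetical placeholders, not identifiers from this commit; the real stub-side implementation lives elsewhere in the series.

#include <linux/efi.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

/*
 * Hypothetical sketch: give every EFI_MEMORY_RUNTIME descriptor a virtual
 * slot that starts on a naturally aligned 64 KB boundary, preserving the
 * region's offset within its 64 KB granule so 64k-page kernels can map it
 * without splitting pages.
 */
static void layout_runtime_regions(void *memory_map, unsigned long map_size,
				   unsigned long desc_size, u64 virt_base)
{
	efi_memory_desc_t *md;
	u64 virt = virt_base;

	for (md = memory_map; (void *)md < memory_map + map_size;
	     md = (void *)md + desc_size) {
		u64 size = md->num_pages << EFI_PAGE_SHIFT;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		/* keep the physical offset within the 64 KB granule */
		md->virt_addr = virt + (md->phys_addr & (SZ_64K - 1));

		/* advance to the next naturally aligned 64 KB multiple */
		virt += round_up(size + (md->phys_addr & (SZ_64K - 1)), SZ_64K);
	}
}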
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 71291253114f..effef3713c5a 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -7,28 +7,36 @@
 #ifdef CONFIG_EFI
 extern void efi_init(void);
 extern void efi_idmap_init(void);
+extern void efi_virtmap_init(void);
 #else
 #define efi_init()
 #define efi_idmap_init()
+#define efi_virtmap_init()
 #endif
 
 #define efi_call_virt(f, ...)						\
 ({									\
-	efi_##f##_t *__f = efi.systab->runtime->f;			\
+	efi_##f##_t *__f;						\
 	efi_status_t __s;						\
 									\
 	kernel_neon_begin();						\
+	efi_virtmap_load();						\
+	__f = efi.systab->runtime->f;					\
 	__s = __f(__VA_ARGS__);						\
+	efi_virtmap_unload();						\
 	kernel_neon_end();						\
 	__s;								\
 })
 
 #define __efi_call_virt(f, ...)					\
 ({									\
-	efi_##f##_t *__f = efi.systab->runtime->f;			\
+	efi_##f##_t *__f;						\
 									\
 	kernel_neon_begin();						\
+	efi_virtmap_load();						\
+	__f = efi.systab->runtime->f;					\
 	__f(__VA_ARGS__);						\
+	efi_virtmap_unload();						\
 	kernel_neon_end();						\
 })
 
@@ -46,4 +54,26 @@ extern void efi_idmap_init(void);
 
 #define EFI_ALLOC_ALIGN		SZ_64K
 
+/*
+ * On ARM systems, virtually remapped UEFI runtime services are set up in three
+ * distinct stages:
+ * - The stub retrieves the final version of the memory map from UEFI, populates
+ *   the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime
+ *   service to communicate the new mapping to the firmware (Note that the new
+ *   mapping is not live at this time)
+ * - During early boot, the page tables are allocated and populated based on the
+ *   virt_addr fields in the memory map, but only if all descriptors with the
+ *   EFI_MEMORY_RUNTIME attribute have a non-zero value for virt_addr. If this
+ *   succeeds, the EFI_VIRTMAP flag is set to indicate that the virtual mappings
+ *   have been installed successfully.
+ * - During an early initcall(), the UEFI Runtime Services are enabled and the
+ *   EFI_RUNTIME_SERVICES bit set if some conditions are met, i.e., we need a
+ *   non-early mapping of the UEFI system table, and we need to have the virtmap
+ *   installed.
+ */
+#define EFI_VIRTMAP		EFI_ARCH_1
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
 #endif /* _ASM_EFI_H */
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2bb4347d0edf..755e545144ea 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,25 +11,31 @@
  *
  */
 
+#include <linux/atomic.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/memblock.h>
+#include <linux/mm_types.h>
 #include <linux/bootmem.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/efi.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
 
 struct efi_memory_map memmap;
 
-static efi_runtime_services_t *runtime;
-
 static u64 efi_system_table;
 
 static int uefi_debug __initdata;
@@ -69,9 +75,33 @@ static void __init efi_setup_idmap(void)
 	}
 }
 
+/*
+ * Translate a EFI virtual address into a physical address: this is necessary,
+ * as some data members of the EFI system table are virtually remapped after
+ * SetVirtualAddressMap() has been called.
+ */
+static phys_addr_t efi_to_phys(unsigned long addr)
+{
+	efi_memory_desc_t *md;
+
+	for_each_efi_memory_desc(&memmap, md) {
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (md->virt_addr == 0)
+			/* no virtual mapping has been installed by the stub */
+			break;
+		if (md->virt_addr <= addr &&
+		    (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+			return md->phys_addr + addr - md->virt_addr;
+	}
+	return addr;
+}
+
 static int __init uefi_init(void)
 {
 	efi_char16_t *c16;
+	void *config_tables;
+	u64 table_size;
 	char vendor[100] = "unknown";
 	int i, retval;
 
@@ -99,7 +129,7 @@ static int __init uefi_init(void)
 		efi.systab->hdr.revision & 0xffff);
 
 	/* Show what we know for posterity */
-	c16 = early_memremap(efi.systab->fw_vendor,
+	c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
 			     sizeof(vendor));
 	if (c16) {
 		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
@@ -112,8 +142,14 @@ static int __init uefi_init(void)
 		efi.systab->hdr.revision >> 16,
 		efi.systab->hdr.revision & 0xffff, vendor);
 
-	retval = efi_config_init(NULL);
+	table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
+	config_tables = early_memremap(efi_to_phys(efi.systab->tables),
+				       table_size);
+
+	retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
+					 sizeof(efi_config_table_64_t), NULL);
 
+	early_memunmap(config_tables, table_size);
 out:
 	early_memunmap(efi.systab, sizeof(efi_system_table_t));
 	return retval;
@@ -329,51 +365,14 @@ void __init efi_idmap_init(void)
 	early_memunmap(memmap.map, memmap.map_end - memmap.map);
 }
 
-static int __init remap_region(efi_memory_desc_t *md, void **new)
-{
-	u64 paddr, vaddr, npages, size;
-
-	paddr = md->phys_addr;
-	npages = md->num_pages;
-	memrange_efi_to_native(&paddr, &npages);
-	size = npages << PAGE_SHIFT;
-
-	if (is_normal_ram(md))
-		vaddr = (__force u64)ioremap_cache(paddr, size);
-	else
-		vaddr = (__force u64)ioremap(paddr, size);
-
-	if (!vaddr) {
-		pr_err("Unable to remap 0x%llx pages @ %p\n",
-		       npages, (void *)paddr);
-		return 0;
-	}
-
-	/* adjust for any rounding when EFI and system pagesize differs */
-	md->virt_addr = vaddr + (md->phys_addr - paddr);
-
-	if (uefi_debug)
-		pr_info("  EFI remap 0x%012llx => %p\n",
-			md->phys_addr, (void *)md->virt_addr);
-
-	memcpy(*new, md, memmap.desc_size);
-	*new += memmap.desc_size;
-
-	return 1;
-}
-
 /*
- * Switch UEFI from an identity map to a kernel virtual map
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
  */
-static int __init arm64_enter_virtual_mode(void)
+static int __init arm64_enable_runtime_services(void)
 {
-	efi_memory_desc_t *md;
-	phys_addr_t virtmap_phys;
-	void *virtmap, *virt_md;
-	efi_status_t status;
 	u64 mapsize;
-	int count = 0;
-	unsigned long flags;
 
 	if (!efi_enabled(EFI_BOOT)) {
 		pr_info("EFI services will not be available.\n");
@@ -395,81 +394,30 @@ static int __init arm64_enter_virtual_mode(void)
 
 	efi.memmap = &memmap;
 
-	/* Map the runtime regions */
-	virtmap = kmalloc(mapsize, GFP_KERNEL);
-	if (!virtmap) {
-		pr_err("Failed to allocate EFI virtual memmap\n");
-		return -1;
-	}
-	virtmap_phys = virt_to_phys(virtmap);
-	virt_md = virtmap;
-
-	for_each_efi_memory_desc(&memmap, md) {
-		if (!(md->attribute & EFI_MEMORY_RUNTIME))
-			continue;
-		if (!remap_region(md, &virt_md))
-			goto err_unmap;
-		++count;
-	}
-
-	efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
+	efi.systab = (__force void *)ioremap_cache(efi_system_table,
+						   sizeof(efi_system_table_t));
 	if (!efi.systab) {
-		/*
-		 * If we have no virtual mapping for the System Table at this
-		 * point, the memory map doesn't cover the physical offset where
-		 * it resides. This means the System Table will be inaccessible
-		 * to Runtime Services themselves once the virtual mapping is
-		 * installed.
-		 */
-		pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
-		goto err_unmap;
+		pr_err("Failed to remap EFI System Table\n");
+		return -1;
 	}
 	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
-	local_irq_save(flags);
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
-
-	/* Call SetVirtualAddressMap with the physical address of the map */
-	runtime = efi.systab->runtime;
-	efi.set_virtual_address_map = runtime->set_virtual_address_map;
-
-	status = efi.set_virtual_address_map(count * memmap.desc_size,
-					     memmap.desc_size,
-					     memmap.desc_version,
-					     (efi_memory_desc_t *)virtmap_phys);
-	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
-	local_irq_restore(flags);
-
-	kfree(virtmap);
-
 	free_boot_services();
 
-	if (status != EFI_SUCCESS) {
-		pr_err("Failed to set EFI virtual address map! [%lx]\n",
-		       status);
+	if (!efi_enabled(EFI_VIRTMAP)) {
+		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
 		return -1;
 	}
 
 	/* Set up runtime services function pointers */
-	runtime = efi.systab->runtime;
 	efi_native_runtime_setup();
 	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
 	efi.runtime_version = efi.systab->hdr.revision;
 
 	return 0;
-
-err_unmap:
-	/* unmap all mappings that succeeded: there are 'count' of those */
-	for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
-		md = virt_md;
-		iounmap((__force void __iomem *)md->virt_addr);
-	}
-	kfree(virtmap);
-	return -1;
 }
-early_initcall(arm64_enter_virtual_mode);
+early_initcall(arm64_enable_runtime_services);
 
 static int __init arm64_dmi_init(void)
 {
@@ -484,3 +432,79 @@ static int __init arm64_dmi_init(void)
 	return 0;
 }
 core_initcall(arm64_dmi_init);
+
+static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+
+static struct mm_struct efi_mm = {
+	.mm_rb			= RB_ROOT,
+	.pgd			= efi_pgd,
+	.mm_users		= ATOMIC_INIT(2),
+	.mm_count		= ATOMIC_INIT(1),
+	.mmap_sem		= __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
+	INIT_MM_CONTEXT(efi_mm)
+};
+
+static void efi_set_pgd(struct mm_struct *mm)
+{
+	cpu_switch_mm(mm->pgd, mm);
+	flush_tlb_all();
+	if (icache_is_aivivt())
+		__flush_icache_all();
+}
+
+void efi_virtmap_load(void)
+{
+	preempt_disable();
+	efi_set_pgd(&efi_mm);
+}
+
+void efi_virtmap_unload(void)
+{
+	efi_set_pgd(current->active_mm);
+	preempt_enable();
+}
+
+void __init efi_virtmap_init(void)
+{
+	efi_memory_desc_t *md;
+
+	if (!efi_enabled(EFI_BOOT))
+		return;
+
+	for_each_efi_memory_desc(&memmap, md) {
+		u64 paddr, npages, size;
+		pgprot_t prot;
+
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (WARN(md->virt_addr == 0,
+			 "UEFI virtual mapping incomplete or missing -- no entry found for 0x%llx\n",
+			 md->phys_addr))
+			return;
+
+		paddr = md->phys_addr;
+		npages = md->num_pages;
+		memrange_efi_to_native(&paddr, &npages);
+		size = npages << PAGE_SHIFT;
+
+		pr_info("  EFI remap 0x%016llx => %p\n",
+			md->phys_addr, (void *)md->virt_addr);
+
+		/*
+		 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+		 * executable, everything else can be mapped with the XN bits
+		 * set.
+		 */
+		if (!is_normal_ram(md))
+			prot = __pgprot(PROT_DEVICE_nGnRE);
+		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+			prot = PAGE_KERNEL_EXEC;
+		else
+			prot = PAGE_KERNEL;
+
+		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+	}
+	set_bit(EFI_VIRTMAP, &efi.flags);
+}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 20fe2932ad0c..beac8188fdbd 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -401,6 +401,7 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
 	request_standard_resources();
 
+	efi_virtmap_init();
 	efi_idmap_init();
 	early_ioremap_reset();
 
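For context, here is a rough sketch of what a runtime service call looks like once this patch is applied, written out by hand instead of through the efi_call_virt() macro from the efi.h hunk above. It is illustrative only; example_get_next_variable() is a hypothetical wrapper, and GetNextVariableName() is just one example of a runtime service.

static efi_status_t example_get_next_variable(unsigned long *name_size,
					      efi_char16_t *name,
					      efi_guid_t *vendor)
{
	efi_status_t status;

	kernel_neon_begin();		/* firmware may clobber FP/SIMD state */
	efi_virtmap_load();		/* switch TTBR0 to the efi_mm page tables */
	status = efi.systab->runtime->get_next_variable(name_size, name, vendor);
	efi_virtmap_unload();		/* restore the current task's mm */
	kernel_neon_end();

	return status;
}

The point of the design is visible here: the firmware's virtual mapping only ever exists in a dedicated set of page tables (efi_mm), swapped in around each call, so neither normal boot nor kexec has to keep the kernel's own mapping compatible with whatever SetVirtualAddressMap() recorded in the stub.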