author     Ard Biesheuvel <ard.biesheuvel-QSEj5FYQhm4dnm+yROfE0A@public.gmane.org>    2015-01-22 05:01:40 -0500
committer  Catalin Marinas <catalin.marinas@arm.com>                                  2015-01-22 09:59:25 -0500
commit     60305db9884515ca063474e262b454f6da04e4e2
tree       cbf4480285dd0e7fc8d13780bac7b50678dbf714
parent     da141706aea52c1a9fbd28cb8d289b78819f5436
arm64/efi: move virtmap init to early initcall
Now that the create_mapping() code in mm/mmu.c is able to support
setting up kernel page tables at initcall time, we can move the whole
virtmap creation to arm64_enable_runtime_services() instead of having
a distinct stage during early boot. This also allows us to drop the
arm64-specific EFI_VIRTMAP flag.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel-QSEj5FYQhm4dnm+yROfE0A@public.gmane.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
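For orientation before the diff itself: after this change, arm64_enable_runtime_services() builds the private EFI page tables on demand and bails out if any EFI_MEMORY_RUNTIME descriptor lacks a virtual address. The sketch below is a condensed illustration of that flow as it appears in the patch, not the complete function body; the system-table handling is summarised in comments.

    /*
     * Condensed sketch of the new early-initcall path. The virtmap is now
     * built here instead of in setup_arch() via a separate efi_virtmap_init()
     * call, so the arm64-specific EFI_VIRTMAP flag is no longer needed.
     */
    static int __init arm64_enable_runtime_services(void)
    {
    	/* ... remap the EFI system table, set EFI_SYSTEM_TABLES ... */

    	if (!efi_virtmap_init()) {
    		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
    		return -1;
    	}

    	/* ... publish efi.systab, set EFI_RUNTIME_SERVICES ... */
    	return 0;
    }
    early_initcall(arm64_enable_runtime_services);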
 arch/arm64/include/asm/efi.h |  18
 arch/arm64/kernel/efi.c      | 111
 arch/arm64/kernel/setup.c    |   1
 arch/arm64/mm/mmu.c          |   2
 4 files changed, 59 insertions(+), 73 deletions(-)
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 7baf2cc04e1e..ef572206f1c3 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -6,10 +6,8 @@
 
 #ifdef CONFIG_EFI
 extern void efi_init(void);
-extern void efi_virtmap_init(void);
 #else
 #define efi_init()
-#define efi_virtmap_init()
 #endif
 
 #define efi_call_virt(f, ...)					\
@@ -53,23 +51,17 @@ extern void efi_virtmap_init(void);
 #define EFI_ALLOC_ALIGN		SZ_64K
 
 /*
- * On ARM systems, virtually remapped UEFI runtime services are set up in three
+ * On ARM systems, virtually remapped UEFI runtime services are set up in two
  * distinct stages:
  * - The stub retrieves the final version of the memory map from UEFI, populates
  *   the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime
  *   service to communicate the new mapping to the firmware (Note that the new
  *   mapping is not live at this time)
- * - During early boot, the page tables are allocated and populated based on the
- *   virt_addr fields in the memory map, but only if all descriptors with the
- *   EFI_MEMORY_RUNTIME attribute have a non-zero value for virt_addr. If this
- *   succeeds, the EFI_VIRTMAP flag is set to indicate that the virtual mappings
- *   have been installed successfully.
- * - During an early initcall(), the UEFI Runtime Services are enabled and the
- *   EFI_RUNTIME_SERVICES bit set if some conditions are met, i.e., we need a
- *   non-early mapping of the UEFI system table, and we need to have the virtmap
- *   installed.
+ * - During an early initcall(), the EFI system table is permanently remapped
+ *   and the virtual remapping of the UEFI Runtime Services regions is loaded
+ *   into a private set of page tables. If this all succeeds, the Runtime
+ *   Services are enabled and the EFI_RUNTIME_SERVICES bit set.
  */
-#define EFI_VIRTMAP		EFI_ARCH_1
 
 void efi_virtmap_load(void);
 void efi_virtmap_unload(void);
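The header keeps exposing efi_virtmap_load()/efi_virtmap_unload(), which switch to and from the private efi_mm page tables around a firmware call (see the efi_set_pgd() helper later in efi.c). A hypothetical caller would bracket the runtime-service invocation as below; the wrapper name and the specific service are illustrative only and not taken from this patch.

    /* Hypothetical illustration of bracketing a runtime-service call. */
    static efi_status_t example_get_time(efi_time_t *tm)
    {
    	efi_status_t status;

    	efi_virtmap_load();		/* switch TTBR0 to efi_mm's page tables */
    	status = efi.systab->runtime->get_time(tm, NULL);
    	efi_virtmap_unload();		/* restore the previously active mm */

    	return status;
    }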
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index c9cb0fbe7aa4..b42c7b480e1e 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -38,6 +38,19 @@ struct efi_memory_map memmap;
 
 static u64 efi_system_table;
 
+static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+
+static struct mm_struct efi_mm = {
+	.mm_rb = RB_ROOT,
+	.pgd = efi_pgd,
+	.mm_users = ATOMIC_INIT(2),
+	.mm_count = ATOMIC_INIT(1),
+	.mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+	.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+	.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+	INIT_MM_CONTEXT(efi_mm)
+};
+
 static int uefi_debug __initdata;
 static int __init uefi_debug_setup(char *str)
 {
@@ -213,6 +226,45 @@ void __init efi_init(void)
 		return;
 
 	reserve_regions();
+	early_memunmap(memmap.map, params.mmap_size);
+}
+
+static bool __init efi_virtmap_init(void)
+{
+	efi_memory_desc_t *md;
+
+	for_each_efi_memory_desc(&memmap, md) {
+		u64 paddr, npages, size;
+		pgprot_t prot;
+
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (md->virt_addr == 0)
+			return false;
+
+		paddr = md->phys_addr;
+		npages = md->num_pages;
+		memrange_efi_to_native(&paddr, &npages);
+		size = npages << PAGE_SHIFT;
+
+		pr_info(" EFI remap 0x%016llx => %p\n",
+			md->phys_addr, (void *)md->virt_addr);
+
+		/*
+		 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+		 * executable, everything else can be mapped with the XN bits
+		 * set.
+		 */
+		if (!is_normal_ram(md))
+			prot = __pgprot(PROT_DEVICE_nGnRE);
+		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+			prot = PAGE_KERNEL_EXEC;
+		else
+			prot = PAGE_KERNEL;
+
+		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+	}
+	return true;
 }
 
 /*
@@ -254,7 +306,7 @@ static int __init arm64_enable_runtime_services(void)
 	}
 	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
-	if (!efi_enabled(EFI_VIRTMAP)) {
+	if (!efi_virtmap_init()) {
 		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
 		return -1;
 	}
@@ -283,19 +335,6 @@ static int __init arm64_dmi_init(void)
 }
 core_initcall(arm64_dmi_init);
 
-static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
-
-static struct mm_struct efi_mm = {
-	.mm_rb = RB_ROOT,
-	.pgd = efi_pgd,
-	.mm_users = ATOMIC_INIT(2),
-	.mm_count = ATOMIC_INIT(1),
-	.mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
-	.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
-	.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
-	INIT_MM_CONTEXT(efi_mm)
-};
-
 static void efi_set_pgd(struct mm_struct *mm)
 {
 	cpu_switch_mm(mm->pgd, mm);
@@ -315,47 +354,3 @@
 	efi_set_pgd(current->active_mm);
 	preempt_enable();
 }
-
-void __init efi_virtmap_init(void)
-{
-	efi_memory_desc_t *md;
-
-	if (!efi_enabled(EFI_BOOT))
-		return;
-
-	for_each_efi_memory_desc(&memmap, md) {
-		u64 paddr, npages, size;
-		pgprot_t prot;
-
-		if (!(md->attribute & EFI_MEMORY_RUNTIME))
-			continue;
-		if (WARN(md->virt_addr == 0,
-			 "UEFI virtual mapping incomplete or missing -- no entry found for 0x%llx\n",
-			 md->phys_addr))
-			return;
-
-		paddr = md->phys_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&paddr, &npages);
-		size = npages << PAGE_SHIFT;
-
-		pr_info(" EFI remap 0x%016llx => %p\n",
-			md->phys_addr, (void *)md->virt_addr);
-
-		/*
-		 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
-		 * executable, everything else can be mapped with the XN bits
-		 * set.
-		 */
-		if (!is_normal_ram(md))
-			prot = __pgprot(PROT_DEVICE_nGnRE);
-		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
-			prot = PAGE_KERNEL_EXEC;
-		else
-			prot = PAGE_KERNEL;
-
-		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
-	}
-	set_bit(EFI_VIRTMAP, &efi.flags);
-	early_memunmap(memmap.map, memmap.map_end - memmap.map);
-}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 207413fe08a0..bb10903887d4 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -382,7 +382,6 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
 	request_standard_resources();
 
-	efi_virtmap_init();
 	early_ioremap_reset();
 
 	unflatten_device_tree();
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 91d55b6efd8a..155cbb0a74b6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -269,7 +269,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       pgprot_t prot)
 {
 	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
-			 early_alloc);
+			 late_alloc);
 }
 
 static void create_mapping_late(phys_addr_t phys, unsigned long virt,
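The mmu.c hunk is what makes the move possible: create_pgd_mapping() is now reached from an initcall, after the early memblock-backed allocator is no longer the right tool, so page-table pages must come from the regular page allocator instead. Below is a rough sketch of what such a "late" allocator might look like; it is an assumption for illustration only, not the body of late_alloc() from this tree.

    /*
     * Assumed shape of a page-table allocator usable at initcall time,
     * once the buddy allocator is up. Illustrative only.
     */
    static void *late_alloc(unsigned long size)
    {
    	void *ptr;

    	BUG_ON(size > PAGE_SIZE);
    	ptr = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
    	BUG_ON(!ptr);
    	return ptr;
    }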