aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/platform/efi/efi_64.c
diff options
context:
space:
mode:
author    Borislav Petkov <bp@suse.de>          2014-01-18 06:48:17 -0500
committer Matt Fleming <matt.fleming@intel.com> 2014-03-04 11:17:18 -0500
commitb7b898ae0c0a82489511a1ce1b35f26215e6beb5 (patch)
tree79167e1a9fe59bf3e4b55772ccd294bf9cad2b11 /arch/x86/platform/efi/efi_64.c
parent42a5477251f0e0f33ad5f6a95c48d685ec03191e (diff)
x86/efi: Make efi virtual runtime map passing more robust
Currently, running SetVirtualAddressMap() and passing the physical address of the virtual map array was working only by a lucky coincidence because the memory was present in the EFI page table too. Until Toshi went and booted this on a big HP box - the krealloc() manner of resizing the memmap we're doing did allocate from such physical addresses which were not mapped anymore and boom: http://lkml.kernel.org/r/1386806463.1791.295.camel@misato.fc.hp.com One way to take care of that issue is to reimplement the krealloc thing but with pages. We start with contiguous pages of order 1, i.e. 2 pages, and when we deplete that memory (shouldn't happen all that often but you know firmware) we realloc the next power-of-two pages. Having the pages, it is much more handy and easy to map them into the EFI page table with the already existing mapping code which we're using for building the virtual mappings. Thanks to Toshi Kani and Matt for the great debugging help. Reported-by: Toshi Kani <toshi.kani@hp.com> Signed-off-by: Borislav Petkov <bp@suse.de> Tested-by: Toshi Kani <toshi.kani@hp.com> Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Diffstat (limited to 'arch/x86/platform/efi/efi_64.c')
-rw-r--r--  arch/x86/platform/efi/efi_64.c  32
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index e05c69b46f05..19280900ec25 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -137,12 +137,38 @@ void efi_sync_low_kernel_mappings(void)
 		sizeof(pgd_t) * num_pgds);
 }
 
-void efi_setup_page_tables(void)
+int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
+	pgd_t *pgd;
+
+	if (efi_enabled(EFI_OLD_MEMMAP))
+		return 0;
+
 	efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
+	pgd = __va(efi_scratch.efi_pgt);
 
-	if (!efi_enabled(EFI_OLD_MEMMAP))
-		efi_scratch.use_pgd = true;
+	/*
+	 * It can happen that the physical address of new_memmap lands in memory
+	 * which is not mapped in the EFI page table. Therefore we need to go
+	 * and ident-map those pages containing the map before calling
+	 * phys_efi_set_virtual_address_map().
+	 */
+	if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
+		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
+		return 1;
+	}
+
+	efi_scratch.use_pgd = true;
+
+	return 0;
+}
+
+void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+{
+	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+
+	kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
 }
 
 static void __init __map_region(efi_memory_desc_t *md, u64 va)