author     Huang, Ying <ying.huang@intel.com>    2008-02-04 10:48:06 -0500
committer  Ingo Molnar <mingo@elte.hu>           2008-02-04 10:48:06 -0500
commit     1c083eb2cbdd917149f6acaa55efca129d05c2a9 (patch)
tree       8d44b2d7daf85393994e71b5962e10c8ca9daa6f
parent     f56d005d30342a45d8af2b75ecccc82200f09600 (diff)
x86: fix EFI mapping
This patch updates the EFI runtime memory mapping code by making EFI areas explicitly executable.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/kernel/efi.c    | 57
-rw-r--r--  arch/x86/kernel/efi_64.c | 22
-rw-r--r--  include/asm-x86/efi.h    |  4
3 files changed, 43 insertions(+), 40 deletions(-)
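For orientation before the diff itself, here is a condensed sketch (not part of the patch; the helper name is purely illustrative) of the mapping flow the change converges on. It assumes the headers and globals already present in arch/x86/kernel/efi.c (memmap, max_pfn_mapped, efi_ioremap()); the uncached/non-write-back handling and error reporting are omitted. Each EFI_MEMORY_RUNTIME region is mapped either through the kernel direct mapping or via efi_ioremap(), and runtime service code is then explicitly made executable with set_memory_x():

/* Illustration only -- a simplified view, not the committed code. */
static void __init efi_map_runtime_sketch(void)
{
	efi_memory_desc_t *md;
	unsigned long size;
	u64 end;
	void *p, *va;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;

		/* Reuse the direct mapping when the region is already
		 * covered, otherwise map it through efi_ioremap(). */
		if ((end >> PAGE_SHIFT) <= max_pfn_mapped)
			va = __va(md->phys_addr);
		else
			va = efi_ioremap(md->phys_addr, size);

		md->virt_addr = (u64) (unsigned long) va;

		/* Runtime service code must remain executable once NX is
		 * enforced; set_memory_x() takes a count of 4 KiB pages,
		 * which matches EFI's page size on x86. */
		if (va && md->type == EFI_RUNTIME_SERVICES_CODE)
			set_memory_x(md->virt_addr, md->num_pages);
	}
}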
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 1411324a625..32dd62b36ff 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -379,11 +379,9 @@ void __init efi_init(void)
 #endif
 }
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static void __init runtime_code_page_mkexec(void)
 {
 	efi_memory_desc_t *md;
-	unsigned long end;
 	void *p;
 
 	if (!(__supported_pte_mask & _PAGE_NX))
@@ -392,18 +390,13 @@ static void __init runtime_code_page_mkexec(void)
 	/* Make EFI runtime service code area executable */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-		if (md->type == EFI_RUNTIME_SERVICES_CODE &&
-		    (end >> PAGE_SHIFT) <= max_pfn_mapped) {
-			set_memory_x(md->virt_addr, md->num_pages);
-			set_memory_uc(md->virt_addr, md->num_pages);
-		}
+
+		if (md->type != EFI_RUNTIME_SERVICES_CODE)
+			continue;
+
+		set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
 	}
-	__flush_tlb_all();
 }
-#else
-static inline void __init runtime_code_page_mkexec(void) { }
-#endif
 
 /*
  * This function will switch the EFI runtime services to virtual mode.
@@ -417,30 +410,40 @@ void __init efi_enter_virtual_mode(void)
 {
 	efi_memory_desc_t *md;
 	efi_status_t status;
-	unsigned long end;
-	void *p;
+	unsigned long size;
+	u64 end, systab;
+	void *p, *va;
 
 	efi.systab = NULL;
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
 			continue;
-		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-		if ((md->attribute & EFI_MEMORY_WB) &&
-		    ((end >> PAGE_SHIFT) <= max_pfn_mapped))
-			md->virt_addr = (unsigned long)__va(md->phys_addr);
+
+		size = md->num_pages << EFI_PAGE_SHIFT;
+		end = md->phys_addr + size;
+
+		if ((end >> PAGE_SHIFT) <= max_pfn_mapped)
+			va = __va(md->phys_addr);
 		else
-			md->virt_addr = (unsigned long)
-				efi_ioremap(md->phys_addr,
-					    md->num_pages << EFI_PAGE_SHIFT);
-		if (!md->virt_addr)
+			va = efi_ioremap(md->phys_addr, size);
+
+		if (md->attribute & EFI_MEMORY_WB)
+			set_memory_uc(md->virt_addr, size);
+
+		md->virt_addr = (u64) (unsigned long) va;
+
+		if (!va) {
 			printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
 			       (unsigned long long)md->phys_addr);
-		if ((md->phys_addr <= (unsigned long)efi_phys.systab) &&
-		    ((unsigned long)efi_phys.systab < end))
-			efi.systab = (efi_system_table_t *)(unsigned long)
-				(md->virt_addr - md->phys_addr +
-				 (unsigned long)efi_phys.systab);
+			continue;
+		}
+
+		systab = (u64) (unsigned long) efi_phys.systab;
+		if (md->phys_addr <= systab && systab < end) {
+			systab += md->virt_addr - md->phys_addr;
+			efi.systab = (efi_system_table_t *) (unsigned long) systab;
+		}
 	}
 
 	BUG_ON(!efi.systab);
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index 674f2379480..09d5c233093 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -54,10 +54,10 @@ static void __init early_mapping_set_exec(unsigned long start,
 	else
 		set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
 				    __supported_pte_mask));
-		if (level == 4)
-			start = (start + PMD_SIZE) & PMD_MASK;
-		else
+		if (level == PG_LEVEL_4K)
 			start = (start + PAGE_SIZE) & PAGE_MASK;
+		else
+			start = (start + PMD_SIZE) & PMD_MASK;
 	}
 }
 
@@ -109,23 +109,23 @@ void __init efi_reserve_bootmem(void)
 				memmap.nr_map * memmap.desc_size);
 }
 
-void __iomem * __init efi_ioremap(unsigned long offset,
-				  unsigned long size)
+void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
 	static unsigned pages_mapped;
-	unsigned long last_addr;
 	unsigned i, pages;
 
-	last_addr = offset + size - 1;
-	offset &= PAGE_MASK;
-	pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT;
+	/* phys_addr and size must be page aligned */
+	if ((phys_addr & ~PAGE_MASK) || (size & ~PAGE_MASK))
+		return NULL;
+
+	pages = size >> PAGE_SHIFT;
 	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
 		return NULL;
 
 	for (i = 0; i < pages; i++) {
 		__set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-			     offset, PAGE_KERNEL_EXEC_NOCACHE);
-		offset += PAGE_SIZE;
+			     phys_addr, PAGE_KERNEL);
+		phys_addr += PAGE_SIZE;
 		pages_mapped++;
 	}
 
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index 9c68a1f098d..ea9734b74ac 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -33,7 +33,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
 	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 
-#define efi_ioremap(addr, size) ioremap(addr, size)
+#define efi_ioremap(addr, size) ioremap_cache(addr, size)
 
 #else /* !CONFIG_X86_32 */
 
@@ -86,7 +86,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
 	efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
-extern void *efi_ioremap(unsigned long offset, unsigned long size);
+extern void *efi_ioremap(unsigned long addr, unsigned long size);
 
 #endif /* CONFIG_X86_32 */
 