author     Huang, Ying <ying.huang@intel.com>    2008-02-04 10:48:06 -0500
committer  Ingo Molnar <mingo@elte.hu>           2008-02-04 10:48:06 -0500
commit     1c083eb2cbdd917149f6acaa55efca129d05c2a9 (patch)
tree       8d44b2d7daf85393994e71b5962e10c8ca9daa6f /arch
parent     f56d005d30342a45d8af2b75ecccc82200f09600 (diff)
x86: fix EFI mapping
The patch updates the EFI runtime memory mapping code by making the EFI
runtime areas explicitly executable.
Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
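
For quick orientation before the diff: after this patch, marking the EFI
runtime-services code executable reduces to a plain walk of the EFI memory
map. The sketch below condenses the new runtime_code_page_mkexec() from the
efi.c hunk; all identifiers are taken from the diff itself, and it is shown
only to illustrate the pattern, not as additional kernel code.

/* Condensed from the efi.c hunk below: mark only EFI_RUNTIME_SERVICES_CODE
 * regions executable; everything else keeps its NX protection. */
static void __init runtime_code_page_mkexec(void)
{
        efi_memory_desc_t *md;
        void *p;

        /* NX bit not in use on this CPU/kernel: nothing to clear. */
        if (!(__supported_pte_mask & _PAGE_NX))
                return;

        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;
                if (md->type != EFI_RUNTIME_SERVICES_CODE)
                        continue;
                set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
        }
}

The efi_64.c side complements this: efi_ioremap() now maps EFI I/O ranges
with PAGE_KERNEL instead of PAGE_KERNEL_EXEC_NOCACHE, so only the runtime
code area ends up executable.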
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/kernel/efi.c      57
-rw-r--r--   arch/x86/kernel/efi_64.c   22
2 files changed, 41 insertions, 38 deletions
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 1411324a625c..32dd62b36ff7 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -379,11 +379,9 @@ void __init efi_init(void)
 #endif
 }
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static void __init runtime_code_page_mkexec(void)
 {
         efi_memory_desc_t *md;
-        unsigned long end;
         void *p;
 
         if (!(__supported_pte_mask & _PAGE_NX))
@@ -392,18 +390,13 @@ static void __init runtime_code_page_mkexec(void)
         /* Make EFI runtime service code area executable */
         for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                 md = p;
-                end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-                if (md->type == EFI_RUNTIME_SERVICES_CODE &&
-                    (end >> PAGE_SHIFT) <= max_pfn_mapped) {
-                        set_memory_x(md->virt_addr, md->num_pages);
-                        set_memory_uc(md->virt_addr, md->num_pages);
-                }
+
+                if (md->type != EFI_RUNTIME_SERVICES_CODE)
+                        continue;
+
+                set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
         }
-        __flush_tlb_all();
 }
-#else
-static inline void __init runtime_code_page_mkexec(void) { }
-#endif
 
 /*
  * This function will switch the EFI runtime services to virtual mode.
@@ -417,30 +410,40 @@ void __init efi_enter_virtual_mode(void)
 {
         efi_memory_desc_t *md;
         efi_status_t status;
-        unsigned long end;
-        void *p;
+        unsigned long size;
+        u64 end, systab;
+        void *p, *va;
 
         efi.systab = NULL;
         for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                 md = p;
                 if (!(md->attribute & EFI_MEMORY_RUNTIME))
                         continue;
-                end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-                if ((md->attribute & EFI_MEMORY_WB) &&
-                    ((end >> PAGE_SHIFT) <= max_pfn_mapped))
-                        md->virt_addr = (unsigned long)__va(md->phys_addr);
+
+                size = md->num_pages << EFI_PAGE_SHIFT;
+                end = md->phys_addr + size;
+
+                if ((end >> PAGE_SHIFT) <= max_pfn_mapped)
+                        va = __va(md->phys_addr);
                 else
-                        md->virt_addr = (unsigned long)
-                                efi_ioremap(md->phys_addr,
-                                            md->num_pages << EFI_PAGE_SHIFT);
-                if (!md->virt_addr)
+                        va = efi_ioremap(md->phys_addr, size);
+
+                if (md->attribute & EFI_MEMORY_WB)
+                        set_memory_uc(md->virt_addr, size);
+
+                md->virt_addr = (u64) (unsigned long) va;
+
+                if (!va) {
                         printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
                                (unsigned long long)md->phys_addr);
-                if ((md->phys_addr <= (unsigned long)efi_phys.systab) &&
-                    ((unsigned long)efi_phys.systab < end))
-                        efi.systab = (efi_system_table_t *)(unsigned long)
-                                (md->virt_addr - md->phys_addr +
-                                 (unsigned long)efi_phys.systab);
+                        continue;
+                }
+
+                systab = (u64) (unsigned long) efi_phys.systab;
+                if (md->phys_addr <= systab && systab < end) {
+                        systab += md->virt_addr - md->phys_addr;
+                        efi.systab = (efi_system_table_t *) (unsigned long) systab;
+                }
         }
 
         BUG_ON(!efi.systab);
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index 674f2379480f..09d5c2330934 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -54,10 +54,10 @@ static void __init early_mapping_set_exec(unsigned long start,
                 else
                         set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
                                             __supported_pte_mask));
-                if (level == 4)
-                        start = (start + PMD_SIZE) & PMD_MASK;
-                else
+                if (level == PG_LEVEL_4K)
                         start = (start + PAGE_SIZE) & PAGE_MASK;
+                else
+                        start = (start + PMD_SIZE) & PMD_MASK;
         }
 }
 
@@ -109,23 +109,23 @@ void __init efi_reserve_bootmem(void)
                         memmap.nr_map * memmap.desc_size);
 }
 
-void __iomem * __init efi_ioremap(unsigned long offset,
-                                  unsigned long size)
+void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
         static unsigned pages_mapped;
-        unsigned long last_addr;
         unsigned i, pages;
 
-        last_addr = offset + size - 1;
-        offset &= PAGE_MASK;
-        pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT;
+        /* phys_addr and size must be page aligned */
+        if ((phys_addr & ~PAGE_MASK) || (size & ~PAGE_MASK))
+                return NULL;
+
+        pages = size >> PAGE_SHIFT;
         if (pages_mapped + pages > MAX_EFI_IO_PAGES)
                 return NULL;
 
         for (i = 0; i < pages; i++) {
                 __set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-                             offset, PAGE_KERNEL_EXEC_NOCACHE);
-                offset += PAGE_SIZE;
+                             phys_addr, PAGE_KERNEL);
+                phys_addr += PAGE_SIZE;
                 pages_mapped++;
         }
 