diff options
author | Huang, Ying <ying.huang@intel.com> | 2008-01-30 07:33:55 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:33:55 -0500 |
commit | a2172e2586f6662af996e47f417bb718c37cf8d2 (patch) | |
tree | f2430abad9fd9417653a3a2faa472d9df0c308bd /arch/x86 | |
parent | cd58289667293593b04fd315ec7f2f37589134cb (diff) |
x86: fix some bugs about EFI runtime code mapping
This patch fixes several bugs in making EFI runtime code executable.
- Use change_page_attr on i386 too, because the runtime code may be
mapped by means other than ioremap.
- If _PAGE_NX is not set in __supported_pte_mask, change_page_attr
is not called.
- Make efi_ioremap map pages as PAGE_KERNEL_EXEC_NOCACHE, because EFI runtime
code may be mapped through efi_ioremap.
Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kernel/efi.c | 35 | ||||
-rw-r--r-- | arch/x86/kernel/efi_64.c | 26 |
2 files changed, 36 insertions, 25 deletions
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c index 5d492f99d967..834ecfb41e97 100644 --- a/arch/x86/kernel/efi.c +++ b/arch/x86/kernel/efi.c | |||
@@ -40,6 +40,8 @@ | |||
40 | #include <asm/setup.h> | 40 | #include <asm/setup.h> |
41 | #include <asm/efi.h> | 41 | #include <asm/efi.h> |
42 | #include <asm/time.h> | 42 | #include <asm/time.h> |
43 | #include <asm/cacheflush.h> | ||
44 | #include <asm/tlbflush.h> | ||
43 | 45 | ||
44 | #define EFI_DEBUG 1 | 46 | #define EFI_DEBUG 1 |
45 | #define PFX "EFI: " | 47 | #define PFX "EFI: " |
@@ -379,6 +381,32 @@ void __init efi_init(void) | |||
379 | #endif | 381 | #endif |
380 | } | 382 | } |
381 | 383 | ||
384 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) | ||
385 | static void __init runtime_code_page_mkexec(void) | ||
386 | { | ||
387 | efi_memory_desc_t *md; | ||
388 | unsigned long end; | ||
389 | void *p; | ||
390 | |||
391 | if (!(__supported_pte_mask & _PAGE_NX)) | ||
392 | return; | ||
393 | |||
394 | /* Make EFI runtime service code area executable */ | ||
395 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
396 | md = p; | ||
397 | end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); | ||
398 | if (md->type == EFI_RUNTIME_SERVICES_CODE && | ||
399 | (end >> PAGE_SHIFT) <= end_pfn_map) | ||
400 | change_page_attr_addr(md->virt_addr, | ||
401 | md->num_pages, | ||
402 | PAGE_KERNEL_EXEC_NOCACHE); | ||
403 | } | ||
404 | __flush_tlb_all(); | ||
405 | } | ||
406 | #else | ||
407 | static inline void __init runtime_code_page_mkexec(void) { } | ||
408 | #endif | ||
409 | |||
382 | /* | 410 | /* |
383 | * This function will switch the EFI runtime services to virtual mode. | 411 | * This function will switch the EFI runtime services to virtual mode. |
384 | * Essentially, look through the EFI memmap and map every region that | 412 | * Essentially, look through the EFI memmap and map every region that |
@@ -399,9 +427,9 @@ void __init efi_enter_virtual_mode(void) | |||
399 | md = p; | 427 | md = p; |
400 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) | 428 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) |
401 | continue; | 429 | continue; |
430 | end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); | ||
402 | if ((md->attribute & EFI_MEMORY_WB) && | 431 | if ((md->attribute & EFI_MEMORY_WB) && |
403 | (((md->phys_addr + (md->num_pages<<EFI_PAGE_SHIFT)) >> | 432 | ((end >> PAGE_SHIFT) <= end_pfn_map)) |
404 | PAGE_SHIFT) < end_pfn_map)) | ||
405 | md->virt_addr = (unsigned long)__va(md->phys_addr); | 433 | md->virt_addr = (unsigned long)__va(md->phys_addr); |
406 | else | 434 | else |
407 | md->virt_addr = (unsigned long) | 435 | md->virt_addr = (unsigned long) |
@@ -410,7 +438,6 @@ void __init efi_enter_virtual_mode(void) | |||
410 | if (!md->virt_addr) | 438 | if (!md->virt_addr) |
411 | printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n", | 439 | printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n", |
412 | (unsigned long long)md->phys_addr); | 440 | (unsigned long long)md->phys_addr); |
413 | end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); | ||
414 | if ((md->phys_addr <= (unsigned long)efi_phys.systab) && | 441 | if ((md->phys_addr <= (unsigned long)efi_phys.systab) && |
415 | ((unsigned long)efi_phys.systab < end)) | 442 | ((unsigned long)efi_phys.systab < end)) |
416 | efi.systab = (efi_system_table_t *)(unsigned long) | 443 | efi.systab = (efi_system_table_t *)(unsigned long) |
@@ -448,9 +475,7 @@ void __init efi_enter_virtual_mode(void) | |||
448 | efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; | 475 | efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; |
449 | efi.reset_system = virt_efi_reset_system; | 476 | efi.reset_system = virt_efi_reset_system; |
450 | efi.set_virtual_address_map = virt_efi_set_virtual_address_map; | 477 | efi.set_virtual_address_map = virt_efi_set_virtual_address_map; |
451 | #ifdef CONFIG_X86_64 | ||
452 | runtime_code_page_mkexec(); | 478 | runtime_code_page_mkexec(); |
453 | #endif | ||
454 | } | 479 | } |
455 | 480 | ||
456 | /* | 481 | /* |
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c index 1f8bbd9644d7..9f8a75594398 100644 --- a/arch/x86/kernel/efi_64.c +++ b/arch/x86/kernel/efi_64.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <asm/e820.h> | 33 | #include <asm/e820.h> |
34 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
35 | #include <asm/tlbflush.h> | 35 | #include <asm/tlbflush.h> |
36 | #include <asm/cacheflush.h> | ||
37 | #include <asm/proto.h> | 36 | #include <asm/proto.h> |
38 | #include <asm/efi.h> | 37 | #include <asm/efi.h> |
39 | 38 | ||
@@ -55,7 +54,7 @@ static void __init early_mapping_set_exec(unsigned long start, | |||
55 | else | 54 | else |
56 | set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \ | 55 | set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \ |
57 | __supported_pte_mask)); | 56 | __supported_pte_mask)); |
58 | if (pte_huge(*kpte)) | 57 | if (level == 4) |
59 | start = (start + PMD_SIZE) & PMD_MASK; | 58 | start = (start + PMD_SIZE) & PMD_MASK; |
60 | else | 59 | else |
61 | start = (start + PAGE_SIZE) & PAGE_MASK; | 60 | start = (start + PAGE_SIZE) & PAGE_MASK; |
@@ -67,6 +66,9 @@ static void __init early_runtime_code_mapping_set_exec(int executable) | |||
67 | efi_memory_desc_t *md; | 66 | efi_memory_desc_t *md; |
68 | void *p; | 67 | void *p; |
69 | 68 | ||
69 | if (!(__supported_pte_mask & _PAGE_NX)) | ||
70 | return; | ||
71 | |||
70 | /* Make EFI runtime service code area executable */ | 72 | /* Make EFI runtime service code area executable */ |
71 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 73 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { |
72 | md = p; | 74 | md = p; |
@@ -116,22 +118,6 @@ void __init efi_reserve_bootmem(void) | |||
116 | memmap.nr_map * memmap.desc_size); | 118 | memmap.nr_map * memmap.desc_size); |
117 | } | 119 | } |
118 | 120 | ||
119 | void __init runtime_code_page_mkexec(void) | ||
120 | { | ||
121 | efi_memory_desc_t *md; | ||
122 | void *p; | ||
123 | |||
124 | /* Make EFI runtime service code area executable */ | ||
125 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
126 | md = p; | ||
127 | if (md->type == EFI_RUNTIME_SERVICES_CODE) | ||
128 | change_page_attr_addr(md->virt_addr, | ||
129 | md->num_pages, | ||
130 | PAGE_KERNEL_EXEC); | ||
131 | } | ||
132 | __flush_tlb_all(); | ||
133 | } | ||
134 | |||
135 | void __iomem * __init efi_ioremap(unsigned long offset, | 121 | void __iomem * __init efi_ioremap(unsigned long offset, |
136 | unsigned long size) | 122 | unsigned long size) |
137 | { | 123 | { |
@@ -146,8 +132,8 @@ void __iomem * __init efi_ioremap(unsigned long offset, | |||
146 | return NULL; | 132 | return NULL; |
147 | 133 | ||
148 | for (i = 0; i < pages; i++) { | 134 | for (i = 0; i < pages; i++) { |
149 | set_fixmap_nocache(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped, | 135 | __set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped, |
150 | offset); | 136 | offset, PAGE_KERNEL_EXEC_NOCACHE); |
151 | offset += PAGE_SIZE; | 137 | offset += PAGE_SIZE; |
152 | pages_mapped++; | 138 | pages_mapped++; |
153 | } | 139 | } |