path: root/arch
authorHuang, Ying <ying.huang@intel.com>2008-02-25 02:18:37 -0500
committerIngo Molnar <mingo@elte.hu>2008-04-19 13:19:54 -0400
commit4a3575fd436aa98957184afd745e4ada8f1542d8 (patch)
tree786585e3d89af847c126d0b4577e212288a134fe /arch
parentf8dfd5ed149ae340451f25847b434297c20d4645 (diff)
x86: EFI_PAGE_SHIFT fix
Make the x86 EFI code work when EFI_PAGE_SHIFT != PAGE_SHIFT. The memrange_efi_to_native() helper provided by this patch can be used on other EFI platforms, such as IA64, too. This patch has been tested on an Intel x86_64 platform with EFI 64/32 firmware.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
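The memrange_efi_to_native() helper itself is added outside arch/ and therefore does not appear in the diffstat below, which is limited to 'arch'. What follows is only a sketch of the conversion it performs, inferred from the call sites in this patch; PFN_UP, PFN_DOWN, PAGE_MASK and EFI_PAGE_SHIFT are the usual kernel definitions.

/*
 * Sketch only -- not taken from this diff: convert a range expressed in
 * EFI pages (EFI_PAGE_SIZE == 4 KiB) into native pages, so helpers such
 * as set_memory_x()/set_memory_uc() can operate on it even when the
 * native PAGE_SHIFT differs from EFI_PAGE_SHIFT.
 */
static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
{
        /* round the end up and the start down to native page frames */
        *npages = PFN_UP(*addr + (*npages << EFI_PAGE_SHIFT)) - PFN_DOWN(*addr);
        /* align the start address to a native page boundary */
        *addr &= PAGE_MASK;
}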
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/efi.c     18
-rw-r--r--  arch/x86/kernel/efi_64.c  12
2 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 759e02bec070..77d424cf68b3 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -383,6 +383,7 @@ static void __init runtime_code_page_mkexec(void)
 {
        efi_memory_desc_t *md;
        void *p;
+       u64 addr, npages;
 
        /* Make EFI runtime service code area executable */
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
@@ -391,7 +392,10 @@ static void __init runtime_code_page_mkexec(void)
                if (md->type != EFI_RUNTIME_SERVICES_CODE)
                        continue;
 
-               set_memory_x(md->virt_addr, md->num_pages);
+               addr = md->virt_addr;
+               npages = md->num_pages;
+               memrange_efi_to_native(&addr, &npages);
+               set_memory_x(addr, npages);
        }
 }
 
@@ -408,7 +412,7 @@ void __init efi_enter_virtual_mode(void)
        efi_memory_desc_t *md;
        efi_status_t status;
        unsigned long size;
-       u64 end, systab;
+       u64 end, systab, addr, npages;
        void *p, *va;
 
        efi.systab = NULL;
@@ -420,7 +424,7 @@ void __init efi_enter_virtual_mode(void)
                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;
 
-               if ((end >> PAGE_SHIFT) <= max_pfn_mapped)
+               if (PFN_UP(end) <= max_pfn_mapped)
                        va = __va(md->phys_addr);
                else
                        va = efi_ioremap(md->phys_addr, size);
@@ -433,8 +437,12 @@ void __init efi_enter_virtual_mode(void)
                        continue;
                }
 
-               if (!(md->attribute & EFI_MEMORY_WB))
-                       set_memory_uc(md->virt_addr, md->num_pages);
+               if (!(md->attribute & EFI_MEMORY_WB)) {
+                       addr = md->virt_addr;
+                       npages = md->num_pages;
+                       memrange_efi_to_native(&addr, &npages);
+                       set_memory_uc(addr, npages);
+               }
 
                systab = (u64) (unsigned long) efi_phys.systab;
                if (md->phys_addr <= systab && systab < end) {
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index d143a1e76b30..d0060fdcccac 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -105,14 +105,14 @@ void __init efi_reserve_bootmem(void)
 
 void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
-       static unsigned pages_mapped;
+       static unsigned pages_mapped __initdata;
        unsigned i, pages;
+       unsigned long offset;
 
-       /* phys_addr and size must be page aligned */
-       if ((phys_addr & ~PAGE_MASK) || (size & ~PAGE_MASK))
-               return NULL;
+       pages = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr);
+       offset = phys_addr & ~PAGE_MASK;
+       phys_addr &= PAGE_MASK;
 
-       pages = size >> PAGE_SHIFT;
        if (pages_mapped + pages > MAX_EFI_IO_PAGES)
                return NULL;
 
@@ -124,5 +124,5 @@ void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
        }
 
        return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
-               (pages_mapped - pages));
+               (pages_mapped - pages)) + offset;
 }
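The reworked efi_ioremap() no longer rejects arguments that are not page aligned; it maps every native page the range touches and returns the mapping plus the sub-page offset. A stand-alone illustration of that arithmetic (ordinary user-space C with the page macros spelled out and arbitrary example values, not code from this commit):

/* Shows how PFN_UP/PFN_DOWN count the native pages covered by an
 * unaligned physical range, and what offset the caller gets back. */
#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define PAGE_MASK   (~(PAGE_SIZE - 1))
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
        unsigned long phys_addr = 0x1000800;    /* starts in the middle of a page */
        unsigned long size      = 0x1000;       /* one EFI page worth of data */

        /* round the end up, the start down: every touched page is counted */
        unsigned long pages  = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr);
        unsigned long offset = phys_addr & ~PAGE_MASK;

        /* prints: pages=2 offset=0x800 -- two native pages are mapped and
         * the caller receives the virtual address plus that offset */
        printf("pages=%lu offset=%#lx\n", pages, offset);
        return 0;
}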