author	Matt Fleming <matt.fleming@intel.com>	2012-10-19 08:25:46 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-10-24 06:48:47 -0400
commit	3e8fa263a97079c74880675c451587bb6899e661 (patch)
tree	96921de81e9f3e5ca65299819d2ec158716cf97b /arch/x86/platform
parent	876ee61aadf01aa0db981b5d249cbdd53dc28b5e (diff)
x86/efi: Fix oops caused by incorrect set_memory_uc() usage
Calling __pa() with an ioremap'd address is invalid. If we encounter an
efi_memory_desc_t without EFI_MEMORY_WB set in ->attribute we currently
call set_memory_uc(), which in turn calls __pa() on a potentially
ioremap'd address.

On CONFIG_X86_32 this results in the following oops:

  BUG: unable to handle kernel paging request at f7f22280
  IP: [<c10257b9>] reserve_ram_pages_type+0x89/0x210
  *pdpt = 0000000001978001 *pde = 0000000001ffb067 *pte = 0000000000000000
  Oops: 0000 [#1] PREEMPT SMP
  Modules linked in:

  Pid: 0, comm: swapper Not tainted 3.0.0-acpi-efi-0805 #3
  EIP: 0060:[<c10257b9>] EFLAGS: 00010202 CPU: 0
  EIP is at reserve_ram_pages_type+0x89/0x210
  EAX: 0070e280 EBX: 38714000 ECX: f7814000 EDX: 00000000
  ESI: 00000000 EDI: 38715000 EBP: c189fef0 ESP: c189fea8
  DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068
  Process swapper (pid: 0, ti=c189e000 task=c18bbe60 task.ti=c189e000)
  Stack:
   80000200 ff108000 00000000 c189ff00 00038714 00000000 00000000 c189fed0
   c104f8ca 00038714 00000000 00038715 00000000 00000000 00038715 00000000
   00000010 38715000 c189ff48 c1025aff 38715000 00000000 00000010 00000000
  Call Trace:
   [<c104f8ca>] ? page_is_ram+0x1a/0x40
   [<c1025aff>] reserve_memtype+0xdf/0x2f0
   [<c1024dc9>] set_memory_uc+0x49/0xa0
   [<c19334d0>] efi_enter_virtual_mode+0x1c2/0x3aa
   [<c19216d4>] start_kernel+0x291/0x2f2
   [<c19211c7>] ? loglevel+0x1b/0x1b
   [<c19210bf>] i386_start_kernel+0xbf/0xc8

The only time we can call set_memory_uc() for a memory region is when it
is part of the direct kernel mapping. For the case where we ioremap a
memory region we must leave it alone.

This patch reimplements the fix from e8c7106280a3 ("x86, efi: Calling
__pa() with an ioremap()ed address is invalid") which was reverted in
e1ad783b12ec because it caused a regression on some MacBooks (they hung
at boot). The regression was caused because the commit only marked
EFI_RUNTIME_SERVICES_DATA as E820_RESERVED_EFI, when it should have
marked all regions that have the EFI_MEMORY_RUNTIME attribute.

Despite first impressions, it's not possible to use ioremap_cache() to
map all cached memory regions on CONFIG_X86_64 because of the way that
the memory map might be configured as detailed in the following bug
report,

  https://bugzilla.redhat.com/show_bug.cgi?id=748516

e.g. some of the EFI memory regions *need* to be mapped as part of the
direct kernel mapping.

Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Cc: Matthew Garrett <mjg@redhat.com>
Cc: Zhang Rui <rui.zhang@intel.com>
Cc: Huang Ying <huang.ying.caritas@gmail.com>
Cc: Keith Packard <keithp@keithp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/1350649546-23541-1-git-send-email-matt@console-pimps.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
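To make the failure mode concrete, the following is a minimal userspace model of the problem, not kernel code: __pa() effectively subtracts the start of the direct mapping from the virtual address, which is only meaningful for direct-mapped addresses; an ioremap()'d address lives in a different virtual range, so the same subtraction yields a bogus physical address, which is what reserve_ram_pages_type() then walked into in the oops above. The PAGE_OFFSET_MODEL and IOREMAP_BASE_MODEL values and model_pa() below are illustrative placeholders, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET_MODEL   0xc0000000UL  /* illustrative 32-bit lowmem base */
#define IOREMAP_BASE_MODEL  0xf7800000UL  /* illustrative ioremap/vmalloc area */

/* What __pa() effectively does: valid only for direct-mapped addresses. */
static uintptr_t model_pa(uintptr_t vaddr)
{
	return vaddr - PAGE_OFFSET_MODEL;
}

int main(void)
{
	uintptr_t direct   = PAGE_OFFSET_MODEL + 0x00100000;   /* lowmem, direct-mapped */
	uintptr_t iomapped = IOREMAP_BASE_MODEL + 0x00022280;  /* came from ioremap()   */

	printf("model_pa(direct)   = %#lx  (meaningful)\n",
	       (unsigned long)model_pa(direct));
	printf("model_pa(iomapped) = %#lx  (nonsense -> bad page table walk)\n",
	       (unsigned long)model_pa(iomapped));
	return 0;
}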
Diffstat (limited to 'arch/x86/platform')
-rw-r--r--	arch/x86/platform/efi/efi.c	29
-rw-r--r--	arch/x86/platform/efi/efi_64.c	7
2 files changed, 23 insertions, 13 deletions
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index aded2a91162a..cb34839c97c5 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -810,6 +810,16 @@ void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
 	return NULL;
 }
 
+void efi_memory_uc(u64 addr, unsigned long size)
+{
+	unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
+	u64 npages;
+
+	npages = round_up(size, page_shift) / page_shift;
+	memrange_efi_to_native(&addr, &npages);
+	set_memory_uc(addr, npages);
+}
+
 /*
  * This function will switch the EFI runtime services to virtual mode.
  * Essentially, look through the EFI memmap and map every region that
@@ -823,7 +833,7 @@ void __init efi_enter_virtual_mode(void)
 	efi_memory_desc_t *md, *prev_md = NULL;
 	efi_status_t status;
 	unsigned long size;
-	u64 end, systab, addr, npages, end_pfn;
+	u64 end, systab, end_pfn;
 	void *p, *va, *new_memmap = NULL;
 	int count = 0;
 
@@ -879,10 +889,14 @@ void __init efi_enter_virtual_mode(void)
 		end_pfn = PFN_UP(end);
 		if (end_pfn <= max_low_pfn_mapped
 		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
-			&& end_pfn <= max_pfn_mapped))
+			&& end_pfn <= max_pfn_mapped)) {
 			va = __va(md->phys_addr);
-		else
-			va = efi_ioremap(md->phys_addr, size, md->type);
+
+			if (!(md->attribute & EFI_MEMORY_WB))
+				efi_memory_uc((u64)(unsigned long)va, size);
+		} else
+			va = efi_ioremap(md->phys_addr, size,
+					 md->type, md->attribute);
 
 		md->virt_addr = (u64) (unsigned long) va;
 
@@ -892,13 +906,6 @@ void __init efi_enter_virtual_mode(void)
 			continue;
 		}
 
-		if (!(md->attribute & EFI_MEMORY_WB)) {
-			addr = md->virt_addr;
-			npages = md->num_pages;
-			memrange_efi_to_native(&addr, &npages);
-			set_memory_uc(addr, npages);
-		}
-
 		systab = (u64) (unsigned long) efi_phys.systab;
 		if (md->phys_addr <= systab && systab < end) {
 			systab += md->virt_addr - md->phys_addr;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac3aa54e2654..95fd505dfeb6 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -82,7 +82,7 @@ void __init efi_call_phys_epilog(void)
 }
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
-				 u32 type)
+				 u32 type, u64 attribute)
 {
 	unsigned long last_map_pfn;
 
@@ -92,8 +92,11 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
 	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
 	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
 		unsigned long top = last_map_pfn << PAGE_SHIFT;
-		efi_ioremap(top, size - (top - phys_addr), type);
+		efi_ioremap(top, size - (top - phys_addr), type, attribute);
 	}
 
+	if (!(attribute & EFI_MEMORY_WB))
+		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
+
 	return (void __iomem *)__va(phys_addr);
 }
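For readers following the control flow rather than the individual hunks, the shape of the mapping logic after this patch can be summarized with a small, compilable sketch. This is not the kernel implementation: direct_map(), io_map() and mark_uncached() are hypothetical stand-ins for __va(), efi_ioremap() and the new efi_memory_uc(), and the covered_by_direct_mapping flag stands in for the end_pfn checks against max_low_pfn_mapped/max_pfn_mapped.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFI_MEMORY_WB 0x8ULL	/* write-back attribute bit (UEFI spec value) */

/* Stand-in for __va(): returns the direct-mapping alias of a physical address. */
static void *direct_map(uint64_t phys)
{
	printf("direct map   %#llx\n", (unsigned long long)phys);
	return (void *)(uintptr_t)phys;		/* placeholder value for the model */
}

/* Stand-in for efi_ioremap(): maps the region and honours the attribute itself. */
static void *io_map(uint64_t phys, uint64_t size, uint64_t attribute)
{
	printf("efi_ioremap  %#llx (+%llu), uncached=%d\n",
	       (unsigned long long)phys, (unsigned long long)size,
	       !(attribute & EFI_MEMORY_WB));
	return (void *)(uintptr_t)phys;		/* placeholder value for the model */
}

/* Stand-in for efi_memory_uc()/set_memory_uc(). */
static void mark_uncached(void *va, uint64_t size)
{
	printf("set UC       %p (+%llu)\n", va, (unsigned long long)size);
}

/*
 * The point of the restructuring: the uncached attribute is applied inside
 * the branch that created the mapping, so it is only ever applied to an
 * address known to be part of the direct mapping.
 */
static void *map_efi_region(uint64_t phys, uint64_t size, uint64_t attribute,
			    bool covered_by_direct_mapping)
{
	void *va;

	if (covered_by_direct_mapping) {
		va = direct_map(phys);
		if (!(attribute & EFI_MEMORY_WB))
			mark_uncached(va, size);
	} else {
		va = io_map(phys, size, attribute);
	}
	return va;
}

int main(void)
{
	map_efi_region(0x00100000, 0x1000, EFI_MEMORY_WB, true);	/* cached, direct-mapped */
	map_efi_region(0xfed00000, 0x1000, 0, false);			/* uncached, ioremapped  */
	return 0;
}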