path: root/arch
author    Laurent Dufour <ldufour@linux.vnet.ibm.com>    2014-02-24 11:30:55 -0500
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2014-03-07 00:30:08 -0500
commit    fb22dbab12b48bb19498df8e796b05be4608bd0d (patch)
tree      530aef647de3e6feac80810028cddea5ef2fbc87 /arch
parent    64747d3d2fd4fcf93678ccc785327371e35a752e (diff)
powerpc/crashdump: Fix page frame number check in copy_oldmem_page
commit f5295bd8ea8a65dc5eac608b151386314cb978f1 upstream.

In copy_oldmem_page, the current check using max_pfn and min_low_pfn to
decide whether the page is backed is not valid when the memory layout is
not contiguous.

This happens when running as a QEMU/KVM guest, where RTAS is mapped higher
in memory. In that case max_pfn points to the end of RTAS, and a hole
between the end of the kdump kernel and RTAS is not backed by PTEs. As a
consequence, the kdump kernel crashes in copy_oldmem_page when accessing
the pages in that hole directly.

This fix relies on the memblock service memblock_is_region_memory to check
whether the page being read is part of the directly accessible memory.

Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Tested-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
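For reference, the mapping decision introduced by this patch can be sketched
roughly as follows. This is a simplified paraphrase of the hunk below, not
the verbatim kernel source; the helper name map_oldmem_page is made up here
for illustration only.

#include <linux/memblock.h>	/* memblock_is_region_memory() */
#include <linux/io.h>		/* __ioremap(), iounmap() */
#include <asm/page.h>		/* PAGE_SHIFT, PAGE_SIZE, __va() */

/*
 * Hypothetical helper, for illustration: choose how to map one page of
 * the old kernel's memory.  If the region is registered in memblock it
 * is covered by the linear mapping and __va() is sufficient; otherwise
 * (e.g. the hole below RTAS in a QEMU/KVM guest) it must be remapped
 * with __ioremap() and later released with iounmap().
 */
static void *map_oldmem_page(unsigned long pfn, size_t csize)
{
	phys_addr_t paddr = pfn << PAGE_SHIFT;

	if (memblock_is_region_memory(paddr, csize))
		return __va(paddr);

	return __ioremap(paddr, PAGE_SIZE, 0);
}

The actual patch keeps the two branches inline in copy_oldmem_page, so that
iounmap() is only called on the __ioremap() path, as shown in the diff below.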
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/crash_dump.c | 8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 9ec3fe174cba..555ae67e4086 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -108,17 +108,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 		size_t csize, unsigned long offset, int userbuf)
 {
 	void *vaddr;
+	phys_addr_t paddr;
 
 	if (!csize)
 		return 0;
 
 	csize = min_t(size_t, csize, PAGE_SIZE);
+	paddr = pfn << PAGE_SHIFT;
 
-	if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
-		vaddr = __va(pfn << PAGE_SHIFT);
+	if (memblock_is_region_memory(paddr, csize)) {
+		vaddr = __va(paddr);
 		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
 	} else {
-		vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
+		vaddr = __ioremap(paddr, PAGE_SIZE, 0);
 		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
 		iounmap(vaddr);
 	}