author	Vivek Goyal <vgoyal@redhat.com>	2009-11-24 01:50:03 -0500
committer	Tejun Heo <tj@kernel.org>	2009-11-25 07:49:22 -0500
commit	3b034b0d084221596bf35c8d893e1d4d5477b9cc (patch)
tree	c3b9f33dc9fc748a9460036ae8647b16541a7547
parent	833af8427be4b217b5bc522f61afdbd3f1d282c2 (diff)
percpu: Fix kdump failure if booted with percpu_alloc=page
o kdump functionality reserves a per cpu area at boot time and exports the
  physical address of that area to user space through the sys interface.
  This area stores some dump related information like cpu register states
  etc. at the time of crash.

o We were assuming that the per cpu area always comes from the linearly
  mapped memory region and were using __pa() to determine the physical
  address. With percpu_alloc=page, the per cpu area can also come from the
  vmalloc region, and __pa() breaks.

o This patch implements a new function to convert a per cpu address to a
  physical address.

Before the patch, crash_notes addresses looked as follows.

cpu0 60fffff49800
cpu1 60fffff60800
cpu2 60fffff77800

These are bogus physical addresses.

After the patch, the addresses are as follows.

cpu0 13eb44000
cpu1 13eb43000
cpu2 13eb42000
cpu3 13eb41000

These look fine. I have 4G of memory and /proc/iomem tells me the following.

100000000-13fffffff : System RAM

tj: * added missing asm/io.h include reported by Stephen Rothwell
    * repositioned per_cpu_ptr_to_phys() in percpu.c and added comment.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
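As a rough illustration of the check described above (this is not part of the patch), the exported crash_notes value for each CPU can be read back and compared against the System RAM ranges in /proc/iomem. The program below is a minimal userspace sketch assuming the usual /sys/devices/system/cpu/cpuN/crash_notes attribute created by drivers/base/cpu.c; error handling is kept to a minimum.

/*
 * Sketch: print each CPU's crash_notes physical address so it can be
 * checked against the "System RAM" ranges in /proc/iomem.
 */
#include <stdio.h>

int main(void)
{
	char path[64];
	unsigned long long addr;
	int cpu;

	for (cpu = 0; ; cpu++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/crash_notes", cpu);
		f = fopen(path, "r");
		if (!f)
			break;		/* no more CPUs */
		if (fscanf(f, "%llx", &addr) == 1)
			printf("cpu%d %llx\n", cpu, addr);
		fclose(f);
	}
	return 0;
}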
-rw-r--r--	drivers/base/cpu.c	2
-rw-r--r--	include/linux/percpu.h	1
-rw-r--r--	mm/percpu.c	22
3 files changed, 24 insertions, 1 deletion
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e62a4ccea54d..69ee5b7517ec 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -97,7 +97,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
 	 * boot up and this data does not change there after. Hence this
 	 * operation should be safe. No locking required.
 	 */
-	addr = __pa(per_cpu_ptr(crash_notes, cpunum));
+	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
 	rc = sprintf(buf, "%Lx\n", addr);
 	return rc;
 }
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 878836ca999c..6ac984fa34f8 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -154,6 +154,7 @@ struct percpu_data {
 
 extern void *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
diff --git a/mm/percpu.c b/mm/percpu.c
index 5adfc268b408..008fbd9e6fa4 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -74,6 +74,7 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>
 
 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
@@ -1302,6 +1303,27 @@ void free_percpu(void *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
+/**
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
+ * Given @addr which is dereferenceable address obtained via one of
+ * percpu access macros, this function translates it into its physical
+ * address.  The caller is responsible for ensuring @addr stays valid
+ * until this function finishes.
+ *
+ * RETURNS:
+ * The physical address for @addr.
+ */
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+	if ((unsigned long)addr < VMALLOC_START ||
+	    (unsigned long)addr >= VMALLOC_END)
+		return __pa(addr);
+	else
+		return page_to_phys(vmalloc_to_page(addr));
+}
+
 static inline size_t pcpu_calc_fc_sizes(size_t static_size,
 					size_t reserved_size,
 					ssize_t *dyn_sizep)
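For reference, a caller outside of kdump could use the new helper the same way the crash_notes code above does. The module below is a hypothetical sketch, not part of this patch (demo_counter, demo_init and demo_exit are made-up names); it allocates a dynamic per-cpu variable and prints the physical address of each CPU's copy, which per_cpu_ptr_to_phys() resolves correctly whether the per-cpu chunk sits in the linear mapping or, with percpu_alloc=page, in the vmalloc area.

/* Hypothetical example module, not part of this patch. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/percpu.h>

static unsigned long *demo_counter;	/* made-up dynamic per-cpu variable */

static int __init demo_init(void)
{
	int cpu;

	demo_counter = alloc_percpu(unsigned long);
	if (!demo_counter)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		phys_addr_t phys;

		/* same pattern as show_crash_notes() above */
		phys = per_cpu_ptr_to_phys(per_cpu_ptr(demo_counter, cpu));
		printk(KERN_INFO "cpu%d: demo_counter at phys %llx\n",
		       cpu, (unsigned long long)phys);
	}
	return 0;
}

static void __exit demo_exit(void)
{
	free_percpu(demo_counter);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");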