diff options
author | Paul Mundt <lethal@linux-sh.org> | 2010-05-11 00:52:50 -0400 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2010-05-11 00:52:50 -0400 |
commit | 21823259a70b7a2a21eea1d48c25a6f38896dd11 (patch) | |
tree | e1bc3e69cbf01534e77a89e450ea047d261da510 /arch/sh/kernel/setup.c | |
parent | dfbca89987b74c34d9b1a2414b0e5ccee65347e0 (diff) |
sh: Ensure active regions have a backing PMB entry.
In the NUMA or memory hot-add case where system memory has been
partitioned up, we immediately run into a situation where the existing
PMB entry doesn't cover the new range (primarily as a result of the entry
size being shrunk to match the node size early in the initialization). In
order to fix this up it's necessary to preload a PMB mapping for the new
range prior to activation in order to circumvent reset by MMU.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/kernel/setup.c')
-rw-r--r-- | arch/sh/kernel/setup.c | 17 |
1 files changed, 15 insertions, 2 deletions
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 61404ed01449..57bd93838f15 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c | |||
@@ -191,13 +191,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, | |||
191 | unsigned long end_pfn) | 191 | unsigned long end_pfn) |
192 | { | 192 | { |
193 | struct resource *res = &mem_resources[nid]; | 193 | struct resource *res = &mem_resources[nid]; |
194 | unsigned long start, end; | ||
194 | 195 | ||
195 | WARN_ON(res->name); /* max one active range per node for now */ | 196 | WARN_ON(res->name); /* max one active range per node for now */ |
196 | 197 | ||
198 | start = start_pfn << PAGE_SHIFT; | ||
199 | end = end_pfn << PAGE_SHIFT; | ||
200 | |||
197 | res->name = "System RAM"; | 201 | res->name = "System RAM"; |
198 | res->start = start_pfn << PAGE_SHIFT; | 202 | res->start = start; |
199 | res->end = (end_pfn << PAGE_SHIFT) - 1; | 203 | res->end = end - 1; |
200 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 204 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
205 | |||
201 | if (request_resource(&iomem_resource, res)) { | 206 | if (request_resource(&iomem_resource, res)) { |
202 | pr_err("unable to request memory_resource 0x%lx 0x%lx\n", | 207 | pr_err("unable to request memory_resource 0x%lx 0x%lx\n", |
203 | start_pfn, end_pfn); | 208 | start_pfn, end_pfn); |
@@ -213,6 +218,14 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, | |||
213 | request_resource(res, &data_resource); | 218 | request_resource(res, &data_resource); |
214 | request_resource(res, &bss_resource); | 219 | request_resource(res, &bss_resource); |
215 | 220 | ||
221 | /* | ||
222 | * Also make sure that there is a PMB mapping that covers this | ||
223 | * range before we attempt to activate it, to avoid reset by MMU. | ||
224 | * We can hit this path with NUMA or memory hot-add. | ||
225 | */ | ||
226 | pmb_bolt_mapping((unsigned long)__va(start), start, end - start, | ||
227 | PAGE_KERNEL); | ||
228 | |||
216 | add_active_range(nid, start_pfn, end_pfn); | 229 | add_active_range(nid, start_pfn, end_pfn); |
217 | } | 230 | } |
218 | 231 | ||