author		Paul Mundt <lethal@linux-sh.org>	2010-05-11 00:32:19 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2010-05-11 00:32:19 -0400
commit		4bc277ac9cae60e11fe2e557e4ea4acb56d3dc9a (patch)
tree		c9ed42c605674b2e41d8c962ad25c021fd518dbc /arch/sh/mm
parent		5e2ff328c0668794ff408a4632f5b8a62827571f (diff)
sh: bootmem refactoring.
This reworks much of the bootmem setup and initialization code, allowing us to get rid of the duplicate work between the NUMA and non-NUMA cases. The end result is that we end up with a much more flexible interface for supporting more complex topologies (fake NUMA, highmem, etc.) which is entirely LMB backed. This is an incremental step towards further NUMA work, as well as towards gradually migrating off of bootmem entirely.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
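For orientation, the new boot path (see do_init_bootmem() in the diff below) walks every memory region registered with LMB, folds its PFN range into node 0 for the non-NUMA case, and only then runs the platform hook and per-node bootmem setup. The following is a minimal, userspace-runnable sketch of just that region walk; the region table and helper names here are illustrative stand-ins, not the kernel's LMB API.

    #include <stdio.h>

    /* Illustrative stand-ins for the LMB region table; not kernel code. */
    #define PAGE_SHIFT	12
    #define MAX_REGIONS	4

    struct region {
    	unsigned long base;	/* physical base address */
    	unsigned long size;	/* size in bytes */
    };

    /* Assumed example: one 64MB bank at a typical sh __MEMORY_START. */
    static struct region memory[MAX_REGIONS] = {
    	{ 0x08000000UL, 64UL << 20 },
    };
    static int memory_cnt = 1;

    static unsigned long node0_start_pfn = ~0UL;
    static unsigned long node0_end_pfn;

    /* Mirrors the loop in do_init_bootmem(): fold each region's PFN
     * range into node 0 (the non-NUMA case). */
    static void add_active_range(unsigned long start_pfn, unsigned long end_pfn)
    {
    	if (start_pfn < node0_start_pfn)
    		node0_start_pfn = start_pfn;
    	if (end_pfn > node0_end_pfn)
    		node0_end_pfn = end_pfn;
    }

    int main(void)
    {
    	int i;

    	for (i = 0; i < memory_cnt; i++) {
    		unsigned long start_pfn = memory[i].base >> PAGE_SHIFT;
    		unsigned long end_pfn = start_pfn + (memory[i].size >> PAGE_SHIFT);

    		add_active_range(start_pfn, end_pfn);
    	}

    	/* allocate_pgdat(0) would now record this span for node 0, before
    	 * plat_mem_setup() and bootmem_init_one_node() run. */
    	printf("node 0: PFN 0x%lx - 0x%lx (%lu pages)\n",
    	       node0_start_pfn, node0_end_pfn,
    	       node0_end_pfn - node0_start_pfn);
    	return 0;
    }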
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/init.c	165
1 file changed, 162 insertions(+), 3 deletions(-)
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 9c5400b02f43..7f3cb5254abb 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -17,11 +17,14 @@
 #include <linux/percpu.h>
 #include <linux/io.h>
 #include <linux/lmb.h>
+#include <linux/kexec.h>
 #include <linux/dma-mapping.h>
 #include <asm/mmu_context.h>
+#include <asm/mmzone.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/cache.h>
 #include <asm/sizes.h>
 
@@ -33,6 +36,11 @@ void __init generic_mem_init(void)
 	lmb_add(__MEMORY_START, __MEMORY_SIZE);
 }
 
+void __init __weak plat_mem_setup(void)
+{
+	/* Nothing to see here, move along. */
+}
+
 #ifdef CONFIG_MMU
 static pte_t *__get_pte_phys(unsigned long addr)
 {
@@ -158,15 +166,166 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 }
 #endif /* CONFIG_MMU */
 
-/*
- * paging_init() sets up the page tables
- */
+void __init allocate_pgdat(unsigned int nid)
+{
+	unsigned long start_pfn, end_pfn;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	unsigned long phys;
+#endif
+
+	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	phys = __lmb_alloc_base(sizeof(struct pglist_data),
+				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
+	/* Retry with all of system memory */
+	if (!phys)
+		phys = __lmb_alloc_base(sizeof(struct pglist_data),
+					SMP_CACHE_BYTES, lmb_end_of_DRAM());
+	if (!phys)
+		panic("Can't allocate pgdat for node %d\n", nid);
+
+	NODE_DATA(nid) = __va(phys);
+	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+#endif
+
+	NODE_DATA(nid)->node_start_pfn = start_pfn;
+	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
+static void __init bootmem_init_one_node(unsigned int nid)
+{
+	unsigned long total_pages, paddr;
+	unsigned long end_pfn;
+	struct pglist_data *p;
+	int i;
+
+	p = NODE_DATA(nid);
+
+	/* Nothing to do.. */
+	if (!p->node_spanned_pages)
+		return;
+
+	end_pfn = p->node_start_pfn + p->node_spanned_pages;
+
+	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
+
+	paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+	if (!paddr)
+		panic("Can't allocate bootmap for nid[%d]\n", nid);
+
+	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
+
+	free_bootmem_with_active_regions(nid, end_pfn);
+
+	/*
+	 * XXX Handle initial reservations for the system memory node
+	 * only for the moment, we'll refactor this later for handling
+	 * reservations in other nodes.
+	 */
+	if (nid == 0) {
+		/* Reserve the sections we're already using. */
+		for (i = 0; i < lmb.reserved.cnt; i++)
+			reserve_bootmem(lmb.reserved.region[i].base,
+					lmb_size_bytes(&lmb.reserved, i),
+					BOOTMEM_DEFAULT);
+	}
+
+	sparse_memory_present_with_active_regions(nid);
+}
+
+static void __init do_init_bootmem(void)
+{
+	int i;
+
+	/* Add active regions with valid PFNs. */
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long start_pfn, end_pfn;
+		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+		__add_active_range(0, start_pfn, end_pfn);
+	}
+
+	/* All of system RAM sits in node 0 for the non-NUMA case */
+	allocate_pgdat(0);
+	node_set_online(0);
+
+	plat_mem_setup();
+
+	for_each_online_node(i)
+		bootmem_init_one_node(i);
+
+	sparse_init();
+}
+
+static void __init early_reserve_mem(void)
+{
+	unsigned long start_pfn;
+
+	/*
+	 * Partially used pages are not usable - thus
+	 * we are rounding upwards:
+	 */
+	start_pfn = PFN_UP(__pa(_end));
+
+	/*
+	 * Reserve the kernel text and Reserve the bootmem bitmap. We do
+	 * this in two steps (first step was init_bootmem()), because
+	 * this catches the (definitely buggy) case of us accidentally
+	 * initializing the bootmem allocator with an invalid RAM area.
+	 */
+	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
+		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+
+	/*
+	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+	 */
+	if (CONFIG_ZERO_PAGE_OFFSET != 0)
+		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+	/*
+	 * Handle additional early reservations
+	 */
+	check_for_initrd();
+	reserve_crashkernel();
+}
+
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	unsigned long vaddr, end;
 	int nid;
 
+	lmb_init();
+
+	sh_mv.mv_mem_init();
+
+	early_reserve_mem();
+
+	lmb_enforce_memory_limit(memory_limit);
+	lmb_analyze();
+
+	lmb_dump_all();
+
+	/*
+	 * Determine low and high memory ranges:
+	 */
+	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
+
+	nodes_clear(node_online_map);
+
+	memory_start = (unsigned long)__va(__MEMORY_START);
+	memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+
+	uncached_init();
+	pmb_init();
+	do_init_bootmem();
+	ioremap_fixed_init();
+
 	/* We don't need to map the kernel through the TLB, as
 	 * it is permanatly mapped using P1. So clear the
 	 * entire pgd. */