Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/include/asm/mmzone.h |   2
-rw-r--r--  arch/sh/include/asm/setup.h  |   1
-rw-r--r--  arch/sh/kernel/setup.c       | 143
-rw-r--r--  arch/sh/mm/init.c            | 165
4 files changed, 169 insertions(+), 142 deletions(-)
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index 94f04b2f4fb1..8887baff5eff 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -44,6 +44,8 @@ void __init plat_mem_setup(void);
 /* arch/sh/kernel/setup.c */
 void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
			       unsigned long end_pfn);
+/* arch/sh/mm/init.c */
+void __init allocate_pgdat(unsigned int nid);
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_MMZONE_H */
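
The newly exported allocate_pgdat() pairs with __add_active_range() above: it lets NUMA platform code construct the pgdat for a secondary node before the per-node bootmem pass in mm/init.c runs. A minimal sketch of how a hypothetical NUMA board's plat_mem_setup() might use it — the nid and PFN bounds are made up, and it assumes the platform's mv_mem_init() already registered this memory block with lmb:

#include <linux/nodemask.h>
#include <asm/mmzone.h>

void __init plat_mem_setup(void)
{
	/* Hypothetical PFN range for a second memory block. */
	unsigned long start_pfn = 0x10000, end_pfn = 0x14000;

	/* Make the range visible to get_pfn_range_for_nid()... */
	__add_active_range(1, start_pfn, end_pfn);

	/* ...so allocate_pgdat() can size and place node 1's pgdat... */
	allocate_pgdat(1);

	/* ...and bootmem_init_one_node() sees it via for_each_online_node(). */
	node_set_online(1);
}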
diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
index 4758325bb24a..01fa17a3d759 100644
--- a/arch/sh/include/asm/setup.h
+++ b/arch/sh/include/asm/setup.h
@@ -19,6 +19,7 @@
 #define COMMAND_LINE ((char *) (PARAM+0x100))
 
 void sh_mv_setup(void);
+void check_for_initrd(void);
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index f6a2db12ad78..61404ed01449 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -114,31 +114,7 @@ static int __init early_parse_mem(char *p)
 }
 early_param("mem", early_parse_mem);
 
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(void)
-{
-	unsigned long curr_pfn, last_pfn, pages;
-
-	/*
-	 * We are rounding up the start address of usable memory:
-	 */
-	curr_pfn = PFN_UP(__MEMORY_START);
-
-	/*
-	 * ... and at the end of the usable range downwards:
-	 */
-	last_pfn = PFN_DOWN(__pa(memory_end));
-
-	if (last_pfn > max_low_pfn)
-		last_pfn = max_low_pfn;
-
-	pages = last_pfn - curr_pfn;
-	free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
-}
-
-static void __init check_for_initrd(void)
+void __init check_for_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
 	unsigned long start, end;
@@ -240,85 +216,6 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 	add_active_range(nid, start_pfn, end_pfn);
 }
 
-void __init do_init_bootmem(void)
-{
-	unsigned long bootmap_size;
-	unsigned long bootmap_pages, bootmem_paddr;
-	u64 total_pages = lmb_phys_mem_size() >> PAGE_SHIFT;
-	int i;
-
-	bootmap_pages = bootmem_bootmap_pages(total_pages);
-
-	bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-
-	/*
-	 * Find a proper area for the bootmem bitmap. After this
-	 * bootstrap step all allocations (until the page allocator
-	 * is intact) must be done via bootmem_alloc().
-	 */
-	bootmap_size = init_bootmem_node(NODE_DATA(0),
-					 bootmem_paddr >> PAGE_SHIFT,
-					 min_low_pfn, max_low_pfn);
-
-	/* Add active regions with valid PFNs. */
-	for (i = 0; i < lmb.memory.cnt; i++) {
-		unsigned long start_pfn, end_pfn;
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
-		__add_active_range(0, start_pfn, end_pfn);
-	}
-
-	/*
-	 * Add all physical memory to the bootmem map and mark each
-	 * area as present.
-	 */
-	register_bootmem_low_pages();
-
-	/* Reserve the sections we're already using. */
-	for (i = 0; i < lmb.reserved.cnt; i++)
-		reserve_bootmem(lmb.reserved.region[i].base,
-				lmb_size_bytes(&lmb.reserved, i),
-				BOOTMEM_DEFAULT);
-
-	node_set_online(0);
-
-	sparse_memory_present_with_active_regions(0);
-}
-
-static void __init early_reserve_mem(void)
-{
-	unsigned long start_pfn;
-
-	/*
-	 * Partially used pages are not usable - thus
-	 * we are rounding upwards:
-	 */
-	start_pfn = PFN_UP(__pa(_end));
-
-	/*
-	 * Reserve the kernel text and
-	 * Reserve the bootmem bitmap. We do this in two steps (first step
-	 * was init_bootmem()), because this catches the (definitely buggy)
-	 * case of us accidentally initializing the bootmem allocator with
-	 * an invalid RAM area.
-	 */
-	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
-		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
-		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
-
-	/*
-	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
-	 */
-	if (CONFIG_ZERO_PAGE_OFFSET != 0)
-		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
-
-	/*
-	 * Handle additional early reservations
-	 */
-	check_for_initrd();
-	reserve_crashkernel();
-}
-
 /*
  * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
  * is_kdump_kernel() to determine if we are booting after a panic. Hence
@@ -342,10 +239,6 @@ void __init __weak plat_early_device_setup(void)
 {
 }
 
-void __init __weak plat_mem_setup(void)
-{
-}
-
 void __init setup_arch(char **cmdline_p)
 {
 	enable_mmu();
@@ -401,44 +294,16 @@ void __init setup_arch(char **cmdline_p)
 
 	plat_early_device_setup();
 
-	/* Let earlyprintk output early console messages */
-	early_platform_driver_probe("earlyprintk", 1, 1);
-
-	lmb_init();
-
 	sh_mv_setup();
-	sh_mv.mv_mem_init();
-
-	early_reserve_mem();
 
-	lmb_enforce_memory_limit(memory_limit);
-	lmb_analyze();
-
-	lmb_dump_all();
-
-	/*
-	 * Determine low and high memory ranges:
-	 */
-	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
-
-	nodes_clear(node_online_map);
-
-	memory_start = (unsigned long)__va(__MEMORY_START);
-	memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+	/* Let earlyprintk output early console messages */
+	early_platform_driver_probe("earlyprintk", 1, 1);
 
-	uncached_init();
-	pmb_init();
-	do_init_bootmem();
-	plat_mem_setup();
-	sparse_init();
+	paging_init();
 
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
 #endif
-	paging_init();
-
-	ioremap_fixed_init();
 
 	/* Perform the machine specific initialisation */
 	if (likely(sh_mv.mv_setup))
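
For reference, the memory_limit that the relocated paging_init() goes on to enforce is still produced by the mem= early param at the top of this file (visible in the first hunk header above). The pattern is roughly the following sketch — the exact body is not shown in this diff, so treat it as illustrative:

static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	/* e.g. mem=64M: remembered here, clamped later via
	 * lmb_enforce_memory_limit() in paging_init(). */
	memory_limit = PAGE_ALIGN(memparse(p, &p));

	return 0;
}
early_param("mem", early_parse_mem);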
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 9c5400b02f43..7f3cb5254abb 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -17,11 +17,14 @@
 #include <linux/percpu.h>
 #include <linux/io.h>
 #include <linux/lmb.h>
+#include <linux/kexec.h>
 #include <linux/dma-mapping.h>
 #include <asm/mmu_context.h>
+#include <asm/mmzone.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/cache.h>
 #include <asm/sizes.h>
 
@@ -33,6 +36,11 @@ void __init generic_mem_init(void)
 	lmb_add(__MEMORY_START, __MEMORY_SIZE);
 }
 
+void __init __weak plat_mem_setup(void)
+{
+	/* Nothing to see here, move along. */
+}
+
 #ifdef CONFIG_MMU
 static pte_t *__get_pte_phys(unsigned long addr)
 {
@@ -158,15 +166,166 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 }
 #endif /* CONFIG_MMU */
 
-/*
- * paging_init() sets up the page tables
- */
+void __init allocate_pgdat(unsigned int nid)
+{
+	unsigned long start_pfn, end_pfn;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	unsigned long phys;
+#endif
+
+	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	phys = __lmb_alloc_base(sizeof(struct pglist_data),
+				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
+	/* Retry with all of system memory */
+	if (!phys)
+		phys = __lmb_alloc_base(sizeof(struct pglist_data),
+					SMP_CACHE_BYTES, lmb_end_of_DRAM());
+	if (!phys)
+		panic("Can't allocate pgdat for node %d\n", nid);
+
+	NODE_DATA(nid) = __va(phys);
+	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+#endif
+
+	NODE_DATA(nid)->node_start_pfn = start_pfn;
+	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
+static void __init bootmem_init_one_node(unsigned int nid)
+{
+	unsigned long total_pages, paddr;
+	unsigned long end_pfn;
+	struct pglist_data *p;
+	int i;
+
+	p = NODE_DATA(nid);
+
+	/* Nothing to do.. */
+	if (!p->node_spanned_pages)
+		return;
+
+	end_pfn = p->node_start_pfn + p->node_spanned_pages;
+
+	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
+
+	paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+	if (!paddr)
+		panic("Can't allocate bootmap for nid[%d]\n", nid);
+
+	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
+
+	free_bootmem_with_active_regions(nid, end_pfn);
+
+	/*
+	 * XXX Handle initial reservations for the system memory node
+	 * only for the moment, we'll refactor this later for handling
+	 * reservations in other nodes.
+	 */
+	if (nid == 0) {
+		/* Reserve the sections we're already using. */
+		for (i = 0; i < lmb.reserved.cnt; i++)
+			reserve_bootmem(lmb.reserved.region[i].base,
+					lmb_size_bytes(&lmb.reserved, i),
+					BOOTMEM_DEFAULT);
+	}
+
+	sparse_memory_present_with_active_regions(nid);
+}
+
+static void __init do_init_bootmem(void)
+{
+	int i;
+
+	/* Add active regions with valid PFNs. */
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long start_pfn, end_pfn;
+		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+		__add_active_range(0, start_pfn, end_pfn);
+	}
+
+	/* All of system RAM sits in node 0 for the non-NUMA case */
+	allocate_pgdat(0);
+	node_set_online(0);
+
+	plat_mem_setup();
+
+	for_each_online_node(i)
+		bootmem_init_one_node(i);
+
+	sparse_init();
+}
+
+static void __init early_reserve_mem(void)
+{
+	unsigned long start_pfn;
+
+	/*
+	 * Partially used pages are not usable - thus
+	 * we are rounding upwards:
+	 */
+	start_pfn = PFN_UP(__pa(_end));
+
+	/*
+	 * Reserve the kernel text and Reserve the bootmem bitmap. We do
+	 * this in two steps (first step was init_bootmem()), because
+	 * this catches the (definitely buggy) case of us accidentally
+	 * initializing the bootmem allocator with an invalid RAM area.
+	 */
+	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
+		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+
+	/*
+	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+	 */
+	if (CONFIG_ZERO_PAGE_OFFSET != 0)
+		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+	/*
+	 * Handle additional early reservations
+	 */
+	check_for_initrd();
+	reserve_crashkernel();
+}
+
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	unsigned long vaddr, end;
 	int nid;
 
+	lmb_init();
+
+	sh_mv.mv_mem_init();
+
+	early_reserve_mem();
+
+	lmb_enforce_memory_limit(memory_limit);
+	lmb_analyze();
+
+	lmb_dump_all();
+
+	/*
+	 * Determine low and high memory ranges:
+	 */
+	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
+
+	nodes_clear(node_online_map);
+
+	memory_start = (unsigned long)__va(__MEMORY_START);
+	memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+
+	uncached_init();
+	pmb_init();
+	do_init_bootmem();
+	ioremap_fixed_init();
+
 	/* We don't need to map the kernel through the TLB, as
 	 * it is permanatly mapped using P1. So clear the
 	 * entire pgd. */
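
Taken together with the setup.c side of the patch, the early memory bring-up now hangs entirely off paging_init(). A summary of the resulting call order, as far as this diff shows it:

/*
 * Boot-time memory init order after this patch:
 *
 * setup_arch()
 *   paging_init()                          arch/sh/mm/init.c
 *     lmb_init();                          bring up the lmb allocator
 *     sh_mv.mv_mem_init();                 platform registers its RAM with lmb
 *     early_reserve_mem();                 kernel image, zero page, initrd,
 *                                          crashkernel
 *     lmb_enforce_memory_limit(memory_limit);
 *     lmb_analyze(); lmb_dump_all();
 *     uncached_init(); pmb_init();
 *     do_init_bootmem();
 *       allocate_pgdat(0); node_set_online(0);
 *       plat_mem_setup();                  boards may online further nodes
 *       bootmem_init_one_node(nid);        for each online node
 *       sparse_init();
 *     ioremap_fixed_init();
 */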