author     Paul Mundt <lethal@linux-sh.org>    2010-05-13 04:48:05 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2010-05-13 04:48:05 -0400
commit     ef4ed97d6bd91aa41907181e80a7feaf2721719a (patch)
tree       adc44e858c150c41ebf1dd7c6ee2f6bcbac75502 /arch/sh/kernel/setup.c
parent     c5eb5b372e7ea18a5eeb6b5192a6369967cb1afe (diff)
parent     21823259a70b7a2a21eea1d48c25a6f38896dd11 (diff)
Merge branch 'sh/lmb'
Conflicts:
arch/sh/kernel/setup.c
Diffstat (limited to 'arch/sh/kernel/setup.c')
-rw-r--r--   arch/sh/kernel/setup.c | 228
1 file changed, 26 insertions(+), 202 deletions(-)
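At a glance, the user-visible change in this merge is how the "mem=" boot parameter is handled. The sketch below is assembled from the hunks that follow, purely for illustration (it omits the surrounding includes and is not a verbatim copy of arch/sh/kernel/setup.c): instead of rejecting sizes larger than CONFIG_MEMORY_SIZE and recomputing memory_end, the parser now records a page-aligned memory_limit that the rest of the (now LMB-based) memory setup enforces outside this file.

    /* Illustrative sketch of the new "mem=" handling drawn from the diff below. */
    unsigned long memory_limit = 0;

    static int __init early_parse_mem(char *p)
    {
            if (!p)
                    return 1;

            /* Record a limit for the LMB-based setup code instead of
             * resizing memory_end directly. */
            memory_limit = PAGE_ALIGN(memparse(p, &p));

            pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);

            return 0;
    }
    early_param("mem", early_parse_mem);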
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 4f1585f41f2b..272734681d29 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -4,7 +4,7 @@
  * This file handles the architecture-dependent parts of initialization
  *
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
  */
 #include <linux/screen_info.h>
 #include <linux/ioport.h>
@@ -41,6 +41,7 @@
 #include <asm/clock.h>
 #include <asm/smp.h>
 #include <asm/mmu_context.h>
+#include <asm/mmzone.h>
 
 /*
  * Initialize loops_per_jiffy as 10000000 (1000MIPS).
@@ -94,6 +95,7 @@ unsigned long memory_start;
 EXPORT_SYMBOL(memory_start);
 unsigned long memory_end = 0;
 EXPORT_SYMBOL(memory_end);
+unsigned long memory_limit = 0;
 
 static struct resource mem_resources[MAX_NUMNODES];
 
@@ -101,94 +103,18 @@ int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
 
 static int __init early_parse_mem(char *p)
 {
-        unsigned long size;
+        if (!p)
+                return 1;
 
-        memory_start = (unsigned long)__va(__MEMORY_START);
-        size = memparse(p, &p);
+        memory_limit = PAGE_ALIGN(memparse(p, &p));
 
-        if (size > __MEMORY_SIZE) {
-                printk(KERN_ERR
-                        "Using mem= to increase the size of kernel memory "
-                        "is not allowed.\n"
-                        "  Recompile the kernel with the correct value for "
-                        "CONFIG_MEMORY_SIZE.\n");
-                return 0;
-        }
-
-        memory_end = memory_start + size;
+        pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);
 
         return 0;
 }
 early_param("mem", early_parse_mem);
 
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(void)
-{
-        unsigned long curr_pfn, last_pfn, pages;
-
-        /*
-         * We are rounding up the start address of usable memory:
-         */
-        curr_pfn = PFN_UP(__MEMORY_START);
-
-        /*
-         * ... and at the end of the usable range downwards:
-         */
-        last_pfn = PFN_DOWN(__pa(memory_end));
-
-        if (last_pfn > max_low_pfn)
-                last_pfn = max_low_pfn;
-
-        pages = last_pfn - curr_pfn;
-        free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
-}
-
-#ifdef CONFIG_KEXEC
-static void __init reserve_crashkernel(void)
-{
-        unsigned long long free_mem;
-        unsigned long long crash_size, crash_base;
-        void *vp;
-        int ret;
-
-        free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
-
-        ret = parse_crashkernel(boot_command_line, free_mem,
-                        &crash_size, &crash_base);
-        if (ret == 0 && crash_size) {
-                if (crash_base <= 0) {
-                        vp = alloc_bootmem_nopanic(crash_size);
-                        if (!vp) {
-                                printk(KERN_INFO "crashkernel allocation "
-                                       "failed\n");
-                                return;
-                        }
-                        crash_base = __pa(vp);
-                } else if (reserve_bootmem(crash_base, crash_size,
-                                        BOOTMEM_EXCLUSIVE) < 0) {
-                        printk(KERN_INFO "crashkernel reservation failed - "
-                                        "memory is in use\n");
-                        return;
-                }
-
-                printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
-                                "for crashkernel (System RAM: %ldMB)\n",
-                                (unsigned long)(crash_size >> 20),
-                                (unsigned long)(crash_base >> 20),
-                                (unsigned long)(free_mem >> 20));
-                crashk_res.start = crash_base;
-                crashk_res.end = crash_base + crash_size - 1;
-                insert_resource(&iomem_resource, &crashk_res);
-        }
-}
-#else
-static inline void __init reserve_crashkernel(void)
-{}
-#endif
-
-static void __init check_for_initrd(void)
+void __init check_for_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
         unsigned long start, end;
@@ -235,7 +161,7 @@ static void __init check_for_initrd(void)
         initrd_start = (unsigned long)__va(__pa(start));
         initrd_end = initrd_start + INITRD_SIZE;
 
-        reserve_bootmem(__pa(initrd_start), INITRD_SIZE, BOOTMEM_DEFAULT);
+        lmb_reserve(__pa(initrd_start), INITRD_SIZE);
 
         return;
 
@@ -265,13 +191,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
                                unsigned long end_pfn)
 {
         struct resource *res = &mem_resources[nid];
+        unsigned long start, end;
 
         WARN_ON(res->name); /* max one active range per node for now */
 
+        start = start_pfn << PAGE_SHIFT;
+        end = end_pfn << PAGE_SHIFT;
+
         res->name = "System RAM";
-        res->start = start_pfn << PAGE_SHIFT;
-        res->end = (end_pfn << PAGE_SHIFT) - 1;
+        res->start = start;
+        res->end = end - 1;
         res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
         if (request_resource(&iomem_resource, res)) {
                 pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
                        start_pfn, end_pfn);
@@ -287,100 +218,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
         request_resource(res, &data_resource);
         request_resource(res, &bss_resource);
 
-        add_active_range(nid, start_pfn, end_pfn);
-}
-
-void __init setup_bootmem_allocator(unsigned long free_pfn)
-{
-        unsigned long bootmap_size;
-        unsigned long bootmap_pages, bootmem_paddr;
-        u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT;
-        int i;
-
-        bootmap_pages = bootmem_bootmap_pages(total_pages);
-
-        bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-
-        /*
-         * Find a proper area for the bootmem bitmap. After this
-         * bootstrap step all allocations (until the page allocator
-         * is intact) must be done via bootmem_alloc().
-         */
-        bootmap_size = init_bootmem_node(NODE_DATA(0),
-                                         bootmem_paddr >> PAGE_SHIFT,
-                                         min_low_pfn, max_low_pfn);
-
-        /* Add active regions with valid PFNs. */
-        for (i = 0; i < lmb.memory.cnt; i++) {
-                unsigned long start_pfn, end_pfn;
-                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
-                __add_active_range(0, start_pfn, end_pfn);
-        }
-
-        /*
-         * Add all physical memory to the bootmem map and mark each
-         * area as present.
-         */
-        register_bootmem_low_pages();
-
-        /* Reserve the sections we're already using. */
-        for (i = 0; i < lmb.reserved.cnt; i++)
-                reserve_bootmem(lmb.reserved.region[i].base,
-                                lmb_size_bytes(&lmb.reserved, i),
-                                BOOTMEM_DEFAULT);
-
-        node_set_online(0);
-
-        sparse_memory_present_with_active_regions(0);
-
-        check_for_initrd();
-
-        reserve_crashkernel();
-}
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-static void __init setup_memory(void)
-{
-        unsigned long start_pfn;
-        u64 base = min_low_pfn << PAGE_SHIFT;
-        u64 size = (max_low_pfn << PAGE_SHIFT) - base;
-
         /*
-         * Partially used pages are not usable - thus
-         * we are rounding upwards:
+         * Also make sure that there is a PMB mapping that covers this
+         * range before we attempt to activate it, to avoid reset by MMU.
+         * We can hit this path with NUMA or memory hot-add.
          */
-        start_pfn = PFN_UP(__pa(_end));
+        pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
+                         PAGE_KERNEL);
 
-        lmb_add(base, size);
-
-        /*
-         * Reserve the kernel text and
-         * Reserve the bootmem bitmap. We do this in two steps (first step
-         * was init_bootmem()), because this catches the (definitely buggy)
-         * case of us accidentally initializing the bootmem allocator with
-         * an invalid RAM area.
-         */
-        lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
-                    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
-                    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
-
-        /*
-         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
-         */
-        if (CONFIG_ZERO_PAGE_OFFSET != 0)
-                lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
-
-        lmb_analyze();
-        lmb_dump_all();
-
-        setup_bootmem_allocator(start_pfn);
+        add_active_range(nid, start_pfn, end_pfn);
 }
-#else
-extern void __init setup_memory(void);
-#endif
 
-void __init __attribute__ ((weak)) plat_early_device_setup(void)
+void __init __weak plat_early_device_setup(void)
 {
 }
 
@@ -421,10 +270,6 @@ void __init setup_arch(char **cmdline_p)
         bss_resource.start = virt_to_phys(__bss_start);
         bss_resource.end = virt_to_phys(_ebss)-1;
 
-        memory_start = (unsigned long)__va(__MEMORY_START);
-        if (!memory_end)
-                memory_end = memory_start + __MEMORY_SIZE;
-
 #ifdef CONFIG_CMDLINE_OVERWRITE
         strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
 #else
@@ -441,39 +286,18 @@ void __init setup_arch(char **cmdline_p)
 
         parse_early_param();
 
-        uncached_init();
-
         plat_early_device_setup();
 
-        /* Let earlyprintk output early console messages */
-        early_platform_driver_probe("earlyprintk", 1, 1);
-
         sh_mv_setup();
 
-        /*
-         * Find the highest page frame number we have available
-         */
-        max_pfn = PFN_DOWN(__pa(memory_end));
-
-        /*
-         * Determine low and high memory ranges:
-         */
-        max_low_pfn = max_pfn;
-        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
-
-        nodes_clear(node_online_map);
+        /* Let earlyprintk output early console messages */
+        early_platform_driver_probe("earlyprintk", 1, 1);
 
-        pmb_init();
-        lmb_init();
-        setup_memory();
-        sparse_init();
+        paging_init();
 
 #ifdef CONFIG_DUMMY_CONSOLE
         conswitchp = &dummy_con;
 #endif
-        paging_init();
-
-        ioremap_fixed_init();
 
         /* Perform the machine specific initialisation */
         if (likely(sh_mv.mv_setup))