Diffstat (limited to 'arch/sh/kernel/setup.c')
-rw-r--r--  arch/sh/kernel/setup.c | 298
1 file changed, 70 insertions, 228 deletions
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 8870d6ba64b..272734681d2 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -4,7 +4,7 @@
  * This file handles the architecture-dependent parts of initialization
  *
  * Copyright (C) 1999  Niibe Yutaka
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
  */
 #include <linux/screen_info.h>
 #include <linux/ioport.h>
@@ -39,7 +39,9 @@
 #include <asm/irq.h>
 #include <asm/setup.h>
 #include <asm/clock.h>
+#include <asm/smp.h>
 #include <asm/mmu_context.h>
+#include <asm/mmzone.h>
 
 /*
  * Initialize loops_per_jiffy as 10000000 (1000MIPS).
@@ -93,6 +95,7 @@ unsigned long memory_start;
 EXPORT_SYMBOL(memory_start);
 unsigned long memory_end = 0;
 EXPORT_SYMBOL(memory_end);
+unsigned long memory_limit = 0;
 
 static struct resource mem_resources[MAX_NUMNODES];
 
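Editor's note: the new memory_limit variable is filled in by the reworked mem= early parameter in the next hunk, which feeds the raw argument through memparse() and page-aligns the result. As a quick illustration of the size-suffix convention involved, here is a minimal, hypothetical userspace sketch of memparse()-style parsing; it is not the kernel implementation (that lives in lib/cmdline.c), just the same idea:

    /* Hypothetical userspace sketch of memparse()-style parsing, as used
     * by "mem=": a number with an optional K/M/G suffix becomes a byte
     * count.  Illustration only, not the kernel code. */
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long long parse_size(const char *s)
    {
            char *end;
            /* base 0: accepts decimal and 0x-prefixed hex, like the kernel */
            unsigned long long v = strtoull(s, &end, 0);

            switch (*end) {
            case 'G': case 'g': v <<= 10; /* fall through */
            case 'M': case 'm': v <<= 10; /* fall through */
            case 'K': case 'k': v <<= 10;
            }
            return v;
    }

    int main(void)
    {
            printf("%llu\n", parse_size("512M"));   /* 536870912 */
            printf("%llu\n", parse_size("0x100K")); /* 262144 */
            return 0;
    }

PAGE_ALIGN() in the handler then rounds the parsed value up to a page boundary, so the limit never falls mid-page.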
@@ -100,92 +103,73 @@ int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
 
 static int __init early_parse_mem(char *p)
 {
-        unsigned long size;
+        if (!p)
+                return 1;
 
-        memory_start = (unsigned long)__va(__MEMORY_START);
-        size = memparse(p, &p);
+        memory_limit = PAGE_ALIGN(memparse(p, &p));
 
-        if (size > __MEMORY_SIZE) {
-                printk(KERN_ERR
-                        "Using mem= to increase the size of kernel memory "
-                        "is not allowed.\n"
-                        " Recompile the kernel with the correct value for "
-                        "CONFIG_MEMORY_SIZE.\n");
-                return 0;
-        }
-
-        memory_end = memory_start + size;
+        pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);
 
         return 0;
 }
 early_param("mem", early_parse_mem);
 
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(void)
+void __init check_for_initrd(void)
 {
-        unsigned long curr_pfn, last_pfn, pages;
+#ifdef CONFIG_BLK_DEV_INITRD
+        unsigned long start, end;
+
+        /*
+         * Check for the rare cases where boot loaders adhere to the boot
+         * ABI.
+         */
+        if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE)
+                goto disable;
+
+        start = INITRD_START + __MEMORY_START;
+        end = start + INITRD_SIZE;
+
+        if (unlikely(end <= start))
+                goto disable;
+        if (unlikely(start & ~PAGE_MASK)) {
+                pr_err("initrd must be page aligned\n");
+                goto disable;
+        }
+
+        if (unlikely(start < PAGE_OFFSET)) {
+                pr_err("initrd start < PAGE_OFFSET\n");
+                goto disable;
+        }
+
+        if (unlikely(end > lmb_end_of_DRAM())) {
+                pr_err("initrd extends beyond end of memory "
+                       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+                       end, (unsigned long)lmb_end_of_DRAM());
+                goto disable;
+        }
 
         /*
-         * We are rounding up the start address of usable memory:
+         * If we got this far in spite of the boot loader's best efforts
+         * to the contrary, assume we actually have a valid initrd and
+         * fix up the root dev.
          */
-        curr_pfn = PFN_UP(__MEMORY_START);
+        ROOT_DEV = Root_RAM0;
 
         /*
-         * ... and at the end of the usable range downwards:
+         * Address sanitization
          */
-        last_pfn = PFN_DOWN(__pa(memory_end));
+        initrd_start = (unsigned long)__va(__pa(start));
+        initrd_end = initrd_start + INITRD_SIZE;
 
-        if (last_pfn > max_low_pfn)
-                last_pfn = max_low_pfn;
+        lmb_reserve(__pa(initrd_start), INITRD_SIZE);
 
-        pages = last_pfn - curr_pfn;
-        free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
-}
+        return;
 
-#ifdef CONFIG_KEXEC
-static void __init reserve_crashkernel(void)
-{
-        unsigned long long free_mem;
-        unsigned long long crash_size, crash_base;
-        void *vp;
-        int ret;
-
-        free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
-
-        ret = parse_crashkernel(boot_command_line, free_mem,
-                                &crash_size, &crash_base);
-        if (ret == 0 && crash_size) {
-                if (crash_base <= 0) {
-                        vp = alloc_bootmem_nopanic(crash_size);
-                        if (!vp) {
-                                printk(KERN_INFO "crashkernel allocation "
-                                       "failed\n");
-                                return;
-                        }
-                        crash_base = __pa(vp);
-                } else if (reserve_bootmem(crash_base, crash_size,
-                                           BOOTMEM_EXCLUSIVE) < 0) {
-                        printk(KERN_INFO "crashkernel reservation failed - "
-                               "memory is in use\n");
-                        return;
-                }
-
-                printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
-                       "for crashkernel (System RAM: %ldMB)\n",
-                       (unsigned long)(crash_size >> 20),
-                       (unsigned long)(crash_base >> 20),
-                       (unsigned long)(free_mem >> 20));
-                crashk_res.start = crash_base;
-                crashk_res.end = crash_base + crash_size - 1;
-                insert_resource(&iomem_resource, &crashk_res);
-        }
-}
-#else
-static inline void __init reserve_crashkernel(void)
-{}
-#endif
+disable:
+        pr_info("initrd disabled\n");
+        initrd_start = initrd_end = 0;
+#endif
+}
 
 void __cpuinit calibrate_delay(void)
 {
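Editor's note: the new check_for_initrd() centralizes the initrd sanity checks that were previously buried in setup_bootmem_allocator() (removed further down). A hedged userspace sketch of the same range validation, with hypothetical addresses and PAGE_SHIFT assumed to be 12 (4KiB pages):

    /* Userspace sketch of the range checks in check_for_initrd(): reject
     * empty/wrapped ranges, unaligned starts, and ranges running past the
     * end of DRAM.  Hypothetical values, not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

    static bool initrd_range_ok(unsigned long start, unsigned long size,
                                unsigned long dram_end)
    {
            unsigned long end = start + size;

            if (end <= start)               /* empty or wrapped around */
                    return false;
            if (start & ~PAGE_MASK)         /* must be page aligned */
                    return false;
            if (end > dram_end)             /* must fit below end of DRAM */
                    return false;
            return true;
    }

    int main(void)
    {
            printf("%d\n", initrd_range_ok(0x01000000, 0x00400000, 0x10000000)); /* 1 */
            printf("%d\n", initrd_range_ok(0x01000123, 0x00400000, 0x10000000)); /* 0 */
            return 0;
    }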
@@ -207,13 +191,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
                                unsigned long end_pfn)
 {
         struct resource *res = &mem_resources[nid];
+        unsigned long start, end;
 
         WARN_ON(res->name); /* max one active range per node for now */
 
+        start = start_pfn << PAGE_SHIFT;
+        end = end_pfn << PAGE_SHIFT;
+
         res->name = "System RAM";
-        res->start = start_pfn << PAGE_SHIFT;
-        res->end = (end_pfn << PAGE_SHIFT) - 1;
+        res->start = start;
+        res->end = end - 1;
         res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
         if (request_resource(&iomem_resource, res)) {
                 pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
                        start_pfn, end_pfn);
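Editor's note: the start/end temporaries are plain pfn-to-physical-address conversions, hoisted so the same values can feed both the resource registration and the PMB mapping added in the next hunk. A small sketch of the arithmetic, assuming the common 4KiB page size:

    /* Sketch of the pfn <-> physical address math used above, assuming
     * PAGE_SHIFT == 12.  Resource ranges are inclusive, hence the
     * "end - 1" when filling in res->end. */
    #include <assert.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long start_pfn = 0x0c000, end_pfn = 0x10000;
            unsigned long start = start_pfn << PAGE_SHIFT;  /* 0x0c000000 */
            unsigned long end = end_pfn << PAGE_SHIFT;      /* 0x10000000 */

            assert(end - 1 == 0x0fffffffUL);                /* inclusive end */
            assert((end - start) >> PAGE_SHIFT == end_pfn - start_pfn);
            return 0;
    }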
@@ -229,138 +218,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
         request_resource(res, &data_resource);
         request_resource(res, &bss_resource);
 
-        add_active_range(nid, start_pfn, end_pfn);
-}
-
-void __init setup_bootmem_allocator(unsigned long free_pfn)
-{
-        unsigned long bootmap_size;
-        unsigned long bootmap_pages, bootmem_paddr;
-        u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT;
-        int i;
-
-        bootmap_pages = bootmem_bootmap_pages(total_pages);
-
-        bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-
-        /*
-         * Find a proper area for the bootmem bitmap. After this
-         * bootstrap step all allocations (until the page allocator
-         * is intact) must be done via bootmem_alloc().
-         */
-        bootmap_size = init_bootmem_node(NODE_DATA(0),
-                                         bootmem_paddr >> PAGE_SHIFT,
-                                         min_low_pfn, max_low_pfn);
-
-        /* Add active regions with valid PFNs. */
-        for (i = 0; i < lmb.memory.cnt; i++) {
-                unsigned long start_pfn, end_pfn;
-                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
-                __add_active_range(0, start_pfn, end_pfn);
-        }
-
-        /*
-         * Add all physical memory to the bootmem map and mark each
-         * area as present.
-         */
-        register_bootmem_low_pages();
-
-        /* Reserve the sections we're already using. */
-        for (i = 0; i < lmb.reserved.cnt; i++)
-                reserve_bootmem(lmb.reserved.region[i].base,
-                                lmb_size_bytes(&lmb.reserved, i),
-                                BOOTMEM_DEFAULT);
-
-        node_set_online(0);
-
-        sparse_memory_present_with_active_regions(0);
-
-#ifdef CONFIG_BLK_DEV_INITRD
-        ROOT_DEV = Root_RAM0;
-
-        if (LOADER_TYPE && INITRD_START) {
-                unsigned long initrd_start_phys = INITRD_START + __MEMORY_START;
-
-                if (initrd_start_phys + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
-                        reserve_bootmem(initrd_start_phys, INITRD_SIZE,
-                                        BOOTMEM_DEFAULT);
-                        initrd_start = (unsigned long)__va(initrd_start_phys);
-                        initrd_end = initrd_start + INITRD_SIZE;
-                } else {
-                        printk("initrd extends beyond end of memory "
-                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-                               initrd_start_phys + INITRD_SIZE,
-                               (unsigned long)PFN_PHYS(max_low_pfn));
-                        initrd_start = 0;
-                }
-        }
-#endif
-
-        reserve_crashkernel();
-}
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-static void __init setup_memory(void)
-{
-        unsigned long start_pfn;
-        u64 base = min_low_pfn << PAGE_SHIFT;
-        u64 size = (max_low_pfn << PAGE_SHIFT) - base;
-
-        /*
-         * Partially used pages are not usable - thus
-         * we are rounding upwards:
-         */
-        start_pfn = PFN_UP(__pa(_end));
-
-        lmb_add(base, size);
-
-        /*
-         * Reserve the kernel text and
-         * Reserve the bootmem bitmap. We do this in two steps (first step
-         * was init_bootmem()), because this catches the (definitely buggy)
-         * case of us accidentally initializing the bootmem allocator with
-         * an invalid RAM area.
-         */
-        lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
-                    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
-                    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
-
         /*
-         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+         * Also make sure that there is a PMB mapping that covers this
+         * range before we attempt to activate it, to avoid reset by MMU.
+         * We can hit this path with NUMA or memory hot-add.
          */
-        if (CONFIG_ZERO_PAGE_OFFSET != 0)
-                lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
-
-        lmb_analyze();
-        lmb_dump_all();
+        pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
+                         PAGE_KERNEL);
 
-        setup_bootmem_allocator(start_pfn);
-}
-#else
-extern void __init setup_memory(void);
-#endif
-
-/*
- * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
- * is_kdump_kernel() to determine if we are booting after a panic. Hence
- * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
- */
-#ifdef CONFIG_CRASH_DUMP
-/* elfcorehdr= specifies the location of elf core header
- * stored by the crashed kernel.
- */
-static int __init parse_elfcorehdr(char *arg)
-{
-        if (!arg)
-                return -EINVAL;
-        elfcorehdr_addr = memparse(arg, &arg);
-        return 0;
-}
+        add_active_range(nid, start_pfn, end_pfn);
 }
-early_param("elfcorehdr", parse_elfcorehdr);
-#endif
 
-void __init __attribute__ ((weak)) plat_early_device_setup(void)
+void __init __weak plat_early_device_setup(void)
 {
 }
 
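Editor's note: the switch from __attribute__ ((weak)) to the kernel's __weak shorthand is purely cosmetic, but the pattern itself is worth spelling out: a weak, empty default that any platform can override with a strong definition at link time. A minimal compilable sketch, with a hypothetical message standing in for real setup work:

    /* Sketch of the __weak default-hook pattern used by
     * plat_early_device_setup().  With GCC/Clang, a strong definition in
     * another object file silently replaces the weak one at link time. */
    #include <stdio.h>

    #define __weak __attribute__((weak))

    void __weak plat_early_device_setup(void)
    {
            puts("weak default: nothing to set up");
    }

    int main(void)
    {
            plat_early_device_setup();
            return 0;
    }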
@@ -401,10 +270,6 @@ void __init setup_arch(char **cmdline_p)
         bss_resource.start = virt_to_phys(__bss_start);
         bss_resource.end = virt_to_phys(_ebss)-1;
 
-        memory_start = (unsigned long)__va(__MEMORY_START);
-        if (!memory_end)
-                memory_end = memory_start + __MEMORY_SIZE;
-
 #ifdef CONFIG_CMDLINE_OVERWRITE
         strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
 #else
@@ -421,47 +286,24 @@ void __init setup_arch(char **cmdline_p)
 
         parse_early_param();
 
-        uncached_init();
-
         plat_early_device_setup();
 
-        /* Let earlyprintk output early console messages */
-        early_platform_driver_probe("earlyprintk", 1, 1);
-
         sh_mv_setup();
 
-        /*
-         * Find the highest page frame number we have available
-         */
-        max_pfn = PFN_DOWN(__pa(memory_end));
-
-        /*
-         * Determine low and high memory ranges:
-         */
-        max_low_pfn = max_pfn;
-        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
-
-        nodes_clear(node_online_map);
+        /* Let earlyprintk output early console messages */
+        early_platform_driver_probe("earlyprintk", 1, 1);
 
-        pmb_init();
-        lmb_init();
-        setup_memory();
-        sparse_init();
+        paging_init();
 
 #ifdef CONFIG_DUMMY_CONSOLE
         conswitchp = &dummy_con;
 #endif
-        paging_init();
-
-        ioremap_fixed_init();
 
         /* Perform the machine specific initialisation */
         if (likely(sh_mv.mv_setup))
                 sh_mv.mv_setup(cmdline_p);
 
-#ifdef CONFIG_SMP
         plat_smp_setup();
-#endif
 }
 
 /* processor boot mode configuration */
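Editor's note: dropping the #ifdef CONFIG_SMP guard around plat_smp_setup() presumably relies on the newly included <asm/smp.h> supplying a no-op stub on uniprocessor builds, which keeps the call site unconditional. A hedged sketch of that stub-header pattern (hypothetical layout, not the actual sh header):

    /* Sketch of the stub-header pattern that makes plat_smp_setup() safe
     * to call unconditionally. */
    #include <stdio.h>

    /* #define CONFIG_SMP */                      /* toggle to mimic an SMP build */

    #ifdef CONFIG_SMP
    void plat_smp_setup(void);                    /* provided by the SMP code */
    #else
    static inline void plat_smp_setup(void) { }   /* UP stub, compiles away */
    #endif

    int main(void)
    {
            plat_smp_setup();  /* no #ifdef needed at the call site */
            return 0;
    }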