Diffstat (limited to 'arch/arm/mm/init.c')
 -rw-r--r--  arch/arm/mm/init.c | 193
 1 file changed, 122 insertions, 71 deletions
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 30a69d67d673..82c4b4217989 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -26,9 +26,42 @@
 
 #include "mm.h"
 
-extern void _text, _etext, __data_start, _end, __init_begin, __init_end;
-extern unsigned long phys_initrd_start;
-extern unsigned long phys_initrd_size;
+static unsigned long phys_initrd_start __initdata = 0;
+static unsigned long phys_initrd_size __initdata = 0;
+
+static void __init early_initrd(char **p)
+{
+	unsigned long start, size;
+
+	start = memparse(*p, p);
+	if (**p == ',') {
+		size = memparse((*p) + 1, p);
+
+		phys_initrd_start = start;
+		phys_initrd_size = size;
+	}
+}
+__early_param("initrd=", early_initrd);
+
+static int __init parse_tag_initrd(const struct tag *tag)
+{
+	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+		"please update your bootloader.\n");
+	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
+
+__tagtable(ATAG_INITRD, parse_tag_initrd);
+
+static int __init parse_tag_initrd2(const struct tag *tag)
+{
+	phys_initrd_start = tag->u.initrd.start;
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
+
+__tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
 /*
  * This is used to pass memory configuration data from paging_init
@@ -36,10 +69,6 @@ extern unsigned long phys_initrd_size;
  */
 static struct meminfo meminfo = { 0, };
 
-#define for_each_nodebank(iter,mi,no) \
-	for (iter = 0; iter < mi->nr_banks; iter++) \
-		if (mi->bank[iter].node == no)
-
 void show_mem(void)
 {
 	int free = 0, total = 0, reserved = 0;
@@ -50,14 +79,15 @@ void show_mem(void)
 	show_free_areas();
 	for_each_online_node(node) {
 		pg_data_t *n = NODE_DATA(node);
-		struct page *map = n->node_mem_map - n->node_start_pfn;
+		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
 
 		for_each_nodebank (i,mi,node) {
+			struct membank *bank = &mi->bank[i];
 			unsigned int pfn1, pfn2;
 			struct page *page, *end;
 
-			pfn1 = __phys_to_pfn(mi->bank[i].start);
-			pfn2 = __phys_to_pfn(mi->bank[i].size + mi->bank[i].start);
+			pfn1 = bank_pfn_start(bank);
+			pfn2 = bank_pfn_end(bank);
 
 			page = map + pfn1;
 			end = map + pfn2;
@@ -96,17 +126,17 @@ void show_mem(void)
 static unsigned int __init
 find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
 {
-	unsigned int start_pfn, bank, bootmap_pfn;
+	unsigned int start_pfn, i, bootmap_pfn;
 
 	start_pfn = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
 	bootmap_pfn = 0;
 
-	for_each_nodebank(bank, mi, node) {
+	for_each_nodebank(i, mi, node) {
+		struct membank *bank = &mi->bank[i];
 		unsigned int start, end;
 
-		start = mi->bank[bank].start >> PAGE_SHIFT;
-		end = (mi->bank[bank].size +
-		       mi->bank[bank].start) >> PAGE_SHIFT;
+		start = bank_pfn_start(bank);
+		end = bank_pfn_end(bank);
 
 		if (end < start_pfn)
 			continue;
@@ -145,13 +175,10 @@ static int __init check_initrd(struct meminfo *mi)
 		initrd_node = -1;
 
 		for (i = 0; i < mi->nr_banks; i++) {
-			unsigned long bank_end;
-
-			bank_end = mi->bank[i].start + mi->bank[i].size;
-
-			if (mi->bank[i].start <= phys_initrd_start &&
-			    end <= bank_end)
-				initrd_node = mi->bank[i].node;
+			struct membank *bank = &mi->bank[i];
+			if (bank_phys_start(bank) <= phys_initrd_start &&
+			    end <= bank_phys_end(bank))
+				initrd_node = bank->node;
 		}
 	}
 
@@ -171,19 +198,17 @@ static inline void map_memory_bank(struct membank *bank)
 #ifdef CONFIG_MMU
 	struct map_desc map;
 
-	map.pfn = __phys_to_pfn(bank->start);
-	map.virtual = __phys_to_virt(bank->start);
-	map.length = bank->size;
+	map.pfn = bank_pfn_start(bank);
+	map.virtual = __phys_to_virt(bank_phys_start(bank));
+	map.length = bank_phys_size(bank);
 	map.type = MT_MEMORY;
 
 	create_mapping(&map);
 #endif
 }
 
-static unsigned long __init
-bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
+static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
 {
-	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
 	unsigned long start_pfn, end_pfn, boot_pfn;
 	unsigned int boot_pages;
 	pg_data_t *pgdat;
@@ -199,8 +224,8 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 		struct membank *bank = &mi->bank[i];
 		unsigned long start, end;
 
-		start = bank->start >> PAGE_SHIFT;
-		end = (bank->start + bank->size) >> PAGE_SHIFT;
+		start = bank_pfn_start(bank);
+		end = bank_pfn_end(bank);
 
 		if (start_pfn > start)
 			start_pfn = start;
@@ -230,8 +255,11 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	pgdat = NODE_DATA(node);
 	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
 
-	for_each_nodebank(i, mi, node)
-		free_bootmem_node(pgdat, mi->bank[i].start, mi->bank[i].size);
+	for_each_nodebank(i, mi, node) {
+		struct membank *bank = &mi->bank[i];
+		free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
+		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
+	}
 
 	/*
 	 * Reserve the bootmem bitmap for this node.
@@ -239,31 +267,39 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
 			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
 
-	/*
-	 * Reserve any special node zero regions.
-	 */
-	if (node == 0)
-		reserve_node_zero(pgdat);
+	return end_pfn;
+}
 
+static void __init bootmem_reserve_initrd(int node)
+{
 #ifdef CONFIG_BLK_DEV_INITRD
-	/*
-	 * If the initrd is in this node, reserve its memory.
-	 */
-	if (node == initrd_node) {
-		int res = reserve_bootmem_node(pgdat, phys_initrd_start,
-				phys_initrd_size, BOOTMEM_EXCLUSIVE);
-
-		if (res == 0) {
-			initrd_start = __phys_to_virt(phys_initrd_start);
-			initrd_end = initrd_start + phys_initrd_size;
-		} else {
-			printk(KERN_ERR
-				"INITRD: 0x%08lx+0x%08lx overlaps in-use "
-				"memory region - disabling initrd\n",
-				phys_initrd_start, phys_initrd_size);
-		}
+	pg_data_t *pgdat = NODE_DATA(node);
+	int res;
+
+	res = reserve_bootmem_node(pgdat, phys_initrd_start,
+			phys_initrd_size, BOOTMEM_EXCLUSIVE);
+
+	if (res == 0) {
+		initrd_start = __phys_to_virt(phys_initrd_start);
+		initrd_end = initrd_start + phys_initrd_size;
+	} else {
+		printk(KERN_ERR
+			"INITRD: 0x%08lx+0x%08lx overlaps in-use "
+			"memory region - disabling initrd\n",
+			phys_initrd_start, phys_initrd_size);
 	}
 #endif
+}
+
+static void __init bootmem_free_node(int node, struct meminfo *mi)
+{
+	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+	unsigned long start_pfn, end_pfn;
+	pg_data_t *pgdat = NODE_DATA(node);
+	int i;
+
+	start_pfn = pgdat->bdata->node_min_pfn;
+	end_pfn = pgdat->bdata->node_low_pfn;
 
 	/*
 	 * initialise the zones within this node.
@@ -284,7 +320,7 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	 */
 	zhole_size[0] = zone_size[0];
 	for_each_nodebank(i, mi, node)
-		zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
+		zhole_size[0] -= bank_pfn_size(&mi->bank[i]);
 
 	/*
 	 * Adjust the sizes according to any special requirements for
@@ -293,21 +329,12 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	arch_adjust_zones(node, zone_size, zhole_size);
 
 	free_area_init_node(node, zone_size, start_pfn, zhole_size);
-
-	return end_pfn;
 }
 
 void __init bootmem_init(struct meminfo *mi)
 {
 	unsigned long memend_pfn = 0;
-	int node, initrd_node, i;
-
-	/*
-	 * Invalidate the node number for empty or invalid memory banks
-	 */
-	for (i = 0; i < mi->nr_banks; i++)
-		if (mi->bank[i].size == 0 || mi->bank[i].node >= MAX_NUMNODES)
-			mi->bank[i].node = -1;
+	int node, initrd_node;
 
 	memcpy(&meminfo, mi, sizeof(meminfo));
 
@@ -320,9 +347,19 @@ void __init bootmem_init(struct meminfo *mi)
 	 * Run through each node initialising the bootmem allocator.
 	 */
 	for_each_node(node) {
-		unsigned long end_pfn;
+		unsigned long end_pfn = bootmem_init_node(node, mi);
 
-		end_pfn = bootmem_init_node(node, initrd_node, mi);
+		/*
+		 * Reserve any special node zero regions.
+		 */
+		if (node == 0)
+			reserve_node_zero(NODE_DATA(node));
+
+		/*
+		 * If the initrd is in this node, reserve its memory.
+		 */
+		if (node == initrd_node)
+			bootmem_reserve_initrd(node);
 
 		/*
 		 * Remember the highest memory PFN.
@@ -331,6 +368,19 @@ void __init bootmem_init(struct meminfo *mi)
 			memend_pfn = end_pfn;
 	}
 
+	/*
+	 * sparse_init() needs the bootmem allocator up and running.
+	 */
+	sparse_init();
+
+	/*
+	 * Now free memory in each node - free_area_init_node needs
+	 * the sparse mem_map arrays initialized by sparse_init()
+	 * for memmap_init_zone(), otherwise all PFNs are invalid.
+	 */
+	for_each_node(node)
+		bootmem_free_node(node, mi);
+
 	high_memory = __va(memend_pfn << PAGE_SHIFT);
 
 	/*
@@ -401,7 +451,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 	 * information on the command line.
 	 */
 	for_each_nodebank(i, mi, node) {
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
+		struct membank *bank = &mi->bank[i];
+
+		bank_start = bank_pfn_start(bank);
 		if (bank_start < prev_bank_end) {
 			printk(KERN_ERR "MEM: unordered memory banks. "
 				"Not freeing memmap.\n");
@@ -415,8 +467,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 		if (prev_bank_end && prev_bank_end != bank_start)
 			free_memmap(node, prev_bank_end, bank_start);
 
-		prev_bank_end = (mi->bank[i].start +
-				mi->bank[i].size) >> PAGE_SHIFT;
+		prev_bank_end = bank_pfn_end(bank);
 	}
 }
 
@@ -461,8 +512,8 @@ void __init mem_init(void)
 
 	num_physpages = 0;
 	for (i = 0; i < meminfo.nr_banks; i++) {
-		num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
-		printk(" %ldMB", meminfo.bank[i].size >> 20);
+		num_physpages += bank_pfn_size(&meminfo.bank[i]);
+		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
 	}
 
 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
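Note on the helpers used above: the rewritten code leans on membank accessor macros (bank_pfn_start/end/size and bank_phys_start/end/size) that are defined outside this file, so their definitions do not appear in this diff. Judging only from the open-coded expressions they replace here, a minimal reference sketch, assuming the usual struct membank { start, size, node } layout and that the real definitions are introduced elsewhere in the same series (likely a setup/memory header), would be:

/*
 * Hypothetical sketch of the bank accessors assumed by the new init.c
 * code above; the real definitions live outside this file and may
 * differ in detail.
 */
#define bank_pfn_start(bank)	__phys_to_pfn((bank)->start)
#define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size)
#define bank_pfn_size(bank)	((bank)->size >> PAGE_SHIFT)
#define bank_phys_start(bank)	((bank)->start)
#define bank_phys_end(bank)	((bank)->start + (bank)->size)
#define bank_phys_size(bank)	((bank)->size)

With definitions along these lines, the old "end = (bank->start + bank->size) >> PAGE_SHIFT" and the new "end = bank_pfn_end(bank)" in bootmem_init_node() compute the same value (given that __phys_to_pfn() is a PAGE_SHIFT shift), which is what makes the conversion a behaviour-preserving cleanup. Separately, the new early_initrd() handler parses a command line option of the form initrd=<start>,<size>; since it uses memparse(), size suffixes such as 8M are accepted.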