diff options
author | Johannes Weiner <hannes@saeurebad.de> | 2008-07-24 00:28:00 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-24 13:47:19 -0400 |
commit | 223e8dc9249c9e15f6c8b638d73fcad78ccb0a88 (patch) | |
tree | dff068c99609163c1632926c7f321279d8f945d8 /mm/bootmem.c | |
parent | 7251ff78b94c2a68d267623d09b32672b20662c1 (diff) |
bootmem: reorder code to match new bootmem structure
This only reorders functions so that further patches will be easier to
read. No code changed.
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/bootmem.c')
-rw-r--r-- | mm/bootmem.c | 356 |
1 file changed, 177 insertions, 179 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c index 9ac972535fff..24eacf52c50e 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
@@ -38,6 +38,19 @@ unsigned long saved_max_pfn; | |||
38 | 38 | ||
39 | bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata; | 39 | bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata; |
40 | 40 | ||
41 | /* | ||
42 | * Given an initialised bdata, it returns the size of the boot bitmap | ||
43 | */ | ||
44 | static unsigned long __init get_mapsize(bootmem_data_t *bdata) | ||
45 | { | ||
46 | unsigned long mapsize; | ||
47 | unsigned long start = PFN_DOWN(bdata->node_boot_start); | ||
48 | unsigned long end = bdata->node_low_pfn; | ||
49 | |||
50 | mapsize = ((end - start) + 7) / 8; | ||
51 | return ALIGN(mapsize, sizeof(long)); | ||
52 | } | ||
53 | |||
41 | /* return the number of _pages_ that will be allocated for the boot bitmap */ | 54 | /* return the number of _pages_ that will be allocated for the boot bitmap */ |
42 | unsigned long __init bootmem_bootmap_pages(unsigned long pages) | 55 | unsigned long __init bootmem_bootmap_pages(unsigned long pages) |
43 | { | 56 | { |
@@ -72,19 +85,6 @@ static void __init link_bootmem(bootmem_data_t *bdata) | |||
72 | } | 85 | } |
73 | 86 | ||
74 | /* | 87 | /* |
75 | * Given an initialised bdata, it returns the size of the boot bitmap | ||
76 | */ | ||
77 | static unsigned long __init get_mapsize(bootmem_data_t *bdata) | ||
78 | { | ||
79 | unsigned long mapsize; | ||
80 | unsigned long start = PFN_DOWN(bdata->node_boot_start); | ||
81 | unsigned long end = bdata->node_low_pfn; | ||
82 | |||
83 | mapsize = ((end - start) + 7) / 8; | ||
84 | return ALIGN(mapsize, sizeof(long)); | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * Called once to set up the allocator itself. | 88 | * Called once to set up the allocator itself. |
89 | */ | 89 | */ |
90 | static unsigned long __init init_bootmem_core(bootmem_data_t *bdata, | 90 | static unsigned long __init init_bootmem_core(bootmem_data_t *bdata, |
@@ -108,6 +108,146 @@ static unsigned long __init init_bootmem_core(bootmem_data_t *bdata, | |||
108 | return mapsize; | 108 | return mapsize; |
109 | } | 109 | } |
110 | 110 | ||
111 | unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn, | ||
112 | unsigned long startpfn, unsigned long endpfn) | ||
113 | { | ||
114 | return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn); | ||
115 | } | ||
116 | |||
117 | unsigned long __init init_bootmem(unsigned long start, unsigned long pages) | ||
118 | { | ||
119 | max_low_pfn = pages; | ||
120 | min_low_pfn = start; | ||
121 | return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); | ||
122 | } | ||
123 | |||
124 | static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) | ||
125 | { | ||
126 | struct page *page; | ||
127 | unsigned long pfn; | ||
128 | unsigned long i, count; | ||
129 | unsigned long idx; | ||
130 | unsigned long *map; | ||
131 | int gofast = 0; | ||
132 | |||
133 | BUG_ON(!bdata->node_bootmem_map); | ||
134 | |||
135 | count = 0; | ||
136 | /* first extant page of the node */ | ||
137 | pfn = PFN_DOWN(bdata->node_boot_start); | ||
138 | idx = bdata->node_low_pfn - pfn; | ||
139 | map = bdata->node_bootmem_map; | ||
140 | /* | ||
141 | * Check if we are aligned to BITS_PER_LONG pages. If so, we might | ||
142 | * be able to free page orders of that size at once. | ||
143 | */ | ||
144 | if (!(pfn & (BITS_PER_LONG-1))) | ||
145 | gofast = 1; | ||
146 | |||
147 | for (i = 0; i < idx; ) { | ||
148 | unsigned long v = ~map[i / BITS_PER_LONG]; | ||
149 | |||
150 | if (gofast && v == ~0UL) { | ||
151 | int order; | ||
152 | |||
153 | page = pfn_to_page(pfn); | ||
154 | count += BITS_PER_LONG; | ||
155 | order = ffs(BITS_PER_LONG) - 1; | ||
156 | __free_pages_bootmem(page, order); | ||
157 | i += BITS_PER_LONG; | ||
158 | page += BITS_PER_LONG; | ||
159 | } else if (v) { | ||
160 | unsigned long m; | ||
161 | |||
162 | page = pfn_to_page(pfn); | ||
163 | for (m = 1; m && i < idx; m<<=1, page++, i++) { | ||
164 | if (v & m) { | ||
165 | count++; | ||
166 | __free_pages_bootmem(page, 0); | ||
167 | } | ||
168 | } | ||
169 | } else { | ||
170 | i += BITS_PER_LONG; | ||
171 | } | ||
172 | pfn += BITS_PER_LONG; | ||
173 | } | ||
174 | |||
175 | /* | ||
176 | * Now free the allocator bitmap itself, it's not | ||
177 | * needed anymore: | ||
178 | */ | ||
179 | page = virt_to_page(bdata->node_bootmem_map); | ||
180 | idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
181 | for (i = 0; i < idx; i++, page++) | ||
182 | __free_pages_bootmem(page, 0); | ||
183 | count += i; | ||
184 | bdata->node_bootmem_map = NULL; | ||
185 | |||
186 | return count; | ||
187 | } | ||
188 | |||
189 | unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) | ||
190 | { | ||
191 | register_page_bootmem_info_node(pgdat); | ||
192 | return free_all_bootmem_core(pgdat->bdata); | ||
193 | } | ||
194 | |||
195 | unsigned long __init free_all_bootmem(void) | ||
196 | { | ||
197 | return free_all_bootmem_core(NODE_DATA(0)->bdata); | ||
198 | } | ||
199 | |||
200 | static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, | ||
201 | unsigned long size) | ||
202 | { | ||
203 | unsigned long sidx, eidx; | ||
204 | unsigned long i; | ||
205 | |||
206 | BUG_ON(!size); | ||
207 | |||
208 | /* out range */ | ||
209 | if (addr + size < bdata->node_boot_start || | ||
210 | PFN_DOWN(addr) > bdata->node_low_pfn) | ||
211 | return; | ||
212 | /* | ||
213 | * round down end of usable mem, partially free pages are | ||
214 | * considered reserved. | ||
215 | */ | ||
216 | |||
217 | if (addr >= bdata->node_boot_start && addr < bdata->last_success) | ||
218 | bdata->last_success = addr; | ||
219 | |||
220 | /* | ||
221 | * Round up to index to the range. | ||
222 | */ | ||
223 | if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start)) | ||
224 | sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start); | ||
225 | else | ||
226 | sidx = 0; | ||
227 | |||
228 | eidx = PFN_DOWN(addr + size - bdata->node_boot_start); | ||
229 | if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start)) | ||
230 | eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start); | ||
231 | |||
232 | for (i = sidx; i < eidx; i++) { | ||
233 | if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map))) | ||
234 | BUG(); | ||
235 | } | ||
236 | } | ||
237 | |||
238 | void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, | ||
239 | unsigned long size) | ||
240 | { | ||
241 | free_bootmem_core(pgdat->bdata, physaddr, size); | ||
242 | } | ||
243 | |||
244 | void __init free_bootmem(unsigned long addr, unsigned long size) | ||
245 | { | ||
246 | bootmem_data_t *bdata; | ||
247 | list_for_each_entry(bdata, &bdata_list, list) | ||
248 | free_bootmem_core(bdata, addr, size); | ||
249 | } | ||
250 | |||
111 | /* | 251 | /* |
112 | * Marks a particular physical memory range as unallocatable. Usable RAM | 252 | * Marks a particular physical memory range as unallocatable. Usable RAM |
113 | * might be used for boot-time allocations - or it might get added | 253 | * might be used for boot-time allocations - or it might get added |
@@ -183,43 +323,36 @@ static void __init reserve_bootmem_core(bootmem_data_t *bdata, | |||
183 | } | 323 | } |
184 | } | 324 | } |
185 | 325 | ||
186 | static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, | 326 | int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, |
187 | unsigned long size) | 327 | unsigned long size, int flags) |
188 | { | 328 | { |
189 | unsigned long sidx, eidx; | 329 | int ret; |
190 | unsigned long i; | ||
191 | |||
192 | BUG_ON(!size); | ||
193 | |||
194 | /* out range */ | ||
195 | if (addr + size < bdata->node_boot_start || | ||
196 | PFN_DOWN(addr) > bdata->node_low_pfn) | ||
197 | return; | ||
198 | /* | ||
199 | * round down end of usable mem, partially free pages are | ||
200 | * considered reserved. | ||
201 | */ | ||
202 | |||
203 | if (addr >= bdata->node_boot_start && addr < bdata->last_success) | ||
204 | bdata->last_success = addr; | ||
205 | 330 | ||
206 | /* | 331 | ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags); |
207 | * Round up to index to the range. | 332 | if (ret < 0) |
208 | */ | 333 | return -ENOMEM; |
209 | if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start)) | 334 | reserve_bootmem_core(pgdat->bdata, physaddr, size, flags); |
210 | sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start); | 335 | return 0; |
211 | else | 336 | } |
212 | sidx = 0; | ||
213 | 337 | ||
214 | eidx = PFN_DOWN(addr + size - bdata->node_boot_start); | 338 | #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE |
215 | if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start)) | 339 | int __init reserve_bootmem(unsigned long addr, unsigned long size, |
216 | eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start); | 340 | int flags) |
341 | { | ||
342 | bootmem_data_t *bdata; | ||
343 | int ret; | ||
217 | 344 | ||
218 | for (i = sidx; i < eidx; i++) { | 345 | list_for_each_entry(bdata, &bdata_list, list) { |
219 | if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map))) | 346 | ret = can_reserve_bootmem_core(bdata, addr, size, flags); |
220 | BUG(); | 347 | if (ret < 0) |
348 | return ret; | ||
221 | } | 349 | } |
350 | list_for_each_entry(bdata, &bdata_list, list) | ||
351 | reserve_bootmem_core(bdata, addr, size, flags); | ||
352 | |||
353 | return 0; | ||
222 | } | 354 | } |
355 | #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ | ||
223 | 356 | ||
224 | /* | 357 | /* |
225 | * We 'merge' subsequent allocations to save space. We might 'lose' | 358 | * We 'merge' subsequent allocations to save space. We might 'lose' |
@@ -371,140 +504,6 @@ found: | |||
371 | return ret; | 504 | return ret; |
372 | } | 505 | } |
373 | 506 | ||
374 | static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) | ||
375 | { | ||
376 | struct page *page; | ||
377 | unsigned long pfn; | ||
378 | unsigned long i, count; | ||
379 | unsigned long idx; | ||
380 | unsigned long *map; | ||
381 | int gofast = 0; | ||
382 | |||
383 | BUG_ON(!bdata->node_bootmem_map); | ||
384 | |||
385 | count = 0; | ||
386 | /* first extant page of the node */ | ||
387 | pfn = PFN_DOWN(bdata->node_boot_start); | ||
388 | idx = bdata->node_low_pfn - pfn; | ||
389 | map = bdata->node_bootmem_map; | ||
390 | /* | ||
391 | * Check if we are aligned to BITS_PER_LONG pages. If so, we might | ||
392 | * be able to free page orders of that size at once. | ||
393 | */ | ||
394 | if (!(pfn & (BITS_PER_LONG-1))) | ||
395 | gofast = 1; | ||
396 | |||
397 | for (i = 0; i < idx; ) { | ||
398 | unsigned long v = ~map[i / BITS_PER_LONG]; | ||
399 | |||
400 | if (gofast && v == ~0UL) { | ||
401 | int order; | ||
402 | |||
403 | page = pfn_to_page(pfn); | ||
404 | count += BITS_PER_LONG; | ||
405 | order = ffs(BITS_PER_LONG) - 1; | ||
406 | __free_pages_bootmem(page, order); | ||
407 | i += BITS_PER_LONG; | ||
408 | page += BITS_PER_LONG; | ||
409 | } else if (v) { | ||
410 | unsigned long m; | ||
411 | |||
412 | page = pfn_to_page(pfn); | ||
413 | for (m = 1; m && i < idx; m<<=1, page++, i++) { | ||
414 | if (v & m) { | ||
415 | count++; | ||
416 | __free_pages_bootmem(page, 0); | ||
417 | } | ||
418 | } | ||
419 | } else { | ||
420 | i += BITS_PER_LONG; | ||
421 | } | ||
422 | pfn += BITS_PER_LONG; | ||
423 | } | ||
424 | |||
425 | /* | ||
426 | * Now free the allocator bitmap itself, it's not | ||
427 | * needed anymore: | ||
428 | */ | ||
429 | page = virt_to_page(bdata->node_bootmem_map); | ||
430 | idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
431 | for (i = 0; i < idx; i++, page++) | ||
432 | __free_pages_bootmem(page, 0); | ||
433 | count += i; | ||
434 | bdata->node_bootmem_map = NULL; | ||
435 | |||
436 | return count; | ||
437 | } | ||
438 | |||
439 | unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn, | ||
440 | unsigned long startpfn, unsigned long endpfn) | ||
441 | { | ||
442 | return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn); | ||
443 | } | ||
444 | |||
445 | int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, | ||
446 | unsigned long size, int flags) | ||
447 | { | ||
448 | int ret; | ||
449 | |||
450 | ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags); | ||
451 | if (ret < 0) | ||
452 | return -ENOMEM; | ||
453 | reserve_bootmem_core(pgdat->bdata, physaddr, size, flags); | ||
454 | |||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, | ||
459 | unsigned long size) | ||
460 | { | ||
461 | free_bootmem_core(pgdat->bdata, physaddr, size); | ||
462 | } | ||
463 | |||
464 | unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) | ||
465 | { | ||
466 | register_page_bootmem_info_node(pgdat); | ||
467 | return free_all_bootmem_core(pgdat->bdata); | ||
468 | } | ||
469 | |||
470 | unsigned long __init init_bootmem(unsigned long start, unsigned long pages) | ||
471 | { | ||
472 | max_low_pfn = pages; | ||
473 | min_low_pfn = start; | ||
474 | return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); | ||
475 | } | ||
476 | |||
477 | #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE | ||
478 | int __init reserve_bootmem(unsigned long addr, unsigned long size, | ||
479 | int flags) | ||
480 | { | ||
481 | bootmem_data_t *bdata; | ||
482 | int ret; | ||
483 | |||
484 | list_for_each_entry(bdata, &bdata_list, list) { | ||
485 | ret = can_reserve_bootmem_core(bdata, addr, size, flags); | ||
486 | if (ret < 0) | ||
487 | return ret; | ||
488 | } | ||
489 | list_for_each_entry(bdata, &bdata_list, list) | ||
490 | reserve_bootmem_core(bdata, addr, size, flags); | ||
491 | |||
492 | return 0; | ||
493 | } | ||
494 | #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ | ||
495 | |||
496 | void __init free_bootmem(unsigned long addr, unsigned long size) | ||
497 | { | ||
498 | bootmem_data_t *bdata; | ||
499 | list_for_each_entry(bdata, &bdata_list, list) | ||
500 | free_bootmem_core(bdata, addr, size); | ||
501 | } | ||
502 | |||
503 | unsigned long __init free_all_bootmem(void) | ||
504 | { | ||
505 | return free_all_bootmem_core(NODE_DATA(0)->bdata); | ||
506 | } | ||
507 | |||
508 | void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align, | 507 | void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align, |
509 | unsigned long goal) | 508 | unsigned long goal) |
510 | { | 509 | { |
@@ -534,7 +533,6 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align, | |||
534 | return NULL; | 533 | return NULL; |
535 | } | 534 | } |
536 | 535 | ||
537 | |||
538 | void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, | 536 | void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, |
539 | unsigned long align, unsigned long goal) | 537 | unsigned long align, unsigned long goal) |
540 | { | 538 | { |