-rw-r--r--	include/linux/bootmem.h	|  86
-rw-r--r--	mm/bootmem.c		| 356
2 files changed, 222 insertions, 220 deletions
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index f352c5f125b4..5000fd70b04f 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -41,36 +41,62 @@ typedef struct bootmem_data {
 extern bootmem_data_t bootmem_node_data[];
 
 extern unsigned long bootmem_bootmap_pages(unsigned long);
+
+extern unsigned long init_bootmem_node(pg_data_t *pgdat,
+				       unsigned long freepfn,
+				       unsigned long startpfn,
+				       unsigned long endpfn);
 extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
+
+extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
+extern unsigned long free_all_bootmem(void);
+
+extern void free_bootmem_node(pg_data_t *pgdat,
+			      unsigned long addr,
+			      unsigned long size);
 extern void free_bootmem(unsigned long addr, unsigned long size);
-extern void *__alloc_bootmem(unsigned long size,
+
+/*
+ * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
+ * the architecture-specific code should honor this).
+ *
+ * If flags is 0, then the return value is always 0 (success). If
+ * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the
+ * memory already was reserved.
+ */
+#define BOOTMEM_DEFAULT		0
+#define BOOTMEM_EXCLUSIVE	(1<<0)
+
+extern int reserve_bootmem_node(pg_data_t *pgdat,
+				unsigned long physaddr,
+				unsigned long size,
+				int flags);
+#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
+#endif
+
+extern void *__alloc_bootmem_nopanic(unsigned long size,
 				     unsigned long align,
 				     unsigned long goal);
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
 				     unsigned long align,
 				     unsigned long goal);
 extern void *__alloc_bootmem_low(unsigned long size,
 				 unsigned long align,
 				 unsigned long goal);
+extern void *__alloc_bootmem_node(pg_data_t *pgdat,
+				  unsigned long size,
+				  unsigned long align,
+				  unsigned long goal);
+extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+					  unsigned long size,
+					  unsigned long align,
+					  unsigned long goal);
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 				      unsigned long size,
 				      unsigned long align,
 				      unsigned long goal);
-
-/*
- * flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
- * the architecture-specific code should honor this)
- */
-#define BOOTMEM_DEFAULT		0
-#define BOOTMEM_EXCLUSIVE	(1<<0)
-
 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-/*
- * If flags is 0, then the return value is always 0 (success). If
- * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the
- * memory already was reserved.
- */
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low(x) \
@@ -83,38 +109,16 @@ extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
 
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 				   int flags);
-extern unsigned long free_all_bootmem(void);
-extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
-extern void *__alloc_bootmem_node(pg_data_t *pgdat,
-				  unsigned long size,
-				  unsigned long align,
-				  unsigned long goal);
-extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
-					  unsigned long size,
-					  unsigned long align,
-					  unsigned long goal);
-extern unsigned long init_bootmem_node(pg_data_t *pgdat,
-				       unsigned long freepfn,
-				       unsigned long startpfn,
-				       unsigned long endpfn);
-extern int reserve_bootmem_node(pg_data_t *pgdat,
-				unsigned long physaddr,
-				unsigned long size,
-				int flags);
-extern void free_bootmem_node(pg_data_t *pgdat,
-			      unsigned long addr,
-			      unsigned long size);
-extern void *alloc_bootmem_section(unsigned long size,
-				   unsigned long section_nr);
 
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
 #define alloc_bootmem_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages_node(pgdat, x) \
 	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
+
+extern void *alloc_bootmem_section(unsigned long size,
+				   unsigned long section_nr);
 
 #ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
 extern void *alloc_remap(int nid, unsigned long size);
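
[Illustrative note, not part of the patch] The header above documents the reserve_bootmem() flag contract: with BOOTMEM_DEFAULT the call always returns 0, while BOOTMEM_EXCLUSIVE makes it return -EBUSY when part of the range was already reserved, so exclusive callers must check the result. A minimal caller sketch; crash_base and crash_size are made-up variables used only for illustration:

	if (reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE) < 0) {
		/* -EBUSY: the range overlaps an earlier reservation */
		printk(KERN_INFO "crashkernel: memory is in use, cannot reserve\n");
		return;
	}

The alloc_bootmem() helpers above are thin wrappers; alloc_bootmem(x), for instance, expands to __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)), i.e. a cache-line aligned allocation searched for starting above MAX_DMA_ADDRESS.
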
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 9ac972535fff..24eacf52c50e 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -38,6 +38,19 @@ unsigned long saved_max_pfn;
 
 bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
 
+/*
+ * Given an initialised bdata, it returns the size of the boot bitmap
+ */
+static unsigned long __init get_mapsize(bootmem_data_t *bdata)
+{
+	unsigned long mapsize;
+	unsigned long start = PFN_DOWN(bdata->node_boot_start);
+	unsigned long end = bdata->node_low_pfn;
+
+	mapsize = ((end - start) + 7) / 8;
+	return ALIGN(mapsize, sizeof(long));
+}
+
 /* return the number of _pages_ that will be allocated for the boot bitmap */
 unsigned long __init bootmem_bootmap_pages(unsigned long pages)
 {
@@ -72,19 +85,6 @@ static void __init link_bootmem(bootmem_data_t *bdata)
 }
 
 /*
- * Given an initialised bdata, it returns the size of the boot bitmap
- */
-static unsigned long __init get_mapsize(bootmem_data_t *bdata)
-{
-	unsigned long mapsize;
-	unsigned long start = PFN_DOWN(bdata->node_boot_start);
-	unsigned long end = bdata->node_low_pfn;
-
-	mapsize = ((end - start) + 7) / 8;
-	return ALIGN(mapsize, sizeof(long));
-}
-
-/*
  * Called once to set up the allocator itself.
  */
 static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
@@ -108,6 +108,146 @@ static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
 	return mapsize;
 }
 
+unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
+				unsigned long startpfn, unsigned long endpfn)
+{
+	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
+}
+
+unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
+{
+	max_low_pfn = pages;
+	min_low_pfn = start;
+	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
+}
+
+static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
+{
+	struct page *page;
+	unsigned long pfn;
+	unsigned long i, count;
+	unsigned long idx;
+	unsigned long *map;
+	int gofast = 0;
+
+	BUG_ON(!bdata->node_bootmem_map);
+
+	count = 0;
+	/* first extant page of the node */
+	pfn = PFN_DOWN(bdata->node_boot_start);
+	idx = bdata->node_low_pfn - pfn;
+	map = bdata->node_bootmem_map;
+	/*
+	 * Check if we are aligned to BITS_PER_LONG pages. If so, we might
+	 * be able to free page orders of that size at once.
+	 */
+	if (!(pfn & (BITS_PER_LONG-1)))
+		gofast = 1;
+
+	for (i = 0; i < idx; ) {
+		unsigned long v = ~map[i / BITS_PER_LONG];
+
+		if (gofast && v == ~0UL) {
+			int order;
+
+			page = pfn_to_page(pfn);
+			count += BITS_PER_LONG;
+			order = ffs(BITS_PER_LONG) - 1;
+			__free_pages_bootmem(page, order);
+			i += BITS_PER_LONG;
+			page += BITS_PER_LONG;
+		} else if (v) {
+			unsigned long m;
+
+			page = pfn_to_page(pfn);
+			for (m = 1; m && i < idx; m<<=1, page++, i++) {
+				if (v & m) {
+					count++;
+					__free_pages_bootmem(page, 0);
+				}
+			}
+		} else {
+			i += BITS_PER_LONG;
+		}
+		pfn += BITS_PER_LONG;
+	}
+
+	/*
+	 * Now free the allocator bitmap itself, it's not
+	 * needed anymore:
+	 */
+	page = virt_to_page(bdata->node_bootmem_map);
+	idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
+	for (i = 0; i < idx; i++, page++)
+		__free_pages_bootmem(page, 0);
+	count += i;
+	bdata->node_bootmem_map = NULL;
+
+	return count;
+}
+
+unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
+{
+	register_page_bootmem_info_node(pgdat);
+	return free_all_bootmem_core(pgdat->bdata);
+}
+
+unsigned long __init free_all_bootmem(void)
+{
+	return free_all_bootmem_core(NODE_DATA(0)->bdata);
+}
+
+static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
+				     unsigned long size)
+{
+	unsigned long sidx, eidx;
+	unsigned long i;
+
+	BUG_ON(!size);
+
+	/* out range */
+	if (addr + size < bdata->node_boot_start ||
+		PFN_DOWN(addr) > bdata->node_low_pfn)
+		return;
+	/*
+	 * round down end of usable mem, partially free pages are
+	 * considered reserved.
+	 */
+
+	if (addr >= bdata->node_boot_start && addr < bdata->last_success)
+		bdata->last_success = addr;
+
+	/*
+	 * Round up to index to the range.
+	 */
+	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
+		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
+	else
+		sidx = 0;
+
+	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
+	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
+		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+
+	for (i = sidx; i < eidx; i++) {
+		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
+			BUG();
+	}
+}
+
+void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
+			      unsigned long size)
+{
+	free_bootmem_core(pgdat->bdata, physaddr, size);
+}
+
+void __init free_bootmem(unsigned long addr, unsigned long size)
+{
+	bootmem_data_t *bdata;
+	list_for_each_entry(bdata, &bdata_list, list)
+		free_bootmem_core(bdata, addr, size);
+}
+
 /*
  * Marks a particular physical memory range as unallocatable. Usable RAM
  * might be used for boot-time allocations - or it might get added
@@ -183,43 +323,36 @@ static void __init reserve_bootmem_core(bootmem_data_t *bdata,
 	}
 }
 
-static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
-				     unsigned long size)
+int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
+				unsigned long size, int flags)
 {
-	unsigned long sidx, eidx;
-	unsigned long i;
-
-	BUG_ON(!size);
-
-	/* out range */
-	if (addr + size < bdata->node_boot_start ||
-		PFN_DOWN(addr) > bdata->node_low_pfn)
-		return;
-	/*
-	 * round down end of usable mem, partially free pages are
-	 * considered reserved.
-	 */
-
-	if (addr >= bdata->node_boot_start && addr < bdata->last_success)
-		bdata->last_success = addr;
+	int ret;
 
-	/*
-	 * Round up to index to the range.
-	 */
-	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
-		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
-	else
-		sidx = 0;
+	ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
+	if (ret < 0)
+		return -ENOMEM;
+	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
+	return 0;
+}
 
-	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
-	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
-		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+int __init reserve_bootmem(unsigned long addr, unsigned long size,
+			   int flags)
+{
+	bootmem_data_t *bdata;
+	int ret;
 
-	for (i = sidx; i < eidx; i++) {
-		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
-			BUG();
+	list_for_each_entry(bdata, &bdata_list, list) {
+		ret = can_reserve_bootmem_core(bdata, addr, size, flags);
+		if (ret < 0)
+			return ret;
 	}
+	list_for_each_entry(bdata, &bdata_list, list)
+		reserve_bootmem_core(bdata, addr, size, flags);
+
+	return 0;
 }
+#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 /*
  * We 'merge' subsequent allocations to save space. We might 'lose'
@@ -371,140 +504,6 @@ found:
 	return ret;
 }
 
-static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
-{
-	struct page *page;
-	unsigned long pfn;
-	unsigned long i, count;
-	unsigned long idx;
-	unsigned long *map;
-	int gofast = 0;
-
-	BUG_ON(!bdata->node_bootmem_map);
-
-	count = 0;
-	/* first extant page of the node */
-	pfn = PFN_DOWN(bdata->node_boot_start);
-	idx = bdata->node_low_pfn - pfn;
-	map = bdata->node_bootmem_map;
-	/*
-	 * Check if we are aligned to BITS_PER_LONG pages. If so, we might
-	 * be able to free page orders of that size at once.
-	 */
-	if (!(pfn & (BITS_PER_LONG-1)))
-		gofast = 1;
-
-	for (i = 0; i < idx; ) {
-		unsigned long v = ~map[i / BITS_PER_LONG];
-
-		if (gofast && v == ~0UL) {
-			int order;
-
-			page = pfn_to_page(pfn);
-			count += BITS_PER_LONG;
-			order = ffs(BITS_PER_LONG) - 1;
-			__free_pages_bootmem(page, order);
-			i += BITS_PER_LONG;
-			page += BITS_PER_LONG;
-		} else if (v) {
-			unsigned long m;
-
-			page = pfn_to_page(pfn);
-			for (m = 1; m && i < idx; m<<=1, page++, i++) {
-				if (v & m) {
-					count++;
-					__free_pages_bootmem(page, 0);
-				}
-			}
-		} else {
-			i += BITS_PER_LONG;
-		}
-		pfn += BITS_PER_LONG;
-	}
-
-	/*
-	 * Now free the allocator bitmap itself, it's not
-	 * needed anymore:
-	 */
-	page = virt_to_page(bdata->node_bootmem_map);
-	idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
-	for (i = 0; i < idx; i++, page++)
-		__free_pages_bootmem(page, 0);
-	count += i;
-	bdata->node_bootmem_map = NULL;
-
-	return count;
-}
-
-unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
-				unsigned long startpfn, unsigned long endpfn)
-{
-	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
-}
-
-int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
-				unsigned long size, int flags)
-{
-	int ret;
-
-	ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
-	if (ret < 0)
-		return -ENOMEM;
-	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
-
-	return 0;
-}
-
-void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
-			      unsigned long size)
-{
-	free_bootmem_core(pgdat->bdata, physaddr, size);
-}
-
-unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
-{
-	register_page_bootmem_info_node(pgdat);
-	return free_all_bootmem_core(pgdat->bdata);
-}
-
-unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
-{
-	max_low_pfn = pages;
-	min_low_pfn = start;
-	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
-}
-
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-int __init reserve_bootmem(unsigned long addr, unsigned long size,
-			   int flags)
-{
-	bootmem_data_t *bdata;
-	int ret;
-
-	list_for_each_entry(bdata, &bdata_list, list) {
-		ret = can_reserve_bootmem_core(bdata, addr, size, flags);
-		if (ret < 0)
-			return ret;
-	}
-	list_for_each_entry(bdata, &bdata_list, list)
-		reserve_bootmem_core(bdata, addr, size, flags);
-
-	return 0;
-}
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
-
-void __init free_bootmem(unsigned long addr, unsigned long size)
-{
-	bootmem_data_t *bdata;
-	list_for_each_entry(bdata, &bdata_list, list)
-		free_bootmem_core(bdata, addr, size);
-}
-
-unsigned long __init free_all_bootmem(void)
-{
-	return free_all_bootmem_core(NODE_DATA(0)->bdata);
-}
-
 void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
 				      unsigned long goal)
 {
@@ -534,7 +533,6 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
 	return NULL;
 }
 
-
 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
 				   unsigned long align, unsigned long goal)
 {
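
[Illustrative note, not part of the patch] free_all_bootmem_core(), moved earlier in the file above, releases every page whose bootmem bit is clear; when the node start is aligned to BITS_PER_LONG pages, a fully clear bitmap word is released as one block of order ffs(BITS_PER_LONG) - 1 instead of page by page. The stand-alone sketch below mirrors that walk with a counter in place of __free_pages_bootmem(); every name in it is made up for illustration:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Count the pages the bootmem bitmap walk would hand back to the page allocator. */
static unsigned long count_freeable(const unsigned long *map, unsigned long idx,
				    int gofast)
{
	unsigned long i, count = 0;

	for (i = 0; i < idx; ) {
		unsigned long v = ~map[i / BITS_PER_LONG];	/* in v, a set bit means the page is free */

		if (gofast && v == ~0UL) {
			/* whole word free: the kernel frees one order-(ffs(BITS_PER_LONG)-1) block */
			count += BITS_PER_LONG;
			i += BITS_PER_LONG;
		} else if (v) {
			unsigned long m;

			/* mixed word: free the clear pages one by one (order 0) */
			for (m = 1; m && i < idx; m <<= 1, i++)
				if (v & m)
					count++;
		} else {
			/* fully reserved word: skip it */
			i += BITS_PER_LONG;
		}
	}
	return count;
}

int main(void)
{
	/* word 0: all pages free, word 1: all pages reserved */
	unsigned long map[2] = { 0UL, ~0UL };

	printf("%lu pages freeable\n",
	       count_freeable(map, 2 * BITS_PER_LONG, 1));
	return 0;
}
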