Diffstat (limited to 'mm/nobootmem.c')
-rw-r--r--	mm/nobootmem.c	150
1 file changed, 75 insertions(+), 75 deletions(-)
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 1983fb1c7026..405573010f99 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -105,27 +105,35 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
 		__free_pages_bootmem(pfn_to_page(i), 0);
 }
 
+static unsigned long __init __free_memory_core(phys_addr_t start,
+				 phys_addr_t end)
+{
+	unsigned long start_pfn = PFN_UP(start);
+	unsigned long end_pfn = min_t(unsigned long,
+				      PFN_DOWN(end), max_low_pfn);
+
+	if (start_pfn > end_pfn)
+		return 0;
+
+	__free_pages_memory(start_pfn, end_pfn);
+
+	return end_pfn - start_pfn;
+}
+
 unsigned long __init free_low_memory_core_early(int nodeid)
 {
 	unsigned long count = 0;
-	phys_addr_t start, end;
+	phys_addr_t start, end, size;
 	u64 i;
 
-	/* free reserved array temporarily so that it's treated as free area */
-	memblock_free_reserved_regions();
+	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
+		count += __free_memory_core(start, end);
 
-	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
-		unsigned long start_pfn = PFN_UP(start);
-		unsigned long end_pfn = min_t(unsigned long,
-					      PFN_DOWN(end), max_low_pfn);
-		if (start_pfn < end_pfn) {
-			__free_pages_memory(start_pfn, end_pfn);
-			count += end_pfn - start_pfn;
-		}
-	}
+	/* free range that is used for reserved array if we allocate it */
+	size = get_allocated_memblock_reserved_regions_info(&start);
+	if (size)
+		count += __free_memory_core(start, start + size);
 
-	/* put region array back? */
-	memblock_reserve_reserved_regions();
 	return count;
 }
 
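The new __free_memory_core() helper above centralizes the partial-page handling the old loop did inline: the start of a free range is rounded up to a whole page frame with PFN_UP(), the end is rounded down with PFN_DOWN(), the result is capped at max_low_pfn, and ranges too small to hold a full page are skipped. A minimal userspace sketch of that arithmetic (the PAGE_SHIFT value and the max_low_pfn cap below are assumptions for illustration, not taken from this patch):

/*
 * Illustrative userspace sketch (not kernel code) of the page-frame
 * clamping done by __free_memory_core().  PAGE_SHIFT and the
 * max_low_pfn value used in main() are assumptions for the example.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* round start up */
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)			/* round end down */

static unsigned long pages_to_free(unsigned long start, unsigned long end,
				   unsigned long max_low_pfn)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = PFN_DOWN(end);

	if (end_pfn > max_low_pfn)	/* never touch anything above lowmem */
		end_pfn = max_low_pfn;
	if (start_pfn > end_pfn)	/* range too small to hold a full page */
		return 0;
	return end_pfn - start_pfn;	/* whole pages that would be freed */
}

int main(void)
{
	/* 0x1800..0x5800 covers pages 2..4 once the edges are trimmed */
	printf("%lu pages\n", pages_to_free(0x1800, 0x5800, 0x100000));
	return 0;
}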
@@ -274,86 +282,85 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
 	return ___alloc_bootmem(size, align, goal, limit);
 }
 
-/**
- * __alloc_bootmem_node - allocate boot memory from a specific node
- * @pgdat: node to allocate from
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may fall back to any node in the system if the specified node
- * can not hold the requested memory.
- *
- * The function panics if the request can not be satisfied.
- */
-void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
-				   unsigned long align, unsigned long goal)
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+				unsigned long size,
+				unsigned long align,
+				unsigned long goal,
+				unsigned long limit)
 {
 	void *ptr;
 
-	if (WARN_ON_ONCE(slab_is_available()))
-		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
 again:
 	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
-					 goal, -1ULL);
+					 goal, limit);
 	if (ptr)
 		return ptr;
 
 	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
-					 goal, -1ULL);
-	if (!ptr && goal) {
+					 goal, limit);
+	if (ptr)
+		return ptr;
+
+	if (goal) {
 		goal = 0;
 		goto again;
 	}
-	return ptr;
+
+	return NULL;
 }
 
-void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
 					unsigned long align, unsigned long goal)
 {
-	return __alloc_bootmem_node(pgdat, size, align, goal);
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
-				    unsigned long section_nr)
+void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+				    unsigned long align, unsigned long goal,
+				    unsigned long limit)
 {
-	unsigned long pfn, goal, limit;
+	void *ptr;
 
-	pfn = section_nr_to_pfn(section_nr);
-	goal = pfn << PAGE_SHIFT;
-	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
+	if (ptr)
+		return ptr;
 
-	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
-					 SMP_CACHE_BYTES, goal, limit);
+	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+	panic("Out of memory");
+	return NULL;
 }
-#endif
 
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+/**
+ * __alloc_bootmem_node - allocate boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * can not hold the requested memory.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
 				   unsigned long align, unsigned long goal)
 {
-	void *ptr;
-
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
-					 goal, -1ULL);
-	if (ptr)
-		return ptr;
+	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
+}
 
-	return __alloc_bootmem_nopanic(size, align, goal);
+void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+					unsigned long align, unsigned long goal)
+{
+	return __alloc_bootmem_node(pgdat, size, align, goal);
 }
 
 #ifndef ARCH_LOW_ADDRESS_LIMIT
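With this hunk, the node-aware boot allocators funnel through ___alloc_bootmem_node_nopanic(), which tries the requested node first, then any node (MAX_NUMNODES), and finally drops the preferred goal address and retries before returning NULL; ___alloc_bootmem_node() layers the printk()/panic() on top for callers that cannot tolerate failure. A rough standalone sketch of that fallback order, where try_alloc() is a hypothetical stub standing in for __alloc_memory_core_early():

/*
 * Simplified sketch of the fallback order in ___alloc_bootmem_node_nopanic().
 * try_alloc() is a made-up stub standing in for __alloc_memory_core_early();
 * here it always fails so the whole cascade is exercised.
 */
#include <stddef.h>

#define ANY_NODE	(-1)	/* stand-in for MAX_NUMNODES */

static void *try_alloc(int nid, size_t size, size_t align,
		       unsigned long goal, unsigned long limit)
{
	(void)nid; (void)size; (void)align; (void)goal; (void)limit;
	return NULL;		/* stub: pretend the range search found nothing */
}

static void *alloc_nopanic(int nid, size_t size, size_t align,
			   unsigned long goal, unsigned long limit)
{
	void *ptr;

again:
	/* 1) the preferred node, honouring both goal and limit */
	ptr = try_alloc(nid, size, align, goal, limit);
	if (ptr)
		return ptr;

	/* 2) any node, same goal and limit */
	ptr = try_alloc(ANY_NODE, size, align, goal, limit);
	if (ptr)
		return ptr;

	/* 3) give up on the preferred starting address and retry once more */
	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;		/* the panicking wrapper would stop here */
}

int main(void)
{
	/* every attempt fails with the stub above, so NULL comes back */
	return alloc_nopanic(0, 64, 8, 0x1000000, 0) == NULL ? 0 : 1;
}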
@@ -397,16 +404,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
 				       unsigned long align, unsigned long goal)
 {
-	void *ptr;
-
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
-				      goal, ARCH_LOW_ADDRESS_LIMIT);
-	if (ptr)
-		return ptr;
-
-	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
-				     goal, ARCH_LOW_ADDRESS_LIMIT);
+	return ___alloc_bootmem_node(pgdat, size, align, goal,
+				     ARCH_LOW_ADDRESS_LIMIT);
 }
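The low-memory variant now reuses the same core path as well, simply passing ARCH_LOW_ADDRESS_LIMIT as the upper bound instead of open-coding two __alloc_memory_core_early() attempts. A hypothetical sketch of that wrapper layering (the names and the limit value below are illustrative, not copied from the kernel headers):

/*
 * Hypothetical sketch of how the wrappers layer after this change;
 * alloc_node_limited() mirrors the role of ___alloc_bootmem_node().
 */
#include <stdio.h>
#include <stdlib.h>

#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL	/* assumed low-memory bound */

/* one core path that takes an explicit upper limit */
static void *alloc_node_limited(int nid, size_t size, size_t align,
				unsigned long goal, unsigned long limit)
{
	(void)nid; (void)align; (void)goal;
	printf("alloc %zu bytes below %#lx\n", size, limit);
	return malloc(size);
}

/* low-memory variant: same core call, just capped at the low-address limit */
static void *alloc_low_node(int nid, size_t size, size_t align,
			    unsigned long goal)
{
	return alloc_node_limited(nid, size, align, goal,
				  ARCH_LOW_ADDRESS_LIMIT);
}

int main(void)
{
	void *p = alloc_low_node(0, 256, 64, 0);

	free(p);
	return p ? 0 : 1;
}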