Diffstat (limited to 'mm/sparse.c')
-rw-r--r--  mm/sparse.c  33
1 file changed, 20 insertions, 13 deletions
diff --git a/mm/sparse.c b/mm/sparse.c
index a8bc7d364deb..c7bb952400c8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -273,10 +273,11 @@ static unsigned long *__kmalloc_section_usemap(void)
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
-					 unsigned long count)
+					 unsigned long size)
 {
-	unsigned long section_nr;
-
+	unsigned long goal, limit;
+	unsigned long *p;
+	int nid;
 	/*
 	 * A page may contain usemaps for other sections preventing the
 	 * page being freed and making a section unremovable while
@@ -287,8 +288,17 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 	 * from the same section as the pgdat where possible to avoid
 	 * this problem.
 	 */
-	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
-	return alloc_bootmem_section(usemap_size() * count, section_nr);
+	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+	limit = goal + (1UL << PA_SECTION_SHIFT);
+	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
+again:
+	p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
+					  SMP_CACHE_BYTES, goal, limit);
+	if (!p && limit) {
+		limit = 0;
+		goto again;
+	}
+	return p;
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -332,9 +342,9 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 #else
 static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
-					 unsigned long count)
+					 unsigned long size)
 {
-	return NULL;
+	return alloc_bootmem_node_nopanic(pgdat, size);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -352,13 +362,10 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
 	int size = usemap_size();
 
 	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
-							 usemap_count);
+							  size * usemap_count);
 	if (!usemap) {
-		usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
-		if (!usemap) {
-			printk(KERN_WARNING "%s: allocation failed\n", __func__);
-			return;
-		}
+		printk(KERN_WARNING "%s: allocation failed\n", __func__);
+		return;
 	}
 
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
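For illustration only, here is a minimal userspace sketch of the retry pattern the first two hunks introduce: the allocation is first attempted inside a restricted [goal, limit) physical range (the memory section holding the pgdat), and if that fails the limit is dropped and the allocation is retried without the range restriction. The names and values below (alloc_in_range, alloc_with_fallback, the example goal/limit) are hypothetical stand-ins, not the kernel's ___alloc_bootmem_node_nopanic() interface.

/*
 * Hypothetical sketch of the "try inside the section, then retry
 * anywhere" fallback used in the patch above.  The allocator is a
 * stand-in that merely pretends range-restricted requests fail.
 */
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a range-restricted early allocation (assumption). */
static void *alloc_in_range(size_t size, unsigned long goal, unsigned long limit)
{
	(void)goal;
	if (limit)		/* pretend the restricted attempt fails */
		return NULL;
	return malloc(size);
}

static void *alloc_with_fallback(size_t size, unsigned long goal, unsigned long limit)
{
	void *p;

again:
	p = alloc_in_range(size, goal, limit);
	if (!p && limit) {
		/* Could not satisfy the request inside the range; retry anywhere. */
		limit = 0;
		goto again;
	}
	return p;
}

int main(void)
{
	void *p = alloc_with_fallback(128, 0x100000UL, 0x140000UL);

	printf("allocation %s\n", p ? "succeeded" : "failed");
	free(p);
	return 0;
}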