author	Xishi Qiu <qiuxishi@huawei.com>	2014-10-13 18:55:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 20:18:26 -0400
commit	bd5cfb8977fbb49d9350f7c81cf1516142e35a6a (patch)
tree	3dd7aed530f32f54c7f84df2cfa8f8377a6a5e61
parent	9470dd5d352985ba907df7554845f87a4b8f9ea5 (diff)
arch/x86/mm/numa.c: fix boot failure when all nodes are hotpluggable
If all the nodes are marked hotpluggable, allocating node data will fail,
because __next_mem_range_rev() skips hotpluggable memory regions and
numa_clear_kernel_node_hotplug() is only called after node data has been
allocated:

numa_init()
    ...
    ret = init_func();                           // this will mark the hotpluggable flag from SRAT
    ...
    memblock_set_bottom_up(false);
    ...
    ret = numa_register_memblks(&numa_meminfo);  // this will alloc node data (pglist_data)
    ...
    numa_clear_kernel_node_hotplug();            // in case all the nodes are hotpluggable
    ...

numa_register_memblks()
    setup_node_data()
        memblock_find_in_range_node()
            __memblock_find_range_top_down()
                for_each_mem_range_rev()
                    __next_mem_range_rev()

This patch moves numa_clear_kernel_node_hotplug() into numa_register_memblks(),
so the hotpluggable flag on kernel nodes is cleared before node data is
allocated; the allocation then cannot fail even when all the nodes are
hotpluggable.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Gu Zheng <guz.fnst@cn.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	arch/x86/mm/numa.c	| 89
1 file changed, 45 insertions(+), 44 deletions(-)
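To see why the allocation fails, note that the reverse memblock iterator used
by the top-down search skips any region marked MEMBLOCK_HOTPLUG, so when every
node's memory carries that flag the search finds nothing. The following
stand-alone toy model (not kernel code; the region layout, sizes and helper
names are illustrative only) sketches that behaviour:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of a memblock region carrying a hotplug attribute. */
    struct region {
    	unsigned long base;
    	unsigned long size;
    	bool hotpluggable;
    };

    /*
     * Toy top-down search, modelled loosely on the kernel's
     * __memblock_find_range_top_down(): walk regions from highest to lowest
     * and skip hotpluggable ones, the way __next_mem_range_rev() does.
     */
    static unsigned long find_top_down(const struct region *regs, int nr,
    				   unsigned long size)
    {
    	for (int i = nr - 1; i >= 0; i--) {
    		if (regs[i].hotpluggable)
    			continue;	/* the skip that starves the allocation */
    		if (regs[i].size >= size)
    			return regs[i].base + regs[i].size - size;
    	}
    	return 0;	/* 0 means "no suitable range found" */
    }

    int main(void)
    {
    	/* Every node's memory marked hotpluggable, as in the reported failure. */
    	struct region all_hotplug[] = {
    		{ 0x00000000UL, 0x80000000UL, true },
    		{ 0x80000000UL, 0x80000000UL, true },
    	};

    	unsigned long addr = find_top_down(all_hotplug, 2, 0x4000);

    	printf("pglist_data allocation %s\n", addr ? "succeeds" : "fails");
    	return 0;
    }

With every region flagged hotpluggable the search returns 0; the patch avoids
that situation by clearing the flag on kernel nodes before setup_node_data()
runs.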
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index a32b706c401a..4e1e5709fe17 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -478,6 +478,42 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
 	return true;
 }
 
+static void __init numa_clear_kernel_node_hotplug(void)
+{
+	int i, nid;
+	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
+	unsigned long start, end;
+	struct memblock_region *r;
+
+	/*
+	 * At this time, all memory regions reserved by memblock are
+	 * used by the kernel. Set the nid in memblock.reserved will
+	 * mark out all the nodes the kernel resides in.
+	 */
+	for (i = 0; i < numa_meminfo.nr_blks; i++) {
+		struct numa_memblk *mb = &numa_meminfo.blk[i];
+
+		memblock_set_node(mb->start, mb->end - mb->start,
+				  &memblock.reserved, mb->nid);
+	}
+
+	/* Mark all kernel nodes. */
+	for_each_memblock(reserved, r)
+		node_set(r->nid, numa_kernel_nodes);
+
+	/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
+	for (i = 0; i < numa_meminfo.nr_blks; i++) {
+		nid = numa_meminfo.blk[i].nid;
+		if (!node_isset(nid, numa_kernel_nodes))
+			continue;
+
+		start = numa_meminfo.blk[i].start;
+		end = numa_meminfo.blk[i].end;
+
+		memblock_clear_hotplug(start, end - start);
+	}
+}
+
 static int __init numa_register_memblks(struct numa_meminfo *mi)
 {
 	unsigned long uninitialized_var(pfn_align);
@@ -496,6 +532,15 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 	}
 
 	/*
+	 * At very early time, the kernel have to use some memory such as
+	 * loading the kernel image. We cannot prevent this anyway. So any
+	 * node the kernel resides in should be un-hotpluggable.
+	 *
+	 * And when we come here, alloc node data won't fail.
+	 */
+	numa_clear_kernel_node_hotplug();
+
+	/*
 	 * If sections array is gonna be used for pfn -> nid mapping, check
 	 * whether its granularity is fine enough.
 	 */
@@ -554,41 +599,6 @@ static void __init numa_init_array(void)
 	}
 }
 
-static void __init numa_clear_kernel_node_hotplug(void)
-{
-	int i, nid;
-	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
-	unsigned long start, end;
-	struct memblock_region *r;
-
-	/*
-	 * At this time, all memory regions reserved by memblock are
-	 * used by the kernel. Set the nid in memblock.reserved will
-	 * mark out all the nodes the kernel resides in.
-	 */
-	for (i = 0; i < numa_meminfo.nr_blks; i++) {
-		struct numa_memblk *mb = &numa_meminfo.blk[i];
-		memblock_set_node(mb->start, mb->end - mb->start,
-				  &memblock.reserved, mb->nid);
-	}
-
-	/* Mark all kernel nodes. */
-	for_each_memblock(reserved, r)
-		node_set(r->nid, numa_kernel_nodes);
-
-	/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
-	for (i = 0; i < numa_meminfo.nr_blks; i++) {
-		nid = numa_meminfo.blk[i].nid;
-		if (!node_isset(nid, numa_kernel_nodes))
-			continue;
-
-		start = numa_meminfo.blk[i].start;
-		end = numa_meminfo.blk[i].end;
-
-		memblock_clear_hotplug(start, end - start);
-	}
-}
-
 static int __init numa_init(int (*init_func)(void))
 {
 	int i;
@@ -643,15 +653,6 @@ static int __init numa_init(int (*init_func)(void))
 	}
 	numa_init_array();
 
-	/*
-	 * At very early time, the kernel have to use some memory such as
-	 * loading the kernel image. We cannot prevent this anyway. So any
-	 * node the kernel resides in should be un-hotpluggable.
-	 *
-	 * And when we come here, numa_init() won't fail.
-	 */
-	numa_clear_kernel_node_hotplug();
-
 	return 0;
 }
 
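For reference, the call flow after this patch, sketched in the same style as
the commit message above (ordering taken from the hunks; arguments omitted and
indentation illustrative):

numa_init()
    ...
    ret = init_func();                           // marks the hotpluggable flag from SRAT
    ...
    ret = numa_register_memblks(&numa_meminfo);
              numa_clear_kernel_node_hotplug();  // clears MEMBLOCK_HOTPLUG on kernel nodes first
              ...
              setup_node_data();                 // node data (pglist_data) allocation no longer
                                                 // sees only hotpluggable regions
    ...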