author    H. Peter Anvin <hpa@linux.intel.com>  2014-02-07 14:27:30 -0500
committer H. Peter Anvin <hpa@linux.intel.com>  2014-02-07 14:27:30 -0500
commit    a3b072cd180c12e8fe0ece9487b9065808327640 (patch)
tree      62b982041be84748852d77cdf6ca5639ef40858f /arch/x86/mm
parent    75a1ba5b2c529db60ca49626bcaf0bddf4548438 (diff)
parent    081cd62a010f97b5bc1d2b0cd123c5abc692b68a (diff)

Merge tag 'efi-urgent' into x86/urgent

 * Avoid WARN_ON() when mapping BGRT on Baytrail (EFI 32-bit).

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

Diffstat (limited to 'arch/x86/mm')

-rw-r--r--  arch/x86/mm/gup.c      |  8
-rw-r--r--  arch/x86/mm/init_32.c  |  2
-rw-r--r--  arch/x86/mm/init_64.c  |  2
-rw-r--r--  arch/x86/mm/memtest.c  |  2
-rw-r--r--  arch/x86/mm/numa.c     | 52
-rw-r--r--  arch/x86/mm/srat.c     |  5

6 files changed, 62 insertions(+), 9 deletions(-)

diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 0596e8e0cc19..207d9aef662d 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -108,8 +108,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 
 static inline void get_head_page_multiple(struct page *page, int nr)
 {
-	VM_BUG_ON(page != compound_head(page));
-	VM_BUG_ON(page_count(page) == 0);
+	VM_BUG_ON_PAGE(page != compound_head(page), page);
+	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	atomic_add(nr, &page->_count);
 	SetPageReferenced(page);
 }
@@ -135,7 +135,7 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
 	head = pte_page(pte);
 	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 	do {
-		VM_BUG_ON(compound_head(page) != head);
+		VM_BUG_ON_PAGE(compound_head(page) != head, page);
 		pages[*nr] = page;
 		if (PageTail(page))
 			get_huge_page_tail(page);
@@ -212,7 +212,7 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
 	head = pte_page(pte);
 	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 	do {
-		VM_BUG_ON(compound_head(page) != head);
+		VM_BUG_ON_PAGE(compound_head(page) != head, page);
 		pages[*nr] = page;
 		if (PageTail(page))
			get_huge_page_tail(page);
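
The gup.c hunks switch the open-coded VM_BUG_ON() assertions to VM_BUG_ON_PAGE(), so that when a compound-page invariant fails under CONFIG_DEBUG_VM the offending struct page is dumped along with the BUG report. A minimal userspace sketch of the same "assert with context dump" pattern (the struct, macro and field names below are invented for illustration; they are not the kernel's):

    /*
     * Illustrative analogue only -- not the kernel's mmdebug.h. The point is
     * that passing the object into the assertion lets the failing check print
     * the object's state before aborting.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct page_info { unsigned long flags; int count; };

    #define BUG_ON_OBJ(cond, obj)                                              \
            do {                                                               \
                    if (cond) {                                                \
                            fprintf(stderr, "BUG: %s (flags=%#lx count=%d)\n", \
                                    #cond, (obj)->flags, (obj)->count);        \
                            abort();                                           \
                    }                                                          \
            } while (0)

    int main(void)
    {
            struct page_info pg = { .flags = 0x400, .count = 0 };

            /* Mirrors the shape of VM_BUG_ON_PAGE(page_count(page) == 0, page). */
            BUG_ON_OBJ(pg.count == 0, &pg);
            return 0;
    }
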
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 5bdc5430597c..e39504878aec 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -665,7 +665,7 @@ void __init initmem_init(void)
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
 
-	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
 	sparse_memory_present_with_active_regions(0);
 
 #ifdef CONFIG_FLATMEM

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 104d56a9245f..f35c66c5959a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -643,7 +643,7 @@ kernel_physical_mapping_init(unsigned long start,
 #ifndef CONFIG_NUMA
 void __init initmem_init(void)
 {
-	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
 }
 #endif
 
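
The init_32.c and init_64.c hunks are the mechanical part of an interface change: memblock_set_node() now takes the memblock_type it should operate on, so callers state explicitly whether they are tagging memblock.memory or memblock.reserved with a node id. A simplified standalone model of that idea (region_table, set_node() and the sample data are stand-ins for illustration, not the memblock implementation):

    #include <stdio.h>

    struct region { unsigned long long base, size; int nid; };
    struct region_table { struct region *regions; int cnt; };

    /* Tag every region in the given table that overlaps [base, base + size). */
    static void set_node(unsigned long long base, unsigned long long size,
                         struct region_table *type, int nid)
    {
            for (int i = 0; i < type->cnt; i++) {
                    struct region *r = &type->regions[i];

                    if (r->base < base + size && base < r->base + r->size)
                            r->nid = nid;
            }
    }

    int main(void)
    {
            struct region mem[] = { { 0x0, 0x1000, -1 }, { 0x1000, 0x1000, -1 } };
            struct region_table memory = { mem, 2 };

            set_node(0, ~0ULL, &memory, 0); /* like initmem_init() on non-NUMA */
            for (int i = 0; i < memory.cnt; i++)
                    printf("region %d -> node %d\n", i, mem[i].nid);
            return 0;
    }
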
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 8dabbed409ee..1e9da795767a 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -74,7 +74,7 @@ static void __init do_one_pass(u64 pattern, u64 start, u64 end)
 	u64 i;
 	phys_addr_t this_start, this_end;
 
-	for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
+	for_each_free_mem_range(i, NUMA_NO_NODE, &this_start, &this_end, NULL) {
 		this_start = clamp_t(phys_addr_t, this_start, start, end);
 		this_end = clamp_t(phys_addr_t, this_end, start, end);
 		if (this_start < this_end) {
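
In memtest.c the node filter passed to for_each_free_mem_range() changes from MAX_NUMNODES to NUMA_NO_NODE, the dedicated "don't filter by node" sentinel for memblock iterators. A tiny sketch of how such a sentinel is typically consumed (the range array and loop are made up for the example; only the NUMA_NO_NODE value of -1 comes from the kernel):

    #include <stdio.h>

    #define NUMA_NO_NODE (-1)

    struct range { unsigned long long start, end; int nid; };

    int main(void)
    {
            struct range free_ranges[] = {
                    { 0x1000, 0x2000, 0 },
                    { 0x8000, 0x9000, 1 },
            };
            int want_nid = NUMA_NO_NODE; /* "any node", as in do_one_pass() */

            for (int i = 0; i < 2; i++) {
                    if (want_nid != NUMA_NO_NODE && free_ranges[i].nid != want_nid)
                            continue;
                    printf("testing [%#llx-%#llx)\n",
                           free_ranges[i].start, free_ranges[i].end);
            }
            return 0;
    }
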
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index c85da7bb6b60..81b2750f3666 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -491,7 +491,16 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 
 	for (i = 0; i < mi->nr_blks; i++) {
 		struct numa_memblk *mb = &mi->blk[i];
-		memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
+		memblock_set_node(mb->start, mb->end - mb->start,
+				  &memblock.memory, mb->nid);
+
+		/*
+		 * At this time, all memory regions reserved by memblock are
+		 * used by the kernel. Set the nid in memblock.reserved will
+		 * mark out all the nodes the kernel resides in.
+		 */
+		memblock_set_node(mb->start, mb->end - mb->start,
+				  &memblock.reserved, mb->nid);
 	}
 
 	/*
@@ -553,6 +562,30 @@ static void __init numa_init_array(void)
 	}
 }
 
+static void __init numa_clear_kernel_node_hotplug(void)
+{
+	int i, nid;
+	nodemask_t numa_kernel_nodes;
+	unsigned long start, end;
+	struct memblock_type *type = &memblock.reserved;
+
+	/* Mark all kernel nodes. */
+	for (i = 0; i < type->cnt; i++)
+		node_set(type->regions[i].nid, numa_kernel_nodes);
+
+	/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
+	for (i = 0; i < numa_meminfo.nr_blks; i++) {
+		nid = numa_meminfo.blk[i].nid;
+		if (!node_isset(nid, numa_kernel_nodes))
+			continue;
+
+		start = numa_meminfo.blk[i].start;
+		end = numa_meminfo.blk[i].end;
+
+		memblock_clear_hotplug(start, end - start);
+	}
+}
+
 static int __init numa_init(int (*init_func)(void))
 {
 	int i;
@@ -565,7 +598,12 @@ static int __init numa_init(int (*init_func)(void))
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
 	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
-	WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
+	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
+				  MAX_NUMNODES));
+	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
+				  MAX_NUMNODES));
+	/* In case that parsing SRAT failed. */
+	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
 	numa_reset_distance();
 
 	ret = init_func();
@@ -601,6 +639,16 @@ static int __init numa_init(int (*init_func)(void))
 		numa_clear_node(i);
 	}
 	numa_init_array();
+
+	/*
+	 * At very early time, the kernel have to use some memory such as
+	 * loading the kernel image. We cannot prevent this anyway. So any
+	 * node the kernel resides in should be un-hotpluggable.
+	 *
+	 * And when we come here, numa_init() won't fail.
+	 */
+	numa_clear_kernel_node_hotplug();
+
 	return 0;
 }
 
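
The new numa_clear_kernel_node_hotplug() ties the pieces together: every node that owns a memblock.reserved region (memory the kernel is already using) is collected into a node mask, and the hotplug flag is then cleared from all memory on those nodes so they are never treated as hot-removable. A standalone sketch of that two-pass logic (the arrays and flags below are simplified stand-ins, not memblock structures):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NODES 4

    struct blk { unsigned long long start, end; int nid; bool hotplug; };

    int main(void)
    {
            /* Reserved regions: the kernel image happens to sit on node 0. */
            struct blk reserved[] = { { 0x1000000, 0x2000000, 0, false } };
            /* All memory as the SRAT described it; both nodes marked hotplug. */
            struct blk meminfo[] = {
                    { 0x0,        0x80000000,  0, true },
                    { 0x80000000, 0x100000000, 1, true },
            };
            bool kernel_node[MAX_NODES] = { false };
            int i;

            /* Pass 1: mark all kernel nodes (cf. the memblock.reserved loop). */
            for (i = 0; i < 1; i++)
                    kernel_node[reserved[i].nid] = true;

            /* Pass 2: clear the hotplug flag for memory in kernel nodes. */
            for (i = 0; i < 2; i++)
                    if (kernel_node[meminfo[i].nid])
                            meminfo[i].hotplug = false;

            for (i = 0; i < 2; i++)
                    printf("node %d: hotplug=%s\n", meminfo[i].nid,
                           meminfo[i].hotplug ? "yes" : "no");
            return 0;
    }
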
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 5ecf65117e6f..1953e9c9391a 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -191,6 +191,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 		(unsigned long long) start, (unsigned long long) end - 1,
 		hotpluggable ? " hotplug" : "");
 
+	/* Mark hotplug range in memblock. */
+	if (hotpluggable && memblock_mark_hotplug(start, ma->length))
+		pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
+			(unsigned long long)start, (unsigned long long)end - 1);
+
 	return 0;
 out_err_bad_srat:
 	bad_srat();