 arch/x86/mm/init_64.c | 18 ++----------------
 mm/memory_hotplug.c   | 16 ++--------------
 mm/page_alloc.c       |  9 +--------
 3 files changed, 5 insertions(+), 38 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b7bdf7bebf3b..ec312a92b137 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -712,36 +712,22 @@ EXPORT_SYMBOL_GPL(arch_add_memory);
 
 static void __meminit free_pagetable(struct page *page, int order)
 {
-        struct zone *zone;
-        bool bootmem = false;
         unsigned long magic;
         unsigned int nr_pages = 1 << order;
 
         /* bootmem page has reserved flag */
         if (PageReserved(page)) {
                 __ClearPageReserved(page);
-                bootmem = true;
 
                 magic = (unsigned long)page->lru.next;
                 if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
                         while (nr_pages--)
                                 put_page_bootmem(page++);
                 } else
-                        __free_pages_bootmem(page, order);
+                        while (nr_pages--)
+                                free_reserved_page(page++);
         } else
                 free_pages((unsigned long)page_address(page), order);
-
-        /*
-         * SECTION_INFO pages and MIX_SECTION_INFO pages
-         * are all allocated by bootmem.
-         */
-        if (bootmem) {
-                zone = page_zone(page);
-                zone_span_writelock(zone);
-                zone->present_pages += nr_pages;
-                zone_span_writeunlock(zone);
-                totalram_pages += nr_pages;
-        }
 }
 
 static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
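
With this hunk, free_pagetable() no longer calls __free_pages_bootmem() or fixes up zone->present_pages and totalram_pages by hand; each reserved (bootmem-allocated) page-table page is handed to free_reserved_page(), which is expected to do that accounting itself. The snippet below is a simplified sketch of the per-page work free_reserved_page() from <linux/mm.h> is relied upon to do; the _sketch name is illustrative, this is not code from the patch, and adjust_managed_page_count() is assumed to be the serialized accounting helper of this kernel generation.

#include <linux/mm.h>

/*
 * Simplified sketch of what free_reserved_page() is relied upon to do for
 * each bootmem page: drop the PG_reserved marker, reset the refcount, hand
 * the page back to the buddy allocator, and keep the managed-/total-page
 * counters consistent. The real helper lives in <linux/mm.h>.
 */
static inline void free_reserved_page_sketch(struct page *page)
{
        ClearPageReserved(page);                /* no longer a reserved bootmem page */
        init_page_count(page);                  /* refcount back to 1 */
        __free_page(page);                      /* return to the buddy allocator */
        adjust_managed_page_count(page, 1);     /* totalram/managed accounting */
}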
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6096cb918735..814ecb2d262f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -101,12 +101,9 @@ void get_page_bootmem(unsigned long info, struct page *page,
         atomic_inc(&page->_count);
 }
 
-/* reference to __meminit __free_pages_bootmem is valid
- * so use __ref to tell modpost not to generate a warning */
-void __ref put_page_bootmem(struct page *page)
+void put_page_bootmem(struct page *page)
 {
         unsigned long type;
-        static DEFINE_MUTEX(ppb_lock);
 
         type = (unsigned long) page->lru.next;
         BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
@@ -116,17 +113,8 @@ void __ref put_page_bootmem(struct page *page)
                 ClearPagePrivate(page);
                 set_page_private(page, 0);
                 INIT_LIST_HEAD(&page->lru);
-
-                /*
-                 * Please refer to comment for __free_pages_bootmem()
-                 * for why we serialize here.
-                 */
-                mutex_lock(&ppb_lock);
-                __free_pages_bootmem(page, 0);
-                mutex_unlock(&ppb_lock);
-                totalram_pages++;
+                free_reserved_page(page);
         }
-
 }
 
 #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
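
In put_page_bootmem(), the local ppb_lock mutex, the explicit totalram_pages++ and the __free_pages_bootmem() call all go away: the page is released through free_reserved_page(), and the hunk appears to rely on that helper serializing the counter update internally rather than at each call site. The fragment below only illustrates that pattern of moving the lock into a shared accounting helper; the lock and function names are hypothetical, not the kernel's.

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>

/*
 * Hypothetical names: the point is that callers such as put_page_bootmem()
 * no longer take an ad-hoc mutex, because one shared helper serializes the
 * zone->managed_pages / totalram_pages update for all of them.
 */
static DEFINE_SPINLOCK(page_count_lock_sketch);

static void account_freed_pages_sketch(struct page *page, unsigned long nr)
{
        spin_lock(&page_count_lock_sketch);
        page_zone(page)->managed_pages += nr;
        totalram_pages += nr;
        spin_unlock(&page_count_lock_sketch);
}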
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 93f292a60cb0..2437a7e17aba 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -745,14 +745,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
         local_irq_restore(flags);
 }
 
-/*
- * Read access to zone->managed_pages is safe because it's unsigned long,
- * but we still need to serialize writers. Currently all callers of
- * __free_pages_bootmem() except put_page_bootmem() should only be used
- * at boot time. So for shorter boot time, we shift the burden to
- * put_page_bootmem() to serialize writers.
- */
-void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
+void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
         unsigned int nr_pages = 1 << order;
         unsigned int loop;
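
The comment deleted here had justified the old arrangement: writers of zone->managed_pages needed serializing because put_page_bootmem() could reach __free_pages_bootmem() after boot. With that caller converted in the previous hunk, every remaining caller runs during boot-time memory initialization, so the annotation tightens from __meminit to __init and the function can be discarded with the rest of the init text once boot completes. A minimal illustration of what the two annotations imply, using placeholder functions that are not part of this patch:

#include <linux/init.h>
#include <linux/kernel.h>

/*
 * Illustration only: an __init function is placed in .init.text and its
 * memory is released after boot, so nothing reachable later (for example a
 * memory-hotplug path) may call it. An __meminit function is additionally
 * retained when CONFIG_MEMORY_HOTPLUG=y so it can still run for hot-added
 * memory.
 */
static int __init boot_only_example(void)
{
        pr_info("runs during boot, discarded afterwards\n");
        return 0;
}

static int __meminit hotplug_capable_example(void)
{
        pr_info("may also run when memory is hot-added\n");
        return 0;
}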