author      Jiang Liu <liuj97@gmail.com>                      2013-07-03 18:03:21 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>    2013-07-03 19:07:33 -0400
commit      3dcc0571cd64816309765b7c7e4691a4cadf2ee7 (patch)
tree        4f40128bbd9fcc70960aba596ff7fb25430554ab
parent      170a5a7eb2bf10161197e5490fbc29ca4561aedb (diff)
mm: correctly update zone->managed_pages
Enhance adjust_managed_page_count() to adjust totalhigh_pages for highmem
pages, and change code which directly adjusts totalram_pages to use
adjust_managed_page_count() instead, because it adjusts totalram_pages,
totalhigh_pages and zone->managed_pages together in a safe way.

Remove inc_totalhigh_pages() and dec_totalhigh_pages() from the xen/balloon
driver because adjust_managed_page_count() already adjusts totalhigh_pages.

This patch also fixes two bugs:

1) enhances the virtio_balloon driver to adjust totalhigh_pages when
   reserving/unreserving pages.

2) enhances memory_hotplug.c to adjust totalhigh_pages when hot-removing
   memory.

We still need to deal with modifications of totalram_pages in
arch/powerpc/platforms/pseries/cmm.c, but need help from PPC experts.

[akpm@linux-foundation.org: remove ifdef, per Wanpeng Li; virtio_balloon.c cleanup, per Sergei]
[akpm@linux-foundation.org: export adjust_managed_page_count() to modules, for drivers/virtio/virtio_balloon.c]
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <sworddragon2@aol.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
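For illustration only (this sketch is not part of the commit): after this change, a driver that temporarily takes pages away from the system calls adjust_managed_page_count() instead of poking totalram_pages and totalhigh_pages by hand. The my_drv_* helpers below are hypothetical; adjust_managed_page_count(), alloc_page() and __free_page() are the real kernel interfaces being exercised.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: hide one page from the rest of the system. */
static struct page *my_drv_grab_page(void)
{
        struct page *page = alloc_page(GFP_KERNEL);

        if (!page)
                return NULL;
        /* Updates totalram_pages, totalhigh_pages and zone->managed_pages together. */
        adjust_managed_page_count(page, -1);
        return page;
}

/* Hypothetical helper: give the page back to the allocator and the counters. */
static void my_drv_return_page(struct page *page)
{
        adjust_managed_page_count(page, 1);
        __free_page(page);
}

The same call now also keeps totalhigh_pages correct for highmem pages, which is what the virtio_balloon and memory hotplug changes below rely on.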
-rw-r--r--   drivers/virtio/virtio_balloon.c    7
-rw-r--r--   drivers/xen/balloon.c             23
-rw-r--r--   mm/hugetlb.c                       2
-rw-r--r--   mm/memory_hotplug.c               16
-rw-r--r--   mm/page_alloc.c                   11
5 files changed, 19 insertions, 40 deletions
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bd3ae324a1a2..0098810df69d 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -148,7 +148,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
                 }
                 set_page_pfns(vb->pfns + vb->num_pfns, page);
                 vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
-                totalram_pages--;
+                adjust_managed_page_count(page, -1);
         }
 
         /* Did we get any? */
@@ -163,8 +163,9 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
 
         /* Find pfns pointing at start of each page, get pages and free them. */
         for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
-                balloon_page_free(balloon_pfn_to_page(pfns[i]));
-                totalram_pages++;
+                struct page *page = balloon_pfn_to_page(pfns[i]);
+                balloon_page_free(page);
+                adjust_managed_page_count(page, 1);
         }
 }
 
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 930fb6817901..c8aab4e97833 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -89,14 +89,6 @@ EXPORT_SYMBOL_GPL(balloon_stats);
 /* We increase/decrease in batches which fit in a page */
 static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
 
-#ifdef CONFIG_HIGHMEM
-#define inc_totalhigh_pages() (totalhigh_pages++)
-#define dec_totalhigh_pages() (totalhigh_pages--)
-#else
-#define inc_totalhigh_pages() do {} while (0)
-#define dec_totalhigh_pages() do {} while (0)
-#endif
-
 /* List of ballooned pages, threaded through the mem_map array. */
 static LIST_HEAD(ballooned_pages);
 
@@ -132,9 +124,7 @@ static void __balloon_append(struct page *page)
 static void balloon_append(struct page *page)
 {
         __balloon_append(page);
-        if (PageHighMem(page))
-                dec_totalhigh_pages();
-        totalram_pages--;
+        adjust_managed_page_count(page, -1);
 }
 
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
@@ -151,13 +141,12 @@ static struct page *balloon_retrieve(bool prefer_highmem)
         page = list_entry(ballooned_pages.next, struct page, lru);
         list_del(&page->lru);
 
-        if (PageHighMem(page)) {
+        if (PageHighMem(page))
                 balloon_stats.balloon_high--;
-                inc_totalhigh_pages();
-        } else
+        else
                 balloon_stats.balloon_low--;
 
-        totalram_pages++;
+        adjust_managed_page_count(page, 1);
 
         return page;
 }
@@ -372,9 +361,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 #endif
 
                 /* Relinquish the page back to the allocator. */
-                ClearPageReserved(page);
-                init_page_count(page);
-                __free_page(page);
+                __free_reserved_page(page);
         }
 
         balloon_stats.current_pages += rc;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fe095158859e..83aff0a4d093 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1263,7 +1263,7 @@ static void __init gather_bootmem_prealloc(void)
                  * side-effects, like CommitLimit going negative.
                  */
                 if (h->order > (MAX_ORDER - 1))
-                        totalram_pages += 1 << h->order;
+                        adjust_managed_page_count(page, 1 << h->order);
         }
 }
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 814ecb2d262f..5e34922124a3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -772,20 +772,13 @@ EXPORT_SYMBOL_GPL(__online_page_set_limits);
 
 void __online_page_increment_counters(struct page *page)
 {
-        totalram_pages++;
-
-#ifdef CONFIG_HIGHMEM
-        if (PageHighMem(page))
-                totalhigh_pages++;
-#endif
+        adjust_managed_page_count(page, 1);
 }
 EXPORT_SYMBOL_GPL(__online_page_increment_counters);
 
 void __online_page_free(struct page *page)
 {
-        ClearPageReserved(page);
-        init_page_count(page);
-        __free_page(page);
+        __free_reserved_page(page);
 }
 EXPORT_SYMBOL_GPL(__online_page_free);
 
@@ -983,7 +976,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
                 return ret;
         }
 
-        zone->managed_pages += onlined_pages;
         zone->present_pages += onlined_pages;
 
         pgdat_resize_lock(zone->zone_pgdat, &flags);
@@ -1572,15 +1564,13 @@ repeat:
         /* reset pagetype flags and makes migrate type to be MOVABLE */
         undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
         /* removal success */
-        zone->managed_pages -= offlined_pages;
+        adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
         zone->present_pages -= offlined_pages;
 
         pgdat_resize_lock(zone->zone_pgdat, &flags);
         zone->zone_pgdat->node_present_pages -= offlined_pages;
         pgdat_resize_unlock(zone->zone_pgdat, &flags);
 
-        totalram_pages -= offlined_pages;
-
         init_per_zone_wmark_min();
 
         if (!populated_zone(zone)) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2437a7e17aba..1481439ee2e4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -780,11 +780,7 @@ void __init init_cma_reserved_pageblock(struct page *page)
         set_page_refcounted(page);
         set_pageblock_migratetype(page, MIGRATE_CMA);
         __free_pages(page, pageblock_order);
-        totalram_pages += pageblock_nr_pages;
-#ifdef CONFIG_HIGHMEM
-        if (PageHighMem(page))
-                totalhigh_pages += pageblock_nr_pages;
-#endif
+        adjust_managed_page_count(page, pageblock_nr_pages);
 }
 #endif
 
@@ -5207,8 +5203,13 @@ void adjust_managed_page_count(struct page *page, long count)
         spin_lock(&managed_page_count_lock);
         page_zone(page)->managed_pages += count;
         totalram_pages += count;
+#ifdef CONFIG_HIGHMEM
+        if (PageHighMem(page))
+                totalhigh_pages += count;
+#endif
         spin_unlock(&managed_page_count_lock);
 }
+EXPORT_SYMBOL(adjust_managed_page_count);
 
 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
 {