author      Jiang Liu <liuj97@gmail.com>                        2013-07-03 18:03:21 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>      2013-07-03 19:07:33 -0400
commit      3dcc0571cd64816309765b7c7e4691a4cadf2ee7 (patch)
tree        4f40128bbd9fcc70960aba596ff7fb25430554ab /mm
parent      170a5a7eb2bf10161197e5490fbc29ca4561aedb (diff)
mm: correctly update zone->managed_pages
Enhance adjust_managed_page_count() to adjust totalhigh_pages for highmem
pages, and convert code that adjusts totalram_pages directly to use
adjust_managed_page_count() instead, because the helper updates
totalram_pages, totalhigh_pages and zone->managed_pages together under
managed_page_count_lock.
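
For reference, adjust_managed_page_count() as it reads after this patch,
reassembled from the mm/page_alloc.c hunk at the bottom of this diff:

	void adjust_managed_page_count(struct page *page, long count)
	{
		/* All three counters move together under one lock. */
		spin_lock(&managed_page_count_lock);
		page_zone(page)->managed_pages += count;
		totalram_pages += count;
	#ifdef CONFIG_HIGHMEM
		/* Keep the highmem total in sync for highmem pages. */
		if (PageHighMem(page))
			totalhigh_pages += count;
	#endif
		spin_unlock(&managed_page_count_lock);
	}
	EXPORT_SYMBOL(adjust_managed_page_count);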
Remove inc_totalhigh_pages() and dec_totalhigh_pages() from the xen/balloon
driver, because adjust_managed_page_count() now adjusts totalhigh_pages
itself.
This patch also fixes two bugs (see the call-site sketch after this list):
1) the virtio_balloon driver now adjusts totalhigh_pages when reserving
   and unreserving pages;
2) memory_hotplug.c now adjusts totalhigh_pages when hot-removing memory.
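
The call-site pattern the balloon drivers converge on looks like this (a
hedged sketch only; the virtio_balloon and xen/balloon hunks fall outside
this 'mm'-limited diffstat):

	/* Sketch, not the verbatim driver diff: a balloon driver that
	 * captures a page from the kernel, and later returns it, updates
	 * totalram_pages, totalhigh_pages and zone->managed_pages through
	 * the common helper instead of touching the globals directly. */
	adjust_managed_page_count(page, -1);	/* page inflated into balloon */
	...
	adjust_managed_page_count(page, 1);	/* page deflated back to kernel */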
We still need to deal with the direct modifications of totalram_pages in
arch/powerpc/platforms/pseries/cmm.c, but that conversion needs help from
PPC experts.
[akpm@linux-foundation.org: remove ifdef, per Wanpeng Li, virtio_balloon.c cleanup, per Sergei]
[akpm@linux-foundation.org: export adjust_managed_page_count() to modules, for drivers/virtio/virtio_balloon.c]
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <sworddragon2@aol.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c        |  2 +-
-rw-r--r--  mm/memory_hotplug.c | 16 +++-------------
-rw-r--r--  mm/page_alloc.c     | 11 ++++++-----
3 files changed, 10 insertions(+), 19 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fe095158859e..83aff0a4d093 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1263,7 +1263,7 @@ static void __init gather_bootmem_prealloc(void)
 		 * side-effects, like CommitLimit going negative.
 		 */
 		if (h->order > (MAX_ORDER - 1))
-			totalram_pages += 1 << h->order;
+			adjust_managed_page_count(page, 1 << h->order);
 	}
 }
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 814ecb2d262f..5e34922124a3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -772,20 +772,13 @@ EXPORT_SYMBOL_GPL(__online_page_set_limits);
 
 void __online_page_increment_counters(struct page *page)
 {
-	totalram_pages++;
-
-#ifdef CONFIG_HIGHMEM
-	if (PageHighMem(page))
-		totalhigh_pages++;
-#endif
+	adjust_managed_page_count(page, 1);
 }
 EXPORT_SYMBOL_GPL(__online_page_increment_counters);
 
 void __online_page_free(struct page *page)
 {
-	ClearPageReserved(page);
-	init_page_count(page);
-	__free_page(page);
+	__free_reserved_page(page);
 }
 EXPORT_SYMBOL_GPL(__online_page_free);
 
@@ -983,7 +976,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
 		return ret;
 	}
 
-	zone->managed_pages += onlined_pages;
 	zone->present_pages += onlined_pages;
 
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
@@ -1572,15 +1564,13 @@ repeat:
 	/* reset pagetype flags and makes migrate type to be MOVABLE */
 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 	/* removal success */
-	zone->managed_pages -= offlined_pages;
+	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
 	zone->present_pages -= offlined_pages;
 
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
 	zone->zone_pgdat->node_present_pages -= offlined_pages;
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 
-	totalram_pages -= offlined_pages;
-
 	init_per_zone_wmark_min();
 
 	if (!populated_zone(zone)) {
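
__free_reserved_page(), which __online_page_free() now calls, folds
together exactly the three steps the removed lines performed; as defined
in include/linux/mm.h at the time:

	static inline void __free_reserved_page(struct page *page)
	{
		ClearPageReserved(page);	/* drop the PG_reserved marker */
		init_page_count(page);		/* reset the refcount to 1 */
		__free_page(page);		/* hand the page to the allocator */
	}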
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2437a7e17aba..1481439ee2e4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -780,11 +780,7 @@ void __init init_cma_reserved_pageblock(struct page *page)
 	set_page_refcounted(page);
 	set_pageblock_migratetype(page, MIGRATE_CMA);
 	__free_pages(page, pageblock_order);
-	totalram_pages += pageblock_nr_pages;
-#ifdef CONFIG_HIGHMEM
-	if (PageHighMem(page))
-		totalhigh_pages += pageblock_nr_pages;
-#endif
+	adjust_managed_page_count(page, pageblock_nr_pages);
 }
 #endif
 
@@ -5207,8 +5203,13 @@ void adjust_managed_page_count(struct page *page, long count)
 	spin_lock(&managed_page_count_lock);
 	page_zone(page)->managed_pages += count;
 	totalram_pages += count;
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page))
+		totalhigh_pages += count;
+#endif
 	spin_unlock(&managed_page_count_lock);
 }
+EXPORT_SYMBOL(adjust_managed_page_count);
 
 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
 {