author      KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>    2011-05-24 20:11:32 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>      2011-05-25 11:39:09 -0400
commit      1b79acc91115ba47e744b70bb166b77bd94f5855 (patch)
tree        9097834522de3840845368312c09b5ad4a98e5e5
parent      839a4fcc8af7412be2efd11f0bd0504757f79f08 (diff)
mm, mem-hotplug: recalculate lowmem_reserve when memory hotplug occurs
Currently, memory hotplug calls setup_per_zone_wmarks() and
calculate_zone_inactive_ratio(), but doesn't call
setup_per_zone_lowmem_reserve().
This means the number of reserved pages isn't updated when memory
hotplug occurs. This patch fixes it.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
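For context on why routing both hotplug paths through init_per_zone_wmark_min() is enough: that helper recomputes all of the per-zone tunables in one place, including the lowmem reserves. A simplified sketch of its body, loosely following mm/page_alloc.c of this era (approximate; the vmstat threshold refresh and some details are omitted):

/*
 * Simplified sketch of mm/page_alloc.c:init_per_zone_wmark_min() as it
 * looks after this patch (approximate, not the literal source).
 */
int __meminit init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;

	/* min_free_kbytes scales with the square root of lowmem size */
	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;

	setup_per_zone_wmarks();		/* zone min/low/high watermarks */
	setup_per_zone_lowmem_reserve();	/* the piece hotplug was missing */
	setup_per_zone_inactive_ratio();	/* covers calculate_zone_inactive_ratio() */
	return 0;
}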
-rw-r--r--   include/linux/mm.h    | 2
-rw-r--r--   mm/memory_hotplug.c   | 9
-rw-r--r--   mm/page_alloc.c       | 4
3 files changed, 8 insertions, 7 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 57d3d5fade16..e173cd297d88 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1381,7 +1381,7 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
 				unsigned long, enum memmap_context);
 extern void setup_per_zone_wmarks(void);
-extern void calculate_zone_inactive_ratio(struct zone *zone);
+extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
 extern void __init mmap_init(void);
 extern void show_mem(unsigned int flags);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2c4edc459fb0..59ac18fefd65 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -459,8 +459,9 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
 	zone_pcp_update(zone);
 
 	mutex_unlock(&zonelists_mutex);
-	setup_per_zone_wmarks();
-	calculate_zone_inactive_ratio(zone);
+
+	init_per_zone_wmark_min();
+
 	if (onlined_pages) {
 		kswapd_run(zone_to_nid(zone));
 		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
@@ -893,8 +894,8 @@ repeat:
 	zone->zone_pgdat->node_present_pages -= offlined_pages;
 	totalram_pages -= offlined_pages;
 
-	setup_per_zone_wmarks();
-	calculate_zone_inactive_ratio(zone);
+	init_per_zone_wmark_min();
+
 	if (!node_present_pages(node)) {
 		node_clear_state(node, N_HIGH_MEMORY);
 		kswapd_stop(node);
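What the old calls never refreshed is zone->lowmem_reserve[], which is derived from zone->present_pages, exactly the counts that online_pages()/offline_pages() change above. A heavily simplified sketch of how setup_per_zone_lowmem_reserve() computes those reserves (modelled on mm/page_alloc.c of this era; the ratio clamping and the totalreserve_pages update are left out):

/*
 * Simplified sketch of setup_per_zone_lowmem_reserve(): every lower zone
 * reserves a slice of each higher zone's pages, scaled by
 * sysctl_lowmem_reserve_ratio[], so that e.g. GFP_KERNEL allocations
 * cannot exhaust ZONE_DMA.  Because it walks zone->present_pages, it has
 * to be rerun whenever hotplug changes those counts.
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long present_pages = zone->present_pages;

			zone->lowmem_reserve[j] = 0;

			idx = j;
			while (idx) {
				struct zone *lower_zone;

				idx--;
				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = present_pages /
					sysctl_lowmem_reserve_ratio[idx];
				present_pages += lower_zone->present_pages;
			}
		}
	}
}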
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 56d0be36be9d..e133cea36932 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5094,7 +5094,7 @@ void setup_per_zone_wmarks(void)
  *    1TB     101        10GB
  *   10TB     320        32GB
  */
-void __meminit calculate_zone_inactive_ratio(struct zone *zone)
+static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
 {
 	unsigned int gb, ratio;
 
@@ -5140,7 +5140,7 @@ static void __meminit setup_per_zone_inactive_ratio(void)
  *	8192MB:	11584k
  *	16384MB:	16384k
  */
-static int __init init_per_zone_wmark_min(void)
+int __meminit init_per_zone_wmark_min(void)
 {
 	unsigned long lowmem_kbytes;
 
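A note on the annotation changes in these last hunks: calculate_zone_inactive_ratio() becomes static because, with the mm.h prototype gone, it is now only reached through setup_per_zone_inactive_ratio() inside page_alloc.c. Conversely, init_per_zone_wmark_min() can no longer be __init, since __init code is freed once boot completes and the hotplug paths now call it at runtime; __meminit keeps it resident whenever CONFIG_MEMORY_HOTPLUG is enabled. A minimal illustration of the two annotations (both function names below are hypothetical, not from the patch):

#include <linux/init.h>

/* __init: freed after boot, so it must never be reachable from the
 * memory-hotplug paths that run at arbitrary times. */
static int __init boot_only_setup(void)
{
	return 0;
}

/* __meminit: only discarded when CONFIG_MEMORY_HOTPLUG is off, so
 * online_pages()/offline_pages() can still call it after boot. */
static int __meminit hotplug_safe_setup(void)
{
	return 0;
}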