author		Lai Jiangshan <laijs@cn.fujitsu.com>	2012-12-11 19:03:23 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 20:22:28 -0500
commit		74d42d8fe146e870c52bde3b1c692f86cc8ff844 (patch)
tree		0b17084ef171572fb9f1c6f01bcb667e638607ed /mm/memory_hotplug.c
parent		e455a9b92d6e19a3f0f7eb6f6241efa566a7e81a (diff)
memory_hotplug: ensure every online node has NORMAL memory
The old memory hotplug code and the new online/movable support can leave an
online node with no normal memory, but memory management misbehaves on nodes
that are online yet have no normal memory. For example, a task bound to such
a node fails every kernel allocation and therefore cannot create new tasks or
other kernel objects.
So we forbid onlining such non-normal-memory nodes here; we will enable them
again once we are prepared for it.
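[ Illustration, not part of the patch: a minimal userspace sketch of the
failure scenario above, assuming a hypothetical node 1 that was onlined as
movable-only. Whether a given kernel allocation honors the bind policy
depends on the allocation site, so this only models the reported symptom.
Compile with -lnuma. ]

#define _GNU_SOURCE
#include <numaif.h>		/* set_mempolicy(), MPOL_BIND; link with -lnuma */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

int main(void)
{
	/* Assumed topology: node 1 holds only MOVABLE memory. */
	unsigned long nodemask = 1UL << 1;

	if (set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask) * 8) < 0) {
		perror("set_mempolicy");
		return 1;
	}

	/*
	 * fork() needs kernel objects (task_struct, stacks, ...); on a
	 * node with no NORMAL memory those allocations can fail even
	 * though free movable memory exists.
	 */
	pid_t pid = fork();
	if (pid < 0)
		printf("fork failed: %s\n", strerror(errno));
	else if (pid == 0)
		_exit(0);
	else
		printf("fork succeeded\n");
	return 0;
}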
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--	mm/memory_hotplug.c | 40 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c370491bdb97..de9cb14ae753 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -581,6 +581,12 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 	return 0;
 }
 
+/* ensure every online node has NORMAL memory */
+static bool can_online_high_movable(struct zone *zone)
+{
+	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
+}
+
 /* check which state of node_states will be changed when online memory */
 static void node_states_check_changes_online(unsigned long nr_pages,
 	struct zone *zone, struct memory_notify *arg)
@@ -646,6 +652,12 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 	 */
 	zone = page_zone(pfn_to_page(pfn));
 
+	if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) &&
+	    !can_online_high_movable(zone)) {
+		unlock_memory_hotplug();
+		return -1;
+	}
+
 	if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) {
 		if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) {
 			unlock_memory_hotplug();
@@ -1054,6 +1066,30 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 	return offlined;
 }
 
+/* ensure the node has NORMAL memory if it is still online */
+static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
+{
+	struct pglist_data *pgdat = zone->zone_pgdat;
+	unsigned long present_pages = 0;
+	enum zone_type zt;
+
+	for (zt = 0; zt <= ZONE_NORMAL; zt++)
+		present_pages += pgdat->node_zones[zt].present_pages;
+
+	if (present_pages > nr_pages)
+		return true;
+
+	present_pages = 0;
+	for (; zt <= ZONE_MOVABLE; zt++)
+		present_pages += pgdat->node_zones[zt].present_pages;
+
+	/*
+	 * we can't offline the last normal memory until all
+	 * higher memory is offlined.
+	 */
+	return present_pages == 0;
+}
+
 /* check which state of node_states will be changed when offline memory */
 static void node_states_check_changes_offline(unsigned long nr_pages,
 	struct zone *zone, struct memory_notify *arg)
@@ -1141,6 +1177,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	node = zone_to_nid(zone);
 	nr_pages = end_pfn - start_pfn;
 
+	ret = -EINVAL;
+	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
+		goto out;
+
 	/* set above range as isolated */
 	ret = start_isolate_page_range(start_pfn, end_pfn,
 				       MIGRATE_MOVABLE, true);
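[ For reference: a standalone model of the can_offline_normal() arithmetic
above. The zone list and per-zone page counts are hypothetical; the point is
the two-step test: the offline is allowed if some memory at or below
ZONE_NORMAL survives, or if every higher zone is already empty. ]

#include <stdbool.h>
#include <stdio.h>

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM,
		 ZONE_MOVABLE, MAX_NR_ZONES };

/* hypothetical present_pages per zone for one node */
static unsigned long present[MAX_NR_ZONES] = {
	[ZONE_DMA]	= 4096,
	[ZONE_NORMAL]	= 262144,
	[ZONE_MOVABLE]	= 131072,
};

static bool can_offline_normal_model(unsigned long nr_pages)
{
	unsigned long pages = 0;
	enum zone_type zt;

	/* pages at or below ZONE_NORMAL */
	for (zt = 0; zt <= ZONE_NORMAL; zt++)
		pages += present[zt];
	if (pages > nr_pages)
		return true;	/* some normal memory survives the offline */

	/* offlining the last normal memory: higher zones must be empty */
	pages = 0;
	for (; zt <= ZONE_MOVABLE; zt++)
		pages += present[zt];
	return pages == 0;
}

int main(void)
{
	printf("%d\n", can_offline_normal_model(4096));	/* 1: normal memory remains */
	printf("%d\n", can_offline_normal_model(266240));	/* 0: ZONE_MOVABLE still populated */
	return 0;
}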