Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--  mm/memory_hotplug.c  |  31
1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 29d8693d0c61..1bf4807cb21e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -31,6 +31,7 @@
 #include <linux/stop_machine.h>
 #include <linux/hugetlb.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 
 #include <asm/tlbflush.h>
 
@@ -1066,6 +1067,16 @@ out:
 }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 
+static void reset_node_present_pages(pg_data_t *pgdat)
+{
+	struct zone *z;
+
+	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+		z->present_pages = 0;
+
+	pgdat->node_present_pages = 0;
+}
+
 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
 static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 {
@@ -1096,6 +1107,21 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 	build_all_zonelists(pgdat, NULL);
 	mutex_unlock(&zonelists_mutex);
 
+	/*
+	 * zone->managed_pages is set to an approximate value in
+	 * free_area_init_core(), which will cause
+	 * /sys/devices/system/node/nodeX/meminfo to report wrong data.
+	 * So reset it to 0 before any memory is onlined.
+	 */
+	reset_node_managed_pages(pgdat);
+
+	/*
+	 * When memory is hot-added, all the memory is in offline state. So
+	 * clear all zones' present_pages because they will be updated in
+	 * online_pages() and offline_pages().
+	 */
+	reset_node_present_pages(pgdat);
+
 	return pgdat;
 }
 
@@ -1912,7 +1938,6 @@ void try_offline_node(int nid)
 	unsigned long start_pfn = pgdat->node_start_pfn;
 	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
 	unsigned long pfn;
-	struct page *pgdat_page = virt_to_page(pgdat);
 	int i;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
@@ -1941,10 +1966,6 @@ void try_offline_node(int nid)
 	node_set_offline(nid);
 	unregister_one_node(nid);
 
-	if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page))
-		/* node data is allocated from boot memory */
-		return;
-
 	/* free waittable in each zone */
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		struct zone *zone = pgdat->node_zones + i;
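
Note: reset_node_managed_pages(), called in the hotadd_new_pgdat() hunk above, is not defined by this patch; it is pulled in through the newly added <linux/bootmem.h> include. As a rough sketch only (its body is an assumption here, mirrored from the reset_node_present_pages() helper added above), the helper presumably zeroes the per-zone managed_pages counters in the same way:

/* Sketch (assumption), not part of this patch: the real helper is declared
 * in <linux/bootmem.h> and defined with the bootmem code, not in this file. */
void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	/* Zero each zone's managed_pages so nodeX/meminfo starts out clean;
	 * the counters are rebuilt as memory is actually onlined. */
	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}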