path: root/mm/memory_hotplug.c
Diffstat (limited to 'mm/memory_hotplug.c')
 mm/memory_hotplug.c | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 70df5c0d957e..841a077d5aeb 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -26,7 +26,7 @@
 
 extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
 			  unsigned long size);
-static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nr_pages = PAGES_PER_SECTION;
@@ -34,8 +34,15 @@ static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	int zone_type;
 
 	zone_type = zone - pgdat->node_zones;
+	if (!populated_zone(zone)) {
+		int ret = 0;
+		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
+		if (ret < 0)
+			return ret;
+	}
 	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
 	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
+	return 0;
 }
 
 extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
@@ -50,7 +57,11 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 	if (ret < 0)
 		return ret;
 
-	__add_zone(zone, phys_start_pfn);
+	ret = __add_zone(zone, phys_start_pfn);
+
+	if (ret < 0)
+		return ret;
+
 	return register_new_memory(__pfn_to_section(phys_start_pfn));
 }
 
@@ -116,6 +127,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	unsigned long flags;
 	unsigned long onlined_pages = 0;
 	struct zone *zone;
+	int need_zonelists_rebuild = 0;
 
 	/*
 	 * This doesn't need a lock to do pfn_to_page().
@@ -128,6 +140,14 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 
+	/*
+	 * If this zone is not populated, then it is not in zonelist.
+	 * This means the page allocator ignores this zone.
+	 * So, zonelist must be updated after online.
+	 */
+	if (!populated_zone(zone))
+		need_zonelists_rebuild = 1;
+
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page = pfn_to_page(pfn + i);
 		online_page(page);
@@ -138,5 +158,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 
 	setup_per_zone_pages_min();
 
+	if (need_zonelists_rebuild)
+		build_all_zonelists();
+	vm_total_pages = nr_free_pagecache_pages();
 	return 0;
 }
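The pattern this patch establishes is worth spelling out: __add_zone() used to return void, so a failed zone initialization went unnoticed. Now init_currently_empty_zone() may fail (for example, if an allocation it performs fails), __add_zone() forwards that error, and __add_section() stops before registering the new memory section. Below is a minimal, self-contained user-space sketch of that propagation chain; it is not kernel code, and every identifier in it (add_section, add_zone, init_empty_zone, simulate_failure) is a hypothetical stand-in for the kernel function it mirrors.

/*
 * User-space sketch of the error-propagation chain added by this patch.
 * All names are hypothetical stand-ins, not kernel APIs.
 */
#include <stdio.h>
#include <errno.h>

struct zone {
	int populated;		/* mirrors populated_zone(zone) */
};

/* Stand-in for init_currently_empty_zone(): setup work that can fail. */
static int init_empty_zone(struct zone *zone, int simulate_failure)
{
	if (simulate_failure)
		return -ENOMEM;	/* e.g. an internal allocation failed */
	zone->populated = 1;
	return 0;
}

/*
 * Mirrors the new __add_zone(): initialize only unpopulated zones,
 * and return the failure instead of continuing with a half-built zone.
 */
static int add_zone(struct zone *zone, int simulate_failure)
{
	if (!zone->populated) {
		int ret = init_empty_zone(zone, simulate_failure);
		if (ret < 0)
			return ret;
	}
	/* memmap_init_zone()/zonetable_add() equivalents would run here */
	return 0;
}

/*
 * Mirrors the new __add_section(): bail out before the
 * register_new_memory() step if the zone could not be set up.
 */
static int add_section(struct zone *zone, int simulate_failure)
{
	int ret = add_zone(zone, simulate_failure);
	if (ret < 0)
		return ret;
	return 0;	/* register_new_memory() equivalent would run here */
}

int main(void)
{
	struct zone zone = { .populated = 0 };

	printf("first attempt:  %d\n", add_section(&zone, 1)); /* -ENOMEM */
	printf("second attempt: %d\n", add_section(&zone, 0)); /* 0 */
	return 0;
}

The online_pages() half of the patch applies the same defer-until-safe idea to the zonelists: an unpopulated zone is not in any zonelist, so the function only records that a rebuild is needed (need_zonelists_rebuild), onlines the pages, and then calls build_all_zonelists() and refreshes vm_total_pages. The page allocator therefore never sees the new zone in a half-initialized state.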