author    Heiko Carstens <heiko.carstens@de.ibm.com>    2008-05-14 19:05:52 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-05-14 22:11:15 -0400
commit    76cdd58e558669366adfaded436fda01b30cce3e (patch)
tree      deaccb499752c928919693edea65f9ea4c1fe756 /mm
parent    1c12c4cf9411eb130b245fa8d0fbbaf989477c7b (diff)
memory_hotplug: always initialize pageblock bitmap
Trying to online a new memory section that was added via memory hotplug
sometimes results in crashes when the new pages are added via __free_page.
The reason is that the pageblock bitmap isn't initialized and hence contains
random stuff.  That means get_pageblock_migratetype() also returns random
stuff and therefore

	list_add(&page->lru,
		 &zone->free_area[order].free_list[migratetype]);

in __free_one_page() tries to do a list_add to something that isn't even
necessarily a list.

This happens since 86051ca5eaf5e560113ec7673462804c54284456 ("mm: fix usemap
initialization"), which makes sure that the pageblock bitmap only gets
initialized for pages present in a zone.  Unfortunately, for hot-added memory
the zones "grow" after the memmap and the pageblock memmap have been
initialized, which means the new pages have an uninitialized bitmap.

To solve this, the calls to grow_zone_span() and grow_pgdat_span() are moved
to __add_zone() just before the initialization happens.  The patch also moves
the two functions since __add_zone() is the only caller and I didn't want to
add a forward declaration.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
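The failure chain above can be demonstrated outside the kernel.  Below is a
minimal userspace sketch, not kernel code: the usemap layout, read_migratetype()
and the constants are simplified stand-ins for the pageblock bitmap,
get_pageblock_migratetype() and the 2.6.26-era MIGRATE_TYPES value, all of which
are assumptions here.  It only shows that a bitmap full of random bits yields
migratetype indices that can fall outside the valid range, so
free_list[migratetype] need not be a valid list head.

/*
 * Userspace sketch (simplified stand-ins, not the kernel implementation):
 * random bits in a pageblock bitmap produce random migratetypes, some of
 * which are outside the valid range.
 */
#include <stdio.h>
#include <stdlib.h>

#define MIGRATETYPE_BITS	3	/* the kernel uses 3 bits per pageblock */
#define NR_MIGRATETYPES		5	/* assumption: MIGRATE_TYPES around 2.6.26 */
#define NR_BLOCKS		8

/* read the 3-bit migratetype field of one pageblock out of the bitmap */
static unsigned int read_migratetype(const unsigned char *usemap,
				     unsigned long block)
{
	unsigned long bit = block * MIGRATETYPE_BITS;
	unsigned int word = usemap[bit / 8] | (usemap[bit / 8 + 1] << 8);

	return (word >> (bit % 8)) & ((1 << MIGRATETYPE_BITS) - 1);
}

int main(void)
{
	unsigned char usemap[NR_BLOCKS];	/* stand-in for a section's usemap */
	unsigned long block;

	/* fill with random bytes to mimic an uninitialized hot-added usemap */
	srand(42);
	for (block = 0; block < sizeof(usemap); block++)
		usemap[block] = rand() & 0xff;

	for (block = 0; block < NR_BLOCKS; block++) {
		unsigned int mt = read_migratetype(usemap, block);

		printf("pageblock %lu: migratetype %u%s\n", block, mt,
		       mt >= NR_MIGRATETYPES ?
		       " (out of range: free_list[mt] is not a valid list head)" :
		       " (in range, but still random)");
	}
	return 0;
}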
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory_hotplug.c  83
-rw-r--r--  mm/page_alloc.c       3
2 files changed, 45 insertions, 41 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 656ad1c65422..833f854eabe5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -159,17 +159,58 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
+static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
+			   unsigned long end_pfn)
+{
+	unsigned long old_zone_end_pfn;
+
+	zone_span_writelock(zone);
+
+	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	if (start_pfn < zone->zone_start_pfn)
+		zone->zone_start_pfn = start_pfn;
+
+	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+				zone->zone_start_pfn;
+
+	zone_span_writeunlock(zone);
+}
+
+static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
+			    unsigned long end_pfn)
+{
+	unsigned long old_pgdat_end_pfn =
+		pgdat->node_start_pfn + pgdat->node_spanned_pages;
+
+	if (start_pfn < pgdat->node_start_pfn)
+		pgdat->node_start_pfn = start_pfn;
+
+	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+					pgdat->node_start_pfn;
+}
+
 static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nr_pages = PAGES_PER_SECTION;
 	int nid = pgdat->node_id;
 	int zone_type;
+	unsigned long flags;
 
 	zone_type = zone - pgdat->node_zones;
-	if (!zone->wait_table)
-		return init_currently_empty_zone(zone, phys_start_pfn,
-						 nr_pages, MEMMAP_HOTPLUG);
+	if (!zone->wait_table) {
+		int ret;
+
+		ret = init_currently_empty_zone(zone, phys_start_pfn,
+						nr_pages, MEMMAP_HOTPLUG);
+		if (ret)
+			return ret;
+	}
+	pgdat_resize_lock(zone->zone_pgdat, &flags);
+	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
+	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
+			phys_start_pfn + nr_pages);
+	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 	memmap_init_zone(nr_pages, nid, zone_type,
 			 phys_start_pfn, MEMMAP_HOTPLUG);
 	return 0;
@@ -295,36 +336,6 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
 }
 EXPORT_SYMBOL_GPL(__remove_pages);
 
-static void grow_zone_span(struct zone *zone,
-			   unsigned long start_pfn, unsigned long end_pfn)
-{
-	unsigned long old_zone_end_pfn;
-
-	zone_span_writelock(zone);
-
-	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	if (start_pfn < zone->zone_start_pfn)
-		zone->zone_start_pfn = start_pfn;
-
-	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
-				zone->zone_start_pfn;
-
-	zone_span_writeunlock(zone);
-}
-
-static void grow_pgdat_span(struct pglist_data *pgdat,
-			    unsigned long start_pfn, unsigned long end_pfn)
-{
-	unsigned long old_pgdat_end_pfn =
-		pgdat->node_start_pfn + pgdat->node_spanned_pages;
-
-	if (start_pfn < pgdat->node_start_pfn)
-		pgdat->node_start_pfn = start_pfn;
-
-	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
-					pgdat->node_start_pfn;
-}
-
 void online_page(struct page *page)
 {
 	totalram_pages++;
@@ -363,7 +374,6 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 
 int online_pages(unsigned long pfn, unsigned long nr_pages)
 {
-	unsigned long flags;
 	unsigned long onlined_pages = 0;
 	struct zone *zone;
 	int need_zonelists_rebuild = 0;
@@ -391,11 +401,6 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	 * memory_block->state_mutex.
 	 */
 	zone = page_zone(pfn_to_page(pfn));
-	pgdat_resize_lock(zone->zone_pgdat, &flags);
-	grow_zone_span(zone, pfn, pfn + nr_pages);
-	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
-	pgdat_resize_unlock(zone->zone_pgdat, &flags);
-
 	/*
 	 * If this zone is not populated, then it is not in zonelist.
 	 * This means the page allocator ignores this zone.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bdd5c432c426..63835579323a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2862,8 +2862,6 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 
 	zone->zone_start_pfn = zone_start_pfn;
 
-	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
-
 	zone_init_free_lists(zone);
 
 	return 0;
@@ -3433,6 +3431,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		ret = init_currently_empty_zone(zone, zone_start_pfn,
 						size, MEMMAP_EARLY);
 		BUG_ON(ret);
+		memmap_init(size, nid, j, zone_start_pfn);
 		zone_start_pfn += size;
 	}
 }
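As a closing illustration, the ordering the patch establishes can be sketched
in plain userspace C.  None of this is the kernel API: zone_stub, grow_span()
and pfn_in_span() are hypothetical stand-ins for struct zone, grow_zone_span()
and the in-span check that memmap_init_zone() has performed since commit
86051ca5.  The point is only that the pageblock bitmap gets initialized for a
pfn precisely when the zone already spans it, which is why __add_zone() now
grows the spans before calling memmap_init_zone().

/*
 * Userspace sketch (stand-in types/functions, not the kernel API) of why
 * the zone span must be grown before the memmap/pageblock initialization:
 * the in-span check skips pfns that the zone does not yet cover.
 */
#include <stdio.h>

struct zone_stub {			/* hypothetical stand-in for struct zone */
	unsigned long start_pfn;
	unsigned long spanned_pages;
};

/* stand-in for grow_zone_span(): extend the span to cover [start, end) */
static void grow_span(struct zone_stub *z, unsigned long start, unsigned long end)
{
	unsigned long old_end = z->start_pfn + z->spanned_pages;

	if (start < z->start_pfn)
		z->start_pfn = start;
	z->spanned_pages = (end > old_end ? end : old_end) - z->start_pfn;
}

/* stand-in for the "is this pfn inside the zone span" check in memmap init */
static int pfn_in_span(const struct zone_stub *z, unsigned long pfn)
{
	return z->start_pfn <= pfn && pfn < z->start_pfn + z->spanned_pages;
}

int main(void)
{
	struct zone_stub z = { .start_pfn = 0, .spanned_pages = 0x8000 };
	unsigned long section = 0x8000;		/* first pfn of a hot-added section */

	/* pre-patch order: memmap (and usemap) init ran before the span grew */
	printf("init before grow: pfn %#lx in span? %d -> bitmap stays random\n",
	       section, pfn_in_span(&z, section));

	/* patched order: grow the span first, then initialize the memmap */
	grow_span(&z, section, section + 0x1000);
	printf("init after grow:  pfn %#lx in span? %d -> bitmap gets set\n",
	       section, pfn_in_span(&z, section));
	return 0;
}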