author		KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2008-04-29 03:58:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-29 11:05:58 -0400
commit		86051ca5eaf5e560113ec7673462804c54284456 (patch)
tree		e2e6fd3df079f85c43c7e164569a8c962f91e537 /mm
parent		a01e035ebb552223c03f2d9138ffc73f2d4d3965 (diff)
mm: fix usemap initialization
The usemap must be initialized only when the pfn is within the zone. If not,
the pageblock flags are written outside the usemap bitmap and memory is
corrupted.
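As a rough illustration of the corruption (a minimal userspace sketch, not
kernel code: the constants, the example zone span, and the simplified
bitidx() modeled on pfn_to_bitidx() are all assumptions):

	#include <stdio.h>

	#define PAGEBLOCK_ORDER    10UL	/* assumed: 1024 pages per pageblock */
	#define NR_PAGEBLOCK_BITS   4UL	/* usemap bits kept per pageblock */

	static unsigned long zone_start_pfn = 2048;	/* example zone span: */
	static unsigned long spanned_pages  = 8192;	/* pfns 2048..10239  */

	/* simplified model of pfn_to_bitidx(): bit index into the usemap,
	 * which holds (8192 / 1024) * 4 = 32 bits, so valid indices 0..31 */
	static unsigned long bitidx(unsigned long pfn)
	{
		return ((pfn - zone_start_pfn) >> PAGEBLOCK_ORDER)
			* NR_PAGEBLOCK_BITS;
	}

	int main(void)
	{
		printf("in-zone pfn 4096   -> bitidx %lu\n", bitidx(4096));
		/* a pfn below zone_start_pfn underflows the subtraction:
		 * the huge index lands far outside the usemap allocation */
		printf("below-zone pfn 0   -> bitidx %lu\n", bitidx(0));
		/* a pfn past the span indexes one block beyond the bitmap */
		printf("past-span pfn %lu -> bitidx %lu\n",
		       zone_start_pfn + spanned_pages,
		       bitidx(zone_start_pfn + spanned_pages));
		return 0;
	}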
This patch also reduces the number of calls to set_pageblock_migratetype()
by changing the condition from

	(pfn & (pageblock_nr_pages - 1))

to

	!(pfn & (pageblock_nr_pages - 1))

since it should be called only once per pageblock.
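To make the saving concrete (a standalone sketch; pageblock_nr_pages is a
real kernel symbol, but the 1024 value below is an assumption, as the real
value is configuration-dependent):

	#include <stdio.h>

	#define PAGEBLOCK_NR_PAGES 1024UL	/* assumed value */

	int main(void)
	{
		unsigned long pfn, old_hits = 0, new_hits = 0;

		for (pfn = 0; pfn < 4 * PAGEBLOCK_NR_PAGES; pfn++) {
			if (pfn & (PAGEBLOCK_NR_PAGES - 1))
				old_hits++;	/* old test: every unaligned pfn */
			if (!(pfn & (PAGEBLOCK_NR_PAGES - 1)))
				new_hits++;	/* new test: aligned pfns only */
		}
		/* prints "old: 4092, new: 4" -- once per pageblock */
		printf("old: %lu, new: %lu\n", old_hits, new_hits);
		return 0;
	}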
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Shi Weihua <shiwh@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d1cf4f05dcda..88eb59dd7ac6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2524,7 +2524,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
+	struct zone *z;
 
+	z = &NODE_DATA(nid)->node_zones[zone];
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 		/*
 		 * There can be holes in boot-time mem_map[]s
@@ -2542,7 +2544,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		init_page_count(page);
 		reset_page_mapcount(page);
 		SetPageReserved(page);
-
 		/*
 		 * Mark the block movable so that blocks are reserved for
 		 * movable at startup. This will force kernel allocations
@@ -2551,8 +2552,15 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * kernel allocations are made. Later some blocks near
 		 * the start are marked MIGRATE_RESERVE by
 		 * setup_zone_migrate_reserve()
+		 *
+		 * bitmap is created for zone's valid pfn range. but memmap
+		 * can be created for invalid pages (for alignment)
+		 * check here not to call set_pageblock_migratetype() against
+		 * pfn out of zone.
 		 */
-		if ((pfn & (pageblock_nr_pages-1)))
+		if ((z->zone_start_pfn <= pfn)
+		    && (pfn < z->zone_start_pfn + z->spanned_pages)
+		    && !(pfn & (pageblock_nr_pages - 1)))
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
 		INIT_LIST_HEAD(&page->lru);
@@ -4464,6 +4472,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 	pfn = page_to_pfn(page);
 	bitmap = get_pageblock_bitmap(zone, pfn);
 	bitidx = pfn_to_bitidx(zone, pfn);
+	VM_BUG_ON(pfn < zone->zone_start_pfn);
+	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
 
 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 		if (flags & value)
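For readers skimming the hunks: per the new comment in the first hunk, the
memmap handed to memmap_init_zone() can cover invalid pages added for
alignment, so a pageblock-aligned pfn may still lie outside the zone span.
A minimal userspace sketch of the combined check the first hunk introduces
(struct zone_span and should_mark_pageblock() are illustrative names, not
kernel symbols):

	#include <stdbool.h>

	#define PAGEBLOCK_NR_PAGES 1024UL	/* assumed value */

	struct zone_span {	/* stand-in for struct zone's span fields */
		unsigned long zone_start_pfn;
		unsigned long spanned_pages;
	};

	/* mirrors the patched condition: in-zone AND pageblock-aligned */
	static bool should_mark_pageblock(const struct zone_span *z,
					  unsigned long pfn)
	{
		return z->zone_start_pfn <= pfn &&
		       pfn < z->zone_start_pfn + z->spanned_pages &&
		       !(pfn & (PAGEBLOCK_NR_PAGES - 1));
	}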