aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorArve Hjønnevåg <arve@android.com>2011-05-24 20:12:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-05-25 11:39:24 -0400
commit6d3163ce86dd386b4f7bda80241d7fea2bc0bb1d (patch)
tree169f74df152fa593e5dc9adfff7f6f7e31061028 /mm/page_alloc.c
parent0091a47da0b720ca39511c7d90dcc768cafcaf58 (diff)
mm: check if any page in a pageblock is reserved before marking it MIGRATE_RESERVE
This fixes a problem where the first pageblock got marked MIGRATE_RESERVE even though it only had a few free pages. eg, On current ARM port, The kernel starts at offset 0x8000 to leave room for boot parameters, and the memory is freed later. This in turn caused no contiguous memory to be reserved and frequent kswapd wakeups that emptied the caches to get more contiguous memory. Unfortunately, ARM needs order-2 allocation for pgd (see arm/mm/pgd.c#pgd_alloc()). Therefore the issue is neither minor nor easily avoidable. [kosaki.motohiro@jp.fujitsu.com: added some explanation] [kosaki.motohiro@jp.fujitsu.com: add !pfn_valid_within() to check] [minchan.kim@gmail.com: check end_pfn in pageblock_is_reserved] Signed-off-by: John Stultz <john.stultz@linaro.org> Signed-off-by: Arve Hjønnevåg <arve@android.com> Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Acked-by: Mel Gorman <mel@csn.ul.ie> Acked-by: Dave Hansen <dave@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c19
1 files changed, 17 insertions, 2 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44019da9632e..01e6b614839d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3329,6 +3329,20 @@ static inline unsigned long wait_table_bits(unsigned long size)
3329#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 3329#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3330 3330
3331/* 3331/*
/*
 * Return 1 if the pfn range [start_pfn, end_pfn) contains any reserved
 * page, or any pfn without a valid memmap entry within the section;
 * return 0 otherwise. Used to skip pageblocks that can never become
 * fully free when choosing MIGRATE_RESERVE blocks.
 */
static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long cur = start_pfn;

	while (cur < end_pfn) {
		/* A hole in the memmap is treated the same as a reserved page. */
		if (!pfn_valid_within(cur))
			return 1;
		if (PageReserved(pfn_to_page(cur)))
			return 1;
		cur++;
	}
	return 0;
}
3344
3345/*
3332 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 3346 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3333 * of blocks reserved is based on min_wmark_pages(zone). The memory within 3347 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3334 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes 3348 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
@@ -3337,7 +3351,7 @@ static inline unsigned long wait_table_bits(unsigned long size)
3337 */ 3351 */
3338static void setup_zone_migrate_reserve(struct zone *zone) 3352static void setup_zone_migrate_reserve(struct zone *zone)
3339{ 3353{
3340 unsigned long start_pfn, pfn, end_pfn; 3354 unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3341 struct page *page; 3355 struct page *page;
3342 unsigned long block_migratetype; 3356 unsigned long block_migratetype;
3343 int reserve; 3357 int reserve;
@@ -3367,7 +3381,8 @@ static void setup_zone_migrate_reserve(struct zone *zone)
3367 continue; 3381 continue;
3368 3382
3369 /* Blocks with reserved pages will never free, skip them. */ 3383 /* Blocks with reserved pages will never free, skip them. */
3370 if (PageReserved(page)) 3384 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3385 if (pageblock_is_reserved(pfn, block_end_pfn))
3371 continue; 3386 continue;
3372 3387
3373 block_migratetype = get_pageblock_migratetype(page); 3388 block_migratetype = get_pageblock_migratetype(page);