path: root/mm/page_alloc.c
author		Mel Gorman <mel@csn.ul.ie>	2009-06-16 18:32:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:35 -0400
commit		418589663d6011de9006425b6c5721e1544fb47a
tree		ef37fb026d3e38191d6b5c99bc95c190fa98d0fb	/mm/page_alloc.c
parent		a3af9c389a7f3e675313f442fdd8c247c1cdb66b
page allocator: use allocation flags as an index to the zone watermark
ALLOC_WMARK_MIN, ALLOC_WMARK_LOW and ALLOC_WMARK_HIGH determine whether pages_min, pages_low or pages_high is used as the zone watermark when allocating the pages. Two branches in the allocator hotpath determine which watermark to use.

This patch uses the flags as an array index into a watermark array that is indexed with WMARK_* defines accessed via helpers. All call sites that use zone->pages_* are updated to use the helpers for accessing the values and the array offsets for setting.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	51
1 file changed, 26 insertions(+), 25 deletions(-)
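For orientation, here is a minimal userspace sketch (not kernel code) of the pattern the patch introduces: the low bits of alloc_flags double as the index into a per-zone watermark array, so the hot path does one masked array lookup instead of an if/else-if chain. The struct zone below is a stand-in, the numbers are invented, and the WMARK_* enum mirrors the one the same patch adds to include/linux/mmzone.h (not part of this file's diff).

#include <stdio.h>

/* Watermark indices; the real enum lives in include/linux/mmzone.h. */
enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

/* The low ALLOC_WMARK bits now *are* the array index. */
#define ALLOC_WMARK_MIN         WMARK_MIN
#define ALLOC_WMARK_LOW         WMARK_LOW
#define ALLOC_WMARK_HIGH        WMARK_HIGH
#define ALLOC_NO_WATERMARKS     0x04    /* don't check watermarks at all */
#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS - 1)

struct zone { unsigned long watermark[NR_WMARK]; }; /* stand-in for the kernel struct */

int main(void)
{
        struct zone z = { .watermark = { 1024, 1280, 1536 } };
        int alloc_flags = ALLOC_WMARK_LOW;      /* e.g. a normal fast-path request */

        /* One masked array lookup replaces the old branch per WMARK flag. */
        unsigned long mark = z.watermark[alloc_flags & ALLOC_WMARK_MASK];
        printf("watermark used: %lu pages\n", mark);    /* prints 1280 */
        return 0;
}

Because WMARK_MIN, WMARK_LOW and WMARK_HIGH occupy the values 0-2, ALLOC_NO_WATERMARKS can sit at 0x04 and ALLOC_WMARK_MASK (0x03) extracts exactly the index bits, while the remaining ALLOC_* flags keep their old positions from 0x10 upwards.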
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8485735fc690..abe26003124d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1150,10 +1150,15 @@ failed:
 	return NULL;
 }
 
-#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
-#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
-#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
-#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
+/* The ALLOC_WMARK bits are used as an index to zone->watermark */
+#define ALLOC_WMARK_MIN		WMARK_MIN
+#define ALLOC_WMARK_LOW		WMARK_LOW
+#define ALLOC_WMARK_HIGH	WMARK_HIGH
+#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
+
+/* Mask to get the watermark bits */
+#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
+
 #define ALLOC_HARDER		0x10 /* try to alloc harder */
 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
@@ -1440,14 +1445,10 @@ zonelist_scan:
 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
 				goto try_next_zone;
 
+		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
 			unsigned long mark;
-			if (alloc_flags & ALLOC_WMARK_MIN)
-				mark = zone->pages_min;
-			else if (alloc_flags & ALLOC_WMARK_LOW)
-				mark = zone->pages_low;
-			else
-				mark = zone->pages_high;
+			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 			if (!zone_watermark_ok(zone, order, mark,
 				    classzone_idx, alloc_flags)) {
 				if (!zone_reclaim_mode ||
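The BUILD_BUG_ON added above is what keeps the indexing trick safe: ALLOC_NO_WATERMARKS must lie outside the 0..NR_WMARK-1 index range, otherwise masking alloc_flags with ALLOC_WMARK_MASK could alias a valid watermark index. With the values this patch defines (NR_WMARK is 3 in the new enum, ALLOC_NO_WATERMARKS is 0x04) the check compiles away; it only fires if someone later rearranges the flags.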
@@ -1959,7 +1960,7 @@ static unsigned int nr_free_zone_pages(int offset)
 
 	for_each_zone_zonelist(zone, z, zonelist, offset) {
 		unsigned long size = zone->present_pages;
-		unsigned long high = zone->pages_high;
+		unsigned long high = high_wmark_pages(zone);
 		if (size > high)
 			sum += size - high;
 	}
@@ -2096,9 +2097,9 @@ void show_free_areas(void)
 			"\n",
 			zone->name,
 			K(zone_page_state(zone, NR_FREE_PAGES)),
-			K(zone->pages_min),
-			K(zone->pages_low),
-			K(zone->pages_high),
+			K(min_wmark_pages(zone)),
+			K(low_wmark_pages(zone)),
+			K(high_wmark_pages(zone)),
 			K(zone_page_state(zone, NR_ACTIVE_ANON)),
 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
@@ -2702,8 +2703,8 @@ static inline unsigned long wait_table_bits(unsigned long size)
 
 /*
  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
- * of blocks reserved is based on zone->pages_min. The memory within the
- * reserve will tend to store contiguous free pages. Setting min_free_kbytes
+ * of blocks reserved is based on min_wmark_pages(zone). The memory within
+ * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
  * higher will lead to a bigger reserve which will get freed as contiguous
  * blocks as reclaim kicks in
  */
@@ -2716,7 +2717,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 	/* Get the start pfn, end pfn and the number of blocks to reserve */
 	start_pfn = zone->zone_start_pfn;
 	end_pfn = start_pfn + zone->spanned_pages;
-	reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
+	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
 							pageblock_order;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
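As a rough worked example of the reserve sizing (numbers assumed, not from the patch): with 4 KiB pages and 2 MiB pageblocks, pageblock_nr_pages is 512 and pageblock_order is 9, so a zone whose min watermark is 1,347 pages gets roundup(1347, 512) = 1536 and 1536 >> 9 = 3 reserved pageblocks. Only the accessor changes here; the arithmetic is the same as before.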
@@ -4319,8 +4320,8 @@ static void calculate_totalreserve_pages(void)
 					max = zone->lowmem_reserve[j];
 			}
 
-			/* we treat pages_high as reserved pages. */
-			max += zone->pages_high;
+			/* we treat the high watermark as reserved pages. */
+			max += high_wmark_pages(zone);
 
 			if (max > zone->present_pages)
 				max = zone->present_pages;
@@ -4400,7 +4401,7 @@ void setup_per_zone_pages_min(void)
 			 * need highmem pages, so cap pages_min to a small
 			 * value here.
 			 *
-			 * The (pages_high-pages_low) and (pages_low-pages_min)
+			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
 			 * deltas controls asynch page reclaim, and so should
 			 * not be capped for highmem.
 			 */
@@ -4411,17 +4412,17 @@ void setup_per_zone_pages_min(void)
 				min_pages = SWAP_CLUSTER_MAX;
 			if (min_pages > 128)
 				min_pages = 128;
-			zone->pages_min = min_pages;
+			zone->watermark[WMARK_MIN] = min_pages;
 		} else {
 			/*
 			 * If it's a lowmem zone, reserve a number of pages
 			 * proportionate to the zone's size.
 			 */
-			zone->pages_min = tmp;
+			zone->watermark[WMARK_MIN] = tmp;
 		}
 
-		zone->pages_low = zone->pages_min + (tmp >> 2);
-		zone->pages_high = zone->pages_min + (tmp >> 1);
+		zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
+		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
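For a sense of the resulting spacing: tmp (computed earlier in this function, outside the hunk) is the zone's proportional share of min_free_kbytes expressed in pages, so for a lowmem zone the watermarks end up as low = min + min/4 and high = min + min/2. For example, a zone whose share works out to 1,024 pages gets watermark[WMARK_MIN] = 1024, watermark[WMARK_LOW] = 1280 and watermark[WMARK_HIGH] = 1536; the geometry is unchanged from pages_min/pages_low/pages_high, only the storage moves into the array.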
@@ -4566,7 +4567,7 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
  * whenever sysctl_lowmem_reserve_ratio changes.
  *
  * The reserve ratio obviously has absolutely no relation with the
- * pages_min watermarks. The lowmem reserve ratio can only make sense
+ * minimum watermarks. The lowmem reserve ratio can only make sense
  * if in function of the boot time zone sizes.
  */
 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,