author     Mel Gorman <mel@csn.ul.ie>                      2009-06-16 18:32:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-16 22:47:35 -0400
commit     418589663d6011de9006425b6c5721e1544fb47a
tree       ef37fb026d3e38191d6b5c99bc95c190fa98d0fb /mm/vmscan.c
parent     a3af9c389a7f3e675313f442fdd8c247c1cdb66b
page allocator: use allocation flags as an index to the zone watermark
ALLOC_WMARK_MIN, ALLOC_WMARK_LOW and ALLOC_WMARK_HIGH determine whether
pages_min, pages_low or pages_high is used as the zone watermark when
allocating pages.  Two branches in the allocator hotpath determine which
watermark to use.
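[As a rough sketch of that hot path: the actual change is in mm/page_alloc.c's
get_page_from_freelist(), which is outside the mm/vmscan.c diffstat shown below,
so the code here is an approximation for illustration, not a quote from the patch.]

        unsigned long mark;

        /* before: two branches in the hot path pick the watermark */
        if (alloc_flags & ALLOC_WMARK_MIN)
                mark = zone->pages_min;
        else if (alloc_flags & ALLOC_WMARK_LOW)
                mark = zone->pages_low;
        else
                mark = zone->pages_high;

        /* after: the ALLOC_WMARK_* bits double as an array index, no branches */
        mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];

        /* either way, mark then feeds the existing watermark check */
        ok = zone_watermark_ok(zone, order, mark, classzone_idx, alloc_flags);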
This patch turns the allocation flags into an index into a per-zone watermark
array, whose entries are addressed with WMARK_* defines and read through
helpers.  All call sites that used zone->pages_* are updated to use the
helpers when reading the values and the array offsets when setting them.
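[Standalone sketch of the watermark array and helpers that the mm/vmscan.c hunks
below rely on.  The real definitions live outside this diffstat (in mmzone.h and
mm/internal.h), so the names and layout here are reconstructed for illustration
and should be checked against the full patch.]

        #include <stdio.h>

        enum zone_watermarks {
                WMARK_MIN,
                WMARK_LOW,
                WMARK_HIGH,
                NR_WMARK
        };

        struct zone {
                /* replaces the old pages_min/pages_low/pages_high fields */
                unsigned long watermark[NR_WMARK];
        };

        /* read-side helpers used throughout the patch */
        #define min_wmark_pages(z)  ((z)->watermark[WMARK_MIN])
        #define low_wmark_pages(z)  ((z)->watermark[WMARK_LOW])
        #define high_wmark_pages(z) ((z)->watermark[WMARK_HIGH])

        int main(void)
        {
                struct zone zone;

                /* write side: setters address the array offsets directly */
                /* (arbitrary demo values; the kernel derives low/high from min) */
                zone.watermark[WMARK_MIN]  = 100;
                zone.watermark[WMARK_LOW]  = 125;
                zone.watermark[WMARK_HIGH] = 150;

                /* read side: call sites such as vmscan.c use the helpers */
                printf("min=%lu low=%lu high=%lu\n",
                       min_wmark_pages(&zone),
                       low_wmark_pages(&zone),
                       high_wmark_pages(&zone));
                return 0;
        }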
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 39
1 file changed, 21 insertions, 18 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a6b7d14812e6..e5245d051647 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1401,7 +1401,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
                 free = zone_page_state(zone, NR_FREE_PAGES);
                 /* If we have very few page cache pages,
                    force-scan anon pages. */
-                if (unlikely(file + free <= zone->pages_high)) {
+                if (unlikely(file + free <= high_wmark_pages(zone))) {
                         percent[0] = 100;
                         percent[1] = 0;
                         return;
@@ -1533,11 +1533,13 @@ static void shrink_zone(int priority, struct zone *zone,
  * try to reclaim pages from zones which will satisfy the caller's allocation
  * request.
  *
- * We reclaim from a zone even if that zone is over pages_high. Because:
+ * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
+ * Because:
  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
  *    allocation or
- * b) The zones may be over pages_high but they must go *over* pages_high to
- *    satisfy the `incremental min' zone defense algorithm.
+ * b) The target zone may be at high_wmark_pages(zone) but the lower zones
+ *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
+ *    zone defense algorithm.
  *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
@@ -1743,7 +1745,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
- * they are all at pages_high.
+ * they are all at high_wmark_pages(zone).
  *
  * Returns the number of pages which were actually freed.
  *
@@ -1756,11 +1758,11 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
  * the zone for when the problem goes away.
  *
  * kswapd scans the zones in the highmem->normal->dma direction. It skips
- * zones which have free_pages > pages_high, but once a zone is found to have
- * free_pages <= pages_high, we scan that zone and the lower zones regardless
- * of the number of free pages in the lower zones. This interoperates with
- * the page allocator fallback scheme to ensure that aging of pages is balanced
- * across the zones.
+ * zones which have free_pages > high_wmark_pages(zone), but once a zone is
+ * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
+ * lower zones regardless of the number of free pages in the lower zones. This
+ * interoperates with the page allocator fallback scheme to ensure that aging
+ * of pages is balanced across the zones.
  */
 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 {
@@ -1781,7 +1783,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
         };
         /*
          * temp_priority is used to remember the scanning priority at which
-         * this zone was successfully refilled to free_pages == pages_high.
+         * this zone was successfully refilled to
+         * free_pages == high_wmark_pages(zone).
          */
         int temp_priority[MAX_NR_ZONES];
 
@@ -1826,8 +1829,8 @@ loop_again:
                                 shrink_active_list(SWAP_CLUSTER_MAX, zone,
                                                         &sc, priority, 0);
 
-                        if (!zone_watermark_ok(zone, order, zone->pages_high,
-                                        0, 0)) {
+                        if (!zone_watermark_ok(zone, order,
+                                        high_wmark_pages(zone), 0, 0)) {
                                 end_zone = i;
                                 break;
                         }
@@ -1861,8 +1864,8 @@ loop_again:
                                         priority != DEF_PRIORITY)
                                 continue;
 
-                        if (!zone_watermark_ok(zone, order, zone->pages_high,
-                                        end_zone, 0))
+                        if (!zone_watermark_ok(zone, order,
+                                        high_wmark_pages(zone), end_zone, 0))
                                 all_zones_ok = 0;
                         temp_priority[i] = priority;
                         sc.nr_scanned = 0;
@@ -1871,8 +1874,8 @@ loop_again:
                          * We put equal pressure on every zone, unless one
                          * zone has way too many pages free already.
                          */
-                        if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
-                                        end_zone, 0))
+                        if (!zone_watermark_ok(zone, order,
+                                        8*high_wmark_pages(zone), end_zone, 0))
                                 shrink_zone(priority, zone, &sc);
                         reclaim_state->reclaimed_slab = 0;
                         nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
@@ -2038,7 +2041,7 @@ void wakeup_kswapd(struct zone *zone, int order)
                 return;
 
         pgdat = zone->zone_pgdat;
-        if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
+        if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                 return;
         if (pgdat->kswapd_max_order < order)
                 pgdat->kswapd_max_order = order;