aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>2010-03-05 16:41:55 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2010-03-06 14:26:25 -0500
commit93e4a89a8c987189b168a530a331ef6d0fcf07a7 (patch)
treedeb08017c0e4874539549d3ea9bf2d7b447a43be /mm/vmscan.c
parentfc91668eaf9e7ba61e867fc2218b7e9fb67faa4f (diff)
mm: restore zone->all_unreclaimable to independence word
commit e815af95 ("change all_unreclaimable zone member to flags") changed the all_unreclaimable member to a bit flag. But it had an undesirable side effect. free_one_page() is one of the hottest paths in the Linux kernel, and increasing atomic ops in it can reduce kernel performance a bit. Thus, this patch reverts that commit partially: at minimum, all_unreclaimable shouldn't share a memory word with other zone flags. [akpm@linux-foundation.org: fix patch interaction] Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: David Rientjes <rientjes@google.com> Cc: Wu Fengguang <fengguang.wu@intel.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Minchan Kim <minchan.kim@gmail.com> Cc: Huang Shijie <shijie8@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c22
1 files changed, 9 insertions, 13 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bc0f8db8340f..5cbf64dd79c1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1699,8 +1699,7 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
1699 continue; 1699 continue;
1700 note_zone_scanning_priority(zone, priority); 1700 note_zone_scanning_priority(zone, priority);
1701 1701
1702 if (zone_is_all_unreclaimable(zone) && 1702 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1703 priority != DEF_PRIORITY)
1704 continue; /* Let kswapd poll it */ 1703 continue; /* Let kswapd poll it */
1705 sc->all_unreclaimable = 0; 1704 sc->all_unreclaimable = 0;
1706 } else { 1705 } else {
@@ -1927,7 +1926,7 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
1927 if (!populated_zone(zone)) 1926 if (!populated_zone(zone))
1928 continue; 1927 continue;
1929 1928
1930 if (zone_is_all_unreclaimable(zone)) 1929 if (zone->all_unreclaimable)
1931 continue; 1930 continue;
1932 1931
1933 if (!zone_watermark_ok(zone, order, high_wmark_pages(zone), 1932 if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
@@ -2017,8 +2016,7 @@ loop_again:
2017 if (!populated_zone(zone)) 2016 if (!populated_zone(zone))
2018 continue; 2017 continue;
2019 2018
2020 if (zone_is_all_unreclaimable(zone) && 2019 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2021 priority != DEF_PRIORITY)
2022 continue; 2020 continue;
2023 2021
2024 /* 2022 /*
@@ -2061,8 +2059,7 @@ loop_again:
2061 if (!populated_zone(zone)) 2059 if (!populated_zone(zone))
2062 continue; 2060 continue;
2063 2061
2064 if (zone_is_all_unreclaimable(zone) && 2062 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2065 priority != DEF_PRIORITY)
2066 continue; 2063 continue;
2067 2064
2068 temp_priority[i] = priority; 2065 temp_priority[i] = priority;
@@ -2089,12 +2086,11 @@ loop_again:
2089 lru_pages); 2086 lru_pages);
2090 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2087 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2091 total_scanned += sc.nr_scanned; 2088 total_scanned += sc.nr_scanned;
2092 if (zone_is_all_unreclaimable(zone)) 2089 if (zone->all_unreclaimable)
2093 continue; 2090 continue;
2094 if (nr_slab == 0 && zone->pages_scanned >= 2091 if (nr_slab == 0 &&
2095 (zone_reclaimable_pages(zone) * 6)) 2092 zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
2096 zone_set_flag(zone, 2093 zone->all_unreclaimable = 1;
2097 ZONE_ALL_UNRECLAIMABLE);
2098 /* 2094 /*
2099 * If we've done a decent amount of scanning and 2095 * If we've done a decent amount of scanning and
2100 * the reclaim ratio is low, start doing writepage 2096 * the reclaim ratio is low, start doing writepage
@@ -2624,7 +2620,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2624 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) 2620 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
2625 return ZONE_RECLAIM_FULL; 2621 return ZONE_RECLAIM_FULL;
2626 2622
2627 if (zone_is_all_unreclaimable(zone)) 2623 if (zone->all_unreclaimable)
2628 return ZONE_RECLAIM_FULL; 2624 return ZONE_RECLAIM_FULL;
2629 2625
2630 /* 2626 /*