path: root/mm
author    Johannes Weiner <hannes@cmpxchg.org>    2009-02-13 20:04:10 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-02-21 17:17:17 -0500
commit    0cb57258fe01e9b21076b6a15b6aec7a24168228
tree      9744786d92356a21d3d2bcd2d3cc2935d81a489f /mm
parent    3049103ddfc9aac111916bd2f39ac6976c431517
swsusp: clean up shrink_all_zones()
Move local variables to innermost possible scopes and use local variables to cache calculations/reads done more than once.

No change in functionality (intended).

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Len Brown <lenb@kernel.org>
Cc: Greg KH <gregkh@suse.de>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
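As a quick illustration of the pattern the changelog describes (a standalone toy in plain C, not part of this commit and not kernel code; the item_count() helper, the counts array, and the threshold parameter are made up for the example), the sketch below makes the same two moves as the patch: a repeated read is cached in a named local, and each variable is declared in the innermost scope that actually uses it, mirroring what the diff does with lru_pages and nr_to_scan.

#include <stdio.h>

#define NGROUPS 3
#define NITEMS  4

static int counts[NGROUPS][NITEMS] = {
	{ 5, 0, 7, 2 },
	{ 1, 9, 0, 3 },
	{ 4, 4, 4, 4 },
};

/* Stand-in for a read that would be wasteful to repeat. */
static int item_count(int group, int item)
{
	return counts[group][item];
}

/*
 * 'n' caches the repeated read, and 'excess' lives only inside the
 * branch that needs it -- the same cleanup style as the patch below.
 */
static int sum_excess(int threshold)
{
	int total = 0;

	for (int g = 0; g < NGROUPS; g++) {
		for (int i = 0; i < NITEMS; i++) {
			int n = item_count(g, i);	/* read once, reuse */

			if (n >= threshold) {
				int excess = n - threshold;

				total += excess;
			}
		}
	}
	return total;
}

int main(void)
{
	printf("%d\n", sum_excess(3));
	return 0;
}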
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c | 23
1 file changed, 11 insertions, 12 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 550e8695070d..6177e3bcd66b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2057,31 +2057,31 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 				      int pass, struct scan_control *sc)
 {
 	struct zone *zone;
-	unsigned long nr_to_scan, ret = 0;
-	enum lru_list l;
+	unsigned long ret = 0;
 
 	for_each_zone(zone) {
+		enum lru_list l;
 
 		if (!populated_zone(zone))
 			continue;
-
 		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
 			continue;
 
 		for_each_evictable_lru(l) {
+			enum zone_stat_item ls = NR_LRU_BASE + l;
+			unsigned long lru_pages = zone_page_state(zone, ls);
+
 			/* For pass = 0, we don't shrink the active list */
-			if (pass == 0 &&
-			    (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
+			if (pass == 0 && (l == LRU_ACTIVE_ANON ||
+					  l == LRU_ACTIVE_FILE))
 				continue;
 
-			zone->lru[l].nr_scan +=
-				(zone_page_state(zone, NR_LRU_BASE + l)
-								>> prio) + 1;
+			zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
 			if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+				unsigned long nr_to_scan;
+
 				zone->lru[l].nr_scan = 0;
-				nr_to_scan = min(nr_pages,
-					zone_page_state(zone,
-							NR_LRU_BASE + l));
+				nr_to_scan = min(nr_pages, lru_pages);
 				ret += shrink_list(l, nr_to_scan, zone,
 								sc, prio);
 				if (ret >= nr_pages)
@@ -2089,7 +2089,6 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 			}
 		}
 	}
-
 	return ret;
 }
 