Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  28
1 file changed, 12 insertions(+), 16 deletions(-)
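
Before the hunks themselves, one note on the arithmetic they keep intact: zone->lru[l].nr_scan accumulates a scan credit of (lru_pages >> prio) + 1 on every call, and a list is only shrunk once that credit reaches nr_pages (or unconditionally in the late passes), after which the credit is reset. The fragment below is a small stand-alone model of that accumulation for a single LRU list, not kernel code: the LRU size, the reclaim target, and the one-list framing are made-up illustration values; only the formula and DEF_PRIORITY = 12 are taken from the kernel source shown here.

#include <stdio.h>

/*
 * Toy model of the scan-credit accumulation used in shrink_all_zones():
 * each priority round adds (lru_pages >> prio) + 1 to the credit, and a
 * real scan is only triggered once the credit reaches nr_pages.
 * All numbers are illustrative, not taken from a running kernel.
 */
int main(void)
{
	const unsigned long lru_pages = 10000;	/* hypothetical LRU size */
	const unsigned long nr_pages = 1024;	/* hypothetical reclaim target */
	unsigned long nr_scan = 0;		/* accumulated scan credit */
	int prio;

	for (prio = 12 /* DEF_PRIORITY */; prio >= 0; prio--) {
		nr_scan += (lru_pages >> prio) + 1;
		printf("prio %2d: credit %5lu%s\n", prio, nr_scan,
		       nr_scan >= nr_pages ? "  -> would scan this list now" : "");
		if (nr_scan >= nr_pages)
			nr_scan = 0;	/* reset after scanning, as the kernel does */
	}
	return 0;
}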
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cf8441345277..ae6f4c174a12 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2059,31 +2059,31 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 				      int pass, struct scan_control *sc)
 {
 	struct zone *zone;
-	unsigned long nr_to_scan, ret = 0;
-	enum lru_list l;
+	unsigned long ret = 0;
 
 	for_each_zone(zone) {
+		enum lru_list l;
 
 		if (!populated_zone(zone))
 			continue;
-
 		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
 			continue;
 
 		for_each_evictable_lru(l) {
+			enum zone_stat_item ls = NR_LRU_BASE + l;
+			unsigned long lru_pages = zone_page_state(zone, ls);
+
 			/* For pass = 0, we don't shrink the active list */
-			if (pass == 0 &&
-				(l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
+			if (pass == 0 && (l == LRU_ACTIVE_ANON ||
+						l == LRU_ACTIVE_FILE))
 				continue;
 
-			zone->lru[l].nr_scan +=
-				(zone_page_state(zone, NR_LRU_BASE + l)
-								>> prio) + 1;
+			zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
 			if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+				unsigned long nr_to_scan;
+
 				zone->lru[l].nr_scan = 0;
-				nr_to_scan = min(nr_pages,
-					zone_page_state(zone,
-							NR_LRU_BASE + l));
+				nr_to_scan = min(nr_pages, lru_pages);
 				ret += shrink_list(l, nr_to_scan, zone,
 								sc, prio);
 				if (ret >= nr_pages)
@@ -2091,7 +2091,6 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 			}
 		}
 	}
-
 	return ret;
 }
 
@@ -2114,7 +2113,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 		.may_swap = 0,
 		.swap_cluster_max = nr_pages,
 		.may_writepage = 1,
-		.swappiness = vm_swappiness,
 		.isolate_pages = isolate_pages_global,
 	};
 
@@ -2148,10 +2146,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 		int prio;
 
 		/* Force reclaiming mapped pages in the passes #3 and #4 */
-		if (pass > 2) {
+		if (pass > 2)
 			sc.may_swap = 1;
-			sc.swappiness = 100;
-		}
 
 		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
 			unsigned long nr_to_scan = nr_pages - ret;