Diffstat (limited to 'mm')
 mm/vmscan.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)
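
Note on the change below: shrink_slab() is made to return the number of slab objects it actually reclaimed, by reading each shrinker's object count with a zero-length scan before every SHRINK_BATCH pass and accumulating the drop; balance_pgdat() then refuses to mark a zone all_unreclaimable while that count is non-zero. The following is a minimal userspace sketch of the before/after counting pattern only; fake_shrinker(), shrink_slab_sketch() and the cached_objects counter are illustrative stand-ins, not kernel code.

/*
 * Sketch of the bookkeeping added to shrink_slab(): sample the object
 * count with nr_to_scan == 0, scan a batch, and credit the difference.
 */
#include <stdio.h>

#define SHRINK_BATCH 128

static long cached_objects = 1000;	/* stands in for one slab cache */

/* Old-style shrinker callback: nr_to_scan == 0 means "just report the count". */
static int fake_shrinker(int nr_to_scan, unsigned int gfp_mask)
{
	(void)gfp_mask;
	if (nr_to_scan) {
		long freed = nr_to_scan < cached_objects ?
				nr_to_scan : cached_objects;
		cached_objects -= freed;
	}
	return (int)cached_objects;
}

/* Mirrors the new shrink_slab() loop: returns the number of objects shrunk. */
static int shrink_slab_sketch(long total_scan, unsigned int gfp_mask)
{
	int ret = 0;

	while (total_scan >= SHRINK_BATCH) {
		int nr_before = fake_shrinker(0, gfp_mask);
		int shrink_ret = fake_shrinker(SHRINK_BATCH, gfp_mask);

		if (shrink_ret == -1)
			break;
		if (shrink_ret < nr_before)
			ret += nr_before - shrink_ret;
		total_scan -= SHRINK_BATCH;
	}
	return ret;
}

int main(void)
{
	printf("objects shrunk: %d\n", shrink_slab_sketch(512, 0));
	return 0;
}

The patch also changes a failed down_read_trylock(&shrinker_rwsem) to return 1 rather than 0, so a contended semaphore is reported as "progress still possible" instead of "nothing reclaimable".
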
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 269eded9b459..c62cadce0426 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -180,17 +180,20 @@ EXPORT_SYMBOL(remove_shrinker);
  * `lru_pages' represents the number of on-LRU pages in all the zones which
  * are eligible for the caller's allocation attempt. It is used for balancing
  * slab reclaim versus page reclaim.
+ *
+ * Returns the number of slab objects which we shrunk.
  */
 static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
 			unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
+	int ret = 0;
 
 	if (scanned == 0)
 		scanned = SWAP_CLUSTER_MAX;
 
 	if (!down_read_trylock(&shrinker_rwsem))
-		return 0;
+		return 1;	/* Assume we'll be able to shrink next time */
 
 	list_for_each_entry(shrinker, &shrinker_list, list) {
 		unsigned long long delta;
@@ -209,10 +212,14 @@ static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
 		while (total_scan >= SHRINK_BATCH) {
 			long this_scan = SHRINK_BATCH;
 			int shrink_ret;
+			int nr_before;
 
+			nr_before = (*shrinker->shrinker)(0, gfp_mask);
 			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
 			if (shrink_ret == -1)
 				break;
+			if (shrink_ret < nr_before)
+				ret += nr_before - shrink_ret;
 			mod_page_state(slabs_scanned, this_scan);
 			total_scan -= this_scan;
 
@@ -222,7 +229,7 @@ static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
 		shrinker->nr += total_scan;
 	}
 	up_read(&shrinker_rwsem);
-	return 0;
+	return ret;
 }
 
 /* Called without lock on whether page is mapped, so answer is unstable */
@@ -1079,6 +1086,7 @@ scan:
 		 */
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
+			int nr_slab;
 
 			if (zone->present_pages == 0)
 				continue;
@@ -1100,14 +1108,15 @@ scan:
 			sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
 			shrink_zone(zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
-			shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
+			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
+						lru_pages);
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_reclaimed += sc.nr_reclaimed;
 			total_scanned += sc.nr_scanned;
 			if (zone->all_unreclaimable)
 				continue;
-			if (zone->pages_scanned >= (zone->nr_active +
-				zone->nr_inactive) * 4)
+			if (nr_slab == 0 && zone->pages_scanned >=
+				(zone->nr_active + zone->nr_inactive) * 4)
 				zone->all_unreclaimable = 1;
 			/*
 			 * If we've done a decent amount of scanning and