author		Vladimir Davydov <vdavydov@parallels.com>	2014-04-03 17:47:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-03 19:21:00 -0400
commit		d5bc5fd3fcb7b8dfb431694a8c8052466504c10c (patch)
tree		aabd10454570bc8602867ce650344e2025b0569e /mm/vmscan.c
parent		8382d914ebf72092aa15cdc2a5dcedb2daa0209d (diff)
mm: vmscan: shrink_slab: rename max_pass -> freeable
The name `max_pass' is misleading, because this variable actually keeps the
estimated number of freeable objects, not the maximal number of objects we
can scan in this pass, which can be twice that. Rename it to reflect its
actual meaning.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
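To see why the estimate and the per-pass scan target can differ by up to 2x, here is a minimal userspace sketch of the clamping arithmetic in shrink_slab_node() (shown in the diff below). It is illustrative only; the standalone function name and the sample numbers are assumptions, not kernel code.

    #include <stdio.h>

    /*
     * Illustrative clamp mirroring shrink_slab_node(): "freeable" is only
     * an estimate of reclaimable objects, while the scan target (deferred
     * work "nr" plus this pass's "delta") may legitimately reach twice it.
     */
    static long clamp_total_scan(long nr, long delta, long freeable)
    {
            long total_scan = nr + delta;

            /* Small delta: don't let deferred work empty the whole cache. */
            if (delta < freeable / 4 && total_scan > freeable / 2)
                    total_scan = freeable / 2;

            /* Hard cap: never scan more than twice the freeable estimate. */
            if (total_scan > freeable * 2)
                    total_scan = freeable * 2;

            return total_scan;
    }

    int main(void)
    {
            /* A big backlog with a large delta is capped at 2 * freeable. */
            printf("%ld\n", clamp_total_scan(5000, 400, 1000));  /* 2000 */
            /* With a small delta, scanning stops at half of freeable. */
            printf("%ld\n", clamp_total_scan(5000, 100, 1000));  /* 500 */
            return 0;
    }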
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1cd48519f506..c53d1a54964c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -224,15 +224,15 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 	unsigned long freed = 0;
 	unsigned long long delta;
 	long total_scan;
-	long max_pass;
+	long freeable;
 	long nr;
 	long new_nr;
 	int nid = shrinkctl->nid;
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
 
-	max_pass = shrinker->count_objects(shrinker, shrinkctl);
-	if (max_pass == 0)
+	freeable = shrinker->count_objects(shrinker, shrinkctl);
+	if (freeable == 0)
 		return 0;
 
 	/*
@@ -244,14 +244,14 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 
 	total_scan = nr;
 	delta = (4 * nr_pages_scanned) / shrinker->seeks;
-	delta *= max_pass;
+	delta *= freeable;
 	do_div(delta, lru_pages + 1);
 	total_scan += delta;
 	if (total_scan < 0) {
 		printk(KERN_ERR
 		"shrink_slab: %pF negative objects to delete nr=%ld\n",
 		       shrinker->scan_objects, total_scan);
-		total_scan = max_pass;
+		total_scan = freeable;
 	}
 
 	/*
@@ -260,26 +260,26 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 	 * shrinkers to return -1 all the time. This results in a large
 	 * nr being built up so when a shrink that can do some work
 	 * comes along it empties the entire cache due to nr >>>
-	 * max_pass. This is bad for sustaining a working set in
+	 * freeable. This is bad for sustaining a working set in
 	 * memory.
 	 *
 	 * Hence only allow the shrinker to scan the entire cache when
 	 * a large delta change is calculated directly.
 	 */
-	if (delta < max_pass / 4)
-		total_scan = min(total_scan, max_pass / 2);
+	if (delta < freeable / 4)
+		total_scan = min(total_scan, freeable / 2);
 
 	/*
 	 * Avoid risking looping forever due to too large nr value:
 	 * never try to free more than twice the estimate number of
 	 * freeable entries.
 	 */
-	if (total_scan > max_pass * 2)
-		total_scan = max_pass * 2;
+	if (total_scan > freeable * 2)
+		total_scan = freeable * 2;
 
 	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
 				nr_pages_scanned, lru_pages,
-				max_pass, delta, total_scan);
+				freeable, delta, total_scan);
 
 	/*
 	 * Normally, we should not scan less than batch_size objects in one
@@ -292,12 +292,12 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 	 *
 	 * We detect the "tight on memory" situations by looking at the total
 	 * number of objects we want to scan (total_scan). If it is greater
-	 * than the total number of objects on slab (max_pass), we must be
+	 * than the total number of objects on slab (freeable), we must be
 	 * scanning at high prio and therefore should try to reclaim as much as
 	 * possible.
 	 */
 	while (total_scan >= batch_size ||
-	       total_scan >= max_pass) {
+	       total_scan >= freeable) {
 		unsigned long ret;
 		unsigned long nr_to_scan = min(batch_size, total_scan);
 
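The loop in the last hunk drains total_scan in batch_size chunks; the second condition (total_scan >= freeable) lets one final smaller-than-batch pass run when the whole cache should be scanned. A self-contained sketch of that consumption pattern, assuming a made-up scan callback and sample values:

    #include <stdio.h>

    /* Stand-in for shrinker->scan_objects(); names and values are made up. */
    static unsigned long fake_scan(unsigned long nr_to_scan)
    {
            printf("scanning %lu objects\n", nr_to_scan);
            return nr_to_scan; /* pretend every scanned object was freed */
    }

    int main(void)
    {
            long total_scan = 1000; /* already capped at 2 * freeable */
            long freeable = 500;
            long batch_size = 1024; /* SHRINK_BATCH-like default */
            unsigned long freed = 0;

            /*
             * Drain total_scan in batch_size chunks; the second condition
             * allows a final smaller-than-batch pass when the cache is
             * small but total_scan covers all of it ("tight on memory").
             */
            while (total_scan >= batch_size || total_scan >= freeable) {
                    unsigned long nr_to_scan =
                            batch_size < total_scan ? batch_size : total_scan;

                    freed += fake_scan(nr_to_scan);
                    total_scan -= nr_to_scan;
            }
            printf("freed %lu, leftover %ld\n", freed, total_scan);
            return 0;
    }

Here the single pass scans 1000 objects even though that is below batch_size, because total_scan covers the whole (small) cache.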