Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	14
1 file changed, 11 insertions, 3 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d75cdf360730..c4abf08861d2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -291,6 +291,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	int nid = shrinkctl->nid;
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
+	long scanned = 0, next_deferred;
 
 	freeable = shrinker->count_objects(shrinker, shrinkctl);
 	if (freeable == 0)
@@ -312,7 +313,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
 		       shrinker->scan_objects, total_scan);
 		total_scan = freeable;
-	}
+		next_deferred = nr;
+	} else
+		next_deferred = total_scan;
 
 	/*
 	 * We need to avoid excessive windup on filesystem shrinkers
@@ -369,17 +372,22 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 
 		count_vm_events(SLABS_SCANNED, nr_to_scan);
 		total_scan -= nr_to_scan;
+		scanned += nr_to_scan;
 
 		cond_resched();
 	}
 
+	if (next_deferred >= scanned)
+		next_deferred -= scanned;
+	else
+		next_deferred = 0;
 	/*
 	 * move the unused scan count back into the shrinker in a
 	 * manner that handles concurrent updates. If we exhausted the
 	 * scan, there is no need to do an update.
 	 */
-	if (total_scan > 0)
-		new_nr = atomic_long_add_return(total_scan,
+	if (next_deferred > 0)
+		new_nr = atomic_long_add_return(next_deferred,
 						&shrinker->nr_deferred[nid]);
 	else
 		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
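Reader's note on the change above: the patch tracks how many objects were actually handed to ->scan_objects() (scanned) and carries only the remaining intended work (next_deferred) back into shrinker->nr_deferred[nid], rather than re-deferring whatever happened to be left in total_scan. A minimal standalone sketch of that arithmetic follows; this is not kernel code, and the values are made up purely for illustration.

#include <stdio.h>

int main(void)
{
	/* Illustrative values only; in the kernel, next_deferred starts as
	 * the work intended for this pass and scanned is the number of
	 * objects actually passed to ->scan_objects(). */
	long next_deferred = 600;
	long scanned = 450;

	/* Rule introduced by the patch: defer only the intended work that
	 * was not done, and never a negative amount. */
	if (next_deferred >= scanned)
		next_deferred -= scanned;
	else
		next_deferred = 0;

	/* This is the value that would be added back into
	 * shrinker->nr_deferred[nid] for the next reclaim pass. */
	printf("next_deferred = %ld\n", next_deferred);
	return 0;
}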