author	Minchan Kim <minchan.kim@gmail.com>	2011-05-24 20:11:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:01 -0400
commit	f06590bd718ed950c98828e30ef93204028f3210
tree	60d1c52a538618a16ebcd82a4d949446fd2036c7
parent	afc7e326a3f5bafc41324d7926c324414e343ee5
mm: vmscan: correctly check if reclaimer should schedule during shrink_slab
It has been reported on some laptops that kswapd is consuming large
amounts of CPU and not being scheduled when SLUB is enabled during
large amounts of file copying.  It is expected that this is due to
kswapd missing every cond_resched() point because:

shrink_page_list() calls cond_resched() if inactive pages were
isolated, which in turn may not happen if all_unreclaimable is set in
shrink_zones().  If for whatever reason all_unreclaimable is set on
all zones, we can miss calling cond_resched().

balance_pgdat() only calls cond_resched() if the zones are not
balanced.  For a high-order allocation that is balanced, it checks
order-0 again.  During that window, order-0 might have become
unbalanced so it loops again for order-0 and returns that it was
reclaiming for order-0 to kswapd().  It can then find that a caller
has rewoken kswapd for a high-order and re-enters balance_pgdat()
without ever calling cond_resched().

shrink_slab() only calls cond_resched() if we are reclaiming slab
pages.  If there are a large number of direct reclaimers, the
shrinker_rwsem can be contended and prevent kswapd calling
cond_resched().

This patch modifies the shrink_slab() case.  If the semaphore is
contended, the caller will still check cond_resched().  After each
successful call into a shrinker, the check for cond_resched() remains
in case one shrinker is particularly slow.

[mgorman@suse.de: preserve call to cond_resched after each call into shrinker]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Tested-by: Colin King <colin.king@canonical.com>
Cc: Raghavendra D Prabhu <raghu.prabhu13@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: <stable@kernel.org>		[2.6.38+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 66698f603aa4..d303b60f4c2a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -231,8 +231,11 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 	if (scanned == 0)
 		scanned = SWAP_CLUSTER_MAX;
 
-	if (!down_read_trylock(&shrinker_rwsem))
-		return 1;	/* Assume we'll be able to shrink next time */
+	if (!down_read_trylock(&shrinker_rwsem)) {
+		/* Assume we'll be able to shrink next time */
+		ret = 1;
+		goto out;
+	}
 
 	list_for_each_entry(shrinker, &shrinker_list, list) {
 		unsigned long long delta;
@@ -283,6 +286,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 		shrinker->nr += total_scan;
 	}
 	up_read(&shrinker_rwsem);
+out:
+	cond_resched();
 	return ret;
 }
 
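
As a rough illustration of the control flow this patch introduces (not
kernel code), the sketch below mimics the patched shrink_slab() in user
space under stated assumptions: pthread_rwlock_tryrdlock() stands in
for down_read_trylock(), sched_yield() stands in for cond_resched(),
and the function and lock names are invented for the example.

/*
 * Minimal user-space analogue of the patched shrink_slab() control
 * flow.  The lock and function names here are illustrative only.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_rwlock_t shrinker_lock = PTHREAD_RWLOCK_INITIALIZER;

static unsigned long shrink_slab_like(unsigned long scanned)
{
	unsigned long ret = 0;

	if (pthread_rwlock_tryrdlock(&shrinker_lock) != 0) {
		/* Lock contended: assume we can shrink next time. */
		ret = 1;
		goto out;
	}

	/* ... walk the registered shrinkers here ... */
	ret = scanned;

	pthread_rwlock_unlock(&shrinker_lock);
out:
	/*
	 * Yield even on the contended path, mirroring the patch: the
	 * caller gives up the CPU instead of looping straight back in.
	 */
	sched_yield();
	return ret;
}

int main(void)
{
	printf("reclaimed (simulated): %lu\n", shrink_slab_like(128));
	return 0;
}

Build with "cc -pthread sketch.c".  The point is only that the
contended path still yields the CPU before returning, which is what
keeps kswapd schedulable in the kernel case.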