author     Shaohua Li <shaohua.li@intel.com>              2011-10-31 20:07:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2011-10-31 20:30:45 -0400
commit     3da367c3e5fca71d4e778fa565d9b098d5518f4a (patch)
tree       915dff1989bdffaed157b56f724631b5d8f2d328 /mm
parent     3fa36acbced23c563345de3179dfe1775f15be5e (diff)
vmscan: add block plug for page reclaim
The per-task block plug can reduce block queue lock contention and increase request merging. Currently page reclaim doesn't use it. I originally thought page reclaim didn't need it, because the number of kswapd threads is limited and file cache writeback is mostly done by the flusher threads.

When I tested a workload with heavy swap on a 4-node machine, each CPU was doing direct page reclaim and swap, which caused block queue lock contention. In my test, without the patch below, CPU utilization is about 2% ~ 7%; with the patch, it is about 1% ~ 3%. Disk throughput is unchanged. This should also help normal kswapd writeback and file cache writeback (by increasing request merging, for example), but the effect may be less obvious than in the scenario above.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
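For context, here is a minimal sketch of the per-task plug pattern the patch applies in shrink_zone(). Only struct blk_plug, blk_start_plug() and blk_finish_plug() from <linux/blkdev.h> are real kernel APIs; submit_reclaim_io() is a hypothetical stand-in for the LRU scanning loop, not part of the patch.

#include <linux/blkdev.h>

/* Hypothetical stand-in for the work that issues swap/file writeback. */
static void submit_reclaim_io(void);

/*
 * Sketch only: I/O submitted between blk_start_plug() and
 * blk_finish_plug() is held on the current task's plug list and
 * flushed as a batch, reducing block queue lock acquisitions and
 * giving adjacent requests a chance to merge before they reach the
 * queue.
 */
static void reclaim_with_plug(void)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	submit_reclaim_io();	/* hypothetical: the LRU scan loop goes here */
	blk_finish_plug(&plug);
}

If the task blocks while the plug is held, the block layer flushes the pending requests on schedule(), so holding the plug across the whole reclaim loop does not delay I/O indefinitely.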
Diffstat (limited to 'mm')
-rw-r--r--   mm/vmscan.c   3
1 file changed, 3 insertions, 0 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b68a9342d5a3..b1520b077858 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2005,12 +2005,14 @@ static void shrink_zone(int priority, struct zone *zone,
 	enum lru_list l;
 	unsigned long nr_reclaimed, nr_scanned;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+	struct blk_plug plug;
 
 restart:
 	nr_reclaimed = 0;
 	nr_scanned = sc->nr_scanned;
 	get_scan_count(zone, sc, nr, priority);
 
+	blk_start_plug(&plug);
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 					nr[LRU_INACTIVE_FILE]) {
 		for_each_evictable_lru(l) {
@@ -2034,6 +2036,7 @@ restart:
 		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
 			break;
 	}
+	blk_finish_plug(&plug);
 	sc->nr_reclaimed += nr_reclaimed;
 
 	/*