author     Mel Gorman <mel@csn.ul.ie>                       2010-10-26 17:21:45 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-10-26 19:52:07 -0400
commit     0e093d99763eb4cea09f8ca4f1d01f34e121d10b (patch)
tree       fad38f9c3651c81db298521141a79d9468f71986 /mm/page_alloc.c
parent     08fc468f4eaf6683bae5bdb94743a09d8630cb80 (diff)
writeback: do not sleep on the congestion queue if there are no congested BDIs or if significant congestion is not being encountered in the current zone
If congestion_wait() is called with no BDI congested, the caller sleeps
for the full timeout, which may be an unnecessary sleep.  This patch
adds wait_iff_congested(), which checks for congestion and sleeps only
if a BDI is congested; otherwise it calls cond_resched() to ensure the
caller is not hogging the CPU beyond its quota, but it does not sleep.
This is aimed at reducing some of the major desktop stalls reported
during IO.  For example, while kswapd is operating it calls
congestion_wait(), but it could just have been reclaiming clean page
cache pages with no congestion.  Without this patch it would sleep for
the full timeout; with this patch it just calls schedule() if it has
been on the CPU too long.  Similar logic applies to direct reclaimers
that are not making enough progress.
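
For illustration, here is a minimal sketch of the wait_iff_congested()
logic described above.  It is not the verbatim mm/backing-dev.c
implementation: the congestion bookkeeping symbols nr_bdi_congested[],
congestion_wqh[] and zone_is_reclaim_congested() are assumed stand-ins
for the real helpers, and the headers are approximate.

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/mmzone.h>

/* Sketch of the changelog's logic -- not verbatim kernel source. */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * No BDI is congested, or the zone being reclaimed is not itself
	 * congested: do not sleep on the congestion queue.  Yield the CPU
	 * only if this task has exhausted its quota, then report how much
	 * of the timeout is notionally left.
	 */
	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
	    !zone_is_reclaim_congested(zone)) {
		cond_resched();
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;
		return ret;
	}

	/* Congested: sleep until a write completes or the timeout expires. */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	return ret;
}

The key point is the early-return path: when nothing is congested the
caller only yields via cond_resched() and never joins the congestion
wait queue, so it avoids sleeping for the full timeout.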
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   4
1 file changed, 2 insertions, 2 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6a683f819439..b13bc5e5bd7d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1907,7 +1907,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
 			preferred_zone, migratetype);
 
 		if (!page && gfp_mask & __GFP_NOFAIL)
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
 	} while (!page && (gfp_mask & __GFP_NOFAIL));
 
 	return page;
@@ -2095,7 +2095,7 @@ rebalance:
 	pages_reclaimed += did_some_progress;
 	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
 		/* Wait for some write requests to complete then retry */
-		congestion_wait(BLK_RW_ASYNC, HZ/50);
+		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
 		goto rebalance;
 	}
 