path: root/mm/page-writeback.c
author     Wu Fengguang <fengguang.wu@intel.com>    2011-04-05 15:21:19 -0400
committer  Wu Fengguang <fengguang.wu@intel.com>    2011-12-18 01:20:20 -0500
commit     54848d73f9f254631303d6eab9b976855988b266 (patch)
tree       9fb4b7e564f2c0df88d0bde2f482b9b7efc847fa /mm/page-writeback.c
parent     1bc36b6426ae49139e9f56491db76b95921454d7 (diff)
writeback: charge leaked page dirties to active tasks
It is a years-old problem that a large number of short-lived dirtiers (e.g. gcc instances in a fast kernel build) can starve long-running dirtiers (e.g. dd) and push the dirty pages up to the global hard limit, because each short-lived task exits before it has dirtied enough pages to be throttled.

The solution is to charge the pages dirtied by the exited gcc to the other, randomly hit, still-running dirtying tasks. This is not perfect, but it should behave well enough in practice: throttled tasks are not actually running, so the tasks that are running are the ones most likely to pick up the leaked count and get throttled themselves, which promotes an even spread of the charge.

Randy: fix compile error: 'dirty_throttle_leaks' undeclared in exit.c

Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Randy Dunlap <rdunlap@xenotime.net>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
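Editor's note: the mechanism has two halves, an exit-side charge that saves a dying task's not-yet-throttled page count, and the pick-up side shown in the mm/page-writeback.c hunks below. Because this page's diffstat is limited to mm/page-writeback.c, the exit-side hunk does not appear here. The lines below are only a hedged sketch of that half, assuming the hook sits in do_exit() in kernel/exit.c and that dirty_throttle_leaks is declared in a writeback header visible to exit.c (which is what Randy's compile fix refers to); they are not the literal committed hunk.

	/* Sketch of the exit-side charge (kernel/exit.c); exact placement assumed. */
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);

Keeping the counter per-CPU avoids any locking: an exiting task only touches the counter of the CPU it exits on, and whichever task dirties pages next on that CPU drains it inside the preempt-disabled section added below.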
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--   mm/page-writeback.c   27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 50f08241f981..619c445fc03c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1214,6 +1214,22 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 
 static DEFINE_PER_CPU(int, bdp_ratelimits);
 
+/*
+ * Normal tasks are throttled by
+ *	loop {
+ *		dirty tsk->nr_dirtied_pause pages;
+ *		take a snap in balance_dirty_pages();
+ *	}
+ * However there is a worst case: if every task exits immediately after
+ * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will
+ * never be called to throttle the page dirties. The solution is to save
+ * the not yet throttled page dirties in dirty_throttle_leaks on task exit
+ * and charge them randomly to the running tasks. This works well for the
+ * above worst case, as the new task will pick up and accumulate the old
+ * task's leaked dirty count and eventually get throttled.
+ */
+DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
@@ -1261,6 +1277,17 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 			ratelimit = 0;
 		}
 	}
+	/*
+	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
+	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
+	 * the dirty throttling and livelocking other long-run dirtiers.
+	 */
+	p = &__get_cpu_var(dirty_throttle_leaks);
+	if (*p > 0 && current->nr_dirtied < ratelimit) {
+		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
+		*p -= nr_pages_dirtied;
+		current->nr_dirtied += nr_pages_dirtied;
+	}
 	preempt_enable();
 
 	if (unlikely(current->nr_dirtied >= ratelimit))
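To make the pick-up arithmetic concrete, with illustrative numbers only: suppose ratelimit is 32 pages and the current task reaches this point with nr_dirtied = 4, while dirty_throttle_leaks on this CPU holds 100 leaked pages. Then nr_pages_dirtied = min(100, 32 - 4) = 28, the per-CPU counter drops to 72, and current->nr_dirtied becomes 32, so the unlikely(current->nr_dirtied >= ratelimit) test that follows sends the task into the throttling path to pay for the leaked pages; the remaining 72 pages will be picked up by later dirtiers on the same CPU. The min() cap means no single task is ever charged more than one ratelimit's worth of leaked pages at a time.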