 include/linux/writeback.h |  2 ++
 kernel/exit.c             |  3 +++
 mm/page-writeback.c       | 27 +++++++++++++++++++++++++++
 3 files changed, 32 insertions(+), 0 deletions(-)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a378c295851..05eaf5e3aad 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -7,6 +7,8 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
 
+DECLARE_PER_CPU(int, dirty_throttle_leaks);
+
 /*
  * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
  *
diff --git a/kernel/exit.c b/kernel/exit.c
index d0b7d988f87..d4aac24cc46 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -51,6 +51,7 @@
 #include <trace/events/sched.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/oom.h>
+#include <linux/writeback.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -1037,6 +1038,8 @@ NORET_TYPE void do_exit(long code)
 	validate_creds_for_do_exit(tsk);
 
 	preempt_disable();
+	if (tsk->nr_dirtied)
+		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
 	exit_rcu();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
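To make the worst case concrete: the do_exit() hunk above saves a task's unthrottled dirty count instead of dropping it. Below is a minimal userspace sketch of that scenario, not kernel code; the plain int and the task struct are simplified stand-ins for the dirty_throttle_leaks per-CPU counter and the task_struct fields, and the thresholds are made-up numbers for illustration.

#include <stdio.h>

/* Simplified stand-in for the dirty_throttle_leaks per-CPU counter. */
static int dirty_throttle_leaks;

struct task {
	int nr_dirtied;
	int nr_dirtied_pause;
};

/* Mirrors the do_exit() hunk: unthrottled dirties are saved, not lost. */
static void task_exit(struct task *tsk)
{
	if (tsk->nr_dirtied)
		dirty_throttle_leaks += tsk->nr_dirtied;
}

int main(void)
{
	int i;

	/*
	 * Worst case: each task dirties one page fewer than its pause
	 * threshold and exits, so balance_dirty_pages() never runs.
	 */
	for (i = 0; i < 1000; i++) {
		struct task tsk = { 0, 32 };

		tsk.nr_dirtied = tsk.nr_dirtied_pause - 1;
		task_exit(&tsk);
	}

	/* Without the patch, these 31000 page dirties would go unthrottled. */
	printf("leaked dirties: %d pages\n", dirty_throttle_leaks);
	return 0;
}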
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 50f08241f98..619c445fc03 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1214,6 +1214,22 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 
 static DEFINE_PER_CPU(int, bdp_ratelimits);
 
+/*
+ * Normal tasks are throttled by
+ *	loop {
+ *		dirty tsk->nr_dirtied_pause pages;
+ *		take a nap in balance_dirty_pages();
+ *	}
+ * However there is a worst case: if every task exits immediately after
+ * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will
+ * never be called to throttle the page dirties. The solution is to save
+ * the not-yet-throttled page dirties in dirty_throttle_leaks on task
+ * exit and charge them randomly to the running tasks. This handles the
+ * worst case well, as a new task will pick up and accumulate the exited
+ * tasks' leaked dirty counts and eventually get throttled itself.
+ */
+DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
@@ -1261,6 +1277,17 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 			ratelimit = 0;
 		}
 	}
+	/*
+	 * Pick up the dirtied pages left behind by exited tasks. This keeps
+	 * lots of short-lived tasks (e.g. gcc invocations in a kernel build)
+	 * from escaping dirty throttling and livelocking long-running dirtiers.
+	 */
+	p = &__get_cpu_var(dirty_throttle_leaks);
+	if (*p > 0 && current->nr_dirtied < ratelimit) {
+		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
+		*p -= nr_pages_dirtied;
+		current->nr_dirtied += nr_pages_dirtied;
+	}
 	preempt_enable();
 
 	if (unlikely(current->nr_dirtied >= ratelimit))