aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page-writeback.c
diff options
context:
space:
mode:
authorWu Fengguang <fengguang.wu@intel.com>2011-06-11 21:32:32 -0400
committerWu Fengguang <fengguang.wu@intel.com>2011-10-03 09:08:58 -0400
commit57fc978cfb61ed40a7bbfe5a569359159ba31abd (patch)
tree870ffd08e0c1bb0dde55e4f1ed4dfa2bda8e3a80 /mm/page-writeback.c
parentc8462cc9de9e92264ec647903772f6036a99b286 (diff)
writeback: control dirty pause time
The dirty pause time shall ultimately be controlled by adjusting nr_dirtied_pause, since there is the relationship

	pause = pages_dirtied / task_ratelimit

Assuming

	pages_dirtied ~= nr_dirtied_pause
	task_ratelimit ~= dirty_ratelimit

we get

	nr_dirtied_pause ~= dirty_ratelimit * desired_pause

Here dirty_ratelimit is preferred over task_ratelimit because it's more stable.

It's also important to limit possible large transitional errors:

- bw is changing quickly
- pages_dirtied << nr_dirtied_pause on entering the dirty-exceeded area
- pages_dirtied >> nr_dirtied_pause on btrfs (to be improved by a separate fix, but still expect non-trivial errors)

So we end up using the above formula inside clamp_val().

The best test case for this code is to run 100 "dd bs=4M" tasks on btrfs and check its pause time distribution.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--mm/page-writeback.c20
1 file changed, 19 insertions, 1 deletion
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index cc351e6f9ed9..6a8bb693b429 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1086,6 +1086,10 @@ static void balance_dirty_pages(struct address_space *mapping,
1086 task_ratelimit = (u64)dirty_ratelimit * 1086 task_ratelimit = (u64)dirty_ratelimit *
1087 pos_ratio >> RATELIMIT_CALC_SHIFT; 1087 pos_ratio >> RATELIMIT_CALC_SHIFT;
1088 pause = (HZ * pages_dirtied) / (task_ratelimit | 1); 1088 pause = (HZ * pages_dirtied) / (task_ratelimit | 1);
1089 if (unlikely(pause <= 0)) {
1090 pause = 1; /* avoid resetting nr_dirtied_pause below */
1091 break;
1092 }
1089 pause = min(pause, max_pause); 1093 pause = min(pause, max_pause);
1090 1094
1091pause: 1095pause:
@@ -1107,7 +1111,21 @@ pause:
1107 bdi->dirty_exceeded = 0; 1111 bdi->dirty_exceeded = 0;
1108 1112
1109 current->nr_dirtied = 0; 1113 current->nr_dirtied = 0;
1110 current->nr_dirtied_pause = dirty_poll_interval(nr_dirty, dirty_thresh); 1114 if (pause == 0) { /* in freerun area */
1115 current->nr_dirtied_pause =
1116 dirty_poll_interval(nr_dirty, dirty_thresh);
1117 } else if (pause <= max_pause / 4 &&
1118 pages_dirtied >= current->nr_dirtied_pause) {
1119 current->nr_dirtied_pause = clamp_val(
1120 dirty_ratelimit * (max_pause / 2) / HZ,
1121 pages_dirtied + pages_dirtied / 8,
1122 pages_dirtied * 4);
1123 } else if (pause >= max_pause) {
1124 current->nr_dirtied_pause = 1 | clamp_val(
1125 dirty_ratelimit * (max_pause / 2) / HZ,
1126 pages_dirtied / 4,
1127 pages_dirtied - pages_dirtied / 8);
1128 }
1111 1129
1112 if (writeback_in_progress(bdi)) 1130 if (writeback_in_progress(bdi))
1113 return; 1131 return;