author     Linus Torvalds <torvalds@linux-foundation.org>   2011-12-13 17:58:56 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-12-13 17:58:56 -0500
commit     4dde6dedad736b86bcea3722abb7d8031aeeaa16 (patch)
tree       fac6c306bb1ae3461213937c564dddd634ad89a3 /mm
parent     442ee5a942834431ccf0b412e3cf7bb9ae97ff4e (diff)
parent     82e230a07de3812a5e87a27979f033dad59172e3 (diff)
Merge branch 'writeback-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/linux
* 'writeback-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/linux:
writeback: set max_pause to lowest value on zero bdi_dirty
writeback: permit through good bdi even when global dirty exceeded
writeback: comment on the bdi dirty threshold
fs: Make write(2) interruptible by a fatal signal
writeback: Fix issue on make htmldocs
Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c          6
-rw-r--r--   mm/page-writeback.c  32

2 files changed, 32 insertions, 6 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index c0018f2d50e0..c106d3b3cc64 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2407,7 +2407,6 @@ static ssize_t generic_perform_write(struct file *file,
                                                 iov_iter_count(i));
 
 again:
-
         /*
          * Bring in the user page that we will copy from _first_.
          * Otherwise there's a nasty deadlock on copying from the
@@ -2463,7 +2462,10 @@ again:
                 written += copied;
 
                 balance_dirty_pages_ratelimited(mapping);
-
+                if (fatal_signal_pending(current)) {
+                        status = -EINTR;
+                        break;
+                }
         } while (iov_iter_count(i));
 
         return written ? written : status;
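The mm/filemap.c hunks are the "fs: Make write(2) interruptible by a fatal signal" change: once a fatal signal is pending, the write loop stops after the current chunk and returns the bytes already written, or -EINTR if nothing was written yet. Below is a rough userspace sketch of that same bail-out pattern; the flag, chunk size and function names are illustrative assumptions, not kernel APIs.

/*
 * Userspace sketch of the bail-out pattern added above: check a "fatal
 * signal" flag between chunks of a long copy and prefer returning the
 * partial byte count over -EINTR.  Names and sizes are assumptions.
 */
#include <errno.h>
#include <signal.h>
#include <stddef.h>
#include <sys/types.h>

static volatile sig_atomic_t fatal_signal_seen;  /* set by a signal handler */

static ssize_t copy_in_chunks(char *dst, const char *src, size_t len)
{
        size_t done = 0;
        ssize_t status = 0;

        while (done < len) {
                size_t chunk = (len - done > 4096) ? 4096 : (len - done);
                size_t i;

                for (i = 0; i < chunk; i++)
                        dst[done + i] = src[done + i];
                done += chunk;

                /* Mirrors the new fatal_signal_pending() check. */
                if (fatal_signal_seen) {
                        status = -EINTR;
                        break;
                }
        }
        return done ? (ssize_t)done : status;
}

int main(void)
{
        char src[10000] = { 0 }, dst[10000];

        return copy_in_chunks(dst, src, sizeof(src)) == sizeof(src) ? 0 : 1;
}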
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 71252486bc6f..50f08241f981 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -411,8 +411,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
  *
  * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
  * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
- * And the "limit" in the name is not seriously taken as hard limit in
- * balance_dirty_pages().
+ *
+ * Note that balance_dirty_pages() will only seriously take it as a hard limit
+ * when sleeping max_pause per page is not enough to keep the dirty pages under
+ * control. For example, when the device is completely stalled due to some error
+ * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
+ * In the other normal situations, it acts more gently by throttling the tasks
+ * more (rather than completely block them) when the bdi dirty pages go high.
  *
  * It allocates high/low dirty limits to fast/slow devices, in order to prevent
  * - starving fast devices
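The reworded comment above says the per-bdi "limit" only hardens once sleeping max_pause per dirtied page can no longer keep dirty pages under control. A back-of-envelope check of the "1000 dd tasks on a 10MB/s USB key" example follows; the 200 ms max_pause and 4 KiB page size are assumptions chosen only to show the arithmetic.

/*
 * Back-of-envelope check of the "1000 dd on a 10MB/s USB key" example.
 * The 200 ms max_pause and 4 KiB page size are illustrative assumptions.
 */
#include <stdio.h>

int main(void)
{
        const double max_pause_s = 0.2;        /* assumed per-page sleep   */
        const double page_kib    = 4.0;        /* assumed page size        */
        const int    tasks       = 1000;       /* dirtier tasks            */
        const double disk_kibps  = 10 * 1024;  /* ~10 MB/s writeback       */

        /* Even sleeping max_pause per dirtied page, the tasks together can
         * still dirty tasks / max_pause pages per second. */
        double dirty_kibps = tasks / max_pause_s * page_kib;

        printf("dirtying %.0f KiB/s vs writeback %.0f KiB/s\n",
               dirty_kibps, disk_kibps);
        if (dirty_kibps > disk_kibps)
                printf("dirty pages keep growing, so the limit turns hard\n");
        return 0;
}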
@@ -594,6 +599,13 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
          */
         if (unlikely(bdi_thresh > thresh))
                 bdi_thresh = thresh;
+        /*
+         * It's very possible that bdi_thresh is close to 0 not because the
+         * device is slow, but that it has remained inactive for long time.
+         * Honour such devices a reasonable good (hopefully IO efficient)
+         * threshold, so that the occasional writes won't be blocked and active
+         * writes can rampup the threshold quickly.
+         */
         bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
         /*
          * scale global setpoint to bdi's:
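The new comment explains why an idle bdi's near-zero threshold is lifted to (limit - dirty) / 8 before it is used. A minimal numeric sketch of that floor, with made-up page counts:

/*
 * Minimal sketch of the (limit - dirty) / 8 floor applied in the hunk
 * above.  All page counts here are made up purely for illustration.
 */
#include <stdio.h>

int main(void)
{
        unsigned long limit      = 100000;  /* global dirty limit, pages   */
        unsigned long dirty      = 60000;   /* pages currently dirty       */
        unsigned long bdi_thresh = 3;       /* near zero: the bdi was idle */

        unsigned long floor = (limit - dirty) / 8;   /* = 5000 pages */
        if (bdi_thresh < floor)
                bdi_thresh = floor;

        /* With 4 KiB pages the idle bdi still gets roughly 20 MiB of room
         * before balance_dirty_pages() starts blocking its writers. */
        printf("bdi_thresh = %lu pages (~%lu MiB)\n",
               bdi_thresh, bdi_thresh * 4 / 1024);
        return 0;
}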
@@ -977,8 +989,7 @@ static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
          *
          * 8 serves as the safety ratio.
          */
-        if (bdi_dirty)
-                t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+        t = min(t, bdi_dirty * HZ / (8 * bw + 1));
 
         /*
          * The pause time will be settled within range (max_pause/4, max_pause).
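Dropping the if (bdi_dirty) guard means a zero bdi_dirty now drives the pause cap to its lowest value instead of leaving the previous, possibly long, cap untouched ("writeback: set max_pause to lowest value on zero bdi_dirty"). A small sketch of the clamp with assumed numbers; HZ, the prior cap, the bandwidth and the dirty counts are illustrative only.

/*
 * Sketch of the max-pause clamp after the change above.  All values
 * below are assumptions chosen only to show the arithmetic.
 */
#include <stdio.h>

#define HZ 1000UL   /* assumed tick rate */

/* Mirrors: t = min(t, bdi_dirty * HZ / (8 * bw + 1)) */
static unsigned long clamp_pause(unsigned long t, unsigned long bdi_dirty,
                                 unsigned long bw)
{
        unsigned long cap = bdi_dirty * HZ / (8 * bw + 1);
        return t < cap ? t : cap;
}

int main(void)
{
        unsigned long t  = 200;   /* previous cap: 200 jiffies           */
        unsigned long bw = 2560;  /* ~10 MB/s in 4 KiB pages per second  */

        /* 512 dirty pages: the cap drops to 24 jiffies here. */
        printf("bdi_dirty=512 -> %lu jiffies\n", clamp_pause(t, 512, bw));
        /* 0 dirty pages: the clamp now applies too, giving the lowest
         * possible pause instead of keeping the stale 200-jiffy value. */
        printf("bdi_dirty=0   -> %lu jiffies\n", clamp_pause(t, 0, bw));
        return 0;
}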
@@ -1136,6 +1147,19 @@ pause:
                 if (task_ratelimit)
                         break;
 
+                /*
+                 * In the case of an unresponding NFS server and the NFS dirty
+                 * pages exceeds dirty_thresh, give the other good bdi's a pipe
+                 * to go through, so that tasks on them still remain responsive.
+                 *
+                 * In theory 1 page is enough to keep the comsumer-producer
+                 * pipe going: the flusher cleans 1 page => the task dirties 1
+                 * more page. However bdi_dirty has accounting errors. So use
+                 * the larger and more IO friendly bdi_stat_error.
+                 */
+                if (bdi_dirty <= bdi_stat_error(bdi))
+                        break;
+
                 if (fatal_signal_pending(current))
                         break;
                 }
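This last hunk ("writeback: permit through good bdi even when global dirty exceeded") breaks out of the throttle loop once bdi_dirty falls within the per-bdi counter's worst-case accounting error, so writers on healthy bdis are not kept blocked when, say, a dead NFS server holds the global count over dirty_thresh. The userspace sketch below shows the underlying idea of trusting an approximate per-CPU counter only beyond its error margin; NR_CPUS, STAT_BATCH and all names are assumptions, not the kernel's bdi_stat_error() definition.

/*
 * Sketch of the idea behind the bdi_stat_error() check above: a per-CPU
 * counter is only accurate to within cpus * batch, so a reading inside
 * that margin is treated as "effectively clean" and throttling stops.
 * The constants and names here are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS    8
#define STAT_BATCH 32   /* per-CPU slop before folding into the total */

struct approx_counter {
        long global;            /* folded total                       */
        long local[NR_CPUS];    /* unflushed per-CPU deltas (< batch) */
};

/* Cheap approximate read: ignores the unflushed per-CPU deltas. */
static long counter_read(const struct approx_counter *c)
{
        return c->global;
}

/* Worst-case error of counter_read(). */
static long counter_error(void)
{
        return (long)NR_CPUS * STAT_BATCH;
}

static bool keep_throttling(const struct approx_counter *bdi_dirty)
{
        /* Mirrors: if (bdi_dirty <= bdi_stat_error(bdi)) break; */
        return counter_read(bdi_dirty) > counter_error();
}

int main(void)
{
        struct approx_counter dirty = { .global = 100 };  /* 100 <= 256 */

        printf("error margin %ld pages, keep throttling: %s\n",
               counter_error(), keep_throttling(&dirty) ? "yes" : "no");
        return 0;
}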