author | Wu Fengguang <fengguang.wu@intel.com> | 2010-10-26 17:21:26 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-26 19:52:05 -0400 |
commit | 1b430beee5e388605dfb092b214ef0320f752cf6 (patch) | |
tree | c1b1ece282aab771fd1386a3fe0c6e82cb5c5bfe | |
parent | d19d5476f4b9f91d2de92b91588bb118beba6c0d (diff) |
writeback: remove nonblocking/encountered_congestion references
This removes more dead code that was somehow missed by commit 0d99519efef
(writeback: remove unused nonblocking and congestion checks). There is
no behavior change except for the removal of two entries from one of the
ext4 tracing interfaces.
The nonblocking checks in ->writepages are no longer used because the
flusher now prefers to block on get_request_wait() rather than skip inodes on
IO congestion; the latter would lead to more seeky IO.
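As a rough illustration only — a minimal user-space model, not kernel code; the struct and the congestion helper are trimmed stand-ins for struct writeback_control and bdi_write_congested() — this is the shape of the pattern being deleted from the ->writepages implementations below, next to the new behavior of simply writing and letting the block layer throttle the caller:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for struct writeback_control and the bdi congestion state. */
struct writeback_control {
	bool nonblocking;            /* flag removed by this patch */
	bool encountered_congestion; /* flag removed by this patch */
	long nr_to_write;
};

static bool bdi_write_congested(void)
{
	return true; /* pretend the device request queue is backed up */
}

/* Old pattern: give up on the inode as soon as the bdi looks congested. */
static int writepages_old(struct writeback_control *wbc)
{
	if (wbc->nonblocking && bdi_write_congested()) {
		wbc->encountered_congestion = true;
		return 0; /* inode skipped; revisiting it later means seeky IO */
	}
	while (wbc->nr_to_write > 0)
		wbc->nr_to_write--; /* "write" a page */
	return 0;
}

/* New pattern: keep writing; submission sleeps if the queue is full. */
static int writepages_new(struct writeback_control *wbc)
{
	while (wbc->nr_to_write > 0)
		wbc->nr_to_write--; /* "write" a page, possibly after blocking */
	return 0;
}

int main(void)
{
	struct writeback_control wbc = { .nonblocking = true, .nr_to_write = 4 };

	writepages_old(&wbc);
	printf("old: skipped=%d remaining=%ld\n",
	       wbc.encountered_congestion, wbc.nr_to_write);

	wbc = (struct writeback_control){ .nr_to_write = 4 };
	writepages_new(&wbc);
	printf("new: remaining=%ld\n", wbc.nr_to_write);
	return 0;
}
```

The early return in the old variant is what skipped whole inodes on congestion and produced the seekier IO pattern described above.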
The nonblocking checks in ->writepage are no longer used because they are
redundant with the WB_SYNC_NONE check.
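A hedged sketch of that check, with simplified stand-in types (compare the fs/buffer.c, fs/gfs2 and fs/reiserfs hunks below): previously the trylock branch was taken only when both WB_SYNC_NONE and ->nonblocking were set; after this patch every WB_SYNC_NONE writeback takes it, and WB_SYNC_ALL still blocks on the buffer lock.

```c
#include <stdbool.h>
#include <stdio.h>

enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL }; /* simplified stand-in */

struct writeback_control {
	enum sync_mode sync_mode;
	bool nonblocking; /* flag this patch stops setting */
};

/* Old check: only nonblocking WB_SYNC_NONE writeback got the trylock path. */
static bool blocks_on_buffer_lock_old(const struct writeback_control *wbc)
{
	return wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking;
}

/* New check: every WB_SYNC_NONE writeback trylocks; WB_SYNC_ALL still blocks. */
static bool blocks_on_buffer_lock_new(const struct writeback_control *wbc)
{
	return wbc->sync_mode != WB_SYNC_NONE;
}

int main(void)
{
	/* Integrity writeback vs. the old reclaim/migration configuration. */
	struct writeback_control sync_all = { WB_SYNC_ALL,  false };
	struct writeback_control reclaim  = { WB_SYNC_NONE, true  };

	printf("WB_SYNC_ALL             : old=%d new=%d\n",
	       blocks_on_buffer_lock_old(&sync_all),
	       blocks_on_buffer_lock_new(&sync_all));
	printf("WB_SYNC_NONE+nonblocking: old=%d new=%d\n",
	       blocks_on_buffer_lock_old(&reclaim),
	       blocks_on_buffer_lock_new(&reclaim));
	return 0;
}
```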
We no longer set ->nonblocking in VM page out and page migration, because:
a) it is effectively redundant with WB_SYNC_NONE in the current code;
b) its old semantic of "don't get stuck on request queues" is a misbehavior:
it would skip some dirty inodes on congestion and page out others, which
is unfair in terms of LRU age.
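For reference, a trimmed sketch of the reclaim-side writeback_control after this change, mirroring the mm/vmscan.c hunk further down; the field layout and the SWAP_CLUSTER_MAX value here are simplified assumptions, not taken verbatim from the kernel:

```c
#include <limits.h>
#include <stdio.h>

#define WB_SYNC_NONE     0
#define SWAP_CLUSTER_MAX 32   /* illustrative value, not taken from this patch */

/* Trimmed stand-in for the kernel's struct writeback_control. */
struct writeback_control {
	int sync_mode;
	long nr_to_write;
	long long range_start;
	long long range_end;
	unsigned for_reclaim:1;
	/* unsigned nonblocking:1;   no longer set by reclaim or migration */
};

/* Roughly what mm/vmscan.c's pageout() sets up after this change. */
static struct writeback_control reclaim_wbc = {
	.sync_mode   = WB_SYNC_NONE,
	.nr_to_write = SWAP_CLUSTER_MAX,
	.range_start = 0,
	.range_end   = LLONG_MAX,
	/* .nonblocking = 1,   dropped: effectively redundant with WB_SYNC_NONE */
	.for_reclaim = 1,
};

int main(void)
{
	printf("reclaim wbc: nr_to_write=%ld for_reclaim=%d\n",
	       reclaim_wbc.nr_to_write, (int)reclaim_wbc.for_reclaim);
	return 0;
}
```

The mm/migrate.c writeout() initializer loses the same .nonblocking = 1 line, with .nr_to_write = 1 instead.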
Inspired by Christoph Hellwig. Thanks!
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: David Howells <dhowells@redhat.com>
Cc: Sage Weil <sage@newdream.net>
Cc: Steve French <sfrench@samba.org>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | fs/afs/write.c | 19
-rw-r--r-- | fs/buffer.c | 2
-rw-r--r-- | fs/ceph/addr.c | 9
-rw-r--r-- | fs/cifs/file.c | 10
-rw-r--r-- | fs/gfs2/meta_io.c | 2
-rw-r--r-- | fs/nfs/write.c | 4
-rw-r--r-- | fs/reiserfs/inode.c | 2
-rw-r--r-- | fs/xfs/linux-2.6/xfs_aops.c | 3
-rw-r--r-- | include/trace/events/ext4.h | 8
-rw-r--r-- | include/trace/events/writeback.h | 2
-rw-r--r-- | mm/migrate.c | 1
-rw-r--r-- | mm/vmscan.c | 1
12 files changed, 11 insertions, 52 deletions
diff --git a/fs/afs/write.c b/fs/afs/write.c index 722743b152d8..15690bb1d3b5 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c | |||
@@ -438,7 +438,6 @@ no_more: | |||
438 | */ | 438 | */ |
439 | int afs_writepage(struct page *page, struct writeback_control *wbc) | 439 | int afs_writepage(struct page *page, struct writeback_control *wbc) |
440 | { | 440 | { |
441 | struct backing_dev_info *bdi = page->mapping->backing_dev_info; | ||
442 | struct afs_writeback *wb; | 441 | struct afs_writeback *wb; |
443 | int ret; | 442 | int ret; |
444 | 443 | ||
@@ -455,8 +454,6 @@ int afs_writepage(struct page *page, struct writeback_control *wbc) | |||
455 | } | 454 | } |
456 | 455 | ||
457 | wbc->nr_to_write -= ret; | 456 | wbc->nr_to_write -= ret; |
458 | if (wbc->nonblocking && bdi_write_congested(bdi)) | ||
459 | wbc->encountered_congestion = 1; | ||
460 | 457 | ||
461 | _leave(" = 0"); | 458 | _leave(" = 0"); |
462 | return 0; | 459 | return 0; |
@@ -469,7 +466,6 @@ static int afs_writepages_region(struct address_space *mapping, | |||
469 | struct writeback_control *wbc, | 466 | struct writeback_control *wbc, |
470 | pgoff_t index, pgoff_t end, pgoff_t *_next) | 467 | pgoff_t index, pgoff_t end, pgoff_t *_next) |
471 | { | 468 | { |
472 | struct backing_dev_info *bdi = mapping->backing_dev_info; | ||
473 | struct afs_writeback *wb; | 469 | struct afs_writeback *wb; |
474 | struct page *page; | 470 | struct page *page; |
475 | int ret, n; | 471 | int ret, n; |
@@ -529,11 +525,6 @@ static int afs_writepages_region(struct address_space *mapping, | |||
529 | 525 | ||
530 | wbc->nr_to_write -= ret; | 526 | wbc->nr_to_write -= ret; |
531 | 527 | ||
532 | if (wbc->nonblocking && bdi_write_congested(bdi)) { | ||
533 | wbc->encountered_congestion = 1; | ||
534 | break; | ||
535 | } | ||
536 | |||
537 | cond_resched(); | 528 | cond_resched(); |
538 | } while (index < end && wbc->nr_to_write > 0); | 529 | } while (index < end && wbc->nr_to_write > 0); |
539 | 530 | ||
@@ -548,24 +539,16 @@ static int afs_writepages_region(struct address_space *mapping, | |||
548 | int afs_writepages(struct address_space *mapping, | 539 | int afs_writepages(struct address_space *mapping, |
549 | struct writeback_control *wbc) | 540 | struct writeback_control *wbc) |
550 | { | 541 | { |
551 | struct backing_dev_info *bdi = mapping->backing_dev_info; | ||
552 | pgoff_t start, end, next; | 542 | pgoff_t start, end, next; |
553 | int ret; | 543 | int ret; |
554 | 544 | ||
555 | _enter(""); | 545 | _enter(""); |
556 | 546 | ||
557 | if (wbc->nonblocking && bdi_write_congested(bdi)) { | ||
558 | wbc->encountered_congestion = 1; | ||
559 | _leave(" = 0 [congest]"); | ||
560 | return 0; | ||
561 | } | ||
562 | |||
563 | if (wbc->range_cyclic) { | 547 | if (wbc->range_cyclic) { |
564 | start = mapping->writeback_index; | 548 | start = mapping->writeback_index; |
565 | end = -1; | 549 | end = -1; |
566 | ret = afs_writepages_region(mapping, wbc, start, end, &next); | 550 | ret = afs_writepages_region(mapping, wbc, start, end, &next); |
567 | if (start > 0 && wbc->nr_to_write > 0 && ret == 0 && | 551 | if (start > 0 && wbc->nr_to_write > 0 && ret == 0) |
568 | !(wbc->nonblocking && wbc->encountered_congestion)) | ||
569 | ret = afs_writepages_region(mapping, wbc, 0, start, | 552 | ret = afs_writepages_region(mapping, wbc, 0, start, |
570 | &next); | 553 | &next); |
571 | mapping->writeback_index = next; | 554 | mapping->writeback_index = next; |
diff --git a/fs/buffer.c b/fs/buffer.c index 7f0b9b083f77..ec21a92e08b4 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -1706,7 +1706,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, | |||
1706 | * and kswapd activity, but those code paths have their own | 1706 | * and kswapd activity, but those code paths have their own |
1707 | * higher-level throttling. | 1707 | * higher-level throttling. |
1708 | */ | 1708 | */ |
1709 | if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { | 1709 | if (wbc->sync_mode != WB_SYNC_NONE) { |
1710 | lock_buffer(bh); | 1710 | lock_buffer(bh); |
1711 | } else if (!trylock_buffer(bh)) { | 1711 | } else if (!trylock_buffer(bh)) { |
1712 | redirty_page_for_writepage(wbc, page); | 1712 | redirty_page_for_writepage(wbc, page); |
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 51bcc5ce3230..e9c874abc9e1 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -591,7 +591,6 @@ static int ceph_writepages_start(struct address_space *mapping, | |||
591 | struct writeback_control *wbc) | 591 | struct writeback_control *wbc) |
592 | { | 592 | { |
593 | struct inode *inode = mapping->host; | 593 | struct inode *inode = mapping->host; |
594 | struct backing_dev_info *bdi = mapping->backing_dev_info; | ||
595 | struct ceph_inode_info *ci = ceph_inode(inode); | 594 | struct ceph_inode_info *ci = ceph_inode(inode); |
596 | struct ceph_fs_client *fsc; | 595 | struct ceph_fs_client *fsc; |
597 | pgoff_t index, start, end; | 596 | pgoff_t index, start, end; |
@@ -633,13 +632,6 @@ static int ceph_writepages_start(struct address_space *mapping, | |||
633 | 632 | ||
634 | pagevec_init(&pvec, 0); | 633 | pagevec_init(&pvec, 0); |
635 | 634 | ||
636 | /* ?? */ | ||
637 | if (wbc->nonblocking && bdi_write_congested(bdi)) { | ||
638 | dout(" writepages congested\n"); | ||
639 | wbc->encountered_congestion = 1; | ||
640 | goto out_final; | ||
641 | } | ||
642 | |||
643 | /* where to start/end? */ | 635 | /* where to start/end? */ |
644 | if (wbc->range_cyclic) { | 636 | if (wbc->range_cyclic) { |
645 | start = mapping->writeback_index; /* Start from prev offset */ | 637 | start = mapping->writeback_index; /* Start from prev offset */ |
@@ -885,7 +877,6 @@ out: | |||
885 | rc = 0; /* vfs expects us to return 0 */ | 877 | rc = 0; /* vfs expects us to return 0 */ |
886 | ceph_put_snap_context(snapc); | 878 | ceph_put_snap_context(snapc); |
887 | dout("writepages done, rc = %d\n", rc); | 879 | dout("writepages done, rc = %d\n", rc); |
888 | out_final: | ||
889 | return rc; | 880 | return rc; |
890 | } | 881 | } |
891 | 882 | ||
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 8c81e7b14d53..45af003865d2 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -1303,7 +1303,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) | |||
1303 | static int cifs_writepages(struct address_space *mapping, | 1303 | static int cifs_writepages(struct address_space *mapping, |
1304 | struct writeback_control *wbc) | 1304 | struct writeback_control *wbc) |
1305 | { | 1305 | { |
1306 | struct backing_dev_info *bdi = mapping->backing_dev_info; | ||
1307 | unsigned int bytes_to_write; | 1306 | unsigned int bytes_to_write; |
1308 | unsigned int bytes_written; | 1307 | unsigned int bytes_written; |
1309 | struct cifs_sb_info *cifs_sb; | 1308 | struct cifs_sb_info *cifs_sb; |
@@ -1326,15 +1325,6 @@ static int cifs_writepages(struct address_space *mapping, | |||
1326 | int scanned = 0; | 1325 | int scanned = 0; |
1327 | int xid, long_op; | 1326 | int xid, long_op; |
1328 | 1327 | ||
1329 | /* | ||
1330 | * BB: Is this meaningful for a non-block-device file system? | ||
1331 | * If it is, we should test it again after we do I/O | ||
1332 | */ | ||
1333 | if (wbc->nonblocking && bdi_write_congested(bdi)) { | ||
1334 | wbc->encountered_congestion = 1; | ||
1335 | return 0; | ||
1336 | } | ||
1337 | |||
1338 | cifs_sb = CIFS_SB(mapping->host->i_sb); | 1328 | cifs_sb = CIFS_SB(mapping->host->i_sb); |
1339 | 1329 | ||
1340 | /* | 1330 | /* |
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index f3b071f921aa..939739c7b3f9 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c | |||
@@ -55,7 +55,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb | |||
55 | * activity, but those code paths have their own higher-level | 55 | * activity, but those code paths have their own higher-level |
56 | * throttling. | 56 | * throttling. |
57 | */ | 57 | */ |
58 | if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { | 58 | if (wbc->sync_mode != WB_SYNC_NONE) { |
59 | lock_buffer(bh); | 59 | lock_buffer(bh); |
60 | } else if (!trylock_buffer(bh)) { | 60 | } else if (!trylock_buffer(bh)) { |
61 | redirty_page_for_writepage(wbc, page); | 61 | redirty_page_for_writepage(wbc, page); |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 605e292501f4..4c14c17a5276 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -290,9 +290,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st | |||
290 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); | 290 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); |
291 | 291 | ||
292 | nfs_pageio_cond_complete(pgio, page->index); | 292 | nfs_pageio_cond_complete(pgio, page->index); |
293 | ret = nfs_page_async_flush(pgio, page, | 293 | ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); |
294 | wbc->sync_mode == WB_SYNC_NONE || | ||
295 | wbc->nonblocking != 0); | ||
296 | if (ret == -EAGAIN) { | 294 | if (ret == -EAGAIN) { |
297 | redirty_page_for_writepage(wbc, page); | 295 | redirty_page_for_writepage(wbc, page); |
298 | ret = 0; | 296 | ret = 0; |
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index caa758377d66..c1f93896cb53 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
@@ -2438,7 +2438,7 @@ static int reiserfs_write_full_page(struct page *page, | |||
2438 | /* from this point on, we know the buffer is mapped to a | 2438 | /* from this point on, we know the buffer is mapped to a |
2439 | * real block and not a direct item | 2439 | * real block and not a direct item |
2440 | */ | 2440 | */ |
2441 | if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { | 2441 | if (wbc->sync_mode != WB_SYNC_NONE) { |
2442 | lock_buffer(bh); | 2442 | lock_buffer(bh); |
2443 | } else { | 2443 | } else { |
2444 | if (!trylock_buffer(bh)) { | 2444 | if (!trylock_buffer(bh)) { |
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index b552f816de15..c9af48fffcd7 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
@@ -1139,8 +1139,7 @@ xfs_vm_writepage( | |||
1139 | type = IO_DELAY; | 1139 | type = IO_DELAY; |
1140 | flags = BMAPI_ALLOCATE; | 1140 | flags = BMAPI_ALLOCATE; |
1141 | 1141 | ||
1142 | if (wbc->sync_mode == WB_SYNC_NONE && | 1142 | if (wbc->sync_mode == WB_SYNC_NONE) |
1143 | wbc->nonblocking) | ||
1144 | flags |= BMAPI_TRYLOCK; | 1143 | flags |= BMAPI_TRYLOCK; |
1145 | } | 1144 | } |
1146 | 1145 | ||
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 01e9e0076a92..6bcb00645de4 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h | |||
@@ -242,18 +242,20 @@ TRACE_EVENT(ext4_da_writepages, | |||
242 | __entry->pages_skipped = wbc->pages_skipped; | 242 | __entry->pages_skipped = wbc->pages_skipped; |
243 | __entry->range_start = wbc->range_start; | 243 | __entry->range_start = wbc->range_start; |
244 | __entry->range_end = wbc->range_end; | 244 | __entry->range_end = wbc->range_end; |
245 | __entry->nonblocking = wbc->nonblocking; | ||
246 | __entry->for_kupdate = wbc->for_kupdate; | 245 | __entry->for_kupdate = wbc->for_kupdate; |
247 | __entry->for_reclaim = wbc->for_reclaim; | 246 | __entry->for_reclaim = wbc->for_reclaim; |
248 | __entry->range_cyclic = wbc->range_cyclic; | 247 | __entry->range_cyclic = wbc->range_cyclic; |
249 | __entry->writeback_index = inode->i_mapping->writeback_index; | 248 | __entry->writeback_index = inode->i_mapping->writeback_index; |
250 | ), | 249 | ), |
251 | 250 | ||
252 | TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d writeback_index %lu", | 251 | TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld " |
252 | "range_start %llu range_end %llu " | ||
253 | "for_kupdate %d for_reclaim %d " | ||
254 | "range_cyclic %d writeback_index %lu", | ||
253 | jbd2_dev_to_name(__entry->dev), | 255 | jbd2_dev_to_name(__entry->dev), |
254 | (unsigned long) __entry->ino, __entry->nr_to_write, | 256 | (unsigned long) __entry->ino, __entry->nr_to_write, |
255 | __entry->pages_skipped, __entry->range_start, | 257 | __entry->pages_skipped, __entry->range_start, |
256 | __entry->range_end, __entry->nonblocking, | 258 | __entry->range_end, |
257 | __entry->for_kupdate, __entry->for_reclaim, | 259 | __entry->for_kupdate, __entry->for_reclaim, |
258 | __entry->range_cyclic, | 260 | __entry->range_cyclic, |
259 | (unsigned long) __entry->writeback_index) | 261 | (unsigned long) __entry->writeback_index) |
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index f345f66ae9d1..0bb01ab2e984 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h | |||
@@ -96,8 +96,6 @@ DECLARE_EVENT_CLASS(wbc_class, | |||
96 | __field(long, nr_to_write) | 96 | __field(long, nr_to_write) |
97 | __field(long, pages_skipped) | 97 | __field(long, pages_skipped) |
98 | __field(int, sync_mode) | 98 | __field(int, sync_mode) |
99 | __field(int, nonblocking) | ||
100 | __field(int, encountered_congestion) | ||
101 | __field(int, for_kupdate) | 99 | __field(int, for_kupdate) |
102 | __field(int, for_background) | 100 | __field(int, for_background) |
103 | __field(int, for_reclaim) | 101 | __field(int, for_reclaim) |
diff --git a/mm/migrate.c b/mm/migrate.c index f8c9bccf2520..d917ac3207f5 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -497,7 +497,6 @@ static int writeout(struct address_space *mapping, struct page *page) | |||
497 | .nr_to_write = 1, | 497 | .nr_to_write = 1, |
498 | .range_start = 0, | 498 | .range_start = 0, |
499 | .range_end = LLONG_MAX, | 499 | .range_end = LLONG_MAX, |
500 | .nonblocking = 1, | ||
501 | .for_reclaim = 1 | 500 | .for_reclaim = 1 |
502 | }; | 501 | }; |
503 | int rc; | 502 | int rc; |
diff --git a/mm/vmscan.c b/mm/vmscan.c index b94c9464f262..6cbc1aac23ae 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -376,7 +376,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, | |||
376 | .nr_to_write = SWAP_CLUSTER_MAX, | 376 | .nr_to_write = SWAP_CLUSTER_MAX, |
377 | .range_start = 0, | 377 | .range_start = 0, |
378 | .range_end = LLONG_MAX, | 378 | .range_end = LLONG_MAX, |
379 | .nonblocking = 1, | ||
380 | .for_reclaim = 1, | 379 | .for_reclaim = 1, |
381 | }; | 380 | }; |
382 | 381 | ||