Diffstat (limited to 'fs/buffer.c')

-rw-r--r--  fs/buffer.c  35 +++++++++++++++++++++++++++++------
1 file changed, 29 insertions(+), 6 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 5d55a896ff78..13edf7ad3ff1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -737,7 +737,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping;
+	struct address_space *mapping, *prev_mapping = NULL;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -762,7 +762,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * contents - it is a noop if I/O is still in
 				 * flight on potentially older contents.
 				 */
-				ll_rw_block(SWRITE_SYNC, 1, &bh);
+				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+
+				/*
+				 * Kick off IO for the previous mapping. Note
+				 * that we will not run the very last mapping,
+				 * wait_on_buffer() will do that for us
+				 * through sync_buffer().
+				 */
+				if (prev_mapping && prev_mapping != mapping)
+					blk_run_address_space(prev_mapping);
+				prev_mapping = mapping;
+
 				brelse(bh);
 				spin_lock(lock);
 			}
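The comment added in this hunk is the heart of the change: each buffer is submitted with a synchronous-but-plugged write, and the block queue behind a mapping is kicked only once the list walk moves on to a different mapping, leaving the very last mapping to the wait_on_buffer() path. Below is a minimal userspace sketch of that batching pattern, assuming nothing from the kernel: struct queue and run_queue() are invented stand-ins for the address_space and blk_run_address_space().

/*
 * Hypothetical userspace model of the "kick the previous queue when
 * the mapping changes" pattern above -- NOT kernel code.  run_queue()
 * stands in for blk_run_address_space(); struct queue for the
 * address_space whose backing device would be unplugged.
 */
#include <stdio.h>

struct queue { const char *name; };

static void run_queue(struct queue *q)
{
	/* stand-in for an unplug: start the device on its queued IO */
	printf("kicking queue %s\n", q->name);
}

int main(void)
{
	struct queue qa = { "A" }, qb = { "B" };
	/* dirty buffers in list order, tagged with their backing queue */
	struct queue *writes[] = { &qa, &qa, &qb, &qb, &qa };
	struct queue *prev = NULL;
	size_t i;

	for (i = 0; i < sizeof(writes) / sizeof(writes[0]); i++) {
		/* ...the write itself would be submitted plugged here... */
		if (prev && prev != writes[i])
			run_queue(prev);   /* walk moved to another queue */
		prev = writes[i];
	}
	/* the last queue is deliberately left for the waiter to kick */
	return 0;
}

Batching the kick per mapping rather than per buffer gives the IO scheduler a chance to merge adjacent buffers into larger requests before the device sees them.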
@@ -1585,6 +1596,16 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * locked buffer. This only can happen if someone has written the buffer
  * directly, with submit_bh(). At the address_space level PageWriteback
  * prevents this contention from occurring.
+ *
+ * If block_write_full_page() is called with wbc->sync_mode ==
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
+ * causes the writes to be flagged as synchronous writes, but the
+ * block device queue will NOT be unplugged, since usually many pages
+ * will be pushed to the queue before the higher-level caller actually
+ * waits for the writes to be completed.  The various wait functions,
+ * such as wait_on_writeback_range(), will ultimately call sync_page(),
+ * which will in turn call blk_run_backing_dev(), which will end up
+ * unplugging the device queue.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
 			get_block_t *get_block, struct writeback_control *wbc)
@@ -1595,7 +1616,8 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	struct buffer_head *bh, *head;
 	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
-	int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
+			WRITE_SYNC_PLUG : WRITE);
 
 	BUG_ON(!PageLocked(page));
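Taken together with the comment block added above, the intent is: WB_SYNC_ALL writeback marks its IO synchronous for the scheduler but leaves the queue plugged, deferring the unplug to whatever eventually waits (sync_page() and then blk_run_backing_dev()). A small illustrative sketch of the selection follows; the flag names and bit values are invented for the example, and only WRITE, WRITE_SYNC and WRITE_SYNC_PLUG come from the source.

/*
 * Illustrative only -- invented flags, not the kernel's definitions.
 * The point is the shape of the decision in __block_write_full_page():
 * data-integrity writeback is synchronous, but the unplug is deferred.
 */
#include <stdio.h>

enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

#define F_WRITE (1 << 0)   /* ordinary queued write */
#define F_SYNC  (1 << 1)   /* latency-sensitive: schedule promptly */
#define F_PLUG  (1 << 2)   /* ...but do not unplug the queue yet */

static int pick_write_op(enum sync_mode mode)
{
	/* mirrors: (sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE) */
	return mode == WB_SYNC_ALL ? (F_WRITE | F_SYNC | F_PLUG) : F_WRITE;
}

int main(void)
{
	printf("WB_SYNC_ALL  -> flags 0x%x\n", pick_write_op(WB_SYNC_ALL));
	printf("WB_SYNC_NONE -> flags 0x%x\n", pick_write_op(WB_SYNC_NONE));
	return 0;
}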
@@ -2957,12 +2979,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE || rw == SWRITE_SYNC)
+		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
 			lock_buffer(bh);
 		else if (!trylock_buffer(bh))
 			continue;
 
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
+		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
+		    rw == SWRITE_SYNC_PLUG) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
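The SWRITE* cases differ from plain READ/WRITE in how the buffer lock is taken: a caller using one of the SWRITE variants must write that specific buffer and therefore blocks in lock_buffer(), while plain callers trylock and simply skip buffers whose IO is already in flight. A hedged pthread-based analogue, with invented types rather than the kernel API, is sketched below.

/*
 * Userspace analogue of the lock_buffer()/trylock_buffer() split --
 * invented types, not the kernel API.  "Must write" callers block for
 * the lock; opportunistic callers skip a busy buffer, since a held
 * lock means IO is already in flight on it.
 */
#include <pthread.h>
#include <stdio.h>

struct buf { pthread_mutex_t lock; int dirty; };

static void write_buffer(struct buf *b, int must_write)
{
	if (must_write)
		pthread_mutex_lock(&b->lock);      /* SWRITE*: wait for it */
	else if (pthread_mutex_trylock(&b->lock))
		return;                            /* busy: skip, like WRITE */

	if (b->dirty) {
		b->dirty = 0;
		printf("submitting %p\n", (void *)b);
	}
	/* the kernel would leave a submitted buffer locked until its IO
	 * completes; this toy model just drops the lock again */
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct buf b = { PTHREAD_MUTEX_INITIALIZER, 1 };
	write_buffer(&b, 1);
	return 0;
}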
@@ -2998,7 +3021,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE_SYNC, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
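sync_dirty_buffer() is the contrasting case: wait_on_buffer() follows the submit immediately, so there is nothing to batch, and the write is issued as a full WRITE_SYNC, which per the comment block above (unlike WRITE_SYNC_PLUG) also kicks the queue at submit time. A toy sketch of the two flavours, with invented names:

/*
 * Toy contrast of the two synchronous write flavours in this patch --
 * invented names, not kernel code.  A batched caller defers the queue
 * kick; a submit-and-wait caller like sync_dirty_buffer() kicks at
 * submit time because it blocks on that IO right away.
 */
#include <stdio.h>

static void submit(const char *who, int kick_now)
{
	printf("%s: queued%s\n", who,
	       kick_now ? ", queue kicked immediately" : ", kick deferred");
}

int main(void)
{
	/* fsync_buffers_list(): many buffers, queue kicked per mapping */
	submit("WRITE_SYNC_PLUG-style batch", 0);
	/* sync_dirty_buffer(): one buffer, wait follows immediately */
	submit("WRITE_SYNC-style submit-and-wait", 1);
	return 0;
}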