author		Jens Axboe <jens.axboe@oracle.com>	2009-04-06 08:48:03 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-06 11:04:53 -0400
commit		9cf6b720f84d6999ff9a514d0a939dd183846aaf (patch)
tree		13f58881927769c792b8b1f685282aa67264b9ed /fs/buffer.c
parent		a1f242524c3c1f5d40f1c9c343427e34d1aadd6e (diff)
block: fsync_buffers_list() should use SWRITE_SYNC_PLUG
Then it can submit all the buffers without unplugging for each one. We will kick off the pending IO if we come across a new address space.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	20
1 file changed, 16 insertions, 4 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 5d55a896ff78..43afaa5d6901 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -737,7 +737,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping;
+	struct address_space *mapping, *prev_mapping = NULL;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -762,7 +762,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * contents - it is a noop if I/O is still in
 				 * flight on potentially older contents.
 				 */
-				ll_rw_block(SWRITE_SYNC, 1, &bh);
+				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+
+				/*
+				 * Kick off IO for the previous mapping. Note
+				 * that we will not run the very last mapping,
+				 * wait_on_buffer() will do that for us
+				 * through sync_buffer().
+				 */
+				if (prev_mapping && prev_mapping != mapping)
+					blk_run_address_space(prev_mapping);
+				prev_mapping = mapping;
+
 				brelse(bh);
 				spin_lock(lock);
 			}
@@ -2957,12 +2968,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE || rw == SWRITE_SYNC)
+		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
 			lock_buffer(bh);
 		else if (!trylock_buffer(bh))
 			continue;
 
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
+		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
+		    rw == SWRITE_SYNC_PLUG) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
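For reference, the batching idea behind the change can be illustrated outside the kernel. The sketch below is a minimal userspace C program, not kernel code: the submit_plugged() and run_queue() helpers are hypothetical stand-ins for ll_rw_block(SWRITE_SYNC_PLUG, ...) and blk_run_address_space(), and it only demonstrates the "queue everything, kick the previous mapping's I/O when the mapping changes" pattern that the patch adds to fsync_buffers_list().

#include <stdio.h>

/* Hypothetical stand-ins for the kernel primitives: "submitting" a buffer
 * only queues it (plugged, no per-buffer unplug), and "running" a queue is
 * where the batched I/O would actually be kicked off. */
struct queue { const char *name; int pending; };

static void submit_plugged(struct queue *q)
{
	q->pending++;			/* queue without starting the I/O */
}

static void run_queue(struct queue *q)
{
	printf("kicking %d request(s) on %s\n", q->pending, q->name);
	q->pending = 0;
}

int main(void)
{
	struct queue qa = { "mapping A", 0 }, qb = { "mapping B", 0 };
	/* Dirty buffers in list order; each belongs to some mapping/queue. */
	struct queue *buffers[] = { &qa, &qa, &qa, &qb, &qb };
	struct queue *prev_mapping = NULL;
	size_t i;

	for (i = 0; i < sizeof(buffers) / sizeof(buffers[0]); i++) {
		submit_plugged(buffers[i]);

		/* Kick off I/O for the previous mapping once we move on to a
		 * new one; the very last mapping is deliberately left alone,
		 * mirroring how the patch relies on the later
		 * wait_on_buffer() pass to run it. */
		if (prev_mapping && prev_mapping != buffers[i])
			run_queue(prev_mapping);
		prev_mapping = buffers[i];
	}
	return 0;
}

Running this prints a single "kicking 3 request(s) on mapping A" when the loop crosses from mapping A to mapping B, while mapping B's requests stay queued, which is the behaviour the commit message describes: buffers are submitted without unplugging for each one, and pending I/O is kicked only when a new address space is encountered.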