 block/blk-core.c                   |  2
 drivers/block/drbd/drbd_actlog.c   |  2
 drivers/block/drbd/drbd_int.h      |  2
 drivers/block/drbd/drbd_main.c     |  3
 drivers/block/drbd/drbd_receiver.c |  9
 drivers/md/bitmap.c                |  2
 drivers/md/dm-io.c                 |  2
 drivers/md/dm-kcopyd.c             |  5
 drivers/md/md.c                    |  5
 fs/btrfs/extent_io.c               |  2
 fs/buffer.c                        | 14
 fs/direct-io.c                     |  2
 fs/ext4/page-io.c                  |  3
 fs/gfs2/log.c                      |  4
 fs/gfs2/lops.c                     | 12
 fs/gfs2/meta_io.c                  |  2
 fs/jbd/commit.c                    |  2
 fs/jbd2/commit.c                   |  6
 fs/nilfs2/segbuf.c                 |  2
 fs/xfs/linux-2.6/xfs_aops.c        |  3
 include/linux/blk_types.h          |  2
 include/linux/fs.h                 | 28
 kernel/power/block_io.c            |  2
 mm/page_io.c                       |  2
 24 files changed, 43 insertions(+), 75 deletions(-)
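
The hunks below remove the REQ_UNPLUG bio flag and the *_PLUG write macros built on it: with per-task on-stack plugging (current->plug, visible in the blk-core.c hunk), a submitter no longer marks its last bio to kick the queue. As a rough illustrative sketch, not part of this commit, a caller that batches buffer-head writes would now do something like the following; the helper name, the bhs array, and the nr count are hypothetical, while blk_start_plug(), blk_finish_plug(), submit_bh() and WRITE_SYNC are the in-tree interfaces of this kernel:

#include <linux/blkdev.h>	/* blk_start_plug(), blk_finish_plug() */
#include <linux/buffer_head.h>	/* submit_bh() */
#include <linux/fs.h>		/* WRITE_SYNC */

/*
 * Hypothetical example: batch several synchronous buffer writes under one
 * plug.  bhs[] are assumed already locked and mapped by the caller, as in
 * write_page() in the drivers/md/bitmap.c hunk below.
 */
static void example_write_buffers(struct buffer_head **bhs, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);			/* queue requests on this task's plug */
	for (i = 0; i < nr; i++)
		submit_bh(WRITE_SYNC, bhs[i]);	/* no per-bio REQ_UNPLUG needed */
	blk_finish_plug(&plug);			/* flush the batched requests to the driver */
}
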
diff --git a/block/blk-core.c b/block/blk-core.c
index 82a45898ba76..7e9715ae18c8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1290,7 +1290,7 @@ get_rq:
 	}
 
 	plug = current->plug;
-	if (plug && !sync) {
+	if (plug) {
 		if (!plug->should_sort && !list_empty(&plug->list)) {
 			struct request *__rq;
 
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 2096628d6e65..aca302492ff2 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -80,7 +80,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 
 	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
 		rw |= REQ_FUA;
-	rw |= REQ_UNPLUG | REQ_SYNC;
+	rw |= REQ_SYNC;
 
 	bio = bio_alloc(GFP_NOIO, 1);
 	bio->bi_bdev = bdev->md_bdev;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 0b5718e19586..b0bd27dfc1e8 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -377,7 +377,7 @@ union p_header {
 #define DP_HARDBARRIER	      1 /* depricated */
 #define DP_RW_SYNC	      2 /* equals REQ_SYNC */
 #define DP_MAY_SET_IN_SYNC    4
-#define DP_UNPLUG             8 /* equals REQ_UNPLUG */
+#define DP_UNPLUG             8 /* not used anymore */
 #define DP_FUA               16 /* equals REQ_FUA */
 #define DP_FLUSH             32 /* equals REQ_FLUSH */
 #define DP_DISCARD           64 /* equals REQ_DISCARD */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 6049cb85310d..8a43ce0edeed 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2477,12 +2477,11 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
 {
 	if (mdev->agreed_pro_version >= 95)
 		return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-			(bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) |
 			(bi_rw & REQ_FUA ? DP_FUA : 0) |
 			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
 			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
 	else
-		return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0;
+		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
 }
 
 /* Used to send write requests
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 84132f8bf8a4..8e68be939deb 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1100,8 +1100,6 @@ next_bio:
 	/* > e->sector, unless this is the first bio */
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
-	/* we special case some flags in the multi-bio case, see below
-	 * (REQ_UNPLUG) */
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1130,10 +1128,6 @@ next_bio:
 		bios = bios->bi_next;
 		bio->bi_next = NULL;
 
-		/* strip off REQ_UNPLUG unless it is the last bio */
-		if (bios)
-			bio->bi_rw &= ~REQ_UNPLUG;
-
 		drbd_generic_make_request(mdev, fault_type, bio);
 	} while (bios);
 	return 0;
@@ -1621,12 +1615,11 @@ static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
 {
 	if (mdev->agreed_pro_version >= 95)
 		return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
-			(dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
 			(dpf & DP_FUA ? REQ_FUA : 0) |
 			(dpf & DP_FLUSH ? REQ_FUA : 0) |
 			(dpf & DP_DISCARD ? REQ_DISCARD : 0);
 	else
-		return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
+		return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
 }
 
 /* mirrored write */
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 54bfc274b39a..ca203cb23f3c 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -347,7 +347,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
 			atomic_inc(&bitmap->pending_writes);
 			set_buffer_locked(bh);
 			set_buffer_mapped(bh);
-			submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh);
+			submit_bh(WRITE | REQ_SYNC, bh);
 			bh = bh->b_this_page;
 		}
 
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 136d4f71a116..76a5af00a26b 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -352,7 +352,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
 	if (sync)
-		rw |= REQ_SYNC | REQ_UNPLUG;
+		rw |= REQ_SYNC;
 
 	/*
 	 * For multiple regions we need to be careful to rewind
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 400cf35094a4..1bb73a13ca40 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -356,11 +356,8 @@ static int run_io_job(struct kcopyd_job *job)
 
 	if (job->rw == READ)
 		r = dm_io(&io_req, 1, &job->source, NULL);
-	else {
-		if (job->num_dests > 1)
-			io_req.bi_rw |= REQ_UNPLUG;
+	else
 		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
-	}
 
 	return r;
 }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ca0d79c264b9..28f9c1ee4e3a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -777,8 +777,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	bio->bi_end_io = super_written;
 
 	atomic_inc(&mddev->pending_writes);
-	submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA,
-		   bio);
+	submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
 }
 
 void md_super_wait(mddev_t *mddev)
@@ -806,7 +805,7 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
 	struct completion event;
 	int ret;
 
-	rw |= REQ_SYNC | REQ_UNPLUG;
+	rw |= REQ_SYNC;
 
 	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
 		rdev->meta_bdev : rdev->bdev;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 92ac5192c518..b76f7cd47401 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2182,7 +2182,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	unsigned long nr_written = 0;
 
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		write_flags = WRITE_SYNC_PLUG;
+		write_flags = WRITE_SYNC;
 	else
 		write_flags = WRITE;
 
diff --git a/fs/buffer.c b/fs/buffer.c
index f903f2e5b4fe..42534f67d71b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -767,7 +767,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * still in flight on potentially older
 				 * contents.
 				 */
-				write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+				write_dirty_buffer(bh, WRITE_SYNC);
 
 				/*
 				 * Kick off IO for the previous mapping. Note
@@ -1602,14 +1602,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the out before the higher-level caller actually
- * waits for the writes to be completed. The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
 			get_block_t *get_block, struct writeback_control *wbc,
@@ -1622,7 +1616,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
-			WRITE_SYNC_PLUG : WRITE);
+			WRITE_SYNC : WRITE);
 
 	BUG_ON(!PageLocked(page));
 
diff --git a/fs/direct-io.c b/fs/direct-io.c
index df709b3b860a..426083136099 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1173,7 +1173,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	struct dio *dio;
 
 	if (rw & WRITE)
-		rw = WRITE_ODIRECT_PLUG;
+		rw = WRITE_ODIRECT;
 
 	if (bdev)
 		bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 955cc309142f..e2cd90e4bb7c 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -310,8 +310,7 @@ static int io_submit_init(struct ext4_io_submit *io,
 	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
 
 	io->io_bio = bio;
-	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?
-		     WRITE_SYNC_PLUG : WRITE);
+	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 	io->io_next_block = bh->b_blocknr;
 	return 0;
 }
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index eb01f3575e10..7f1c11202342 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -121,7 +121,7 @@ __acquires(&sdp->sd_log_lock)
 		lock_buffer(bh);
 		if (test_clear_buffer_dirty(bh)) {
 			bh->b_end_io = end_buffer_write_sync;
-			submit_bh(WRITE_SYNC_PLUG, bh);
+			submit_bh(WRITE_SYNC, bh);
 		} else {
 			unlock_buffer(bh);
 			brelse(bh);
@@ -647,7 +647,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
 		lock_buffer(bh);
 		if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
 			bh->b_end_io = end_buffer_write_sync;
-			submit_bh(WRITE_SYNC_PLUG, bh);
+			submit_bh(WRITE_SYNC, bh);
 		} else {
 			unlock_buffer(bh);
 			brelse(bh);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index bf33f822058d..48b545a1979a 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -200,7 +200,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 		}
 
 		gfs2_log_unlock(sdp);
-		submit_bh(WRITE_SYNC_PLUG, bh);
+		submit_bh(WRITE_SYNC, bh);
 		gfs2_log_lock(sdp);
 
 		n = 0;
@@ -210,7 +210,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 			gfs2_log_unlock(sdp);
 			lock_buffer(bd2->bd_bh);
 			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
-			submit_bh(WRITE_SYNC_PLUG, bh);
+			submit_bh(WRITE_SYNC, bh);
 			gfs2_log_lock(sdp);
 			if (++n >= num)
 				break;
@@ -352,7 +352,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 		sdp->sd_log_num_revoke--;
 
 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
-			submit_bh(WRITE_SYNC_PLUG, bh);
+			submit_bh(WRITE_SYNC, bh);
 
 			bh = gfs2_log_get_buf(sdp);
 			mh = (struct gfs2_meta_header *)bh->b_data;
@@ -369,7 +369,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 	}
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 
-	submit_bh(WRITE_SYNC_PLUG, bh);
+	submit_bh(WRITE_SYNC, bh);
 }
 
 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
@@ -571,7 +571,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
 	ptr = bh_log_ptr(bh);
 
 	get_bh(bh);
-	submit_bh(WRITE_SYNC_PLUG, bh);
+	submit_bh(WRITE_SYNC, bh);
 	gfs2_log_lock(sdp);
 	while(!list_empty(list)) {
 		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
@@ -597,7 +597,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
 		} else {
 			bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
 		}
-		submit_bh(WRITE_SYNC_PLUG, bh1);
+		submit_bh(WRITE_SYNC, bh1);
 		gfs2_log_lock(sdp);
 		ptr += 2;
 	}
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index a566331db4e1..867b713cba92 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 	struct buffer_head *bh, *head;
 	int nr_underway = 0;
 	int write_op = REQ_META |
-		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);
+		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(!page_has_buffers(page));
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 34a4861c14b8..66be299acb1b 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -333,7 +333,7 @@ void journal_commit_transaction(journal_t *journal)
 	 * instead we rely on sync_buffer() doing the unplug for us.
 	 */
 	if (commit_transaction->t_synchronous_commit)
-		write_op = WRITE_SYNC_PLUG;
+		write_op = WRITE_SYNC;
 	spin_lock(&commit_transaction->t_handle_lock);
 	while (commit_transaction->t_updates) {
 		DEFINE_WAIT(wait);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index f3ad1598b201..3da1cc4346d5 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -137,9 +137,9 @@ static int journal_submit_commit_record(journal_t *journal,
 	if (journal->j_flags & JBD2_BARRIER &&
 	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
 				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
-		ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh);
+		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
 	else
-		ret = submit_bh(WRITE_SYNC_PLUG, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 
 	*cbh = bh;
 	return ret;
@@ -369,7 +369,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	 * instead we rely on sync_buffer() doing the unplug for us.
 	 */
 	if (commit_transaction->t_synchronous_commit)
-		write_op = WRITE_SYNC_PLUG;
+		write_op = WRITE_SYNC;
 	trace_jbd2_commit_locking(journal, commit_transaction);
 	stats.run.rs_wait = commit_transaction->t_max_wait;
 	stats.run.rs_locked = jiffies;
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 0f83e93935b2..2853ff20f85a 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -509,7 +509,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
 		 * Last BIO is always sent through the following
 		 * submission.
 		 */
-		rw |= REQ_SYNC | REQ_UNPLUG;
+		rw |= REQ_SYNC;
 		res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
 	}
 
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 83c1c20d145a..6bbb0ee33253 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -413,8 +413,7 @@ xfs_submit_ioend_bio(
 	if (xfs_ioend_new_eof(ioend))
 		xfs_mark_inode_dirty(XFS_I(ioend->io_inode));
 
-	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
-		   WRITE_SYNC_PLUG : WRITE, bio);
+	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
 }
 
 STATIC struct bio *
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 16b286473042..be50d9e70a7d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -128,7 +128,6 @@ enum rq_flag_bits {
 	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
 
 	/* bio only flags */
-	__REQ_UNPLUG,		/* unplug the immediately after submission */
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
 	__REQ_THROTTLED,	/* This bio has already been subjected to
 				 * throttling rules. Don't do it again. */
@@ -172,7 +171,6 @@ enum rq_flag_bits {
 	 REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
-#define REQ_UNPLUG		(1 << __REQ_UNPLUG)
 #define REQ_RAHEAD		(1 << __REQ_RAHEAD)
 #define REQ_THROTTLED		(1 << __REQ_THROTTLED)
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9f2cf69911b8..543e226ea6a3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -135,16 +135,10 @@ struct inodes_stat_t {
  *			block layer could (in theory) choose to ignore this
  *			request if it runs into resource problems.
  * WRITE		A normal async write. Device will be plugged.
- * WRITE_SYNC_PLUG	Synchronous write. Identical to WRITE, but passes down
+ * WRITE_SYNC		Synchronous write. Identical to WRITE, but passes down
  *			the hint that someone will be waiting on this IO
- *			shortly. The device must still be unplugged explicitly,
- *			WRITE_SYNC_PLUG does not do this as we could be
- *			submitting more writes before we actually wait on any
- *			of them.
- * WRITE_SYNC		Like WRITE_SYNC_PLUG, but also unplugs the device
- *			immediately after submission. The write equivalent
- *			of READ_SYNC.
- * WRITE_ODIRECT_PLUG	Special case write for O_DIRECT only.
+ *			shortly. The write equivalent of READ_SYNC.
+ * WRITE_ODIRECT	Special case write for O_DIRECT only.
  * WRITE_FLUSH		Like WRITE_SYNC but with preceding cache flush.
  * WRITE_FUA		Like WRITE_SYNC but data is guaranteed to be on
  *			non-volatile media on completion.
@@ -160,18 +154,14 @@ struct inodes_stat_t {
 #define WRITE			RW_MASK
 #define READA			RWA_MASK
 
-#define READ_SYNC		(READ | REQ_SYNC | REQ_UNPLUG)
+#define READ_SYNC		(READ | REQ_SYNC)
 #define READ_META		(READ | REQ_META)
-#define WRITE_SYNC_PLUG		(WRITE | REQ_SYNC | REQ_NOIDLE)
-#define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
-#define WRITE_ODIRECT_PLUG	(WRITE | REQ_SYNC)
+#define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE)
+#define WRITE_ODIRECT		(WRITE | REQ_SYNC)
 #define WRITE_META		(WRITE | REQ_META)
-#define WRITE_FLUSH		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
-				 REQ_FLUSH)
-#define WRITE_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
-				 REQ_FUA)
-#define WRITE_FLUSH_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
-				 REQ_FLUSH | REQ_FUA)
+#define WRITE_FLUSH		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
+#define WRITE_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
+#define WRITE_FLUSH_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
 
 #define SEL_IN		1
 #define SEL_OUT		2
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c
index 83bbc7c02df9..d09dd10c5a5e 100644
--- a/kernel/power/block_io.c
+++ b/kernel/power/block_io.c
@@ -28,7 +28,7 @@
 static int submit(int rw, struct block_device *bdev, sector_t sector,
 		struct page *page, struct bio **bio_chain)
 {
-	const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG;
+	const int bio_rw = rw | REQ_SYNC;
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
diff --git a/mm/page_io.c b/mm/page_io.c
index 2dee975bf469..dc76b4d0611e 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -106,7 +106,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		goto out;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		rw |= REQ_SYNC | REQ_UNPLUG;
+		rw |= REQ_SYNC;
 	count_vm_event(PSWPOUT);
 	set_page_writeback(page);
 	unlock_page(page);