Diffstat (limited to 'drivers')
 drivers/ata/libata-scsi.c          |  2
 drivers/block/aoe/aoeblk.c         |  2
 drivers/block/brd.c                |  2
 drivers/block/drbd/drbd_actlog.c   |  8
 drivers/block/drbd/drbd_main.c     |  6
 drivers/block/drbd/drbd_receiver.c | 22
 drivers/block/drbd/drbd_req.c      |  2
 drivers/block/loop.c               |  2
 drivers/block/pktcdvd.c            |  2
 drivers/block/umem.c               |  2
 drivers/ide/ide-cd_ioctl.c         |  2
 drivers/ide/ide-floppy.c           |  2
 drivers/md/dm-io.c                 | 12
 drivers/md/dm-kcopyd.c             |  2
 drivers/md/dm-raid1.c              |  2
 drivers/md/dm-stripe.c             |  2
 drivers/md/dm.c                    | 14
 drivers/md/linear.c                |  2
 drivers/md/md.c                    | 10
 drivers/md/md.h                    |  4
 drivers/md/multipath.c             |  8
 drivers/md/raid0.c                 |  2
 drivers/md/raid1.c                 | 22
 drivers/md/raid10.c                | 12
 drivers/md/raid5.c                 |  2
 drivers/scsi/osd/osd_initiator.c   |  8
 26 files changed, 77 insertions(+), 79 deletions(-)
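
Every hunk below applies the same mechanical conversion: the per-bio bit numbers (BIO_RW, BIO_RW_AHEAD, BIO_RW_BARRIER, BIO_RW_SYNCIO, BIO_RW_UNPLUG, BIO_RW_DISCARD, BIO_RW_FAILFAST_TRANSPORT) and the request flag REQ_RW are replaced by one set of mask flags (REQ_WRITE, REQ_RAHEAD, REQ_HARDBARRIER, REQ_SYNC, REQ_UNPLUG, REQ_DISCARD, REQ_FAILFAST_TRANSPORT) shared between bio->bi_rw and rq->cmd_flags. The practical difference: the old names were bit positions, consumed via 1 << n, test_bit() or bio_rw_flagged(), while the new names are masks used directly with & and |. A minimal userspace model of the two idioms (the bit values here are illustrative, not the kernel's real layout from include/linux/blk_types.h):

#include <stdio.h>

enum { BIO_RW_SYNCIO = 4 };              /* old style: a bit *number* */
#define REQ_SYNC (1UL << BIO_RW_SYNCIO)  /* new style: a bit *mask* */

int main(void)
{
        unsigned long bi_rw = 0;

        bi_rw |= (1UL << BIO_RW_SYNCIO); /* old: shift the bit number */
        bi_rw |= REQ_SYNC;               /* new: OR in the mask directly */

        if (bi_rw & REQ_SYNC)            /* new test; old code used */
                puts("sync");            /* bio_rw_flagged(bio, BIO_RW_SYNCIO) */
        return 0;
}
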
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a5c08b082edb..0a8cd3484791 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1114,7 +1114,7 @@ static int atapi_drain_needed(struct request *rq)
 	if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
 		return 0;
 
-	if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
+	if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
 		return 0;
 
 	return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 035cefe4045a..65deffde60ac 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -173,7 +173,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 		BUG();
 		bio_endio(bio, -ENXIO);
 		return 0;
-	} else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+	} else if (bio->bi_rw & REQ_HARDBARRIER) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	} else if (bio->bi_io_vec == NULL) {
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f1bf79d9bc0a..1b218c6b6820 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -340,7 +340,7 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
 					get_capacity(bdev->bd_disk))
 		goto out;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
 		err = 0;
 		discard_from_brd(brd, sector, bio->bi_size);
 		goto out;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index df018990c422..9400845d602e 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -79,8 +79,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	md_io.error = 0;
 
 	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-		rw |= (1 << BIO_RW_BARRIER);
-	rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO));
+		rw |= REQ_HARDBARRIER;
+	rw |= REQ_UNPLUG | REQ_SYNC;
 
  retry:
 	bio = bio_alloc(GFP_NOIO, 1);
@@ -103,11 +103,11 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	/* check for unsupported barrier op.
 	 * would rather check on EOPNOTSUPP, but that is not reliable.
 	 * don't try again for ANY return value != 0 */
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) {
+	if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
 		/* Try again with no barrier */
 		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
 		set_bit(MD_NO_BARRIER, &mdev->flags);
-		rw &= ~(1 << BIO_RW_BARRIER);
+		rw &= ~REQ_HARDBARRIER;
 		bio_put(bio);
 		goto retry;
 	}
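
The _drbd_md_sync_page_io() hunks above keep drbd's fallback for devices that reject barriers: issue the write with REQ_HARDBARRIER, and on failure clear the flag, record that barriers are unusable, and retry once without it (dm-io performs the same dance in its retry path further down). A runnable userspace model of that control flow — the flag value and the device stand-ins are illustrative, not drbd's real internals:

#include <stdbool.h>
#include <stdio.h>

#define REQ_HARDBARRIER (1UL << 5)       /* illustrative bit value */

static bool no_barrier;                  /* models MD_NO_BARRIER in mdev->flags */

static int submit(unsigned long rw)      /* models bio submission; this fake */
{                                        /* device rejects every barrier */
        return (rw & REQ_HARDBARRIER) ? -1 : 0;
}

static int write_with_fallback(unsigned long rw)
{
        if (!no_barrier)
                rw |= REQ_HARDBARRIER;
retry:
        if (submit(rw) != 0) {
                if (rw & REQ_HARDBARRIER) {
                        /* any failure: drop the barrier and retry once */
                        no_barrier = true;
                        rw &= ~REQ_HARDBARRIER;
                        goto retry;
                }
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", write_with_fallback(0)); /* succeeds on second attempt */
        return 0;
}
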
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 7258c95e895e..e2ab13d99d69 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2425,15 +2425,15 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
 	/* NOTE: no need to check if barriers supported here as we would
 	 *       not pass the test in make_request_common in that case
 	 */
-	if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) {
+	if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
 		dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
 		/* dp_flags |= DP_HARDBARRIER; */
 	}
-	if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO))
+	if (req->master_bio->bi_rw & REQ_SYNC)
 		dp_flags |= DP_RW_SYNC;
 	/* for now handle SYNCIO and UNPLUG
 	 * as if they still were one and the same flag */
-	if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG))
+	if (req->master_bio->bi_rw & REQ_UNPLUG)
 		dp_flags |= DP_RW_SYNC;
 	if (mdev->state.conn >= C_SYNC_SOURCE &&
 	    mdev->state.conn <= C_PAUSED_SYNC_T)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index dff48701b84d..cba1deb7b271 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1180,7 +1180,7 @@ next_bio:
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	/* we special case some flags in the multi-bio case, see below
-	 * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+	 * (REQ_UNPLUG, REQ_HARDBARRIER) */
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1209,16 +1209,16 @@ next_bio:
 		bios = bios->bi_next;
 		bio->bi_next = NULL;
 
-		/* strip off BIO_RW_UNPLUG unless it is the last bio */
+		/* strip off REQ_UNPLUG unless it is the last bio */
 		if (bios)
-			bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+			bio->bi_rw &= ~REQ_UNPLUG;
 
 		drbd_generic_make_request(mdev, fault_type, bio);
 
-		/* strip off BIO_RW_BARRIER,
+		/* strip off REQ_HARDBARRIER,
 		 * unless it is the first or last bio */
 		if (bios && bios->bi_next)
-			bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+			bios->bi_rw &= ~REQ_HARDBARRIER;
 	} while (bios);
 	maybe_kick_lo(mdev);
 	return 0;
@@ -1233,7 +1233,7 @@ fail:
 }
 
 /**
- * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
+ * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
  * @mdev:	DRBD device.
  * @w:		work object.
  * @cancel:	The connection will be closed anyways (unused in this callback)
@@ -1245,7 +1245,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
 	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
 	   so that we can finish that epoch in drbd_may_finish_epoch().
 	   That is necessary if we already have a long chain of Epochs, before
-	   we realize that BIO_RW_BARRIER is actually not supported */
+	   we realize that REQ_HARDBARRIER is actually not supported */
 
 	/* As long as the -ENOTSUPP on the barrier is reported immediately
 	   that will never trigger. If it is reported late, we will just
@@ -1824,14 +1824,14 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
 		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
 		if (epoch == e->epoch) {
 			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-			rw |= (1<<BIO_RW_BARRIER);
+			rw |= REQ_HARDBARRIER;
 			e->flags |= EE_IS_BARRIER;
 		} else {
 			if (atomic_read(&epoch->epoch_size) > 1 ||
 			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
 				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
 				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-				rw |= (1<<BIO_RW_BARRIER);
+				rw |= REQ_HARDBARRIER;
 				e->flags |= EE_IS_BARRIER;
 			}
 		}
@@ -1841,10 +1841,10 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
 	dp_flags = be32_to_cpu(p->dp_flags);
 	if (dp_flags & DP_HARDBARRIER) {
 		dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
-		/* rw |= (1<<BIO_RW_BARRIER); */
+		/* rw |= REQ_HARDBARRIER; */
 	}
 	if (dp_flags & DP_RW_SYNC)
-		rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+		rw |= REQ_SYNC | REQ_UNPLUG;
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
 		e->flags |= EE_MAY_SET_IN_SYNC;
 
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 654f1ef5cbb0..f761d98a4e90 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -997,7 +997,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 	 * because of those XXX, this is not yet enabled,
 	 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
 	 */
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
 		/* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 6120922f459f..fedfdb7d3cdf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -476,7 +476,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
 	if (bio_rw(bio) == WRITE) {
-		bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
+		bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
 		struct file *file = lo->lo_backing_file;
 
 		if (barrier) {
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 8a549db2aa78..9f3e4454274b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1221,7 +1221,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
 	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
 	pkt->bio->bi_idx = 0;
 
-	BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
+	BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
 	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
 	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
 	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 2f9470ff8f7c..8be57151f5d6 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -478,7 +478,7 @@ static void process_page(unsigned long data)
 				le32_to_cpu(desc->local_addr)>>9,
 				le32_to_cpu(desc->transfer_size));
 			dump_dmastat(card, control);
-		} else if (test_bit(BIO_RW, &bio->bi_rw) &&
+		} else if ((bio->bi_rw & REQ_WRITE) &&
 			   le32_to_cpu(desc->local_addr) >> 9 ==
 				card->init_size) {
 			card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 02712bf045c1..766b3deeb23c 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -454,7 +454,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
 	   touch it at all. */
 
 	if (cgc->data_direction == CGC_DATA_WRITE)
-		flags |= REQ_RW;
+		flags |= REQ_WRITE;
 
 	if (cgc->sense)
 		memset(cgc->sense, 0, sizeof(struct request_sense));
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index c7d0737bb18a..5406b6ea3ad1 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -207,7 +207,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
 	memcpy(rq->cmd, pc->c, 12);
 
 	pc->rq = rq;
-	if (rq->cmd_flags & REQ_RW)
+	if (rq->cmd_flags & REQ_WRITE)
 		pc->flags |= PC_FLAG_WRITING;
 
 	pc->flags |= PC_FLAG_DMA_OK;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 10f457ca6af2..0590c75b0ab6 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
 	if (sync)
-		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+		rw |= REQ_SYNC | REQ_UNPLUG;
 
 	/*
 	 * For multiple regions we need to be careful to rewind
@@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	 */
 	for (i = 0; i < num_regions; i++) {
 		*dp = old_pages;
-		if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+		if (where[i].count || (rw & REQ_HARDBARRIER))
 			do_region(rw, i, where + i, dp, io);
 	}
 
@@ -412,8 +412,8 @@ retry:
 	}
 	set_current_state(TASK_RUNNING);
 
-	if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
-		rw &= ~(1 << BIO_RW_BARRIER);
+	if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+		rw &= ~REQ_HARDBARRIER;
 		goto retry;
 	}
 
@@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
  * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
  * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
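
The comment in that last dm-io hunk is the contract for dm_io() callers: an asynchronous request (one with notify.fn set) must either carry REQ_SYNC in bi_rw or be followed by blk_unplug(), or the I/O idles in the queue until the unplug timer fires. A sketch of an async submission honoring that contract — the field and callback names are recalled from the dm-io API of this kernel era rather than quoted from the patch, so treat them as assumptions:

static void my_io_done(unsigned long error_bits, void *context)
{
        /* completion callback; hypothetical */
}

static int submit_async_write(struct dm_io_client *client,
                              struct dm_io_region *where,
                              struct page_list *pl)
{
        struct dm_io_request io_req = {
                .bi_rw = WRITE | REQ_SYNC | REQ_UNPLUG, /* avoid the ~3ms unplug delay */
                .mem.type = DM_IO_PAGE_LIST,
                .mem.ptr.pl = pl,
                .mem.offset = 0,
                .notify.fn = my_io_done,                /* notify.fn set => async */
                .notify.context = NULL,
                .client = client,
        };

        return dm_io(&io_req, 1, where, NULL);          /* NULL: no sync error bits */
}
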
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index addf83475040..d8587bac5682 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
+		.bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ddda531723dc..74136262d654 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		goto out;
 
-	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 		goto out;
 
 	if (unlikely(error)) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e610725db766..d6e28d732b4d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -284,7 +284,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 	if (!error)
 		return 0; /* I/O complete */
 
-	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 		return error;
 
 	if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1e0e6dd51501..d6f77baeafd6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -614,7 +614,7 @@ static void dec_pending(struct dm_io *io, int error)
 		 */
 		spin_lock_irqsave(&md->deferred_lock, flags);
 		if (__noflush_suspending(md)) {
-			if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+			if (!(io->bio->bi_rw & REQ_HARDBARRIER))
 				bio_list_add_head(&md->deferred,
 						  io->bio);
 		} else
@@ -626,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;
 
-		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+		if (bio->bi_rw & REQ_HARDBARRIER) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -1106,7 +1106,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 
 	clone->bi_sector = sector;
 	clone->bi_bdev = bio->bi_bdev;
-	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
 	clone->bi_vcnt = 1;
 	clone->bi_size = to_bytes(len);
 	clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1133,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw &= ~REQ_HARDBARRIER;
 	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
@@ -1301,7 +1301,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
 	ci.map = dm_get_live_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+		if (!(bio->bi_rw & REQ_HARDBARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1414,7 +1414,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		up_read(&md->io_lock);
 
 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -2296,7 +2296,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_rw_flagged(c, BIO_RW_BARRIER))
+			if (c->bi_rw & REQ_HARDBARRIER)
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7e0e057db9a7..ba19060bcf3f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 	dev_info_t *tmp_dev;
 	sector_t start_sector;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cb20d0b0555a..1893af678779 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -353,7 +353,7 @@ static void md_submit_barrier(struct work_struct *ws)
 		/* an empty barrier - all done */
 		bio_endio(bio, 0);
 	else {
-		bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+		bio->bi_rw &= ~REQ_HARDBARRIER;
 		if (mddev->pers->make_request(mddev, bio))
 			generic_make_request(bio);
 		mddev->barrier = POST_REQUEST_BARRIER;
@@ -675,11 +675,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	 * if zero is reached.
 	 * If an error occurred, call md_error
 	 *
-	 * As we might need to resubmit the request if BIO_RW_BARRIER
+	 * As we might need to resubmit the request if REQ_HARDBARRIER
 	 * causes ENOTSUPP, we allocate a spare bio...
 	 */
 	struct bio *bio = bio_alloc(GFP_NOIO, 1);
-	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+	int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
 
 	bio->bi_bdev = rdev->bdev;
 	bio->bi_sector = sector;
@@ -691,7 +691,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	atomic_inc(&mddev->pending_writes);
 	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
 		struct bio *rbio;
-		rw |= (1<<BIO_RW_BARRIER);
+		rw |= REQ_HARDBARRIER;
 		rbio = bio_clone(bio, GFP_NOIO);
 		rbio->bi_private = bio;
 		rbio->bi_end_io = super_written_barrier;
@@ -736,7 +736,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
 	struct completion event;
 	int ret;
 
-	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+	rw |= REQ_SYNC | REQ_UNPLUG;
 
 	bio->bi_bdev = bdev;
 	bio->bi_sector = sector;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 10597bfec000..fc56e0f21c80 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -67,7 +67,7 @@ struct mdk_rdev_s
 #define	Faulty		1		/* device is known to have a fault */
 #define	In_sync		2		/* device is in_sync with rest of array */
 #define	WriteMostly	4		/* Avoid reading if at all possible */
-#define	BarriersNotsupp	5		/* BIO_RW_BARRIER is not supported */
+#define	BarriersNotsupp	5		/* REQ_HARDBARRIER is not supported */
 #define	AllReserved	6		/* If whole device is reserved for
 					 * one array */
 #define	AutoDetected	7		/* added by auto-detect */
@@ -254,7 +254,7 @@ struct mddev_s
 					 * fails.  Only supported
 					 */
 	struct bio	*biolist;	/* bios that need to be retried
-					 * because BIO_RW_BARRIER is not supported
+					 * because REQ_HARDBARRIER is not supported
 					 */
 
 	atomic_t	recovery_active; /* blocks scheduled, but not written */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 410fb60699ac..0307d217e7a4 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
+	else if (!(bio->bi_rw & REQ_RAHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
@@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	mp_bh->bio = *bio;
 	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
 			*bio = *(mp_bh->master_bio);
 			bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
 			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-			bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+			bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
 			bio->bi_end_io = multipath_end_request;
 			bio->bi_private = mp_bh;
 			generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 563abed5a2cb..6f7af46d623c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a948da8012de..73cc74ffc26b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,7 +787,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool do_sync = (bio->bi_rw & REQ_SYNC);
 	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;
 
@@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		finish_wait(&conf->wait_barrier, &w);
 	}
 	if (unlikely(!mddev->barriers_work &&
-		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+		     (bio->bi_rw & REQ_HARDBARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
-		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+		read_bio->bi_rw = READ | do_sync;
 		read_bio->bi_private = r1_bio;
 
 		generic_make_request(read_bio);
@@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);
 
-	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
+	do_barriers = bio->bi_rw & REQ_HARDBARRIER;
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		mbio->bi_sector	= r1_bio->sector + conf->mirrors[i].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
-			(do_sync << BIO_RW_SYNCIO);
+		mbio->bi_rw = WRITE | do_barriers | do_sync;
 		mbio->bi_private = r1_bio;
 
 		if (behind_pages) {
@@ -1633,7 +1632,7 @@ static void raid1d(mddev_t *mddev)
 			sync_request_write(mddev, r1_bio);
 			unplug = 1;
 		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
-			/* some requests in the r1bio were BIO_RW_BARRIER
+			/* some requests in the r1bio were REQ_HARDBARRIER
 			 * requests which failed with -EOPNOTSUPP.  Hohumm..
 			 * Better resubmit without the barrier.
 			 * We know which devices to resubmit for, because
@@ -1641,7 +1640,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1662,8 +1661,7 @@ static void raid1d(mddev_t *mddev)
 					conf->mirrors[i].rdev->data_offset;
 				bio->bi_bdev = conf->mirrors[i].rdev->bdev;
 				bio->bi_end_io = raid1_end_write_request;
-				bio->bi_rw = WRITE |
-					(do_sync << BIO_RW_SYNCIO);
+				bio->bi_rw = WRITE | do_sync;
 				bio->bi_private = r1_bio;
 				r1_bio->bios[i] = bio;
 				generic_make_request(bio);
@@ -1698,7 +1696,7 @@ static void raid1d(mddev_t *mddev)
 			       (unsigned long long)r1_bio->sector);
 			raid_end_bio_io(r1_bio);
 		} else {
-			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
 			r1_bio->bios[r1_bio->read_disk] =
 				mddev->ro ? IO_BLOCKED : NULL;
 			r1_bio->read_disk = disk;
@@ -1715,7 +1713,7 @@ static void raid1d(mddev_t *mddev)
 			bio->bi_sector = r1_bio->sector + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			bio->bi_end_io = raid1_end_read_request;
-			bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+			bio->bi_rw = READ | do_sync;
 			bio->bi_private = r1_bio;
 			unplug = 1;
 			generic_make_request(bio);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 42e64e4e5e25..62ecb6650fd0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,12 +799,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool do_sync = (bio->bi_rw & REQ_SYNC);
 	struct bio_list bl;
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
@@ -879,7 +879,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 			mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid10_end_read_request;
-		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+		read_bio->bi_rw = READ | do_sync;
 		read_bio->bi_private = r10_bio;
 
 		generic_make_request(read_bio);
@@ -947,7 +947,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 			conf->mirrors[d].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
 		mbio->bi_end_io	= raid10_end_write_request;
-		mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
+		mbio->bi_rw = WRITE | do_sync;
 		mbio->bi_private = r10_bio;
 
 		atomic_inc(&r10_bio->remaining);
@@ -1716,7 +1716,7 @@ static void raid10d(mddev_t *mddev)
 			raid_end_bio_io(r10_bio);
 			bio_put(bio);
 		} else {
-			const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
 			bio_put(bio);
 			rdev = conf->mirrors[mirror].rdev;
 			if (printk_ratelimit())
@@ -1730,7 +1730,7 @@ static void raid10d(mddev_t *mddev)
 			bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
 				+ rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
-			bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+			bio->bi_rw = READ | do_sync;
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = raid10_end_read_request;
 			unplug = 1;
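
One subtlety in the raid1/raid10 conversion above: the old code kept do_sync as a 0-or-1 bool and shifted it into position (do_sync << BIO_RW_SYNCIO). The new code stores the masked value (bio->bi_rw & REQ_SYNC) instead, but a bool cannot hold a mask — it collapses to 0 or 1, so READ | do_sync sets bit 0 (REQ_WRITE in the unified namespace, READ itself being 0) rather than REQ_SYNC. Mainline tightened this soon afterwards by widening do_sync to unsigned long. A runnable model of the pitfall, with illustrative bit values:

#include <stdbool.h>
#include <stdio.h>

#define READ      0UL
#define REQ_WRITE (1UL << 0)   /* bit 0, as in the unified flag namespace */
#define REQ_SYNC  (1UL << 4)   /* illustrative position */

int main(void)
{
        unsigned long bi_rw = REQ_SYNC;                   /* an incoming sync read */

        const bool do_sync_bool = (bi_rw & REQ_SYNC);     /* collapses to 1 */
        const unsigned long do_sync = (bi_rw & REQ_SYNC); /* keeps the mask */

        printf("%#lx\n", READ | do_sync_bool); /* 0x1 == REQ_WRITE: wrong bit */
        printf("%#lx\n", READ | do_sync);      /* 0x10 == REQ_SYNC: intended */
        return 0;
}
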
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 96c690279fc6..20ac2f14376a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3958,7 +3958,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 	const int rw = bio_data_dir(bi);
 	int remaining;
 
-	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
+	if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) {
 		/* Drain all pending writes.  We only really need
 		 * to ensure they have been submitted, but this is
 		 * easier.
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index ee4b6914667f..fda4de3440c4 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -716,7 +716,7 @@ static int _osd_req_list_objects(struct osd_request *or,
 		return PTR_ERR(bio);
 	}
 
-	bio->bi_rw &= ~(1 << BIO_RW);
+	bio->bi_rw &= ~REQ_WRITE;
 	or->in.bio = bio;
 	or->in.total_bytes = bio->bi_size;
 	return 0;
@@ -814,7 +814,7 @@ void osd_req_write(struct osd_request *or,
 {
 	_osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
 	WARN_ON(or->out.bio || or->out.total_bytes);
-	WARN_ON(0 == bio_rw_flagged(bio, BIO_RW));
+	WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
 	or->out.bio = bio;
 	or->out.total_bytes = len;
 }
@@ -829,7 +829,7 @@ int osd_req_write_kern(struct osd_request *or,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
+	bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
 	osd_req_write(or, obj, offset, bio, len);
 	return 0;
 }
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or,
 {
 	_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
 	WARN_ON(or->in.bio || or->in.total_bytes);
-	WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
+	WARN_ON(1 == (bio->bi_rw & REQ_WRITE));
 	or->in.bio = bio;
 	or->in.total_bytes = len;
 }
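
Taken together, the renames visible in these hunks map the old bio bit numbers onto the unified request-flag masks:

    BIO_RW (and REQ_RW)        ->  REQ_WRITE
    BIO_RW_AHEAD               ->  REQ_RAHEAD
    BIO_RW_BARRIER             ->  REQ_HARDBARRIER
    BIO_RW_SYNCIO              ->  REQ_SYNC
    BIO_RW_UNPLUG              ->  REQ_UNPLUG
    BIO_RW_DISCARD             ->  REQ_DISCARD
    BIO_RW_FAILFAST_TRANSPORT  ->  REQ_FAILFAST_TRANSPORT

with the caveat that the left column named bit positions (used as 1 << flag) while the right column names ready-made masks.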