path: root/drivers
Diffstat (limited to 'drivers')
 drivers/block/brd.c                  | 16
 drivers/block/drbd/drbd_main.c       |  8
 drivers/block/drbd/drbd_receiver.c   |  2
 drivers/block/drbd/drbd_req.c        |  6
 drivers/block/drbd/drbd_worker.c     |  2
 drivers/block/pktcdvd.c              |  2
 drivers/block/umem.c                 |  2
 drivers/block/zram/zram_drv.c        | 23
 drivers/md/bcache/request.c          | 12
 drivers/md/bcache/super.c            |  2
 drivers/md/bcache/writeback.h        |  2
 drivers/md/dm-cache-target.c         |  8
 drivers/md/dm-crypt.c                |  4
 drivers/md/dm-era-target.c           |  2
 drivers/md/dm-flakey.c               |  6
 drivers/md/dm-io.c                   |  6
 drivers/md/dm-log-writes.c           |  4
 drivers/md/dm-mpath.c                |  2
 drivers/md/dm-raid1.c                | 10
 drivers/md/dm-region-hash.c          |  4
 drivers/md/dm-snap.c                 |  6
 drivers/md/dm-stripe.c               |  4
 drivers/md/dm-thin.c                 |  8
 drivers/md/dm-zero.c                 |  2
 drivers/md/dm.c                      | 10
 drivers/md/linear.c                  |  2
 drivers/md/md.c                      |  4
 drivers/md/multipath.c               |  8
 drivers/md/raid0.c                   |  2
 drivers/md/raid1.c                   |  6
 drivers/md/raid10.c                  |  8
 drivers/md/raid5-cache.c             |  2
 drivers/md/raid5.c                   | 20
 drivers/nvdimm/btt.c                 | 12
 drivers/nvdimm/pmem.c                | 16
 drivers/target/target_core_iblock.c  |  2
 36 files changed, 118 insertions(+), 117 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 3439b28cce8b..0c76d4016eeb 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
  * Process a single bvec of a bio.
  */
 static int brd_do_bvec(struct brd_device *brd, struct page *page,
-                       unsigned int len, unsigned int off, int op,
+                       unsigned int len, unsigned int off, bool is_write,
                        sector_t sector)
 {
         void *mem;
         int err = 0;
 
-        if (op_is_write(op)) {
+        if (is_write) {
                 err = copy_to_brd_setup(brd, sector, len);
                 if (err)
                         goto out;
         }
 
         mem = kmap_atomic(page);
-        if (!op_is_write(op)) {
+        if (!is_write) {
                 copy_from_brd(mem + off, brd, sector, len);
                 flush_dcache_page(page);
         } else {
@@ -350,8 +350,8 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
                 unsigned int len = bvec.bv_len;
                 int err;
 
-                err = brd_do_bvec(brd, bvec.bv_page, len,
-                                  bvec.bv_offset, bio_op(bio), sector);
+                err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
+                                  op_is_write(bio_op(bio)), sector);
                 if (err)
                         goto io_error;
                 sector += len >> SECTOR_SHIFT;
@@ -366,11 +366,11 @@ io_error:
 }
 
 static int brd_rw_page(struct block_device *bdev, sector_t sector,
-                       struct page *page, int op)
+                       struct page *page, bool is_write)
 {
         struct brd_device *brd = bdev->bd_disk->private_data;
-        int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
-        page_endio(page, op, err);
+        int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+        page_endio(page, is_write, err);
         return err;
 }
 
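
The brd conversion above shows the pattern this series repeats everywhere:
the per-bvec helper and ->rw_page() take a bool, and any caller still holding
a bio derives it with op_is_write(bio_op(bio)). A minimal sketch of that
caller side, with hypothetical demo_* names (bio_op(), op_is_write(), the
bvec iterator and SECTOR_SHIFT are the real 4.8-era interfaces):

static int demo_handle_bio(struct demo_dev *dev, struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;
        sector_t sector = bio->bi_iter.bi_sector;
        bool is_write = op_is_write(bio_op(bio));

        bio_for_each_segment(bvec, bio, iter) {
                /* hand each segment to a bool-taking helper, as brd does */
                int err = demo_do_bvec(dev, bvec.bv_page, bvec.bv_len,
                                       bvec.bv_offset, is_write, sector);
                if (err)
                        return err;
                sector += bvec.bv_len >> SECTOR_SHIFT;
        }
        return 0;
}
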
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 0501ae0c517b..100be556e613 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1663,13 +1663,13 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
                                           struct bio *bio)
 {
         if (connection->agreed_pro_version >= 95)
-                return  (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-                        (bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
-                        (bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
+                return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
+                        (bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
+                        (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
                         (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
                         (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
         else
-                return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+                return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
 }
 
 /* Used to send write or TRIM aka REQ_DISCARD requests
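
After the rename, bio->bi_opf holds only the REQ_* flag bits, while the
operation itself is read back with bio_op(), so tests like the ones in
bio_flags_to_wire() above split cleanly into flag checks and op checks. An
illustrative helper combining both (not part of this patch):

static bool demo_is_sync_write(struct bio *bio)
{
        /* op from bio_op(), flags from bi_opf - never mix the two */
        return op_is_write(bio_op(bio)) && (bio->bi_opf & REQ_SYNC);
}
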
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index df45713dfbe8..942384f34e22 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1564,7 +1564,7 @@ static void drbd_issue_peer_wsame(struct drbd_device *device,
  * drbd_submit_peer_request()
  * @device:	DRBD device.
  * @peer_req:	peer request
- * @rw:		flag field, see bio->bi_rw
+ * @rw:		flag field, see bio->bi_opf
  *
  * May spread the pages to multiple bios,
  * depending on bio_add_page restrictions.
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 66b8e4bb74d8..de279fe4e4fd 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -288,7 +288,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
          */
         if (!ok &&
             bio_op(req->master_bio) == REQ_OP_READ &&
-            !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+            !(req->master_bio->bi_opf & REQ_RAHEAD) &&
             !list_empty(&req->tl_requests))
                 req->rq_state |= RQ_POSTPONED;
 
@@ -1137,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req)
          * replicating, in which case there is no point. */
         if (unlikely(req->i.size == 0)) {
                 /* The only size==0 bios we expect are empty flushes. */
-                D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
+                D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
                 if (remote)
                         _req_mod(req, QUEUE_AS_DRBD_BARRIER);
                 return remote;
@@ -1176,7 +1176,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
 
         if (bio_op(bio) != REQ_OP_READ)
                 type = DRBD_FAULT_DT_WR;
-        else if (bio->bi_rw & REQ_RAHEAD)
+        else if (bio->bi_opf & REQ_RAHEAD)
                 type = DRBD_FAULT_DT_RA;
         else
                 type = DRBD_FAULT_DT_RD;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 35dbb3dca47e..c6755c9a0aea 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -256,7 +256,7 @@ void drbd_request_endio(struct bio *bio)
                         what = DISCARD_COMPLETED_WITH_ERROR;
                 break;
         case REQ_OP_READ:
-                if (bio->bi_rw & REQ_RAHEAD)
+                if (bio->bi_opf & REQ_RAHEAD)
                         what = READ_AHEAD_COMPLETED_WITH_ERROR;
                 else
                         what = READ_COMPLETED_WITH_ERROR;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 9393bc730acf..90fa4ac149db 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1157,7 +1157,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
 
         bio_reset(pkt->bio);
         pkt->bio->bi_bdev = pd->bdev;
-        pkt->bio->bi_rw = REQ_WRITE;
+        bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
         pkt->bio->bi_iter.bi_sector = new_sector;
         pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
         pkt->bio->bi_vcnt = pkt->frames;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index d0a3e6d4515f..be90e15854ed 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
         *card->biotail = bio;
         bio->bi_next = NULL;
         card->biotail = &bio->bi_next;
-        if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+        if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
                 activate(card);
         spin_unlock_irq(&card->lock);
 
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index ca29649c4b08..04365b17ee67 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -843,15 +843,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 }
 
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-                        int offset, int op)
+                        int offset, bool is_write)
 {
         unsigned long start_time = jiffies;
+        int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
         int ret;
 
-        generic_start_io_acct(op, bvec->bv_len >> SECTOR_SHIFT,
+        generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
                         &zram->disk->part0);
 
-        if (!op_is_write(op)) {
+        if (!is_write) {
                 atomic64_inc(&zram->stats.num_reads);
                 ret = zram_bvec_read(zram, bvec, index, offset);
         } else {
@@ -859,10 +860,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                 ret = zram_bvec_write(zram, bvec, index, offset);
         }
 
-        generic_end_io_acct(op, &zram->disk->part0, start_time);
+        generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
 
         if (unlikely(ret)) {
-                if (!op_is_write(op))
+                if (!is_write)
                         atomic64_inc(&zram->stats.failed_reads);
                 else
                         atomic64_inc(&zram->stats.failed_writes);
@@ -903,17 +904,17 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
                         bv.bv_offset = bvec.bv_offset;
 
                         if (zram_bvec_rw(zram, &bv, index, offset,
-                                         bio_op(bio)) < 0)
+                                         op_is_write(bio_op(bio))) < 0)
                                 goto out;
 
                         bv.bv_len = bvec.bv_len - max_transfer_size;
                         bv.bv_offset += max_transfer_size;
                         if (zram_bvec_rw(zram, &bv, index + 1, 0,
-                                         bio_op(bio)) < 0)
+                                         op_is_write(bio_op(bio))) < 0)
                                 goto out;
                 } else
                         if (zram_bvec_rw(zram, &bvec, index, offset,
-                                         bio_op(bio)) < 0)
+                                         op_is_write(bio_op(bio))) < 0)
                                 goto out;
 
                 update_position(&index, &offset, &bvec);
@@ -970,7 +971,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
 }
 
 static int zram_rw_page(struct block_device *bdev, sector_t sector,
-                        struct page *page, int op)
+                        struct page *page, bool is_write)
 {
         int offset, err = -EIO;
         u32 index;
@@ -994,7 +995,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
         bv.bv_len = PAGE_SIZE;
         bv.bv_offset = 0;
 
-        err = zram_bvec_rw(zram, &bv, index, offset, op);
+        err = zram_bvec_rw(zram, &bv, index, offset, is_write);
 put_zram:
         zram_meta_put(zram);
 out:
@@ -1007,7 +1008,7 @@ out:
          * (e.g., SetPageError, set_page_dirty and extra works).
          */
         if (err == 0)
-                page_endio(page, op, 0);
+                page_endio(page, is_write, 0);
         return err;
 }
 
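
Note the small bridge zram needs above: the generic accounting helpers still
expect an op-style int, so the bool is mapped back to REQ_OP_READ or
REQ_OP_WRITE just for accounting. A sketch of that idiom with hypothetical
demo_* names (generic_start_io_acct()/generic_end_io_acct() are the real
helpers):

static void demo_account_io(struct gendisk *disk, bool is_write,
                            unsigned long sectors, unsigned long start_time)
{
        int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;

        generic_start_io_acct(rw_acct, sectors, &disk->part0);
        /* ... perform the transfer ... */
        generic_end_io_acct(rw_acct, &disk->part0, start_time);
}
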
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 69f16f43f8ab..4b177fe11ebb 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -208,7 +208,7 @@ static void bch_data_insert_start(struct closure *cl)
          * Journal writes are marked REQ_PREFLUSH; if the original write was a
          * flush, it'll wait on the journal write.
          */
-        bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
+        bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
 
         do {
                 unsigned i;
@@ -405,7 +405,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
         if (!congested &&
             mode == CACHE_MODE_WRITEBACK &&
             op_is_write(bio_op(bio)) &&
-            (bio->bi_rw & REQ_SYNC))
+            (bio->bi_opf & REQ_SYNC))
                 goto rescale;
 
         spin_lock(&dc->io_lock);
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
         s->iop.write_prio = 0;
         s->iop.error = 0;
         s->iop.flags = 0;
-        s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
+        s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
         s->iop.wq = bcache_wq;
 
         return s;
@@ -796,8 +796,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
                 goto out_submit;
         }
 
-        if (!(bio->bi_rw & REQ_RAHEAD) &&
-            !(bio->bi_rw & REQ_META) &&
+        if (!(bio->bi_opf & REQ_RAHEAD) &&
+            !(bio->bi_opf & REQ_META) &&
             s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
                 reada = min_t(sector_t, dc->readahead >> 9,
                               bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
@@ -920,7 +920,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                 bch_writeback_add(dc);
                 s->iop.bio = bio;
 
-                if (bio->bi_rw & REQ_PREFLUSH) {
+                if (bio->bi_opf & REQ_PREFLUSH) {
                         /* Also need to send a flush to the backing device */
                         struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                              dc->disk.bio_split);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 88ef6d14cce3..95a4ca6ce6ff 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
         for (i = 0; i < KEY_PTRS(k); i++) {
                 struct bio *bio = bch_bbio_alloc(c);
 
-                bio->bi_rw = REQ_SYNC|REQ_META|op_flags;
+                bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
                 bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
                 bio->bi_end_io = uuid_endio;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 073a042aed24..301eaf565167 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -57,7 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
         if (would_skip)
                 return false;
 
-        return bio->bi_rw & REQ_SYNC ||
+        return bio->bi_opf & REQ_SYNC ||
                 in_use <= CUTOFF_WRITEBACK;
 }
 
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 718744db62df..59b2c50562e4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -788,7 +788,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 
         spin_lock_irqsave(&cache->lock, flags);
         if (cache->need_tick_bio &&
-            !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) &&
+            !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
             bio_op(bio) != REQ_OP_DISCARD) {
                 pb->tick = true;
                 cache->need_tick_bio = false;
@@ -830,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static int bio_triggers_commit(struct cache *cache, struct bio *bio)
 {
-        return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+        return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 }
 
 /*
@@ -1069,7 +1069,7 @@ static void dec_io_migrations(struct cache *cache)
 static bool discard_or_flush(struct bio *bio)
 {
         return bio_op(bio) == REQ_OP_DISCARD ||
-               bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+               bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1980,7 +1980,7 @@ static void process_deferred_bios(struct cache *cache)
 
                 bio = bio_list_pop(&bios);
 
-                if (bio->bi_rw & REQ_PREFLUSH)
+                if (bio->bi_opf & REQ_PREFLUSH)
                         process_flush_bio(cache, bio);
                 else if (bio_op(bio) == REQ_OP_DISCARD)
                         process_discard_bio(cache, &structs, bio);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8f2e3e2ffd26..4e9784b4e0ac 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
         clone->bi_private = io;
         clone->bi_end_io = crypt_endio;
         clone->bi_bdev = cc->dev->bdev;
-        bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw);
+        bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_opf);
 }
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1915,7 +1915,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
          * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
          * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
          */
-        if (unlikely(bio->bi_rw & REQ_PREFLUSH ||
+        if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
                      bio_op(bio) == REQ_OP_DISCARD)) {
                 bio->bi_bdev = cc->dev->bdev;
                 if (bio_sectors(bio))
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 2faf49d8f4d7..bf2b2676cb8a 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1542,7 +1542,7 @@ static int era_map(struct dm_target *ti, struct bio *bio)
         /*
          * REQ_PREFLUSH bios carry no data, so we're not interested in them.
          */
-        if (!(bio->bi_rw & REQ_PREFLUSH) &&
+        if (!(bio->bi_opf & REQ_PREFLUSH) &&
             (bio_data_dir(bio) == WRITE) &&
             !metadata_current_marked(era->md, block)) {
                 defer_bio(era, bio);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 19db13e99466..97e446d54a15 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -16,7 +16,7 @@
 #define DM_MSG_PREFIX "flakey"
 
 #define all_corrupt_bio_flags_match(bio, fc)	\
-	(((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
+	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
 
 /*
  * Flakey: Used for testing only, simulates intermittent,
@@ -266,9 +266,9 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
                 data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
 
                 DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
-                        "(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n",
+                        "(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
                         bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
-                        (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+                        (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
                         (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
         }
 }
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index daa03e41654a..0bf1a12e35fe 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -505,9 +505,9 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
- * If you fail to do one of these, the IO will be submitted to the disk after
- * q->unplug_delay, which defaults to 3ms in blk-settings.c.
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
           struct dm_io_region *where, unsigned long *sync_error_bits)
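
For reference, the split request description this comment belongs to keeps
the operation in ->bi_op and the flag bits in ->bi_op_flags; the do_write()
hunk in dm-raid1.c further down shows it in context. A trimmed sketch (the
field names are the real struct dm_io_request members, the values are
illustrative):

        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
                .bi_op_flags = REQ_SYNC,        /* REQ_* flags, not the op */
                .mem.type = DM_IO_BIO,
                .mem.ptr.bio = bio,
                .notify.fn = write_callback,    /* set for async completion */
                .client = io_client,
        };
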
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index b5dbf7a0515e..4ab68033f9d1 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -555,8 +555,8 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
         struct bio_vec bv;
         size_t alloc_size;
         int i = 0;
-        bool flush_bio = (bio->bi_rw & REQ_PREFLUSH);
-        bool fua_bio = (bio->bi_rw & REQ_FUA);
+        bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
+        bool fua_bio = (bio->bi_opf & REQ_FUA);
         bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
 
         pb->block = NULL;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d7107d23b897..ac734e5bbe48 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -661,7 +661,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 
         bio->bi_error = 0;
         bio->bi_bdev = pgpath->path.dev->bdev;
-        bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+        bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 
         if (pgpath->pg->ps.type->start_io)
                 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dac55b254a09..bdf1606f67bc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -657,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
         struct mirror *m;
         struct dm_io_request io_req = {
                 .bi_op = REQ_OP_WRITE,
-                .bi_op_flags = bio->bi_rw & WRITE_FLUSH_FUA,
+                .bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
                 .mem.type = DM_IO_BIO,
                 .mem.ptr.bio = bio,
                 .notify.fn = write_callback,
@@ -704,7 +704,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
         bio_list_init(&requeue);
 
         while ((bio = bio_list_pop(writes))) {
-                if ((bio->bi_rw & REQ_PREFLUSH) ||
+                if ((bio->bi_opf & REQ_PREFLUSH) ||
                     (bio_op(bio) == REQ_OP_DISCARD)) {
                         bio_list_add(&sync, bio);
                         continue;
@@ -1217,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
          * If region is not in-sync queue the bio.
          */
         if (!r || (r == -EWOULDBLOCK)) {
-                if (bio->bi_rw & REQ_RAHEAD)
+                if (bio->bi_opf & REQ_RAHEAD)
                         return -EWOULDBLOCK;
 
                 queue_bio(ms, bio, rw);
@@ -1253,7 +1253,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
          * We need to dec pending if this was a write.
          */
         if (rw == WRITE) {
-                if (!(bio->bi_rw & REQ_PREFLUSH) &&
+                if (!(bio->bi_opf & REQ_PREFLUSH) &&
                     bio_op(bio) != REQ_OP_DISCARD)
                         dm_rh_dec(ms->rh, bio_record->write_region);
                 return error;
@@ -1262,7 +1262,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
         if (error == -EOPNOTSUPP)
                 goto out;
 
-        if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+        if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
                 goto out;
 
         if (unlikely(error)) {
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index b11813431f31..85c32b22a420 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -398,7 +398,7 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
         region_t region = dm_rh_bio_to_region(rh, bio);
         int recovering = 0;
 
-        if (bio->bi_rw & REQ_PREFLUSH) {
+        if (bio->bi_opf & REQ_PREFLUSH) {
                 rh->flush_failure = 1;
                 return;
         }
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
         struct bio *bio;
 
         for (bio = bios->head; bio; bio = bio->bi_next) {
-                if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
+                if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
                         continue;
                 rh_inc(rh, dm_rh_bio_to_region(rh, bio));
         }
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ce2a910709f7..c65feeada864 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
         init_tracked_chunk(bio);
 
-        if (bio->bi_rw & REQ_PREFLUSH) {
+        if (bio->bi_opf & REQ_PREFLUSH) {
                 bio->bi_bdev = s->cow->bdev;
                 return DM_MAPIO_REMAPPED;
         }
@@ -1800,7 +1800,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 
         init_tracked_chunk(bio);
 
-        if (bio->bi_rw & REQ_PREFLUSH) {
+        if (bio->bi_opf & REQ_PREFLUSH) {
                 if (!dm_bio_get_target_bio_nr(bio))
                         bio->bi_bdev = s->origin->bdev;
                 else
@@ -2286,7 +2286,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
 
         bio->bi_bdev = o->dev->bdev;
 
-        if (unlikely(bio->bi_rw & REQ_PREFLUSH))
+        if (unlikely(bio->bi_opf & REQ_PREFLUSH))
                 return DM_MAPIO_REMAPPED;
 
         if (bio_data_dir(bio) != WRITE)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 83f1d4667195..28193a57bf47 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -286,7 +286,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
         uint32_t stripe;
         unsigned target_bio_nr;
 
-        if (bio->bi_rw & REQ_PREFLUSH) {
+        if (bio->bi_opf & REQ_PREFLUSH) {
                 target_bio_nr = dm_bio_get_target_bio_nr(bio);
                 BUG_ON(target_bio_nr >= sc->stripes);
                 bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
@@ -383,7 +383,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
         if (!error)
                 return 0; /* I/O complete */
 
-        if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+        if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
                 return error;
 
         if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 197ea2003400..d1c05c12a9db 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 
 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 {
-        return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+        return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
                 dm_thin_changed_this_transaction(tc->td);
 }
 
@@ -870,7 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
         struct bio *bio;
 
         while ((bio = bio_list_pop(&cell->bios))) {
-                if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+                if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
                     bio_op(bio) == REQ_OP_DISCARD)
                         bio_list_add(&info->defer_bios, bio);
                 else {
@@ -1717,7 +1717,7 @@ static void __remap_and_issue_shared_cell(void *context,
 
         while ((bio = bio_list_pop(&cell->bios))) {
                 if ((bio_data_dir(bio) == WRITE) ||
-                    (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+                    (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
                      bio_op(bio) == REQ_OP_DISCARD))
                         bio_list_add(&info->defer_bios, bio);
                 else {
@@ -2635,7 +2635,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                 return DM_MAPIO_SUBMITTED;
         }
 
-        if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+        if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
             bio_op(bio) == REQ_OP_DISCARD) {
                 thin_defer_bio_with_throttle(tc, bio);
                 return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 618b8752dcf1..b616f11d8473 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -37,7 +37,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
 {
         switch (bio_op(bio)) {
         case REQ_OP_READ:
-                if (bio->bi_rw & REQ_RAHEAD) {
+                if (bio->bi_opf & REQ_RAHEAD) {
                         /* readahead of null bytes only wastes buffer cache */
                         return -EIO;
                 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dfa09e14e847..fa9b1cb4438a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -798,12 +798,12 @@ static void dec_pending(struct dm_io *io, int error)
         if (io_error == DM_ENDIO_REQUEUE)
                 return;
 
-        if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
+        if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
                 /*
                  * Preflush done for flush with data, reissue
                  * without REQ_PREFLUSH.
                  */
-                bio->bi_rw &= ~REQ_PREFLUSH;
+                bio->bi_opf &= ~REQ_PREFLUSH;
                 queue_io(md, bio);
         } else {
                 /* done with normal IO or empty flush */
@@ -964,7 +964,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 {
         struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
         unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-        BUG_ON(bio->bi_rw & REQ_PREFLUSH);
+        BUG_ON(bio->bi_opf & REQ_PREFLUSH);
         BUG_ON(bi_size > *tio->len_ptr);
         BUG_ON(n_sectors > bi_size);
         *tio->len_ptr -= bi_size - n_sectors;
@@ -1252,7 +1252,7 @@ static void __split_and_process_bio(struct mapped_device *md,
 
         start_io_acct(ci.io);
 
-        if (bio->bi_rw & REQ_PREFLUSH) {
+        if (bio->bi_opf & REQ_PREFLUSH) {
                 ci.bio = &ci.md->flush_bio;
                 ci.sector_count = 0;
                 error = __send_empty_flush(&ci);
@@ -1290,7 +1290,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
         if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
                 dm_put_live_table(md, srcu_idx);
 
-                if (!(bio->bi_rw & REQ_RAHEAD))
+                if (!(bio->bi_opf & REQ_RAHEAD))
                         queue_io(md, bio);
                 else
                         bio_io_error(bio);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 70ff888d25d0..86f5d435901d 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
         struct bio *split;
         sector_t start_sector, end_sector, data_offset;
 
-        if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
                 md_flush_request(mddev, bio);
                 return;
         }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2c3ab6f5e6be..d646f6e444f0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -285,7 +285,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
          */
         sectors = bio_sectors(bio);
         /* bio could be mergeable after passing to underlayer */
-        bio->bi_rw &= ~REQ_NOMERGE;
+        bio->bi_opf &= ~REQ_NOMERGE;
         mddev->pers->make_request(mddev, bio);
 
         cpu = part_stat_lock();
@@ -414,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
                 /* an empty barrier - all done */
                 bio_endio(bio);
         else {
-                bio->bi_rw &= ~REQ_PREFLUSH;
+                bio->bi_opf &= ~REQ_PREFLUSH;
                 mddev->pers->make_request(mddev, bio);
         }
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4974682842ae..673efbd6fc47 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio)
 
         if (!bio->bi_error)
                 multipath_end_bh_io(mp_bh, 0);
-        else if (!(bio->bi_rw & REQ_RAHEAD)) {
+        else if (!(bio->bi_opf & REQ_RAHEAD)) {
                 /*
                  * oops, IO error:
                  */
@@ -112,7 +112,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
         struct multipath_bh * mp_bh;
         struct multipath_info *multipath;
 
-        if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
                 md_flush_request(mddev, bio);
                 return;
         }
@@ -135,7 +135,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 
         mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
         mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-        mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
+        mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
         mp_bh->bio.bi_end_io = multipath_end_request;
         mp_bh->bio.bi_private = mp_bh;
         generic_make_request(&mp_bh->bio);
@@ -360,7 +360,7 @@ static void multipathd(struct md_thread *thread)
                         bio->bi_iter.bi_sector +=
                                 conf->multipaths[mp_bh->path].rdev->data_offset;
                         bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-                        bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+                        bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
                         bio->bi_end_io = multipath_end_request;
                         bio->bi_private = mp_bh;
                         generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c3d439083212..258986a2699d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
         struct md_rdev *tmp_dev;
         struct bio *split;
 
-        if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
                 md_flush_request(mddev, bio);
                 return;
         }
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 46168ef2e279..21dc00eb1989 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1043,8 +1043,8 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
         unsigned long flags;
         const int op = bio_op(bio);
         const int rw = bio_data_dir(bio);
-        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
-        const unsigned long do_flush_fua = (bio->bi_rw &
+        const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+        const unsigned long do_flush_fua = (bio->bi_opf &
                                                 (REQ_PREFLUSH | REQ_FUA));
         struct md_rdev *blocked_rdev;
         struct blk_plug_cb *cb;
@@ -2318,7 +2318,7 @@ read_more:
                 raid_end_bio_io(r1_bio);
         } else {
                 const unsigned long do_sync
-                        = r1_bio->master_bio->bi_rw & REQ_SYNC;
+                        = r1_bio->master_bio->bi_opf & REQ_SYNC;
                 if (bio) {
                         r1_bio->bios[r1_bio->read_disk] =
                                 mddev->ro ? IO_BLOCKED : NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ed29fc899f06..0e4efcd10795 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1054,8 +1054,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
         int i;
         const int op = bio_op(bio);
         const int rw = bio_data_dir(bio);
-        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
-        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
+        const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+        const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
         unsigned long flags;
         struct md_rdev *blocked_rdev;
         struct blk_plug_cb *cb;
@@ -1440,7 +1440,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 
         struct bio *split;
 
-        if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
                 md_flush_request(mddev, bio);
                 return;
         }
@@ -2533,7 +2533,7 @@ read_more:
                 return;
         }
 
-        do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+        do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
         slot = r10_bio->read_slot;
         printk_ratelimited(
                 KERN_ERR
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 5504ce2bac06..51f76ddbe265 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
                 bio_endio(bio);
                 return 0;
         }
-        bio->bi_rw &= ~REQ_PREFLUSH;
+        bio->bi_opf &= ~REQ_PREFLUSH;
         return -EAGAIN;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d189e894b921..8912407a4dd0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -806,7 +806,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
         dd_idx = 0;
         while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
                 dd_idx++;
-        if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
+        if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
             bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
                 goto unlock_out;
 
@@ -1003,7 +1003,7 @@ again:
 
                         pr_debug("%s: for %llu schedule op %d on disc %d\n",
                                 __func__, (unsigned long long)sh->sector,
-                                bi->bi_rw, i);
+                                bi->bi_opf, i);
                         atomic_inc(&sh->count);
                         if (sh != head_sh)
                                 atomic_inc(&head_sh->count);
@@ -1014,7 +1014,7 @@ again:
                                 bi->bi_iter.bi_sector = (sh->sector
                                                          + rdev->data_offset);
                         if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
-                                bi->bi_rw |= REQ_NOMERGE;
+                                bi->bi_opf |= REQ_NOMERGE;
 
                         if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
                                 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
@@ -1055,7 +1055,7 @@ again:
                         pr_debug("%s: for %llu schedule op %d on "
                                  "replacement disc %d\n",
                                 __func__, (unsigned long long)sh->sector,
-                                rbi->bi_rw, i);
+                                rbi->bi_opf, i);
                         atomic_inc(&sh->count);
                         if (sh != head_sh)
                                 atomic_inc(&head_sh->count);
@@ -1088,7 +1088,7 @@ again:
                         if (op_is_write(op))
                                 set_bit(STRIPE_DEGRADED, &sh->state);
                         pr_debug("skip op %d on disc %d for sector %llu\n",
-                                bi->bi_rw, i, (unsigned long long)sh->sector);
+                                bi->bi_opf, i, (unsigned long long)sh->sector);
                         clear_bit(R5_LOCKED, &sh->dev[i].flags);
                         set_bit(STRIPE_HANDLE, &sh->state);
                 }
@@ -1619,9 +1619,9 @@ again:
 
                 while (wbi && wbi->bi_iter.bi_sector <
                         dev->sector + STRIPE_SECTORS) {
-                        if (wbi->bi_rw & REQ_FUA)
+                        if (wbi->bi_opf & REQ_FUA)
                                 set_bit(R5_WantFUA, &dev->flags);
-                        if (wbi->bi_rw & REQ_SYNC)
+                        if (wbi->bi_opf & REQ_SYNC)
                                 set_bit(R5_SyncIO, &dev->flags);
                         if (bio_op(wbi) == REQ_OP_DISCARD)
                                 set_bit(R5_Discard, &dev->flags);
@@ -5154,7 +5154,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
         DEFINE_WAIT(w);
         bool do_prepare;
 
-        if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
+        if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
                 int ret = r5l_handle_flush_request(conf->log, bi);
 
                 if (ret == 0)
@@ -5237,7 +5237,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                         (unsigned long long)logical_sector);
 
                 sh = raid5_get_active_stripe(conf, new_sector, previous,
-                                       (bi->bi_rw & REQ_RAHEAD), 0);
+                                       (bi->bi_opf & REQ_RAHEAD), 0);
                 if (sh) {
                         if (unlikely(previous)) {
                                 /* expansion might have moved on while waiting for a
@@ -5305,7 +5305,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                         set_bit(STRIPE_HANDLE, &sh->state);
                         clear_bit(STRIPE_DELAYED, &sh->state);
                         if ((!sh->batch_head || sh == sh->batch_head) &&
-                            (bi->bi_rw & REQ_SYNC) &&
+                            (bi->bi_opf & REQ_SYNC) &&
                             !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                 atomic_inc(&conf->preread_active_stripes);
                         release_stripe_plug(mddev, sh);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 7cf3bdfaf809..88e91666f145 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 
 static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
                         struct page *page, unsigned int len, unsigned int off,
-                        int op, sector_t sector)
+                        bool is_write, sector_t sector)
 {
         int ret;
 
-        if (!op_is_write(op)) {
+        if (!is_write) {
                 ret = btt_read_pg(btt, bip, page, off, sector, len);
                 flush_dcache_page(page);
         } else {
@@ -1180,7 +1180,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
                 BUG_ON(len % btt->sector_size);
 
                 err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
-                                  bio_op(bio), iter.bi_sector);
+                                  op_is_write(bio_op(bio)), iter.bi_sector);
                 if (err) {
                         dev_info(&btt->nd_btt->dev,
                                         "io error in %s sector %lld, len %d,\n",
@@ -1200,12 +1200,12 @@ out:
 }
 
 static int btt_rw_page(struct block_device *bdev, sector_t sector,
-                struct page *page, int op)
+                struct page *page, bool is_write)
 {
         struct btt *btt = bdev->bd_disk->private_data;
 
-        btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, op, sector);
-        page_endio(page, op, 0);
+        btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
+        page_endio(page, is_write, 0);
         return 0;
 }
 
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index d64d92481c1d..571a6c7ee2fc 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -67,7 +67,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
 }
 
 static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
-                        unsigned int len, unsigned int off, int op,
+                        unsigned int len, unsigned int off, bool is_write,
                         sector_t sector)
 {
         int rc = 0;
@@ -79,7 +79,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
         if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
                 bad_pmem = true;
 
-        if (!op_is_write(op)) {
+        if (!is_write) {
                 if (unlikely(bad_pmem))
                         rc = -EIO;
                 else {
@@ -128,13 +128,13 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
         struct pmem_device *pmem = q->queuedata;
         struct nd_region *nd_region = to_region(pmem);
 
-        if (bio->bi_rw & REQ_FLUSH)
+        if (bio->bi_opf & REQ_FLUSH)
                 nvdimm_flush(nd_region);
 
         do_acct = nd_iostat_start(bio, &start);
         bio_for_each_segment(bvec, bio, iter) {
                 rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
-                                bvec.bv_offset, bio_op(bio),
+                                bvec.bv_offset, op_is_write(bio_op(bio)),
                                 iter.bi_sector);
                 if (rc) {
                         bio->bi_error = rc;
@@ -144,7 +144,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
         if (do_acct)
                 nd_iostat_end(bio, start);
 
-        if (bio->bi_rw & REQ_FUA)
+        if (bio->bi_opf & REQ_FUA)
                 nvdimm_flush(nd_region);
 
         bio_endio(bio);
@@ -152,12 +152,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
-                       struct page *page, int op)
+                       struct page *page, bool is_write)
 {
         struct pmem_device *pmem = bdev->bd_queue->queuedata;
         int rc;
 
-        rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, op, sector);
+        rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
 
         /*
          * The ->rw_page interface is subtle and tricky.  The core
@@ -166,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
          * caused by double completion.
          */
         if (rc == 0)
-                page_endio(page, op, 0);
+                page_endio(page, is_write, 0);
 
         return rc;
 }
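
The pmem and btt hunks together pin down the ->rw_page() contract after this
series: the driver receives a bool, performs the transfer synchronously, and
on success calls page_endio() with the same bool so the core completes the
page exactly once. A hedged sketch (demo_transfer() is hypothetical;
page_endio() is the real helper):

static int demo_rw_page(struct block_device *bdev, sector_t sector,
                        struct page *page, bool is_write)
{
        int rc = demo_transfer(bdev->bd_disk->private_data, page,
                               PAGE_SIZE, 0, is_write, sector);

        /* only the success path completes the page, as pmem_rw_page() does */
        if (rc == 0)
                page_endio(page, is_write, 0);
        return rc;
}
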
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 47cf6c977367..372d744315f3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -388,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
         bio = bio_alloc(GFP_KERNEL, 0);
         bio->bi_end_io = iblock_end_io_flush;
         bio->bi_bdev = ib_dev->ibd_bd;
-        bio->bi_rw = WRITE_FLUSH;
+        bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
         if (!immed)
                 bio->bi_private = cmd;
         submit_bio(bio);
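
A closing illustration, not part of the patch: open-coded assignments such as
"bio->bi_rw = WRITE_FLUSH" become bio_set_op_attrs(), which packs the
REQ_OP_* value and the flag bits into bi_opf in one place. Sketch of an
iblock-style flush submission with hypothetical demo_* naming:

static void demo_submit_flush(struct block_device *bdev,
                              bio_end_io_t *done, void *priv)
{
        struct bio *bio = bio_alloc(GFP_KERNEL, 0);

        bio->bi_bdev = bdev;
        bio->bi_end_io = done;
        bio->bi_private = priv;
        bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
        submit_bio(bio);
}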