 51 files changed, 158 insertions(+), 157 deletions(-)
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 026d13362aca..bcdb2b4c1f12 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -269,7 +269,7 @@ Arjan's proposed request priority scheme allows higher levels some broad
 requests which haven't aged too much on the queue. Potentially this priority
 could even be exposed to applications in some manner, providing higher level
 tunability. Time based aging avoids starvation of lower priority
-requests. Some bits in the bi_rw flags field in the bio structure are
+requests. Some bits in the bi_opf flags field in the bio structure are
 intended to be used for this priority information.
 
 
@@ -432,7 +432,7 @@ struct bio {
        struct bio *bi_next;    /* request queue link */
        struct block_device *bi_bdev;   /* target device */
        unsigned long bi_flags; /* status, command, etc */
-       unsigned long bi_rw;    /* low bits: r/w, high: priority */
+       unsigned long bi_opf;   /* low bits: r/w, high: priority */
 
        unsigned int bi_vcnt;   /* how may bio_vec's */
        struct bvec_iter bi_iter;       /* current index into bio_vec array */
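Note: the biodoc.txt hunks above capture the point of the rename. In this tree
bi_opf holds the REQ_OP_* value in its high bits and the modifier flags in the
low bits, so code reads the operation through bio_op() and tests flags by
masking bi_opf directly. A minimal sketch of that idiom; the helper name is
ours, not part of the patch:

    /* Hypothetical helper illustrating the post-rename idiom. */
    static bool bio_is_sync_write(struct bio *bio)
    {
            return bio_op(bio) == REQ_OP_WRITE && (bio->bi_opf & REQ_SYNC);
    }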
diff --git a/Documentation/device-mapper/dm-flakey.txt b/Documentation/device-mapper/dm-flakey.txt
index 6ff5c2327227..c43030718cef 100644
--- a/Documentation/device-mapper/dm-flakey.txt
+++ b/Documentation/device-mapper/dm-flakey.txt
@@ -42,7 +42,7 @@ Optional feature parameters:
 	<direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
 		     'w' is incompatible with drop_writes.
 	<value>: The value (from 0-255) to write.
-	<flags>: Perform the replacement only if bio->bi_rw has all the
+	<flags>: Perform the replacement only if bio->bi_opf has all the
 		 selected flags set.
 
 Examples:
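Note: for concreteness, a hedged example of a table line using this feature
(device name, size, and feature-argument count are illustrative, in the style
of the doc's own Examples section). While the device is in its 20-second down
interval, the 32nd byte of READ bios is replaced with the value 1; the
trailing 0 is the <flags> mask, so no particular bi_opf bits are required for
the corruption to apply:

    dmsetup create flakey0 --table \
      "0 409600 flakey /dev/sdc1 0 180 20 5 corrupt_bio_byte 32 r 1 0"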
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index f70cc3bdfd01..63f72f00c72e 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -86,7 +86,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
 
 	bip->bip_bio = bio;
 	bio->bi_integrity = bip;
-	bio->bi_rw |= REQ_INTEGRITY;
+	bio->bi_opf |= REQ_INTEGRITY;
 
 	return bip;
 err:
diff --git a/block/bio.c b/block/bio.c
index 3f76a38a5e2d..f39477538fef 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -580,7 +580,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 	 */
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio_set_flag(bio, BIO_CLONED);
-	bio->bi_rw = bio_src->bi_rw;
+	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
 
@@ -663,7 +663,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 	if (!bio)
 		return NULL;
 	bio->bi_bdev = bio_src->bi_bdev;
-	bio->bi_rw = bio_src->bi_rw;
+	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
 
@@ -873,7 +873,7 @@ int submit_bio_wait(struct bio *bio)
 	init_completion(&ret.event);
 	bio->bi_private = &ret;
 	bio->bi_end_io = submit_bio_wait_endio;
-	bio->bi_rw |= REQ_SYNC;
+	bio->bi_opf |= REQ_SYNC;
 	submit_bio(bio);
 	wait_for_completion_io(&ret.event);
 
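Note: after this change submit_bio_wait() ORs REQ_SYNC into bi_opf itself, so
a caller only sets up the op and target. A hedged usage sketch (variable names
illustrative, error handling elided):

    struct bio *bio = bio_alloc(GFP_NOIO, 1);
    int ret;

    bio->bi_bdev = bdev;                    /* illustrative target device */
    bio->bi_iter.bi_sector = sector;
    bio_add_page(bio, page, PAGE_SIZE, 0);
    bio_set_op_attrs(bio, REQ_OP_READ, 0);  /* pack op + flags into bi_opf */
    ret = submit_bio_wait(bio);             /* REQ_SYNC added internally */
    bio_put(bio);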
diff --git a/block/blk-core.c b/block/blk-core.c
index a687e9cc16c2..999442ec4601 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	 * Flush requests do not use the elevator so skip initialization.
 	 * This allows a request to share the flush and elevator data.
 	 */
-	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
+	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
 		return false;
 
 	return true;
@@ -1504,7 +1504,7 @@ EXPORT_SYMBOL_GPL(blk_add_request_payload);
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 			    struct bio *bio)
 {
-	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
 	if (!ll_back_merge_fn(q, req, bio))
 		return false;
@@ -1526,7 +1526,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 			     struct bio *bio)
 {
-	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
 	if (!ll_front_merge_fn(q, req, bio))
 		return false;
@@ -1648,8 +1648,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->cmd_type = REQ_TYPE_FS;
 
-	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
-	if (bio->bi_rw & REQ_RAHEAD)
+	req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
+	if (bio->bi_opf & REQ_RAHEAD)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 
 	req->errors = 0;
@@ -1660,7 +1660,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
-	const bool sync = !!(bio->bi_rw & REQ_SYNC);
+	const bool sync = !!(bio->bi_opf & REQ_SYNC);
 	struct blk_plug *plug;
 	int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
 	struct request *req;
@@ -1681,7 +1681,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 		return BLK_QC_T_NONE;
 	}
 
-	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
+	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
 		goto get_rq;
@@ -1728,7 +1728,7 @@ get_rq:
 	/*
 	 * Add in META/PRIO flags, if set, before we get to the IO scheduler
 	 */
-	rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
+	rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
 
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
@@ -1805,7 +1805,7 @@ static void handle_bad_sector(struct bio *bio)
 	printk(KERN_INFO "attempt to access beyond end of device\n");
 	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
 	       bdevname(bio->bi_bdev, b),
-	       bio->bi_rw,
+	       bio->bi_opf,
 	       (unsigned long long)bio_end_sector(bio),
 	       (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 }
@@ -1918,9 +1918,9 @@ generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+	if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-		bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
+		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
 		if (!nr_sectors) {
 			err = 0;
 			goto end_io;
@@ -2219,7 +2219,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
 	 * one.
 	 */
 	for (bio = rq->bio; bio; bio = bio->bi_next) {
-		if ((bio->bi_rw & ff) != ff)
+		if ((bio->bi_opf & ff) != ff)
 			break;
 		bytes += bio->bi_iter.bi_size;
 	}
@@ -2630,7 +2630,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	/* mixed attributes always follow the first bio */
 	if (req->cmd_flags & REQ_MIXED_MERGE) {
 		req->cmd_flags &= ~REQ_FAILFAST_MASK;
-		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
 	}
 
 	/*
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 41cbd4878958..3eec75a9e91d 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -186,7 +186,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
 
 	if (split) {
 		/* there isn't chance to merge the splitted bio */
-		split->bi_rw |= REQ_NOMERGE;
+		split->bi_opf |= REQ_NOMERGE;
 
 		bio_chain(split, *bio);
 		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
@@ -616,9 +616,9 @@ void blk_rq_set_mixed_merge(struct request *rq)
 	 * Distributes the attributs to each bio.
 	 */
 	for (bio = rq->bio; bio; bio = bio->bi_next) {
-		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
-			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
-		bio->bi_rw |= ff;
+		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
+			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
+		bio->bi_opf |= ff;
 	}
 	rq->cmd_flags |= REQ_MIXED_MERGE;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6a63da101bc4..e931a0e8e73d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1234,7 +1234,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rw_is_sync(bio_op(bio), bio->bi_rw))
+	if (rw_is_sync(bio_op(bio), bio->bi_opf))
 		op_flags |= REQ_SYNC;
 
 	trace_block_getrq(q, bio, op);
@@ -1302,8 +1302,8 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_map_ctx data;
 	struct request *rq;
 	unsigned int request_count = 0;
@@ -1396,8 +1396,8 @@ done:
 */
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_plug *plug;
 	unsigned int request_count = 0;
 	struct blk_map_ctx data;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index c5494e403239..f1aba26f4719 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -821,8 +821,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	 * second time when it eventually gets issued. Set it when a bio
 	 * is being charged to a tg.
 	 */
-	if (!(bio->bi_rw & REQ_THROTTLED))
-		bio->bi_rw |= REQ_THROTTLED;
+	if (!(bio->bi_opf & REQ_THROTTLED))
+		bio->bi_opf |= REQ_THROTTLED;
 }
 
 /**
@@ -1399,7 +1399,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	/* see throtl_charge_bio() */
-	if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
+	if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
 		goto out;
 
 	spin_lock_irq(q->queue_lock);
@@ -1478,7 +1478,7 @@ out:
 	 * being issued.
 	 */
 	if (!throttled)
-		bio->bi_rw &= ~REQ_THROTTLED;
+		bio->bi_opf &= ~REQ_THROTTLED;
 	return throttled;
 }
 
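Note: the two throttle hunks above are a matched pair: throtl_charge_bio()
sets the flag that the early test in blk_throtl_bio() checks, so a bio that
revisits the throttle path (e.g. after a split) is charged only once, and the
flag is cleared again when the bio is passed on unthrottled. A condensed model
under that reading; tg_over_limit() is a hypothetical stand-in for the real
dispatch decision:

    /* Condensed model of the REQ_THROTTLED round trip (not a real function). */
    static bool throttle_once(struct throtl_grp *tg, struct bio *bio, int rw)
    {
            if (bio->bi_opf & REQ_THROTTLED)        /* already charged */
                    return false;
            bio->bi_opf |= REQ_THROTTLED;           /* charge exactly once */
            if (!tg_over_limit(tg, rw)) {           /* hypothetical check */
                    bio->bi_opf &= ~REQ_THROTTLED;  /* issuing now; clear */
                    return false;
            }
            return true;                            /* hold for later dispatch */
    }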
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index acabba198de9..cc2f6dbd4303 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -918,7 +918,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
 */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
+	return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC);
 }
 
 /*
@@ -2565,7 +2565,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 			   struct bio *bio)
 {
-	cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
+	cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
 }
 
 static void
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 0501ae0c517b..100be556e613 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1663,13 +1663,13 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
 			     struct bio *bio)
 {
 	if (connection->agreed_pro_version >= 95)
-		return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-			(bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
-			(bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
+		return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
+			(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
+			(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
 			(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
 			(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
 	else
-		return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+		return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
 }
 
 /* Used to send write or TRIM aka REQ_DISCARD requests
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index df45713dfbe8..942384f34e22 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1564,7 +1564,7 @@ static void drbd_issue_peer_wsame(struct drbd_device *device,
 * drbd_submit_peer_request()
 * @device:	DRBD device.
 * @peer_req:	peer request
- * @rw:		flag field, see bio->bi_rw
+ * @rw:		flag field, see bio->bi_opf
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 66b8e4bb74d8..de279fe4e4fd 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -288,7 +288,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 	 */
 	if (!ok &&
 	    bio_op(req->master_bio) == REQ_OP_READ &&
-	    !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+	    !(req->master_bio->bi_opf & REQ_RAHEAD) &&
 	    !list_empty(&req->tl_requests))
 		req->rq_state |= RQ_POSTPONED;
 
@@ -1137,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req)
 	 * replicating, in which case there is no point. */
 	if (unlikely(req->i.size == 0)) {
 		/* The only size==0 bios we expect are empty flushes. */
-		D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
+		D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
 		if (remote)
 			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
 		return remote;
@@ -1176,7 +1176,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
 
 	if (bio_op(bio) != REQ_OP_READ)
 		type = DRBD_FAULT_DT_WR;
-	else if (bio->bi_rw & REQ_RAHEAD)
+	else if (bio->bi_opf & REQ_RAHEAD)
 		type = DRBD_FAULT_DT_RA;
 	else
 		type = DRBD_FAULT_DT_RD;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 35dbb3dca47e..c6755c9a0aea 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -256,7 +256,7 @@ void drbd_request_endio(struct bio *bio)
 		what = DISCARD_COMPLETED_WITH_ERROR;
 		break;
 	case REQ_OP_READ:
-		if (bio->bi_rw & REQ_RAHEAD)
+		if (bio->bi_opf & REQ_RAHEAD)
 			what = READ_AHEAD_COMPLETED_WITH_ERROR;
 		else
 			what = READ_COMPLETED_WITH_ERROR;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 9393bc730acf..90fa4ac149db 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1157,7 +1157,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
 
 	bio_reset(pkt->bio);
 	pkt->bio->bi_bdev = pd->bdev;
-	pkt->bio->bi_rw = REQ_WRITE;
+	bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
 	pkt->bio->bi_iter.bi_sector = new_sector;
 	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
 	pkt->bio->bi_vcnt = pkt->frames;
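Note: the pktcdvd hunk is one of the few places where this patch is not a
literal rename: a direct assignment of REQ_WRITE to the old field becomes
bio_set_op_attrs(), which packs the REQ_OP_* value and the flag bits into the
single bi_opf word. A simplified model of that helper, assuming the 4.8-era
layout with the op in the high bits (the real macro in blk_types.h also masks
out any previous op and WARNs on out-of-range values):

    /* Simplified model of bio_set_op_attrs() under the stated assumption. */
    static inline void set_op_attrs(struct bio *bio, unsigned int op,
                                    unsigned int op_flags)
    {
            bio->bi_opf = (op << BIO_OP_SHIFT) | op_flags;
    }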
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index d0a3e6d4515f..be90e15854ed 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
 	*card->biotail = bio;
 	bio->bi_next = NULL;
 	card->biotail = &bio->bi_next;
-	if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+	if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
 		activate(card);
 	spin_unlock_irq(&card->lock);
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 69f16f43f8ab..4b177fe11ebb 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -208,7 +208,7 @@ static void bch_data_insert_start(struct closure *cl)
 	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
 	 * flush, it'll wait on the journal write.
 	 */
-	bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
+	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
 
 	do {
 		unsigned i;
@@ -405,7 +405,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	if (!congested &&
 	    mode == CACHE_MODE_WRITEBACK &&
 	    op_is_write(bio_op(bio)) &&
-	    (bio->bi_rw & REQ_SYNC))
+	    (bio->bi_opf & REQ_SYNC))
 		goto rescale;
 
 	spin_lock(&dc->io_lock);
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.write_prio = 0;
 	s->iop.error = 0;
 	s->iop.flags = 0;
-	s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
+	s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
 	s->iop.wq = bcache_wq;
 
 	return s;
@@ -796,8 +796,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		goto out_submit;
 	}
 
-	if (!(bio->bi_rw & REQ_RAHEAD) &&
-	    !(bio->bi_rw & REQ_META) &&
+	if (!(bio->bi_opf & REQ_RAHEAD) &&
+	    !(bio->bi_opf & REQ_META) &&
 	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
 		reada = min_t(sector_t, dc->readahead >> 9,
 			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
@@ -920,7 +920,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		bch_writeback_add(dc);
 		s->iop.bio = bio;
 
-		if (bio->bi_rw & REQ_PREFLUSH) {
+		if (bio->bi_opf & REQ_PREFLUSH) {
 			/* Also need to send a flush to the backing device */
 			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
 							     dc->disk.bio_split);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 88ef6d14cce3..95a4ca6ce6ff 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
 	for (i = 0; i < KEY_PTRS(k); i++) {
 		struct bio *bio = bch_bbio_alloc(c);
 
-		bio->bi_rw = REQ_SYNC|REQ_META|op_flags;
+		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
 		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
 		bio->bi_end_io = uuid_endio;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 073a042aed24..301eaf565167 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -57,7 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 	if (would_skip)
 		return false;
 
-	return bio->bi_rw & REQ_SYNC ||
+	return bio->bi_opf & REQ_SYNC ||
 		in_use <= CUTOFF_WRITEBACK;
 }
 
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 718744db62df..59b2c50562e4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -788,7 +788,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 
 	spin_lock_irqsave(&cache->lock, flags);
 	if (cache->need_tick_bio &&
-	    !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) &&
+	    !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
 	    bio_op(bio) != REQ_OP_DISCARD) {
 		pb->tick = true;
 		cache->need_tick_bio = false;
@@ -830,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static int bio_triggers_commit(struct cache *cache, struct bio *bio)
 {
-	return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+	return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 }
 
 /*
@@ -1069,7 +1069,7 @@ static void dec_io_migrations(struct cache *cache)
 static bool discard_or_flush(struct bio *bio)
 {
 	return bio_op(bio) == REQ_OP_DISCARD ||
-	       bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+	       bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1980,7 +1980,7 @@ static void process_deferred_bios(struct cache *cache)
 
 		bio = bio_list_pop(&bios);
 
-		if (bio->bi_rw & REQ_PREFLUSH)
+		if (bio->bi_opf & REQ_PREFLUSH)
 			process_flush_bio(cache, bio);
 		else if (bio_op(bio) == REQ_OP_DISCARD)
 			process_discard_bio(cache, &structs, bio);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8f2e3e2ffd26..4e9784b4e0ac 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
 	clone->bi_bdev = cc->dev->bdev;
-	bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw);
+	bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_opf);
 }
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1915,7 +1915,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
 	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
 	 */
-	if (unlikely(bio->bi_rw & REQ_PREFLUSH ||
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
 	    bio_op(bio) == REQ_OP_DISCARD)) {
 		bio->bi_bdev = cc->dev->bdev;
 		if (bio_sectors(bio))
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 2faf49d8f4d7..bf2b2676cb8a 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1542,7 +1542,7 @@ static int era_map(struct dm_target *ti, struct bio *bio)
 	/*
 	 * REQ_PREFLUSH bios carry no data, so we're not interested in them.
 	 */
-	if (!(bio->bi_rw & REQ_PREFLUSH) &&
+	if (!(bio->bi_opf & REQ_PREFLUSH) &&
 	    (bio_data_dir(bio) == WRITE) &&
 	    !metadata_current_marked(era->md, block)) {
 		defer_bio(era, bio);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 19db13e99466..97e446d54a15 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -16,7 +16,7 @@
 #define DM_MSG_PREFIX "flakey"
 
 #define all_corrupt_bio_flags_match(bio, fc)	\
-	(((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
+	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
 
 /*
 * Flakey: Used for testing only, simulates intermittent,
@@ -266,9 +266,9 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
 		data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
 
 		DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
-			"(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n",
+			"(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
 			bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
-			(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+			(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
 			(unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
 	}
 }
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index daa03e41654a..0bf1a12e35fe 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -505,9 +505,9 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
- * If you fail to do one of these, the IO will be submitted to the disk after
- * q->unplug_delay, which defaults to 3ms in blk-settings.c.
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 	  struct dm_io_region *where, unsigned long *sync_error_bits)
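Note: the rewrapped comment above keeps the old rule: an asynchronous dm_io()
caller must either unplug later or set REQ_SYNC. The dm-raid1 do_write() hunk
further down shows the in-tree pattern; restated as a standalone hedged sketch
with illustrative callback, context, and client names:

    struct dm_io_request io_req = {
            .bi_op = REQ_OP_WRITE,
            .bi_op_flags = REQ_SYNC,        /* satisfies the rule above */
            .mem.type = DM_IO_BIO,
            .mem.ptr.bio = bio,
            .notify.fn = my_write_done,     /* makes the call asynchronous */
            .notify.context = ctx,
            .client = io_client,
    };

    return dm_io(&io_req, 1, &region, NULL);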
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index b5dbf7a0515e..4ab68033f9d1 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -555,8 +555,8 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
 	struct bio_vec bv;
 	size_t alloc_size;
 	int i = 0;
-	bool flush_bio = (bio->bi_rw & REQ_PREFLUSH);
-	bool fua_bio = (bio->bi_rw & REQ_FUA);
+	bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
+	bool fua_bio = (bio->bi_opf & REQ_FUA);
 	bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
 
 	pb->block = NULL;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d7107d23b897..ac734e5bbe48 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -661,7 +661,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 
 	bio->bi_error = 0;
 	bio->bi_bdev = pgpath->path.dev->bdev;
-	bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 
 	if (pgpath->pg->ps.type->start_io)
 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dac55b254a09..bdf1606f67bc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -657,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = bio->bi_rw & WRITE_FLUSH_FUA,
+		.bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
 		.mem.type = DM_IO_BIO,
 		.mem.ptr.bio = bio,
 		.notify.fn = write_callback,
@@ -704,7 +704,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 	bio_list_init(&requeue);
 
 	while ((bio = bio_list_pop(writes))) {
-		if ((bio->bi_rw & REQ_PREFLUSH) ||
+		if ((bio->bi_opf & REQ_PREFLUSH) ||
 		    (bio_op(bio) == REQ_OP_DISCARD)) {
 			bio_list_add(&sync, bio);
 			continue;
@@ -1217,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	 * If region is not in-sync queue the bio.
 	 */
 	if (!r || (r == -EWOULDBLOCK)) {
-		if (bio->bi_rw & REQ_RAHEAD)
+		if (bio->bi_opf & REQ_RAHEAD)
 			return -EWOULDBLOCK;
 
 		queue_bio(ms, bio, rw);
@@ -1253,7 +1253,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 	 * We need to dec pending if this was a write.
 	 */
 	if (rw == WRITE) {
-		if (!(bio->bi_rw & REQ_PREFLUSH) &&
+		if (!(bio->bi_opf & REQ_PREFLUSH) &&
 		    bio_op(bio) != REQ_OP_DISCARD)
 			dm_rh_dec(ms->rh, bio_record->write_region);
 		return error;
@@ -1262,7 +1262,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 	if (error == -EOPNOTSUPP)
 		goto out;
 
-	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
 		goto out;
 
 	if (unlikely(error)) {
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index b11813431f31..85c32b22a420 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -398,7 +398,7 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
 	region_t region = dm_rh_bio_to_region(rh, bio);
 	int recovering = 0;
 
-	if (bio->bi_rw & REQ_PREFLUSH) {
+	if (bio->bi_opf & REQ_PREFLUSH) {
 		rh->flush_failure = 1;
 		return;
 	}
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
 	struct bio *bio;
 
 	for (bio = bios->head; bio; bio = bio->bi_next) {
-		if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
+		if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
 			continue;
 		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
 	}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ce2a910709f7..c65feeada864 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
 	init_tracked_chunk(bio);
 
-	if (bio->bi_rw & REQ_PREFLUSH) {
+	if (bio->bi_opf & REQ_PREFLUSH) {
 		bio->bi_bdev = s->cow->bdev;
 		return DM_MAPIO_REMAPPED;
 	}
@@ -1800,7 +1800,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 
 	init_tracked_chunk(bio);
 
-	if (bio->bi_rw & REQ_PREFLUSH) {
+	if (bio->bi_opf & REQ_PREFLUSH) {
 		if (!dm_bio_get_target_bio_nr(bio))
 			bio->bi_bdev = s->origin->bdev;
 		else
@@ -2286,7 +2286,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
 
 	bio->bi_bdev = o->dev->bdev;
 
-	if (unlikely(bio->bi_rw & REQ_PREFLUSH))
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
 		return DM_MAPIO_REMAPPED;
 
 	if (bio_data_dir(bio) != WRITE)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 83f1d4667195..28193a57bf47 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -286,7 +286,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
 	uint32_t stripe;
 	unsigned target_bio_nr;
 
-	if (bio->bi_rw & REQ_PREFLUSH) {
+	if (bio->bi_opf & REQ_PREFLUSH) {
 		target_bio_nr = dm_bio_get_target_bio_nr(bio);
 		BUG_ON(target_bio_nr >= sc->stripes);
 		bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
@@ -383,7 +383,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
 	if (!error)
 		return 0; /* I/O complete */
 
-	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
 		return error;
 
 	if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 197ea2003400..d1c05c12a9db 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 
 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 {
-	return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+	return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
 		dm_thin_changed_this_transaction(tc->td);
 }
 
@@ -870,7 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
 	struct bio *bio;
 
 	while ((bio = bio_list_pop(&cell->bios))) {
-		if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+		if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
 		    bio_op(bio) == REQ_OP_DISCARD)
 			bio_list_add(&info->defer_bios, bio);
 		else {
@@ -1717,7 +1717,7 @@ static void __remap_and_issue_shared_cell(void *context,
 
 	while ((bio = bio_list_pop(&cell->bios))) {
 		if ((bio_data_dir(bio) == WRITE) ||
-		    (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+		    (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
 		     bio_op(bio) == REQ_OP_DISCARD))
 			bio_list_add(&info->defer_bios, bio);
 		else {
@@ -2635,7 +2635,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
 	    bio_op(bio) == REQ_OP_DISCARD) {
 		thin_defer_bio_with_throttle(tc, bio);
 		return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 618b8752dcf1..b616f11d8473 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -37,7 +37,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
 {
 	switch (bio_op(bio)) {
 	case REQ_OP_READ:
-		if (bio->bi_rw & REQ_RAHEAD) {
+		if (bio->bi_opf & REQ_RAHEAD) {
 			/* readahead of null bytes only wastes buffer cache */
 			return -EIO;
 		}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dfa09e14e847..fa9b1cb4438a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -798,12 +798,12 @@ static void dec_pending(struct dm_io *io, int error)
 		if (io_error == DM_ENDIO_REQUEUE)
 			return;
 
-		if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
+		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
 			/*
 			 * Preflush done for flush with data, reissue
 			 * without REQ_PREFLUSH.
 			 */
-			bio->bi_rw &= ~REQ_PREFLUSH;
+			bio->bi_opf &= ~REQ_PREFLUSH;
 			queue_io(md, bio);
 		} else {
 			/* done with normal IO or empty flush */
@@ -964,7 +964,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 {
 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-	BUG_ON(bio->bi_rw & REQ_PREFLUSH);
+	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
 	BUG_ON(bi_size > *tio->len_ptr);
 	BUG_ON(n_sectors > bi_size);
 	*tio->len_ptr -= bi_size - n_sectors;
@@ -1252,7 +1252,7 @@ static void __split_and_process_bio(struct mapped_device *md,
 
 	start_io_acct(ci.io);
 
-	if (bio->bi_rw & REQ_PREFLUSH) {
+	if (bio->bi_opf & REQ_PREFLUSH) {
 		ci.bio = &ci.md->flush_bio;
 		ci.sector_count = 0;
 		error = __send_empty_flush(&ci);
@@ -1290,7 +1290,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
 		dm_put_live_table(md, srcu_idx);
 
-		if (!(bio->bi_rw & REQ_RAHEAD))
+		if (!(bio->bi_opf & REQ_RAHEAD))
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
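Note: the dec_pending() hunk above implements a two-phase flush: a
flush-with-data bio is first completed as a preflush, then requeued with
REQ_PREFLUSH cleared so only the data portion is processed. Restated as a
hypothetical helper (the function name is ours; the two statements come
straight from the hunk):

    static void requeue_data_phase(struct mapped_device *md, struct bio *bio)
    {
            bio->bi_opf &= ~REQ_PREFLUSH;   /* preflush already done */
            queue_io(md, bio);              /* reissue the data portion */
    }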
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 70ff888d25d0..86f5d435901d 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 	struct bio *split;
 	sector_t start_sector, end_sector, data_offset;
 
-	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2c3ab6f5e6be..d646f6e444f0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -285,7 +285,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 	 */
 	sectors = bio_sectors(bio);
 	/* bio could be mergeable after passing to underlayer */
-	bio->bi_rw &= ~REQ_NOMERGE;
+	bio->bi_opf &= ~REQ_NOMERGE;
 	mddev->pers->make_request(mddev, bio);
 
 	cpu = part_stat_lock();
@@ -414,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
 		/* an empty barrier - all done */
 		bio_endio(bio);
 	else {
-		bio->bi_rw &= ~REQ_PREFLUSH;
+		bio->bi_opf &= ~REQ_PREFLUSH;
 		mddev->pers->make_request(mddev, bio);
 	}
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4974682842ae..673efbd6fc47 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio)
 
 	if (!bio->bi_error)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!(bio->bi_rw & REQ_RAHEAD)) {
+	else if (!(bio->bi_opf & REQ_RAHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -112,7 +112,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
 
-	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
@@ -135,7 +135,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 
 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
+	mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
@@ -360,7 +360,7 @@ static void multipathd(struct md_thread *thread)
 		bio->bi_iter.bi_sector +=
 			conf->multipaths[mp_bh->path].rdev->data_offset;
 		bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-		bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+		bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 		bio->bi_end_io = multipath_end_request;
 		bio->bi_private = mp_bh;
 		generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c3d439083212..258986a2699d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 	struct md_rdev *tmp_dev;
 	struct bio *split;
 
-	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 46168ef2e279..21dc00eb1989 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -1043,8 +1043,8 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio) | |||
| 1043 | unsigned long flags; | 1043 | unsigned long flags; |
| 1044 | const int op = bio_op(bio); | 1044 | const int op = bio_op(bio); |
| 1045 | const int rw = bio_data_dir(bio); | 1045 | const int rw = bio_data_dir(bio); |
| 1046 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 1046 | const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); |
| 1047 | const unsigned long do_flush_fua = (bio->bi_rw & | 1047 | const unsigned long do_flush_fua = (bio->bi_opf & |
| 1048 | (REQ_PREFLUSH | REQ_FUA)); | 1048 | (REQ_PREFLUSH | REQ_FUA)); |
| 1049 | struct md_rdev *blocked_rdev; | 1049 | struct md_rdev *blocked_rdev; |
| 1050 | struct blk_plug_cb *cb; | 1050 | struct blk_plug_cb *cb; |
| @@ -2318,7 +2318,7 @@ read_more: | |||
| 2318 | raid_end_bio_io(r1_bio); | 2318 | raid_end_bio_io(r1_bio); |
| 2319 | } else { | 2319 | } else { |
| 2320 | const unsigned long do_sync | 2320 | const unsigned long do_sync |
| 2321 | = r1_bio->master_bio->bi_rw & REQ_SYNC; | 2321 | = r1_bio->master_bio->bi_opf & REQ_SYNC; |
| 2322 | if (bio) { | 2322 | if (bio) { |
| 2323 | r1_bio->bios[r1_bio->read_disk] = | 2323 | r1_bio->bios[r1_bio->read_disk] = |
| 2324 | mddev->ro ? IO_BLOCKED : NULL; | 2324 | mddev->ro ? IO_BLOCKED : NULL; |
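Annotation: raid1 latches the flag bits it needs into local masks up front, because bi_opf may be consulted long after submission (the read_more retry path above re-reads REQ_SYNC from the master bio for the same reason). The masking itself is plain bit arithmetic; a self-contained model follows, with illustrative flag values rather than the kernel's:

	#include <assert.h>

	/* Illustrative bit positions; the real values live in blk_types.h. */
	#define REQ_SYNC     (1u << 3)
	#define REQ_FUA      (1u << 4)
	#define REQ_PREFLUSH (1u << 5)

	int main(void)
	{
		unsigned int bi_opf = REQ_SYNC | REQ_FUA;
		unsigned long do_sync = bi_opf & REQ_SYNC;
		unsigned long do_flush_fua = bi_opf & (REQ_PREFLUSH | REQ_FUA);

		assert(do_sync);                 /* sync hint captured */
		assert(do_flush_fua == REQ_FUA); /* FUA set, no preflush */
		return 0;
	}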
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ed29fc899f06..0e4efcd10795 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -1054,8 +1054,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio) | |||
| 1054 | int i; | 1054 | int i; |
| 1055 | const int op = bio_op(bio); | 1055 | const int op = bio_op(bio); |
| 1056 | const int rw = bio_data_dir(bio); | 1056 | const int rw = bio_data_dir(bio); |
| 1057 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 1057 | const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); |
| 1058 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); | 1058 | const unsigned long do_fua = (bio->bi_opf & REQ_FUA); |
| 1059 | unsigned long flags; | 1059 | unsigned long flags; |
| 1060 | struct md_rdev *blocked_rdev; | 1060 | struct md_rdev *blocked_rdev; |
| 1061 | struct blk_plug_cb *cb; | 1061 | struct blk_plug_cb *cb; |
| @@ -1440,7 +1440,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) | |||
| 1440 | 1440 | ||
| 1441 | struct bio *split; | 1441 | struct bio *split; |
| 1442 | 1442 | ||
| 1443 | if (unlikely(bio->bi_rw & REQ_PREFLUSH)) { | 1443 | if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { |
| 1444 | md_flush_request(mddev, bio); | 1444 | md_flush_request(mddev, bio); |
| 1445 | return; | 1445 | return; |
| 1446 | } | 1446 | } |
| @@ -2533,7 +2533,7 @@ read_more: | |||
| 2533 | return; | 2533 | return; |
| 2534 | } | 2534 | } |
| 2535 | 2535 | ||
| 2536 | do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); | 2536 | do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC); |
| 2537 | slot = r10_bio->read_slot; | 2537 | slot = r10_bio->read_slot; |
| 2538 | printk_ratelimited( | 2538 | printk_ratelimited( |
| 2539 | KERN_ERR | 2539 | KERN_ERR |
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 5504ce2bac06..51f76ddbe265 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
| @@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) | |||
| 536 | bio_endio(bio); | 536 | bio_endio(bio); |
| 537 | return 0; | 537 | return 0; |
| 538 | } | 538 | } |
| 539 | bio->bi_rw &= ~REQ_PREFLUSH; | 539 | bio->bi_opf &= ~REQ_PREFLUSH; |
| 540 | return -EAGAIN; | 540 | return -EAGAIN; |
| 541 | } | 541 | } |
| 542 | 542 | ||
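Annotation: the raid5-cache hunk is one half of a small contract whose caller side appears in the raid5.c hunk just below: r5l_handle_flush_request() returns 0 when the log completed the flush itself (the bio is finished), and -EAGAIN after clearing REQ_PREFLUSH, telling the caller to submit the data portion normally. The caller-side shape, excerpted from raid5_make_request():

	if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
		int ret = r5l_handle_flush_request(conf->log, bi);

		if (ret == 0)
			return;	/* log handled the flush and ended the bio */
		/* ret == -EAGAIN: REQ_PREFLUSH cleared, fall through and
		 * process the remaining data like any other write */
	}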
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index d189e894b921..8912407a4dd0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -806,7 +806,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh | |||
| 806 | dd_idx = 0; | 806 | dd_idx = 0; |
| 807 | while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) | 807 | while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) |
| 808 | dd_idx++; | 808 | dd_idx++; |
| 809 | if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw || | 809 | if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || |
| 810 | bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) | 810 | bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) |
| 811 | goto unlock_out; | 811 | goto unlock_out; |
| 812 | 812 | ||
| @@ -1003,7 +1003,7 @@ again: | |||
| 1003 | 1003 | ||
| 1004 | pr_debug("%s: for %llu schedule op %d on disc %d\n", | 1004 | pr_debug("%s: for %llu schedule op %d on disc %d\n", |
| 1005 | __func__, (unsigned long long)sh->sector, | 1005 | __func__, (unsigned long long)sh->sector, |
| 1006 | bi->bi_rw, i); | 1006 | bi->bi_opf, i); |
| 1007 | atomic_inc(&sh->count); | 1007 | atomic_inc(&sh->count); |
| 1008 | if (sh != head_sh) | 1008 | if (sh != head_sh) |
| 1009 | atomic_inc(&head_sh->count); | 1009 | atomic_inc(&head_sh->count); |
| @@ -1014,7 +1014,7 @@ again: | |||
| 1014 | bi->bi_iter.bi_sector = (sh->sector | 1014 | bi->bi_iter.bi_sector = (sh->sector |
| 1015 | + rdev->data_offset); | 1015 | + rdev->data_offset); |
| 1016 | if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags)) | 1016 | if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags)) |
| 1017 | bi->bi_rw |= REQ_NOMERGE; | 1017 | bi->bi_opf |= REQ_NOMERGE; |
| 1018 | 1018 | ||
| 1019 | if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) | 1019 | if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) |
| 1020 | WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); | 1020 | WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); |
| @@ -1055,7 +1055,7 @@ again: | |||
| 1055 | pr_debug("%s: for %llu schedule op %d on " | 1055 | pr_debug("%s: for %llu schedule op %d on " |
| 1056 | "replacement disc %d\n", | 1056 | "replacement disc %d\n", |
| 1057 | __func__, (unsigned long long)sh->sector, | 1057 | __func__, (unsigned long long)sh->sector, |
| 1058 | rbi->bi_rw, i); | 1058 | rbi->bi_opf, i); |
| 1059 | atomic_inc(&sh->count); | 1059 | atomic_inc(&sh->count); |
| 1060 | if (sh != head_sh) | 1060 | if (sh != head_sh) |
| 1061 | atomic_inc(&head_sh->count); | 1061 | atomic_inc(&head_sh->count); |
| @@ -1088,7 +1088,7 @@ again: | |||
| 1088 | if (op_is_write(op)) | 1088 | if (op_is_write(op)) |
| 1089 | set_bit(STRIPE_DEGRADED, &sh->state); | 1089 | set_bit(STRIPE_DEGRADED, &sh->state); |
| 1090 | pr_debug("skip op %d on disc %d for sector %llu\n", | 1090 | pr_debug("skip op %d on disc %d for sector %llu\n", |
| 1091 | bi->bi_rw, i, (unsigned long long)sh->sector); | 1091 | bi->bi_opf, i, (unsigned long long)sh->sector); |
| 1092 | clear_bit(R5_LOCKED, &sh->dev[i].flags); | 1092 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
| 1093 | set_bit(STRIPE_HANDLE, &sh->state); | 1093 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1094 | } | 1094 | } |
| @@ -1619,9 +1619,9 @@ again: | |||
| 1619 | 1619 | ||
| 1620 | while (wbi && wbi->bi_iter.bi_sector < | 1620 | while (wbi && wbi->bi_iter.bi_sector < |
| 1621 | dev->sector + STRIPE_SECTORS) { | 1621 | dev->sector + STRIPE_SECTORS) { |
| 1622 | if (wbi->bi_rw & REQ_FUA) | 1622 | if (wbi->bi_opf & REQ_FUA) |
| 1623 | set_bit(R5_WantFUA, &dev->flags); | 1623 | set_bit(R5_WantFUA, &dev->flags); |
| 1624 | if (wbi->bi_rw & REQ_SYNC) | 1624 | if (wbi->bi_opf & REQ_SYNC) |
| 1625 | set_bit(R5_SyncIO, &dev->flags); | 1625 | set_bit(R5_SyncIO, &dev->flags); |
| 1626 | if (bio_op(wbi) == REQ_OP_DISCARD) | 1626 | if (bio_op(wbi) == REQ_OP_DISCARD) |
| 1627 | set_bit(R5_Discard, &dev->flags); | 1627 | set_bit(R5_Discard, &dev->flags); |
| @@ -5154,7 +5154,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) | |||
| 5154 | DEFINE_WAIT(w); | 5154 | DEFINE_WAIT(w); |
| 5155 | bool do_prepare; | 5155 | bool do_prepare; |
| 5156 | 5156 | ||
| 5157 | if (unlikely(bi->bi_rw & REQ_PREFLUSH)) { | 5157 | if (unlikely(bi->bi_opf & REQ_PREFLUSH)) { |
| 5158 | int ret = r5l_handle_flush_request(conf->log, bi); | 5158 | int ret = r5l_handle_flush_request(conf->log, bi); |
| 5159 | 5159 | ||
| 5160 | if (ret == 0) | 5160 | if (ret == 0) |
| @@ -5237,7 +5237,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) | |||
| 5237 | (unsigned long long)logical_sector); | 5237 | (unsigned long long)logical_sector); |
| 5238 | 5238 | ||
| 5239 | sh = raid5_get_active_stripe(conf, new_sector, previous, | 5239 | sh = raid5_get_active_stripe(conf, new_sector, previous, |
| 5240 | (bi->bi_rw & REQ_RAHEAD), 0); | 5240 | (bi->bi_opf & REQ_RAHEAD), 0); |
| 5241 | if (sh) { | 5241 | if (sh) { |
| 5242 | if (unlikely(previous)) { | 5242 | if (unlikely(previous)) { |
| 5243 | /* expansion might have moved on while waiting for a | 5243 | /* expansion might have moved on while waiting for a |
| @@ -5305,7 +5305,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) | |||
| 5305 | set_bit(STRIPE_HANDLE, &sh->state); | 5305 | set_bit(STRIPE_HANDLE, &sh->state); |
| 5306 | clear_bit(STRIPE_DELAYED, &sh->state); | 5306 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 5307 | if ((!sh->batch_head || sh == sh->batch_head) && | 5307 | if ((!sh->batch_head || sh == sh->batch_head) && |
| 5308 | (bi->bi_rw & REQ_SYNC) && | 5308 | (bi->bi_opf & REQ_SYNC) && |
| 5309 | !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) | 5309 | !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 5310 | atomic_inc(&conf->preread_active_stripes); | 5310 | atomic_inc(&conf->preread_active_stripes); |
| 5311 | release_stripe_plug(mddev, sh); | 5311 | release_stripe_plug(mddev, sh); |
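Annotation: the R5_ReadNoMerge path above is a producer of REQ_NOMERGE in bi_opf, and bio_mergeable() in include/linux/bio.h (converted further down in this patch) is the consumer that honours it; the md.c hunk at the top of this section clears the same bit for the mirror-image reason. A self-contained model of that round trip; REQ_NOMERGE_FLAGS is a wider mask in the real headers, but the shape of the check is the same:

	#include <assert.h>
	#include <stdbool.h>

	#define REQ_NOMERGE        (1u << 6)	/* illustrative bit */
	#define REQ_NOMERGE_FLAGS  REQ_NOMERGE

	struct bio { unsigned int bi_opf; };

	static bool bio_mergeable(const struct bio *bio)
	{
		return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
	}

	int main(void)
	{
		struct bio b = { 0 };

		assert(bio_mergeable(&b));
		b.bi_opf |= REQ_NOMERGE;	/* as the R5_ReadNoMerge path does */
		assert(!bio_mergeable(&b));
		return 0;
	}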
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 20bae50c231d..571a6c7ee2fc 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
| @@ -128,7 +128,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) | |||
| 128 | struct pmem_device *pmem = q->queuedata; | 128 | struct pmem_device *pmem = q->queuedata; |
| 129 | struct nd_region *nd_region = to_region(pmem); | 129 | struct nd_region *nd_region = to_region(pmem); |
| 130 | 130 | ||
| 131 | if (bio->bi_rw & REQ_FLUSH) | 131 | if (bio->bi_opf & REQ_FLUSH) |
| 132 | nvdimm_flush(nd_region); | 132 | nvdimm_flush(nd_region); |
| 133 | 133 | ||
| 134 | do_acct = nd_iostat_start(bio, &start); | 134 | do_acct = nd_iostat_start(bio, &start); |
| @@ -144,7 +144,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) | |||
| 144 | if (do_acct) | 144 | if (do_acct) |
| 145 | nd_iostat_end(bio, start); | 145 | nd_iostat_end(bio, start); |
| 146 | 146 | ||
| 147 | if (bio->bi_rw & REQ_FUA) | 147 | if (bio->bi_opf & REQ_FUA) |
| 148 | nvdimm_flush(nd_region); | 148 | nvdimm_flush(nd_region); |
| 149 | 149 | ||
| 150 | bio_endio(bio); | 150 | bio_endio(bio); |
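Annotation: pmem brackets the data copy with two region flushes keyed off different bits: flush-before when the bio asks that prior writes be made durable, flush-after when FUA requires the just-written data to be durable before completion. The shape of pmem_make_request(), excerpted with the accounting and copy loop elided:

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);	/* make earlier writes durable */

	/* ... walk the bio and copy to/from persistent memory ... */

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);	/* make this write durable */
	bio_endio(bio);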
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 5d5cae05818d..66789471b49d 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c | |||
| @@ -2945,7 +2945,7 @@ static void __btrfsic_submit_bio(struct bio *bio) | |||
| 2945 | printk(KERN_INFO | 2945 | printk(KERN_INFO |
| 2946 | "submit_bio(rw=%d,0x%x, bi_vcnt=%u," | 2946 | "submit_bio(rw=%d,0x%x, bi_vcnt=%u," |
| 2947 | " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", | 2947 | " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", |
| 2948 | bio_op(bio), bio->bi_rw, bio->bi_vcnt, | 2948 | bio_op(bio), bio->bi_opf, bio->bi_vcnt, |
| 2949 | (unsigned long long)bio->bi_iter.bi_sector, | 2949 | (unsigned long long)bio->bi_iter.bi_sector, |
| 2950 | dev_bytenr, bio->bi_bdev); | 2950 | dev_bytenr, bio->bi_bdev); |
| 2951 | 2951 | ||
| @@ -2976,18 +2976,18 @@ static void __btrfsic_submit_bio(struct bio *bio) | |||
| 2976 | btrfsic_process_written_block(dev_state, dev_bytenr, | 2976 | btrfsic_process_written_block(dev_state, dev_bytenr, |
| 2977 | mapped_datav, bio->bi_vcnt, | 2977 | mapped_datav, bio->bi_vcnt, |
| 2978 | bio, &bio_is_patched, | 2978 | bio, &bio_is_patched, |
| 2979 | NULL, bio->bi_rw); | 2979 | NULL, bio->bi_opf); |
| 2980 | while (i > 0) { | 2980 | while (i > 0) { |
| 2981 | i--; | 2981 | i--; |
| 2982 | kunmap(bio->bi_io_vec[i].bv_page); | 2982 | kunmap(bio->bi_io_vec[i].bv_page); |
| 2983 | } | 2983 | } |
| 2984 | kfree(mapped_datav); | 2984 | kfree(mapped_datav); |
| 2985 | } else if (NULL != dev_state && (bio->bi_rw & REQ_PREFLUSH)) { | 2985 | } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) { |
| 2986 | if (dev_state->state->print_mask & | 2986 | if (dev_state->state->print_mask & |
| 2987 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) | 2987 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) |
| 2988 | printk(KERN_INFO | 2988 | printk(KERN_INFO |
| 2989 | "submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n", | 2989 | "submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n", |
| 2990 | bio_op(bio), bio->bi_rw, bio->bi_bdev); | 2990 | bio_op(bio), bio->bi_opf, bio->bi_bdev); |
| 2991 | if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { | 2991 | if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { |
| 2992 | if ((dev_state->state->print_mask & | 2992 | if ((dev_state->state->print_mask & |
| 2993 | (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | | 2993 | (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | |
| @@ -3005,7 +3005,7 @@ static void __btrfsic_submit_bio(struct bio *bio) | |||
| 3005 | block->never_written = 0; | 3005 | block->never_written = 0; |
| 3006 | block->iodone_w_error = 0; | 3006 | block->iodone_w_error = 0; |
| 3007 | block->flush_gen = dev_state->last_flush_gen + 1; | 3007 | block->flush_gen = dev_state->last_flush_gen + 1; |
| 3008 | block->submit_bio_bh_rw = bio->bi_rw; | 3008 | block->submit_bio_bh_rw = bio->bi_opf; |
| 3009 | block->orig_bio_bh_private = bio->bi_private; | 3009 | block->orig_bio_bh_private = bio->bi_private; |
| 3010 | block->orig_bio_bh_end_io.bio = bio->bi_end_io; | 3010 | block->orig_bio_bh_end_io.bio = bio->bi_end_io; |
| 3011 | block->next_in_same_bio = NULL; | 3011 | block->next_in_same_bio = NULL; |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 87dad552e39a..59febfb8d04a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -870,7 +870,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | |||
| 870 | 870 | ||
| 871 | atomic_inc(&fs_info->nr_async_submits); | 871 | atomic_inc(&fs_info->nr_async_submits); |
| 872 | 872 | ||
| 873 | if (bio->bi_rw & REQ_SYNC) | 873 | if (bio->bi_opf & REQ_SYNC) |
| 874 | btrfs_set_work_high_priority(&async->work); | 874 | btrfs_set_work_high_priority(&async->work); |
| 875 | 875 | ||
| 876 | btrfs_queue_work(fs_info->workers, &async->work); | 876 | btrfs_queue_work(fs_info->workers, &async->work); |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b0f421f332ae..2f5975954ccf 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -8209,7 +8209,7 @@ static void btrfs_end_dio_bio(struct bio *bio) | |||
| 8209 | if (err) | 8209 | if (err) |
| 8210 | btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, | 8210 | btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, |
| 8211 | "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d", | 8211 | "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d", |
| 8212 | btrfs_ino(dip->inode), bio_op(bio), bio->bi_rw, | 8212 | btrfs_ino(dip->inode), bio_op(bio), bio->bi_opf, |
| 8213 | (unsigned long long)bio->bi_iter.bi_sector, | 8213 | (unsigned long long)bio->bi_iter.bi_sector, |
| 8214 | bio->bi_iter.bi_size, err); | 8214 | bio->bi_iter.bi_size, err); |
| 8215 | 8215 | ||
| @@ -8373,7 +8373,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip, | |||
| 8373 | if (!bio) | 8373 | if (!bio) |
| 8374 | return -ENOMEM; | 8374 | return -ENOMEM; |
| 8375 | 8375 | ||
| 8376 | bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw); | 8376 | bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf); |
| 8377 | bio->bi_private = dip; | 8377 | bio->bi_private = dip; |
| 8378 | bio->bi_end_io = btrfs_end_dio_bio; | 8378 | bio->bi_end_io = btrfs_end_dio_bio; |
| 8379 | btrfs_io_bio(bio)->logical = file_offset; | 8379 | btrfs_io_bio(bio)->logical = file_offset; |
| @@ -8411,7 +8411,7 @@ next_block: | |||
| 8411 | start_sector, GFP_NOFS); | 8411 | start_sector, GFP_NOFS); |
| 8412 | if (!bio) | 8412 | if (!bio) |
| 8413 | goto out_err; | 8413 | goto out_err; |
| 8414 | bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw); | 8414 | bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf); |
| 8415 | bio->bi_private = dip; | 8415 | bio->bi_private = dip; |
| 8416 | bio->bi_end_io = btrfs_end_dio_bio; | 8416 | bio->bi_end_io = btrfs_end_dio_bio; |
| 8417 | btrfs_io_bio(bio)->logical = file_offset; | 8417 | btrfs_io_bio(bio)->logical = file_offset; |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index bb0addce7558..51f125508771 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -6012,7 +6012,7 @@ static void btrfs_end_bio(struct bio *bio) | |||
| 6012 | else | 6012 | else |
| 6013 | btrfs_dev_stat_inc(dev, | 6013 | btrfs_dev_stat_inc(dev, |
| 6014 | BTRFS_DEV_STAT_READ_ERRS); | 6014 | BTRFS_DEV_STAT_READ_ERRS); |
| 6015 | if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH) | 6015 | if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH) |
| 6016 | btrfs_dev_stat_inc(dev, | 6016 | btrfs_dev_stat_inc(dev, |
| 6017 | BTRFS_DEV_STAT_FLUSH_ERRS); | 6017 | BTRFS_DEV_STAT_FLUSH_ERRS); |
| 6018 | btrfs_dev_stat_print_on_error(dev); | 6018 | btrfs_dev_stat_print_on_error(dev); |
| @@ -6089,7 +6089,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root, | |||
| 6089 | bio->bi_next = NULL; | 6089 | bio->bi_next = NULL; |
| 6090 | 6090 | ||
| 6091 | spin_lock(&device->io_lock); | 6091 | spin_lock(&device->io_lock); |
| 6092 | if (bio->bi_rw & REQ_SYNC) | 6092 | if (bio->bi_opf & REQ_SYNC) |
| 6093 | pending_bios = &device->pending_sync_bios; | 6093 | pending_bios = &device->pending_sync_bios; |
| 6094 | else | 6094 | else |
| 6095 | pending_bios = &device->pending_bios; | 6095 | pending_bios = &device->pending_bios; |
| @@ -6127,7 +6127,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, | |||
| 6127 | rcu_read_lock(); | 6127 | rcu_read_lock(); |
| 6128 | name = rcu_dereference(dev->name); | 6128 | name = rcu_dereference(dev->name); |
| 6129 | pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu " | 6129 | pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu " |
| 6130 | "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_rw, | 6130 | "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_opf, |
| 6131 | (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, | 6131 | (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, |
| 6132 | name->str, dev->devid, bio->bi_iter.bi_size); | 6132 | name->str, dev->devid, bio->bi_iter.bi_size); |
| 6133 | rcu_read_unlock(); | 6133 | rcu_read_unlock(); |
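Annotation: of the three btrfs/volumes.c hunks, the scheduling one is the behavioural use of a flag bit: sync-marked bios go onto a dedicated pending list so that, when the worker runs, an fsync is not queued behind a backlog of background writes. Excerpted shape from btrfs_schedule_bio():

	spin_lock(&device->io_lock);
	if (bio->bi_opf & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;
	/* ... append the bio and kick the worker under the same lock ... */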
diff --git a/include/linux/bio.h b/include/linux/bio.h index e09a8895fc31..59ffaa68b11b 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -95,7 +95,7 @@ static inline bool bio_is_rw(struct bio *bio) | |||
| 95 | 95 | ||
| 96 | static inline bool bio_mergeable(struct bio *bio) | 96 | static inline bool bio_mergeable(struct bio *bio) |
| 97 | { | 97 | { |
| 98 | if (bio->bi_rw & REQ_NOMERGE_FLAGS) | 98 | if (bio->bi_opf & REQ_NOMERGE_FLAGS) |
| 99 | return false; | 99 | return false; |
| 100 | 100 | ||
| 101 | return true; | 101 | return true; |
| @@ -318,7 +318,7 @@ struct bio_integrity_payload { | |||
| 318 | 318 | ||
| 319 | static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) | 319 | static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) |
| 320 | { | 320 | { |
| 321 | if (bio->bi_rw & REQ_INTEGRITY) | 321 | if (bio->bi_opf & REQ_INTEGRITY) |
| 322 | return bio->bi_integrity; | 322 | return bio->bi_integrity; |
| 323 | 323 | ||
| 324 | return NULL; | 324 | return NULL; |
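Annotation: bio_integrity() keys off the flag rather than testing the pointer; REQ_INTEGRITY, set by bio_integrity_alloc() in the block/bio-integrity.c hunk near the top of this patch, is what marks the payload as valid. A self-contained model of that guard, with an illustrative flag value and a stub payload type:

	#include <assert.h>
	#include <stddef.h>

	#define REQ_INTEGRITY (1u << 7)		/* illustrative bit */

	struct bio_integrity_payload { int unused; };

	struct bio {
		unsigned int bi_opf;
		struct bio_integrity_payload *bi_integrity;
	};

	static struct bio_integrity_payload *bio_integrity(struct bio *bio)
	{
		if (bio->bi_opf & REQ_INTEGRITY)
			return bio->bi_integrity;
		return NULL;
	}

	int main(void)
	{
		struct bio_integrity_payload bip = { 0 };
		struct bio b = { 0, &bip };

		assert(bio_integrity(&b) == NULL);	/* flag clear: not valid */
		b.bi_opf |= REQ_INTEGRITY;	/* as bio_integrity_alloc() does */
		assert(bio_integrity(&b) == &bip);
		return 0;
	}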
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index f77150a4a96a..10648e300c93 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h | |||
| @@ -714,9 +714,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, | |||
| 714 | 714 | ||
| 715 | if (!throtl) { | 715 | if (!throtl) { |
| 716 | blkg = blkg ?: q->root_blkg; | 716 | blkg = blkg ?: q->root_blkg; |
| 717 | blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw, | 717 | blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf, |
| 718 | bio->bi_iter.bi_size); | 718 | bio->bi_iter.bi_size); |
| 719 | blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1); | 719 | blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1); |
| 720 | } | 720 | } |
| 721 | 721 | ||
| 722 | rcu_read_unlock(); | 722 | rcu_read_unlock(); |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index f254eb264924..436f43f87da9 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
| @@ -27,8 +27,9 @@ struct bio { | |||
| 27 | struct bio *bi_next; /* request queue link */ | 27 | struct bio *bi_next; /* request queue link */ |
| 28 | struct block_device *bi_bdev; | 28 | struct block_device *bi_bdev; |
| 29 | int bi_error; | 29 | int bi_error; |
| 30 | unsigned int bi_rw; /* bottom bits req flags, | 30 | unsigned int bi_opf; /* bottom bits req flags, |
| 31 | * top bits REQ_OP | 31 | * top bits REQ_OP. Use |
| 32 | * accessors. | ||
| 32 | */ | 33 | */ |
| 33 | unsigned short bi_flags; /* status, command, etc */ | 34 | unsigned short bi_flags; /* status, command, etc */ |
| 34 | unsigned short bi_ioprio; | 35 | unsigned short bi_ioprio; |
| @@ -89,13 +90,13 @@ struct bio { | |||
| 89 | }; | 90 | }; |
| 90 | 91 | ||
| 91 | #define BIO_OP_SHIFT (8 * sizeof(unsigned int) - REQ_OP_BITS) | 92 | #define BIO_OP_SHIFT (8 * sizeof(unsigned int) - REQ_OP_BITS) |
| 92 | #define bio_op(bio) ((bio)->bi_rw >> BIO_OP_SHIFT) | 93 | #define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT) |
| 93 | 94 | ||
| 94 | #define bio_set_op_attrs(bio, op, op_flags) do { \ | 95 | #define bio_set_op_attrs(bio, op, op_flags) do { \ |
| 95 | WARN_ON(op >= (1 << REQ_OP_BITS)); \ | 96 | WARN_ON(op >= (1 << REQ_OP_BITS)); \ |
| 96 | (bio)->bi_rw &= ((1 << BIO_OP_SHIFT) - 1); \ | 97 | (bio)->bi_opf &= ((1 << BIO_OP_SHIFT) - 1); \ |
| 97 | (bio)->bi_rw |= ((unsigned int) (op) << BIO_OP_SHIFT); \ | 98 | (bio)->bi_opf |= ((unsigned int) (op) << BIO_OP_SHIFT); \ |
| 98 | (bio)->bi_rw |= op_flags; \ | 99 | (bio)->bi_opf |= op_flags; \ |
| 99 | } while (0) | 100 | } while (0) |
| 100 | 101 | ||
| 101 | #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) | 102 | #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) |
| @@ -138,7 +139,7 @@ struct bio { | |||
| 138 | 139 | ||
| 139 | /* | 140 | /* |
| 140 | * Request flags. For use in the cmd_flags field of struct request, and in | 141 | * Request flags. For use in the cmd_flags field of struct request, and in |
| 141 | * bi_rw of struct bio. Note that some flags are only valid in either one. | 142 | * bi_opf of struct bio. Note that some flags are only valid in either one. |
| 142 | */ | 143 | */ |
| 143 | enum rq_flag_bits { | 144 | enum rq_flag_bits { |
| 144 | /* common flags */ | 145 | /* common flags */ |
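Annotation: this header hunk is the heart of the rename: the single bi_opf word carries the operation in its top REQ_OP_BITS and the request flags below, and bio_op()/bio_set_op_attrs() are the accessors the new struct comment points users at. A self-contained model of the packing, with the two macros copied from the hunk (minus the WARN_ON) and an illustrative flag value:

	#include <assert.h>
	#include <stdio.h>

	#define REQ_OP_BITS	3
	#define BIO_OP_SHIFT	(8 * sizeof(unsigned int) - REQ_OP_BITS)
	#define REQ_OP_WRITE	1u
	#define REQ_SYNC	(1u << 3)	/* illustrative bit */

	struct bio { unsigned int bi_opf; };

	#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT)

	#define bio_set_op_attrs(bio, op, op_flags) do {		\
		(bio)->bi_opf &= ((1u << BIO_OP_SHIFT) - 1);		\
		(bio)->bi_opf |= ((unsigned int)(op) << BIO_OP_SHIFT);	\
		(bio)->bi_opf |= (op_flags);				\
	} while (0)

	int main(void)
	{
		struct bio b = { 0 };

		bio_set_op_attrs(&b, REQ_OP_WRITE, REQ_SYNC);
		assert(bio_op(&b) == REQ_OP_WRITE);	/* op from the top bits */
		assert(b.bi_opf & REQ_SYNC);		/* flag in the low bits */
		printf("bi_opf = %#x\n", b.bi_opf);
		return 0;
	}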
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h index 65673d8b81ac..d336b890e31f 100644 --- a/include/trace/events/bcache.h +++ b/include/trace/events/bcache.h | |||
| @@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(bcache_request, | |||
| 27 | __entry->sector = bio->bi_iter.bi_sector; | 27 | __entry->sector = bio->bi_iter.bi_sector; |
| 28 | __entry->orig_sector = bio->bi_iter.bi_sector - 16; | 28 | __entry->orig_sector = bio->bi_iter.bi_sector - 16; |
| 29 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 29 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
| 30 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, | 30 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, |
| 31 | bio->bi_iter.bi_size); | 31 | bio->bi_iter.bi_size); |
| 32 | ), | 32 | ), |
| 33 | 33 | ||
| @@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(bcache_bio, | |||
| 102 | __entry->dev = bio->bi_bdev->bd_dev; | 102 | __entry->dev = bio->bi_bdev->bd_dev; |
| 103 | __entry->sector = bio->bi_iter.bi_sector; | 103 | __entry->sector = bio->bi_iter.bi_sector; |
| 104 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 104 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
| 105 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, | 105 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, |
| 106 | bio->bi_iter.bi_size); | 106 | bio->bi_iter.bi_size); |
| 107 | ), | 107 | ), |
| 108 | 108 | ||
| @@ -138,7 +138,7 @@ TRACE_EVENT(bcache_read, | |||
| 138 | __entry->dev = bio->bi_bdev->bd_dev; | 138 | __entry->dev = bio->bi_bdev->bd_dev; |
| 139 | __entry->sector = bio->bi_iter.bi_sector; | 139 | __entry->sector = bio->bi_iter.bi_sector; |
| 140 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 140 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
| 141 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, | 141 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, |
| 142 | bio->bi_iter.bi_size); | 142 | bio->bi_iter.bi_size); |
| 143 | __entry->cache_hit = hit; | 143 | __entry->cache_hit = hit; |
| 144 | __entry->bypass = bypass; | 144 | __entry->bypass = bypass; |
| @@ -170,7 +170,7 @@ TRACE_EVENT(bcache_write, | |||
| 170 | __entry->inode = inode; | 170 | __entry->inode = inode; |
| 171 | __entry->sector = bio->bi_iter.bi_sector; | 171 | __entry->sector = bio->bi_iter.bi_sector; |
| 172 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 172 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
| 173 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, | 173 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, |
| 174 | bio->bi_iter.bi_size); | 174 | bio->bi_iter.bi_size); |
| 175 | __entry->writeback = writeback; | 175 | __entry->writeback = writeback; |
| 176 | __entry->bypass = bypass; | 176 | __entry->bypass = bypass; |
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 5a2a7592068f..8f3a163b8166 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
| @@ -274,7 +274,7 @@ TRACE_EVENT(block_bio_bounce, | |||
| 274 | bio->bi_bdev->bd_dev : 0; | 274 | bio->bi_bdev->bd_dev : 0; |
| 275 | __entry->sector = bio->bi_iter.bi_sector; | 275 | __entry->sector = bio->bi_iter.bi_sector; |
| 276 | __entry->nr_sector = bio_sectors(bio); | 276 | __entry->nr_sector = bio_sectors(bio); |
| 277 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, | 277 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, |
| 278 | bio->bi_iter.bi_size); | 278 | bio->bi_iter.bi_size); |
| 279 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 279 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
| 280 | ), | 280 | ), |
| @@ -313,7 +313,7 @@ TRACE_EVENT(block_bio_complete, | |||
| 313 | __entry->sector = bio->bi_iter.bi_sector; | 313 | __entry->sector = bio->bi_iter.bi_sector; |
| 314 | __entry->nr_sector = bio_sectors(bio); | 314 | __entry->nr_sector = bio_sectors(bio); |
| 315 | __entry->error = error; | 315 | __entry->error = error; |
| 316 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, | 316 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, |
| 317 | bio->bi_iter.bi_size); | 317 | bio->bi_iter.bi_size); |
| 318 | ), | 318 | ), |
| 319 | 319 | ||
| @@ -341,7 +341,7 @@ DECLARE_EVENT_CLASS(block_bio_merge, | |||
| 341 | __entry->dev = bio->bi_bdev->bd_dev; | 341 | __entry->dev = bio->bi_bdev->bd_dev; |
| 342 | __entry->sector = bio->bi_iter.bi_sector; | 342 | __entry->sector = bio->bi_iter.bi_sector; |
| 343 | __entry->nr_sector = bio_sectors(bio); | 343 | __entry->nr_sector = bio_sectors(bio); |
| 344 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, | 344 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, |
| 345 | bio->bi_iter.bi_size); | 345 | bio->bi_iter.bi_size); |
| 346 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 346 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
| 347 | ), | 347 | ), |
| @@ -409,7 +409,7 @@ TRACE_EVENT(block_bio_queue, | |||
| 409 | __entry->dev = bio->bi_bdev->bd_dev; | 409 | __entry->dev = bio->bi_bdev->bd_dev; |
| 410 | __entry->sector = bio->bi_iter.bi_sector; | 410 | __entry->sector = bio->bi_iter.bi_sector; |
| 411 | __entry->nr_sector = bio_sectors(bio); | 411 | __entry->nr_sector = bio_sectors(bio); |
| 412 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, | 412 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, |
| 413 | bio->bi_iter.bi_size); | 413 | bio->bi_iter.bi_size); |
| 414 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 414 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
| 415 | ), | 415 | ), |
| @@ -439,7 +439,7 @@ DECLARE_EVENT_CLASS(block_get_rq, | |||
| 439 | __entry->sector = bio ? bio->bi_iter.bi_sector : 0; | 439 | __entry->sector = bio ? bio->bi_iter.bi_sector : 0; |
| 440 | __entry->nr_sector = bio ? bio_sectors(bio) : 0; | 440 | __entry->nr_sector = bio ? bio_sectors(bio) : 0; |
| 441 | blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0, | 441 | blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0, |
| 442 | bio ? bio->bi_rw : 0, __entry->nr_sector); | 442 | bio ? bio->bi_opf : 0, __entry->nr_sector); |
| 443 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 443 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
| 444 | ), | 444 | ), |
| 445 | 445 | ||
| @@ -573,7 +573,7 @@ TRACE_EVENT(block_split, | |||
| 573 | __entry->dev = bio->bi_bdev->bd_dev; | 573 | __entry->dev = bio->bi_bdev->bd_dev; |
| 574 | __entry->sector = bio->bi_iter.bi_sector; | 574 | __entry->sector = bio->bi_iter.bi_sector; |
| 575 | __entry->new_sector = new_sector; | 575 | __entry->new_sector = new_sector; |
| 576 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, | 576 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, |
| 577 | bio->bi_iter.bi_size); | 577 | bio->bi_iter.bi_size); |
| 578 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 578 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
| 579 | ), | 579 | ), |
| @@ -617,7 +617,7 @@ TRACE_EVENT(block_bio_remap, | |||
| 617 | __entry->nr_sector = bio_sectors(bio); | 617 | __entry->nr_sector = bio_sectors(bio); |
| 618 | __entry->old_dev = dev; | 618 | __entry->old_dev = dev; |
| 619 | __entry->old_sector = from; | 619 | __entry->old_sector = from; |
| 620 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, | 620 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, |
| 621 | bio->bi_iter.bi_size); | 621 | bio->bi_iter.bi_size); |
| 622 | ), | 622 | ), |
| 623 | 623 | ||
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index fb345cd11883..7598e6ca817a 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
| @@ -776,7 +776,7 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, | |||
| 776 | return; | 776 | return; |
| 777 | 777 | ||
| 778 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, | 778 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, |
| 779 | bio_op(bio), bio->bi_rw, what, error, 0, NULL); | 779 | bio_op(bio), bio->bi_opf, what, error, 0, NULL); |
| 780 | } | 780 | } |
| 781 | 781 | ||
| 782 | static void blk_add_trace_bio_bounce(void *ignore, | 782 | static void blk_add_trace_bio_bounce(void *ignore, |
| @@ -881,7 +881,7 @@ static void blk_add_trace_split(void *ignore, | |||
| 881 | __be64 rpdu = cpu_to_be64(pdu); | 881 | __be64 rpdu = cpu_to_be64(pdu); |
| 882 | 882 | ||
| 883 | __blk_add_trace(bt, bio->bi_iter.bi_sector, | 883 | __blk_add_trace(bt, bio->bi_iter.bi_sector, |
| 884 | bio->bi_iter.bi_size, bio_op(bio), bio->bi_rw, | 884 | bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, |
| 885 | BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu), | 885 | BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu), |
| 886 | &rpdu); | 886 | &rpdu); |
| 887 | } | 887 | } |
| @@ -915,7 +915,7 @@ static void blk_add_trace_bio_remap(void *ignore, | |||
| 915 | r.sector_from = cpu_to_be64(from); | 915 | r.sector_from = cpu_to_be64(from); |
| 916 | 916 | ||
| 917 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, | 917 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, |
| 918 | bio_op(bio), bio->bi_rw, BLK_TA_REMAP, bio->bi_error, | 918 | bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_error, |
| 919 | sizeof(r), &r); | 919 | sizeof(r), &r); |
| 920 | } | 920 | } |
| 921 | 921 | ||
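Annotation: every tracing conversion in this patch lands on the same call shape: blk_fill_rwbs() receives the operation and the flag bits as separate arguments, both derived from the one bi_opf field via the accessor. The two variants that recur above, excerpted with generic local names:

	/* common case: a bio is in hand */
	blk_fill_rwbs(rwbs, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_size);

	/* get_rq tracepoints, where the bio may be NULL */
	blk_fill_rwbs(rwbs, bio ? bio_op(bio) : 0,
		      bio ? bio->bi_opf : 0, nr_sector);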
