author | Jens Axboe <axboe@fb.com> | 2016-08-05 17:35:16 -0400
---|---|---
committer | Jens Axboe <axboe@fb.com> | 2016-08-07 16:41:02 -0400
commit | 1eff9d322a444245c67515edb52bc0eb68374aa8 |
tree | aed4c3bfdf94202b93b9b5ce74c6e247f4c3ab85 | drivers/md/bcache/request.c
parent | 31c64f78767948986c6c4c6f488803722c6b0e7a |
block: rename bio bi_rw to bi_opf
Since commit 63a4cc24867d, bio->bi_rw contains flags in the lower
portion and the op code in the higher portion. This means that
old code that relies on manually setting bi_rw is most likely
going to be broken. Instead of letting that brokenness linger,
rename the member, to force old and out-of-tree code to break
at compile time instead of at runtime.
No intended functional changes in this commit.
Signed-off-by: Jens Axboe <axboe@fb.com>
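For context on the packed layout the message refers to: around v4.8 the op code occupied the top REQ_OP_BITS bits of the field, with the REQ_* flags in the bits below. The standalone C sketch below models that packing in userspace; the bit width mirrors the kernel headers of that era, but the specific op and flag values here are invented for illustration.

```c
#include <stdio.h>

/* Userspace model of the v4.8-era bi_opf packing (illustrative values):
 * the 3-bit REQ_OP_* code sits in the top bits of the 32-bit field,
 * REQ_* flags occupy the bits below it. */
#define REQ_OP_BITS   3
#define BIO_OP_SHIFT  (8 * (int)sizeof(unsigned int) - REQ_OP_BITS)

#define REQ_OP_WRITE  1u            /* example op code */
#define REQ_SYNC      (1u << 3)     /* example flag bit, position assumed */

static unsigned int pack_opf(unsigned int op, unsigned int flags)
{
	return (op << BIO_OP_SHIFT) | flags;
}

int main(void)
{
	unsigned int opf = pack_opf(REQ_OP_WRITE, REQ_SYNC);

	/* bio_op()/bio_flags() equivalents: shift out the op, mask the flags */
	printf("op=%u flags=%#x\n",
	       opf >> BIO_OP_SHIFT,
	       opf & ((1u << BIO_OP_SHIFT) - 1));
	return 0;
}
```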
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r-- | drivers/md/bcache/request.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 69f16f43f8ab..4b177fe11ebb 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -208,7 +208,7 @@ static void bch_data_insert_start(struct closure *cl)
 	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
 	 * flush, it'll wait on the journal write.
 	 */
-	bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
+	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
 
 	do {
 		unsigned i;
@@ -405,7 +405,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	if (!congested &&
 	    mode == CACHE_MODE_WRITEBACK &&
 	    op_is_write(bio_op(bio)) &&
-	    (bio->bi_rw & REQ_SYNC))
+	    (bio->bi_opf & REQ_SYNC))
 		goto rescale;
 
 	spin_lock(&dc->io_lock);
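This hunk also shows the split that the rename enforces: the operation is extracted with bio_op() and classified via helpers like op_is_write(), while flags such as REQ_SYNC are still tested by masking bi_opf directly. For reference, op_is_write() in this era reduced to a parity check, since write-type op codes were assigned odd values; a minimal sketch (assumed to match v4.8, shown here userspace-style):

```c
#include <stdbool.h>

/* Sketch of the v4.8-era op_is_write(): write-type REQ_OP_* codes were
 * odd, so checking the low bit of the op suffices. Illustrative, not the
 * verbatim kernel definition. */
static inline bool op_is_write(unsigned int op)
{
	return op & 1;
}
```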
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.write_prio = 0;
 	s->iop.error = 0;
 	s->iop.flags = 0;
-	s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
+	s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
 	s->iop.wq = bcache_wq;
 
 	return s;
@@ -796,8 +796,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		goto out_submit;
 	}
 
-	if (!(bio->bi_rw & REQ_RAHEAD) &&
-	    !(bio->bi_rw & REQ_META) &&
+	if (!(bio->bi_opf & REQ_RAHEAD) &&
+	    !(bio->bi_opf & REQ_META) &&
 	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
 		reada = min_t(sector_t, dc->readahead >> 9,
 			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
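Aside from the rename, this hunk shows the readahead sizing logic: dc->readahead is in bytes, so ">> 9" converts it to 512-byte sectors, and min_t() clamps it so readahead never runs past the end of the backing device. A standalone sketch of that arithmetic (names mirror the hunk; the values in main are invented):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Clamp a byte-sized readahead window to the sectors remaining on the
 * device, mirroring the min_t() expression in the hunk above. */
static sector_t reada_sectors(uint64_t readahead_bytes,
			      sector_t dev_sectors, sector_t bio_end)
{
	sector_t want = readahead_bytes >> 9;   /* bytes -> 512B sectors */
	sector_t left = dev_sectors - bio_end;  /* sectors to end of device */

	return want < left ? want : left;
}

int main(void)
{
	/* e.g. a 128 KiB readahead window near the end of a small device:
	 * 256 sectors wanted, only 48 left, so 48 is returned */
	printf("%llu\n",
	       (unsigned long long)reada_sectors(128 * 1024, 2048, 2000));
	return 0;
}
```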
@@ -920,7 +920,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		bch_writeback_add(dc);
 		s->iop.bio = bio;
 
-		if (bio->bi_rw & REQ_PREFLUSH) {
+		if (bio->bi_opf & REQ_PREFLUSH) {
 			/* Also need to send a flush to the backing device */
 			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
 							     dc->disk.bio_split);
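The hunk stops at the allocation of the flush bio. As a hedged sketch of what typically follows in this era (assumed, not part of the diff shown): the new bio is pointed at the backing device and its op and flags are stamped with the bio_set_op_attrs() helper rather than by writing bi_opf by hand. request_endio and closure_bio_submit are bcache-internal names, and WRITE_FLUSH was the flag bundle of the day.

```c
/* Assumed continuation, for illustration only (not part of this hunk): */
flush->bi_bdev    = bio->bi_bdev;
flush->bi_end_io  = request_endio;    /* bcache's completion callback */
flush->bi_private = cl;
/* set the op (REQ_OP_WRITE) and flags (WRITE_FLUSH) via the helper
 * instead of assigning bi_opf directly */
bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);

closure_bio_submit(flush, cl);
```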