author     Mike Christie <mchristi@redhat.com>    2016-06-05 15:31:47 -0400
committer  Jens Axboe <axboe@fb.com>              2016-06-07 15:41:38 -0400
commit     c8d93247f1d0cf478222a7f4fc37d453d6193d04
tree       c39ae14478ad516b0a01a975181833f30a483f63
parent     511116669346a0029b7e54eaaa8e5a7029f89ab3
bcache: use op_is_write instead of checking for REQ_WRITE
We currently set REQ_WRITE/WRITE for all non-READ IOs
such as discard, flush, write same, etc. In the next patches,
where the op is no longer set up as a bitmap, we will not be able
to detect the direction of an operation like write same by testing
whether REQ_WRITE is set.

Have bcache use the op_is_write helper, which does the right thing.
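For context, op_is_write() classifies an IO by its REQ_OP number instead of
testing the REQ_WRITE flag bit, so operations like write same and discard are
still recognized as writes once the op stops living in a flag bitmap. Below is
a minimal standalone sketch of the semantics; the enum values and the main()
harness are illustrative, not the kernel's exact definitions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch only: op numbers modeled after the kernel's REQ_OP values,
     * not the verbatim definitions. */
    enum req_op {
            REQ_OP_READ,
            REQ_OP_WRITE,
            REQ_OP_DISCARD,
            REQ_OP_FLUSH,
            REQ_OP_WRITE_SAME,
    };

    /* Direction check by op number: every op except READ moves data
     * toward the device, so it counts as a write.  (Later kernels give
     * write ops odd numbers and reduce this test to "op & 1".) */
    static bool op_is_write(unsigned int op)
    {
            return op != REQ_OP_READ;
    }

    int main(void)
    {
            /* write same is a write even with no REQ_WRITE flag in sight */
            printf("WRITE_SAME is a write: %d\n", op_is_write(REQ_OP_WRITE_SAME));
            printf("READ is a write:       %d\n", op_is_write(REQ_OP_READ));
            return 0;
    }

In the hunks below, bio_op(bio) extracts this op number from the bio, and
op_is_write(bio_op(bio)) replaces the old bio->bi_rw & REQ_WRITE flag test.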
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--  drivers/md/bcache/io.c      | 2 +-
-rw-r--r--  drivers/md/bcache/request.c | 6 +++---

2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 86a0bb87124e..fd885cc2afad 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -111,7 +111,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct cache *ca = PTR_CACHE(c, &b->key, 0);
 
-	unsigned threshold = bio->bi_rw & REQ_WRITE
+	unsigned threshold = op_is_write(bio_op(bio))
 		? c->congested_write_threshold_us
 		: c->congested_read_threshold_us;
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 25fa8445bb24..6b85a23ec92a 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -383,7 +383,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	if (mode == CACHE_MODE_NONE ||
 	    (mode == CACHE_MODE_WRITEAROUND &&
-	     (bio->bi_rw & REQ_WRITE)))
+	     op_is_write(bio_op(bio))))
 		goto skip;
 
 	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
@@ -404,7 +404,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	if (!congested &&
 	    mode == CACHE_MODE_WRITEBACK &&
-	    (bio->bi_rw & REQ_WRITE) &&
+	    op_is_write(bio_op(bio)) &&
 	    (bio->bi_rw & REQ_SYNC))
 		goto rescale;
 
@@ -657,7 +657,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->cache_miss = NULL;
 	s->d = d;
 	s->recoverable = 1;
-	s->write = (bio->bi_rw & REQ_WRITE) != 0;
+	s->write = op_is_write(bio_op(bio));
 	s->read_dirty_data = 0;
 	s->start_time = jiffies;
 