author	Michael Callahan <michaelcallahan@fb.com>	2018-07-18 07:47:39 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-07-18 10:44:20 -0400
commit	ddcf35d397976421a4ec1d0d00fbcc027a8cb034 (patch)
tree	ebffc9e2971b6c7bc3223a1c584a9c81372a34bc /drivers/md/bcache/request.c
parent	dbae2c551377b6533a00c11fc7ede370100ab404 (diff)
block: Add and use op_stat_group() for indexing disk_stat fields.
Add and use a new op_stat_group() function for indexing partition stat
fields rather than indexing them by rq_data_dir() or bio_data_dir().
This function works similarly to op_is_sync() in that it takes the
request::cmd_flags or bio::bi_opf flags and determines which stats
should be updated.
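For reference, the new helper is a thin wrapper over the existing op-flag accessors; a minimal sketch of its likely shape (the authoritative definition lives in include/linux/blk_types.h and may differ in detail) looks like this:

/* Sketch only: map a request::cmd_flags / bio::bi_opf word to a stat group. */
static inline int op_stat_group(unsigned int op)
{
	/* reads land in group 0, writes in group 1 */
	return op_is_write(op);
}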
In addition, the second parameter to generic_start_io_acct() and
generic_end_io_acct() is now a REQ_OP rather than simply a read or
write bit and it uses op_stat_group() on the parameter to determine
the stat group.
Note that the partition in_flight counts are not part of the per-cpu
statistics and as such are not indexed via this function. They are
indexed by op_is_write() instead.
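For context, a hedged sketch of how the start-accounting helper is expected to consume the new op argument (the real body lives in block/bio.c; the exact code may differ):

void generic_start_io_acct(struct request_queue *q, int op,
			   unsigned long sectors, struct hd_struct *part)
{
	const int sgrp = op_stat_group(op);	/* per-cpu stat group: read or write */
	int cpu = part_stat_lock();

	part_round_stats(q, cpu, part);
	part_stat_inc(cpu, part, ios[sgrp]);
	part_stat_add(cpu, part, sectors[sgrp], sectors);
	/* in_flight is not a per-cpu counter, so it stays keyed by op_is_write() */
	part_inc_in_flight(q, part, op_is_write(op));

	part_stat_unlock();
}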
tj: Refreshed on top of v4.17. Updated to pass around REQ_OP.
Signed-off-by: Michael Callahan <michaelcallahan@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philipp Reisner <philipp.reisner@linbit.com>
Cc: Matias Bjorling <mb@lightnvm.io>
Cc: Kent Overstreet <kent.overstreet@gmail.com>
Cc: Alasdair Kergon <agk@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--	drivers/md/bcache/request.c	13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index ae67f5fa8047..97707b0c54ce 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -667,8 +667,7 @@ static void backing_request_endio(struct bio *bio)
 static void bio_complete(struct search *s)
 {
 	if (s->orig_bio) {
-		generic_end_io_acct(s->d->disk->queue,
-				    bio_data_dir(s->orig_bio),
+		generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
 				    &s->d->disk->part0, s->start_time);
 
 		trace_bcache_request_end(s->d, s->orig_bio);
@@ -1062,8 +1061,7 @@ static void detached_dev_end_io(struct bio *bio)
 	bio->bi_end_io = ddip->bi_end_io;
 	bio->bi_private = ddip->bi_private;
 
-	generic_end_io_acct(ddip->d->disk->queue,
-			    bio_data_dir(bio),
+	generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
 			    &ddip->d->disk->part0, ddip->start_time);
 
 	if (bio->bi_status) {
@@ -1120,7 +1118,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
 	}
 
 	atomic_set(&dc->backing_idle, 0);
-	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
+	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
 
 	bio_set_dev(bio, dc->bdev);
 	bio->bi_iter.bi_sector += dc->sb.data_offset;
@@ -1229,7 +1227,6 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
 	struct search *s;
 	struct closure *cl;
 	struct bcache_device *d = bio->bi_disk->private_data;
-	int rw = bio_data_dir(bio);
 
 	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
 		bio->bi_status = BLK_STS_IOERR;
@@ -1237,7 +1234,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
 		return BLK_QC_T_NONE;
 	}
 
-	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
+	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
 
 	s = search_alloc(bio, d);
 	cl = &s->cl;
@@ -1254,7 +1251,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
 				      flash_dev_nodata,
 				      bcache_wq);
 		return BLK_QC_T_NONE;
-	} else if (rw) {
+	} else if (bio_data_dir(bio)) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
 					&KEY(d->id, bio->bi_iter.bi_sector, 0),
 					&KEY(d->id, bio_end_sector(bio), 0));