diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-13 17:22:26 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-13 17:22:26 -0500 |
commit | 9ea18f8cab5f1c36cdd0f09717e35ceb48c36a87 (patch) | |
tree | 0c8da7ac47cb59fe39f177ab0407f554aff77194 /drivers/md | |
parent | caf292ae5bb9d57198ce001d8b762f7abae3a94d (diff) | |
parent | 849c6e7746e4f6317ace6aa7d2fcdcd844e99ddb (diff) |
Merge branch 'for-3.19/drivers' of git://git.kernel.dk/linux-block
Pull block layer driver updates from Jens Axboe:
- NVMe updates:
- The blk-mq conversion from Matias (and others)
- A stack of NVMe bug fixes from the nvme tree, mostly from Keith.
- Various bug fixes from me, addressing issues in the blk-mq
conversion as well as generic bugs.
- Abort and CPU online fix from Sam.
- Hot add/remove fix from Indraneel.
- A couple of drbd fixes from the drbd team (Andreas, Lars, Philipp)
- With the generic IO stat accounting from 3.19/core, convert md,
bcache, and rsxx to use it. From Gu Zheng.
- Boundary check for queue/irq mode for null_blk from Matias. Fixes
cases where invalid values could be given, causing the device to hang.
- The xen blkfront pull request, with two bug fixes from Vitaly.
* 'for-3.19/drivers' of git://git.kernel.dk/linux-block: (56 commits)
NVMe: fix race condition in nvme_submit_sync_cmd()
NVMe: fix retry/error logic in nvme_queue_rq()
NVMe: Fix FS mount issue (hot-remove followed by hot-add)
NVMe: fix error return checking from blk_mq_alloc_request()
NVMe: fix freeing of wrong request in abort path
xen/blkfront: remove redundant flush_op
xen/blkfront: improve protection against issuing unsupported REQ_FUA
NVMe: Fix command setup on IO retry
null_blk: boundary check queue_mode and irqmode
block/rsxx: use generic io stats accounting functions to simplify io stat accounting
md: use generic io stats accounting functions to simplify io stat accounting
drbd: use generic io stats accounting functions to simplify io stat accounting
md/bcache: use generic io stats accounting functions to simplify io stat accounting
NVMe: Update module version major number
NVMe: fail pci initialization if the device doesn't have any BARs
NVMe: add ->exit_hctx() hook
NVMe: make setup work for devices that don't do INTx
NVMe: enable IO stats by default
NVMe: nvme_submit_async_admin_req() must use atomic rq allocation
NVMe: replace blk_put_request() with blk_mq_free_request()
...
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/bcache/request.c | 23 | ||||
-rw-r--r-- | drivers/md/dm.c | 13 | ||||
-rw-r--r-- | drivers/md/md.c | 6 |
3 files changed, 10 insertions, 32 deletions
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 62e6e98186b5..ab43faddb447 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
@@ -601,13 +601,8 @@ static void request_endio(struct bio *bio, int error) | |||
601 | static void bio_complete(struct search *s) | 601 | static void bio_complete(struct search *s) |
602 | { | 602 | { |
603 | if (s->orig_bio) { | 603 | if (s->orig_bio) { |
604 | int cpu, rw = bio_data_dir(s->orig_bio); | 604 | generic_end_io_acct(bio_data_dir(s->orig_bio), |
605 | unsigned long duration = jiffies - s->start_time; | 605 | &s->d->disk->part0, s->start_time); |
606 | |||
607 | cpu = part_stat_lock(); | ||
608 | part_round_stats(cpu, &s->d->disk->part0); | ||
609 | part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration); | ||
610 | part_stat_unlock(); | ||
611 | 606 | ||
612 | trace_bcache_request_end(s->d, s->orig_bio); | 607 | trace_bcache_request_end(s->d, s->orig_bio); |
613 | bio_endio(s->orig_bio, s->iop.error); | 608 | bio_endio(s->orig_bio, s->iop.error); |
@@ -959,12 +954,9 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio) | |||
959 | struct search *s; | 954 | struct search *s; |
960 | struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; | 955 | struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; |
961 | struct cached_dev *dc = container_of(d, struct cached_dev, disk); | 956 | struct cached_dev *dc = container_of(d, struct cached_dev, disk); |
962 | int cpu, rw = bio_data_dir(bio); | 957 | int rw = bio_data_dir(bio); |
963 | 958 | ||
964 | cpu = part_stat_lock(); | 959 | generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0); |
965 | part_stat_inc(cpu, &d->disk->part0, ios[rw]); | ||
966 | part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio)); | ||
967 | part_stat_unlock(); | ||
968 | 960 | ||
969 | bio->bi_bdev = dc->bdev; | 961 | bio->bi_bdev = dc->bdev; |
970 | bio->bi_iter.bi_sector += dc->sb.data_offset; | 962 | bio->bi_iter.bi_sector += dc->sb.data_offset; |
@@ -1074,12 +1066,9 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) | |||
1074 | struct search *s; | 1066 | struct search *s; |
1075 | struct closure *cl; | 1067 | struct closure *cl; |
1076 | struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; | 1068 | struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; |
1077 | int cpu, rw = bio_data_dir(bio); | 1069 | int rw = bio_data_dir(bio); |
1078 | 1070 | ||
1079 | cpu = part_stat_lock(); | 1071 | generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0); |
1080 | part_stat_inc(cpu, &d->disk->part0, ios[rw]); | ||
1081 | part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio)); | ||
1082 | part_stat_unlock(); | ||
1083 | 1072 | ||
1084 | s = search_alloc(bio, d); | 1073 | s = search_alloc(bio, d); |
1085 | cl = &s->cl; | 1074 | cl = &s->cl; |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 8f37ed215b19..4c06585bf165 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -605,13 +605,10 @@ static void end_io_acct(struct dm_io *io) | |||
605 | struct mapped_device *md = io->md; | 605 | struct mapped_device *md = io->md; |
606 | struct bio *bio = io->bio; | 606 | struct bio *bio = io->bio; |
607 | unsigned long duration = jiffies - io->start_time; | 607 | unsigned long duration = jiffies - io->start_time; |
608 | int pending, cpu; | 608 | int pending; |
609 | int rw = bio_data_dir(bio); | 609 | int rw = bio_data_dir(bio); |
610 | 610 | ||
611 | cpu = part_stat_lock(); | 611 | generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time); |
612 | part_round_stats(cpu, &dm_disk(md)->part0); | ||
613 | part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration); | ||
614 | part_stat_unlock(); | ||
615 | 612 | ||
616 | if (unlikely(dm_stats_used(&md->stats))) | 613 | if (unlikely(dm_stats_used(&md->stats))) |
617 | dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, | 614 | dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, |
@@ -1651,16 +1648,12 @@ static void _dm_request(struct request_queue *q, struct bio *bio) | |||
1651 | { | 1648 | { |
1652 | int rw = bio_data_dir(bio); | 1649 | int rw = bio_data_dir(bio); |
1653 | struct mapped_device *md = q->queuedata; | 1650 | struct mapped_device *md = q->queuedata; |
1654 | int cpu; | ||
1655 | int srcu_idx; | 1651 | int srcu_idx; |
1656 | struct dm_table *map; | 1652 | struct dm_table *map; |
1657 | 1653 | ||
1658 | map = dm_get_live_table(md, &srcu_idx); | 1654 | map = dm_get_live_table(md, &srcu_idx); |
1659 | 1655 | ||
1660 | cpu = part_stat_lock(); | 1656 | generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); |
1661 | part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]); | ||
1662 | part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio)); | ||
1663 | part_stat_unlock(); | ||
1664 | 1657 | ||
1665 | /* if we're suspended, we have to queue this io for later */ | 1658 | /* if we're suspended, we have to queue this io for later */ |
1666 | if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { | 1659 | if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 9233c71138f1..056ccd28c037 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -247,7 +247,6 @@ static void md_make_request(struct request_queue *q, struct bio *bio) | |||
247 | { | 247 | { |
248 | const int rw = bio_data_dir(bio); | 248 | const int rw = bio_data_dir(bio); |
249 | struct mddev *mddev = q->queuedata; | 249 | struct mddev *mddev = q->queuedata; |
250 | int cpu; | ||
251 | unsigned int sectors; | 250 | unsigned int sectors; |
252 | 251 | ||
253 | if (mddev == NULL || mddev->pers == NULL | 252 | if (mddev == NULL || mddev->pers == NULL |
@@ -284,10 +283,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio) | |||
284 | sectors = bio_sectors(bio); | 283 | sectors = bio_sectors(bio); |
285 | mddev->pers->make_request(mddev, bio); | 284 | mddev->pers->make_request(mddev, bio); |
286 | 285 | ||
287 | cpu = part_stat_lock(); | 286 | generic_start_io_acct(rw, sectors, &mddev->gendisk->part0); |
288 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); | ||
289 | part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); | ||
290 | part_stat_unlock(); | ||
291 | 287 | ||
292 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) | 288 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) |
293 | wake_up(&mddev->sb_wait); | 289 | wake_up(&mddev->sb_wait); |