author     NeilBrown <neilb@suse.de>    2010-03-25 01:20:56 -0400
committer  NeilBrown <neilb@suse.de>    2010-05-18 01:27:52 -0400
commit     490773268cf64f68da2470e07b52c7944da6312d (patch)
tree       d394aafa7203c316db6b63f128b8894e18993fca /drivers/md
parent     2b7f22284d71975e37a82db154386348eec0e52c (diff)
md: move io accounting out of personalities into md_make_request
While I generally prefer letting personalities do as much as possible,
given that we have a central md_make_request anyway, we may as well use
it to simplify the code.
Also, this centralises knowledge of ->gendisk, which will help later.
Signed-off-by: NeilBrown <neilb@suse.de>
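In outline, the accounting that every personality used to repeat now happens once in md_make_request, wrapped around the call into the personality. The following is a sketch assembled from the md.c hunks below, with the unrelated suspend/active_io handling elided:

/* Sketch of md_make_request after this patch; see the md.c hunks below
 * for the exact change. Elided parts are marked with "...". */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
        const int rw = bio_data_dir(bio);
        mddev_t *mddev = q->queuedata;
        int rv;
        int cpu;

        /* ... NULL/suspended checks and active_io bookkeeping ... */

        rv = mddev->pers->make_request(q, bio);  /* personality does the real work */

        /* Per-CPU I/O statistics, formerly duplicated in every personality,
         * charged once against the array's gendisk. */
        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], bio_sectors(bio));
        part_stat_unlock();

        /* ... wake up sb_wait if the array is being suspended ... */
        return rv;
}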
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/linear.c      8
-rw-r--r--  drivers/md/md.c         11
-rw-r--r--  drivers/md/multipath.c   8
-rw-r--r--  drivers/md/raid0.c       8
-rw-r--r--  drivers/md/raid1.c       7
-rw-r--r--  drivers/md/raid10.c      7
-rw-r--r--  drivers/md/raid5.c       8
7 files changed, 12 insertions, 45 deletions
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 9db8ee0614a4..3048c1704f40 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,23 +288,15 @@ static int linear_stop (mddev_t *mddev)
 
 static int linear_make_request (struct request_queue *q, struct bio *bio)
 {
-	const int rw = bio_data_dir(bio);
 	mddev_t *mddev = q->queuedata;
 	dev_info_t *tmp_dev;
 	sector_t start_sector;
-	int cpu;
 
 	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
 
-	cpu = part_stat_lock();
-	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bio));
-	part_stat_unlock();
-
 	rcu_read_lock();
 	tmp_dev = which_dev(mddev, bio->bi_sector);
 	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c5a1b0725c9f..117663d2a4e5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -214,8 +214,11 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  */
 static int md_make_request(struct request_queue *q, struct bio *bio)
 {
+	const int rw = bio_data_dir(bio);
 	mddev_t *mddev = q->queuedata;
 	int rv;
+	int cpu;
+
 	if (mddev == NULL || mddev->pers == NULL) {
 		bio_io_error(bio);
 		return 0;
@@ -236,7 +239,15 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 	}
 	atomic_inc(&mddev->active_io);
 	rcu_read_unlock();
+
 	rv = mddev->pers->make_request(q, bio);
+
+	cpu = part_stat_lock();
+	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+		      bio_sectors(bio));
+	part_stat_unlock();
+
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
 		wake_up(&mddev->sb_wait);
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 97befd5cc0e3..5b4e2918663a 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -141,8 +141,6 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 	multipath_conf_t *conf = mddev->private;
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
-	const int rw = bio_data_dir(bio);
-	int cpu;
 
 	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		md_barrier_request(mddev, bio);
@@ -154,12 +152,6 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 	mp_bh->master_bio = bio;
 	mp_bh->mddev = mddev;
 
-	cpu = part_stat_lock();
-	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bio));
-	part_stat_unlock();
-
 	mp_bh->path = multipath_map(conf);
 	if (mp_bh->path < 0) {
 		bio_endio(bio, -EIO);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index afddf624bad3..d535f9be39f4 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -472,20 +472,12 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
 	sector_t sector_offset;
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;
-	const int rw = bio_data_dir(bio);
-	int cpu;
 
 	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
 
-	cpu = part_stat_lock();
-	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bio));
-	part_stat_unlock();
-
 	chunk_sects = mddev->chunk_sectors;
 	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
 		sector_t sector = bio->bi_sector;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 23a7516abbfd..e277013ac808 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,7 +787,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
 	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
-	int cpu;
 	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;
 
@@ -833,12 +832,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
 	bitmap = mddev->bitmap;
 
-	cpu = part_stat_lock();
-	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bio));
-	part_stat_unlock();
-
 	/*
 	 * make_request() can abort the operation when READA is being
 	 * used and no empty request is available.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 57d71d5d88f4..ca313d646fd1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -795,7 +795,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	mirror_info_t *mirror;
 	r10bio_t *r10_bio;
 	struct bio *read_bio;
-	int cpu;
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
@@ -850,12 +849,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	 */
 	wait_barrier(conf);
 
-	cpu = part_stat_lock();
-	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bio));
-	part_stat_unlock();
-
 	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 	r10_bio->master_bio = bio;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7bfeba3ce1e0..c6ae7c194915 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3879,7 +3879,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 	sector_t logical_sector, last_sector;
 	struct stripe_head *sh;
 	const int rw = bio_data_dir(bi);
-	int cpu, remaining;
+	int remaining;
 
 	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
 		/* Drain all pending writes. We only really need
@@ -3894,12 +3894,6 @@ static int make_request(struct request_queue *q, struct bio * bi)
 
 	md_write_start(mddev, bi);
 
-	cpu = part_stat_lock();
-	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bi));
-	part_stat_unlock();
-
 	if (rw == READ &&
 	    mddev->reshape_position == MaxSector &&
 	    chunk_aligned_read(q,bi))