Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm.c          27
-rw-r--r--   drivers/md/linear.c       9
-rw-r--r--   drivers/md/md.c           4
-rw-r--r--   drivers/md/multipath.c    9
-rw-r--r--   drivers/md/raid0.c        9
-rw-r--r--   drivers/md/raid1.c        9
-rw-r--r--   drivers/md/raid10.c       9
-rw-r--r--   drivers/md/raid5.c        9
8 files changed, 46 insertions, 39 deletions
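Every hunk below follows the same pattern: the per-gendisk statistics helpers (disk_stat_lock/inc/add/unlock, disk_round_stats) are replaced by the per-partition helpers operating on the disk's built-in partition 0, so whole-device I/O is now charged to &gendisk->part0. A minimal sketch of the new accounting sequence, assuming only the part_stat_* calls visible in the hunks (the wrapper name account_bio is invented here for illustration, not part of this change):

/* Illustrative only: charge one bio to the whole-device stats in part0. */
static void account_bio(struct gendisk *disk, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	int cpu;

	cpu = part_stat_lock();		/* pin a per-CPU stats slot */
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();
}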
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 637806695bb9..327de03a5bdf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -381,10 +381,10 @@ static void start_io_acct(struct dm_io *io)
 
 	io->start_time = jiffies;
 
-	cpu = disk_stat_lock();
-	disk_round_stats(cpu, dm_disk(md));
-	disk_stat_unlock();
-	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+	cpu = part_stat_lock();
+	part_round_stats(cpu, &dm_disk(md)->part0);
+	part_stat_unlock();
+	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
 }
 
 static int end_io_acct(struct dm_io *io)
@@ -395,12 +395,13 @@ static int end_io_acct(struct dm_io *io)
 	int pending, cpu;
 	int rw = bio_data_dir(bio);
 
-	cpu = disk_stat_lock();
-	disk_round_stats(cpu, dm_disk(md));
-	disk_stat_add(cpu, dm_disk(md), ticks[rw], duration);
-	disk_stat_unlock();
+	cpu = part_stat_lock();
+	part_round_stats(cpu, &dm_disk(md)->part0);
+	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
+	part_stat_unlock();
 
-	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+	dm_disk(md)->part0.in_flight = pending =
+		atomic_dec_return(&md->pending);
 
 	return !pending;
 }
@@ -899,10 +900,10 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 
 	down_read(&md->io_lock);
 
-	cpu = disk_stat_lock();
-	disk_stat_inc(cpu, dm_disk(md), ios[rw]);
-	disk_stat_add(cpu, dm_disk(md), sectors[rw], bio_sectors(bio));
-	disk_stat_unlock();
+	cpu = part_stat_lock();
+	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
+	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
+	part_stat_unlock();
 
 	/*
 	 * If we're suspended we have to queue
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 00cbc8e47294..c80ea90593d3 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -325,10 +325,11 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 		return 0;
 	}
 
-	cpu = disk_stat_lock();
-	disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-	disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
-	disk_stat_unlock();
+	cpu = part_stat_lock();
+	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+		      bio_sectors(bio));
+	part_stat_unlock();
 
 	tmp_dev = which_dev(mddev, bio->bi_sector);
 	block = bio->bi_sector >> 1;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2bd9cf416123..0a3a4bdcd4af 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5546,8 +5546,8 @@ static int is_mddev_idle(mddev_t *mddev)
 	rcu_read_lock();
 	rdev_for_each_rcu(rdev, mddev) {
 		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
-		curr_events = disk_stat_read(disk, sectors[0]) +
-			      disk_stat_read(disk, sectors[1]) -
+		curr_events = part_stat_read(&disk->part0, sectors[0]) +
+			      part_stat_read(&disk->part0, sectors[1]) -
 			atomic_read(&disk->sync_io);
 		/* sync IO will cause sync_io to increase before the disk_stats
 		 * as sync_io is counted when a request starts, and
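The md.c hunk is the read side of the same conversion: is_mddev_idle() now pulls the read and write sector counters out of disk->part0 with part_stat_read() instead of disk_stat_read(). A hedged sketch of that calculation (the helper name mddev_disk_events is invented here; the accessors are exactly the ones in the hunk):

/* Illustrative only: sectors transferred on the member disk, excluding
 * the I/O generated by resync itself (tracked in disk->sync_io). */
static int mddev_disk_events(struct gendisk *disk)
{
	return part_stat_read(&disk->part0, sectors[0]) +
	       part_stat_read(&disk->part0, sectors[1]) -
	       atomic_read(&disk->sync_io);
}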
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 182f5a94cdc5..8bb8794129b3 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -159,10 +159,11 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 	mp_bh->master_bio = bio;
 	mp_bh->mddev = mddev;
 
-	cpu = disk_stat_lock();
-	disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-	disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
-	disk_stat_unlock();
+	cpu = part_stat_lock();
+	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+		      bio_sectors(bio));
+	part_stat_unlock();
 
 	mp_bh->path = multipath_map(conf);
 	if (mp_bh->path < 0) {
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e26030fa59ab..f52f442a735f 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -406,10 +406,11 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
 		return 0;
 	}
 
-	cpu = disk_stat_lock();
-	disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-	disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
-	disk_stat_unlock();
+	cpu = part_stat_lock();
+	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+		      bio_sectors(bio));
+	part_stat_unlock();
 
 	chunk_size = mddev->chunk_size >> 10;
 	chunk_sects = mddev->chunk_size >> 9;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index babb13036f93..b9764429d856 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -804,10 +804,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
 	bitmap = mddev->bitmap;
 
-	cpu = disk_stat_lock();
-	disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-	disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
-	disk_stat_unlock();
+	cpu = part_stat_lock();
+	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+		      bio_sectors(bio));
+	part_stat_unlock();
 
 	/*
 	 * make_request() can abort the operation when READA is being
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5ec80da0a9d7..5f990133f5ef 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -844,10 +844,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	 */
 	wait_barrier(conf);
 
-	cpu = disk_stat_lock();
-	disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-	disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
-	disk_stat_unlock();
+	cpu = part_stat_lock();
+	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+		      bio_sectors(bio));
+	part_stat_unlock();
 
 	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5899f211515f..ae16794bef20 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3396,10 +3396,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
 
 	md_write_start(mddev, bi);
 
-	cpu = disk_stat_lock();
-	disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-	disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bi));
-	disk_stat_unlock();
+	cpu = part_stat_lock();
+	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+		      bio_sectors(bi));
+	part_stat_unlock();
 
 	if (rw == READ &&
 	    mddev->reshape_position == MaxSector &&