author     Tejun Heo <tj@kernel.org>           2008-08-25 06:56:14 -0400
committer  Jens Axboe <jens.axboe@oracle.com>  2008-10-09 02:56:08 -0400
commit     074a7aca7afa6f230104e8e65eba3420263714a5 (patch)
tree       f418313e45bd55be8156c8a3e8f9a216cf63058d /drivers
parent     eddb2e26b5ee3c5da68ba4bf1921ba20e2097bff (diff)
block: move stats from disk to part0
Move stats related fields - stamp, in_flight, dkstats - from disk to
part0 and unify stat handling such that:

* part_stat_*() now updates part0 together if the specified partition
  is not part0, i.e. part_stat_*() are now essentially all_stat_*().

* {disk|all}_stat_*() are gone.

* part_round_stats() is updated similarly.  It handles part0 stats
  automatically and disk_round_stats() is killed.

* part_{inc|dec}_in_flight() is implemented and automatically updates
  part0 stats for parts other than part0.

* disk_map_sector_rcu() is updated to return part0 if no part matches.
  Combined with the above changes, this makes NULL special-case
  handling in callers unnecessary.

* Separate stats show code paths for disk are collapsed into the part
  stats show code paths.

* disk_stat_lock/unlock() are renamed to part_stat_lock/unlock().

While at it, reposition the stat handling macros a bit and add missing
parentheses around macro parameters.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
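[Editor's note: a minimal standalone sketch of the accounting rule the commit describes, not the kernel implementation. The struct layouts and the part_account_io() helper below are simplified stand-ins for the per-cpu part_stat_*() macros; they only illustrate why callers in the diff can update a single hd_struct and still get correct whole-disk totals in part0. Compiles with any C compiler.]

/*
 * Minimal userspace model of the part0-unified accounting rule.
 * The real helpers are per-cpu macros in the kernel; these types and
 * part_account_io() are illustrative stand-ins only.
 */
#include <stdio.h>

struct gendisk;

struct part_stats {
        unsigned long ios[2];           /* [0] = reads, [1] = writes */
        unsigned long sectors[2];
};

struct hd_struct {
        int partno;                     /* 0 means the whole-disk part0 */
        struct part_stats stats;
        struct gendisk *disk;           /* owning disk, used to reach part0 */
};

struct gendisk {
        struct hd_struct part0;         /* whole-disk stats now live here */
};

/*
 * Account one I/O against a partition.  As described above, an update
 * to any partition other than part0 is mirrored into part0, so no
 * separate disk-level accounting path is needed.
 */
static void part_account_io(struct hd_struct *part, int rw,
                            unsigned long nr_sectors)
{
        part->stats.ios[rw]++;
        part->stats.sectors[rw] += nr_sectors;

        if (part->partno) {             /* not part0: fold into disk totals */
                struct hd_struct *part0 = &part->disk->part0;

                part0->stats.ios[rw]++;
                part0->stats.sectors[rw] += nr_sectors;
        }
}

int main(void)
{
        struct gendisk disk = { .part0 = { .partno = 0 } };
        struct hd_struct p1 = { .partno = 1, .disk = &disk };

        part_account_io(&p1, 1, 8);             /* 8-sector write to partition 1 */
        part_account_io(&disk.part0, 0, 4);     /* 4-sector read hitting no partition */

        /* part0 carries the disk-wide totals: 4 sectors read, 8 written */
        printf("disk totals: reads=%lu (%lu sectors), writes=%lu (%lu sectors)\n",
               disk.part0.stats.ios[0], disk.part0.stats.sectors[0],
               disk.part0.stats.ios[1], disk.part0.stats.sectors[1]);
        return 0;
}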
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/aoe/aoecmd.c  | 12
-rw-r--r--  drivers/md/dm.c             | 27
-rw-r--r--  drivers/md/linear.c         |  9
-rw-r--r--  drivers/md/md.c             |  4
-rw-r--r--  drivers/md/multipath.c      |  9
-rw-r--r--  drivers/md/raid0.c          |  9
-rw-r--r--  drivers/md/raid1.c          |  9
-rw-r--r--  drivers/md/raid10.c         |  9
-rw-r--r--  drivers/md/raid5.c          |  9
9 files changed, 52 insertions, 45 deletions
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 934800f979c9..961d29a53cab 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -758,15 +758,15 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
         struct hd_struct *part;
         int cpu;
 
-        cpu = disk_stat_lock();
+        cpu = part_stat_lock();
         part = disk_map_sector_rcu(disk, sector);
 
-        all_stat_inc(cpu, disk, part, ios[rw], sector);
-        all_stat_add(cpu, disk, part, ticks[rw], duration, sector);
-        all_stat_add(cpu, disk, part, sectors[rw], n_sect, sector);
-        all_stat_add(cpu, disk, part, io_ticks, duration, sector);
+        part_stat_inc(cpu, part, ios[rw]);
+        part_stat_add(cpu, part, ticks[rw], duration);
+        part_stat_add(cpu, part, sectors[rw], n_sect);
+        part_stat_add(cpu, part, io_ticks, duration);
 
-        disk_stat_unlock();
+        part_stat_unlock();
 }
 
 void
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 637806695bb9..327de03a5bdf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -381,10 +381,10 @@ static void start_io_acct(struct dm_io *io)
 
         io->start_time = jiffies;
 
-        cpu = disk_stat_lock();
-        disk_round_stats(cpu, dm_disk(md));
-        disk_stat_unlock();
-        dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+        cpu = part_stat_lock();
+        part_round_stats(cpu, &dm_disk(md)->part0);
+        part_stat_unlock();
+        dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
 }
 
 static int end_io_acct(struct dm_io *io)
@@ -395,12 +395,13 @@ static int end_io_acct(struct dm_io *io)
         int pending, cpu;
         int rw = bio_data_dir(bio);
 
-        cpu = disk_stat_lock();
-        disk_round_stats(cpu, dm_disk(md));
-        disk_stat_add(cpu, dm_disk(md), ticks[rw], duration);
-        disk_stat_unlock();
+        cpu = part_stat_lock();
+        part_round_stats(cpu, &dm_disk(md)->part0);
+        part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
+        part_stat_unlock();
 
-        dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+        dm_disk(md)->part0.in_flight = pending =
+                atomic_dec_return(&md->pending);
 
         return !pending;
 }
@@ -899,10 +900,10 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 
         down_read(&md->io_lock);
 
-        cpu = disk_stat_lock();
-        disk_stat_inc(cpu, dm_disk(md), ios[rw]);
-        disk_stat_add(cpu, dm_disk(md), sectors[rw], bio_sectors(bio));
-        disk_stat_unlock();
+        cpu = part_stat_lock();
+        part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
+        part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
+        part_stat_unlock();
 
         /*
          * If we're suspended we have to queue
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 00cbc8e47294..c80ea90593d3 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -325,10 +325,11 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
                 return 0;
         }
 
-        cpu = disk_stat_lock();
-        disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-        disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
-        disk_stat_unlock();
+        cpu = part_stat_lock();
+        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                      bio_sectors(bio));
+        part_stat_unlock();
 
         tmp_dev = which_dev(mddev, bio->bi_sector);
         block = bio->bi_sector >> 1;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2bd9cf416123..0a3a4bdcd4af 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5546,8 +5546,8 @@ static int is_mddev_idle(mddev_t *mddev)
         rcu_read_lock();
         rdev_for_each_rcu(rdev, mddev) {
                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
-                curr_events = disk_stat_read(disk, sectors[0]) +
-                                disk_stat_read(disk, sectors[1]) -
+                curr_events = part_stat_read(&disk->part0, sectors[0]) +
+                                part_stat_read(&disk->part0, sectors[1]) -
                                 atomic_read(&disk->sync_io);
                 /* sync IO will cause sync_io to increase before the disk_stats
                  * as sync_io is counted when a request starts, and
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 182f5a94cdc5..8bb8794129b3 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -159,10 +159,11 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
         mp_bh->master_bio = bio;
         mp_bh->mddev = mddev;
 
-        cpu = disk_stat_lock();
-        disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-        disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
-        disk_stat_unlock();
+        cpu = part_stat_lock();
+        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                      bio_sectors(bio));
+        part_stat_unlock();
 
         mp_bh->path = multipath_map(conf);
         if (mp_bh->path < 0) {
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e26030fa59ab..f52f442a735f 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -406,10 +406,11 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
                 return 0;
         }
 
-        cpu = disk_stat_lock();
-        disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-        disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
-        disk_stat_unlock();
+        cpu = part_stat_lock();
+        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                      bio_sectors(bio));
+        part_stat_unlock();
 
         chunk_size = mddev->chunk_size >> 10;
         chunk_sects = mddev->chunk_size >> 9;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index babb13036f93..b9764429d856 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -804,10 +804,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
         bitmap = mddev->bitmap;
 
-        cpu = disk_stat_lock();
-        disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-        disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
-        disk_stat_unlock();
+        cpu = part_stat_lock();
+        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                      bio_sectors(bio));
+        part_stat_unlock();
 
         /*
          * make_request() can abort the operation when READA is being
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5ec80da0a9d7..5f990133f5ef 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -844,10 +844,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
          */
         wait_barrier(conf);
 
-        cpu = disk_stat_lock();
-        disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-        disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
-        disk_stat_unlock();
+        cpu = part_stat_lock();
+        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                      bio_sectors(bio));
+        part_stat_unlock();
 
         r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5899f211515f..ae16794bef20 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3396,10 +3396,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
 
         md_write_start(mddev, bi);
 
-        cpu = disk_stat_lock();
-        disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
-        disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bi));
-        disk_stat_unlock();
+        cpu = part_stat_lock();
+        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                      bio_sectors(bi));
+        part_stat_unlock();
 
         if (rw == READ &&
             mddev->reshape_position == MaxSector &&