path: root/drivers/md/raid0.c
author	Tejun Heo <tj@kernel.org>	2008-08-25 06:47:21 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-10-09 02:56:06 -0400
commit	c9959059161ddd7bf4670cf47367033d6b2f79c4 (patch)
tree	6454db55f8e34361fe472358e10e0c5cfac1e366 /drivers/md/raid0.c
parent	e71bf0d0ee89e51b92776391c5634938236977d5 (diff)
block: fix diskstats access
There are two variants of stat functions - ones prefixed with double underbars, which don't care about preemption, and ones without, which disable preemption before manipulating per-cpu counters. It's unclear whether the underbarred ones assume that preemption is disabled on entry, as some callers don't do that.

This patch unifies diskstats access by implementing disk_stat_lock() and disk_stat_unlock(), which take care of both RCU (for partition access) and preemption (for per-cpu counter access). diskstats access should always be enclosed between the two functions. As such, there's no need for the versions which disable preemption; they're removed and the double-underbar ones are renamed to drop the underbars. As an extra argument is added, there's no danger of using the old versions unconverted.

disk_stat_lock() uses get_cpu() and returns the cpu index, and all diskstat functions which access per-cpu counters now take a @cpu argument, to help RT.

This change adds RCU or preemption operations at some places but also collapses several preemption ops into one at others. Overall, the performance difference should be negligible as all involved ops are very lightweight per-cpu ones.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
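For context, a minimal sketch (not the verbatim patch) of how the two helpers described above can be built from existing kernel primitives; the real definitions this patch adds live in include/linux/genhd.h and may differ in detail:

#include <linux/rcupdate.h>	/* rcu_read_lock(), rcu_read_unlock() */
#include <linux/smp.h>		/* get_cpu(), put_cpu() */

/*
 * Hypothetical sketch: enter an RCU read-side critical section (protects
 * the partition lookup) and disable preemption via get_cpu() (protects the
 * per-cpu counters), returning the CPU index that the stat accessors take
 * as their new @cpu argument.
 */
#define disk_stat_lock()	({ rcu_read_lock(); get_cpu(); })

/* Undo the above in reverse order. */
#define disk_stat_unlock()	do { put_cpu(); rcu_read_unlock(); } while (0)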
Diffstat (limited to 'drivers/md/raid0.c')
-rw-r--r--	drivers/md/raid0.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 183610635661..e26030fa59ab 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -399,14 +399,17 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
 	sector_t chunk;
 	sector_t block, rsect;
 	const int rw = bio_data_dir(bio);
+	int cpu;
 
 	if (unlikely(bio_barrier(bio))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
 
-	disk_stat_inc(mddev->gendisk, ios[rw]);
-	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+	cpu = disk_stat_lock();
+	disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
+	disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
+	disk_stat_unlock();
 
 	chunk_size = mddev->chunk_size >> 10;
 	chunk_sects = mddev->chunk_size >> 9;