author		Nikanth Karthikesan <knikanth@suse.de>	2009-10-06 14:16:55 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-10-06 14:16:55 -0400
commit		316d315bffa4026f28085f6b24ebcebede370ac7 (patch)
tree		10b6b057fec2382536371d2e14f9d4c0d6cf9eea /drivers/md
parent		23e018a1b083ecb4b8bb2fb43d58e7c19b5d7959 (diff)
block: Separate read and write statistics of in_flight requests v2
Commit a9327cac440be4d8333bba975cbbf76045096275 added separate read
and write statistics of in_flight requests, and exported the number
of read and write requests in progress separately through sysfs.
But Corrado Zoccolo <czoccolo@gmail.com> reported getting strange
output from "iostat -kx 2": the global values for service time and
utilization were garbage, and in the interval values utilization was
always 100% while service time was higher than normal.
So the change was reverted by commit 0f78ab9899e9d6acb09d5465def618704255963b.
The problem was in part_round_stats_single(), where I had missed the
following hunk:
	if (now == part->stamp)
		return;

-	if (part->in_flight) {
+	if (part_in_flight(part)) {
		__part_stat_add(cpu, part, time_in_queue,
				part_in_flight(part) * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
With this chunk included, the reported regression gets fixed.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
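
For reference, the helper that the corrected check calls simply sums the
two per-direction counters. Below is a minimal sketch of part_in_flight()
as it would have looked in that era's include/linux/genhd.h, reconstructed
from the commit context rather than quoted verbatim:

static inline int part_in_flight(struct hd_struct *part)
{
	/* Reconstruction, not a verbatim quote: in_flight[0] counts
	 * reads in flight, in_flight[1] counts writes in flight. */
	return part->in_flight[0] + part->in_flight[1];
}

This also explains the reported symptoms: once in_flight became an array,
the old test "if (part->in_flight)" was always true (the array decays to a
non-NULL pointer), so io_ticks kept accumulating even while the device was
idle and iostat's utilization pinned at 100%.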
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm.c	16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 23e76fe0d359..376f1ab48a24 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -130,7 +130,7 @@ struct mapped_device {
 	/*
 	 * A list of ios that arrived while we were suspended.
 	 */
-	atomic_t pending;
+	atomic_t pending[2];
 	wait_queue_head_t wait;
 	struct work_struct work;
 	struct bio_list deferred;
@@ -453,13 +453,14 @@ static void start_io_acct(struct dm_io *io)
 {
 	struct mapped_device *md = io->md;
 	int cpu;
+	int rw = bio_data_dir(io->bio);
 
 	io->start_time = jiffies;
 
 	cpu = part_stat_lock();
 	part_round_stats(cpu, &dm_disk(md)->part0);
 	part_stat_unlock();
-	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -479,8 +480,9 @@ static void end_io_acct(struct dm_io *io)
 	 * After this is decremented the bio must not be touched if it is
 	 * a barrier.
 	 */
-	dm_disk(md)->part0.in_flight = pending =
-		atomic_dec_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = pending =
+		atomic_dec_return(&md->pending[rw]);
+	pending += atomic_read(&md->pending[rw^0x1]);
 
 	/* nudge anyone waiting on suspend queue */
 	if (!pending)
@@ -1785,7 +1787,8 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->disk)
 		goto bad_disk;
 
-	atomic_set(&md->pending, 0);
+	atomic_set(&md->pending[0], 0);
+	atomic_set(&md->pending[1], 0);
 	init_waitqueue_head(&md->wait);
 	INIT_WORK(&md->work, dm_wq_work);
 	init_waitqueue_head(&md->eventq);
@@ -2088,7 +2091,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 				break;
 			}
 			spin_unlock_irqrestore(q->queue_lock, flags);
-		} else if (!atomic_read(&md->pending))
+		} else if (!atomic_read(&md->pending[0]) &&
+			   !atomic_read(&md->pending[1]))
 			break;
 
 		if (interruptible == TASK_INTERRUPTIBLE &&
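
Taken together, the dm.c hunks replace the single pending counter with a
pair indexed by I/O direction (bio_data_dir() yields 0 for a read, 1 for a
write), and every consumer, including the suspend drain in
dm_wait_for_completion(), must now look at both entries. The following
self-contained userspace sketch illustrates the same accounting pattern;
the names are hypothetical and C11 atomics stand in for the kernel's
atomic_t:

/* Illustration of per-direction in-flight accounting: one atomic
 * counter per I/O direction, and a drain check that must observe
 * both directions. Hypothetical names, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

#define READ  0	/* matches the kernel's READ/WRITE values */
#define WRITE 1

static atomic_int pending[2];	/* like md->pending[2] */

static void start_io(int rw)
{
	atomic_fetch_add(&pending[rw], 1);
}

/* Mirrors end_io_acct(): the count for this direction after the
 * decrement, plus whatever is still pending in the opposite
 * direction (rw ^ 0x1). The device is idle only when this hits 0. */
static int end_io(int rw)
{
	int n = atomic_fetch_sub(&pending[rw], 1) - 1;	/* fetch_sub returns the old value */
	return n + atomic_load(&pending[rw ^ 0x1]);
}

int main(void)
{
	start_io(READ);
	start_io(WRITE);
	printf("after read completes:  %d in flight\n", end_io(READ));	/* 1 */
	printf("after write completes: %d in flight\n", end_io(WRITE));	/* 0 */
	return 0;
}

The rw ^ 0x1 read is the crux: a completion in one direction must still see
I/O outstanding in the other before the suspend path may declare the device
quiesced.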