aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-core.c
diff options
context:
space:
mode:
authorYasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>2010-10-19 03:05:00 -0400
committerJens Axboe <jaxboe@fusionio.com>2010-10-19 03:07:02 -0400
commit7681bfeeccff5efa9eb29bf09249a3c400b15327 (patch)
tree8557964a2df96e253dcf1a61734b98dbfbf192d6 /block/blk-core.c
parent495d2b3883682fcd1c3dee3a45e38fd00154ae25 (diff)
block: fix accounting bug on cross partition merges
/proc/diskstats would display a strange output as follows. $ cat /proc/diskstats |grep sda 8 0 sda 90524 7579 102154 20464 0 0 0 0 0 14096 20089 8 1 sda1 19085 1352 21841 4209 0 0 0 0 4294967064 15689 4293424691 ~~~~~~~~~~ 8 2 sda2 71252 3624 74891 15950 0 0 0 0 232 23995 1562390 8 3 sda3 54 487 2188 92 0 0 0 0 0 88 92 8 4 sda4 4 0 8 0 0 0 0 0 0 0 0 8 5 sda5 81 2027 2130 138 0 0 0 0 0 87 137 Its reason is the wrong way of accounting hd_struct->in_flight. When a bio is merged into a request belongs to different partition by ELEVATOR_FRONT_MERGE. The detailed root cause is as follows. Assuming that there are two partition, sda1 and sda2. 1. A request for sda2 is in request_queue. Hence sda1's hd_struct->in_flight is 0 and sda2's one is 1. | hd_struct->in_flight --------------------------- sda1 | 0 sda2 | 1 --------------------------- 2. A bio belongs to sda1 is issued and is merged into the request mentioned on step1 by ELEVATOR_BACK_MERGE. The first sector of the request is changed from sda2 region to sda1 region. However the two partition's hd_struct->in_flight are not changed. | hd_struct->in_flight --------------------------- sda1 | 0 sda2 | 1 --------------------------- 3. The request is finished and blk_account_io_done() is called. In this case, sda2's hd_struct->in_flight, not a sda1's one, is decremented. | hd_struct->in_flight --------------------------- sda1 | -1 sda2 | 1 --------------------------- The patch fixes the problem by caching the partition lookup inside the request structure, hence making sure that the increment and decrement will always happen on the same partition struct. This also speeds up IO with accounting enabled, since it cuts down on the number of lookups we have to do. When reloading partition tables, quiesce IO to ensure that no request references to the partition struct exists. When it is safe to free the partition table, the IO for that device is restarted again. 
Signed-off-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com> Cc: stable@kernel.org Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--block/blk-core.c24
1 file changed, 16 insertions, 8 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 797d5095eb83..ddc68332d655 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -64,13 +64,15 @@ static void drive_stat_acct(struct request *rq, int new_io)
64 return; 64 return;
65 65
66 cpu = part_stat_lock(); 66 cpu = part_stat_lock();
67 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
68 67
69 if (!new_io) 68 if (!new_io) {
69 part = rq->part;
70 part_stat_inc(cpu, part, merges[rw]); 70 part_stat_inc(cpu, part, merges[rw]);
71 else { 71 } else {
72 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
72 part_round_stats(cpu, part); 73 part_round_stats(cpu, part);
73 part_inc_in_flight(part, rw); 74 part_inc_in_flight(part, rw);
75 rq->part = part;
74 } 76 }
75 77
76 part_stat_unlock(); 78 part_stat_unlock();
@@ -128,6 +130,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
128 rq->ref_count = 1; 130 rq->ref_count = 1;
129 rq->start_time = jiffies; 131 rq->start_time = jiffies;
130 set_start_time_ns(rq); 132 set_start_time_ns(rq);
133 rq->part = NULL;
131} 134}
132EXPORT_SYMBOL(blk_rq_init); 135EXPORT_SYMBOL(blk_rq_init);
133 136
@@ -804,11 +807,16 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
804 rl->starved[is_sync] = 0; 807 rl->starved[is_sync] = 0;
805 808
806 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 809 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
807 if (priv) 810 if (priv) {
808 rl->elvpriv++; 811 rl->elvpriv++;
809 812
810 if (blk_queue_io_stat(q)) 813 /*
811 rw_flags |= REQ_IO_STAT; 814 * Don't do stats for non-priv requests
815 */
816 if (blk_queue_io_stat(q))
817 rw_flags |= REQ_IO_STAT;
818 }
819
812 spin_unlock_irq(q->queue_lock); 820 spin_unlock_irq(q->queue_lock);
813 821
814 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); 822 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
@@ -1777,7 +1785,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
1777 int cpu; 1785 int cpu;
1778 1786
1779 cpu = part_stat_lock(); 1787 cpu = part_stat_lock();
1780 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 1788 part = req->part;
1781 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 1789 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1782 part_stat_unlock(); 1790 part_stat_unlock();
1783 } 1791 }
@@ -1797,7 +1805,7 @@ static void blk_account_io_done(struct request *req)
1797 int cpu; 1805 int cpu;
1798 1806
1799 cpu = part_stat_lock(); 1807 cpu = part_stat_lock();
1800 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 1808 part = req->part;
1801 1809
1802 part_stat_inc(cpu, part, ios[rw]); 1810 part_stat_inc(cpu, part, ios[rw]);
1803 part_stat_add(cpu, part, ticks[rw], duration); 1811 part_stat_add(cpu, part, ticks[rw], duration);