author     Jens Axboe <jaxboe@fusionio.com>  2010-10-24 16:06:02 -0400
committer  Jens Axboe <jaxboe@fusionio.com>  2010-10-24 16:06:02 -0400
commit     f253b86b4ad1b3220544e75880510fd455ebd23f (patch)
tree       cc2dd76b8ffc8df4356c1e95bd15276169dd335e /block/blk-core.c
parent     35da7a307c535f9c2929cae277f3df425c9f9b1e (diff)
Revert "block: fix accounting bug on cross partition merges"
This reverts commit 7681bfeeccff5efa9eb29bf09249a3c400b15327.

Conflicts:
	include/linux/genhd.h

It has numerous issues with the cleanup path and non-elevator
devices. Revert it for now so we can come up with a clean version
without rushing things.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
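For context, a minimal user-space sketch of the behavioural difference this revert restores: after the revert, the partition charged for a request is re-resolved from its current start sector at every accounting call (disk_map_sector_rcu()), whereas the reverted commit looked the partition up once at submission, cached it in rq->part, and reused it later. The struct names, the map_sector() helper and the "merge" below are hypothetical stand-ins, not kernel code; they only illustrate that the two approaches can disagree once a merge moves a request's start sector across a partition boundary.

#include <stdio.h>

/* Toy model only -- hypothetical stand-ins for struct hd_struct,
 * disk_map_sector_rcu() and request merging; not kernel code. */
struct partition { const char *name; unsigned long start, len; };

static struct partition parts[] = {
	{ "sda1",   0, 100 },
	{ "sda2", 100, 100 },
};

/* stand-in for disk_map_sector_rcu(): map a sector to its partition */
static struct partition *map_sector(unsigned long sector)
{
	for (int i = 0; i < 2; i++)
		if (sector >= parts[i].start && sector < parts[i].start + parts[i].len)
			return &parts[i];
	return NULL;
}

struct request { unsigned long sector; struct partition *part; };

int main(void)
{
	/* request submitted in sda2; the reverted commit cached the partition here */
	struct request rq = { .sector = 150, .part = NULL };
	rq.part = map_sector(rq.sector);

	/* a front merge pulls the start sector back into sda1 */
	rq.sector = 50;

	/* cached pointer still says sda2, per-call lookup now says sda1 */
	printf("cached:   %s\n", rq.part->name);
	printf("per-call: %s\n", map_sector(rq.sector)->name);
	return 0;
}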
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  24
1 file changed, 8 insertions(+), 16 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 51efd835d4c..f8548876d7e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -64,15 +64,13 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		return;
 
 	cpu = part_stat_lock();
+	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
-	if (!new_io) {
-		part = rq->part;
+	if (!new_io)
 		part_stat_inc(cpu, part, merges[rw]);
-	} else {
-		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+	else {
 		part_round_stats(cpu, part);
 		part_inc_in_flight(part, rw);
-		rq->part = part;
 	}
 
 	part_stat_unlock();
@@ -130,7 +128,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->ref_count = 1;
 	rq->start_time = jiffies;
 	set_start_time_ns(rq);
-	rq->part = NULL;
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -805,16 +802,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	rl->starved[is_sync] = 0;
 
 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-	if (priv) {
+	if (priv)
 		rl->elvpriv++;
 
-		/*
-		 * Don't do stats for non-priv requests
-		 */
-		if (blk_queue_io_stat(q))
-			rw_flags |= REQ_IO_STAT;
-	}
-
+	if (blk_queue_io_stat(q))
+		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
@@ -1791,7 +1783,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = req->part;
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
 		part_stat_unlock();
 	}
@@ -1811,7 +1803,7 @@ static void blk_account_io_done(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = req->part;
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);