author     Jens Axboe <jaxboe@fusionio.com>  2010-10-24 16:06:02 -0400
committer  Jens Axboe <jaxboe@fusionio.com>  2010-10-24 16:06:02 -0400
commit     f253b86b4ad1b3220544e75880510fd455ebd23f
tree       cc2dd76b8ffc8df4356c1e95bd15276169dd335e /block
parent     35da7a307c535f9c2929cae277f3df425c9f9b1e
Revert "block: fix accounting bug on cross partition merges"
This reverts commit 7681bfeeccff5efa9eb29bf09249a3c400b15327.
Conflicts:

	include/linux/genhd.h
It has numerous issues with the cleanup path and non-elevator
devices. Revert it for now so we can come up with a clean
version without rushing things.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
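
For context on what the revert changes mechanically: the patch being reverted cached the owning partition in the request at allocation time (rq->part) and accounted merges and completions against that cached pointer, which in turn required quiescing the elevator around partition-table replacement in genhd.c so the cached pointer could not outlive its table. The code restored below instead re-resolves the partition from the request's sector via disk_map_sector_rcu() at every accounting point. The following is a minimal, self-contained userspace sketch of that contrast only; fake_disk, fake_part, fake_request and map_sector() are made-up stand-ins for illustration, not the kernel's struct gendisk/hd_struct/request or its APIs.

/*
 * Sketch of the two accounting strategies touched by this revert.
 * Names are illustrative only (NOT kernel structures or functions).
 */
#include <stdio.h>

struct fake_part {
	unsigned long long start, len;	/* partition extent in sectors */
	unsigned long ios;		/* I/Os accounted to this partition */
};

struct fake_disk {
	struct fake_part *parts;
	int nparts;
};

struct fake_request {
	unsigned long long sector;
	struct fake_part *part;		/* cached owner (the reverted scheme) */
};

/* Stand-in for disk_map_sector_rcu(): map a sector to its partition. */
static struct fake_part *map_sector(struct fake_disk *d, unsigned long long sec)
{
	for (int i = 0; i < d->nparts; i++)
		if (sec >= d->parts[i].start &&
		    sec < d->parts[i].start + d->parts[i].len)
			return &d->parts[i];
	return &d->parts[0];	/* fall back to the first entry */
}

/* Restored behaviour: resolve the partition at every accounting point. */
static void account_done_lookup(struct fake_disk *d, struct fake_request *rq)
{
	map_sector(d, rq->sector)->ios++;
}

/* Reverted behaviour: account against the pointer cached at alloc time.
 * The cached pointer goes stale if the partition table is replaced in
 * between, which is why the original patch had to quiesce the elevator. */
static void account_done_cached(struct fake_request *rq)
{
	rq->part->ios++;
}

int main(void)
{
	struct fake_part parts[] = {
		{ .start = 0,    .len = 1000 },
		{ .start = 1000, .len = 1000 },
	};
	struct fake_disk disk = { .parts = parts, .nparts = 2 };
	struct fake_request rq = { .sector = 1500 };

	rq.part = map_sector(&disk, rq.sector);	/* done once at allocation */
	account_done_cached(&rq);		/* reverted scheme */
	account_done_lookup(&disk, &rq);	/* scheme restored by this commit */

	printf("ios accounted to partition 1: %lu\n", parts[1].ios);
	return 0;
}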
Diffstat (limited to 'block')
 -rw-r--r--  block/blk-core.c  | 24
 -rw-r--r--  block/blk-merge.c |  2
 -rw-r--r--  block/blk.h       |  4
 -rw-r--r--  block/genhd.c     | 14
 4 files changed, 13 insertions(+), 31 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 51efd835d4cf..f8548876d7ea 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -64,15 +64,13 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		return;
 
 	cpu = part_stat_lock();
+	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
-	if (!new_io) {
-		part = rq->part;
+	if (!new_io)
 		part_stat_inc(cpu, part, merges[rw]);
-	} else {
-		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+	else {
 		part_round_stats(cpu, part);
 		part_inc_in_flight(part, rw);
-		rq->part = part;
 	}
 
 	part_stat_unlock();
@@ -130,7 +128,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->ref_count = 1;
 	rq->start_time = jiffies;
 	set_start_time_ns(rq);
-	rq->part = NULL;
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -805,16 +802,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	rl->starved[is_sync] = 0;
 
 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-	if (priv) {
+	if (priv)
 		rl->elvpriv++;
 
-		/*
-		 * Don't do stats for non-priv requests
-		 */
-		if (blk_queue_io_stat(q))
-			rw_flags |= REQ_IO_STAT;
-	}
-
+	if (blk_queue_io_stat(q))
+		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
@@ -1791,7 +1783,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = req->part;
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
 		part_stat_unlock();
 	}
@@ -1811,7 +1803,7 @@ static void blk_account_io_done(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = req->part;
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0a2fd8a48a38..77b7c26df6b5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = req->part;
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part, rq_data_dir(req));
diff --git a/block/blk.h b/block/blk.h
index 1e675e5ade02..2db8f32838e7 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -116,6 +116,10 @@ void blk_queue_congestion_threshold(struct request_queue *q);
 
 int blk_dev_init(void);
 
+void elv_quiesce_start(struct request_queue *q);
+void elv_quiesce_end(struct request_queue *q);
+
+
 /*
  * Return the threshold (number of used requests) at which the queue is
  * considered to be congested. It include a little hysteresis to keep the
diff --git a/block/genhd.c b/block/genhd.c
index a8adf96a4b41..5fa2b44a72ff 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -929,15 +929,8 @@ static void disk_free_ptbl_rcu_cb(struct rcu_head *head)
 {
 	struct disk_part_tbl *ptbl =
 		container_of(head, struct disk_part_tbl, rcu_head);
-	struct gendisk *disk = ptbl->disk;
-	struct request_queue *q = disk->queue;
-	unsigned long flags;
 
 	kfree(ptbl);
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	elv_quiesce_end(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /**
@@ -955,17 +948,11 @@ static void disk_replace_part_tbl(struct gendisk *disk,
 				  struct disk_part_tbl *new_ptbl)
 {
 	struct disk_part_tbl *old_ptbl = disk->part_tbl;
-	struct request_queue *q = disk->queue;
 
 	rcu_assign_pointer(disk->part_tbl, new_ptbl);
 
 	if (old_ptbl) {
 		rcu_assign_pointer(old_ptbl->last_lookup, NULL);
-
-		spin_lock_irq(q->queue_lock);
-		elv_quiesce_start(q);
-		spin_unlock_irq(q->queue_lock);
-
 		call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb);
 	}
 }
@@ -1006,7 +993,6 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno)
 		return -ENOMEM;
 
 	new_ptbl->len = target;
-	new_ptbl->disk = disk;
 
 	for (i = 0; i < len; i++)
 		rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);