| author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2008-05-09 18:24:09 -0400 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2008-05-09 18:24:09 -0400 |
| commit | 1f2ee6496b1f71e9d5aa2448745e65fbafdc3bd5 (patch) | |
| tree | 3f143311afca5e316afd06c2fc4f7d73b19cdcf0 /block | |
| parent | 5bf6c6e30d8b71d092e8830208e182d84b907fcd (diff) | |
| parent | da109897a142dd017172c0ce7abf0be8646f7109 (diff) | |
Merge branch 'for-rmk' of git://git.kernel.org/pub/scm/linux/kernel/git/nico/orion into fixes
Diffstat (limited to 'block')
| -rw-r--r-- | block/blk-core.c | 26 |
| -rw-r--r-- | block/blk-ioc.c | 2 |
| -rw-r--r-- | block/blk-merge.c | 12 |
| -rw-r--r-- | block/blk-sysfs.c | 6 |
| -rw-r--r-- | block/blk-tag.c | 9 |
| -rw-r--r-- | block/cfq-iosched.c | 23 |
6 files changed, 47 insertions, 31 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index b754a4a2f9bd..2987fe47b5ee 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -54,15 +54,16 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+	struct hd_struct *part;
 	int rw = rq_data_dir(rq);
 
 	if (!blk_fs_request(rq) || !rq->rq_disk)
 		return;
 
-	if (!new_io) {
-		__all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
-	} else {
-		struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
+	part = get_part(rq->rq_disk, rq->sector);
+	if (!new_io)
+		__all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
+	else {
 		disk_round_stats(rq->rq_disk);
 		rq->rq_disk->in_flight++;
 		if (part) {
@@ -253,9 +254,11 @@ EXPORT_SYMBOL(__generic_unplug_device);
  **/
 void generic_unplug_device(struct request_queue *q)
 {
-	spin_lock_irq(q->queue_lock);
-	__generic_unplug_device(q);
-	spin_unlock_irq(q->queue_lock);
+	if (blk_queue_plugged(q)) {
+		spin_lock_irq(q->queue_lock);
+		__generic_unplug_device(q);
+		spin_unlock_irq(q->queue_lock);
+	}
 }
 EXPORT_SYMBOL(generic_unplug_device);
 
@@ -1536,10 +1539,11 @@ static int __end_that_request_first(struct request *req, int error,
 	}
 
 	if (blk_fs_request(req) && req->rq_disk) {
+		struct hd_struct *part = get_part(req->rq_disk, req->sector);
 		const int rw = rq_data_dir(req);
 
-		all_stat_add(req->rq_disk, sectors[rw],
+		all_stat_add(req->rq_disk, part, sectors[rw],
 				nr_bytes >> 9, req->sector);
 	}
 
 	total_bytes = bio_nbytes = 0;
@@ -1725,8 +1729,8 @@ static void end_that_request_last(struct request *req, int error)
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part = get_part(disk, req->sector);
 
-		__all_stat_inc(disk, ios[rw], req->sector);
-		__all_stat_add(disk, ticks[rw], duration, req->sector);
+		__all_stat_inc(disk, part, ios[rw], req->sector);
+		__all_stat_add(disk, part, ticks[rw], duration, req->sector);
 		disk_round_stats(disk);
 		disk->in_flight--;
 		if (part) {
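The second hunk above turns generic_unplug_device() into a test-then-lock fast path: the plugged state is checked before paying for queue_lock, and a stale read is harmless because __generic_unplug_device() re-checks under the lock. Below is a minimal userspace sketch of that pattern, not kernel code; fake_queue, fake_unplug and the pthread/stdatomic plumbing are invented for illustration, with the re-check made explicit.

```c
#include <pthread.h>
#include <stdatomic.h>

struct fake_queue {
	pthread_mutex_t lock;
	atomic_int plugged;
	int unplug_count;
};

static void fake_unplug(struct fake_queue *q)
{
	/* Cheap unlocked test: most calls find the queue already unplugged
	 * and return without touching the lock at all. */
	if (!atomic_load(&q->plugged))
		return;

	pthread_mutex_lock(&q->lock);
	/* Re-check under the lock; another thread may have unplugged the
	 * queue between our test and the lock acquisition. */
	if (atomic_load(&q->plugged)) {
		atomic_store(&q->plugged, 0);
		q->unplug_count++;	/* stands in for running the queue */
	}
	pthread_mutex_unlock(&q->lock);
}
```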
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index e34df7c9fc36..012f065ac8e2 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -41,8 +41,8 @@ int put_io_context(struct io_context *ioc)
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
-		rcu_read_unlock();
 		cfq_dtor(ioc);
+		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
 		return 1;
```
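This hunk moves cfq_dtor() before rcu_read_unlock(), so the RCU-protected cic list it walks is only accessed inside the read-side critical section. A minimal sketch of that ordering rule using userspace RCU (liburcu); struct ctx, global_ctx and reader() are invented names, and a real program must also register its threads with rcu_register_thread().

```c
#include <urcu.h>	/* liburcu; link with -lurcu (flavor selection omitted) */

struct ctx { int *data; };
static struct ctx *global_ctx;

static void reader(void)
{
	struct ctx *c;

	rcu_read_lock();
	c = rcu_dereference(global_ctx);
	if (c)
		(void)*c->data;	/* must happen before rcu_read_unlock() */
	rcu_read_unlock();	/* after this point an updater may free *c */
}
```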
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 73b23562af20..651136aae76e 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -149,9 +149,9 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
 				 struct bio *nxt)
 {
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+	if (!bio_flagged(nxt, BIO_SEG_VALID))
 		blk_recount_segments(q, nxt);
 	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
 	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
@@ -312,9 +312,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		q->last_merge = NULL;
 		return 0;
 	}
-	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
 		blk_recount_segments(q, req->biotail);
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
 	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
@@ -352,9 +352,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		return 0;
 	}
 	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+	if (!bio_flagged(req->bio, BIO_SEG_VALID))
 		blk_recount_segments(q, req->bio);
 	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
 	    !BIOVEC_VIRT_OVERSIZE(len)) {
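The blk-merge.c hunks change no logic; they only drop unlikely() annotations. unlikely() is a branch-layout hint built on __builtin_expect (the classic kernel-style definition is reproduced below), and removing it is the right call when the branch is not actually rare, since a wrong hint pessimizes the common path. The maybe_recount() wrapper here is an invented stand-in for the bio_flagged()/blk_recount_segments() pairing, shown only to illustrate the hint.

```c
/* Classic kernel-style definitions of the hint macros. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Invented stand-in for the bio_flagged()/blk_recount_segments() pattern.
 * With the hint, the compiler tends to move the recount() call out of
 * line; if recounts turn out to be common, that layout costs more than
 * it saves, which is when removing unlikely() makes sense. */
static void maybe_recount(unsigned long flags, void (*recount)(void))
{
	if (unlikely(!(flags & 0x1)))	/* 0x1 plays the role of BIO_SEG_VALID */
		recount();
}
```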
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e85c4013e8a2..304ec73ab821 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -146,11 +146,13 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 	unsigned long nm;
 	ssize_t ret = queue_var_store(&nm, page, count);
 
+	spin_lock_irq(q->queue_lock);
 	if (nm)
-		set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	else
-		clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 
+	spin_unlock_irq(q->queue_lock);
 	return ret;
 }
 
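Here the sysfs store path switches from atomic set_bit()/clear_bit() to the queue_flag_set()/queue_flag_clear() helpers, which use plain (non-atomic) bit ops and rely on queue_lock for serialization; hence the new spin_lock_irq()/spin_unlock_irq() pair. A userspace sketch of that trade, with invented names (fakeq, QF_NOMERGES) and a mutex standing in for the spinlock:

```c
#include <pthread.h>

struct fakeq {
	pthread_mutex_t lock;
	unsigned long flags;
};

#define QF_NOMERGES	(1UL << 3)	/* invented bit position */

static void fakeq_set_nomerges(struct fakeq *q, int on)
{
	pthread_mutex_lock(&q->lock);	/* the lock replaces per-bit atomicity */
	if (on)
		q->flags |= QF_NOMERGES;	/* plain RMW, safe under the lock */
	else
		q->flags &= ~QF_NOMERGES;
	pthread_mutex_unlock(&q->lock);
}
```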
diff --git a/block/blk-tag.c b/block/blk-tag.c
index de64e0429977..32667beb03ee 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
 	__blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
-	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
  **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
 
@@ -171,6 +171,9 @@ EXPORT_SYMBOL(blk_init_tags);
  * @q: the request queue for the device
  * @depth: the maximum queue depth supported
  * @tags: the tag to use
+ *
+ * Queue lock must be held here if the function is called to resize an
+ * existing map.
 **/
 int blk_queue_init_tags(struct request_queue *q, int depth,
 			struct blk_queue_tag *tags)
@@ -197,7 +200,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 * assign it, all done
 	 */
 	q->queue_tags = tags;
-	queue_flag_set(QUEUE_FLAG_QUEUED, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
 	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
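The blk-tag.c hunks are the other half of the same flag-helper convention: the *_unlocked variants appear to be for paths where the queue is assumed not to be visible to concurrent updaters, and the added kernel-doc note pins down when the lock really is required (resizing a live map). A sketch of the locked/unlocked pairing, with invented names (tagq, TQ_QUEUED):

```c
#include <pthread.h>

struct tagq {
	pthread_mutex_t lock;
	unsigned long flags;
};

#define TQ_QUEUED	(1UL << 0)	/* invented bit position */

/* Caller guarantees exclusive access, e.g. the queue is still being
 * created or is already being torn down. */
static void tagq_mark_queued_unlocked(struct tagq *q)
{
	q->flags |= TQ_QUEUED;
}

/* Runtime variant: the lock serializes against concurrent flag updates. */
static void tagq_mark_queued(struct tagq *q)
{
	pthread_mutex_lock(&q->lock);
	tagq_mark_queued_unlocked(q);
	pthread_mutex_unlock(&q->lock);
}
```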
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f4e1006c253d..b399c62936e0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1142,6 +1142,17 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	kmem_cache_free(cfq_pool, cfqq);
 }
 
+static void
+__call_for_each_cic(struct io_context *ioc,
+		    void (*func)(struct io_context *, struct cfq_io_context *))
+{
+	struct cfq_io_context *cic;
+	struct hlist_node *n;
+
+	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
+		func(ioc, cic);
+}
+
 /*
  * Call func for each cic attached to this ioc.
  */
@@ -1149,12 +1160,8 @@ static void
 call_for_each_cic(struct io_context *ioc,
 		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
-	struct cfq_io_context *cic;
-	struct hlist_node *n;
-
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
-		func(ioc, cic);
+	__call_for_each_cic(ioc, func);
 	rcu_read_unlock();
 }
 
@@ -1198,7 +1205,7 @@ static void cfq_free_io_context(struct io_context *ioc)
 	 * should be ok to iterate over the known list, we will see all cic's
 	 * since no new ones are added.
 	 */
-	call_for_each_cic(ioc, cic_free_func);
+	__call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
@@ -1296,10 +1303,10 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
 	case IOPRIO_CLASS_NONE:
 		/*
-		 * no prio set, place us in the middle of the BE classes
+		 * no prio set, inherit CPU scheduling settings
 		 */
 		cfqq->ioprio = task_nice_ioprio(tsk);
-		cfqq->ioprio_class = IOPRIO_CLASS_BE;
+		cfqq->ioprio_class = task_nice_ioclass(tsk);
 		break;
 	case IOPRIO_CLASS_RT:
 		cfqq->ioprio = task_ioprio(ioc);
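The first two cfq-iosched.c hunks split the iteration out of call_for_each_cic() into a bare __call_for_each_cic(), keeping rcu_read_lock() only in the public wrapper; cfq_free_io_context() then uses the bare walker, its protection being supplied by the caller (per the blk-ioc.c hunk, put_io_context() now holds the RCU read-side section across cfq_dtor()). A minimal liburcu sketch of that wrapper/helper split; struct item, struct bag and the function names are all invented, and a plain RCU-published singly linked list stands in for the kernel hlist:

```c
#include <urcu.h>	/* liburcu; link with -lurcu (flavor selection omitted) */

struct item { struct item *next; int v; };
struct bag { struct item *head; };

/* Bare walker: the caller must already hold rcu_read_lock(), or must
 * otherwise guarantee the list cannot be freed while we walk it. */
static void __for_each_item(struct bag *b, void (*fn)(struct item *))
{
	struct item *it;

	for (it = rcu_dereference(b->head); it; it = rcu_dereference(it->next))
		fn(it);
}

/* Public wrapper: supplies the RCU read-side protection itself. */
static void for_each_item(struct bag *b, void (*fn)(struct item *))
{
	rcu_read_lock();
	__for_each_item(b, fn);
	rcu_read_unlock();
}
```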