author     Nick Piggin <npiggin@suse.de>          2008-04-29 08:48:33 -0400
committer  Jens Axboe <jens.axboe@oracle.com>     2008-04-29 08:48:33 -0400
commit     75ad23bc0fcb4f992a5d06982bf0857ab1738e9e
tree       8668ef63b1f420252ae41aed9e13737d49fd8054 /block
parent     68154e90c9d1492d570671ae181d9a8f8530da55
block: make queue flags non-atomic
We can save some atomic ops in the IO path if we clearly define
the rules of how to modify the queue flags.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
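
The queue_flag_* helpers this patch calls land in include/linux/blkdev.h, which is outside this 'block'-limited diffstat. Judging from the call sites below, they are presumably thin wrappers around the non-atomic bitops, roughly along these lines (a sketch of the header change, not the verbatim hunk):

/*
 * Sketch of the helpers assumed by the call sites in this patch.
 * Rule: queue_flag_set()/queue_flag_clear() must be called with
 * q->queue_lock held; the *_unlocked variants are for paths where
 * the queue is not otherwise reachable (e.g. blk_cleanup_queue(),
 * which runs under q->sysfs_lock instead).
 */
static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}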
Diffstat (limited to 'block')
 -rw-r--r--  block/blk-core.c     | 39
 -rw-r--r--  block/blk-merge.c    |  6
 -rw-r--r--  block/blk-settings.c |  2
 -rw-r--r--  block/blk-tag.c      |  8
 -rw-r--r--  block/elevator.c     | 13
 5 files changed, 44 insertions, 24 deletions
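
The same transformation recurs throughout the diff: at call sites that already run under q->queue_lock (usually with interrupts disabled, as the WARN_ON(!irqs_disabled()) checks attest), an atomic read-modify-write splits into a plain test plus a non-atomic update. A condensed before/after, taken from the blk_remove_plug() hunk below:

	/* before: test_and_clear_bit() is a locked RMW on SMP */
	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
		return 0;

	/* after: plain read + non-atomic clear; q->queue_lock serializes both */
	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
		return 0;

	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);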
diff --git a/block/blk-core.c b/block/blk-core.c
index e447799256d6..d2f23ec5ebfa 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,7 +198,8 @@ void blk_plug_device(struct request_queue *q)
 	if (blk_queue_stopped(q))
 		return;
 
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
@@ -213,9 +214,10 @@ int blk_remove_plug(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
 		return 0;
 
+	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
 	del_timer(&q->unplug_timer);
 	return 1;
 }
@@ -311,15 +313,16 @@ void blk_start_queue(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 
 	/*
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
 	 */
-	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		queue_flag_set(QUEUE_FLAG_REENTER, q);
 		q->request_fn(q);
-		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
 		blk_plug_device(q);
 		kblockd_schedule_work(&q->unplug_work);
@@ -344,7 +347,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
 
@@ -373,11 +376,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  */
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
 
 	/*
@@ -385,15 +385,28 @@ void blk_run_queue(struct request_queue *q)
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
 	if (!elv_queue_empty(q)) {
-		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			queue_flag_set(QUEUE_FLAG_REENTER, q);
 			q->request_fn(q);
-			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+			queue_flag_clear(QUEUE_FLAG_REENTER, q);
 		} else {
 			blk_plug_device(q);
 			kblockd_schedule_work(&q->unplug_work);
 		}
 	}
+}
+EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -406,7 +419,7 @@ void blk_put_queue(struct request_queue *q)
 void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
-	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 	mutex_unlock(&q->sysfs_lock);
 
 	if (q->elevator)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b5c5c4a9e3f0..73b23562af20 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -55,7 +55,7 @@ void blk_recalc_rq_segments(struct request *rq)
 	if (!rq->bio)
 		return;
 
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	hw_seg_size = seg_size = 0;
 	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
 	rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
 		return 0;
 
 	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -175,7 +175,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 
 	/*
 	 * for each bio in rq
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 77b51dc37a3c..6089384ab064 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -287,7 +287,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 4780a46ce234..e176ddbe599e 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
 	__blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
-	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
 **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
 
@@ -188,7 +188,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 		rc = blk_queue_resize_tags(q, depth);
 		if (rc)
 			return rc;
-		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_QUEUED, q);
 		return 0;
 	} else
 		atomic_inc(&tags->refcnt);
@@ -197,7 +197,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 * assign it, all done
 	 */
 	q->queue_tags = tags;
-	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+	queue_flag_set(QUEUE_FLAG_QUEUED, q);
 	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
diff --git a/block/elevator.c b/block/elevator.c
index 88318c383608..e8a90fe23424 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1070,7 +1070,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 */
 	spin_lock_irq(q->queue_lock);
 
-	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
 
 	elv_drain_elevator(q);
 
@@ -1104,7 +1104,10 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 * finally exit old elevator and turn off BYPASS.
 	 */
 	elevator_exit(old_elevator);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
 	return 1;
 
 fail_register:
@@ -1115,7 +1118,11 @@ fail_register:
 	elevator_exit(e);
 	q->elevator = old_elevator;
 	elv_register_queue(q);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
 	return 0;
 }
 
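
One knock-on API note: with blk_run_queue() split as above, a caller that already holds q->queue_lock can invoke the newly exported __blk_run_queue() directly rather than paying for a second lock round trip. A hypothetical caller for illustration (the function name is invented, not part of this patch):

/*
 * Hypothetical: requeue a request and restart dispatch from a path
 * that already holds q->queue_lock with interrupts disabled.
 */
static void my_requeue_and_kick(struct request_queue *q, struct request *rq)
{
	blk_requeue_request(q, rq);
	__blk_run_queue(q);	/* no spin_lock_irqsave() needed here */
}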