diff options
author | Nick Piggin <npiggin@suse.de> | 2008-04-29 08:48:33 -0400 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-04-29 08:48:33 -0400 |
commit | 75ad23bc0fcb4f992a5d06982bf0857ab1738e9e (patch) | |
tree | 8668ef63b1f420252ae41aed9e13737d49fd8054 /block/blk-core.c | |
parent | 68154e90c9d1492d570671ae181d9a8f8530da55 (diff) |
block: make queue flags non-atomic
We can save some atomic ops in the IO path, if we clearly define
the rules of how to modify the queue flags.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 39 |
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index e447799256d6..d2f23ec5ebfa 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,7 +198,8 @@ void blk_plug_device(struct request_queue *q) | |||
198 | if (blk_queue_stopped(q)) | 198 | if (blk_queue_stopped(q)) |
199 | return; | 199 | return; |
200 | 200 | ||
201 | if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) { | 201 | if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) { |
202 | __set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags); | ||
202 | mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); | 203 | mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); |
203 | blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG); | 204 | blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG); |
204 | } | 205 | } |
@@ -213,9 +214,10 @@ int blk_remove_plug(struct request_queue *q) | |||
213 | { | 214 | { |
214 | WARN_ON(!irqs_disabled()); | 215 | WARN_ON(!irqs_disabled()); |
215 | 216 | ||
216 | if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) | 217 | if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) |
217 | return 0; | 218 | return 0; |
218 | 219 | ||
220 | queue_flag_clear(QUEUE_FLAG_PLUGGED, q); | ||
219 | del_timer(&q->unplug_timer); | 221 | del_timer(&q->unplug_timer); |
220 | return 1; | 222 | return 1; |
221 | } | 223 | } |
@@ -311,15 +313,16 @@ void blk_start_queue(struct request_queue *q) | |||
311 | { | 313 | { |
312 | WARN_ON(!irqs_disabled()); | 314 | WARN_ON(!irqs_disabled()); |
313 | 315 | ||
314 | clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); | 316 | queue_flag_clear(QUEUE_FLAG_STOPPED, q); |
315 | 317 | ||
316 | /* | 318 | /* |
317 | * one level of recursion is ok and is much faster than kicking | 319 | * one level of recursion is ok and is much faster than kicking |
318 | * the unplug handling | 320 | * the unplug handling |
319 | */ | 321 | */ |
320 | if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { | 322 | if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { |
323 | queue_flag_set(QUEUE_FLAG_REENTER, q); | ||
321 | q->request_fn(q); | 324 | q->request_fn(q); |
322 | clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags); | 325 | queue_flag_clear(QUEUE_FLAG_REENTER, q); |
323 | } else { | 326 | } else { |
324 | blk_plug_device(q); | 327 | blk_plug_device(q); |
325 | kblockd_schedule_work(&q->unplug_work); | 328 | kblockd_schedule_work(&q->unplug_work); |
@@ -344,7 +347,7 @@ EXPORT_SYMBOL(blk_start_queue); | |||
344 | void blk_stop_queue(struct request_queue *q) | 347 | void blk_stop_queue(struct request_queue *q) |
345 | { | 348 | { |
346 | blk_remove_plug(q); | 349 | blk_remove_plug(q); |
347 | set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); | 350 | queue_flag_set(QUEUE_FLAG_STOPPED, q); |
348 | } | 351 | } |
349 | EXPORT_SYMBOL(blk_stop_queue); | 352 | EXPORT_SYMBOL(blk_stop_queue); |
350 | 353 | ||
@@ -373,11 +376,8 @@ EXPORT_SYMBOL(blk_sync_queue); | |||
373 | * blk_run_queue - run a single device queue | 376 | * blk_run_queue - run a single device queue |
374 | * @q: The queue to run | 377 | * @q: The queue to run |
375 | */ | 378 | */ |
376 | void blk_run_queue(struct request_queue *q) | 379 | void __blk_run_queue(struct request_queue *q) |
377 | { | 380 | { |
378 | unsigned long flags; | ||
379 | |||
380 | spin_lock_irqsave(q->queue_lock, flags); | ||
381 | blk_remove_plug(q); | 381 | blk_remove_plug(q); |
382 | 382 | ||
383 | /* | 383 | /* |
@@ -385,15 +385,28 @@ void blk_run_queue(struct request_queue *q) | |||
385 | * handling reinvoke the handler shortly if we already got there. | 385 | * handling reinvoke the handler shortly if we already got there. |
386 | */ | 386 | */ |
387 | if (!elv_queue_empty(q)) { | 387 | if (!elv_queue_empty(q)) { |
388 | if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { | 388 | if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { |
389 | queue_flag_set(QUEUE_FLAG_REENTER, q); | ||
389 | q->request_fn(q); | 390 | q->request_fn(q); |
390 | clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags); | 391 | queue_flag_clear(QUEUE_FLAG_REENTER, q); |
391 | } else { | 392 | } else { |
392 | blk_plug_device(q); | 393 | blk_plug_device(q); |
393 | kblockd_schedule_work(&q->unplug_work); | 394 | kblockd_schedule_work(&q->unplug_work); |
394 | } | 395 | } |
395 | } | 396 | } |
397 | } | ||
398 | EXPORT_SYMBOL(__blk_run_queue); | ||
396 | 399 | ||
400 | /** | ||
401 | * blk_run_queue - run a single device queue | ||
402 | * @q: The queue to run | ||
403 | */ | ||
404 | void blk_run_queue(struct request_queue *q) | ||
405 | { | ||
406 | unsigned long flags; | ||
407 | |||
408 | spin_lock_irqsave(q->queue_lock, flags); | ||
409 | __blk_run_queue(q); | ||
397 | spin_unlock_irqrestore(q->queue_lock, flags); | 410 | spin_unlock_irqrestore(q->queue_lock, flags); |
398 | } | 411 | } |
399 | EXPORT_SYMBOL(blk_run_queue); | 412 | EXPORT_SYMBOL(blk_run_queue); |
@@ -406,7 +419,7 @@ void blk_put_queue(struct request_queue *q) | |||
406 | void blk_cleanup_queue(struct request_queue *q) | 419 | void blk_cleanup_queue(struct request_queue *q) |
407 | { | 420 | { |
408 | mutex_lock(&q->sysfs_lock); | 421 | mutex_lock(&q->sysfs_lock); |
409 | set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); | 422 | queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); |
410 | mutex_unlock(&q->sysfs_lock); | 423 | mutex_unlock(&q->sysfs_lock); |
411 | 424 | ||
412 | if (q->elevator) | 425 | if (q->elevator) |