author     Bart Van Assche <bvanassche@acm.org>    2012-11-28 07:43:38 -0500
committer  Jens Axboe <axboe@kernel.dk>            2012-12-06 08:30:59 -0500
commit     807592a4fafba1fea6e98b9cf1fb02b7c38fb24c (patch)
tree       663e227bd15b6abacd2439c1c4cdfd42ec95e493 /block
parent     3f3299d5c0268d6cc3f47b446e8aca436e4a5651 (diff)
block: Let blk_drain_queue() caller obtain the queue lock
Let the caller of blk_drain_queue() obtain the queue lock to improve
readability of the patch called "Avoid that request_fn is invoked on
a dead queue".
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Chanho Min <chanho.min@lge.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
 block/blk-core.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 1a95272cca50..a182b586b06a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -349,7 +349,7 @@ void blk_put_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_put_queue);
 
 /**
- * blk_drain_queue - drain requests from request_queue
+ * __blk_drain_queue - drain requests from request_queue
  * @q: queue to drain
  * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
  *
@@ -357,15 +357,17 @@ EXPORT_SYMBOL(blk_put_queue);
  * If not, only ELVPRIV requests are drained. The caller is responsible
  * for ensuring that no new requests which need to be drained are queued.
  */
-void blk_drain_queue(struct request_queue *q, bool drain_all)
+static void __blk_drain_queue(struct request_queue *q, bool drain_all)
+	__releases(q->queue_lock)
+	__acquires(q->queue_lock)
 {
 	int i;
 
+	lockdep_assert_held(q->queue_lock);
+
 	while (true) {
 		bool drain = false;
 
-		spin_lock_irq(q->queue_lock);
-
 		/*
 		 * The caller might be trying to drain @q before its
 		 * elevator is initialized.
@@ -401,11 +403,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 			}
 		}
 
-		spin_unlock_irq(q->queue_lock);
-
 		if (!drain)
 			break;
+
+		spin_unlock_irq(q->queue_lock);
+
 		msleep(10);
+
+		spin_lock_irq(q->queue_lock);
 	}
 
 	/*
@@ -416,13 +421,9 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 	if (q->request_fn) {
 		struct request_list *rl;
 
-		spin_lock_irq(q->queue_lock);
-
 		blk_queue_for_each_rl(rl, q)
 			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
 				wake_up_all(&rl->wait[i]);
-
-		spin_unlock_irq(q->queue_lock);
 	}
 }
 
@@ -446,7 +447,10 @@ void blk_queue_bypass_start(struct request_queue *q)
 	spin_unlock_irq(q->queue_lock);
 
 	if (drain) {
-		blk_drain_queue(q, false);
+		spin_lock_irq(q->queue_lock);
+		__blk_drain_queue(q, false);
+		spin_unlock_irq(q->queue_lock);
+
 		/* ensure blk_queue_bypass() is %true inside RCU read lock */
 		synchronize_rcu();
 	}
@@ -504,7 +508,9 @@ void blk_cleanup_queue(struct request_queue *q)
 	mutex_unlock(&q->sysfs_lock);
 
 	/* drain all requests queued before DYING marking */
-	blk_drain_queue(q, true);
+	spin_lock_irq(lock);
+	__blk_drain_queue(q, true);
+	spin_unlock_irq(lock);
 
 	/* @q won't process any more request, flush async actions */
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
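
The convention this patch adopts is a common kernel pattern: the helper runs with the queue lock already held by its caller, may drop and reacquire that lock while sleeping (documented for sparse via __releases()/__acquires()), and verifies its precondition with lockdep_assert_held(). A minimal sketch of the same pattern follows; all names in it (my_queue, my_drain, my_caller) are hypothetical illustrations, not the block layer's real API.

/*
 * Sketch of the caller-holds-the-lock convention, with hypothetical
 * names. The helper is entered with q->lock held and returns with it
 * held, but may drop it internally because sleeping under a spinlock
 * is forbidden.
 */
#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/delay.h>

struct my_queue {
	spinlock_t lock;
	int pending;
};

/* Caller must hold q->lock; the lock is dropped across msleep(). */
static void my_drain(struct my_queue *q)
	__releases(q->lock)
	__acquires(q->lock)
{
	lockdep_assert_held(&q->lock);

	while (q->pending) {
		/* Drop the lock before sleeping, retake it afterwards. */
		spin_unlock_irq(&q->lock);
		msleep(10);
		spin_lock_irq(&q->lock);
	}
}

static void my_caller(struct my_queue *q)
{
	spin_lock_irq(&q->lock);	/* caller obtains the lock ... */
	my_drain(q);			/* ... as __blk_drain_queue() now expects */
	spin_unlock_irq(&q->lock);
}

This mirrors what the patch does to blk_queue_bypass_start() and blk_cleanup_queue(): both now bracket __blk_drain_queue() with spin_lock_irq()/spin_unlock_irq() themselves, which is what lets the follow-up patch ("Avoid that request_fn is invoked on a dead queue") do additional work under that same lock acquisition.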