Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c923a7aeb56..93eb3e4f88ce 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
  */
 void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
+	int i;
+
 	while (true) {
 		bool drain = false;
-		int i;
 
 		spin_lock_irq(q->queue_lock);
 
@@ -408,6 +409,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 			break;
 		msleep(10);
 	}
+
+	/*
+	 * With queue marked dead, any woken up waiter will fail the
+	 * allocation path, so the wakeup chaining is lost and we're
+	 * left with hung waiters. We need to wake up those waiters.
+	 */
+	if (q->request_fn) {
+		spin_lock_irq(q->queue_lock);
+		for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
+			wake_up_all(&q->rq.wait[i]);
+		spin_unlock_irq(q->queue_lock);
+	}
 }
 
 /**
@@ -467,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	/* mark @q DEAD, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
 	spin_lock_irq(lock);
 
 	/*
@@ -485,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-	if (q->queue_lock != &q->__queue_lock)
-		q->queue_lock = &q->__queue_lock;
-
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);
 
@@ -499,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
 	blk_sync_queue(q);
 
+	spin_lock_irq(lock);
+	if (q->queue_lock != &q->__queue_lock)
+		q->queue_lock = &q->__queue_lock;
+	spin_unlock_irq(lock);
+
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
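
The interesting part of this change is the wake_up_all() loop added at the end of blk_drain_queue(): once the queue is marked dead, a woken waiter fails the allocation path and never passes the wakeup along to the next sleeper, so the drain path has to wake every waiter itself. Below is a small userspace sketch of that pattern in plain C with pthreads; it is not kernel code, and pool_t, pool_get(), pool_put() and pool_kill() are invented names used only to illustrate why teardown needs a broadcast rather than the normal one-at-a-time wakeup chain.

/*
 * Userspace analogy (NOT kernel code) for the hunk added to
 * blk_drain_queue(): once a request pool is marked dead, a woken waiter
 * fails its allocation and never chains the wakeup, so whoever tears the
 * pool down must wake every waiter itself.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t lock;	/* plays the role of q->queue_lock */
	pthread_cond_t wait;	/* plays the role of q->rq.wait[] */
	int free;		/* free requests in the pool */
	bool dead;		/* plays the role of QUEUE_FLAG_DEAD */
} pool_t;

/* Block until a request is available; return false if the pool died. */
static bool pool_get(pool_t *p)
{
	bool ok;

	pthread_mutex_lock(&p->lock);
	while (p->free == 0 && !p->dead)
		pthread_cond_wait(&p->wait, &p->lock);
	ok = !p->dead;
	if (ok)
		p->free--;
	pthread_mutex_unlock(&p->lock);
	return ok;
}

/* Freeing a request wakes exactly one waiter: the "wakeup chaining". */
static void pool_put(pool_t *p)
{
	pthread_mutex_lock(&p->lock);
	p->free++;
	pthread_cond_signal(&p->wait);
	pthread_mutex_unlock(&p->lock);
}

/*
 * Analogue of the new code at the end of blk_drain_queue(): mark the pool
 * dead and wake *all* waiters, because a waiter that fails pool_get()
 * will not re-signal and the remaining sleepers would hang forever.
 */
static void pool_kill(pool_t *p)
{
	pthread_mutex_lock(&p->lock);
	p->dead = true;
	pthread_cond_broadcast(&p->wait);	/* like wake_up_all(&q->rq.wait[i]) */
	pthread_mutex_unlock(&p->lock);
}

static void *waiter(void *arg)
{
	pool_t *p = arg;

	if (pool_get(p))
		pool_put(p);	/* got one; give it back and chain the wakeup */
	else
		puts("pool is dead, bailing out instead of hanging");
	return NULL;
}

int main(void)
{
	pool_t p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
		.free = 0,		/* pool is exhausted: all waiters sleep */
	};
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, waiter, &p);
	pool_kill(&p);			/* without the broadcast, they would hang */
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}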