author		Jens Axboe <jaxboe@fusionio.com>	2011-04-04 21:29:57 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2011-04-05 17:52:49 -0400
commit		782b86e2656762382ae1c2686d8d5c91f7d5eacf
tree		a2d47765c87d1407e67ac55634c2bcbfa6cddb59
parent		f83e826181f7f8fb152e4190d03854fc3a5dd040
ide: always ensure that blk_delay_queue() is called if we have pending IO
Just because we are not requeuing a request does not mean that
some aren't pending. So always issue a blk_delay_queue() if
either we are requeueing OR there's pending IO.
This fixes a boot problem for some IDE boxes.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
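For illustration only, the standalone sketch below models the decision this patch changes; it is not kernel code, and struct fake_queue, queue_has_pending_io(), requeue(), and schedule_delayed_run() are made-up stand-ins for the real block-layer objects and calls. The old path only re-armed the queue when a request was being requeued, so pending-but-not-requeued IO never got the delayed queue run and could stall; the new path re-arms the queue whenever anything is still pending.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for a request queue (hypothetical, for illustration). */
struct fake_queue {
	int pending;			/* requests still waiting to be issued */
};

static bool queue_has_pending_io(struct fake_queue *q)
{
	return q->pending > 0;		/* models blk_peek_request(q) != NULL */
}

static void requeue(struct fake_queue *q)
{
	q->pending++;			/* models blk_requeue_request(q, rq) */
}

static void schedule_delayed_run(struct fake_queue *q, int ms)
{
	printf("queue re-run scheduled in %dms (%d pending)\n", ms, q->pending);
}

/* Old behaviour: the queue is only re-armed when a request is requeued. */
static void plug_old(struct fake_queue *q, bool have_rq)
{
	if (have_rq) {
		requeue(q);
		schedule_delayed_run(q, 3);
	}
	/* have_rq == false with IO still pending: nothing re-arms the queue */
}

/* New behaviour (mirrors __ide_requeue_and_plug): re-arm if we are
 * requeueing OR anything is still pending. */
static void plug_new(struct fake_queue *q, bool have_rq)
{
	if (have_rq)
		requeue(q);
	if (have_rq || queue_has_pending_io(q))
		schedule_delayed_run(q, 3);	/* 3ms: the old plug delay */
}

int main(void)
{
	struct fake_queue q = { .pending = 1 };	/* IO waiting, nothing to requeue */

	plug_old(&q, false);	/* prints nothing: the stalled case */
	plug_new(&q, false);	/* schedules a re-run: the fixed case */
	return 0;
}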
-rw-r--r--	drivers/ide/ide-io.c	42
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index ca27d3090aeb..177db6d5b2f5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -430,6 +430,26 @@ static inline void ide_unlock_host(struct ide_host *host)
 	}
 }
 
+static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
+{
+	if (rq)
+		blk_requeue_request(q, rq);
+	if (rq || blk_peek_request(q)) {
+		/* Use 3ms as that was the old plug delay */
+		blk_delay_queue(q, 3);
+	}
+}
+
+void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
+{
+	struct request_queue *q = drive->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__ide_requeue_and_plug(q, rq);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
 /*
  * Issue a new request to a device.
  */
@@ -550,27 +570,7 @@ plug_device:
 	ide_unlock_host(host);
 plug_device_2:
 	spin_lock_irq(q->queue_lock);
-
-	if (rq) {
-		blk_requeue_request(q, rq);
-		blk_delay_queue(q, queue_run_ms);
-	}
-}
-
-void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
-{
-	struct request_queue *q = drive->queue;
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-
-	if (rq)
-		blk_requeue_request(q, rq);
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	/* Use 3ms as that was the old plug delay */
-	blk_delay_queue(q, 3);
+	__ide_requeue_and_plug(q, rq);
 }
 
 static int drive_is_ready(ide_drive_t *drive)