author      Tejun Heo <tj@kernel.org>                         2009-05-07 22:53:59 -0400
committer   Jens Axboe <jens.axboe@oracle.com>                2009-05-11 03:52:14 -0400
commit      8f6205cd572fece673da0255d74843680f67f879 (patch)
tree        bb89c1c5fe4436f5a0ec27f0d96074c7031a83ae /drivers/ide
parent      1011c1b9f2e45ce7c6e38888d2b83936aec38771 (diff)
ide: dequeue in-flight request
ide generally has a single request in flight, which it tracks using
hwif->rq, and all state handlers follow these conventions:
* ide_started is returned if the request is in flight.
* ide_stopped is returned if the queue needs to be restarted. The
  request may have been processed fully, partially, or not at all.
* hwif->rq is set to NULL when an issued request completes.
So, the dequeueing model can be implemented by dequeueing after fetch,
requeueing if hwif->rq isn't NULL on ide_stopped return, and doing much
the same on the completion / port unlock paths. These changes can be
made in ide-io proper.
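As a rough illustration of that model (a simplified sketch, not the
exact patched do_ide_request(); locking, port selection and the plug
paths are omitted, and the helper name is made up here), the flow is:

        /* sketch only: condensed from do_ide_request() in this patch */
        static void ide_issue_next_rq_sketch(ide_drive_t *drive)
        {
                ide_hwif_t *hwif = drive->hwif;
                struct request_queue *q = drive->queue;
                struct request *rq;

                /* fetch the next request and dequeue it right away */
                rq = elv_next_request(q);
                if (!rq)
                        return;
                blkdev_dequeue_request(rq);     /* rq is now in flight */

                hwif->rq = rq;
                if (start_request(drive, rq) == ide_stopped) {
                        /*
                         * The port was released.  If the request wasn't
                         * completed (hwif->rq still set), requeue it so
                         * the elevator hands it out again on the next
                         * queue run.
                         */
                        rq = hwif->rq;
                        hwif->rq = NULL;
                        if (rq)
                                blk_requeue_request(q, rq);
                }
        }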
In addition to the main changes above, the following updates are
necessary.
* ide-cd shouldn't dequeue a request when issuing REQUEST SENSE for it,
  as the request is already dequeued.
* ide-atapi uses the request queue as a stack when issuing REQUEST
  SENSE, to put the REQUEST SENSE in front of the failed request. This
  now needs to be done using requeueing (see the sketch after this
  list).
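For ide-atapi, the requeueing approach amounts to the fragment below (a
sketch mirroring the ide-atapi.c hunk further down, assuming drive and
the request-sense pc are already set up and the failed request is still
in hwif->rq):

        /* sketch only: mirrors the new ide_retry_pc() behaviour */
        struct request *failed_rq = drive->hwif->rq;

        /* push the already-dequeued failed request back to the queue head */
        blk_requeue_request(failed_rq->q, failed_rq);
        drive->hwif->rq = NULL;

        /* then queue REQUEST SENSE so it is issued before the retry */
        if (ide_queue_sense_rq(drive, pc)) {
                /* couldn't queue the sense request: take rq back and fail it */
                blkdev_dequeue_request(failed_rq);
                ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
        }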
[ Impact: dequeue in-flight request ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Cc: Borislav Petkov <petkovbb@googlemail.com>
Cc: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/ide')
 drivers/ide/ide-atapi.c |   14 ++++++++++++--
 drivers/ide/ide-cd.c    |    8 --------
 drivers/ide/ide-io.c    |   34 +++++++++++++++++++++++++++-------
 3 files changed, 39 insertions(+), 17 deletions(-)
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 792534db8f85..2874c3d703a9 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -246,6 +246,7 @@ EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
  */
 void ide_retry_pc(ide_drive_t *drive)
 {
+        struct request *failed_rq = drive->hwif->rq;
         struct request *sense_rq = &drive->sense_rq;
         struct ide_atapi_pc *pc = &drive->request_sense_pc;
 
@@ -260,8 +261,17 @@ void ide_retry_pc(ide_drive_t *drive)
         if (drive->media == ide_tape)
                 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
 
-        if (ide_queue_sense_rq(drive, pc))
-                ide_complete_rq(drive, -EIO, blk_rq_bytes(drive->hwif->rq));
+        /*
+         * Push back the failed request and put request sense on top
+         * of it.  The failed command will be retried after sense data
+         * is acquired.
+         */
+        blk_requeue_request(failed_rq->q, failed_rq);
+        drive->hwif->rq = NULL;
+        if (ide_queue_sense_rq(drive, pc)) {
+                blkdev_dequeue_request(failed_rq);
+                ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
+        }
 }
 EXPORT_SYMBOL_GPL(ide_retry_pc);
 
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 2eadc9d2e965..4c7792fd5f93 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -405,15 +405,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 
 end_request:
         if (stat & ATA_ERR) {
-                struct request_queue *q = drive->queue;
-                unsigned long flags;
-
-                spin_lock_irqsave(q->queue_lock, flags);
-                blkdev_dequeue_request(rq);
-                spin_unlock_irqrestore(q->queue_lock, flags);
-
                 hwif->rq = NULL;
-
                 return ide_queue_sense_rq(drive, rq) ? 2 : 1;
         } else
                 return 2;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index ca2519d7b994..abda7337b3f4 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -487,10 +487,10 @@ void do_ide_request(struct request_queue *q)
 
         if (!ide_lock_port(hwif)) {
                 ide_hwif_t *prev_port;
+
+                WARN_ON_ONCE(hwif->rq);
 repeat:
                 prev_port = hwif->host->cur_port;
-                hwif->rq = NULL;
-
                 if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
                     time_after(drive->sleep, jiffies)) {
                         ide_unlock_port(hwif);
@@ -519,7 +519,12 @@ repeat:
                  * we know that the queue isn't empty, but this can happen
                  * if the q->prep_rq_fn() decides to kill a request
                  */
-                rq = elv_next_request(drive->queue);
+                if (!rq) {
+                        rq = elv_next_request(drive->queue);
+                        if (rq)
+                                blkdev_dequeue_request(rq);
+                }
+
                 spin_unlock_irq(q->queue_lock);
                 spin_lock_irq(&hwif->lock);
 
@@ -555,8 +560,11 @@ repeat:
                 startstop = start_request(drive, rq);
                 spin_lock_irq(&hwif->lock);
 
-                if (startstop == ide_stopped)
+                if (startstop == ide_stopped) {
+                        rq = hwif->rq;
+                        hwif->rq = NULL;
                         goto repeat;
+                }
         } else
                 goto plug_device;
 out:
@@ -572,18 +580,24 @@ plug_device:
 plug_device_2:
         spin_lock_irq(q->queue_lock);
 
+        if (rq)
+                blk_requeue_request(q, rq);
         if (!elv_queue_empty(q))
                 blk_plug_device(q);
 }
 
-static void ide_plug_device(ide_drive_t *drive)
+static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 {
         struct request_queue *q = drive->queue;
         unsigned long flags;
 
         spin_lock_irqsave(q->queue_lock, flags);
+
+        if (rq)
+                blk_requeue_request(q, rq);
         if (!elv_queue_empty(q))
                 blk_plug_device(q);
+
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -632,6 +646,7 @@ void ide_timer_expiry (unsigned long data)
         unsigned long flags;
         int wait = -1;
         int plug_device = 0;
+        struct request *uninitialized_var(rq_in_flight);
 
         spin_lock_irqsave(&hwif->lock, flags);
 
@@ -693,6 +708,8 @@ void ide_timer_expiry (unsigned long data)
                 spin_lock_irq(&hwif->lock);
                 enable_irq(hwif->irq);
                 if (startstop == ide_stopped) {
+                        rq_in_flight = hwif->rq;
+                        hwif->rq = NULL;
                         ide_unlock_port(hwif);
                         plug_device = 1;
                 }
@@ -701,7 +718,7 @@ void ide_timer_expiry (unsigned long data)
 
         if (plug_device) {
                 ide_unlock_host(hwif->host);
-                ide_plug_device(drive);
+                ide_requeue_and_plug(drive, rq_in_flight);
         }
 }
 
@@ -787,6 +804,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
         ide_startstop_t startstop;
         irqreturn_t irq_ret = IRQ_NONE;
         int plug_device = 0;
+        struct request *uninitialized_var(rq_in_flight);
 
         if (host->host_flags & IDE_HFLAG_SERIALIZE) {
                 if (hwif != host->cur_port)
@@ -866,6 +884,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
          */
         if (startstop == ide_stopped) {
                 BUG_ON(hwif->handler);
+                rq_in_flight = hwif->rq;
+                hwif->rq = NULL;
                 ide_unlock_port(hwif);
                 plug_device = 1;
         }
@@ -875,7 +895,7 @@ out:
 out_early:
         if (plug_device) {
                 ide_unlock_host(hwif->host);
-                ide_plug_device(drive);
+                ide_requeue_and_plug(drive, rq_in_flight);
         }
 
         return irq_ret;