Diffstat (limited to 'drivers/scsi/scsi.c')
-rw-r--r--   drivers/scsi/scsi.c   105
1 file changed, 24 insertions, 81 deletions
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ee6be596503d..2ac3cb2b9081 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
 	unsigned long flags;
 
 	cmd->device = dev;
-	init_timer(&cmd->eh_timeout);
 	INIT_LIST_HEAD(&cmd->list);
 	spin_lock_irqsave(&dev->list_lock, flags);
 	list_add_tail(&cmd->list, &dev->cmd_list);
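Note: the per-command timer disappears here because, with this series, struct scsi_cmnd no longer carries its own eh_timeout; the command timeout is armed by the block layer when the request is issued and expiry is delivered through the queue's rq_timed_out handler (scsi_times_out). As a minimal sketch of the replacement, assuming the legacy blk_queue_rq_timeout() helper from the same block-layer rework (the helper name example_set_default_timeout is hypothetical, purely for illustration):

#include <linux/blkdev.h>

/* Sketch only: give every request on this queue a 30-second timeout.
 * The block layer (re)arms its own timer each time a request is
 * started, which is what replaces init_timer(&cmd->eh_timeout) above.
 */
static void example_set_default_timeout(struct request_queue *q)
{
	blk_queue_rq_timeout(q, 30 * HZ);
}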
@@ -652,26 +651,33 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	unsigned long timeout;
 	int rtn = 0;
 
+	/*
+	 * We will use a queued command if possible, otherwise we will
+	 * emulate the queuing and calling of completion function ourselves.
+	 */
+	atomic_inc(&cmd->device->iorequest_cnt);
+
 	/* check if the device is still usable */
 	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
 		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
 		 * returns an immediate error upwards, and signals
 		 * that the device is no longer present */
 		cmd->result = DID_NO_CONNECT << 16;
-		atomic_inc(&cmd->device->iorequest_cnt);
-		__scsi_done(cmd);
+		scsi_done(cmd);
 		/* return 0 (because the command has been processed) */
 		goto out;
 	}
 
-	/* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
-	if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
+	/* Check to see if the scsi lld made this device blocked. */
+	if (unlikely(scsi_device_blocked(cmd->device))) {
 		/*
-		 * in SDEV_BLOCK, the command is just put back on the device
-		 * queue. The suspend state has already blocked the queue so
-		 * future requests should not occur until the device
-		 * transitions out of the suspend state.
+		 * in blocked state, the command is just put back on
+		 * the device queue. The suspend state has already
+		 * blocked the queue so future requests should not
+		 * occur until the device transitions out of the
+		 * suspend state.
 		 */
+
 		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
 
 		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
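Two things change in this hunk. First, atomic_inc(&cmd->device->iorequest_cnt) moves to the top of scsi_dispatch_cmd() so the counter covers every dispatched command exactly once, including the early SDEV_DEL error path, which now completes through scsi_done(). Second, the open-coded SDEV_BLOCK test becomes scsi_device_blocked(). A rough sketch of what that helper is assumed to look like (the real inline lives in include/scsi/scsi_device.h; shown here only to explain the wider check):

/* Sketch/assumption: treat both the runtime-blocked state and the
 * created-while-blocked state as "blocked", so newly discovered
 * devices behind a blocked target are requeued rather than dispatched.
 */
static inline int scsi_device_blocked(struct scsi_device *sdev)
{
	return sdev->sdev_state == SDEV_BLOCK ||
	       sdev->sdev_state == SDEV_CREATED_BLOCK;
}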
@@ -714,21 +720,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 		host->resetting = 0;
 	}
 
-	/*
-	 * AK: unlikely race here: for some reason the timer could
-	 * expire before the serial number is set up below.
-	 */
-	scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
-
 	scsi_log_send(cmd);
 
 	/*
-	 * We will use a queued command if possible, otherwise we will
-	 * emulate the queuing and calling of completion function ourselves.
-	 */
-	atomic_inc(&cmd->device->iorequest_cnt);
-
-	/*
 	 * Before we queue this command, check if the command
 	 * length exceeds what the host adapter can handle.
 	 */
@@ -744,6 +738,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	}
 
 	spin_lock_irqsave(host->host_lock, flags);
+	/*
+	 * AK: unlikely race here: for some reason the timer could
+	 * expire before the serial number is set up below.
+	 *
+	 * TODO: kill serial or move to blk layer
+	 */
 	scsi_cmd_get_serial(host, cmd);
 
 	if (unlikely(host->shost_state == SHOST_DEL)) {
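The old "AK" race note (plus a new TODO) now sits next to scsi_cmd_get_serial(), the only per-command bookkeeping still done under host_lock before ->queuecommand. For context, that helper just hands out the host's next non-zero serial number; roughly along these lines (a sketch, not verbatim from this tree):

/* Sketch: zero means "no serial assigned", so skip it on wraparound. */
static void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	cmd->serial_number = host->cmd_serial_number++;
	if (cmd->serial_number == 0)
		cmd->serial_number = host->cmd_serial_number++;
}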
@@ -754,12 +754,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	}
 	spin_unlock_irqrestore(host->host_lock, flags);
 	if (rtn) {
-		if (scsi_delete_timer(cmd)) {
-			atomic_inc(&cmd->device->iodone_cnt);
-			scsi_queue_insert(cmd,
-					  (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
-					  rtn : SCSI_MLQUEUE_HOST_BUSY);
-		}
+		scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
+						rtn : SCSI_MLQUEUE_HOST_BUSY);
 		SCSI_LOG_MLQUEUE(3,
 				 printk("queuecommand : request rejected\n"));
 	}
@@ -770,24 +766,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 }
 
 /**
- * scsi_req_abort_cmd -- Request command recovery for the specified command
- * @cmd: pointer to the SCSI command of interest
- *
- * This function requests that SCSI Core start recovery for the
- * command by deleting the timer and adding the command to the eh
- * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
- * implement their own error recovery MAY ignore the timeout event if
- * they generated scsi_req_abort_cmd.
- */
-void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
-{
-	if (!scsi_delete_timer(cmd))
-		return;
-	scsi_times_out(cmd);
-}
-EXPORT_SYMBOL(scsi_req_abort_cmd);
-
-/**
  * scsi_done - Enqueue the finished SCSI command into the done queue.
  * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
  *       ownership back to SCSI Core -- i.e. the LLDD has finished with it.
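scsi_req_abort_cmd() existed only to cancel the per-command timer and jump straight into scsi_times_out(); with the timer owned by the block layer there is nothing left for it to do in this file. LLDDs that called it are presumably expected to abort through the block layer instead, e.g. via blk_abort_request(), which expires the request immediately and lets the normal timeout/EH path take over. A hypothetical caller (my_lld_request_abort is not a real kernel function):

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical LLDD helper: force immediate timeout handling for cmd. */
static void my_lld_request_abort(struct scsi_cmnd *cmd)
{
	blk_abort_request(cmd->request);
}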
@@ -802,42 +780,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
  */
 static void scsi_done(struct scsi_cmnd *cmd)
 {
-	/*
-	 * We don't have to worry about this one timing out anymore.
-	 * If we are unable to remove the timer, then the command
-	 * has already timed out. In which case, we have no choice but to
-	 * let the timeout function run, as we have no idea where in fact
-	 * that function could really be. It might be on another processor,
-	 * etc, etc.
-	 */
-	if (!scsi_delete_timer(cmd))
-		return;
-	__scsi_done(cmd);
-}
-
-/* Private entry to scsi_done() to complete a command when the timer
- * isn't running --- used by scsi_times_out */
-void __scsi_done(struct scsi_cmnd *cmd)
-{
-	struct request *rq = cmd->request;
-
-	/*
-	 * Set the serial numbers back to zero
-	 */
-	cmd->serial_number = 0;
-
-	atomic_inc(&cmd->device->iodone_cnt);
-	if (cmd->result)
-		atomic_inc(&cmd->device->ioerr_cnt);
-
-	BUG_ON(!rq);
-
-	/*
-	 * The uptodate/nbytes values don't matter, as we allow partial
-	 * completes and thus will check this in the softirq callback
-	 */
-	rq->completion_data = cmd;
-	blk_complete_request(rq);
+	blk_complete_request(cmd->request);
 }
 
 /* Move this to a header if it becomes more generally useful */
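With the "did the timer fire already?" race handled inside the block layer, completion collapses to a single blk_complete_request() call. __scsi_done() goes away, and its bookkeeping (serial-number reset, iodone_cnt/ioerr_cnt accounting) presumably moves into the softirq completion handler, which can already find the command through the request, so rq->completion_data no longer needs to be set here. A sketch of the queue wiring this relies on, as it would look in scsi_lib.c (assumption, not part of this diff; example_wire_up_queue is a hypothetical name):

/* Sketch/assumption: hook the SCSI queue into the block layer's softirq
 * completion and unified timeout handling, so blk_complete_request()
 * above lands in scsi_softirq_done() and an expired request lands in
 * scsi_times_out(). */
static void example_wire_up_queue(struct request_queue *q)
{
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
}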