Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c  | 295
1 file changed, 207 insertions(+), 88 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c63275e66e2e..9ee8218404c0 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -41,6 +41,20 @@
 #define LPFC_ABORT_WAIT 2
 
 
+static inline void
+lpfc_block_requests(struct lpfc_hba * phba)
+{
+	down(&phba->hba_can_block);
+	scsi_block_requests(phba->host);
+}
+
+static inline void
+lpfc_unblock_requests(struct lpfc_hba * phba)
+{
+	scsi_unblock_requests(phba->host);
+	up(&phba->hba_can_block);
+}
+
 /*
  * This routine allocates a scsi buffer, which contains all the necessary
  * information needed to initiate a SCSI I/O. The non-DMAable buffer region
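Note on the helpers added above: lpfc_block_requests()/lpfc_unblock_requests() pair the new hba_can_block semaphore with the midlayer's scsi_block_requests()/scsi_unblock_requests(), so only one error handler at a time can quiesce the host. The later hunks bracket the abort and reset handlers with this pair; a minimal sketch of that bracketing, reusing only names introduced by this patch (the handler name itself is hypothetical):

static int lpfc_some_eh_handler_sketch(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	int ret = FAILED;

	lpfc_block_requests(phba);	/* serialize EH and stop new I/O */
	spin_lock_irq(shost->host_lock);

	/* ... issue the abort or reset and wait for it to complete ... */

	spin_unlock_irq(shost->host_lock);
	lpfc_unblock_requests(phba);	/* resume I/O, release hba_can_block */
	return ret;
}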
@@ -137,18 +151,22 @@ lpfc_new_scsi_buf(struct lpfc_hba * phba)
 }
 
 struct lpfc_scsi_buf*
-lpfc_sli_get_scsi_buf(struct lpfc_hba * phba)
+lpfc_get_scsi_buf(struct lpfc_hba * phba)
 {
 	struct lpfc_scsi_buf * lpfc_cmd = NULL;
 	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+	unsigned long iflag = 0;
 
+	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
 	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 	return lpfc_cmd;
 }
 
 static void
 lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 {
+	unsigned long iflag = 0;
 	/*
 	 * There are only two special cases to consider. (1) the scsi command
 	 * requested scatter-gather usage or (2) the scsi command allocated
@@ -166,8 +184,10 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 		}
 	}
 
+	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
 	psb->pCmd = NULL;
 	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 }
 
 static int
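The get/release paths above now take a dedicated phba->scsi_buf_list_lock instead of relying on the host lock, so the free-buffer list can be manipulated from contexts that do not hold host_lock. That spinlock has to be initialized once at attach time; the companion change lives outside this file and is only sketched here (the helper name is hypothetical):

static void lpfc_scsi_buf_list_setup_sketch(struct lpfc_hba *phba)
{
	/* assumed to run once in the hba attach path, before any I/O */
	spin_lock_init(&phba->scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
}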
@@ -389,7 +409,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
 	struct lpfc_nodelist *pnode = rdata->pnode;
 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
-	unsigned long iflag;
+	int result;
+	struct scsi_device *sdev, *tmp_sdev;
+	int depth = 0;
 
 	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
@@ -441,11 +463,64 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 				*lp, *(lp + 3), cmd->retries, cmd->resid);
 	}
 
+	result = cmd->result;
+	sdev = cmd->device;
 	cmd->scsi_done(cmd);
 
-	spin_lock_irqsave(phba->host->host_lock, iflag);
+	if (!result &&
+	   ((jiffies - pnode->last_ramp_up_time) >
+		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
+	   ((jiffies - pnode->last_q_full_time) >
+		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
+	   (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
+		shost_for_each_device(tmp_sdev, sdev->host) {
+			if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
+				if (tmp_sdev->id != sdev->id)
+					continue;
+				if (tmp_sdev->ordered_tags)
+					scsi_adjust_queue_depth(tmp_sdev,
+						MSG_ORDERED_TAG,
+						tmp_sdev->queue_depth+1);
+				else
+					scsi_adjust_queue_depth(tmp_sdev,
+						MSG_SIMPLE_TAG,
+						tmp_sdev->queue_depth+1);
+
+				pnode->last_ramp_up_time = jiffies;
+			}
+		}
+	}
+
+	/*
+	 * Check for queue full. If the lun is reporting queue full, then
+	 * back off the lun queue depth to prevent target overloads.
+	 */
+	if (result == SAM_STAT_TASK_SET_FULL) {
+		pnode->last_q_full_time = jiffies;
+
+		shost_for_each_device(tmp_sdev, sdev->host) {
+			if (tmp_sdev->id != sdev->id)
+				continue;
+			depth = scsi_track_queue_full(tmp_sdev,
+					tmp_sdev->queue_depth - 1);
+		}
+		/*
+		 * The queue depth cannot be lowered any more.
+		 * Modify the returned error code to store
+		 * the final depth value set by
+		 * scsi_track_queue_full.
+		 */
+		if (depth == -1)
+			depth = sdev->host->cmd_per_lun;
+
+		if (depth) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+				"%d:0711 detected queue full - lun queue depth "
+				" adjusted to %d.\n", phba->brd_no, depth);
+		}
+	}
+
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
-	spin_unlock_irqrestore(phba->host->host_lock, iflag);
 }
 
 static void
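The completion path above now adjusts the LUN queue depth in both directions: a clean completion may bump the depth by one, but only if the node has seen neither a ramp-up nor a QUEUE FULL for LPFC_Q_RAMP_UP_INTERVAL seconds and the depth is still below cfg_lun_queue_depth, while SAM_STAT_TASK_SET_FULL walks the depth back down through scsi_track_queue_full(). The ramp-up gate, distilled into a standalone sketch (helper name hypothetical; time_after() stands in for the open-coded jiffies arithmetic):

static int lpfc_can_ramp_up_sketch(struct lpfc_hba *phba,
				   struct lpfc_nodelist *pnode,
				   struct scsi_device *sdev)
{
	unsigned long quiet = LPFC_Q_RAMP_UP_INTERVAL * HZ;

	/* bump the LUN queue depth only if the node has been quiet for a
	 * full interval and we are still below the configured ceiling */
	return time_after(jiffies, pnode->last_ramp_up_time + quiet) &&
	       time_after(jiffies, pnode->last_q_full_time + quiet) &&
	       phba->cfg_lun_queue_depth > sdev->queue_depth;
}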
@@ -693,6 +768,37 @@ lpfc_info(struct Scsi_Host *host)
 	return lpfcinfobuf;
 }
 
+static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
+{
+	unsigned long poll_tmo_expires =
+		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
+
+	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
+		mod_timer(&phba->fcp_poll_timer,
+				poll_tmo_expires);
+}
+
+void lpfc_poll_start_timer(struct lpfc_hba * phba)
+{
+	lpfc_poll_rearm_timer(phba);
+}
+
+void lpfc_poll_timeout(unsigned long ptr)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+	unsigned long iflag;
+
+	spin_lock_irqsave(phba->host->host_lock, iflag);
+
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_sli_poll_fcp_ring (phba);
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+
+	spin_unlock_irqrestore(phba->host->host_lock, iflag);
+}
+
 static int
 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 {
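The polling support added above services the FCP ring from a timer: lpfc_poll_timeout() polls the ring under host_lock and lpfc_poll_rearm_timer() re-queues the timer only while txcmplq still holds outstanding commands, so the timer stops by itself once the ring is idle. The timer itself has to be wired up during attach, which is not part of this hunk; a sketch of that wiring with the timer API of this kernel generation (helper name hypothetical):

static void lpfc_poll_setup_sketch(struct lpfc_hba *phba)
{
	/* assumed to run in the attach path before the host is made visible */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long)phba;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_poll_start_timer(phba);
}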
@@ -719,10 +825,11 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
 		goto out_fail_command;
 	}
-	lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
+	lpfc_cmd = lpfc_get_scsi_buf (phba);
 	if (lpfc_cmd == NULL) {
-		printk(KERN_WARNING "%s: No buffer available - list empty, "
-		       "total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
+		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+				"%d:0707 driver's buffer pool is empty, "
+				"IO busied\n", phba->brd_no);
 		goto out_host_busy;
 	}
 
@@ -746,11 +853,17 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 				&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 	if (err)
 		goto out_host_busy_free_buf;
+
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_sli_poll_fcp_ring(phba);
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+
 	return 0;
 
 out_host_busy_free_buf:
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
-	cmnd->host_scribble = NULL;
 out_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
 
@@ -759,11 +872,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	return 0;
 }
 
+
 static int
-__lpfc_abort_handler(struct scsi_cmnd *cmnd)
+lpfc_abort_handler(struct scsi_cmnd *cmnd)
 {
-	struct lpfc_hba *phba =
-		(struct lpfc_hba *)cmnd->device->host->hostdata[0];
+	struct Scsi_Host *shost = cmnd->device->host;
+	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
 	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
 	struct lpfc_iocbq *iocb;
 	struct lpfc_iocbq *abtsiocb;
@@ -772,6 +886,8 @@ __lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	unsigned int loop_count = 0;
 	int ret = SUCCESS;
 
+	lpfc_block_requests(phba);
+	spin_lock_irq(shost->host_lock);
 
 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
 	BUG_ON(!lpfc_cmd);
@@ -821,9 +937,15 @@ __lpfc_abort_handler(struct scsi_cmnd *cmnd)
 		goto out;
 	}
 
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+		lpfc_sli_poll_fcp_ring (phba);
+
 	/* Wait for abort to complete */
 	while (lpfc_cmd->pCmd == cmnd)
 	{
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_sli_poll_fcp_ring (phba);
+
 		spin_unlock_irq(phba->host->host_lock);
 		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
 		spin_lock_irq(phba->host->host_lock);
@@ -844,26 +966,19 @@ __lpfc_abort_handler(struct scsi_cmnd *cmnd)
 
 out:
 	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-			"%d:0749 SCSI layer issued abort device: ret %#x, "
-			"ID %d, LUN %d, snum %#lx\n",
+			"%d:0749 SCSI Layer I/O Abort Request "
+			"Status x%x ID %d LUN %d snum %#lx\n",
 			phba->brd_no, ret, cmnd->device->id,
 			cmnd->device->lun, cmnd->serial_number);
 
-	return ret;
-}
+	spin_unlock_irq(shost->host_lock);
+	lpfc_unblock_requests(phba);
 
-static int
-lpfc_abort_handler(struct scsi_cmnd *cmnd)
-{
-	int rc;
-	spin_lock_irq(cmnd->device->host->host_lock);
-	rc = __lpfc_abort_handler(cmnd);
-	spin_unlock_irq(cmnd->device->host->host_lock);
-	return rc;
+	return ret;
 }
 
 static int
-__lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
+lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 {
 	struct Scsi_Host *shost = cmnd->device->host;
 	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
@@ -871,9 +986,12 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_iocbq *iocbq, *iocbqrsp;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
 	struct lpfc_nodelist *pnode = rdata->pnode;
+	uint32_t cmd_result = 0, cmd_status = 0;
 	int ret = FAILED;
 	int cnt, loopcnt;
 
+	lpfc_block_requests(phba);
+	spin_lock_irq(shost->host_lock);
 	/*
 	 * If target is not in a MAPPED state, delay the reset until
 	 * target is rediscovered or nodev timeout expires.
@@ -891,7 +1009,7 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 			break;
 	}
 
-	lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
+	lpfc_cmd = lpfc_get_scsi_buf (phba);
 	if (lpfc_cmd == NULL)
 		goto out;
 
@@ -916,26 +1034,28 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 	if (ret == IOCB_SUCCESS)
 		ret = SUCCESS;
 
-	lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
-	lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
-	if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
-		if (lpfc_cmd->result & IOERR_DRVR_MASK)
-			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+
+	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
+	cmd_status = iocbqrsp->iocb.ulpStatus;
+
+	lpfc_sli_release_iocbq(phba, iocbqrsp);
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
 
 	/*
-	 * All outstanding txcmplq I/Os should have been aborted by the target.
+	 * All outstanding txcmplq I/Os should have been aborted by the device.
 	 * Unfortunately, some targets do not abide by this forcing the driver
 	 * to double check.
 	 */
-	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
-			    cmnd->device->id, cmnd->device->lun, 0,
-			    LPFC_CTX_LUN);
-
+	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+				cmnd->device->id, cmnd->device->lun,
+				LPFC_CTX_LUN);
+	if (cnt)
+		lpfc_sli_abort_iocb(phba,
+				    &phba->sli.ring[phba->sli.fcp_ring],
+				    cmnd->device->id, cmnd->device->lun,
+				    0, LPFC_CTX_LUN);
 	loopcnt = 0;
-	while((cnt = lpfc_sli_sum_iocb(phba,
-				&phba->sli.ring[phba->sli.fcp_ring],
-				cmnd->device->id, cmnd->device->lun,
-				LPFC_CTX_LUN))) {
+	while(cnt) {
 		spin_unlock_irq(phba->host->host_lock);
 		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
 		spin_lock_irq(phba->host->host_lock);
@@ -943,6 +1063,11 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 		if (++loopcnt
 		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
 			break;
+
+		cnt = lpfc_sli_sum_iocb(phba,
+					&phba->sli.ring[phba->sli.fcp_ring],
+					cmnd->device->id, cmnd->device->lun,
+					LPFC_CTX_LUN);
 	}
 
 	if (cnt) {
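Both reset handlers now share the same flush-and-wait shape: count the outstanding FCP iocbs with lpfc_sli_sum_iocb(), abort them once, then re-count every LPFC_RESET_WAIT seconds with host_lock dropped until the ring drains or 2 * cfg_nodev_tmo elapses. Distilled into one sketch (helper name hypothetical; ctx stands for LPFC_CTX_LUN or LPFC_CTX_HOST and is passed as a plain int here for brevity):

static int lpfc_flush_and_wait_sketch(struct lpfc_hba *phba,
				      unsigned int tgt_id, unsigned int lun_id,
				      int ctx)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	int cnt, loopcnt = 0;

	cnt = lpfc_sli_sum_iocb(phba, pring, tgt_id, lun_id, ctx);
	if (cnt)
		lpfc_sli_abort_iocb(phba, pring, tgt_id, lun_id, 0, ctx);

	while (cnt) {
		/* sleep with host_lock dropped, exactly as the handlers do */
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT * HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt > (2 * phba->cfg_nodev_tmo) / LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba, pring, tgt_id, lun_id, ctx);
	}
	return cnt;	/* non-zero means I/O is still outstanding */
}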
@@ -952,35 +1077,21 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 		ret = FAILED;
 	}
 
-	lpfc_sli_release_iocbq(phba, iocbqrsp);
-
 out_free_scsi_buf:
 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
 			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
 			"Data: x%x x%x x%x\n",
-			phba->brd_no, lpfc_cmd->pCmd->device->id,
-			lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
-			lpfc_cmd->result);
-	lpfc_release_scsi_buf(phba, lpfc_cmd);
+			phba->brd_no, cmnd->device->id,cmnd->device->lun,
+			ret, cmd_status, cmd_result);
+
 out:
+	spin_unlock_irq(shost->host_lock);
+	lpfc_unblock_requests(phba);
 	return ret;
 }
 
 static int
-lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
-{
-	int rc;
-	spin_lock_irq(cmnd->device->host->host_lock);
-	rc = __lpfc_reset_lun_handler(cmnd);
-	spin_unlock_irq(cmnd->device->host->host_lock);
-	return rc;
-}
-
-/*
- * Note: midlayer calls this function with the host_lock held
- */
-static int
-__lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
+lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 {
 	struct Scsi_Host *shost = cmnd->device->host;
 	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
@@ -991,7 +1102,10 @@ __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 	unsigned int midlayer_id = 0;
 	struct lpfc_scsi_buf * lpfc_cmd;
 
-	lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
+	lpfc_block_requests(phba);
+	spin_lock_irq(shost->host_lock);
+
+	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL)
 		goto out;
 
@@ -1022,18 +1136,31 @@ __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 		lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
 		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
 		if (ret != SUCCESS) {
-			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
 				"%d:0713 Bus Reset on target %d failed\n",
 				phba->brd_no, i);
 			err_count++;
 		}
 	}
 
+	if (err_count == 0)
+		ret = SUCCESS;
+
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
+
+	/*
+	 * All outstanding txcmplq I/Os should have been aborted by
+	 * the targets. Unfortunately, some targets do not abide by
+	 * this forcing the driver to double check.
+	 */
 	cmnd->device->id = midlayer_id;
+	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+				0, 0, LPFC_CTX_HOST);
+	if (cnt)
+		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+				    0, 0, 0, LPFC_CTX_HOST);
 	loopcnt = 0;
-	while((cnt = lpfc_sli_sum_iocb(phba,
-				&phba->sli.ring[phba->sli.fcp_ring],
-				0, 0, LPFC_CTX_HOST))) {
+	while(cnt) {
 		spin_unlock_irq(phba->host->host_lock);
 		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
 		spin_lock_irq(phba->host->host_lock);
@@ -1041,45 +1168,31 @@ __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 		if (++loopcnt
 		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
 			break;
+
+		cnt = lpfc_sli_sum_iocb(phba,
+					&phba->sli.ring[phba->sli.fcp_ring],
+					0, 0, LPFC_CTX_HOST);
 	}
 
 	if (cnt) {
-		/* flush all outstanding commands on the host */
-		i = lpfc_sli_abort_iocb(phba,
-			&phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
-			LPFC_CTX_HOST);
-
-		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
 		   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
 		   phba->brd_no, cnt, i);
-	}
-
-	if (cnt == 0)
-		ret = SUCCESS;
-	else
 		ret = FAILED;
+	}
 
-	lpfc_release_scsi_buf(phba, lpfc_cmd);
 	lpfc_printf_log(phba,
 			KERN_ERR,
 			LOG_FCP,
 			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
 			phba->brd_no, ret);
 out:
+	spin_unlock_irq(shost->host_lock);
+	lpfc_unblock_requests(phba);
 	return ret;
 }
 
 static int
-lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
-{
-	int rc;
-	spin_lock_irq(cmnd->device->host->host_lock);
-	rc = __lpfc_reset_bus_handler(cmnd);
-	spin_unlock_irq(cmnd->device->host->host_lock);
-	return rc;
-}
-
-static int
 lpfc_slave_alloc(struct scsi_device *sdev)
 {
 	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
@@ -1127,10 +1240,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 			break;
 		}
 
-		spin_lock_irqsave(phba->host->host_lock, flags);
+		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
 		phba->total_scsi_bufs++;
 		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
-		spin_unlock_irqrestore(phba->host->host_lock, flags);
+		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
 	}
 	return 0;
 }
@@ -1154,6 +1267,12 @@ lpfc_slave_configure(struct scsi_device *sdev)
 	 */
 	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;
 
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_sli_poll_fcp_ring(phba);
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+
 	return 0;
 }
 