Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c	399
1 file changed, 152 insertions(+), 247 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b5ad1871d34b..c34d3cf4f19c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -50,12 +50,13 @@
  * and the BPL BDE is setup in the IOCB.
  */
 static struct lpfc_scsi_buf *
-lpfc_get_scsi_buf(struct lpfc_hba * phba)
+lpfc_new_scsi_buf(struct lpfc_hba * phba)
 {
 	struct lpfc_scsi_buf *psb;
 	struct ulp_bde64 *bpl;
 	IOCB_t *iocb;
 	dma_addr_t pdma_phys;
+	uint16_t iotag;
 
 	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
 	if (!psb)
@@ -79,6 +80,16 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
 	/* Initialize virtual ptrs to dma_buf region. */
 	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
+	/* Allocate iotag for psb->cur_iocbq. */
+	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+	if (iotag == 0) {
+		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+			      psb->data, psb->dma_handle);
+		kfree (psb);
+		return NULL;
+	}
+	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
 	psb->fcp_cmnd = psb->data;
 	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
 	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
@@ -125,11 +136,19 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
 	return psb;
 }
 
-static void
-lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
+struct lpfc_scsi_buf*
+lpfc_sli_get_scsi_buf(struct lpfc_hba * phba)
 {
-	struct lpfc_hba *phba = psb->scsi_hba;
+	struct lpfc_scsi_buf * lpfc_cmd = NULL;
+	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+
+	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+	return lpfc_cmd;
+}
 
+static void
+lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+{
 	/*
 	 * There are only two special cases to consider. (1) the scsi command
 	 * requested scatter-gather usage or (2) the scsi command allocated
@@ -147,6 +166,7 @@ lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
 		}
 	}
 
+	psb->pCmd = NULL;
 	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
 }
 
@@ -403,14 +423,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			break;
 		}
 
-		if (pnode) {
-			if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
-				cmd->result = ScsiResult(DID_BUS_BUSY,
-					SAM_STAT_BUSY);
-		}
-		else {
-			cmd->result = ScsiResult(DID_NO_CONNECT, 0);
-		}
+		if ((pnode == NULL )
+		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
 	} else {
 		cmd->result = ScsiResult(DID_OK, 0);
 	}
@@ -426,12 +441,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 				*lp, *(lp + 3), cmd->retries, cmd->resid);
 	}
 
+	cmd->scsi_done(cmd);
+
 	spin_lock_irqsave(phba->host->host_lock, iflag);
-	lpfc_free_scsi_buf(lpfc_cmd);
-	cmd->host_scribble = NULL;
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
 	spin_unlock_irqrestore(phba->host->host_lock, iflag);
-
-	cmd->scsi_done(cmd);
 }
 
 static void
@@ -539,7 +553,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
 	struct lpfc_rport_data *rdata = scsi_dev->hostdata;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
 
-	if ((ndlp == 0) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
 		return 0;
 	}
 
@@ -618,8 +632,7 @@ static int
 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
 {
 	struct lpfc_iocbq *iocbq;
-	struct lpfc_iocbq *iocbqrsp = NULL;
-	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+	struct lpfc_iocbq *iocbqrsp;
 	int ret;
 
 	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
@@ -628,17 +641,14 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
 
 	lpfc_cmd->scsi_hba = phba;
 	iocbq = &lpfc_cmd->cur_iocbq;
-	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
+	iocbqrsp = lpfc_sli_get_iocbq(phba);
+
 	if (!iocbqrsp)
 		return FAILED;
-	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));
-
-	iocbq->iocb_flag |= LPFC_IO_POLL;
-	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
-			&phba->sli.ring[phba->sli.fcp_ring],
-			iocbq, SLI_IOCB_HIGH_PRIORITY,
-			iocbqrsp,
-			lpfc_cmd->timeout);
+
+	ret = lpfc_sli_issue_iocb_wait(phba,
+				&phba->sli.ring[phba->sli.fcp_ring],
+				iocbq, iocbqrsp, lpfc_cmd->timeout);
 	if (ret != IOCB_SUCCESS) {
 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
 		ret = FAILED;
@@ -651,45 +661,10 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
 	}
 
-	/*
-	 * All outstanding txcmplq I/Os should have been aborted by the target.
-	 * Unfortunately, some targets do not abide by this forcing the driver
-	 * to double check.
-	 */
-	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
-			    lpfc_cmd->pCmd->device->id,
-			    lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);
-
-	/* Return response IOCB to free list. */
-	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
+	lpfc_sli_release_iocbq(phba, iocbqrsp);
 	return ret;
 }
 
-static void
-lpfc_scsi_cmd_iocb_cleanup (struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
-			    struct lpfc_iocbq *pIocbOut)
-{
-	unsigned long iflag;
-	struct lpfc_scsi_buf *lpfc_cmd =
-		(struct lpfc_scsi_buf *) pIocbIn->context1;
-
-	spin_lock_irqsave(phba->host->host_lock, iflag);
-	lpfc_free_scsi_buf(lpfc_cmd);
-	spin_unlock_irqrestore(phba->host->host_lock, iflag);
-}
-
-static void
-lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
-				struct lpfc_iocbq *pIocbIn,
-				struct lpfc_iocbq *pIocbOut)
-{
-	struct scsi_cmnd *ml_cmd =
-		((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;
-
-	lpfc_scsi_cmd_iocb_cleanup (phba, pIocbIn, pIocbOut);
-	ml_cmd->host_scribble = NULL;
-}
-
 const char *
 lpfc_info(struct Scsi_Host *host)
 {
@@ -726,43 +701,25 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
-	struct lpfc_scsi_buf *lpfc_cmd = NULL;
-	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
-	int err = 0;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+	int err;
 
-	/*
-	 * The target pointer is guaranteed not to be NULL because the driver
-	 * only clears the device->hostdata field in lpfc_slave_destroy. This
-	 * approach guarantees no further IO calls on this target.
-	 */
-	if (!ndlp) {
-		cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
+	err = fc_remote_port_chkready(rport);
+	if (err) {
+		cmnd->result = err;
 		goto out_fail_command;
 	}
 
 	/*
-	 * A Fibre Channel target is present and functioning only when the node
-	 * state is MAPPED. Any other state is a failure.
+	 * Catch race where our node has transitioned, but the
+	 * transport is still transitioning.
 	 */
-	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
-		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
-		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
-			cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
-			goto out_fail_command;
-		}
-		else if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
-			cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
-			goto out_fail_command;
-		}
-		/*
-		 * The device is most likely recovered and the driver
-		 * needs a bit more time to finish. Ask the midlayer
-		 * to retry.
-		 */
-		goto out_host_busy;
+	if (!ndlp) {
+		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
+		goto out_fail_command;
 	}
-
-	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+	lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
 	if (lpfc_cmd == NULL) {
 		printk(KERN_WARNING "%s: No buffer available - list empty, "
 			"total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
@@ -792,7 +749,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	return 0;
 
  out_host_busy_free_buf:
-	lpfc_free_scsi_buf(lpfc_cmd);
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
 	cmnd->host_scribble = NULL;
  out_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -808,119 +765,92 @@ __lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_hba *phba =
 			(struct lpfc_hba *)cmnd->device->host->hostdata[0];
 	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
-	struct lpfc_iocbq *iocb, *next_iocb;
-	struct lpfc_iocbq *abtsiocb = NULL;
+	struct lpfc_iocbq *iocb;
+	struct lpfc_iocbq *abtsiocb;
 	struct lpfc_scsi_buf *lpfc_cmd;
-	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
 	IOCB_t *cmd, *icmd;
-	unsigned long snum;
-	unsigned int id, lun;
 	unsigned int loop_count = 0;
-	int ret = IOCB_SUCCESS;
+	int ret = SUCCESS;
 
-	/*
-	 * If the host_scribble data area is NULL, then the driver has already
-	 * completed this command, but the midlayer did not see the completion
-	 * before the eh fired. Just return SUCCESS.
-	 */
-	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
-	if (!lpfc_cmd)
-		return SUCCESS;
 
-	/* save these now since lpfc_cmd can be freed */
-	id = lpfc_cmd->pCmd->device->id;
-	lun = lpfc_cmd->pCmd->device->lun;
-	snum = lpfc_cmd->pCmd->serial_number;
+	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
+	BUG_ON(!lpfc_cmd);
 
-	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-		cmd = &iocb->iocb;
-		if (iocb->context1 != lpfc_cmd)
-			continue;
+	/*
+	 * If pCmd field of the corresponding lpfc_scsi_buf structure
+	 * points to a different SCSI command, then the driver has
+	 * already completed this command, but the midlayer did not
+	 * see the completion before the eh fired. Just return
+	 * SUCCESS.
+	 */
+	iocb = &lpfc_cmd->cur_iocbq;
+	if (lpfc_cmd->pCmd != cmnd)
+		goto out;
 
-		list_del_init(&iocb->list);
-		pring->txq_cnt--;
-		if (!iocb->iocb_cmpl) {
-			list_add_tail(&iocb->list, lpfc_iocb_list);
-		}
-		else {
-			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-			lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
-		}
+	BUG_ON(iocb->context1 != lpfc_cmd);
 
+	abtsiocb = lpfc_sli_get_iocbq(phba);
+	if (abtsiocb == NULL) {
+		ret = FAILED;
 		goto out;
 	}
 
-	list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
-	if (abtsiocb == NULL)
-		return FAILED;
-
-	memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
-
 	/*
-	 * The scsi command was not in the txq. Check the txcmplq and if it is
-	 * found, send an abort to the FW.
+	 * The scsi command can not be in txq and it is in flight because the
+	 * pCmd is still pointig at the SCSI command we have to abort. There
+	 * is no need to search the txcmplq. Just send an abort to the FW.
 	 */
-	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
-		if (iocb->context1 != lpfc_cmd)
-			continue;
 
-		iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
-		cmd = &iocb->iocb;
-		icmd = &abtsiocb->iocb;
-		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
-		icmd->un.acxri.abortContextTag = cmd->ulpContext;
-		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
-
-		icmd->ulpLe = 1;
-		icmd->ulpClass = cmd->ulpClass;
-		if (phba->hba_state >= LPFC_LINK_UP)
-			icmd->ulpCommand = CMD_ABORT_XRI_CN;
-		else
-			icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+	cmd = &iocb->iocb;
+	icmd = &abtsiocb->iocb;
+	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+	icmd->un.acxri.abortContextTag = cmd->ulpContext;
+	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
 
-		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
-		if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
-								IOCB_ERROR) {
-			list_add_tail(&abtsiocb->list, lpfc_iocb_list);
-			ret = IOCB_ERROR;
-			break;
-		}
+	icmd->ulpLe = 1;
+	icmd->ulpClass = cmd->ulpClass;
+	if (phba->hba_state >= LPFC_LINK_UP)
+		icmd->ulpCommand = CMD_ABORT_XRI_CN;
+	else
+		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
 
-		/* Wait for abort to complete */
-		while (cmnd->host_scribble)
-		{
-			spin_unlock_irq(phba->host->host_lock);
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(LPFC_ABORT_WAIT*HZ);
-			spin_lock_irq(phba->host->host_lock);
-			if (++loop_count
-			    > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
-				break;
-		}
+	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
+		lpfc_sli_release_iocbq(phba, abtsiocb);
+		ret = FAILED;
+		goto out;
+	}
 
-		if(cmnd->host_scribble) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-					"%d:0748 abort handler timed "
-					"out waiting for abort to "
-					"complete. Data: "
-					"x%x x%x x%x x%lx\n",
-					phba->brd_no, ret, id, lun, snum);
-			cmnd->host_scribble = NULL;
-			iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
-			ret = IOCB_ERROR;
-		}
+	/* Wait for abort to complete */
+	while (lpfc_cmd->pCmd == cmnd)
+	{
+		spin_unlock_irq(phba->host->host_lock);
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(LPFC_ABORT_WAIT*HZ);
+		spin_lock_irq(phba->host->host_lock);
+		if (++loop_count
+		    > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
+			break;
+	}
 
-		break;
+	if (lpfc_cmd->pCmd == cmnd) {
+		ret = FAILED;
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"%d:0748 abort handler timed out waiting for "
+				"abort to complete: ret %#x, ID %d, LUN %d, "
+				"snum %#lx\n",
+				phba->brd_no, ret, cmnd->device->id,
+				cmnd->device->lun, cmnd->serial_number);
 	}
 
  out:
 	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-			"%d:0749 SCSI layer issued abort device "
-			"Data: x%x x%x x%x x%lx\n",
-			phba->brd_no, ret, id, lun, snum);
+			"%d:0749 SCSI layer issued abort device: ret %#x, "
+			"ID %d, LUN %d, snum %#lx\n",
+			phba->brd_no, ret, cmnd->device->id,
+			cmnd->device->lun, cmnd->serial_number);
 
-	return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
+	return ret;
 }
 
 static int
@@ -938,11 +868,8 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 {
 	struct Scsi_Host *shost = cmnd->device->host;
 	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_scsi_buf *lpfc_cmd = NULL;
-	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
-	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
-	struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_iocbq *iocbq, *iocbqrsp;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
 	struct lpfc_nodelist *pnode = rdata->pnode;
 	int ret = FAILED;
@@ -966,7 +893,7 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 		break;
 	}
 
-	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+	lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
 	if (lpfc_cmd == NULL)
 		goto out;
 
@@ -981,18 +908,13 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 	iocbq = &lpfc_cmd->cur_iocbq;
 
 	/* get a buffer for this IOCB command response */
-	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
+	iocbqrsp = lpfc_sli_get_iocbq(phba);
 	if (iocbqrsp == NULL)
 		goto out_free_scsi_buf;
 
-	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));
-
-	iocbq->iocb_flag |= LPFC_IO_POLL;
-	iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
-
-	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
-				&phba->sli.ring[psli->fcp_ring],
-				iocbq, 0, iocbqrsp, 60);
+	ret = lpfc_sli_issue_iocb_wait(phba,
+				&phba->sli.ring[phba->sli.fcp_ring],
+				iocbq, iocbqrsp, lpfc_cmd->timeout);
 	if (ret == IOCB_SUCCESS)
 		ret = SUCCESS;
 
@@ -1027,12 +949,13 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 	}
 
 	if (cnt) {
-		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
 			"%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
 			phba->brd_no, cnt);
+		ret = FAILED;
 	}
 
-	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
+	lpfc_sli_release_iocbq(phba, iocbqrsp);
 
 out_free_scsi_buf:
 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
@@ -1041,7 +964,7 @@ out_free_scsi_buf:
 			phba->brd_no, lpfc_cmd->pCmd->device->id,
 			lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
 			lpfc_cmd->result);
-	lpfc_free_scsi_buf(lpfc_cmd);
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out:
 	return ret;
 }
@@ -1069,10 +992,9 @@ __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 	int ret = FAILED, i, err_count = 0;
 	int cnt, loopcnt;
 	unsigned int midlayer_id = 0;
-	struct lpfc_scsi_buf * lpfc_cmd = NULL;
-	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+	struct lpfc_scsi_buf * lpfc_cmd;
 
-	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+	lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
 	if (lpfc_cmd == NULL)
 		goto out;
 
@@ -1136,10 +1058,12 @@ __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 				phba->brd_no, cnt, i);
 	}
 
-	if (!err_count)
+	if (cnt == 0)
 		ret = SUCCESS;
+	else
+		ret = FAILED;
 
-	lpfc_free_scsi_buf(lpfc_cmd);
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
 	lpfc_printf_log(phba,
 			KERN_ERR,
 			LOG_FCP,
@@ -1163,66 +1087,47 @@ static int
 lpfc_slave_alloc(struct scsi_device *sdev)
 {
 	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
-	struct lpfc_nodelist *ndlp = NULL;
-	int match = 0;
 	struct lpfc_scsi_buf *scsi_buf = NULL;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
 	uint32_t total = 0, i;
 	uint32_t num_to_alloc = 0;
 	unsigned long flags;
-	struct list_head *listp;
-	struct list_head *node_list[6];
-
-	/*
-	 * Store the target pointer in the scsi_device hostdata pointer provided
-	 * the driver has already discovered the target id.
-	 */
-
-	/* Search the nlp lists other than unmap_list for this target ID */
-	node_list[0] = &phba->fc_npr_list;
-	node_list[1] = &phba->fc_nlpmap_list;
-	node_list[2] = &phba->fc_prli_list;
-	node_list[3] = &phba->fc_reglogin_list;
-	node_list[4] = &phba->fc_adisc_list;
-	node_list[5] = &phba->fc_plogi_list;
-
-	for (i = 0; i < 6 && !match; i++) {
-		listp = node_list[i];
-		if (list_empty(listp))
-			continue;
-		list_for_each_entry(ndlp, listp, nlp_listp) {
-			if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
-				match = 1;
-				break;
-			}
-		}
-	}
 
-	if (!match)
+	if (!rport || fc_remote_port_chkready(rport))
 		return -ENXIO;
 
-	sdev->hostdata = ndlp->rport->dd_data;
+	sdev->hostdata = rport->dd_data;
 
 	/*
 	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
 	 * available list of scsi buffers. Don't allocate more than the
-	 * HBA limit conveyed to the midlayer via the host structure. Note
-	 * that this list of scsi bufs exists for the lifetime of the driver.
+	 * HBA limit conveyed to the midlayer via the host structure. The
+	 * formula accounts for the lun_queue_depth + error handlers + 1
+	 * extra. This list of scsi bufs exists for the lifetime of the driver.
 	 */
 	total = phba->total_scsi_bufs;
-	num_to_alloc = LPFC_CMD_PER_LUN;
+	num_to_alloc = phba->cfg_lun_queue_depth + 2;
 	if (total >= phba->cfg_hba_queue_depth) {
-		printk(KERN_WARNING "%s, At config limitation of "
-			"%d allocated scsi_bufs\n", __FUNCTION__, total);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+				"%d:0704 At limitation of %d preallocated "
+				"command buffers\n", phba->brd_no, total);
 		return 0;
 	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+				"%d:0705 Allocation request of %d command "
+				"buffers will exceed max of %d. Reducing "
+				"allocation request to %d.\n", phba->brd_no,
+				num_to_alloc, phba->cfg_hba_queue_depth,
+				(phba->cfg_hba_queue_depth - total));
 		num_to_alloc = phba->cfg_hba_queue_depth - total;
 	}
 
 	for (i = 0; i < num_to_alloc; i++) {
-		scsi_buf = lpfc_get_scsi_buf(phba);
+		scsi_buf = lpfc_new_scsi_buf(phba);
 		if (!scsi_buf) {
-			printk(KERN_ERR "%s, failed to allocate "
-				"scsi_buf\n", __FUNCTION__);
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"%d:0706 Failed to allocate command "
+				"buffer\n", phba->brd_no);
 			break;
 		}
 