path: root/drivers/scsi/lpfc/lpfc_scsi.c
author     James Smart <James.Smart@Emulex.Com>                    2009-05-22 14:51:39 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2009-06-08 12:21:29 -0400
commit     da0436e915a5c17ee79e72c1bf978a4ebb1cbf4d (patch)
tree       7784646b7627117fa7849a901c85294fae905505  /drivers/scsi/lpfc/lpfc_scsi.c
parent     3772a99175f5378b5001e8da364341a8b8226a4a (diff)
[SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - Base Support
Adds new hardware and interface definitions. Adds new interface routines - utilizing the reorganized layout of the driver. Adds SLI-4 specific functions for attachment, initialization, teardown, etc.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c  492
1 file changed, 492 insertions, 0 deletions
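Before the diff itself, one orientation point: the final hunk below extends lpfc_scsi_api_table_setup() with a LPFC_PCI_DEV_OC case, so the new SLI-4 (_s4) routines are reached through the same per-HBA function pointers that already dispatch to the SLI-3 (_s3) routines. A minimal, self-contained sketch of that dispatch-table pattern follows; the names used here (dev_grp, scsi_ops, new_buf_s3/new_buf_s4, and so on) are illustrative stand-ins, not the driver's actual structures.

/*
 * Minimal sketch of the per-SLI-revision dispatch pattern: each device
 * group fills the same set of function pointers with its own _s3/_s4
 * implementations, and the rest of the code calls through the pointers.
 * Names are illustrative only.
 */
#include <stdio.h>

enum dev_grp { DEV_LP /* SLI-3 style */, DEV_OC /* SLI-4 style */ };

struct scsi_ops {
	int  (*new_scsi_buf)(int num_to_alloc);
	void (*release_scsi_buf)(int idx);
};

static int  new_buf_s3(int n)  { printf("s3 alloc %d\n", n); return n; }
static int  new_buf_s4(int n)  { printf("s4 alloc %d\n", n); return n; }
static void rel_buf_s3(int i)  { printf("s3 release %d\n", i); }
static void rel_buf_s4(int i)  { printf("s4 release %d\n", i); }

/* analogous in spirit to lpfc_scsi_api_table_setup(): pick a table by group */
static int scsi_api_table_setup(struct scsi_ops *ops, enum dev_grp grp)
{
	switch (grp) {
	case DEV_LP:
		ops->new_scsi_buf = new_buf_s3;
		ops->release_scsi_buf = rel_buf_s3;
		break;
	case DEV_OC:
		ops->new_scsi_buf = new_buf_s4;
		ops->release_scsi_buf = rel_buf_s4;
		break;
	default:
		return -1;	/* unknown device group */
	}
	return 0;
}

int main(void)
{
	struct scsi_ops ops;

	if (scsi_api_table_setup(&ops, DEV_OC) == 0) {
		ops.new_scsi_buf(4);	/* callers never name _s3/_s4 directly */
		ops.release_scsi_buf(0);
	}
	return 0;
}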
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a226c053c0f4..9af2db355bc6 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -31,8 +31,10 @@
 #include <scsi/scsi_transport_fc.h>
 
 #include "lpfc_version.h"
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -57,6 +59,8 @@ static char *dif_op_str[] = {
57 "SCSI_PROT_READ_CONVERT", 59 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT" 60 "SCSI_PROT_WRITE_CONVERT"
59}; 61};
62static void
63lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
60 64
61static void 65static void
62lpfc_debug_save_data(struct scsi_cmnd *cmnd) 66lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@ -565,6 +569,8 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		}
 		iocb->ulpClass = CLASS3;
 		psb->status = IOSTAT_SUCCESS;
+		/* Put it back into the SCSI buffer list */
+		lpfc_release_scsi_buf_s4(phba, psb);
 
 	}
 
@@ -572,6 +578,271 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 }
 
 /**
+ * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the fcp xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * FCP aborted xri.
+ **/
+void
+lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
+			  struct sli4_wcqe_xri_aborted *axri)
+{
+	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	struct lpfc_scsi_buf *psb, *next_psb;
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
+	list_for_each_entry_safe(psb, next_psb,
+		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+		if (psb->cur_iocbq.sli4_xritag == xri) {
+			list_del(&psb->list);
+			psb->status = IOSTAT_SUCCESS;
+			spin_unlock_irqrestore(
+				&phba->sli4_hba.abts_scsi_buf_list_lock,
+				iflag);
+			lpfc_release_scsi_buf_s4(phba, psb);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
+			       iflag);
+}
+
+/**
+ * lpfc_sli4_repost_scsi_sgl_list - Repsot the Scsi buffers sgl pages as block
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of scsi buffers that have been allocated and
+ * repost them to the HBA by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
+ * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+int
+lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *psb;
+	int index, status, bcnt = 0, rcnt = 0, rc = 0;
+	LIST_HEAD(sblist);
+
+	for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
+		psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
+		if (psb) {
+			/* Remove from SCSI buffer list */
+			list_del(&psb->list);
+			/* Add it to a local SCSI buffer list */
+			list_add_tail(&psb->list, &sblist);
+			if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+				bcnt = rcnt;
+				rcnt = 0;
+			}
+		} else
+			/* A hole present in the XRI array, need to skip */
+			bcnt = rcnt;
+
+		if (index == phba->sli4_hba.scsi_xri_cnt - 1)
+			/* End of XRI array for SCSI buffer, complete */
+			bcnt = rcnt;
+
+		/* Continue until collect up to a nembed page worth of sgls */
+		if (bcnt == 0)
+			continue;
+		/* Now, post the SCSI buffer list sgls as a block */
+		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+		/* Reset SCSI buffer count for next round of posting */
+		bcnt = 0;
+		while (!list_empty(&sblist)) {
+			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
+					 list);
+			if (status) {
+				/* Put this back on the abort scsi list */
+				psb->status = IOSTAT_LOCAL_REJECT;
+				psb->result = IOERR_ABORT_REQUESTED;
+				rc++;
+			} else
+				psb->status = IOSTAT_SUCCESS;
+			/* Put it back into the SCSI buffer list */
+			lpfc_release_scsi_buf_s4(phba, psb);
+		}
+	}
+	return rc;
+}
+
+/**
+ * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
+ * @vport: The virtual port for which this call being executed.
+ * @num_to_allocate: The requested number of buffers to allocate.
+ *
+ * This routine allocates a scsi buffer for device with SLI-4 interface spec,
+ * the scsi buffer contains all the necessary information needed to initiate
+ * a SCSI I/O.
+ *
+ * Return codes:
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static int
+lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_scsi_buf *psb;
+	struct sli4_sge *sgl;
+	IOCB_t *iocb;
+	dma_addr_t pdma_phys_fcp_cmd;
+	dma_addr_t pdma_phys_fcp_rsp;
+	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
+	uint16_t iotag, last_xritag = NO_XRI;
+	int status = 0, index;
+	int bcnt;
+	int non_sequential_xri = 0;
+	int rc = 0;
+	LIST_HEAD(sblist);
+
+	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+		if (!psb)
+			break;
+
+		/*
+		 * Get memory from the pci pool to map the virt space to pci bus
+		 * space for an I/O. The DMA buffer includes space for the
+		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
+		 * necessary to support the sg_tablesize.
+		 */
+		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+					   GFP_KERNEL, &psb->dma_handle);
+		if (!psb->data) {
+			kfree(psb);
+			break;
+		}
+
+		/* Initialize virtual ptrs to dma_buf region. */
+		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+
+		/* Allocate iotag for psb->cur_iocbq. */
+		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+		if (iotag == 0) {
+			kfree(psb);
+			break;
+		}
+
+		psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
+		if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
+			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+				      psb->data, psb->dma_handle);
+			kfree(psb);
+			break;
+		}
+		if (last_xritag != NO_XRI
+			&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
+			non_sequential_xri = 1;
+		} else
+			list_add_tail(&psb->list, &sblist);
+		last_xritag = psb->cur_iocbq.sli4_xritag;
+
+		index = phba->sli4_hba.scsi_xri_cnt++;
+		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
+		psb->fcp_bpl = psb->data;
+		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
+			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
+					sizeof(struct fcp_cmnd));
+
+		/* Initialize local short-hand pointers. */
+		sgl = (struct sli4_sge *)psb->fcp_bpl;
+		pdma_phys_bpl = psb->dma_handle;
+		pdma_phys_fcp_cmd =
+			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
+			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
+
+		/*
+		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
+		 * are sg list bdes. Initialize the first two and leave the
+		 * rest for queuecommand.
+		 */
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+		bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->word3 = cpu_to_le32(sgl->word3);
+		sgl++;
+
+		/* Setup the physical region for the FCP RSP */
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+		bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->word3 = cpu_to_le32(sgl->word3);
+
+		/*
+		 * Since the IOCB for the FCP I/O is built into this
+		 * lpfc_scsi_buf, initialize it with all known data now.
+		 */
+		iocb = &psb->cur_iocbq.iocb;
+		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+		/* setting the BLP size to 2 * sizeof BDE may not be correct.
+		 * We are setting the bpl to point to out sgl. An sgl's
+		 * entries are 16 bytes, a bpl entries are 12 bytes.
+		 */
+		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
+		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
+		iocb->ulpBdeCount = 1;
+		iocb->ulpLe = 1;
+		iocb->ulpClass = CLASS3;
+		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
+		else
+			pdma_phys_bpl1 = 0;
+		psb->dma_phys_bpl = pdma_phys_bpl;
+		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
+		if (non_sequential_xri) {
+			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
+						    pdma_phys_bpl1,
+						    psb->cur_iocbq.sli4_xritag);
+			if (status) {
+				/* Put this back on the abort scsi list */
+				psb->status = IOSTAT_LOCAL_REJECT;
+				psb->result = IOERR_ABORT_REQUESTED;
+				rc++;
+			} else
+				psb->status = IOSTAT_SUCCESS;
+			/* Put it back into the SCSI buffer list */
+			lpfc_release_scsi_buf_s4(phba, psb);
+			break;
+		}
+	}
+	if (bcnt) {
+		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+		/* Reset SCSI buffer count for next round of posting */
+		while (!list_empty(&sblist)) {
+			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
+					 list);
+			if (status) {
+				/* Put this back on the abort scsi list */
+				psb->status = IOSTAT_LOCAL_REJECT;
+				psb->result = IOERR_ABORT_REQUESTED;
+				rc++;
+			} else
+				psb->status = IOSTAT_SUCCESS;
+			/* Put it back into the SCSI buffer list */
+			lpfc_release_scsi_buf_s4(phba, psb);
+		}
+	}
+
+	return bcnt + non_sequential_xri - rc;
+}
+
+/**
  * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator
  * @vport: The virtual port for which this call being executed.
  * @num_to_allocate: The requested number of buffers to allocate.
@@ -638,6 +909,39 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 }
 
 /**
+ * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases @psb scsi buffer by adding it to tail of @phba
+ * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
+ * and cannot be reused for at least RA_TOV amount of time if it was
+ * aborted.
+ **/
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	unsigned long iflag = 0;
+
+	if (psb->status == IOSTAT_LOCAL_REJECT
+		&& psb->result == IOERR_ABORT_REQUESTED) {
+		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
+					iflag);
+		psb->pCmd = NULL;
+		list_add_tail(&psb->list,
+			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
+					iflag);
+	} else {
+
+		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+		psb->pCmd = NULL;
+		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
+		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	}
+}
+
+/**
  * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
  * @phba: The Hba for which this call is being executed.
  * @psb: The scsi buffer which is being released.
@@ -1455,6 +1759,115 @@ out:
 }
 
 /**
+ * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
+ * field of @lpfc_cmd for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ * 	1 - Error
+ * 	0 - Success
+ **/
+static int
+lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct scatterlist *sgel = NULL;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	dma_addr_t physaddr;
+	uint32_t num_bde = 0;
+	uint32_t dma_len;
+	uint32_t dma_offset = 0;
+	int nseg;
+
+	/*
+	 * There are three possibilities here - use scatter-gather segment, use
+	 * the single mapping, or neither. Start the lpfc command prep by
+	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+	 * data bde entry.
+	 */
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this a count of dma-mappings used to map the use_sg
+		 * pages. They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+
+		nseg = scsi_dma_map(scsi_cmnd);
+		if (unlikely(!nseg))
+			return 1;
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl += 1;
+
+		lpfc_cmd->seg_cnt = nseg;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			printk(KERN_ERR "%s: Too many sg segments from "
+			       "dma_map_sg. Config %d, seg_cnt %d\n",
+			       __func__, phba->cfg_sg_seg_cnt,
+			       lpfc_cmd->seg_cnt);
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		/*
+		 * The driver established a maximum scatter-gather segment count
+		 * during probe that limits the number of sg elements in any
+		 * single scsi command. Just run through the seg_cnt and format
+		 * the sge's.
+		 * When using SLI-3 the driver will try to fit all the BDEs into
+		 * the IOCB. If it can't then the BDEs get added to a BPL as it
+		 * does for SLI-2 mode.
+		 */
+		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
+			physaddr = sg_dma_address(sgel);
+			dma_len = sg_dma_len(sgel);
+			bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
+			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+			if ((num_bde + 1) == nseg)
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+			else
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->word3 = cpu_to_le32(sgl->word3);
+			dma_offset += dma_len;
+			sgl++;
+		}
+	} else {
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+	}
+
+	/*
+	 * Finish initializing those IOCB fields that are dependent on the
+	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
+	 * explicitly reinitialized.
+	 * all iocb memory resources are reused.
+	 */
+	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+	return 0;
+}
+
+/**
  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
  * @phba: The Hba for which this call is being executed.
  * @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -1590,6 +2003,22 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 }
 
 /**
+ * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine does DMA un-mapping of scatter gather list of scsi command
+ * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
+ * remove the sgl for this scsi buffer then we will do it here. For now
+ * we should be able to just call the sli3 unprep routine.
+ **/
+static void
+lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	lpfc_scsi_unprep_dma_buf_s3(phba, psb);
+}
+
+/**
  * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
  * @phba: The Hba for which this call is being executed.
  * @psb: The scsi buffer which is going to be un-mapped.
@@ -2129,6 +2558,29 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 }
 
 /**
+ * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP infor unit for SLI4 dev
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes fcp_cmnd and iocb data structure from scsi command
+ * to transfer for device with SLI4 interface spec.
+ **/
+static void
+lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+		       struct lpfc_nodelist *pnode)
+{
+	/*
+	 * The prep cmnd routines do not touch the sgl or its
+	 * entries. We may not have to do anything different.
+	 * I will leave this function in place until we can
+	 * run some IO through the driver and determine if changes
+	 * are needed.
+	 */
+	return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
+}
+
+/**
  * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
  * @vport: The virtual port for which this call is being executed.
  * @lpfc_cmd: The scsi command which needs to send.
@@ -2209,6 +2661,37 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
 }
 
 /**
+ * lpfc_scsi_prep_task_mgmt_cmnd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ * 	0 - Error
+ * 	1 - Success
+ **/
+static int
+lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
+				struct lpfc_scsi_buf *lpfc_cmd,
+				unsigned int lun,
+				uint8_t task_mgmt_cmd)
+{
+	/*
+	 * The prep cmnd routines do not touch the sgl or its
+	 * entries. We may not have to do anything different.
+	 * I will leave this function in place until we can
+	 * run some IO through the driver and determine if changes
+	 * are needed.
+	 */
+	return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
+					       task_mgmt_cmd);
+}
+
+/**
  * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info
  * @vport: The virtual port for which this call is being executed.
  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -2257,6 +2740,15 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 			lpfc_scsi_prep_task_mgmt_cmd_s3;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
 		break;
+	case LPFC_PCI_DEV_OC:
+		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
+		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
+		phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
+		phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
+		phba->lpfc_scsi_prep_task_mgmt_cmd =
+			lpfc_scsi_prep_task_mgmt_cmd_s4;
+		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1418 Invalid HBA PCI-device group: 0x%x\n",