author    Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 22:01:32 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 22:01:32 -0400
commit    3151367f8778a1789d6f6e6f6c642681b6cd6d64 (patch)
tree      1869d5429a25abd994ae94079808b8db060ec6f3 /drivers/scsi/lpfc
parent    16642a2e7be23bbda013fc32d8f6c68982eab603 (diff)
parent    fe709ed827d370e6b0c0a9f9456da1c22bdcd118 (diff)
Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James Bottomley:
 "This is a large set of updates, mostly for drivers (qla2xxx [including
  support for new 83xx based card], qla4xxx, mpt2sas, bfa, zfcp, hpsa,
  be2iscsi, isci, lpfc, ipr, ibmvfc, ibmvscsi, megaraid_sas).  There's
  also a rework for tape adding virtually unlimited numbers of tape
  drives plus a set of dif fixes for sd and a fix for a live lock on hot
  remove of SCSI devices.

  This round includes a signed tag pull of isci-for-3.6

  Signed-off-by: James Bottomley <JBottomley@Parallels.com>"

Fix up trivial conflict in drivers/scsi/qla2xxx/qla_nx.c due to new PCI
helper function use in a function that was removed by this pull.

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (198 commits)
  [SCSI] st: remove st_mutex
  [SCSI] sd: Ensure we correctly disable devices with unknown protection type
  [SCSI] hpsa: gen8plus Smart Array IDs
  [SCSI] qla4xxx: Update driver version to 5.03.00-k1
  [SCSI] qla4xxx: Disable generating pause frames for ISP83XX
  [SCSI] qla4xxx: Fix double clearing of risc_intr for ISP83XX
  [SCSI] qla4xxx: IDC implementation for Loopback
  [SCSI] qla4xxx: update copyrights in LICENSE.qla4xxx
  [SCSI] qla4xxx: Fix panic while rmmod
  [SCSI] qla4xxx: Fail probe_adapter if IRQ allocation fails
  [SCSI] qla4xxx: Prevent MSI/MSI-X falling back to INTx for ISP82XX
  [SCSI] qla4xxx: Update idc reg in case of PCI AER
  [SCSI] qla4xxx: Fix double IDC locking in qla4_8xxx_error_recovery
  [SCSI] qla4xxx: Clear interrupt while unloading driver for ISP83XX
  [SCSI] qla4xxx: Print correct IDC version
  [SCSI] qla4xxx: Added new mbox cmd to pass driver version to FW
  [SCSI] scsi_dh_alua: Enable STPG for unavailable ports
  [SCSI] scsi_remove_target: fix softlockup regression on hot remove
  [SCSI] ibmvscsi: Fix host config length field overflow
  [SCSI] ibmvscsi: Remove backend abstraction
  ...
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h            |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       |  76
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c        |   8
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h       |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c         |  16
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c    | 494
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h    |  72
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h       |  10
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c        | 186
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c    |  98
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h         |  18
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h        |  42
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       | 839
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c       |  38
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c        |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  | 127
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c       | 144
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c        | 621
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h        |  42
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h       |  63
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h    |   7
21 files changed, 1732 insertions, 1187 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a65c05a8d488..a184c2443a64 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -73,6 +73,8 @@ struct lpfc_sli2_slim;
 #define LPFC_HB_MBOX_INTERVAL   5	/* Heart beat interval in seconds. */
 #define LPFC_HB_MBOX_TIMEOUT    30	/* Heart beat timeout in seconds. */
 
+#define LPFC_LOOK_AHEAD_OFF	0	/* Look ahead logic is turned off */
+
 /* Error Attention event polling interval */
 #define LPFC_ERATT_POLL_INTERVAL	5 /* EATT poll interval in seconds */
 
@@ -684,6 +686,7 @@ struct lpfc_hba {
 #define LPFC_FCF_FOV 1		/* Fast fcf failover */
 #define LPFC_FCF_PRIORITY 2	/* Priority fcf failover */
 	uint32_t cfg_fcf_failover_policy;
+	uint32_t cfg_fcp_io_sched;
 	uint32_t cfg_cr_delay;
 	uint32_t cfg_cr_count;
 	uint32_t cfg_multi_ring_support;
@@ -695,6 +698,7 @@ struct lpfc_hba {
 	uint32_t cfg_fcp_imax;
 	uint32_t cfg_fcp_wq_count;
 	uint32_t cfg_fcp_eq_count;
+	uint32_t cfg_fcp_io_channel;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_prot_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
@@ -732,7 +736,7 @@ struct lpfc_hba {
 	uint32_t hbq_count;		/* Count of configured HBQs */
 	struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
 
-	uint32_t fcp_qidx;		/* next work queue to post work to */
+	atomic_t fcp_qidx;		/* next work queue to post work to */
 
 	unsigned long pci_bar0_map;	/* Physical address for PCI BAR0 */
 	unsigned long pci_bar1_map;	/* Physical address for PCI BAR1 */
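[Annotation] The lpfc.h hunk above switches fcp_qidx from uint32_t to atomic_t so the "next work queue" counter can advance without holding the host lock. A minimal sketch of that lock-free round-robin selection, assuming a channel count like the new cfg_fcp_io_channel field (struct and helper names below are hypothetical, not from the patch):

	#include <linux/atomic.h>
	#include <linux/types.h>

	struct demo_hba {
		atomic_t fcp_qidx;	/* next work queue to post work to */
		u32 io_channels;	/* number of FCP EQ/CQ/WQ I/O channels */
	};

	static u32 demo_next_channel(struct demo_hba *hba)
	{
		/* atomic_inc_return() serializes concurrent posters without
		 * a lock; the modulo wraps back onto a valid channel index.
		 */
		return atomic_inc_return(&hba->fcp_qidx) % hba->io_channels;
	}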
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index adef5bb2100e..b032562aa0d9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3643,18 +3643,25 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
 	struct lpfc_hba *phba = vport->phba;
 	int val = 0, i;
 
+	/* fcp_imax is only valid for SLI4 */
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return -EINVAL;
+
 	/* Sanity check on user data */
 	if (!isdigit(buf[0]))
 		return -EINVAL;
 	if (sscanf(buf, "%i", &val) != 1)
 		return -EINVAL;
 
-	/* Value range is [636,651042] */
-	if (val < LPFC_MIM_IMAX || val > LPFC_DMULT_CONST)
+	/*
+	 * Value range for the HBA is [5000,5000000]
+	 * The value for each EQ depends on how many EQs are configured.
+	 */
+	if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)
 		return -EINVAL;
 
 	phba->cfg_fcp_imax = (uint32_t)val;
-	for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY)
+	for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY)
 		lpfc_modify_fcp_eq_delay(phba, i);
 
 	return strlen(buf);
@@ -3662,13 +3669,14 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
 
 /*
 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
+# for the HBA.
 #
-# Value range is [636,651042]. Default value is 10000.
+# Value range is [5,000 to 5,000,000]. Default value is 50,000.
 */
-static int lpfc_fcp_imax = LPFC_FP_DEF_IMAX;
+static int lpfc_fcp_imax = LPFC_DEF_IMAX;
 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(lpfc_fcp_imax,
-	    "Set the maximum number of fast-path FCP interrupts per second");
+	    "Set the maximum number of FCP interrupts per second per HBA");
 lpfc_param_show(fcp_imax)
 
 /**
@@ -3687,14 +3695,19 @@ lpfc_param_show(fcp_imax)
 static int
 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
 {
-	if (val >= LPFC_MIM_IMAX && val <= LPFC_DMULT_CONST) {
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		phba->cfg_fcp_imax = 0;
+		return 0;
+	}
+
+	if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) {
 		phba->cfg_fcp_imax = val;
 		return 0;
 	}
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"3016 fcp_imax: %d out of range, using default\n", val);
-	phba->cfg_fcp_imax = LPFC_FP_DEF_IMAX;
+	phba->cfg_fcp_imax = LPFC_DEF_IMAX;
 
 	return 0;
 }
@@ -3765,6 +3778,16 @@ static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
 
 /*
+# lpfc_fcp_io_sched: Determine scheduling algrithmn for issuing FCP cmds
+# range is [0,1]. Default value is 0.
+# For [0], FCP commands are issued to Work Queues ina round robin fashion.
+# For [1], FCP commands are issued to a Work Queue associated with the
+#          current CPU.
+*/
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
+	     "issuing commands [0] - Round Robin, [1] - Current CPU");
+
+/*
 # lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing
 # cr_delay (msec) or cr_count outstanding commands. cr_delay can take
 # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
@@ -3844,21 +3867,33 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
 
 /*
 # lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
+# This parameter is ignored and will eventually be depricated
 #
-# Value range is [1,31]. Default value is 4.
+# Value range is [1,7]. Default value is 4.
 */
-LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
+LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
+	    LPFC_FCP_IO_CHAN_MAX,
 	    "Set the number of fast-path FCP work queues, if possible");
 
 /*
-# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
+# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
 #
-# Value range is [1,7]. Default value is 1.
+# Value range is [1,7]. Default value is 4.
 */
-LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
+LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
+	    LPFC_FCP_IO_CHAN_MAX,
 	    "Set the number of fast-path FCP event queues, if possible");
 
 /*
+# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
+#
+# Value range is [1,7]. Default value is 4.
+*/
+LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
+	    LPFC_FCP_IO_CHAN_MAX,
+	    "Set the number of FCP I/O channels");
+
+/*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
 #       0  = HBA resets disabled
 #       1  = HBA resets enabled (default)
@@ -3883,6 +3918,17 @@ LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
 /*
+# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
+# 0  = disabled (default)
+# 1  = enabled
+# Value range is [0,1]. Default value is 0.
+*/
+unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
+
+module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
+MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
+
+/*
 # lpfc_prot_mask: i
 #	- Bit mask of host protection capabilities used to register with the
 #	  SCSI mid-layer
@@ -3976,6 +4022,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_topology,
 	&dev_attr_lpfc_scan_down,
 	&dev_attr_lpfc_link_speed,
+	&dev_attr_lpfc_fcp_io_sched,
 	&dev_attr_lpfc_cr_delay,
 	&dev_attr_lpfc_cr_count,
 	&dev_attr_lpfc_multi_ring_support,
@@ -4002,6 +4049,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_fcp_imax,
 	&dev_attr_lpfc_fcp_wq_count,
 	&dev_attr_lpfc_fcp_eq_count,
+	&dev_attr_lpfc_fcp_io_channel,
 	&dev_attr_lpfc_enable_bg,
 	&dev_attr_lpfc_soft_wwnn,
 	&dev_attr_lpfc_soft_wwpn,
@@ -4964,6 +5012,7 @@ struct fc_function_template lpfc_vport_transport_functions = {
 void
 lpfc_get_cfgparam(struct lpfc_hba *phba)
 {
+	lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
 	lpfc_cr_delay_init(phba, lpfc_cr_delay);
 	lpfc_cr_count_init(phba, lpfc_cr_count);
 	lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
@@ -4980,6 +5029,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
 	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
 	lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
+	lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
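[Annotation] A pattern worth noting across the lpfc_attr.c hunks: every module parameter is paired with an *_init() helper that clamps the boot-time value into the documented range and logs a fallback to the default otherwise, and the reworked fcp_imax handlers additionally reject the parameter outright on non-SLI4 hardware. A self-contained sketch of that clamp-or-default idiom (all names below are illustrative, not the driver's macros):

	#include <linux/module.h>
	#include <linux/kernel.h>

	#define DEMO_MIN_IMAX	5000
	#define DEMO_MAX_IMAX	5000000
	#define DEMO_DEF_IMAX	50000

	static int demo_imax = DEMO_DEF_IMAX;
	module_param(demo_imax, int, 0644);
	MODULE_PARM_DESC(demo_imax, "Interrupts per second per HBA (demo)");

	static int demo_imax_init(int val)
	{
		/* Accept in-range values; otherwise warn and fall back. */
		if (val >= DEMO_MIN_IMAX && val <= DEMO_MAX_IMAX)
			return val;
		pr_err("demo_imax: %d out of range, using default\n", val);
		return DEMO_DEF_IMAX;
	}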
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 253d9a857346..f7368eb80415 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -195,7 +195,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 
 	if (rsp->ulpStatus) {
 		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & 0xff) {
+			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
 			case IOERR_SEQUENCE_TIMEOUT:
 				rc = -ETIMEDOUT;
 				break;
@@ -1234,7 +1234,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 
 	if (rsp->ulpStatus) {
 		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & 0xff) {
+			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
 			case IOERR_SEQUENCE_TIMEOUT:
 				rc = -ETIMEDOUT;
 				break;
@@ -1714,6 +1714,8 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
 			phba->sli4_hba.lnk_info.lnk_no);
 
 	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
+	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
 	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
 	       phba->sli4_hba.lnk_info.lnk_no);
 	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
@@ -4796,7 +4798,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
 	menlo_resp->xri = rsp->ulpContext;
 	if (rsp->ulpStatus) {
 		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & 0xff) {
+			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
 			case IOERR_SEQUENCE_TIMEOUT:
 				rc = -ETIMEDOUT;
 				break;
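[Annotation] All three lpfc_bsg.c hunks make the same correction: ulpWord[4] carries an IOERR parameter code plus unrelated high bits, and the old 0xff mask was too narrow to compare against the IOERR_* constants reliably. A sketch of the mask-then-compare dispatch under an assumed mask width (IOERR_PARAM_MASK itself is defined in lpfc headers outside this diff):

	#include <errno.h>

	#define DEMO_IOERR_PARAM_MASK	0x00ffffff	/* assumed width */
	#define DEMO_IOERR_SEQ_TIMEOUT	2		/* placeholder code */

	static int demo_classify(unsigned int ulp_word4)
	{
		/* Mask first, then compare the extracted parameter code. */
		switch (ulp_word4 & DEMO_IOERR_PARAM_MASK) {
		case DEMO_IOERR_SEQ_TIMEOUT:
			return -ETIMEDOUT;
		default:
			return -EACCES;
		}
	}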
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 8a2a514a2553..e470c489de07 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -196,8 +196,7 @@ irqreturn_t lpfc_sli_intr_handler(int, void *);
 irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
 irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_intr_handler(int, void *);
-irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
-irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
 
 void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -391,6 +390,7 @@ extern spinlock_t pgcnt_lock;
 extern unsigned int pgcnt;
 extern unsigned int lpfc_prot_mask;
 extern unsigned char lpfc_prot_guard;
+extern unsigned int lpfc_fcp_look_ahead;
 
 /* Interface exported by fabric iocb scheduler */
 void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
@@ -457,6 +457,8 @@ int lpfc_sli4_queue_create(struct lpfc_hba *);
 void lpfc_sli4_queue_destroy(struct lpfc_hba *);
 void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
 				struct sli4_wcqe_xri_aborted *);
+void lpfc_sli_abts_recover_port(struct lpfc_vport *,
+				struct lpfc_nodelist *);
 int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
 int lpfc_issue_reg_vfi(struct lpfc_vport *);
 int lpfc_issue_unreg_vfi(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 93e96b3c9097..7ffabb7e3afa 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -104,7 +104,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
 		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 	} else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-		   ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
+		   ((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+		   IOERR_RCV_BUFFER_WAITING)) {
 		/* Not enough posted buffers; Try posting more buffers */
 		phba->fc_stat.NoRcvBuf++;
 		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
@@ -633,7 +634,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	/* Check for retry */
 	if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
 		if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
-		    irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)
+		    (irsp->un.ulpWord[4] && IOERR_PARAM_MASK) !=
+		    IOERR_NO_RESOURCES)
 			vport->fc_ns_retry++;
 
 		/* CT command is being retried */
@@ -783,7 +785,9 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
 		retry = 1;
 		if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (irsp->un.ulpWord[4]) {
+			switch ((irsp->un.ulpWord[4] &
+				IOERR_PARAM_MASK)) {
+
 			case IOERR_NO_RESOURCES:
 				/* We don't increment the retry
 				 * count for this case.
@@ -908,8 +912,10 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		 cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
 
 	if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-	    ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
-	     (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
+	    (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+	      IOERR_SLI_DOWN) ||
+	     ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+	      IOERR_SLI_ABORTED)))
 		goto out;
 
 	retry = cmdiocb->retry;
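[Annotation] One line in the gid_ft hunk above deserves a careful read: the retry test is written (irsp->un.ulpWord[4] && IOERR_PARAM_MASK), a logical AND, while every neighboring hunk uses the bitwise & to extract the error code before comparing. The diff is reproduced as committed; if you adapt this pattern elsewhere, the bitwise form below is the one that matches the surrounding code's intent (helper name is illustrative):

	/* '&&' collapses the word to 0 or 1 before the comparison;
	 * '&' extracts the IOERR parameter field, which is what the
	 * IOERR_* constants are meant to be compared against.
	 */
	static inline int demo_is_ioerr(unsigned int word4,
					unsigned int mask,
					unsigned int code)
	{
		return (word4 & mask) == code;
	}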
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 3217d63ed282..f63f5ff7f274 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -490,9 +490,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
 		len +=  snprintf(buf+len, size-len,
 		"Ring %d: CMD GetInx:%d (Max:%d Next:%d "
 		"Local:%d flg:x%x)  RSP PutInx:%d Max:%d\n",
-		i, pgpp->cmdGetInx, pring->numCiocb,
-		pring->next_cmdidx, pring->local_getidx,
-		pring->flag, pgpp->rspPutInx, pring->numRiocb);
+		i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb,
+		pring->sli.sli3.next_cmdidx,
+		pring->sli.sli3.local_getidx,
+		pring->flag, pgpp->rspPutInx,
+		pring->sli.sli3.numRiocb);
 	}
 
 	if (phba->sli_rev <= LPFC_SLI_REV3) {
@@ -557,6 +559,9 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 		case NLP_STE_PRLI_ISSUE:
 			statep = "PRLI  ";
 			break;
+		case NLP_STE_LOGO_ISSUE:
+			statep = "LOGO  ";
+			break;
 		case NLP_STE_UNMAPPED_NODE:
 			statep = "UNMAP ";
 			break;
@@ -581,8 +586,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
581 "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", 586 "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
582 *name, *(name+1), *(name+2), *(name+3), 587 *name, *(name+1), *(name+2), *(name+3),
583 *(name+4), *(name+5), *(name+6), *(name+7)); 588 *(name+4), *(name+5), *(name+6), *(name+7));
584 len += snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ", 589 if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
585 ndlp->nlp_rpi, ndlp->nlp_flag); 590 len += snprintf(buf+len, size-len, "RPI:%03d ",
591 ndlp->nlp_rpi);
592 else
593 len += snprintf(buf+len, size-len, "RPI:none ");
594 len += snprintf(buf+len, size-len, "flag:x%08x ",
595 ndlp->nlp_flag);
586 if (!ndlp->nlp_type) 596 if (!ndlp->nlp_type)
587 len += snprintf(buf+len, size-len, "UNKNOWN_TYPE "); 597 len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
588 if (ndlp->nlp_type & NLP_FC_NODE) 598 if (ndlp->nlp_type & NLP_FC_NODE)
@@ -1999,207 +2009,298 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 {
 	struct lpfc_debug *debug = file->private_data;
 	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
-	int len = 0, fcp_qidx;
+	int len = 0;
 	char *pbuffer;
+	int x, cnt;
+	int max_cnt;
+	struct lpfc_queue *qp = NULL;
+
 
 	if (!debug->buffer)
 		debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
 	if (!debug->buffer)
 		return 0;
 	pbuffer = debug->buffer;
+	max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 128;
 
 	if (*ppos)
 		return 0;
 
-	/* Get slow-path event queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Slow-path EQ information:\n");
-	if (phba->sli4_hba.sp_eq) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tEQID[%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-			phba->sli4_hba.sp_eq->queue_id,
-			phba->sli4_hba.sp_eq->entry_count,
-			phba->sli4_hba.sp_eq->entry_size,
-			phba->sli4_hba.sp_eq->host_index,
-			phba->sli4_hba.sp_eq->hba_index);
-	}
+	spin_lock_irq(&phba->hbalock);
+
+	/* Fast-path event queue */
+	if (phba->sli4_hba.hba_eq && phba->cfg_fcp_io_channel) {
+		cnt = phba->cfg_fcp_io_channel;
+
+		for (x = 0; x < cnt; x++) {
+
+			/* Fast-path EQ */
+			qp = phba->sli4_hba.hba_eq[x];
+			if (!qp)
+				goto proc_cq;
+
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\nHBA EQ info: "
+				"EQ-STAT[max:x%x noE:x%x "
+				"bs:x%x proc:x%llx]\n",
+				qp->q_cnt_1, qp->q_cnt_2,
+				qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"EQID[%02d], "
+				"QE-CNT[%04d], QE-SIZE[%04d], "
+				"HOST-IDX[%04d], PORT-IDX[%04d]",
+				qp->queue_id,
+				qp->entry_count,
+				qp->entry_size,
+				qp->host_index,
+				qp->hba_index);
+
+
+			/* Reset max counter */
+			qp->EQ_max_eqe = 0;
+
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+			if (len >= max_cnt)
+				goto too_big;
+proc_cq:
+			/* Fast-path FCP CQ */
+			qp = phba->sli4_hba.fcp_cq[x];
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\tFCP CQ info: ");
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"AssocEQID[%02d]: "
+				"CQ STAT[max:x%x relw:x%x "
+				"xabt:x%x wq:x%llx]\n",
+				qp->assoc_qid,
+				qp->q_cnt_1, qp->q_cnt_2,
+				qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\tCQID[%02d], "
+				"QE-CNT[%04d], QE-SIZE[%04d], "
+				"HOST-IDX[%04d], PORT-IDX[%04d]",
+				qp->queue_id, qp->entry_count,
+				qp->entry_size, qp->host_index,
+				qp->hba_index);
+
 
-	/* Get fast-path event queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Fast-path EQ information:\n");
-	if (phba->sli4_hba.fp_eq) {
-		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
-				fcp_qidx++) {
-			if (phba->sli4_hba.fp_eq[fcp_qidx]) {
-				len += snprintf(pbuffer+len,
-					LPFC_QUE_INFO_GET_BUF_SIZE-len,
-					"\tEQID[%02d], "
-					"QE-COUNT[%04d], QE-SIZE[%04d], "
-					"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-					phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
-					phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
-					phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
-					phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
-					phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
-			}
-		}
-	}
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
-
-	/* Get mailbox complete queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Slow-path MBX CQ information:\n");
-	if (phba->sli4_hba.mbx_cq) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Associated EQID[%02d]:\n",
-			phba->sli4_hba.mbx_cq->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tCQID[%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-			phba->sli4_hba.mbx_cq->queue_id,
-			phba->sli4_hba.mbx_cq->entry_count,
-			phba->sli4_hba.mbx_cq->entry_size,
-			phba->sli4_hba.mbx_cq->host_index,
-			phba->sli4_hba.mbx_cq->hba_index);
-	}
+			/* Reset max counter */
+			qp->CQ_max_cqe = 0;
+
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+			if (len >= max_cnt)
+				goto too_big;
+
+			/* Fast-path FCP WQ */
+			qp = phba->sli4_hba.fcp_wq[x];
+
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\t\tFCP WQ info: ");
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"AssocCQID[%02d]: "
+				"WQ-STAT[oflow:x%x posted:x%llx]\n",
+				qp->assoc_qid,
+				qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\t\tWQID[%02d], "
+				"QE-CNT[%04d], QE-SIZE[%04d], "
+				"HOST-IDX[%04d], PORT-IDX[%04d]",
+				qp->queue_id,
+				qp->entry_count,
+				qp->entry_size,
+				qp->host_index,
+				qp->hba_index);
+
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+			if (len >= max_cnt)
+				goto too_big;
+
+			if (x)
+				continue;
+
+			/* Only EQ 0 has slow path CQs configured */
+
+			/* Slow-path mailbox CQ */
+			qp = phba->sli4_hba.mbx_cq;
+			if (qp) {
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"\tMBX CQ info: ");
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"AssocEQID[%02d]: "
+					"CQ-STAT[mbox:x%x relw:x%x "
+					"xabt:x%x wq:x%llx]\n",
+					qp->assoc_qid,
+					qp->q_cnt_1, qp->q_cnt_2,
+					qp->q_cnt_3,
+					(unsigned long long)qp->q_cnt_4);
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"\tCQID[%02d], "
+					"QE-CNT[%04d], QE-SIZE[%04d], "
+					"HOST-IDX[%04d], PORT-IDX[%04d]",
+					qp->queue_id, qp->entry_count,
+					qp->entry_size, qp->host_index,
+					qp->hba_index);
+
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+				if (len >= max_cnt)
+					goto too_big;
+			}
 
-	/* Get slow-path complete queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Slow-path ELS CQ information:\n");
-	if (phba->sli4_hba.els_cq) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Associated EQID[%02d]:\n",
-			phba->sli4_hba.els_cq->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tCQID [%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-			phba->sli4_hba.els_cq->queue_id,
-			phba->sli4_hba.els_cq->entry_count,
-			phba->sli4_hba.els_cq->entry_size,
-			phba->sli4_hba.els_cq->host_index,
-			phba->sli4_hba.els_cq->hba_index);
-	}
+			/* Slow-path MBOX MQ */
+			qp = phba->sli4_hba.mbx_wq;
+			if (qp) {
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"\t\tMBX MQ info: ");
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"AssocCQID[%02d]:\n",
+					phba->sli4_hba.mbx_wq->assoc_qid);
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"\t\tWQID[%02d], "
+					"QE-CNT[%04d], QE-SIZE[%04d], "
+					"HOST-IDX[%04d], PORT-IDX[%04d]",
+					qp->queue_id, qp->entry_count,
+					qp->entry_size, qp->host_index,
+					qp->hba_index);
+
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+				if (len >= max_cnt)
+					goto too_big;
+			}
 
-	/* Get fast-path complete queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Fast-path FCP CQ information:\n");
-	fcp_qidx = 0;
-	if (phba->sli4_hba.fcp_cq) {
-		do {
-			if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
-				len += snprintf(pbuffer+len,
-					LPFC_QUE_INFO_GET_BUF_SIZE-len,
-					"Associated EQID[%02d]:\n",
-					phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
-				len += snprintf(pbuffer+len,
-					LPFC_QUE_INFO_GET_BUF_SIZE-len,
-					"\tCQID[%02d], "
-					"QE-COUNT[%04d], QE-SIZE[%04d], "
-					"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-					phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
-					phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
-					phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
-					phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
-					phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
-			}
-		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
-		len += snprintf(pbuffer+len,
-				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
-	}
+			/* Slow-path ELS response CQ */
+			qp = phba->sli4_hba.els_cq;
+			if (qp) {
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"\tELS CQ info: ");
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"AssocEQID[%02d]: "
+					"CQ-STAT[max:x%x relw:x%x "
+					"xabt:x%x wq:x%llx]\n",
+					qp->assoc_qid,
+					qp->q_cnt_1, qp->q_cnt_2,
+					qp->q_cnt_3,
+					(unsigned long long)qp->q_cnt_4);
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"\tCQID [%02d], "
+					"QE-CNT[%04d], QE-SIZE[%04d], "
+					"HOST-IDX[%04d], PORT-IDX[%04d]",
+					qp->queue_id, qp->entry_count,
+					qp->entry_size, qp->host_index,
+					qp->hba_index);
+
+				/* Reset max counter */
+				qp->CQ_max_cqe = 0;
+
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+				if (len >= max_cnt)
+					goto too_big;
+			}
 
-	/* Get mailbox queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Slow-path MBX MQ information:\n");
-	if (phba->sli4_hba.mbx_wq) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Associated CQID[%02d]:\n",
-			phba->sli4_hba.mbx_wq->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tWQID[%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-			phba->sli4_hba.mbx_wq->queue_id,
-			phba->sli4_hba.mbx_wq->entry_count,
-			phba->sli4_hba.mbx_wq->entry_size,
-			phba->sli4_hba.mbx_wq->host_index,
-			phba->sli4_hba.mbx_wq->hba_index);
-	}
+			/* Slow-path ELS WQ */
+			qp = phba->sli4_hba.els_wq;
+			if (qp) {
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"\t\tELS WQ info: ");
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"AssocCQID[%02d]: "
+					" WQ-STAT[oflow:x%x "
+					"posted:x%llx]\n",
+					qp->assoc_qid,
+					qp->q_cnt_1,
+					(unsigned long long)qp->q_cnt_4);
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"\t\tWQID[%02d], "
+					"QE-CNT[%04d], QE-SIZE[%04d], "
+					"HOST-IDX[%04d], PORT-IDX[%04d]",
+					qp->queue_id, qp->entry_count,
+					qp->entry_size, qp->host_index,
+					qp->hba_index);
+
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+				if (len >= max_cnt)
+					goto too_big;
+			}
 
-	/* Get slow-path work queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Slow-path ELS WQ information:\n");
-	if (phba->sli4_hba.els_wq) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Associated CQID[%02d]:\n",
-			phba->sli4_hba.els_wq->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tWQID[%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-			phba->sli4_hba.els_wq->queue_id,
-			phba->sli4_hba.els_wq->entry_count,
-			phba->sli4_hba.els_wq->entry_size,
-			phba->sli4_hba.els_wq->host_index,
-			phba->sli4_hba.els_wq->hba_index);
-	}
+			if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+				/* Slow-path RQ header */
+				qp = phba->sli4_hba.hdr_rq;
 
-	/* Get fast-path work queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Fast-path FCP WQ information:\n");
-	if (phba->sli4_hba.fcp_wq) {
-		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
-				fcp_qidx++) {
-			if (!phba->sli4_hba.fcp_wq[fcp_qidx])
-				continue;
-			len += snprintf(pbuffer+len,
-					LPFC_QUE_INFO_GET_BUF_SIZE-len,
-					"Associated CQID[%02d]:\n",
-					phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
-			len += snprintf(pbuffer+len,
-					LPFC_QUE_INFO_GET_BUF_SIZE-len,
-					"\tWQID[%02d], "
-					"QE-COUNT[%04d], WQE-SIZE[%04d], "
-					"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-					phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
-					phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
-					phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
-					phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
-					phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"\t\tRQ info: ");
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"AssocCQID[%02d]: "
+					"RQ-STAT[nopost:x%x nobuf:x%x "
+					"trunc:x%x rcv:x%llx]\n",
+					qp->assoc_qid,
+					qp->q_cnt_1, qp->q_cnt_2,
+					qp->q_cnt_3,
+					(unsigned long long)qp->q_cnt_4);
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"\t\tHQID[%02d], "
+					"QE-CNT[%04d], QE-SIZE[%04d], "
+					"HOST-IDX[%04d], PORT-IDX[%04d]\n",
+					qp->queue_id,
+					qp->entry_count,
+					qp->entry_size,
+					qp->host_index,
+					qp->hba_index);
+
+				/* Slow-path RQ data */
+				qp = phba->sli4_hba.dat_rq;
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+					"\t\tDQID[%02d], "
+					"QE-CNT[%04d], QE-SIZE[%04d], "
+					"HOST-IDX[%04d], PORT-IDX[%04d]\n",
+					qp->queue_id,
+					qp->entry_count,
+					qp->entry_size,
+					qp->host_index,
+					qp->hba_index);
+
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+			}
 		}
-		len += snprintf(pbuffer+len,
-				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
 	}
 
-	/* Get receive queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Slow-path RQ information:\n");
-	if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Associated CQID[%02d]:\n",
-			phba->sli4_hba.hdr_rq->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tHQID[%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-			phba->sli4_hba.hdr_rq->queue_id,
-			phba->sli4_hba.hdr_rq->entry_count,
-			phba->sli4_hba.hdr_rq->entry_size,
-			phba->sli4_hba.hdr_rq->host_index,
-			phba->sli4_hba.hdr_rq->hba_index);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tDQID[%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-			phba->sli4_hba.dat_rq->queue_id,
-			phba->sli4_hba.dat_rq->entry_count,
-			phba->sli4_hba.dat_rq->entry_size,
-			phba->sli4_hba.dat_rq->host_index,
-			phba->sli4_hba.dat_rq->hba_index);
-	}
+	spin_unlock_irq(&phba->hbalock);
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+too_big:
+	len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len, "Truncated ...\n");
+	spin_unlock_irq(&phba->hbalock);
 	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 }
 
@@ -2408,31 +2509,21 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 
 	switch (quetp) {
 	case LPFC_IDIAG_EQ:
-		/* Slow-path event queue */
-		if (phba->sli4_hba.sp_eq &&
-		    phba->sli4_hba.sp_eq->queue_id == queid) {
-			/* Sanity check */
-			rc = lpfc_idiag_que_param_check(
-					phba->sli4_hba.sp_eq, index, count);
-			if (rc)
-				goto error_out;
-			idiag.ptr_private = phba->sli4_hba.sp_eq;
-			goto pass_check;
-		}
-		/* Fast-path event queue */
-		if (phba->sli4_hba.fp_eq) {
-			for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
-				if (phba->sli4_hba.fp_eq[qidx] &&
-				    phba->sli4_hba.fp_eq[qidx]->queue_id ==
+		/* HBA event queue */
+		if (phba->sli4_hba.hba_eq) {
+			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+			     qidx++) {
+				if (phba->sli4_hba.hba_eq[qidx] &&
+				    phba->sli4_hba.hba_eq[qidx]->queue_id ==
 				    queid) {
 					/* Sanity check */
 					rc = lpfc_idiag_que_param_check(
-						phba->sli4_hba.fp_eq[qidx],
+						phba->sli4_hba.hba_eq[qidx],
 						index, count);
 					if (rc)
 						goto error_out;
 					idiag.ptr_private =
-						phba->sli4_hba.fp_eq[qidx];
+						phba->sli4_hba.hba_eq[qidx];
 					goto pass_check;
 				}
 			}
@@ -2479,7 +2570,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 					phba->sli4_hba.fcp_cq[qidx];
 				goto pass_check;
 			}
-			} while (++qidx < phba->cfg_fcp_eq_count);
+			} while (++qidx < phba->cfg_fcp_io_channel);
 		}
 		goto error_out;
 		break;
@@ -2511,7 +2602,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 		}
 		/* FCP work queue */
 		if (phba->sli4_hba.fcp_wq) {
-			for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
+			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+			     qidx++) {
 				if (!phba->sli4_hba.fcp_wq[qidx])
 					continue;
 				if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
@@ -4490,7 +4582,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
 	lpfc_debug_dump_mbx_wq(phba);
 	lpfc_debug_dump_els_wq(phba);
 
-	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
 		lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
 
 	lpfc_debug_dump_hdr_rq(phba);
@@ -4501,14 +4593,12 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
 	lpfc_debug_dump_mbx_cq(phba);
 	lpfc_debug_dump_els_cq(phba);
 
-	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
 		lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
 
 	/*
 	 * Dump Event Queues (EQs)
 	 */
-	lpfc_debug_dump_sp_eq(phba);
-
-	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
-		lpfc_debug_dump_fcp_eq(phba, fcp_wqidx);
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
+		lpfc_debug_dump_hba_eq(phba, fcp_wqidx);
 }
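[Annotation] The rewritten lpfc_idiag_queinfo_read() above accumulates output with repeated snprintf(pbuffer+len, SIZE-len, ...) calls and jumps to a too_big label once len crosses max_cnt, which is deliberately set 128 bytes short of the buffer so a truncation marker always fits. A standalone sketch of that accumulate-with-headroom pattern (plain C, sizes chosen for the demo):

	#include <stdio.h>

	#define DEMO_BUF_SIZE	4096
	#define DEMO_HEADROOM	128

	static int demo_fill(char *buf, int n_items)
	{
		int len = 0, max_cnt = DEMO_BUF_SIZE - DEMO_HEADROOM, i;

		for (i = 0; i < n_items; i++) {
			len += snprintf(buf + len, DEMO_BUF_SIZE - len,
					"item %d\n", i);
			if (len >= max_cnt) {
				/* Headroom guarantees the marker fits. */
				len += snprintf(buf + len,
						DEMO_BUF_SIZE - len,
						"Truncated ...\n");
				break;
			}
		}
		return len;
	}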
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index afe368fd1b98..8b2b6a3bfc25 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -36,6 +36,9 @@
 /* dumpHostSlim output buffer size */
 #define LPFC_DUMPHOSTSLIM_SIZE 4096
 
+/* dumpSLIqinfo output buffer size */
+#define	LPFC_DUMPSLIQINFO_SIZE 4096
+
 /* hbqinfo output buffer size */
 #define LPFC_HBQINFO_SIZE 8192
 
@@ -366,7 +369,7 @@ static inline void
 lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
 {
 	/* sanity check */
-	if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+	if (fcp_wqidx >= phba->cfg_fcp_io_channel)
 		return;
 
 	printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
@@ -388,15 +391,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
 	int fcp_cqidx, fcp_cqid;
 
 	/* sanity check */
-	if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+	if (fcp_wqidx >= phba->cfg_fcp_io_channel)
 		return;
 
 	fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
-	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
 		if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
 			break;
 	if (phba->intr_type == MSIX) {
-		if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+		if (fcp_cqidx >= phba->cfg_fcp_io_channel)
 			return;
 	} else {
 		if (fcp_cqidx > 0)
@@ -410,7 +413,7 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
 }
 
 /**
- * lpfc_debug_dump_fcp_eq - dump all entries from a fcp work queue's evt queue
+ * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue
  * @phba: Pointer to HBA context object.
  * @fcp_wqidx: Index to a FCP work queue.
  *
@@ -418,36 +421,30 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
  * associated to the FCP work queue specified by the @fcp_wqidx.
  **/
 static inline void
-lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx)
+lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx)
 {
 	struct lpfc_queue *qdesc;
 	int fcp_eqidx, fcp_eqid;
 	int fcp_cqidx, fcp_cqid;
 
 	/* sanity check */
-	if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+	if (fcp_wqidx >= phba->cfg_fcp_io_channel)
 		return;
 	fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
-	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
 		if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
 			break;
 	if (phba->intr_type == MSIX) {
-		if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+		if (fcp_cqidx >= phba->cfg_fcp_io_channel)
 			return;
 	} else {
 		if (fcp_cqidx > 0)
 			return;
 	}
 
-	if (phba->cfg_fcp_eq_count == 0) {
-		fcp_eqidx = -1;
-		fcp_eqid = phba->sli4_hba.sp_eq->queue_id;
-		qdesc = phba->sli4_hba.sp_eq;
-	} else {
-		fcp_eqidx = fcp_cqidx;
-		fcp_eqid = phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id;
-		qdesc = phba->sli4_hba.fp_eq[fcp_eqidx];
-	}
+	fcp_eqidx = fcp_cqidx;
+	fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id;
+	qdesc = phba->sli4_hba.hba_eq[fcp_eqidx];
 
 	printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
 			"EQ[Idx:%d|Qid:%d]\n",
@@ -543,25 +540,6 @@ lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_debug_dump_sp_eq - dump all entries from slow-path event queue
- * @phba: Pointer to HBA context object.
- *
- * This function dumps all entries from the slow-path event queue.
- **/
-static inline void
-lpfc_debug_dump_sp_eq(struct lpfc_hba *phba)
-{
-	printk(KERN_ERR "SP EQ: WQ[Qid:%d/Qid:%d]->CQ[Qid:%d/Qid:%d]->"
-		"EQ[Qid:%d]:\n",
-		phba->sli4_hba.mbx_wq->queue_id,
-		phba->sli4_hba.els_wq->queue_id,
-		phba->sli4_hba.mbx_cq->queue_id,
-		phba->sli4_hba.els_cq->queue_id,
-		phba->sli4_hba.sp_eq->queue_id);
-	lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
-}
-
-/**
  * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
  * @phba: Pointer to HBA context object.
  * @qid: Work queue identifier.
@@ -574,10 +552,10 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
 {
 	int wq_idx;
 
-	for (wq_idx = 0; wq_idx < phba->cfg_fcp_wq_count; wq_idx++)
+	for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
 		if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
 			break;
-	if (wq_idx < phba->cfg_fcp_wq_count) {
+	if (wq_idx < phba->cfg_fcp_io_channel) {
 		printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
 		lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
 		return;
@@ -644,9 +622,9 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
 	do {
 		if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
 			break;
-	} while (++cq_idx < phba->cfg_fcp_eq_count);
+	} while (++cq_idx < phba->cfg_fcp_io_channel);
 
-	if (cq_idx < phba->cfg_fcp_eq_count) {
+	if (cq_idx < phba->cfg_fcp_io_channel) {
 		printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
 		lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
 		return;
@@ -677,21 +655,17 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
 {
 	int eq_idx;
 
-	for (eq_idx = 0; eq_idx < phba->cfg_fcp_eq_count; eq_idx++) {
-		if (phba->sli4_hba.fp_eq[eq_idx]->queue_id == qid)
+	for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) {
+		if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
 			break;
 	}
 
-	if (eq_idx < phba->cfg_fcp_eq_count) {
+	if (eq_idx < phba->cfg_fcp_io_channel) {
 		printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
-		lpfc_debug_dump_q(phba->sli4_hba.fp_eq[eq_idx]);
+		lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
 		return;
 	}
 
-	if (phba->sli4_hba.sp_eq->queue_id == qid) {
-		printk(KERN_ERR "SP EQ[|Qid:%d]\n", qid);
-		lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
-	}
 }
 
 void lpfc_debug_dump_all_queues(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1d84b63fccad..af49fb03dbb8 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -145,6 +145,7 @@ struct lpfc_node_rrq {
 #define NLP_RCV_PLOGI      0x00080000	/* Rcv'ed PLOGI from remote system */
 #define NLP_LOGO_ACC       0x00100000	/* Process LOGO after ACC completes */
 #define NLP_TGT_NO_SCSIID  0x00200000	/* good PRLI but no binding for scsid */
+#define NLP_ISSUE_LOGO     0x00400000	/* waiting to issue a LOGO */
 #define NLP_ACC_REGLOGIN   0x01000000	/* Issue Reg Login after successful
 					   ACC */
 #define NLP_NPR_ADISC      0x02000000	/* Issue ADISC when dq'ed from
@@ -201,10 +202,11 @@ struct lpfc_node_rrq {
 #define NLP_STE_ADISC_ISSUE       0x2	/* ADISC was sent to NL_PORT */
 #define NLP_STE_REG_LOGIN_ISSUE   0x3	/* REG_LOGIN was issued for NL_PORT */
 #define NLP_STE_PRLI_ISSUE        0x4	/* PRLI was sent to NL_PORT */
-#define NLP_STE_UNMAPPED_NODE     0x5	/* PRLI completed from NL_PORT */
-#define NLP_STE_MAPPED_NODE       0x6	/* Identified as a FCP Target */
-#define NLP_STE_NPR_NODE          0x7	/* NPort disappeared */
-#define NLP_STE_MAX_STATE         0x8
+#define NLP_STE_LOGO_ISSUE        0x5	/* LOGO was sent to NL_PORT */
+#define NLP_STE_UNMAPPED_NODE     0x6	/* PRLI completed from NL_PORT */
+#define NLP_STE_MAPPED_NODE       0x7	/* Identified as a FCP Target */
+#define NLP_STE_NPR_NODE          0x8	/* NPort disappeared */
+#define NLP_STE_MAX_STATE         0x9
 #define NLP_STE_FREED_NODE        0xff	/* node entry was freed to MEM_NLP */
 
 /* For UNUSED_NODE state, the node has just been allocated.
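[Annotation] Because NLP_STE_LOGO_ISSUE slots in at 0x5, every later state shifts up by one and NLP_STE_MAX_STATE grows to 0x9; anything sized or indexed by the state number (state-machine dispatch tables, name arrays like the one the debugfs hunk prints from) has to grow in step elsewhere in this merge. A small illustration of that invariant, with hypothetical names:

	enum demo_nlp_state {
		DEMO_STE_UNUSED_NODE,
		DEMO_STE_PLOGI_ISSUE,
		DEMO_STE_ADISC_ISSUE,
		DEMO_STE_REG_LOGIN_ISSUE,
		DEMO_STE_PRLI_ISSUE,
		DEMO_STE_LOGO_ISSUE,	/* new state inserted here */
		DEMO_STE_UNMAPPED_NODE,
		DEMO_STE_MAPPED_NODE,
		DEMO_STE_NPR_NODE,
		DEMO_STE_MAX_STATE,	/* stays one past the last state */
	};

	/* Indexed by state: must gain an entry when a state is added. */
	static const char *const demo_state_name[DEMO_STE_MAX_STATE] = {
		"UNUSED", "PLOGI", "ADISC", "REGLOGIN", "PRLI",
		"LOGO", "UNMAP", "MAPPED", "NPR",
	};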
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d54ae1999797..cfe533bc9790 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -962,7 +962,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			if ((phba->fcoe_cvl_eventtag_attn ==
 			     phba->fcoe_cvl_eventtag) &&
 			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-			    (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
+			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+			    IOERR_SLI_ABORTED))
 				goto stop_rr_fcf_flogi;
 			else
 				phba->fcoe_cvl_eventtag_attn =
@@ -1108,8 +1109,10 @@ flogifail:
1108 /* Start discovery */ 1109 /* Start discovery */
1109 lpfc_disc_start(vport); 1110 lpfc_disc_start(vport);
1110 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 1111 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1111 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) && 1112 (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1112 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) && 1113 IOERR_SLI_ABORTED) &&
1114 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1115 IOERR_SLI_DOWN))) &&
1113 (phba->link_state != LPFC_CLEAR_LA)) { 1116 (phba->link_state != LPFC_CLEAR_LA)) {
1114 /* If FLOGI failed enable link interrupt. */ 1117 /* If FLOGI failed enable link interrupt. */
1115 lpfc_issue_clear_la(phba, vport); 1118 lpfc_issue_clear_la(phba, vport);
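The recurring change in these hunks replaces raw comparisons against irsp->un.ulpWord[4] with masked ones: only the low 9 bits of that word carry the local-reject reason code, and the upper bits may hold extra parameter data. A hedged helper that captures the idiom (the helper itself is illustrative, not part of the driver; IOERR_PARAM_MASK is defined in lpfc_hw.h later in this diff):

    static inline bool lpfc_local_reject_is(uint32_t ulp_word4, uint32_t ioerr)
    {
        /* compare only the 9-bit reason code */
        return (ulp_word4 & IOERR_PARAM_MASK) == ioerr;
    }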
@@ -1476,6 +1479,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1476 return ndlp; 1479 return ndlp;
1477 memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap)); 1480 memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
1478 1481
1482 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1483 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
1484 ndlp, ndlp->nlp_DID, new_ndlp);
1485
1479 if (!new_ndlp) { 1486 if (!new_ndlp) {
1480 rc = memcmp(&ndlp->nlp_portname, name, 1487 rc = memcmp(&ndlp->nlp_portname, name,
1481 sizeof(struct lpfc_name)); 1488 sizeof(struct lpfc_name));
@@ -1527,6 +1534,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1527 /* The new_ndlp is replacing ndlp totally, so we need 1534 /* The new_ndlp is replacing ndlp totally, so we need
1528 * to put ndlp on UNUSED list and try to free it. 1535 * to put ndlp on UNUSED list and try to free it.
1529 */ 1536 */
1537 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1538 "3179 PLOGI confirm NEW: %x %x\n",
1539 new_ndlp->nlp_DID, keepDID);
1530 1540
1531 /* Fix up the rport accordingly */ 1541 /* Fix up the rport accordingly */
1532 rport = ndlp->rport; 1542 rport = ndlp->rport;
@@ -1559,23 +1569,34 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1559 lpfc_drop_node(vport, ndlp); 1569 lpfc_drop_node(vport, ndlp);
1560 } 1570 }
1561 else { 1571 else {
1572 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1573 "3180 PLOGI confirm SWAP: %x %x\n",
1574 new_ndlp->nlp_DID, keepDID);
1575
1562 lpfc_unreg_rpi(vport, ndlp); 1576 lpfc_unreg_rpi(vport, ndlp);
1577
1563 /* Two ndlps cannot have the same did */ 1578 /* Two ndlps cannot have the same did */
1564 ndlp->nlp_DID = keepDID; 1579 ndlp->nlp_DID = keepDID;
1565 if (phba->sli_rev == LPFC_SLI_REV4) 1580 if (phba->sli_rev == LPFC_SLI_REV4)
1566 memcpy(&ndlp->active_rrqs.xri_bitmap, 1581 memcpy(&ndlp->active_rrqs.xri_bitmap,
1567 &rrq.xri_bitmap, 1582 &rrq.xri_bitmap,
1568 sizeof(ndlp->active_rrqs.xri_bitmap)); 1583 sizeof(ndlp->active_rrqs.xri_bitmap));
1584
1569 /* Since we are swapping the ndlp passed in with the new one 1585 /* Since we are swapping the ndlp passed in with the new one
1570 * and the did has already been swapped, copy over the 1586 * and the did has already been swapped, copy over state.
 1571 * state and names. 1587 * The new WWNs are already in new_ndlp since that's what
 1588 * we looked it up by at the beginning of this routine.
1572 */ 1589 */
1573 memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
1574 sizeof(struct lpfc_name));
1575 memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
1576 sizeof(struct lpfc_name));
1577 new_ndlp->nlp_state = ndlp->nlp_state; 1590 new_ndlp->nlp_state = ndlp->nlp_state;
1578 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1591
1592 /* Since we are switching over to the new_ndlp, the old
1593 * ndlp should be put in the NPR state, unless we have
1594 * already started re-discovery on it.
1595 */
1596 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1597 (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1598 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1599
1579 /* Fix up the rport accordingly */ 1600 /* Fix up the rport accordingly */
1580 rport = ndlp->rport; 1601 rport = ndlp->rport;
1581 if (rport) { 1602 if (rport) {
@@ -2367,6 +2388,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2367 IOCB_t *irsp; 2388 IOCB_t *irsp;
2368 struct lpfc_sli *psli; 2389 struct lpfc_sli *psli;
2369 struct lpfcMboxq *mbox; 2390 struct lpfcMboxq *mbox;
2391 unsigned long flags;
2392 uint32_t skip_recovery = 0;
2370 2393
2371 psli = &phba->sli; 2394 psli = &phba->sli;
2372 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2395 /* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -2381,47 +2404,52 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2381 "LOGO cmpl: status:x%x/x%x did:x%x", 2404 "LOGO cmpl: status:x%x/x%x did:x%x",
2382 irsp->ulpStatus, irsp->un.ulpWord[4], 2405 irsp->ulpStatus, irsp->un.ulpWord[4],
2383 ndlp->nlp_DID); 2406 ndlp->nlp_DID);
2407
2384 /* LOGO completes to NPort <nlp_DID> */ 2408 /* LOGO completes to NPort <nlp_DID> */
2385 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2409 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2386 "0105 LOGO completes to NPort x%x " 2410 "0105 LOGO completes to NPort x%x "
2387 "Data: x%x x%x x%x x%x\n", 2411 "Data: x%x x%x x%x x%x\n",
2388 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2412 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2389 irsp->ulpTimeout, vport->num_disc_nodes); 2413 irsp->ulpTimeout, vport->num_disc_nodes);
2390 /* Check to see if link went down during discovery */ 2414
2391 if (lpfc_els_chk_latt(vport)) 2415 if (lpfc_els_chk_latt(vport)) {
2416 skip_recovery = 1;
2392 goto out; 2417 goto out;
2418 }
2393 2419
2420 /* Check to see if link went down during discovery */
2394 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2421 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2395 /* NLP_EVT_DEVICE_RM should unregister the RPI 2422 /* NLP_EVT_DEVICE_RM should unregister the RPI
2396 * which should abort all outstanding IOs. 2423 * which should abort all outstanding IOs.
2397 */ 2424 */
2398 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2425 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2399 NLP_EVT_DEVICE_RM); 2426 NLP_EVT_DEVICE_RM);
2427 skip_recovery = 1;
2400 goto out; 2428 goto out;
2401 } 2429 }
2402 2430
2403 if (irsp->ulpStatus) { 2431 if (irsp->ulpStatus) {
2404 /* Check for retry */ 2432 /* Check for retry */
2405 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 2433 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2406 /* ELS command is being retried */ 2434 /* ELS command is being retried */
2435 skip_recovery = 1;
2407 goto out; 2436 goto out;
2437 }
2408 /* LOGO failed */ 2438 /* LOGO failed */
2409 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2439 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2410 "2756 LOGO failure DID:%06X Status:x%x/x%x\n", 2440 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2411 ndlp->nlp_DID, irsp->ulpStatus, 2441 ndlp->nlp_DID, irsp->ulpStatus,
2412 irsp->un.ulpWord[4]); 2442 irsp->un.ulpWord[4]);
2413 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2443 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2414 if (lpfc_error_lost_link(irsp)) 2444 if (lpfc_error_lost_link(irsp)) {
2445 skip_recovery = 1;
2415 goto out; 2446 goto out;
2416 else 2447 }
2417 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2448 }
2418 NLP_EVT_CMPL_LOGO); 2449
2419 } else 2450 /* Call state machine. This will unregister the rpi if needed. */
2420 /* Good status, call state machine. 2451 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
2421 * This will unregister the rpi if needed. 2452
2422 */
2423 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2424 NLP_EVT_CMPL_LOGO);
2425out: 2453out:
2426 lpfc_els_free_iocb(phba, cmdiocb); 2454 lpfc_els_free_iocb(phba, cmdiocb);
2427 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ 2455 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
@@ -2436,9 +2464,30 @@ out:
2436 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 2464 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
2437 MBX_NOT_FINISHED) { 2465 MBX_NOT_FINISHED) {
2438 mempool_free(mbox, phba->mbox_mem_pool); 2466 mempool_free(mbox, phba->mbox_mem_pool);
2467 skip_recovery = 1;
2439 } 2468 }
2440 } 2469 }
2441 } 2470 }
2471
2472 /*
2473 * If the node is a target, the handling attempts to recover the port.
2474 * For any other port type, the rpi is unregistered as an implicit
2475 * LOGO.
2476 */
2477 if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
2478 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2479 spin_lock_irqsave(shost->host_lock, flags);
2480 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2481 spin_unlock_irqrestore(shost->host_lock, flags);
2482
2483 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2484 "3187 LOGO completes to NPort x%x: Start "
2485 "Recovery Data: x%x x%x x%x x%x\n",
2486 ndlp->nlp_DID, irsp->ulpStatus,
2487 irsp->un.ulpWord[4], irsp->ulpTimeout,
2488 vport->num_disc_nodes);
2489 lpfc_disc_start(vport);
2490 }
2442 return; 2491 return;
2443} 2492}
2444 2493
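The rewritten completion handler funnels every early-exit path through skip_recovery, so the recovery step runs at most once and only for FCP targets. Condensed to a sketch (driver calls, simplified; locking elided):

    /* after the discovery state machine has processed the LOGO */
    if ((ndlp->nlp_type & NLP_FCP_TARGET) && !skip_recovery) {
        lpfc_cancel_retry_delay_tmo(vport, ndlp);
        ndlp->nlp_flag |= NLP_NPR_2B_DISC;  /* under host_lock in the driver */
        lpfc_disc_start(vport);             /* rediscover (re-PLOGI) the target */
    }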
@@ -2501,10 +2550,27 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2501 "Issue LOGO: did:x%x", 2550 "Issue LOGO: did:x%x",
2502 ndlp->nlp_DID, 0, 0); 2551 ndlp->nlp_DID, 0, 0);
2503 2552
2553 /*
2554 * If we are issuing a LOGO, we may try to recover the remote NPort
2555 * by issuing a PLOGI later. Even though we issue ELS cmds by the
2556 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
 2557 * that ELS command is in-flight, the HBA returns an IOERR_INVALID_RPI
 2558 * for that ELS cmd. To avoid this situation, let's get rid of the
2559 * RPI right now, before any ELS cmds are sent.
2560 */
2561 spin_lock_irq(shost->host_lock);
2562 ndlp->nlp_flag |= NLP_ISSUE_LOGO;
2563 spin_unlock_irq(shost->host_lock);
2564 if (lpfc_unreg_rpi(vport, ndlp)) {
2565 lpfc_els_free_iocb(phba, elsiocb);
2566 return 0;
2567 }
2568
2504 phba->fc_stat.elsXmitLOGO++; 2569 phba->fc_stat.elsXmitLOGO++;
2505 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 2570 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2506 spin_lock_irq(shost->host_lock); 2571 spin_lock_irq(shost->host_lock);
2507 ndlp->nlp_flag |= NLP_LOGO_SND; 2572 ndlp->nlp_flag |= NLP_LOGO_SND;
2573 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
2508 spin_unlock_irq(shost->host_lock); 2574 spin_unlock_irq(shost->host_lock);
2509 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2575 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2510 2576
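The ordering above is the point of this hunk: the RPI is torn down before the LOGO is queued, because an RPI that gets unregistered while an ELS command is still in flight makes the HBA fail that command with IOERR_INVALID_RPI. Reduced to its essentials (driver calls, condensed):

    spin_lock_irq(shost->host_lock);
    ndlp->nlp_flag |= NLP_ISSUE_LOGO;   /* tells lpfc_unreg_rpi() to chain a LOGO */
    spin_unlock_irq(shost->host_lock);
    if (lpfc_unreg_rpi(vport, ndlp)) {
        /* UNREG_RPI was issued; its completion sends the deferred LOGO,
         * so this iocb is no longer needed. */
        lpfc_els_free_iocb(phba, elsiocb);
        return 0;
    }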
@@ -2920,7 +2986,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2920 case ELS_CMD_LOGO: 2986 case ELS_CMD_LOGO:
2921 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 2987 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
2922 ndlp->nlp_prev_state = ndlp->nlp_state; 2988 ndlp->nlp_prev_state = ndlp->nlp_state;
2923 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2989 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
2924 } 2990 }
2925 break; 2991 break;
2926 case ELS_CMD_FDISC: 2992 case ELS_CMD_FDISC:
@@ -3007,7 +3073,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3007 } 3073 }
3008 break; 3074 break;
3009 case IOSTAT_LOCAL_REJECT: 3075 case IOSTAT_LOCAL_REJECT:
3010 switch ((irsp->un.ulpWord[4] & 0xff)) { 3076 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
3011 case IOERR_LOOP_OPEN_FAILURE: 3077 case IOERR_LOOP_OPEN_FAILURE:
3012 if (cmd == ELS_CMD_FLOGI) { 3078 if (cmd == ELS_CMD_FLOGI) {
3013 if (PCI_DEVICE_ID_HORNET == 3079 if (PCI_DEVICE_ID_HORNET ==
@@ -3214,7 +3280,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3214 3280
3215 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 3281 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
3216 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 3282 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
3217 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) { 3283 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
3284 IOERR_NO_RESOURCES))) {
3218 /* Don't reset timer for no resources */ 3285 /* Don't reset timer for no resources */
3219 3286
3220 /* If discovery / RSCN timer is running, reset it */ 3287 /* If discovery / RSCN timer is running, reset it */
@@ -3273,7 +3340,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3273 return 1; 3340 return 1;
3274 case ELS_CMD_LOGO: 3341 case ELS_CMD_LOGO:
3275 ndlp->nlp_prev_state = ndlp->nlp_state; 3342 ndlp->nlp_prev_state = ndlp->nlp_state;
3276 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 3343 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3277 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 3344 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
3278 return 1; 3345 return 1;
3279 } 3346 }
@@ -3533,13 +3600,17 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3533 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3600 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3534 kfree(mp); 3601 kfree(mp);
3535 mempool_free(pmb, phba->mbox_mem_pool); 3602 mempool_free(pmb, phba->mbox_mem_pool);
3536 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3603 if (ndlp) {
3537 lpfc_nlp_put(ndlp); 3604 if (NLP_CHK_NODE_ACT(ndlp)) {
3538 /* This is the end of the default RPI cleanup logic for this 3605 lpfc_nlp_put(ndlp);
3539 * ndlp. If no other discovery threads are using this ndlp. 3606 /* This is the end of the default RPI cleanup logic for
3540 * we should free all resources associated with it. 3607 * this ndlp. If no other discovery threads are using
3541 */ 3608 * this ndlp, free all resources associated with it.
3542 lpfc_nlp_not_used(ndlp); 3609 */
3610 lpfc_nlp_not_used(ndlp);
3611 } else {
3612 lpfc_drop_node(ndlp->vport, ndlp);
3613 }
3543 } 3614 }
3544 3615
3545 return; 3616 return;
@@ -6803,7 +6874,8 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6803 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 6874 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
6804 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 6875 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
6805 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 6876 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
6806 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) { 6877 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
6878 IOERR_RCV_BUFFER_WAITING) {
6807 phba->fc_stat.NoRcvBuf++; 6879 phba->fc_stat.NoRcvBuf++;
6808 /* Not enough posted buffers; Try posting more buffers */ 6880 /* Not enough posted buffers; Try posting more buffers */
6809 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 6881 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
@@ -7985,3 +8057,47 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7985 spin_unlock_irqrestore(&phba->hbalock, iflag); 8057 spin_unlock_irqrestore(&phba->hbalock, iflag);
7986 return; 8058 return;
7987} 8059}
8060
 8061/** lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
8062 * @vport: pointer to virtual port object.
8063 * @ndlp: nodelist pointer for the impacted node.
8064 *
8065 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
8066 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
8067 * the driver is required to send a LOGO to the remote node before it
8068 * attempts to recover its login to the remote node.
8069 */
8070void
8071lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
8072 struct lpfc_nodelist *ndlp)
8073{
8074 struct Scsi_Host *shost;
8075 struct lpfc_hba *phba;
8076 unsigned long flags = 0;
8077
8078 shost = lpfc_shost_from_vport(vport);
8079 phba = vport->phba;
8080 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
8081 lpfc_printf_log(phba, KERN_INFO,
8082 LOG_SLI, "3093 No rport recovery needed. "
8083 "rport in state 0x%x\n", ndlp->nlp_state);
8084 return;
8085 }
8086 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8087 "3094 Start rport recovery on shost id 0x%x "
8088 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
8089 "flags 0x%x\n",
8090 shost->host_no, ndlp->nlp_DID,
8091 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
8092 ndlp->nlp_flag);
8093 /*
8094 * The rport is not responding. Remove the FCP-2 flag to prevent
8095 * an ADISC in the follow-up recovery code.
8096 */
8097 spin_lock_irqsave(shost->host_lock, flags);
8098 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
8099 spin_unlock_irqrestore(shost->host_lock, flags);
8100 lpfc_issue_els_logo(vport, ndlp, 0);
8101 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
8102}
8103
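lpfc_sli_abts_recover_port() is meant to be called from abort-completion context. A hypothetical call-site sketch (the surrounding handler, the rpi local, and the lookup are assumptions for illustration, not lines from this patch):

    /* e.g. inside an SLI4 XRI-abort CQE handler */
    ndlp = lpfc_findnode_rpi(vport, rpi);        /* node owning the aborted XRI */
    if (ndlp)
        lpfc_sli_abts_recover_port(vport, ndlp); /* LOGO + NLP_STE_LOGO_ISSUE */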
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9b4f92941dce..e9845d2ecf10 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -123,6 +123,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
123 "rport devlosscb: sid:x%x did:x%x flg:x%x", 123 "rport devlosscb: sid:x%x did:x%x flg:x%x",
124 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); 124 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
125 125
126 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
127 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
128 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
129
126 /* Don't defer this if we are in the process of deleting the vport 130 /* Don't defer this if we are in the process of deleting the vport
127 * or unloading the driver. The unload will cleanup the node 131 * or unloading the driver. The unload will cleanup the node
128 * appropriately we just need to cleanup the ndlp rport info here. 132 * appropriately we just need to cleanup the ndlp rport info here.
@@ -142,6 +146,15 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
142 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) 146 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
143 return; 147 return;
144 148
149 if (ndlp->nlp_type & NLP_FABRIC) {
150
151 /* If the WWPN of the rport and ndlp don't match, ignore it */
152 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
153 put_device(&rport->dev);
154 return;
155 }
156 }
157
145 evtp = &ndlp->dev_loss_evt; 158 evtp = &ndlp->dev_loss_evt;
146 159
147 if (!list_empty(&evtp->evt_listp)) 160 if (!list_empty(&evtp->evt_listp))
@@ -202,6 +215,10 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
202 "rport devlosstmo:did:x%x type:x%x id:x%x", 215 "rport devlosstmo:did:x%x type:x%x id:x%x",
203 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); 216 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
204 217
218 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
219 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
220 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
221
205 /* Don't defer this if we are in the process of deleting the vport 222 /* Don't defer this if we are in the process of deleting the vport
206 * or unloading the driver. The unload will cleanup the node 223 * or unloading the driver. The unload will cleanup the node
207 * appropriately we just need to cleanup the ndlp rport info here. 224 * appropriately we just need to cleanup the ndlp rport info here.
@@ -3492,7 +3509,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
3492 LPFC_MBOXQ_t *pmb = NULL; 3509 LPFC_MBOXQ_t *pmb = NULL;
3493 MAILBOX_t *mb; 3510 MAILBOX_t *mb;
3494 struct static_vport_info *vport_info; 3511 struct static_vport_info *vport_info;
3495 int rc = 0, i; 3512 int mbx_wait_rc = 0, i;
3496 struct fc_vport_identifiers vport_id; 3513 struct fc_vport_identifiers vport_id;
3497 struct fc_vport *new_fc_vport; 3514 struct fc_vport *new_fc_vport;
3498 struct Scsi_Host *shost; 3515 struct Scsi_Host *shost;
@@ -3509,7 +3526,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
3509 " allocate mailbox memory\n"); 3526 " allocate mailbox memory\n");
3510 return; 3527 return;
3511 } 3528 }
3512 3529 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
3513 mb = &pmb->u.mb; 3530 mb = &pmb->u.mb;
3514 3531
3515 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); 3532 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
@@ -3523,24 +3540,31 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
3523 3540
3524 vport_buff = (uint8_t *) vport_info; 3541 vport_buff = (uint8_t *) vport_info;
3525 do { 3542 do {
3543 /* free dma buffer from previous round */
3544 if (pmb->context1) {
3545 mp = (struct lpfc_dmabuf *)pmb->context1;
3546 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3547 kfree(mp);
3548 }
3526 if (lpfc_dump_static_vport(phba, pmb, offset)) 3549 if (lpfc_dump_static_vport(phba, pmb, offset))
3527 goto out; 3550 goto out;
3528 3551
3529 pmb->vport = phba->pport; 3552 pmb->vport = phba->pport;
3530 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO); 3553 mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
3554 LPFC_MBOX_TMO);
3531 3555
3532 if ((rc != MBX_SUCCESS) || mb->mbxStatus) { 3556 if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
3533 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3557 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3534 "0544 lpfc_create_static_vport failed to" 3558 "0544 lpfc_create_static_vport failed to"
3535 " issue dump mailbox command ret 0x%x " 3559 " issue dump mailbox command ret 0x%x "
3536 "status 0x%x\n", 3560 "status 0x%x\n",
3537 rc, mb->mbxStatus); 3561 mbx_wait_rc, mb->mbxStatus);
3538 goto out; 3562 goto out;
3539 } 3563 }
3540 3564
3541 if (phba->sli_rev == LPFC_SLI_REV4) { 3565 if (phba->sli_rev == LPFC_SLI_REV4) {
3542 byte_count = pmb->u.mqe.un.mb_words[5]; 3566 byte_count = pmb->u.mqe.un.mb_words[5];
3543 mp = (struct lpfc_dmabuf *) pmb->context2; 3567 mp = (struct lpfc_dmabuf *)pmb->context1;
3544 if (byte_count > sizeof(struct static_vport_info) - 3568 if (byte_count > sizeof(struct static_vport_info) -
3545 offset) 3569 offset)
3546 byte_count = sizeof(struct static_vport_info) 3570 byte_count = sizeof(struct static_vport_info)
@@ -3604,9 +3628,9 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
3604 3628
3605out: 3629out:
3606 kfree(vport_info); 3630 kfree(vport_info);
3607 if (rc != MBX_TIMEOUT) { 3631 if (mbx_wait_rc != MBX_TIMEOUT) {
3608 if (pmb->context2) { 3632 if (pmb->context1) {
3609 mp = (struct lpfc_dmabuf *) pmb->context2; 3633 mp = (struct lpfc_dmabuf *)pmb->context1;
3610 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3634 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3611 kfree(mp); 3635 kfree(mp);
3612 } 3636 }
@@ -3834,6 +3858,10 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3834 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 3858 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
3835 fc_remote_port_rolechg(rport, rport_ids.roles); 3859 fc_remote_port_rolechg(rport, rport_ids.roles);
3836 3860
3861 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3862 "3183 rport register x%06x, rport %p role x%x\n",
3863 ndlp->nlp_DID, rport, rport_ids.roles);
3864
3837 if ((rport->scsi_target_id != -1) && 3865 if ((rport->scsi_target_id != -1) &&
3838 (rport->scsi_target_id < LPFC_MAX_TARGET)) { 3866 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
3839 ndlp->nlp_sid = rport->scsi_target_id; 3867 ndlp->nlp_sid = rport->scsi_target_id;
@@ -3850,6 +3878,10 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
3850 "rport delete: did:x%x flg:x%x type x%x", 3878 "rport delete: did:x%x flg:x%x type x%x",
3851 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 3879 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
3852 3880
3881 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3882 "3184 rport unregister x%06x, rport %p\n",
3883 ndlp->nlp_DID, rport);
3884
3853 fc_remote_port_delete(rport); 3885 fc_remote_port_delete(rport);
3854 3886
3855 return; 3887 return;
@@ -3964,6 +3996,7 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state)
3964 [NLP_STE_ADISC_ISSUE] = "ADISC", 3996 [NLP_STE_ADISC_ISSUE] = "ADISC",
3965 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN", 3997 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
3966 [NLP_STE_PRLI_ISSUE] = "PRLI", 3998 [NLP_STE_PRLI_ISSUE] = "PRLI",
3999 [NLP_STE_LOGO_ISSUE] = "LOGO",
3967 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED", 4000 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
3968 [NLP_STE_MAPPED_NODE] = "MAPPED", 4001 [NLP_STE_MAPPED_NODE] = "MAPPED",
3969 [NLP_STE_NPR_NODE] = "NPR", 4002 [NLP_STE_NPR_NODE] = "NPR",
@@ -4330,6 +4363,26 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4330 return 0; 4363 return 0;
4331} 4364}
4332 4365
4366/**
4367 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
4368 * @phba: Pointer to HBA context object.
4369 * @pmb: Pointer to mailbox object.
4370 *
4371 * This function will issue an ELS LOGO command after completing
4372 * the UNREG_RPI.
4373 **/
4374void
4375lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4376{
4377 struct lpfc_vport *vport = pmb->vport;
4378 struct lpfc_nodelist *ndlp;
4379
4380 ndlp = (struct lpfc_nodelist *)(pmb->context1);
4381 if (!ndlp)
4382 return;
4383 lpfc_issue_els_logo(vport, ndlp, 0);
4384}
4385
4333/* 4386/*
4334 * Free rpi associated with LPFC_NODELIST entry. 4387 * Free rpi associated with LPFC_NODELIST entry.
4335 * This routine is called from lpfc_freenode(), when we are removing 4388 * This routine is called from lpfc_freenode(), when we are removing
@@ -4354,9 +4407,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4354 rpi = ndlp->nlp_rpi; 4407 rpi = ndlp->nlp_rpi;
4355 if (phba->sli_rev == LPFC_SLI_REV4) 4408 if (phba->sli_rev == LPFC_SLI_REV4)
4356 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 4409 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4410
4357 lpfc_unreg_login(phba, vport->vpi, rpi, mbox); 4411 lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
4358 mbox->vport = vport; 4412 mbox->vport = vport;
4359 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4413 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
4414 mbox->context1 = ndlp;
4415 mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
4416 } else {
4417 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4418 }
4419
4360 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4420 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4361 if (rc == MBX_NOT_FINISHED) 4421 if (rc == MBX_NOT_FINISHED)
4362 mempool_free(mbox, phba->mbox_mem_pool); 4422 mempool_free(mbox, phba->mbox_mem_pool);
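This hunk closes the loop opened by NLP_ISSUE_LOGO in lpfc_issue_els_logo(): when the flag is set, the UNREG_LOGIN mailbox carries the ndlp in context1 and completes into lpfc_nlp_logo_unreg(), which finally issues the ELS LOGO. The full sequence, as a sketch:

    lpfc_issue_els_logo()            /* sets NLP_ISSUE_LOGO, then calls ...   */
      -> lpfc_unreg_rpi()            /* picks lpfc_nlp_logo_unreg as cmpl     */
        -> UNREG_LOGIN mailbox completes
          -> lpfc_nlp_logo_unreg()   /* re-enters lpfc_issue_els_logo()       */
            -> LOGO goes on the wire with no stale RPI left to race against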
@@ -4499,9 +4559,13 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4499 lpfc_disable_node(vport, ndlp); 4559 lpfc_disable_node(vport, ndlp);
4500 } 4560 }
4501 4561
4562
4563 /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
4564
4502 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 4565 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
4503 if ((mb = phba->sli.mbox_active)) { 4566 if ((mb = phba->sli.mbox_active)) {
4504 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 4567 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
4568 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
4505 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 4569 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
4506 mb->context2 = NULL; 4570 mb->context2 = NULL;
4507 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4571 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -4512,6 +4576,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4512 /* Cleanup REG_LOGIN completions which are not yet processed */ 4576 /* Cleanup REG_LOGIN completions which are not yet processed */
4513 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 4577 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
4514 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || 4578 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
4579 (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
4515 (ndlp != (struct lpfc_nodelist *) mb->context2)) 4580 (ndlp != (struct lpfc_nodelist *) mb->context2))
4516 continue; 4581 continue;
4517 4582
@@ -4521,6 +4586,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4521 4586
4522 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 4587 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
4523 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 4588 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
4589 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
4524 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 4590 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
4525 mp = (struct lpfc_dmabuf *) (mb->context1); 4591 mp = (struct lpfc_dmabuf *) (mb->context1);
4526 if (mp) { 4592 if (mp) {
@@ -4585,7 +4651,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4585 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 4651 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
4586 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 4652 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
4587 mbox->vport = vport; 4653 mbox->vport = vport;
4588 mbox->context2 = NULL; 4654 mbox->context2 = ndlp;
4589 rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4655 rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4590 if (rc == MBX_NOT_FINISHED) { 4656 if (rc == MBX_NOT_FINISHED) {
4591 mempool_free(mbox, phba->mbox_mem_pool); 4657 mempool_free(mbox, phba->mbox_mem_pool);
@@ -5365,9 +5431,17 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
5365 struct lpfc_nodelist *ndlp; 5431 struct lpfc_nodelist *ndlp;
5366 5432
5367 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 5433 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5368 if (filter(ndlp, param)) 5434 if (filter(ndlp, param)) {
5435 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5436 "3185 FIND node filter %p DID "
5437 "Data: x%p x%x x%x\n",
5438 filter, ndlp, ndlp->nlp_DID,
5439 ndlp->nlp_flag);
5369 return ndlp; 5440 return ndlp;
5441 }
5370 } 5442 }
5443 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5444 "3186 FIND node filter %p NOT FOUND.\n", filter);
5371 return NULL; 5445 return NULL;
5372} 5446}
5373 5447
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 41bb1d2fb625..7398ca862e97 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1188,8 +1188,8 @@ typedef struct {
1188 */ 1188 */
1189 1189
1190/* Number of rings currently used and available. */ 1190/* Number of rings currently used and available. */
1191#define MAX_CONFIGURED_RINGS 3 1191#define MAX_SLI3_CONFIGURED_RINGS 3
1192#define MAX_RINGS 4 1192#define MAX_SLI3_RINGS 4
1193 1193
1194/* IOCB / Mailbox is owned by FireFly */ 1194/* IOCB / Mailbox is owned by FireFly */
1195#define OWN_CHIP 1 1195#define OWN_CHIP 1
@@ -1251,6 +1251,8 @@ typedef struct {
1251#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 1251#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1252#define PCI_DEVICE_ID_TIGERSHARK 0x0704 1252#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1253#define PCI_DEVICE_ID_TOMCAT 0x0714 1253#define PCI_DEVICE_ID_TOMCAT 0x0714
1254#define PCI_DEVICE_ID_SKYHAWK 0x0724
1255#define PCI_DEVICE_ID_SKYHAWK_VF 0x072c
1254 1256
1255#define JEDEC_ID_ADDRESS 0x0080001c 1257#define JEDEC_ID_ADDRESS 0x0080001c
1256#define FIREFLY_JEDEC_ID 0x1ACC 1258#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1458,6 +1460,7 @@ typedef struct { /* FireFly BIU registers */
1458#define MBX_UNREG_FCFI 0xA2 1460#define MBX_UNREG_FCFI 0xA2
1459#define MBX_INIT_VFI 0xA3 1461#define MBX_INIT_VFI 0xA3
1460#define MBX_INIT_VPI 0xA4 1462#define MBX_INIT_VPI 0xA4
1463#define MBX_ACCESS_VDATA 0xA5
1461 1464
1462#define MBX_AUTH_PORT 0xF8 1465#define MBX_AUTH_PORT 0xF8
1463#define MBX_SECURITY_MGMT 0xF9 1466#define MBX_SECURITY_MGMT 0xF9
@@ -2991,7 +2994,7 @@ typedef struct _PCB {
2991 2994
2992 uint32_t pgpAddrLow; 2995 uint32_t pgpAddrLow;
2993 uint32_t pgpAddrHigh; 2996 uint32_t pgpAddrHigh;
2994 SLI2_RDSC rdsc[MAX_RINGS]; 2997 SLI2_RDSC rdsc[MAX_SLI3_RINGS];
2995} PCB_t; 2998} PCB_t;
2996 2999
2997/* NEW_FEATURE */ 3000/* NEW_FEATURE */
@@ -3101,18 +3104,18 @@ struct lpfc_pgp {
3101 3104
3102struct sli2_desc { 3105struct sli2_desc {
3103 uint32_t unused1[16]; 3106 uint32_t unused1[16];
3104 struct lpfc_hgp host[MAX_RINGS]; 3107 struct lpfc_hgp host[MAX_SLI3_RINGS];
3105 struct lpfc_pgp port[MAX_RINGS]; 3108 struct lpfc_pgp port[MAX_SLI3_RINGS];
3106}; 3109};
3107 3110
3108struct sli3_desc { 3111struct sli3_desc {
3109 struct lpfc_hgp host[MAX_RINGS]; 3112 struct lpfc_hgp host[MAX_SLI3_RINGS];
3110 uint32_t reserved[8]; 3113 uint32_t reserved[8];
3111 uint32_t hbq_put[16]; 3114 uint32_t hbq_put[16];
3112}; 3115};
3113 3116
3114struct sli3_pgp { 3117struct sli3_pgp {
3115 struct lpfc_pgp port[MAX_RINGS]; 3118 struct lpfc_pgp port[MAX_SLI3_RINGS];
3116 uint32_t hbq_get[16]; 3119 uint32_t hbq_get[16];
3117}; 3120};
3118 3121
@@ -3242,6 +3245,7 @@ typedef struct {
3242#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */ 3245#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
3243#define IOERR_SLI_BRESET 0x102 3246#define IOERR_SLI_BRESET 0x102
3244#define IOERR_SLI_ABORTED 0x103 3247#define IOERR_SLI_ABORTED 0x103
3248#define IOERR_PARAM_MASK 0x1ff
3245} PARM_ERR; 3249} PARM_ERR;
3246 3250
3247typedef union { 3251typedef union {
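The new mask is 0x1ff rather than 0xff so that the driver-defined statuses just above the firmware range (IOERR_SLI_DOWN 0x101, IOERR_SLI_BRESET 0x102, IOERR_SLI_ABORTED 0x103) still compare correctly after masking. An illustrative compile-time check one could drop into any function that uses the mask (not part of the patch):

    BUILD_BUG_ON((IOERR_SLI_ABORTED & IOERR_PARAM_MASK) != IOERR_SLI_ABORTED);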
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 953603a7a43c..834b699cac76 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -187,11 +187,17 @@ struct lpfc_sli_intf {
187/* Active interrupt test count */ 187/* Active interrupt test count */
188#define LPFC_ACT_INTR_CNT 4 188#define LPFC_ACT_INTR_CNT 4
189 189
 190/* Algorithms for scheduling FCP commands to WQs */
191#define LPFC_FCP_SCHED_ROUND_ROBIN 0
192#define LPFC_FCP_SCHED_BY_CPU 1
193
190/* Delay Multiplier constant */ 194/* Delay Multiplier constant */
191#define LPFC_DMULT_CONST 651042 195#define LPFC_DMULT_CONST 651042
192#define LPFC_MIM_IMAX 636 196
193#define LPFC_FP_DEF_IMAX 10000 197/* Configuration of Interrupts / sec for entire HBA port */
194#define LPFC_SP_DEF_IMAX 10000 198#define LPFC_MIN_IMAX 5000
199#define LPFC_MAX_IMAX 5000000
200#define LPFC_DEF_IMAX 50000
195 201
196/* PORT_CAPABILITIES constants. */ 202/* PORT_CAPABILITIES constants. */
197#define LPFC_MAX_SUPPORTED_PAGES 8 203#define LPFC_MAX_SUPPORTED_PAGES 8
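The per-EQ LPFC_MIM_IMAX / LPFC_FP_DEF_IMAX / LPFC_SP_DEF_IMAX values give way to a single interrupts-per-second budget for the whole port. A plausible validation of the corresponding module parameter against the new bounds (assuming, as elsewhere in the driver, that it lands in phba->cfg_fcp_imax; illustrative only):

    if (phba->cfg_fcp_imax < LPFC_MIN_IMAX)
        phba->cfg_fcp_imax = LPFC_MIN_IMAX;   /* floor: 5,000 int/sec */
    else if (phba->cfg_fcp_imax > LPFC_MAX_IMAX)
        phba->cfg_fcp_imax = LPFC_MAX_IMAX;   /* ceiling: 5,000,000 int/sec */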
@@ -338,7 +344,7 @@ struct lpfc_cqe {
338 * Define mask value for xri_aborted and wcqe completed CQE extended status. 344 * Define mask value for xri_aborted and wcqe completed CQE extended status.
339 * Currently, extended status is limited to 9 bits (0x0 -> 0x103) . 345 * Currently, extended status is limited to 9 bits (0x0 -> 0x103) .
340 */ 346 */
341#define WCQE_PARAM_MASK 0x1FF; 347#define WCQE_PARAM_MASK 0x1FF
342 348
343/* completion queue entry for wqe completions */ 349/* completion queue entry for wqe completions */
344struct lpfc_wcqe_complete { 350struct lpfc_wcqe_complete {
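Dropping the stray semicolon from WCQE_PARAM_MASK is more than cosmetic: with it, the macro could only appear as the tail of a full statement, never inside an expression. Illustration of the failure mode:

    #define WCQE_PARAM_MASK 0x1FF;                /* old, broken form */

    status = parameter & WCQE_PARAM_MASK;         /* expands to "... & 0x1FF;;",
                                                   * which happens to compile */
    if ((parameter & WCQE_PARAM_MASK) == err)     /* expands to "(... & 0x1FF;)",
                                                   * a syntax error */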
@@ -880,13 +886,19 @@ struct mbox_header {
880#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 886#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
881#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A 887#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
882#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D 888#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
889#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG 0x3E
890#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG 0x43
883#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D 891#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
884#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A 892#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
893#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B
894#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
895#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
885#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A 896#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
886#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B 897#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
887#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C 898#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
888#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D 899#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
889#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0 900#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
901#define LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES 0xA1
890#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4 902#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
891#define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG 0xA5 903#define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG 0xA5
892#define LPFC_MBOX_OPCODE_GET_PROFILE_LIST 0xA6 904#define LPFC_MBOX_OPCODE_GET_PROFILE_LIST 0xA6
@@ -1382,6 +1394,11 @@ struct lpfc_mbx_set_link_diag_state {
1382#define lpfc_mbx_set_diag_state_diag_SHIFT 0 1394#define lpfc_mbx_set_diag_state_diag_SHIFT 0
1383#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001 1395#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
1384#define lpfc_mbx_set_diag_state_diag_WORD word0 1396#define lpfc_mbx_set_diag_state_diag_WORD word0
1397#define lpfc_mbx_set_diag_state_diag_bit_valid_SHIFT 2
1398#define lpfc_mbx_set_diag_state_diag_bit_valid_MASK 0x00000001
1399#define lpfc_mbx_set_diag_state_diag_bit_valid_WORD word0
1400#define LPFC_DIAG_STATE_DIAG_BIT_VALID_NO_CHANGE 0
1401#define LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE 1
1385#define lpfc_mbx_set_diag_state_link_num_SHIFT 16 1402#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
1386#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F 1403#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
1387#define lpfc_mbx_set_diag_state_link_num_WORD word0 1404#define lpfc_mbx_set_diag_state_link_num_WORD word0
@@ -2556,7 +2573,7 @@ struct lpfc_mbx_get_sli4_parameters {
2556}; 2573};
2557 2574
2558struct lpfc_rscr_desc_generic { 2575struct lpfc_rscr_desc_generic {
2559#define LPFC_RSRC_DESC_WSIZE 18 2576#define LPFC_RSRC_DESC_WSIZE 22
2560 uint32_t desc[LPFC_RSRC_DESC_WSIZE]; 2577 uint32_t desc[LPFC_RSRC_DESC_WSIZE];
2561}; 2578};
2562 2579
@@ -2566,6 +2583,9 @@ struct lpfc_rsrc_desc_pcie {
2566#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff 2583#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
2567#define lpfc_rsrc_desc_pcie_type_WORD word0 2584#define lpfc_rsrc_desc_pcie_type_WORD word0
2568#define LPFC_RSRC_DESC_TYPE_PCIE 0x40 2585#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
2586#define lpfc_rsrc_desc_pcie_length_SHIFT 8
2587#define lpfc_rsrc_desc_pcie_length_MASK 0x000000ff
2588#define lpfc_rsrc_desc_pcie_length_WORD word0
2569 uint32_t word1; 2589 uint32_t word1;
2570#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0 2590#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
2571#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff 2591#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
@@ -2593,6 +2613,12 @@ struct lpfc_rsrc_desc_fcfcoe {
2593#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff 2613#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
2594#define lpfc_rsrc_desc_fcfcoe_type_WORD word0 2614#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
2595#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43 2615#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
2616#define lpfc_rsrc_desc_fcfcoe_length_SHIFT 8
2617#define lpfc_rsrc_desc_fcfcoe_length_MASK 0x000000ff
2618#define lpfc_rsrc_desc_fcfcoe_length_WORD word0
2619#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD 0
2620#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH 72
2621#define LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH 88
2596 uint32_t word1; 2622 uint32_t word1;
2597#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0 2623#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
2598#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff 2624#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
@@ -2651,6 +2677,12 @@ struct lpfc_rsrc_desc_fcfcoe {
2651#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16 2677#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
2652#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff 2678#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
2653#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13 2679#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
2680/* extended FC/FCoE Resource Descriptor when length = 88 bytes */
2681 uint32_t bw_min;
2682 uint32_t bw_max;
2683 uint32_t iops_min;
2684 uint32_t iops_max;
2685 uint32_t reserved[4];
2654}; 2686};
2655 2687
2656struct lpfc_func_cfg { 2688struct lpfc_func_cfg {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 628a703abddb..8a55a586dd65 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -480,11 +480,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
480 phba->link_state = LPFC_LINK_DOWN; 480 phba->link_state = LPFC_LINK_DOWN;
481 481
482 /* Only process IOCBs on ELS ring till hba_state is READY */ 482 /* Only process IOCBs on ELS ring till hba_state is READY */
483 if (psli->ring[psli->extra_ring].cmdringaddr) 483 if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
484 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 484 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
485 if (psli->ring[psli->fcp_ring].cmdringaddr) 485 if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
486 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 486 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
487 if (psli->ring[psli->next_ring].cmdringaddr) 487 if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
488 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 488 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
489 489
490 /* Post receive buffers for desired rings */ 490 /* Post receive buffers for desired rings */
@@ -2059,6 +2059,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2059 oneConnect = 1; 2059 oneConnect = 1;
2060 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2060 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2061 break; 2061 break;
2062 case PCI_DEVICE_ID_SKYHAWK:
2063 case PCI_DEVICE_ID_SKYHAWK_VF:
2064 oneConnect = 1;
2065 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2066 break;
2062 default: 2067 default:
2063 m = (typeof(m)){"Unknown", "", ""}; 2068 m = (typeof(m)){"Unknown", "", ""};
2064 break; 2069 break;
@@ -4546,6 +4551,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4546 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 4551 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4547 } 4552 }
4548 4553
4554 if (!phba->sli.ring)
4555 phba->sli.ring = (struct lpfc_sli_ring *)
4556 kzalloc(LPFC_SLI3_MAX_RING *
4557 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4558 if (!phba->sli.ring)
4559 return -ENOMEM;
4560
4549 /* 4561 /*
4550 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4562 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4551 * used to create the sg_dma_buf_pool must be dynamically calculated. 4563 * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4690,6 +4702,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4690 /* Get all the module params for configuring this host */ 4702 /* Get all the module params for configuring this host */
4691 lpfc_get_cfgparam(phba); 4703 lpfc_get_cfgparam(phba);
4692 phba->max_vpi = LPFC_MAX_VPI; 4704 phba->max_vpi = LPFC_MAX_VPI;
4705
 4706 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
4707 phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
4708
4693 /* This will be set to correct value after the read_config mbox */ 4709 /* This will be set to correct value after the read_config mbox */
4694 phba->max_vports = 0; 4710 phba->max_vports = 0;
4695 4711
@@ -4705,6 +4721,16 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4705 sges_per_segment = 2; 4721 sges_per_segment = 2;
4706 4722
4707 /* 4723 /*
4724 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
4725 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
4726 */
4727 if (!phba->sli.ring)
4728 phba->sli.ring = kzalloc(
4729 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
4730 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4731 if (!phba->sli.ring)
4732 return -ENOMEM;
4733 /*
4708 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4734 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4709 * used to create the sg_dma_buf_pool must be dynamically calculated. 4735 * used to create the sg_dma_buf_pool must be dynamically calculated.
4710 * 2 segments are added since the IOCB needs a command and response bde. 4736 * 2 segments are added since the IOCB needs a command and response bde.
@@ -4909,21 +4935,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4909 goto out_remove_rpi_hdrs; 4935 goto out_remove_rpi_hdrs;
4910 } 4936 }
4911 4937
4912 /* 4938 phba->sli4_hba.fcp_eq_hdl =
4913 * The cfg_fcp_eq_count can be zero whenever there is exactly one 4939 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4914 * interrupt vector. This is not an error 4940 phba->cfg_fcp_io_channel), GFP_KERNEL);
4915 */ 4941 if (!phba->sli4_hba.fcp_eq_hdl) {
4916 if (phba->cfg_fcp_eq_count) { 4942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4917 phba->sli4_hba.fcp_eq_hdl = 4943 "2572 Failed allocate memory for "
4918 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4944 "fast-path per-EQ handle array\n");
4919 phba->cfg_fcp_eq_count), GFP_KERNEL); 4945 rc = -ENOMEM;
4920 if (!phba->sli4_hba.fcp_eq_hdl) { 4946 goto out_free_fcf_rr_bmask;
4921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4922 "2572 Failed allocate memory for "
4923 "fast-path per-EQ handle array\n");
4924 rc = -ENOMEM;
4925 goto out_free_fcf_rr_bmask;
4926 }
4927 } 4947 }
4928 4948
4929 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4949 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -5550,6 +5570,10 @@ lpfc_hba_free(struct lpfc_hba *phba)
5550 /* Release the driver assigned board number */ 5570 /* Release the driver assigned board number */
5551 idr_remove(&lpfc_hba_index, phba->brd_no); 5571 idr_remove(&lpfc_hba_index, phba->brd_no);
5552 5572
5573 /* Free memory allocated with sli rings */
5574 kfree(phba->sli.ring);
5575 phba->sli.ring = NULL;
5576
5553 kfree(phba); 5577 kfree(phba);
5554 return; 5578 return;
5555} 5579}
@@ -6275,8 +6299,9 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6275 uint32_t shdr_status, shdr_add_status; 6299 uint32_t shdr_status, shdr_add_status;
6276 struct lpfc_mbx_get_func_cfg *get_func_cfg; 6300 struct lpfc_mbx_get_func_cfg *get_func_cfg;
6277 struct lpfc_rsrc_desc_fcfcoe *desc; 6301 struct lpfc_rsrc_desc_fcfcoe *desc;
6302 char *pdesc_0;
6278 uint32_t desc_count; 6303 uint32_t desc_count;
6279 int length, i, rc = 0; 6304 int length, i, rc = 0, rc2;
6280 6305
6281 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6306 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6282 if (!pmb) { 6307 if (!pmb) {
@@ -6388,18 +6413,17 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6388 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 6413 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6389 length, LPFC_SLI4_MBX_EMBED); 6414 length, LPFC_SLI4_MBX_EMBED);
6390 6415
6391 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6416 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6392 shdr = (union lpfc_sli4_cfg_shdr *) 6417 shdr = (union lpfc_sli4_cfg_shdr *)
6393 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 6418 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6394 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6419 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6395 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6420 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6396 if (rc || shdr_status || shdr_add_status) { 6421 if (rc2 || shdr_status || shdr_add_status) {
6397 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6422 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6398 "3026 Mailbox failed , mbxCmd x%x " 6423 "3026 Mailbox failed , mbxCmd x%x "
6399 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 6424 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6400 bf_get(lpfc_mqe_command, &pmb->u.mqe), 6425 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6401 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 6426 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6402 rc = -EIO;
6403 goto read_cfg_out; 6427 goto read_cfg_out;
6404 } 6428 }
6405 6429
@@ -6407,11 +6431,18 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6407 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 6431 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6408 desc_count = get_func_cfg->func_cfg.rsrc_desc_count; 6432 desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6409 6433
6434 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
6435 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
6436 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
6437 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
6438 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
6439 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
6440 goto read_cfg_out;
6441
6410 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 6442 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6411 desc = (struct lpfc_rsrc_desc_fcfcoe *) 6443 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
6412 &get_func_cfg->func_cfg.desc[i];
6413 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 6444 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6414 bf_get(lpfc_rsrc_desc_pcie_type, desc)) { 6445 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
6415 phba->sli4_hba.iov.pf_number = 6446 phba->sli4_hba.iov.pf_number =
6416 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 6447 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6417 phba->sli4_hba.iov.vf_number = 6448 phba->sli4_hba.iov.vf_number =
@@ -6425,13 +6456,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6425 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 6456 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6426 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 6457 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6427 phba->sli4_hba.iov.vf_number); 6458 phba->sli4_hba.iov.vf_number);
6428 else { 6459 else
6429 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6460 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6430 "3028 GET_FUNCTION_CONFIG: failed to find " 6461 "3028 GET_FUNCTION_CONFIG: failed to find "
6431 "Resrouce Descriptor:x%x\n", 6462 "Resrouce Descriptor:x%x\n",
6432 LPFC_RSRC_DESC_TYPE_FCFCOE); 6463 LPFC_RSRC_DESC_TYPE_FCFCOE);
6433 rc = -EIO;
6434 }
6435 6464
6436read_cfg_out: 6465read_cfg_out:
6437 mempool_free(pmb, phba->mbox_mem_pool); 6466 mempool_free(pmb, phba->mbox_mem_pool);
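The descriptor array is now walked with a byte stride read from the first descriptor's length field instead of a fixed struct index, since a V0 FC/FCoE descriptor is 72 bytes and a V1 descriptor 88 (V0 firmware may also leave the field at its reserved value of 0). The idiom, isolated:

    pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
    length = bf_get(lpfc_rsrc_desc_fcfcoe_length,
                    (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0);
    if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
        length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
    /* the i-th descriptor starts length * i bytes past the first one */
    desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);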
@@ -6512,53 +6541,40 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
6512static int 6541static int
6513lpfc_sli4_queue_verify(struct lpfc_hba *phba) 6542lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6514{ 6543{
6515 int cfg_fcp_wq_count; 6544 int cfg_fcp_io_channel;
6516 int cfg_fcp_eq_count; 6545 uint32_t cpu;
6546 uint32_t i = 0;
6547
6517 6548
6518 /* 6549 /*
6519 * Sanity check for confiugred queue parameters against the run-time 6550 * Sanity check for configured queue parameters against the run-time
6520 * device parameters 6551 * device parameters
6521 */ 6552 */
6522 6553
6523 /* Sanity check on FCP fast-path WQ parameters */ 6554 /* Sanity check on HBA EQ parameters */
6524 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 6555 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6525 if (cfg_fcp_wq_count > 6556
 6526 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 6557 /* It doesn't make sense to have more io channels than CPUs */
6527 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 6558 for_each_online_cpu(cpu) {
6528 LPFC_SP_WQN_DEF; 6559 i++;
6529 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 6560 }
6530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6561 if (i < cfg_fcp_io_channel) {
6531 "2581 Not enough WQs (%d) from "
6532 "the pci function for supporting "
6533 "FCP WQs (%d)\n",
6534 phba->sli4_hba.max_cfg_param.max_wq,
6535 phba->cfg_fcp_wq_count);
6536 goto out_error;
6537 }
6538 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6562 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6539 "2582 Not enough WQs (%d) from the pci " 6563 "3188 Reducing IO channels to match number of "
6540 "function for supporting the requested " 6564 "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
6541 "FCP WQs (%d), the actual FCP WQs can " 6565 cfg_fcp_io_channel = i;
6542 "be supported: %d\n", 6566 }
6543 phba->sli4_hba.max_cfg_param.max_wq, 6567
6544 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 6568 if (cfg_fcp_io_channel >
6545 } 6569 phba->sli4_hba.max_cfg_param.max_eq) {
6546 /* The actual number of FCP work queues adopted */ 6570 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
6547 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 6571 if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) {
6548
6549 /* Sanity check on FCP fast-path EQ parameters */
6550 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
6551 if (cfg_fcp_eq_count >
6552 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
6553 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
6554 LPFC_SP_EQN_DEF;
6555 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
6556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6557 "2574 Not enough EQs (%d) from the " 6573 "2574 Not enough EQs (%d) from the "
6558 "pci function for supporting FCP " 6574 "pci function for supporting FCP "
6559 "EQs (%d)\n", 6575 "EQs (%d)\n",
6560 phba->sli4_hba.max_cfg_param.max_eq, 6576 phba->sli4_hba.max_cfg_param.max_eq,
6561 phba->cfg_fcp_eq_count); 6577 phba->cfg_fcp_io_channel);
6562 goto out_error; 6578 goto out_error;
6563 } 6579 }
6564 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6580 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -6567,22 +6583,16 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6567 "FCP EQs (%d), the actual FCP EQs can " 6583 "FCP EQs (%d), the actual FCP EQs can "
6568 "be supported: %d\n", 6584 "be supported: %d\n",
6569 phba->sli4_hba.max_cfg_param.max_eq, 6585 phba->sli4_hba.max_cfg_param.max_eq,
6570 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 6586 phba->cfg_fcp_io_channel, cfg_fcp_io_channel);
6571 }
6572 /* It does not make sense to have more EQs than WQs */
6573 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
6574 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6575 "2593 The FCP EQ count(%d) cannot be greater "
6576 "than the FCP WQ count(%d), limiting the "
6577 "FCP EQ count to %d\n", cfg_fcp_eq_count,
6578 phba->cfg_fcp_wq_count,
6579 phba->cfg_fcp_wq_count);
6580 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
6581 } 6587 }
6588
 6589 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
6590
6582 /* The actual number of FCP event queues adopted */ 6591 /* The actual number of FCP event queues adopted */
6583 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 6592 phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
6584 /* The overall number of event queues used */ 6593 phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
6585 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 6594 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6595 phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;
6586 6596
6587 /* Get EQ depth from module parameter, fake the default for now */ 6597 /* Get EQ depth from module parameter, fake the default for now */
6588 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 6598 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
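The hand-rolled for_each_online_cpu() counting loop above is equivalent to the kernel's num_online_cpus() helper; either way the goal is to never configure more IO channels than there are CPUs to service them. Equivalent form (illustrative):

    i = num_online_cpus();
    if (i < cfg_fcp_io_channel) {
        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                        "3188 Reducing IO channels to match number of "
                        "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
        cfg_fcp_io_channel = i;
    }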
@@ -6615,50 +6625,104 @@ int
 lpfc_sli4_queue_create(struct lpfc_hba *phba)
 {
 	struct lpfc_queue *qdesc;
-	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+	int idx;
 
 	/*
-	 * Create Event Queues (EQs)
+	 * Create HBA Record arrays.
 	 */
+	if (!phba->cfg_fcp_io_channel)
+		return -ERANGE;
 
-	/* Create slow path event queue */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
-				      phba->sli4_hba.eq_ecount);
-	if (!qdesc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0496 Failed allocate slow-path EQ\n");
-		goto out_error;
-	}
-	phba->sli4_hba.sp_eq = qdesc;
+	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
+	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
+	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
+	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
+	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
+	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+
+	phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
+				phba->cfg_fcp_io_channel), GFP_KERNEL);
+	if (!phba->sli4_hba.hba_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2576 Failed allocate memory for "
+			"fast-path EQ record array\n");
+		goto out_error;
+	}
+
+	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+				phba->cfg_fcp_io_channel), GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2577 Failed allocate memory for fast-path "
+			"CQ record array\n");
+		goto out_error;
+	}
+
+	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
+				phba->cfg_fcp_io_channel), GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2578 Failed allocate memory for fast-path "
+			"WQ record array\n");
+		goto out_error;
+	}
 
 	/*
-	 * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be
-	 * zero whenever there is exactly one interrupt vector. This is not
-	 * an error.
+	 * Since the first EQ can have multiple CQs associated with it,
+	 * this array is used to quickly see if we have a FCP fast-path
+	 * CQ match.
 	 */
-	if (phba->cfg_fcp_eq_count) {
-		phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
-				phba->cfg_fcp_eq_count), GFP_KERNEL);
-		if (!phba->sli4_hba.fp_eq) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2576 Failed allocate memory for "
-					"fast-path EQ record array\n");
-			goto out_free_sp_eq;
-		}
-	}
-	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+	phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
+				phba->cfg_fcp_io_channel), GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_cq_map) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2545 Failed allocate memory for fast-path "
+			"CQ map\n");
+		goto out_error;
+	}
+
+	/*
+	 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
+	 * how many EQs to create.
+	 */
+	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+
+		/* Create EQs */
 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
 					      phba->sli4_hba.eq_ecount);
 		if (!qdesc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0497 Failed allocate fast-path EQ\n");
-			goto out_free_fp_eq;
+					"0497 Failed allocate EQ (%d)\n", idx);
+			goto out_error;
 		}
-		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
+		phba->sli4_hba.hba_eq[idx] = qdesc;
+
+		/* Create Fast Path FCP CQs */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+					      phba->sli4_hba.cq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0499 Failed allocate fast-path FCP "
+					"CQ (%d)\n", idx);
+			goto out_error;
+		}
+		phba->sli4_hba.fcp_cq[idx] = qdesc;
+
+		/* Create Fast Path FCP WQs */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+					      phba->sli4_hba.wq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0503 Failed allocate fast-path FCP "
+					"WQ (%d)\n", idx);
+			goto out_error;
+		}
+		phba->sli4_hba.fcp_wq[idx] = qdesc;
 	}
 
+
 	/*
-	 * Create Complete Queues (CQs)
+	 * Create Slow Path Completion Queues (CQs)
 	 */
 
 	/* Create slow-path Mailbox Command Complete Queue */
@@ -6667,7 +6731,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6667 if (!qdesc) { 6731 if (!qdesc) {
6668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6732 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6669 "0500 Failed allocate slow-path mailbox CQ\n"); 6733 "0500 Failed allocate slow-path mailbox CQ\n");
6670 goto out_free_fp_eq; 6734 goto out_error;
6671 } 6735 }
6672 phba->sli4_hba.mbx_cq = qdesc; 6736 phba->sli4_hba.mbx_cq = qdesc;
6673 6737
@@ -6677,59 +6741,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6677 if (!qdesc) { 6741 if (!qdesc) {
6678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6679 "0501 Failed allocate slow-path ELS CQ\n"); 6743 "0501 Failed allocate slow-path ELS CQ\n");
6680 goto out_free_mbx_cq; 6744 goto out_error;
6681 } 6745 }
6682 phba->sli4_hba.els_cq = qdesc; 6746 phba->sli4_hba.els_cq = qdesc;
6683 6747
6684 6748
6685 /* 6749 /*
6686 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs. 6750 * Create Slow Path Work Queues (WQs)
6687 * If there are no FCP EQs then create exactly one FCP CQ.
6688 */ 6751 */
6689 if (phba->cfg_fcp_eq_count)
6690 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6691 phba->cfg_fcp_eq_count),
6692 GFP_KERNEL);
6693 else
6694 phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
6695 GFP_KERNEL);
6696 if (!phba->sli4_hba.fcp_cq) {
6697 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6698 "2577 Failed allocate memory for fast-path "
6699 "CQ record array\n");
6700 goto out_free_els_cq;
6701 }
6702 fcp_cqidx = 0;
6703 do {
6704 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6705 phba->sli4_hba.cq_ecount);
6706 if (!qdesc) {
6707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6708 "0499 Failed allocate fast-path FCP "
6709 "CQ (%d)\n", fcp_cqidx);
6710 goto out_free_fcp_cq;
6711 }
6712 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6713 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6714 6752
6715 /* Create Mailbox Command Queue */ 6753 /* Create Mailbox Command Queue */
6716 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6717 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6718 6754
6719 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 6755 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6720 phba->sli4_hba.mq_ecount); 6756 phba->sli4_hba.mq_ecount);
6721 if (!qdesc) { 6757 if (!qdesc) {
6722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6723 "0505 Failed allocate slow-path MQ\n"); 6759 "0505 Failed allocate slow-path MQ\n");
6724 goto out_free_fcp_cq; 6760 goto out_error;
6725 } 6761 }
6726 phba->sli4_hba.mbx_wq = qdesc; 6762 phba->sli4_hba.mbx_wq = qdesc;
6727 6763
6728 /* 6764 /*
6729 * Create all the Work Queues (WQs) 6765 * Create ELS Work Queues
6730 */ 6766 */
6731 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6732 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6733 6767
6734 /* Create slow-path ELS Work Queue */ 6768 /* Create slow-path ELS Work Queue */
6735 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6769 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
@@ -6737,36 +6771,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6737 if (!qdesc) { 6771 if (!qdesc) {
6738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6772 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6739 "0504 Failed allocate slow-path ELS WQ\n"); 6773 "0504 Failed allocate slow-path ELS WQ\n");
6740 goto out_free_mbx_wq; 6774 goto out_error;
6741 } 6775 }
6742 phba->sli4_hba.els_wq = qdesc; 6776 phba->sli4_hba.els_wq = qdesc;
6743 6777
6744 /* Create fast-path FCP Work Queue(s) */
6745 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6746 phba->cfg_fcp_wq_count), GFP_KERNEL);
6747 if (!phba->sli4_hba.fcp_wq) {
6748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6749 "2578 Failed allocate memory for fast-path "
6750 "WQ record array\n");
6751 goto out_free_els_wq;
6752 }
6753 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6754 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6755 phba->sli4_hba.wq_ecount);
6756 if (!qdesc) {
6757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6758 "0503 Failed allocate fast-path FCP "
6759 "WQ (%d)\n", fcp_wqidx);
6760 goto out_free_fcp_wq;
6761 }
6762 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6763 }
6764
6765 /* 6778 /*
6766 * Create Receive Queue (RQ) 6779 * Create Receive Queue (RQ)
6767 */ 6780 */
6768 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6769 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6770 6781
6771 /* Create Receive Queue for header */ 6782 /* Create Receive Queue for header */
6772 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6783 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
@@ -6774,7 +6785,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6774 if (!qdesc) { 6785 if (!qdesc) {
6775 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6776 "0506 Failed allocate receive HRQ\n"); 6787 "0506 Failed allocate receive HRQ\n");
6777 goto out_free_fcp_wq; 6788 goto out_error;
6778 } 6789 }
6779 phba->sli4_hba.hdr_rq = qdesc; 6790 phba->sli4_hba.hdr_rq = qdesc;
6780 6791
@@ -6784,52 +6795,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6784 if (!qdesc) { 6795 if (!qdesc) {
6785 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6796 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6786 "0507 Failed allocate receive DRQ\n"); 6797 "0507 Failed allocate receive DRQ\n");
6787 goto out_free_hdr_rq; 6798 goto out_error;
6788 } 6799 }
6789 phba->sli4_hba.dat_rq = qdesc; 6800 phba->sli4_hba.dat_rq = qdesc;
6790 6801
6791 return 0; 6802 return 0;
6792 6803
6793out_free_hdr_rq:
6794 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6795 phba->sli4_hba.hdr_rq = NULL;
6796out_free_fcp_wq:
6797 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6798 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6799 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6800 }
6801 kfree(phba->sli4_hba.fcp_wq);
6802 phba->sli4_hba.fcp_wq = NULL;
6803out_free_els_wq:
6804 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6805 phba->sli4_hba.els_wq = NULL;
6806out_free_mbx_wq:
6807 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6808 phba->sli4_hba.mbx_wq = NULL;
6809out_free_fcp_cq:
6810 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6811 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6812 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6813 }
6814 kfree(phba->sli4_hba.fcp_cq);
6815 phba->sli4_hba.fcp_cq = NULL;
6816out_free_els_cq:
6817 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6818 phba->sli4_hba.els_cq = NULL;
6819out_free_mbx_cq:
6820 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6821 phba->sli4_hba.mbx_cq = NULL;
6822out_free_fp_eq:
6823 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6824 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6825 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6826 }
6827 kfree(phba->sli4_hba.fp_eq);
6828 phba->sli4_hba.fp_eq = NULL;
6829out_free_sp_eq:
6830 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6831 phba->sli4_hba.sp_eq = NULL;
6832out_error: 6804out_error:
6805 lpfc_sli4_queue_destroy(phba);
6833 return -ENOMEM; 6806 return -ENOMEM;
6834} 6807}
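With every queue handle now held in zero-initialized record arrays, each failure path in lpfc_sli4_queue_create() can jump to the single out_error label, which just calls lpfc_sli4_queue_destroy(). A userspace sketch of the same allocate-all-or-tear-down-all shape; queues_create/queues_destroy and the struct are hypothetical names, and the destroy side is modeled after the teardown hunk further below:

#include <stdlib.h>

struct queues {
	void **eq, **cq, **wq;
	int nchan;
};

void queues_destroy(struct queues *q);	/* NULL-safe; sketched below */

/* Allocate everything; on any failure fall through to one teardown call. */
int queues_create(struct queues *q, int nchan)
{
	q->nchan = nchan;
	q->eq = calloc(nchan, sizeof(*q->eq));
	q->cq = calloc(nchan, sizeof(*q->cq));
	q->wq = calloc(nchan, sizeof(*q->wq));
	if (!q->eq || !q->cq || !q->wq)
		goto out_error;
	for (int i = 0; i < nchan; i++) {
		q->eq[i] = malloc(64);
		q->cq[i] = malloc(64);
		q->wq[i] = malloc(64);
		if (!q->eq[i] || !q->cq[i] || !q->wq[i])
			goto out_error;
	}
	return 0;

out_error:
	queues_destroy(q);	/* single label, idempotent cleanup */
	return -1;
}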
6835 6808
@@ -6848,58 +6821,86 @@ out_error:
6848void 6821void
6849lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 6822lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6850{ 6823{
6851 int fcp_qidx; 6824 int idx;
6825
6826 if (phba->sli4_hba.hba_eq != NULL) {
6827 /* Release HBA event queue */
6828 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6829 if (phba->sli4_hba.hba_eq[idx] != NULL) {
6830 lpfc_sli4_queue_free(
6831 phba->sli4_hba.hba_eq[idx]);
6832 phba->sli4_hba.hba_eq[idx] = NULL;
6833 }
6834 }
6835 kfree(phba->sli4_hba.hba_eq);
6836 phba->sli4_hba.hba_eq = NULL;
6837 }
6838
6839 if (phba->sli4_hba.fcp_cq != NULL) {
6840 /* Release FCP completion queue */
6841 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6842 if (phba->sli4_hba.fcp_cq[idx] != NULL) {
6843 lpfc_sli4_queue_free(
6844 phba->sli4_hba.fcp_cq[idx]);
6845 phba->sli4_hba.fcp_cq[idx] = NULL;
6846 }
6847 }
6848 kfree(phba->sli4_hba.fcp_cq);
6849 phba->sli4_hba.fcp_cq = NULL;
6850 }
6851
6852 if (phba->sli4_hba.fcp_wq != NULL) {
6853 /* Release FCP work queue */
6854 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6855 if (phba->sli4_hba.fcp_wq[idx] != NULL) {
6856 lpfc_sli4_queue_free(
6857 phba->sli4_hba.fcp_wq[idx]);
6858 phba->sli4_hba.fcp_wq[idx] = NULL;
6859 }
6860 }
6861 kfree(phba->sli4_hba.fcp_wq);
6862 phba->sli4_hba.fcp_wq = NULL;
6863 }
6864
6865 /* Release FCP CQ mapping array */
6866 if (phba->sli4_hba.fcp_cq_map != NULL) {
6867 kfree(phba->sli4_hba.fcp_cq_map);
6868 phba->sli4_hba.fcp_cq_map = NULL;
6869 }
 
 	/* Release mailbox command work queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
-	phba->sli4_hba.mbx_wq = NULL;
+	if (phba->sli4_hba.mbx_wq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+		phba->sli4_hba.mbx_wq = NULL;
+	}
 
 	/* Release ELS work queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
-	phba->sli4_hba.els_wq = NULL;
-
-	/* Release FCP work queue */
-	if (phba->sli4_hba.fcp_wq != NULL)
-		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
-		     fcp_qidx++)
-			lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
-	kfree(phba->sli4_hba.fcp_wq);
-	phba->sli4_hba.fcp_wq = NULL;
+	if (phba->sli4_hba.els_wq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+		phba->sli4_hba.els_wq = NULL;
+	}
 
 	/* Release unsolicited receive queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
-	phba->sli4_hba.hdr_rq = NULL;
-	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
-	phba->sli4_hba.dat_rq = NULL;
+	if (phba->sli4_hba.hdr_rq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+		phba->sli4_hba.hdr_rq = NULL;
+	}
+	if (phba->sli4_hba.dat_rq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
+		phba->sli4_hba.dat_rq = NULL;
+	}
 
 	/* Release ELS complete queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
-	phba->sli4_hba.els_cq = NULL;
+	if (phba->sli4_hba.els_cq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+		phba->sli4_hba.els_cq = NULL;
+	}
 
 	/* Release mailbox command complete queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
-	phba->sli4_hba.mbx_cq = NULL;
-
-	/* Release FCP response complete queue */
-	fcp_qidx = 0;
-	if (phba->sli4_hba.fcp_cq != NULL)
-		do
-			lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
-		while (++fcp_qidx < phba->cfg_fcp_eq_count);
-	kfree(phba->sli4_hba.fcp_cq);
-	phba->sli4_hba.fcp_cq = NULL;
-
-	/* Release fast-path event queue */
-	if (phba->sli4_hba.fp_eq != NULL)
-		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
-		     fcp_qidx++)
-			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
-	kfree(phba->sli4_hba.fp_eq);
-	phba->sli4_hba.fp_eq = NULL;
-
-	/* Release slow-path event queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
-	phba->sli4_hba.sp_eq = NULL;
+	if (phba->sli4_hba.mbx_cq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+		phba->sli4_hba.mbx_cq = NULL;
+	}
 
 	return;
 }
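The matching teardown sketch for the create example above: every pointer is checked before it is freed and NULLed afterwards, so the function is safe to call on a partially built or already destroyed set of queues. Same illustrative struct as before:

#include <stdlib.h>

struct queues {
	void **eq, **cq, **wq;
	int nchan;
};

/* Free one pointer array plus its elements; tolerate NULL, NULL out after. */
static void free_ring_array(void ***arr, int n)
{
	if (*arr == NULL)
		return;
	for (int i = 0; i < n; i++) {
		free((*arr)[i]);
		(*arr)[i] = NULL;
	}
	free(*arr);
	*arr = NULL;
}

/* Safe on a partially built set, so all create paths can share one label. */
void queues_destroy(struct queues *q)
{
	free_ring_array(&q->eq, q->nchan);
	free_ring_array(&q->cq, q->nchan);
	free_ring_array(&q->wq, q->nchan);
}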
@@ -6919,61 +6920,124 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 int
 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 {
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
 	int rc = -ENOMEM;
 	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
 	int fcp_cq_index = 0;
 
 	/*
-	 * Set up Event Queues (EQs)
+	 * Set up HBA Event Queues (EQs)
 	 */
 
-	/* Set up slow-path event queue */
-	if (!phba->sli4_hba.sp_eq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0520 Slow-path EQ not allocated\n");
-		goto out_error;
-	}
-	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
-			    LPFC_SP_DEF_IMAX);
-	if (rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0521 Failed setup of slow-path EQ: "
-				"rc = 0x%x\n", rc);
-		goto out_error;
-	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2583 Slow-path EQ setup: queue-id=%d\n",
-			phba->sli4_hba.sp_eq->queue_id);
-
-	/* Set up fast-path event queue */
-	if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+	/* Set up HBA event queue */
+	if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3147 Fast-path EQs not allocated\n");
 		rc = -ENOMEM;
-		goto out_destroy_sp_eq;
+		goto out_error;
 	}
-	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
-		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
+		if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0522 Fast-path EQ (%d) not "
 					"allocated\n", fcp_eqidx);
 			rc = -ENOMEM;
-			goto out_destroy_fp_eq;
+			goto out_destroy_hba_eq;
 		}
-		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
-				    phba->cfg_fcp_imax);
+		rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
+			(phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0523 Failed setup of fast-path EQ "
 					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
-			goto out_destroy_fp_eq;
+			goto out_destroy_hba_eq;
 		}
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"2584 Fast-path EQ setup: "
+				"2584 HBA EQ setup: "
 				"queue[%d]-id=%d\n", fcp_eqidx,
-				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
+				phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
 	}
 
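Note the new lpfc_eq_create() argument: the configured interrupt maximum is divided by the channel count. On our reading of this hunk, cfg_fcp_imax now caps the interrupt rate for the whole HBA, so each of the N per-channel EQs gets an equal share. A hedged one-liner of that arithmetic:

/*
 * Sketch of the coalescing split above, assuming fcp_imax is an
 * adapter-wide interrupts-per-second budget shared by all channels.
 */
static unsigned int eq_imax_share(unsigned int fcp_imax,
				  unsigned int io_channels)
{
	if (io_channels == 0)
		return fcp_imax;	/* defensive; create rejects 0 earlier */
	return fcp_imax / io_channels;
}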
6962 /* Set up fast-path FCP Response Complete Queue */
6963 if (!phba->sli4_hba.fcp_cq) {
6964 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6965 "3148 Fast-path FCP CQ array not "
6966 "allocated\n");
6967 rc = -ENOMEM;
6968 goto out_destroy_hba_eq;
6969 }
6970
6971 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
6972 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6973 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6974 "0526 Fast-path FCP CQ (%d) not "
6975 "allocated\n", fcp_cqidx);
6976 rc = -ENOMEM;
6977 goto out_destroy_fcp_cq;
6978 }
6979 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6980 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
6981 if (rc) {
6982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6983 "0527 Failed setup of fast-path FCP "
6984 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6985 goto out_destroy_fcp_cq;
6986 }
6987
6988 /* Setup fcp_cq_map for fast lookup */
6989 phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
6990 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
6991
6992 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6993 "2588 FCP CQ setup: cq[%d]-id=%d, "
6994 "parent seq[%d]-id=%d\n",
6995 fcp_cqidx,
6996 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6997 fcp_cqidx,
6998 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
6999 }
7000
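The fcp_cq_map filled in the loop above caches each fast-path CQ's queue id so completion handling can match an id to a channel with one flat scan, as the comment in the create hunk explains. A minimal sketch of that lookup (function name is ours, not the driver's):

#include <stdint.h>

/* Return the channel index owning cq_id, or -1 if it is not fast-path. */
static int fcp_cq_lookup(const uint16_t *fcp_cq_map, int nchan,
			 uint16_t cq_id)
{
	for (int i = 0; i < nchan; i++)
		if (fcp_cq_map[i] == cq_id)
			return i;
	return -1;
}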
7001 /* Set up fast-path FCP Work Queue */
7002 if (!phba->sli4_hba.fcp_wq) {
7003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7004 "3149 Fast-path FCP WQ array not "
7005 "allocated\n");
7006 rc = -ENOMEM;
7007 goto out_destroy_fcp_cq;
7008 }
7009
7010 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
7011 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7012 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7013 "0534 Fast-path FCP WQ (%d) not "
7014 "allocated\n", fcp_wqidx);
7015 rc = -ENOMEM;
7016 goto out_destroy_fcp_wq;
7017 }
7018 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7019 phba->sli4_hba.fcp_cq[fcp_wqidx],
7020 LPFC_FCP);
7021 if (rc) {
7022 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7023 "0535 Failed setup of fast-path FCP "
7024 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
7025 goto out_destroy_fcp_wq;
7026 }
7027
7028 /* Bind this WQ to the next FCP ring */
7029 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
7030 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
7031 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
7032
7033 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7034 "2591 FCP WQ setup: wq[%d]-id=%d, "
7035 "parent cq[%d]-id=%d\n",
7036 fcp_wqidx,
7037 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7038 fcp_cq_index,
7039 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
7040 }
6977 /* 7041 /*
6978 * Set up Complete Queues (CQs) 7042 * Set up Complete Queues (CQs)
6979 */ 7043 */
@@ -6983,20 +7047,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6984 "0528 Mailbox CQ not allocated\n"); 7048 "0528 Mailbox CQ not allocated\n");
6985 rc = -ENOMEM; 7049 rc = -ENOMEM;
6986 goto out_destroy_fp_eq; 7050 goto out_destroy_fcp_wq;
6987 } 7051 }
6988 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 7052 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
6989 LPFC_MCQ, LPFC_MBOX); 7053 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
6990 if (rc) { 7054 if (rc) {
6991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7055 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6992 "0529 Failed setup of slow-path mailbox CQ: " 7056 "0529 Failed setup of slow-path mailbox CQ: "
6993 "rc = 0x%x\n", rc); 7057 "rc = 0x%x\n", rc);
6994 goto out_destroy_fp_eq; 7058 goto out_destroy_fcp_wq;
6995 } 7059 }
6996 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7060 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6997 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 7061 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6998 phba->sli4_hba.mbx_cq->queue_id, 7062 phba->sli4_hba.mbx_cq->queue_id,
6999 phba->sli4_hba.sp_eq->queue_id); 7063 phba->sli4_hba.hba_eq[0]->queue_id);
7000 7064
7001 /* Set up slow-path ELS Complete Queue */ 7065 /* Set up slow-path ELS Complete Queue */
7002 if (!phba->sli4_hba.els_cq) { 7066 if (!phba->sli4_hba.els_cq) {
@@ -7005,8 +7069,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7005 rc = -ENOMEM; 7069 rc = -ENOMEM;
7006 goto out_destroy_mbx_cq; 7070 goto out_destroy_mbx_cq;
7007 } 7071 }
7008 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 7072 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
7009 LPFC_WCQ, LPFC_ELS); 7073 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
7010 if (rc) { 7074 if (rc) {
7011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7012 "0531 Failed setup of slow-path ELS CQ: " 7076 "0531 Failed setup of slow-path ELS CQ: "
@@ -7016,52 +7080,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7016 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7017 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 7081 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
7018 phba->sli4_hba.els_cq->queue_id, 7082 phba->sli4_hba.els_cq->queue_id,
7019 phba->sli4_hba.sp_eq->queue_id); 7083 phba->sli4_hba.hba_eq[0]->queue_id);
7020
7021 /* Set up fast-path FCP Response Complete Queue */
7022 if (!phba->sli4_hba.fcp_cq) {
7023 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7024 "3148 Fast-path FCP CQ array not "
7025 "allocated\n");
7026 rc = -ENOMEM;
7027 goto out_destroy_els_cq;
7028 }
7029 fcp_cqidx = 0;
7030 do {
7031 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
7032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7033 "0526 Fast-path FCP CQ (%d) not "
7034 "allocated\n", fcp_cqidx);
7035 rc = -ENOMEM;
7036 goto out_destroy_fcp_cq;
7037 }
7038 if (phba->cfg_fcp_eq_count)
7039 rc = lpfc_cq_create(phba,
7040 phba->sli4_hba.fcp_cq[fcp_cqidx],
7041 phba->sli4_hba.fp_eq[fcp_cqidx],
7042 LPFC_WCQ, LPFC_FCP);
7043 else
7044 rc = lpfc_cq_create(phba,
7045 phba->sli4_hba.fcp_cq[fcp_cqidx],
7046 phba->sli4_hba.sp_eq,
7047 LPFC_WCQ, LPFC_FCP);
7048 if (rc) {
7049 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7050 "0527 Failed setup of fast-path FCP "
7051 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
7052 goto out_destroy_fcp_cq;
7053 }
7054 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7055 "2588 FCP CQ setup: cq[%d]-id=%d, "
7056 "parent %seq[%d]-id=%d\n",
7057 fcp_cqidx,
7058 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
7059 (phba->cfg_fcp_eq_count) ? "" : "sp_",
7060 fcp_cqidx,
7061 (phba->cfg_fcp_eq_count) ?
7062 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
7063 phba->sli4_hba.sp_eq->queue_id);
7064 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
7065 7084
7066 /* 7085 /*
7067 * Set up all the Work Queues (WQs) 7086 * Set up all the Work Queues (WQs)
@@ -7072,7 +7091,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7073 "0538 Slow-path MQ not allocated\n"); 7092 "0538 Slow-path MQ not allocated\n");
7074 rc = -ENOMEM; 7093 rc = -ENOMEM;
7075 goto out_destroy_fcp_cq; 7094 goto out_destroy_els_cq;
7076 } 7095 }
7077 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 7096 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
7078 phba->sli4_hba.mbx_cq, LPFC_MBOX); 7097 phba->sli4_hba.mbx_cq, LPFC_MBOX);
@@ -7080,7 +7099,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7081 "0539 Failed setup of slow-path MQ: " 7100 "0539 Failed setup of slow-path MQ: "
7082 "rc = 0x%x\n", rc); 7101 "rc = 0x%x\n", rc);
7083 goto out_destroy_fcp_cq; 7102 goto out_destroy_els_cq;
7084 } 7103 }
7085 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7104 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7086 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 7105 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
@@ -7102,49 +7121,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7102 "rc = 0x%x\n", rc); 7121 "rc = 0x%x\n", rc);
7103 goto out_destroy_mbx_wq; 7122 goto out_destroy_mbx_wq;
7104 } 7123 }
7124
7125 /* Bind this WQ to the ELS ring */
7126 pring = &psli->ring[LPFC_ELS_RING];
7127 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
7128 phba->sli4_hba.els_cq->pring = pring;
7129
7105 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7130 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7106 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 7131 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
7107 phba->sli4_hba.els_wq->queue_id, 7132 phba->sli4_hba.els_wq->queue_id,
7108 phba->sli4_hba.els_cq->queue_id); 7133 phba->sli4_hba.els_cq->queue_id);
7109 7134
7110 /* Set up fast-path FCP Work Queue */
7111 if (!phba->sli4_hba.fcp_wq) {
7112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7113 "3149 Fast-path FCP WQ array not "
7114 "allocated\n");
7115 rc = -ENOMEM;
7116 goto out_destroy_els_wq;
7117 }
7118 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
7119 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7121 "0534 Fast-path FCP WQ (%d) not "
7122 "allocated\n", fcp_wqidx);
7123 rc = -ENOMEM;
7124 goto out_destroy_fcp_wq;
7125 }
7126 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7127 phba->sli4_hba.fcp_cq[fcp_cq_index],
7128 LPFC_FCP);
7129 if (rc) {
7130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7131 "0535 Failed setup of fast-path FCP "
7132 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
7133 goto out_destroy_fcp_wq;
7134 }
7135 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7136 "2591 FCP WQ setup: wq[%d]-id=%d, "
7137 "parent cq[%d]-id=%d\n",
7138 fcp_wqidx,
7139 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7140 fcp_cq_index,
7141 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
7142 /* Round robin FCP Work Queue's Completion Queue assignment */
7143 if (phba->cfg_fcp_eq_count)
7144 fcp_cq_index = ((fcp_cq_index + 1) %
7145 phba->cfg_fcp_eq_count);
7146 }
7147
7148 /* 7135 /*
7149 * Create Receive Queue (RQ) 7136 * Create Receive Queue (RQ)
7150 */ 7137 */
@@ -7152,7 +7139,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7153 "0540 Receive Queue not allocated\n"); 7140 "0540 Receive Queue not allocated\n");
7154 rc = -ENOMEM; 7141 rc = -ENOMEM;
7155 goto out_destroy_fcp_wq; 7142 goto out_destroy_els_wq;
7156 } 7143 }
7157 7144
7158 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 7145 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
@@ -7175,25 +7162,23 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7175 phba->sli4_hba.els_cq->queue_id); 7162 phba->sli4_hba.els_cq->queue_id);
7176 return 0; 7163 return 0;
7177 7164
7178out_destroy_fcp_wq:
7179 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7180 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7181out_destroy_els_wq: 7165out_destroy_els_wq:
7182 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7166 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7183out_destroy_mbx_wq: 7167out_destroy_mbx_wq:
7184 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7168 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7185out_destroy_fcp_cq:
7186 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7187 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7188out_destroy_els_cq: 7169out_destroy_els_cq:
7189 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7170 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7190out_destroy_mbx_cq: 7171out_destroy_mbx_cq:
7191 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7172 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7192out_destroy_fp_eq: 7173out_destroy_fcp_wq:
7174 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7175 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7176out_destroy_fcp_cq:
7177 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7178 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7179out_destroy_hba_eq:
7193 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 7180 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
7194 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 7181 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
7195out_destroy_sp_eq:
7196 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
7197out_error: 7182out_error:
7198 return rc; 7183 return rc;
7199} 7184}
@@ -7222,27 +7207,27 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7222 /* Unset unsolicited receive queue */ 7207 /* Unset unsolicited receive queue */
7223 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7208 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7224 /* Unset FCP work queue */ 7209 /* Unset FCP work queue */
7225 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 7210 if (phba->sli4_hba.fcp_wq) {
7226 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 7211 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7212 fcp_qidx++)
7213 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
7214 }
7227 /* Unset mailbox command complete queue */ 7215 /* Unset mailbox command complete queue */
7228 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7216 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7229 /* Unset ELS complete queue */ 7217 /* Unset ELS complete queue */
7230 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7218 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7231 /* Unset FCP response complete queue */ 7219 /* Unset FCP response complete queue */
7232 if (phba->sli4_hba.fcp_cq) { 7220 if (phba->sli4_hba.fcp_cq) {
7233 fcp_qidx = 0; 7221 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7234 do { 7222 fcp_qidx++)
7235 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 7223 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
7236 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
7237 } 7224 }
7238 /* Unset fast-path event queue */ 7225 /* Unset fast-path event queue */
7239 if (phba->sli4_hba.fp_eq) { 7226 if (phba->sli4_hba.hba_eq) {
7240 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 7227 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7241 fcp_qidx++) 7228 fcp_qidx++)
7242 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 7229 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
7243 } 7230 }
7244 /* Unset slow-path event queue */
7245 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
7246} 7231}
7247 7232
7248/** 7233/**
@@ -7590,10 +7575,11 @@ lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
 	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
 	length = (sizeof(struct lpfc_mbx_nop) -
 		  sizeof(struct lpfc_sli4_cfg_mhdr));
-	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
-			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
 
 	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
+		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+				 LPFC_MBOX_OPCODE_NOP, length,
+				 LPFC_SLI4_MBX_EMBED);
 		if (!phba->sli4_hba.intr_enable)
 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 		else {
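The fix above moves lpfc_sli4_config() inside the loop so the NOP mailbox is rebuilt before every send; presumably issuing a command perturbs the mailbox contents, making a one-time setup unsafe for repeated submission. A toy model of the rebuild-per-iteration pattern (build_nop/issue are hypothetical stand-ins):

#include <string.h>

struct mbox { unsigned char payload[256]; };

static void build_nop(struct mbox *m)
{
	memset(m, 0, sizeof(*m));
	m->payload[0] = 0x9b;	/* pretend opcode byte */
}

static int issue(struct mbox *m)
{
	m->payload[1] ^= 1;	/* models firmware dirtying the mailbox */
	return 0;
}

static int send_nops(struct mbox *m, int cnt)
{
	for (int i = 0; i < cnt; i++) {
		build_nop(m);	/* rebuild each pass: issue() dirtied it */
		if (issue(m))
			return -1;
	}
	return 0;
}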
@@ -8133,33 +8119,22 @@ enable_msix_vectors:
8133 "message=%d\n", index, 8119 "message=%d\n", index,
8134 phba->sli4_hba.msix_entries[index].vector, 8120 phba->sli4_hba.msix_entries[index].vector,
8135 phba->sli4_hba.msix_entries[index].entry); 8121 phba->sli4_hba.msix_entries[index].entry);
8122
8136 /* 8123 /*
8137 * Assign MSI-X vectors to interrupt handlers 8124 * Assign MSI-X vectors to interrupt handlers
8138 */ 8125 */
8139 if (vectors > 1) 8126 for (index = 0; index < vectors; index++) {
8140 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 8127 memset(&phba->sli4_hba.handler_name[index], 0, 16);
8141 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 8128 sprintf((char *)&phba->sli4_hba.handler_name[index],
8142 LPFC_SP_DRIVER_HANDLER_NAME, phba); 8129 LPFC_DRIVER_HANDLER_NAME"%d", index);
8143 else
8144 /* All Interrupts need to be handled by one EQ */
8145 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
8146 &lpfc_sli4_intr_handler, IRQF_SHARED,
8147 LPFC_DRIVER_NAME, phba);
8148 if (rc) {
8149 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8150 "0485 MSI-X slow-path request_irq failed "
8151 "(%d)\n", rc);
8152 goto msi_fail_out;
8153 }
8154 8130
8155 /* The rest of the vector(s) are associated to fast-path handler(s) */ 8131 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8156 for (index = 1; index < vectors; index++) { 8132 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8157 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 8133 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
8158 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
8159 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 8134 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
8160 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 8135 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
8161 LPFC_FP_DRIVER_HANDLER_NAME, 8136 (char *)&phba->sli4_hba.handler_name[index],
8162 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 8137 &phba->sli4_hba.fcp_eq_hdl[index]);
8163 if (rc) { 8138 if (rc) {
8164 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8139 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8165 "0486 MSI-X fast-path (%d) " 8140 "0486 MSI-X fast-path (%d) "
@@ -8173,12 +8148,9 @@ enable_msix_vectors:
 
 cfg_fail_out:
 	/* free the irq already requested */
-	for (--index; index >= 1; index--)
-		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
-			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
-
-	/* free the irq already requested */
-	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+	for (--index; index >= 0; index--)
+		free_irq(phba->sli4_hba.msix_entries[index].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index]);
 
 msi_fail_out:
 	/* Unconfigure MSI-X capability structure */
@@ -8199,11 +8171,9 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
 	int index;
 
 	/* Free up MSI-X multi-message vectors */
-	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
-
-	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
+	for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++)
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
-			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+			 &phba->sli4_hba.fcp_eq_hdl[index]);
 
 	/* Disable MSI-X */
 	pci_disable_msix(phba->pcidev);
@@ -8249,7 +8219,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
8249 return rc; 8219 return rc;
8250 } 8220 }
8251 8221
8252 for (index = 0; index < phba->cfg_fcp_eq_count; index++) { 8222 for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
8253 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8223 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8254 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8224 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8255 } 8225 }
@@ -8329,10 +8299,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8329 /* Indicate initialization to INTx mode */ 8299 /* Indicate initialization to INTx mode */
8330 phba->intr_type = INTx; 8300 phba->intr_type = INTx;
8331 intr_mode = 0; 8301 intr_mode = 0;
8332 for (index = 0; index < phba->cfg_fcp_eq_count; 8302 for (index = 0; index < phba->cfg_fcp_io_channel;
8333 index++) { 8303 index++) {
8334 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8304 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8335 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8305 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8306 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8307 fcp_eq_in_use, 1);
8336 } 8308 }
8337 } 8309 }
8338 } 8310 }
@@ -9449,7 +9421,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9449 int error; 9421 int error;
9450 uint32_t cfg_mode, intr_mode; 9422 uint32_t cfg_mode, intr_mode;
9451 int mcnt; 9423 int mcnt;
9452 int adjusted_fcp_eq_count; 9424 int adjusted_fcp_io_channel;
9453 const struct firmware *fw; 9425 const struct firmware *fw;
9454 uint8_t file_name[16]; 9426 uint8_t file_name[16];
9455 9427
@@ -9552,13 +9524,13 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9552 } 9524 }
 		/* Default to single EQ for non-MSI-X */
 		if (phba->intr_type != MSIX)
-			adjusted_fcp_eq_count = 0;
+			adjusted_fcp_io_channel = 1;
 		else if (phba->sli4_hba.msix_vec_nr <
-			 phba->cfg_fcp_eq_count + 1)
-			adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+			 phba->cfg_fcp_io_channel)
+			adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr;
 		else
-			adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
-		phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
+			adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
+		phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
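Two behavioral shifts are packed into this clamp: without MSI-X the driver now runs one io channel instead of zero fast-path EQs, and with MSI-X no vector is reserved for a slow path any more, so the channel count can use every granted vector. A compact restatement:

/* Clamp the configured channel count to the interrupt resources. */
static int adjust_io_channels(int have_msix, int msix_vectors,
			      int cfg_channels)
{
	if (!have_msix)
		return 1;		/* single shared channel */
	if (msix_vectors < cfg_channels)
		return msix_vectors;	/* one channel per granted vector */
	return cfg_channels;
}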
9562 /* Set up SLI-4 HBA */ 9534 /* Set up SLI-4 HBA */
9563 if (lpfc_sli4_hba_setup(phba)) { 9535 if (lpfc_sli4_hba_setup(phba)) {
9564 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9536 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9694,6 +9666,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9694 * buffers are released to their corresponding pools here. 9666 * buffers are released to their corresponding pools here.
9695 */ 9667 */
9696 lpfc_scsi_free(phba); 9668 lpfc_scsi_free(phba);
9669
9697 lpfc_sli4_driver_resource_unset(phba); 9670 lpfc_sli4_driver_resource_unset(phba);
9698 9671
9699 /* Unmap adapter Control and Doorbell registers */ 9672 /* Unmap adapter Control and Doorbell registers */
@@ -10420,6 +10393,10 @@ static struct pci_device_id lpfc_id_table[] = {
10420 PCI_ANY_ID, PCI_ANY_ID, }, 10393 PCI_ANY_ID, PCI_ANY_ID, },
10421 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, 10394 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
10422 PCI_ANY_ID, PCI_ANY_ID, }, 10395 PCI_ANY_ID, PCI_ANY_ID, },
10396 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
10397 PCI_ANY_ID, PCI_ANY_ID, },
10398 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
10399 PCI_ANY_ID, PCI_ANY_ID, },
10423 { 0 } 10400 { 0 }
10424}; 10401};
10425 10402
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 20336f09fb3c..efc9cd9def8b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -92,7 +92,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
92 memset(mp->virt, 0, LPFC_BPL_SIZE); 92 memset(mp->virt, 0, LPFC_BPL_SIZE);
93 INIT_LIST_HEAD(&mp->list); 93 INIT_LIST_HEAD(&mp->list);
94 /* save address for completion */ 94 /* save address for completion */
-	pmb->context2 = (uint8_t *) mp;
+	pmb->context1 = (uint8_t *)mp;
96 mb->un.varWords[3] = putPaddrLow(mp->phys); 96 mb->un.varWords[3] = putPaddrLow(mp->phys);
97 mb->un.varWords[4] = putPaddrHigh(mp->phys); 97 mb->un.varWords[4] = putPaddrHigh(mp->phys);
98 mb->un.varDmp.sli4_length = sizeof(struct static_vport_info); 98 mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -950,44 +950,47 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
950 for (i = 0; i < psli->num_rings; i++) { 950 for (i = 0; i < psli->num_rings; i++) {
951 pring = &psli->ring[i]; 951 pring = &psli->ring[i];
952 952
953 pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE: 953 pring->sli.sli3.sizeCiocb =
954 phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
954 SLI2_IOCB_CMD_SIZE; 955 SLI2_IOCB_CMD_SIZE;
955 pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE: 956 pring->sli.sli3.sizeRiocb =
957 phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
956 SLI2_IOCB_RSP_SIZE; 958 SLI2_IOCB_RSP_SIZE;
957 /* A ring MUST have both cmd and rsp entries defined to be 959 /* A ring MUST have both cmd and rsp entries defined to be
958 valid */ 960 valid */
959 if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) { 961 if ((pring->sli.sli3.numCiocb == 0) ||
962 (pring->sli.sli3.numRiocb == 0)) {
960 pcbp->rdsc[i].cmdEntries = 0; 963 pcbp->rdsc[i].cmdEntries = 0;
961 pcbp->rdsc[i].rspEntries = 0; 964 pcbp->rdsc[i].rspEntries = 0;
962 pcbp->rdsc[i].cmdAddrHigh = 0; 965 pcbp->rdsc[i].cmdAddrHigh = 0;
963 pcbp->rdsc[i].rspAddrHigh = 0; 966 pcbp->rdsc[i].rspAddrHigh = 0;
964 pcbp->rdsc[i].cmdAddrLow = 0; 967 pcbp->rdsc[i].cmdAddrLow = 0;
965 pcbp->rdsc[i].rspAddrLow = 0; 968 pcbp->rdsc[i].rspAddrLow = 0;
966 pring->cmdringaddr = NULL; 969 pring->sli.sli3.cmdringaddr = NULL;
967 pring->rspringaddr = NULL; 970 pring->sli.sli3.rspringaddr = NULL;
968 continue; 971 continue;
969 } 972 }
970 /* Command ring setup for ring */ 973 /* Command ring setup for ring */
971 pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt]; 974 pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
972 pcbp->rdsc[i].cmdEntries = pring->numCiocb; 975 pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;
973 976
974 offset = (uint8_t *) &phba->IOCBs[iocbCnt] - 977 offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
975 (uint8_t *) phba->slim2p.virt; 978 (uint8_t *) phba->slim2p.virt;
976 pdma_addr = phba->slim2p.phys + offset; 979 pdma_addr = phba->slim2p.phys + offset;
977 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); 980 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
978 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); 981 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
979 iocbCnt += pring->numCiocb; 982 iocbCnt += pring->sli.sli3.numCiocb;
980 983
981 /* Response ring setup for ring */ 984 /* Response ring setup for ring */
982 pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt]; 985 pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];
983 986
984 pcbp->rdsc[i].rspEntries = pring->numRiocb; 987 pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
985 offset = (uint8_t *)&phba->IOCBs[iocbCnt] - 988 offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
986 (uint8_t *)phba->slim2p.virt; 989 (uint8_t *)phba->slim2p.virt;
987 pdma_addr = phba->slim2p.phys + offset; 990 pdma_addr = phba->slim2p.phys + offset;
988 pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr); 991 pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
989 pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr); 992 pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
990 iocbCnt += pring->numRiocb; 993 iocbCnt += pring->sli.sli3.numRiocb;
991 } 994 }
992} 995}
993 996
@@ -1609,12 +1612,15 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1609 1612
1610 switch (mbox->mbxCommand) { 1613 switch (mbox->mbxCommand) {
1611 case MBX_WRITE_NV: /* 0x03 */ 1614 case MBX_WRITE_NV: /* 0x03 */
1615 case MBX_DUMP_MEMORY: /* 0x17 */
1612 case MBX_UPDATE_CFG: /* 0x1B */ 1616 case MBX_UPDATE_CFG: /* 0x1B */
1613 case MBX_DOWN_LOAD: /* 0x1C */ 1617 case MBX_DOWN_LOAD: /* 0x1C */
1614 case MBX_DEL_LD_ENTRY: /* 0x1D */ 1618 case MBX_DEL_LD_ENTRY: /* 0x1D */
1619 case MBX_WRITE_VPARMS: /* 0x32 */
1615 case MBX_LOAD_AREA: /* 0x81 */ 1620 case MBX_LOAD_AREA: /* 0x81 */
1616 case MBX_WRITE_WWN: /* 0x98 */ 1621 case MBX_WRITE_WWN: /* 0x98 */
1617 case MBX_LOAD_EXP_ROM: /* 0x9C */ 1622 case MBX_LOAD_EXP_ROM: /* 0x9C */
1623 case MBX_ACCESS_VDATA: /* 0xA5 */
1618 return LPFC_MBOX_TMO_FLASH_CMD; 1624 return LPFC_MBOX_TMO_FLASH_CMD;
1619 case MBX_SLI4_CONFIG: /* 0x9b */ 1625 case MBX_SLI4_CONFIG: /* 0x9b */
1620 subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq); 1626 subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
@@ -1625,11 +1631,17 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1625 case LPFC_MBOX_OPCODE_WRITE_OBJECT: 1631 case LPFC_MBOX_OPCODE_WRITE_OBJECT:
1626 case LPFC_MBOX_OPCODE_READ_OBJECT_LIST: 1632 case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
1627 case LPFC_MBOX_OPCODE_DELETE_OBJECT: 1633 case LPFC_MBOX_OPCODE_DELETE_OBJECT:
1628 case LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG:
1629 case LPFC_MBOX_OPCODE_GET_PROFILE_LIST: 1634 case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
1630 case LPFC_MBOX_OPCODE_SET_ACT_PROFILE: 1635 case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
1636 case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
1631 case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG: 1637 case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
1632 case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG: 1638 case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
1639 case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
1640 case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
1641 case LPFC_MBOX_OPCODE_RESET_LICENSES:
1642 case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
1643 case LPFC_MBOX_OPCODE_GET_VPD_DATA:
1644 case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
1633 return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO; 1645 return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
1634 } 1646 }
1635 } 1647 }
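lpfc_mbox_tmo_val() is a classifier: flash-touching legacy commands and the newly listed SLI4_CONFIG opcodes fall through to the long timeouts, everything else keeps the default. A simplified model of that switch; the classes and second values here are illustrative, not the driver's constants:

enum cmd_class { CMD_FAST, CMD_FLASH, CMD_CFG_EXTENDED };

static unsigned int mbox_tmo_seconds(enum cmd_class c)
{
	switch (c) {
	case CMD_FLASH:		/* e.g. WRITE_NV, DUMP_MEMORY, LOAD_EXP_ROM */
	case CMD_CFG_EXTENDED:	/* e.g. the GET/SET profile opcodes added here */
		return 300;	/* flash erase/write can be very slow */
	case CMD_FAST:
	default:
		return 30;
	}
}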
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ade763d3930a..cd86069a0ba8 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -194,6 +194,10 @@ lpfc_mem_free(struct lpfc_hba *phba)
194 pci_pool_destroy(phba->lpfc_hbq_pool); 194 pci_pool_destroy(phba->lpfc_hbq_pool);
195 phba->lpfc_hbq_pool = NULL; 195 phba->lpfc_hbq_pool = NULL;
196 196
197 if (phba->rrq_pool)
198 mempool_destroy(phba->rrq_pool);
199 phba->rrq_pool = NULL;
200
197 /* Free NLP memory pool */ 201 /* Free NLP memory pool */
198 mempool_destroy(phba->nlp_mem_pool); 202 mempool_destroy(phba->nlp_mem_pool);
199 phba->nlp_mem_pool = NULL; 203 phba->nlp_mem_pool = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 9133a97f045f..d8fadcb2db73 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1778,6 +1778,117 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1778} 1778}
1779 1779
1780static uint32_t 1780static uint32_t
1781lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1782 void *arg, uint32_t evt)
1783{
1784 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1785 struct ls_rjt stat;
1786
1787 memset(&stat, 0, sizeof(struct ls_rjt));
1788 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1789 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1790 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1791 return ndlp->nlp_state;
1792}
1793
1794static uint32_t
1795lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1796 void *arg, uint32_t evt)
1797{
1798 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1799 struct ls_rjt stat;
1800
1801 memset(&stat, 0, sizeof(struct ls_rjt));
1802 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1803 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1804 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1805 return ndlp->nlp_state;
1806}
1807
1808static uint32_t
1809lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1810 void *arg, uint32_t evt)
1811{
1812 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1813 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1814
1815 spin_lock_irq(shost->host_lock);
1816 ndlp->nlp_flag &= NLP_LOGO_ACC;
1817 spin_unlock_irq(shost->host_lock);
1818 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1819 return ndlp->nlp_state;
1820}
1821
1822static uint32_t
1823lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1824 void *arg, uint32_t evt)
1825{
1826 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1827 struct ls_rjt stat;
1828
1829 memset(&stat, 0, sizeof(struct ls_rjt));
1830 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1831 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1832 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1833 return ndlp->nlp_state;
1834}
1835
1836static uint32_t
1837lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1838 void *arg, uint32_t evt)
1839{
1840 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1841 struct ls_rjt stat;
1842
1843 memset(&stat, 0, sizeof(struct ls_rjt));
1844 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1845 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1846 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1847 return ndlp->nlp_state;
1848}
1849
1850static uint32_t
1851lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1852 void *arg, uint32_t evt)
1853{
1854 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1855
1856 ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
1857 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1858 spin_lock_irq(shost->host_lock);
1859 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1860 spin_unlock_irq(shost->host_lock);
1861 lpfc_disc_set_adisc(vport, ndlp);
1862 return ndlp->nlp_state;
1863}
1864
1865static uint32_t
1866lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1867 void *arg, uint32_t evt)
1868{
1869 /*
1870 * Take no action. If a LOGO is outstanding, then possibly DevLoss has
1871 * timed out and is calling for Device Remove. In this case, the LOGO
1872 * must be allowed to complete in state LOGO_ISSUE so that the rpi
1873 * and other NLP flags are correctly cleaned up.
1874 */
1875 return ndlp->nlp_state;
1876}
1877
1878static uint32_t
1879lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
1880 struct lpfc_nodelist *ndlp,
1881 void *arg, uint32_t evt)
1882{
1883 /*
1884 * Device Recovery events have no meaning for a node with a LOGO
1885 * outstanding. The LOGO has to complete first and handle the
1886 * node from that point.
1887 */
1888 return ndlp->nlp_state;
1889}
1890
1891static uint32_t
1781lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1892lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1782 void *arg, uint32_t evt) 1893 void *arg, uint32_t evt)
1783{ 1894{
@@ -2083,6 +2194,8 @@ lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2083 void *arg, uint32_t evt) 2194 void *arg, uint32_t evt)
2084{ 2195{
2085 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2196 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2197
2198 /* For the fabric port just clear the fc flags. */
2086 if (ndlp->nlp_DID == Fabric_DID) { 2199 if (ndlp->nlp_DID == Fabric_DID) {
2087 spin_lock_irq(shost->host_lock); 2200 spin_lock_irq(shost->host_lock);
2088 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 2201 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -2297,6 +2410,20 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2297 lpfc_device_rm_prli_issue, /* DEVICE_RM */ 2410 lpfc_device_rm_prli_issue, /* DEVICE_RM */
2298 lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */ 2411 lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
2299 2412
2413 lpfc_rcv_plogi_logo_issue, /* RCV_PLOGI LOGO_ISSUE */
2414 lpfc_rcv_prli_logo_issue, /* RCV_PRLI */
2415 lpfc_rcv_logo_logo_issue, /* RCV_LOGO */
2416 lpfc_rcv_padisc_logo_issue, /* RCV_ADISC */
2417 lpfc_rcv_padisc_logo_issue, /* RCV_PDISC */
2418 lpfc_rcv_prlo_logo_issue, /* RCV_PRLO */
2419 lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
2420 lpfc_disc_illegal, /* CMPL_PRLI */
2421 lpfc_cmpl_logo_logo_issue, /* CMPL_LOGO */
2422 lpfc_disc_illegal, /* CMPL_ADISC */
2423 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2424 lpfc_device_rm_logo_issue, /* DEVICE_RM */
2425 lpfc_device_recov_logo_issue, /* DEVICE_RECOVERY */
2426
2300 lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */ 2427 lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
2301 lpfc_rcv_prli_unmap_node, /* RCV_PRLI */ 2428 lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
2302 lpfc_rcv_logo_unmap_node, /* RCV_LOGO */ 2429 lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
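The new LOGO_ISSUE state contributes a complete row to lpfc_disc_action[]: one handler per discovery event (thirteen entries in the row above), because dispatch is a flat table indexed by state and event. A generic sketch of that dispatch shape; the indexing and typedef are illustrative, and the driver derives the row from ndlp->nlp_state:

#include <stdint.h>

#define N_EVENTS 13	/* one column per discovery event, as in the row above */

typedef uint32_t (*disc_action_t)(void *vport, void *ndlp, void *arg,
				  uint32_t evt);

/* Flat table dispatch: row = state, column = event. */
static uint32_t dispatch(disc_action_t *table, uint32_t state, uint32_t evt,
			 void *vport, void *ndlp, void *arg)
{
	disc_action_t fn = table[state * N_EVENTS + evt];
	return fn(vport, ndlp, arg, evt);
}

A missing or misordered entry shifts every later row, which is why the table rows are kept in strict event order.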
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 925975d2d765..64013f3097ad 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -60,12 +60,6 @@ static char *dif_op_str[] = {
60 "PROT_WRITE_PASS", 60 "PROT_WRITE_PASS",
61}; 61};
62 62
63static char *dif_grd_str[] = {
64 "NO_GUARD",
65 "DIF_CRC",
66 "DIX_IP",
67};
68
69struct scsi_dif_tuple { 63struct scsi_dif_tuple {
70 __be16 guard_tag; /* Checksum */ 64 __be16 guard_tag; /* Checksum */
71 __be16 app_tag; /* Opaque storage */ 65 __be16 app_tag; /* Opaque storage */
@@ -3482,9 +3476,15 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	}
 	lp = (uint32_t *)cmnd->sense_buffer;
 
-	if (!scsi_status && (resp_info & RESID_UNDER) &&
-	    vport->cfg_log_verbose & LOG_FCP_UNDER)
-		logit = LOG_FCP_UNDER;
+	/* special handling for under run conditions */
+	if (!scsi_status && (resp_info & RESID_UNDER)) {
+		/* don't log under runs if fcp set... */
+		if (vport->cfg_log_verbose & LOG_FCP)
+			logit = LOG_FCP_ERROR;
+		/* unless operator says so */
+		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
+			logit = LOG_FCP_UNDER;
+	}
3488 3488
3489 lpfc_printf_vlog(vport, KERN_WARNING, logit, 3489 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3490 "9024 FCP command x%x failed: x%x SNS x%x x%x " 3490 "9024 FCP command x%x failed: x%x SNS x%x x%x "
@@ -3552,11 +3552,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 
 	/*
 	 * Check SLI validation that all the transfer was actually done
-	 * (fcpi_parm should be zero). Apply check only to reads.
+	 * (fcpi_parm should be zero).
 	 */
-	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
+	} else if (fcpi_parm) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-				 "9029 FCP Read Check Error Data: "
+				 "9029 FCP Data Transfer Check Error: "
 				 "x%x x%x x%x x%x x%x\n",
 				 be32_to_cpu(fcpcmd->fcpDl),
 				 be32_to_cpu(fcprsp->rspResId),
@@ -3615,7 +3615,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3615 cmd = lpfc_cmd->pCmd; 3615 cmd = lpfc_cmd->pCmd;
3616 shost = cmd->device->host; 3616 shost = cmd->device->host;
3617 3617
-	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
+	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
 	/* pick up SLI4 exchange busy status from HBA */
 	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
@@ -3660,10 +3660,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3660 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 3660 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3661 else if (lpfc_cmd->status >= IOSTAT_CNT) 3661 else if (lpfc_cmd->status >= IOSTAT_CNT)
3662 lpfc_cmd->status = IOSTAT_DEFAULT; 3662 lpfc_cmd->status = IOSTAT_DEFAULT;
3663 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR 3663 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
3664 && !lpfc_cmd->fcp_rsp->rspStatus3 3664 !lpfc_cmd->fcp_rsp->rspStatus3 &&
3665 && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) 3665 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
3666 && !(phba->cfg_log_verbose & LOG_FCP_UNDER)) 3666 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
3667 logit = 0; 3667 logit = 0;
3668 else 3668 else
3669 logit = LOG_FCP | LOG_FCP_UNDER; 3669 logit = LOG_FCP | LOG_FCP_UNDER;
@@ -3829,12 +3829,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3829 cmd->scsi_done(cmd); 3829 cmd->scsi_done(cmd);
3830 3830
3831 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3831 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3832 spin_lock_irq(&phba->hbalock);
3833 lpfc_cmd->pCmd = NULL;
3834 spin_unlock_irq(&phba->hbalock);
3835
3832 /* 3836 /*
3833 * If there is a thread waiting for command completion 3837 * If there is a thread waiting for command completion
3834 * wake up the thread. 3838 * wake up the thread.
3835 */ 3839 */
3836 spin_lock_irqsave(shost->host_lock, flags); 3840 spin_lock_irqsave(shost->host_lock, flags);
3837 lpfc_cmd->pCmd = NULL;
3838 if (lpfc_cmd->waitq) 3841 if (lpfc_cmd->waitq)
3839 wake_up(lpfc_cmd->waitq); 3842 wake_up(lpfc_cmd->waitq);
3840 spin_unlock_irqrestore(shost->host_lock, flags); 3843 spin_unlock_irqrestore(shost->host_lock, flags);
@@ -3868,12 +3871,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3868 } 3871 }
3869 } 3872 }
3870 3873
3874 spin_lock_irq(&phba->hbalock);
3875 lpfc_cmd->pCmd = NULL;
3876 spin_unlock_irq(&phba->hbalock);
3877
3871 /* 3878 /*
3872 * If there is a thread waiting for command completion 3879 * If there is a thread waiting for command completion
3873 * wake up the thread. 3880 * wake up the thread.
3874 */ 3881 */
3875 spin_lock_irqsave(shost->host_lock, flags); 3882 spin_lock_irqsave(shost->host_lock, flags);
3876 lpfc_cmd->pCmd = NULL;
3877 if (lpfc_cmd->waitq) 3883 if (lpfc_cmd->waitq)
3878 wake_up(lpfc_cmd->waitq); 3884 wake_up(lpfc_cmd->waitq);
3879 spin_unlock_irqrestore(shost->host_lock, flags); 3885 spin_unlock_irqrestore(shost->host_lock, flags);
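[Editor's note] Both completion paths now clear lpfc_cmd->pCmd under phba->hbalock, the same lock lpfc_abort_handler holds while it dereferences host_scribble, rather than under the host lock. That closes the window where an abort could race against a command that had already completed. A minimal pthread model of the handoff (names are illustrative, not the driver's):

#include <pthread.h>
#include <stddef.h>

struct cmd_slot {
	pthread_mutex_t lock;	/* stands in for phba->hbalock */
	void *pcmd;		/* non-NULL while the command is live */
};

static void complete_cmd(struct cmd_slot *s)
{
	pthread_mutex_lock(&s->lock);
	s->pcmd = NULL;			/* publish completion under the lock */
	pthread_mutex_unlock(&s->lock);
	/* waiters are woken only after this point */
}

static int abort_sees_live_cmd(struct cmd_slot *s)
{
	int live;

	pthread_mutex_lock(&s->lock);
	live = (s->pcmd != NULL);	/* mirrors the !lpfc_cmd->pCmd bail-out */
	pthread_mutex_unlock(&s->lock);
	return live;
}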
@@ -3919,6 +3925,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3919 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); 3925 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
3920 int datadir = scsi_cmnd->sc_data_direction; 3926 int datadir = scsi_cmnd->sc_data_direction;
3921 char tag[2]; 3927 char tag[2];
3928 uint8_t *ptr;
3929 bool sli4;
3922 3930
3923 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 3931 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3924 return; 3932 return;
@@ -3930,8 +3938,13 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3930 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 3938 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
3931 &lpfc_cmd->fcp_cmnd->fcp_lun); 3939 &lpfc_cmd->fcp_cmnd->fcp_lun);
3932 3940
3933 memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN); 3941 ptr = &fcp_cmnd->fcpCdb[0];
3934 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 3942 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
3943 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
3944 ptr += scsi_cmnd->cmd_len;
3945 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
3946 }
3947
3935 if (scsi_populate_tag_msg(scsi_cmnd, tag)) { 3948 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
3936 switch (tag[0]) { 3949 switch (tag[0]) {
3937 case HEAD_OF_QUEUE_TAG: 3950 case HEAD_OF_QUEUE_TAG:
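[Editor's note] The CDB setup flips from memset-then-memcpy to memcpy-then-pad, so each of the 16 CDB bytes is written exactly once instead of most being written twice. Standalone sketch (LPFC_FCP_CDB_LEN replaced by a local stand-in):

#include <stdint.h>
#include <string.h>

#define CDB_LEN 16	/* stand-in for LPFC_FCP_CDB_LEN */

static void fill_cdb(uint8_t *dst, const uint8_t *cdb, size_t cmd_len)
{
	memcpy(dst, cdb, cmd_len);		/* live bytes first */
	if (cmd_len < CDB_LEN)			/* zero only the unused tail */
		memset(dst + cmd_len, 0, CDB_LEN - cmd_len);
}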
@@ -3947,6 +3960,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3947 } else 3960 } else
3948 fcp_cmnd->fcpCntl1 = 0; 3961 fcp_cmnd->fcpCntl1 = 0;
3949 3962
3963 sli4 = (phba->sli_rev == LPFC_SLI_REV4);
3964
3950 /* 3965 /*
3951 * There are three possibilities here - use scatter-gather segment, use 3966 * There are three possibilities here - use scatter-gather segment, use
3952 * the single mapping, or neither. Start the lpfc command prep by 3967 * the single mapping, or neither. Start the lpfc command prep by
@@ -3956,11 +3971,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3956 if (scsi_sg_count(scsi_cmnd)) { 3971 if (scsi_sg_count(scsi_cmnd)) {
3957 if (datadir == DMA_TO_DEVICE) { 3972 if (datadir == DMA_TO_DEVICE) {
3958 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 3973 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
3959 if (phba->sli_rev < LPFC_SLI_REV4) { 3974 if (sli4)
3975 iocb_cmd->ulpPU = PARM_READ_CHECK;
3976 else {
3960 iocb_cmd->un.fcpi.fcpi_parm = 0; 3977 iocb_cmd->un.fcpi.fcpi_parm = 0;
3961 iocb_cmd->ulpPU = 0; 3978 iocb_cmd->ulpPU = 0;
3962 } else 3979 }
3963 iocb_cmd->ulpPU = PARM_READ_CHECK;
3964 fcp_cmnd->fcpCntl3 = WRITE_DATA; 3980 fcp_cmnd->fcpCntl3 = WRITE_DATA;
3965 phba->fc4OutputRequests++; 3981 phba->fc4OutputRequests++;
3966 } else { 3982 } else {
@@ -3984,7 +4000,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3984 * of the scsi_cmnd request_buffer 4000 * of the scsi_cmnd request_buffer
3985 */ 4001 */
3986 piocbq->iocb.ulpContext = pnode->nlp_rpi; 4002 piocbq->iocb.ulpContext = pnode->nlp_rpi;
3987 if (phba->sli_rev == LPFC_SLI_REV4) 4003 if (sli4)
3988 piocbq->iocb.ulpContext = 4004 piocbq->iocb.ulpContext =
3989 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]; 4005 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
3990 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4006 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
@@ -4241,9 +4257,8 @@ void lpfc_poll_timeout(unsigned long ptr)
4241 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. 4257 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4242 **/ 4258 **/
4243static int 4259static int
4244lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) 4260lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4245{ 4261{
4246 struct Scsi_Host *shost = cmnd->device->host;
4247 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4262 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4248 struct lpfc_hba *phba = vport->phba; 4263 struct lpfc_hba *phba = vport->phba;
4249 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 4264 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
@@ -4299,53 +4314,28 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
4299 lpfc_cmd->timeout = 0; 4314 lpfc_cmd->timeout = 0;
4300 lpfc_cmd->start_time = jiffies; 4315 lpfc_cmd->start_time = jiffies;
4301 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 4316 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4302 cmnd->scsi_done = done;
4303 4317
4304 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 4318 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4305 if (vport->phba->cfg_enable_bg) { 4319 if (vport->phba->cfg_enable_bg) {
4306 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4320 lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
4307 "9033 BLKGRD: rcvd protected cmd:%02x op=%s " 4321 "9033 BLKGRD: rcvd %s cmd:x%x "
4308 "guard=%s\n", cmnd->cmnd[0], 4322 "sector x%llx cnt %u pt %x\n",
4309 dif_op_str[scsi_get_prot_op(cmnd)], 4323 dif_op_str[scsi_get_prot_op(cmnd)],
4310 dif_grd_str[scsi_host_get_guard(shost)]); 4324 cmnd->cmnd[0],
4311 if (cmnd->cmnd[0] == READ_10) 4325 (unsigned long long)scsi_get_lba(cmnd),
4312 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4326 blk_rq_sectors(cmnd->request),
4313 "9035 BLKGRD: READ @ sector %llu, " 4327 (cmnd->cmnd[1]>>5));
4314 "cnt %u, rpt %d\n",
4315 (unsigned long long)scsi_get_lba(cmnd),
4316 blk_rq_sectors(cmnd->request),
4317 (cmnd->cmnd[1]>>5));
4318 else if (cmnd->cmnd[0] == WRITE_10)
4319 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4320 "9036 BLKGRD: WRITE @ sector %llu, "
4321 "cnt %u, wpt %d\n",
4322 (unsigned long long)scsi_get_lba(cmnd),
4323 blk_rq_sectors(cmnd->request),
4324 (cmnd->cmnd[1]>>5));
4325 } 4328 }
4326
4327 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 4329 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4328 } else { 4330 } else {
4329 if (vport->phba->cfg_enable_bg) { 4331 if (vport->phba->cfg_enable_bg) {
4330 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4332 lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
4331 "9038 BLKGRD: rcvd unprotected cmd:" 4333 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4332 "%02x op=%s guard=%s\n", cmnd->cmnd[0], 4334 "x%x sector x%llx cnt %u pt %x\n",
4333 dif_op_str[scsi_get_prot_op(cmnd)], 4335 cmnd->cmnd[0],
4334 dif_grd_str[scsi_host_get_guard(shost)]); 4336 (unsigned long long)scsi_get_lba(cmnd),
4335 if (cmnd->cmnd[0] == READ_10)
4336 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4337 "9040 dbg: READ @ sector %llu, "
4338 "cnt %u, rpt %d\n",
4339 (unsigned long long)scsi_get_lba(cmnd),
4340 blk_rq_sectors(cmnd->request), 4337 blk_rq_sectors(cmnd->request),
4341 (cmnd->cmnd[1]>>5)); 4338 (cmnd->cmnd[1]>>5));
4342 else if (cmnd->cmnd[0] == WRITE_10)
4343 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4344 "9041 dbg: WRITE @ sector %llu, "
4345 "cnt %u, wpt %d\n",
4346 (unsigned long long)scsi_get_lba(cmnd),
4347 blk_rq_sectors(cmnd->request),
4348 (cmnd->cmnd[1]>>5));
4349 } 4339 }
4350 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 4340 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4351 } 4341 }
@@ -4363,11 +4353,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
4363 goto out_host_busy_free_buf; 4353 goto out_host_busy_free_buf;
4364 } 4354 }
4365 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 4355 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4366 spin_unlock(shost->host_lock);
4367 lpfc_sli_handle_fast_ring_event(phba, 4356 lpfc_sli_handle_fast_ring_event(phba,
4368 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 4357 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4369 4358
4370 spin_lock(shost->host_lock);
4371 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 4359 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4372 lpfc_poll_rearm_timer(phba); 4360 lpfc_poll_rearm_timer(phba);
4373 } 4361 }
@@ -4384,11 +4372,10 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
4384 return SCSI_MLQUEUE_TARGET_BUSY; 4372 return SCSI_MLQUEUE_TARGET_BUSY;
4385 4373
4386 out_fail_command: 4374 out_fail_command:
4387 done(cmnd); 4375 cmnd->scsi_done(cmnd);
4388 return 0; 4376 return 0;
4389} 4377}
4390 4378
4391static DEF_SCSI_QCMD(lpfc_queuecommand)
4392 4379
4393/** 4380/**
4394 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point 4381 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
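[Editor's note] Dropping DEF_SCSI_QCMD converts lpfc to the lockless queuecommand convention: the macro (see include/scsi/scsi_host.h) generated a wrapper that took shost->host_lock around the _lck routine, which is also why the hunk above deletes the unlock/lock dance around the fast-ring poll. A stand-in sketch of the difference, using a pthread mutex in place of the host lock:

#include <pthread.h>

struct host { pthread_mutex_t host_lock; };

static int build_and_submit(struct host *h) { return 0; /* ... */ }

/* Roughly what DEF_SCSI_QCMD generated: host-wide serialization. */
static int queuecommand_locked(struct host *h)
{
	int rc;

	pthread_mutex_lock(&h->host_lock);
	rc = build_and_submit(h);
	pthread_mutex_unlock(&h->host_lock);
	return rc;
}

/* After this patch: no host-wide lock; finer-grained locks are taken
 * inside the submit path only where actually needed. */
static int queuecommand_lockless(struct host *h)
{
	return build_and_submit(h);
}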
@@ -4414,7 +4401,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4414 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 4401 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4415 4402
4416 status = fc_block_scsi_eh(cmnd); 4403 status = fc_block_scsi_eh(cmnd);
4417 if (status) 4404 if (status != 0 && status != SUCCESS)
4418 return status; 4405 return status;
4419 4406
4420 spin_lock_irq(&phba->hbalock); 4407 spin_lock_irq(&phba->hbalock);
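[Editor's note] The four eh-handler hunks in this file fix the same bug: fc_block_scsi_eh() returns either 0 or a SCSI midlayer code, and SUCCESS is non-zero, so the old "if (status)" propagated a successful unblock as a failure. Illustrative check (SUCCESS was 0x2002 in this era's include/scsi/scsi.h):

#define SUCCESS 0x2002	/* midlayer eh code of this era; FAILED is 0x2003 */

/* Bail out only for real failure codes such as FAILED or FAST_IO_FAIL. */
static int should_return_early(int status)
{
	return status != 0 && status != SUCCESS;
}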
@@ -4428,7 +4415,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4428 } 4415 }
4429 4416
4430 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 4417 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
4431 if (!lpfc_cmd) { 4418 if (!lpfc_cmd || !lpfc_cmd->pCmd) {
4432 spin_unlock_irq(&phba->hbalock); 4419 spin_unlock_irq(&phba->hbalock);
4433 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4420 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4434 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 4421 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
@@ -4521,9 +4508,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4521 ret = FAILED; 4508 ret = FAILED;
4522 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 4509 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4523 "0748 abort handler timed out waiting " 4510 "0748 abort handler timed out waiting "
4524 "for abort to complete: ret %#x, ID %d, " 4511 "for abortng I/O (xri:x%x) to complete: "
4525 "LUN %d\n", 4512 "ret %#x, ID %d, LUN %d\n",
4526 ret, cmnd->device->id, cmnd->device->lun); 4513 iocb->sli4_xritag, ret,
4514 cmnd->device->id, cmnd->device->lun);
4527 } 4515 }
4528 goto out; 4516 goto out;
4529 4517
@@ -4769,7 +4757,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
4769 } 4757 }
4770 pnode = rdata->pnode; 4758 pnode = rdata->pnode;
4771 status = fc_block_scsi_eh(cmnd); 4759 status = fc_block_scsi_eh(cmnd);
4772 if (status) 4760 if (status != 0 && status != SUCCESS)
4773 return status; 4761 return status;
4774 4762
4775 status = lpfc_chk_tgt_mapped(vport, cmnd); 4763 status = lpfc_chk_tgt_mapped(vport, cmnd);
@@ -4836,7 +4824,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
4836 } 4824 }
4837 pnode = rdata->pnode; 4825 pnode = rdata->pnode;
4838 status = fc_block_scsi_eh(cmnd); 4826 status = fc_block_scsi_eh(cmnd);
4839 if (status) 4827 if (status != 0 && status != SUCCESS)
4840 return status; 4828 return status;
4841 4829
4842 status = lpfc_chk_tgt_mapped(vport, cmnd); 4830 status = lpfc_chk_tgt_mapped(vport, cmnd);
@@ -4904,7 +4892,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
4904 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 4892 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4905 4893
4906 status = fc_block_scsi_eh(cmnd); 4894 status = fc_block_scsi_eh(cmnd);
4907 if (status) 4895 if (status != 0 && status != SUCCESS)
4908 return status; 4896 return status;
4909 4897
4910 /* 4898 /*
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0e7e144507b2..219bf534ef99 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -69,6 +69,8 @@ static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
69 struct lpfc_cqe *); 69 struct lpfc_cqe *);
70static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *, 70static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
71 int); 71 int);
72static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
73 uint32_t);
72 74
73static IOCB_t * 75static IOCB_t *
74lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 76lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -94,6 +96,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
94 union lpfc_wqe *temp_wqe; 96 union lpfc_wqe *temp_wqe;
95 struct lpfc_register doorbell; 97 struct lpfc_register doorbell;
96 uint32_t host_index; 98 uint32_t host_index;
99 uint32_t idx;
97 100
98 /* sanity check on queue memory */ 101 /* sanity check on queue memory */
99 if (unlikely(!q)) 102 if (unlikely(!q))
@@ -101,8 +104,12 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
101 temp_wqe = q->qe[q->host_index].wqe; 104 temp_wqe = q->qe[q->host_index].wqe;
102 105
103 /* If the host has not yet processed the next entry then we are done */ 106 /* If the host has not yet processed the next entry then we are done */
104 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 107 idx = ((q->host_index + 1) % q->entry_count);
108 if (idx == q->hba_index) {
109 q->WQ_overflow++;
105 return -ENOMEM; 110 return -ENOMEM;
111 }
112 q->WQ_posted++;
106 /* set consumption flag every once in a while */ 113 /* set consumption flag every once in a while */
107 if (!((q->host_index + 1) % q->entry_repost)) 114 if (!((q->host_index + 1) % q->entry_repost))
108 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 115 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
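[Editor's note] The full-queue test keeps one slot empty so that host_index == hba_index can unambiguously mean "empty"; the hunk hoists the wrapped index into idx and counts posts and overflows for the new debugfs statistics. A standalone model of the index math:

#include <stdint.h>

struct wq {
	uint32_t host_index;	/* next slot the host writes */
	uint32_t hba_index;	/* next slot the hardware consumes */
	uint32_t entry_count;
	uint64_t posted, overflow;	/* WQ_posted / WQ_overflow */
};

static int wq_put(struct wq *q)
{
	uint32_t idx = (q->host_index + 1) % q->entry_count;

	if (idx == q->hba_index) {	/* advancing would collide: full */
		q->overflow++;
		return -1;		/* caller sees -ENOMEM */
	}
	q->posted++;
	/* ... copy the WQE into slot q->host_index here ... */
	q->host_index = idx;
	return 0;
}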
@@ -112,7 +119,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
112 119
113 /* Update the host index before invoking device */ 120 /* Update the host index before invoking device */
114 host_index = q->host_index; 121 host_index = q->host_index;
115 q->host_index = ((q->host_index + 1) % q->entry_count); 122
123 q->host_index = idx;
116 124
117 /* Ring Doorbell */ 125 /* Ring Doorbell */
118 doorbell.word0 = 0; 126 doorbell.word0 = 0;
@@ -120,7 +128,6 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
120 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index); 128 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
121 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id); 129 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
122 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr); 130 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
123 readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
124 131
125 return 0; 132 return 0;
126} 133}
@@ -194,7 +201,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
194 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); 201 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
195 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); 202 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
196 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); 203 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
197 readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
198 return 0; 204 return 0;
199} 205}
200 206
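[Editor's note] Both doorbell hunks drop the readl() that followed the writel(): the read existed only to flush the posted doorbell write immediately, at the cost of a bus round trip per submission, and ordering against later MMIO is already guaranteed. Sketch with a hypothetical accessor standing in for writel():

#include <stdint.h>

static inline void ring_doorbell(volatile uint32_t *db, uint32_t val)
{
	*db = val;	/* posted write, ordered w.r.t. later MMIO */
	/* (void)*db;	   removed read-back: it only forced synchronous
			   completion and cost a bus round trip */
}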
@@ -234,6 +240,7 @@ static struct lpfc_eqe *
234lpfc_sli4_eq_get(struct lpfc_queue *q) 240lpfc_sli4_eq_get(struct lpfc_queue *q)
235{ 241{
236 struct lpfc_eqe *eqe; 242 struct lpfc_eqe *eqe;
243 uint32_t idx;
237 244
238 /* sanity check on queue memory */ 245 /* sanity check on queue memory */
239 if (unlikely(!q)) 246 if (unlikely(!q))
@@ -244,14 +251,34 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
244 if (!bf_get_le32(lpfc_eqe_valid, eqe)) 251 if (!bf_get_le32(lpfc_eqe_valid, eqe))
245 return NULL; 252 return NULL;
246 /* If the host has not yet processed the next entry then we are done */ 253 /* If the host has not yet processed the next entry then we are done */
247 if (((q->hba_index + 1) % q->entry_count) == q->host_index) 254 idx = ((q->hba_index + 1) % q->entry_count);
255 if (idx == q->host_index)
248 return NULL; 256 return NULL;
249 257
250 q->hba_index = ((q->hba_index + 1) % q->entry_count); 258 q->hba_index = idx;
251 return eqe; 259 return eqe;
252} 260}
253 261
254/** 262/**
263 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
264 * @q: The Event Queue to disable interrupts
265 *
266 **/
267static inline void
268lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
269{
270 struct lpfc_register doorbell;
271
272 doorbell.word0 = 0;
273 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
274 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
275 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
276 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
277 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
278 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
279}
280
281/**
255 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ 282 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
256 * @q: The Event Queue that the host has completed processing for. 283 * @q: The Event Queue that the host has completed processing for.
257 * @arm: Indicates whether the host wants to arm this EQ. 284 * @arm: Indicates whether the host wants to arm this EQ.
@@ -318,6 +345,7 @@ static struct lpfc_cqe *
318lpfc_sli4_cq_get(struct lpfc_queue *q) 345lpfc_sli4_cq_get(struct lpfc_queue *q)
319{ 346{
320 struct lpfc_cqe *cqe; 347 struct lpfc_cqe *cqe;
348 uint32_t idx;
321 349
322 /* sanity check on queue memory */ 350 /* sanity check on queue memory */
323 if (unlikely(!q)) 351 if (unlikely(!q))
@@ -327,11 +355,12 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
327 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) 355 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
328 return NULL; 356 return NULL;
329 /* If the host has not yet processed the next entry then we are done */ 357 /* If the host has not yet processed the next entry then we are done */
330 if (((q->hba_index + 1) % q->entry_count) == q->host_index) 358 idx = ((q->hba_index + 1) % q->entry_count);
359 if (idx == q->host_index)
331 return NULL; 360 return NULL;
332 361
333 cqe = q->qe[q->hba_index].cqe; 362 cqe = q->qe[q->hba_index].cqe;
334 q->hba_index = ((q->hba_index + 1) % q->entry_count); 363 q->hba_index = idx;
335 return cqe; 364 return cqe;
336} 365}
337 366
@@ -472,8 +501,8 @@ lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
472static inline IOCB_t * 501static inline IOCB_t *
473lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 502lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
474{ 503{
475 return (IOCB_t *) (((char *) pring->cmdringaddr) + 504 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
476 pring->cmdidx * phba->iocb_cmd_size); 505 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
477} 506}
478 507
479/** 508/**
@@ -489,8 +518,8 @@ lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
489static inline IOCB_t * 518static inline IOCB_t *
490lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 519lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
491{ 520{
492 return (IOCB_t *) (((char *) pring->rspringaddr) + 521 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
493 pring->rspidx * phba->iocb_rsp_size); 522 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
494} 523}
495 524
496/** 525/**
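[Editor's note] The pring->sli.sli3.* renames that run through the rest of this file come from reorganizing struct lpfc_sli_ring: SLI3-only bookkeeping moves into a union member so it no longer shares a flat namespace with SLI4 state. A rough shape implied by the accessors (the authoritative layout is in this series' lpfc_sli.h hunk; spelling here is illustrative):

#include <stdint.h>

struct sli3_ring {
	uint32_t cmdidx, rspidx, next_cmdidx, local_getidx;
	uint32_t numCiocb, numRiocb, sizeCiocb, sizeRiocb;
	void *cmdringaddr, *rspringaddr;
};

struct sli4_ring {
	void *wqp;	/* SLI4: the work queue backing this ring */
};

struct ring {
	/* ... fields common to both revs: ringno, stats, txq ... */
	union {
		struct sli3_ring sli3;	/* pring->sli.sli3.cmdidx etc. */
		struct sli4_ring sli4;
	} sli;
};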
@@ -1320,21 +1349,23 @@ static IOCB_t *
1320lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1349lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1321{ 1350{
1322 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1351 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1323 uint32_t max_cmd_idx = pring->numCiocb; 1352 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
1324 if ((pring->next_cmdidx == pring->cmdidx) && 1353 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1325 (++pring->next_cmdidx >= max_cmd_idx)) 1354 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1326 pring->next_cmdidx = 0; 1355 pring->sli.sli3.next_cmdidx = 0;
1327 1356
1328 if (unlikely(pring->local_getidx == pring->next_cmdidx)) { 1357 if (unlikely(pring->sli.sli3.local_getidx ==
1358 pring->sli.sli3.next_cmdidx)) {
1329 1359
1330 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1360 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1331 1361
1332 if (unlikely(pring->local_getidx >= max_cmd_idx)) { 1362 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1333 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1363 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1334 "0315 Ring %d issue: portCmdGet %d " 1364 "0315 Ring %d issue: portCmdGet %d "
1335 "is bigger than cmd ring %d\n", 1365 "is bigger than cmd ring %d\n",
1336 pring->ringno, 1366 pring->ringno,
1337 pring->local_getidx, max_cmd_idx); 1367 pring->sli.sli3.local_getidx,
1368 max_cmd_idx);
1338 1369
1339 phba->link_state = LPFC_HBA_ERROR; 1370 phba->link_state = LPFC_HBA_ERROR;
1340 /* 1371 /*
@@ -1349,7 +1380,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1349 return NULL; 1380 return NULL;
1350 } 1381 }
1351 1382
1352 if (pring->local_getidx == pring->next_cmdidx) 1383 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1353 return NULL; 1384 return NULL;
1354 } 1385 }
1355 1386
@@ -1484,8 +1515,8 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1484 * Let the HBA know what IOCB slot will be the next one the 1515 * Let the HBA know what IOCB slot will be the next one the
1485 * driver will put a command into. 1516 * driver will put a command into.
1486 */ 1517 */
1487 pring->cmdidx = pring->next_cmdidx; 1518 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1488 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1519 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1489} 1520}
1490 1521
1491/** 1522/**
@@ -2056,6 +2087,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2056 case MBX_READ_EVENT_LOG: 2087 case MBX_READ_EVENT_LOG:
2057 case MBX_SECURITY_MGMT: 2088 case MBX_SECURITY_MGMT:
2058 case MBX_AUTH_PORT: 2089 case MBX_AUTH_PORT:
2090 case MBX_ACCESS_VDATA:
2059 ret = mbxCommand; 2091 ret = mbxCommand;
2060 break; 2092 break;
2061 default: 2093 default:
@@ -2786,7 +2818,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2786 "0312 Ring %d handler: portRspPut %d " 2818 "0312 Ring %d handler: portRspPut %d "
2787 "is bigger than rsp ring %d\n", 2819 "is bigger than rsp ring %d\n",
2788 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2820 pring->ringno, le32_to_cpu(pgp->rspPutInx),
2789 pring->numRiocb); 2821 pring->sli.sli3.numRiocb);
2790 2822
2791 phba->link_state = LPFC_HBA_ERROR; 2823 phba->link_state = LPFC_HBA_ERROR;
2792 2824
@@ -2815,10 +2847,26 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2815void lpfc_poll_eratt(unsigned long ptr) 2847void lpfc_poll_eratt(unsigned long ptr)
2816{ 2848{
2817 struct lpfc_hba *phba; 2849 struct lpfc_hba *phba;
2818 uint32_t eratt = 0; 2850 uint32_t eratt = 0, rem;
2851 uint64_t sli_intr, cnt;
2819 2852
2820 phba = (struct lpfc_hba *)ptr; 2853 phba = (struct lpfc_hba *)ptr;
2821 2854
2855 /* Here we will also keep track of interrupts per sec of the hba */
2856 sli_intr = phba->sli.slistat.sli_intr;
2857
2858 if (phba->sli.slistat.sli_prev_intr > sli_intr)
2859 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2860 sli_intr);
2861 else
2862 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2863
2864 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
2865 rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
2866 phba->sli.slistat.sli_ips = cnt;
2867
2868 phba->sli.slistat.sli_prev_intr = sli_intr;
2869
2822 /* Check chip HA register for error event */ 2870 /* Check chip HA register for error event */
2823 eratt = lpfc_sli_check_eratt(phba); 2871 eratt = lpfc_sli_check_eratt(phba);
2824 2872
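[Editor's note] The new per-second interrupt statistic samples a free-running 64-bit counter on each poll tick; it must survive a wrap between samples and must avoid a native 64/32 divide on 32-bit kernels (hence do_div()). Equivalent plain-C arithmetic:

#include <stdint.h>

#define POLL_INTERVAL 5	/* stand-in for LPFC_ERATT_POLL_INTERVAL, in seconds */

static uint64_t intr_per_sec(uint64_t prev, uint64_t now)
{
	uint64_t cnt;

	if (prev > now)				/* counter wrapped */
		cnt = (UINT64_MAX - prev) + now;
	else
		cnt = now - prev;

	return cnt / POLL_INTERVAL;	/* the kernel uses do_div() here */
}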
@@ -2873,7 +2921,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2873 * The next available response entry should never exceed the maximum 2921 * The next available response entry should never exceed the maximum
2874 * entries. If it does, treat it as an adapter hardware error. 2922 * entries. If it does, treat it as an adapter hardware error.
2875 */ 2923 */
2876 portRspMax = pring->numRiocb; 2924 portRspMax = pring->sli.sli3.numRiocb;
2877 portRspPut = le32_to_cpu(pgp->rspPutInx); 2925 portRspPut = le32_to_cpu(pgp->rspPutInx);
2878 if (unlikely(portRspPut >= portRspMax)) { 2926 if (unlikely(portRspPut >= portRspMax)) {
2879 lpfc_sli_rsp_pointers_error(phba, pring); 2927 lpfc_sli_rsp_pointers_error(phba, pring);
@@ -2887,7 +2935,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2887 phba->fcp_ring_in_use = 1; 2935 phba->fcp_ring_in_use = 1;
2888 2936
2889 rmb(); 2937 rmb();
2890 while (pring->rspidx != portRspPut) { 2938 while (pring->sli.sli3.rspidx != portRspPut) {
2891 /* 2939 /*
2892 * Fetch an entry off the ring and copy it into a local data 2940 * Fetch an entry off the ring and copy it into a local data
2893 * structure. The copy involves a byte-swap since the 2941 * structure. The copy involves a byte-swap since the
@@ -2896,8 +2944,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2896 entry = lpfc_resp_iocb(phba, pring); 2944 entry = lpfc_resp_iocb(phba, pring);
2897 phba->last_completion_time = jiffies; 2945 phba->last_completion_time = jiffies;
2898 2946
2899 if (++pring->rspidx >= portRspMax) 2947 if (++pring->sli.sli3.rspidx >= portRspMax)
2900 pring->rspidx = 0; 2948 pring->sli.sli3.rspidx = 0;
2901 2949
2902 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 2950 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2903 (uint32_t *) &rspiocbq.iocb, 2951 (uint32_t *) &rspiocbq.iocb,
@@ -2915,7 +2963,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2915 * queuedepths of the SCSI device. 2963 * queuedepths of the SCSI device.
2916 */ 2964 */
2917 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2965 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2918 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2966 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
2967 IOERR_NO_RESOURCES)) {
2919 spin_unlock_irqrestore(&phba->hbalock, iflag); 2968 spin_unlock_irqrestore(&phba->hbalock, iflag);
2920 phba->lpfc_rampdown_queue_depth(phba); 2969 phba->lpfc_rampdown_queue_depth(phba);
2921 spin_lock_irqsave(&phba->hbalock, iflag); 2970 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2998,9 +3047,10 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2998 * been updated, sync the pgp->rspPutInx and fetch the new port 3047 * been updated, sync the pgp->rspPutInx and fetch the new port
2999 * response put pointer. 3048 * response put pointer.
3000 */ 3049 */
3001 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 3050 writel(pring->sli.sli3.rspidx,
3051 &phba->host_gp[pring->ringno].rspGetInx);
3002 3052
3003 if (pring->rspidx == portRspPut) 3053 if (pring->sli.sli3.rspidx == portRspPut)
3004 portRspPut = le32_to_cpu(pgp->rspPutInx); 3054 portRspPut = le32_to_cpu(pgp->rspPutInx);
3005 } 3055 }
3006 3056
@@ -3015,7 +3065,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3015 pring->stats.iocb_cmd_empty++; 3065 pring->stats.iocb_cmd_empty++;
3016 3066
3017 /* Force update of the local copy of cmdGetInx */ 3067 /* Force update of the local copy of cmdGetInx */
3018 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3068 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3019 lpfc_sli_resume_iocb(phba, pring); 3069 lpfc_sli_resume_iocb(phba, pring);
3020 3070
3021 if ((pring->lpfc_sli_cmd_available)) 3071 if ((pring->lpfc_sli_cmd_available))
@@ -3086,7 +3136,8 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3086 * queuedepths of the SCSI device. 3136 * queuedepths of the SCSI device.
3087 */ 3137 */
3088 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3138 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3089 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 3139 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3140 IOERR_NO_RESOURCES)) {
3090 spin_unlock_irqrestore(&phba->hbalock, iflag); 3141 spin_unlock_irqrestore(&phba->hbalock, iflag);
3091 phba->lpfc_rampdown_queue_depth(phba); 3142 phba->lpfc_rampdown_queue_depth(phba);
3092 spin_lock_irqsave(&phba->hbalock, iflag); 3143 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -3247,7 +3298,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3247 * The next available response entry should never exceed the maximum 3298 * The next available response entry should never exceed the maximum
3248 * entries. If it does, treat it as an adapter hardware error. 3299 * entries. If it does, treat it as an adapter hardware error.
3249 */ 3300 */
3250 portRspMax = pring->numRiocb; 3301 portRspMax = pring->sli.sli3.numRiocb;
3251 portRspPut = le32_to_cpu(pgp->rspPutInx); 3302 portRspPut = le32_to_cpu(pgp->rspPutInx);
3252 if (portRspPut >= portRspMax) { 3303 if (portRspPut >= portRspMax) {
3253 /* 3304 /*
@@ -3269,7 +3320,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3269 } 3320 }
3270 3321
3271 rmb(); 3322 rmb();
3272 while (pring->rspidx != portRspPut) { 3323 while (pring->sli.sli3.rspidx != portRspPut) {
3273 /* 3324 /*
3274 * Build a completion list and call the appropriate handler. 3325 * Build a completion list and call the appropriate handler.
3275 * The process is to get the next available response iocb, get 3326 * The process is to get the next available response iocb, get
@@ -3297,8 +3348,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3297 phba->iocb_rsp_size); 3348 phba->iocb_rsp_size);
3298 irsp = &rspiocbp->iocb; 3349 irsp = &rspiocbp->iocb;
3299 3350
3300 if (++pring->rspidx >= portRspMax) 3351 if (++pring->sli.sli3.rspidx >= portRspMax)
3301 pring->rspidx = 0; 3352 pring->sli.sli3.rspidx = 0;
3302 3353
3303 if (pring->ringno == LPFC_ELS_RING) { 3354 if (pring->ringno == LPFC_ELS_RING) {
3304 lpfc_debugfs_slow_ring_trc(phba, 3355 lpfc_debugfs_slow_ring_trc(phba,
@@ -3308,7 +3359,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3308 *(((uint32_t *) irsp) + 7)); 3359 *(((uint32_t *) irsp) + 7));
3309 } 3360 }
3310 3361
3311 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 3362 writel(pring->sli.sli3.rspidx,
3363 &phba->host_gp[pring->ringno].rspGetInx);
3312 3364
3313 spin_unlock_irqrestore(&phba->hbalock, iflag); 3365 spin_unlock_irqrestore(&phba->hbalock, iflag);
3314 /* Handle the response IOCB */ 3366 /* Handle the response IOCB */
@@ -3320,10 +3372,10 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3320 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port 3372 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3321 * response put pointer. 3373 * response put pointer.
3322 */ 3374 */
3323 if (pring->rspidx == portRspPut) { 3375 if (pring->sli.sli3.rspidx == portRspPut) {
3324 portRspPut = le32_to_cpu(pgp->rspPutInx); 3376 portRspPut = le32_to_cpu(pgp->rspPutInx);
3325 } 3377 }
3326 } /* while (pring->rspidx != portRspPut) */ 3378 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3327 3379
3328 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3380 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3329 /* At least one response entry has been freed */ 3381 /* At least one response entry has been freed */
@@ -3338,7 +3390,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3338 pring->stats.iocb_cmd_empty++; 3390 pring->stats.iocb_cmd_empty++;
3339 3391
3340 /* Force update of the local copy of cmdGetInx */ 3392 /* Force update of the local copy of cmdGetInx */
3341 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3393 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3342 lpfc_sli_resume_iocb(phba, pring); 3394 lpfc_sli_resume_iocb(phba, pring);
3343 3395
3344 if ((pring->lpfc_sli_cmd_available)) 3396 if ((pring->lpfc_sli_cmd_available))
@@ -3859,10 +3911,10 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
3859 for (i = 0; i < psli->num_rings; i++) { 3911 for (i = 0; i < psli->num_rings; i++) {
3860 pring = &psli->ring[i]; 3912 pring = &psli->ring[i];
3861 pring->flag = 0; 3913 pring->flag = 0;
3862 pring->rspidx = 0; 3914 pring->sli.sli3.rspidx = 0;
3863 pring->next_cmdidx = 0; 3915 pring->sli.sli3.next_cmdidx = 0;
3864 pring->local_getidx = 0; 3916 pring->sli.sli3.local_getidx = 0;
3865 pring->cmdidx = 0; 3917 pring->sli.sli3.cmdidx = 0;
3866 pring->missbufcnt = 0; 3918 pring->missbufcnt = 0;
3867 } 3919 }
3868 3920
@@ -4893,16 +4945,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4893 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4945 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4894 fcp_eqidx = 0; 4946 fcp_eqidx = 0;
4895 if (phba->sli4_hba.fcp_cq) { 4947 if (phba->sli4_hba.fcp_cq) {
4896 do 4948 do {
4897 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4949 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4898 LPFC_QUEUE_REARM); 4950 LPFC_QUEUE_REARM);
4899 while (++fcp_eqidx < phba->cfg_fcp_eq_count); 4951 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
4900 } 4952 }
4901 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4953 if (phba->sli4_hba.hba_eq) {
4902 if (phba->sli4_hba.fp_eq) { 4954 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
4903 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
4904 fcp_eqidx++) 4955 fcp_eqidx++)
4905 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4956 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
4906 LPFC_QUEUE_REARM); 4957 LPFC_QUEUE_REARM);
4907 } 4958 }
4908} 4959}
@@ -7784,14 +7835,18 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7784 * 7835 *
7785 * Return: index into SLI4 fast-path FCP queue index. 7836 * Return: index into SLI4 fast-path FCP queue index.
7786 **/ 7837 **/
7787static uint32_t 7838static inline uint32_t
7788lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 7839lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
7789{ 7840{
7790 ++phba->fcp_qidx; 7841 int i;
7791 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count) 7842
7792 phba->fcp_qidx = 0; 7843 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
7844 i = smp_processor_id();
7845 else
7846 i = atomic_add_return(1, &phba->fcp_qidx);
7793 7847
7794 return phba->fcp_qidx; 7848 i = (i % phba->cfg_fcp_io_channel);
7849 return i;
7795} 7850}
7796 7851
7797/** 7852/**
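[Editor's note] lpfc_sli4_scmd_to_wqidx_distr() now supports two placement policies selected by cfg_fcp_io_sched: follow the submitting CPU (locality with the per-CPU hardware queues) or round-robin via an atomic counter (even spread). Reduced sketch; the driver uses smp_processor_id() and atomic_add_return() where this takes plain arguments:

#include <stdint.h>

enum fcp_sched { SCHED_ROUND_ROBIN, SCHED_BY_CPU };

static uint32_t pick_io_channel(enum fcp_sched policy, uint32_t this_cpu,
				uint32_t rr_ticket, uint32_t nr_channels)
{
	uint32_t i = (policy == SCHED_BY_CPU) ? this_cpu : rr_ticket;

	return i % nr_channels;		/* clamp into [0, nr_channels) */
}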
@@ -8311,16 +8366,6 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8311 8366
8312 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8367 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8313 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8368 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8314 /*
8315 * For FCP command IOCB, get a new WQ index to distribute
8316 * WQE across the WQs. On the other hand, for abort IOCB,
8317 * it carries the same WQ index to the original command
8318 * IOCB.
8319 */
8320 if (piocb->iocb_flag & LPFC_IO_FCP)
8321 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8322 if (unlikely(!phba->sli4_hba.fcp_wq))
8323 return IOCB_ERROR;
8324 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8369 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8325 &wqe)) 8370 &wqe))
8326 return IOCB_ERROR; 8371 return IOCB_ERROR;
@@ -8401,13 +8446,68 @@ int
8401lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8446lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8402 struct lpfc_iocbq *piocb, uint32_t flag) 8447 struct lpfc_iocbq *piocb, uint32_t flag)
8403{ 8448{
8449 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
8450 struct lpfc_sli_ring *pring;
8451 struct lpfc_queue *fpeq;
8452 struct lpfc_eqe *eqe;
8404 unsigned long iflags; 8453 unsigned long iflags;
8405 int rc; 8454 int rc, idx;
8406 8455
8407 spin_lock_irqsave(&phba->hbalock, iflags); 8456 if (phba->sli_rev == LPFC_SLI_REV4) {
8408 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8457 if (piocb->iocb_flag & LPFC_IO_FCP) {
8409 spin_unlock_irqrestore(&phba->hbalock, iflags); 8458 if (unlikely(!phba->sli4_hba.fcp_wq))
8459 return IOCB_ERROR;
8460 idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8461 piocb->fcp_wqidx = idx;
8462 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
8463
8464 pring = &phba->sli.ring[ring_number];
8465 spin_lock_irqsave(&pring->ring_lock, iflags);
8466 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8467 flag);
8468 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8469
8470 if (lpfc_fcp_look_ahead) {
8471 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
8472
8473 if (atomic_dec_and_test(&fcp_eq_hdl->
8474 fcp_eq_in_use)) {
8410 8475
8476 /* Get associated EQ with this index */
8477 fpeq = phba->sli4_hba.hba_eq[idx];
8478
8479 /* Turn off interrupts from this EQ */
8480 lpfc_sli4_eq_clr_intr(fpeq);
8481
8482 /*
8483 * Process all the events on FCP EQ
8484 */
8485 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
8486 lpfc_sli4_hba_handle_eqe(phba,
8487 eqe, idx);
8488 fpeq->EQ_processed++;
8489 }
8490
8491 /* Always clear and re-arm the EQ */
8492 lpfc_sli4_eq_release(fpeq,
8493 LPFC_QUEUE_REARM);
8494 }
8495 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
8496 }
8497 } else {
8498 pring = &phba->sli.ring[ring_number];
8499 spin_lock_irqsave(&pring->ring_lock, iflags);
8500 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8501 flag);
8502 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8503
8504 }
8505 } else {
8506 /* For now, SLI2/3 will still use hbalock */
8507 spin_lock_irqsave(&phba->hbalock, iflags);
8508 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8509 spin_unlock_irqrestore(&phba->hbalock, iflags);
8510 }
8411 return rc; 8511 return rc;
8412} 8512}
8413 8513
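[Editor's note] The reworked lpfc_sli_issue_iocb() gives each SLI4 FCP channel its own pseudo-ring and ring_lock, and (when lpfc_fcp_look_ahead is set) polls that channel's EQ inline from the submission path. The atomic fcp_eq_in_use counter is the gate that keeps the submitter and the interrupt handler from draining the same EQ concurrently; a C11 model of that gate:

#include <stdatomic.h>

/* Counter starts at 1. Whoever decrements it to 0 owns the EQ: it masks
 * EQ interrupts, drains pending events, re-arms, then releases ownership
 * by incrementing. A second context decrementing while the first still
 * holds ownership sees a non-zero prior value and skips the drain. */
static void maybe_poll_eq(atomic_int *fcp_eq_in_use)
{
	if (atomic_fetch_sub(fcp_eq_in_use, 1) == 1) {
		/* owner: clear EQ interrupts, drain EQEs, release w/ REARM */
	}
	atomic_fetch_add(fcp_eq_in_use, 1);
}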
@@ -8434,18 +8534,18 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
8434 8534
8435 /* Take some away from the FCP ring */ 8535 /* Take some away from the FCP ring */
8436 pring = &psli->ring[psli->fcp_ring]; 8536 pring = &psli->ring[psli->fcp_ring];
8437 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8537 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8438 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8538 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8439 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8539 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8440 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8540 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8441 8541
8442 /* and give them to the extra ring */ 8542 /* and give them to the extra ring */
8443 pring = &psli->ring[psli->extra_ring]; 8543 pring = &psli->ring[psli->extra_ring];
8444 8544
8445 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8545 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8446 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8546 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8447 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8547 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8448 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8548 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8449 8549
8450 /* Setup default profile for this ring */ 8550 /* Setup default profile for this ring */
8451 pring->iotag_max = 4096; 8551 pring->iotag_max = 4096;
@@ -8457,56 +8557,6 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
8457 return 0; 8557 return 0;
8458} 8558}
8459 8559
8460/* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
8461 * @vport: pointer to virtual port object.
8462 * @ndlp: nodelist pointer for the impacted rport.
8463 *
8464 * The driver calls this routine in response to a XRI ABORT CQE
8465 * event from the port. In this event, the driver is required to
8466 * recover its login to the rport even though its login may be valid
8467 * from the driver's perspective. The failed ABTS notice from the
8468 * port indicates the rport is not responding.
8469 */
8470static void
8471lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
8472 struct lpfc_nodelist *ndlp)
8473{
8474 struct Scsi_Host *shost;
8475 struct lpfc_hba *phba;
8476 unsigned long flags = 0;
8477
8478 shost = lpfc_shost_from_vport(vport);
8479 phba = vport->phba;
8480 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
8481 lpfc_printf_log(phba, KERN_INFO,
8482 LOG_SLI, "3093 No rport recovery needed. "
8483 "rport in state 0x%x\n",
8484 ndlp->nlp_state);
8485 return;
8486 }
8487 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8488 "3094 Start rport recovery on shost id 0x%x "
8489 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
8490 "flags 0x%x\n",
8491 shost->host_no, ndlp->nlp_DID,
8492 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
8493 ndlp->nlp_flag);
8494 /*
8495 * The rport is not responding. Don't attempt ADISC recovery.
8496 * Remove the FCP-2 flag to force a PLOGI.
8497 */
8498 spin_lock_irqsave(shost->host_lock, flags);
8499 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
8500 spin_unlock_irqrestore(shost->host_lock, flags);
8501 lpfc_disc_state_machine(vport, ndlp, NULL,
8502 NLP_EVT_DEVICE_RECOVERY);
8503 lpfc_cancel_retry_delay_tmo(vport, ndlp);
8504 spin_lock_irqsave(shost->host_lock, flags);
8505 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
8506 spin_unlock_irqrestore(shost->host_lock, flags);
8507 lpfc_disc_start(vport);
8508}
8509
8510/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 8560/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
8511 * @phba: Pointer to HBA context object. 8561 * @phba: Pointer to HBA context object.
8512 * @iocbq: Pointer to iocb object. 8562 * @iocbq: Pointer to iocb object.
@@ -8594,7 +8644,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8594 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 8644 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
8595 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 8645 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
8596 */ 8646 */
8597 ext_status = axri->parameter & WCQE_PARAM_MASK; 8647 ext_status = axri->parameter & IOERR_PARAM_MASK;
8598 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 8648 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
8599 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 8649 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
8600 lpfc_sli_abts_recover_port(vport, ndlp); 8650 lpfc_sli_abts_recover_port(vport, ndlp);
@@ -8692,7 +8742,9 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8692 struct lpfc_sli *psli = &phba->sli; 8742 struct lpfc_sli *psli = &phba->sli;
8693 struct lpfc_sli_ring *pring; 8743 struct lpfc_sli_ring *pring;
8694 8744
8695 psli->num_rings = MAX_CONFIGURED_RINGS; 8745 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
8746 if (phba->sli_rev == LPFC_SLI_REV4)
8747 psli->num_rings += phba->cfg_fcp_io_channel;
8696 psli->sli_flag = 0; 8748 psli->sli_flag = 0;
8697 psli->fcp_ring = LPFC_FCP_RING; 8749 psli->fcp_ring = LPFC_FCP_RING;
8698 psli->next_ring = LPFC_FCP_NEXT_RING; 8750 psli->next_ring = LPFC_FCP_NEXT_RING;
@@ -8707,16 +8759,20 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8707 switch (i) { 8759 switch (i) {
8708 case LPFC_FCP_RING: /* ring 0 - FCP */ 8760 case LPFC_FCP_RING: /* ring 0 - FCP */
8709 /* numCiocb and numRiocb are used in config_port */ 8761 /* numCiocb and numRiocb are used in config_port */
8710 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 8762 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
8711 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 8763 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
8712 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8764 pring->sli.sli3.numCiocb +=
8713 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8765 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8714 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8766 pring->sli.sli3.numRiocb +=
8715 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8767 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8716 pring->sizeCiocb = (phba->sli_rev == 3) ? 8768 pring->sli.sli3.numCiocb +=
8769 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8770 pring->sli.sli3.numRiocb +=
8771 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8772 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8717 SLI3_IOCB_CMD_SIZE : 8773 SLI3_IOCB_CMD_SIZE :
8718 SLI2_IOCB_CMD_SIZE; 8774 SLI2_IOCB_CMD_SIZE;
8719 pring->sizeRiocb = (phba->sli_rev == 3) ? 8775 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8720 SLI3_IOCB_RSP_SIZE : 8776 SLI3_IOCB_RSP_SIZE :
8721 SLI2_IOCB_RSP_SIZE; 8777 SLI2_IOCB_RSP_SIZE;
8722 pring->iotag_ctr = 0; 8778 pring->iotag_ctr = 0;
@@ -8727,12 +8783,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8727 break; 8783 break;
8728 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 8784 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
8729 /* numCiocb and numRiocb are used in config_port */ 8785 /* numCiocb and numRiocb are used in config_port */
8730 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 8786 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
8731 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 8787 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
8732 pring->sizeCiocb = (phba->sli_rev == 3) ? 8788 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8733 SLI3_IOCB_CMD_SIZE : 8789 SLI3_IOCB_CMD_SIZE :
8734 SLI2_IOCB_CMD_SIZE; 8790 SLI2_IOCB_CMD_SIZE;
8735 pring->sizeRiocb = (phba->sli_rev == 3) ? 8791 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8736 SLI3_IOCB_RSP_SIZE : 8792 SLI3_IOCB_RSP_SIZE :
8737 SLI2_IOCB_RSP_SIZE; 8793 SLI2_IOCB_RSP_SIZE;
8738 pring->iotag_max = phba->cfg_hba_queue_depth; 8794 pring->iotag_max = phba->cfg_hba_queue_depth;
@@ -8740,12 +8796,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8740 break; 8796 break;
8741 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 8797 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
8742 /* numCiocb and numRiocb are used in config_port */ 8798 /* numCiocb and numRiocb are used in config_port */
8743 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 8799 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
8744 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 8800 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
8745 pring->sizeCiocb = (phba->sli_rev == 3) ? 8801 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8746 SLI3_IOCB_CMD_SIZE : 8802 SLI3_IOCB_CMD_SIZE :
8747 SLI2_IOCB_CMD_SIZE; 8803 SLI2_IOCB_CMD_SIZE;
8748 pring->sizeRiocb = (phba->sli_rev == 3) ? 8804 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8749 SLI3_IOCB_RSP_SIZE : 8805 SLI3_IOCB_RSP_SIZE :
8750 SLI2_IOCB_RSP_SIZE; 8806 SLI2_IOCB_RSP_SIZE;
8751 pring->fast_iotag = 0; 8807 pring->fast_iotag = 0;
@@ -8786,8 +8842,9 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8786 lpfc_sli4_ct_abort_unsol_event; 8842 lpfc_sli4_ct_abort_unsol_event;
8787 break; 8843 break;
8788 } 8844 }
8789 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 8845 totiocbsize += (pring->sli.sli3.numCiocb *
8790 (pring->numRiocb * pring->sizeRiocb); 8846 pring->sli.sli3.sizeCiocb) +
8847 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
8791 } 8848 }
8792 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 8849 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
8793 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 8850 /* Too many cmd / rsp ring entries in SLI2 SLIM */
@@ -8828,14 +8885,15 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
8828 for (i = 0; i < psli->num_rings; i++) { 8885 for (i = 0; i < psli->num_rings; i++) {
8829 pring = &psli->ring[i]; 8886 pring = &psli->ring[i];
8830 pring->ringno = i; 8887 pring->ringno = i;
8831 pring->next_cmdidx = 0; 8888 pring->sli.sli3.next_cmdidx = 0;
8832 pring->local_getidx = 0; 8889 pring->sli.sli3.local_getidx = 0;
8833 pring->cmdidx = 0; 8890 pring->sli.sli3.cmdidx = 0;
8834 INIT_LIST_HEAD(&pring->txq); 8891 INIT_LIST_HEAD(&pring->txq);
8835 INIT_LIST_HEAD(&pring->txcmplq); 8892 INIT_LIST_HEAD(&pring->txcmplq);
8836 INIT_LIST_HEAD(&pring->iocb_continueq); 8893 INIT_LIST_HEAD(&pring->iocb_continueq);
8837 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 8894 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
8838 INIT_LIST_HEAD(&pring->postbufq); 8895 INIT_LIST_HEAD(&pring->postbufq);
8896 spin_lock_init(&pring->ring_lock);
8839 } 8897 }
8840 spin_unlock_irq(&phba->hbalock); 8898 spin_unlock_irq(&phba->hbalock);
8841 return 1; 8899 return 1;
@@ -9334,6 +9392,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9334 IOCB_t *icmd = NULL; 9392 IOCB_t *icmd = NULL;
9335 IOCB_t *iabt = NULL; 9393 IOCB_t *iabt = NULL;
9336 int retval; 9394 int retval;
9395 unsigned long iflags;
9337 9396
9338 /* 9397 /*
9339 * There are certain command types we don't want to abort. And we 9398 * There are certain command types we don't want to abort. And we
@@ -9386,7 +9445,17 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9386 iabt->un.acxri.abortIoTag, 9445 iabt->un.acxri.abortIoTag,
9387 iabt->un.acxri.abortContextTag, 9446 iabt->un.acxri.abortContextTag,
9388 abtsiocbp->iotag); 9447 abtsiocbp->iotag);
9389 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); 9448
9449 if (phba->sli_rev == LPFC_SLI_REV4) {
9450 /* Note: both hbalock and ring_lock need to be set here */
9451 spin_lock_irqsave(&pring->ring_lock, iflags);
9452 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9453 abtsiocbp, 0);
9454 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9455 } else {
9456 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9457 abtsiocbp, 0);
9458 }
9390 9459
9391 if (retval) 9460 if (retval)
9392 __lpfc_sli_release_iocbq(phba, abtsiocbp); 9461 __lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -10947,12 +11016,12 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
10947 unsigned long iflags; 11016 unsigned long iflags;
10948 11017
10949 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 11018 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
10950 spin_lock_irqsave(&phba->hbalock, iflags); 11019 spin_lock_irqsave(&pring->ring_lock, iflags);
10951 pring->stats.iocb_event++; 11020 pring->stats.iocb_event++;
10952 /* Look up the ELS command IOCB and create pseudo response IOCB */ 11021 /* Look up the ELS command IOCB and create pseudo response IOCB */
10953 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 11022 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
10954 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11023 bf_get(lpfc_wcqe_c_request_tag, wcqe));
10955 spin_unlock_irqrestore(&phba->hbalock, iflags); 11024 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10956 11025
10957 if (unlikely(!cmdiocbq)) { 11026 if (unlikely(!cmdiocbq)) {
10958 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11027 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11154,6 +11223,7 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11154/** 11223/**
11155 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 11224 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
11156 * @phba: Pointer to HBA context object. 11225 * @phba: Pointer to HBA context object.
11226 * @cq: Pointer to associated CQ
11157 * @wcqe: Pointer to work-queue completion queue entry. 11227 * @wcqe: Pointer to work-queue completion queue entry.
11158 * 11228 *
11159 * This routine handles an ELS work-queue completion event. 11229 * This routine handles an ELS work-queue completion event.
@@ -11161,12 +11231,12 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11161 * Return: true if work posted to worker thread, otherwise false. 11231 * Return: true if work posted to worker thread, otherwise false.
11162 **/ 11232 **/
11163static bool 11233static bool
11164lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, 11234lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11165 struct lpfc_wcqe_complete *wcqe) 11235 struct lpfc_wcqe_complete *wcqe)
11166{ 11236{
11167 struct lpfc_iocbq *irspiocbq; 11237 struct lpfc_iocbq *irspiocbq;
11168 unsigned long iflags; 11238 unsigned long iflags;
11169 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 11239 struct lpfc_sli_ring *pring = cq->pring;
11170 11240
11171 /* Get an irspiocbq for later ELS response processing use */ 11241 /* Get an irspiocbq for later ELS response processing use */
11172 irspiocbq = lpfc_sli_get_iocbq(phba); 11242 irspiocbq = lpfc_sli_get_iocbq(phba);
@@ -11311,14 +11381,17 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11311 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 11381 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
11312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11382 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11313 "2537 Receive Frame Truncated!!\n"); 11383 "2537 Receive Frame Truncated!!\n");
11384 hrq->RQ_buf_trunc++;
11314 case FC_STATUS_RQ_SUCCESS: 11385 case FC_STATUS_RQ_SUCCESS:
11315 lpfc_sli4_rq_release(hrq, drq); 11386 lpfc_sli4_rq_release(hrq, drq);
11316 spin_lock_irqsave(&phba->hbalock, iflags); 11387 spin_lock_irqsave(&phba->hbalock, iflags);
11317 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 11388 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
11318 if (!dma_buf) { 11389 if (!dma_buf) {
11390 hrq->RQ_no_buf_found++;
11319 spin_unlock_irqrestore(&phba->hbalock, iflags); 11391 spin_unlock_irqrestore(&phba->hbalock, iflags);
11320 goto out; 11392 goto out;
11321 } 11393 }
11394 hrq->RQ_rcv_buf++;
11322 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 11395 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
11323 /* save off the frame for the worker thread to process */ 11396 /* save off the frame for the worker thread to process */
11324 list_add_tail(&dma_buf->cq_event.list, 11397 list_add_tail(&dma_buf->cq_event.list,
@@ -11330,6 +11403,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11330 break; 11403 break;
11331 case FC_STATUS_INSUFF_BUF_NEED_BUF: 11404 case FC_STATUS_INSUFF_BUF_NEED_BUF:
11332 case FC_STATUS_INSUFF_BUF_FRM_DISC: 11405 case FC_STATUS_INSUFF_BUF_FRM_DISC:
11406 hrq->RQ_no_posted_buf++;
11333 /* Post more buffers if possible */ 11407 /* Post more buffers if possible */
11334 spin_lock_irqsave(&phba->hbalock, iflags); 11408 spin_lock_irqsave(&phba->hbalock, iflags);
11335 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 11409 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
@@ -11367,7 +11441,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11367 case CQE_CODE_COMPL_WQE: 11441 case CQE_CODE_COMPL_WQE:
11368 /* Process the WQ/RQ complete event */ 11442 /* Process the WQ/RQ complete event */
11369 phba->last_completion_time = jiffies; 11443 phba->last_completion_time = jiffies;
11370 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, 11444 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
11371 (struct lpfc_wcqe_complete *)&cqevt); 11445 (struct lpfc_wcqe_complete *)&cqevt);
11372 break; 11446 break;
11373 case CQE_CODE_RELEASE_WQE: 11447 case CQE_CODE_RELEASE_WQE:
@@ -11411,31 +11485,18 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  *
  **/
 static void
-lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+	struct lpfc_queue *speq)
 {
-	struct lpfc_queue *cq = NULL, *childq, *speq;
+	struct lpfc_queue *cq = NULL, *childq;
 	struct lpfc_cqe *cqe;
 	bool workposted = false;
 	int ecount = 0;
 	uint16_t cqid;
 
-	if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0359 Not a valid slow-path completion "
-				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get_le32(lpfc_eqe_major_code, eqe),
-				bf_get_le32(lpfc_eqe_minor_code, eqe));
-		return;
-	}
-
 	/* Get the reference to the corresponding CQ */
 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 
-	/* Search for completion queue pointer matching this cqid */
-	speq = phba->sli4_hba.sp_eq;
-	/* sanity check on queue memory */
-	if (unlikely(!speq))
-		return;
 	list_for_each_entry(childq, &speq->child_list, list) {
 		if (childq->queue_id == cqid) {
 			cq = childq;
@@ -11457,6 +11518,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
 			if (!(++ecount % cq->entry_repost))
 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+			cq->CQ_mbox++;
 		}
 		break;
 	case LPFC_WCQ:
@@ -11470,6 +11532,10 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 			if (!(++ecount % cq->entry_repost))
 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 		}
+
+		/* Track the max number of CQEs processed in 1 EQ */
+		if (ecount > cq->CQ_max_cqe)
+			cq->CQ_max_cqe = ecount;
 		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11494,34 +11560,33 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 
 /**
  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
- * @eqe: Pointer to fast-path completion queue entry.
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
+ * @wcqe: Pointer to work-queue completion queue entry.
  *
  * This routine process a fast-path work queue completion entry from fast-path
  * event queue for FCP command response completion.
  **/
 static void
-lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
+lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 			     struct lpfc_wcqe_complete *wcqe)
 {
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+	struct lpfc_sli_ring *pring = cq->pring;
 	struct lpfc_iocbq *cmdiocbq;
 	struct lpfc_iocbq irspiocbq;
 	unsigned long iflags;
 
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	pring->stats.iocb_event++;
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
-
 	/* Check for response status */
 	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
 		/* If resource errors reported from HBA, reduce queue
 		 * depth of the SCSI device.
 		 */
-		if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
-		     IOSTAT_LOCAL_REJECT) &&
-		    (wcqe->parameter == IOERR_NO_RESOURCES)) {
+		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
+		     IOSTAT_LOCAL_REJECT)) &&
+		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
+		     IOERR_NO_RESOURCES))
 			phba->lpfc_rampdown_queue_depth(phba);
-		}
+
 		/* Log the error status */
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 				"0373 FCP complete error: status=x%x, "
@@ -11534,10 +11599,11 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
 	}
 
 	/* Look up the FCP command IOCB and create pseudo response IOCB */
-	spin_lock_irqsave(&phba->hbalock, iflags);
+	spin_lock_irqsave(&pring->ring_lock, iflags);
+	pring->stats.iocb_event++;
 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 	if (unlikely(!cmdiocbq)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 				"0374 FCP complete with no corresponding "
@@ -11621,17 +11687,20 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	/* Check and process for different type of WCQE and dispatch */
 	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
 	case CQE_CODE_COMPL_WQE:
+		cq->CQ_wq++;
 		/* Process the WQ complete event */
 		phba->last_completion_time = jiffies;
-		lpfc_sli4_fp_handle_fcp_wcqe(phba,
+		lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
 				(struct lpfc_wcqe_complete *)&wcqe);
 		break;
 	case CQE_CODE_RELEASE_WQE:
+		cq->CQ_release_wqe++;
 		/* Process the WQ release event */
 		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
 				(struct lpfc_wcqe_release *)&wcqe);
 		break;
 	case CQE_CODE_XRI_ABORTED:
+		cq->CQ_xri_aborted++;
 		/* Process the WQ XRI abort event */
 		phba->last_completion_time = jiffies;
 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
@@ -11647,7 +11716,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 }
 
 /**
- * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
+ * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
  * @phba: Pointer to HBA context object.
  * @eqe: Pointer to fast-path event queue entry.
  *
@@ -11659,8 +11728,8 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  * completion queue, and then return.
  **/
 static void
-lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
-			uint32_t fcp_cqidx)
+lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+			uint32_t qidx)
 {
 	struct lpfc_queue *cq;
 	struct lpfc_cqe *cqe;
@@ -11670,30 +11739,38 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 
 	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0366 Not a valid fast-path completion "
+				"0366 Not a valid completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
 				bf_get_le32(lpfc_eqe_major_code, eqe),
 				bf_get_le32(lpfc_eqe_minor_code, eqe));
 		return;
 	}
 
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+	/* Check if this is a Slow path event */
+	if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
+		lpfc_sli4_sp_handle_eqe(phba, eqe,
+			phba->sli4_hba.hba_eq[qidx]);
+		return;
+	}
+
 	if (unlikely(!phba->sli4_hba.fcp_cq)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 				"3146 Fast-path completion queues "
 				"does not exist\n");
 		return;
 	}
-	cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
+	cq = phba->sli4_hba.fcp_cq[qidx];
 	if (unlikely(!cq)) {
 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0367 Fast-path completion queue "
-				"(%d) does not exist\n", fcp_cqidx);
+				"(%d) does not exist\n", qidx);
 		return;
 	}
 
-	/* Get the reference to the corresponding CQ */
-	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 	if (unlikely(cqid != cq->queue_id)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0368 Miss-matched fast-path completion "
@@ -11709,6 +11786,10 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 	}
 
+	/* Track the max number of CQEs processed in 1 EQ */
+	if (ecount > cq->CQ_max_cqe)
+		cq->CQ_max_cqe = ecount;
+
 	/* Catch the no cq entry condition */
 	if (unlikely(ecount == 0))
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11737,86 +11818,7 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
 }
 
 /**
- * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
- * @irq: Interrupt number.
- * @dev_id: The device context pointer.
- *
- * This function is directly called from the PCI layer as an interrupt
- * service routine when device with SLI-4 interface spec is enabled with
- * MSI-X multi-message interrupt mode and there are slow-path events in
- * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
- * interrupt mode, this function is called as part of the device-level
- * interrupt handler. When the PCI slot is in error recovery or the HBA is
- * undergoing initialization, the interrupt handler will not process the
- * interrupt. The link attention and ELS ring attention events are handled
- * by the worker thread. The interrupt handler signals the worker thread
- * and returns for these events. This function is called without any lock
- * held. It gets the hbalock to access and update SLI data structures.
- *
- * This function returns IRQ_HANDLED when interrupt is handled else it
- * returns IRQ_NONE.
- **/
-irqreturn_t
-lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
-{
-	struct lpfc_hba *phba;
-	struct lpfc_queue *speq;
-	struct lpfc_eqe *eqe;
-	unsigned long iflag;
-	int ecount = 0;
-
-	/*
-	 * Get the driver's phba structure from the dev_id
-	 */
-	phba = (struct lpfc_hba *)dev_id;
-
-	if (unlikely(!phba))
-		return IRQ_NONE;
-
-	/* Get to the EQ struct associated with this vector */
-	speq = phba->sli4_hba.sp_eq;
-	if (unlikely(!speq))
-		return IRQ_NONE;
-
-	/* Check device state for handling interrupt */
-	if (unlikely(lpfc_intr_state_check(phba))) {
-		/* Check again for link_state with lock held */
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->link_state < LPFC_LINK_DOWN)
-			/* Flush, clear interrupt, and rearm the EQ */
-			lpfc_sli4_eq_flush(phba, speq);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		return IRQ_NONE;
-	}
-
-	/*
-	 * Process all the event on FCP slow-path EQ
-	 */
-	while ((eqe = lpfc_sli4_eq_get(speq))) {
-		lpfc_sli4_sp_handle_eqe(phba, eqe);
-		if (!(++ecount % speq->entry_repost))
-			lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
-	}
-
-	/* Always clear and re-arm the slow-path EQ */
-	lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
-
-	/* Catch the no cq entry condition */
-	if (unlikely(ecount == 0)) {
-		if (phba->intr_type == MSIX)
-			/* MSI-X treated interrupt served as no EQ share INT */
-			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-				"0357 MSI-X interrupt with no EQE\n");
-		else
-			/* Non MSI-X treated on interrupt as EQ share INT */
-			return IRQ_NONE;
-	}
-
-	return IRQ_HANDLED;
-} /* lpfc_sli4_sp_intr_handler */
-
-/**
- * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
+ * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
  * @irq: Interrupt number.
  * @dev_id: The device context pointer.
  *
@@ -11833,11 +11835,16 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
  * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
  * equal to that of FCP CQ index.
  *
+ * The link attention and ELS ring attention events are handled
+ * by the worker thread. The interrupt handler signals the worker thread
+ * and returns for these events. This function is called without any lock
+ * held. It gets the hbalock to access and update SLI data structures.
+ *
  * This function returns IRQ_HANDLED when interrupt is handled else it
  * returns IRQ_NONE.
  **/
 irqreturn_t
-lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
+lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 {
 	struct lpfc_hba *phba;
 	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
@@ -11854,22 +11861,34 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
 
 	if (unlikely(!phba))
 		return IRQ_NONE;
-	if (unlikely(!phba->sli4_hba.fp_eq))
+	if (unlikely(!phba->sli4_hba.hba_eq))
 		return IRQ_NONE;
 
 	/* Get to the EQ struct associated with this vector */
-	fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+	fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
 	if (unlikely(!fpeq))
 		return IRQ_NONE;
 
+	if (lpfc_fcp_look_ahead) {
+		if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
+			lpfc_sli4_eq_clr_intr(fpeq);
+		else {
+			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+			return IRQ_NONE;
+		}
+	}
+
 	/* Check device state for handling interrupt */
 	if (unlikely(lpfc_intr_state_check(phba))) {
+		fpeq->EQ_badstate++;
 		/* Check again for link_state with lock held */
 		spin_lock_irqsave(&phba->hbalock, iflag);
 		if (phba->link_state < LPFC_LINK_DOWN)
 			/* Flush, clear interrupt, and rearm the EQ */
 			lpfc_sli4_eq_flush(phba, fpeq);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		if (lpfc_fcp_look_ahead)
+			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
 		return IRQ_NONE;
 	}
 
@@ -11877,15 +11896,27 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
 	 * Process all the event on FCP fast-path EQ
 	 */
 	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
-		lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
+		lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
 		if (!(++ecount % fpeq->entry_repost))
 			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+		fpeq->EQ_processed++;
 	}
 
+	/* Track the max number of EQEs processed in 1 intr */
+	if (ecount > fpeq->EQ_max_eqe)
+		fpeq->EQ_max_eqe = ecount;
+
 	/* Always clear and re-arm the fast-path EQ */
 	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
 
 	if (unlikely(ecount == 0)) {
+		fpeq->EQ_no_entry++;
+
+		if (lpfc_fcp_look_ahead) {
+			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+			return IRQ_NONE;
+		}
+
 		if (phba->intr_type == MSIX)
 			/* MSI-X treated interrupt served as no EQ share INT */
 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11895,6 +11926,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
 		return IRQ_NONE;
 	}
 
+	if (lpfc_fcp_look_ahead)
+		atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
 	return IRQ_HANDLED;
 } /* lpfc_sli4_fp_intr_handler */
 
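The gate introduced above hinges on atomic_dec_and_test(): fcp_eq_in_use starts at 1 (free), the first claimant moves it to 0 and wins, a concurrent claimant restores the count and backs off, and every exit path gives the count back with atomic_inc(). A minimal user-space sketch of the same idiom, assuming C11 stdatomic; the names below are illustrative, not driver code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int eq_in_use = 1;	/* 1 == EQ free, as fcp_eq_in_use starts */

/* atomic_dec_and_test() analogue: true only for the 1 -> 0 transition */
static bool claim_eq(void)
{
	if (atomic_fetch_sub(&eq_in_use, 1) == 1)
		return true;
	atomic_fetch_add(&eq_in_use, 1);	/* lost the race: restore, back off */
	return false;
}

static void release_eq(void)
{
	atomic_fetch_add(&eq_in_use, 1);	/* matches the atomic_inc() on every exit path */
}

int main(void)
{
	if (claim_eq()) {
		puts("claimed EQ: clear interrupt, poll EQEs, then release");
		release_eq();
	}
	return 0;
}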
@@ -11919,8 +11952,8 @@ irqreturn_t
 lpfc_sli4_intr_handler(int irq, void *dev_id)
 {
 	struct lpfc_hba *phba;
-	irqreturn_t sp_irq_rc, fp_irq_rc;
-	bool fp_handled = false;
+	irqreturn_t hba_irq_rc;
+	bool hba_handled = false;
 	uint32_t fcp_eqidx;
 
 	/* Get the driver's phba structure from the dev_id */
@@ -11930,21 +11963,16 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
 		return IRQ_NONE;
 
 	/*
-	 * Invokes slow-path host attention interrupt handling as appropriate.
-	 */
-	sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
-
-	/*
 	 * Invoke fast-path host attention interrupt handling as appropriate.
 	 */
-	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
-		fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
+		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
 					&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
-		if (fp_irq_rc == IRQ_HANDLED)
-			fp_handled |= true;
+		if (hba_irq_rc == IRQ_HANDLED)
+			hba_handled |= true;
 	}
 
-	return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
+	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
 } /* lpfc_sli4_intr_handler */
 
 /**
@@ -12075,7 +12103,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
 	union lpfc_sli4_cfg_shdr *shdr;
 	uint16_t dmult;
 
-	if (startq >= phba->cfg_fcp_eq_count)
+	if (startq >= phba->cfg_fcp_io_channel)
 		return 0;
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -12089,12 +12117,13 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
 	eq_delay = &mbox->u.mqe.un.eq_delay;
 
 	/* Calculate delay multiper from maximum interrupt per second */
-	dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1;
+	dmult = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
+	dmult = LPFC_DMULT_CONST/dmult - 1;
 
 	cnt = 0;
-	for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count;
+	for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
 	     fcp_eqidx++) {
-		eq = phba->sli4_hba.fp_eq[fcp_eqidx];
+		eq = phba->sli4_hba.hba_eq[fcp_eqidx];
 		if (!eq)
 			continue;
 		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
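The reworked delay math above treats cfg_fcp_imax as a total interrupt budget that is first split across the cfg_fcp_io_channel EQs before the multiplier is derived. A worked example under assumed values; the LPFC_DMULT_CONST value here is an illustrative assumption, not taken from this patch:

#include <stdio.h>

#define LPFC_DMULT_CONST	651042	/* illustrative value for the example */

int main(void)
{
	unsigned int cfg_fcp_imax = 10000;	/* interrupts/sec, total budget */
	unsigned int cfg_fcp_io_channel = 4;	/* EQ/CQ/WQ tuples */
	unsigned int dmult;

	dmult = cfg_fcp_imax / cfg_fcp_io_channel;	/* per-channel rate: 2500 */
	dmult = LPFC_DMULT_CONST / dmult - 1;		/* delay multiplier: 259 */
	printf("per-channel imax=%u -> dmult=%u\n",
	       cfg_fcp_imax / cfg_fcp_io_channel, dmult);
	return 0;
}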
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 2626f58c0747..2f48d000a3b4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -131,7 +131,9 @@ typedef struct lpfcMboxq {
 
 #define LPFC_MAX_RING_MASK	5	/* max num of rctl/type masks allowed per
 					   ring */
-#define LPFC_MAX_RING		4	/* max num of SLI rings used by driver */
+#define LPFC_SLI3_MAX_RING	4	/* Max num of SLI3 rings used by driver.
+					   For SLI4, an additional ring for each
+					   FCP WQ will be allocated. */
 
 struct lpfc_sli_ring;
 
@@ -158,6 +160,24 @@ struct lpfc_sli_ring_stat {
 	uint64_t iocb_rsp_full;	 /* IOCB rsp ring full */
 };
 
+struct lpfc_sli3_ring {
+	uint32_t local_getidx;  /* last available cmd index (from cmdGetInx) */
+	uint32_t next_cmdidx;   /* next_cmd index */
+	uint32_t rspidx;	/* current index in response ring */
+	uint32_t cmdidx;	/* current index in command ring */
+	uint16_t numCiocb;	/* number of command iocb's per ring */
+	uint16_t numRiocb;	/* number of rsp iocb's per ring */
+	uint16_t sizeCiocb;	/* Size of command iocb's in this ring */
+	uint16_t sizeRiocb;	/* Size of response iocb's in this ring */
+	uint32_t *cmdringaddr;	/* virtual address for cmd rings */
+	uint32_t *rspringaddr;	/* virtual address for rsp rings */
+};
+
+struct lpfc_sli4_ring {
+	struct lpfc_queue *wqp;	/* Pointer to associated WQ */
+};
+
+
 /* Structure used to hold SLI ring information */
 struct lpfc_sli_ring {
 	uint16_t flag;		/* ring flags */
@@ -166,16 +186,10 @@ struct lpfc_sli_ring {
 #define LPFC_STOP_IOCB_EVENT	0x020	/* Stop processing IOCB cmds event */
 	uint16_t abtsiotag;	/* tracks next iotag to use for ABTS */
 
-	uint32_t local_getidx;  /* last available cmd index (from cmdGetInx) */
-	uint32_t next_cmdidx;   /* next_cmd index */
-	uint32_t rspidx;	/* current index in response ring */
-	uint32_t cmdidx;	/* current index in command ring */
 	uint8_t rsvd;
 	uint8_t ringno;		/* ring number */
-	uint16_t numCiocb;	/* number of command iocb's per ring */
-	uint16_t numRiocb;	/* number of rsp iocb's per ring */
-	uint16_t sizeCiocb;	/* Size of command iocb's in this ring */
-	uint16_t sizeRiocb;	/* Size of response iocb's in this ring */
+
+	spinlock_t ring_lock;	/* lock for issuing commands */
 
 	uint32_t fast_iotag;	/* max fastlookup based iotag */
 	uint32_t iotag_ctr;	/* keeps track of the next iotag to use */
@@ -186,8 +200,6 @@ struct lpfc_sli_ring {
 	struct list_head txcmplq;
 	uint16_t txcmplq_cnt;	/* current length of queue */
 	uint16_t txcmplq_max;	/* max length */
-	uint32_t *cmdringaddr;	/* virtual address for cmd rings */
-	uint32_t *rspringaddr;	/* virtual address for rsp rings */
 	uint32_t missbufcnt;	/* keep track of buffers to post */
 	struct list_head postbufq;
 	uint16_t postbufq_cnt;	/* current length of queue */
@@ -207,6 +219,10 @@ struct lpfc_sli_ring {
 					 /* cmd ring available */
 	void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
 					struct lpfc_sli_ring *);
+	union {
+		struct lpfc_sli3_ring sli3;
+		struct lpfc_sli4_ring sli4;
+	} sli;
 };
 
 /* Structure used for configuring rings to a specific profile or rctl / type */
@@ -239,6 +255,8 @@ struct lpfc_sli_stat {
 	uint64_t mbox_stat_err;	 /* Mbox cmds completed status error */
 	uint64_t mbox_cmd;	 /* Mailbox commands issued */
 	uint64_t sli_intr;	 /* Count of Host Attention interrupts */
+	uint64_t sli_prev_intr;	 /* Previous cnt of Host Attention interrupts */
+	uint64_t sli_ips;	 /* Host Attention interrupts per sec */
 	uint32_t err_attn_event; /* Error Attn event counters */
 	uint32_t link_event;	 /* Link event counters */
 	uint32_t mbox_event;	 /* Mailbox event counters */
@@ -270,7 +288,7 @@ struct lpfc_sli {
 #define LPFC_MENLO_MAINT	0x1000	/* need for menl fw download */
 #define LPFC_SLI_ASYNC_MBX_BLK	0x2000	/* Async mailbox is blocked */
 
-	struct lpfc_sli_ring ring[LPFC_MAX_RING];
+	struct lpfc_sli_ring *ring;
 	int fcp_ring;	/* ring used for FCP initiator commands */
 	int next_ring;
 
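The net effect of the lpfc_sli.h changes is that SLI3-only ring bookkeeping moves into a union member, so an SLI4 port's ring carries just a pointer to its WQ. A trimmed, stand-alone sketch of the layout; struct and field names are abbreviated from the ones added above, and the surrounding code is hypothetical:

#include <stdint.h>
#include <stdio.h>

struct sli3_ring { uint16_t numCiocb; uint32_t *cmdringaddr; };	/* host-owned ring state */
struct sli4_ring { void *wqp; };	/* would be a struct lpfc_queue * */

struct sli_ring {
	uint8_t ringno;
	union {
		struct sli3_ring sli3;	/* only meaningful on SLI3 ports */
		struct sli4_ring sli4;	/* only meaningful on SLI4 ports */
	} sli;
};

int main(void)
{
	struct sli_ring ring = { .ringno = 0 };

	/* An SLI3 path touches ring.sli.sli3...; an SLI4 path ring.sli.sli4... */
	ring.sli.sli3.numCiocb = 256;
	printf("ring %u: %u cmd iocbs\n",
	       (unsigned)ring.ringno, (unsigned)ring.sli.sli3.numCiocb);
	return 0;
}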
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index ec756118c5c1..bd4bc4342ae2 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -34,18 +34,10 @@
 /* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
 #define LPFC_NEMBED_MBOX_SGL_CNT		254
 
-/* Multi-queue arrangement for fast-path FCP work queues */
-#define LPFC_FN_EQN_MAX       8
-#define LPFC_SP_EQN_DEF       1
-#define LPFC_FP_EQN_DEF       4
-#define LPFC_FP_EQN_MIN       1
-#define LPFC_FP_EQN_MAX       (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
-
-#define LPFC_FN_WQN_MAX       32
-#define LPFC_SP_WQN_DEF       1
-#define LPFC_FP_WQN_DEF       4
-#define LPFC_FP_WQN_MIN       1
-#define LPFC_FP_WQN_MAX       (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
+/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
+#define LPFC_FCP_IO_CHAN_DEF       4
+#define LPFC_FCP_IO_CHAN_MIN       1
+#define LPFC_FCP_IO_CHAN_MAX       8
 
 /*
  * Provide the default FCF Record attributes used by the driver
@@ -141,6 +133,37 @@ struct lpfc_queue {
 	uint32_t page_count;	/* Number of pages allocated for this queue */
 	uint32_t host_index;	/* The host's index for putting or getting */
 	uint32_t hba_index;	/* The last known hba index for get or put */
+
+	struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
+
+	/* For q stats */
+	uint32_t q_cnt_1;
+	uint32_t q_cnt_2;
+	uint32_t q_cnt_3;
+	uint64_t q_cnt_4;
+/* defines for EQ stats */
+#define	EQ_max_eqe		q_cnt_1
+#define	EQ_no_entry		q_cnt_2
+#define	EQ_badstate		q_cnt_3
+#define	EQ_processed		q_cnt_4
+
+/* defines for CQ stats */
+#define	CQ_mbox			q_cnt_1
+#define	CQ_max_cqe		q_cnt_1
+#define	CQ_release_wqe		q_cnt_2
+#define	CQ_xri_aborted		q_cnt_3
+#define	CQ_wq			q_cnt_4
+
+/* defines for WQ stats */
+#define	WQ_overflow		q_cnt_1
+#define	WQ_posted		q_cnt_4
+
+/* defines for RQ stats */
+#define	RQ_no_posted_buf	q_cnt_1
+#define	RQ_no_buf_found		q_cnt_2
+#define	RQ_buf_trunc		q_cnt_3
+#define	RQ_rcv_buf		q_cnt_4
+
 	union sli4_qe qe[1];	/* array to index entries (must be last) */
 };
 
@@ -350,6 +373,7 @@ struct lpfc_hba;
 struct lpfc_fcp_eq_hdl {
 	uint32_t idx;
 	struct lpfc_hba *phba;
+	atomic_t fcp_eq_in_use;
 };
 
 /* Port Capabilities for SLI4 Parameters */
@@ -407,6 +431,8 @@ struct lpfc_sli4_lnk_info {
 	uint8_t lnk_no;
 };
 
+#define LPFC_SLI4_HANDLER_NAME_SZ	16
+
 /* SLI4 HBA data structure entries */
 struct lpfc_sli4_hba {
 	void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -463,20 +489,23 @@ struct lpfc_sli4_hba {
 	struct lpfc_register sli_intf;
 	struct lpfc_pc_sli4_params pc_sli4_params;
 	struct msix_entry *msix_entries;
+	uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
 	uint32_t cfg_eqn;
 	uint32_t msix_vec_nr;
 	struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
+
 	/* Pointers to the constructed SLI4 queues */
-	struct lpfc_queue **fp_eq; /* Fast-path event queue */
-	struct lpfc_queue *sp_eq;  /* Slow-path event queue */
+	struct lpfc_queue **hba_eq;/* Event queues for HBA */
+	struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
 	struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
+	uint16_t *fcp_cq_map;
+
+	struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
+	struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
 	struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
 	struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
 	struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
 	struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
-	struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
-	struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
-	struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
 
 	/* Setup information for various queue parameters */
 	int eq_esize;
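The q_cnt_1..q_cnt_4 fields added to struct lpfc_queue are deliberately generic: the #define aliases give the same four counters EQ, CQ, WQ, or RQ meanings depending on what the queue is, so no extra storage is spent per queue type. A small stand-alone illustration of the aliasing, with a trimmed structure and made-up values:

#include <stdint.h>
#include <stdio.h>

struct queue_stats {
	uint32_t q_cnt_1;
	uint32_t q_cnt_2;
	uint32_t q_cnt_3;
	uint64_t q_cnt_4;
};

/* a subset of the aliases added above */
#define EQ_max_eqe	q_cnt_1		/* meaning when the queue is an EQ */
#define EQ_processed	q_cnt_4
#define CQ_wq		q_cnt_4		/* same storage, CQ meaning */

int main(void)
{
	struct queue_stats eq = { 0 }, cq = { 0 };

	eq.EQ_processed += 5;		/* expands to eq.q_cnt_4 += 5 */
	if (3 > eq.EQ_max_eqe)		/* the max-tracking idiom from the patch */
		eq.EQ_max_eqe = 3;
	cq.CQ_wq++;			/* also q_cnt_4, but on a CQ */
	printf("eq: max=%u processed=%llu, cq: wq=%llu\n",
	       (unsigned)eq.EQ_max_eqe,
	       (unsigned long long)eq.EQ_processed,
	       (unsigned long long)cq.CQ_wq);
	return 0;
}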
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4704e5b5088e..04265a1c4e52 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,11 +18,16 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.32"
+#define LPFC_DRIVER_VERSION "8.3.34"
 #define LPFC_DRIVER_NAME		"lpfc"
+
+/* Used for SLI 2/3 */
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
 
+/* Used for SLI4 */
+#define LPFC_DRIVER_HANDLER_NAME	"lpfc:"
+
 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
 		LPFC_DRIVER_VERSION
 #define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex. All rights reserved."
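With LPFC_DRIVER_HANDLER_NAME as a bare "lpfc:" prefix here and the handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ] array added in lpfc_sli4.h, each SLI4 I/O channel can register its MSI-X vector under a distinct name. A sketch of how the pieces could compose; the exact format string is an assumption, not copied from the driver:

#include <stdio.h>

#define LPFC_DRIVER_HANDLER_NAME	"lpfc:"
#define LPFC_FCP_IO_CHAN_MAX		8
#define LPFC_SLI4_HANDLER_NAME_SZ	16

int main(void)
{
	char handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
	int idx;

	for (idx = 0; idx < LPFC_FCP_IO_CHAN_MAX; idx++) {
		/* one distinct IRQ name per SLI4 I/O channel, e.g. "lpfc:0" */
		snprintf(handler_name[idx], LPFC_SLI4_HANDLER_NAME_SZ,
			 "%s%d", LPFC_DRIVER_HANDLER_NAME, idx);
		printf("%s\n", handler_name[idx]);
	}
	return 0;
}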