author		Linus Torvalds <torvalds@linux-foundation.org>	2012-01-10 13:36:08 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-10 13:36:08 -0500
commit		d04baa157d1b35cbd27c87b4a13111d9675b61f3 (patch)
tree		c46966fbea1c34bed2bd38629ce948d5a088281c
parent		88266917b518e2ca954d85983470592aaaf82993 (diff)
parent		5c41dc3a79150e93e5d050871a10b761be8281a1 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
SCSI updates for post 3.2 merge window

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (67 commits)
  [SCSI] lpfc 8.3.28: Update driver version to 8.3.28
  [SCSI] lpfc 8.3.28: Add Loopback support for SLI4 adapters
  [SCSI] lpfc 8.3.28: Critical Miscellaneous fixes
  [SCSI] Lpfc 8.3.28: FC and SCSI Discovery Fixes
  [SCSI] lpfc 8.3.28: Add support for ABTS failure handling
  [SCSI] lpfc 8.3.28: SLI fixes and added SLI4 support
  [SCSI] lpfc 8.3.28: Miscellaneous fixes in sysfs and mgmt interfaces
  [SCSI] mpt2sas: Removed redundant calling of _scsih_probe_devices() from _scsih_probe
  [SCSI] mac_scsi: Remove obsolete IRQ_FLG_* users
  [SCSI] qla4xxx: Update driver version to 5.02.00-k10
  [SCSI] qla4xxx: check for FW alive before calling chip_reset
  [SCSI] qla4xxx: Fix qla4xxx_dump_buffer to dump buffer correctly
  [SCSI] qla4xxx: Fix the IDC locking mechanism
  [SCSI] qla4xxx: Wait for disable_acb before doing set_acb
  [SCSI] qla4xxx: Don't recover adapter if device state is FAILED
  [SCSI] qla4xxx: fix call trace on rmmod with ql4xdontresethba=1
  [SCSI] qla4xxx: Fix CPU lockups when ql4xdontresethba set
  [SCSI] qla4xxx: Perform context resets in case of context failures.
  [SCSI] iscsi class: export pid of process that created
  [SCSI] mpt2sas: Remove unused duplicate diag_buffer_enable param
  ...
-rw-r--r--	drivers/message/fusion/lsi/mpi_cnfg.h		1
-rw-r--r--	drivers/message/fusion/mptbase.c		7
-rw-r--r--	drivers/message/fusion/mptbase.h		1
-rw-r--r--	drivers/message/fusion/mptsas.c			2
-rw-r--r--	drivers/scsi/be2iscsi/be_main.c			5
-rw-r--r--	drivers/scsi/bfa/bfa_defs.h			4
-rw-r--r--	drivers/scsi/bfa/bfa_defs_svc.h			437
-rw-r--r--	drivers/scsi/bfa/bfa_ioc.c			6
-rw-r--r--	drivers/scsi/bfa/bfad_debugfs.c			3
-rw-r--r--	drivers/scsi/device_handler/scsi_dh.c		58
-rw-r--r--	drivers/scsi/device_handler/scsi_dh_emc.c	19
-rw-r--r--	drivers/scsi/device_handler/scsi_dh_hp_sw.c	19
-rw-r--r--	drivers/scsi/device_handler/scsi_dh_rdac.c	19
-rw-r--r--	drivers/scsi/hpsa.c				6
-rw-r--r--	drivers/scsi/lpfc/lpfc.h			14
-rw-r--r--	drivers/scsi/lpfc/lpfc_attr.c			436
-rw-r--r--	drivers/scsi/lpfc/lpfc_bsg.c			432
-rw-r--r--	drivers/scsi/lpfc/lpfc_bsg.h			3
-rw-r--r--	drivers/scsi/lpfc/lpfc_compat.h			5
-rw-r--r--	drivers/scsi/lpfc/lpfc_crtn.h			13
-rw-r--r--	drivers/scsi/lpfc/lpfc_debugfs.c		172
-rw-r--r--	drivers/scsi/lpfc/lpfc_els.c			214
-rw-r--r--	drivers/scsi/lpfc/lpfc_hbadisc.c		157
-rw-r--r--	drivers/scsi/lpfc/lpfc_hw.h			21
-rw-r--r--	drivers/scsi/lpfc/lpfc_hw4.h			11
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c			338
-rw-r--r--	drivers/scsi/lpfc/lpfc_mbox.c			25
-rw-r--r--	drivers/scsi/lpfc/lpfc_mem.c			4
-rw-r--r--	drivers/scsi/lpfc/lpfc_nportdisc.c		10
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c			17
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.h			5
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.c			704
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli4.h			11
-rw-r--r--	drivers/scsi/lpfc/lpfc_version.h		2
-rw-r--r--	drivers/scsi/lpfc/lpfc_vport.c			6
-rw-r--r--	drivers/scsi/mac_scsi.c				3
-rw-r--r--	drivers/scsi/mpt2sas/mpi/mpi2.h			10
-rw-r--r--	drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h		28
-rw-r--r--	drivers/scsi/mpt2sas/mpi/mpi2_ioc.h		49
-rw-r--r--	drivers/scsi/mpt2sas/mpi/mpi2_raid.h		67
-rw-r--r--	drivers/scsi/mpt2sas/mpi/mpi2_tool.h		9
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_base.c		205
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_base.h		26
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_ctl.c		8
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_scsih.c		168
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_transport.c	9
-rw-r--r--	drivers/scsi/qla2xxx/qla_attr.c			10
-rw-r--r--	drivers/scsi/qla2xxx/qla_bsg.c			20
-rw-r--r--	drivers/scsi/qla2xxx/qla_dbg.c			310
-rw-r--r--	drivers/scsi/qla2xxx/qla_dbg.h			19
-rw-r--r--	drivers/scsi/qla2xxx/qla_def.h			6
-rw-r--r--	drivers/scsi/qla2xxx/qla_gbl.h			2
-rw-r--r--	drivers/scsi/qla2xxx/qla_gs.c			2
-rw-r--r--	drivers/scsi/qla2xxx/qla_init.c			95
-rw-r--r--	drivers/scsi/qla2xxx/qla_iocb.c			641
-rw-r--r--	drivers/scsi/qla2xxx/qla_isr.c			253
-rw-r--r--	drivers/scsi/qla2xxx/qla_mbx.c			2
-rw-r--r--	drivers/scsi/qla2xxx/qla_nx.c			510
-rw-r--r--	drivers/scsi/qla2xxx/qla_os.c			363
-rw-r--r--	drivers/scsi/qla2xxx/qla_sup.c			5
-rw-r--r--	drivers/scsi/qla4xxx/ql4_dbg.c			6
-rw-r--r--	drivers/scsi/qla4xxx/ql4_def.h			1
-rw-r--r--	drivers/scsi/qla4xxx/ql4_isr.c			30
-rw-r--r--	drivers/scsi/qla4xxx/ql4_nx.c			23
-rw-r--r--	drivers/scsi/qla4xxx/ql4_os.c			127
-rw-r--r--	drivers/scsi/qla4xxx/ql4_version.h		2
-rw-r--r--	drivers/scsi/scsi_priv.h			1
-rw-r--r--	drivers/scsi/scsi_transport_iscsi.c		21
-rw-r--r--	include/scsi/scsi_device.h			1
-rw-r--r--	include/scsi/scsi_transport_iscsi.h		5
70 files changed, 3623 insertions, 2601 deletions
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 22027e7946f7..d9bcfba6b049 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -583,6 +583,7 @@ typedef struct _MSG_CONFIG_REPLY
 #define MPI_MANUFACTPAGE_DEVID_SAS1066E		(0x005A)
 #define MPI_MANUFACTPAGE_DEVID_SAS1068		(0x0054)
 #define MPI_MANUFACTPAGE_DEVID_SAS1068E		(0x0058)
+#define MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP	(0x0059)
 #define MPI_MANUFACTPAGE_DEVID_SAS1078		(0x0062)
 
 
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index e9c6a6047a00..a7dc4672d996 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -115,7 +115,8 @@ module_param(mpt_fwfault_debug, int, 0600);
 MODULE_PARM_DESC(mpt_fwfault_debug,
		 "Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
 
-static char	MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS][50];
+static char	MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
+				[MPT_MAX_CALLBACKNAME_LEN+1];
 
 #ifdef MFCNT
 static int mfcounter = 0;
@@ -717,8 +718,8 @@ mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
 			MptDriverClass[cb_idx] = dclass;
 			MptEvHandlers[cb_idx] = NULL;
 			last_drv_idx = cb_idx;
-			memcpy(MptCallbacksName[cb_idx], func_name,
-				strlen(func_name) > 50 ? 50 : strlen(func_name));
+			strlcpy(MptCallbacksName[cb_idx], func_name,
+				MPT_MAX_CALLBACKNAME_LEN+1);
 			break;
 		}
 	}
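
Aside (editorial, not part of the patch): the memcpy() being replaced bounded the copy at 50 bytes into a 50-byte array, so a name of 50 or more characters was stored without a terminating NUL; strlcpy() copies at most size-1 bytes and always terminates. A minimal userspace sketch of that contract (strlcpy_demo and the buffer size are illustrative):

#include <stdio.h>
#include <string.h>

/* Same contract as the kernel's strlcpy(): copy at most size-1 bytes,
 * always NUL-terminate dst, return the full length of src. */
static size_t strlcpy_demo(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';	/* the old bounded memcpy() skipped this */
	}
	return len;
}

int main(void)
{
	char name[50];

	/* A source of 50+ characters no longer leaves name[] unterminated. */
	strlcpy_demo(name, "a_rather_long_protocol_driver_callback_function_name",
		     sizeof(name));
	printf("%s\n", name);	/* truncated but safely terminated */
	return 0;
}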
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b4d24dc081ae..76c05bc24cb7 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -89,6 +89,7 @@
  */
 #define MPT_MAX_ADAPTERS		18
 #define MPT_MAX_PROTOCOL_DRIVERS	16
+#define MPT_MAX_CALLBACKNAME_LEN	49
 #define MPT_MAX_BUS			1	/* Do not change */
 #define MPT_MAX_FC_DEVICES		255
 #define MPT_MAX_SCSI_DEVICES		16
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 9d9504298549..551262e4b96e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -5376,6 +5376,8 @@ static struct pci_device_id mptsas_pci_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID },
 	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1078,
 		PCI_ANY_ID, PCI_ANY_ID },
+	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP,
+		PCI_ANY_ID, PCI_ANY_ID },
 	{0}	/* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 797a43994b55..375756fa95cf 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1105,7 +1105,6 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
 	struct be_status_bhs *sts_bhs =
 				(struct be_status_bhs *)io_task->cmd_bhs;
 	struct iscsi_conn *conn = beiscsi_conn->conn;
-	unsigned int sense_len;
 	unsigned char *sense;
 	u32 resid = 0, exp_cmdsn, max_cmdsn;
 	u8 rsp, status, flags;
@@ -1153,9 +1152,11 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
 	}
 
 	if (status == SAM_STAT_CHECK_CONDITION) {
+		u16 sense_len;
 		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
+
 		sense = sts_bhs->sense_info + sizeof(unsigned short);
-		sense_len = cpu_to_be16(*slen);
+		sense_len = be16_to_cpu(*slen);
 		memcpy(task->sc->sense_buffer, sense,
 		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
 	}
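
Aside (editorial): the sense length in the status BHS arrives in big-endian wire order, so be16_to_cpu() states the conversion in the right direction; cpu_to_be16() happens to yield the same value on either endianness here, but it declares the opposite intent and trips endianness annotation tools such as sparse. A small userspace sketch of the intended parse, with ntohs() standing in for be16_to_cpu() (buffer contents are illustrative):

#include <arpa/inet.h>	/* ntohs(): userspace stand-in for be16_to_cpu() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* First two bytes carry the sense length, big-endian on the wire. */
	unsigned char sense_info[4] = { 0x00, 0x02, 0x70, 0x00 };
	uint16_t be_len;

	memcpy(&be_len, sense_info, sizeof(be_len));	/* avoid unaligned load */
	printf("sense_len = %u\n", (unsigned)ntohs(be_len));	/* 2 on any host */
	return 0;
}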
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index 7b3d235d20b4..b5a1595cc0a5 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -902,7 +902,7 @@ struct sfp_mem_s {
 union sfp_xcvr_e10g_code_u {
 	u8		b;
 	struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 		u8	e10g_unall:1;	/* 10G Ethernet compliance */
 		u8	e10g_lrm:1;
 		u8	e10g_lr:1;
@@ -982,7 +982,7 @@ union sfp_xcvr_fc2_code_u {
 union sfp_xcvr_fc3_code_u {
 	u8		b;
 	struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 		u8	rsv4:1;
 		u8	mb800:1;	/* 800 Mbytes/sec */
 		u8	mb1600:1;	/* 1600 Mbytes/sec */
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 863c6ba7d5eb..78963be2c4fb 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -34,22 +34,22 @@
 struct bfa_iocfc_intr_attr_s {
 	u8		coalesce;	/* enable/disable coalescing */
 	u8		rsvd[3];
 	__be16		latency;	/* latency in microseconds */
 	__be16		delay;		/* delay in microseconds */
 };
 
 /*
  * IOC firmware configuraton
  */
 struct bfa_iocfc_fwcfg_s {
 	u16		num_fabrics;	/* number of fabrics */
 	u16		num_lports;	/* number of local lports */
 	u16		num_rports;	/* number of remote ports */
 	u16		num_ioim_reqs;	/* number of IO reqs */
 	u16		num_tskim_reqs;	/* task management requests */
 	u16		num_fwtio_reqs;	/* number of TM IO reqs in FW */
 	u16		num_fcxp_reqs;	/* unassisted FC exchanges */
 	u16		num_uf_bufs;	/* unsolicited recv buffers */
 	u8		num_cqs;
 	u8		fw_tick_res;	/* FW clock resolution in ms */
 	u8		rsvd[2];
@@ -57,19 +57,19 @@ struct bfa_iocfc_fwcfg_s {
 #pragma pack()
 
 struct bfa_iocfc_drvcfg_s {
 	u16		num_reqq_elems;	/* number of req queue elements */
 	u16		num_rspq_elems;	/* number of rsp queue elements */
 	u16		num_sgpgs;	/* number of total SG pages */
 	u16		num_sboot_tgts;	/* number of SAN boot targets */
 	u16		num_sboot_luns;	/* number of SAN boot luns */
 	u16		ioc_recover;	/* IOC recovery mode */
 	u16		min_cfg;	/* minimum configuration */
 	u16		path_tov;	/* device path timeout */
-	u16		num_tio_reqs;	/*!< number of TM IO reqs	*/
+	u16		num_tio_reqs;	/* number of TM IO reqs */
 	u8		port_mode;
 	u8		rsvd_a;
-	bfa_boolean_t	delay_comp;	/* delay completion of
-					   failed inflight IOs */
+	bfa_boolean_t	delay_comp;	/* delay completion of failed
+					 * inflight IOs */
 	u16		num_ttsk_reqs;	/* TM task management requests */
 	u32		rsvd;
 };
@@ -101,8 +101,8 @@ struct bfa_fw_ioim_stats_s {
 	u32	fw_frm_drop;		/* f/w drop the frame */
 
 	u32	rec_timeout;		/* FW rec timed out */
 	u32	error_rec;		/* FW sending rec on
 					 * an error condition*/
 	u32	wait_for_si;		/* FW wait for SI */
 	u32	rec_rsp_inval;		/* REC rsp invalid */
 	u32	seqr_io_abort;		/* target does not know cmd so abort */
@@ -124,9 +124,9 @@ struct bfa_fw_ioim_stats_s {
 	u32	unexp_fcp_rsp;		/* fcp response in wrong state */
 
 	u32	fcp_rsp_under_run;	/* fcp rsp IO underrun */
 	u32	fcp_rsp_under_run_wr;	/* fcp rsp IO underrun for write */
 	u32	fcp_rsp_under_run_err;	/* fcp rsp IO underrun error */
 	u32	fcp_rsp_resid_inval;	/* invalid residue */
 	u32	fcp_rsp_over_run;	/* fcp rsp IO overrun */
 	u32	fcp_rsp_over_run_err;	/* fcp rsp IO overrun error */
 	u32	fcp_rsp_proto_err;	/* protocol error in fcp rsp */
@@ -142,21 +142,20 @@ struct bfa_fw_ioim_stats_s {
 	u32	ioh_hit_class2_event;	/* IOH hit class2 */
 	u32	ioh_miss_other_event;	/* IOH miss other */
 	u32	ioh_seq_cnt_err_event;	/* IOH seq cnt error */
 	u32	ioh_len_err_event;	/* IOH len error - fcp_dl !=
 					 * bytes xfered */
 	u32	ioh_seq_len_err_event;	/* IOH seq len error */
 	u32	ioh_data_oor_event;	/* Data out of range */
 	u32	ioh_ro_ooo_event;	/* Relative offset out of range */
 	u32	ioh_cpu_owned_event;	/* IOH hit -iost owned by f/w */
 	u32	ioh_unexp_frame_event;	/* unexpected frame received
 					 * count */
-	u32	ioh_err_int;		/* IOH error int during data-phase
-					 * for scsi write
-					 */
+	u32	ioh_err_int;		/* IOH error int during data-phase
+					 * for scsi write */
 };
 
 struct bfa_fw_tio_stats_s {
 	u32	tio_conf_proc;		/* TIO CONF processed */
 	u32	tio_conf_drop;		/* TIO CONF dropped */
 	u32	tio_cleanup_req;	/* TIO cleanup requested */
 	u32	tio_cleanup_comp;	/* TIO cleanup completed */
@@ -164,34 +163,36 @@ struct bfa_fw_tio_stats_s {
 	u32	tio_abort_rsp_comp;	/* TIO abort rsp completed */
 	u32	tio_abts_req;		/* TIO ABTS requested */
 	u32	tio_abts_ack;		/* TIO ABTS ack-ed */
 	u32	tio_abts_ack_nocomp;	/* TIO ABTS ack-ed but not completed */
 	u32	tio_abts_tmo;		/* TIO ABTS timeout */
 	u32	tio_snsdata_dma;	/* TIO sense data DMA */
 	u32	tio_rxwchan_wait;	/* TIO waiting for RX wait channel */
 	u32	tio_rxwchan_avail;	/* TIO RX wait channel available */
 	u32	tio_hit_bls;		/* TIO IOH BLS event */
 	u32	tio_uf_recv;		/* TIO received UF */
 	u32	tio_rd_invalid_sm;	/* TIO read reqst in wrong state machine */
 	u32	tio_wr_invalid_sm;	/* TIO write reqst in wrong state machine */
 
 	u32	ds_rxwchan_wait;	/* DS waiting for RX wait channel */
 	u32	ds_rxwchan_avail;	/* DS RX wait channel available */
 	u32	ds_unaligned_rd;	/* DS unaligned read */
-	u32	ds_rdcomp_invalid_sm;	/* DS read completed in wrong state machine */
-	u32	ds_wrcomp_invalid_sm;	/* DS write completed in wrong state machine */
+	u32	ds_rdcomp_invalid_sm;	/* DS read completed in wrong state
+					 * machine */
+	u32	ds_wrcomp_invalid_sm;	/* DS write completed in wrong state
+					 * machine */
 	u32	ds_flush_req;		/* DS flush requested */
 	u32	ds_flush_comp;		/* DS flush completed */
 	u32	ds_xfrdy_exp;		/* DS XFER_RDY expired */
 	u32	ds_seq_cnt_err;		/* DS seq cnt error */
 	u32	ds_seq_len_err;		/* DS seq len error */
 	u32	ds_data_oor;		/* DS data out of order */
 	u32	ds_hit_bls;		/* DS hit BLS */
 	u32	ds_edtov_timer_exp;	/* DS edtov expired */
 	u32	ds_cpu_owned;		/* DS cpu owned */
 	u32	ds_hit_class2;		/* DS hit class2 */
 	u32	ds_length_err;		/* DS length error */
 	u32	ds_ro_ooo_err;		/* DS relative offset out-of-order error */
 	u32	ds_rectov_timer_exp;	/* DS rectov expired */
 	u32	ds_unexp_fr_err;	/* DS unexp frame error */
 };
 
@@ -208,119 +209,119 @@ struct bfa_fw_io_stats_s {
  */
 
 struct bfa_fw_port_fpg_stats_s {
 	u32	intr_evt;
 	u32	intr;
 	u32	intr_excess;
 	u32	intr_cause0;
 	u32	intr_other;
 	u32	intr_other_ign;
 	u32	sig_lost;
 	u32	sig_regained;
 	u32	sync_lost;
 	u32	sync_to;
 	u32	sync_regained;
 	u32	div2_overflow;
 	u32	div2_underflow;
 	u32	efifo_overflow;
 	u32	efifo_underflow;
 	u32	idle_rx;
 	u32	lrr_rx;
 	u32	lr_rx;
 	u32	ols_rx;
 	u32	nos_rx;
 	u32	lip_rx;
 	u32	arbf0_rx;
 	u32	arb_rx;
 	u32	mrk_rx;
 	u32	const_mrk_rx;
 	u32	prim_unknown;
 };
 
 
 struct bfa_fw_port_lksm_stats_s {
 	u32	hwsm_success;	/* hwsm state machine success */
 	u32	hwsm_fails;	/* hwsm fails */
 	u32	hwsm_wdtov;	/* hwsm timed out */
 	u32	swsm_success;	/* swsm success */
 	u32	swsm_fails;	/* swsm fails */
 	u32	swsm_wdtov;	/* swsm timed out */
 	u32	busybufs;	/* link init failed due to busybuf */
 	u32	buf_waits;	/* bufwait state entries */
 	u32	link_fails;	/* link failures */
 	u32	psp_errors;	/* primitive sequence protocol errors */
 	u32	lr_unexp;	/* No. of times LR rx-ed unexpectedly */
 	u32	lrr_unexp;	/* No. of times LRR rx-ed unexpectedly */
 	u32	lr_tx;		/* No. of times LR tx started */
 	u32	lrr_tx;		/* No. of times LRR tx started */
 	u32	ols_tx;		/* No. of times OLS tx started */
 	u32	nos_tx;		/* No. of times NOS tx started */
 	u32	hwsm_lrr_rx;	/* No. of times LRR rx-ed by HWSM */
 	u32	hwsm_lr_rx;	/* No. of times LR rx-ed by HWSM */
 	u32	bbsc_lr;	/* LKSM LR tx for credit recovery */
 };
 
 struct bfa_fw_port_snsm_stats_s {
 	u32	hwsm_success;	/* Successful hwsm terminations */
 	u32	hwsm_fails;	/* hwsm fail count */
 	u32	hwsm_wdtov;	/* hwsm timed out */
 	u32	swsm_success;	/* swsm success */
 	u32	swsm_wdtov;	/* swsm timed out */
 	u32	error_resets;	/* error resets initiated by upsm */
 	u32	sync_lost;	/* Sync loss count */
 	u32	sig_lost;	/* Signal loss count */
 	u32	asn8g_attempts;	/* SNSM HWSM at 8Gbps attempts */
 };
 
 struct bfa_fw_port_physm_stats_s {
 	u32	module_inserts;	/* Module insert count */
 	u32	module_xtracts;	/* Module extracts count */
 	u32	module_invalids; /* Invalid module inserted count */
 	u32	module_read_ign; /* Module validation status ignored */
 	u32	laser_faults;	/* Laser fault count */
 	u32	rsvd;
 };
 
 struct bfa_fw_fip_stats_s {
 	u32	vlan_req;	/* vlan discovery requests */
 	u32	vlan_notify;	/* vlan notifications */
 	u32	vlan_err;	/* vlan response error */
 	u32	vlan_timeouts;	/* vlan disvoery timeouts */
 	u32	vlan_invalids;	/* invalid vlan in discovery advert. */
 	u32	disc_req;	/* Discovery solicit requests */
 	u32	disc_rsp;	/* Discovery solicit response */
 	u32	disc_err;	/* Discovery advt. parse errors */
 	u32	disc_unsol;	/* Discovery unsolicited */
 	u32	disc_timeouts;	/* Discovery timeouts */
 	u32	disc_fcf_unavail; /* Discovery FCF Not Avail. */
 	u32	linksvc_unsupp;	/* Unsupported link service req */
 	u32	linksvc_err;	/* Parse error in link service req */
 	u32	logo_req;	/* FIP logos received */
 	u32	clrvlink_req;	/* Clear virtual link req */
 	u32	op_unsupp;	/* Unsupported FIP operation */
 	u32	untagged;	/* Untagged frames (ignored) */
 	u32	invalid_version; /* Invalid FIP version */
 };
 
 struct bfa_fw_lps_stats_s {
 	u32	mac_invalids;	/* Invalid mac assigned */
 	u32	rsvd;
 };
 
 struct bfa_fw_fcoe_stats_s {
 	u32	cee_linkups;	/* CEE link up count */
 	u32	cee_linkdns;	/* CEE link down count */
 	u32	fip_linkups;	/* FIP link up count */
 	u32	fip_linkdns;	/* FIP link up count */
 	u32	fip_fails;	/* FIP fail count */
 	u32	mac_invalids;	/* Invalid mac assigned */
 };
 
 /*
  * IOC firmware FCoE port stats
  */
 struct bfa_fw_fcoe_port_stats_s {
 	struct bfa_fw_fcoe_stats_s	fcoe_stats;
 	struct bfa_fw_fip_stats_s	fip_stats;
 };
 
 /*
@@ -335,8 +336,8 @@ struct bfa_fw_fc_uport_stats_s {
  * IOC firmware FC port stats
  */
 union bfa_fw_fc_port_stats_s {
 	struct bfa_fw_fc_uport_stats_s	fc_stats;
 	struct bfa_fw_fcoe_port_stats_s	fcoe_stats;
 };
 
 /*
@@ -366,25 +367,25 @@ struct bfa_fw_lpsm_stats_s {
  */
 struct bfa_fw_trunk_stats_s {
 	u32	emt_recvd;	/* Trunk EMT received */
 	u32	emt_accepted;	/* Trunk EMT Accepted */
 	u32	emt_rejected;	/* Trunk EMT rejected */
 	u32	etp_recvd;	/* Trunk ETP received */
 	u32	etp_accepted;	/* Trunk ETP Accepted */
 	u32	etp_rejected;	/* Trunk ETP rejected */
 	u32	lr_recvd;	/* Trunk LR received */
 	u32	rsvd;		/* padding for 64 bit alignment */
 };
 
 struct bfa_fw_advsm_stats_s {
 	u32	flogi_sent;	/* Flogi sent */
 	u32	flogi_acc_recvd; /* Flogi Acc received */
 	u32	flogi_rjt_recvd; /* Flogi rejects received */
 	u32	flogi_retries;	/* Flogi retries */
 
 	u32	elp_recvd;	/* ELP received */
 	u32	elp_accepted;	/* ELP Accepted */
 	u32	elp_rejected;	/* ELP rejected */
 	u32	elp_dropped;	/* ELP dropped */
 };
 
 /*
@@ -521,7 +522,7 @@ struct bfa_qos_vc_attr_s {
 	u16	total_vc_count;	/* Total VC Count */
 	u16	shared_credit;
 	u32	elp_opmode_flags;
 	struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
 							   * total_vc_count */
 };
 
@@ -531,16 +532,16 @@ struct bfa_qos_vc_attr_s {
 struct bfa_qos_stats_s {
 	u32	flogi_sent;		/* QoS Flogi sent */
 	u32	flogi_acc_recvd;	/* QoS Flogi Acc received */
 	u32	flogi_rjt_recvd;	/* QoS Flogi rejects received */
 	u32	flogi_retries;		/* QoS Flogi retries */
 
 	u32	elp_recvd;		/* QoS ELP received */
 	u32	elp_accepted;		/* QoS ELP Accepted */
 	u32	elp_rejected;		/* QoS ELP rejected */
 	u32	elp_dropped;		/* QoS ELP dropped */
 
 	u32	qos_rscn_recvd;		/* QoS RSCN received */
 	u32	rsvd;			/* padding for 64 bit alignment */
 };
 
 /*
@@ -548,9 +549,9 @@ struct bfa_qos_stats_s {
  */
 struct bfa_fcoe_stats_s {
 	u64	secs_reset;	/* Seconds since stats reset */
 	u64	cee_linkups;	/* CEE link up */
 	u64	cee_linkdns;	/* CEE link down */
 	u64	fip_linkups;	/* FIP link up */
 	u64	fip_linkdns;	/* FIP link down */
 	u64	fip_fails;	/* FIP failures */
 	u64	mac_invalids;	/* Invalid mac assignments */
@@ -560,38 +561,38 @@ struct bfa_fcoe_stats_s {
 	u64	vlan_timeouts;	/* Vlan request timeouts */
 	u64	vlan_invalids;	/* Vlan invalids */
 	u64	disc_req;	/* Discovery requests */
 	u64	disc_rsp;	/* Discovery responses */
 	u64	disc_err;	/* Discovery error frames */
 	u64	disc_unsol;	/* Discovery unsolicited */
 	u64	disc_timeouts;	/* Discovery timeouts */
 	u64	disc_fcf_unavail; /* Discovery FCF not avail */
-	u64	linksvc_unsupp;	/* FIP link service req unsupp. */
+	u64	linksvc_unsupp;	/* FIP link service req unsupp */
 	u64	linksvc_err;	/* FIP link service req errors */
 	u64	logo_req;	/* FIP logos received */
 	u64	clrvlink_req;	/* Clear virtual link requests */
 	u64	op_unsupp;	/* FIP operation unsupp. */
 	u64	untagged;	/* FIP untagged frames */
 	u64	txf_ucast;	/* Tx FCoE unicast frames */
 	u64	txf_ucast_vlan;	/* Tx FCoE unicast vlan frames */
 	u64	txf_ucast_octets; /* Tx FCoE unicast octets */
 	u64	txf_mcast;	/* Tx FCoE multicast frames */
 	u64	txf_mcast_vlan;	/* Tx FCoE multicast vlan frames */
 	u64	txf_mcast_octets; /* Tx FCoE multicast octets */
 	u64	txf_bcast;	/* Tx FCoE broadcast frames */
 	u64	txf_bcast_vlan;	/* Tx FCoE broadcast vlan frames */
 	u64	txf_bcast_octets; /* Tx FCoE broadcast octets */
 	u64	txf_timeout;	/* Tx timeouts */
 	u64	txf_parity_errors; /* Transmit parity err */
 	u64	txf_fid_parity_errors; /* Transmit FID parity err */
 	u64	rxf_ucast_octets; /* Rx FCoE unicast octets */
 	u64	rxf_ucast;	/* Rx FCoE unicast frames */
 	u64	rxf_ucast_vlan;	/* Rx FCoE unicast vlan frames */
 	u64	rxf_mcast_octets; /* Rx FCoE multicast octets */
 	u64	rxf_mcast;	/* Rx FCoE multicast frames */
 	u64	rxf_mcast_vlan;	/* Rx FCoE multicast vlan frames */
 	u64	rxf_bcast_octets; /* Rx FCoE broadcast octets */
 	u64	rxf_bcast;	/* Rx FCoE broadcast frames */
 	u64	rxf_bcast_vlan;	/* Rx FCoE broadcast vlan frames */
 };
 
 /*
@@ -852,12 +853,12 @@ struct bfa_port_cfg_s {
 	u8	tx_bbcredit;	/* transmit buffer credits */
 	u8	ratelimit;	/* ratelimit enabled or not */
 	u8	trl_def_speed;	/* ratelimit default speed */
 	u8	bb_scn;		/* BB_SCN value from FLOGI Exchg */
 	u8	bb_scn_state;	/* Config state of BB_SCN */
 	u8	faa_state;	/* FAA enabled/disabled */
 	u8	rsvd[1];
 	u16	path_tov;	/* device path timeout */
 	u16	q_depth;	/* SCSI Queue depth */
 };
 #pragma pack()
 
@@ -868,20 +869,21 @@ struct bfa_port_attr_s {
 	/*
 	 * Static fields
 	 */
 	wwn_t			nwwn;		/* node wwn */
 	wwn_t			pwwn;		/* port wwn */
 	wwn_t			factorynwwn;	/* factory node wwn */
 	wwn_t			factorypwwn;	/* factory port wwn */
-	enum fc_cos		cos_supported;	/* supported class of services */
-	u32			rsvd;
+	enum fc_cos		cos_supported;	/* supported class of
+						 * services */
+	u32			rsvd;
 	struct fc_symname_s	port_symname;	/* port symbolic name */
 	enum bfa_port_speed	speed_supported; /* supported speeds */
 	bfa_boolean_t		pbind_enabled;
 
 	/*
 	 * Configured values
 	 */
 	struct bfa_port_cfg_s	pport_cfg;	/* pport cfg */
 
 	/*
 	 * Dynamic field - info from BFA
@@ -890,19 +892,20 @@ struct bfa_port_attr_s {
 	enum bfa_port_speed	speed;		/* current speed */
 	enum bfa_port_topology	topology;	/* current topology */
 	bfa_boolean_t		beacon;		/* current beacon status */
 	bfa_boolean_t		link_e2e_beacon; /* link beacon is on */
-	bfa_boolean_t		bbsc_op_status;	/* fc credit recovery oper state */
+	bfa_boolean_t		bbsc_op_status;	/* fc credit recovery oper
+						 * state */
 
 	/*
 	 * Dynamic field - info from FCS
 	 */
 	u32			pid;		/* port ID */
 	enum bfa_port_type	port_type;	/* current topology */
 	u32			loopback;	/* external loopback */
 	u32			authfail;	/* auth fail state */
 
 	/* FCoE specific */
 	u16			fcoe_vlan;
 	u8			rsvd1[2];
 };
 
@@ -910,48 +913,48 @@ struct bfa_port_attr_s {
  * Port FCP mappings.
  */
 struct bfa_port_fcpmap_s {
 	char	osdevname[256];
 	u32	bus;
 	u32	target;
 	u32	oslun;
 	u32	fcid;
 	wwn_t	nwwn;
 	wwn_t	pwwn;
 	u64	fcplun;
 	char	luid[256];
 };
 
 /*
  * Port RNID info.
  */
 struct bfa_port_rnid_s {
 	wwn_t	wwn;
 	u32	unittype;
 	u32	portid;
 	u32	attached_nodes_num;
 	u16	ip_version;
 	u16	udp_port;
 	u8	ipaddr[16];
 	u16	rsvd;
 	u16	topologydiscoveryflags;
 };
 
 #pragma pack(1)
 struct bfa_fcport_fcf_s {
 	wwn_t	name;		/* FCF name */
 	wwn_t	fabric_name;	/* Fabric Name */
 	u8	fipenabled;	/* FIP enabled or not */
 	u8	fipfailed;	/* FIP failed or not */
 	u8	resv[2];
 	u8	pri;		/* FCF priority */
 	u8	version;	/* FIP version used */
 	u8	available;	/* Available for login */
 	u8	fka_disabled;	/* FKA is disabled */
 	u8	maxsz_verified;	/* FCoE max size verified */
 	u8	fc_map[3];	/* FC map */
 	__be16	vlan;		/* FCoE vlan tag/priority */
 	u32	fka_adv_per;	/* FIP ka advert. period */
 	mac_t	mac;		/* FCF mac */
 };
 
 /*
@@ -981,7 +984,7 @@ struct bfa_port_link_s {
 	u8	linkstate_rsn;	/* bfa_port_linkstate_rsn_t */
 	u8	topology;	/* P2P/LOOP bfa_port_topology */
 	u8	speed;		/* Link speed (1/2/4/8 G) */
 	u32	linkstate_opt;	/* Linkstate optional data (debug) */
 	u8	trunked;	/* Trunked or not (1 or 0) */
 	u8	resvd[3];
 	struct bfa_qos_attr_s	qos_attr;	/* QoS Attributes */
@@ -1035,7 +1038,7 @@ struct bfa_rport_hal_stats_s {
 	u32	sm_fwc_del;	/* fw create: delete events */
 	u32	sm_fwc_off;	/* fw create: offline events */
 	u32	sm_fwc_hwf;	/* fw create: IOC down */
 	u32	sm_fwc_unexp;	/* fw create: exception events*/
 	u32	sm_on_off;	/* online: offline events */
 	u32	sm_on_del;	/* online: delete events */
 	u32	sm_on_hwf;	/* online: IOC down events */
@@ -1043,25 +1046,25 @@ struct bfa_rport_hal_stats_s {
 	u32	sm_fwd_rsp;	/* fw delete: fw responses */
 	u32	sm_fwd_del;	/* fw delete: delete events */
 	u32	sm_fwd_hwf;	/* fw delete: IOC down events */
 	u32	sm_fwd_unexp;	/* fw delete: exception events*/
 	u32	sm_off_del;	/* offline: delete events */
 	u32	sm_off_on;	/* offline: online events */
 	u32	sm_off_hwf;	/* offline: IOC down events */
 	u32	sm_off_unexp;	/* offline: exception events */
 	u32	sm_del_fwrsp;	/* delete: fw responses */
 	u32	sm_del_hwf;	/* delete: IOC down events */
 	u32	sm_del_unexp;	/* delete: exception events */
 	u32	sm_delp_fwrsp;	/* delete pend: fw responses */
 	u32	sm_delp_hwf;	/* delete pend: IOC downs */
 	u32	sm_delp_unexp;	/* delete pend: exceptions */
 	u32	sm_offp_fwrsp;	/* off-pending: fw responses */
 	u32	sm_offp_del;	/* off-pending: deletes */
 	u32	sm_offp_hwf;	/* off-pending: IOC downs */
 	u32	sm_offp_unexp;	/* off-pending: exceptions */
 	u32	sm_iocd_off;	/* IOC down: offline events */
 	u32	sm_iocd_del;	/* IOC down: delete events */
 	u32	sm_iocd_on;	/* IOC down: online events */
 	u32	sm_iocd_unexp;	/* IOC down: exceptions */
 	u32	rsvd;
 };
 #pragma pack(1)
@@ -1069,9 +1072,9 @@ struct bfa_rport_hal_stats_s {
  * Rport's QoS attributes
  */
 struct bfa_rport_qos_attr_s {
 	u8	qos_priority;	/* rport's QoS priority */
 	u8	rsvd[3];
 	u32	qos_flow_id;	/* QoS flow Id */
 };
 #pragma pack()
 
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 1ac5aecf25a6..eca7ab78085b 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3727,11 +3727,11 @@ bfa_sfp_media_get(struct bfa_sfp_s *sfp)
 		 (xmtr_tech & SFP_XMTR_TECH_SA))
 		*media = BFA_SFP_MEDIA_SW;
 	/* Check 10G Ethernet Compilance code */
-	else if (e10g.b & 0x10)
+	else if (e10g.r.e10g_sr)
 		*media = BFA_SFP_MEDIA_SW;
-	else if (e10g.b & 0x60)
+	else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
 		*media = BFA_SFP_MEDIA_LW;
-	else if (e10g.r.e10g_unall & 0x80)
+	else if (e10g.r.e10g_unall)
 		*media = BFA_SFP_MEDIA_UNKNOWN;
 	else
 		bfa_trc(sfp, 0);
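
Aside (editorial): the rewritten tests read the named compliance bitfields instead of masking the raw byte. C leaves bitfield allocation order implementation-defined, which is why the header change above keys the field order off #ifdef __BIG_ENDIAN and why a named access is safer than a magic mask. A compilable sketch of the pattern (field names mirror the driver's union; the ordering shown is illustrative):

#include <stdio.h>

/* Union of a raw byte and named flag bits, after the driver's
 * sfp_xcvr_e10g_code_u. Which mask a given field answers to depends
 * on the ABI's bitfield order, so e10g.r.e10g_sr is unambiguous
 * where (e10g.b & 0x10) is not. */
union e10g_code {
	unsigned char b;
	struct {
		unsigned char e10g_sr:1;
		unsigned char e10g_lr:1;
		unsigned char e10g_lrm:1;
		unsigned char e10g_unall:1;
		/* remaining compliance bits elided */
	} r;
};

int main(void)
{
	union e10g_code e10g = { .b = 0x01 };

	printf("sr=%u lr=%u lrm=%u\n",
	       e10g.r.e10g_sr, e10g.r.e10g_lr, e10g.r.e10g_lrm);
	return 0;
}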
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index caca9b7c8309..439c012be763 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -557,8 +557,7 @@ bfad_debugfs_exit(struct bfad_port_s *port)
 		}
 	}
 
-	/*
-	 * Remove the pci_dev debugfs directory for the port */
+	/* Remove the pci_dev debugfs directory for the port */
 	if (port->port_debugfs_root) {
 		debugfs_remove(port->port_debugfs_root);
 		port->port_debugfs_root = NULL;
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 23149b9e297c..48e46f5b77cc 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -28,7 +28,6 @@
 
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(scsi_dh_list);
-static int scsi_dh_list_idx = 1;
 
 static struct scsi_device_handler *get_device_handler(const char *name)
 {
@@ -45,21 +44,6 @@ static struct scsi_device_handler *get_device_handler(const char *name)
 	return found;
 }
 
-static struct scsi_device_handler *get_device_handler_by_idx(int idx)
-{
-	struct scsi_device_handler *tmp, *found = NULL;
-
-	spin_lock(&list_lock);
-	list_for_each_entry(tmp, &scsi_dh_list, list) {
-		if (tmp->idx == idx) {
-			found = tmp;
-			break;
-		}
-	}
-	spin_unlock(&list_lock);
-	return found;
-}
-
 /*
  * device_handler_match_function - Match a device handler to a device
  * @sdev - SCSI device to be tested
@@ -84,23 +68,6 @@ device_handler_match_function(struct scsi_device *sdev)
 }
 
 /*
- * device_handler_match_devlist - Match a device handler to a device
- * @sdev - SCSI device to be tested
- *
- * Tests @sdev against all device_handler registered in the devlist.
- * Returns the found device handler or NULL if not found.
- */
-static struct scsi_device_handler *
-device_handler_match_devlist(struct scsi_device *sdev)
-{
-	int idx;
-
-	idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
-					  SCSI_DEVINFO_DH);
-	return get_device_handler_by_idx(idx);
-}
-
-/*
  * device_handler_match - Attach a device handler to a device
  * @scsi_dh - The device handler to match against or NULL
  * @sdev - SCSI device to be tested against @scsi_dh
@@ -116,8 +83,6 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
 	struct scsi_device_handler *found_dh;
 
 	found_dh = device_handler_match_function(sdev);
-	if (!found_dh)
-		found_dh = device_handler_match_devlist(sdev);
 
 	if (scsi_dh && found_dh != scsi_dh)
 		found_dh = NULL;
@@ -361,25 +326,14 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
  */
 int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
 {
-	int i;
 
 	if (get_device_handler(scsi_dh->name))
 		return -EBUSY;
 
 	spin_lock(&list_lock);
-	scsi_dh->idx = scsi_dh_list_idx++;
 	list_add(&scsi_dh->list, &scsi_dh_list);
 	spin_unlock(&list_lock);
 
-	for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
-		scsi_dev_info_list_add_keyed(0,
-					scsi_dh->devlist[i].vendor,
-					scsi_dh->devlist[i].model,
-					NULL,
-					scsi_dh->idx,
-					SCSI_DEVINFO_DH);
-	}
-
 	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
 	printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
 
@@ -396,7 +350,6 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
  */
 int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
 {
-	int i;
 
 	if (!get_device_handler(scsi_dh->name))
 		return -ENODEV;
@@ -404,12 +357,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
 	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
 			 scsi_dh_notifier_remove);
 
-	for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
-		scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
-					scsi_dh->devlist[i].model,
-					SCSI_DEVINFO_DH);
-	}
-
 	spin_lock(&list_lock);
 	list_del(&scsi_dh->list);
 	spin_unlock(&list_lock);
@@ -588,10 +535,6 @@ static int __init scsi_dh_init(void)
 {
 	int r;
 
-	r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
-	if (r)
-		return r;
-
 	r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
 
 	if (!r)
@@ -606,7 +549,6 @@ static void __exit scsi_dh_exit(void)
 	bus_for_each_dev(&scsi_bus_type, NULL, NULL,
 			 scsi_dh_sysfs_attr_remove);
 	bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
-	scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
 }
 
 module_init(scsi_dh_init);
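
Aside (editorial): with the keyed devlist gone, matching rests entirely on each handler's new ->match() callback, added per driver in the hunks below. A kernel-style sketch of what the core dispatch plausibly looks like after this change; device_handler_match_function() itself only appears by name in the hunk headers above, so this rendering is an assumption, not patch text:

static struct scsi_device_handler *
device_handler_match_function(struct scsi_device *sdev)
{
	struct scsi_device_handler *tmp_dh, *found_dh = NULL;

	/* Walk the registered handlers and let each ->match() inspect
	 * the device directly, instead of keying vendor/model through
	 * the global devinfo list that this patch removes. */
	spin_lock(&list_lock);
	list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
		if (tmp_dh->match && tmp_dh->match(sdev)) {
			found_dh = tmp_dh;
			break;
		}
	}
	spin_unlock(&list_lock);

	return found_dh;
}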
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 591186cf1896..e1c8be06de9d 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -629,6 +629,24 @@ static const struct scsi_dh_devlist clariion_dev_list[] = {
 	{NULL, NULL},
 };
 
+static bool clariion_match(struct scsi_device *sdev)
+{
+	int i;
+
+	if (scsi_device_tpgs(sdev))
+		return false;
+
+	for (i = 0; clariion_dev_list[i].vendor; i++) {
+		if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
+			strlen(clariion_dev_list[i].vendor)) &&
+		    !strncmp(sdev->model, clariion_dev_list[i].model,
+			strlen(clariion_dev_list[i].model))) {
+			return true;
+		}
+	}
+	return false;
+}
+
 static int clariion_bus_attach(struct scsi_device *sdev);
 static void clariion_bus_detach(struct scsi_device *sdev);
 
@@ -642,6 +660,7 @@ static struct scsi_device_handler clariion_dh = {
 	.activate	= clariion_activate,
 	.prep_fn	= clariion_prep_fn,
 	.set_params	= clariion_set_params,
+	.match		= clariion_match,
 };
 
 static int clariion_bus_attach(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 0f86a18b157d..084062bb8ee9 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -320,6 +320,24 @@ static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
 	{NULL, NULL},
 };
 
+static bool hp_sw_match(struct scsi_device *sdev)
+{
+	int i;
+
+	if (scsi_device_tpgs(sdev))
+		return false;
+
+	for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
+		if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
+			strlen(hp_sw_dh_data_list[i].vendor)) &&
+		    !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
+			strlen(hp_sw_dh_data_list[i].model))) {
+			return true;
+		}
+	}
+	return false;
+}
+
 static int hp_sw_bus_attach(struct scsi_device *sdev);
 static void hp_sw_bus_detach(struct scsi_device *sdev);
 
@@ -331,6 +349,7 @@ static struct scsi_device_handler hp_sw_dh = {
 	.detach		= hp_sw_bus_detach,
 	.activate	= hp_sw_activate,
 	.prep_fn	= hp_sw_prep_fn,
+	.match		= hp_sw_match,
 };
 
 static int hp_sw_bus_attach(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 1d3127920063..841ebf4a6788 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -820,6 +820,24 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
 	{NULL, NULL},
 };
 
+static bool rdac_match(struct scsi_device *sdev)
+{
+	int i;
+
+	if (scsi_device_tpgs(sdev))
+		return false;
+
+	for (i = 0; rdac_dev_list[i].vendor; i++) {
+		if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
+			     strlen(rdac_dev_list[i].vendor)) &&
+		    !strncmp(sdev->model, rdac_dev_list[i].model,
+			     strlen(rdac_dev_list[i].model))) {
+			return true;
+		}
+	}
+	return false;
+}
+
 static int rdac_bus_attach(struct scsi_device *sdev);
 static void rdac_bus_detach(struct scsi_device *sdev);
 
@@ -832,6 +850,7 @@ static struct scsi_device_handler rdac_dh = {
 	.attach		= rdac_bus_attach,
 	.detach		= rdac_bus_detach,
 	.activate	= rdac_activate,
+	.match		= rdac_match,
 };
 
 static int rdac_bus_attach(struct scsi_device *sdev)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 865d452542be..5140f5d0fd6b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -293,12 +293,14 @@ static u32 unresettable_controller[] = {
 	0x3215103C, /* Smart Array E200i */
 	0x3237103C, /* Smart Array E500 */
 	0x323D103C, /* Smart Array P700m */
+	0x40800E11, /* Smart Array 5i */
 	0x409C0E11, /* Smart Array 6400 */
 	0x409D0E11, /* Smart Array 6400 EM */
 };
 
 /* List of controllers which cannot even be soft reset */
 static u32 soft_unresettable_controller[] = {
+	0x40800E11, /* Smart Array 5i */
 	/* Exclude 640x boards.  These are two pci devices in one slot
 	 * which share a battery backed cache module.  One controls the
 	 * cache, the other accesses the cache through the one that controls
@@ -4072,10 +4074,10 @@ static int hpsa_request_irq(struct ctlr_info *h,
 
 	if (h->msix_vector || h->msi_vector)
 		rc = request_irq(h->intr[h->intr_mode], msixhandler,
-				IRQF_DISABLED, h->devname, h);
+				0, h->devname, h);
 	else
 		rc = request_irq(h->intr[h->intr_mode], intxhandler,
-				IRQF_DISABLED, h->devname, h);
+				IRQF_SHARED, h->devname, h);
 	if (rc) {
 		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
 			h->intr[h->intr_mode], h->devname);
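
The hpsa interrupt hunk drops IRQF_DISABLED (long a no-op, being removed tree-wide) and requests the legacy INTx handler with IRQF_SHARED, since a wired interrupt pin may be shared with other devices; MSI/MSI-X vectors are per-device, so no flags are needed. A minimal sketch of that flag choice under those assumptions (example_request_irq is a made-up helper, not hpsa code):

#include <linux/interrupt.h>
#include <linux/types.h>

/* Illustrative only: MSI/MSI-X vectors are exclusive, so flags are 0;
 * a legacy INTx line may be shared, so pass IRQF_SHARED. */
static int example_request_irq(unsigned int irq, irq_handler_t handler,
			       bool msi_enabled, const char *name, void *dev)
{
	if (msi_enabled)
		return request_irq(irq, handler, 0, name, dev);
	return request_irq(irq, handler, IRQF_SHARED, name, dev);
}
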
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index bb4c8e0584e2..825f9307417a 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -247,18 +247,6 @@ struct lpfc_stats {
 	uint32_t fcpLocalErr;
 };
 
-enum sysfs_mbox_state {
-	SMBOX_IDLE,
-	SMBOX_WRITING,
-	SMBOX_READING
-};
-
-struct lpfc_sysfs_mbox {
-	enum sysfs_mbox_state state;
-	size_t offset;
-	struct lpfcMboxq * mbox;
-};
-
 struct lpfc_hba;
 
 
@@ -783,8 +771,6 @@ struct lpfc_hba {
 	uint64_t bg_apptag_err_cnt;
 	uint64_t bg_reftag_err_cnt;
 
-	struct lpfc_sysfs_mbox sysfs_mbox;
-
 	/* fastpath list. */
 	spinlock_t scsi_buf_list_lock;
 	struct list_head lpfc_scsi_buf_list;
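
These lpfc.h removals pair with the lpfc_attr.c hunks below: the sysfs mailbox state machine is deleted and the "mbox" bin attribute is kept only as a stub, with every transfer failing, so user space is pushed to the bsg interface. A sketch of that deprecation pattern (the names here are illustrative, not the lpfc symbols):

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

/* Keep the sysfs node visible so existing tooling can still open it,
 * but refuse all I/O; callers must move to the bsg interface. */
static ssize_t deprecated_rw(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	return -EPERM;
}

static struct bin_attribute deprecated_attr = {
	.attr = { .name = "mbox", .mode = S_IRUSR | S_IWUSR },
	.size = 0,
	.read = deprecated_rw,
	.write = deprecated_rw,
};
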
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d0ebaeb7ef60..f6697cb0e216 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -351,10 +351,23 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t if_type;
+	uint8_t sli_family;
 	char fwrev[32];
+	int len;
 
 	lpfc_decode_firmware_rev(phba, fwrev, 1);
-	return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
+	if_type = phba->sli4_hba.pc_sli4_params.if_type;
+	sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+			       fwrev, phba->sli_rev);
+	else
+		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+			       fwrev, phba->sli_rev, if_type, sli_family);
+
+	return len;
 }
 
 /**
@@ -488,6 +501,34 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
 }
 
 /**
+ * lpfc_sli4_protocol_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return snprintf(buf, PAGE_SIZE, "fc\n");
+
+	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
+		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
+			return snprintf(buf, PAGE_SIZE, "fcoe\n");
+		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
+			return snprintf(buf, PAGE_SIZE, "fc\n");
+	}
+	return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+/**
  * lpfc_link_state_store - Transition the link_state on an HBA port
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
@@ -773,7 +814,12 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
  * the readyness after performing a firmware reset.
  *
  * Returns:
- * zero for success
+ * zero for success, -EPERM when port does not have privilage to perform the
+ * reset, -EIO when port timeout from recovering from the reset.
+ *
+ * Note:
+ * As the caller will interpret the return code by value, be careful in making
+ * change or addition to return codes.
  **/
 int
 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
@@ -826,9 +872,11 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 {
 	struct completion online_compl;
 	struct pci_dev *pdev = phba->pcidev;
+	uint32_t before_fc_flag;
+	uint32_t sriov_nr_virtfn;
 	uint32_t reg_val;
-	int status = 0;
-	int rc;
+	int status = 0, rc = 0;
+	int job_posted = 1, sriov_err;
 
 	if (!phba->cfg_enable_hba_reset)
 		return -EACCES;
@@ -838,6 +886,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 	     LPFC_SLI_INTF_IF_TYPE_2))
 		return -EPERM;
 
+	/* Keep state if we need to restore back */
+	before_fc_flag = phba->pport->fc_flag;
+	sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
+
 	/* Disable SR-IOV virtual functions if enabled */
 	if (phba->cfg_sriov_nr_virtfn) {
 		pci_disable_sriov(pdev);
@@ -869,21 +921,44 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 	/* delay driver action following IF_TYPE_2 reset */
 	rc = lpfc_sli4_pdev_status_reg_wait(phba);
 
-	if (rc)
+	if (rc == -EPERM) {
+		/* no privilage for reset, restore if needed */
+		if (before_fc_flag & FC_OFFLINE_MODE)
+			goto out;
+	} else if (rc == -EIO) {
+		/* reset failed, there is nothing more we can do */
 		return rc;
+	}
+
+	/* keep the original port state */
+	if (before_fc_flag & FC_OFFLINE_MODE)
+		goto out;
 
 	init_completion(&online_compl);
-	rc = lpfc_workq_post_event(phba, &status, &online_compl,
+	job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
 			   LPFC_EVT_ONLINE);
-	if (rc == 0)
-		return -ENOMEM;
+	if (!job_posted)
+		goto out;
 
 	wait_for_completion(&online_compl);
 
-	if (status != 0)
-		return -EIO;
+out:
+	/* in any case, restore the virtual functions enabled as before */
+	if (sriov_nr_virtfn) {
+		sriov_err =
+			lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
+		if (!sriov_err)
+			phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
+	}
 
-	return 0;
+	/* return proper error code */
+	if (!rc) {
+		if (!job_posted)
+			rc = -ENOMEM;
+		else if (status)
+			rc = -EIO;
+	}
+	return rc;
 }
 
 /**
@@ -955,33 +1030,38 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct completion online_compl;
-	int status=0;
+	char *board_mode_str = NULL;
+	int status = 0;
 	int rc;
 
-	if (!phba->cfg_enable_hba_reset)
-		return -EACCES;
+	if (!phba->cfg_enable_hba_reset) {
+		status = -EACCES;
+		goto board_mode_out;
+	}
 
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 			 "3050 lpfc_board_mode set to %s\n", buf);
 
 	init_completion(&online_compl);
 
 	if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
 		rc = lpfc_workq_post_event(phba, &status, &online_compl,
 				  LPFC_EVT_ONLINE);
-		if (rc == 0)
-			return -ENOMEM;
+		if (rc == 0) {
+			status = -ENOMEM;
+			goto board_mode_out;
+		}
 		wait_for_completion(&online_compl);
 	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
 		if (phba->sli_rev == LPFC_SLI_REV4)
-			return -EINVAL;
+			status = -EINVAL;
 		else
 			status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
 	else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
 		if (phba->sli_rev == LPFC_SLI_REV4)
-			return -EINVAL;
+			status = -EINVAL;
 		else
 			status = lpfc_do_offline(phba, LPFC_EVT_KILL);
 	else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
@@ -991,12 +1071,21 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 	else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
 		status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
 	else
-		return -EINVAL;
+		status = -EINVAL;
 
+board_mode_out:
 	if (!status)
 		return strlen(buf);
-	else
+	else {
+		board_mode_str = strchr(buf, '\n');
+		if (board_mode_str)
+			*board_mode_str = '\0';
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "3097 Failed \"%s\", status(%d), "
+				 "fc_flag(x%x)\n",
+				 buf, status, phba->pport->fc_flag);
 		return status;
+	}
 }
 
 /**
@@ -1942,6 +2031,7 @@ static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
 static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
 static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
 		   lpfc_sriov_hw_max_virtfn_show, NULL);
+static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
 
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 
@@ -2687,6 +2777,14 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 	if (val >= 0 && val <= 6) {
 		prev_val = phba->cfg_topology;
 		phba->cfg_topology = val;
+		if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
+		    val == 4) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"3113 Loop mode not supported at speed %d\n",
+				phba->cfg_link_speed);
+			phba->cfg_topology = prev_val;
+			return -EINVAL;
+		}
 		if (nolip)
 			return strlen(buf);
 
@@ -3132,6 +3230,14 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
 				val);
 		return -EINVAL;
 	}
+	if (val == LPFC_USER_LINK_SPEED_16G &&
+	    phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3112 lpfc_link_speed attribute cannot be set "
+			"to %d. Speed is not supported in loop mode.\n",
+			val);
+		return -EINVAL;
+	}
 	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
 	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
 		prev_val = phba->cfg_link_speed;
@@ -3176,6 +3282,13 @@ lpfc_param_show(link_speed)
 static int
 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
 {
+	if (val == LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3111 lpfc_link_speed of %d cannot "
+			"support loop mode, setting topology to default.\n",
+			val);
+		phba->cfg_topology = 0;
+	}
 	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
 	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
 		phba->cfg_link_speed = val;
@@ -3830,6 +3943,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_fips_rev,
 	&dev_attr_lpfc_dss,
 	&dev_attr_lpfc_sriov_hw_max_virtfn,
+	&dev_attr_protocol,
 	NULL,
 };
 
@@ -3988,23 +4102,6 @@ static struct bin_attribute sysfs_ctlreg_attr = {
 };
 
 /**
- * sysfs_mbox_idle - frees the sysfs mailbox
- * @phba: lpfc_hba pointer
- **/
-static void
-sysfs_mbox_idle(struct lpfc_hba *phba)
-{
-	phba->sysfs_mbox.state = SMBOX_IDLE;
-	phba->sysfs_mbox.offset = 0;
-
-	if (phba->sysfs_mbox.mbox) {
-		mempool_free(phba->sysfs_mbox.mbox,
-			     phba->mbox_mem_pool);
-		phba->sysfs_mbox.mbox = NULL;
-	}
-}
-
-/**
  * sysfs_mbox_write - Write method for writing information via mbox
  * @filp: open sysfs file
  * @kobj: kernel kobject that contains the kernel class device.
@@ -4014,71 +4111,18 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
  * @count: bytes to transfer.
  *
  * Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to send buf contents to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
  *
  * Returns:
- * -ERANGE off and count combo out of range
- * -EINVAL off, count or buff address invalid
- * zero if count is zero
- * -EPERM adapter is offline
- * -ENOMEM failed to allocate memory for the mail box
- * -EAGAIN offset, state or mbox is NULL
- * count number of bytes transferred
+ * -EPERM operation not permitted
  **/
 static ssize_t
 sysfs_mbox_write(struct file *filp, struct kobject *kobj,
 		 struct bin_attribute *bin_attr,
 		 char *buf, loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	struct lpfc_hba *phba = vport->phba;
-	struct lpfcMboxq *mbox = NULL;
-
-	if ((count + off) > MAILBOX_CMD_SIZE)
-		return -ERANGE;
-
-	if (off % 4 || count % 4 || (unsigned long)buf % 4)
-		return -EINVAL;
-
-	if (count == 0)
-		return 0;
-
-	if (off == 0) {
-		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-		if (!mbox)
-			return -ENOMEM;
-		memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
-	}
-
-	spin_lock_irq(&phba->hbalock);
-
-	if (off == 0) {
-		if (phba->sysfs_mbox.mbox)
-			mempool_free(mbox, phba->mbox_mem_pool);
-		else
-			phba->sysfs_mbox.mbox = mbox;
-		phba->sysfs_mbox.state = SMBOX_WRITING;
-	} else {
-		if (phba->sysfs_mbox.state != SMBOX_WRITING ||
-		    phba->sysfs_mbox.offset != off ||
-		    phba->sysfs_mbox.mbox == NULL) {
-			sysfs_mbox_idle(phba);
-			spin_unlock_irq(&phba->hbalock);
-			return -EAGAIN;
-		}
-	}
-
-	memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
-	       buf, count);
-
-	phba->sysfs_mbox.offset = off + count;
-
-	spin_unlock_irq(&phba->hbalock);
-
-	return count;
+	return -EPERM;
 }
 
 /**
@@ -4091,201 +4135,18 @@ sysfs_mbox_write(struct file *filp, struct kobject *kobj,
  * @count: bytes to transfer.
  *
  * Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to receive data from to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
  *
  * Returns:
- * -ERANGE off greater than mailbox command size
- * -EINVAL off, count or buff address invalid
- * zero if off and count are zero
- * -EACCES adapter over temp
- * -EPERM garbage can value to catch a multitude of errors
- * -EAGAIN management IO not permitted, state or off error
- * -ETIME mailbox timeout
- * -ENODEV mailbox error
- * count number of bytes transferred
+ * -EPERM operation not permitted
 **/
 static ssize_t
 sysfs_mbox_read(struct file *filp, struct kobject *kobj,
 		struct bin_attribute *bin_attr,
 		char *buf, loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	struct lpfc_hba *phba = vport->phba;
-	LPFC_MBOXQ_t *mboxq;
-	MAILBOX_t *pmb;
-	uint32_t mbox_tmo;
-	int rc;
-
-	if (off > MAILBOX_CMD_SIZE)
-		return -ERANGE;
-
-	if ((count + off) > MAILBOX_CMD_SIZE)
-		count = MAILBOX_CMD_SIZE - off;
-
-	if (off % 4 || count % 4 || (unsigned long)buf % 4)
-		return -EINVAL;
-
-	if (off && count == 0)
-		return 0;
-
-	spin_lock_irq(&phba->hbalock);
-
-	if (phba->over_temp_state == HBA_OVER_TEMP) {
-		sysfs_mbox_idle(phba);
-		spin_unlock_irq(&phba->hbalock);
-		return -EACCES;
-	}
-
-	if (off == 0 &&
-	    phba->sysfs_mbox.state == SMBOX_WRITING &&
-	    phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
-		mboxq = (LPFC_MBOXQ_t *)&phba->sysfs_mbox.mbox;
-		pmb = &mboxq->u.mb;
-		switch (pmb->mbxCommand) {
-			/* Offline only */
-		case MBX_INIT_LINK:
-		case MBX_DOWN_LINK:
-		case MBX_CONFIG_LINK:
-		case MBX_CONFIG_RING:
-		case MBX_RESET_RING:
-		case MBX_UNREG_LOGIN:
-		case MBX_CLEAR_LA:
-		case MBX_DUMP_CONTEXT:
-		case MBX_RUN_DIAGS:
-		case MBX_RESTART:
-		case MBX_SET_MASK:
-		case MBX_SET_DEBUG:
-			if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
-				printk(KERN_WARNING "mbox_read:Command 0x%x "
-				       "is illegal in on-line state\n",
-				       pmb->mbxCommand);
-				sysfs_mbox_idle(phba);
-				spin_unlock_irq(&phba->hbalock);
-				return -EPERM;
-			}
-		case MBX_WRITE_NV:
-		case MBX_WRITE_VPARMS:
-		case MBX_LOAD_SM:
-		case MBX_READ_NV:
-		case MBX_READ_CONFIG:
-		case MBX_READ_RCONFIG:
-		case MBX_READ_STATUS:
-		case MBX_READ_XRI:
-		case MBX_READ_REV:
-		case MBX_READ_LNK_STAT:
-		case MBX_DUMP_MEMORY:
-		case MBX_DOWN_LOAD:
-		case MBX_UPDATE_CFG:
-		case MBX_KILL_BOARD:
-		case MBX_LOAD_AREA:
-		case MBX_LOAD_EXP_ROM:
-		case MBX_BEACON:
-		case MBX_DEL_LD_ENTRY:
-		case MBX_SET_VARIABLE:
-		case MBX_WRITE_WWN:
-		case MBX_PORT_CAPABILITIES:
-		case MBX_PORT_IOV_CONTROL:
-			break;
-		case MBX_SECURITY_MGMT:
-		case MBX_AUTH_PORT:
-			if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
-				printk(KERN_WARNING "mbox_read:Command 0x%x "
-				       "is not permitted\n", pmb->mbxCommand);
-				sysfs_mbox_idle(phba);
-				spin_unlock_irq(&phba->hbalock);
-				return -EPERM;
-			}
-			break;
-		case MBX_READ_SPARM64:
-		case MBX_READ_TOPOLOGY:
-		case MBX_REG_LOGIN:
-		case MBX_REG_LOGIN64:
-		case MBX_CONFIG_PORT:
-		case MBX_RUN_BIU_DIAG:
-			printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
-			       pmb->mbxCommand);
-			sysfs_mbox_idle(phba);
-			spin_unlock_irq(&phba->hbalock);
-			return -EPERM;
-		default:
-			printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
-			       pmb->mbxCommand);
-			sysfs_mbox_idle(phba);
-			spin_unlock_irq(&phba->hbalock);
-			return -EPERM;
-		}
-
-		/* If HBA encountered an error attention, allow only DUMP
-		 * or RESTART mailbox commands until the HBA is restarted.
-		 */
-		if (phba->pport->stopped &&
-		    pmb->mbxCommand != MBX_DUMP_MEMORY &&
-		    pmb->mbxCommand != MBX_RESTART &&
-		    pmb->mbxCommand != MBX_WRITE_VPARMS &&
-		    pmb->mbxCommand != MBX_WRITE_WWN)
-			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-					"1259 mbox: Issued mailbox cmd "
-					"0x%x while in stopped state.\n",
-					pmb->mbxCommand);
-
-		phba->sysfs_mbox.mbox->vport = vport;
-
-		/* Don't allow mailbox commands to be sent when blocked
-		 * or when in the middle of discovery
-		 */
-		if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
-			sysfs_mbox_idle(phba);
-			spin_unlock_irq(&phba->hbalock);
-			return -EAGAIN;
-		}
-
-		if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-		    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
-
-			spin_unlock_irq(&phba->hbalock);
-			rc = lpfc_sli_issue_mbox (phba,
-						  phba->sysfs_mbox.mbox,
-						  MBX_POLL);
-			spin_lock_irq(&phba->hbalock);
-
-		} else {
-			spin_unlock_irq(&phba->hbalock);
-			mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
-			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
-			spin_lock_irq(&phba->hbalock);
-		}
-
-		if (rc != MBX_SUCCESS) {
-			if (rc == MBX_TIMEOUT) {
-				phba->sysfs_mbox.mbox = NULL;
-			}
-			sysfs_mbox_idle(phba);
-			spin_unlock_irq(&phba->hbalock);
-			return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
-		}
-		phba->sysfs_mbox.state = SMBOX_READING;
-	}
-	else if (phba->sysfs_mbox.offset != off ||
-		 phba->sysfs_mbox.state != SMBOX_READING) {
-		printk(KERN_WARNING "mbox_read: Bad State\n");
-		sysfs_mbox_idle(phba);
-		spin_unlock_irq(&phba->hbalock);
-		return -EAGAIN;
-	}
-
-	memcpy(buf, (uint8_t *) &pmb + off, count);
-
-	phba->sysfs_mbox.offset = off + count;
-
-	if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
-		sysfs_mbox_idle(phba);
-
-	spin_unlock_irq(&phba->hbalock);
-
-	return count;
+	return -EPERM;
 }
 
 static struct bin_attribute sysfs_mbox_attr = {
@@ -4429,8 +4290,13 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
 	case LPFC_LINK_UP:
 	case LPFC_CLEAR_LA:
 	case LPFC_HBA_READY:
-		/* Links up, beyond this port_type reports state */
-		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+		/* Links up, reports port state accordingly */
+		if (vport->port_state < LPFC_VPORT_READY)
+			fc_host_port_state(shost) =
+				FC_PORTSTATE_BYPASSED;
+		else
+			fc_host_port_state(shost) =
+				FC_PORTSTATE_ONLINE;
 		break;
 	case LPFC_HBA_ERROR:
 		fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
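
The lpfc_attr.c changes above follow the standard read-only sysfs attribute pattern for the new protocol file: a show routine formats at most PAGE_SIZE bytes into buf, and DEVICE_ATTR(..., S_IRUGO, ..., NULL) registers it with no store method. A minimal sketch under those assumptions (example/example_show are hypothetical names, not lpfc symbols):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* Read-only attribute: sysfs hands us a PAGE_SIZE buffer to fill */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", "fc");
}
static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);
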
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 6760c69f5253..56a86baece5b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -916,9 +916,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			} else {
 				switch (cmd) {
 				case ELX_LOOPBACK_DATA:
-					diag_cmd_data_free(phba,
-					(struct lpfc_dmabufext *)
-					dmabuf);
+					if (phba->sli_rev <
+					    LPFC_SLI_REV4)
+						diag_cmd_data_free(phba,
+						(struct lpfc_dmabufext
+						 *)dmabuf);
 					break;
 				case ELX_LOOPBACK_XRI_SETUP:
 					if ((phba->sli_rev ==
@@ -1000,7 +1002,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 error_ct_unsol_exit:
 	if (!list_empty(&head))
 		list_del(&head);
-	if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+	if ((phba->sli_rev < LPFC_SLI_REV4) &&
+	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
 		return 0;
 	return 1;
 }
@@ -1566,7 +1569,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 	struct diag_mode_set *loopback_mode;
 	uint32_t link_flags;
 	uint32_t timeout;
-	LPFC_MBOXQ_t *pmboxq;
+	LPFC_MBOXQ_t *pmboxq = NULL;
 	int mbxstatus = MBX_SUCCESS;
 	int i = 0;
 	int rc = 0;
@@ -1615,7 +1618,6 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 			rc = -ETIMEDOUT;
 			goto loopback_mode_exit;
 		}
-
 		msleep(10);
 	}
 
@@ -1635,7 +1637,9 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
 		rc = -ENODEV;
 	else {
+		spin_lock_irq(&phba->hbalock);
 		phba->link_flag |= LS_LOOPBACK_MODE;
+		spin_unlock_irq(&phba->hbalock);
 		/* wait for the link attention interrupt */
 		msleep(100);
 
@@ -1659,7 +1663,7 @@ loopback_mode_exit:
 	/*
 	 * Let SLI layer release mboxq if mbox command completed after timeout.
 	 */
-	if (mbxstatus != MBX_TIMEOUT)
+	if (pmboxq && mbxstatus != MBX_TIMEOUT)
 		mempool_free(pmboxq, phba->mbox_mem_pool);
 
 job_error:
@@ -1700,11 +1704,16 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
 		rc = -ENOMEM;
 		goto link_diag_state_set_out;
 	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
+			diag, phba->sli4_hba.lnk_info.lnk_tp,
+			phba->sli4_hba.lnk_info.lnk_no);
+
 	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
 	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
-	       phba->sli4_hba.link_state.number);
+	       phba->sli4_hba.lnk_info.lnk_no);
 	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
-	       phba->sli4_hba.link_state.type);
+	       phba->sli4_hba.lnk_info.lnk_tp);
 	if (diag)
 		bf_set(lpfc_mbx_set_diag_state_diag,
 		       &link_diag_state->u.req, 1);
@@ -1727,6 +1736,79 @@ link_diag_state_set_out:
 }
 
 /**
+ * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * up internal loopback diagnostic.
+ */
+static int
+lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmboxq;
+	uint32_t req_len, alloc_len;
+	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+	int mbxstatus = MBX_SUCCESS, rc = 0;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return -ENOMEM;
+	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+		   sizeof(struct lpfc_sli4_cfg_mhdr));
+	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+				req_len, LPFC_SLI4_MBX_EMBED);
+	if (alloc_len != req_len) {
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+	bf_set(lpfc_mbx_set_diag_state_link_num,
+	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
+	bf_set(lpfc_mbx_set_diag_state_link_type,
+	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
+	       LPFC_DIAG_LOOPBACK_TYPE_SERDES);
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3127 Failed setup loopback mode mailbox "
+				"command, rc:x%x, status:x%x\n", mbxstatus,
+				pmboxq->u.mb.mbxStatus);
+		rc = -ENODEV;
+	}
+	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function set up SLI4 FC port registrations for diagnostic run, which
+ * includes all the rpis, vfi, and also vpi.
+ */
+static int
+lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
+{
+	int rc;
+
+	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3136 Port still had vfi registered: "
+				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
+				phba->pport->fc_myDID, phba->fcf.fcfi,
+				phba->sli4_hba.vfi_ids[phba->pport->vfi],
+				phba->vpi_ids[phba->pport->vpi]);
+		return -EINVAL;
+	}
+	rc = lpfc_issue_reg_vfi(phba->pport);
+	return rc;
+}
+
+/**
  * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
  * @phba: Pointer to HBA context object.
  * @job: LPFC_BSG_VENDOR_DIAG_MODE
@@ -1738,10 +1820,8 @@ static int
 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 {
 	struct diag_mode_set *loopback_mode;
-	uint32_t link_flags, timeout, req_len, alloc_len;
-	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
-	LPFC_MBOXQ_t *pmboxq = NULL;
-	int mbxstatus = MBX_SUCCESS, i, rc = 0;
+	uint32_t link_flags, timeout;
+	int i, rc = 0;
 
 	/* no data to return just the return code */
 	job->reply->reply_payload_rcv_len = 0;
@@ -1762,65 +1842,100 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 	if (rc)
 		goto job_error;
 
+	/* indicate we are in loobpack diagnostic mode */
+	spin_lock_irq(&phba->hbalock);
+	phba->link_flag |= LS_LOOPBACK_MODE;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* reset port to start frome scratch */
+	rc = lpfc_selective_reset(phba);
+	if (rc)
+		goto job_error;
+
 	/* bring the link to diagnostic mode */
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3129 Bring link to diagnostic state.\n");
 	loopback_mode = (struct diag_mode_set *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
 	link_flags = loopback_mode->type;
 	timeout = loopback_mode->timeout * 100;
 
 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
-	if (rc)
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3130 Failed to bring link to diagnostic "
+				"state, rc:x%x\n", rc);
 		goto loopback_mode_exit;
+	}
 
 	/* wait for link down before proceeding */
 	i = 0;
 	while (phba->link_state != LPFC_LINK_DOWN) {
 		if (i++ > timeout) {
 			rc = -ETIMEDOUT;
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3131 Timeout waiting for link to "
+					"diagnostic mode, timeout:%d ms\n",
+					timeout * 10);
 			goto loopback_mode_exit;
 		}
 		msleep(10);
 	}
+
 	/* set up loopback mode */
-	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!pmboxq) {
-		rc = -ENOMEM;
-		goto loopback_mode_exit;
-	}
-	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
-		   sizeof(struct lpfc_sli4_cfg_mhdr));
-	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
-				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
-				req_len, LPFC_SLI4_MBX_EMBED);
-	if (alloc_len != req_len) {
-		rc = -ENOMEM;
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3132 Set up loopback mode:x%x\n", link_flags);
+
+	if (link_flags == INTERNAL_LOOP_BACK)
+		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
+	else if (link_flags == EXTERNAL_LOOP_BACK)
+		rc = lpfc_hba_init_link_fc_topology(phba,
+						    FLAGS_TOPOLOGY_MODE_PT_PT,
+						    MBX_NOWAIT);
+	else {
+		rc = -EINVAL;
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3141 Loopback mode:x%x not supported\n",
+				link_flags);
 		goto loopback_mode_exit;
 	}
-	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
-	bf_set(lpfc_mbx_set_diag_state_link_num,
-	       &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
-	bf_set(lpfc_mbx_set_diag_state_link_type,
-	       &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
-	if (link_flags == INTERNAL_LOOP_BACK)
-		bf_set(lpfc_mbx_set_diag_lpbk_type,
-		       &link_diag_loopback->u.req,
-		       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
-	else
-		bf_set(lpfc_mbx_set_diag_lpbk_type,
-		       &link_diag_loopback->u.req,
-		       LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
 
-	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
-	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
-		rc = -ENODEV;
-	else {
-		phba->link_flag |= LS_LOOPBACK_MODE;
+	if (!rc) {
 		/* wait for the link attention interrupt */
 		msleep(100);
 		i = 0;
+		while (phba->link_state < LPFC_LINK_UP) {
+			if (i++ > timeout) {
+				rc = -ETIMEDOUT;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3137 Timeout waiting for link up "
+					"in loopback mode, timeout:%d ms\n",
+					timeout * 10);
+				break;
+			}
+			msleep(10);
+		}
+	}
+
+	/* port resource registration setup for loopback diagnostic */
+	if (!rc) {
+		/* set up a none zero myDID for loopback test */
+		phba->pport->fc_myDID = 1;
+		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
+	} else
+		goto loopback_mode_exit;
+
+	if (!rc) {
+		/* wait for the port ready */
+		msleep(100);
+		i = 0;
 		while (phba->link_state != LPFC_HBA_READY) {
 			if (i++ > timeout) {
 				rc = -ETIMEDOUT;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3133 Timeout waiting for port "
+					"loopback mode ready, timeout:%d ms\n",
+					timeout * 10);
 				break;
 			}
 			msleep(10);
@@ -1828,14 +1943,14 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 	}
 
 loopback_mode_exit:
+	/* clear loopback diagnostic mode */
+	if (rc) {
+		spin_lock_irq(&phba->hbalock);
+		phba->link_flag &= ~LS_LOOPBACK_MODE;
+		spin_unlock_irq(&phba->hbalock);
+	}
 	lpfc_bsg_diag_mode_exit(phba);
 
-	/*
-	 * Let SLI layer release mboxq if mbox command completed after timeout.
-	 */
-	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
-		mempool_free(pmboxq, phba->mbox_mem_pool);
-
 job_error:
 	/* make error code available to userspace */
 	job->reply->result = rc;
@@ -1879,7 +1994,6 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
 		rc = -ENODEV;
 
 	return rc;
-
 }
 
 /**
@@ -1895,7 +2009,9 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
 	struct Scsi_Host *shost;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
-	int rc;
+	struct diag_mode_set *loopback_mode_end_cmd;
+	uint32_t timeout;
+	int rc, i;
 
 	shost = job->shost;
 	if (!shost)
@@ -1913,11 +2029,47 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
 	    LPFC_SLI_INTF_IF_TYPE_2)
 		return -ENODEV;
 
+	/* clear loopback diagnostic mode */
+	spin_lock_irq(&phba->hbalock);
+	phba->link_flag &= ~LS_LOOPBACK_MODE;
+	spin_unlock_irq(&phba->hbalock);
+	loopback_mode_end_cmd = (struct diag_mode_set *)
+			job->request->rqst_data.h_vendor.vendor_cmd;
+	timeout = loopback_mode_end_cmd->timeout * 100;
+
 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3139 Failed to bring link to diagnostic "
+				"state, rc:x%x\n", rc);
+		goto loopback_mode_end_exit;
+	}
 
-	if (!rc)
-		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+	/* wait for link down before proceeding */
+	i = 0;
+	while (phba->link_state != LPFC_LINK_DOWN) {
+		if (i++ > timeout) {
+			rc = -ETIMEDOUT;
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3140 Timeout waiting for link to "
+					"diagnostic mode_end, timeout:%d ms\n",
+					timeout * 10);
+			/* there is nothing much we can do here */
+			break;
+		}
+		msleep(10);
+	}
+
+	/* reset port resource registrations */
+	rc = lpfc_selective_reset(phba);
+	phba->pport->fc_myDID = 0;
 
+loopback_mode_end_exit:
+	/* make return code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		job->job_done(job);
 	return rc;
 }
 
@@ -2012,9 +2164,9 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
 	}
 	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
 	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
-	       phba->sli4_hba.link_state.number);
+	       phba->sli4_hba.lnk_info.lnk_no);
 	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
-	       phba->sli4_hba.link_state.type);
+	       phba->sli4_hba.lnk_info.lnk_tp);
 	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
 	       link_diag_test_cmd->test_id);
 	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
@@ -2091,10 +2243,18 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
 	if (!mbox)
 		return -ENOMEM;
 
-	if (phba->sli_rev == LPFC_SLI_REV4)
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
+				(uint8_t *)&phba->pport->fc_sparam,
+				mbox, *rpi);
+	else {
 		*rpi = lpfc_sli4_alloc_rpi(phba);
-	status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
-			(uint8_t *)&phba->pport->fc_sparam, mbox, *rpi);
+		status = lpfc_reg_rpi(phba, phba->pport->vpi,
+				phba->pport->fc_myDID,
+				(uint8_t *)&phba->pport->fc_sparam,
+				mbox, *rpi);
+	}
+
 	if (status) {
 		mempool_free(mbox, phba->mbox_mem_pool);
 		if (phba->sli_rev == LPFC_SLI_REV4)
@@ -2117,7 +2277,8 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
 		return -ENODEV;
 	}
 
-	*rpi = mbox->u.mb.un.varWords[0];
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		*rpi = mbox->u.mb.un.varWords[0];
 
 	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
 	kfree(dmabuff);
@@ -2142,7 +2303,12 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
 	if (mbox == NULL)
 		return -ENOMEM;
 
-	lpfc_unreg_login(phba, 0, rpi, mbox);
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		lpfc_unreg_login(phba, 0, rpi, mbox);
+	else
+		lpfc_unreg_login(phba, phba->pport->vpi,
+				 phba->sli4_hba.rpi_ids[rpi], mbox);
+
 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
 
 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -2630,15 +2796,15 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	uint32_t full_size;
 	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
 	uint16_t rpi = 0;
-	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
-	IOCB_t *cmd, *rsp;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
+	IOCB_t *cmd, *rsp = NULL;
 	struct lpfc_sli_ct_request *ctreq;
 	struct lpfc_dmabuf *txbmp;
 	struct ulp_bde64 *txbpl = NULL;
 	struct lpfc_dmabufext *txbuffer = NULL;
 	struct list_head head;
 	struct lpfc_dmabuf *curr;
-	uint16_t txxri, rxxri;
+	uint16_t txxri = 0, rxxri;
 	uint32_t num_bde;
 	uint8_t *ptr = NULL, *rx_databuf = NULL;
 	int rc = 0;
@@ -2665,7 +2831,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 		rc = -EINVAL;
 		goto loopback_test_exit;
 	}
-
 	diag_mode = (struct diag_mode_test *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
 
@@ -2720,18 +2885,19 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	if (rc)
 		goto loopback_test_exit;
 
-	rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
-	if (rc) {
-		lpfcdiag_loop_self_unreg(phba, rpi);
-		goto loopback_test_exit;
-	}
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
+		if (rc) {
+			lpfcdiag_loop_self_unreg(phba, rpi);
+			goto loopback_test_exit;
+		}
 
-	rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
-	if (rc) {
-		lpfcdiag_loop_self_unreg(phba, rpi);
-		goto loopback_test_exit;
+		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+		if (rc) {
+			lpfcdiag_loop_self_unreg(phba, rpi);
+			goto loopback_test_exit;
+		}
 	}
-
 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
 				SLI_CT_ELX_LOOPBACK);
 	if (!evt) {
@@ -2746,7 +2912,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
 	cmdiocbq = lpfc_sli_get_iocbq(phba);
-	rspiocbq = lpfc_sli_get_iocbq(phba);
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		rspiocbq = lpfc_sli_get_iocbq(phba);
 	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 
 	if (txbmp) {
@@ -2759,14 +2926,18 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 		}
 	}
 
-	if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
-	    !txbmp->virt) {
+	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
+		rc = -ENOMEM;
+		goto err_loopback_test_exit;
+	}
+	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
 		rc = -ENOMEM;
 		goto err_loopback_test_exit;
 	}
 
 	cmd = &cmdiocbq->iocb;
-	rsp = &rspiocbq->iocb;
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		rsp = &rspiocbq->iocb;
 
 	INIT_LIST_HEAD(&head);
 	list_add_tail(&head, &txbuffer->dma.list);
@@ -2796,7 +2967,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	list_del(&head);
 
 	/* Build the XMIT_SEQUENCE iocb */
-
 	num_bde = (uint32_t)txbuffer->flag;
 
 	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
@@ -2813,16 +2983,27 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	cmd->ulpBdeCount = 1;
 	cmd->ulpLe = 1;
 	cmd->ulpClass = CLASS3;
-	cmd->ulpContext = txxri;
 
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		cmd->ulpContext = txxri;
+	} else {
+		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
+		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
+		cmdiocbq->context3 = txbmp;
+		cmdiocbq->sli4_xritag = NO_XRI;
+		cmd->unsli3.rcvsli3.ox_id = 0xffff;
+	}
 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
 	cmdiocbq->vport = phba->pport;
-
 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 					     rspiocbq, (phba->fc_ratov * 2) +
 					     LPFC_DRVR_TIMEOUT);
 
-	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
+	if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
+	    (rsp->ulpStatus != IOCB_SUCCESS))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3126 Failed loopback test issue iocb: "
+				"iocb_stat:x%x\n", iocb_stat);
 		rc = -EIO;
 		goto err_loopback_test_exit;
 	}
@@ -2832,9 +3013,12 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 		evt->wq, !list_empty(&evt->events_to_see),
 		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
 	evt->waiting = 0;
-	if (list_empty(&evt->events_to_see))
+	if (list_empty(&evt->events_to_see)) {
 		rc = (time_left) ? -EINTR : -ETIMEDOUT;
-	else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3125 Not receiving unsolicited event, "
+				"rc:x%x\n", rc);
+	} else {
 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
 		list_move(evt->events_to_see.prev, &evt->events_to_get);
 		evdat = list_entry(evt->events_to_get.prev,
@@ -2891,7 +3075,7 @@ loopback_test_exit:
 	job->reply->result = rc;
 	job->dd_data = NULL;
 	/* complete the job back to userspace if no error */
-	if (rc == 0)
+	if (rc == IOCB_SUCCESS)
 		job->job_done(job);
 	return rc;
 }
@@ -3078,7 +3262,9 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
 		    && (mb->un.varWords[1] == 1)) {
 			phba->wait_4_mlo_maint_flg = 1;
 		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
+			spin_lock_irq(&phba->hbalock);
 			phba->link_flag &= ~LS_LOOPBACK_MODE;
+			spin_unlock_irq(&phba->hbalock);
 			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
 		}
 		break;
@@ -3140,6 +3326,9 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	unsigned long flags;
 	uint32_t size;
 	int rc = 0;
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	uint8_t *pmbx;
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = pmboxq->context1;
@@ -3156,7 +3345,19 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	 */
 	pmb = (uint8_t *)&pmboxq->u.mb;
 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+	/* Copy the byte swapped response mailbox back to the user */
 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+	/* if there is any non-embedded extended data copy that too */
+	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+		pmbx = (uint8_t *)dmabuf->virt;
+		/* byte swap the extended data following the mailbox command */
+		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+			&pmbx[sizeof(MAILBOX_t)],
+			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+	}
 
 	job = dd_data->context_un.mbox.set_job;
 	if (job) {
@@ -3519,6 +3720,18 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3519 /* state change */ 3720 /* state change */
3520 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; 3721 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3521 3722
3723 /*
3724 * Non-embedded mailbox subcommand data gets byte swapped here because
3725 * the lower level driver code only does the first 64 mailbox words.
3726 */
3727 if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3728 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3729 (nemb_tp == nemb_mse))
3730 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3731 &pmbx[sizeof(MAILBOX_t)],
3732 sli_cfg_mbx->un.sli_config_emb0_subsys.
3733 mse[0].buf_len);
3734
3522 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 3735 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3523 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { 3736 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3524 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3737 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3575,7 +3788,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3575 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr); 3788 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3576 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) { 3789 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3577 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3790 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3578 "2953 Handled SLI_CONFIG(mse) wr, " 3791 "2953 Failed SLI_CONFIG(mse) wr, "
3579 "ext_buf_cnt(%d) out of range(%d)\n", 3792 "ext_buf_cnt(%d) out of range(%d)\n",
3580 ext_buf_cnt, 3793 ext_buf_cnt,
3581 LPFC_MBX_SLI_CONFIG_MAX_MSE); 3794 LPFC_MBX_SLI_CONFIG_MAX_MSE);
@@ -3593,7 +3806,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3593 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count; 3806 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3594 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) { 3807 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3595 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3808 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3596 "2954 Handled SLI_CONFIG(hbd) wr, " 3809 "2954 Failed SLI_CONFIG(hbd) wr, "
3597 "ext_buf_cnt(%d) out of range(%d)\n", 3810 "ext_buf_cnt(%d) out of range(%d)\n",
3598 ext_buf_cnt, 3811 ext_buf_cnt,
3599 LPFC_MBX_SLI_CONFIG_MAX_HBD); 3812 LPFC_MBX_SLI_CONFIG_MAX_HBD);
@@ -3687,6 +3900,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3687 "2956 Failed to issue SLI_CONFIG ext-buffer " 3900 "2956 Failed to issue SLI_CONFIG ext-buffer "
3688 "maibox command, rc:x%x\n", rc); 3901 "maibox command, rc:x%x\n", rc);
3689 rc = -EPIPE; 3902 rc = -EPIPE;
3903 goto job_error;
3690 } 3904 }
3691 3905
3692 /* wait for additional external buffers */ 3906 /* wait for additional external buffers */
@@ -3721,7 +3935,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3721 uint32_t opcode; 3935 uint32_t opcode;
3722 int rc = SLI_CONFIG_NOT_HANDLED; 3936 int rc = SLI_CONFIG_NOT_HANDLED;
3723 3937
3724 /* state change */ 3938 /* state change on new multi-buffer pass-through mailbox command */
3725 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST; 3939 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
3726 3940
3727 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 3941 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3752,18 +3966,36 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3752 break; 3966 break;
3753 default: 3967 default:
3754 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3968 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3755 "2959 Not handled SLI_CONFIG " 3969 "2959 Reject SLI_CONFIG "
3756 "subsys_fcoe, opcode:x%x\n", 3970 "subsys_fcoe, opcode:x%x\n",
3757 opcode); 3971 opcode);
3758 rc = SLI_CONFIG_NOT_HANDLED; 3972 rc = -EPERM;
3973 break;
3974 }
3975 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
3976 switch (opcode) {
3977 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
3978 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3979 "3106 Handled SLI_CONFIG "
3980 "subsys_fcoe, opcode:x%x\n",
3981 opcode);
3982 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3983 nemb_mse, dmabuf);
3984 break;
3985 default:
3986 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3987 "3107 Reject SLI_CONFIG "
3988 "subsys_fcoe, opcode:x%x\n",
3989 opcode);
3990 rc = -EPERM;
3759 break; 3991 break;
3760 } 3992 }
3761 } else { 3993 } else {
3762 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3994 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3763 "2977 Handled SLI_CONFIG " 3995 "2977 Reject SLI_CONFIG "
3764 "subsys:x%d, opcode:x%x\n", 3996 "subsys:x%d, opcode:x%x\n",
3765 subsys, opcode); 3997 subsys, opcode);
3766 rc = SLI_CONFIG_NOT_HANDLED; 3998 rc = -EPERM;
3767 } 3999 }
3768 } else { 4000 } else {
3769 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys, 4001 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
@@ -3799,12 +4031,17 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3799 } 4031 }
3800 } else { 4032 } else {
3801 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4033 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3802 "2978 Handled SLI_CONFIG " 4034 "2978 Not handled SLI_CONFIG "
3803 "subsys:x%d, opcode:x%x\n", 4035 "subsys:x%d, opcode:x%x\n",
3804 subsys, opcode); 4036 subsys, opcode);
3805 rc = SLI_CONFIG_NOT_HANDLED; 4037 rc = SLI_CONFIG_NOT_HANDLED;
3806 } 4038 }
3807 } 4039 }
4040
4041 /* reset state when the new multi-buffer mailbox command is not handled */
4042 if (rc != SLI_CONFIG_HANDLED)
4043 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4044
3808 return rc; 4045 return rc;
3809} 4046}
3810 4047
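
The net effect of the rewritten dispatch is twofold: unsupported subsystem/opcode combinations now fail hard with -EPERM instead of being silently passed on, and the added tail makes sure the multi-buffer context returns to LPFC_BSG_MBOX_IDLE whenever the command is not claimed, so a rejected command cannot wedge the state machine. A toy model of that invariant (illustrative names and values):

#include <stdio.h>

enum mbox_state { MBOX_IDLE, MBOX_HOST, MBOX_PORT };

#define SLI_CONFIG_HANDLED      1
#define SLI_CONFIG_NOT_HANDLED  0

static enum mbox_state state = MBOX_IDLE;

/* Handler moves the context out of IDLE on entry and must put it
 * back if it declines the command. */
static int handle_cmd(int claim)
{
        int rc;

        state = MBOX_HOST;              /* state change on entry */
        rc = claim ? SLI_CONFIG_HANDLED : SLI_CONFIG_NOT_HANDLED;
        if (rc != SLI_CONFIG_HANDLED)
                state = MBOX_IDLE;      /* reset on the way out */
        return rc;
}

int main(void)
{
        handle_cmd(0);
        printf("state after unhandled cmd: %d (expect %d)\n",
               state, MBOX_IDLE);
        return 0;
}
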
@@ -4262,11 +4499,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4262 4499
4263 /* extended mailbox commands will need an extended buffer */ 4500 /* extended mailbox commands will need an extended buffer */
4264 if (mbox_req->inExtWLen || mbox_req->outExtWLen) { 4501 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4265 /* any data for the device? */ 4502 from = pmbx;
4266 if (mbox_req->inExtWLen) { 4503 ext = from + sizeof(MAILBOX_t);
4267 from = pmbx;
4268 ext = from + sizeof(MAILBOX_t);
4269 }
4270 pmboxq->context2 = ext; 4504 pmboxq->context2 = ext;
4271 pmboxq->in_ext_byte_len = 4505 pmboxq->in_ext_byte_len =
4272 mbox_req->inExtWLen * sizeof(uint32_t); 4506 mbox_req->inExtWLen * sizeof(uint32_t);
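
The simplification at the end works because the BSG request buffer is one contiguous allocation, a MAILBOX_t followed directly by any extension words, so the extension pointer can be computed unconditionally whichever direction the data flows. A sketch of the layout arithmetic (the 64-word mailbox size is an assumption here):

#include <stdint.h>
#include <stdio.h>

#define MAILBOX_WORDS 64 /* assumed classic 64-word mailbox */

struct mailbox { uint32_t words[MAILBOX_WORDS]; };

int main(void)
{
        uint8_t buf[sizeof(struct mailbox) + 128];
        uint8_t *from = buf;
        /* extension area starts right after the fixed mailbox */
        uint8_t *ext = from + sizeof(struct mailbox);

        printf("extension starts at offset %zu\n",
               (size_t)(ext - from)); /* 256 */
        return 0;
}
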
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index c8c2b47ea886..edfe61fc52b1 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -96,7 +96,7 @@ struct get_mgmt_rev {
96}; 96};
97 97
98#define MANAGEMENT_MAJOR_REV 1 98#define MANAGEMENT_MAJOR_REV 1
99#define MANAGEMENT_MINOR_REV 0 99#define MANAGEMENT_MINOR_REV 1
100 100
101/* the MgmtRevInfo structure */ 101/* the MgmtRevInfo structure */
102struct MgmtRevInfo { 102struct MgmtRevInfo {
@@ -248,6 +248,7 @@ struct lpfc_sli_config_emb1_subsys {
248#define COMN_OPCODE_WRITE_OBJECT 0xAC 248#define COMN_OPCODE_WRITE_OBJECT 0xAC
249#define COMN_OPCODE_READ_OBJECT_LIST 0xAD 249#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
250#define COMN_OPCODE_DELETE_OBJECT 0xAE 250#define COMN_OPCODE_DELETE_OBJECT 0xAE
251#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79
251 uint32_t timeout; 252 uint32_t timeout;
252 uint32_t request_length; 253 uint32_t request_length;
253 uint32_t word9; 254 uint32_t word9;
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
index 75e2e569dede..c88e556ea62e 100644
--- a/drivers/scsi/lpfc/lpfc_compat.h
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2005 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -82,7 +82,8 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
82static inline void 82static inline void
83lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes) 83lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
84{ 84{
85 __iowrite32_copy(dest, src, bytes); 85 /* convert bytes in argument list to word count for copy function */
86 __iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
86} 87}
87 88
88static inline void 89static inline void
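
This one-line lpfc_compat.h change is easy to miss but significant: __iowrite32_copy takes a count of 32-bit words, while lpfc_memcpy_to_slim receives a byte count, so the old code copied four times the intended amount into SLIM. A userspace model of the corrected conversion (simulated copy routine, illustrative names):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for __iowrite32_copy: third argument is 32-bit words. */
static void iowrite32_copy_sim(void *dest, const void *src, size_t words)
{
        memcpy(dest, src, words * sizeof(uint32_t));
}

/* The fixed wrapper: callers pass bytes, so divide before delegating.
 * Passing bytes straight through (the old code) copies 4x too much. */
static void memcpy_to_slim(void *dest, const void *src, size_t bytes)
{
        iowrite32_copy_sim(dest, src, bytes / sizeof(uint32_t));
}

int main(void)
{
        uint32_t src[4] = {1, 2, 3, 4}, dst[4] = {0};
        memcpy_to_slim(dst, src, sizeof(src));
        printf("dst[3] = %u\n", (unsigned)dst[3]); /* 4: all 16 bytes copied */
        return 0;
}
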
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 60f95347babf..26924b7a6cde 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -26,7 +26,7 @@ void lpfc_sli_read_link_ste(struct lpfc_hba *);
26void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t); 26void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
27void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); 27void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
28int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 28int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
29int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *); 29int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
30void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 30void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
31void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 31void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
32 32
@@ -78,6 +78,7 @@ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
78void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 78void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
79void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 79void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
80void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *); 80void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
81void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
81void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); 82void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
82void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); 83void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
83struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, 84struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -106,7 +107,7 @@ void lpfc_cleanup(struct lpfc_vport *);
106void lpfc_disc_timeout(unsigned long); 107void lpfc_disc_timeout(unsigned long);
107 108
108struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); 109struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
109 110struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
110void lpfc_worker_wake_up(struct lpfc_hba *); 111void lpfc_worker_wake_up(struct lpfc_hba *);
111int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t); 112int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
112int lpfc_do_work(void *); 113int lpfc_do_work(void *);
@@ -453,3 +454,11 @@ int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
453uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *); 454uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
454int lpfc_sli4_queue_create(struct lpfc_hba *); 455int lpfc_sli4_queue_create(struct lpfc_hba *);
455void lpfc_sli4_queue_destroy(struct lpfc_hba *); 456void lpfc_sli4_queue_destroy(struct lpfc_hba *);
457void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
458 struct sli4_wcqe_xri_aborted *);
459int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
460int lpfc_issue_reg_vfi(struct lpfc_vport *);
461int lpfc_issue_unreg_vfi(struct lpfc_vport *);
462int lpfc_selective_reset(struct lpfc_hba *);
463int lpfc_sli4_read_config(struct lpfc_hba *phba);
464int lpfc_scsi_buf_update(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 28382596fb9a..3587a3fe8fcb 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1997,7 +1997,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1997 /* Get slow-path event queue information */ 1997 /* Get slow-path event queue information */
1998 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1998 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1999 "Slow-path EQ information:\n"); 1999 "Slow-path EQ information:\n");
2000 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2000 if (phba->sli4_hba.sp_eq) {
2001 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2001 "\tEQID[%02d], " 2002 "\tEQID[%02d], "
2002 "QE-COUNT[%04d], QE-SIZE[%04d], " 2003 "QE-COUNT[%04d], QE-SIZE[%04d], "
2003 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2004 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2006,12 +2007,17 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2006 phba->sli4_hba.sp_eq->entry_size, 2007 phba->sli4_hba.sp_eq->entry_size,
2007 phba->sli4_hba.sp_eq->host_index, 2008 phba->sli4_hba.sp_eq->host_index,
2008 phba->sli4_hba.sp_eq->hba_index); 2009 phba->sli4_hba.sp_eq->hba_index);
2010 }
2009 2011
2010 /* Get fast-path event queue information */ 2012 /* Get fast-path event queue information */
2011 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2013 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2012 "Fast-path EQ information:\n"); 2014 "Fast-path EQ information:\n");
2013 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { 2015 if (phba->sli4_hba.fp_eq) {
2014 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2016 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
2017 fcp_qidx++) {
2018 if (phba->sli4_hba.fp_eq[fcp_qidx]) {
2019 len += snprintf(pbuffer+len,
2020 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2015 "\tEQID[%02d], " 2021 "\tEQID[%02d], "
2016 "QE-COUNT[%04d], QE-SIZE[%04d], " 2022 "QE-COUNT[%04d], QE-SIZE[%04d], "
2017 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2023 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2020,16 +2026,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2020 phba->sli4_hba.fp_eq[fcp_qidx]->entry_size, 2026 phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
2021 phba->sli4_hba.fp_eq[fcp_qidx]->host_index, 2027 phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
2022 phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); 2028 phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
2029 }
2030 }
2023 } 2031 }
2024 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 2032 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2025 2033
2026 /* Get mailbox complete queue information */ 2034 /* Get mailbox complete queue information */
2027 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2035 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2028 "Slow-path MBX CQ information:\n"); 2036 "Slow-path MBX CQ information:\n");
2029 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2037 if (phba->sli4_hba.mbx_cq) {
2038 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2030 "Associated EQID[%02d]:\n", 2039 "Associated EQID[%02d]:\n",
2031 phba->sli4_hba.mbx_cq->assoc_qid); 2040 phba->sli4_hba.mbx_cq->assoc_qid);
2032 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2041 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2033 "\tCQID[%02d], " 2042 "\tCQID[%02d], "
2034 "QE-COUNT[%04d], QE-SIZE[%04d], " 2043 "QE-COUNT[%04d], QE-SIZE[%04d], "
2035 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2044 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2038,14 +2047,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2038 phba->sli4_hba.mbx_cq->entry_size, 2047 phba->sli4_hba.mbx_cq->entry_size,
2039 phba->sli4_hba.mbx_cq->host_index, 2048 phba->sli4_hba.mbx_cq->host_index,
2040 phba->sli4_hba.mbx_cq->hba_index); 2049 phba->sli4_hba.mbx_cq->hba_index);
2050 }
2041 2051
2042 /* Get slow-path complete queue information */ 2052 /* Get slow-path complete queue information */
2043 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2053 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2044 "Slow-path ELS CQ information:\n"); 2054 "Slow-path ELS CQ information:\n");
2045 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2055 if (phba->sli4_hba.els_cq) {
2056 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2046 "Associated EQID[%02d]:\n", 2057 "Associated EQID[%02d]:\n",
2047 phba->sli4_hba.els_cq->assoc_qid); 2058 phba->sli4_hba.els_cq->assoc_qid);
2048 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2059 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2049 "\tCQID [%02d], " 2060 "\tCQID [%02d], "
2050 "QE-COUNT[%04d], QE-SIZE[%04d], " 2061 "QE-COUNT[%04d], QE-SIZE[%04d], "
2051 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2062 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2054,16 +2065,21 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2054 phba->sli4_hba.els_cq->entry_size, 2065 phba->sli4_hba.els_cq->entry_size,
2055 phba->sli4_hba.els_cq->host_index, 2066 phba->sli4_hba.els_cq->host_index,
2056 phba->sli4_hba.els_cq->hba_index); 2067 phba->sli4_hba.els_cq->hba_index);
2068 }
2057 2069
2058 /* Get fast-path complete queue information */ 2070 /* Get fast-path complete queue information */
2059 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2071 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2060 "Fast-path FCP CQ information:\n"); 2072 "Fast-path FCP CQ information:\n");
2061 fcp_qidx = 0; 2073 fcp_qidx = 0;
2062 do { 2074 if (phba->sli4_hba.fcp_cq) {
2063 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2075 do {
2076 if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
2077 len += snprintf(pbuffer+len,
2078 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2064 "Associated EQID[%02d]:\n", 2079 "Associated EQID[%02d]:\n",
2065 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); 2080 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
2066 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2081 len += snprintf(pbuffer+len,
2082 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2067 "\tCQID[%02d], " 2083 "\tCQID[%02d], "
2068 "QE-COUNT[%04d], QE-SIZE[%04d], " 2084 "QE-COUNT[%04d], QE-SIZE[%04d], "
2069 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2085 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2072,16 +2088,20 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2072 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size, 2088 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
2073 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, 2089 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
2074 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); 2090 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
2075 } while (++fcp_qidx < phba->cfg_fcp_eq_count); 2091 }
2076 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 2092 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
2093 len += snprintf(pbuffer+len,
2094 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2095 }
2077 2096
2078 /* Get mailbox queue information */ 2097 /* Get mailbox queue information */
2079 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2098 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2080 "Slow-path MBX MQ information:\n"); 2099 "Slow-path MBX MQ information:\n");
2081 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2100 if (phba->sli4_hba.mbx_wq) {
2101 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2082 "Associated CQID[%02d]:\n", 2102 "Associated CQID[%02d]:\n",
2083 phba->sli4_hba.mbx_wq->assoc_qid); 2103 phba->sli4_hba.mbx_wq->assoc_qid);
2084 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2104 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2085 "\tWQID[%02d], " 2105 "\tWQID[%02d], "
2086 "QE-COUNT[%04d], QE-SIZE[%04d], " 2106 "QE-COUNT[%04d], QE-SIZE[%04d], "
2087 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2107 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2090,14 +2110,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2090 phba->sli4_hba.mbx_wq->entry_size, 2110 phba->sli4_hba.mbx_wq->entry_size,
2091 phba->sli4_hba.mbx_wq->host_index, 2111 phba->sli4_hba.mbx_wq->host_index,
2092 phba->sli4_hba.mbx_wq->hba_index); 2112 phba->sli4_hba.mbx_wq->hba_index);
2113 }
2093 2114
2094 /* Get slow-path work queue information */ 2115 /* Get slow-path work queue information */
2095 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2116 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2096 "Slow-path ELS WQ information:\n"); 2117 "Slow-path ELS WQ information:\n");
2097 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2118 if (phba->sli4_hba.els_wq) {
2119 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2098 "Associated CQID[%02d]:\n", 2120 "Associated CQID[%02d]:\n",
2099 phba->sli4_hba.els_wq->assoc_qid); 2121 phba->sli4_hba.els_wq->assoc_qid);
2100 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2122 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2101 "\tWQID[%02d], " 2123 "\tWQID[%02d], "
2102 "QE-COUNT[%04d], QE-SIZE[%04d], " 2124 "QE-COUNT[%04d], QE-SIZE[%04d], "
2103 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2125 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2106,15 +2128,22 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2106 phba->sli4_hba.els_wq->entry_size, 2128 phba->sli4_hba.els_wq->entry_size,
2107 phba->sli4_hba.els_wq->host_index, 2129 phba->sli4_hba.els_wq->host_index,
2108 phba->sli4_hba.els_wq->hba_index); 2130 phba->sli4_hba.els_wq->hba_index);
2131 }
2109 2132
2110 /* Get fast-path work queue information */ 2133 /* Get fast-path work queue information */
2111 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2134 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2112 "Fast-path FCP WQ information:\n"); 2135 "Fast-path FCP WQ information:\n");
2113 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) { 2136 if (phba->sli4_hba.fcp_wq) {
2114 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2137 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
2138 fcp_qidx++) {
2139 if (!phba->sli4_hba.fcp_wq[fcp_qidx])
2140 continue;
2141 len += snprintf(pbuffer+len,
2142 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2115 "Associated CQID[%02d]:\n", 2143 "Associated CQID[%02d]:\n",
2116 phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid); 2144 phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
2117 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2145 len += snprintf(pbuffer+len,
2146 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2118 "\tWQID[%02d], " 2147 "\tWQID[%02d], "
2119 "QE-COUNT[%04d], WQE-SIZE[%04d], " 2148 "QE-COUNT[%04d], WQE-SIZE[%04d], "
2120 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2149 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2123,16 +2152,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2123 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size, 2152 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
2124 phba->sli4_hba.fcp_wq[fcp_qidx]->host_index, 2153 phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
2125 phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index); 2154 phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
2155 }
2156 len += snprintf(pbuffer+len,
2157 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2126 } 2158 }
2127 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2128 2159
2129 /* Get receive queue information */ 2160 /* Get receive queue information */
2130 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2161 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2131 "Slow-path RQ information:\n"); 2162 "Slow-path RQ information:\n");
2132 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2163 if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
2164 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2133 "Associated CQID[%02d]:\n", 2165 "Associated CQID[%02d]:\n",
2134 phba->sli4_hba.hdr_rq->assoc_qid); 2166 phba->sli4_hba.hdr_rq->assoc_qid);
2135 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2167 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2136 "\tHQID[%02d], " 2168 "\tHQID[%02d], "
2137 "QE-COUNT[%04d], QE-SIZE[%04d], " 2169 "QE-COUNT[%04d], QE-SIZE[%04d], "
2138 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2170 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2141,7 +2173,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2141 phba->sli4_hba.hdr_rq->entry_size, 2173 phba->sli4_hba.hdr_rq->entry_size,
2142 phba->sli4_hba.hdr_rq->host_index, 2174 phba->sli4_hba.hdr_rq->host_index,
2143 phba->sli4_hba.hdr_rq->hba_index); 2175 phba->sli4_hba.hdr_rq->hba_index);
2144 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2176 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2145 "\tDQID[%02d], " 2177 "\tDQID[%02d], "
2146 "QE-COUNT[%04d], QE-SIZE[%04d], " 2178 "QE-COUNT[%04d], QE-SIZE[%04d], "
2147 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2179 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2150,7 +2182,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2150 phba->sli4_hba.dat_rq->entry_size, 2182 phba->sli4_hba.dat_rq->entry_size,
2151 phba->sli4_hba.dat_rq->host_index, 2183 phba->sli4_hba.dat_rq->host_index,
2152 phba->sli4_hba.dat_rq->hba_index); 2184 phba->sli4_hba.dat_rq->hba_index);
2153 2185 }
2154 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); 2186 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
2155} 2187}
2156 2188
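
All of the lpfc_idiag_queinfo_read hunks apply one defensive pattern: any SLI4 queue pointer may still be NULL if queue setup failed or has not completed, so each dump block now tests the pointer before dereferencing it while keeping the usual snprintf(buf+len, size-len, ...) accumulation. A small sketch of that shape (hypothetical struct, truncation handling elided):

#include <stdio.h>

#define BUF_SIZE 256

struct queue { int queue_id; int entry_count; };

/* Skip queues that were never created instead of crashing. */
static int dump_queue(char *buf, int len, const char *name,
                      const struct queue *q)
{
        if (!q)
                return len;
        len += snprintf(buf + len, BUF_SIZE - len,
                        "%s: QID[%02d], QE-COUNT[%04d]\n",
                        name, q->queue_id, q->entry_count);
        return len;
}

int main(void)
{
        char buf[BUF_SIZE];
        struct queue els = { 3, 256 };
        int len = 0;

        len = dump_queue(buf, len, "ELS WQ", &els);
        len = dump_queue(buf, len, "MBX WQ", NULL); /* safely skipped */
        fputs(buf, stdout);
        return 0;
}
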
@@ -2360,7 +2392,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2360 switch (quetp) { 2392 switch (quetp) {
2361 case LPFC_IDIAG_EQ: 2393 case LPFC_IDIAG_EQ:
2362 /* Slow-path event queue */ 2394 /* Slow-path event queue */
2363 if (phba->sli4_hba.sp_eq->queue_id == queid) { 2395 if (phba->sli4_hba.sp_eq &&
2396 phba->sli4_hba.sp_eq->queue_id == queid) {
2364 /* Sanity check */ 2397 /* Sanity check */
2365 rc = lpfc_idiag_que_param_check( 2398 rc = lpfc_idiag_que_param_check(
2366 phba->sli4_hba.sp_eq, index, count); 2399 phba->sli4_hba.sp_eq, index, count);
@@ -2370,23 +2403,29 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2370 goto pass_check; 2403 goto pass_check;
2371 } 2404 }
2372 /* Fast-path event queue */ 2405 /* Fast-path event queue */
2373 for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) { 2406 if (phba->sli4_hba.fp_eq) {
2374 if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) { 2407 for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
2375 /* Sanity check */ 2408 if (phba->sli4_hba.fp_eq[qidx] &&
2376 rc = lpfc_idiag_que_param_check( 2409 phba->sli4_hba.fp_eq[qidx]->queue_id ==
2410 queid) {
2411 /* Sanity check */
2412 rc = lpfc_idiag_que_param_check(
2377 phba->sli4_hba.fp_eq[qidx], 2413 phba->sli4_hba.fp_eq[qidx],
2378 index, count); 2414 index, count);
2379 if (rc) 2415 if (rc)
2380 goto error_out; 2416 goto error_out;
2381 idiag.ptr_private = phba->sli4_hba.fp_eq[qidx]; 2417 idiag.ptr_private =
2382 goto pass_check; 2418 phba->sli4_hba.fp_eq[qidx];
2419 goto pass_check;
2420 }
2383 } 2421 }
2384 } 2422 }
2385 goto error_out; 2423 goto error_out;
2386 break; 2424 break;
2387 case LPFC_IDIAG_CQ: 2425 case LPFC_IDIAG_CQ:
2388 /* MBX complete queue */ 2426 /* MBX complete queue */
2389 if (phba->sli4_hba.mbx_cq->queue_id == queid) { 2427 if (phba->sli4_hba.mbx_cq &&
2428 phba->sli4_hba.mbx_cq->queue_id == queid) {
2390 /* Sanity check */ 2429 /* Sanity check */
2391 rc = lpfc_idiag_que_param_check( 2430 rc = lpfc_idiag_que_param_check(
2392 phba->sli4_hba.mbx_cq, index, count); 2431 phba->sli4_hba.mbx_cq, index, count);
@@ -2396,7 +2435,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2396 goto pass_check; 2435 goto pass_check;
2397 } 2436 }
2398 /* ELS complete queue */ 2437 /* ELS complete queue */
2399 if (phba->sli4_hba.els_cq->queue_id == queid) { 2438 if (phba->sli4_hba.els_cq &&
2439 phba->sli4_hba.els_cq->queue_id == queid) {
2400 /* Sanity check */ 2440 /* Sanity check */
2401 rc = lpfc_idiag_que_param_check( 2441 rc = lpfc_idiag_que_param_check(
2402 phba->sli4_hba.els_cq, index, count); 2442 phba->sli4_hba.els_cq, index, count);
@@ -2406,25 +2446,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2406 goto pass_check; 2446 goto pass_check;
2407 } 2447 }
2408 /* FCP complete queue */ 2448 /* FCP complete queue */
2409 qidx = 0; 2449 if (phba->sli4_hba.fcp_cq) {
2410 do { 2450 qidx = 0;
2411 if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) { 2451 do {
2412 /* Sanity check */ 2452 if (phba->sli4_hba.fcp_cq[qidx] &&
2413 rc = lpfc_idiag_que_param_check( 2453 phba->sli4_hba.fcp_cq[qidx]->queue_id ==
2454 queid) {
2455 /* Sanity check */
2456 rc = lpfc_idiag_que_param_check(
2414 phba->sli4_hba.fcp_cq[qidx], 2457 phba->sli4_hba.fcp_cq[qidx],
2415 index, count); 2458 index, count);
2416 if (rc) 2459 if (rc)
2417 goto error_out; 2460 goto error_out;
2418 idiag.ptr_private = 2461 idiag.ptr_private =
2419 phba->sli4_hba.fcp_cq[qidx]; 2462 phba->sli4_hba.fcp_cq[qidx];
2420 goto pass_check; 2463 goto pass_check;
2421 } 2464 }
2422 } while (++qidx < phba->cfg_fcp_eq_count); 2465 } while (++qidx < phba->cfg_fcp_eq_count);
2466 }
2423 goto error_out; 2467 goto error_out;
2424 break; 2468 break;
2425 case LPFC_IDIAG_MQ: 2469 case LPFC_IDIAG_MQ:
2426 /* MBX work queue */ 2470 /* MBX work queue */
2427 if (phba->sli4_hba.mbx_wq->queue_id == queid) { 2471 if (phba->sli4_hba.mbx_wq &&
2472 phba->sli4_hba.mbx_wq->queue_id == queid) {
2428 /* Sanity check */ 2473 /* Sanity check */
2429 rc = lpfc_idiag_que_param_check( 2474 rc = lpfc_idiag_que_param_check(
2430 phba->sli4_hba.mbx_wq, index, count); 2475 phba->sli4_hba.mbx_wq, index, count);
@@ -2433,10 +2478,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2433 idiag.ptr_private = phba->sli4_hba.mbx_wq; 2478 idiag.ptr_private = phba->sli4_hba.mbx_wq;
2434 goto pass_check; 2479 goto pass_check;
2435 } 2480 }
2481 goto error_out;
2436 break; 2482 break;
2437 case LPFC_IDIAG_WQ: 2483 case LPFC_IDIAG_WQ:
2438 /* ELS work queue */ 2484 /* ELS work queue */
2439 if (phba->sli4_hba.els_wq->queue_id == queid) { 2485 if (phba->sli4_hba.els_wq &&
2486 phba->sli4_hba.els_wq->queue_id == queid) {
2440 /* Sanity check */ 2487 /* Sanity check */
2441 rc = lpfc_idiag_que_param_check( 2488 rc = lpfc_idiag_que_param_check(
2442 phba->sli4_hba.els_wq, index, count); 2489 phba->sli4_hba.els_wq, index, count);
@@ -2446,24 +2493,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2446 goto pass_check; 2493 goto pass_check;
2447 } 2494 }
2448 /* FCP work queue */ 2495 /* FCP work queue */
2449 for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) { 2496 if (phba->sli4_hba.fcp_wq) {
2450 if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) { 2497 for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
2451 /* Sanity check */ 2498 if (!phba->sli4_hba.fcp_wq[qidx])
2452 rc = lpfc_idiag_que_param_check( 2499 continue;
2500 if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
2501 queid) {
2502 /* Sanity check */
2503 rc = lpfc_idiag_que_param_check(
2453 phba->sli4_hba.fcp_wq[qidx], 2504 phba->sli4_hba.fcp_wq[qidx],
2454 index, count); 2505 index, count);
2455 if (rc) 2506 if (rc)
2456 goto error_out; 2507 goto error_out;
2457 idiag.ptr_private = 2508 idiag.ptr_private =
2458 phba->sli4_hba.fcp_wq[qidx]; 2509 phba->sli4_hba.fcp_wq[qidx];
2459 goto pass_check; 2510 goto pass_check;
2511 }
2460 } 2512 }
2461 } 2513 }
2462 goto error_out; 2514 goto error_out;
2463 break; 2515 break;
2464 case LPFC_IDIAG_RQ: 2516 case LPFC_IDIAG_RQ:
2465 /* HDR queue */ 2517 /* HDR queue */
2466 if (phba->sli4_hba.hdr_rq->queue_id == queid) { 2518 if (phba->sli4_hba.hdr_rq &&
2519 phba->sli4_hba.hdr_rq->queue_id == queid) {
2467 /* Sanity check */ 2520 /* Sanity check */
2468 rc = lpfc_idiag_que_param_check( 2521 rc = lpfc_idiag_que_param_check(
2469 phba->sli4_hba.hdr_rq, index, count); 2522 phba->sli4_hba.hdr_rq, index, count);
@@ -2473,7 +2526,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2473 goto pass_check; 2526 goto pass_check;
2474 } 2527 }
2475 /* DAT queue */ 2528 /* DAT queue */
2476 if (phba->sli4_hba.dat_rq->queue_id == queid) { 2529 if (phba->sli4_hba.dat_rq &&
2530 phba->sli4_hba.dat_rq->queue_id == queid) {
2477 /* Sanity check */ 2531 /* Sanity check */
2478 rc = lpfc_idiag_que_param_check( 2532 rc = lpfc_idiag_que_param_check(
2479 phba->sli4_hba.dat_rq, index, count); 2533 phba->sli4_hba.dat_rq, index, count);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 445826a4c981..7afc757338de 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -421,13 +421,13 @@ fail:
421 * @vport: pointer to a host virtual N_Port data structure. 421 * @vport: pointer to a host virtual N_Port data structure.
422 * 422 *
423 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for 423 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
424 * the @vport. This mailbox command is necessary for FCoE only. 424 * the @vport. This mailbox command is necessary for SLI4 ports only.
425 * 425 *
426 * Return code 426 * Return code
427 * 0 - successfully issued REG_VFI for @vport 427 * 0 - successfully issued REG_VFI for @vport
428 * A failure code otherwise. 428 * A failure code otherwise.
429 **/ 429 **/
430static int 430int
431lpfc_issue_reg_vfi(struct lpfc_vport *vport) 431lpfc_issue_reg_vfi(struct lpfc_vport *vport)
432{ 432{
433 struct lpfc_hba *phba = vport->phba; 433 struct lpfc_hba *phba = vport->phba;
@@ -438,10 +438,14 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
438 int rc = 0; 438 int rc = 0;
439 439
440 sp = &phba->fc_fabparam; 440 sp = &phba->fc_fabparam;
441 ndlp = lpfc_findnode_did(vport, Fabric_DID); 441 /* move forward in case of SLI4 FC port loopback test */
442 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 442 if ((phba->sli_rev == LPFC_SLI_REV4) &&
443 rc = -ENODEV; 443 !(phba->link_flag & LS_LOOPBACK_MODE)) {
444 goto fail; 444 ndlp = lpfc_findnode_did(vport, Fabric_DID);
445 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
446 rc = -ENODEV;
447 goto fail;
448 }
445 } 449 }
446 450
447 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 451 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
@@ -487,6 +491,54 @@ fail:
487} 491}
488 492
489/** 493/**
494 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
495 * @vport: pointer to a host virtual N_Port data structure.
496 *
497 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
498 * the @vport. This mailbox command is necessary for SLI4 port only.
499 *
500 * Return code
501 * 0 - successfully issued UNREG_VFI for @vport
502 * A failure code otherwise.
503 **/
504int
505lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
506{
507 struct lpfc_hba *phba = vport->phba;
508 struct Scsi_Host *shost;
509 LPFC_MBOXQ_t *mboxq;
510 int rc;
511
512 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
513 if (!mboxq) {
514 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
515 "2556 UNREG_VFI mbox allocation failed"
516 "HBA state x%x\n", phba->pport->port_state);
517 return -ENOMEM;
518 }
519
520 lpfc_unreg_vfi(mboxq, vport);
521 mboxq->vport = vport;
522 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
523
524 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
525 if (rc == MBX_NOT_FINISHED) {
526 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
527 "2557 UNREG_VFI issue mbox failed rc x%x "
528 "HBA state x%x\n",
529 rc, phba->pport->port_state);
530 mempool_free(mboxq, phba->mbox_mem_pool);
531 return -EIO;
532 }
533
534 shost = lpfc_shost_from_vport(vport);
535 spin_lock_irq(shost->host_lock);
536 vport->fc_flag &= ~FC_VFI_REGISTERED;
537 spin_unlock_irq(shost->host_lock);
538 return 0;
539}
540
541/**
490 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. 542 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
491 * @vport: pointer to a host virtual N_Port data structure. 543 * @vport: pointer to a host virtual N_Port data structure.
492 * @sp: pointer to service parameter data structure. 544 * @sp: pointer to service parameter data structure.
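
The new lpfc_issue_unreg_vfi follows the driver's fire-and-forget mailbox convention: allocate from the mempool, attach the completion handler, issue with MBX_NOWAIT, and free the mailbox only on MBX_NOT_FINISHED, because after a successful submit the completion path owns it. A generic sketch of that ownership rule (hypothetical framework; completion invoked inline for brevity):

#include <stdio.h>
#include <stdlib.h>

struct req { void (*done)(struct req *); };

static void on_done(struct req *r)
{
        free(r);                        /* completion path releases it */
}

static int submit_nowait(struct req *r)
{
        r->done(r);                     /* a real queue defers this */
        return 0;
}

static int issue_unreg(void)
{
        struct req *r = malloc(sizeof(*r));

        if (!r)
                return -1;              /* -ENOMEM in the driver */
        r->done = on_done;
        if (submit_nowait(r) != 0) {
                free(r);                /* submit failed: we still own it */
                return -1;              /* -EIO in the driver */
        }
        return 0;                       /* on success, do not touch r */
}

int main(void)
{
        printf("issue_unreg -> %d\n", issue_unreg());
        return 0;
}
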
@@ -615,7 +667,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
615 "1816 FLOGI NPIV supported, " 667 "1816 FLOGI NPIV supported, "
616 "response data 0x%x\n", 668 "response data 0x%x\n",
617 sp->cmn.response_multiple_NPort); 669 sp->cmn.response_multiple_NPort);
670 spin_lock_irq(&phba->hbalock);
618 phba->link_flag |= LS_NPIV_FAB_SUPPORTED; 671 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
672 spin_unlock_irq(&phba->hbalock);
619 } else { 673 } else {
620 /* Because we asked f/w for NPIV it still expects us 674 /* Because we asked f/w for NPIV it still expects us
621 to call reg_vnpid at least for the physical host */ 675 to call reg_vnpid at least for the physical host */
@@ -623,7 +677,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
623 LOG_ELS | LOG_VPORT, 677 LOG_ELS | LOG_VPORT,
624 "1817 Fabric does not support NPIV " 678 "1817 Fabric does not support NPIV "
625 "- configuring single port mode.\n"); 679 "- configuring single port mode.\n");
680 spin_lock_irq(&phba->hbalock);
626 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 681 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
682 spin_unlock_irq(&phba->hbalock);
627 } 683 }
628 } 684 }
629 685
@@ -686,11 +742,16 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
686 lpfc_do_scr_ns_plogi(phba, vport); 742 lpfc_do_scr_ns_plogi(phba, vport);
687 } else if (vport->fc_flag & FC_VFI_REGISTERED) 743 } else if (vport->fc_flag & FC_VFI_REGISTERED)
688 lpfc_issue_init_vpi(vport); 744 lpfc_issue_init_vpi(vport);
689 else 745 else {
746 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
747 "3135 Need register VFI: (x%x/%x)\n",
748 vport->fc_prevDID, vport->fc_myDID);
690 lpfc_issue_reg_vfi(vport); 749 lpfc_issue_reg_vfi(vport);
750 }
691 } 751 }
692 return 0; 752 return 0;
693} 753}
754
694/** 755/**
695 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 756 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
696 * @vport: pointer to a host virtual N_Port data structure. 757 * @vport: pointer to a host virtual N_Port data structure.
@@ -907,17 +968,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
907 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 968 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
908 * alpa map would take too long otherwise. 969 * alpa map would take too long otherwise.
909 */ 970 */
910 if (phba->alpa_map[0] == 0) { 971 if (phba->alpa_map[0] == 0)
911 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 972 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
912 if ((phba->sli_rev == LPFC_SLI_REV4) && 973 if ((phba->sli_rev == LPFC_SLI_REV4) &&
913 (!(vport->fc_flag & FC_VFI_REGISTERED) || 974 (!(vport->fc_flag & FC_VFI_REGISTERED) ||
914 (vport->fc_prevDID != vport->fc_myDID))) { 975 (vport->fc_prevDID != vport->fc_myDID))) {
915 if (vport->fc_flag & FC_VFI_REGISTERED) 976 if (vport->fc_flag & FC_VFI_REGISTERED)
916 lpfc_sli4_unreg_all_rpis(vport); 977 lpfc_sli4_unreg_all_rpis(vport);
917 lpfc_issue_reg_vfi(vport); 978 lpfc_issue_reg_vfi(vport);
918 lpfc_nlp_put(ndlp); 979 lpfc_nlp_put(ndlp);
919 goto out; 980 goto out;
920 }
921 } 981 }
922 goto flogifail; 982 goto flogifail;
923 } 983 }
@@ -1075,6 +1135,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1075 /* Setup CSPs accordingly for Fabric */ 1135 /* Setup CSPs accordingly for Fabric */
1076 sp->cmn.e_d_tov = 0; 1136 sp->cmn.e_d_tov = 0;
1077 sp->cmn.w2.r_a_tov = 0; 1137 sp->cmn.w2.r_a_tov = 0;
1138 sp->cmn.virtual_fabric_support = 0;
1078 sp->cls1.classValid = 0; 1139 sp->cls1.classValid = 0;
1079 sp->cls2.seqDelivery = 1; 1140 sp->cls2.seqDelivery = 1;
1080 sp->cls3.seqDelivery = 1; 1141 sp->cls3.seqDelivery = 1;
@@ -1163,8 +1224,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
1163 spin_lock_irq(&phba->hbalock); 1224 spin_lock_irq(&phba->hbalock);
1164 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1225 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1165 icmd = &iocb->iocb; 1226 icmd = &iocb->iocb;
1166 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR && 1227 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
1167 icmd->un.elsreq64.bdl.ulpIoTag32) {
1168 ndlp = (struct lpfc_nodelist *)(iocb->context1); 1228 ndlp = (struct lpfc_nodelist *)(iocb->context1);
1169 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 1229 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1170 (ndlp->nlp_DID == Fabric_DID)) 1230 (ndlp->nlp_DID == Fabric_DID))
@@ -3066,17 +3126,22 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3066 if (did == FDMI_DID) 3126 if (did == FDMI_DID)
3067 retry = 1; 3127 retry = 1;
3068 3128
3069 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) && 3129 if ((cmd == ELS_CMD_FLOGI) &&
3070 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 3130 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
3071 !lpfc_error_lost_link(irsp)) { 3131 !lpfc_error_lost_link(irsp)) {
3072 /* FLOGI retry policy */ 3132 /* FLOGI retry policy */
3073 retry = 1; 3133 retry = 1;
3074 /* retry forever */ 3134 /* retry FLOGI forever */
3075 maxretry = 0; 3135 maxretry = 0;
3076 if (cmdiocb->retry >= 100) 3136 if (cmdiocb->retry >= 100)
3077 delay = 5000; 3137 delay = 5000;
3078 else if (cmdiocb->retry >= 32) 3138 else if (cmdiocb->retry >= 32)
3079 delay = 1000; 3139 delay = 1000;
3140 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
3141 /* retry FDISCs every second up to devloss */
3142 retry = 1;
3143 maxretry = vport->cfg_devloss_tmo;
3144 delay = 1000;
3080 } 3145 }
3081 3146
3082 cmdiocb->retry++; 3147 cmdiocb->retry++;
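
Splitting the retry policy means FDISC no longer inherits FLOGI's retry-forever behavior: FDISC now retries once a second only until the devloss timeout, while FLOGI keeps retrying indefinitely with a delay that grows at 32 and 100 attempts. A compact model of the new schedule (values taken from the hunk, helper name hypothetical):

#include <stdio.h>

enum cmd { FLOGI, FDISC };

static int retry_delay_ms(enum cmd cmd, int retry, int devloss_tmo_s,
                          int *maxretry)
{
        if (cmd == FLOGI) {
                *maxretry = 0;          /* 0 == retry forever */
                if (retry >= 100)
                        return 5000;
                if (retry >= 32)
                        return 1000;
                return 0;               /* immediate retry at first */
        }
        /* FDISC: one retry per second, capped at devloss */
        *maxretry = devloss_tmo_s;
        return 1000;
}

int main(void)
{
        int max;

        printf("FLOGI retry 40 -> %d ms\n",
               retry_delay_ms(FLOGI, 40, 30, &max));
        printf("FDISC retry 5  -> %d ms (max %d)\n",
               retry_delay_ms(FDISC, 5, 30, &max), max);
        return 0;
}
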
@@ -3389,11 +3454,17 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3389 3454
3390 /* 3455 /*
3391 * The driver received a LOGO from the rport and has ACK'd it. 3456 * The driver received a LOGO from the rport and has ACK'd it.
3392 * At this point, the driver is done so release the IOCB and 3457 * At this point, the driver is done so release the IOCB
3393 * remove the ndlp reference.
3394 */ 3458 */
3395 lpfc_els_free_iocb(phba, cmdiocb); 3459 lpfc_els_free_iocb(phba, cmdiocb);
3396 lpfc_nlp_put(ndlp); 3460
3461 /*
3462 * Remove the ndlp reference if it's a fabric node that has
3463 * sent us an unsolicted LOGO.
3464 */
3465 if (ndlp->nlp_type & NLP_FABRIC)
3466 lpfc_nlp_put(ndlp);
3467
3397 return; 3468 return;
3398} 3469}
3399 3470
@@ -4867,23 +4938,31 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4867 sizeof(struct lpfc_name)); 4938 sizeof(struct lpfc_name));
4868 4939
4869 if (!rc) { 4940 if (!rc) {
4870 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4941 if (phba->sli_rev < LPFC_SLI_REV4) {
4871 if (!mbox) 4942 mbox = mempool_alloc(phba->mbox_mem_pool,
4943 GFP_KERNEL);
4944 if (!mbox)
4945 return 1;
4946 lpfc_linkdown(phba);
4947 lpfc_init_link(phba, mbox,
4948 phba->cfg_topology,
4949 phba->cfg_link_speed);
4950 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
4951 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4952 mbox->vport = vport;
4953 rc = lpfc_sli_issue_mbox(phba, mbox,
4954 MBX_NOWAIT);
4955 lpfc_set_loopback_flag(phba);
4956 if (rc == MBX_NOT_FINISHED)
4957 mempool_free(mbox, phba->mbox_mem_pool);
4872 return 1; 4958 return 1;
4873 4959 } else {
4874 lpfc_linkdown(phba); 4960 /* abort the flogi coming back to ourselves
4875 lpfc_init_link(phba, mbox, 4961 * due to external loopback on the port.
4876 phba->cfg_topology, 4962 */
4877 phba->cfg_link_speed); 4963 lpfc_els_abort_flogi(phba);
4878 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 4964 return 0;
4879 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4880 mbox->vport = vport;
4881 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4882 lpfc_set_loopback_flag(phba);
4883 if (rc == MBX_NOT_FINISHED) {
4884 mempool_free(mbox, phba->mbox_mem_pool);
4885 } 4965 }
4886 return 1;
4887 } else if (rc > 0) { /* greater than */ 4966 } else if (rc > 0) { /* greater than */
4888 spin_lock_irq(shost->host_lock); 4967 spin_lock_irq(shost->host_lock);
4889 vport->fc_flag |= FC_PT2PT_PLOGI; 4968 vport->fc_flag |= FC_PT2PT_PLOGI;
@@ -5838,8 +5917,12 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5838 vport->fc_myDID = vport->fc_prevDID; 5917 vport->fc_myDID = vport->fc_prevDID;
5839 if (phba->sli_rev < LPFC_SLI_REV4) 5918 if (phba->sli_rev < LPFC_SLI_REV4)
5840 lpfc_issue_fabric_reglogin(vport); 5919 lpfc_issue_fabric_reglogin(vport);
5841 else 5920 else {
5921 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5922 "3138 Need register VFI: (x%x/%x)\n",
5923 vport->fc_prevDID, vport->fc_myDID);
5842 lpfc_issue_reg_vfi(vport); 5924 lpfc_issue_reg_vfi(vport);
5925 }
5843 } 5926 }
5844 } 5927 }
5845 return 0; 5928 return 0;
@@ -6596,56 +6679,6 @@ dropit:
6596} 6679}
6597 6680
6598/** 6681/**
6599 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
6600 * @phba: pointer to lpfc hba data structure.
6601 * @vpi: host virtual N_Port identifier.
6602 *
6603 * This routine finds a vport on a HBA (referred by @phba) through a
6604 * @vpi. The function walks the HBA's vport list and returns the address
6605 * of the vport with the matching @vpi.
6606 *
6607 * Return code
6608 * NULL - No vport with the matching @vpi found
6609 * Otherwise - Address to the vport with the matching @vpi.
6610 **/
6611struct lpfc_vport *
6612lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6613{
6614 struct lpfc_vport *vport;
6615 unsigned long flags;
6616 int i = 0;
6617
6618 /* The physical ports are always vpi 0 - translate is unnecessary. */
6619 if (vpi > 0) {
6620 /*
6621 * Translate the physical vpi to the logical vpi. The
6622 * vport stores the logical vpi.
6623 */
6624 for (i = 0; i < phba->max_vpi; i++) {
6625 if (vpi == phba->vpi_ids[i])
6626 break;
6627 }
6628
6629 if (i >= phba->max_vpi) {
6630 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
6631 "2936 Could not find Vport mapped "
6632 "to vpi %d\n", vpi);
6633 return NULL;
6634 }
6635 }
6636
6637 spin_lock_irqsave(&phba->hbalock, flags);
6638 list_for_each_entry(vport, &phba->port_list, listentry) {
6639 if (vport->vpi == i) {
6640 spin_unlock_irqrestore(&phba->hbalock, flags);
6641 return vport;
6642 }
6643 }
6644 spin_unlock_irqrestore(&phba->hbalock, flags);
6645 return NULL;
6646}
6647
6648/**
6649 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 6682 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
6650 * @phba: pointer to lpfc hba data structure. 6683 * @phba: pointer to lpfc hba data structure.
6651 * @pring: pointer to a SLI ring. 6684 * @pring: pointer to a SLI ring.
@@ -7281,6 +7314,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7281 /* Setup CSPs accordingly for Fabric */ 7314 /* Setup CSPs accordingly for Fabric */
7282 sp->cmn.e_d_tov = 0; 7315 sp->cmn.e_d_tov = 0;
7283 sp->cmn.w2.r_a_tov = 0; 7316 sp->cmn.w2.r_a_tov = 0;
7317 sp->cmn.virtual_fabric_support = 0;
7284 sp->cls1.classValid = 0; 7318 sp->cls1.classValid = 0;
7285 sp->cls2.seqDelivery = 1; 7319 sp->cls2.seqDelivery = 1;
7286 sp->cls3.seqDelivery = 1; 7320 sp->cls3.seqDelivery = 1;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 091f68e5cb70..678a4b11059c 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1074,6 +1074,12 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1074 1074
1075 mempool_free(pmb, phba->mbox_mem_pool); 1075 mempool_free(pmb, phba->mbox_mem_pool);
1076 1076
1077 /* don't perform discovery for SLI4 loopback diagnostic test */
1078 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1079 !(phba->hba_flag & HBA_FCOE_MODE) &&
1080 (phba->link_flag & LS_LOOPBACK_MODE))
1081 return;
1082
1077 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 1083 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
1078 vport->fc_flag & FC_PUBLIC_LOOP && 1084 vport->fc_flag & FC_PUBLIC_LOOP &&
1079 !(vport->fc_flag & FC_LBIT)) { 1085 !(vport->fc_flag & FC_LBIT)) {
@@ -2646,9 +2652,14 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2646{ 2652{
2647 struct lpfc_vport *vport = mboxq->vport; 2653 struct lpfc_vport *vport = mboxq->vport;
2648 2654
2649 /* VFI not supported on interface type 0, just do the flogi */ 2655 /*
2650 if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type, 2656 * VFI not supported on interface type 0, just do the flogi
2651 &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0)) { 2657 * Also continue if the VFI is in use - just use the same one.
2658 */
2659 if (mboxq->u.mb.mbxStatus &&
2660 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2661 LPFC_SLI_INTF_IF_TYPE_0) &&
2662 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2652 lpfc_printf_vlog(vport, KERN_ERR, 2663 lpfc_printf_vlog(vport, KERN_ERR,
2653 LOG_MBOX, 2664 LOG_MBOX,
2654 "2891 Init VFI mailbox failed 0x%x\n", 2665 "2891 Init VFI mailbox failed 0x%x\n",
@@ -2842,10 +2853,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2842 lpfc_disc_list_loopmap(vport); 2853 lpfc_disc_list_loopmap(vport);
2843 /* Start discovery */ 2854 /* Start discovery */
2844 lpfc_disc_start(vport); 2855 lpfc_disc_start(vport);
2845 goto fail_free_mem; 2856 goto out_free_mem;
2846 } 2857 }
2847 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2858 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2848 goto fail_free_mem; 2859 goto out_free_mem;
2849 } 2860 }
2850 /* The VPI is implicitly registered when the VFI is registered */ 2861 /* The VPI is implicitly registered when the VFI is registered */
2851 spin_lock_irq(shost->host_lock); 2862 spin_lock_irq(shost->host_lock);
@@ -2855,10 +2866,16 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2855 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; 2866 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2856 spin_unlock_irq(shost->host_lock); 2867 spin_unlock_irq(shost->host_lock);
2857 2868
2869 /* In case SLI4 FC loopback test, we are ready */
2870 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2871 (phba->link_flag & LS_LOOPBACK_MODE)) {
2872 phba->link_state = LPFC_HBA_READY;
2873 goto out_free_mem;
2874 }
2875
2858 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2876 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
2859 /* For private loop just start discovery and we are done. */ 2877 /* For private loop just start discovery and we are done. */
2860 if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && 2878 if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
2861 (phba->alpa_map[0] == 0) &&
2862 !(vport->fc_flag & FC_PUBLIC_LOOP)) { 2879 !(vport->fc_flag & FC_PUBLIC_LOOP)) {
2863 /* Use loop map to make discovery list */ 2880 /* Use loop map to make discovery list */
2864 lpfc_disc_list_loopmap(vport); 2881 lpfc_disc_list_loopmap(vport);
@@ -2870,7 +2887,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2870 } 2887 }
2871 } 2888 }
2872 2889
2873fail_free_mem: 2890out_free_mem:
2874 mempool_free(mboxq, phba->mbox_mem_pool); 2891 mempool_free(mboxq, phba->mbox_mem_pool);
2875 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 2892 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2876 kfree(dmabuf); 2893 kfree(dmabuf);
@@ -2923,6 +2940,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2923{ 2940{
2924 struct lpfc_vport *vport = phba->pport; 2941 struct lpfc_vport *vport = phba->pport;
2925 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; 2942 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
2943 struct Scsi_Host *shost;
2926 int i; 2944 int i;
2927 struct lpfc_dmabuf *mp; 2945 struct lpfc_dmabuf *mp;
2928 int rc; 2946 int rc;
@@ -2946,6 +2964,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2946 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); 2964 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
2947 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 2965 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
2948 2966
2967 shost = lpfc_shost_from_vport(vport);
2949 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 2968 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2950 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 2969 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
2951 2970
@@ -2957,8 +2976,11 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2957 "1309 Link Up Event npiv not supported in loop " 2976 "1309 Link Up Event npiv not supported in loop "
2958 "topology\n"); 2977 "topology\n");
2959 /* Get Loop Map information */ 2978 /* Get Loop Map information */
2960 if (bf_get(lpfc_mbx_read_top_il, la)) 2979 if (bf_get(lpfc_mbx_read_top_il, la)) {
2980 spin_lock_irq(shost->host_lock);
2961 vport->fc_flag |= FC_LBIT; 2981 vport->fc_flag |= FC_LBIT;
2982 spin_unlock_irq(shost->host_lock);
2983 }
2962 2984
2963 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); 2985 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
2964 i = la->lilpBde64.tus.f.bdeSize; 2986 i = la->lilpBde64.tus.f.bdeSize;
@@ -3003,11 +3025,13 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3003 } else { 3025 } else {
3004 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { 3026 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
3005 if (phba->max_vpi && phba->cfg_enable_npiv && 3027 if (phba->max_vpi && phba->cfg_enable_npiv &&
3006 (phba->sli_rev == 3)) 3028 (phba->sli_rev >= LPFC_SLI_REV3))
3007 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3029 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3008 } 3030 }
3009 vport->fc_myDID = phba->fc_pref_DID; 3031 vport->fc_myDID = phba->fc_pref_DID;
3032 spin_lock_irq(shost->host_lock);
3010 vport->fc_flag |= FC_LBIT; 3033 vport->fc_flag |= FC_LBIT;
3034 spin_unlock_irq(shost->host_lock);
3011 } 3035 }
3012 spin_unlock_irq(&phba->hbalock); 3036 spin_unlock_irq(&phba->hbalock);
3013 3037
@@ -3224,15 +3248,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3224 } else if (bf_get(lpfc_mbx_read_top_att_type, la) == 3248 } else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
3225 LPFC_ATT_LINK_DOWN) { 3249 LPFC_ATT_LINK_DOWN) {
3226 phba->fc_stat.LinkDown++; 3250 phba->fc_stat.LinkDown++;
3227 if (phba->link_flag & LS_LOOPBACK_MODE) { 3251 if (phba->link_flag & LS_LOOPBACK_MODE)
3228 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3252 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3229 "1308 Link Down Event in loop back mode " 3253 "1308 Link Down Event in loop back mode "
3230 "x%x received " 3254 "x%x received "
3231 "Data: x%x x%x x%x\n", 3255 "Data: x%x x%x x%x\n",
3232 la->eventTag, phba->fc_eventTag, 3256 la->eventTag, phba->fc_eventTag,
3233 phba->pport->port_state, vport->fc_flag); 3257 phba->pport->port_state, vport->fc_flag);
3234 } 3258 else
3235 else {
3236 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3259 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3237 "1305 Link Down Event x%x received " 3260 "1305 Link Down Event x%x received "
3238 "Data: x%x x%x x%x x%x x%x\n", 3261 "Data: x%x x%x x%x x%x x%x\n",
@@ -3240,7 +3263,6 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3240 phba->pport->port_state, vport->fc_flag, 3263 phba->pport->port_state, vport->fc_flag,
3241 bf_get(lpfc_mbx_read_top_mm, la), 3264 bf_get(lpfc_mbx_read_top_mm, la),
3242 bf_get(lpfc_mbx_read_top_fa, la)); 3265 bf_get(lpfc_mbx_read_top_fa, la));
3243 }
3244 lpfc_mbx_issue_link_down(phba); 3266 lpfc_mbx_issue_link_down(phba);
3245 } 3267 }
3246 if ((bf_get(lpfc_mbx_read_top_mm, la)) && 3268 if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
@@ -3594,6 +3616,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
 
 	ndlp = (struct lpfc_nodelist *) pmb->context2;
 	pmb->context1 = NULL;
@@ -3639,8 +3662,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		 * vport discovery */
 		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
 			lpfc_start_fdiscs(phba);
-		else
+		else {
+			shost = lpfc_shost_from_vport(vport);
+			spin_lock_irq(shost->host_lock);
 			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
+			spin_unlock_irq(shost->host_lock);
+		}
 		lpfc_do_scr_ns_plogi(phba, vport);
 	}
 
@@ -5353,6 +5380,73 @@ lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
 	return ndlp;
 }
 
+/*
+ * This routine looks up the ndlp lists for the given RPI. If the rpi
+ * is found, the routine returns the node element list pointer else
+ * return NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp = __lpfc_findnode_rpi(vport, rpi);
+	spin_unlock_irq(shost->host_lock);
+	return ndlp;
+}
+
+/**
+ * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: the physical host virtual N_Port identifier.
+ *
+ * This routine finds a vport on a HBA (referred by @phba) through a
+ * @vpi. The function walks the HBA's vport list and returns the address
+ * of the vport with the matching @vpi.
+ *
+ * Return code
+ *    NULL - No vport with the matching @vpi found
+ *    Otherwise - Address to the vport with the matching @vpi.
+ **/
+struct lpfc_vport *
+lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
+{
+	struct lpfc_vport *vport;
+	unsigned long flags;
+	int i = 0;
+
+	/* The physical ports are always vpi 0 - translate is unnecessary. */
+	if (vpi > 0) {
+		/*
+		 * Translate the physical vpi to the logical vpi. The
+		 * vport stores the logical vpi.
+		 */
+		for (i = 0; i < phba->max_vpi; i++) {
+			if (vpi == phba->vpi_ids[i])
+				break;
+		}
+
+		if (i >= phba->max_vpi) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+					"2936 Could not find Vport mapped "
+					"to vpi %d\n", vpi);
+			return NULL;
+		}
+	}
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_for_each_entry(vport, &phba->port_list, listentry) {
+		if (vport->vpi == i) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return vport;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return NULL;
+}
+
 void
 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	      uint32_t did)
@@ -5599,7 +5693,7 @@ out:
  *
  * This function frees memory associated with the mailbox command.
  */
-static void
+void
 lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
 	struct lpfc_vport *vport = mboxq->vport;
@@ -5651,7 +5745,6 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 int
 lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
 {
-	LPFC_MBOXQ_t *mbox;
 	struct lpfc_vport **vports;
 	struct lpfc_nodelist *ndlp;
 	struct Scsi_Host *shost;
@@ -5687,35 +5780,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
 	/* Cleanup any outstanding ELS commands */
 	lpfc_els_flush_all_cmd(phba);
 
-	/* Unregister VFI */
-	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mbox) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-				"2556 UNREG_VFI mbox allocation failed"
-				"HBA state x%x\n", phba->pport->port_state);
-		return -ENOMEM;
-	}
-
-	lpfc_unreg_vfi(mbox, phba->pport);
-	mbox->vport = phba->pport;
-	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
-
-	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-	if (rc == MBX_NOT_FINISHED) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-				"2557 UNREG_VFI issue mbox failed rc x%x "
-				"HBA state x%x\n",
-				rc, phba->pport->port_state);
-		mempool_free(mbox, phba->mbox_mem_pool);
-		return -EIO;
-	}
-
-	shost = lpfc_shost_from_vport(phba->pport);
-	spin_lock_irq(shost->host_lock);
-	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
-	spin_unlock_irq(shost->host_lock);
-
-	return 0;
+	/* Unregister the physical port VFI */
+	rc = lpfc_issue_unreg_vfi(phba->pport);
+	return rc;
 }
 
 /**
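
A convention worth noting before the header changes below: every vport->fc_flag read-modify-write the lpfc_hbadisc.c hunks above touch is now bracketed by the SCSI host lock, which the driver uses to serialize fc_flag access against the worker thread and sysfs contexts. A minimal sketch of that pattern (the helper name is hypothetical, not part of the driver):

/* Sketch only: the locking pattern used for vport->fc_flag updates. */
static void example_set_fc_flag(struct lpfc_vport *vport, uint32_t flag)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);	/* serialize flag updates */
	vport->fc_flag |= flag;
	spin_unlock_irq(shost->host_lock);
}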
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 046edc4ab35f..7245bead3755 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -349,6 +349,12 @@ struct csp {
  * Word 1 Bit 31 in FLOGI response is clean address bit
  */
 #define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
+/*
+ * Word 1 Bit 30 in common service parameter is overloaded.
+ * Word 1 Bit 30 in FLOGI request is Virtual Fabrics
+ * Word 1 Bit 30 in PLOGI request is random offset
+ */
+#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint16_t request_multiple_Nport:1;	/* FC Word 1, bit 31 */
 	uint16_t randomOffset:1;		/* FC Word 1, bit 30 */
@@ -1852,8 +1858,8 @@ typedef struct {
 	uint8_t fabric_AL_PA;	/* If using a Fabric Assigned AL_PA */
 #endif
 
-#define FLAGS_LOCAL_LB			0x01 /* link_flags (=1) ENDEC loopback */
 #define FLAGS_TOPOLOGY_MODE_LOOP_PT	0x00 /* Attempt loop then pt-pt */
+#define FLAGS_LOCAL_LB			0x01 /* link_flags (=1) ENDEC loopback */
 #define FLAGS_TOPOLOGY_MODE_PT_PT	0x02 /* Attempt pt-pt only */
 #define FLAGS_TOPOLOGY_MODE_LOOP	0x04 /* Attempt loop only */
 #define FLAGS_TOPOLOGY_MODE_PT_LOOP	0x06 /* Attempt pt-pt then loop */
@@ -2819,7 +2825,8 @@ typedef struct {
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint32_t rsvd1     : 19;  /* Reserved */
 	uint32_t cdss      :  1;  /* Configure Data Security SLI */
-	uint32_t rsvd2     :  3;  /* Reserved */
+	uint32_t casabt    :  1;  /* Configure async abts status notice */
+	uint32_t rsvd2     :  2;  /* Reserved */
 	uint32_t cbg       :  1;  /* Configure BlockGuard */
 	uint32_t cmv       :  1;  /* Configure Max VPIs */
 	uint32_t ccrp      :  1;  /* Config Command Ring Polling */
@@ -2839,14 +2846,16 @@ typedef struct {
 	uint32_t ccrp      :  1;  /* Config Command Ring Polling */
 	uint32_t cmv       :  1;  /* Configure Max VPIs */
 	uint32_t cbg       :  1;  /* Configure BlockGuard */
-	uint32_t rsvd2     :  3;  /* Reserved */
+	uint32_t rsvd2     :  2;  /* Reserved */
+	uint32_t casabt    :  1;  /* Configure async abts status notice */
 	uint32_t cdss      :  1;  /* Configure Data Security SLI */
 	uint32_t rsvd1     : 19;  /* Reserved */
 #endif
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint32_t rsvd3     : 19;  /* Reserved */
 	uint32_t gdss      :  1;  /* Configure Data Security SLI */
-	uint32_t rsvd4     :  3;  /* Reserved */
+	uint32_t gasabt    :  1;  /* Grant async abts status notice */
+	uint32_t rsvd4     :  2;  /* Reserved */
 	uint32_t gbg       :  1;  /* Grant BlockGuard */
 	uint32_t gmv       :  1;  /* Grant Max VPIs */
 	uint32_t gcrp      :  1;  /* Grant Command Ring Polling */
@@ -2866,7 +2875,8 @@ typedef struct {
 	uint32_t gcrp      :  1;  /* Grant Command Ring Polling */
 	uint32_t gmv       :  1;  /* Grant Max VPIs */
 	uint32_t gbg       :  1;  /* Grant BlockGuard */
-	uint32_t rsvd4     :  3;  /* Reserved */
+	uint32_t rsvd4     :  2;  /* Reserved */
+	uint32_t gasabt    :  1;  /* Grant async abts status notice */
 	uint32_t gdss      :  1;  /* Configure Data Security SLI */
 	uint32_t rsvd3     : 19;  /* Reserved */
 #endif
@@ -3465,6 +3475,7 @@ typedef struct {
 } ASYNCSTAT_FIELDS;
 #define ASYNC_TEMP_WARN		0x100
 #define ASYNC_TEMP_SAFE		0x101
+#define ASYNC_STATUS_CN		0x102
 
 /* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
    or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
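
The lpfc_hw.h bitfield hunks above all follow one pattern: the new one-bit flag (casabt on the configure side, gasabt on the grant side) is carved out of an adjacent reserved field, which shrinks from 3 to 2 bits so the word stays 32 bits wide, and the edit is mirrored in both the big- and little-endian layouts. A sketch of the pattern with hypothetical field names:

/* Illustrative only: carving a new flag out of a reserved bitfield. */
struct example_word {
#ifdef __BIG_ENDIAN_BITFIELD
	uint32_t rsvd   : 29;	/* shrunk from 30 ...              */
	uint32_t newbit :  1;	/* ... to make room for this flag  */
	uint32_t old    :  2;
#else	/* __LITTLE_ENDIAN_BITFIELD */
	uint32_t old    :  2;	/* same fields, reverse order      */
	uint32_t newbit :  1;
	uint32_t rsvd   : 29;
#endif
};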
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 98d21521f539..e5bfa7f334e3 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1351,11 +1351,11 @@ struct lpfc_mbx_set_link_diag_loopback {
 	struct {
 		uint32_t word0;
 #define lpfc_mbx_set_diag_lpbk_type_SHIFT	0
-#define lpfc_mbx_set_diag_lpbk_type_MASK	0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_MASK	0x00000003
 #define lpfc_mbx_set_diag_lpbk_type_WORD	word0
 #define LPFC_DIAG_LOOPBACK_TYPE_DISABLE		0x0
 #define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL	0x1
-#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL	0x2
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES		0x2
 #define lpfc_mbx_set_diag_lpbk_link_num_SHIFT	16
 #define lpfc_mbx_set_diag_lpbk_link_num_MASK	0x0000003F
 #define lpfc_mbx_set_diag_lpbk_link_num_WORD	word0
@@ -1830,6 +1830,8 @@ struct lpfc_mbx_init_vfi {
 #define lpfc_init_vfi_hop_count_MASK	0x000000FF
 #define lpfc_init_vfi_hop_count_WORD	word4
 };
+#define MBX_VFI_IN_USE			0x9F02
+
 
 struct lpfc_mbx_reg_vfi {
 	uint32_t word1;
@@ -2104,6 +2106,8 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_lnk_type_SHIFT	6
 #define lpfc_mbx_rd_conf_lnk_type_MASK	0x00000003
 #define lpfc_mbx_rd_conf_lnk_type_WORD	word2
+#define LPFC_LNK_TYPE_GE	0
+#define LPFC_LNK_TYPE_FC	1
 #define lpfc_mbx_rd_conf_lnk_ldv_SHIFT	8
 #define lpfc_mbx_rd_conf_lnk_ldv_MASK	0x00000001
 #define lpfc_mbx_rd_conf_lnk_ldv_WORD	word2
@@ -3320,6 +3324,9 @@ struct wqe_rctl_dfctl {
 #define wqe_la_SHIFT	3
 #define wqe_la_MASK	0x000000001
 #define wqe_la_WORD	word5
+#define wqe_xo_SHIFT	6
+#define wqe_xo_MASK	0x000000001
+#define wqe_xo_WORD	word5
 #define wqe_ls_SHIFT	7
 #define wqe_ls_MASK	0x000000001
 #define wqe_ls_WORD	word5
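
The wqe_xo_* defines added above follow the driver's accessor scheme: each register field is described by a _SHIFT, a _MASK (applied before shifting), and the _WORD it lives in, and is read or written through the bf_get()/bf_set() macros. Assuming the usual macro expansion, the new bit would be driven roughly like this (illustrative fragment, not from the patch):

/* sketch: set and read back the new XO bit in word5 of a WQE */
bf_set(wqe_xo, &wqe->generic.wqe_com, 1);
if (bf_get(wqe_xo, &wqe->generic.wqe_com))
	;	/* bit is set */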
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 55bc4fc7376f..dfea2dada02c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -62,7 +62,6 @@ static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
 static int lpfc_setup_endian_order(struct lpfc_hba *);
-static int lpfc_sli4_read_config(struct lpfc_hba *);
 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
 static void lpfc_free_sgl_list(struct lpfc_hba *);
 static int lpfc_init_sgl_list(struct lpfc_hba *);
@@ -475,27 +474,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	/* Get the default values for Model Name and Description */
 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
 
-	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
-		&& !(phba->lmt & LMT_1Gb))
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
-		&& !(phba->lmt & LMT_2Gb))
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
-		&& !(phba->lmt & LMT_4Gb))
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
-		&& !(phba->lmt & LMT_8Gb))
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
-		&& !(phba->lmt & LMT_10Gb))
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
-		&& !(phba->lmt & LMT_16Gb))) {
-		/* Reset link speed to auto */
-		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-			"1302 Invalid speed for this board: "
-			"Reset link speed to auto: x%x\n",
-			phba->cfg_link_speed);
-		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
-	}
-
 	phba->link_state = LPFC_LINK_DOWN;
 
 	/* Only process IOCBs on ELS ring till hba_state is READY */
@@ -585,28 +563,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 			return -EIO;
 		}
 	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
-		lpfc_init_link(phba, pmb, phba->cfg_topology,
-			phba->cfg_link_speed);
-		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-		lpfc_set_loopback_flag(phba);
-		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-		if (rc != MBX_SUCCESS) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0454 Adapter failed to init, mbxCmd x%x "
-				"INIT_LINK, mbxStatus x%x\n",
-				mb->mbxCommand, mb->mbxStatus);
-
-			/* Clear all interrupt enable conditions */
-			writel(0, phba->HCregaddr);
-			readl(phba->HCregaddr); /* flush */
-			/* Clear all pending interrupts */
-			writel(0xffffffff, phba->HAregaddr);
-			readl(phba->HAregaddr); /* flush */
-			phba->link_state = LPFC_HBA_ERROR;
-			if (rc != MBX_BUSY)
-				mempool_free(pmb, phba->mbox_mem_pool);
-			return -EIO;
-		}
+		mempool_free(pmb, phba->mbox_mem_pool);
+		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+		if (rc)
+			return rc;
 	}
 	/* MBOX buffer will be freed in mbox compl */
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -668,6 +628,28 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 int
 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
 {
+	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
+}
+
+/**
+ * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
+ * @phba: pointer to lpfc hba data structure.
+ * @fc_topology: desired fc topology.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ *		0 - success
+ *		Any other value - error
+ **/
+int
+lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
+			       uint32_t flag)
+{
 	struct lpfc_vport *vport = phba->pport;
 	LPFC_MBOXQ_t *pmb;
 	MAILBOX_t *mb;
@@ -681,9 +663,30 @@ lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
 	mb = &pmb->u.mb;
 	pmb->vport = vport;
 
-	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
+	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
+	     !(phba->lmt & LMT_1Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
+	     !(phba->lmt & LMT_2Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
+	     !(phba->lmt & LMT_4Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
+	     !(phba->lmt & LMT_8Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
+	     !(phba->lmt & LMT_10Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
+	     !(phba->lmt & LMT_16Gb))) {
+		/* Reset link speed to auto */
+		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+			"1302 Invalid speed for this board:%d "
+			"Reset link speed to auto.\n",
+			phba->cfg_link_speed);
+		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+	}
+	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	lpfc_set_loopback_flag(phba);
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		lpfc_set_loopback_flag(phba);
 	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1437,7 +1440,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 	uint32_t event_data;
 	struct Scsi_Host *shost;
 	uint32_t if_type;
-	struct lpfc_register portstat_reg;
+	struct lpfc_register portstat_reg = {0};
+	uint32_t reg_err1, reg_err2;
+	uint32_t uerrlo_reg, uemasklo_reg;
+	uint32_t pci_rd_rc1, pci_rd_rc2;
 	int rc;
 
 	/* If the pci channel is offline, ignore possible errors, since
@@ -1449,38 +1455,52 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 	if (!phba->cfg_enable_hba_reset)
 		return;
 
-	/* Send an internal error event to mgmt application */
-	lpfc_board_errevt_to_mgmt(phba);
-
-	/* For now, the actual action for SLI4 device handling is not
-	 * specified yet, just treated it as adaptor hardware failure
-	 */
-	event_data = FC_REG_DUMP_EVENT;
-	shost = lpfc_shost_from_vport(vport);
-	fc_host_post_vendor_event(shost, fc_get_event_number(),
-				  sizeof(event_data), (char *) &event_data,
-				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
-
 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 	switch (if_type) {
 	case LPFC_SLI_INTF_IF_TYPE_0:
+		pci_rd_rc1 = lpfc_readl(
+				phba->sli4_hba.u.if_type0.UERRLOregaddr,
+				&uerrlo_reg);
+		pci_rd_rc2 = lpfc_readl(
+				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+				&uemasklo_reg);
+		/* consider PCI bus read error as pci_channel_offline */
+		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+			return;
 		lpfc_sli4_offline_eratt(phba);
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_2:
-		portstat_reg.word0 =
-			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-
+		pci_rd_rc1 = lpfc_readl(
+				phba->sli4_hba.u.if_type2.STATUSregaddr,
+				&portstat_reg.word0);
+		/* consider PCI bus read error as pci_channel_offline */
+		if (pci_rd_rc1 == -EIO)
+			return;
+		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
 		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
 			/* TODO: Register for Overtemp async events. */
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2889 Port Overtemperature event, "
-				"taking port\n");
+				"taking port offline\n");
 			spin_lock_irq(&phba->hbalock);
 			phba->over_temp_state = HBA_OVER_TEMP;
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_sli4_offline_eratt(phba);
-			return;
+			break;
 		}
+		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3143 Port Down: Firmware Restarted\n");
+		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3144 Port Down: Debug Dump\n");
+		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3145 Port Down: Provisioning\n");
 		/*
 		 * On error status condition, driver need to wait for port
 		 * ready before performing reset.
@@ -1489,14 +1509,19 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 		if (!rc) {
 			/* need reset: attempt for port recovery */
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2887 Port Error: Attempting "
-				"Port Recovery\n");
+				"2887 Reset Needed: Attempting Port "
+				"Recovery...\n");
 			lpfc_offline_prep(phba);
 			lpfc_offline(phba);
 			lpfc_sli_brdrestart(phba);
 			if (lpfc_online(phba) == 0) {
 				lpfc_unblock_mgmt_io(phba);
-				return;
+				/* don't report event on forced debug dump */
+				if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+				    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+					return;
+				else
+					break;
 			}
 			/* fall through for not able to recover */
 		}
@@ -1506,6 +1531,16 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 	default:
 		break;
 	}
+	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+			"3123 Report dump event to upper layer\n");
+	/* Send an internal error event to mgmt application */
+	lpfc_board_errevt_to_mgmt(phba);
+
+	event_data = FC_REG_DUMP_EVENT;
+	shost = lpfc_shost_from_vport(vport);
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(event_data), (char *) &event_data,
+				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 }
 
 /**
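
A recurring guard in the eratt hunks above: register reads now go through lpfc_readl(), whose -EIO return (checked explicitly in the diff) the handler treats the same as a dead PCI channel, bailing out instead of acting on junk data. The shape of the guard (regaddr and val are placeholders, not driver names):

/* sketch: don't trust a register value after a failed PCI read */
uint32_t val;

if (lpfc_readl(regaddr, &val) == -EIO)	/* PCI bus read failed */
	return;				/* treat as channel offline */
/* val is usable from here on */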
@@ -2674,6 +2709,32 @@ lpfc_offline(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine goes through all the scsi buffers in the system and updates the
+ * Physical XRIs assigned to the SCSI buffer because these may change after any
+ * firmware reset
+ *
+ * Return codes
+ *   0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_scsi_buf_update(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *sb, *sb_next;
+
+	spin_lock_irq(&phba->hbalock);
+	spin_lock(&phba->scsi_buf_list_lock);
+	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
+		sb->cur_iocbq.sli4_xritag =
+			phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+	spin_unlock(&phba->scsi_buf_list_lock);
+	spin_unlock_irq(&phba->hbalock);
+	return 0;
+}
+
+/**
  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
  * @phba: pointer to lpfc hba data structure.
  *
@@ -5040,15 +5101,8 @@ lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
 	struct lpfc_rpi_hdr *rpi_hdr;
 
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
-	/*
-	 * If the SLI4 port supports extents, posting the rpi header isn't
-	 * required. Set the expected maximum count and let the actual value
-	 * get set when extents are fully allocated.
-	 */
-	if (!phba->sli4_hba.rpi_hdrs_in_use) {
-		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
 		return rc;
-	}
 	if (phba->sli4_hba.extents_in_use)
 		return -EIO;
 
@@ -5942,7 +5996,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
  *      -ENOMEM - No available memory
  *      -EIO - The mailbox failed to complete successfully.
  **/
-static int
+int
 lpfc_sli4_read_config(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *pmb;
@@ -5974,6 +6028,20 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 		rc = -EIO;
 	} else {
 		rd_config = &pmb->u.mqe.un.rd_config;
+		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+			phba->sli4_hba.lnk_info.lnk_tp =
+				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+			phba->sli4_hba.lnk_info.lnk_no =
+				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"3081 lnk_type:%d, lnk_numb:%d\n",
+					phba->sli4_hba.lnk_info.lnk_tp,
+					phba->sli4_hba.lnk_info.lnk_no);
+		} else
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"3082 Mailbox (x%x) returned ldv:x0\n",
+					bf_get(lpfc_mqe_command, &pmb->u.mqe));
 		phba->sli4_hba.extents_in_use =
 			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
 		phba->sli4_hba.max_cfg_param.max_xri =
@@ -6462,6 +6530,7 @@ out_free_fcp_wq:
 		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fcp_wq);
+	phba->sli4_hba.fcp_wq = NULL;
 out_free_els_wq:
 	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
 	phba->sli4_hba.els_wq = NULL;
@@ -6474,6 +6543,7 @@ out_free_fcp_cq:
 		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fcp_cq);
+	phba->sli4_hba.fcp_cq = NULL;
 out_free_els_cq:
 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
 	phba->sli4_hba.els_cq = NULL;
@@ -6486,6 +6556,7 @@ out_free_fp_eq:
 		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fp_eq);
+	phba->sli4_hba.fp_eq = NULL;
 out_free_sp_eq:
 	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
 	phba->sli4_hba.sp_eq = NULL;
@@ -6519,8 +6590,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	phba->sli4_hba.els_wq = NULL;
 
 	/* Release FCP work queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
-		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+	if (phba->sli4_hba.fcp_wq != NULL)
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+		     fcp_qidx++)
+			lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
 	kfree(phba->sli4_hba.fcp_wq);
 	phba->sli4_hba.fcp_wq = NULL;
 
@@ -6540,15 +6613,18 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 
 	/* Release FCP response complete queue */
-	fcp_qidx = 0;
-	do
-		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
-	while (++fcp_qidx < phba->cfg_fcp_eq_count);
+	if (phba->sli4_hba.fcp_cq != NULL)
+		fcp_qidx = 0;
+		do
+			lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+		while (++fcp_qidx < phba->cfg_fcp_eq_count);
 	kfree(phba->sli4_hba.fcp_cq);
 	phba->sli4_hba.fcp_cq = NULL;
 
 	/* Release fast-path event queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+	if (phba->sli4_hba.fp_eq != NULL)
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+		     fcp_qidx++)
+			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
 	kfree(phba->sli4_hba.fp_eq);
 	phba->sli4_hba.fp_eq = NULL;
 
@@ -6601,11 +6677,18 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.sp_eq->queue_id);
 
 	/* Set up fast-path event queue */
+	if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3147 Fast-path EQs not allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_sp_eq;
+	}
 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
 		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0522 Fast-path EQ (%d) not "
 					"allocated\n", fcp_eqidx);
+			rc = -ENOMEM;
 			goto out_destroy_fp_eq;
 		}
 		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -6630,6 +6713,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (!phba->sli4_hba.mbx_cq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0528 Mailbox CQ not allocated\n");
+		rc = -ENOMEM;
 		goto out_destroy_fp_eq;
 	}
 	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
@@ -6649,6 +6733,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (!phba->sli4_hba.els_cq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0530 ELS CQ not allocated\n");
+		rc = -ENOMEM;
 		goto out_destroy_mbx_cq;
 	}
 	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
@@ -6665,12 +6750,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.sp_eq->queue_id);
 
 	/* Set up fast-path FCP Response Complete Queue */
+	if (!phba->sli4_hba.fcp_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3148 Fast-path FCP CQ array not "
+				"allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_els_cq;
+	}
 	fcp_cqidx = 0;
 	do {
 		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0526 Fast-path FCP CQ (%d) not "
 					"allocated\n", fcp_cqidx);
+			rc = -ENOMEM;
 			goto out_destroy_fcp_cq;
 		}
 		if (phba->cfg_fcp_eq_count)
@@ -6709,6 +6802,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (!phba->sli4_hba.mbx_wq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0538 Slow-path MQ not allocated\n");
+		rc = -ENOMEM;
 		goto out_destroy_fcp_cq;
 	}
 	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
@@ -6728,6 +6822,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (!phba->sli4_hba.els_wq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0536 Slow-path ELS WQ not allocated\n");
+		rc = -ENOMEM;
 		goto out_destroy_mbx_wq;
 	}
 	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
@@ -6744,11 +6839,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.els_cq->queue_id);
 
 	/* Set up fast-path FCP Work Queue */
+	if (!phba->sli4_hba.fcp_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3149 Fast-path FCP WQ array not "
+				"allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_els_wq;
+	}
 	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
 		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0534 Fast-path FCP WQ (%d) not "
 					"allocated\n", fcp_wqidx);
+			rc = -ENOMEM;
 			goto out_destroy_fcp_wq;
 		}
 		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
@@ -6779,6 +6882,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0540 Receive Queue not allocated\n");
+		rc = -ENOMEM;
 		goto out_destroy_fcp_wq;
 	}
 
@@ -6805,18 +6909,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 out_destroy_fcp_wq:
 	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
 		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_els_wq:
 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
 out_destroy_fcp_cq:
 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_els_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 out_destroy_fp_eq:
 	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
 		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+out_destroy_sp_eq:
 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 out_error:
 	return rc;
@@ -6853,13 +6960,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 	/* Unset ELS complete queue */
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 	/* Unset FCP response complete queue */
-	fcp_qidx = 0;
-	do {
-		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
-	} while (++fcp_qidx < phba->cfg_fcp_eq_count);
+	if (phba->sli4_hba.fcp_cq) {
+		fcp_qidx = 0;
+		do {
+			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
+	}
 	/* Unset fast-path event queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+	if (phba->sli4_hba.fp_eq) {
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+		     fcp_qidx++)
+			lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+	}
 	/* Unset slow-path event queue */
 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 }
@@ -7398,22 +7510,25 @@ out:
 static void
 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
 {
-	struct pci_dev *pdev;
-
-	/* Obtain PCI device reference */
-	if (!phba->pcidev)
-		return;
-	else
-		pdev = phba->pcidev;
-
-	/* Free coherent DMA memory allocated */
-
-	/* Unmap I/O memory space */
-	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
-	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
-	iounmap(phba->sli4_hba.conf_regs_memmap_p);
+	uint32_t if_type;
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 
-	return;
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+		iounmap(phba->sli4_hba.conf_regs_memmap_p);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		iounmap(phba->sli4_hba.conf_regs_memmap_p);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		dev_printk(KERN_ERR, &phba->pcidev->dev,
+			   "FATAL - unsupported SLI4 interface type - %d\n",
+			   if_type);
+		break;
+	}
 }
 
 /**
@@ -9198,12 +9313,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	/* Perform post initialization setup */
 	lpfc_post_init_setup(phba);
 
-	/* check for firmware upgrade or downgrade */
-	snprintf(file_name, 16, "%s.grp", phba->ModelName);
-	error = request_firmware(&fw, file_name, &phba->pcidev->dev);
-	if (!error) {
-		lpfc_write_firmware(phba, fw);
-		release_firmware(fw);
+	/* check for firmware upgrade or downgrade (if_type 2 only) */
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_2) {
+		snprintf(file_name, 16, "%s.grp", phba->ModelName);
+		error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+		if (!error) {
+			lpfc_write_firmware(phba, fw);
+			release_firmware(fw);
+		}
 	}
 
 	/* Check if there are static vports to be created. */
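
The lpfc_init.c queue-setup hunks above extend a classic goto-unwind ladder: each allocation check now sets rc = -ENOMEM before jumping, and the new out_destroy_els_wq / out_destroy_els_cq / out_destroy_sp_eq labels tear down exactly what was created, in reverse order. A compressed sketch of the idiom (create_a/create_b/destroy_a are placeholders, not driver functions):

/* sketch: error unwinding via ordered goto labels */
static int example_setup(struct lpfc_hba *phba)
{
	int rc;

	rc = create_a(phba);
	if (rc)
		goto out_error;		/* nothing to undo yet */
	rc = create_b(phba);
	if (rc)
		goto out_destroy_a;	/* undo only what exists */
	return 0;

out_destroy_a:
	destroy_a(phba);
out_error:
	return rc;
}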
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 2ebc7d2540c0..20336f09fb3c 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1293,6 +1293,10 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	phba->sli_rev = LPFC_SLI_REV2;
 	mb->un.varCfgPort.sli_mode = phba->sli_rev;
 
+	/* If this is an SLI3 port, configure async status notification. */
+	if (phba->sli_rev == LPFC_SLI_REV3)
+		mb->un.varCfgPort.casabt = 1;
+
 	/* Now setup pcb */
 	phba->pcb->type = TYPE_NATIVE_SLI2;
 	phba->pcb->feature = FEATURE_INITIAL_SLI2;
@@ -2129,6 +2133,14 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
 	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
+			"3134 Register VFI, mydid:x%x, fcfi:%d, "
+			" vfi:%d, vpi:%d, fc_pname:%x%x\n",
+			vport->fc_myDID,
+			vport->phba->fcf.fcfi,
+			vport->phba->sli4_hba.vfi_ids[vport->vfi],
+			vport->phba->vpi_ids[vport->vpi],
+			reg_vfi->wwn[0], reg_vfi->wwn[1]);
 }
 
 /**
@@ -2175,16 +2187,15 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
 }
 
 /**
- * lpfc_dump_fcoe_param - Dump config region 23 to get FCoe parameters.
+ * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23
  * @phba: pointer to the hba structure containing.
  * @mbox: pointer to lpfc mbox command to initialize.
  *
- * This function create a SLI4 dump mailbox command to dump FCoE
- * parameters stored in region 23.
+ * This function create a SLI4 dump mailbox command to dump configure
+ * region 23.
  **/
 int
-lpfc_dump_fcoe_param(struct lpfc_hba *phba,
-			struct lpfcMboxq *mbox)
+lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
 {
 	struct lpfc_dmabuf *mp = NULL;
 	MAILBOX_t *mb;
@@ -2198,9 +2209,9 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
 
 	if (!mp || !mp->virt) {
 		kfree(mp);
-		/* dump_fcoe_param failed to allocate memory */
+		/* dump config region 23 failed to allocate memory */
 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-			"2569 lpfc_dump_fcoe_param: memory"
+			"2569 lpfc dump config region 23: memory"
 			" allocation failed\n");
 		return 1;
 	}
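
The rename above generalizes the helper: config region 23 holds more than FCoE parameters on SLI4 ports, so the function is now named after the region rather than one consumer. Per the hunk, the error contract is unchanged — a non-zero return means the DMA buffer could not be prepared (illustrative call-site fragment, error variable assumed):

/* sketch: caller view after the rename */
if (lpfc_sli4_dump_cfg_rg23(phba, mbox))
	rc = -ENOMEM;	/* mailbox prep failed before issue */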
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 10d5b5e41499..ade763d3930a 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -389,7 +389,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 {
 	struct hbq_dmabuf *hbqbp;
 
-	hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
 	if (!hbqbp)
 		return NULL;
 
@@ -441,7 +441,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 {
 	struct hbq_dmabuf *dma_buf;
 
-	dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
 	if (!dma_buf)
 		return NULL;
 
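
The two kzalloc() conversions above change nothing except zero-filling: every field of the freshly allocated hbq_dmabuf starts at zero instead of whatever kmalloc() left behind. Spelled out, the single call is equivalent to (sketch, buf is a placeholder):

/* sketch: what the kzalloc() switch buys */
buf = kmalloc(sizeof(*buf), GFP_KERNEL);
if (buf)
	memset(buf, 0, sizeof(*buf));
/* ...collapses to... */
buf = kzalloc(sizeof(*buf), GFP_KERNEL);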
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 2ddd02f7c603..e8bb00559943 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -783,6 +783,14 @@ lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 }
 
 static uint32_t
+lpfc_device_recov_unused_node(struct lpfc_vport *vport,
+			struct lpfc_nodelist *ndlp,
+			void *arg, uint32_t evt)
+{
+	return ndlp->nlp_state;
+}
+
+static uint32_t
 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			   void *arg, uint32_t evt)
 {
@@ -2147,7 +2155,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
 	lpfc_disc_illegal,		/* CMPL_ADISC */
 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN */
 	lpfc_device_rm_unused_node,	/* DEVICE_RM */
-	lpfc_disc_illegal,		/* DEVICE_RECOVERY */
+	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */
 
 	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE */
 	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI */
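
The jump-table hunk above routes DEVICE_RECOVERY in the UNUSED_NODE state to the new no-op handler instead of lpfc_disc_illegal(), so a recovery event on an unused node no longer logs an error; the handler simply returns the node's current state. Dispatch works roughly as in lpfc_disc_state_machine() (simplified sketch; the indexing formula is an assumption from the table's state-major layout):

/* sketch: state-machine dispatch over the flat action table */
uint32_t (*func)(struct lpfc_vport *, struct lpfc_nodelist *,
		 void *, uint32_t);

func = lpfc_disc_action[(ndlp->nlp_state * NLP_EVT_MAX_EVENT) + evt];
rc = func(vport, ndlp, arg, evt);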
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 2e1e54e5c3ae..c60f5d0b3869 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -681,8 +681,10 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 
 			rrq_empty = list_empty(&phba->active_rrq_list);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
-			if (ndlp)
+			if (ndlp) {
 				lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+			}
 			lpfc_release_scsi_buf_s4(phba, psb);
 			if (rrq_empty)
 				lpfc_worker_wake_up(phba);
@@ -2911,8 +2913,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
 		       &lpfc_cmd->fcp_cmnd->fcp_lun);
 
-	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
-
+	memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
+	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
 	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
 		switch (tag[0]) {
 		case HEAD_OF_QUEUE_TAG:
@@ -3236,6 +3238,15 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		cmnd->result = err;
 		goto out_fail_command;
 	}
+	/*
+	 * Do not let the mid-layer retry I/O too fast. If an I/O is retried
+	 * without waiting a bit then indicate that the device is busy.
+	 */
+	if (cmnd->retries &&
+	    time_before(jiffies, (cmnd->jiffies_at_alloc +
+				  msecs_to_jiffies(LPFC_RETRY_PAUSE *
+						   cmnd->retries))))
+		return SCSI_MLQUEUE_DEVICE_BUSY;
 	ndlp = rdata->pnode;
 
 	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
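
The CDB hunk above pairs a full-width memset with a cmd_len-sized memcpy, so a 6- or 10-byte CDB no longer drags trailing stale bytes into the FCP_CMND payload the way the old fixed 16-byte copy did. The change in isolation (sketch of before/after, taken from the hunk):

/* before: always copied 16 bytes, over-reading short CDBs */
memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

/* after: zero-fill the field, then copy only the valid length */
memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);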
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index ce645b20a6ad..9075a08cf781 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -21,6 +21,7 @@
 #include <asm/byteorder.h>
 
 struct lpfc_hba;
+#define LPFC_FCP_CDB_LEN 16
 
 #define list_remove_head(list, entry, type, member) \
 	do { \
@@ -102,7 +103,7 @@ struct fcp_cmnd {
 #define  WRITE_DATA	 0x01	/* Bit 0 */
 #define  READ_DATA	 0x02	/* Bit 1 */
 
-	uint8_t fcpCdb[16];	/* SRB cdb field is copied here */
+	uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */
 	uint32_t fcpDl;		/* Total transfer length */
 
 };
@@ -153,5 +154,5 @@ struct lpfc_scsi_buf {
 
 #define LPFC_SCSI_DMA_EXT_SIZE	264
 #define LPFC_BPL_SIZE		1024
-
+#define LPFC_RETRY_PAUSE	300
 #define MDAC_DIRECT_CMD		0x22
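
LPFC_RETRY_PAUSE feeds the queuecommand throttle added in the lpfc_scsi.c hunk above: retry N of a command is only accepted once at least N * 300 ms have passed since the command was allocated; anything sooner gets SCSI_MLQUEUE_DEVICE_BUSY and the mid-layer requeues it. In jiffies arithmetic (sketch, earliest is a local name):

/* sketch: the retry throttle spelled out */
unsigned long earliest;

earliest = cmnd->jiffies_at_alloc +
	   msecs_to_jiffies(LPFC_RETRY_PAUSE * cmnd->retries);
if (cmnd->retries && time_before(jiffies, earliest))
	return SCSI_MLQUEUE_DEVICE_BUSY;	/* mid-layer retries later */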
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4d4104f38c98..23a27592388c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -89,15 +89,20 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
 static uint32_t
 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 {
-	union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
+	union lpfc_wqe *temp_wqe;
 	struct lpfc_register doorbell;
 	uint32_t host_index;
 
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return -ENOMEM;
+	temp_wqe = q->qe[q->host_index].wqe;
+
 	/* If the host has not yet processed the next entry then we are done */
 	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
 		return -ENOMEM;
 	/* set consumption flag every once in a while */
-	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
+	if (!((q->host_index + 1) % q->entry_repost))
 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
 	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
 		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
@@ -134,6 +139,10 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
 {
 	uint32_t released = 0;
 
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return 0;
+
 	if (q->hba_index == index)
 		return 0;
 	do {
@@ -158,10 +167,15 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
 static uint32_t
 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
 {
-	struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
+	struct lpfc_mqe *temp_mqe;
 	struct lpfc_register doorbell;
 	uint32_t host_index;
 
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return -ENOMEM;
+	temp_mqe = q->qe[q->host_index].mqe;
+
 	/* If the host has not yet processed the next entry then we are done */
 	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
 		return -ENOMEM;
@@ -195,6 +209,10 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
 static uint32_t
 lpfc_sli4_mq_release(struct lpfc_queue *q)
 {
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return 0;
+
 	/* Clear the mailbox pointer for completion */
 	q->phba->mbox = NULL;
 	q->hba_index = ((q->hba_index + 1) % q->entry_count);
@@ -213,7 +231,12 @@ lpfc_sli4_mq_release(struct lpfc_queue *q)
 static struct lpfc_eqe *
 lpfc_sli4_eq_get(struct lpfc_queue *q)
 {
-	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
+	struct lpfc_eqe *eqe;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return NULL;
+	eqe = q->qe[q->hba_index].eqe;
 
 	/* If the next EQE is not valid then we are done */
 	if (!bf_get_le32(lpfc_eqe_valid, eqe))
@@ -248,6 +271,10 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
248 struct lpfc_eqe *temp_eqe; 271 struct lpfc_eqe *temp_eqe;
249 struct lpfc_register doorbell; 272 struct lpfc_register doorbell;
250 273
274 /* sanity check on queue memory */
275 if (unlikely(!q))
276 return 0;
277
251 /* while there are valid entries */ 278 /* while there are valid entries */
252 while (q->hba_index != q->host_index) { 279 while (q->hba_index != q->host_index) {
253 temp_eqe = q->qe[q->host_index].eqe; 280 temp_eqe = q->qe[q->host_index].eqe;
@@ -288,6 +315,10 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
288{ 315{
289 struct lpfc_cqe *cqe; 316 struct lpfc_cqe *cqe;
290 317
318 /* sanity check on queue memory */
319 if (unlikely(!q))
320 return NULL;
321
291 /* If the next CQE is not valid then we are done */ 322 /* If the next CQE is not valid then we are done */
292 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) 323 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
293 return NULL; 324 return NULL;
@@ -322,6 +353,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
322 struct lpfc_cqe *temp_qe; 353 struct lpfc_cqe *temp_qe;
323 struct lpfc_register doorbell; 354 struct lpfc_register doorbell;
324 355
356 /* sanity check on queue memory */
357 if (unlikely(!q))
358 return 0;
325 /* while there are valid entries */ 359 /* while there are valid entries */
326 while (q->hba_index != q->host_index) { 360 while (q->hba_index != q->host_index) {
327 temp_qe = q->qe[q->host_index].cqe; 361 temp_qe = q->qe[q->host_index].cqe;
@@ -359,11 +393,17 @@ static int
359lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 393lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
360 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 394 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
361{ 395{
362 struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe; 396 struct lpfc_rqe *temp_hrqe;
363 struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe; 397 struct lpfc_rqe *temp_drqe;
364 struct lpfc_register doorbell; 398 struct lpfc_register doorbell;
365 int put_index = hq->host_index; 399 int put_index = hq->host_index;
366 400
401 /* sanity check on queue memory */
402 if (unlikely(!hq) || unlikely(!dq))
403 return -ENOMEM;
404 temp_hrqe = hq->qe[hq->host_index].rqe;
405 temp_drqe = dq->qe[dq->host_index].rqe;
406
367 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 407 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
368 return -EINVAL; 408 return -EINVAL;
369 if (hq->host_index != dq->host_index) 409 if (hq->host_index != dq->host_index)
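lpfc_sli4_rq_put posts one entry to a header/data receive-queue pair, so beyond the new null checks it has to confirm the two rings are the expected types and still in lockstep before the doorbell rings. A small sketch of that precondition, with hypothetical names:

#include <errno.h>
#include <stdint.h>

enum demo_rq_type { DEMO_HRQ, DEMO_DRQ };

struct demo_rq {
	enum demo_rq_type type;
	uint32_t host_index;
};

static int demo_rq_pair_check(const struct demo_rq *hq,
			      const struct demo_rq *dq)
{
	if (!hq || !dq)
		return -ENOMEM;		/* guard added by the patch */
	if (hq->type != DEMO_HRQ || dq->type != DEMO_DRQ)
		return -EINVAL;		/* wrong queue kinds */
	if (hq->host_index != dq->host_index)
		return -EINVAL;		/* pair fell out of lockstep */
	return 0;
}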
@@ -402,6 +442,10 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
402static uint32_t 442static uint32_t
403lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 443lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
404{ 444{
445 /* sanity check on queue memory */
446 if (unlikely(!hq) || unlikely(!dq))
447 return 0;
448
405 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 449 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
406 return 0; 450 return 0;
407 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 451 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
@@ -3575,8 +3619,8 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3575 * lpfc_reset_barrier - Make HBA ready for HBA reset 3619 * lpfc_reset_barrier - Make HBA ready for HBA reset
3576 * @phba: Pointer to HBA context object. 3620 * @phba: Pointer to HBA context object.
3577 * 3621 *
3578 * This function is called before resetting an HBA. This 3622 * This function is called before resetting an HBA. This function is called
3579 * function requests HBA to quiesce DMAs before a reset. 3623 * with hbalock held and requests HBA to quiesce DMAs before a reset.
3580 **/ 3624 **/
3581void lpfc_reset_barrier(struct lpfc_hba *phba) 3625void lpfc_reset_barrier(struct lpfc_hba *phba)
3582{ 3626{
@@ -3851,7 +3895,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3851{ 3895{
3852 struct lpfc_sli *psli = &phba->sli; 3896 struct lpfc_sli *psli = &phba->sli;
3853 uint16_t cfg_value; 3897 uint16_t cfg_value;
3854 uint8_t qindx;
3855 3898
3856 /* Reset HBA */ 3899 /* Reset HBA */
3857 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3900 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3867,19 +3910,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3867 spin_lock_irq(&phba->hbalock); 3910 spin_lock_irq(&phba->hbalock);
3868 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3911 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3869 phba->fcf.fcf_flag = 0; 3912 phba->fcf.fcf_flag = 0;
3870 /* Clean up the child queue list for the CQs */
3871 list_del_init(&phba->sli4_hba.mbx_wq->list);
3872 list_del_init(&phba->sli4_hba.els_wq->list);
3873 list_del_init(&phba->sli4_hba.hdr_rq->list);
3874 list_del_init(&phba->sli4_hba.dat_rq->list);
3875 list_del_init(&phba->sli4_hba.mbx_cq->list);
3876 list_del_init(&phba->sli4_hba.els_cq->list);
3877 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3878 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3879 qindx = 0;
3880 do
3881 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3882 while (++qindx < phba->cfg_fcp_eq_count);
3883 spin_unlock_irq(&phba->hbalock); 3913 spin_unlock_irq(&phba->hbalock);
3884 3914
3885 /* Now physically reset the device */ 3915 /* Now physically reset the device */
@@ -3892,6 +3922,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3892 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3922 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3893 3923
3894 /* Perform FCoE PCI function reset */ 3924 /* Perform FCoE PCI function reset */
3925 lpfc_sli4_queue_destroy(phba);
3895 lpfc_pci_function_reset(phba); 3926 lpfc_pci_function_reset(phba);
3896 3927
3897 /* Restore PCI cmd register */ 3928 /* Restore PCI cmd register */
@@ -4339,6 +4370,11 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4339 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4370 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4340 spin_unlock_irq(&phba->hbalock); 4371 spin_unlock_irq(&phba->hbalock);
4341 done = 1; 4372 done = 1;
4373
4374 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4375 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4376 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4377 "3110 Port did not grant ASABT\n");
4342 } 4378 }
4343 } 4379 }
4344 if (!done) { 4380 if (!done) {
@@ -4551,9 +4587,9 @@ lpfc_sli_hba_setup_error:
4551 * data structure. 4587 * data structure.
4552 **/ 4588 **/
4553static int 4589static int
4554lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, 4590lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
4555 LPFC_MBOXQ_t *mboxq)
4556{ 4591{
4592 LPFC_MBOXQ_t *mboxq;
4557 struct lpfc_dmabuf *mp; 4593 struct lpfc_dmabuf *mp;
4558 struct lpfc_mqe *mqe; 4594 struct lpfc_mqe *mqe;
4559 uint32_t data_length; 4595 uint32_t data_length;
@@ -4565,10 +4601,16 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4565 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4601 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4566 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4602 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4567 4603
4568 mqe = &mboxq->u.mqe; 4604 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4569 if (lpfc_dump_fcoe_param(phba, mboxq)) 4605 if (!mboxq)
4570 return -ENOMEM; 4606 return -ENOMEM;
4571 4607
4608 mqe = &mboxq->u.mqe;
4609 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4610 rc = -ENOMEM;
4611 goto out_free_mboxq;
4612 }
4613
4572 mp = (struct lpfc_dmabuf *) mboxq->context1; 4614 mp = (struct lpfc_dmabuf *) mboxq->context1;
4573 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4615 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4574 4616
@@ -4596,19 +4638,25 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4596 if (rc) { 4638 if (rc) {
4597 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4639 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4598 kfree(mp); 4640 kfree(mp);
4599 return -EIO; 4641 rc = -EIO;
4642 goto out_free_mboxq;
4600 } 4643 }
4601 data_length = mqe->un.mb_words[5]; 4644 data_length = mqe->un.mb_words[5];
4602 if (data_length > DMP_RGN23_SIZE) { 4645 if (data_length > DMP_RGN23_SIZE) {
4603 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4646 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4604 kfree(mp); 4647 kfree(mp);
4605 return -EIO; 4648 rc = -EIO;
4649 goto out_free_mboxq;
4606 } 4650 }
4607 4651
4608 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4652 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4609 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4653 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4610 kfree(mp); 4654 kfree(mp);
4611 return 0; 4655 rc = 0;
4656
4657out_free_mboxq:
4658 mempool_free(mboxq, phba->mbox_mem_pool);
4659 return rc;
4612} 4660}
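The rework above makes lpfc_sli4_read_fcoe_params allocate its own mailbox and funnels every failure path through a single out_free_mboxq label, the usual kernel idiom for unwinding one resource. A generic userspace sketch of that shape (demo_* names are illustrative):

#include <errno.h>
#include <stdlib.h>

struct demo_mbox { int cmd; };

static int demo_issue(struct demo_mbox *m) { return m->cmd ? -1 : 0; }

static int demo_read_params(void)
{
	struct demo_mbox *mbox;
	int rc = 0;

	mbox = malloc(sizeof(*mbox));	/* stands in for mempool_alloc() */
	if (!mbox)
		return -ENOMEM;
	mbox->cmd = 0;

	if (demo_issue(mbox)) {
		rc = -EIO;		/* record the error ... */
		goto out_free_mbox;	/* ... and take the single exit */
	}

out_free_mbox:
	free(mbox);			/* stands in for mempool_free() */
	return rc;
}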
4613 4661
4614/** 4662/**
@@ -4706,7 +4754,6 @@ static int
4706lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 4754lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4707{ 4755{
4708 LPFC_MBOXQ_t *mboxq; 4756 LPFC_MBOXQ_t *mboxq;
4709 struct lpfc_mbx_read_config *rd_config;
4710 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 4757 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4711 struct lpfc_controller_attribute *cntl_attr; 4758 struct lpfc_controller_attribute *cntl_attr;
4712 struct lpfc_mbx_get_port_name *get_port_name; 4759 struct lpfc_mbx_get_port_name *get_port_name;
@@ -4724,33 +4771,11 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4724 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4771 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4725 if (!mboxq) 4772 if (!mboxq)
4726 return -ENOMEM; 4773 return -ENOMEM;
4727
4728 /* obtain link type and link number via READ_CONFIG */ 4774 /* obtain link type and link number via READ_CONFIG */
4729 lpfc_read_config(phba, mboxq); 4775 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4730 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4776 lpfc_sli4_read_config(phba);
4731 if (rc == MBX_SUCCESS) { 4777 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4732 rd_config = &mboxq->u.mqe.un.rd_config; 4778 goto retrieve_ppname;
4733 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
4734 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4735 phba->sli4_hba.lnk_info.lnk_tp =
4736 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
4737 phba->sli4_hba.lnk_info.lnk_no =
4738 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
4739 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4740 "3081 lnk_type:%d, lnk_numb:%d\n",
4741 phba->sli4_hba.lnk_info.lnk_tp,
4742 phba->sli4_hba.lnk_info.lnk_no);
4743 goto retrieve_ppname;
4744 } else
4745 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4746 "3082 Mailbox (x%x) returned ldv:x0\n",
4747 bf_get(lpfc_mqe_command,
4748 &mboxq->u.mqe));
4749 } else
4750 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4751 "3083 Mailbox (x%x) failed, status:x%x\n",
4752 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4753 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4754 4779
4755 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 4780 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4756 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 4781 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
@@ -4875,14 +4900,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4875 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4900 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4876 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4901 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4877 fcp_eqidx = 0; 4902 fcp_eqidx = 0;
4878 do 4903 if (phba->sli4_hba.fcp_cq) {
 4879 	lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4904 	if (phba->sli4_hba.fcp_cq) {
4880 LPFC_QUEUE_REARM); 4905 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4881 while (++fcp_eqidx < phba->cfg_fcp_eq_count); 4906 LPFC_QUEUE_REARM);
 4907 		} while (++fcp_eqidx < phba->cfg_fcp_eq_count);
4908 }
4882 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4909 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4883 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4910 if (phba->sli4_hba.fp_eq) {
4884 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4911 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
4885 LPFC_QUEUE_REARM); 4912 fcp_eqidx++)
4913 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4914 LPFC_QUEUE_REARM);
4915 }
4886} 4916}
4887 4917
4888/** 4918/**
@@ -5457,6 +5487,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5457 uint16_t count, base; 5487 uint16_t count, base;
5458 unsigned long longs; 5488 unsigned long longs;
5459 5489
5490 if (!phba->sli4_hba.rpi_hdrs_in_use)
5491 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5460 if (phba->sli4_hba.extents_in_use) { 5492 if (phba->sli4_hba.extents_in_use) {
5461 /* 5493 /*
5462 * The port supports resource extents. The XRI, VPI, VFI, RPI 5494 * The port supports resource extents. The XRI, VPI, VFI, RPI
@@ -5538,9 +5570,10 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5538 * need any action - just exit. 5570 * need any action - just exit.
5539 */ 5571 */
5540 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5572 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5541 LPFC_IDX_RSRC_RDY) 5573 LPFC_IDX_RSRC_RDY) {
5542 return 0; 5574 lpfc_sli4_dealloc_resource_identifiers(phba);
5543 5575 lpfc_sli4_remove_rpis(phba);
5576 }
5544 /* RPIs. */ 5577 /* RPIs. */
5545 count = phba->sli4_hba.max_cfg_param.max_rpi; 5578 count = phba->sli4_hba.max_cfg_param.max_rpi;
5546 base = phba->sli4_hba.max_cfg_param.rpi_base; 5579 base = phba->sli4_hba.max_cfg_param.rpi_base;
@@ -5880,14 +5913,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5880 if (!mboxq) 5913 if (!mboxq)
5881 return -ENOMEM; 5914 return -ENOMEM;
5882 5915
5883 /*
5884 * Continue initialization with default values even if driver failed
5885 * to read FCoE param config regions
5886 */
5887 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
5888 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
5889 "2570 Failed to read FCoE parameters\n");
5890
5891 /* Issue READ_REV to collect vpd and FW information. */ 5916 /* Issue READ_REV to collect vpd and FW information. */
5892 vpd_size = SLI4_PAGE_SIZE; 5917 vpd_size = SLI4_PAGE_SIZE;
5893 vpd = kzalloc(vpd_size, GFP_KERNEL); 5918 vpd = kzalloc(vpd_size, GFP_KERNEL);
@@ -5925,6 +5950,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5925 } 5950 }
5926 5951
5927 /* 5952 /*
 5953 	 * Continue initialization with default values even if driver failed
 5954 	 * to read FCoE param config regions; only read the parameters if the
 5955 	 * board is FCoE.
5956 */
5957 if (phba->hba_flag & HBA_FCOE_MODE &&
5958 lpfc_sli4_read_fcoe_params(phba))
5959 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
5960 "2570 Failed to read FCoE parameters\n");
5961
5962 /*
5928 * Retrieve sli4 device physical port name, failure of doing it 5963 * Retrieve sli4 device physical port name, failure of doing it
5929 * is considered as non-fatal. 5964 * is considered as non-fatal.
5930 */ 5965 */
@@ -6044,6 +6079,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6044 "rc = x%x\n", rc); 6079 "rc = x%x\n", rc);
6045 goto out_free_mbox; 6080 goto out_free_mbox;
6046 } 6081 }
6082 /* update physical xri mappings in the scsi buffers */
6083 lpfc_scsi_buf_update(phba);
6047 6084
6048 /* Read the port's service parameters. */ 6085 /* Read the port's service parameters. */
6049 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6086 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
@@ -6205,7 +6242,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6205 rc = 0; 6242 rc = 0;
6206 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 6243 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6207 &mboxq->u.mqe.un.reg_fcfi); 6244 &mboxq->u.mqe.un.reg_fcfi);
6245
6246 /* Check if the port is configured to be disabled */
6247 lpfc_sli_read_link_ste(phba);
6208 } 6248 }
6249
6209 /* 6250 /*
6210 * The port is ready, set the host's link state to LINK_DOWN 6251 * The port is ready, set the host's link state to LINK_DOWN
6211 * in preparation for link interrupts. 6252 * in preparation for link interrupts.
@@ -6213,10 +6254,25 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6213 spin_lock_irq(&phba->hbalock); 6254 spin_lock_irq(&phba->hbalock);
6214 phba->link_state = LPFC_LINK_DOWN; 6255 phba->link_state = LPFC_LINK_DOWN;
6215 spin_unlock_irq(&phba->hbalock); 6256 spin_unlock_irq(&phba->hbalock);
6216 if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 6257 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6217 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 6258 (phba->hba_flag & LINK_DISABLED)) {
6218 if (rc) 6259 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6260 "3103 Adapter Link is disabled.\n");
6261 lpfc_down_link(phba, mboxq);
6262 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6263 if (rc != MBX_SUCCESS) {
6264 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6265 "3104 Adapter failed to issue "
6266 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6219 goto out_unset_queue; 6267 goto out_unset_queue;
6268 }
6269 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
6270 /* don't perform init_link on SLI4 FC port loopback test */
6271 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6272 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6273 if (rc)
6274 goto out_unset_queue;
6275 }
6220 } 6276 }
6221 mempool_free(mboxq, phba->mbox_mem_pool); 6277 mempool_free(mboxq, phba->mbox_mem_pool);
6222 return rc; 6278 return rc;
@@ -7487,6 +7543,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7487 struct ulp_bde64 *bpl = NULL; 7543 struct ulp_bde64 *bpl = NULL;
7488 struct ulp_bde64 bde; 7544 struct ulp_bde64 bde;
7489 struct sli4_sge *sgl = NULL; 7545 struct sli4_sge *sgl = NULL;
7546 struct lpfc_dmabuf *dmabuf;
7490 IOCB_t *icmd; 7547 IOCB_t *icmd;
7491 int numBdes = 0; 7548 int numBdes = 0;
7492 int i = 0; 7549 int i = 0;
@@ -7505,9 +7562,12 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7505 * have not been byteswapped yet so there is no 7562 * have not been byteswapped yet so there is no
7506 * need to swap them back. 7563 * need to swap them back.
7507 */ 7564 */
7508 bpl = (struct ulp_bde64 *) 7565 if (piocbq->context3)
7509 ((struct lpfc_dmabuf *)piocbq->context3)->virt; 7566 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
7567 else
7568 return xritag;
7510 7569
7570 bpl = (struct ulp_bde64 *)dmabuf->virt;
7511 if (!bpl) 7571 if (!bpl)
7512 return xritag; 7572 return xritag;
7513 7573
@@ -7616,6 +7676,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7616 int numBdes, i; 7676 int numBdes, i;
7617 struct ulp_bde64 bde; 7677 struct ulp_bde64 bde;
7618 struct lpfc_nodelist *ndlp; 7678 struct lpfc_nodelist *ndlp;
7679 uint32_t *pcmd;
7680 uint32_t if_type;
7619 7681
7620 fip = phba->hba_flag & HBA_FIP_SUPPORT; 7682 fip = phba->hba_flag & HBA_FIP_SUPPORT;
7621 /* The fcp commands will set command type */ 7683 /* The fcp commands will set command type */
@@ -7669,6 +7731,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7669 iocbq->iocb.ulpCommand); 7731 iocbq->iocb.ulpCommand);
7670 return IOCB_ERROR; 7732 return IOCB_ERROR;
7671 } 7733 }
7734
7672 wqe->els_req.payload_len = xmit_len; 7735 wqe->els_req.payload_len = xmit_len;
 7673 	/* Els_request64 has a TMO */ 7736
7674 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 7737 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
@@ -7683,9 +7746,28 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7683 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 7746 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
7684 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 7747 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
7685 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 7748 /* CCP CCPE PV PRI in word10 were set in the memcpy */
7686 if (command_type == ELS_COMMAND_FIP) { 7749 if (command_type == ELS_COMMAND_FIP)
7687 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 7750 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
7688 >> LPFC_FIP_ELS_ID_SHIFT); 7751 >> LPFC_FIP_ELS_ID_SHIFT);
7752 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
7753 iocbq->context2)->virt);
7754 if_type = bf_get(lpfc_sli_intf_if_type,
7755 &phba->sli4_hba.sli_intf);
7756 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7757 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
7758 *pcmd == ELS_CMD_SCR ||
7759 *pcmd == ELS_CMD_PLOGI)) {
7760 bf_set(els_req64_sp, &wqe->els_req, 1);
7761 bf_set(els_req64_sid, &wqe->els_req,
7762 iocbq->vport->fc_myDID);
7763 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
7764 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7765 phba->vpi_ids[phba->pport->vpi]);
7766 } else if (iocbq->context1) {
7767 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
7768 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7769 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
7770 }
7689 } 7771 }
7690 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 7772 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
7691 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7773 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
@@ -7704,6 +7786,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7704 /* The entire sequence is transmitted for this IOCB */ 7786 /* The entire sequence is transmitted for this IOCB */
7705 xmit_len = total_len; 7787 xmit_len = total_len;
7706 cmnd = CMD_XMIT_SEQUENCE64_CR; 7788 cmnd = CMD_XMIT_SEQUENCE64_CR;
7789 if (phba->link_flag & LS_LOOPBACK_MODE)
7790 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
7707 case CMD_XMIT_SEQUENCE64_CR: 7791 case CMD_XMIT_SEQUENCE64_CR:
7708 /* word3 iocb=io_tag32 wqe=reserved */ 7792 /* word3 iocb=io_tag32 wqe=reserved */
7709 wqe->xmit_sequence.rsvd3 = 0; 7793 wqe->xmit_sequence.rsvd3 = 0;
@@ -7846,6 +7930,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7846 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 7930 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
7847 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 7931 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
7848 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7932 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
7933 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
7934 iocbq->context2)->virt);
7935 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
7936 bf_set(els_req64_sp, &wqe->els_req, 1);
7937 bf_set(els_req64_sid, &wqe->els_req,
7938 iocbq->vport->fc_myDID);
7939 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
7940 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7941 phba->vpi_ids[phba->pport->vpi]);
7942 }
7849 command_type = OTHER_COMMAND; 7943 command_type = OTHER_COMMAND;
7850 break; 7944 break;
7851 case CMD_CLOSE_XRI_CN: 7945 case CMD_CLOSE_XRI_CN:
@@ -8037,6 +8131,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8037 */ 8131 */
8038 if (piocb->iocb_flag & LPFC_IO_FCP) 8132 if (piocb->iocb_flag & LPFC_IO_FCP)
8039 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); 8133 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8134 if (unlikely(!phba->sli4_hba.fcp_wq))
8135 return IOCB_ERROR;
8040 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8136 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8041 &wqe)) 8137 &wqe))
8042 return IOCB_ERROR; 8138 return IOCB_ERROR;
@@ -8173,6 +8269,137 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
8173 return 0; 8269 return 0;
8174} 8270}
8175 8271
 8272/** lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
8273 * @vport: pointer to virtual port object.
8274 * @ndlp: nodelist pointer for the impacted rport.
8275 *
8276 * The driver calls this routine in response to a XRI ABORT CQE
8277 * event from the port. In this event, the driver is required to
8278 * recover its login to the rport even though its login may be valid
8279 * from the driver's perspective. The failed ABTS notice from the
8280 * port indicates the rport is not responding.
8281 */
8282static void
8283lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
8284 struct lpfc_nodelist *ndlp)
8285{
8286 struct Scsi_Host *shost;
8287 struct lpfc_hba *phba;
8288 unsigned long flags = 0;
8289
8290 shost = lpfc_shost_from_vport(vport);
8291 phba = vport->phba;
8292 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
8293 lpfc_printf_log(phba, KERN_INFO,
8294 LOG_SLI, "3093 No rport recovery needed. "
8295 "rport in state 0x%x\n",
8296 ndlp->nlp_state);
8297 return;
8298 }
8299 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8300 "3094 Start rport recovery on shost id 0x%x "
8301 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
8302 "flags 0x%x\n",
8303 shost->host_no, ndlp->nlp_DID,
8304 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
8305 ndlp->nlp_flag);
8306 /*
8307 * The rport is not responding. Don't attempt ADISC recovery.
8308 * Remove the FCP-2 flag to force a PLOGI.
8309 */
8310 spin_lock_irqsave(shost->host_lock, flags);
8311 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
8312 spin_unlock_irqrestore(shost->host_lock, flags);
8313 lpfc_disc_state_machine(vport, ndlp, NULL,
8314 NLP_EVT_DEVICE_RECOVERY);
8315 lpfc_cancel_retry_delay_tmo(vport, ndlp);
8316 spin_lock_irqsave(shost->host_lock, flags);
8317 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
8318 spin_unlock_irqrestore(shost->host_lock, flags);
8319 lpfc_disc_start(vport);
8320}
8321
 8322/** lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
8323 * @phba: Pointer to HBA context object.
8324 * @iocbq: Pointer to iocb object.
8325 *
8326 * The async_event handler calls this routine when it receives
8327 * an ASYNC_STATUS_CN event from the port. The port generates
8328 * this event when an Abort Sequence request to an rport fails
8329 * twice in succession. The abort could be originated by the
8330 * driver or by the port. The ABTS could have been for an ELS
8331 * or FCP IO. The port only generates this event when an ABTS
8332 * fails to complete after one retry.
8333 */
8334static void
8335lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
8336 struct lpfc_iocbq *iocbq)
8337{
8338 struct lpfc_nodelist *ndlp = NULL;
8339 uint16_t rpi = 0, vpi = 0;
8340 struct lpfc_vport *vport = NULL;
8341
8342 /* The rpi in the ulpContext is vport-sensitive. */
8343 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
8344 rpi = iocbq->iocb.ulpContext;
8345
8346 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8347 "3092 Port generated ABTS async event "
8348 "on vpi %d rpi %d status 0x%x\n",
8349 vpi, rpi, iocbq->iocb.ulpStatus);
8350
8351 vport = lpfc_find_vport_by_vpid(phba, vpi);
8352 if (!vport)
8353 goto err_exit;
8354 ndlp = lpfc_findnode_rpi(vport, rpi);
8355 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8356 goto err_exit;
8357
8358 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
8359 lpfc_sli_abts_recover_port(vport, ndlp);
8360 return;
8361
8362 err_exit:
8363 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8364 "3095 Event Context not found, no "
8365 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
 8366 			vpi, rpi, iocbq->iocb.ulpStatus,
 8367 			iocbq->iocb.un.ulpWord[4]);
8368}
8369
 8370/** lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
8371 * @phba: pointer to HBA context object.
8372 * @ndlp: nodelist pointer for the impacted rport.
8373 * @axri: pointer to the wcqe containing the failed exchange.
8374 *
8375 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
8376 * port. The port generates this event when an abort exchange request to an
8377 * rport fails twice in succession with no reply. The abort could be originated
8378 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
8379 */
8380void
8381lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8382 struct lpfc_nodelist *ndlp,
8383 struct sli4_wcqe_xri_aborted *axri)
8384{
8385 struct lpfc_vport *vport;
8386
 8387 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 8388 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 8389 			    "3115 Node Context not found, driver "
 8390 			    "ignoring abts err event\n");
 8391 		return;
 8392 	}
 8393 	vport = ndlp->vport;
8392 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8393 "3116 Port generated FCP XRI ABORT event on "
8394 "vpi %d rpi %d xri x%x status 0x%x\n",
8395 ndlp->vport->vpi, ndlp->nlp_rpi,
8396 bf_get(lpfc_wcqe_xa_xri, axri),
8397 bf_get(lpfc_wcqe_xa_status, axri));
8398
8399 if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT)
8400 lpfc_sli_abts_recover_port(vport, ndlp);
8401}
8402
8176/** 8403/**
8177 * lpfc_sli_async_event_handler - ASYNC iocb handler function 8404 * lpfc_sli_async_event_handler - ASYNC iocb handler function
8178 * @phba: Pointer to HBA context object. 8405 * @phba: Pointer to HBA context object.
@@ -8192,63 +8419,58 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
8192{ 8419{
8193 IOCB_t *icmd; 8420 IOCB_t *icmd;
8194 uint16_t evt_code; 8421 uint16_t evt_code;
8195 uint16_t temp;
8196 struct temp_event temp_event_data; 8422 struct temp_event temp_event_data;
8197 struct Scsi_Host *shost; 8423 struct Scsi_Host *shost;
8198 uint32_t *iocb_w; 8424 uint32_t *iocb_w;
8199 8425
8200 icmd = &iocbq->iocb; 8426 icmd = &iocbq->iocb;
8201 evt_code = icmd->un.asyncstat.evt_code; 8427 evt_code = icmd->un.asyncstat.evt_code;
8202 temp = icmd->ulpContext;
8203 8428
8204 if ((evt_code != ASYNC_TEMP_WARN) && 8429 switch (evt_code) {
8205 (evt_code != ASYNC_TEMP_SAFE)) { 8430 case ASYNC_TEMP_WARN:
8431 case ASYNC_TEMP_SAFE:
8432 temp_event_data.data = (uint32_t) icmd->ulpContext;
8433 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8434 if (evt_code == ASYNC_TEMP_WARN) {
8435 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8436 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8437 "0347 Adapter is very hot, please take "
8438 "corrective action. temperature : %d Celsius\n",
8439 (uint32_t) icmd->ulpContext);
8440 } else {
8441 temp_event_data.event_code = LPFC_NORMAL_TEMP;
8442 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8443 "0340 Adapter temperature is OK now. "
8444 "temperature : %d Celsius\n",
8445 (uint32_t) icmd->ulpContext);
8446 }
8447
8448 /* Send temperature change event to applications */
8449 shost = lpfc_shost_from_vport(phba->pport);
8450 fc_host_post_vendor_event(shost, fc_get_event_number(),
8451 sizeof(temp_event_data), (char *) &temp_event_data,
8452 LPFC_NL_VENDOR_ID);
8453 break;
8454 case ASYNC_STATUS_CN:
8455 lpfc_sli_abts_err_handler(phba, iocbq);
8456 break;
8457 default:
8206 iocb_w = (uint32_t *) icmd; 8458 iocb_w = (uint32_t *) icmd;
8207 lpfc_printf_log(phba, 8459 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8208 KERN_ERR,
8209 LOG_SLI,
8210 "0346 Ring %d handler: unexpected ASYNC_STATUS" 8460 "0346 Ring %d handler: unexpected ASYNC_STATUS"
8211 " evt_code 0x%x\n" 8461 " evt_code 0x%x\n"
8212 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 8462 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
8213 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 8463 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
8214 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 8464 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
8215 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 8465 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
8216 pring->ringno, 8466 pring->ringno, icmd->un.asyncstat.evt_code,
8217 icmd->un.asyncstat.evt_code,
8218 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 8467 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
8219 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 8468 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
8220 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 8469 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
8221 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 8470 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
8222 8471
8223 return; 8472 break;
8224 }
8225 temp_event_data.data = (uint32_t)temp;
8226 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8227 if (evt_code == ASYNC_TEMP_WARN) {
8228 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8229 lpfc_printf_log(phba,
8230 KERN_ERR,
8231 LOG_TEMP,
8232 "0347 Adapter is very hot, please take "
8233 "corrective action. temperature : %d Celsius\n",
8234 temp);
8235 }
8236 if (evt_code == ASYNC_TEMP_SAFE) {
8237 temp_event_data.event_code = LPFC_NORMAL_TEMP;
8238 lpfc_printf_log(phba,
8239 KERN_ERR,
8240 LOG_TEMP,
8241 "0340 Adapter temperature is OK now. "
8242 "temperature : %d Celsius\n",
8243 temp);
8244 } 8473 }
8245
8246 /* Send temperature change event to applications */
8247 shost = lpfc_shost_from_vport(phba->pport);
8248 fc_host_post_vendor_event(shost, fc_get_event_number(),
8249 sizeof(temp_event_data), (char *) &temp_event_data,
8250 LPFC_NL_VENDOR_ID);
8251
8252} 8474}
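The rewrite above folds the temperature and the new ABTS async events into one switch, so the vendor-event posting is shared by both temperature cases and the register dump of unknown codes moves into default. A compact sketch of that dispatch shape (names hypothetical):

#include <stdint.h>
#include <stdio.h>

enum { DEMO_TEMP_WARN = 1, DEMO_TEMP_SAFE, DEMO_STATUS_CN };

static void demo_async_dispatch(uint16_t evt_code, uint32_t data)
{
	switch (evt_code) {
	case DEMO_TEMP_WARN:
	case DEMO_TEMP_SAFE:
		/* shared tail: both cases post the same vendor event */
		printf("temperature event %u: %u C\n",
		       (unsigned)evt_code, (unsigned)data);
		break;
	case DEMO_STATUS_CN:
		printf("ABTS error event\n");	/* helper added by the patch */
		break;
	default:
		printf("unexpected async evt_code 0x%x\n", (unsigned)evt_code);
		break;
	}
}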
8253 8475
8254 8476
@@ -8823,12 +9045,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8823{ 9045{
8824 IOCB_t *irsp = &rspiocb->iocb; 9046 IOCB_t *irsp = &rspiocb->iocb;
8825 uint16_t abort_iotag, abort_context; 9047 uint16_t abort_iotag, abort_context;
8826 struct lpfc_iocbq *abort_iocb; 9048 struct lpfc_iocbq *abort_iocb = NULL;
8827 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8828
8829 abort_iocb = NULL;
8830 9049
8831 if (irsp->ulpStatus) { 9050 if (irsp->ulpStatus) {
9051
9052 /*
9053 * Assume that the port already completed and returned, or
 9054 	 * will return the iocb. Just log the message.
9055 */
8832 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 9056 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
8833 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 9057 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
8834 9058
@@ -8846,68 +9070,15 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8846 */ 9070 */
8847 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 9071 abort_iocb = phba->sli.iocbq_lookup[abort_context];
8848 9072
8849 /*
8850 * If the iocb is not found in Firmware queue the iocb
8851 * might have completed already. Do not free it again.
8852 */
8853 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
8854 if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
8855 spin_unlock_irq(&phba->hbalock);
8856 lpfc_sli_release_iocbq(phba, cmdiocb);
8857 return;
8858 }
8859 /* For SLI4 the ulpContext field for abort IOCB
8860 * holds the iotag of the IOCB being aborted so
8861 * the local abort_context needs to be reset to
8862 * match the aborted IOCBs ulpContext.
8863 */
8864 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
8865 abort_context = abort_iocb->iocb.ulpContext;
8866 }
8867
8868 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 9073 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
8869 "0327 Cannot abort els iocb %p " 9074 "0327 Cannot abort els iocb %p "
8870 "with tag %x context %x, abort status %x, " 9075 "with tag %x context %x, abort status %x, "
8871 "abort code %x\n", 9076 "abort code %x\n",
8872 abort_iocb, abort_iotag, abort_context, 9077 abort_iocb, abort_iotag, abort_context,
8873 irsp->ulpStatus, irsp->un.ulpWord[4]); 9078 irsp->ulpStatus, irsp->un.ulpWord[4]);
8874 /*
8875 * make sure we have the right iocbq before taking it
8876 * off the txcmplq and try to call completion routine.
8877 */
8878 if (!abort_iocb ||
8879 abort_iocb->iocb.ulpContext != abort_context ||
8880 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
8881 spin_unlock_irq(&phba->hbalock);
8882 else if (phba->sli_rev < LPFC_SLI_REV4) {
8883 /*
8884 * leave the SLI4 aborted command on the txcmplq
8885 * list and the command complete WCQE's XB bit
8886 * will tell whether the SGL (XRI) can be released
8887 * immediately or to the aborted SGL list for the
8888 * following abort XRI from the HBA.
8889 */
8890 list_del_init(&abort_iocb->list);
8891 if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
8892 abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
8893 pring->txcmplq_cnt--;
8894 }
8895 9079
8896 /* Firmware could still be in progress of DMAing 9080 spin_unlock_irq(&phba->hbalock);
8897 * payload, so don't free data buffer till after
8898 * a hbeat.
8899 */
8900 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
8901 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
8902 spin_unlock_irq(&phba->hbalock);
8903
8904 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
8905 abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
8906 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
8907 } else
8908 spin_unlock_irq(&phba->hbalock);
8909 } 9081 }
8910
8911 lpfc_sli_release_iocbq(phba, cmdiocb); 9082 lpfc_sli_release_iocbq(phba, cmdiocb);
8912 return; 9083 return;
8913} 9084}
@@ -9258,6 +9429,14 @@ void
9258lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9429lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9259 struct lpfc_iocbq *rspiocb) 9430 struct lpfc_iocbq *rspiocb)
9260{ 9431{
9432 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9433 "3096 ABORT_XRI_CN completing on xri x%x "
9434 "original iotag x%x, abort cmd iotag x%x "
9435 "status 0x%x, reason 0x%x\n",
9436 cmdiocb->iocb.un.acxri.abortContextTag,
9437 cmdiocb->iocb.un.acxri.abortIoTag,
9438 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
9439 rspiocb->iocb.un.ulpWord[4]);
9261 lpfc_sli_release_iocbq(phba, cmdiocb); 9440 lpfc_sli_release_iocbq(phba, cmdiocb);
9262 return; 9441 return;
9263} 9442}
@@ -9771,7 +9950,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
9771 phba->work_status[1] = 9950 phba->work_status[1] =
9772 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 9951 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
9773 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9774 "2885 Port Error Detected: " 9953 "2885 Port Status Event: "
9775 "port status reg 0x%x, " 9954 "port status reg 0x%x, "
9776 "port smphr reg 0x%x, " 9955 "port smphr reg 0x%x, "
9777 "error 1=0x%x, error 2=0x%x\n", 9956 "error 1=0x%x, error 2=0x%x\n",
@@ -10777,6 +10956,9 @@ static void
10777lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 10956lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
10778 struct lpfc_wcqe_release *wcqe) 10957 struct lpfc_wcqe_release *wcqe)
10779{ 10958{
10959 /* sanity check on queue memory */
10960 if (unlikely(!phba->sli4_hba.els_wq))
10961 return;
10780 /* Check for the slow-path ELS work queue */ 10962 /* Check for the slow-path ELS work queue */
10781 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 10963 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
10782 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 10964 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
@@ -10866,6 +11048,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
10866 uint32_t status, rq_id; 11048 uint32_t status, rq_id;
10867 unsigned long iflags; 11049 unsigned long iflags;
10868 11050
11051 /* sanity check on queue memory */
11052 if (unlikely(!hrq) || unlikely(!drq))
11053 return workposted;
11054
10869 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 11055 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
10870 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 11056 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
10871 else 11057 else
@@ -11000,6 +11186,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
11000 11186
11001 /* Search for completion queue pointer matching this cqid */ 11187 /* Search for completion queue pointer matching this cqid */
11002 speq = phba->sli4_hba.sp_eq; 11188 speq = phba->sli4_hba.sp_eq;
11189 /* sanity check on queue memory */
11190 if (unlikely(!speq))
11191 return;
11003 list_for_each_entry(childq, &speq->child_list, list) { 11192 list_for_each_entry(childq, &speq->child_list, list) {
11004 if (childq->queue_id == cqid) { 11193 if (childq->queue_id == cqid) {
11005 cq = childq; 11194 cq = childq;
@@ -11241,12 +11430,18 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11241 return; 11430 return;
11242 } 11431 }
11243 11432
11433 if (unlikely(!phba->sli4_hba.fcp_cq)) {
11434 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11435 "3146 Fast-path completion queues "
11436 "does not exist\n");
11437 return;
11438 }
11244 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 11439 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
11245 if (unlikely(!cq)) { 11440 if (unlikely(!cq)) {
11246 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11441 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11247 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11442 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11248 "0367 Fast-path completion queue " 11443 "0367 Fast-path completion queue "
11249 "does not exist\n"); 11444 "(%d) does not exist\n", fcp_cqidx);
11250 return; 11445 return;
11251 } 11446 }
11252 11447
@@ -11417,6 +11612,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11417 11612
11418 /* Get to the EQ struct associated with this vector */ 11613 /* Get to the EQ struct associated with this vector */
11419 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11614 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
11615 if (unlikely(!fpeq))
11616 return IRQ_NONE;
11420 11617
11421 /* Check device state for handling interrupt */ 11618 /* Check device state for handling interrupt */
11422 if (unlikely(lpfc_intr_state_check(phba))) { 11619 if (unlikely(lpfc_intr_state_check(phba))) {
@@ -11635,6 +11832,9 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
11635 uint16_t dmult; 11832 uint16_t dmult;
11636 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11833 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11637 11834
11835 /* sanity check on queue memory */
11836 if (!eq)
11837 return -ENODEV;
11638 if (!phba->sli4_hba.pc_sli4_params.supported) 11838 if (!phba->sli4_hba.pc_sli4_params.supported)
11639 hw_page_size = SLI4_PAGE_SIZE; 11839 hw_page_size = SLI4_PAGE_SIZE;
11640 11840
@@ -11751,6 +11951,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
11751 union lpfc_sli4_cfg_shdr *shdr; 11951 union lpfc_sli4_cfg_shdr *shdr;
11752 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11952 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11753 11953
11954 /* sanity check on queue memory */
11955 if (!cq || !eq)
11956 return -ENODEV;
11754 if (!phba->sli4_hba.pc_sli4_params.supported) 11957 if (!phba->sli4_hba.pc_sli4_params.supported)
11755 hw_page_size = SLI4_PAGE_SIZE; 11958 hw_page_size = SLI4_PAGE_SIZE;
11756 11959
@@ -11933,6 +12136,9 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
11933 union lpfc_sli4_cfg_shdr *shdr; 12136 union lpfc_sli4_cfg_shdr *shdr;
11934 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12137 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11935 12138
12139 /* sanity check on queue memory */
12140 if (!mq || !cq)
12141 return -ENODEV;
11936 if (!phba->sli4_hba.pc_sli4_params.supported) 12142 if (!phba->sli4_hba.pc_sli4_params.supported)
11937 hw_page_size = SLI4_PAGE_SIZE; 12143 hw_page_size = SLI4_PAGE_SIZE;
11938 12144
@@ -12083,6 +12289,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12083 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12289 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12084 struct dma_address *page; 12290 struct dma_address *page;
12085 12291
12292 /* sanity check on queue memory */
12293 if (!wq || !cq)
12294 return -ENODEV;
12086 if (!phba->sli4_hba.pc_sli4_params.supported) 12295 if (!phba->sli4_hba.pc_sli4_params.supported)
12087 hw_page_size = SLI4_PAGE_SIZE; 12296 hw_page_size = SLI4_PAGE_SIZE;
12088 12297
@@ -12151,6 +12360,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12151 wq->subtype = subtype; 12360 wq->subtype = subtype;
12152 wq->host_index = 0; 12361 wq->host_index = 0;
12153 wq->hba_index = 0; 12362 wq->hba_index = 0;
12363 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
12154 12364
12155 /* link the wq onto the parent cq child list */ 12365 /* link the wq onto the parent cq child list */
12156 list_add_tail(&wq->list, &cq->child_list); 12366 list_add_tail(&wq->list, &cq->child_list);
@@ -12174,6 +12384,9 @@ lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
12174{ 12384{
12175 uint32_t cnt; 12385 uint32_t cnt;
12176 12386
12387 /* sanity check on queue memory */
12388 if (!rq)
12389 return;
12177 cnt = lpfc_hbq_defs[qno]->entry_count; 12390 cnt = lpfc_hbq_defs[qno]->entry_count;
12178 12391
12179 /* Recalc repost for RQs based on buffers initially posted */ 12392 /* Recalc repost for RQs based on buffers initially posted */
@@ -12219,6 +12432,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12219 union lpfc_sli4_cfg_shdr *shdr; 12432 union lpfc_sli4_cfg_shdr *shdr;
12220 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12433 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12221 12434
12435 /* sanity check on queue memory */
12436 if (!hrq || !drq || !cq)
12437 return -ENODEV;
12222 if (!phba->sli4_hba.pc_sli4_params.supported) 12438 if (!phba->sli4_hba.pc_sli4_params.supported)
12223 hw_page_size = SLI4_PAGE_SIZE; 12439 hw_page_size = SLI4_PAGE_SIZE;
12224 12440
@@ -12420,6 +12636,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
12420 uint32_t shdr_status, shdr_add_status; 12636 uint32_t shdr_status, shdr_add_status;
12421 union lpfc_sli4_cfg_shdr *shdr; 12637 union lpfc_sli4_cfg_shdr *shdr;
12422 12638
12639 /* sanity check on queue memory */
12423 if (!eq) 12640 if (!eq)
12424 return -ENODEV; 12641 return -ENODEV;
12425 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 12642 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12475,6 +12692,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
12475 uint32_t shdr_status, shdr_add_status; 12692 uint32_t shdr_status, shdr_add_status;
12476 union lpfc_sli4_cfg_shdr *shdr; 12693 union lpfc_sli4_cfg_shdr *shdr;
12477 12694
12695 /* sanity check on queue memory */
12478 if (!cq) 12696 if (!cq)
12479 return -ENODEV; 12697 return -ENODEV;
12480 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 12698 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12528,6 +12746,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
12528 uint32_t shdr_status, shdr_add_status; 12746 uint32_t shdr_status, shdr_add_status;
12529 union lpfc_sli4_cfg_shdr *shdr; 12747 union lpfc_sli4_cfg_shdr *shdr;
12530 12748
12749 /* sanity check on queue memory */
12531 if (!mq) 12750 if (!mq)
12532 return -ENODEV; 12751 return -ENODEV;
12533 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 12752 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12581,6 +12800,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
12581 uint32_t shdr_status, shdr_add_status; 12800 uint32_t shdr_status, shdr_add_status;
12582 union lpfc_sli4_cfg_shdr *shdr; 12801 union lpfc_sli4_cfg_shdr *shdr;
12583 12802
12803 /* sanity check on queue memory */
12584 if (!wq) 12804 if (!wq)
12585 return -ENODEV; 12805 return -ENODEV;
12586 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 12806 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12634,6 +12854,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12634 uint32_t shdr_status, shdr_add_status; 12854 uint32_t shdr_status, shdr_add_status;
12635 union lpfc_sli4_cfg_shdr *shdr; 12855 union lpfc_sli4_cfg_shdr *shdr;
12636 12856
12857 /* sanity check on queue memory */
12637 if (!hrq || !drq) 12858 if (!hrq || !drq)
12638 return -ENODEV; 12859 return -ENODEV;
12639 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 12860 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -15252,45 +15473,42 @@ lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
15252} 15473}
15253 15474
15254/** 15475/**
15255 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 15476 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
15256 * @phba: pointer to lpfc hba data structure. 15477 * @phba: pointer to lpfc hba data structure.
 15478 * @rgn23_data: pointer to config region 23 data.
 15257 * 15479 *
 15258 * This function read region 23 and parse TLV for port status to 15480 * This function gets SLI3 port config region 23 data through the memory
 15259 * decide if the user disaled the port. If the TLV indicates the 15481 * dump mailbox command. On success the size of the retrieved data is
 15260 * port is disabled, the hba_flag is set accordingly. 15482 * returned; otherwise 0 is returned.
15261 **/ 15483 **/
15262void 15484static uint32_t
15263lpfc_sli_read_link_ste(struct lpfc_hba *phba) 15485lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15264{ 15486{
15265 LPFC_MBOXQ_t *pmb = NULL; 15487 LPFC_MBOXQ_t *pmb = NULL;
15266 MAILBOX_t *mb; 15488 MAILBOX_t *mb;
15267 uint8_t *rgn23_data = NULL; 15489 uint32_t offset = 0;
15268 uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
15269 int rc; 15490 int rc;
15270 15491
15492 if (!rgn23_data)
15493 return 0;
15494
15271 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15495 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15272 if (!pmb) { 15496 if (!pmb) {
15273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15274 "2600 lpfc_sli_read_serdes_param failed to" 15498 "2600 failed to allocate mailbox memory\n");
15275 " allocate mailbox memory\n"); 15499 return 0;
15276 goto out;
15277 } 15500 }
15278 mb = &pmb->u.mb; 15501 mb = &pmb->u.mb;
15279 15502
15280 /* Get adapter Region 23 data */
15281 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
15282 if (!rgn23_data)
15283 goto out;
15284
15285 do { 15503 do {
15286 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 15504 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
15287 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 15505 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
15288 15506
15289 if (rc != MBX_SUCCESS) { 15507 if (rc != MBX_SUCCESS) {
15290 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15508 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15291 "2601 lpfc_sli_read_link_ste failed to" 15509 "2601 failed to read config "
15292 " read config region 23 rc 0x%x Status 0x%x\n", 15510 "region 23, rc 0x%x Status 0x%x\n",
15293 rc, mb->mbxStatus); 15511 rc, mb->mbxStatus);
15294 mb->un.varDmp.word_cnt = 0; 15512 mb->un.varDmp.word_cnt = 0;
15295 } 15513 }
15296 /* 15514 /*
@@ -15303,13 +15521,96 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
15303 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 15521 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
15304 15522
15305 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 15523 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
15306 rgn23_data + offset, 15524 rgn23_data + offset,
15307 mb->un.varDmp.word_cnt); 15525 mb->un.varDmp.word_cnt);
15308 offset += mb->un.varDmp.word_cnt; 15526 offset += mb->un.varDmp.word_cnt;
15309 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 15527 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
15310 15528
15311 data_size = offset; 15529 mempool_free(pmb, phba->mbox_mem_pool);
15312 offset = 0; 15530 return offset;
15531}
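Both region 23 helpers read the configuration region in mailbox-sized chunks, stopping on a short read or once DMP_RGN23_SIZE bytes have been gathered. A stripped-down sketch of the loop, where demo_dump_chunk() is a hypothetical stand-in for the dump-memory mailbox round trip:

#include <stdint.h>

#define DEMO_RGN23_SIZE 4096u

/* Pretend each mailbox round trip yields up to 128 bytes. */
static uint32_t demo_dump_chunk(uint8_t *buf, uint32_t offset, uint32_t max)
{
	(void)buf;
	(void)offset;
	return max < 128u ? max : 128u;
}

/* Returns the total number of bytes retrieved (0 means nothing read). */
static uint32_t demo_read_region23(uint8_t *buf)
{
	uint32_t offset = 0, n;

	do {
		n = demo_dump_chunk(buf, offset, DEMO_RGN23_SIZE - offset);
		offset += n;
	} while (n && offset < DEMO_RGN23_SIZE);

	return offset;
}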
15532
15533/**
15534 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
15535 * @phba: pointer to lpfc hba data structure.
 15536 * @rgn23_data: pointer to config region 23 data.
 15537 *
 15538 * This function gets SLI4 port config region 23 data through the memory
 15539 * dump mailbox command. On success the size of the retrieved data is
 15540 * returned; otherwise 0 is returned.
15541 **/
15542static uint32_t
15543lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15544{
15545 LPFC_MBOXQ_t *mboxq = NULL;
15546 struct lpfc_dmabuf *mp = NULL;
15547 struct lpfc_mqe *mqe;
15548 uint32_t data_length = 0;
15549 int rc;
15550
15551 if (!rgn23_data)
15552 return 0;
15553
15554 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15555 if (!mboxq) {
15556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15557 "3105 failed to allocate mailbox memory\n");
15558 return 0;
15559 }
15560
15561 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
15562 goto out;
15563 mqe = &mboxq->u.mqe;
15564 mp = (struct lpfc_dmabuf *) mboxq->context1;
15565 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
15566 if (rc)
15567 goto out;
15568 data_length = mqe->un.mb_words[5];
15569 if (data_length == 0)
15570 goto out;
15571 if (data_length > DMP_RGN23_SIZE) {
15572 data_length = 0;
15573 goto out;
15574 }
15575 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
15576out:
15577 mempool_free(mboxq, phba->mbox_mem_pool);
15578 if (mp) {
15579 lpfc_mbuf_free(phba, mp->virt, mp->phys);
15580 kfree(mp);
15581 }
15582 return data_length;
15583}
15584
15585/**
15586 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
15587 * @phba: pointer to lpfc hba data structure.
15588 *
 15589 * This function reads region 23 and parses the TLV for port status to
 15590 * decide if the user disabled the port. If the TLV indicates the
15591 * port is disabled, the hba_flag is set accordingly.
15592 **/
15593void
15594lpfc_sli_read_link_ste(struct lpfc_hba *phba)
15595{
15596 uint8_t *rgn23_data = NULL;
15597 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
15598 uint32_t offset = 0;
15599
15600 /* Get adapter Region 23 data */
15601 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
15602 if (!rgn23_data)
15603 goto out;
15604
15605 if (phba->sli_rev < LPFC_SLI_REV4)
15606 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
15607 else {
15608 if_type = bf_get(lpfc_sli_intf_if_type,
15609 &phba->sli4_hba.sli_intf);
15610 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
15611 goto out;
15612 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
15613 }
15313 15614
15314 if (!data_size) 15615 if (!data_size)
15315 goto out; 15616 goto out;
@@ -15373,9 +15674,8 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
15373 goto out; 15674 goto out;
15374 } 15675 }
15375 } 15676 }
15677
15376out: 15678out:
15377 if (pmb)
15378 mempool_free(pmb, phba->mbox_mem_pool);
15379 kfree(rgn23_data); 15679 kfree(rgn23_data);
15380 return; 15680 return;
15381} 15681}
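After this split, lpfc_sli_read_link_ste only owns the TLV parse; the raw bytes come from whichever helper matches the SLI revision (and if_type 0 SLI4 ports skip the read entirely). The region 23 record layout is not shown in this hunk, so the walk below uses a made-up [type][len][value] format purely to illustrate the kind of scan the parser performs:

#include <stddef.h>
#include <stdint.h>

/* Return a pointer to the value bytes of the first record of the wanted
 * type, or NULL when no intact record matches. */
static const uint8_t *demo_tlv_find(const uint8_t *p, uint32_t size,
				    uint8_t wanted)
{
	uint32_t off = 0;

	while (off + 2 <= size) {
		uint8_t type = p[off];
		uint8_t len = p[off + 1];

		if (off + 2 + len > size)
			break;			/* truncated record */
		if (type == wanted)
			return p + off + 2;
		off += 2u + len;
	}
	return NULL;	/* not found: caller leaves hba_flag untouched */
}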
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index d5cffd8af340..3f266e2c54e0 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -291,7 +291,7 @@ struct lpfc_bmbx {
291#define LPFC_RQE_SIZE 8 291#define LPFC_RQE_SIZE 8
292 292
293#define LPFC_EQE_DEF_COUNT 1024 293#define LPFC_EQE_DEF_COUNT 1024
294#define LPFC_CQE_DEF_COUNT 256 294#define LPFC_CQE_DEF_COUNT 1024
295#define LPFC_WQE_DEF_COUNT 256 295#define LPFC_WQE_DEF_COUNT 256
296#define LPFC_MQE_DEF_COUNT 16 296#define LPFC_MQE_DEF_COUNT 16
297#define LPFC_RQE_DEF_COUNT 512 297#define LPFC_RQE_DEF_COUNT 512
@@ -420,7 +420,16 @@ struct lpfc_sli4_hba {
420 void __iomem *STATUSregaddr; 420 void __iomem *STATUSregaddr;
421 void __iomem *CTRLregaddr; 421 void __iomem *CTRLregaddr;
422 void __iomem *ERR1regaddr; 422 void __iomem *ERR1regaddr;
423#define SLIPORT_ERR1_REG_ERR_CODE_1 0x1
424#define SLIPORT_ERR1_REG_ERR_CODE_2 0x2
423 void __iomem *ERR2regaddr; 425 void __iomem *ERR2regaddr;
426#define SLIPORT_ERR2_REG_FW_RESTART 0x0
427#define SLIPORT_ERR2_REG_FUNC_PROVISON 0x1
428#define SLIPORT_ERR2_REG_FORCED_DUMP 0x2
429#define SLIPORT_ERR2_REG_FAILURE_EQ 0x3
430#define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
431#define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
432#define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
424 } if_type2; 433 } if_type2;
425 } u; 434 } u;
426 435
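The new SLIPORT_ERR1/ERR2 values classify what an if_type2 port reports after an error (see the "2885 Port Status Event" message earlier in this patch). A hedged decode helper built only from the codes defined in this hunk; the strings are editorial shorthand, not text from the SLI-4 spec:

#include <stdint.h>

static const char *demo_err2_reason(uint32_t err2)
{
	switch (err2) {
	case 0x0: return "firmware restart";	  /* SLIPORT_ERR2_REG_FW_RESTART */
	case 0x1: return "function provisioning"; /* ..._FUNC_PROVISON */
	case 0x2: return "forced dump";		  /* ..._FORCED_DUMP */
	case 0x3: return "EQ failure";		  /* ..._FAILURE_EQ */
	case 0x4: return "CQ failure";		  /* ..._FAILURE_CQ */
	case 0x5: return "bus failure";		  /* ..._FAILURE_BUS */
	case 0x6: return "RQ failure";		  /* ..._FAILURE_RQ */
	default:  return "unknown";
	}
}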
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b0630e37f1ef..dd044d01a07f 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.27" 21#define LPFC_DRIVER_VERSION "8.3.28"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index cff6ca67415c..0fe188e66000 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -774,10 +774,10 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
774 return NULL; 774 return NULL;
775 spin_lock_irq(&phba->hbalock); 775 spin_lock_irq(&phba->hbalock);
776 list_for_each_entry(port_iterator, &phba->port_list, listentry) { 776 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
777 if (port_iterator->load_flag & FC_UNLOADING)
778 continue;
777 if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) { 779 if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
778 if (!(port_iterator->load_flag & FC_UNLOADING)) 780 lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
779 lpfc_printf_vlog(port_iterator, KERN_ERR,
780 LOG_VPORT,
781 "1801 Create vport work array FAILED: " 781 "1801 Create vport work array FAILED: "
782 "cannot do scsi_host_get\n"); 782 "cannot do scsi_host_get\n");
783 continue; 783 continue;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index af3a6af97cc7..ea2bde206f7f 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -291,8 +291,7 @@ int __init macscsi_detect(struct scsi_host_template * tpnt)
291 ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; 291 ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
292 292
293 if (instance->irq != SCSI_IRQ_NONE) 293 if (instance->irq != SCSI_IRQ_NONE)
294 if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW, 294 if (request_irq(instance->irq, NCR5380_intr, 0, "ncr5380", instance)) {
295 "ncr5380", instance)) {
296 printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n", 295 printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
297 instance->host_no, instance->irq); 296 instance->host_no, instance->irq);
298 instance->irq = SCSI_IRQ_NONE; 297 instance->irq = SCSI_IRQ_NONE;
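
IRQ_FLG_SLOW is an obsolete m68k-specific flag ignored by the generic IRQ layer, so the registration now passes plain 0 for flags. A sketch of the same pattern with the standard kernel API; the handler name is a placeholder and instance is the per-host cookie handed back as dev_id:

    #include <linux/interrupt.h>

    static irqreturn_t ncr_intr(int irq, void *dev_id)
    {
    	/* acknowledge and service the chip here */
    	return IRQ_HANDLED;
    }

    /* flags 0: exclusive line, default trigger */
    if (request_irq(irq, ncr_intr, 0, "ncr5380", instance))
    	pr_warn("IRQ%d not free, interrupts disabled\n", irq);
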
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 8dc1b32918dd..a01f0aa66f20 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
8 * scatter/gather formats. 8 * scatter/gather formats.
9 * Creation Date: June 21, 2006 9 * Creation Date: June 21, 2006
10 * 10 *
11 * mpi2.h Version: 02.00.20 11 * mpi2.h Version: 02.00.22
12 * 12 *
13 * Version History 13 * Version History
14 * --------------- 14 * ---------------
@@ -69,6 +69,8 @@
69 * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT. 69 * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
70 * Added MPI2_FUNCTION_SEND_HOST_MESSAGE. 70 * Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
71 * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. 71 * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
72 * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
73 * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
72 * -------------------------------------------------------------------------- 74 * --------------------------------------------------------------------------
73 */ 75 */
74 76
@@ -94,7 +96,7 @@
94#define MPI2_VERSION_02_00 (0x0200) 96#define MPI2_VERSION_02_00 (0x0200)
95 97
96/* versioning for this MPI header set */ 98/* versioning for this MPI header set */
97#define MPI2_HEADER_VERSION_UNIT (0x14) 99#define MPI2_HEADER_VERSION_UNIT (0x16)
98#define MPI2_HEADER_VERSION_DEV (0x00) 100#define MPI2_HEADER_VERSION_DEV (0x00)
99#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 101#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
100#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 102#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -1073,8 +1075,10 @@ typedef struct _MPI2_IEEE_SGE_UNION
1073#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) 1075#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
1074#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) 1076#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
1075 /* IEEE Simple Element only */ 1077 /* IEEE Simple Element only */
1076#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (0x03) 1078#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03)
1077 /* IEEE Chain Element only */ 1079 /* IEEE Chain Element only */
1080#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \
1081 (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) /* typo in name */
1078 1082
1079/**************************************************************************** 1083/****************************************************************************
1080* IEEE SGE operation Macros 1084* IEEE SGE operation Macros
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index cfd95b4e3004..3a023dad77a1 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
6 * Title: MPI Configuration messages and pages 6 * Title: MPI Configuration messages and pages
7 * Creation Date: November 10, 2006 7 * Creation Date: November 10, 2006
8 * 8 *
9 * mpi2_cnfg.h Version: 02.00.19 9 * mpi2_cnfg.h Version: 02.00.21
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -140,6 +140,13 @@
140 * Added SASNotifyPrimitiveMasks field to 140 * Added SASNotifyPrimitiveMasks field to
141 * MPI2_CONFIG_PAGE_IOC_7. 141 * MPI2_CONFIG_PAGE_IOC_7.
142 * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). 142 * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
143 * 05-25-11 02.00.20 Cleaned up a few comments.
144 * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
145 * for PCIe link as obsolete.
146 * Added SpinupFlags field containing a Disable Spin-up
147 * bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of
148 * SAS IO Unit Page 4.
149
143 * -------------------------------------------------------------------------- 150 * --------------------------------------------------------------------------
144 */ 151 */
145 152
@@ -904,8 +911,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
904#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400) 911#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400)
905#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200) 912#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200)
906#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100) 913#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100)
907#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) 914#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /* obsolete */
908#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) 915#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /* obsolete */
909 916
910/* defines for IO Unit Page 7 IOCTemperatureUnits field */ 917/* defines for IO Unit Page 7 IOCTemperatureUnits field */
911#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) 918#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
@@ -1970,10 +1977,14 @@ typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
1970{ 1977{
1971 U8 MaxTargetSpinup; /* 0x00 */ 1978 U8 MaxTargetSpinup; /* 0x00 */
1972 U8 SpinupDelay; /* 0x01 */ 1979 U8 SpinupDelay; /* 0x01 */
1973 U16 Reserved1; /* 0x02 */ 1980 U8 SpinupFlags; /* 0x02 */
1981 U8 Reserved1; /* 0x03 */
1974} MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP, 1982} MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
1975 Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t; 1983 Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t;
1976 1984
1985/* defines for SAS IO Unit Page 4 SpinupFlags */
1986#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01)
1987
1977/* 1988/*
1978 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to 1989 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1979 * one and check the value returned for NumPhys at runtime. 1990 * one and check the value returned for NumPhys at runtime.
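
SpinupFlags takes over one byte of the old Reserved1 field, and bit 0 disables staggered spin-up for the drives mapped to that group. A sketch of testing it, given a pointer to one group entry from SAS IO Unit Page 4:

    static int spinup_disabled(const MPI2_SAS_IOUNIT4_SPINUP_GROUP *grp)
    {
    	return grp->SpinupFlags & MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG;
    }
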
@@ -2321,13 +2332,12 @@ typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1
2321 2332
2322/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ 2333/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
2323 2334
2324/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
2325
2326/* values for SAS Expander Page 1 DiscoveryInfo field */ 2335/* values for SAS Expander Page 1 DiscoveryInfo field */
2327#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04) 2336#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
2328#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02) 2337#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
2329#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01) 2338#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
2330 2339
2340/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
2331 2341
2332/**************************************************************************** 2342/****************************************************************************
2333* SAS Device Config Pages 2343* SAS Device Config Pages
@@ -2447,6 +2457,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
2447 2457
2448#define MPI2_SASPHY0_PAGEVERSION (0x03) 2458#define MPI2_SASPHY0_PAGEVERSION (0x03)
2449 2459
2460/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
2461
2450/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ 2462/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
2451 2463
2452/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ 2464/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
@@ -2454,12 +2466,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
2454/* values for SAS PHY Page 0 Flags field */ 2466/* values for SAS PHY Page 0 Flags field */
2455#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01) 2467#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
2456 2468
2457/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ 2469/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
2458 2470
2459/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ 2471/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
2460 2472
2461/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
2462
2463 2473
2464/* SAS PHY Page 1 */ 2474/* SAS PHY Page 1 */
2465 2475
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 93d9b6956d05..9a925c07a9ec 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: October 11, 2006 7 * Creation Date: October 11, 2006
8 * 8 *
9 * mpi2_ioc.h Version: 02.00.17 9 * mpi2_ioc.h Version: 02.00.19
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -110,6 +110,13 @@
110 * Added Temperature Threshold Event. 110 * Added Temperature Threshold Event.
111 * Added Host Message Event. 111 * Added Host Message Event.
112 * Added Send Host Message request and reply. 112 * Added Send Host Message request and reply.
113 * 05-25-11 02.00.18 For Extended Image Header, added
114 * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
115 * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
116 * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
117 * 08-24-11 02.00.19 Added PhysicalPort field to
118 * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
119 * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
113 * -------------------------------------------------------------------------- 120 * --------------------------------------------------------------------------
114 */ 121 */
115 122
@@ -578,7 +585,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
578{ 585{
579 U16 TaskTag; /* 0x00 */ 586 U16 TaskTag; /* 0x00 */
580 U8 ReasonCode; /* 0x02 */ 587 U8 ReasonCode; /* 0x02 */
581 U8 Reserved1; /* 0x03 */ 588 U8 PhysicalPort; /* 0x03 */
582 U8 ASC; /* 0x04 */ 589 U8 ASC; /* 0x04 */
583 U8 ASCQ; /* 0x05 */ 590 U8 ASCQ; /* 0x05 */
584 U16 DevHandle; /* 0x06 */ 591 U16 DevHandle; /* 0x06 */
@@ -1366,16 +1373,18 @@ typedef struct _MPI2_EXT_IMAGE_HEADER
1366#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40) 1373#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
1367 1374
1368/* defines for the ImageType field */ 1375/* defines for the ImageType field */
1369#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) 1376#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
1370#define MPI2_EXT_IMAGE_TYPE_FW (0x01) 1377#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
1371#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03) 1378#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
1372#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04) 1379#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
1373#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05) 1380#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
1374#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) 1381#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
1375#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) 1382#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
1376#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) 1383#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
1377 1384#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
1378#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MEGARAID) 1385#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
1386#define MPI2_EXT_IMAGE_TYPE_MAX \
1387 (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) /* deprecated */
1379 1388
1380 1389
1381 1390
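
Reserving 0x80-0xFF for product-specific images is what deprecates MPI2_EXT_IMAGE_TYPE_MAX: the highest generic type no longer bounds the valid range. A sketch of the range test a firmware-image parser might use; the U8 type makes the 0xFF upper bound implicit:

    static int ext_image_is_product_specific(U8 image_type)
    {
    	return image_type >= MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC;
    }
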
@@ -1568,7 +1577,7 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
1568/* defines for the Feature field */ 1577/* defines for the Feature field */
1569#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01) 1578#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
1570#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02) 1579#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
1571#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) 1580#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /* obsolete */
1572#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04) 1581#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
1573#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80) 1582#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
1574#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF) 1583#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
@@ -1597,14 +1606,14 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
1597 1606
1598/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */ 1607/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
1599/* Parameter1 indicates desired PCIe link speed using these defines */ 1608/* Parameter1 indicates desired PCIe link speed using these defines */
1600#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) 1609#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /* obsolete */
1601#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) 1610#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /* obsolete */
1602#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) 1611#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /* obsolete */
1603/* Parameter2 indicates desired PCIe link width using these defines */ 1612/* Parameter2 indicates desired PCIe link width using these defines */
1604#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) 1613#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /* obsolete */
1605#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) 1614#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /* obsolete */
1606#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) 1615#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /* obsolete */
1607#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) 1616#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /* obsolete */
1608/* Parameter3 and Parameter4 are reserved */ 1617/* Parameter3 and Parameter4 are reserved */
1609 1618
1610/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */ 1619/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index bd61a7b60a2b..0601612b875a 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -6,7 +6,7 @@
6 * Title: MPI Integrated RAID messages and structures 6 * Title: MPI Integrated RAID messages and structures
7 * Creation Date: April 26, 2007 7 * Creation Date: April 26, 2007
8 * 8 *
9 * mpi2_raid.h Version: 02.00.05 9 * mpi2_raid.h Version: 02.00.06
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -23,6 +23,10 @@
23 * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of 23 * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
24 * VolumeCreationFlags and marked the old one as obsolete. 24 * VolumeCreationFlags and marked the old one as obsolete.
25 * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. 25 * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
26 * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
27 * related structures and defines.
28 * Added product-specific range to RAID Action values.
29
26 * -------------------------------------------------------------------------- 30 * --------------------------------------------------------------------------
27 */ 31 */
28 32
@@ -176,7 +180,9 @@ typedef struct _MPI2_RAID_ACTION_REQUEST
176#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20) 180#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20)
177#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21) 181#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
178#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22) 182#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
179 183#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
184#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
185#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
180 186
181/* RAID Volume Creation Structure */ 187/* RAID Volume Creation Structure */
182 188
@@ -244,6 +250,23 @@ typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION
244 Mpi2RaidOnlineCapacityExpansion_t, 250 Mpi2RaidOnlineCapacityExpansion_t,
245 MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t; 251 MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t;
246 252
253/* RAID Compatibility Input Structure */
254
255typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT {
256 U16 SourceDevHandle; /* 0x00 */
257 U16 CandidateDevHandle; /* 0x02 */
258 U32 Flags; /* 0x04 */
259 U32 Reserved1; /* 0x08 */
260 U32 Reserved2; /* 0x0C */
261} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
262MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
263Mpi2RaidCompatibilityInputStruct_t,
264MPI2_POINTER pMpi2RaidCompatibilityInputStruct_t;
265
266/* defines for RAID Compatibility Structure Flags field */
267#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002)
268#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001)
269
247 270
248/* RAID Volume Indicator Structure */ 271/* RAID Volume Indicator Structure */
249 272
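
A sketch of filling the new input structure before issuing MPI2_RAID_ACTION_COMPATIBILITY_CHECK; the two device handles are placeholders and the surrounding RAID Action request plumbing is omitted:

    MPI2_RAID_COMPATIBILITY_INPUT_STRUCT in;

    memset(&in, 0, sizeof(in));
    in.SourceDevHandle    = cpu_to_le16(src_handle);  /* placeholder: source volume/disk */
    in.CandidateDevHandle = cpu_to_le16(cand_handle); /* placeholder: drive under test */
    in.Flags = cpu_to_le32(MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG |
    			   MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG);
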
@@ -263,15 +286,45 @@ typedef struct _MPI2_RAID_VOL_INDICATOR
263#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003) 286#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
264#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004) 287#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
265 288
289/* RAID Compatibility Result Structure */
290
291typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT {
292 U8 State; /* 0x00 */
293 U8 Reserved1; /* 0x01 */
294 U16 Reserved2; /* 0x02 */
295 U32 GenericAttributes; /* 0x04 */
296 U32 OEMSpecificAttributes; /* 0x08 */
297 U32 Reserved3; /* 0x0C */
298 U32 Reserved4; /* 0x10 */
299} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
300MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
301Mpi2RaidCompatibilityResultStruct_t,
302MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t;
303
304/* defines for RAID Compatibility Result Structure State field */
305#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00)
306#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01)
307
308/* defines for RAID Compatibility Result Structure GenericAttributes field */
309#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010)
310
311#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C)
312#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008)
313#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004)
314
315#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003)
316#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002)
317#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001)
266 318
267/* RAID Action Reply ActionData union */ 319/* RAID Action Reply ActionData union */
268typedef union _MPI2_RAID_ACTION_REPLY_DATA 320typedef union _MPI2_RAID_ACTION_REPLY_DATA
269{ 321{
270 U32 Word[5]; 322 U32 Word[5];
271 MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator; 323 MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
272 U16 VolDevHandle; 324 U16 VolDevHandle;
273 U8 VolumeState; 325 U8 VolumeState;
274 U8 PhysDiskNum; 326 U8 PhysDiskNum;
327 MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
275} MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA, 328} MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA,
276 Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t; 329 Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t;
277 330
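
GenericAttributes packs three independent facts: bit 4 flags 4K sectors, bits 3:2 give the media type, and bits 1:0 the protocol. A sketch of decoding a reply, assuming result points at the RaidCompatibilityResult member of the ActionData union above:

    u32 attr = le32_to_cpu(result->GenericAttributes);

    if (result->State == MPI2_RAID_COMPAT_STATE_COMPATIBLE &&
        (attr & MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK) ==
         MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL &&
        (attr & MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK) ==
         MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE)
    	; /* compatible SAS SSD; test GENATTRIB_4K_SECTOR separately */
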
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 2a4bceda364b..3cbe677c6886 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
6 * Title: MPI diagnostic tool structures and definitions 6 * Title: MPI diagnostic tool structures and definitions
7 * Creation Date: March 26, 2007 7 * Creation Date: March 26, 2007
8 * 8 *
9 * mpi2_tool.h Version: 02.00.06 9 * mpi2_tool.h Version: 02.00.07
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -25,6 +25,8 @@
25 * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. 25 * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
26 * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer 26 * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
27 * Post Request. 27 * Post Request.
28 * 05-25-11 02.00.07 Added Flags field and related defines to
29 * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
28 * -------------------------------------------------------------------------- 30 * --------------------------------------------------------------------------
29 */ 31 */
30 32
@@ -181,7 +183,7 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
181 U8 DevIndex; /* 0x14 */ 183 U8 DevIndex; /* 0x14 */
182 U8 Action; /* 0x15 */ 184 U8 Action; /* 0x15 */
183 U8 SGLFlags; /* 0x16 */ 185 U8 SGLFlags; /* 0x16 */
184 U8 Reserved7; /* 0x17 */ 186 U8 Flags; /* 0x17 */
185 U16 TxDataLength; /* 0x18 */ 187 U16 TxDataLength; /* 0x18 */
186 U16 RxDataLength; /* 0x1A */ 188 U16 RxDataLength; /* 0x1A */
187 U32 Reserved8; /* 0x1C */ 189 U32 Reserved8; /* 0x1C */
@@ -205,6 +207,9 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
205 207
206/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ 208/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
207 209
210/* values for the Flags field */
211#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
212#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
208 213
209/* Toolbox ISTWI Read Write Tool reply message */ 214/* Toolbox ISTWI Read Write Tool reply message */
210typedef struct _MPI2_TOOLBOX_ISTWI_REPLY { 215typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
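
The repurposed byte carries one control bit plus a small field: bit 7 asks the IOC to reserve and release the ISTWI (I2C) bus around the transfer, and bits 2:0 select a device page address. A sketch of composing it; req and page are placeholders:

    U8 flags = MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE |
    	   (page & MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK);	/* page: 0..7 */
    req->Flags = flags;
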
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index beda04a8404b..0b2c95583660 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -57,6 +57,7 @@
57#include <linux/sort.h> 57#include <linux/sort.h>
58#include <linux/io.h> 58#include <linux/io.h>
59#include <linux/time.h> 59#include <linux/time.h>
60#include <linux/kthread.h>
60#include <linux/aer.h> 61#include <linux/aer.h>
61 62
62#include "mpt2sas_base.h" 63#include "mpt2sas_base.h"
@@ -65,6 +66,8 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
65 66
66#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ 67#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
67 68
69#define MAX_HBA_QUEUE_DEPTH 30000
70#define MAX_CHAIN_DEPTH 100000
68static int max_queue_depth = -1; 71static int max_queue_depth = -1;
69module_param(max_queue_depth, int, 0); 72module_param(max_queue_depth, int, 0);
70MODULE_PARM_DESC(max_queue_depth, " max controller queue depth "); 73MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
@@ -89,19 +92,6 @@ static int disable_discovery = -1;
89module_param(disable_discovery, int, 0); 92module_param(disable_discovery, int, 0);
90MODULE_PARM_DESC(disable_discovery, " disable discovery "); 93MODULE_PARM_DESC(disable_discovery, " disable discovery ");
91 94
92
93/* diag_buffer_enable is bitwise
94 * bit 0 set = TRACE
95 * bit 1 set = SNAPSHOT
96 * bit 2 set = EXTENDED
97 *
98 * Either bit can be set, or both
99 */
100static int diag_buffer_enable;
101module_param(diag_buffer_enable, int, 0);
102MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
103 "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
104
105/** 95/**
106 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. 96 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
107 * 97 *
@@ -120,10 +110,34 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
120 ioc->fwfault_debug = mpt2sas_fwfault_debug; 110 ioc->fwfault_debug = mpt2sas_fwfault_debug;
121 return 0; 111 return 0;
122} 112}
113
123module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug, 114module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
124 param_get_int, &mpt2sas_fwfault_debug, 0644); 115 param_get_int, &mpt2sas_fwfault_debug, 0644);
125 116
126/** 117/**
118 * mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
119 * @arg: input argument, used to derive ioc
120 *
121 * Return 0 if controller is removed from pci subsystem.
122 * Return -1 for other case.
123 */
124static int mpt2sas_remove_dead_ioc_func(void *arg)
125{
126 struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
127 struct pci_dev *pdev;
128
129 if ((ioc == NULL))
130 return -1;
131
132 pdev = ioc->pdev;
133 if ((pdev == NULL))
134 return -1;
135 pci_remove_bus_device(pdev);
136 return 0;
137}
138
139
140/**
127 * _base_fault_reset_work - workq handling ioc fault conditions 141 * _base_fault_reset_work - workq handling ioc fault conditions
128 * @work: input argument, used to derive ioc 142 * @work: input argument, used to derive ioc
129 * Context: sleep. 143 * Context: sleep.
@@ -138,6 +152,7 @@ _base_fault_reset_work(struct work_struct *work)
138 unsigned long flags; 152 unsigned long flags;
139 u32 doorbell; 153 u32 doorbell;
140 int rc; 154 int rc;
155 struct task_struct *p;
141 156
142 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 157 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
143 if (ioc->shost_recovery) 158 if (ioc->shost_recovery)
@@ -145,6 +160,39 @@ _base_fault_reset_work(struct work_struct *work)
145 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 160 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
146 161
147 doorbell = mpt2sas_base_get_iocstate(ioc, 0); 162 doorbell = mpt2sas_base_get_iocstate(ioc, 0);
163 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
164 printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
165 ioc->name, __func__);
166
167 /*
168 * Call _scsih_flush_pending_cmds callback so that we flush all
 169 * pending commands back to OS. This call is required to avoid
170 * deadlock at block layer. Dead IOC will fail to do diag reset,
171 * and this call is safe since dead ioc will never return any
172 * command back from HW.
173 */
174 ioc->schedule_dead_ioc_flush_running_cmds(ioc);
175 /*
176 * Set remove_host flag early since kernel thread will
177 * take some time to execute.
178 */
179 ioc->remove_host = 1;
180 /*Remove the Dead Host */
181 p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
182 "mpt2sas_dead_ioc_%d", ioc->id);
183 if (IS_ERR(p)) {
184 printk(MPT2SAS_ERR_FMT
185 "%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
186 ioc->name, __func__);
187 } else {
188 printk(MPT2SAS_ERR_FMT
189 "%s: Running mpt2sas_dead_ioc thread success !!!!\n",
190 ioc->name, __func__);
191 }
192
193 return; /* don't rearm timer */
194 }
195
148 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 196 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
149 rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 197 rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
150 FORCE_BIG_HAMMER); 198 FORCE_BIG_HAMMER);
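
The new branch keys off the state nibble being all ones: MPI2_IOC_STATE_MASK covers the entire IOC-state field, so (doorbell & MASK) == MASK matches only state 0xF, which is typically what a surprise-removed or otherwise dead adapter yields when its registers read back as 0xFFFFFFFF. The flush callback returns pending I/O to the block layer immediately (a dead IOC will never complete it), and the PCI removal is pushed to a kthread so this work item does not block. A sketch of the state test in isolation, with the mask value as defined in mpi2.h:

    #define MPI2_IOC_STATE_MASK	(0xF0000000)

    static int ioc_is_unrecoverable(u32 doorbell)
    {
    	/* a dead PCI function reads 0xFFFFFFFF, so every state bit is set */
    	return (doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK;
    }
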
@@ -1346,7 +1394,7 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1346 if (_base_check_enable_msix(ioc) != 0) 1394 if (_base_check_enable_msix(ioc) != 0)
1347 goto try_ioapic; 1395 goto try_ioapic;
1348 1396
1349 ioc->reply_queue_count = min_t(u8, ioc->cpu_count, 1397 ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1350 ioc->msix_vector_count); 1398 ioc->msix_vector_count);
1351 1399
1352 entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), 1400 entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
@@ -1916,6 +1964,10 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
1916 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1964 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1917 MPT2SAS_INTEL_RMS2LL040_BRANDING); 1965 MPT2SAS_INTEL_RMS2LL040_BRANDING);
1918 break; 1966 break;
1967 case MPT2SAS_INTEL_RAMSDALE_SSDID:
1968 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1969 MPT2SAS_INTEL_RAMSDALE_BRANDING);
1970 break;
1919 default: 1971 default:
1920 break; 1972 break;
1921 } 1973 }
@@ -1925,6 +1977,22 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
1925 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1977 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1926 MPT2SAS_INTEL_RS25GB008_BRANDING); 1978 MPT2SAS_INTEL_RS25GB008_BRANDING);
1927 break; 1979 break;
1980 case MPT2SAS_INTEL_RMS25JB080_SSDID:
1981 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1982 MPT2SAS_INTEL_RMS25JB080_BRANDING);
1983 break;
1984 case MPT2SAS_INTEL_RMS25JB040_SSDID:
1985 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1986 MPT2SAS_INTEL_RMS25JB040_BRANDING);
1987 break;
1988 case MPT2SAS_INTEL_RMS25KB080_SSDID:
1989 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1990 MPT2SAS_INTEL_RMS25KB080_BRANDING);
1991 break;
1992 case MPT2SAS_INTEL_RMS25KB040_SSDID:
1993 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1994 MPT2SAS_INTEL_RMS25KB040_BRANDING);
1995 break;
1928 default: 1996 default:
1929 break; 1997 break;
1930 } 1998 }
@@ -2311,8 +2379,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2311 } 2379 }
2312 if (ioc->chain_dma_pool) 2380 if (ioc->chain_dma_pool)
2313 pci_pool_destroy(ioc->chain_dma_pool); 2381 pci_pool_destroy(ioc->chain_dma_pool);
2314 }
2315 if (ioc->chain_lookup) {
2316 free_pages((ulong)ioc->chain_lookup, ioc->chain_pages); 2382 free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2317 ioc->chain_lookup = NULL; 2383 ioc->chain_lookup = NULL;
2318 } 2384 }
@@ -2330,9 +2396,7 @@ static int
2330_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) 2396_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2331{ 2397{
2332 struct mpt2sas_facts *facts; 2398 struct mpt2sas_facts *facts;
2333 u32 queue_size, queue_diff;
2334 u16 max_sge_elements; 2399 u16 max_sge_elements;
2335 u16 num_of_reply_frames;
2336 u16 chains_needed_per_io; 2400 u16 chains_needed_per_io;
2337 u32 sz, total_sz, reply_post_free_sz; 2401 u32 sz, total_sz, reply_post_free_sz;
2338 u32 retry_sz; 2402 u32 retry_sz;
@@ -2359,7 +2423,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2359 max_request_credit = (max_queue_depth < facts->RequestCredit) 2423 max_request_credit = (max_queue_depth < facts->RequestCredit)
2360 ? max_queue_depth : facts->RequestCredit; 2424 ? max_queue_depth : facts->RequestCredit;
2361 else 2425 else
2362 max_request_credit = facts->RequestCredit; 2426 max_request_credit = min_t(u16, facts->RequestCredit,
2427 MAX_HBA_QUEUE_DEPTH);
2363 2428
2364 ioc->hba_queue_depth = max_request_credit; 2429 ioc->hba_queue_depth = max_request_credit;
2365 ioc->hi_priority_depth = facts->HighPriorityCredit; 2430 ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -2400,50 +2465,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2400 } 2465 }
2401 ioc->chains_needed_per_io = chains_needed_per_io; 2466 ioc->chains_needed_per_io = chains_needed_per_io;
2402 2467
2403 /* reply free queue sizing - taking into account for events */ 2468 /* reply free queue sizing - taking into account for 64 FW events */
2404 num_of_reply_frames = ioc->hba_queue_depth + 32; 2469 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2405
2406 /* number of replies frames can't be a multiple of 16 */
2407 /* decrease number of reply frames by 1 */
2408 if (!(num_of_reply_frames % 16))
2409 num_of_reply_frames--;
2410
2411 /* calculate number of reply free queue entries
2412 * (must be multiple of 16)
2413 */
2414
2415 /* (we know reply_free_queue_depth is not a multiple of 16) */
2416 queue_size = num_of_reply_frames;
2417 queue_size += 16 - (queue_size % 16);
2418 ioc->reply_free_queue_depth = queue_size;
2419
2420 /* reply descriptor post queue sizing */
2421 /* this size should be the number of request frames + number of reply
2422 * frames
2423 */
2424
2425 queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
2426 /* round up to 16 byte boundary */
2427 if (queue_size % 16)
2428 queue_size += 16 - (queue_size % 16);
2429
2430 /* check against IOC maximum reply post queue depth */
2431 if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
2432 queue_diff = queue_size -
2433 facts->MaxReplyDescriptorPostQueueDepth;
2434 2470
2435 /* round queue_diff up to multiple of 16 */ 2471 /* align the reply post queue on the next 16 count boundary */
2436 if (queue_diff % 16) 2472 if (!ioc->reply_free_queue_depth % 16)
2437 queue_diff += 16 - (queue_diff % 16); 2473 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
2438 2474 else
2439 /* adjust hba_queue_depth, reply_free_queue_depth, 2475 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
2440 * and queue_size 2476 32 - (ioc->reply_free_queue_depth % 16);
2441 */ 2477 if (ioc->reply_post_queue_depth >
2442 ioc->hba_queue_depth -= (queue_diff / 2); 2478 facts->MaxReplyDescriptorPostQueueDepth) {
2443 ioc->reply_free_queue_depth -= (queue_diff / 2); 2479 ioc->reply_post_queue_depth = min_t(u16,
2444 queue_size = facts->MaxReplyDescriptorPostQueueDepth; 2480 (facts->MaxReplyDescriptorPostQueueDepth -
2481 (facts->MaxReplyDescriptorPostQueueDepth % 16)),
2482 (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
2483 ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
2484 ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
2445 } 2485 }
2446 ioc->reply_post_queue_depth = queue_size; 2486
2447 2487
2448 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: " 2488 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
2449 "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), " 2489 "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
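
The rework replaces the old iterative sizing with closed-form arithmetic: the reply free queue gets the HBA queue depth plus 64 frames of event headroom, and the reply post queue is that value rounded up to a multiple of 16 with at least 16 entries of slack. A minimal sketch of the intended round-up, assuming the committed test is meant to read !(depth % 16); as written, ! binds tighter than %, so the expression parses as (!depth) % 16:

    static u32 reply_post_depth(u32 reply_free_depth)
    {
    	if (!(reply_free_depth % 16))
    		return reply_free_depth + 16;	/* aligned: one block of slack */
    	return reply_free_depth + 32 - (reply_free_depth % 16);
    }
    /* e.g. reply_post_depth(1088) == 1104, reply_post_depth(1090) == 1120 */
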
@@ -2529,15 +2569,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2529 "depth(%d)\n", ioc->name, ioc->request, 2569 "depth(%d)\n", ioc->name, ioc->request,
2530 ioc->scsiio_depth)); 2570 ioc->scsiio_depth));
2531 2571
2532 /* loop till the allocation succeeds */ 2572 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2533 do { 2573 sz = ioc->chain_depth * sizeof(struct chain_tracker);
2534 sz = ioc->chain_depth * sizeof(struct chain_tracker); 2574 ioc->chain_pages = get_order(sz);
2535 ioc->chain_pages = get_order(sz); 2575
2536 ioc->chain_lookup = (struct chain_tracker *)__get_free_pages( 2576 ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2537 GFP_KERNEL, ioc->chain_pages); 2577 GFP_KERNEL, ioc->chain_pages);
2538 if (ioc->chain_lookup == NULL)
2539 ioc->chain_depth -= 100;
2540 } while (ioc->chain_lookup == NULL);
2541 ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev, 2578 ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2542 ioc->request_sz, 16, 0); 2579 ioc->request_sz, 16, 0);
2543 if (!ioc->chain_dma_pool) { 2580 if (!ioc->chain_dma_pool) {
@@ -3136,8 +3173,8 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
3136 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 3173 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3137 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) 3174 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
3138 ioc->ioc_link_reset_in_progress = 1; 3175 ioc->ioc_link_reset_in_progress = 1;
3139 mpt2sas_base_put_smid_default(ioc, smid);
3140 init_completion(&ioc->base_cmds.done); 3176 init_completion(&ioc->base_cmds.done);
3177 mpt2sas_base_put_smid_default(ioc, smid);
3141 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 3178 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3142 msecs_to_jiffies(10000)); 3179 msecs_to_jiffies(10000));
3143 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 3180 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
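
This hunk and the similar ones below close an ordering race: init_completion() must run before mpt2sas_base_put_smid_default() hands the request to the IOC, otherwise the interrupt-side complete() can fire against a completion that is re-initialized an instant later, and the waiter then times out on a reply that already arrived. The safe shape, using the driver's own calls:

    init_completion(&ioc->base_cmds.done);		/* arm the waiter first */
    mpt2sas_base_put_smid_default(ioc, smid);	/* only then start the I/O */
    if (!wait_for_completion_timeout(&ioc->base_cmds.done,
    				     msecs_to_jiffies(10000)))
    	; /* genuine timeout: reset/cleanup path */
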
@@ -3238,8 +3275,8 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
3238 request = mpt2sas_base_get_msg_frame(ioc, smid); 3275 request = mpt2sas_base_get_msg_frame(ioc, smid);
3239 ioc->base_cmds.smid = smid; 3276 ioc->base_cmds.smid = smid;
3240 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); 3277 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
3241 mpt2sas_base_put_smid_default(ioc, smid);
3242 init_completion(&ioc->base_cmds.done); 3278 init_completion(&ioc->base_cmds.done);
3279 mpt2sas_base_put_smid_default(ioc, smid);
3243 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 3280 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3244 msecs_to_jiffies(10000)); 3281 msecs_to_jiffies(10000));
3245 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) { 3282 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
@@ -3746,8 +3783,8 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3746 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3783 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3747 mpi_request->EventMasks[i] = 3784 mpi_request->EventMasks[i] =
3748 cpu_to_le32(ioc->event_masks[i]); 3785 cpu_to_le32(ioc->event_masks[i]);
3749 mpt2sas_base_put_smid_default(ioc, smid);
3750 init_completion(&ioc->base_cmds.done); 3786 init_completion(&ioc->base_cmds.done);
3787 mpt2sas_base_put_smid_default(ioc, smid);
3751 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); 3788 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
3752 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) { 3789 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3753 printk(MPT2SAS_ERR_FMT "%s: timeout\n", 3790 printk(MPT2SAS_ERR_FMT "%s: timeout\n",
@@ -4062,7 +4099,8 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4062 ioc->reply_free[i] = cpu_to_le32(reply_address); 4099 ioc->reply_free[i] = cpu_to_le32(reply_address);
4063 4100
4064 /* initialize reply queues */ 4101 /* initialize reply queues */
4065 _base_assign_reply_queues(ioc); 4102 if (ioc->is_driver_loading)
4103 _base_assign_reply_queues(ioc);
4066 4104
4067 /* initialize Reply Post Free Queue */ 4105 /* initialize Reply Post Free Queue */
4068 reply_post_free = (long)ioc->reply_post_free; 4106 reply_post_free = (long)ioc->reply_post_free;
@@ -4110,24 +4148,17 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4110 4148
4111 4149
4112 if (ioc->is_driver_loading) { 4150 if (ioc->is_driver_loading) {
4113 4151 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
4114 4152 == 0x80) {
4115
4116 ioc->wait_for_discovery_to_complete =
4117 _base_determine_wait_on_discovery(ioc);
4118 return r; /* scan_start and scan_finished support */
4119 }
4120
4121
4122 if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
4123 if (ioc->manu_pg10.OEMIdentifier == 0x80) {
4124 hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 & 4153 hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
4125 MFG_PAGE10_HIDE_SSDS_MASK); 4154 MFG_PAGE10_HIDE_SSDS_MASK);
4126 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK) 4155 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
4127 ioc->mfg_pg10_hide_flag = hide_flag; 4156 ioc->mfg_pg10_hide_flag = hide_flag;
4128 } 4157 }
4158 ioc->wait_for_discovery_to_complete =
4159 _base_determine_wait_on_discovery(ioc);
4160 return r; /* scan_start and scan_finished support */
4129 } 4161 }
4130
4131 r = _base_send_port_enable(ioc, sleep_flag); 4162 r = _base_send_port_enable(ioc, sleep_flag);
4132 if (r) 4163 if (r)
4133 return r; 4164 return r;
@@ -4206,7 +4237,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4206 4237
4207 r = mpt2sas_base_map_resources(ioc); 4238 r = mpt2sas_base_map_resources(ioc);
4208 if (r) 4239 if (r)
4209 return r; 4240 goto out_free_resources;
4210 4241
4211 if (ioc->is_warpdrive) { 4242 if (ioc->is_warpdrive) {
4212 ioc->reply_post_host_index[0] = 4243 ioc->reply_post_host_index[0] =
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 3c3babc7d260..c7459fdc06cc 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "10.100.00.00" 72#define MPT2SAS_DRIVER_VERSION "12.100.00.00"
73#define MPT2SAS_MAJOR_VERSION 10 73#define MPT2SAS_MAJOR_VERSION 12
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
@@ -157,20 +157,33 @@
157/* 157/*
158 * Intel HBA branding 158 * Intel HBA branding
159 */ 159 */
160#define MPT2SAS_INTEL_RMS25JB080_BRANDING \
161 "Intel(R) Integrated RAID Module RMS25JB080"
162#define MPT2SAS_INTEL_RMS25JB040_BRANDING \
163 "Intel(R) Integrated RAID Module RMS25JB040"
164#define MPT2SAS_INTEL_RMS25KB080_BRANDING \
165 "Intel(R) Integrated RAID Module RMS25KB080"
166#define MPT2SAS_INTEL_RMS25KB040_BRANDING \
167 "Intel(R) Integrated RAID Module RMS25KB040"
160#define MPT2SAS_INTEL_RMS2LL080_BRANDING \ 168#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
161 "Intel Integrated RAID Module RMS2LL080" 169 "Intel Integrated RAID Module RMS2LL080"
162#define MPT2SAS_INTEL_RMS2LL040_BRANDING \ 170#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
163 "Intel Integrated RAID Module RMS2LL040" 171 "Intel Integrated RAID Module RMS2LL040"
164#define MPT2SAS_INTEL_RS25GB008_BRANDING \ 172#define MPT2SAS_INTEL_RS25GB008_BRANDING \
165 "Intel(R) RAID Controller RS25GB008" 173 "Intel(R) RAID Controller RS25GB008"
166 174#define MPT2SAS_INTEL_RAMSDALE_BRANDING \
175 "Intel 720 Series SSD"
167/* 176/*
168 * Intel HBA SSDIDs 177 * Intel HBA SSDIDs
169 */ 178 */
179#define MPT2SAS_INTEL_RMS25JB080_SSDID 0x3516
180#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517
181#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518
182#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519
170#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E 183#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
171#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F 184#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
172#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000 185#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
173 186#define MPT2SAS_INTEL_RAMSDALE_SSDID 0x3700
174 187
175/* 188/*
176 * HP HBA branding 189 * HP HBA branding
@@ -373,6 +386,7 @@ struct _sas_device {
373 * @percent_complete: resync percent complete 386 * @percent_complete: resync percent complete
374 * @direct_io_enabled: Whether direct io to PDs are allowed or not 387 * @direct_io_enabled: Whether direct io to PDs are allowed or not
375 * @stripe_exponent: X where 2powX is the stripe sz in blocks 388 * @stripe_exponent: X where 2powX is the stripe sz in blocks
389 * @block_exponent: X where 2powX is the block sz in bytes
376 * @max_lba: Maximum number of LBA in the volume 390 * @max_lba: Maximum number of LBA in the volume
377 * @stripe_sz: Stripe Size of the volume 391 * @stripe_sz: Stripe Size of the volume
378 * @device_info: Device info of the volume member disk 392 * @device_info: Device info of the volume member disk
@@ -394,6 +408,7 @@ struct _raid_device {
394 u8 percent_complete; 408 u8 percent_complete;
395 u8 direct_io_enabled; 409 u8 direct_io_enabled;
396 u8 stripe_exponent; 410 u8 stripe_exponent;
411 u8 block_exponent;
397 u64 max_lba; 412 u64 max_lba;
398 u32 stripe_sz; 413 u32 stripe_sz;
399 u32 device_info; 414 u32 device_info;
@@ -623,6 +638,7 @@ enum mutex_type {
623 TM_MUTEX_ON = 1, 638 TM_MUTEX_ON = 1,
624}; 639};
625 640
641typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
626/** 642/**
627 * struct MPT2SAS_ADAPTER - per adapter struct 643 * struct MPT2SAS_ADAPTER - per adapter struct
628 * @list: ioc_list 644 * @list: ioc_list
@@ -665,6 +681,7 @@ enum mutex_type {
665 * @msix_vector_count: number msix vectors 681 * @msix_vector_count: number msix vectors
666 * @cpu_msix_table: table for mapping cpus to msix index 682 * @cpu_msix_table: table for mapping cpus to msix index
667 * @cpu_msix_table_sz: table size 683 * @cpu_msix_table_sz: table size
684 * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
668 * @scsi_io_cb_idx: shost generated commands 685 * @scsi_io_cb_idx: shost generated commands
669 * @tm_cb_idx: task management commands 686 * @tm_cb_idx: task management commands
670 * @scsih_cb_idx: scsih internal commands 687 * @scsih_cb_idx: scsih internal commands
@@ -816,6 +833,7 @@ struct MPT2SAS_ADAPTER {
816 resource_size_t **reply_post_host_index; 833 resource_size_t **reply_post_host_index;
817 u16 cpu_msix_table_sz; 834 u16 cpu_msix_table_sz;
818 u32 ioc_reset_count; 835 u32 ioc_reset_count;
836 MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
819 837
820 /* internal commands, callback index */ 838 /* internal commands, callback index */
821 u8 scsi_io_cb_idx; 839 u8 scsi_io_cb_idx;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index aabcb911706e..7fceb899029e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -818,6 +818,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
818 _ctl_display_some_debug(ioc, smid, "ctl_request", NULL); 818 _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
819#endif 819#endif
820 820
821 init_completion(&ioc->ctl_cmds.done);
821 switch (mpi_request->Function) { 822 switch (mpi_request->Function) {
822 case MPI2_FUNCTION_SCSI_IO_REQUEST: 823 case MPI2_FUNCTION_SCSI_IO_REQUEST:
823 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 824 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
@@ -903,7 +904,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
903 timeout = MPT2_IOCTL_DEFAULT_TIMEOUT; 904 timeout = MPT2_IOCTL_DEFAULT_TIMEOUT;
904 else 905 else
905 timeout = karg.timeout; 906 timeout = karg.timeout;
906 init_completion(&ioc->ctl_cmds.done);
907 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 907 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
908 timeout*HZ); 908 timeout*HZ);
909 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 909 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
@@ -1477,8 +1477,8 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
1477 mpi_request->ProductSpecific[i] = 1477 mpi_request->ProductSpecific[i] =
1478 cpu_to_le32(ioc->product_specific[buffer_type][i]); 1478 cpu_to_le32(ioc->product_specific[buffer_type][i]);
1479 1479
1480 mpt2sas_base_put_smid_default(ioc, smid);
1481 init_completion(&ioc->ctl_cmds.done); 1480 init_completion(&ioc->ctl_cmds.done);
1481 mpt2sas_base_put_smid_default(ioc, smid);
1482 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 1482 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
1483 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ); 1483 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
1484 1484
@@ -1821,8 +1821,8 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
1821 mpi_request->VF_ID = 0; /* TODO */ 1821 mpi_request->VF_ID = 0; /* TODO */
1822 mpi_request->VP_ID = 0; 1822 mpi_request->VP_ID = 0;
1823 1823
1824 mpt2sas_base_put_smid_default(ioc, smid);
1825 init_completion(&ioc->ctl_cmds.done); 1824 init_completion(&ioc->ctl_cmds.done);
1825 mpt2sas_base_put_smid_default(ioc, smid);
1826 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 1826 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
1827 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ); 1827 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
1828 1828
@@ -2095,8 +2095,8 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
2095 mpi_request->VF_ID = 0; /* TODO */ 2095 mpi_request->VF_ID = 0; /* TODO */
2096 mpi_request->VP_ID = 0; 2096 mpi_request->VP_ID = 0;
2097 2097
2098 mpt2sas_base_put_smid_default(ioc, smid);
2099 init_completion(&ioc->ctl_cmds.done); 2098 init_completion(&ioc->ctl_cmds.done);
2099 mpt2sas_base_put_smid_default(ioc, smid);
2100 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 2100 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
2101 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ); 2101 MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
2102 2102
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index d570573b7963..193e33e28e49 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -99,7 +99,7 @@ MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
99 99
100static ushort max_sectors = 0xFFFF; 100static ushort max_sectors = 0xFFFF;
101module_param(max_sectors, ushort, 0); 101module_param(max_sectors, ushort, 0);
102MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192 default=8192"); 102MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
103 103
 104/* scsi-mid layer global parameter is max_report_luns, which is 511 */ 104/* scsi-mid layer global parameter is max_report_luns, which is 511 */
105#define MPT2SAS_MAX_LUN (16895) 105#define MPT2SAS_MAX_LUN (16895)
@@ -612,13 +612,17 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
612 if (!mpt2sas_transport_port_add(ioc, sas_device->handle, 612 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
613 sas_device->sas_address_parent)) { 613 sas_device->sas_address_parent)) {
614 _scsih_sas_device_remove(ioc, sas_device); 614 _scsih_sas_device_remove(ioc, sas_device);
615 } else if (!sas_device->starget) { 615 } else if (!sas_device->starget) {
616 if (!ioc->is_driver_loading) 616 /* When asyn scanning is enabled, its not possible to remove
617 mpt2sas_transport_port_remove(ioc, 617 * devices while scanning is turned on due to an oops in
618 sas_device->sas_address, 618 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
619 sas_device->sas_address_parent); 619 */
620 _scsih_sas_device_remove(ioc, sas_device); 620 if (!ioc->is_driver_loading)
621 } 621 mpt2sas_transport_port_remove(ioc,
622 sas_device->sas_address,
623 sas_device->sas_address_parent);
624 _scsih_sas_device_remove(ioc, sas_device);
625 }
622} 626}
623 627
624/** 628/**
@@ -1007,8 +1011,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1007 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1011 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1008 if (list_empty(&ioc->free_chain_list)) { 1012 if (list_empty(&ioc->free_chain_list)) {
1009 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1013 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1010 printk(MPT2SAS_WARN_FMT "chain buffers not available\n", 1014 dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
1011 ioc->name); 1015 "available\n", ioc->name));
1012 return NULL; 1016 return NULL;
1013 } 1017 }
1014 chain_req = list_entry(ioc->free_chain_list.next, 1018 chain_req = list_entry(ioc->free_chain_list.next,
@@ -1449,7 +1453,7 @@ _scsih_slave_destroy(struct scsi_device *sdev)
1449 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1453 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1450 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1454 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
1451 sas_target_priv_data->sas_address); 1455 sas_target_priv_data->sas_address);
1452 if (sas_device) 1456 if (sas_device && !sas_target_priv_data->num_luns)
1453 sas_device->starget = NULL; 1457 sas_device->starget = NULL;
1454 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1458 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1455 } 1459 }
@@ -1776,11 +1780,9 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
1776 Mpi2ConfigReply_t mpi_reply; 1780 Mpi2ConfigReply_t mpi_reply;
1777 u16 sz; 1781 u16 sz;
1778 u8 num_pds, count; 1782 u8 num_pds, count;
1779 u64 mb = 1024 * 1024; 1783 unsigned long stripe_sz, block_sz;
1780 u64 tb_2 = 2 * mb * mb; 1784 u8 stripe_exp, block_exp;
1781 u64 capacity; 1785 u64 dev_max_lba;
1782 u32 stripe_sz;
1783 u8 i, stripe_exp;
1784 1786
1785 if (!ioc->is_warpdrive) 1787 if (!ioc->is_warpdrive)
1786 return; 1788 return;
@@ -1844,51 +1846,57 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
1844 vol_pg0->PhysDisk[count].PhysDiskNum); 1846 vol_pg0->PhysDisk[count].PhysDiskNum);
1845 goto out_error; 1847 goto out_error;
1846 } 1848 }
1849 /* Disable direct I/O if member drive lba exceeds 4 bytes */
1850 dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
1851 if (dev_max_lba >> 32) {
1852 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
1853 "disabled for the drive with handle(0x%04x) member"
1854 "handle (0x%04x) unsupported max lba 0x%016llx\n",
1855 ioc->name, raid_device->handle,
1856 le16_to_cpu(pd_pg0.DevHandle),
1857 (unsigned long long)dev_max_lba);
1858 goto out_error;
1859 }
1860
1847 raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle); 1861 raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle);
1848 } 1862 }
1849 1863
1850 /* 1864 /*
1851 * Assumption for WD: Direct I/O is not supported if the volume is 1865 * Assumption for WD: Direct I/O is not supported if the volume is
1852 * not RAID0, if the stripe size is not 64KB, if the block size is 1866 * not RAID0
1853 * not 512 and if the volume size is >2TB
1854 */ 1867 */
1855 if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0 || 1868 if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
1856 le16_to_cpu(vol_pg0->BlockSize) != 512) {
1857 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled " 1869 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1858 "for the drive with handle(0x%04x): type=%d, " 1870 "for the drive with handle(0x%04x): type=%d, "
1859 "s_sz=%uK, blk_size=%u\n", ioc->name, 1871 "s_sz=%uK, blk_size=%u\n", ioc->name,
1860 raid_device->handle, raid_device->volume_type, 1872 raid_device->handle, raid_device->volume_type,
1861 le32_to_cpu(vol_pg0->StripeSize)/2, 1873 (le32_to_cpu(vol_pg0->StripeSize) *
1874 le16_to_cpu(vol_pg0->BlockSize)) / 1024,
1862 le16_to_cpu(vol_pg0->BlockSize)); 1875 le16_to_cpu(vol_pg0->BlockSize));
1863 goto out_error; 1876 goto out_error;
1864 } 1877 }
1865 1878
1866 capacity = (u64) le16_to_cpu(vol_pg0->BlockSize) * 1879 stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
1867 (le64_to_cpu(vol_pg0->MaxLBA) + 1); 1880 stripe_exp = find_first_bit(&stripe_sz, 32);
1868 1881 if (stripe_exp == 32) {
1869 if (capacity > tb_2) {
1870 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled " 1882 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1871 "for the drive with handle(0x%04x) since drive sz > 2TB\n", 1883 "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
1872 ioc->name, raid_device->handle); 1884 ioc->name, raid_device->handle,
1885 (le32_to_cpu(vol_pg0->StripeSize) *
1886 le16_to_cpu(vol_pg0->BlockSize)) / 1024);
1873 goto out_error; 1887 goto out_error;
1874 } 1888 }
1875 1889 raid_device->stripe_exponent = stripe_exp;
1876 stripe_sz = le32_to_cpu(vol_pg0->StripeSize); 1890 block_sz = le16_to_cpu(vol_pg0->BlockSize);
1877 stripe_exp = 0; 1891 block_exp = find_first_bit(&block_sz, 16);
1878 for (i = 0; i < 32; i++) { 1892 if (block_exp == 16) {
1879 if (stripe_sz & 1)
1880 break;
1881 stripe_exp++;
1882 stripe_sz >>= 1;
1883 }
1884 if (i == 32) {
1885 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled " 1893 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1886 "for the drive with handle(0x%04x) invalid stripe sz %uK\n", 1894 "for the drive with handle(0x%04x) invalid block sz %u\n",
1887 ioc->name, raid_device->handle, 1895 ioc->name, raid_device->handle,
1888 le32_to_cpu(vol_pg0->StripeSize)/2); 1896 le16_to_cpu(vol_pg0->BlockSize));
1889 goto out_error; 1897 goto out_error;
1890 } 1898 }
1891 raid_device->stripe_exponent = stripe_exp; 1899 raid_device->block_exponent = block_exp;
1892 raid_device->direct_io_enabled = 1; 1900 raid_device->direct_io_enabled = 1;
1893 1901
1894 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive" 1902 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive"
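The validation added above derives the stripe and block exponents with find_first_bit() instead of the old open-coded shift loop: for a power-of-two size the index of the lowest set bit is exactly log2 of the size, and a result equal to the scanned width (32 for the stripe, 16 for the block) means the value was zero, so direct I/O is refused. A standalone sketch of the same derivation, using GCC's __builtin_ctz as a stand-in for the kernel helper and adding an explicit power-of-two check (the driver itself only needs the zero test, since firmware reports these sizes as powers of two):

/* Sketch only -- not driver code. */
#include <stdint.h>
#include <stdio.h>

static int size_to_exponent(uint32_t v)
{
	if (v == 0 || (v & (v - 1)))
		return -1;		/* zero or not a power of two */
	return __builtin_ctz(v);	/* index of lowest set bit == log2(v) */
}

int main(void)
{
	printf("%d\n", size_to_exponent(128));	/* 7 */
	printf("%d\n", size_to_exponent(512));	/* 9 */
	printf("%d\n", size_to_exponent(0));	/* -1, rejected */
	return 0;
}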
@@ -3804,8 +3812,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3804{ 3812{
3805 u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size; 3813 u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size;
3806 u32 stripe_sz, stripe_exp; 3814 u32 stripe_sz, stripe_exp;
3807 u8 num_pds, *cdb_ptr, *tmp_ptr, *lba_ptr1, *lba_ptr2; 3815 u8 num_pds, *cdb_ptr, i;
3808 u8 cdb0 = scmd->cmnd[0]; 3816 u8 cdb0 = scmd->cmnd[0];
3817 u64 v_llba;
3809 3818
3810 /* 3819 /*
3811 * Try Direct I/O to RAID member disks 3820 * Try Direct I/O to RAID member disks
@@ -3816,15 +3825,11 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3816 3825
3817 if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4] 3826 if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
3818 | cdb_ptr[5])) { 3827 | cdb_ptr[5])) {
3819 io_size = scsi_bufflen(scmd) >> 9; 3828 io_size = scsi_bufflen(scmd) >>
3829 raid_device->block_exponent;
3830 i = (cdb0 < READ_16) ? 2 : 6;
3820 /* get virtual lba */ 3831 /* get virtual lba */
3821 lba_ptr1 = lba_ptr2 = (cdb0 < READ_16) ? &cdb_ptr[2] : 3832 v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[i]));
3822 &cdb_ptr[6];
3823 tmp_ptr = (u8 *)&v_lba + 3;
3824 *tmp_ptr-- = *lba_ptr1++;
3825 *tmp_ptr-- = *lba_ptr1++;
3826 *tmp_ptr-- = *lba_ptr1++;
3827 *tmp_ptr = *lba_ptr1;
3828 3833
3829 if (((u64)v_lba + (u64)io_size - 1) <= 3834 if (((u64)v_lba + (u64)io_size - 1) <=
3830 (u32)raid_device->max_lba) { 3835 (u32)raid_device->max_lba) {
@@ -3843,11 +3848,39 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3843 mpi_request->DevHandle = 3848 mpi_request->DevHandle =
3844 cpu_to_le16(raid_device-> 3849 cpu_to_le16(raid_device->
3845 pd_handle[column]); 3850 pd_handle[column]);
3846 tmp_ptr = (u8 *)&p_lba + 3; 3851 (*(__be32 *)(&cdb_ptr[i])) =
3847 *lba_ptr2++ = *tmp_ptr--; 3852 cpu_to_be32(p_lba);
3848 *lba_ptr2++ = *tmp_ptr--; 3853 /*
3849 *lba_ptr2++ = *tmp_ptr--; 3854 * WD: To indicate this I/O is direct I/O
3850 *lba_ptr2 = *tmp_ptr; 3855 */
3856 _scsih_scsi_direct_io_set(ioc, smid, 1);
3857 }
3858 }
3859 } else {
3860 io_size = scsi_bufflen(scmd) >>
3861 raid_device->block_exponent;
3862 /* get virtual lba */
3863 v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));
3864
3865 if ((v_llba + (u64)io_size - 1) <=
3866 raid_device->max_lba) {
3867 stripe_sz = raid_device->stripe_sz;
3868 stripe_exp = raid_device->stripe_exponent;
3869 stripe_off = (u32) (v_llba & (stripe_sz - 1));
3870
3871 /* Check whether IO falls within a stripe */
3872 if ((stripe_off + io_size) <= stripe_sz) {
3873 num_pds = raid_device->num_pds;
3874 p_lba = (u32)(v_llba >> stripe_exp);
3875 stripe_unit = p_lba / num_pds;
3876 column = p_lba % num_pds;
3877 p_lba = (stripe_unit << stripe_exp) +
3878 stripe_off;
3879 mpi_request->DevHandle =
3880 cpu_to_le16(raid_device->
3881 pd_handle[column]);
3882 (*(__be64 *)(&cdb_ptr[2])) =
3883 cpu_to_be64((u64)p_lba);
3851 /* 3884 /*
3852 * WD: To indicate this I/O is directI/O 3885 * WD: To indicate this I/O is directI/O
3853 */ 3886 */
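Both branches above perform the same RAID0 translation, the first on a 32-bit LBA from a 10/12-byte CDB and the second on a 64-bit LBA from a 16-byte CDB: the offset within the stripe is kept, the stripe number selects a member disk by modulo, and the quotient becomes that member's stripe number. Direct I/O is attempted only when the transfer fits entirely inside one stripe (stripe_off + io_size <= stripe_sz). A minimal sketch of the mapping, assuming a power-of-two stripe size as the driver does:

#include <stdint.h>

/* Virtual-to-physical RAID0 LBA mapping; stripe_exp == log2(stripe_sz). */
static void map_raid0_lba(uint64_t v_lba, uint32_t stripe_sz,
			  uint32_t stripe_exp, uint8_t num_pds,
			  uint32_t *p_lba, uint8_t *column)
{
	uint32_t stripe_off = (uint32_t)(v_lba & (stripe_sz - 1));
	uint32_t stripe_no = (uint32_t)(v_lba >> stripe_exp);

	*column = stripe_no % num_pds;	/* which member disk */
	*p_lba = ((stripe_no / num_pds) << stripe_exp) + stripe_off;
}

The remapped LBA is then written back into the CDB in big-endian form (the cpu_to_be32/cpu_to_be64 stores above), replacing the old byte-by-byte pointer copy.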
@@ -4403,11 +4436,14 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4403 scmd->result = DID_NO_CONNECT << 16; 4436 scmd->result = DID_NO_CONNECT << 16;
4404 goto out; 4437 goto out;
4405 } 4438 }
4439 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
4406 /* 4440 /*
4407 * WARPDRIVE: If direct_io is set then it is directIO, 4441 * WARPDRIVE: If direct_io is set then it is directIO,
4408 * the failed direct I/O should be redirected to volume 4442 * the failed direct I/O should be redirected to volume
4409 */ 4443 */
4410 if (_scsih_scsi_direct_io_get(ioc, smid)) { 4444 if (_scsih_scsi_direct_io_get(ioc, smid) &&
4445 ((ioc_status & MPI2_IOCSTATUS_MASK)
4446 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
4411 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 4447 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4412 ioc->scsi_lookup[smid - 1].scmd = scmd; 4448 ioc->scsi_lookup[smid - 1].scmd = scmd;
4413 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 4449 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
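Reading IOCStatus before this check lets the redirect be gated on how the command died: a direct I/O that completed with SCSI_TASK_TERMINATED is left to the normal completion path, while any other direct-I/O failure is retried through the RAID volume. The decision reduces to roughly:

/* Schematic of the gating added above; retry body elided. */
u16 masked_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

if (_scsih_scsi_direct_io_get(ioc, smid) &&
    masked_status != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED) {
	/* clear the direct-I/O flag and re-issue the command
	 * against the volume's DevHandle */
}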
@@ -4441,7 +4477,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4441 4477
4442 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); 4478 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
4443 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); 4479 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
4444 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
4445 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) 4480 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
4446 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); 4481 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
4447 else 4482 else
@@ -4485,6 +4520,8 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4485 scmd->result = DID_TRANSPORT_DISRUPTED << 16; 4520 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
4486 goto out; 4521 goto out;
4487 } 4522 }
4523 scmd->result = DID_SOFT_ERROR << 16;
4524 break;
4488 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: 4525 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
4489 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: 4526 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
4490 scmd->result = DID_RESET << 16; 4527 scmd->result = DID_RESET << 16;
@@ -6714,6 +6751,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
6714 } else 6751 } else
6715 sas_target_priv_data = NULL; 6752 sas_target_priv_data = NULL;
6716 raid_device->responding = 1; 6753 raid_device->responding = 1;
6754 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
6717 starget_printk(KERN_INFO, raid_device->starget, 6755 starget_printk(KERN_INFO, raid_device->starget,
6718 "handle(0x%04x), wwid(0x%016llx)\n", handle, 6756 "handle(0x%04x), wwid(0x%016llx)\n", handle,
6719 (unsigned long long)raid_device->wwid); 6757 (unsigned long long)raid_device->wwid);
@@ -6724,16 +6762,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
6724 */ 6762 */
6725 _scsih_init_warpdrive_properties(ioc, raid_device); 6763 _scsih_init_warpdrive_properties(ioc, raid_device);
6726 if (raid_device->handle == handle) 6764 if (raid_device->handle == handle)
6727 goto out; 6765 return;
6728 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", 6766 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
6729 raid_device->handle); 6767 raid_device->handle);
6730 raid_device->handle = handle; 6768 raid_device->handle = handle;
6731 if (sas_target_priv_data) 6769 if (sas_target_priv_data)
6732 sas_target_priv_data->handle = handle; 6770 sas_target_priv_data->handle = handle;
6733 goto out; 6771 return;
6734 } 6772 }
6735 } 6773 }
6736 out: 6774
6737 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 6775 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
6738} 6776}
6739 6777
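The reworked exits above drop raid_device_lock as soon as the matching entry is marked responding, and each path then returns directly instead of jumping to an out: label that unlocked once at the bottom; the starget_printk() and handle fix-up now run outside the spinlock. The resulting shape, schematically (not the full function):

/* Schematic of the locking pattern adopted above. */
spin_lock_irqsave(&ioc->raid_device_lock, flags);
list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
	if (raid_device->wwid == wwid && raid_device->starget) {
		raid_device->responding = 1;
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		/* logging and handle update happen unlocked */
		return;
	}
}
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);	/* not found */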
@@ -7418,7 +7456,7 @@ static struct scsi_host_template scsih_driver_template = {
7418 .can_queue = 1, 7456 .can_queue = 1,
7419 .this_id = -1, 7457 .this_id = -1,
7420 .sg_tablesize = MPT2SAS_SG_DEPTH, 7458 .sg_tablesize = MPT2SAS_SG_DEPTH,
7421 .max_sectors = 8192, 7459 .max_sectors = 32767,
7422 .cmd_per_lun = 7, 7460 .cmd_per_lun = 7,
7423 .use_clustering = ENABLE_CLUSTERING, 7461 .use_clustering = ENABLE_CLUSTERING,
7424 .shost_attrs = mpt2sas_host_attrs, 7462 .shost_attrs = mpt2sas_host_attrs,
@@ -7928,6 +7966,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
7928 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; 7966 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
7929 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; 7967 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
7930 ioc->logging_level = logging_level; 7968 ioc->logging_level = logging_level;
7969 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
7931 /* misc semaphores and spin locks */ 7970 /* misc semaphores and spin locks */
7932 mutex_init(&ioc->reset_in_progress_mutex); 7971 mutex_init(&ioc->reset_in_progress_mutex);
7933 spin_lock_init(&ioc->ioc_reset_in_progress_lock); 7972 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
@@ -7958,11 +7997,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
7958 printk(MPT2SAS_WARN_FMT "Invalid value %d passed " 7997 printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
7959 "for max_sectors, range is 64 to 8192. Assigning " 7998 "for max_sectors, range is 64 to 8192. Assigning "
7960 "value of 64.\n", ioc->name, max_sectors); 7999 "value of 64.\n", ioc->name, max_sectors);
7961 } else if (max_sectors > 8192) { 8000 } else if (max_sectors > 32767) {
7962 shost->max_sectors = 8192; 8001 shost->max_sectors = 32767;
7963 printk(MPT2SAS_WARN_FMT "Invalid value %d passed " 8002 printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
7964 "for max_sectors, range is 64 to 8192. Assigning " 8003 "for max_sectors, range is 64 to 8192. Assigning "
7965 "default value of 8192.\n", ioc->name, 8004 "default value of 32767.\n", ioc->name,
7966 max_sectors); 8005 max_sectors);
7967 } else { 8006 } else {
7968 shost->max_sectors = max_sectors & 0xFFFE; 8007 shost->max_sectors = max_sectors & 0xFFFE;
@@ -8000,7 +8039,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8000 goto out_attach_fail; 8039 goto out_attach_fail;
8001 } 8040 }
8002 8041
8003 scsi_scan_host(shost);
8004 if (ioc->is_warpdrive) { 8042 if (ioc->is_warpdrive) {
8005 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) 8043 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
8006 ioc->hide_drives = 0; 8044 ioc->hide_drives = 0;
@@ -8014,8 +8052,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8014 } 8052 }
8015 } else 8053 } else
8016 ioc->hide_drives = 0; 8054 ioc->hide_drives = 0;
8055 scsi_scan_host(shost);
8017 8056
8018 _scsih_probe_devices(ioc);
8019 return 0; 8057 return 0;
8020 8058
8021 out_attach_fail: 8059 out_attach_fail:
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 230732241aa2..831047466a5a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -398,8 +398,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
398 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - " 398 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
399 "send to sas_addr(0x%016llx)\n", ioc->name, 399 "send to sas_addr(0x%016llx)\n", ioc->name,
400 (unsigned long long)sas_address)); 400 (unsigned long long)sas_address));
401 mpt2sas_base_put_smid_default(ioc, smid);
402 init_completion(&ioc->transport_cmds.done); 401 init_completion(&ioc->transport_cmds.done);
402 mpt2sas_base_put_smid_default(ioc, smid);
403 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, 403 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
404 10*HZ); 404 10*HZ);
405 405
@@ -1184,8 +1184,8 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
1184 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - " 1184 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
1185 "send to sas_addr(0x%016llx), phy(%d)\n", ioc->name, 1185 "send to sas_addr(0x%016llx), phy(%d)\n", ioc->name,
1186 (unsigned long long)phy->identify.sas_address, phy->number)); 1186 (unsigned long long)phy->identify.sas_address, phy->number));
1187 mpt2sas_base_put_smid_default(ioc, smid);
1188 init_completion(&ioc->transport_cmds.done); 1187 init_completion(&ioc->transport_cmds.done);
1188 mpt2sas_base_put_smid_default(ioc, smid);
1189 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, 1189 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
1190 10*HZ); 1190 10*HZ);
1191 1191
@@ -1509,8 +1509,9 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
1509 "send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", ioc->name, 1509 "send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", ioc->name,
1510 (unsigned long long)phy->identify.sas_address, phy->number, 1510 (unsigned long long)phy->identify.sas_address, phy->number,
1511 phy_operation)); 1511 phy_operation));
1512 mpt2sas_base_put_smid_default(ioc, smid); 1512
1513 init_completion(&ioc->transport_cmds.done); 1513 init_completion(&ioc->transport_cmds.done);
1514 mpt2sas_base_put_smid_default(ioc, smid);
1514 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, 1515 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
1515 10*HZ); 1516 10*HZ);
1516 1517
@@ -1949,8 +1950,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1949 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - " 1950 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
1950 "sending smp request\n", ioc->name, __func__)); 1951 "sending smp request\n", ioc->name, __func__));
1951 1952
1952 mpt2sas_base_put_smid_default(ioc, smid);
1953 init_completion(&ioc->transport_cmds.done); 1953 init_completion(&ioc->transport_cmds.done);
1954 mpt2sas_base_put_smid_default(ioc, smid);
1954 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, 1955 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
1955 10*HZ); 1956 10*HZ);
1956 1957
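All four hunks in this file make the same ordering fix: the completion is armed before the request is handed to the firmware. With the old order a fast reply could call complete() from interrupt context before (or concurrently with) init_completion(), leaving the waiter to time out on a command that had actually finished. The required sequence, in general form:

/* Sketch of the safe ordering adopted above. */
init_completion(&ioc->transport_cmds.done);	/* 1. arm the waiter   */
mpt2sas_base_put_smid_default(ioc, smid);	/* 2. fire the request */
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
    10 * HZ);					/* 3. wait for reply   */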
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 6465dae5883a..a2f1b3043dfb 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -107,7 +107,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
107 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 107 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
108 break; 108 break;
109 } 109 }
110 return -EINVAL; 110 return count;
111} 111}
112 112
113static struct bin_attribute sysfs_fw_dump_attr = { 113static struct bin_attribute sysfs_fw_dump_attr = {
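Returning count instead of -EINVAL matches sysfs write semantics: the handler reports how many bytes of the caller's buffer it consumed, so every valid echo into the fw_dump attribute previously failed at the shell even though the requested action had been taken. The contract, as a sketch of a hypothetical binary-attribute write handler:

/* Sketch of the sysfs bin-attribute write contract (hypothetical handler). */
static ssize_t
example_write(struct file *filp, struct kobject *kobj,
    struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
{
	if (off != 0 || count == 0)
		return 0;	/* nothing consumed */
	/* ... act on buf[0] ... */
	return count;		/* success: report bytes consumed */
}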
@@ -387,7 +387,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
387 break; 387 break;
388 case 3: 388 case 3:
389 if (ha->optrom_state != QLA_SWRITING) 389 if (ha->optrom_state != QLA_SWRITING)
390 return -ENOMEM; 390 return -EINVAL;
391 391
392 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 392 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
393 ql_log(ql_log_warn, vha, 0x7068, 393 ql_log(ql_log_warn, vha, 0x7068,
@@ -667,7 +667,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
667 dev, adr, len, opt); 667 dev, adr, len, opt);
668 if (rval != QLA_SUCCESS) { 668 if (rval != QLA_SUCCESS) {
669 ql_log(ql_log_warn, vha, 0x7074, 669 ql_log(ql_log_warn, vha, 0x7074,
670 "Unable to write EDC (%x) %02x:%04x:%02x:%02hhx\n", 670 "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
671 rval, dev, adr, opt, len, buf[8]); 671 rval, dev, adr, opt, len, buf[8]);
672 return -EIO; 672 return -EIO;
673 } 673 }
@@ -724,7 +724,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
724 dev, adr, len, opt); 724 dev, adr, len, opt);
725 if (rval != QLA_SUCCESS) { 725 if (rval != QLA_SUCCESS) {
726 ql_log(ql_log_info, vha, 0x7075, 726 ql_log(ql_log_info, vha, 0x7075,
727 "Unable to write EDC status (%x) %02x:%04x:%02x.\n", 727 "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
728 rval, dev, adr, opt, len); 728 rval, dev, adr, opt, len);
729 return -EIO; 729 return -EIO;
730 } 730 }
@@ -1971,8 +1971,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1971 "Queue delete failed.\n"); 1971 "Queue delete failed.\n");
1972 } 1972 }
1973 1973
1974 scsi_host_put(vha->host);
1975 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id); 1974 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
1975 scsi_host_put(vha->host);
1976 return 0; 1976 return 0;
1977} 1977}
1978 1978
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 8b641a8a0c74..b1d0f936bf2d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -31,6 +31,7 @@ qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
31 memset(sp, 0, sizeof(*sp)); 31 memset(sp, 0, sizeof(*sp));
32 sp->fcport = fcport; 32 sp->fcport = fcport;
33 sp->ctx = ctx; 33 sp->ctx = ctx;
34 ctx->iocbs = 1;
34done: 35done:
35 return sp; 36 return sp;
36} 37}
@@ -102,7 +103,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
102 103
103 bsg_job->reply->reply_payload_rcv_len = 0; 104 bsg_job->reply->reply_payload_rcv_len = 0;
104 105
105 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) { 106 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
106 ret = -EINVAL; 107 ret = -EINVAL;
107 goto exit_fcp_prio_cfg; 108 goto exit_fcp_prio_cfg;
108 } 109 }
@@ -389,6 +390,20 @@ done:
389 return rval; 390 return rval;
390} 391}
391 392
393inline uint16_t
394qla24xx_calc_ct_iocbs(uint16_t dsds)
395{
396 uint16_t iocbs;
397
398 iocbs = 1;
399 if (dsds > 2) {
400 iocbs += (dsds - 2) / 5;
401 if ((dsds - 2) % 5)
402 iocbs++;
403 }
404 return iocbs;
405}
406
392static int 407static int
393qla2x00_process_ct(struct fc_bsg_job *bsg_job) 408qla2x00_process_ct(struct fc_bsg_job *bsg_job)
394{ 409{
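qla24xx_calc_ct_iocbs() encodes the CT pass-through packing rules: the command IOCB itself carries two data segment descriptors and each Continuation Type 1 IOCB carries five more, so the total is 1 + ceil((dsds - 2) / 5). For example, 12 descriptors cost 1 + ceil(10/5) = 3 IOCBs and 13 cost 4. The same arithmetic written with the kernel's rounding helper (an equivalent form, not the driver's):

#include <linux/kernel.h>	/* DIV_ROUND_UP */

static inline uint16_t calc_ct_iocbs(uint16_t dsds)
{
	/* 2 DSDs ride in the command IOCB, 5 per continuation IOCB */
	return dsds > 2 ? 1 + DIV_ROUND_UP(dsds - 2, 5) : 1;
}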
@@ -489,6 +504,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
489 ct = sp->ctx; 504 ct = sp->ctx;
490 ct->type = SRB_CT_CMD; 505 ct->type = SRB_CT_CMD;
491 ct->name = "bsg_ct"; 506 ct->name = "bsg_ct";
507 ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
492 ct->u.bsg_job = bsg_job; 508 ct->u.bsg_job = bsg_job;
493 509
494 ql_dbg(ql_dbg_user, vha, 0x7016, 510 ql_dbg(ql_dbg_user, vha, 0x7016,
@@ -1653,7 +1669,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1653 } 1669 }
1654 1670
1655 ql_dbg(ql_dbg_user, vha, 0x7000, 1671 ql_dbg(ql_dbg_user, vha, 0x7000,
1656 "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode); 1672 "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
1657 1673
1658 switch (bsg_job->request->msgcode) { 1674 switch (bsg_job->request->msgcode) {
1659 case FC_BSG_RPT_ELS: 1675 case FC_BSG_RPT_ELS:
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f3cddd5800c3..7c54624b5b13 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,15 +11,17 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0116 | | 14 * | Module Init and Probe | 0x0116 | 0xfa |
15 * | Mailbox commands | 0x112b | | 15 * | Mailbox commands | 0x112b | |
16 * | Device Discovery | 0x2083 | | 16 * | Device Discovery | 0x2084 | |
17 * | Queue Command and IO tracing | 0x302e | 0x3008 | 17 * | Queue Command and IO tracing | 0x302f | 0x3008,0x302d, |
18 * | | | 0x302e |
18 * | DPC Thread | 0x401c | | 19 * | DPC Thread | 0x401c | |
19 * | Async Events | 0x5059 | | 20 * | Async Events | 0x5057 | 0x5052 |
20 * | Timer Routines | 0x6010 | 0x600e,0x600f | 21 * | Timer Routines | 0x6011 | 0x600e,0x600f |
21 * | User Space Interactions | 0x709d | | 22 * | User Space Interactions | 0x709e | |
22 * | Task Management | 0x8041 | 0x800b | 23 * | Task Management | 0x803c | 0x8025-0x8026 |
24 * | | | 0x800b,0x8039 |
23 * | AER/EEH | 0x900f | | 25 * | AER/EEH | 0x900f | |
24 * | Virtual Port | 0xa007 | | 26 * | Virtual Port | 0xa007 | |
25 * | ISP82XX Specific | 0xb052 | | 27 * | ISP82XX Specific | 0xb052 | |
@@ -368,7 +370,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
368 370
369 memcpy(iter_reg, ha->fce, ntohl(fcec->size)); 371 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
370 372
371 return iter_reg; 373 return (char *)iter_reg + ntohl(fcec->size);
372} 374}
373 375
374static inline void * 376static inline void *
@@ -1650,6 +1652,15 @@ qla81xx_fw_dump_failed:
1650/****************************************************************************/ 1652/****************************************************************************/
1651/* Driver Debug Functions. */ 1653/* Driver Debug Functions. */
1652/****************************************************************************/ 1654/****************************************************************************/
1655
1656static inline int
1657ql_mask_match(uint32_t level)
1658{
1659 if (ql2xextended_error_logging == 1)
1660 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
1661 return (level & ql2xextended_error_logging) == level;
1662}
1663
1653/* 1664/*
1654 * This function is for formatting and logging debug information. 1665 * This function is for formatting and logging debug information.
1655 * It is to be used when vha is available. It formats the message 1666 * It is to be used when vha is available. It formats the message
@@ -1664,34 +1675,31 @@ qla81xx_fw_dump_failed:
1664 * msg: The message to be displayed. 1675 * msg: The message to be displayed.
1665 */ 1676 */
1666void 1677void
1667ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) { 1678ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
1668 1679{
1669 char pbuf[QL_DBG_BUF_LEN]; 1680 va_list va;
1670 va_list ap; 1681 struct va_format vaf;
1671 uint32_t len; 1682
1672 struct pci_dev *pdev = NULL; 1683 if (!ql_mask_match(level))
1673 1684 return;
1674 memset(pbuf, 0, QL_DBG_BUF_LEN); 1685
1675 1686 va_start(va, fmt);
1676 va_start(ap, msg); 1687
1677 1688 vaf.fmt = fmt;
1678 if ((level & ql2xextended_error_logging) == level) { 1689 vaf.va = &va;
1679 if (vha != NULL) { 1690
1680 pdev = vha->hw->pdev; 1691 if (vha != NULL) {
1681 /* <module-name> <pci-name> <msg-id>:<host> Message */ 1692 const struct pci_dev *pdev = vha->hw->pdev;
1682 sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR, 1693 /* <module-name> <pci-name> <msg-id>:<host> Message */
1683 dev_name(&(pdev->dev)), id + ql_dbg_offset, 1694 pr_warn("%s [%s]-%04x:%ld: %pV",
1684 vha->host_no); 1695 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
1685 } else 1696 vha->host_no, &vaf);
1686 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, 1697 } else {
1687 "0000:00:00.0", id + ql_dbg_offset); 1698 pr_warn("%s [%s]-%04x: : %pV",
1688 1699 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
1689 len = strlen(pbuf);
1690 vsprintf(pbuf+len, msg, ap);
1691 pr_warning("%s", pbuf);
1692 } 1700 }
1693 1701
1694 va_end(ap); 1702 va_end(va);
1695 1703
1696} 1704}
1697 1705
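The rewrite above replaces sprintf-into-a-512-byte-stack-buffer with the kernel's %pV extension: a struct va_format couples the caller's format string with its va_list, so prefix and message are emitted in a single pr_warn() call, with no buffer to overflow and no interleaving between the two halves. The new ql_mask_match() also centralizes the level test, first expanding the shorthand ql2xextended_error_logging=1 into QL_DBG_DEFAULT1_MASK. The %pV pattern in isolation, as a hypothetical wrapper:

#include <linux/kernel.h>

static void __printf(1, 2) example_warn(const char *fmt, ...)
{
	struct va_format vaf;
	va_list va;

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;
	pr_warn("example-prefix: %pV", &vaf);	/* one atomic printk */
	va_end(va);
}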
@@ -1710,31 +1718,27 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
1710 * msg: The message to be displayed. 1718 * msg: The message to be displayed.
1711 */ 1719 */
1712void 1720void
1713ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) { 1721ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
1714 1722 const char *fmt, ...)
1715 char pbuf[QL_DBG_BUF_LEN]; 1723{
1716 va_list ap; 1724 va_list va;
1717 uint32_t len; 1725 struct va_format vaf;
1718 1726
1719 if (pdev == NULL) 1727 if (pdev == NULL)
1720 return; 1728 return;
1729 if (!ql_mask_match(level))
1730 return;
1721 1731
1722 memset(pbuf, 0, QL_DBG_BUF_LEN); 1732 va_start(va, fmt);
1723
1724 va_start(ap, msg);
1725
1726 if ((level & ql2xextended_error_logging) == level) {
1727 /* <module-name> <dev-name>:<msg-id> Message */
1728 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1729 dev_name(&(pdev->dev)), id + ql_dbg_offset);
1730 1733
1731 len = strlen(pbuf); 1734 vaf.fmt = fmt;
1732 vsprintf(pbuf+len, msg, ap); 1735 vaf.va = &va;
1733 pr_warning("%s", pbuf);
1734 }
1735 1736
1736 va_end(ap); 1737 /* <module-name> <dev-name>:<msg-id> Message */
1738 pr_warn("%s [%s]-%04x: : %pV",
1739 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
1737 1740
1741 va_end(va);
1738} 1742}
1739 1743
1740/* 1744/*
@@ -1751,47 +1755,47 @@ ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
1751 * msg: The message to be displayed. 1755 * msg: The message to be displayed.
1752 */ 1756 */
1753void 1757void
1754ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) { 1758ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
1755 1759{
1756 char pbuf[QL_DBG_BUF_LEN]; 1760 va_list va;
1757 va_list ap; 1761 struct va_format vaf;
1758 uint32_t len; 1762 char pbuf[128];
1759 struct pci_dev *pdev = NULL;
1760
1761 memset(pbuf, 0, QL_DBG_BUF_LEN);
1762
1763 va_start(ap, msg);
1764
1765 if (level <= ql_errlev) {
1766 if (vha != NULL) {
1767 pdev = vha->hw->pdev;
1768 /* <module-name> <msg-id>:<host> Message */
1769 sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
1770 dev_name(&(pdev->dev)), id, vha->host_no);
1771 } else
1772 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1773 "0000:00:00.0", id);
1774 1763
1775 len = strlen(pbuf); 1764 if (level > ql_errlev)
1776 vsprintf(pbuf+len, msg, ap); 1765 return;
1777 1766
1778 switch (level) { 1767 if (vha != NULL) {
1779 case 0: /* FATAL LOG */ 1768 const struct pci_dev *pdev = vha->hw->pdev;
1780 pr_crit("%s", pbuf); 1769 /* <module-name> <msg-id>:<host> Message */
1781 break; 1770 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
1782 case 1: 1771 QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
1783 pr_err("%s", pbuf); 1772 } else {
1784 break; 1773 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
1785 case 2: 1774 QL_MSGHDR, "0000:00:00.0", id);
1786 pr_warn("%s", pbuf); 1775 }
1787 break; 1776 pbuf[sizeof(pbuf) - 1] = 0;
1788 default: 1777
1789 pr_info("%s", pbuf); 1778 va_start(va, fmt);
1790 break; 1779
1791 } 1780 vaf.fmt = fmt;
1781 vaf.va = &va;
1782
1783 switch (level) {
1784 case 0: /* FATAL LOG */
1785 pr_crit("%s%pV", pbuf, &vaf);
1786 break;
1787 case 1:
1788 pr_err("%s%pV", pbuf, &vaf);
1789 break;
1790 case 2:
1791 pr_warn("%s%pV", pbuf, &vaf);
1792 break;
1793 default:
1794 pr_info("%s%pV", pbuf, &vaf);
1795 break;
1792 } 1796 }
1793 1797
1794 va_end(ap); 1798 va_end(va);
1795} 1799}
1796 1800
1797/* 1801/*
@@ -1809,43 +1813,44 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
1809 * msg: The message to be displayed. 1813 * msg: The message to be displayed.
1810 */ 1814 */
1811void 1815void
1812ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) { 1816ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
1813 1817 const char *fmt, ...)
1814 char pbuf[QL_DBG_BUF_LEN]; 1818{
1815 va_list ap; 1819 va_list va;
1816 uint32_t len; 1820 struct va_format vaf;
1821 char pbuf[128];
1817 1822
1818 if (pdev == NULL) 1823 if (pdev == NULL)
1819 return; 1824 return;
1825 if (level > ql_errlev)
1826 return;
1820 1827
1821 memset(pbuf, 0, QL_DBG_BUF_LEN); 1828 /* <module-name> <dev-name>:<msg-id> Message */
1822 1829 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
1823 va_start(ap, msg); 1830 QL_MSGHDR, dev_name(&(pdev->dev)), id);
1824 1831 pbuf[sizeof(pbuf) - 1] = 0;
1825 if (level <= ql_errlev) { 1832
1826 /* <module-name> <dev-name>:<msg-id> Message */ 1833 va_start(va, fmt);
1827 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, 1834
1828 dev_name(&(pdev->dev)), id); 1835 vaf.fmt = fmt;
1829 1836 vaf.va = &va;
1830 len = strlen(pbuf); 1837
1831 vsprintf(pbuf+len, msg, ap); 1838 switch (level) {
1832 switch (level) { 1839 case 0: /* FATAL LOG */
1833 case 0: /* FATAL LOG */ 1840 pr_crit("%s%pV", pbuf, &vaf);
1834 pr_crit("%s", pbuf); 1841 break;
1835 break; 1842 case 1:
1836 case 1: 1843 pr_err("%s%pV", pbuf, &vaf);
1837 pr_err("%s", pbuf); 1844 break;
1838 break; 1845 case 2:
1839 case 2: 1846 pr_warn("%s%pV", pbuf, &vaf);
1840 pr_warn("%s", pbuf); 1847 break;
1841 break; 1848 default:
1842 default: 1849 pr_info("%s%pV", pbuf, &vaf);
1843 pr_info("%s", pbuf); 1850 break;
1844 break;
1845 }
1846 } 1851 }
1847 1852
1848 va_end(ap); 1853 va_end(va);
1849} 1854}
1850 1855
1851void 1856void
@@ -1858,20 +1863,20 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
1858 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 1863 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1859 uint16_t __iomem *mbx_reg; 1864 uint16_t __iomem *mbx_reg;
1860 1865
1861 if ((level & ql2xextended_error_logging) == level) { 1866 if (!ql_mask_match(level))
1862 1867 return;
1863 if (IS_QLA82XX(ha))
1864 mbx_reg = &reg82->mailbox_in[0];
1865 else if (IS_FWI2_CAPABLE(ha))
1866 mbx_reg = &reg24->mailbox0;
1867 else
1868 mbx_reg = MAILBOX_REG(ha, reg, 0);
1869 1868
1870 ql_dbg(level, vha, id, "Mailbox registers:\n"); 1869 if (IS_QLA82XX(ha))
1871 for (i = 0; i < 6; i++) 1870 mbx_reg = &reg82->mailbox_in[0];
1872 ql_dbg(level, vha, id, 1871 else if (IS_FWI2_CAPABLE(ha))
1873 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++)); 1872 mbx_reg = &reg24->mailbox0;
1874 } 1873 else
1874 mbx_reg = MAILBOX_REG(ha, reg, 0);
1875
1876 ql_dbg(level, vha, id, "Mailbox registers:\n");
1877 for (i = 0; i < 6; i++)
1878 ql_dbg(level, vha, id,
1879 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
1875} 1880}
1876 1881
1877 1882
@@ -1881,24 +1886,25 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
1881{ 1886{
1882 uint32_t cnt; 1887 uint32_t cnt;
1883 uint8_t c; 1888 uint8_t c;
1884 if ((level & ql2xextended_error_logging) == level) { 1889
1885 1890 if (!ql_mask_match(level))
1886 ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 " 1891 return;
1887 "9 Ah Bh Ch Dh Eh Fh\n"); 1892
1888 ql_dbg(level, vha, id, "----------------------------------" 1893 ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
1889 "----------------------------\n"); 1894 "9 Ah Bh Ch Dh Eh Fh\n");
1890 1895 ql_dbg(level, vha, id, "----------------------------------"
1891 ql_dbg(level, vha, id, ""); 1896 "----------------------------\n");
1892 for (cnt = 0; cnt < size;) { 1897
1893 c = *b++; 1898 ql_dbg(level, vha, id, " ");
1894 printk("%02x", (uint32_t) c); 1899 for (cnt = 0; cnt < size;) {
1895 cnt++; 1900 c = *b++;
1896 if (!(cnt % 16)) 1901 printk("%02x", (uint32_t) c);
1897 printk("\n"); 1902 cnt++;
1898 else 1903 if (!(cnt % 16))
1899 printk(" "); 1904 printk("\n");
1900 } 1905 else
1901 if (cnt % 16) 1906 printk(" ");
1902 ql_dbg(level, vha, id, "\n");
1903 } 1907 }
1908 if (cnt % 16)
1909 ql_dbg(level, vha, id, "\n");
1904} 1910}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 98a377b99017..5f1b6d9c3dcb 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -232,6 +232,7 @@ struct qla2xxx_fw_dump {
232}; 232};
233 233
234#define QL_MSGHDR "qla2xxx" 234#define QL_MSGHDR "qla2xxx"
235#define QL_DBG_DEFAULT1_MASK 0x1e400000
235 236
236#define ql_log_fatal 0 /* display fatal errors */ 237#define ql_log_fatal 0 /* display fatal errors */
237#define ql_log_warn 1 /* display critical errors */ 238#define ql_log_warn 1 /* display critical errors */
@@ -244,15 +245,15 @@ struct qla2xxx_fw_dump {
244 245
245extern int ql_errlev; 246extern int ql_errlev;
246 247
247void 248void __attribute__((format (printf, 4, 5)))
248ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...); 249ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
249void 250void __attribute__((format (printf, 4, 5)))
250ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...); 251ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
251 252
252void 253void __attribute__((format (printf, 4, 5)))
253ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...); 254ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
254void 255void __attribute__((format (printf, 4, 5)))
255ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...); 256ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
256 257
257/* Debug Levels */ 258/* Debug Levels */
258/* The 0x40000000 is the max value any debug level can have 259/* The 0x40000000 is the max value any debug level can have
@@ -275,5 +276,3 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
275#define ql_dbg_misc 0x00010000 /* For dumping everything that is not 276#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
276 * covered by upper categories 277 * covered by upper categories
277 */ 278 */
278
279#define QL_DBG_BUF_LEN 512
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index fcf052c50bf5..a6a4eebce4a8 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -271,6 +271,7 @@ struct srb_iocb {
271struct srb_ctx { 271struct srb_ctx {
272 uint16_t type; 272 uint16_t type;
273 char *name; 273 char *name;
274 int iocbs;
274 union { 275 union {
275 struct srb_iocb *iocb_cmd; 276 struct srb_iocb *iocb_cmd;
276 struct fc_bsg_job *bsg_job; 277 struct fc_bsg_job *bsg_job;
@@ -2244,6 +2245,7 @@ struct isp_operations {
2244 int (*get_flash_version) (struct scsi_qla_host *, void *); 2245 int (*get_flash_version) (struct scsi_qla_host *, void *);
2245 int (*start_scsi) (srb_t *); 2246 int (*start_scsi) (srb_t *);
2246 int (*abort_isp) (struct scsi_qla_host *); 2247 int (*abort_isp) (struct scsi_qla_host *);
2248 int (*iospace_config)(struct qla_hw_data*);
2247}; 2249};
2248 2250
2249/* MSI-X Support *************************************************************/ 2251/* MSI-X Support *************************************************************/
@@ -2978,10 +2980,6 @@ typedef struct scsi_qla_host {
2978 atomic_dec(&__vha->vref_count); \ 2980 atomic_dec(&__vha->vref_count); \
2979} while (0) 2981} while (0)
2980 2982
2981
2982#define qla_printk(level, ha, format, arg...) \
2983 dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
2984
2985/* 2983/*
2986 * qla2x00 local function return status codes 2984 * qla2x00 local function return status codes
2987 */ 2985 */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index c0c11afb685c..408679be8fdf 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -572,7 +572,7 @@ extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
572 size_t, char *); 572 size_t, char *);
573extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); 573extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
574extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); 574extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
575extern void qla82xx_start_iocbs(srb_t *); 575extern void qla82xx_start_iocbs(scsi_qla_host_t *);
576extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); 576extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
577extern int qla82xx_check_md_needed(scsi_qla_host_t *); 577extern int qla82xx_check_md_needed(scsi_qla_host_t *);
578extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); 578extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 37937aa3c3b8..4aea4ae23300 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -758,7 +758,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
758 "GA_NXT Send SNS failed (%d).\n", rval); 758 "GA_NXT Send SNS failed (%d).\n", rval);
759 } else if (sns_cmd->p.gan_data[8] != 0x80 || 759 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
760 sns_cmd->p.gan_data[9] != 0x02) { 760 sns_cmd->p.gan_data[9] != 0x02) {
761 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d, 761 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
762 "GA_NXT failed, rejected request ga_nxt_rsp:\n"); 762 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
763 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074, 763 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
764 sns_cmd->p.gan_data, 16); 764 sns_cmd->p.gan_data, 16);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 54ea68cec4c5..1fa067e053d2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -111,6 +111,7 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
111 memset(sp, 0, sizeof(*sp)); 111 memset(sp, 0, sizeof(*sp));
112 sp->fcport = fcport; 112 sp->fcport = fcport;
113 sp->ctx = ctx; 113 sp->ctx = ctx;
114 ctx->iocbs = 1;
114 ctx->u.iocb_cmd = iocb; 115 ctx->u.iocb_cmd = iocb;
115 iocb->free = qla2x00_ctx_sp_free; 116 iocb->free = qla2x00_ctx_sp_free;
116 117
@@ -154,8 +155,8 @@ qla2x00_async_iocb_timeout(srb_t *sp)
154 struct srb_ctx *ctx = sp->ctx; 155 struct srb_ctx *ctx = sp->ctx;
155 156
156 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, 157 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
157 "Async-%s timeout - portid=%02x%02x%02x.\n", 158 "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
158 ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area, 159 ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
159 fcport->d_id.b.al_pa); 160 fcport->d_id.b.al_pa);
160 161
161 fcport->flags &= ~FCF_ASYNC_SENT; 162 fcport->flags &= ~FCF_ASYNC_SENT;
@@ -211,9 +212,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
211 goto done_free_sp; 212 goto done_free_sp;
212 213
213 ql_dbg(ql_dbg_disc, vha, 0x2072, 214 ql_dbg(ql_dbg_disc, vha, 0x2072,
214 "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n", 215 "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
215 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 216 "retries=%d.\n", sp->handle, fcport->loop_id,
216 fcport->d_id.b.al_pa, fcport->login_retry); 217 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
218 fcport->login_retry);
217 return rval; 219 return rval;
218 220
219done_free_sp: 221done_free_sp:
@@ -258,9 +260,9 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
258 goto done_free_sp; 260 goto done_free_sp;
259 261
260 ql_dbg(ql_dbg_disc, vha, 0x2070, 262 ql_dbg(ql_dbg_disc, vha, 0x2070,
261 "Async-logout - loop-id=%x portid=%02x%02x%02x.\n", 263 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
262 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 264 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
263 fcport->d_id.b.al_pa); 265 fcport->d_id.b.area, fcport->d_id.b.al_pa);
264 return rval; 266 return rval;
265 267
266done_free_sp: 268done_free_sp:
@@ -308,9 +310,9 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
308 goto done_free_sp; 310 goto done_free_sp;
309 311
310 ql_dbg(ql_dbg_disc, vha, 0x206f, 312 ql_dbg(ql_dbg_disc, vha, 0x206f,
311 "Async-adisc - loopid=%x portid=%02x%02x%02x.\n", 313 "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
312 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 314 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
313 fcport->d_id.b.al_pa); 315 fcport->d_id.b.area, fcport->d_id.b.al_pa);
314 return rval; 316 return rval;
315 317
316done_free_sp: 318done_free_sp:
@@ -360,9 +362,9 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
360 goto done_free_sp; 362 goto done_free_sp;
361 363
362 ql_dbg(ql_dbg_taskm, vha, 0x802f, 364 ql_dbg(ql_dbg_taskm, vha, 0x802f,
363 "Async-tmf loop-id=%x portid=%02x%02x%02x.\n", 365 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
364 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 366 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
365 fcport->d_id.b.al_pa); 367 fcport->d_id.b.area, fcport->d_id.b.al_pa);
366 return rval; 368 return rval;
367 369
368done_free_sp: 370done_free_sp:
@@ -514,7 +516,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
514 set_bit(0, ha->req_qid_map); 516 set_bit(0, ha->req_qid_map);
515 set_bit(0, ha->rsp_qid_map); 517 set_bit(0, ha->rsp_qid_map);
516 518
517 ql_log(ql_log_info, vha, 0x0040, 519 ql_dbg(ql_dbg_init, vha, 0x0040,
518 "Configuring PCI space...\n"); 520 "Configuring PCI space...\n");
519 rval = ha->isp_ops->pci_config(vha); 521 rval = ha->isp_ops->pci_config(vha);
520 if (rval) { 522 if (rval) {
@@ -533,7 +535,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
533 } 535 }
534 536
535 ha->isp_ops->get_flash_version(vha, req->ring); 537 ha->isp_ops->get_flash_version(vha, req->ring);
536 ql_log(ql_log_info, vha, 0x0061, 538 ql_dbg(ql_dbg_init, vha, 0x0061,
537 "Configure NVRAM parameters...\n"); 539 "Configure NVRAM parameters...\n");
538 540
539 ha->isp_ops->nvram_config(vha); 541 ha->isp_ops->nvram_config(vha);
@@ -550,7 +552,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
550 return QLA_FUNCTION_FAILED; 552 return QLA_FUNCTION_FAILED;
551 } 553 }
552 554
553 ql_log(ql_log_info, vha, 0x0078, 555 ql_dbg(ql_dbg_init, vha, 0x0078,
554 "Verifying loaded RISC code...\n"); 556 "Verifying loaded RISC code...\n");
555 557
556 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { 558 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
@@ -1294,7 +1296,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1294 ha->flags.fce_enabled = 0; 1296 ha->flags.fce_enabled = 0;
1295 goto try_eft; 1297 goto try_eft;
1296 } 1298 }
1297 ql_log(ql_log_info, vha, 0x00c0, 1299 ql_dbg(ql_dbg_init, vha, 0x00c0,
1298 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024); 1300 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
1299 1301
1300 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; 1302 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
@@ -1321,7 +1323,7 @@ try_eft:
1321 tc_dma); 1323 tc_dma);
1322 goto cont_alloc; 1324 goto cont_alloc;
1323 } 1325 }
1324 ql_log(ql_log_info, vha, 0x00c3, 1326 ql_dbg(ql_dbg_init, vha, 0x00c3,
1325 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); 1327 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
1326 1328
1327 eft_size = EFT_SIZE; 1329 eft_size = EFT_SIZE;
@@ -1358,7 +1360,7 @@ cont_alloc:
1358 } 1360 }
1359 return; 1361 return;
1360 } 1362 }
1361 ql_log(ql_log_info, vha, 0x00c5, 1363 ql_dbg(ql_dbg_init, vha, 0x00c5,
1362 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024); 1364 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
1363 1365
1364 ha->fw_dump_len = dump_size; 1366 ha->fw_dump_len = dump_size;
@@ -1929,7 +1931,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1929 rval = qla84xx_init_chip(vha); 1931 rval = qla84xx_init_chip(vha);
1930 if (rval != QLA_SUCCESS) { 1932 if (rval != QLA_SUCCESS) {
1931 ql_log(ql_log_warn, 1933 ql_log(ql_log_warn,
1932 vha, 0x8026, 1934 vha, 0x8007,
1933 "Init chip failed.\n"); 1935 "Init chip failed.\n");
1934 break; 1936 break;
1935 } 1937 }
@@ -1938,7 +1940,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1938 cs84xx_time = jiffies - cs84xx_time; 1940 cs84xx_time = jiffies - cs84xx_time;
1939 wtime += cs84xx_time; 1941 wtime += cs84xx_time;
1940 mtime += cs84xx_time; 1942 mtime += cs84xx_time;
1941 ql_dbg(ql_dbg_taskm, vha, 0x8025, 1943 ql_dbg(ql_dbg_taskm, vha, 0x8008,
1942 "Increasing wait time by %ld. " 1944 "Increasing wait time by %ld. "
1943 "New time %ld.\n", cs84xx_time, 1945 "New time %ld.\n", cs84xx_time,
1944 wtime); 1946 wtime);
@@ -1981,16 +1983,13 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1981 1983
1982 /* Delay for a while */ 1984 /* Delay for a while */
1983 msleep(500); 1985 msleep(500);
1984
1985 ql_dbg(ql_dbg_taskm, vha, 0x8039,
1986 "fw_state=%x curr time=%lx.\n", state[0], jiffies);
1987 } while (1); 1986 } while (1);
1988 1987
1989 ql_dbg(ql_dbg_taskm, vha, 0x803a, 1988 ql_dbg(ql_dbg_taskm, vha, 0x803a,
1990 "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0], 1989 "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
1991 state[1], state[2], state[3], state[4], jiffies); 1990 state[1], state[2], state[3], state[4], jiffies);
1992 1991
1993 if (rval) { 1992 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
1994 ql_log(ql_log_warn, vha, 0x803b, 1993 ql_log(ql_log_warn, vha, 0x803b,
1995 "Firmware ready **** FAILED ****.\n"); 1994 "Firmware ready **** FAILED ****.\n");
1996 } 1995 }
@@ -2386,7 +2385,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2386 * internal driver logging. 2385 * internal driver logging.
2387 */ 2386 */
2388 if (nv->host_p[0] & BIT_7) 2387 if (nv->host_p[0] & BIT_7)
2389 ql2xextended_error_logging = 0x7fffffff; 2388 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
2390 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); 2389 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2391 /* Always load RISC code on non ISP2[12]00 chips. */ 2390 /* Always load RISC code on non ISP2[12]00 chips. */
2392 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) 2391 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -4188,7 +4187,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4188 spin_unlock_irqrestore(&ha->vport_slock, flags); 4187 spin_unlock_irqrestore(&ha->vport_slock, flags);
4189 4188
4190 } else { 4189 } else {
4191 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n"); 4190 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
4191 __func__);
4192 } 4192 }
4193 4193
4194 return(status); 4194 return(status);
@@ -4638,7 +4638,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4638 struct req_que *req = ha->req_q_map[0]; 4638 struct req_que *req = ha->req_q_map[0];
4639 4639
4640 ql_dbg(ql_dbg_init, vha, 0x008b, 4640 ql_dbg(ql_dbg_init, vha, 0x008b,
4641 "Loading firmware from flash (%x).\n", faddr); 4641 "FW: Loading firmware from flash (%x).\n", faddr);
4642 4642
4643 rval = QLA_SUCCESS; 4643 rval = QLA_SUCCESS;
4644 4644
@@ -4836,8 +4836,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4836 return QLA_FUNCTION_FAILED; 4836 return QLA_FUNCTION_FAILED;
4837 } 4837 }
4838 4838
4839 ql_log(ql_log_info, vha, 0x0092, 4839 ql_dbg(ql_dbg_init, vha, 0x0092,
4840 "Loading via request-firmware.\n"); 4840 "FW: Loading via request-firmware.\n");
4841 4841
4842 rval = QLA_SUCCESS; 4842 rval = QLA_SUCCESS;
4843 4843
@@ -5425,7 +5425,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5425 if ((vha->device_flags & DFLG_NO_CABLE)) 5425 if ((vha->device_flags & DFLG_NO_CABLE))
5426 status = 0; 5426 status = 0;
5427 5427
5428 ql_log(ql_log_info, vha, 0x803d, 5428 ql_log(ql_log_info, vha, 0x8000,
5429 "Configure loop done, status = 0x%x.\n", status); 5429 "Configure loop done, status = 0x%x.\n", status);
5430 } 5430 }
5431 5431
@@ -5458,7 +5458,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5458 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 5458 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5459 &ha->fce_bufs); 5459 &ha->fce_bufs);
5460 if (rval) { 5460 if (rval) {
5461 ql_log(ql_log_warn, vha, 0x803e, 5461 ql_log(ql_log_warn, vha, 0x8001,
5462 "Unable to reinitialize FCE (%d).\n", 5462 "Unable to reinitialize FCE (%d).\n",
5463 rval); 5463 rval);
5464 ha->flags.fce_enabled = 0; 5464 ha->flags.fce_enabled = 0;
@@ -5470,7 +5470,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5470 rval = qla2x00_enable_eft_trace(vha, 5470 rval = qla2x00_enable_eft_trace(vha,
5471 ha->eft_dma, EFT_NUM_BUFFERS); 5471 ha->eft_dma, EFT_NUM_BUFFERS);
5472 if (rval) { 5472 if (rval) {
5473 ql_log(ql_log_warn, vha, 0x803f, 5473 ql_log(ql_log_warn, vha, 0x8010,
5474 "Unable to reinitialize EFT (%d).\n", 5474 "Unable to reinitialize EFT (%d).\n",
5475 rval); 5475 rval);
5476 } 5476 }
@@ -5478,7 +5478,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5478 } 5478 }
5479 5479
5480 if (!status) { 5480 if (!status) {
5481 ql_dbg(ql_dbg_taskm, vha, 0x8040, 5481 ql_dbg(ql_dbg_taskm, vha, 0x8011,
5482 "qla82xx_restart_isp succeeded.\n"); 5482 "qla82xx_restart_isp succeeded.\n");
5483 5483
5484 spin_lock_irqsave(&ha->vport_slock, flags); 5484 spin_lock_irqsave(&ha->vport_slock, flags);
@@ -5496,7 +5496,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5496 spin_unlock_irqrestore(&ha->vport_slock, flags); 5496 spin_unlock_irqrestore(&ha->vport_slock, flags);
5497 5497
5498 } else { 5498 } else {
5499 ql_log(ql_log_warn, vha, 0x8041, 5499 ql_log(ql_log_warn, vha, 0x8016,
5500 "qla82xx_restart_isp **** FAILED ****.\n"); 5500 "qla82xx_restart_isp **** FAILED ****.\n");
5501 } 5501 }
5502 5502
@@ -5643,13 +5643,26 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5643 if (priority < 0) 5643 if (priority < 0)
5644 return QLA_FUNCTION_FAILED; 5644 return QLA_FUNCTION_FAILED;
5645 5645
5646 if (IS_QLA82XX(vha->hw)) {
5647 fcport->fcp_prio = priority & 0xf;
5648 return QLA_SUCCESS;
5649 }
5650
5646 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); 5651 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
5647 if (ret == QLA_SUCCESS) 5652 if (ret == QLA_SUCCESS) {
5648 fcport->fcp_prio = priority; 5653 if (fcport->fcp_prio != priority)
5649 else 5654 ql_dbg(ql_dbg_user, vha, 0x709e,
5655 "Updated FCP_CMND priority - value=%d loop_id=%d "
5656 "port_id=%02x%02x%02x.\n", priority,
5657 fcport->loop_id, fcport->d_id.b.domain,
5658 fcport->d_id.b.area, fcport->d_id.b.al_pa);
5659 fcport->fcp_prio = priority & 0xf;
5660 } else
5650 ql_dbg(ql_dbg_user, vha, 0x704f, 5661 ql_dbg(ql_dbg_user, vha, 0x704f,
5651 "Unable to activate fcp priority, ret=0x%x.\n", ret); 5662 "Unable to update FCP_CMND priority - ret=0x%x for "
5652 5663 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
5664 fcport->d_id.b.domain, fcport->d_id.b.area,
5665 fcport->d_id.b.al_pa);
5653 return ret; 5666 return ret;
5654} 5667}
5655 5668
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a4b267e60a35..55a96761b5a4 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,6 @@
11 11
12#include <scsi/scsi_tcq.h> 12#include <scsi/scsi_tcq.h>
13 13
14static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
15
16static void qla25xx_set_que(srb_t *, struct rsp_que **); 14static void qla25xx_set_que(srb_t *, struct rsp_que **);
17/** 15/**
18 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -468,6 +466,42 @@ queuing_error:
468} 466}
469 467
470/** 468/**
469 * qla2x00_start_iocbs() - Execute the IOCB command
470 */
471static void
472qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
473{
474 struct qla_hw_data *ha = vha->hw;
475 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
476 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
477
478 if (IS_QLA82XX(ha)) {
479 qla82xx_start_iocbs(vha);
480 } else {
481 /* Adjust ring index. */
482 req->ring_index++;
483 if (req->ring_index == req->length) {
484 req->ring_index = 0;
485 req->ring_ptr = req->ring;
486 } else
487 req->ring_ptr++;
488
489 /* Set chip new ring index. */
490 if (ha->mqenable) {
491 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
492 RD_REG_DWORD(&ioreg->hccr);
493 } else if (IS_FWI2_CAPABLE(ha)) {
494 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
495 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
496 } else {
497 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
498 req->ring_index);
499 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
500 }
501 }
502}
503
504/**
471 * qla2x00_marker() - Send a marker IOCB to the firmware. 505 * qla2x00_marker() - Send a marker IOCB to the firmware.
472 * @ha: HA context 506 * @ha: HA context
473 * @loop_id: loop ID 507 * @loop_id: loop ID
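The new qla2x00_start_iocbs() consolidates the ring bookkeeping that used to live at the bottom of qla2x00_isp_cmd(): advance the producer index with wraparound, then publish it through whichever register the hardware generation uses (82xx doorbell, multiqueue, FWI2, or legacy). The producer step is the classic circular-buffer advance; a sketch, with the FWI2 branch as the example:

/* Sketch of the producer-side ring advance performed above. */
req->ring_index++;
if (req->ring_index == req->length) {	/* wrap at end of ring */
	req->ring_index = 0;
	req->ring_ptr = req->ring;
} else
	req->ring_ptr++;

WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);	/* flush the posted write */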
@@ -489,6 +523,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
489 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 523 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
490 524
491 mrk24 = NULL; 525 mrk24 = NULL;
526 req = ha->req_q_map[0];
492 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0); 527 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
493 if (mrk == NULL) { 528 if (mrk == NULL) {
494 ql_log(ql_log_warn, base_vha, 0x3026, 529 ql_log(ql_log_warn, base_vha, 0x3026,
@@ -515,7 +550,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
515 } 550 }
516 wmb(); 551 wmb();
517 552
518 qla2x00_isp_cmd(vha, req); 553 qla2x00_start_iocbs(vha, req);
519 554
520 return (QLA_SUCCESS); 555 return (QLA_SUCCESS);
521} 556}
@@ -536,89 +571,140 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
536} 571}
537 572
538/** 573/**
539 * qla2x00_isp_cmd() - Modify the request ring pointer. 574 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
540 * @ha: HA context 575 * Continuation Type 1 IOCBs to allocate.
576 *
577 * @dsds: number of data segment decriptors needed
541 * 578 *
542 * Note: The caller must hold the hardware lock before calling this routine. 579 * Returns the number of IOCB entries needed to store @dsds.
543 */ 580 */
544static void 581inline uint16_t
545qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req) 582qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
546{ 583{
547 struct qla_hw_data *ha = vha->hw; 584 uint16_t iocbs;
548 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
549 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
550 585
551 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d, 586 iocbs = 1;
552 "IOCB data:\n"); 587 if (dsds > 1) {
553 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e, 588 iocbs += (dsds - 1) / 5;
554 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE); 589 if ((dsds - 1) % 5)
590 iocbs++;
591 }
592 return iocbs;
593}
555 594
556 /* Adjust ring index. */ 595static inline int
557 req->ring_index++; 596qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
558 if (req->ring_index == req->length) { 597 uint16_t tot_dsds)
559 req->ring_index = 0; 598{
560 req->ring_ptr = req->ring; 599 uint32_t *cur_dsd = NULL;
561 } else 600 scsi_qla_host_t *vha;
562 req->ring_ptr++; 601 struct qla_hw_data *ha;
602 struct scsi_cmnd *cmd;
603 struct scatterlist *cur_seg;
604 uint32_t *dsd_seg;
605 void *next_dsd;
606 uint8_t avail_dsds;
607 uint8_t first_iocb = 1;
608 uint32_t dsd_list_len;
609 struct dsd_dma *dsd_ptr;
610 struct ct6_dsd *ctx;
563 611
564 /* Set chip new ring index. */ 612 cmd = sp->cmd;
565 if (IS_QLA82XX(ha)) {
566 uint32_t dbval = 0x04 | (ha->portnum << 5);
567 613
568 /* write, read and verify logic */ 614 /* Update entry type to indicate Command Type 3 IOCB */
569 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 615 *((uint32_t *)(&cmd_pkt->entry_type)) =
570 if (ql2xdbwr) 616 __constant_cpu_to_le32(COMMAND_TYPE_6);
571 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); 617
572 else { 618 /* No data transfer */
573 WRT_REG_DWORD( 619 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
574 (unsigned long __iomem *)ha->nxdb_wr_ptr, 620 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
575 dbval); 621 return 0;
576 wmb(); 622 }
577 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { 623
578 WRT_REG_DWORD((unsigned long __iomem *) 624 vha = sp->fcport->vha;
579 ha->nxdb_wr_ptr, dbval); 625 ha = vha->hw;
580 wmb(); 626
581 } 627 /* Set transfer direction */
582 } 628 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
583 } else if (ha->mqenable) { 629 cmd_pkt->control_flags =
584 /* Set chip new ring index. */ 630 __constant_cpu_to_le16(CF_WRITE_DATA);
585 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index); 631 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
586 RD_REG_DWORD(&ioreg->hccr); 632 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
587 } else { 633 cmd_pkt->control_flags =
588 if (IS_FWI2_CAPABLE(ha)) { 634 __constant_cpu_to_le16(CF_READ_DATA);
589 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index); 635 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
590 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in); 636 }
637
638 cur_seg = scsi_sglist(cmd);
639 ctx = sp->ctx;
640
641 while (tot_dsds) {
642 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
643 QLA_DSDS_PER_IOCB : tot_dsds;
644 tot_dsds -= avail_dsds;
645 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
646
647 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
648 struct dsd_dma, list);
649 next_dsd = dsd_ptr->dsd_addr;
650 list_del(&dsd_ptr->list);
651 ha->gbl_dsd_avail--;
652 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
653 ctx->dsd_use_cnt++;
654 ha->gbl_dsd_inuse++;
655
656 if (first_iocb) {
657 first_iocb = 0;
658 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
659 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
660 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
661 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
591 } else { 662 } else {
592 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), 663 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
593 req->ring_index); 664 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
594 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp)); 665 *cur_dsd++ = cpu_to_le32(dsd_list_len);
666 }
667 cur_dsd = (uint32_t *)next_dsd;
668 while (avail_dsds) {
669 dma_addr_t sle_dma;
670
671 sle_dma = sg_dma_address(cur_seg);
672 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
673 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
674 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
675 cur_seg = sg_next(cur_seg);
676 avail_dsds--;
595 } 677 }
596 } 678 }
597 679
680 /* Null termination */
681 *cur_dsd++ = 0;
682 *cur_dsd++ = 0;
683 *cur_dsd++ = 0;
684 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
685 return 0;
598} 686}
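The Command Type 6 builder above keeps the scatter/gather map off the request ring: the IOCB itself carries only the address and length of the first DSD list, each list holds up to QLA_DSDS_PER_IOCB three-dword data descriptors plus one extra slot that either links to the next list or holds the zeroed terminator -- which is why dsd_list_len is computed as (avail_dsds + 1) * QLA_DSD_SIZE. A rough view of one list, with hypothetical struct names standing in for the flat stream of little-endian dwords the driver actually writes:

	#include <stdint.h>

	/* Hypothetical layout of one chained DSD list; the driver emits
	 * these as a flat dword stream rather than structs. */
	struct dsd_entry {
		uint32_t addr_lo;	/* cpu_to_le32(LSD(dma)) */
		uint32_t addr_hi;	/* cpu_to_le32(MSD(dma)) */
		uint32_t len;		/* segment length, or next list's byte length */
	};

	struct dsd_list_view {
		struct dsd_entry seg[37 /* QLA_DSDS_PER_IOCB, assumed */];
		struct dsd_entry link;	/* next list in the chain; all zeroes on the last */
	};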
599 687
600/** 688/*
601 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and 689 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
602 * Continuation Type 1 IOCBs to allocate. 690 * for Command Type 6.
603 * 691 *
604 * @dsds: number of data segment descriptors needed 692 * @dsds: number of data segment descriptors needed
605 * 693 *
606 * Returns the number of IOCB entries needed to store @dsds. 694 * Returns the number of DSD lists needed to store @dsds.
607 */ 695 */
608inline uint16_t 696inline uint16_t
609qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds) 697qla24xx_calc_dsd_lists(uint16_t dsds)
610{ 698{
611 uint16_t iocbs; 699 uint16_t dsd_lists = 0;
612 700
613 iocbs = 1; 701 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
614 if (dsds > 1) { 702 if (dsds % QLA_DSDS_PER_IOCB)
615 iocbs += (dsds - 1) / 5; 703 dsd_lists++;
616 if ((dsds - 1) % 5) 704 return dsd_lists;
617 iocbs++;
618 }
619 return iocbs;
620} 705}
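qla24xx_calc_dsd_lists() is plain ceiling division: dsds descriptors at QLA_DSDS_PER_IOCB per list need ceil(dsds / QLA_DSDS_PER_IOCB) lists, so for example 38 descriptors at 37 per list need two. The divide-then-remainder form above is equivalent to the usual one-liner; a sketch for illustration (the 37 is an assumption, use the driver's own constant):

	#include <stdint.h>

	#define DSDS_PER_LIST 37	/* stand-in for QLA_DSDS_PER_IOCB */

	/* Same result as the divide-then-test form for any dsds that
	 * does not overflow uint16_t. */
	static inline uint16_t calc_dsd_lists_sketch(uint16_t dsds)
	{
		return (dsds + DSDS_PER_LIST - 1) / DSDS_PER_LIST;
	}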
621 706
707
622/** 708/**
623 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7 709 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
624 * IOCB types. 710 * IOCB types.
@@ -945,6 +1031,7 @@ alloc_and_fill:
945 *cur_dsd++ = 0; 1031 *cur_dsd++ = 0;
946 return 0; 1032 return 0;
947} 1033}
1034
948static int 1035static int
949qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, 1036qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
950 uint16_t tot_dsds) 1037 uint16_t tot_dsds)
@@ -1004,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1004 sle_dma = sg_dma_address(sg); 1091 sle_dma = sg_dma_address(sg);
1005 ql_dbg(ql_dbg_io, vha, 0x300a, 1092 ql_dbg(ql_dbg_io, vha, 0x300a,
1006 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n", 1093 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
1007 cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), 1094 i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
1008 sp->cmd); 1095 sp->cmd);
1009 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 1096 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1010 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 1097 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -1731,6 +1818,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1731 uint32_t index, handle; 1818 uint32_t index, handle;
1732 request_t *pkt; 1819 request_t *pkt;
1733 uint16_t cnt, req_cnt; 1820 uint16_t cnt, req_cnt;
1821 struct srb_ctx *ctx;
1734 1822
1735 pkt = NULL; 1823 pkt = NULL;
1736 req_cnt = 1; 1824 req_cnt = 1;
@@ -1759,6 +1847,12 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1759 req->outstanding_cmds[handle] = sp; 1847 req->outstanding_cmds[handle] = sp;
1760 sp->handle = handle; 1848 sp->handle = handle;
1761 1849
1850 /* Adjust entry-counts as needed. */
1851 if (sp->ctx) {
1852 ctx = sp->ctx;
1853 req_cnt = ctx->iocbs;
1854 }
1855
1762skip_cmd_array: 1856skip_cmd_array:
1763 /* Check for room on request queue. */ 1857 /* Check for room on request queue. */
1764 if (req->cnt < req_cnt) { 1858 if (req->cnt < req_cnt) {
@@ -1793,42 +1887,6 @@ queuing_error:
1793} 1887}
1794 1888
1795static void 1889static void
1796qla2x00_start_iocbs(srb_t *sp)
1797{
1798 struct qla_hw_data *ha = sp->fcport->vha->hw;
1799 struct req_que *req = ha->req_q_map[0];
1800 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1801 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1802
1803 if (IS_QLA82XX(ha)) {
1804 qla82xx_start_iocbs(sp);
1805 } else {
1806 /* Adjust ring index. */
1807 req->ring_index++;
1808 if (req->ring_index == req->length) {
1809 req->ring_index = 0;
1810 req->ring_ptr = req->ring;
1811 } else
1812 req->ring_ptr++;
1813
1814 /* Set chip new ring index. */
1815 if (ha->mqenable) {
1816 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
1817 RD_REG_DWORD(&ioreg->hccr);
1818 } else if (IS_QLA82XX(ha)) {
1819 qla82xx_start_iocbs(sp);
1820 } else if (IS_FWI2_CAPABLE(ha)) {
1821 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
1822 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
1823 } else {
1824 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
1825 req->ring_index);
1826 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
1827 }
1828 }
1829}
1830
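The per-sp qla2x00_start_iocbs() wrapper removed here is folded into a shared helper; as the qla2x00_start_sp() hunk at the end of this file shows, callers now name the vha and request queue explicitly. Inferred from that call site (a prototype sketch, not the literal declaration):

	void qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req);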
1831static void
1832qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) 1890qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1833{ 1891{
1834 struct srb_ctx *ctx = sp->ctx; 1892 struct srb_ctx *ctx = sp->ctx;
@@ -2160,6 +2218,381 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2160 ct_iocb->entry_count = entry_count; 2218 ct_iocb->entry_count = entry_count;
2161} 2219}
2162 2220
2221/*
2222 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2223 * @sp: command to send to the ISP
2224 *
2225 * Returns non-zero if a failure occurred, else zero.
2226 */
2227int
2228qla82xx_start_scsi(srb_t *sp)
2229{
2230 int ret, nseg;
2231 unsigned long flags;
2232 struct scsi_cmnd *cmd;
2233 uint32_t *clr_ptr;
2234 uint32_t index;
2235 uint32_t handle;
2236 uint16_t cnt;
2237 uint16_t req_cnt;
2238 uint16_t tot_dsds;
2239 struct device_reg_82xx __iomem *reg;
2240 uint32_t dbval;
2241 uint32_t *fcp_dl;
2242 uint8_t additional_cdb_len;
2243 struct ct6_dsd *ctx;
2244 struct scsi_qla_host *vha = sp->fcport->vha;
2245 struct qla_hw_data *ha = vha->hw;
2246 struct req_que *req = NULL;
2247 struct rsp_que *rsp = NULL;
2248 char tag[2];
2249
2250 /* Setup device pointers. */
2251 ret = 0;
2252 reg = &ha->iobase->isp82;
2253 cmd = sp->cmd;
2254 req = vha->req;
2255 rsp = ha->rsp_q_map[0];
2256
2257 /* So we know we haven't pci_map'ed anything yet */
2258 tot_dsds = 0;
2259
2260 dbval = 0x04 | (ha->portnum << 5);
2261
2262 /* Send marker if required */
2263 if (vha->marker_needed != 0) {
2264 if (qla2x00_marker(vha, req,
2265 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2266 ql_log(ql_log_warn, vha, 0x300c,
2267 "qla2x00_marker failed for cmd=%p.\n", cmd);
2268 return QLA_FUNCTION_FAILED;
2269 }
2270 vha->marker_needed = 0;
2271 }
2272
2273 /* Acquire ring specific lock */
2274 spin_lock_irqsave(&ha->hardware_lock, flags);
2275
2276 /* Check for room in outstanding command list. */
2277 handle = req->current_outstanding_cmd;
2278 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2279 handle++;
2280 if (handle == MAX_OUTSTANDING_COMMANDS)
2281 handle = 1;
2282 if (!req->outstanding_cmds[handle])
2283 break;
2284 }
2285 if (index == MAX_OUTSTANDING_COMMANDS)
2286 goto queuing_error;
2287
2288 /* Map the sg table so we have an accurate count of sg entries needed */
2289 if (scsi_sg_count(cmd)) {
2290 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2291 scsi_sg_count(cmd), cmd->sc_data_direction);
2292 if (unlikely(!nseg))
2293 goto queuing_error;
2294 } else
2295 nseg = 0;
2296
2297 tot_dsds = nseg;
2298
2299 if (tot_dsds > ql2xshiftctondsd) {
2300 struct cmd_type_6 *cmd_pkt;
2301 uint16_t more_dsd_lists = 0;
2302 struct dsd_dma *dsd_ptr;
2303 uint16_t i;
2304
2305 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2306 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2307 ql_dbg(ql_dbg_io, vha, 0x300d,
2309 "Num of DSD lists %d is more than %d for cmd=%p.\n",
2309 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2310 cmd);
2311 goto queuing_error;
2312 }
2313
2314 if (more_dsd_lists <= ha->gbl_dsd_avail)
2315 goto sufficient_dsds;
2316 else
2317 more_dsd_lists -= ha->gbl_dsd_avail;
2318
2319 for (i = 0; i < more_dsd_lists; i++) {
2320 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2321 if (!dsd_ptr) {
2322 ql_log(ql_log_fatal, vha, 0x300e,
2323 "Failed to allocate memory for dsd_dma "
2324 "for cmd=%p.\n", cmd);
2325 goto queuing_error;
2326 }
2327
2328 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2329 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2330 if (!dsd_ptr->dsd_addr) {
2331 kfree(dsd_ptr);
2332 ql_log(ql_log_fatal, vha, 0x300f,
2333 "Failed to allocate memory for dsd_addr "
2334 "for cmd=%p.\n", cmd);
2335 goto queuing_error;
2336 }
2337 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2338 ha->gbl_dsd_avail++;
2339 }
2340
2341sufficient_dsds:
2342 req_cnt = 1;
2343
2344 if (req->cnt < (req_cnt + 2)) {
2345 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2346 &reg->req_q_out[0]);
2347 if (req->ring_index < cnt)
2348 req->cnt = cnt - req->ring_index;
2349 else
2350 req->cnt = req->length -
2351 (req->ring_index - cnt);
2352 }
2353
2354 if (req->cnt < (req_cnt + 2))
2355 goto queuing_error;
2356
2357 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2358 if (!sp->ctx) {
2359 ql_log(ql_log_fatal, vha, 0x3010,
2360 "Failed to allocate ctx for cmd=%p.\n", cmd);
2361 goto queuing_error;
2362 }
2363 memset(ctx, 0, sizeof(struct ct6_dsd));
2364 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2365 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2366 if (!ctx->fcp_cmnd) {
2367 ql_log(ql_log_fatal, vha, 0x3011,
2368 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2369 goto queuing_error_fcp_cmnd;
2370 }
2371
2372 /* Initialize the DSD list and dma handle */
2373 INIT_LIST_HEAD(&ctx->dsd_list);
2374 ctx->dsd_use_cnt = 0;
2375
2376 if (cmd->cmd_len > 16) {
2377 additional_cdb_len = cmd->cmd_len - 16;
2378 if ((cmd->cmd_len % 4) != 0) {
2379 /* SCSI command bigger than 16 bytes must be
2380 * a multiple of 4
2381 */
2382 ql_log(ql_log_warn, vha, 0x3012,
2383 "scsi cmd len %d not multiple of 4 "
2384 "for cmd=%p.\n", cmd->cmd_len, cmd);
2385 goto queuing_error_fcp_cmnd;
2386 }
2387 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2388 } else {
2389 additional_cdb_len = 0;
2390 ctx->fcp_cmnd_len = 12 + 16 + 4;
2391 }
2392
2393 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2394 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2395
2396 /* Zero out remaining portion of packet. */
2397 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2398 clr_ptr = (uint32_t *)cmd_pkt + 2;
2399 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2400 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2401
2402 /* Set NPORT-ID and LUN number*/
2403 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2404 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2405 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2406 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2407 cmd_pkt->vp_index = sp->fcport->vp_idx;
2408
2409 /* Build IOCB segments */
2410 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2411 goto queuing_error_fcp_cmnd;
2412
2413 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2414 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2415
2416 /* build FCP_CMND IU */
2417 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2418 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2419 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2420
2421 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2422 ctx->fcp_cmnd->additional_cdb_len |= 1;
2423 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2424 ctx->fcp_cmnd->additional_cdb_len |= 2;
2425
2426 /*
2427 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2428 */
2429 if (scsi_populate_tag_msg(cmd, tag)) {
2430 switch (tag[0]) {
2431 case HEAD_OF_QUEUE_TAG:
2432 ctx->fcp_cmnd->task_attribute =
2433 TSK_HEAD_OF_QUEUE;
2434 break;
2435 case ORDERED_QUEUE_TAG:
2436 ctx->fcp_cmnd->task_attribute =
2437 TSK_ORDERED;
2438 break;
2439 }
2440 }
2441
2442 /* Populate the FCP_PRIO. */
2443 if (ha->flags.fcp_prio_enabled)
2444 ctx->fcp_cmnd->task_attribute |=
2445 sp->fcport->fcp_prio << 3;
2446
2447 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2448
2449 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2450 additional_cdb_len);
2451 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2452
2453 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2454 cmd_pkt->fcp_cmnd_dseg_address[0] =
2455 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2456 cmd_pkt->fcp_cmnd_dseg_address[1] =
2457 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2458
2459 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2460 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2461 /* Set total data segment count. */
2462 cmd_pkt->entry_count = (uint8_t)req_cnt;
2463 /* Specify response queue number where
2464 * completion should happen
2465 */
2466 cmd_pkt->entry_status = (uint8_t) rsp->id;
2467 } else {
2468 struct cmd_type_7 *cmd_pkt;
2469 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2470 if (req->cnt < (req_cnt + 2)) {
2471 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2472 &reg->req_q_out[0]);
2473 if (req->ring_index < cnt)
2474 req->cnt = cnt - req->ring_index;
2475 else
2476 req->cnt = req->length -
2477 (req->ring_index - cnt);
2478 }
2479 if (req->cnt < (req_cnt + 2))
2480 goto queuing_error;
2481
2482 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2483 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2484
2485 /* Zero out remaining portion of packet. */
2486 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2487 clr_ptr = (uint32_t *)cmd_pkt + 2;
2488 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2489 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2490
2491 /* Set NPORT-ID and LUN number*/
2492 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2493 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2494 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2495 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2496 cmd_pkt->vp_index = sp->fcport->vp_idx;
2497
2498 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2499 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2500 sizeof(cmd_pkt->lun));
2501
2502 /*
2503 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2504 */
2505 if (scsi_populate_tag_msg(cmd, tag)) {
2506 switch (tag[0]) {
2507 case HEAD_OF_QUEUE_TAG:
2508 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2509 break;
2510 case ORDERED_QUEUE_TAG:
2511 cmd_pkt->task = TSK_ORDERED;
2512 break;
2513 }
2514 }
2515
2516 /* Populate the FCP_PRIO. */
2517 if (ha->flags.fcp_prio_enabled)
2518 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2519
2520 /* Load SCSI command packet. */
2521 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2522 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2523
2524 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2525
2526 /* Build IOCB segments */
2527 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2528
2529 /* Set total data segment count. */
2530 cmd_pkt->entry_count = (uint8_t)req_cnt;
2531 /* Specify response queue number where
2532 * completion should happen.
2533 */
2534 cmd_pkt->entry_status = (uint8_t) rsp->id;
2535
2536 }
2537 /* Build command packet. */
2538 req->current_outstanding_cmd = handle;
2539 req->outstanding_cmds[handle] = sp;
2540 sp->handle = handle;
2541 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2542 req->cnt -= req_cnt;
2543 wmb();
2544
2545 /* Adjust ring index. */
2546 req->ring_index++;
2547 if (req->ring_index == req->length) {
2548 req->ring_index = 0;
2549 req->ring_ptr = req->ring;
2550 } else
2551 req->ring_ptr++;
2552
2553 sp->flags |= SRB_DMA_VALID;
2554
2555 /* Set chip new ring index. */
2556 /* write, read and verify logic */
2557 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2558 if (ql2xdbwr)
2559 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2560 else {
2561 WRT_REG_DWORD(
2562 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2563 dbval);
2564 wmb();
2565 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2566 WRT_REG_DWORD(
2567 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2568 dbval);
2569 wmb();
2570 }
2571 }
2572
2573 /* Manage unprocessed RIO/ZIO commands in response queue. */
2574 if (vha->flags.process_response_queue &&
2575 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2576 qla24xx_process_response_queue(vha, rsp);
2577
2578 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2579 return QLA_SUCCESS;
2580
2581queuing_error_fcp_cmnd:
2582 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2583queuing_error:
2584 if (tot_dsds)
2585 scsi_dma_unmap(cmd);
2586
2587 if (sp->ctx) {
2588 mempool_free(sp->ctx, ha->ctx_mempool);
2589 sp->ctx = NULL;
2590 }
2591 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2592
2593 return QLA_FUNCTION_FAILED;
2594}
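Two details of the FCP_CMND IU built in the Command Type 6 path above are easy to miss: the low two bits of additional_cdb_len double as direction flags (bit 0 set for a write, bit 1 for a read), and fcp_cmnd_len counts 12 header bytes (the 8-byte LUN plus task-attribute and control bytes), the CDB (16 bytes minimum; longer CDBs must already be a multiple of 4), and the 4-byte FCP_DL field that *fcp_dl = htonl(scsi_bufflen(cmd)) fills in afterwards. A sketch of the length math only, not the driver's API:

	#include <stdint.h>

	/* fcp_cmnd_len as computed above: header + CDB + FCP_DL. */
	static uint16_t fcp_cmnd_iu_len_sketch(uint8_t cmd_len)
	{
		/* Caller has already rejected cmd_len > 16 that is not 4-aligned. */
		return 12 + (cmd_len > 16 ? cmd_len : 16) + 4;
	}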
2595
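The tail of qla82xx_start_scsi() also shows the ISP82xx doorbell convention: dbval packs 0x04 | (portnum << 5) | (req->id << 8) | (ring_index << 16), and unless ql2xdbwr routes the write through qla82xx_wr_32(), the driver must spin until the firmware's readback pointer echoes the value, reposting on every mismatch. Condensed into a helper for illustration only; the driver open-codes it as above, using its own register accessors:

	/* Assumes the driver's WRT_REG_DWORD/RD_REG_DWORD accessors. */
	static void nx_ring_doorbell_sketch(struct qla_hw_data *ha, uint32_t dbval)
	{
		WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			/* Firmware has not consumed the doorbell yet; repost. */
			WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
			wmb();
		}
	}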
2163int 2596int
2164qla2x00_start_sp(srb_t *sp) 2597qla2x00_start_sp(srb_t *sp)
2165{ 2598{
@@ -2196,8 +2629,8 @@ qla2x00_start_sp(srb_t *sp)
2196 break; 2629 break;
2197 case SRB_CT_CMD: 2630 case SRB_CT_CMD:
2198 IS_FWI2_CAPABLE(ha) ? 2631 IS_FWI2_CAPABLE(ha) ?
2199 qla24xx_ct_iocb(sp, pkt) : 2632 qla24xx_ct_iocb(sp, pkt) :
2200 qla2x00_ct_iocb(sp, pkt); 2633 qla2x00_ct_iocb(sp, pkt);
2201 break; 2634 break;
2202 case SRB_ADISC_CMD: 2635 case SRB_ADISC_CMD:
2203 IS_FWI2_CAPABLE(ha) ? 2636 IS_FWI2_CAPABLE(ha) ?
@@ -2212,7 +2645,7 @@ qla2x00_start_sp(srb_t *sp)
2212 } 2645 }
2213 2646
2214 wmb(); 2647 wmb();
2215 qla2x00_start_iocbs(sp); 2648 qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2216done: 2649done:
2217 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2650 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2218 return rval; 2651 return rval;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7b91b290ffd6..e804585cc59c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -242,32 +242,34 @@ static void
242qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 242qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
243{ 243{
244 uint16_t cnt; 244 uint16_t cnt;
245 uint32_t mboxes;
245 uint16_t __iomem *wptr; 246 uint16_t __iomem *wptr;
246 struct qla_hw_data *ha = vha->hw; 247 struct qla_hw_data *ha = vha->hw;
247 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 248 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
248 249
250 /* Read all mbox registers? */
251 mboxes = (1 << ha->mbx_count) - 1;
252 if (!ha->mcp)
253 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
254 else
255 mboxes = ha->mcp->in_mb;
256
249 /* Load return mailbox registers. */ 257 /* Load return mailbox registers. */
250 ha->flags.mbox_int = 1; 258 ha->flags.mbox_int = 1;
251 ha->mailbox_out[0] = mb0; 259 ha->mailbox_out[0] = mb0;
260 mboxes >>= 1;
252 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); 261 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
253 262
254 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 263 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
255 if (IS_QLA2200(ha) && cnt == 8) 264 if (IS_QLA2200(ha) && cnt == 8)
256 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); 265 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
257 if (cnt == 4 || cnt == 5) 266 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
258 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 267 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
259 else 268 else if (mboxes & BIT_0)
260 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 269 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
261 270
262 wptr++; 271 wptr++;
263 } 272 mboxes >>= 1;
264
265 if (ha->mcp) {
266 ql_dbg(ql_dbg_async, vha, 0x5000,
267 "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
268 } else {
269 ql_dbg(ql_dbg_async, vha, 0x5001,
270 "MBX pointer ERROR.\n");
271 } 273 }
272} 274}
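The mailbox-completion rework reads only the registers the outstanding command asked for: mcp->in_mb is a bitmask with bit N set when mailbox N carries output, the pre-loop shift consumes mailbox 0's bit (mb0 is always stored), and each iteration tests BIT_0 before advancing. The selection logic in isolation (a hypothetical helper, not the driver's API):

	/* Read mailbox registers 1..count-1, skipping those not requested. */
	static void read_selected_mboxes_sketch(uint16_t *out,
		uint16_t __iomem *wptr, uint32_t mboxes, int count)
	{
		int cnt;

		mboxes >>= 1;			/* mailbox 0 already handled */
		for (cnt = 1; cnt < count; cnt++) {
			if (mboxes & BIT_0)
				out[cnt] = RD_REG_WORD(wptr);
			wptr++;
			mboxes >>= 1;
		}
	}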
273 275
@@ -298,7 +300,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
298 return; 300 return;
299 301
300 ql_dbg(ql_dbg_async, vha, 0x5022, 302 ql_dbg(ql_dbg_async, vha, 0x5022,
301 "Inter-Driver Commucation %s -- ACK timeout=%d.\n", 303 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
302 vha->host_no, event[aen & 0xff], timeout); 304 vha->host_no, event[aen & 0xff], timeout);
303 305
304 rval = qla2x00_post_idc_ack_work(vha, mb); 306 rval = qla2x00_post_idc_ack_work(vha, mb);
@@ -453,7 +455,7 @@ skip_rio:
453 break; 455 break;
454 456
455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 457 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
456 ql_log(ql_log_info, vha, 0x5009, 458 ql_dbg(ql_dbg_async, vha, 0x5009,
457 "LIP occurred (%x).\n", mb[1]); 459 "LIP occurred (%x).\n", mb[1]);
458 460
459 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 461 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -487,7 +489,7 @@ skip_rio:
487 ha->link_data_rate = mb[1]; 489 ha->link_data_rate = mb[1];
488 } 490 }
489 491
490 ql_log(ql_log_info, vha, 0x500a, 492 ql_dbg(ql_dbg_async, vha, 0x500a,
491 "LOOP UP detected (%s Gbps).\n", link_speed); 493 "LOOP UP detected (%s Gbps).\n", link_speed);
492 494
493 vha->flags.management_server_logged_in = 0; 495 vha->flags.management_server_logged_in = 0;
@@ -497,7 +499,7 @@ skip_rio:
497 case MBA_LOOP_DOWN: /* Loop Down Event */ 499 case MBA_LOOP_DOWN: /* Loop Down Event */
498 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0; 500 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
499 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx; 501 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
500 ql_log(ql_log_info, vha, 0x500b, 502 ql_dbg(ql_dbg_async, vha, 0x500b,
501 "LOOP DOWN detected (%x %x %x %x).\n", 503 "LOOP DOWN detected (%x %x %x %x).\n",
502 mb[1], mb[2], mb[3], mbx); 504 mb[1], mb[2], mb[3], mbx);
503 505
@@ -519,7 +521,7 @@ skip_rio:
519 break; 521 break;
520 522
521 case MBA_LIP_RESET: /* LIP reset occurred */ 523 case MBA_LIP_RESET: /* LIP reset occurred */
522 ql_log(ql_log_info, vha, 0x500c, 524 ql_dbg(ql_dbg_async, vha, 0x500c,
523 "LIP reset occurred (%x).\n", mb[1]); 525 "LIP reset occurred (%x).\n", mb[1]);
524 526
525 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 527 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -587,7 +589,7 @@ skip_rio:
587 if (IS_QLA2100(ha)) 589 if (IS_QLA2100(ha))
588 break; 590 break;
589 591
590 ql_log(ql_log_info, vha, 0x500f, 592 ql_dbg(ql_dbg_async, vha, 0x500f,
591 "Configuration change detected: value=%x.\n", mb[1]); 593 "Configuration change detected: value=%x.\n", mb[1]);
592 594
593 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 595 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -920,15 +922,15 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
920 QLA_LOGIO_LOGIN_RETRIED : 0; 922 QLA_LOGIO_LOGIN_RETRIED : 0;
921 if (mbx->entry_status) { 923 if (mbx->entry_status) {
922 ql_dbg(ql_dbg_async, vha, 0x5043, 924 ql_dbg(ql_dbg_async, vha, 0x5043,
923 "Async-%s error entry - portid=%02x%02x%02x " 925 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
924 "entry-status=%x status=%x state-flag=%x " 926 "entry-status=%x status=%x state-flag=%x "
925 "status-flags=%x.\n", 927 "status-flags=%x.\n", type, sp->handle,
926 type, fcport->d_id.b.domain, fcport->d_id.b.area, 928 fcport->d_id.b.domain, fcport->d_id.b.area,
927 fcport->d_id.b.al_pa, mbx->entry_status, 929 fcport->d_id.b.al_pa, mbx->entry_status,
928 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 930 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
929 le16_to_cpu(mbx->status_flags)); 931 le16_to_cpu(mbx->status_flags));
930 932
931 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057, 933 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
932 (uint8_t *)mbx, sizeof(*mbx)); 934 (uint8_t *)mbx, sizeof(*mbx));
933 935
934 goto logio_done; 936 goto logio_done;
@@ -940,9 +942,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
940 status = 0; 942 status = 0;
941 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 943 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
942 ql_dbg(ql_dbg_async, vha, 0x5045, 944 ql_dbg(ql_dbg_async, vha, 0x5045,
943 "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n", 945 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
944 type, fcport->d_id.b.domain, fcport->d_id.b.area, 946 type, sp->handle, fcport->d_id.b.domain,
945 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)); 947 fcport->d_id.b.area, fcport->d_id.b.al_pa,
948 le16_to_cpu(mbx->mb1));
946 949
947 data[0] = MBS_COMMAND_COMPLETE; 950 data[0] = MBS_COMMAND_COMPLETE;
948 if (ctx->type == SRB_LOGIN_CMD) { 951 if (ctx->type == SRB_LOGIN_CMD) {
@@ -968,11 +971,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
968 } 971 }
969 972
970 ql_log(ql_log_warn, vha, 0x5046, 973 ql_log(ql_log_warn, vha, 0x5046,
971 "Async-%s failed - portid=%02x%02x%02x status=%x " 974 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
972 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", 975 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
973 type, fcport->d_id.b.domain, 976 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
974 fcport->d_id.b.area, fcport->d_id.b.al_pa, status, 977 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
975 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
976 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 978 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
977 le16_to_cpu(mbx->mb7)); 979 le16_to_cpu(mbx->mb7));
978 980
@@ -1036,7 +1038,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1036 bsg_job->reply->result = DID_ERROR << 16; 1038 bsg_job->reply->result = DID_ERROR << 16;
1037 bsg_job->reply->reply_payload_rcv_len = 0; 1039 bsg_job->reply->reply_payload_rcv_len = 0;
1038 } 1040 }
1039 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058, 1041 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1040 (uint8_t *)pkt, sizeof(*pkt)); 1042 (uint8_t *)pkt, sizeof(*pkt));
1041 } else { 1043 } else {
1042 bsg_job->reply->result = DID_OK << 16; 1044 bsg_job->reply->result = DID_OK << 16;
@@ -1111,9 +1113,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1111 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); 1113 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1112 1114
1113 ql_log(ql_log_info, vha, 0x503f, 1115 ql_log(ql_log_info, vha, 0x503f,
1114 "ELS-CT pass-through-%s error comp_status-status=0x%x " 1116 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1115 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1117 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1116 type, comp_status, fw_status[1], fw_status[2], 1118 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1117 le16_to_cpu(((struct els_sts_entry_24xx *) 1119 le16_to_cpu(((struct els_sts_entry_24xx *)
1118 pkt)->total_byte_count)); 1120 pkt)->total_byte_count));
1119 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1121 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
@@ -1121,9 +1123,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1121 } 1123 }
1122 else { 1124 else {
1123 ql_log(ql_log_info, vha, 0x5040, 1125 ql_log(ql_log_info, vha, 0x5040,
1124 "ELS-CT pass-through-%s error comp_status-status=0x%x " 1126 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1125 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1127 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1126 type, comp_status, 1128 type, sp->handle, comp_status,
1127 le16_to_cpu(((struct els_sts_entry_24xx *) 1129 le16_to_cpu(((struct els_sts_entry_24xx *)
1128 pkt)->error_subcode_1), 1130 pkt)->error_subcode_1),
1129 le16_to_cpu(((struct els_sts_entry_24xx *) 1131 le16_to_cpu(((struct els_sts_entry_24xx *)
@@ -1184,11 +1186,12 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1184 QLA_LOGIO_LOGIN_RETRIED : 0; 1186 QLA_LOGIO_LOGIN_RETRIED : 0;
1185 if (logio->entry_status) { 1187 if (logio->entry_status) {
1186 ql_log(ql_log_warn, vha, 0x5034, 1188 ql_log(ql_log_warn, vha, 0x5034,
1187 "Async-%s error entry - " 1189 "Async-%s error entry - hdl=%x "
1188 "portid=%02x%02x%02x entry-status=%x.\n", 1190 "portid=%02x%02x%02x entry-status=%x.\n",
1189 type, fcport->d_id.b.domain, fcport->d_id.b.area, 1191 type, sp->handle, fcport->d_id.b.domain,
1190 fcport->d_id.b.al_pa, logio->entry_status); 1192 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1191 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059, 1193 logio->entry_status);
1194 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
1192 (uint8_t *)logio, sizeof(*logio)); 1195 (uint8_t *)logio, sizeof(*logio));
1193 1196
1194 goto logio_done; 1197 goto logio_done;
@@ -1196,10 +1199,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1196 1199
1197 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1200 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1198 ql_dbg(ql_dbg_async, vha, 0x5036, 1201 ql_dbg(ql_dbg_async, vha, 0x5036,
1199 "Async-%s complete - portid=%02x%02x%02x " 1202 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1200 "iop0=%x.\n", 1203 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1201 type, fcport->d_id.b.domain, fcport->d_id.b.area, 1204 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1202 fcport->d_id.b.al_pa,
1203 le32_to_cpu(logio->io_parameter[0])); 1205 le32_to_cpu(logio->io_parameter[0]));
1204 1206
1205 data[0] = MBS_COMMAND_COMPLETE; 1207 data[0] = MBS_COMMAND_COMPLETE;
@@ -1238,9 +1240,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1238 } 1240 }
1239 1241
1240 ql_dbg(ql_dbg_async, vha, 0x5037, 1242 ql_dbg(ql_dbg_async, vha, 0x5037,
1241 "Async-%s failed - portid=%02x%02x%02x comp=%x " 1243 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1242 "iop0=%x iop1=%x.\n", 1244 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1243 type, fcport->d_id.b.domain,
1244 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1245 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1245 le16_to_cpu(logio->comp_status), 1246 le16_to_cpu(logio->comp_status),
1246 le32_to_cpu(logio->io_parameter[0]), 1247 le32_to_cpu(logio->io_parameter[0]),
@@ -1274,25 +1275,25 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1274 1275
1275 if (sts->entry_status) { 1276 if (sts->entry_status) {
1276 ql_log(ql_log_warn, vha, 0x5038, 1277 ql_log(ql_log_warn, vha, 0x5038,
1277 "Async-%s error - entry-status(%x).\n", 1278 "Async-%s error - hdl=%x entry-status(%x).\n",
1278 type, sts->entry_status); 1279 type, sp->handle, sts->entry_status);
1279 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1280 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1280 ql_log(ql_log_warn, vha, 0x5039, 1281 ql_log(ql_log_warn, vha, 0x5039,
1281 "Async-%s error - completion status(%x).\n", 1282 "Async-%s error - hdl=%x completion status(%x).\n",
1282 type, sts->comp_status); 1283 type, sp->handle, sts->comp_status);
1283 } else if (!(le16_to_cpu(sts->scsi_status) & 1284 } else if (!(le16_to_cpu(sts->scsi_status) &
1284 SS_RESPONSE_INFO_LEN_VALID)) { 1285 SS_RESPONSE_INFO_LEN_VALID)) {
1285 ql_log(ql_log_warn, vha, 0x503a, 1286 ql_log(ql_log_warn, vha, 0x503a,
1286 "Async-%s error - no response info(%x).\n", 1287 "Async-%s error - hdl=%x no response info(%x).\n",
1287 type, sts->scsi_status); 1288 type, sp->handle, sts->scsi_status);
1288 } else if (le32_to_cpu(sts->rsp_data_len) < 4) { 1289 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1289 ql_log(ql_log_warn, vha, 0x503b, 1290 ql_log(ql_log_warn, vha, 0x503b,
1290 "Async-%s error - not enough response(%d).\n", 1291 "Async-%s error - hdl=%x not enough response(%d).\n",
1291 type, sts->rsp_data_len); 1292 type, sp->handle, sts->rsp_data_len);
1292 } else if (sts->data[3]) { 1293 } else if (sts->data[3]) {
1293 ql_log(ql_log_warn, vha, 0x503c, 1294 ql_log(ql_log_warn, vha, 0x503c,
1294 "Async-%s error - response(%x).\n", 1295 "Async-%s error - hdl=%x response(%x).\n",
1295 type, sts->data[3]); 1296 type, sp->handle, sts->data[3]);
1296 } else { 1297 } else {
1297 error = 0; 1298 error = 0;
1298 } 1299 }
@@ -1337,9 +1338,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1337 } 1338 }
1338 1339
1339 if (pkt->entry_status != 0) { 1340 if (pkt->entry_status != 0) {
1340 ql_log(ql_log_warn, vha, 0x5035,
1341 "Process error entry.\n");
1342
1343 qla2x00_error_entry(vha, rsp, pkt); 1341 qla2x00_error_entry(vha, rsp, pkt);
1344 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1342 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1345 wmb(); 1343 wmb();
@@ -1391,7 +1389,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1391} 1389}
1392 1390
1393static inline void 1391static inline void
1394
1395qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1392qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1396 uint32_t sense_len, struct rsp_que *rsp) 1393 uint32_t sense_len, struct rsp_que *rsp)
1397{ 1394{
@@ -1413,13 +1410,14 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1413 if (sp->request_sense_length != 0) 1410 if (sp->request_sense_length != 0)
1414 rsp->status_srb = sp; 1411 rsp->status_srb = sp;
1415 1412
1416 ql_dbg(ql_dbg_io, vha, 0x301c, 1413 if (sense_len) {
1417 "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n", 1414 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1418 sp->fcport->vha->host_no, cp->device->channel, cp->device->id, 1415 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
1419 cp->device->lun, cp); 1416 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1420 if (sense_len) 1417 cp);
1421 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 1418 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1422 cp->sense_buffer, sense_len); 1419 cp->sense_buffer, sense_len);
1420 }
1423} 1421}
1424 1422
1425struct scsi_dif_tuple { 1423struct scsi_dif_tuple {
@@ -1506,7 +1504,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1506 } 1504 }
1507 1505
1508 if (k != blocks_done) { 1506 if (k != blocks_done) {
1509 qla_printk(KERN_WARNING, sp->fcport->vha->hw, 1507 ql_log(ql_log_warn, vha, 0x302f,
1510 "unexpected tag values tag:lba=%x:%llx\n", 1508 "unexpected tag values tag:lba=%x:%llx\n",
1511 e_ref_tag, (unsigned long long)lba_s); 1509 e_ref_tag, (unsigned long long)lba_s);
1512 return 1; 1510 return 1;
@@ -1611,7 +1609,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1611 sp = NULL; 1609 sp = NULL;
1612 1610
1613 if (sp == NULL) { 1611 if (sp == NULL) {
1614 ql_log(ql_log_warn, vha, 0x3017, 1612 ql_dbg(ql_dbg_io, vha, 0x3017,
1615 "Invalid status handle (0x%x).\n", sts->handle); 1613 "Invalid status handle (0x%x).\n", sts->handle);
1616 1614
1617 if (IS_QLA82XX(ha)) 1615 if (IS_QLA82XX(ha))
@@ -1623,7 +1621,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1623 } 1621 }
1624 cp = sp->cmd; 1622 cp = sp->cmd;
1625 if (cp == NULL) { 1623 if (cp == NULL) {
1626 ql_log(ql_log_warn, vha, 0x3018, 1624 ql_dbg(ql_dbg_io, vha, 0x3018,
1627 "Command already returned (0x%x/%p).\n", 1625 "Command already returned (0x%x/%p).\n",
1628 sts->handle, sp); 1626 sts->handle, sp);
1629 1627
@@ -1670,7 +1668,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1670 par_sense_len -= rsp_info_len; 1668 par_sense_len -= rsp_info_len;
1671 } 1669 }
1672 if (rsp_info_len > 3 && rsp_info[3]) { 1670 if (rsp_info_len > 3 && rsp_info[3]) {
1673 ql_log(ql_log_warn, vha, 0x3019, 1671 ql_dbg(ql_dbg_io, vha, 0x3019,
1674 "FCP I/O protocol failure (0x%x/0x%x).\n", 1672 "FCP I/O protocol failure (0x%x/0x%x).\n",
1675 rsp_info_len, rsp_info[3]); 1673 rsp_info_len, rsp_info[3]);
1676 1674
@@ -1701,7 +1699,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1701 if (!lscsi_status && 1699 if (!lscsi_status &&
1702 ((unsigned)(scsi_bufflen(cp) - resid) < 1700 ((unsigned)(scsi_bufflen(cp) - resid) <
1703 cp->underflow)) { 1701 cp->underflow)) {
1704 ql_log(ql_log_warn, vha, 0x301a, 1702 ql_dbg(ql_dbg_io, vha, 0x301a,
1705 "Mid-layer underflow " 1703 "Mid-layer underflow "
1706 "detected (0x%x of 0x%x bytes).\n", 1704 "detected (0x%x of 0x%x bytes).\n",
1707 resid, scsi_bufflen(cp)); 1705 resid, scsi_bufflen(cp));
@@ -1713,7 +1711,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1713 cp->result = DID_OK << 16 | lscsi_status; 1711 cp->result = DID_OK << 16 | lscsi_status;
1714 1712
1715 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1713 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1716 ql_log(ql_log_warn, vha, 0x301b, 1714 ql_dbg(ql_dbg_io, vha, 0x301b,
1717 "QUEUE FULL detected.\n"); 1715 "QUEUE FULL detected.\n");
1718 break; 1716 break;
1719 } 1717 }
@@ -1735,7 +1733,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1735 scsi_set_resid(cp, resid); 1733 scsi_set_resid(cp, resid);
1736 if (scsi_status & SS_RESIDUAL_UNDER) { 1734 if (scsi_status & SS_RESIDUAL_UNDER) {
1737 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 1735 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1738 ql_log(ql_log_warn, vha, 0x301d, 1736 ql_dbg(ql_dbg_io, vha, 0x301d,
1739 "Dropped frame(s) detected " 1737 "Dropped frame(s) detected "
1740 "(0x%x of 0x%x bytes).\n", 1738 "(0x%x of 0x%x bytes).\n",
1741 resid, scsi_bufflen(cp)); 1739 resid, scsi_bufflen(cp));
@@ -1747,7 +1745,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1747 if (!lscsi_status && 1745 if (!lscsi_status &&
1748 ((unsigned)(scsi_bufflen(cp) - resid) < 1746 ((unsigned)(scsi_bufflen(cp) - resid) <
1749 cp->underflow)) { 1747 cp->underflow)) {
1750 ql_log(ql_log_warn, vha, 0x301e, 1748 ql_dbg(ql_dbg_io, vha, 0x301e,
1751 "Mid-layer underflow " 1749 "Mid-layer underflow "
1752 "detected (0x%x of 0x%x bytes).\n", 1750 "detected (0x%x of 0x%x bytes).\n",
1753 resid, scsi_bufflen(cp)); 1751 resid, scsi_bufflen(cp));
@@ -1756,7 +1754,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1756 break; 1754 break;
1757 } 1755 }
1758 } else { 1756 } else {
1759 ql_log(ql_log_warn, vha, 0x301f, 1757 ql_dbg(ql_dbg_io, vha, 0x301f,
1760 "Dropped frame(s) detected (0x%x " 1758 "Dropped frame(s) detected (0x%x "
1761 "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); 1759 "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
1762 1760
@@ -1774,7 +1772,7 @@ check_scsi_status:
1774 */ 1772 */
1775 if (lscsi_status != 0) { 1773 if (lscsi_status != 0) {
1776 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1774 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1777 ql_log(ql_log_warn, vha, 0x3020, 1775 ql_dbg(ql_dbg_io, vha, 0x3020,
1778 "QUEUE FULL detected.\n"); 1776 "QUEUE FULL detected.\n");
1779 logit = 1; 1777 logit = 1;
1780 break; 1778 break;
@@ -1838,10 +1836,15 @@ out:
1838 if (logit) 1836 if (logit)
1839 ql_dbg(ql_dbg_io, vha, 0x3022, 1837 ql_dbg(ql_dbg_io, vha, 0x3022,
1840 "FCP command status: 0x%x-0x%x (0x%x) " 1838 "FCP command status: 0x%x-0x%x (0x%x) "
1841 "oxid=0x%x cdb=%02x%02x%02x len=0x%x " 1839 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
1840 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
1842 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", 1841 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
1843 comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0], 1842 comp_status, scsi_status, cp->result, vha->host_no,
1844 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, 1843 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
1844 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
1845 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1846 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
1847 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
1845 resid_len, fw_resid_len); 1848 resid_len, fw_resid_len);
1846 1849
1847 if (rsp->status_srb == NULL) 1850 if (rsp->status_srb == NULL)
@@ -1899,6 +1902,45 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1899 } 1902 }
1900} 1903}
1901 1904
1905static int
1906qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
1907{
1908 struct qla_hw_data *ha = vha->hw;
1909 struct srb_ctx *ctx;
1910
1911 if (!sp->ctx)
1912 return 1;
1913
1914 ctx = sp->ctx;
1915
1916 if (ctx->type == SRB_LOGIN_CMD ||
1917 ctx->type == SRB_LOGOUT_CMD ||
1918 ctx->type == SRB_TM_CMD) {
1919 ctx->u.iocb_cmd->done(sp);
1920 return 0;
1921 } else if (ctx->type == SRB_ADISC_CMD) {
1922 ctx->u.iocb_cmd->free(sp);
1923 return 0;
1924 } else {
1925 struct fc_bsg_job *bsg_job;
1926
1927 bsg_job = ctx->u.bsg_job;
1928 if (ctx->type == SRB_ELS_CMD_HST ||
1929 ctx->type == SRB_CT_CMD)
1930 kfree(sp->fcport);
1931
1932 bsg_job->reply->reply_data.ctels_reply.status =
1933 FC_CTELS_STATUS_OK;
1934 bsg_job->reply->result = DID_ERROR << 16;
1935 bsg_job->reply->reply_payload_rcv_len = 0;
1936 kfree(sp->ctx);
1937 mempool_free(sp, ha->srb_mempool);
1938 bsg_job->job_done(bsg_job);
1939 return 0;
1940 }
1941 return 1;
1942}
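As the qla2x00_error_entry() hunk below shows, the return value steers the caller: a zero return means one of the context paths (login/logout/TM done, ADISC free, or the BSG completion) has already consumed the srb, while a non-zero return -- a plain SCSI command with no ctx -- falls through to the cmd->result assignment and qla2x00_sp_compl().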
1943
1902/** 1944/**
1903 * qla2x00_error_entry() - Process an error entry. 1945 * qla2x00_error_entry() - Process an error entry.
1904 * @ha: SCSI driver HA context 1946 * @ha: SCSI driver HA context
@@ -1909,7 +1951,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1909{ 1951{
1910 srb_t *sp; 1952 srb_t *sp;
1911 struct qla_hw_data *ha = vha->hw; 1953 struct qla_hw_data *ha = vha->hw;
1912 uint32_t handle = LSW(pkt->handle); 1954 const char func[] = "ERROR-IOCB";
1913 uint16_t que = MSW(pkt->handle); 1955 uint16_t que = MSW(pkt->handle);
1914 struct req_que *req = ha->req_q_map[que]; 1956 struct req_que *req = ha->req_q_map[que];
1915 1957
@@ -1932,28 +1974,20 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1932 ql_dbg(ql_dbg_async, vha, 0x502f, 1974 ql_dbg(ql_dbg_async, vha, 0x502f,
1933 "UNKNOWN flag error.\n"); 1975 "UNKNOWN flag error.\n");
1934 1976
1935 /* Validate handle. */ 1977 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1936 if (handle < MAX_OUTSTANDING_COMMANDS)
1937 sp = req->outstanding_cmds[handle];
1938 else
1939 sp = NULL;
1940
1941 if (sp) { 1978 if (sp) {
1942 /* Free outstanding command slot. */ 1979 if (qla2x00_free_sp_ctx(vha, sp)) {
1943 req->outstanding_cmds[handle] = NULL; 1980 if (pkt->entry_status &
1944 1981 (RF_INV_E_ORDER | RF_INV_E_COUNT |
1945 /* Bad payload or header */ 1982 RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1946 if (pkt->entry_status & 1983 sp->cmd->result = DID_ERROR << 16;
1947 (RF_INV_E_ORDER | RF_INV_E_COUNT | 1984 } else if (pkt->entry_status & RF_BUSY) {
1948 RF_INV_E_PARAM | RF_INV_E_TYPE)) { 1985 sp->cmd->result = DID_BUS_BUSY << 16;
1949 sp->cmd->result = DID_ERROR << 16; 1986 } else {
1950 } else if (pkt->entry_status & RF_BUSY) { 1987 sp->cmd->result = DID_ERROR << 16;
1951 sp->cmd->result = DID_BUS_BUSY << 16; 1988 }
1952 } else { 1989 qla2x00_sp_compl(ha, sp);
1953 sp->cmd->result = DID_ERROR << 16;
1954 } 1990 }
1955 qla2x00_sp_compl(ha, sp);
1956
1957 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1991 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1958 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7 1992 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
1959 || pkt->entry_type == COMMAND_TYPE_6) { 1993 || pkt->entry_type == COMMAND_TYPE_6) {
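The open-coded handle validation is gone in favor of qla2x00_get_sp_from_handle(), whose body is outside this hunk. Presumably it decodes the packed completion handle -- queue id in the upper 16 bits, outstanding-command index in the lower 16, mirroring MAKE_HANDLE() on the submit side -- bounds-checks the index, and detaches the srb from outstanding_cmds[] before returning it. A hedged sketch of that contract only:

	/* Sketch of the assumed contract; the real helper lives elsewhere
	 * in qla_isr.c and also logs via the 'func' string. */
	static srb_t *get_sp_from_handle_sketch(struct req_que *req,
		uint32_t pkt_handle)
	{
		uint32_t index = LSW(pkt_handle);	/* MSW(pkt_handle) names the queue */
		srb_t *sp;

		if (index >= MAX_OUTSTANDING_COMMANDS)
			return NULL;

		sp = req->outstanding_cmds[index];
		if (sp)
			req->outstanding_cmds[index] = NULL;	/* free the slot */
		return sp;
	}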
@@ -1977,26 +2011,30 @@ static void
1977qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 2011qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1978{ 2012{
1979 uint16_t cnt; 2013 uint16_t cnt;
2014 uint32_t mboxes;
1980 uint16_t __iomem *wptr; 2015 uint16_t __iomem *wptr;
1981 struct qla_hw_data *ha = vha->hw; 2016 struct qla_hw_data *ha = vha->hw;
1982 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2017 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1983 2018
2019 /* Read all mbox registers? */
2020 mboxes = (1 << ha->mbx_count) - 1;
2021 if (!ha->mcp)
2022 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2023 else
2024 mboxes = ha->mcp->in_mb;
2025
1984 /* Load return mailbox registers. */ 2026 /* Load return mailbox registers. */
1985 ha->flags.mbox_int = 1; 2027 ha->flags.mbox_int = 1;
1986 ha->mailbox_out[0] = mb0; 2028 ha->mailbox_out[0] = mb0;
2029 mboxes >>= 1;
1987 wptr = (uint16_t __iomem *)&reg->mailbox1; 2030 wptr = (uint16_t __iomem *)&reg->mailbox1;
1988 2031
1989 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 2032 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1990 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 2033 if (mboxes & BIT_0)
1991 wptr++; 2034 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1992 }
1993 2035
1994 if (ha->mcp) { 2036 mboxes >>= 1;
1995 ql_dbg(ql_dbg_async, vha, 0x504d, 2037 wptr++;
1996 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
1997 } else {
1998 ql_dbg(ql_dbg_async, vha, 0x504e,
1999 "MBX pointer ERROR.\n");
2000 } 2038 }
2001} 2039}
2002 2040
@@ -2025,9 +2063,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2025 } 2063 }
2026 2064
2027 if (pkt->entry_status != 0) { 2065 if (pkt->entry_status != 0) {
2028 ql_dbg(ql_dbg_async, vha, 0x5029,
2029 "Process error entry.\n");
2030
2031 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2066 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2032 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2067 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2033 wmb(); 2068 wmb();
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 82a33533ed26..34344d3f8658 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2887,7 +2887,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2887 if (vp_idx == 0 && (MSB(stat) != 1)) 2887 if (vp_idx == 0 && (MSB(stat) != 1))
2888 goto reg_needed; 2888 goto reg_needed;
2889 2889
2890 if (MSB(stat) == 1) { 2890 if (MSB(stat) != 0) {
2891 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 2891 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
2892 "Could not acquire ID for VP[%d].\n", vp_idx); 2892 "Could not acquire ID for VP[%d].\n", vp_idx);
2893 return; 2893 return;
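Widening the test from MSB(stat) == 1 to MSB(stat) != 0 makes any non-zero status byte count as a failed VP ID acquisition, so newer firmware failure codes also return early instead of falling through to the re-registration path.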
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 1873940a7ccb..1cd46cd7ff90 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -369,7 +369,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
369 ql_dbg(ql_dbg_p3p, vha, 0xb000, 369 ql_dbg(ql_dbg_p3p, vha, 0xb000,
370 "%s: Written crbwin (0x%x) " 370 "%s: Written crbwin (0x%x) "
371 "!= Read crbwin (0x%x), off=0x%lx.\n", 371 "!= Read crbwin (0x%x), off=0x%lx.\n",
372 ha->crb_win, win_read, *off); 372 __func__, ha->crb_win, win_read, *off);
373 } 373 }
374 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; 374 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
375} 375}
@@ -409,7 +409,7 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
409 } 409 }
410 /* strange address given */ 410 /* strange address given */
411 ql_dbg(ql_dbg_p3p, vha, 0xb001, 411 ql_dbg(ql_dbg_p3p, vha, 0xb001,
412 "%x: Warning: unm_nic_pci_set_crbwindow " 412 "%s: Warning: unm_nic_pci_set_crbwindow "
413 "called with an unknown address(%llx).\n", 413 "called with an unknown address(%llx).\n",
414 QLA2XXX_DRIVER_NAME, off); 414 QLA2XXX_DRIVER_NAME, off);
415 return off; 415 return off;
@@ -1711,12 +1711,12 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1711 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006, 1711 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
1712 "nx_pci_base=%p iobase=%p " 1712 "nx_pci_base=%p iobase=%p "
1713 "max_req_queues=%d msix_count=%d.\n", 1713 "max_req_queues=%d msix_count=%d.\n",
1714 ha->nx_pcibase, ha->iobase, 1714 (void *)ha->nx_pcibase, ha->iobase,
1715 ha->max_req_queues, ha->msix_count); 1715 ha->max_req_queues, ha->msix_count);
1716 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010, 1716 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
1717 "nx_pci_base=%p iobase=%p " 1717 "nx_pci_base=%p iobase=%p "
1718 "max_req_queues=%d msix_count=%d.\n", 1718 "max_req_queues=%d msix_count=%d.\n",
1719 ha->nx_pcibase, ha->iobase, 1719 (void *)ha->nx_pcibase, ha->iobase,
1720 ha->max_req_queues, ha->msix_count); 1720 ha->max_req_queues, ha->msix_count);
1721 return 0; 1721 return 0;
1722 1722
@@ -1744,7 +1744,7 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
1744 ret = pci_set_mwi(ha->pdev); 1744 ret = pci_set_mwi(ha->pdev);
1745 ha->chip_revision = ha->pdev->revision; 1745 ha->chip_revision = ha->pdev->revision;
1746 ql_dbg(ql_dbg_init, vha, 0x0043, 1746 ql_dbg(ql_dbg_init, vha, 0x0043,
1747 "Chip revision:%ld.\n", 1747 "Chip revision:%d.\n",
1748 ha->chip_revision); 1748 ha->chip_revision);
1749 return 0; 1749 return 0;
1750} 1750}
@@ -2023,13 +2023,9 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2023 wptr++; 2023 wptr++;
2024 } 2024 }
2025 2025
2026 if (ha->mcp) { 2026 if (!ha->mcp)
2027 ql_dbg(ql_dbg_async, vha, 0x5052,
2028 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
2029 } else {
2030 ql_dbg(ql_dbg_async, vha, 0x5053, 2027 ql_dbg(ql_dbg_async, vha, 0x5053,
2031 "MBX pointer ERROR.\n"); 2028 "MBX pointer ERROR.\n");
2032 }
2033} 2029}
2034 2030
2035/* 2031/*
@@ -2543,484 +2539,6 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
2543 return qla82xx_check_rcvpeg_state(ha); 2539 return qla82xx_check_rcvpeg_state(ha);
2544} 2540}
2545 2541
2546static inline int
2547qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2548 uint16_t tot_dsds)
2549{
2550 uint32_t *cur_dsd = NULL;
2551 scsi_qla_host_t *vha;
2552 struct qla_hw_data *ha;
2553 struct scsi_cmnd *cmd;
2554 struct scatterlist *cur_seg;
2555 uint32_t *dsd_seg;
2556 void *next_dsd;
2557 uint8_t avail_dsds;
2558 uint8_t first_iocb = 1;
2559 uint32_t dsd_list_len;
2560 struct dsd_dma *dsd_ptr;
2561 struct ct6_dsd *ctx;
2562
2563 cmd = sp->cmd;
2564
2565 /* Update entry type to indicate Command Type 6 IOCB */
2566 *((uint32_t *)(&cmd_pkt->entry_type)) =
2567 __constant_cpu_to_le32(COMMAND_TYPE_6);
2568
2569 /* No data transfer */
2570 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2571 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
2572 return 0;
2573 }
2574
2575 vha = sp->fcport->vha;
2576 ha = vha->hw;
2577
2578 /* Set transfer direction */
2579 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2580 cmd_pkt->control_flags =
2581 __constant_cpu_to_le16(CF_WRITE_DATA);
2582 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
2583 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2584 cmd_pkt->control_flags =
2585 __constant_cpu_to_le16(CF_READ_DATA);
2586 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
2587 }
2588
2589 cur_seg = scsi_sglist(cmd);
2590 ctx = sp->ctx;
2591
2592 while (tot_dsds) {
2593 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
2594 QLA_DSDS_PER_IOCB : tot_dsds;
2595 tot_dsds -= avail_dsds;
2596 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
2597
2598 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
2599 struct dsd_dma, list);
2600 next_dsd = dsd_ptr->dsd_addr;
2601 list_del(&dsd_ptr->list);
2602 ha->gbl_dsd_avail--;
2603 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
2604 ctx->dsd_use_cnt++;
2605 ha->gbl_dsd_inuse++;
2606
2607 if (first_iocb) {
2608 first_iocb = 0;
2609 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2610 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2611 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2612 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
2613 } else {
2614 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2615 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2616 *cur_dsd++ = cpu_to_le32(dsd_list_len);
2617 }
2618 cur_dsd = (uint32_t *)next_dsd;
2619 while (avail_dsds) {
2620 dma_addr_t sle_dma;
2621
2622 sle_dma = sg_dma_address(cur_seg);
2623 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2624 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2625 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
2626 cur_seg = sg_next(cur_seg);
2627 avail_dsds--;
2628 }
2629 }
2630
2631 /* Null termination */
2632 *cur_dsd++ = 0;
2633 *cur_dsd++ = 0;
2634 *cur_dsd++ = 0;
2635 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
2636 return 0;
2637}
2638
2639/*
2640 * qla82xx_calc_dsd_lists() - Determine number of DSD lists required
2641 * for Command Type 6.
2642 *
2643 * @dsds: number of data segment descriptors needed
2644 *
2645 * Returns the number of DSD lists needed to store @dsds.
2646 */
2647inline uint16_t
2648qla82xx_calc_dsd_lists(uint16_t dsds)
2649{
2650 uint16_t dsd_lists = 0;
2651
2652 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
2653 if (dsds % QLA_DSDS_PER_IOCB)
2654 dsd_lists++;
2655 return dsd_lists;
2656}
2657
2658/*
2659 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2660 * @sp: command to send to the ISP
2661 *
2662 * Returns non-zero if a failure occurred, else zero.
2663 */
2664int
2665qla82xx_start_scsi(srb_t *sp)
2666{
2667 int ret, nseg;
2668 unsigned long flags;
2669 struct scsi_cmnd *cmd;
2670 uint32_t *clr_ptr;
2671 uint32_t index;
2672 uint32_t handle;
2673 uint16_t cnt;
2674 uint16_t req_cnt;
2675 uint16_t tot_dsds;
2676 struct device_reg_82xx __iomem *reg;
2677 uint32_t dbval;
2678 uint32_t *fcp_dl;
2679 uint8_t additional_cdb_len;
2680 struct ct6_dsd *ctx;
2681 struct scsi_qla_host *vha = sp->fcport->vha;
2682 struct qla_hw_data *ha = vha->hw;
2683 struct req_que *req = NULL;
2684 struct rsp_que *rsp = NULL;
2685 char tag[2];
2686
2687 /* Setup device pointers. */
2688 ret = 0;
2689 reg = &ha->iobase->isp82;
2690 cmd = sp->cmd;
2691 req = vha->req;
2692 rsp = ha->rsp_q_map[0];
2693
2694 /* So we know we haven't pci_map'ed anything yet */
2695 tot_dsds = 0;
2696
2697 dbval = 0x04 | (ha->portnum << 5);
2698
2699 /* Send marker if required */
2700 if (vha->marker_needed != 0) {
2701 if (qla2x00_marker(vha, req,
2702 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2703 ql_log(ql_log_warn, vha, 0x300c,
2704 "qla2x00_marker failed for cmd=%p.\n", cmd);
2705 return QLA_FUNCTION_FAILED;
2706 }
2707 vha->marker_needed = 0;
2708 }
2709
2710 /* Acquire ring specific lock */
2711 spin_lock_irqsave(&ha->hardware_lock, flags);
2712
2713 /* Check for room in outstanding command list. */
2714 handle = req->current_outstanding_cmd;
2715 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2716 handle++;
2717 if (handle == MAX_OUTSTANDING_COMMANDS)
2718 handle = 1;
2719 if (!req->outstanding_cmds[handle])
2720 break;
2721 }
2722 if (index == MAX_OUTSTANDING_COMMANDS)
2723 goto queuing_error;
2724
2725 /* Map the sg table so we have an accurate count of sg entries needed */
2726 if (scsi_sg_count(cmd)) {
2727 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2728 scsi_sg_count(cmd), cmd->sc_data_direction);
2729 if (unlikely(!nseg))
2730 goto queuing_error;
2731 } else
2732 nseg = 0;
2733
2734 tot_dsds = nseg;
2735
2736 if (tot_dsds > ql2xshiftctondsd) {
2737 struct cmd_type_6 *cmd_pkt;
2738 uint16_t more_dsd_lists = 0;
2739 struct dsd_dma *dsd_ptr;
2740 uint16_t i;
2741
2742 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2743 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2744 ql_dbg(ql_dbg_io, vha, 0x300d,
2745 "Num of DSD lists %d is more than %d for cmd=%p.\n",
-                more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
-                cmd);
-            goto queuing_error;
-        }
-
-        if (more_dsd_lists <= ha->gbl_dsd_avail)
-            goto sufficient_dsds;
-        else
-            more_dsd_lists -= ha->gbl_dsd_avail;
-
-        for (i = 0; i < more_dsd_lists; i++) {
-            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
-            if (!dsd_ptr) {
-                ql_log(ql_log_fatal, vha, 0x300e,
-                    "Failed to allocate memory for dsd_dma "
-                    "for cmd=%p.\n", cmd);
-                goto queuing_error;
-            }
-
-            dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
-                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
-            if (!dsd_ptr->dsd_addr) {
-                kfree(dsd_ptr);
-                ql_log(ql_log_fatal, vha, 0x300f,
-                    "Failed to allocate memory for dsd_addr "
-                    "for cmd=%p.\n", cmd);
-                goto queuing_error;
-            }
-            list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
-            ha->gbl_dsd_avail++;
-        }
-
-sufficient_dsds:
-        req_cnt = 1;
-
-        if (req->cnt < (req_cnt + 2)) {
-            cnt = (uint16_t)RD_REG_DWORD_RELAXED(
-                &reg->req_q_out[0]);
-            if (req->ring_index < cnt)
-                req->cnt = cnt - req->ring_index;
-            else
-                req->cnt = req->length -
-                    (req->ring_index - cnt);
-        }
-
-        if (req->cnt < (req_cnt + 2))
-            goto queuing_error;
-
-        ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
-        if (!sp->ctx) {
-            ql_log(ql_log_fatal, vha, 0x3010,
-                "Failed to allocate ctx for cmd=%p.\n", cmd);
-            goto queuing_error;
-        }
-        memset(ctx, 0, sizeof(struct ct6_dsd));
-        ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
-            GFP_ATOMIC, &ctx->fcp_cmnd_dma);
-        if (!ctx->fcp_cmnd) {
-            ql_log(ql_log_fatal, vha, 0x3011,
-                "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
-            goto queuing_error_fcp_cmnd;
-        }
-
-        /* Initialize the DSD list and dma handle */
-        INIT_LIST_HEAD(&ctx->dsd_list);
-        ctx->dsd_use_cnt = 0;
-
-        if (cmd->cmd_len > 16) {
-            additional_cdb_len = cmd->cmd_len - 16;
-            if ((cmd->cmd_len % 4) != 0) {
-                /* SCSI command bigger than 16 bytes must be
-                 * multiple of 4
-                 */
-                ql_log(ql_log_warn, vha, 0x3012,
-                    "scsi cmd len %d not multiple of 4 "
-                    "for cmd=%p.\n", cmd->cmd_len, cmd);
-                goto queuing_error_fcp_cmnd;
-            }
-            ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
-        } else {
-            additional_cdb_len = 0;
-            ctx->fcp_cmnd_len = 12 + 16 + 4;
-        }
-
-        cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
-        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
-        /* Zero out remaining portion of packet. */
-        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
-        clr_ptr = (uint32_t *)cmd_pkt + 2;
-        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
-        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
-        /* Set NPORT-ID and LUN number*/
-        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
-        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
-        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-        cmd_pkt->vp_index = sp->fcport->vp_idx;
-
-        /* Build IOCB segments */
-        if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
-            goto queuing_error_fcp_cmnd;
-
-        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
-        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
-
-        /* build FCP_CMND IU */
-        memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
-        int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
-        ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
-
-        if (cmd->sc_data_direction == DMA_TO_DEVICE)
-            ctx->fcp_cmnd->additional_cdb_len |= 1;
-        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
-            ctx->fcp_cmnd->additional_cdb_len |= 2;
-
-        /*
-         * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
-         */
-        if (scsi_populate_tag_msg(cmd, tag)) {
-            switch (tag[0]) {
-            case HEAD_OF_QUEUE_TAG:
-                ctx->fcp_cmnd->task_attribute =
-                    TSK_HEAD_OF_QUEUE;
-                break;
-            case ORDERED_QUEUE_TAG:
-                ctx->fcp_cmnd->task_attribute =
-                    TSK_ORDERED;
-                break;
-            }
-        }
-
-        memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
-
-        fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
-            additional_cdb_len);
-        *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
-
-        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
-        cmd_pkt->fcp_cmnd_dseg_address[0] =
-            cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
-        cmd_pkt->fcp_cmnd_dseg_address[1] =
-            cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
-
-        sp->flags |= SRB_FCP_CMND_DMA_VALID;
-        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-        /* Set total data segment count. */
-        cmd_pkt->entry_count = (uint8_t)req_cnt;
-        /* Specify response queue number where
-         * completion should happen
-         */
-        cmd_pkt->entry_status = (uint8_t) rsp->id;
-    } else {
-        struct cmd_type_7 *cmd_pkt;
-        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
-        if (req->cnt < (req_cnt + 2)) {
-            cnt = (uint16_t)RD_REG_DWORD_RELAXED(
-                &reg->req_q_out[0]);
-            if (req->ring_index < cnt)
-                req->cnt = cnt - req->ring_index;
-            else
-                req->cnt = req->length -
-                    (req->ring_index - cnt);
-        }
-        if (req->cnt < (req_cnt + 2))
-            goto queuing_error;
-
-        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
-        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
-        /* Zero out remaining portion of packet. */
-        /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
-        clr_ptr = (uint32_t *)cmd_pkt + 2;
-        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
-        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
-        /* Set NPORT-ID and LUN number*/
-        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
-        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
-        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-        cmd_pkt->vp_index = sp->fcport->vp_idx;
-
-        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
-        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
-            sizeof(cmd_pkt->lun));
-
-        /*
-         * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
-         */
-        if (scsi_populate_tag_msg(cmd, tag)) {
-            switch (tag[0]) {
-            case HEAD_OF_QUEUE_TAG:
-                cmd_pkt->task = TSK_HEAD_OF_QUEUE;
-                break;
-            case ORDERED_QUEUE_TAG:
-                cmd_pkt->task = TSK_ORDERED;
-                break;
-            }
-        }
-
-        /* Load SCSI command packet. */
-        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
-        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
-
-        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-
-        /* Build IOCB segments */
-        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
-
-        /* Set total data segment count. */
-        cmd_pkt->entry_count = (uint8_t)req_cnt;
-        /* Specify response queue number where
-         * completion should happen.
-         */
-        cmd_pkt->entry_status = (uint8_t) rsp->id;
-
-    }
-    /* Build command packet. */
-    req->current_outstanding_cmd = handle;
-    req->outstanding_cmds[handle] = sp;
-    sp->handle = handle;
-    sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
-    req->cnt -= req_cnt;
-    wmb();
-
-    /* Adjust ring index. */
-    req->ring_index++;
-    if (req->ring_index == req->length) {
-        req->ring_index = 0;
-        req->ring_ptr = req->ring;
-    } else
-        req->ring_ptr++;
-
-    sp->flags |= SRB_DMA_VALID;
-
-    /* Set chip new ring index. */
-    /* write, read and verify logic */
-    dbval = dbval | (req->id << 8) | (req->ring_index << 16);
-    if (ql2xdbwr)
-        qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
-    else {
-        WRT_REG_DWORD(
-            (unsigned long __iomem *)ha->nxdb_wr_ptr,
-            dbval);
-        wmb();
-        while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
-            WRT_REG_DWORD(
-                (unsigned long __iomem *)ha->nxdb_wr_ptr,
-                dbval);
-            wmb();
-        }
-    }
-
-    /* Manage unprocessed RIO/ZIO commands in response queue. */
-    if (vha->flags.process_response_queue &&
-        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
-        qla24xx_process_response_queue(vha, rsp);
-
-    spin_unlock_irqrestore(&ha->hardware_lock, flags);
-    return QLA_SUCCESS;
-
-queuing_error_fcp_cmnd:
-    dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
-queuing_error:
-    if (tot_dsds)
-        scsi_dma_unmap(cmd);
-
-    if (sp->ctx) {
-        mempool_free(sp->ctx, ha->ctx_mempool);
-        sp->ctx = NULL;
-    }
-    spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-    return QLA_FUNCTION_FAILED;
-}
-
 static uint32_t *
 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
     uint32_t length)
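
A note on the outstanding-command bookkeeping in the removed qla82xx_start_scsi() above: the free-handle search is a circular scan that starts one past the last handle issued, wraps at MAX_OUTSTANDING_COMMANDS, skips slot 0, and gives up after one full sweep. The same scan as a standalone C helper (table size and types here are illustrative, not the driver's):

    #include <stddef.h>

    #define MAX_OUTSTANDING_COMMANDS 1024   /* illustrative table size */

    /* Returns a free 1-based handle, or 0 if the table is full. */
    static size_t find_free_handle(void *cmds[], size_t last_handle)
    {
        size_t handle = last_handle;

        for (size_t index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
            handle++;
            if (handle == MAX_OUTSTANDING_COMMANDS)
                handle = 1;                 /* slot 0 is reserved; wrap */
            if (cmds[handle] == NULL)
                return handle;              /* claim this slot */
        }
        return 0;                           /* full sweep found nothing */
    }
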
@@ -3272,9 +2790,9 @@ qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
 }
 
 void
-qla82xx_start_iocbs(srb_t *sp)
+qla82xx_start_iocbs(scsi_qla_host_t *vha)
 {
-    struct qla_hw_data *ha = sp->fcport->vha->hw;
+    struct qla_hw_data *ha = vha->hw;
     struct req_que *req = ha->req_q_map[0];
     struct device_reg_82xx __iomem *reg;
     uint32_t dbval;
@@ -3659,11 +3177,10 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
             qla82xx_md_free(vha);
             /* Allocate MiniDump resources */
             qla82xx_md_prep(vha);
-        } else
-            ql_log(ql_log_info, vha, 0xb02e,
-                "Firmware dump available to retrieve\n",
-                vha->host_no);
-        }
+        }
+    } else
+        ql_log(ql_log_info, vha, 0xb02e,
+            "Firmware dump available to retrieve\n");
     }
     return rval;
 }
@@ -3758,7 +3275,6 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
 
     switch (dev_state) {
     case QLA82XX_DEV_READY:
-        qla82xx_check_md_needed(vha);
         ha->flags.isp82xx_reset_owner = 0;
         goto exit;
     case QLA82XX_DEV_COLD:
@@ -4067,7 +3583,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
         }
     }
     ql_dbg(ql_dbg_p3p, vha, 0xb027,
-        "%s status=%d.\n", status);
+        "%s: status=%d.\n", __func__, status);
 
     return status;
 }
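
The deleted qla82xx start path earlier ends with the 82xx doorbell handshake ("write, read and verify logic"): the driver composes dbval from the port number, request-queue id and ring index, posts it, and re-posts until the read-back register echoes it. A compilable sketch of that encoding, assuming the bit layout shown in the removed code (the register pointers are plain-memory stand-ins, not the driver's iomapped nxdb_wr_ptr/nxdb_rd_ptr):

    #include <stdint.h>

    /* dbval encoding from the removed qla82xx_start_scsi(): command 0x04,
     * port number at bit 5, request-queue id at bit 8, ring index at bit 16. */
    static uint32_t make_dbval(uint8_t portnum, uint8_t req_id, uint16_t ring_index)
    {
        return 0x04u | ((uint32_t)portnum << 5) | ((uint32_t)req_id << 8) |
            ((uint32_t)ring_index << 16);
    }

    /* Write-and-verify: re-post the doorbell until the read-back register
     * reflects it.  On hardware a write barrier separates the attempts; in
     * this stand-in, pass the same address for wr and rd so the loop exits. */
    static void ring_doorbell(volatile uint32_t *wr, volatile uint32_t *rd,
        uint32_t dbval)
    {
        *wr = dbval;
        while (*rd != dbval)
            *wr = dbval;
    }
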
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f9e5b85e84d8..4ed1e4a96b95 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -83,6 +83,9 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
83 "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n" 83 "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
84 "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n" 84 "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
85 "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n" 85 "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
86 "\t\t0x1e400000 - Preferred value for capturing essential "
87 "debug information (equivalent to old "
88 "ql2xextended_error_logging=1).\n"
86 "\t\tDo LOGICAL OR of the value to enable more than one level"); 89 "\t\tDo LOGICAL OR of the value to enable more than one level");
87 90
88int ql2xshiftctondsd = 6; 91int ql2xshiftctondsd = 6;
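
The new help text documents that this debug control is a plain bitmask: each area has one bit, values are ORed together, and 0x1e400000 is the suggested essential set. A minimal sketch of that gating (the area bit shown is taken from the help text above; the logging call is a simplification, not ql_dbg's real signature):

    #include <stdint.h>
    #include <stdio.h>

    #define DBG_P3P 0x00080000u   /* "P3P Specific" bit from the help text */

    static uint32_t ql2xextended_error_logging = 0x1e400000;

    /* Emit the message only if its area bit is enabled in the mask. */
    static void dbg_log(uint32_t area, const char *msg)
    {
        if (ql2xextended_error_logging & area)
            fputs(msg, stdout);
    }
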
@@ -199,7 +202,7 @@ int ql2xmdcapmask = 0x1F;
 module_param(ql2xmdcapmask, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xmdcapmask,
     "Set the Minidump driver capture mask level. "
-    "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+    "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
 
 int ql2xmdenable = 1;
 module_param(ql2xmdenable, int, S_IRUGO);
@@ -847,14 +850,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
     int wait = 0;
     struct qla_hw_data *ha = vha->hw;
 
-    ql_dbg(ql_dbg_taskm, vha, 0x8000,
-        "Entered %s for cmd=%p.\n", __func__, cmd);
     if (!CMD_SP(cmd))
         return SUCCESS;
 
     ret = fc_block_scsi_eh(cmd);
-    ql_dbg(ql_dbg_taskm, vha, 0x8001,
-        "Return value of fc_block_scsi_eh=%d.\n", ret);
     if (ret != 0)
         return ret;
     ret = SUCCESS;
@@ -870,7 +869,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
     }
 
     ql_dbg(ql_dbg_taskm, vha, 0x8002,
-        "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
+        "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n",
+        vha->host_no, id, lun, sp, cmd);
 
     /* Get a reference to the sp and drop the lock.*/
     sp_get(sp);
@@ -878,10 +878,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
     if (ha->isp_ops->abort_command(sp)) {
         ql_dbg(ql_dbg_taskm, vha, 0x8003,
-            "Abort command mbx failed for cmd=%p.\n", cmd);
+            "Abort command mbx failed cmd=%p.\n", cmd);
     } else {
         ql_dbg(ql_dbg_taskm, vha, 0x8004,
-            "Abort command mbx success.\n");
+            "Abort command mbx success cmd=%p.\n", cmd);
         wait = 1;
     }
 
@@ -897,13 +897,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
     if (wait) {
         if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
             ql_log(ql_log_warn, vha, 0x8006,
-                "Abort handler timed out for cmd=%p.\n", cmd);
+                "Abort handler timed out cmd=%p.\n", cmd);
             ret = FAILED;
         }
     }
 
     ql_log(ql_log_info, vha, 0x801c,
-        "Abort command issued -- %d %x.\n", wait, ret);
+        "Abort command issued nexus=%ld:%d:%d -- %d %x.\n",
+        vha->host_no, id, lun, wait, ret);
 
     return ret;
 }
@@ -972,19 +973,15 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
     int err;
 
     if (!fcport) {
-        ql_log(ql_log_warn, vha, 0x8007,
-            "fcport is NULL.\n");
         return FAILED;
     }
 
     err = fc_block_scsi_eh(cmd);
-    ql_dbg(ql_dbg_taskm, vha, 0x8008,
-        "fc_block_scsi_eh ret=%d.\n", err);
     if (err != 0)
         return err;
 
     ql_log(ql_log_info, vha, 0x8009,
-        "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
+        "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no,
         cmd->device->id, cmd->device->lun, cmd);
 
     err = 0;
@@ -1009,15 +1006,16 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
     }
 
     ql_log(ql_log_info, vha, 0x800e,
-        "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
-        cmd->device->id, cmd->device->lun, cmd);
+        "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name,
+        vha->host_no, cmd->device->id, cmd->device->lun, cmd);
 
     return SUCCESS;
 
 eh_reset_failed:
     ql_log(ql_log_info, vha, 0x800f,
-        "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
-        reset_errors[err], cmd->device->id, cmd->device->lun);
+        "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name,
+        reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+        cmd);
     return FAILED;
 }
 
@@ -1068,20 +1066,16 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
     lun = cmd->device->lun;
 
     if (!fcport) {
-        ql_log(ql_log_warn, vha, 0x8010,
-            "fcport is NULL.\n");
         return ret;
     }
 
     ret = fc_block_scsi_eh(cmd);
-    ql_dbg(ql_dbg_taskm, vha, 0x8011,
-        "fc_block_scsi_eh ret=%d.\n", ret);
     if (ret != 0)
         return ret;
     ret = FAILED;
 
     ql_log(ql_log_info, vha, 0x8012,
-        "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
+        "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
 
     if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
         ql_log(ql_log_fatal, vha, 0x8013,
@@ -1105,7 +1099,8 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 
 eh_bus_reset_done:
     ql_log(ql_log_warn, vha, 0x802b,
-        "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEEDED");
+        "BUS RESET %s nexus=%ld:%d:%d.\n",
+        (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
 
     return ret;
 }
@@ -1139,20 +1134,16 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
     lun = cmd->device->lun;
 
     if (!fcport) {
-        ql_log(ql_log_warn, vha, 0x8016,
-            "fcport is NULL.\n");
         return ret;
     }
 
     ret = fc_block_scsi_eh(cmd);
-    ql_dbg(ql_dbg_taskm, vha, 0x8017,
-        "fc_block_scsi_eh ret=%d.\n", ret);
     if (ret != 0)
         return ret;
     ret = FAILED;
 
     ql_log(ql_log_info, vha, 0x8018,
-        "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
+        "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
 
     if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
         goto eh_host_reset_lock;
@@ -1193,8 +1184,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
     ret = SUCCESS;
 
 eh_host_reset_lock:
-    qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
-        (ret == FAILED) ? "failed" : "succeeded");
+    ql_log(ql_log_info, vha, 0x8017,
+        "ADAPTER RESET %s nexus=%ld:%d:%d.\n",
+        (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
 
     return ret;
 }
@@ -1344,10 +1336,8 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
         return;
 
     ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
-        "Queue depth adjusted-down "
-        "to %d for scsi(%ld:%d:%d:%d).\n",
-        sdev->queue_depth, fcport->vha->host_no,
-        sdev->channel, sdev->id, sdev->lun);
+        "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n",
+        sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
 }
 
 static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
@@ -1369,10 +1359,8 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
     scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
 
     ql_dbg(ql_dbg_io, vha, 0x302a,
-        "Queue depth adjusted-up to %d for "
-        "scsi(%ld:%d:%d:%d).\n",
-        sdev->queue_depth, fcport->vha->host_no,
-        sdev->channel, sdev->id, sdev->lun);
+        "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n",
+        sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
 }
 
 static int
@@ -1496,6 +1484,118 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
+static int
+qla2x00_iospace_config(struct qla_hw_data *ha)
+{
+    resource_size_t pio;
+    uint16_t msix;
+    int cpus;
+
+    if (IS_QLA82XX(ha))
+        return qla82xx_iospace_config(ha);
+
+    if (pci_request_selected_regions(ha->pdev, ha->bars,
+        QLA2XXX_DRIVER_NAME)) {
+        ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
+            "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+            pci_name(ha->pdev));
+        goto iospace_error_exit;
+    }
+    if (!(ha->bars & 1))
+        goto skip_pio;
+
+    /* We only need PIO for Flash operations on ISP2312 v2 chips. */
+    pio = pci_resource_start(ha->pdev, 0);
+    if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
+        if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+            ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
+                "Invalid pci I/O region size (%s).\n",
+                pci_name(ha->pdev));
+            pio = 0;
+        }
+    } else {
+        ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
1518 "Region #0 no a PIO resource (%s).\n",
+            pci_name(ha->pdev));
+        pio = 0;
+    }
+    ha->pio_address = pio;
+    ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
+        "PIO address=%llu.\n",
+        (unsigned long long)ha->pio_address);
+
+skip_pio:
+    /* Use MMIO operations for all accesses. */
+    if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
+        ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
+            "Region #1 not an MMIO resource (%s), aborting.\n",
+            pci_name(ha->pdev));
+        goto iospace_error_exit;
+    }
+    if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
+        ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
+            "Invalid PCI mem region size (%s), aborting.\n",
+            pci_name(ha->pdev));
+        goto iospace_error_exit;
+    }
+
+    ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
+    if (!ha->iobase) {
+        ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
+            "Cannot remap MMIO (%s), aborting.\n",
+            pci_name(ha->pdev));
+        goto iospace_error_exit;
+    }
+
+    /* Determine queue resources */
+    ha->max_req_queues = ha->max_rsp_queues = 1;
+    if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
+        (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
+        (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+        goto mqiobase_exit;
+
+    ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
+        pci_resource_len(ha->pdev, 3));
+    if (ha->mqiobase) {
+        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
+            "MQIO Base=%p.\n", ha->mqiobase);
+        /* Read MSIX vector size of the board */
+        pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
+        ha->msix_count = msix;
+        /* Max queues are bounded by available msix vectors */
+        /* queue 0 uses two msix vectors */
+        if (ql2xmultique_tag) {
+            cpus = num_online_cpus();
+            ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+                (cpus + 1) : (ha->msix_count - 1);
+            ha->max_req_queues = 2;
+        } else if (ql2xmaxqueues > 1) {
+            ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+                QLA_MQ_SIZE : ql2xmaxqueues;
+            ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
+                "QoS mode set, max no of request queues:%d.\n",
+                ha->max_req_queues);
+            ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
+                "QoS mode set, max no of request queues:%d.\n",
+                ha->max_req_queues);
+        }
+        ql_log_pci(ql_log_info, ha->pdev, 0x001a,
+            "MSI-X vector count: %d.\n", msix);
+    } else
+        ql_log_pci(ql_log_info, ha->pdev, 0x001b,
+            "BAR 3 not enabled.\n");
+
+mqiobase_exit:
+    ha->msix_count = ha->max_rsp_queues + 1;
+    ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
+        "MSIX Count:%d.\n", ha->msix_count);
+    return (0);
+
+iospace_error_exit:
+    return (-ENOMEM);
+}
+
+
 static struct isp_operations qla2100_isp_ops = {
     .pci_config = qla2100_pci_config,
     .reset_chip = qla2x00_reset_chip,
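
The queue sizing inside the relocated qla2x00_iospace_config() follows one rule worth spelling out: queue 0 consumes two MSI-X vectors, so the extra response queues are capped at msix_count - 1, and there is no benefit past one queue per CPU plus one. Just that computation as a standalone helper (not a driver function):

    /* queue 0 uses two MSI-X vectors, so only (msix_count - 1) remain for
     * response queues, and more than one per CPU (plus one) is wasted. */
    static int max_rsp_queues(int msix_count, int online_cpus)
    {
        return (msix_count - 1 > online_cpus) ? online_cpus + 1
                                              : msix_count - 1;
    }
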
@@ -1530,6 +1630,7 @@ static struct isp_operations qla2100_isp_ops = {
     .get_flash_version = qla2x00_get_flash_version,
     .start_scsi = qla2x00_start_scsi,
     .abort_isp = qla2x00_abort_isp,
+    .iospace_config = qla2x00_iospace_config,
 };
 
 static struct isp_operations qla2300_isp_ops = {
@@ -1566,6 +1667,7 @@ static struct isp_operations qla2300_isp_ops = {
     .get_flash_version = qla2x00_get_flash_version,
     .start_scsi = qla2x00_start_scsi,
     .abort_isp = qla2x00_abort_isp,
+    .iospace_config = qla2x00_iospace_config,
 };
 
 static struct isp_operations qla24xx_isp_ops = {
@@ -1602,6 +1704,7 @@ static struct isp_operations qla24xx_isp_ops = {
     .get_flash_version = qla24xx_get_flash_version,
     .start_scsi = qla24xx_start_scsi,
     .abort_isp = qla2x00_abort_isp,
+    .iospace_config = qla2x00_iospace_config,
 };
 
 static struct isp_operations qla25xx_isp_ops = {
@@ -1638,6 +1741,7 @@ static struct isp_operations qla25xx_isp_ops = {
     .get_flash_version = qla24xx_get_flash_version,
     .start_scsi = qla24xx_dif_start_scsi,
     .abort_isp = qla2x00_abort_isp,
+    .iospace_config = qla2x00_iospace_config,
 };
 
 static struct isp_operations qla81xx_isp_ops = {
@@ -1674,6 +1778,7 @@ static struct isp_operations qla81xx_isp_ops = {
     .get_flash_version = qla24xx_get_flash_version,
     .start_scsi = qla24xx_dif_start_scsi,
     .abort_isp = qla2x00_abort_isp,
+    .iospace_config = qla2x00_iospace_config,
 };
 
 static struct isp_operations qla82xx_isp_ops = {
@@ -1710,6 +1815,7 @@ static struct isp_operations qla82xx_isp_ops = {
     .get_flash_version = qla24xx_get_flash_version,
     .start_scsi = qla82xx_start_scsi,
     .abort_isp = qla82xx_abort_isp,
+    .iospace_config = qla82xx_iospace_config,
 };
 
 static inline void
@@ -1819,121 +1925,10 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
     else
         ha->flags.port0 = 0;
     ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
-        "device_type=0x%x port=%d fw_srisc_address=%p.\n",
+        "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
         ha->device_type, ha->flags.port0, ha->fw_srisc_address);
 }
 
-static int
-qla2x00_iospace_config(struct qla_hw_data *ha)
-{
-    resource_size_t pio;
-    uint16_t msix;
-    int cpus;
-
-    if (IS_QLA82XX(ha))
-        return qla82xx_iospace_config(ha);
-
-    if (pci_request_selected_regions(ha->pdev, ha->bars,
-        QLA2XXX_DRIVER_NAME)) {
-        ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
-            "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
-            pci_name(ha->pdev));
-        goto iospace_error_exit;
-    }
-    if (!(ha->bars & 1))
-        goto skip_pio;
-
-    /* We only need PIO for Flash operations on ISP2312 v2 chips. */
-    pio = pci_resource_start(ha->pdev, 0);
-    if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
-        if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
-            ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
-                "Invalid pci I/O region size (%s).\n",
-                pci_name(ha->pdev));
-            pio = 0;
-        }
-    } else {
-        ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
1857 "Region #0 no a PIO resource (%s).\n",
-            pci_name(ha->pdev));
-        pio = 0;
-    }
-    ha->pio_address = pio;
-    ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
-        "PIO address=%p.\n",
-        ha->pio_address);
-
-skip_pio:
-    /* Use MMIO operations for all accesses. */
-    if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
-        ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
-            "Region #1 not an MMIO resource (%s), aborting.\n",
-            pci_name(ha->pdev));
-        goto iospace_error_exit;
-    }
-    if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
-        ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
-            "Invalid PCI mem region size (%s), aborting.\n",
-            pci_name(ha->pdev));
-        goto iospace_error_exit;
-    }
-
-    ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
-    if (!ha->iobase) {
-        ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
-            "Cannot remap MMIO (%s), aborting.\n",
-            pci_name(ha->pdev));
-        goto iospace_error_exit;
-    }
-
-    /* Determine queue resources */
-    ha->max_req_queues = ha->max_rsp_queues = 1;
-    if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
-        (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
-        (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
-        goto mqiobase_exit;
-
-    ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
-        pci_resource_len(ha->pdev, 3));
-    if (ha->mqiobase) {
-        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
-            "MQIO Base=%p.\n", ha->mqiobase);
-        /* Read MSIX vector size of the board */
-        pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
-        ha->msix_count = msix;
-        /* Max queues are bounded by available msix vectors */
-        /* queue 0 uses two msix vectors */
-        if (ql2xmultique_tag) {
-            cpus = num_online_cpus();
-            ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
-                (cpus + 1) : (ha->msix_count - 1);
-            ha->max_req_queues = 2;
-        } else if (ql2xmaxqueues > 1) {
-            ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-                QLA_MQ_SIZE : ql2xmaxqueues;
-            ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
-                "QoS mode set, max no of request queues:%d.\n",
-                ha->max_req_queues);
-            ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
-                "QoS mode set, max no of request queues:%d.\n",
-                ha->max_req_queues);
-        }
-        ql_log_pci(ql_log_info, ha->pdev, 0x001a,
-            "MSI-X vector count: %d.\n", msix);
-    } else
-        ql_log_pci(ql_log_info, ha->pdev, 0x001b,
-            "BAR 3 not enabled.\n");
-
-mqiobase_exit:
-    ha->msix_count = ha->max_rsp_queues + 1;
-    ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
-        "MSIX Count:%d.\n", ha->msix_count);
-    return (0);
-
-iospace_error_exit:
-    return (-ENOMEM);
-}
-
 static void
 qla2xxx_scan_start(struct Scsi_Host *shost)
 {
@@ -2032,14 +2027,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
         pdev->needs_freset = 1;
     }
 
-    /* Configure PCI I/O space */
-    ret = qla2x00_iospace_config(ha);
-    if (ret)
-        goto probe_hw_failed;
-
-    ql_log_pci(ql_log_info, pdev, 0x001d,
-        "Found an ISP%04X irq %d iobase 0x%p.\n",
-        pdev->device, pdev->irq, ha->iobase);
     ha->prev_topology = 0;
     ha->init_cb_size = sizeof(init_cb_t);
     ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2152,6 +2139,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2152 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", 2139 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
2153 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, 2140 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
2154 ha->nvram_conf_off, ha->nvram_data_off); 2141 ha->nvram_conf_off, ha->nvram_data_off);
2142
2143 /* Configure PCI I/O space */
2144 ret = ha->isp_ops->iospace_config(ha);
2145 if (ret)
2146 goto probe_hw_failed;
2147
2148 ql_log_pci(ql_log_info, pdev, 0x001d,
2149 "Found an ISP%04X irq %d iobase 0x%p.\n",
2150 pdev->device, pdev->irq, ha->iobase);
2155 mutex_init(&ha->vport_lock); 2151 mutex_init(&ha->vport_lock);
2156 init_completion(&ha->mbx_cmd_comp); 2152 init_completion(&ha->mbx_cmd_comp);
2157 complete(&ha->mbx_cmd_comp); 2153 complete(&ha->mbx_cmd_comp);
@@ -2227,7 +2223,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
     ql_dbg(ql_dbg_init, base_vha, 0x0033,
         "max_id=%d this_id=%d "
         "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
-        "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
+        "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id,
         host->this_id, host->cmd_per_lun, host->unique_id,
         host->max_cmd_len, host->max_channel, host->max_lun,
         host->transportt, sht->vendor_id);
@@ -2382,9 +2378,6 @@ skip_dpc:
 
     qla2x00_dfs_setup(base_vha);
 
-    ql_log(ql_log_info, base_vha, 0x00fa,
-        "QLogic Fibre Channel HBA Driver: %s.\n",
-        qla2x00_version_str);
     ql_log(ql_log_info, base_vha, 0x00fb,
         "QLogic %s - %s.\n",
         ha->model_number, ha->model_desc ? ha->model_desc : "");
@@ -2833,7 +2826,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
         if (!ha->sns_cmd)
             goto fail_dma_pool;
         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
-            "sns_cmd.\n", ha->sns_cmd);
+            "sns_cmd: %p.\n", ha->sns_cmd);
     } else {
         /* Get consistent memory allocated for MS IOCB */
         ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -3460,27 +3453,21 @@ qla2x00_do_dpc(void *data)
         schedule();
         __set_current_state(TASK_RUNNING);
 
-        ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
-            "DPC handler waking up.\n");
-        ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
-            "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
-
-        /* Initialization not yet finished. Don't do anything yet. */
-        if (!base_vha->flags.init_done)
-            continue;
+        if (!base_vha->flags.init_done || ha->flags.mbox_busy)
+            goto end_loop;
 
         if (ha->flags.eeh_busy) {
             ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
                 "eeh_busy=%d.\n", ha->flags.eeh_busy);
-            continue;
+            goto end_loop;
         }
 
         ha->dpc_active = 1;
 
-        if (ha->flags.mbox_busy) {
-            ha->dpc_active = 0;
-            continue;
-        }
+        ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
+            "DPC handler waking up.\n");
+        ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
+            "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
 
         qla2x00_do_work(base_vha);
 
@@ -3622,6 +3609,7 @@ qla2x00_do_dpc(void *data)
         qla2x00_do_dpc_all_vps(base_vha);
 
         ha->dpc_active = 0;
+end_loop:
         set_current_state(TASK_INTERRUPTIBLE);
     } /* End of while(1) */
     __set_current_state(TASK_RUNNING);
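
The reworked DPC loop above replaces the scattered continue paths with a single end_loop label, so the task state is always re-armed before the next sleep. A condensed user-space skeleton of that control flow (the wait and work functions are stand-ins for schedule() and the real DPC body, not kernel APIs):

    #include <stdbool.h>

    struct dpc_state { bool init_done, mbox_busy, eeh_busy, stop; };

    static void wait_for_wakeup(void) { /* stand-in for schedule() */ }
    static void do_dpc_work(struct dpc_state *st) { st->stop = true; /* stub */ }

    static void dpc_thread(struct dpc_state *st)
    {
        while (!st->stop) {
            wait_for_wakeup();

            if (!st->init_done || st->mbox_busy)
                goto end_loop;      /* skip the work, still re-arm below */
            if (st->eeh_busy)
                goto end_loop;

            do_dpc_work(st);
    end_loop:
            ;                       /* set_current_state() happens here */
        }
    }
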
@@ -3705,16 +3693,6 @@ qla2x00_sp_free_dma(srb_t *sp)
         sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
     }
 
-    CMD_SP(cmd) = NULL;
-}
-
-static void
-qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
-{
-    struct scsi_cmnd *cmd = sp->cmd;
-
-    qla2x00_sp_free_dma(sp);
-
     if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
         struct ct6_dsd *ctx = sp->ctx;
         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
@@ -3726,6 +3704,15 @@ qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
         sp->ctx = NULL;
     }
 
+    CMD_SP(cmd) = NULL;
+}
+
+static void
+qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
+{
+    struct scsi_cmnd *cmd = sp->cmd;
+
+    qla2x00_sp_free_dma(sp);
     mempool_free(sp, ha->srb_mempool);
     cmd->scsi_done(cmd);
 }
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index eff13563c82d..16bc72844a97 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -904,8 +904,9 @@ no_flash_data:
     }
 done:
     ql_dbg(ql_dbg_init, vha, 0x004d,
-        "FDT[%x]: (0x%x/0x%x) erase=0x%x "
-        "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+        "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+        "pr=%x wrtd=0x%x blk=0x%x.\n",
+        loc, mid, fid,
         ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
         ha->fdt_wrt_disable, ha->fdt_block_size);
 
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index af62c3cf8752..8d58ae274829 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -20,12 +20,12 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
20 printk("------------------------------------------------------------" 20 printk("------------------------------------------------------------"
21 "--\n"); 21 "--\n");
22 for (cnt = 0; cnt < size; c++) { 22 for (cnt = 0; cnt < size; c++) {
23 printk(KERN_INFO "%02x", *c); 23 printk("%02x", *c);
24 if (!(++cnt % 16)) 24 if (!(++cnt % 16))
25 printk(KERN_INFO "\n"); 25 printk("\n");
26 26
27 else 27 else
28 printk(KERN_INFO " "); 28 printk(" ");
29 } 29 }
30 printk(KERN_INFO "\n"); 30 printk(KERN_INFO "\n");
31} 31}
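
The qla4xxx_dump_buffer() fix drops KERN_INFO from the continuation printks, since each KERN_* prefix starts a new log record and was breaking the 16-bytes-per-row layout. The same layout as a plain user-space helper (printf standing in for printk):

    #include <stdint.h>
    #include <stdio.h>

    /* Only the first byte of a row starts a new line; continuation bytes
     * must not re-emit a prefix (KERN_INFO in the kernel, newline here). */
    static void dump_buffer(const void *buf, uint32_t size)
    {
        const uint8_t *c = buf;

        for (uint32_t cnt = 0; cnt < size; c++) {
            printf("%02x", *c);
            if (!(++cnt % 16))
                printf("\n");
            else
                printf(" ");
        }
        printf("\n");
    }
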
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index fd5edc6e166d..22a3ff02e48a 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -177,6 +177,7 @@
 #define LOGIN_TOV 12
 
 #define MAX_RESET_HA_RETRIES 2
+#define FW_ALIVE_WAIT_TOV 3
 
 #define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
 
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 827e93078b94..95828862eea0 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -123,13 +123,13 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
 
     srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
     if (!srb) {
-        DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
-            "handle 0x%x, sp=%p. This cmd may have already "
-            "been completed.\n", ha->host_no, __func__,
-            le32_to_cpu(sts_entry->handle), srb));
-        ql4_printk(KERN_WARNING, ha, "%s invalid status entry:"
-            " handle=0x%0x\n", __func__, sts_entry->handle);
-        set_bit(DPC_RESET_HA, &ha->dpc_flags);
+        ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
+            "handle=0x%0x, srb=%p\n", __func__,
+            sts_entry->handle, srb);
+        if (is_qla8022(ha))
+            set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+        else
+            set_bit(DPC_RESET_HA, &ha->dpc_flags);
         return;
     }
 
@@ -563,7 +563,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
     case MBOX_ASTS_DHCP_LEASE_EXPIRED:
         DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
             "Reset HA\n", ha->host_no, mbox_status));
-        set_bit(DPC_RESET_HA, &ha->dpc_flags);
+        if (is_qla8022(ha))
+            set_bit(DPC_RESET_HA_FW_CONTEXT,
+                &ha->dpc_flags);
+        else
+            set_bit(DPC_RESET_HA, &ha->dpc_flags);
         break;
 
     case MBOX_ASTS_LINK_UP:
@@ -617,9 +621,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
             (mbox_sts[2] == ACB_STATE_ACQUIRING)))
             set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
         else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
-            (mbox_sts[2] == ACB_STATE_VALID))
-            set_bit(DPC_RESET_HA, &ha->dpc_flags);
-        else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
+            (mbox_sts[2] == ACB_STATE_VALID)) {
+            if (is_qla8022(ha))
+                set_bit(DPC_RESET_HA_FW_CONTEXT,
+                    &ha->dpc_flags);
+            else
+                set_bit(DPC_RESET_HA, &ha->dpc_flags);
+        } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
             complete(&ha->disable_acb_comp);
         break;
 
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index f484ff438199..8d6bc1b2ff17 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1792,8 +1792,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
     int rval = QLA_SUCCESS;
     unsigned long dev_init_timeout;
 
-    if (!test_bit(AF_INIT_DONE, &ha->flags))
+    if (!test_bit(AF_INIT_DONE, &ha->flags)) {
+        qla4_8xxx_idc_lock(ha);
         qla4_8xxx_set_drv_active(ha);
+        qla4_8xxx_idc_unlock(ha);
+    }
 
     dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
     ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
@@ -1802,8 +1805,8 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
     /* wait for 30 seconds for device to go ready */
     dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
 
+    qla4_8xxx_idc_lock(ha);
     while (1) {
-        qla4_8xxx_idc_lock(ha);
 
         if (time_after_eq(jiffies, dev_init_timeout)) {
             ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
@@ -1819,15 +1822,14 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
         /* NOTE: Make sure idc unlocked upon exit of switch statement */
         switch (dev_state) {
         case QLA82XX_DEV_READY:
-            qla4_8xxx_idc_unlock(ha);
             goto exit;
         case QLA82XX_DEV_COLD:
             rval = qla4_8xxx_device_bootstrap(ha);
-            qla4_8xxx_idc_unlock(ha);
             goto exit;
         case QLA82XX_DEV_INITIALIZING:
             qla4_8xxx_idc_unlock(ha);
             msleep(1000);
+            qla4_8xxx_idc_lock(ha);
             break;
         case QLA82XX_DEV_NEED_RESET:
             if (!ql4xdontresethba) {
@@ -1836,32 +1838,37 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
                  * reset handler */
                 dev_init_timeout = jiffies +
                     (ha->nx_dev_init_timeout * HZ);
+            } else {
+                qla4_8xxx_idc_unlock(ha);
+                msleep(1000);
+                qla4_8xxx_idc_lock(ha);
             }
-            qla4_8xxx_idc_unlock(ha);
             break;
         case QLA82XX_DEV_NEED_QUIESCENT:
-            qla4_8xxx_idc_unlock(ha);
             /* idc locked/unlocked in handler */
             qla4_8xxx_need_qsnt_handler(ha);
-            qla4_8xxx_idc_lock(ha);
-            /* fall thru needs idc_locked */
+            break;
         case QLA82XX_DEV_QUIESCENT:
             qla4_8xxx_idc_unlock(ha);
             msleep(1000);
+            qla4_8xxx_idc_lock(ha);
             break;
         case QLA82XX_DEV_FAILED:
             qla4_8xxx_idc_unlock(ha);
             qla4xxx_dead_adapter_cleanup(ha);
             rval = QLA_ERROR;
+            qla4_8xxx_idc_lock(ha);
             goto exit;
         default:
             qla4_8xxx_idc_unlock(ha);
             qla4xxx_dead_adapter_cleanup(ha);
             rval = QLA_ERROR;
+            qla4_8xxx_idc_lock(ha);
             goto exit;
         }
     }
 exit:
+    qla4_8xxx_idc_unlock(ha);
     return rval;
 }
 
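
The net effect of the ql4_nx.c changes above is a simpler IDC locking discipline: take the lock once before the state loop, drop it only around sleeps, and release it at the single exit label. A pthread sketch of that shape (the state reader is a caller-supplied stand-in, and READY is modeled as 1):

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t idc_lock = PTHREAD_MUTEX_INITIALIZER;

    /* read_state() models reading QLA82XX_CRB_DEV_STATE under the lock. */
    static int wait_until_ready(int (*read_state)(void))
    {
        pthread_mutex_lock(&idc_lock);
        while (read_state() != 1) {
            pthread_mutex_unlock(&idc_lock);  /* never sleep holding it */
            sleep(1);
            pthread_mutex_lock(&idc_lock);
        }
        pthread_mutex_unlock(&idc_lock);      /* single unlock at exit */
        return 0;
    }
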
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 78bf700b365f..ec393a00c038 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -935,7 +935,16 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
         goto exit_init_fw_cb;
     }
 
-    qla4xxx_disable_acb(ha);
+    rval = qla4xxx_disable_acb(ha);
+    if (rval != QLA_SUCCESS) {
+        ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
+            __func__);
+        rval = -EIO;
+        goto exit_init_fw_cb;
+    }
+
+    wait_for_completion_timeout(&ha->disable_acb_comp,
+        DISABLE_ACB_TOV * HZ);
 
     qla4xxx_initcb_to_acb(init_fw_cb);
 
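
The set_acb fix above has two parts: fail fast when the disable-ACB mailbox command is rejected, then block (bounded by DISABLE_ACB_TOV seconds) on the completion that the AEN handler fires when the firmware reports ACB_STATE_UNCONFIGURED. A pthread condition-variable sketch of that wait (only the symbol name comes from the patch; the 30-second value is an illustrative assumption):

    #include <pthread.h>
    #include <time.h>

    #define DISABLE_ACB_TOV 30   /* seconds; illustrative value */

    static pthread_mutex_t acb_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t acb_comp = PTHREAD_COND_INITIALIZER;
    static int acb_disabled;     /* set by the completion (AEN) side */

    /* Returns 0 once the completion fires, ETIMEDOUT on expiry. */
    static int wait_for_disable_acb(void)
    {
        struct timespec deadline;
        int rc = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += DISABLE_ACB_TOV;

        pthread_mutex_lock(&acb_lock);
        while (!acb_disabled && rc == 0)
            rc = pthread_cond_timedwait(&acb_comp, &acb_lock, &deadline);
        pthread_mutex_unlock(&acb_lock);
        return rc;
    }
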
@@ -1966,9 +1975,10 @@ mem_alloc_error_exit:
  *
  * Context: Interrupt
  **/
-static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
+static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
 {
-    uint32_t fw_heartbeat_counter, halt_status;
+    uint32_t fw_heartbeat_counter;
+    int status = QLA_SUCCESS;
 
     fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
     /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
@@ -1976,7 +1986,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
         DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
             "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
             ha->host_no, __func__));
-        return;
+        return status;
     }
 
     if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
@@ -1984,8 +1994,6 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
         /* FW not alive after 2 seconds */
         if (ha->seconds_since_last_heartbeat == 2) {
             ha->seconds_since_last_heartbeat = 0;
-            halt_status = qla4_8xxx_rd_32(ha,
-                QLA82XX_PEG_HALT_STATUS1);
 
             ql4_printk(KERN_INFO, ha,
                 "scsi(%ld): %s, Dumping hw/fw registers:\n "
@@ -1993,7 +2001,9 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
1993 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:" 2001 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
1994 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:" 2002 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
1995 " 0x%x,\n PEG_NET_4_PC: 0x%x\n", 2003 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
1996 ha->host_no, __func__, halt_status, 2004 ha->host_no, __func__,
2005 qla4_8xxx_rd_32(ha,
2006 QLA82XX_PEG_HALT_STATUS1),
1997 qla4_8xxx_rd_32(ha, 2007 qla4_8xxx_rd_32(ha,
1998 QLA82XX_PEG_HALT_STATUS2), 2008 QLA82XX_PEG_HALT_STATUS2),
1999 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 2009 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
@@ -2006,24 +2016,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
                     0x3c),
                 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
                     0x3c));
-
-            /* Since we cannot change dev_state in interrupt
-             * context, set appropriate DPC flag then wakeup
-             * DPC */
-            if (halt_status & HALT_STATUS_UNRECOVERABLE)
-                set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
-            else {
-                printk("scsi%ld: %s: detect abort needed!\n",
-                    ha->host_no, __func__);
-                set_bit(DPC_RESET_HA, &ha->dpc_flags);
-            }
-            qla4xxx_wake_dpc(ha);
-            qla4xxx_mailbox_premature_completion(ha);
+            status = QLA_ERROR;
         }
     } else
         ha->seconds_since_last_heartbeat = 0;
 
     ha->fw_heartbeat_counter = fw_heartbeat_counter;
+    return status;
 }
 
 /**
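
The restructured qla4_8xxx_check_fw_alive() now only reports liveness: the PEG_ALIVE_COUNTER must advance between one-second polls, 0xffffffff is treated as an AER/EEH sentinel and skipped, and two stalled polls in a row mean dead firmware; acting on the halt status moved to the watchdog. A small model of that rule (struct and names are illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    struct fw_watch { uint32_t last; int stalled_polls; };

    static bool fw_dead(struct fw_watch *w, uint32_t counter)
    {
        if (counter == 0xffffffff)      /* AER/EEH in progress: skip */
            return false;

        if (counter == w->last) {
            if (++w->stalled_polls == 2) {
                w->stalled_polls = 0;
                return true;            /* QLA_ERROR in the driver */
            }
        } else {
            w->stalled_polls = 0;
        }
        w->last = counter;
        return false;
    }
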
@@ -2034,14 +2033,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
  **/
 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
 {
-    uint32_t dev_state;
-
-    dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+    uint32_t dev_state, halt_status;
 
     /* don't poll if reset is going on */
     if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
         test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
         test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
+        dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
         if (dev_state == QLA82XX_DEV_NEED_RESET &&
             !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
             if (!ql4xdontresethba) {
@@ -2049,7 +2047,6 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2049 "NEED RESET!\n", __func__); 2047 "NEED RESET!\n", __func__);
2050 set_bit(DPC_RESET_HA, &ha->dpc_flags); 2048 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2051 qla4xxx_wake_dpc(ha); 2049 qla4xxx_wake_dpc(ha);
2052 qla4xxx_mailbox_premature_completion(ha);
2053 } 2050 }
2054 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 2051 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
2055 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 2052 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
@@ -2059,7 +2056,24 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
             qla4xxx_wake_dpc(ha);
         } else {
             /* Check firmware health */
-            qla4_8xxx_check_fw_alive(ha);
+            if (qla4_8xxx_check_fw_alive(ha)) {
+                halt_status = qla4_8xxx_rd_32(ha,
+                    QLA82XX_PEG_HALT_STATUS1);
+
+                /* Since we cannot change dev_state in interrupt
+                 * context, set appropriate DPC flag then wakeup
+                 * DPC */
+                if (halt_status & HALT_STATUS_UNRECOVERABLE)
+                    set_bit(DPC_HA_UNRECOVERABLE,
+                        &ha->dpc_flags);
+                else {
+                    ql4_printk(KERN_INFO, ha, "%s: detect "
+                        "abort needed!\n", __func__);
+                    set_bit(DPC_RESET_HA, &ha->dpc_flags);
+                }
+                qla4xxx_mailbox_premature_completion(ha);
+                qla4xxx_wake_dpc(ha);
+            }
         }
     }
 }
@@ -2414,6 +2428,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
 {
     int status = QLA_ERROR;
     uint8_t reset_chip = 0;
+    uint32_t dev_state;
+    unsigned long wait;
 
     /* Stall incoming I/O until we are done */
     scsi_block_requests(ha->host);
@@ -2464,8 +2480,29 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
      * or if stop_firmware fails for ISP-82xx.
      * This is the default case for ISP-4xxx */
     if (!is_qla8022(ha) || reset_chip) {
+        if (!is_qla8022(ha))
+            goto chip_reset;
+
+        /* Check if 82XX firmware is alive or not
+         * We may have arrived here from NEED_RESET
+         * detection only */
+        if (test_bit(AF_FW_RECOVERY, &ha->flags))
+            goto chip_reset;
+
+        wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
+        while (time_before(jiffies, wait)) {
+            if (qla4_8xxx_check_fw_alive(ha)) {
+                qla4xxx_mailbox_premature_completion(ha);
+                break;
+            }
+
+            set_current_state(TASK_UNINTERRUPTIBLE);
+            schedule_timeout(HZ);
+        }
+
         if (!test_bit(AF_FW_RECOVERY, &ha->flags))
             qla4xxx_cmd_wait(ha);
+chip_reset:
         qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
         qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
         DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -2501,6 +2538,25 @@ recover_ha_init_adapter:
      * Since we don't want to block the DPC for too long
      * with multiple resets in the same thread,
      * utilize DPC to retry */
+    if (is_qla8022(ha)) {
+        qla4_8xxx_idc_lock(ha);
+        dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+        qla4_8xxx_idc_unlock(ha);
+        if (dev_state == QLA82XX_DEV_FAILED) {
+            ql4_printk(KERN_INFO, ha, "%s: don't retry "
+                "recover adapter. H/W is in Failed "
+                "state\n", __func__);
+            qla4xxx_dead_adapter_cleanup(ha);
+            clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+            clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+            clear_bit(DPC_RESET_HA_FW_CONTEXT,
+                &ha->dpc_flags);
+            status = QLA_ERROR;
+
+            goto exit_recover;
+        }
+    }
+
     if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
         ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
         DEBUG2(printk("scsi%ld: recover adapter - retrying "
@@ -2539,6 +2595,7 @@ recover_ha_init_adapter:
         clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
     }
 
+exit_recover:
     ha->adapter_error_count++;
 
     if (test_bit(AF_ONLINE, &ha->flags))
@@ -2806,6 +2863,7 @@ dpc_post_reset_ha:
  **/
 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
 {
+    qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
 
     if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
         /* Turn-off interrupts on the card. */
@@ -4816,6 +4874,20 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
 }
 
 /**
+ * qla4xxx_is_eh_active - check if error handler is running
+ * @shost: Pointer to SCSI Host struct
+ *
+ * This routine checks whether the host reset was invoked from the EH
+ * path or from an application such as sg_reset
+ **/
+static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
+{
+    if (shost->shost_state == SHOST_RECOVERY)
+        return 1;
+    return 0;
+}
+
+/**
  * qla4xxx_eh_host_reset - kernel callback
  * @cmd: Pointer to Linux's SCSI command structure
  *
@@ -4832,6 +4904,11 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
 	if (ql4xdontresethba) {
 		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
 			      ha->host_no, __func__));
+
+		/* Clear outstanding srb in queues */
+		if (qla4xxx_is_eh_active(cmd->device->host))
+			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
+
 		return FAILED;
 	}
 
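A note on the ql4xdontresethba path above: the midlayer moves the host to SHOST_RECOVERY before invoking the EH callbacks, which is what lets qla4xxx_is_eh_active() tell an EH-driven reset from one issued by a tool such as sg_reset. Below is a minimal sketch of the same pattern for a generic LLD, not code from this patch; dont_reset_hba and my_abort_active_cmds() are hypothetical stand-ins.

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static bool dont_reset_hba;	/* hypothetical "don't reset" module param */

/* hypothetical helper that completes all outstanding commands */
static void my_abort_active_cmds(struct Scsi_Host *shost, int result);

static int my_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *shost = cmd->device->host;

	if (dont_reset_hba) {
		/* SHOST_RECOVERY means we were called from the SCSI EH,
		 * so flushing the queues before refusing is safe. */
		if (shost->shost_state == SHOST_RECOVERY)
			my_abort_active_cmds(shost, DID_ABORT << 16);
		return FAILED;
	}
	return SUCCESS;
}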
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 5254e57968f5..26a3fa34a33c 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.02.00-k9"
+#define QLA4XXX_DRIVER_VERSION	"5.02.00-k10"
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 2a588955423a..68eadd1c67fd 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,7 +45,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
 enum {
 	SCSI_DEVINFO_GLOBAL = 0,
 	SCSI_DEVINFO_SPI,
-	SCSI_DEVINFO_DH,
 };
 
 extern int scsi_get_device_flags(struct scsi_device *sdev,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e8447fbc31f3..cfd491437239 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1030,6 +1030,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
 		return NULL;
 
 	session->transport = transport;
+	session->creator = -1;
 	session->recovery_tmo = 120;
 	session->state = ISCSI_SESSION_FREE;
 	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
@@ -1634,8 +1635,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_event);
 
 static int
 iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
-			struct iscsi_uevent *ev, uint32_t initial_cmdsn,
-			uint16_t cmds_max, uint16_t queue_depth)
+			struct iscsi_uevent *ev, pid_t pid,
+			uint32_t initial_cmdsn, uint16_t cmds_max,
+			uint16_t queue_depth)
 {
 	struct iscsi_transport *transport = priv->iscsi_transport;
 	struct iscsi_cls_session *session;
@@ -1646,6 +1648,7 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
 	if (!session)
 		return -ENOMEM;
 
+	session->creator = pid;
 	shost = iscsi_session_to_shost(session);
 	ev->r.c_session_ret.host_no = shost->host_no;
 	ev->r.c_session_ret.sid = session->sid;
@@ -1938,6 +1941,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 	switch (nlh->nlmsg_type) {
 	case ISCSI_UEVENT_CREATE_SESSION:
 		err = iscsi_if_create_session(priv, ep, ev,
+					      NETLINK_CREDS(skb)->pid,
 					      ev->u.c_session.initial_cmdsn,
 					      ev->u.c_session.cmds_max,
 					      ev->u.c_session.queue_depth);
@@ -1950,6 +1954,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 		}
 
 		err = iscsi_if_create_session(priv, ep, ev,
+					NETLINK_CREDS(skb)->pid,
 					ev->u.c_bound_session.initial_cmdsn,
 					ev->u.c_bound_session.cmds_max,
 					ev->u.c_bound_session.queue_depth);
@@ -2298,6 +2303,15 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr,
 }
 static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
 			NULL);
+static ssize_t
+show_priv_session_creator(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+	return sprintf(buf, "%d\n", session->creator);
+}
+static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
+			NULL);
 
 #define iscsi_priv_session_attr_show(field, format)			\
 static ssize_t								\
@@ -2367,6 +2381,7 @@ static struct attribute *iscsi_session_attrs[] = {
 	&dev_attr_sess_targetalias.attr,
 	&dev_attr_priv_sess_recovery_tmo.attr,
 	&dev_attr_priv_sess_state.attr,
+	&dev_attr_priv_sess_creator.attr,
 	NULL,
 };
 
@@ -2424,6 +2439,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
 		return S_IRUGO | S_IWUSR;
 	else if (attr == &dev_attr_priv_sess_state.attr)
 		return S_IRUGO;
+	else if (attr == &dev_attr_priv_sess_creator.attr)
+		return S_IRUGO;
 	else {
 		WARN_ONCE(1, "Invalid session attr");
 		return 0;
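The new priv_sess attribute lands in sysfs as a per-session "creator" file, so userspace can attribute an iSCSI session to the process that created it (-1 means the session was created in the kernel). A hedged userspace sketch of a reader follows; the session1 path is an example only, a real tool would enumerate /sys/class/iscsi_session/.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/iscsi_session/session1/creator", "r");
	int pid;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &pid) == 1)
		printf("created by pid %d%s\n", pid,
		       pid == -1 ? " (kernel-created)" : "");
	fclose(f);
	return 0;
}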
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 5591ed54dc93..77273f2fdd80 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -185,7 +185,6 @@ typedef void (*activate_complete)(void *, int);
 struct scsi_device_handler {
 	/* Used by the infrastructure */
 	struct list_head list; /* list of scsi_device_handlers */
-	int idx;
 
 	/* Filled by the hardware handler */
 	struct module *module;
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 87f34c3d447d..2c3a46d102fd 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -211,6 +211,11 @@ struct iscsi_cls_session {
 	unsigned int target_id;
 	bool ida_used;
 
+	/*
+	 * pid of userspace process that created session or -1 if
+	 * created by the kernel.
+	 */
+	pid_t creator;
 	int state;
 	int sid;			/* session id */
 	void *dd_data;			/* LLD private data */
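For reference, the pid stored in the new creator field comes from the netlink message itself: NETLINK_CREDS() exposes the sender's credentials stashed in the skb's netlink control block, as used in the iscsi_if_recv_msg() hunks above. A minimal sketch of that accessor pattern:

#include <linux/netlink.h>
#include <linux/skbuff.h>

/* pid of the userspace process that sent this netlink request */
static pid_t netlink_sender_pid(struct sk_buff *skb)
{
	return NETLINK_CREDS(skb)->pid;
}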