aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c2
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c5
-rw-r--r--drivers/scsi/bfa/bfad_attr.c17
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h13
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c18
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c217
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c39
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c122
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c21
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h16
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h59
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c38
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c40
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c21
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c5
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c12
-rw-r--r--drivers/scsi/fcoe/Makefile2
-rw-r--r--drivers/scsi/fcoe/fcoe.c200
-rw-r--r--drivers/scsi/fcoe/fcoe.h8
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c159
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c832
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c13
-rw-r--r--drivers/scsi/libsas/sas_ata.c12
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c2
-rw-r--r--drivers/scsi/qla2xxx/Kconfig9
-rw-r--r--drivers/scsi/qla2xxx/Makefile3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c81
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h78
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c199
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c90
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c615
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c173
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c4963
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1003
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c1904
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h82
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c134
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h22
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h28
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c95
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c111
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c738
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h192
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c78
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_lib.c11
-rw-r--r--drivers/scsi/scsi_netlink.c7
-rw-r--r--drivers/scsi/scsi_pm.c5
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c9
-rw-r--r--drivers/scsi/scsi_wait_scan.c7
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c5
67 files changed, 12070 insertions, 722 deletions
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 532d212b6b2c..393e7ce8e95a 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
201 201
202 if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { 202 if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
203 resp->frame_len = le16_to_cpu(*(__le16 *)(r+6)); 203 resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
204 memcpy(&resp->ending_fis[0], r+16, 24); 204 memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
205 ts->buf_valid_size = sizeof(*resp); 205 ts->buf_valid_size = sizeof(*resp);
206 } 206 }
207 } 207 }
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 01bb04cd9e75..2a096795b9aa 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -571,13 +571,12 @@ free_cmd:
571static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd, 571static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
572 int iscsi_cmd, int size) 572 int iscsi_cmd, int size)
573{ 573{
574 cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size), 574 cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
575 &cmd->dma);
576 if (!cmd->va) { 575 if (!cmd->va) {
577 SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n"); 576 SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
578 return -ENOMEM; 577 return -ENOMEM;
579 } 578 }
580 memset(cmd->va, 0, sizeof(size)); 579 memset(cmd->va, 0, size);
581 cmd->size = size; 580 cmd->size = size;
582 be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size); 581 be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
583 return 0; 582 return 0;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 8b6c6bf7837e..b83927440171 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -426,6 +426,23 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
426 vshost = vport->drv_port.im_port->shost; 426 vshost = vport->drv_port.im_port->shost;
427 fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn); 427 fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
428 fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn); 428 fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
429 fc_host_supported_classes(vshost) = FC_COS_CLASS3;
430
431 memset(fc_host_supported_fc4s(vshost), 0,
432 sizeof(fc_host_supported_fc4s(vshost)));
433
434 /* For FCP type 0x08 */
435 if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
436 fc_host_supported_fc4s(vshost)[2] = 1;
437
438 /* For fibre channel services type 0x20 */
439 fc_host_supported_fc4s(vshost)[7] = 1;
440
441 fc_host_supported_speeds(vshost) =
442 bfad_im_supported_speeds(&bfad->bfa);
443 fc_host_maxframe_size(vshost) =
444 bfa_fcport_get_maxfrsize(&bfad->bfa);
445
429 fc_vport->dd_data = vport; 446 fc_vport->dd_data = vport;
430 vport->drv_port.im_port->fc_vport = fc_vport; 447 vport->drv_port.im_port->fc_vport = fc_vport;
431 } else if (rc == BFA_STATUS_INVALID_WWN) 448 } else if (rc == BFA_STATUS_INVALID_WWN)
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 3153923f5b60..1ac09afe35ee 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -987,7 +987,7 @@ done:
987 return 0; 987 return 0;
988} 988}
989 989
990static u32 990u32
991bfad_im_supported_speeds(struct bfa_s *bfa) 991bfad_im_supported_speeds(struct bfa_s *bfa)
992{ 992{
993 struct bfa_ioc_attr_s *ioc_attr; 993 struct bfa_ioc_attr_s *ioc_attr;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 0814367ef101..f6c1023e502a 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -37,6 +37,7 @@ int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
37 struct bfad_im_port_s *im_port, struct device *dev); 37 struct bfad_im_port_s *im_port, struct device *dev);
38void bfad_im_scsi_host_free(struct bfad_s *bfad, 38void bfad_im_scsi_host_free(struct bfad_s *bfad,
39 struct bfad_im_port_s *im_port); 39 struct bfad_im_port_s *im_port);
40u32 bfad_im_supported_speeds(struct bfa_s *bfa);
40 41
41#define MAX_FCP_TARGET 1024 42#define MAX_FCP_TARGET 1024
42#define MAX_FCP_LUN 16384 43#define MAX_FCP_LUN 16384
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index a4953ef9e53a..42969e8a45bd 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -59,10 +59,11 @@
59#include "57xx_hsi_bnx2fc.h" 59#include "57xx_hsi_bnx2fc.h"
60#include "bnx2fc_debug.h" 60#include "bnx2fc_debug.h"
61#include "../../net/ethernet/broadcom/cnic_if.h" 61#include "../../net/ethernet/broadcom/cnic_if.h"
62#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
62#include "bnx2fc_constants.h" 63#include "bnx2fc_constants.h"
63 64
64#define BNX2FC_NAME "bnx2fc" 65#define BNX2FC_NAME "bnx2fc"
65#define BNX2FC_VERSION "1.0.10" 66#define BNX2FC_VERSION "1.0.11"
66 67
67#define PFX "bnx2fc: " 68#define PFX "bnx2fc: "
68 69
@@ -84,6 +85,8 @@
84#define BNX2FC_NUM_MAX_SESS 1024 85#define BNX2FC_NUM_MAX_SESS 1024
85#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) 86#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
86 87
88#define BNX2FC_MAX_NPIV 256
89
87#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048 90#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048
88#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS 91#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS
89#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE 92#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE
@@ -206,6 +209,7 @@ struct bnx2fc_hba {
206 struct fcoe_statistics_params *stats_buffer; 209 struct fcoe_statistics_params *stats_buffer;
207 dma_addr_t stats_buf_dma; 210 dma_addr_t stats_buf_dma;
208 struct completion stat_req_done; 211 struct completion stat_req_done;
212 struct fcoe_capabilities fcoe_cap;
209 213
210 /*destroy handling */ 214 /*destroy handling */
211 struct timer_list destroy_timer; 215 struct timer_list destroy_timer;
@@ -228,13 +232,16 @@ struct bnx2fc_interface {
228 struct packet_type fip_packet_type; 232 struct packet_type fip_packet_type;
229 struct workqueue_struct *timer_work_queue; 233 struct workqueue_struct *timer_work_queue;
230 struct kref kref; 234 struct kref kref;
231 struct fcoe_ctlr ctlr;
232 u8 vlan_enabled; 235 u8 vlan_enabled;
233 int vlan_id; 236 int vlan_id;
234 bool enabled; 237 bool enabled;
235}; 238};
236 239
237#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr) 240#define bnx2fc_from_ctlr(x) \
241 ((struct bnx2fc_interface *)((x) + 1))
242
243#define bnx2fc_to_ctlr(x) \
244 ((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
238 245
239struct bnx2fc_lport { 246struct bnx2fc_lport {
240 struct list_head list; 247 struct list_head list;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index ce0ce3e32f33..bdbbb13b8534 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -854,7 +854,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
854 struct fc_exch *exch = fc_seq_exch(seq); 854 struct fc_exch *exch = fc_seq_exch(seq);
855 struct fc_lport *lport = exch->lp; 855 struct fc_lport *lport = exch->lp;
856 u8 *mac; 856 u8 *mac;
857 struct fc_frame_header *fh;
858 u8 op; 857 u8 op;
859 858
860 if (IS_ERR(fp)) 859 if (IS_ERR(fp))
@@ -862,13 +861,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
862 861
863 mac = fr_cb(fp)->granted_mac; 862 mac = fr_cb(fp)->granted_mac;
864 if (is_zero_ether_addr(mac)) { 863 if (is_zero_ether_addr(mac)) {
865 fh = fc_frame_header_get(fp);
866 if (fh->fh_type != FC_TYPE_ELS) {
867 printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
868 "fh_type != FC_TYPE_ELS\n");
869 fc_frame_free(fp);
870 return;
871 }
872 op = fc_frame_payload_op(fp); 864 op = fc_frame_payload_op(fp);
873 if (lport->vport) { 865 if (lport->vport) {
874 if (op == ELS_LS_RJT) { 866 if (op == ELS_LS_RJT) {
@@ -878,12 +870,10 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
878 return; 870 return;
879 } 871 }
880 } 872 }
881 if (fcoe_ctlr_recv_flogi(fip, lport, fp)) { 873 fcoe_ctlr_recv_flogi(fip, lport, fp);
882 fc_frame_free(fp);
883 return;
884 }
885 } 874 }
886 fip->update_mac(lport, mac); 875 if (!is_zero_ether_addr(mac))
876 fip->update_mac(lport, mac);
887done: 877done:
888 fc_lport_flogi_resp(seq, fp, lport); 878 fc_lport_flogi_resp(seq, fp, lport);
889} 879}
@@ -910,7 +900,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
910{ 900{
911 struct fcoe_port *port = lport_priv(lport); 901 struct fcoe_port *port = lport_priv(lport);
912 struct bnx2fc_interface *interface = port->priv; 902 struct bnx2fc_interface *interface = port->priv;
913 struct fcoe_ctlr *fip = &interface->ctlr; 903 struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
914 struct fc_frame_header *fh = fc_frame_header_get(fp); 904 struct fc_frame_header *fh = fc_frame_header_get(fp);
915 905
916 switch (op) { 906 switch (op) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index c1c6a92a0b98..05fe6620b3f0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
22 22
23#define DRV_MODULE_NAME "bnx2fc" 23#define DRV_MODULE_NAME "bnx2fc"
24#define DRV_MODULE_VERSION BNX2FC_VERSION 24#define DRV_MODULE_VERSION BNX2FC_VERSION
25#define DRV_MODULE_RELDATE "Jan 22, 2011" 25#define DRV_MODULE_RELDATE "Apr 24, 2012"
26 26
27 27
28static char version[] __devinitdata = 28static char version[] __devinitdata =
@@ -54,6 +54,7 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb;
54static struct libfc_function_template bnx2fc_libfc_fcn_templ; 54static struct libfc_function_template bnx2fc_libfc_fcn_templ;
55static struct scsi_host_template bnx2fc_shost_template; 55static struct scsi_host_template bnx2fc_shost_template;
56static struct fc_function_template bnx2fc_transport_function; 56static struct fc_function_template bnx2fc_transport_function;
57static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
57static struct fc_function_template bnx2fc_vport_xport_function; 58static struct fc_function_template bnx2fc_vport_xport_function;
58static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode); 59static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
59static void __bnx2fc_destroy(struct bnx2fc_interface *interface); 60static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
@@ -88,6 +89,7 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
88static void bnx2fc_stop(struct bnx2fc_interface *interface); 89static void bnx2fc_stop(struct bnx2fc_interface *interface);
89static int __init bnx2fc_mod_init(void); 90static int __init bnx2fc_mod_init(void);
90static void __exit bnx2fc_mod_exit(void); 91static void __exit bnx2fc_mod_exit(void);
92static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
91 93
92unsigned int bnx2fc_debug_level; 94unsigned int bnx2fc_debug_level;
93module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR); 95module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -118,6 +120,41 @@ static void bnx2fc_get_lesb(struct fc_lport *lport,
118 __fcoe_get_lesb(lport, fc_lesb, netdev); 120 __fcoe_get_lesb(lport, fc_lesb, netdev);
119} 121}
120 122
123static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
124{
125 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
126 struct net_device *netdev = bnx2fc_netdev(fip->lp);
127 struct fcoe_fc_els_lesb *fcoe_lesb;
128 struct fc_els_lesb fc_lesb;
129
130 __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
131 fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
132
133 ctlr_dev->lesb.lesb_link_fail =
134 ntohl(fcoe_lesb->lesb_link_fail);
135 ctlr_dev->lesb.lesb_vlink_fail =
136 ntohl(fcoe_lesb->lesb_vlink_fail);
137 ctlr_dev->lesb.lesb_miss_fka =
138 ntohl(fcoe_lesb->lesb_miss_fka);
139 ctlr_dev->lesb.lesb_symb_err =
140 ntohl(fcoe_lesb->lesb_symb_err);
141 ctlr_dev->lesb.lesb_err_block =
142 ntohl(fcoe_lesb->lesb_err_block);
143 ctlr_dev->lesb.lesb_fcs_error =
144 ntohl(fcoe_lesb->lesb_fcs_error);
145}
146EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
147
148static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
149{
150 struct fcoe_ctlr_device *ctlr_dev =
151 fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
152 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
153 struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
154
155 fcf_dev->vlan_id = fcoe->vlan_id;
156}
157
121static void bnx2fc_clean_rx_queue(struct fc_lport *lp) 158static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
122{ 159{
123 struct fcoe_percpu_s *bg; 160 struct fcoe_percpu_s *bg;
@@ -244,6 +281,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
244 struct sk_buff *skb; 281 struct sk_buff *skb;
245 struct fc_frame_header *fh; 282 struct fc_frame_header *fh;
246 struct bnx2fc_interface *interface; 283 struct bnx2fc_interface *interface;
284 struct fcoe_ctlr *ctlr;
247 struct bnx2fc_hba *hba; 285 struct bnx2fc_hba *hba;
248 struct fcoe_port *port; 286 struct fcoe_port *port;
249 struct fcoe_hdr *hp; 287 struct fcoe_hdr *hp;
@@ -256,6 +294,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
256 294
257 port = (struct fcoe_port *)lport_priv(lport); 295 port = (struct fcoe_port *)lport_priv(lport);
258 interface = port->priv; 296 interface = port->priv;
297 ctlr = bnx2fc_to_ctlr(interface);
259 hba = interface->hba; 298 hba = interface->hba;
260 299
261 fh = fc_frame_header_get(fp); 300 fh = fc_frame_header_get(fp);
@@ -268,12 +307,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
268 } 307 }
269 308
270 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { 309 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
271 if (!interface->ctlr.sel_fcf) { 310 if (!ctlr->sel_fcf) {
272 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); 311 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
273 kfree_skb(skb); 312 kfree_skb(skb);
274 return -EINVAL; 313 return -EINVAL;
275 } 314 }
276 if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb)) 315 if (fcoe_ctlr_els_send(ctlr, lport, skb))
277 return 0; 316 return 0;
278 } 317 }
279 318
@@ -346,14 +385,14 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
346 /* fill up mac and fcoe headers */ 385 /* fill up mac and fcoe headers */
347 eh = eth_hdr(skb); 386 eh = eth_hdr(skb);
348 eh->h_proto = htons(ETH_P_FCOE); 387 eh->h_proto = htons(ETH_P_FCOE);
349 if (interface->ctlr.map_dest) 388 if (ctlr->map_dest)
350 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); 389 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
351 else 390 else
352 /* insert GW address */ 391 /* insert GW address */
353 memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN); 392 memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
354 393
355 if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 394 if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
356 memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN); 395 memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
357 else 396 else
358 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); 397 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
359 398
@@ -403,6 +442,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
403{ 442{
404 struct fc_lport *lport; 443 struct fc_lport *lport;
405 struct bnx2fc_interface *interface; 444 struct bnx2fc_interface *interface;
445 struct fcoe_ctlr *ctlr;
406 struct fc_frame_header *fh; 446 struct fc_frame_header *fh;
407 struct fcoe_rcv_info *fr; 447 struct fcoe_rcv_info *fr;
408 struct fcoe_percpu_s *bg; 448 struct fcoe_percpu_s *bg;
@@ -410,7 +450,8 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
410 450
411 interface = container_of(ptype, struct bnx2fc_interface, 451 interface = container_of(ptype, struct bnx2fc_interface,
412 fcoe_packet_type); 452 fcoe_packet_type);
413 lport = interface->ctlr.lp; 453 ctlr = bnx2fc_to_ctlr(interface);
454 lport = ctlr->lp;
414 455
415 if (unlikely(lport == NULL)) { 456 if (unlikely(lport == NULL)) {
416 printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n"); 457 printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
@@ -758,11 +799,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
758{ 799{
759 struct bnx2fc_hba *hba; 800 struct bnx2fc_hba *hba;
760 struct bnx2fc_interface *interface; 801 struct bnx2fc_interface *interface;
802 struct fcoe_ctlr *ctlr;
761 struct fcoe_port *port; 803 struct fcoe_port *port;
762 u64 wwnn, wwpn; 804 u64 wwnn, wwpn;
763 805
764 port = lport_priv(lport); 806 port = lport_priv(lport);
765 interface = port->priv; 807 interface = port->priv;
808 ctlr = bnx2fc_to_ctlr(interface);
766 hba = interface->hba; 809 hba = interface->hba;
767 810
768 /* require support for get_pauseparam ethtool op. */ 811 /* require support for get_pauseparam ethtool op. */
@@ -781,13 +824,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
781 824
782 if (!lport->vport) { 825 if (!lport->vport) {
783 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) 826 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
784 wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 827 wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
785 1, 0); 828 1, 0);
786 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); 829 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
787 fc_set_wwnn(lport, wwnn); 830 fc_set_wwnn(lport, wwnn);
788 831
789 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) 832 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
790 wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 833 wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
791 2, 0); 834 2, 0);
792 835
793 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); 836 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
@@ -824,6 +867,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
824 struct fc_lport *lport; 867 struct fc_lport *lport;
825 struct fc_lport *vport; 868 struct fc_lport *vport;
826 struct bnx2fc_interface *interface, *tmp; 869 struct bnx2fc_interface *interface, *tmp;
870 struct fcoe_ctlr *ctlr;
827 int wait_for_upload = 0; 871 int wait_for_upload = 0;
828 u32 link_possible = 1; 872 u32 link_possible = 1;
829 873
@@ -874,7 +918,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
874 if (interface->hba != hba) 918 if (interface->hba != hba)
875 continue; 919 continue;
876 920
877 lport = interface->ctlr.lp; 921 ctlr = bnx2fc_to_ctlr(interface);
922 lport = ctlr->lp;
878 BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n", 923 BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
879 interface->netdev->name, event); 924 interface->netdev->name, event);
880 925
@@ -889,8 +934,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
889 * on a stale vlan 934 * on a stale vlan
890 */ 935 */
891 if (interface->enabled) 936 if (interface->enabled)
892 fcoe_ctlr_link_up(&interface->ctlr); 937 fcoe_ctlr_link_up(ctlr);
893 } else if (fcoe_ctlr_link_down(&interface->ctlr)) { 938 } else if (fcoe_ctlr_link_down(ctlr)) {
894 mutex_lock(&lport->lp_mutex); 939 mutex_lock(&lport->lp_mutex);
895 list_for_each_entry(vport, &lport->vports, list) 940 list_for_each_entry(vport, &lport->vports, list)
896 fc_host_port_type(vport->host) = 941 fc_host_port_type(vport->host) =
@@ -995,9 +1040,11 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
995 struct net_device *orig_dev) 1040 struct net_device *orig_dev)
996{ 1041{
997 struct bnx2fc_interface *interface; 1042 struct bnx2fc_interface *interface;
1043 struct fcoe_ctlr *ctlr;
998 interface = container_of(ptype, struct bnx2fc_interface, 1044 interface = container_of(ptype, struct bnx2fc_interface,
999 fip_packet_type); 1045 fip_packet_type);
1000 fcoe_ctlr_recv(&interface->ctlr, skb); 1046 ctlr = bnx2fc_to_ctlr(interface);
1047 fcoe_ctlr_recv(ctlr, skb);
1001 return 0; 1048 return 0;
1002} 1049}
1003 1050
@@ -1155,6 +1202,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
1155{ 1202{
1156 struct net_device *netdev = interface->netdev; 1203 struct net_device *netdev = interface->netdev;
1157 struct net_device *physdev = interface->hba->phys_dev; 1204 struct net_device *physdev = interface->hba->phys_dev;
1205 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1158 struct netdev_hw_addr *ha; 1206 struct netdev_hw_addr *ha;
1159 int sel_san_mac = 0; 1207 int sel_san_mac = 0;
1160 1208
@@ -1169,7 +1217,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
1169 1217
1170 if ((ha->type == NETDEV_HW_ADDR_T_SAN) && 1218 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
1171 (is_valid_ether_addr(ha->addr))) { 1219 (is_valid_ether_addr(ha->addr))) {
1172 memcpy(interface->ctlr.ctl_src_addr, ha->addr, 1220 memcpy(ctlr->ctl_src_addr, ha->addr,
1173 ETH_ALEN); 1221 ETH_ALEN);
1174 sel_san_mac = 1; 1222 sel_san_mac = 1;
1175 BNX2FC_MISC_DBG("Found SAN MAC\n"); 1223 BNX2FC_MISC_DBG("Found SAN MAC\n");
@@ -1224,19 +1272,23 @@ static void bnx2fc_release_transport(void)
1224 1272
1225static void bnx2fc_interface_release(struct kref *kref) 1273static void bnx2fc_interface_release(struct kref *kref)
1226{ 1274{
1275 struct fcoe_ctlr_device *ctlr_dev;
1227 struct bnx2fc_interface *interface; 1276 struct bnx2fc_interface *interface;
1277 struct fcoe_ctlr *ctlr;
1228 struct net_device *netdev; 1278 struct net_device *netdev;
1229 1279
1230 interface = container_of(kref, struct bnx2fc_interface, kref); 1280 interface = container_of(kref, struct bnx2fc_interface, kref);
1231 BNX2FC_MISC_DBG("Interface is being released\n"); 1281 BNX2FC_MISC_DBG("Interface is being released\n");
1232 1282
1283 ctlr = bnx2fc_to_ctlr(interface);
1284 ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
1233 netdev = interface->netdev; 1285 netdev = interface->netdev;
1234 1286
1235 /* tear-down FIP controller */ 1287 /* tear-down FIP controller */
1236 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags)) 1288 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
1237 fcoe_ctlr_destroy(&interface->ctlr); 1289 fcoe_ctlr_destroy(ctlr);
1238 1290
1239 kfree(interface); 1291 fcoe_ctlr_device_delete(ctlr_dev);
1240 1292
1241 dev_put(netdev); 1293 dev_put(netdev);
1242 module_put(THIS_MODULE); 1294 module_put(THIS_MODULE);
@@ -1274,6 +1326,7 @@ static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
1274static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) 1326static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1275{ 1327{
1276 struct bnx2fc_hba *hba; 1328 struct bnx2fc_hba *hba;
1329 struct fcoe_capabilities *fcoe_cap;
1277 int rc; 1330 int rc;
1278 1331
1279 hba = kzalloc(sizeof(*hba), GFP_KERNEL); 1332 hba = kzalloc(sizeof(*hba), GFP_KERNEL);
@@ -1309,6 +1362,21 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1309 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); 1362 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
1310 goto cmgr_err; 1363 goto cmgr_err;
1311 } 1364 }
1365 fcoe_cap = &hba->fcoe_cap;
1366
1367 fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES <<
1368 FCOE_IOS_PER_CONNECTION_SHIFT;
1369 fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
1370 FCOE_LOGINS_PER_PORT_SHIFT;
1371 fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS <<
1372 FCOE_NUMBER_OF_EXCHANGES_SHIFT;
1373 fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
1374 FCOE_NPIV_WWN_PER_PORT_SHIFT;
1375 fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
1376 FCOE_TARGETS_SUPPORTED_SHIFT;
1377 fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS <<
1378 FCOE_OUTSTANDING_COMMANDS_SHIFT;
1379 fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
1312 1380
1313 init_waitqueue_head(&hba->shutdown_wait); 1381 init_waitqueue_head(&hba->shutdown_wait);
1314 init_waitqueue_head(&hba->destroy_wait); 1382 init_waitqueue_head(&hba->destroy_wait);
@@ -1329,33 +1397,40 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
1329 struct net_device *netdev, 1397 struct net_device *netdev,
1330 enum fip_state fip_mode) 1398 enum fip_state fip_mode)
1331{ 1399{
1400 struct fcoe_ctlr_device *ctlr_dev;
1332 struct bnx2fc_interface *interface; 1401 struct bnx2fc_interface *interface;
1402 struct fcoe_ctlr *ctlr;
1403 int size;
1333 int rc = 0; 1404 int rc = 0;
1334 1405
1335 interface = kzalloc(sizeof(*interface), GFP_KERNEL); 1406 size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
1336 if (!interface) { 1407 ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
1408 size);
1409 if (!ctlr_dev) {
1337 printk(KERN_ERR PFX "Unable to allocate interface structure\n"); 1410 printk(KERN_ERR PFX "Unable to allocate interface structure\n");
1338 return NULL; 1411 return NULL;
1339 } 1412 }
1413 ctlr = fcoe_ctlr_device_priv(ctlr_dev);
1414 interface = fcoe_ctlr_priv(ctlr);
1340 dev_hold(netdev); 1415 dev_hold(netdev);
1341 kref_init(&interface->kref); 1416 kref_init(&interface->kref);
1342 interface->hba = hba; 1417 interface->hba = hba;
1343 interface->netdev = netdev; 1418 interface->netdev = netdev;
1344 1419
1345 /* Initialize FIP */ 1420 /* Initialize FIP */
1346 fcoe_ctlr_init(&interface->ctlr, fip_mode); 1421 fcoe_ctlr_init(ctlr, fip_mode);
1347 interface->ctlr.send = bnx2fc_fip_send; 1422 ctlr->send = bnx2fc_fip_send;
1348 interface->ctlr.update_mac = bnx2fc_update_src_mac; 1423 ctlr->update_mac = bnx2fc_update_src_mac;
1349 interface->ctlr.get_src_addr = bnx2fc_get_src_mac; 1424 ctlr->get_src_addr = bnx2fc_get_src_mac;
1350 set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags); 1425 set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
1351 1426
1352 rc = bnx2fc_interface_setup(interface); 1427 rc = bnx2fc_interface_setup(interface);
1353 if (!rc) 1428 if (!rc)
1354 return interface; 1429 return interface;
1355 1430
1356 fcoe_ctlr_destroy(&interface->ctlr); 1431 fcoe_ctlr_destroy(ctlr);
1357 dev_put(netdev); 1432 dev_put(netdev);
1358 kfree(interface); 1433 fcoe_ctlr_device_delete(ctlr_dev);
1359 return NULL; 1434 return NULL;
1360} 1435}
1361 1436
@@ -1373,6 +1448,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
1373static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, 1448static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1374 struct device *parent, int npiv) 1449 struct device *parent, int npiv)
1375{ 1450{
1451 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1376 struct fc_lport *lport, *n_port; 1452 struct fc_lport *lport, *n_port;
1377 struct fcoe_port *port; 1453 struct fcoe_port *port;
1378 struct Scsi_Host *shost; 1454 struct Scsi_Host *shost;
@@ -1383,7 +1459,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1383 1459
1384 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); 1460 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
1385 if (!blport) { 1461 if (!blport) {
1386 BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n"); 1462 BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
1387 return NULL; 1463 return NULL;
1388 } 1464 }
1389 1465
@@ -1479,7 +1555,8 @@ static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
1479 1555
1480static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface) 1556static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
1481{ 1557{
1482 struct fc_lport *lport = interface->ctlr.lp; 1558 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1559 struct fc_lport *lport = ctlr->lp;
1483 struct fcoe_port *port = lport_priv(lport); 1560 struct fcoe_port *port = lport_priv(lport);
1484 struct bnx2fc_hba *hba = interface->hba; 1561 struct bnx2fc_hba *hba = interface->hba;
1485 1562
@@ -1519,7 +1596,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1519 1596
1520static void __bnx2fc_destroy(struct bnx2fc_interface *interface) 1597static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
1521{ 1598{
1522 struct fc_lport *lport = interface->ctlr.lp; 1599 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1600 struct fc_lport *lport = ctlr->lp;
1523 struct fcoe_port *port = lport_priv(lport); 1601 struct fcoe_port *port = lport_priv(lport);
1524 1602
1525 bnx2fc_interface_cleanup(interface); 1603 bnx2fc_interface_cleanup(interface);
@@ -1543,13 +1621,15 @@ static int bnx2fc_destroy(struct net_device *netdev)
1543{ 1621{
1544 struct bnx2fc_interface *interface = NULL; 1622 struct bnx2fc_interface *interface = NULL;
1545 struct workqueue_struct *timer_work_queue; 1623 struct workqueue_struct *timer_work_queue;
1624 struct fcoe_ctlr *ctlr;
1546 int rc = 0; 1625 int rc = 0;
1547 1626
1548 rtnl_lock(); 1627 rtnl_lock();
1549 mutex_lock(&bnx2fc_dev_lock); 1628 mutex_lock(&bnx2fc_dev_lock);
1550 1629
1551 interface = bnx2fc_interface_lookup(netdev); 1630 interface = bnx2fc_interface_lookup(netdev);
1552 if (!interface || !interface->ctlr.lp) { 1631 ctlr = bnx2fc_to_ctlr(interface);
1632 if (!interface || !ctlr->lp) {
1553 rc = -ENODEV; 1633 rc = -ENODEV;
1554 printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n"); 1634 printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
1555 goto netdev_err; 1635 goto netdev_err;
@@ -1627,6 +1707,32 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
1627 hba->pcidev = NULL; 1707 hba->pcidev = NULL;
1628} 1708}
1629 1709
1710/**
1711 * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
1712 *
1713 * @handle: transport handle pointing to adapter struture
1714 */
1715static int bnx2fc_ulp_get_stats(void *handle)
1716{
1717 struct bnx2fc_hba *hba = handle;
1718 struct cnic_dev *cnic;
1719 struct fcoe_stats_info *stats_addr;
1720
1721 if (!hba)
1722 return -EINVAL;
1723
1724 cnic = hba->cnic;
1725 stats_addr = &cnic->stats_addr->fcoe_stat;
1726 if (!stats_addr)
1727 return -EINVAL;
1728
1729 strncpy(stats_addr->version, BNX2FC_VERSION,
1730 sizeof(stats_addr->version));
1731 stats_addr->txq_size = BNX2FC_SQ_WQES_MAX;
1732 stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX;
1733
1734 return 0;
1735}
1630 1736
1631 1737
1632/** 1738/**
@@ -1646,6 +1752,7 @@ static void bnx2fc_ulp_start(void *handle)
1646{ 1752{
1647 struct bnx2fc_hba *hba = handle; 1753 struct bnx2fc_hba *hba = handle;
1648 struct bnx2fc_interface *interface; 1754 struct bnx2fc_interface *interface;
1755 struct fcoe_ctlr *ctlr;
1649 struct fc_lport *lport; 1756 struct fc_lport *lport;
1650 1757
1651 mutex_lock(&bnx2fc_dev_lock); 1758 mutex_lock(&bnx2fc_dev_lock);
@@ -1657,7 +1764,8 @@ static void bnx2fc_ulp_start(void *handle)
1657 1764
1658 list_for_each_entry(interface, &if_list, list) { 1765 list_for_each_entry(interface, &if_list, list) {
1659 if (interface->hba == hba) { 1766 if (interface->hba == hba) {
1660 lport = interface->ctlr.lp; 1767 ctlr = bnx2fc_to_ctlr(interface);
1768 lport = ctlr->lp;
1661 /* Kick off Fabric discovery*/ 1769 /* Kick off Fabric discovery*/
1662 printk(KERN_ERR PFX "ulp_init: start discovery\n"); 1770 printk(KERN_ERR PFX "ulp_init: start discovery\n");
1663 lport->tt.frame_send = bnx2fc_xmit; 1771 lport->tt.frame_send = bnx2fc_xmit;
@@ -1677,13 +1785,14 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
1677 1785
1678static void bnx2fc_stop(struct bnx2fc_interface *interface) 1786static void bnx2fc_stop(struct bnx2fc_interface *interface)
1679{ 1787{
1788 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1680 struct fc_lport *lport; 1789 struct fc_lport *lport;
1681 struct fc_lport *vport; 1790 struct fc_lport *vport;
1682 1791
1683 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) 1792 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
1684 return; 1793 return;
1685 1794
1686 lport = interface->ctlr.lp; 1795 lport = ctlr->lp;
1687 bnx2fc_port_shutdown(lport); 1796 bnx2fc_port_shutdown(lport);
1688 1797
1689 mutex_lock(&lport->lp_mutex); 1798 mutex_lock(&lport->lp_mutex);
@@ -1692,7 +1801,7 @@ static void bnx2fc_stop(struct bnx2fc_interface *interface)
1692 FC_PORTTYPE_UNKNOWN; 1801 FC_PORTTYPE_UNKNOWN;
1693 mutex_unlock(&lport->lp_mutex); 1802 mutex_unlock(&lport->lp_mutex);
1694 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; 1803 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1695 fcoe_ctlr_link_down(&interface->ctlr); 1804 fcoe_ctlr_link_down(ctlr);
1696 fcoe_clean_pending_queue(lport); 1805 fcoe_clean_pending_queue(lport);
1697} 1806}
1698 1807
@@ -1804,6 +1913,7 @@ exit:
1804 1913
1805static void bnx2fc_start_disc(struct bnx2fc_interface *interface) 1914static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
1806{ 1915{
1916 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1807 struct fc_lport *lport; 1917 struct fc_lport *lport;
1808 int wait_cnt = 0; 1918 int wait_cnt = 0;
1809 1919
@@ -1814,18 +1924,18 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
1814 return; 1924 return;
1815 } 1925 }
1816 1926
1817 lport = interface->ctlr.lp; 1927 lport = ctlr->lp;
1818 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); 1928 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
1819 1929
1820 if (!bnx2fc_link_ok(lport) && interface->enabled) { 1930 if (!bnx2fc_link_ok(lport) && interface->enabled) {
1821 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); 1931 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
1822 fcoe_ctlr_link_up(&interface->ctlr); 1932 fcoe_ctlr_link_up(ctlr);
1823 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 1933 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1824 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); 1934 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
1825 } 1935 }
1826 1936
1827 /* wait for the FCF to be selected before issuing FLOGI */ 1937 /* wait for the FCF to be selected before issuing FLOGI */
1828 while (!interface->ctlr.sel_fcf) { 1938 while (!ctlr->sel_fcf) {
1829 msleep(250); 1939 msleep(250);
1830 /* give up after 3 secs */ 1940 /* give up after 3 secs */
1831 if (++wait_cnt > 12) 1941 if (++wait_cnt > 12)
@@ -1876,6 +1986,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1876 adapter_count++; 1986 adapter_count++;
1877 mutex_unlock(&bnx2fc_dev_lock); 1987 mutex_unlock(&bnx2fc_dev_lock);
1878 1988
1989 dev->fcoe_cap = &hba->fcoe_cap;
1879 clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); 1990 clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
1880 rc = dev->register_device(dev, CNIC_ULP_FCOE, 1991 rc = dev->register_device(dev, CNIC_ULP_FCOE,
1881 (void *) hba); 1992 (void *) hba);
@@ -1889,19 +2000,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1889static int bnx2fc_disable(struct net_device *netdev) 2000static int bnx2fc_disable(struct net_device *netdev)
1890{ 2001{
1891 struct bnx2fc_interface *interface; 2002 struct bnx2fc_interface *interface;
2003 struct fcoe_ctlr *ctlr;
1892 int rc = 0; 2004 int rc = 0;
1893 2005
1894 rtnl_lock(); 2006 rtnl_lock();
1895 mutex_lock(&bnx2fc_dev_lock); 2007 mutex_lock(&bnx2fc_dev_lock);
1896 2008
1897 interface = bnx2fc_interface_lookup(netdev); 2009 interface = bnx2fc_interface_lookup(netdev);
1898 if (!interface || !interface->ctlr.lp) { 2010 ctlr = bnx2fc_to_ctlr(interface);
2011 if (!interface || !ctlr->lp) {
1899 rc = -ENODEV; 2012 rc = -ENODEV;
1900 printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n"); 2013 printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
1901 } else { 2014 } else {
1902 interface->enabled = false; 2015 interface->enabled = false;
1903 fcoe_ctlr_link_down(&interface->ctlr); 2016 fcoe_ctlr_link_down(ctlr);
1904 fcoe_clean_pending_queue(interface->ctlr.lp); 2017 fcoe_clean_pending_queue(ctlr->lp);
1905 } 2018 }
1906 2019
1907 mutex_unlock(&bnx2fc_dev_lock); 2020 mutex_unlock(&bnx2fc_dev_lock);
@@ -1913,17 +2026,19 @@ static int bnx2fc_disable(struct net_device *netdev)
1913static int bnx2fc_enable(struct net_device *netdev) 2026static int bnx2fc_enable(struct net_device *netdev)
1914{ 2027{
1915 struct bnx2fc_interface *interface; 2028 struct bnx2fc_interface *interface;
2029 struct fcoe_ctlr *ctlr;
1916 int rc = 0; 2030 int rc = 0;
1917 2031
1918 rtnl_lock(); 2032 rtnl_lock();
1919 mutex_lock(&bnx2fc_dev_lock); 2033 mutex_lock(&bnx2fc_dev_lock);
1920 2034
1921 interface = bnx2fc_interface_lookup(netdev); 2035 interface = bnx2fc_interface_lookup(netdev);
1922 if (!interface || !interface->ctlr.lp) { 2036 ctlr = bnx2fc_to_ctlr(interface);
2037 if (!interface || !ctlr->lp) {
1923 rc = -ENODEV; 2038 rc = -ENODEV;
1924 printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); 2039 printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
1925 } else if (!bnx2fc_link_ok(interface->ctlr.lp)) { 2040 } else if (!bnx2fc_link_ok(ctlr->lp)) {
1926 fcoe_ctlr_link_up(&interface->ctlr); 2041 fcoe_ctlr_link_up(ctlr);
1927 interface->enabled = true; 2042 interface->enabled = true;
1928 } 2043 }
1929 2044
@@ -1944,6 +2059,7 @@ static int bnx2fc_enable(struct net_device *netdev)
1944 */ 2059 */
1945static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) 2060static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1946{ 2061{
2062 struct fcoe_ctlr *ctlr;
1947 struct bnx2fc_interface *interface; 2063 struct bnx2fc_interface *interface;
1948 struct bnx2fc_hba *hba; 2064 struct bnx2fc_hba *hba;
1949 struct net_device *phys_dev; 2065 struct net_device *phys_dev;
@@ -2010,6 +2126,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
2010 goto ifput_err; 2126 goto ifput_err;
2011 } 2127 }
2012 2128
2129 ctlr = bnx2fc_to_ctlr(interface);
2013 interface->vlan_id = vlan_id; 2130 interface->vlan_id = vlan_id;
2014 interface->vlan_enabled = 1; 2131 interface->vlan_enabled = 1;
2015 2132
@@ -2035,10 +2152,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
2035 lport->boot_time = jiffies; 2152 lport->boot_time = jiffies;
2036 2153
2037 /* Make this master N_port */ 2154 /* Make this master N_port */
2038 interface->ctlr.lp = lport; 2155 ctlr->lp = lport;
2039 2156
2040 if (!bnx2fc_link_ok(lport)) { 2157 if (!bnx2fc_link_ok(lport)) {
2041 fcoe_ctlr_link_up(&interface->ctlr); 2158 fcoe_ctlr_link_up(ctlr);
2042 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 2159 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
2043 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); 2160 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
2044 } 2161 }
@@ -2439,6 +2556,19 @@ static void __exit bnx2fc_mod_exit(void)
2439module_init(bnx2fc_mod_init); 2556module_init(bnx2fc_mod_init);
2440module_exit(bnx2fc_mod_exit); 2557module_exit(bnx2fc_mod_exit);
2441 2558
2559static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
2560 .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
2561 .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
2562 .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
2563 .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
2564 .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
2565 .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
2566 .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
2567
2568 .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
2569 .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
2570};
2571
2442static struct fc_function_template bnx2fc_transport_function = { 2572static struct fc_function_template bnx2fc_transport_function = {
2443 .show_host_node_name = 1, 2573 .show_host_node_name = 1,
2444 .show_host_port_name = 1, 2574 .show_host_port_name = 1,
@@ -2556,4 +2686,5 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb = {
2556 .cnic_stop = bnx2fc_ulp_stop, 2686 .cnic_stop = bnx2fc_ulp_stop,
2557 .indicate_kcqes = bnx2fc_indicate_kcqe, 2687 .indicate_kcqes = bnx2fc_indicate_kcqe,
2558 .indicate_netevent = bnx2fc_indicate_netevent, 2688 .indicate_netevent = bnx2fc_indicate_netevent,
2689 .cnic_get_stats = bnx2fc_ulp_get_stats,
2559}; 2690};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index afd570962b8c..2ca6bfe4ce5e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -167,6 +167,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
167{ 167{
168 struct fc_lport *lport = port->lport; 168 struct fc_lport *lport = port->lport;
169 struct bnx2fc_interface *interface = port->priv; 169 struct bnx2fc_interface *interface = port->priv;
170 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
170 struct bnx2fc_hba *hba = interface->hba; 171 struct bnx2fc_hba *hba = interface->hba;
171 struct kwqe *kwqe_arr[4]; 172 struct kwqe *kwqe_arr[4];
172 struct fcoe_kwqe_conn_offload1 ofld_req1; 173 struct fcoe_kwqe_conn_offload1 ofld_req1;
@@ -314,13 +315,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
314 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; 315 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
315 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; 316 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
316 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; 317 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
317 ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; 318 ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
318 /* fcf mac */ 319 /* fcf mac */
319 ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; 320 ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
320 ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; 321 ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
321 ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; 322 ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
322 ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; 323 ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
323 ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; 324 ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
324 325
325 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; 326 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
326 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); 327 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -351,6 +352,7 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
351{ 352{
352 struct kwqe *kwqe_arr[2]; 353 struct kwqe *kwqe_arr[2];
353 struct bnx2fc_interface *interface = port->priv; 354 struct bnx2fc_interface *interface = port->priv;
355 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
354 struct bnx2fc_hba *hba = interface->hba; 356 struct bnx2fc_hba *hba = interface->hba;
355 struct fcoe_kwqe_conn_enable_disable enbl_req; 357 struct fcoe_kwqe_conn_enable_disable enbl_req;
356 struct fc_lport *lport = port->lport; 358 struct fc_lport *lport = port->lport;
@@ -374,12 +376,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
374 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; 376 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
375 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); 377 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
376 378
377 enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; 379 enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
378 enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; 380 enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
379 enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; 381 enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
380 enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; 382 enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
381 enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; 383 enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
382 enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; 384 enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
383 385
384 port_id = fc_host_port_id(lport->host); 386 port_id = fc_host_port_id(lport->host);
385 if (port_id != tgt->sid) { 387 if (port_id != tgt->sid) {
@@ -419,6 +421,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
419 struct bnx2fc_rport *tgt) 421 struct bnx2fc_rport *tgt)
420{ 422{
421 struct bnx2fc_interface *interface = port->priv; 423 struct bnx2fc_interface *interface = port->priv;
424 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
422 struct bnx2fc_hba *hba = interface->hba; 425 struct bnx2fc_hba *hba = interface->hba;
423 struct fcoe_kwqe_conn_enable_disable disable_req; 426 struct fcoe_kwqe_conn_enable_disable disable_req;
424 struct kwqe *kwqe_arr[2]; 427 struct kwqe *kwqe_arr[2];
@@ -440,12 +443,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
440 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; 443 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
441 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; 444 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
442 445
443 disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; 446 disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
444 disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; 447 disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
445 disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; 448 disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
446 disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; 449 disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
447 disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; 450 disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
448 disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; 451 disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
449 452
450 port_id = tgt->sid; 453 port_id = tgt->sid;
451 disable_req.s_id[0] = (port_id & 0x000000FF); 454 disable_req.s_id[0] = (port_id & 0x000000FF);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e897ce975bb8..4f7453b9e41e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -810,8 +810,22 @@ retry_tmf:
810 spin_lock_bh(&tgt->tgt_lock); 810 spin_lock_bh(&tgt->tgt_lock);
811 811
812 io_req->wait_for_comp = 0; 812 io_req->wait_for_comp = 0;
813 if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) 813 if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
814 set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags); 814 set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
815 if (io_req->on_tmf_queue) {
816 list_del_init(&io_req->link);
817 io_req->on_tmf_queue = 0;
818 }
819 io_req->wait_for_comp = 1;
820 bnx2fc_initiate_cleanup(io_req);
821 spin_unlock_bh(&tgt->tgt_lock);
822 rc = wait_for_completion_timeout(&io_req->tm_done,
823 BNX2FC_FW_TIMEOUT);
824 spin_lock_bh(&tgt->tgt_lock);
825 io_req->wait_for_comp = 0;
826 if (!rc)
827 kref_put(&io_req->refcount, bnx2fc_cmd_release);
828 }
815 829
816 spin_unlock_bh(&tgt->tgt_lock); 830 spin_unlock_bh(&tgt->tgt_lock);
817 831
@@ -1089,6 +1103,48 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1089 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); 1103 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
1090} 1104}
1091 1105
1106int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
1107{
1108 struct bnx2fc_rport *tgt = io_req->tgt;
1109 struct fc_rport_priv *rdata = tgt->rdata;
1110 int logo_issued;
1111 int rc = SUCCESS;
1112 int wait_cnt = 0;
1113
1114 BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
1115 tgt->flags);
1116 logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
1117 &tgt->flags);
1118 io_req->wait_for_comp = 1;
1119 bnx2fc_initiate_cleanup(io_req);
1120
1121 spin_unlock_bh(&tgt->tgt_lock);
1122
1123 wait_for_completion(&io_req->tm_done);
1124
1125 io_req->wait_for_comp = 0;
1126 /*
1127 * release the reference taken in eh_abort to allow the
1128 * target to re-login after flushing IOs
1129 */
1130 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1131
1132 if (!logo_issued) {
1133 clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
1134 mutex_lock(&lport->disc.disc_mutex);
1135 lport->tt.rport_logoff(rdata);
1136 mutex_unlock(&lport->disc.disc_mutex);
1137 do {
1138 msleep(BNX2FC_RELOGIN_WAIT_TIME);
1139 if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
1140 rc = FAILED;
1141 break;
1142 }
1143 } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
1144 }
1145 spin_lock_bh(&tgt->tgt_lock);
1146 return rc;
1147}
1092/** 1148/**
1093 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding 1149 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
1094 * SCSI command 1150 * SCSI command
@@ -1103,10 +1159,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1103 struct fc_rport_libfc_priv *rp = rport->dd_data; 1159 struct fc_rport_libfc_priv *rp = rport->dd_data;
1104 struct bnx2fc_cmd *io_req; 1160 struct bnx2fc_cmd *io_req;
1105 struct fc_lport *lport; 1161 struct fc_lport *lport;
1106 struct fc_rport_priv *rdata;
1107 struct bnx2fc_rport *tgt; 1162 struct bnx2fc_rport *tgt;
1108 int logo_issued;
1109 int wait_cnt = 0;
1110 int rc = FAILED; 1163 int rc = FAILED;
1111 1164
1112 1165
@@ -1183,58 +1236,31 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1183 list_add_tail(&io_req->link, &tgt->io_retire_queue); 1236 list_add_tail(&io_req->link, &tgt->io_retire_queue);
1184 1237
1185 init_completion(&io_req->tm_done); 1238 init_completion(&io_req->tm_done);
1186 io_req->wait_for_comp = 1;
1187 1239
1188 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { 1240 if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
1189 /* Cancel the current timer running on this io_req */
1190 if (cancel_delayed_work(&io_req->timeout_work))
1191 kref_put(&io_req->refcount,
1192 bnx2fc_cmd_release); /* drop timer hold */
1193 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1194 rc = bnx2fc_initiate_abts(io_req);
1195 } else {
1196 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " 1241 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1197 "already in abts processing\n", io_req->xid); 1242 "already in abts processing\n", io_req->xid);
1198 if (cancel_delayed_work(&io_req->timeout_work)) 1243 if (cancel_delayed_work(&io_req->timeout_work))
1199 kref_put(&io_req->refcount, 1244 kref_put(&io_req->refcount,
1200 bnx2fc_cmd_release); /* drop timer hold */ 1245 bnx2fc_cmd_release); /* drop timer hold */
1201 bnx2fc_initiate_cleanup(io_req); 1246 rc = bnx2fc_expl_logo(lport, io_req);
1247 goto out;
1248 }
1202 1249
1250 /* Cancel the current timer running on this io_req */
1251 if (cancel_delayed_work(&io_req->timeout_work))
1252 kref_put(&io_req->refcount,
1253 bnx2fc_cmd_release); /* drop timer hold */
1254 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1255 io_req->wait_for_comp = 1;
1256 rc = bnx2fc_initiate_abts(io_req);
1257 if (rc == FAILED) {
1258 bnx2fc_initiate_cleanup(io_req);
1203 spin_unlock_bh(&tgt->tgt_lock); 1259 spin_unlock_bh(&tgt->tgt_lock);
1204
1205 wait_for_completion(&io_req->tm_done); 1260 wait_for_completion(&io_req->tm_done);
1206
1207 spin_lock_bh(&tgt->tgt_lock); 1261 spin_lock_bh(&tgt->tgt_lock);
1208 io_req->wait_for_comp = 0; 1262 io_req->wait_for_comp = 0;
1209 rdata = io_req->tgt->rdata; 1263 goto done;
1210 logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
1211 &tgt->flags);
1212 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1213 spin_unlock_bh(&tgt->tgt_lock);
1214
1215 if (!logo_issued) {
1216 BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
1217 tgt->flags);
1218 mutex_lock(&lport->disc.disc_mutex);
1219 lport->tt.rport_logoff(rdata);
1220 mutex_unlock(&lport->disc.disc_mutex);
1221 do {
1222 msleep(BNX2FC_RELOGIN_WAIT_TIME);
1223 /*
1224 * If session not recovered, let SCSI-ml
1225 * escalate error recovery.
1226 */
1227 if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
1228 return FAILED;
1229 } while (!test_bit(BNX2FC_FLAG_SESSION_READY,
1230 &tgt->flags));
1231 }
1232 return SUCCESS;
1233 }
1234 if (rc == FAILED) {
1235 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1236 spin_unlock_bh(&tgt->tgt_lock);
1237 return rc;
1238 } 1264 }
1239 spin_unlock_bh(&tgt->tgt_lock); 1265 spin_unlock_bh(&tgt->tgt_lock);
1240 1266
@@ -1247,7 +1273,8 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1247 /* Let the scsi-ml try to recover this command */ 1273 /* Let the scsi-ml try to recover this command */
1248 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", 1274 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
1249 io_req->xid); 1275 io_req->xid);
1250 rc = FAILED; 1276 rc = bnx2fc_expl_logo(lport, io_req);
1277 goto out;
1251 } else { 1278 } else {
1252 /* 1279 /*
1253 * We come here even when there was a race condition 1280 * We come here even when there was a race condition
@@ -1259,9 +1286,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1259 bnx2fc_scsi_done(io_req, DID_ABORT); 1286 bnx2fc_scsi_done(io_req, DID_ABORT);
1260 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1287 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1261 } 1288 }
1262 1289done:
1263 /* release the reference taken in eh_abort */ 1290 /* release the reference taken in eh_abort */
1264 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1291 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1292out:
1265 spin_unlock_bh(&tgt->tgt_lock); 1293 spin_unlock_bh(&tgt->tgt_lock);
1266 return rc; 1294 return rc;
1267} 1295}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index c1800b531270..082a25c3117e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -185,6 +185,16 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
185 BUG_ON(rc); 185 BUG_ON(rc);
186 } 186 }
187 187
188 list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
189 i++;
190 io_req = (struct bnx2fc_cmd *)list;
191 list_del_init(&io_req->link);
192 io_req->on_tmf_queue = 0;
193 BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
194 if (io_req->wait_for_comp)
195 complete(&io_req->tm_done);
196 }
197
188 list_for_each_safe(list, tmp, &tgt->els_queue) { 198 list_for_each_safe(list, tmp, &tgt->els_queue) {
189 i++; 199 i++;
190 io_req = (struct bnx2fc_cmd *)list; 200 io_req = (struct bnx2fc_cmd *)list;
@@ -213,8 +223,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
213 223
214 BNX2FC_IO_DBG(io_req, "retire_queue flush\n"); 224 BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
215 225
216 if (cancel_delayed_work(&io_req->timeout_work)) 226 if (cancel_delayed_work(&io_req->timeout_work)) {
227 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
228 &io_req->req_flags)) {
229 /* Handle eh_abort timeout */
230 BNX2FC_IO_DBG(io_req, "eh_abort for IO "
231 "in retire_q\n");
232 if (io_req->wait_for_comp)
233 complete(&io_req->tm_done);
234 }
217 kref_put(&io_req->refcount, bnx2fc_cmd_release); 235 kref_put(&io_req->refcount, bnx2fc_cmd_release);
236 }
218 237
219 clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); 238 clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
220 } 239 }
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index dc0a08e69c82..f2db5fe7bdc2 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -267,7 +267,13 @@ struct bnx2i_cmd_request {
267 * task statistics for write response 267 * task statistics for write response
268 */ 268 */
269struct bnx2i_write_resp_task_stat { 269struct bnx2i_write_resp_task_stat {
270 u32 num_data_ins; 270#if defined(__BIG_ENDIAN)
271 u16 num_r2ts;
272 u16 num_data_outs;
273#elif defined(__LITTLE_ENDIAN)
274 u16 num_data_outs;
275 u16 num_r2ts;
276#endif
271}; 277};
272 278
273/* 279/*
@@ -275,11 +281,11 @@ struct bnx2i_write_resp_task_stat {
275 */ 281 */
276struct bnx2i_read_resp_task_stat { 282struct bnx2i_read_resp_task_stat {
277#if defined(__BIG_ENDIAN) 283#if defined(__BIG_ENDIAN)
278 u16 num_data_outs; 284 u16 reserved;
279 u16 num_r2ts; 285 u16 num_data_ins;
280#elif defined(__LITTLE_ENDIAN) 286#elif defined(__LITTLE_ENDIAN)
281 u16 num_r2ts; 287 u16 num_data_ins;
282 u16 num_data_outs; 288 u16 reserved;
283#endif 289#endif
284}; 290};
285 291
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 0c53c28dc3d3..3f9e7061258e 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -44,6 +44,8 @@
44#include "57xx_iscsi_hsi.h" 44#include "57xx_iscsi_hsi.h"
45#include "57xx_iscsi_constants.h" 45#include "57xx_iscsi_constants.h"
46 46
47#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
48
47#define BNX2_ISCSI_DRIVER_NAME "bnx2i" 49#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
48 50
49#define BNX2I_MAX_ADAPTERS 8 51#define BNX2I_MAX_ADAPTERS 8
@@ -126,6 +128,43 @@
126#define REG_WR(__hba, offset, val) \ 128#define REG_WR(__hba, offset, val) \
127 writel(val, __hba->regview + offset) 129 writel(val, __hba->regview + offset)
128 130
131#ifdef CONFIG_32BIT
132#define GET_STATS_64(__hba, dst, field) \
133 do { \
134 spin_lock_bh(&__hba->stat_lock); \
135 dst->field##_lo = __hba->stats.field##_lo; \
136 dst->field##_hi = __hba->stats.field##_hi; \
137 spin_unlock_bh(&__hba->stat_lock); \
138 } while (0)
139
140#define ADD_STATS_64(__hba, field, len) \
141 do { \
142 if (spin_trylock(&__hba->stat_lock)) { \
143 if (__hba->stats.field##_lo + len < \
144 __hba->stats.field##_lo) \
145 __hba->stats.field##_hi++; \
146 __hba->stats.field##_lo += len; \
147 spin_unlock(&__hba->stat_lock); \
148 } \
149 } while (0)
150
151#else
152#define GET_STATS_64(__hba, dst, field) \
153 do { \
154 u64 val, *out; \
155 \
156 val = __hba->bnx2i_stats.field; \
157 out = (u64 *)&__hba->stats.field##_lo; \
158 *out = cpu_to_le64(val); \
159 out = (u64 *)&dst->field##_lo; \
160 *out = cpu_to_le64(val); \
161 } while (0)
162
163#define ADD_STATS_64(__hba, field, len) \
164 do { \
165 __hba->bnx2i_stats.field += len; \
166 } while (0)
167#endif
129 168
130/** 169/**
131 * struct generic_pdu_resc - login pdu resource structure 170 * struct generic_pdu_resc - login pdu resource structure
@@ -288,6 +327,15 @@ struct iscsi_cid_queue {
288 struct bnx2i_conn **conn_cid_tbl; 327 struct bnx2i_conn **conn_cid_tbl;
289}; 328};
290 329
330
331struct bnx2i_stats_info {
332 u64 rx_pdus;
333 u64 rx_bytes;
334 u64 tx_pdus;
335 u64 tx_bytes;
336};
337
338
291/** 339/**
292 * struct bnx2i_hba - bnx2i adapter structure 340 * struct bnx2i_hba - bnx2i adapter structure
293 * 341 *
@@ -341,6 +389,8 @@ struct iscsi_cid_queue {
341 * @ctx_ccell_tasks: captures number of ccells and tasks supported by 389 * @ctx_ccell_tasks: captures number of ccells and tasks supported by
342 * currently offloaded connection, used to decode 390 * currently offloaded connection, used to decode
343 * context memory 391 * context memory
392 * @stat_lock: spin lock used by the statistic collector (32 bit)
393 * @stats: local iSCSI statistic collection place holder
344 * 394 *
345 * Adapter Data Structure 395 * Adapter Data Structure
346 */ 396 */
@@ -350,6 +400,7 @@ struct bnx2i_hba {
350 struct pci_dev *pcidev; 400 struct pci_dev *pcidev;
351 struct net_device *netdev; 401 struct net_device *netdev;
352 void __iomem *regview; 402 void __iomem *regview;
403 resource_size_t reg_base;
353 404
354 u32 age; 405 u32 age;
355 unsigned long cnic_dev_type; 406 unsigned long cnic_dev_type;
@@ -426,6 +477,12 @@ struct bnx2i_hba {
426 u32 num_sess_opened; 477 u32 num_sess_opened;
427 u32 num_conn_opened; 478 u32 num_conn_opened;
428 unsigned int ctx_ccell_tasks; 479 unsigned int ctx_ccell_tasks;
480
481#ifdef CONFIG_32BIT
482 spinlock_t stat_lock;
483#endif
484 struct bnx2i_stats_info bnx2i_stats;
485 struct iscsi_stats_info stats;
429}; 486};
430 487
431 488
@@ -749,6 +806,8 @@ extern void bnx2i_ulp_init(struct cnic_dev *dev);
749extern void bnx2i_ulp_exit(struct cnic_dev *dev); 806extern void bnx2i_ulp_exit(struct cnic_dev *dev);
750extern void bnx2i_start(void *handle); 807extern void bnx2i_start(void *handle);
751extern void bnx2i_stop(void *handle); 808extern void bnx2i_stop(void *handle);
809extern int bnx2i_get_stats(void *handle);
810
752extern struct bnx2i_hba *get_adapter_list_head(void); 811extern struct bnx2i_hba *get_adapter_list_head(void);
753 812
754struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, 813struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index ece47e502282..33d6630529de 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1350,6 +1350,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1350 struct cqe *cqe) 1350 struct cqe *cqe)
1351{ 1351{
1352 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; 1352 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1353 struct bnx2i_hba *hba = bnx2i_conn->hba;
1353 struct bnx2i_cmd_response *resp_cqe; 1354 struct bnx2i_cmd_response *resp_cqe;
1354 struct bnx2i_cmd *bnx2i_cmd; 1355 struct bnx2i_cmd *bnx2i_cmd;
1355 struct iscsi_task *task; 1356 struct iscsi_task *task;
@@ -1367,16 +1368,26 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1367 1368
1368 if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) { 1369 if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
1369 conn->datain_pdus_cnt += 1370 conn->datain_pdus_cnt +=
1370 resp_cqe->task_stat.read_stat.num_data_outs; 1371 resp_cqe->task_stat.read_stat.num_data_ins;
1371 conn->rxdata_octets += 1372 conn->rxdata_octets +=
1372 bnx2i_cmd->req.total_data_transfer_length; 1373 bnx2i_cmd->req.total_data_transfer_length;
1374 ADD_STATS_64(hba, rx_pdus,
1375 resp_cqe->task_stat.read_stat.num_data_ins);
1376 ADD_STATS_64(hba, rx_bytes,
1377 bnx2i_cmd->req.total_data_transfer_length);
1373 } else { 1378 } else {
1374 conn->dataout_pdus_cnt += 1379 conn->dataout_pdus_cnt +=
1375 resp_cqe->task_stat.read_stat.num_data_outs; 1380 resp_cqe->task_stat.write_stat.num_data_outs;
1376 conn->r2t_pdus_cnt += 1381 conn->r2t_pdus_cnt +=
1377 resp_cqe->task_stat.read_stat.num_r2ts; 1382 resp_cqe->task_stat.write_stat.num_r2ts;
1378 conn->txdata_octets += 1383 conn->txdata_octets +=
1379 bnx2i_cmd->req.total_data_transfer_length; 1384 bnx2i_cmd->req.total_data_transfer_length;
1385 ADD_STATS_64(hba, tx_pdus,
1386 resp_cqe->task_stat.write_stat.num_data_outs);
1387 ADD_STATS_64(hba, tx_bytes,
1388 bnx2i_cmd->req.total_data_transfer_length);
1389 ADD_STATS_64(hba, rx_pdus,
1390 resp_cqe->task_stat.write_stat.num_r2ts);
1380 } 1391 }
1381 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd); 1392 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
1382 1393
@@ -1961,6 +1972,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1961{ 1972{
1962 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; 1973 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1963 struct iscsi_session *session = conn->session; 1974 struct iscsi_session *session = conn->session;
1975 struct bnx2i_hba *hba = bnx2i_conn->hba;
1964 struct qp_info *qp; 1976 struct qp_info *qp;
1965 struct bnx2i_nop_in_msg *nopin; 1977 struct bnx2i_nop_in_msg *nopin;
1966 int tgt_async_msg; 1978 int tgt_async_msg;
@@ -1973,7 +1985,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1973 1985
1974 if (!qp->cq_virt) { 1986 if (!qp->cq_virt) {
1975 printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!", 1987 printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
1976 bnx2i_conn->hba->netdev->name); 1988 hba->netdev->name);
1977 goto out; 1989 goto out;
1978 } 1990 }
1979 while (1) { 1991 while (1) {
@@ -1985,9 +1997,9 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1985 if (nopin->op_code == ISCSI_OP_NOOP_IN && 1997 if (nopin->op_code == ISCSI_OP_NOOP_IN &&
1986 nopin->itt == (u16) RESERVED_ITT) { 1998 nopin->itt == (u16) RESERVED_ITT) {
1987 printk(KERN_ALERT "bnx2i: Unsolicited " 1999 printk(KERN_ALERT "bnx2i: Unsolicited "
1988 "NOP-In detected for suspended " 2000 "NOP-In detected for suspended "
1989 "connection dev=%s!\n", 2001 "connection dev=%s!\n",
1990 bnx2i_conn->hba->netdev->name); 2002 hba->netdev->name);
1991 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); 2003 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1992 goto cqe_out; 2004 goto cqe_out;
1993 } 2005 }
@@ -2001,7 +2013,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
2001 /* Run the kthread engine only for data cmds 2013 /* Run the kthread engine only for data cmds
2002 All other cmds will be completed in this bh! */ 2014 All other cmds will be completed in this bh! */
2003 bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin); 2015 bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
2004 break; 2016 goto done;
2005 case ISCSI_OP_LOGIN_RSP: 2017 case ISCSI_OP_LOGIN_RSP:
2006 bnx2i_process_login_resp(session, bnx2i_conn, 2018 bnx2i_process_login_resp(session, bnx2i_conn,
2007 qp->cq_cons_qe); 2019 qp->cq_cons_qe);
@@ -2044,11 +2056,15 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
2044 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", 2056 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
2045 nopin->op_code); 2057 nopin->op_code);
2046 } 2058 }
2059
2060 ADD_STATS_64(hba, rx_pdus, 1);
2061 ADD_STATS_64(hba, rx_bytes, nopin->data_length);
2062done:
2047 if (!tgt_async_msg) { 2063 if (!tgt_async_msg) {
2048 if (!atomic_read(&bnx2i_conn->ep->num_active_cmds)) 2064 if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
2049 printk(KERN_ALERT "bnx2i (%s): no active cmd! " 2065 printk(KERN_ALERT "bnx2i (%s): no active cmd! "
2050 "op 0x%x\n", 2066 "op 0x%x\n",
2051 bnx2i_conn->hba->netdev->name, 2067 hba->netdev->name,
2052 nopin->op_code); 2068 nopin->op_code);
2053 else 2069 else
2054 atomic_dec(&bnx2i_conn->ep->num_active_cmds); 2070 atomic_dec(&bnx2i_conn->ep->num_active_cmds);
@@ -2692,6 +2708,7 @@ struct cnic_ulp_ops bnx2i_cnic_cb = {
2692 .cm_remote_close = bnx2i_cm_remote_close, 2708 .cm_remote_close = bnx2i_cm_remote_close,
2693 .cm_remote_abort = bnx2i_cm_remote_abort, 2709 .cm_remote_abort = bnx2i_cm_remote_abort,
2694 .iscsi_nl_send_msg = bnx2i_send_nl_mesg, 2710 .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
2711 .cnic_get_stats = bnx2i_get_stats,
2695 .owner = THIS_MODULE 2712 .owner = THIS_MODULE
2696}; 2713};
2697 2714
@@ -2724,7 +2741,6 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2724 goto arm_cq; 2741 goto arm_cq;
2725 } 2742 }
2726 2743
2727 reg_base = ep->hba->netdev->base_addr;
2728 if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) && 2744 if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
2729 (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) { 2745 (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
2730 config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2); 2746 config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
@@ -2740,7 +2756,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2740 /* 5709 device in normal node and 5706/5708 devices */ 2756 /* 5709 device in normal node and 5706/5708 devices */
2741 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); 2757 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
2742 2758
2743 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 2759 ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
2744 MB_KERNEL_CTX_SIZE); 2760 MB_KERNEL_CTX_SIZE);
2745 if (!ep->qp.ctx_base) 2761 if (!ep->qp.ctx_base)
2746 return -ENOMEM; 2762 return -ENOMEM;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 8b6816706ee5..b17637aab9a7 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -381,6 +381,46 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
381 381
382 382
383/** 383/**
384 * bnx2i_get_stats - Retrieve various statistic from iSCSI offload
385 * @handle: bnx2i_hba
386 *
387 * function callback exported via bnx2i - cnic driver interface to
388 * retrieve various iSCSI offload related statistics.
389 */
390int bnx2i_get_stats(void *handle)
391{
392 struct bnx2i_hba *hba = handle;
393 struct iscsi_stats_info *stats;
394
395 if (!hba)
396 return -EINVAL;
397
398 stats = (struct iscsi_stats_info *)hba->cnic->stats_addr;
399
400 if (!stats)
401 return -ENOMEM;
402
403 strlcpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version));
404 memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN);
405
406 stats->max_frame_size = hba->netdev->mtu;
407 stats->txq_size = hba->max_sqes;
408 stats->rxq_size = hba->max_cqes;
409
410 stats->txq_avg_depth = 0;
411 stats->rxq_avg_depth = 0;
412
413 GET_STATS_64(hba, stats, rx_pdus);
414 GET_STATS_64(hba, stats, rx_bytes);
415
416 GET_STATS_64(hba, stats, tx_pdus);
417 GET_STATS_64(hba, stats, tx_bytes);
418
419 return 0;
420}
421
422
423/**
384 * bnx2i_percpu_thread_create - Create a receive thread for an 424 * bnx2i_percpu_thread_create - Create a receive thread for an
385 * online CPU 425 * online CPU
386 * 426 *
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f8d516b53161..3b34c13e2f02 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -811,13 +811,13 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
811 bnx2i_identify_device(hba); 811 bnx2i_identify_device(hba);
812 bnx2i_setup_host_queue_size(hba, shost); 812 bnx2i_setup_host_queue_size(hba, shost);
813 813
814 hba->reg_base = pci_resource_start(hba->pcidev, 0);
814 if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { 815 if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
815 hba->regview = ioremap_nocache(hba->netdev->base_addr, 816 hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
816 BNX2_MQ_CONFIG2);
817 if (!hba->regview) 817 if (!hba->regview)
818 goto ioreg_map_err; 818 goto ioreg_map_err;
819 } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { 819 } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
820 hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096); 820 hba->regview = pci_iomap(hba->pcidev, 0, 4096);
821 if (!hba->regview) 821 if (!hba->regview)
822 goto ioreg_map_err; 822 goto ioreg_map_err;
823 } 823 }
@@ -874,6 +874,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
874 hba->conn_ctx_destroy_tmo = 2 * HZ; 874 hba->conn_ctx_destroy_tmo = 2 * HZ;
875 } 875 }
876 876
877#ifdef CONFIG_32BIT
878 spin_lock_init(&hba->stat_lock);
879#endif
880 memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
881
877 if (iscsi_host_add(shost, &hba->pcidev->dev)) 882 if (iscsi_host_add(shost, &hba->pcidev->dev))
878 goto free_dump_mem; 883 goto free_dump_mem;
879 return hba; 884 return hba;
@@ -884,7 +889,7 @@ cid_que_err:
884 bnx2i_free_mp_bdt(hba); 889 bnx2i_free_mp_bdt(hba);
885mp_bdt_mem_err: 890mp_bdt_mem_err:
886 if (hba->regview) { 891 if (hba->regview) {
887 iounmap(hba->regview); 892 pci_iounmap(hba->pcidev, hba->regview);
888 hba->regview = NULL; 893 hba->regview = NULL;
889 } 894 }
890ioreg_map_err: 895ioreg_map_err:
@@ -910,7 +915,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
910 pci_dev_put(hba->pcidev); 915 pci_dev_put(hba->pcidev);
911 916
912 if (hba->regview) { 917 if (hba->regview) {
913 iounmap(hba->regview); 918 pci_iounmap(hba->pcidev, hba->regview);
914 hba->regview = NULL; 919 hba->regview = NULL;
915 } 920 }
916 bnx2i_free_mp_bdt(hba); 921 bnx2i_free_mp_bdt(hba);
@@ -1181,12 +1186,18 @@ static int
1181bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) 1186bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1182{ 1187{
1183 struct bnx2i_conn *bnx2i_conn = conn->dd_data; 1188 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1189 struct bnx2i_hba *hba = bnx2i_conn->hba;
1184 struct bnx2i_cmd *cmd = task->dd_data; 1190 struct bnx2i_cmd *cmd = task->dd_data;
1185 1191
1186 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); 1192 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1187 1193
1188 bnx2i_setup_cmd_wqe_template(cmd); 1194 bnx2i_setup_cmd_wqe_template(cmd);
1189 bnx2i_conn->gen_pdu.req_buf_size = task->data_count; 1195 bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1196
1197 /* Tx PDU/data length count */
1198 ADD_STATS_64(hba, tx_pdus, 1);
1199 ADD_STATS_64(hba, tx_bytes, task->data_count);
1200
1190 if (task->data_count) { 1201 if (task->data_count) {
1191 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, 1202 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1192 task->data_count); 1203 task->data_count);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 36739da8bc15..49692a1ac44a 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,8 @@ static int init_act_open(struct cxgbi_sock *csk)
966 csk->saddr.sin_addr.s_addr = chba->ipv4addr; 966 csk->saddr.sin_addr.s_addr = chba->ipv4addr;
967 967
968 csk->rss_qid = 0; 968 csk->rss_qid = 0;
969 csk->l2t = t3_l2t_get(t3dev, dst, ndev); 969 csk->l2t = t3_l2t_get(t3dev, dst, ndev,
970 &csk->daddr.sin_addr.s_addr);
970 if (!csk->l2t) { 971 if (!csk->l2t) {
971 pr_err("NO l2t available.\n"); 972 pr_err("NO l2t available.\n");
972 return -EINVAL; 973 return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a4a3bfc60cf..cc9a06897f34 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1142,7 +1142,7 @@ static int init_act_open(struct cxgbi_sock *csk)
1142 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); 1142 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1143 cxgbi_sock_get(csk); 1143 cxgbi_sock_get(csk);
1144 1144
1145 n = dst_get_neighbour_noref(csk->dst); 1145 n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
1146 if (!n) { 1146 if (!n) {
1147 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); 1147 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1148 goto rel_resource; 1148 goto rel_resource;
@@ -1182,9 +1182,12 @@ static int init_act_open(struct cxgbi_sock *csk)
1182 1182
1183 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); 1183 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1184 send_act_open_req(csk, skb, csk->l2t); 1184 send_act_open_req(csk, skb, csk->l2t);
1185 neigh_release(n);
1185 return 0; 1186 return 0;
1186 1187
1187rel_resource: 1188rel_resource:
1189 if (n)
1190 neigh_release(n);
1188 if (skb) 1191 if (skb)
1189 __kfree_skb(skb); 1192 __kfree_skb(skb);
1190 return -EINVAL; 1193 return -EINVAL;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d9253db1d0e2..b44c1cff3114 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -494,7 +494,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
494 goto err_out; 494 goto err_out;
495 } 495 }
496 dst = &rt->dst; 496 dst = &rt->dst;
497 n = dst_get_neighbour_noref(dst); 497 n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
498 if (!n) { 498 if (!n) {
499 err = -ENODEV; 499 err = -ENODEV;
500 goto rel_rt; 500 goto rel_rt;
@@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
506 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), 506 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
507 ndev->name); 507 ndev->name);
508 err = -ENETUNREACH; 508 err = -ENETUNREACH;
509 goto rel_rt; 509 goto rel_neigh;
510 } 510 }
511 511
512 if (ndev->flags & IFF_LOOPBACK) { 512 if (ndev->flags & IFF_LOOPBACK) {
@@ -521,7 +521,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
521 pr_info("dst %pI4, %s, NOT cxgbi device.\n", 521 pr_info("dst %pI4, %s, NOT cxgbi device.\n",
522 &daddr->sin_addr.s_addr, ndev->name); 522 &daddr->sin_addr.s_addr, ndev->name);
523 err = -ENETUNREACH; 523 err = -ENETUNREACH;
524 goto rel_rt; 524 goto rel_neigh;
525 } 525 }
526 log_debug(1 << CXGBI_DBG_SOCK, 526 log_debug(1 << CXGBI_DBG_SOCK,
527 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", 527 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
@@ -531,7 +531,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
531 csk = cxgbi_sock_create(cdev); 531 csk = cxgbi_sock_create(cdev);
532 if (!csk) { 532 if (!csk) {
533 err = -ENOMEM; 533 err = -ENOMEM;
534 goto rel_rt; 534 goto rel_neigh;
535 } 535 }
536 csk->cdev = cdev; 536 csk->cdev = cdev;
537 csk->port_id = port; 537 csk->port_id = port;
@@ -541,9 +541,13 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
541 csk->daddr.sin_port = daddr->sin_port; 541 csk->daddr.sin_port = daddr->sin_port;
542 csk->daddr.sin_family = daddr->sin_family; 542 csk->daddr.sin_family = daddr->sin_family;
543 csk->saddr.sin_addr.s_addr = fl4.saddr; 543 csk->saddr.sin_addr.s_addr = fl4.saddr;
544 neigh_release(n);
544 545
545 return csk; 546 return csk;
546 547
548rel_neigh:
549 neigh_release(n);
550
547rel_rt: 551rel_rt:
548 ip_rt_put(rt); 552 ip_rt_put(rt);
549 if (csk) 553 if (csk)
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index f6d37d0271f7..aed0f5db3668 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_FCOE) += fcoe.o 1obj-$(CONFIG_FCOE) += fcoe.o
2obj-$(CONFIG_LIBFCOE) += libfcoe.o 2obj-$(CONFIG_LIBFCOE) += libfcoe.o
3 3
4libfcoe-objs := fcoe_ctlr.o fcoe_transport.o 4libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 76e3d0b5bfa6..fe30b1b65e1d 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -41,6 +41,7 @@
41 41
42#include <scsi/fc/fc_encaps.h> 42#include <scsi/fc/fc_encaps.h>
43#include <scsi/fc/fc_fip.h> 43#include <scsi/fc/fc_fip.h>
44#include <scsi/fc/fc_fcoe.h>
44 45
45#include <scsi/libfc.h> 46#include <scsi/libfc.h>
46#include <scsi/fc_frame.h> 47#include <scsi/fc_frame.h>
@@ -150,6 +151,21 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
150static int fcoe_vport_disable(struct fc_vport *, bool disable); 151static int fcoe_vport_disable(struct fc_vport *, bool disable);
151static void fcoe_set_vport_symbolic_name(struct fc_vport *); 152static void fcoe_set_vport_symbolic_name(struct fc_vport *);
152static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); 153static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
154static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
155static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
156
157static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
158 .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
159 .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
160 .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
161 .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
162 .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
163 .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
164 .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
165
166 .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
167 .get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
168};
153 169
154static struct libfc_function_template fcoe_libfc_fcn_templ = { 170static struct libfc_function_template fcoe_libfc_fcn_templ = {
155 .frame_send = fcoe_xmit, 171 .frame_send = fcoe_xmit,
@@ -282,7 +298,7 @@ static struct scsi_host_template fcoe_shost_template = {
282static int fcoe_interface_setup(struct fcoe_interface *fcoe, 298static int fcoe_interface_setup(struct fcoe_interface *fcoe,
283 struct net_device *netdev) 299 struct net_device *netdev)
284{ 300{
285 struct fcoe_ctlr *fip = &fcoe->ctlr; 301 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
286 struct netdev_hw_addr *ha; 302 struct netdev_hw_addr *ha;
287 struct net_device *real_dev; 303 struct net_device *real_dev;
288 u8 flogi_maddr[ETH_ALEN]; 304 u8 flogi_maddr[ETH_ALEN];
@@ -366,7 +382,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
366static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, 382static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
367 enum fip_state fip_mode) 383 enum fip_state fip_mode)
368{ 384{
385 struct fcoe_ctlr_device *ctlr_dev;
386 struct fcoe_ctlr *ctlr;
369 struct fcoe_interface *fcoe; 387 struct fcoe_interface *fcoe;
388 int size;
370 int err; 389 int err;
371 390
372 if (!try_module_get(THIS_MODULE)) { 391 if (!try_module_get(THIS_MODULE)) {
@@ -376,27 +395,32 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
376 goto out; 395 goto out;
377 } 396 }
378 397
379 fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); 398 size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
380 if (!fcoe) { 399 ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
381 FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n"); 400 size);
401 if (!ctlr_dev) {
402 FCOE_DBG("Failed to add fcoe_ctlr_device\n");
382 fcoe = ERR_PTR(-ENOMEM); 403 fcoe = ERR_PTR(-ENOMEM);
383 goto out_putmod; 404 goto out_putmod;
384 } 405 }
385 406
407 ctlr = fcoe_ctlr_device_priv(ctlr_dev);
408 fcoe = fcoe_ctlr_priv(ctlr);
409
386 dev_hold(netdev); 410 dev_hold(netdev);
387 411
388 /* 412 /*
389 * Initialize FIP. 413 * Initialize FIP.
390 */ 414 */
391 fcoe_ctlr_init(&fcoe->ctlr, fip_mode); 415 fcoe_ctlr_init(ctlr, fip_mode);
392 fcoe->ctlr.send = fcoe_fip_send; 416 ctlr->send = fcoe_fip_send;
393 fcoe->ctlr.update_mac = fcoe_update_src_mac; 417 ctlr->update_mac = fcoe_update_src_mac;
394 fcoe->ctlr.get_src_addr = fcoe_get_src_mac; 418 ctlr->get_src_addr = fcoe_get_src_mac;
395 419
396 err = fcoe_interface_setup(fcoe, netdev); 420 err = fcoe_interface_setup(fcoe, netdev);
397 if (err) { 421 if (err) {
398 fcoe_ctlr_destroy(&fcoe->ctlr); 422 fcoe_ctlr_destroy(ctlr);
399 kfree(fcoe); 423 fcoe_ctlr_device_delete(ctlr_dev);
400 dev_put(netdev); 424 dev_put(netdev);
401 fcoe = ERR_PTR(err); 425 fcoe = ERR_PTR(err);
402 goto out_putmod; 426 goto out_putmod;
@@ -419,7 +443,7 @@ out:
419static void fcoe_interface_remove(struct fcoe_interface *fcoe) 443static void fcoe_interface_remove(struct fcoe_interface *fcoe)
420{ 444{
421 struct net_device *netdev = fcoe->netdev; 445 struct net_device *netdev = fcoe->netdev;
422 struct fcoe_ctlr *fip = &fcoe->ctlr; 446 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
423 u8 flogi_maddr[ETH_ALEN]; 447 u8 flogi_maddr[ETH_ALEN];
424 const struct net_device_ops *ops; 448 const struct net_device_ops *ops;
425 449
@@ -462,7 +486,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
462static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) 486static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
463{ 487{
464 struct net_device *netdev = fcoe->netdev; 488 struct net_device *netdev = fcoe->netdev;
465 struct fcoe_ctlr *fip = &fcoe->ctlr; 489 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
490 struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
466 491
467 rtnl_lock(); 492 rtnl_lock();
468 if (!fcoe->removed) 493 if (!fcoe->removed)
@@ -472,8 +497,8 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
472 /* Release the self-reference taken during fcoe_interface_create() */ 497 /* Release the self-reference taken during fcoe_interface_create() */
473 /* tear-down the FCoE controller */ 498 /* tear-down the FCoE controller */
474 fcoe_ctlr_destroy(fip); 499 fcoe_ctlr_destroy(fip);
475 scsi_host_put(fcoe->ctlr.lp->host); 500 scsi_host_put(fip->lp->host);
476 kfree(fcoe); 501 fcoe_ctlr_device_delete(ctlr_dev);
477 dev_put(netdev); 502 dev_put(netdev);
478 module_put(THIS_MODULE); 503 module_put(THIS_MODULE);
479} 504}
@@ -493,9 +518,11 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
493 struct net_device *orig_dev) 518 struct net_device *orig_dev)
494{ 519{
495 struct fcoe_interface *fcoe; 520 struct fcoe_interface *fcoe;
521 struct fcoe_ctlr *ctlr;
496 522
497 fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type); 523 fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
498 fcoe_ctlr_recv(&fcoe->ctlr, skb); 524 ctlr = fcoe_to_ctlr(fcoe);
525 fcoe_ctlr_recv(ctlr, skb);
499 return 0; 526 return 0;
500} 527}
501 528
@@ -645,11 +672,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
645 u32 mfs; 672 u32 mfs;
646 u64 wwnn, wwpn; 673 u64 wwnn, wwpn;
647 struct fcoe_interface *fcoe; 674 struct fcoe_interface *fcoe;
675 struct fcoe_ctlr *ctlr;
648 struct fcoe_port *port; 676 struct fcoe_port *port;
649 677
650 /* Setup lport private data to point to fcoe softc */ 678 /* Setup lport private data to point to fcoe softc */
651 port = lport_priv(lport); 679 port = lport_priv(lport);
652 fcoe = port->priv; 680 fcoe = port->priv;
681 ctlr = fcoe_to_ctlr(fcoe);
653 682
654 /* 683 /*
655 * Determine max frame size based on underlying device and optional 684 * Determine max frame size based on underlying device and optional
@@ -676,10 +705,10 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
676 705
677 if (!lport->vport) { 706 if (!lport->vport) {
678 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) 707 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
679 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); 708 wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
680 fc_set_wwnn(lport, wwnn); 709 fc_set_wwnn(lport, wwnn);
681 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) 710 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
682 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 711 wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
683 2, 0); 712 2, 0);
684 fc_set_wwpn(lport, wwpn); 713 fc_set_wwpn(lport, wwpn);
685 } 714 }
@@ -1056,6 +1085,7 @@ static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
1056static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, 1085static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
1057 struct device *parent, int npiv) 1086 struct device *parent, int npiv)
1058{ 1087{
1088 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
1059 struct net_device *netdev = fcoe->netdev; 1089 struct net_device *netdev = fcoe->netdev;
1060 struct fc_lport *lport, *n_port; 1090 struct fc_lport *lport, *n_port;
1061 struct fcoe_port *port; 1091 struct fcoe_port *port;
@@ -1119,7 +1149,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
1119 } 1149 }
1120 1150
1121 /* Initialize the library */ 1151 /* Initialize the library */
1122 rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1); 1152 rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
1123 if (rc) { 1153 if (rc) {
1124 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " 1154 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
1125 "interface\n"); 1155 "interface\n");
@@ -1386,6 +1416,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1386{ 1416{
1387 struct fc_lport *lport; 1417 struct fc_lport *lport;
1388 struct fcoe_rcv_info *fr; 1418 struct fcoe_rcv_info *fr;
1419 struct fcoe_ctlr *ctlr;
1389 struct fcoe_interface *fcoe; 1420 struct fcoe_interface *fcoe;
1390 struct fc_frame_header *fh; 1421 struct fc_frame_header *fh;
1391 struct fcoe_percpu_s *fps; 1422 struct fcoe_percpu_s *fps;
@@ -1393,7 +1424,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1393 unsigned int cpu; 1424 unsigned int cpu;
1394 1425
1395 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); 1426 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
1396 lport = fcoe->ctlr.lp; 1427 ctlr = fcoe_to_ctlr(fcoe);
1428 lport = ctlr->lp;
1397 if (unlikely(!lport)) { 1429 if (unlikely(!lport)) {
1398 FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); 1430 FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
1399 goto err2; 1431 goto err2;
@@ -1409,8 +1441,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1409 1441
1410 eh = eth_hdr(skb); 1442 eh = eth_hdr(skb);
1411 1443
1412 if (is_fip_mode(&fcoe->ctlr) && 1444 if (is_fip_mode(ctlr) &&
1413 compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) { 1445 compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
1414 FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n", 1446 FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
1415 eh->h_source); 1447 eh->h_source);
1416 goto err; 1448 goto err;
@@ -1544,6 +1576,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1544 unsigned int elen; /* eth header, may include vlan */ 1576 unsigned int elen; /* eth header, may include vlan */
1545 struct fcoe_port *port = lport_priv(lport); 1577 struct fcoe_port *port = lport_priv(lport);
1546 struct fcoe_interface *fcoe = port->priv; 1578 struct fcoe_interface *fcoe = port->priv;
1579 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
1547 u8 sof, eof; 1580 u8 sof, eof;
1548 struct fcoe_hdr *hp; 1581 struct fcoe_hdr *hp;
1549 1582
@@ -1559,7 +1592,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1559 } 1592 }
1560 1593
1561 if (unlikely(fh->fh_type == FC_TYPE_ELS) && 1594 if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
1562 fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb)) 1595 fcoe_ctlr_els_send(ctlr, lport, skb))
1563 return 0; 1596 return 0;
1564 1597
1565 sof = fr_sof(fp); 1598 sof = fr_sof(fp);
@@ -1623,12 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1623 /* fill up mac and fcoe headers */ 1656 /* fill up mac and fcoe headers */
1624 eh = eth_hdr(skb); 1657 eh = eth_hdr(skb);
1625 eh->h_proto = htons(ETH_P_FCOE); 1658 eh->h_proto = htons(ETH_P_FCOE);
1626 memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN); 1659 memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
1627 if (fcoe->ctlr.map_dest) 1660 if (ctlr->map_dest)
1628 memcpy(eh->h_dest + 3, fh->fh_d_id, 3); 1661 memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
1629 1662
1630 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 1663 if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
1631 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN); 1664 memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
1632 else 1665 else
1633 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); 1666 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
1634 1667
@@ -1677,6 +1710,7 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
1677static inline int fcoe_filter_frames(struct fc_lport *lport, 1710static inline int fcoe_filter_frames(struct fc_lport *lport,
1678 struct fc_frame *fp) 1711 struct fc_frame *fp)
1679{ 1712{
1713 struct fcoe_ctlr *ctlr;
1680 struct fcoe_interface *fcoe; 1714 struct fcoe_interface *fcoe;
1681 struct fc_frame_header *fh; 1715 struct fc_frame_header *fh;
1682 struct sk_buff *skb = (struct sk_buff *)fp; 1716 struct sk_buff *skb = (struct sk_buff *)fp;
@@ -1698,7 +1732,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
1698 return 0; 1732 return 0;
1699 1733
1700 fcoe = ((struct fcoe_port *)lport_priv(lport))->priv; 1734 fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
1701 if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO && 1735 ctlr = fcoe_to_ctlr(fcoe);
1736 if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
1702 ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { 1737 ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
1703 FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n"); 1738 FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
1704 return -EINVAL; 1739 return -EINVAL;
@@ -1877,6 +1912,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1877 ulong event, void *ptr) 1912 ulong event, void *ptr)
1878{ 1913{
1879 struct dcb_app_type *entry = ptr; 1914 struct dcb_app_type *entry = ptr;
1915 struct fcoe_ctlr *ctlr;
1880 struct fcoe_interface *fcoe; 1916 struct fcoe_interface *fcoe;
1881 struct net_device *netdev; 1917 struct net_device *netdev;
1882 struct fcoe_port *port; 1918 struct fcoe_port *port;
@@ -1894,6 +1930,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1894 if (!fcoe) 1930 if (!fcoe)
1895 return NOTIFY_OK; 1931 return NOTIFY_OK;
1896 1932
1933 ctlr = fcoe_to_ctlr(fcoe);
1934
1897 if (entry->dcbx & DCB_CAP_DCBX_VER_CEE) 1935 if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
1898 prio = ffs(entry->app.priority) - 1; 1936 prio = ffs(entry->app.priority) - 1;
1899 else 1937 else
@@ -1904,10 +1942,10 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1904 1942
1905 if (entry->app.protocol == ETH_P_FIP || 1943 if (entry->app.protocol == ETH_P_FIP ||
1906 entry->app.protocol == ETH_P_FCOE) 1944 entry->app.protocol == ETH_P_FCOE)
1907 fcoe->ctlr.priority = prio; 1945 ctlr->priority = prio;
1908 1946
1909 if (entry->app.protocol == ETH_P_FCOE) { 1947 if (entry->app.protocol == ETH_P_FCOE) {
1910 port = lport_priv(fcoe->ctlr.lp); 1948 port = lport_priv(ctlr->lp);
1911 port->priority = prio; 1949 port->priority = prio;
1912 } 1950 }
1913 1951
@@ -1929,6 +1967,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1929{ 1967{
1930 struct fc_lport *lport = NULL; 1968 struct fc_lport *lport = NULL;
1931 struct net_device *netdev = ptr; 1969 struct net_device *netdev = ptr;
1970 struct fcoe_ctlr *ctlr;
1932 struct fcoe_interface *fcoe; 1971 struct fcoe_interface *fcoe;
1933 struct fcoe_port *port; 1972 struct fcoe_port *port;
1934 struct fcoe_dev_stats *stats; 1973 struct fcoe_dev_stats *stats;
@@ -1938,7 +1977,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1938 1977
1939 list_for_each_entry(fcoe, &fcoe_hostlist, list) { 1978 list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1940 if (fcoe->netdev == netdev) { 1979 if (fcoe->netdev == netdev) {
1941 lport = fcoe->ctlr.lp; 1980 ctlr = fcoe_to_ctlr(fcoe);
1981 lport = ctlr->lp;
1942 break; 1982 break;
1943 } 1983 }
1944 } 1984 }
@@ -1967,7 +2007,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1967 break; 2007 break;
1968 case NETDEV_UNREGISTER: 2008 case NETDEV_UNREGISTER:
1969 list_del(&fcoe->list); 2009 list_del(&fcoe->list);
1970 port = lport_priv(fcoe->ctlr.lp); 2010 port = lport_priv(ctlr->lp);
1971 queue_work(fcoe_wq, &port->destroy_work); 2011 queue_work(fcoe_wq, &port->destroy_work);
1972 goto out; 2012 goto out;
1973 break; 2013 break;
@@ -1982,8 +2022,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1982 fcoe_link_speed_update(lport); 2022 fcoe_link_speed_update(lport);
1983 2023
1984 if (link_possible && !fcoe_link_ok(lport)) 2024 if (link_possible && !fcoe_link_ok(lport))
1985 fcoe_ctlr_link_up(&fcoe->ctlr); 2025 fcoe_ctlr_link_up(ctlr);
1986 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) { 2026 else if (fcoe_ctlr_link_down(ctlr)) {
1987 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 2027 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1988 stats->LinkFailureCount++; 2028 stats->LinkFailureCount++;
1989 put_cpu(); 2029 put_cpu();
@@ -2003,6 +2043,7 @@ out:
2003 */ 2043 */
2004static int fcoe_disable(struct net_device *netdev) 2044static int fcoe_disable(struct net_device *netdev)
2005{ 2045{
2046 struct fcoe_ctlr *ctlr;
2006 struct fcoe_interface *fcoe; 2047 struct fcoe_interface *fcoe;
2007 int rc = 0; 2048 int rc = 0;
2008 2049
@@ -2013,8 +2054,9 @@ static int fcoe_disable(struct net_device *netdev)
2013 rtnl_unlock(); 2054 rtnl_unlock();
2014 2055
2015 if (fcoe) { 2056 if (fcoe) {
2016 fcoe_ctlr_link_down(&fcoe->ctlr); 2057 ctlr = fcoe_to_ctlr(fcoe);
2017 fcoe_clean_pending_queue(fcoe->ctlr.lp); 2058 fcoe_ctlr_link_down(ctlr);
2059 fcoe_clean_pending_queue(ctlr->lp);
2018 } else 2060 } else
2019 rc = -ENODEV; 2061 rc = -ENODEV;
2020 2062
@@ -2032,6 +2074,7 @@ static int fcoe_disable(struct net_device *netdev)
2032 */ 2074 */
2033static int fcoe_enable(struct net_device *netdev) 2075static int fcoe_enable(struct net_device *netdev)
2034{ 2076{
2077 struct fcoe_ctlr *ctlr;
2035 struct fcoe_interface *fcoe; 2078 struct fcoe_interface *fcoe;
2036 int rc = 0; 2079 int rc = 0;
2037 2080
@@ -2040,11 +2083,17 @@ static int fcoe_enable(struct net_device *netdev)
2040 fcoe = fcoe_hostlist_lookup_port(netdev); 2083 fcoe = fcoe_hostlist_lookup_port(netdev);
2041 rtnl_unlock(); 2084 rtnl_unlock();
2042 2085
2043 if (!fcoe) 2086 if (!fcoe) {
2044 rc = -ENODEV; 2087 rc = -ENODEV;
2045 else if (!fcoe_link_ok(fcoe->ctlr.lp)) 2088 goto out;
2046 fcoe_ctlr_link_up(&fcoe->ctlr); 2089 }
2090
2091 ctlr = fcoe_to_ctlr(fcoe);
2092
2093 if (!fcoe_link_ok(ctlr->lp))
2094 fcoe_ctlr_link_up(ctlr);
2047 2095
2096out:
2048 mutex_unlock(&fcoe_config_mutex); 2097 mutex_unlock(&fcoe_config_mutex);
2049 return rc; 2098 return rc;
2050} 2099}
@@ -2059,6 +2108,7 @@ static int fcoe_enable(struct net_device *netdev)
2059 */ 2108 */
2060static int fcoe_destroy(struct net_device *netdev) 2109static int fcoe_destroy(struct net_device *netdev)
2061{ 2110{
2111 struct fcoe_ctlr *ctlr;
2062 struct fcoe_interface *fcoe; 2112 struct fcoe_interface *fcoe;
2063 struct fc_lport *lport; 2113 struct fc_lport *lport;
2064 struct fcoe_port *port; 2114 struct fcoe_port *port;
@@ -2071,7 +2121,8 @@ static int fcoe_destroy(struct net_device *netdev)
2071 rc = -ENODEV; 2121 rc = -ENODEV;
2072 goto out_nodev; 2122 goto out_nodev;
2073 } 2123 }
2074 lport = fcoe->ctlr.lp; 2124 ctlr = fcoe_to_ctlr(fcoe);
2125 lport = ctlr->lp;
2075 port = lport_priv(lport); 2126 port = lport_priv(lport);
2076 list_del(&fcoe->list); 2127 list_del(&fcoe->list);
2077 queue_work(fcoe_wq, &port->destroy_work); 2128 queue_work(fcoe_wq, &port->destroy_work);
@@ -2126,7 +2177,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2126 int dcbx; 2177 int dcbx;
2127 u8 fup, up; 2178 u8 fup, up;
2128 struct net_device *netdev = fcoe->realdev; 2179 struct net_device *netdev = fcoe->realdev;
2129 struct fcoe_port *port = lport_priv(fcoe->ctlr.lp); 2180 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2181 struct fcoe_port *port = lport_priv(ctlr->lp);
2130 struct dcb_app app = { 2182 struct dcb_app app = {
2131 .priority = 0, 2183 .priority = 0,
2132 .protocol = ETH_P_FCOE 2184 .protocol = ETH_P_FCOE
@@ -2149,7 +2201,7 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2149 } 2201 }
2150 2202
2151 port->priority = ffs(up) ? ffs(up) - 1 : 0; 2203 port->priority = ffs(up) ? ffs(up) - 1 : 0;
2152 fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority; 2204 ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
2153 } 2205 }
2154#endif 2206#endif
2155} 2207}
@@ -2166,6 +2218,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2166static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) 2218static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2167{ 2219{
2168 int rc = 0; 2220 int rc = 0;
2221 struct fcoe_ctlr_device *ctlr_dev;
2222 struct fcoe_ctlr *ctlr;
2169 struct fcoe_interface *fcoe; 2223 struct fcoe_interface *fcoe;
2170 struct fc_lport *lport; 2224 struct fc_lport *lport;
2171 2225
@@ -2184,7 +2238,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2184 goto out_nodev; 2238 goto out_nodev;
2185 } 2239 }
2186 2240
2187 lport = fcoe_if_create(fcoe, &netdev->dev, 0); 2241 ctlr = fcoe_to_ctlr(fcoe);
2242 ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
2243 lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
2188 if (IS_ERR(lport)) { 2244 if (IS_ERR(lport)) {
2189 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 2245 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
2190 netdev->name); 2246 netdev->name);
@@ -2195,7 +2251,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2195 } 2251 }
2196 2252
2197 /* Make this the "master" N_Port */ 2253 /* Make this the "master" N_Port */
2198 fcoe->ctlr.lp = lport; 2254 ctlr->lp = lport;
2199 2255
2200 /* setup DCB priority attributes. */ 2256 /* setup DCB priority attributes. */
2201 fcoe_dcb_create(fcoe); 2257 fcoe_dcb_create(fcoe);
@@ -2208,7 +2264,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2208 fc_fabric_login(lport); 2264 fc_fabric_login(lport);
2209 if (!fcoe_link_ok(lport)) { 2265 if (!fcoe_link_ok(lport)) {
2210 rtnl_unlock(); 2266 rtnl_unlock();
2211 fcoe_ctlr_link_up(&fcoe->ctlr); 2267 fcoe_ctlr_link_up(ctlr);
2212 mutex_unlock(&fcoe_config_mutex); 2268 mutex_unlock(&fcoe_config_mutex);
2213 return rc; 2269 return rc;
2214 } 2270 }
@@ -2320,11 +2376,12 @@ static int fcoe_reset(struct Scsi_Host *shost)
2320 struct fc_lport *lport = shost_priv(shost); 2376 struct fc_lport *lport = shost_priv(shost);
2321 struct fcoe_port *port = lport_priv(lport); 2377 struct fcoe_port *port = lport_priv(lport);
2322 struct fcoe_interface *fcoe = port->priv; 2378 struct fcoe_interface *fcoe = port->priv;
2379 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2323 2380
2324 fcoe_ctlr_link_down(&fcoe->ctlr); 2381 fcoe_ctlr_link_down(ctlr);
2325 fcoe_clean_pending_queue(fcoe->ctlr.lp); 2382 fcoe_clean_pending_queue(ctlr->lp);
2326 if (!fcoe_link_ok(fcoe->ctlr.lp)) 2383 if (!fcoe_link_ok(ctlr->lp))
2327 fcoe_ctlr_link_up(&fcoe->ctlr); 2384 fcoe_ctlr_link_up(ctlr);
2328 return 0; 2385 return 0;
2329} 2386}
2330 2387
@@ -2359,10 +2416,12 @@ fcoe_hostlist_lookup_port(const struct net_device *netdev)
2359 */ 2416 */
2360static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) 2417static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
2361{ 2418{
2419 struct fcoe_ctlr *ctlr;
2362 struct fcoe_interface *fcoe; 2420 struct fcoe_interface *fcoe;
2363 2421
2364 fcoe = fcoe_hostlist_lookup_port(netdev); 2422 fcoe = fcoe_hostlist_lookup_port(netdev);
2365 return (fcoe) ? fcoe->ctlr.lp : NULL; 2423 ctlr = fcoe_to_ctlr(fcoe);
2424 return (fcoe) ? ctlr->lp : NULL;
2366} 2425}
2367 2426
2368/** 2427/**
@@ -2466,6 +2525,7 @@ module_init(fcoe_init);
2466static void __exit fcoe_exit(void) 2525static void __exit fcoe_exit(void)
2467{ 2526{
2468 struct fcoe_interface *fcoe, *tmp; 2527 struct fcoe_interface *fcoe, *tmp;
2528 struct fcoe_ctlr *ctlr;
2469 struct fcoe_port *port; 2529 struct fcoe_port *port;
2470 unsigned int cpu; 2530 unsigned int cpu;
2471 2531
@@ -2477,7 +2537,8 @@ static void __exit fcoe_exit(void)
2477 rtnl_lock(); 2537 rtnl_lock();
2478 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) { 2538 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2479 list_del(&fcoe->list); 2539 list_del(&fcoe->list);
2480 port = lport_priv(fcoe->ctlr.lp); 2540 ctlr = fcoe_to_ctlr(fcoe);
2541 port = lport_priv(ctlr->lp);
2481 queue_work(fcoe_wq, &port->destroy_work); 2542 queue_work(fcoe_wq, &port->destroy_work);
2482 } 2543 }
2483 rtnl_unlock(); 2544 rtnl_unlock();
@@ -2573,7 +2634,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
2573{ 2634{
2574 struct fcoe_port *port = lport_priv(lport); 2635 struct fcoe_port *port = lport_priv(lport);
2575 struct fcoe_interface *fcoe = port->priv; 2636 struct fcoe_interface *fcoe = port->priv;
2576 struct fcoe_ctlr *fip = &fcoe->ctlr; 2637 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
2577 struct fc_frame_header *fh = fc_frame_header_get(fp); 2638 struct fc_frame_header *fh = fc_frame_header_get(fp);
2578 2639
2579 switch (op) { 2640 switch (op) {
@@ -2730,6 +2791,40 @@ static void fcoe_get_lesb(struct fc_lport *lport,
2730 __fcoe_get_lesb(lport, fc_lesb, netdev); 2791 __fcoe_get_lesb(lport, fc_lesb, netdev);
2731} 2792}
2732 2793
2794static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
2795{
2796 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
2797 struct net_device *netdev = fcoe_netdev(fip->lp);
2798 struct fcoe_fc_els_lesb *fcoe_lesb;
2799 struct fc_els_lesb fc_lesb;
2800
2801 __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
2802 fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
2803
2804 ctlr_dev->lesb.lesb_link_fail =
2805 ntohl(fcoe_lesb->lesb_link_fail);
2806 ctlr_dev->lesb.lesb_vlink_fail =
2807 ntohl(fcoe_lesb->lesb_vlink_fail);
2808 ctlr_dev->lesb.lesb_miss_fka =
2809 ntohl(fcoe_lesb->lesb_miss_fka);
2810 ctlr_dev->lesb.lesb_symb_err =
2811 ntohl(fcoe_lesb->lesb_symb_err);
2812 ctlr_dev->lesb.lesb_err_block =
2813 ntohl(fcoe_lesb->lesb_err_block);
2814 ctlr_dev->lesb.lesb_fcs_error =
2815 ntohl(fcoe_lesb->lesb_fcs_error);
2816}
2817
2818static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
2819{
2820 struct fcoe_ctlr_device *ctlr_dev =
2821 fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
2822 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
2823 struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
2824
2825 fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
2826}
2827
2733/** 2828/**
2734 * fcoe_set_port_id() - Callback from libfc when Port_ID is set. 2829 * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
2735 * @lport: the local port 2830 * @lport: the local port
@@ -2747,7 +2842,8 @@ static void fcoe_set_port_id(struct fc_lport *lport,
2747{ 2842{
2748 struct fcoe_port *port = lport_priv(lport); 2843 struct fcoe_port *port = lport_priv(lport);
2749 struct fcoe_interface *fcoe = port->priv; 2844 struct fcoe_interface *fcoe = port->priv;
2845 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2750 2846
2751 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) 2847 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
2752 fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp); 2848 fcoe_ctlr_recv_flogi(ctlr, lport, fp);
2753} 2849}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 96ac938d39cc..a624add4f8ec 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -68,7 +68,6 @@ do { \
68 * @netdev: The associated net device 68 * @netdev: The associated net device
69 * @fcoe_packet_type: FCoE packet type 69 * @fcoe_packet_type: FCoE packet type
70 * @fip_packet_type: FIP packet type 70 * @fip_packet_type: FIP packet type
71 * @ctlr: The FCoE controller (for FIP)
72 * @oem: The offload exchange manager for all local port 71 * @oem: The offload exchange manager for all local port
73 * instances associated with this port 72 * instances associated with this port
74 * @removed: Indicates fcoe interface removed from net device 73 * @removed: Indicates fcoe interface removed from net device
@@ -80,12 +79,15 @@ struct fcoe_interface {
80 struct net_device *realdev; 79 struct net_device *realdev;
81 struct packet_type fcoe_packet_type; 80 struct packet_type fcoe_packet_type;
82 struct packet_type fip_packet_type; 81 struct packet_type fip_packet_type;
83 struct fcoe_ctlr ctlr;
84 struct fc_exch_mgr *oem; 82 struct fc_exch_mgr *oem;
85 u8 removed; 83 u8 removed;
86}; 84};
87 85
88#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) 86#define fcoe_to_ctlr(x) \
87 (struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)
88
89#define fcoe_from_ctlr(x) \
90 ((struct fcoe_interface *)((x) + 1))
89 91
90/** 92/**
91 * fcoe_netdev() - Return the net device associated with a local port 93 * fcoe_netdev() - Return the net device associated with a local port
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5a4c7250aa77..d68d57241ee6 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -160,6 +160,76 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
160} 160}
161EXPORT_SYMBOL(fcoe_ctlr_init); 161EXPORT_SYMBOL(fcoe_ctlr_init);
162 162
163static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
164{
165 struct fcoe_ctlr *fip = new->fip;
166 struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
167 struct fcoe_fcf_device temp, *fcf_dev;
168 int rc = 0;
169
170 LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
171 new->fabric_name, new->fcf_mac);
172
173 mutex_lock(&ctlr_dev->lock);
174
175 temp.fabric_name = new->fabric_name;
176 temp.switch_name = new->switch_name;
177 temp.fc_map = new->fc_map;
178 temp.vfid = new->vfid;
179 memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
180 temp.priority = new->pri;
181 temp.fka_period = new->fka_period;
182 temp.selected = 0; /* default to unselected */
183
184 fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
185 if (unlikely(!fcf_dev)) {
186 rc = -ENOMEM;
187 goto out;
188 }
189
190 /*
191 * The fcoe_sysfs layer can return a CONNECTED fcf that
192 * has a priv (fcf was never deleted) or a CONNECTED fcf
193 * that doesn't have a priv (fcf was deleted). However,
194 * libfcoe will always delete FCFs before trying to add
195 * them. This is ensured because both recv_adv and
196 * age_fcfs are protected by the the fcoe_ctlr's mutex.
197 * This means that we should never get a FCF with a
198 * non-NULL priv pointer.
199 */
200 BUG_ON(fcf_dev->priv);
201
202 fcf_dev->priv = new;
203 new->fcf_dev = fcf_dev;
204
205 list_add(&new->list, &fip->fcfs);
206 fip->fcf_count++;
207
208out:
209 mutex_unlock(&ctlr_dev->lock);
210 return rc;
211}
212
213static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
214{
215 struct fcoe_ctlr *fip = new->fip;
216 struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
217 struct fcoe_fcf_device *fcf_dev;
218
219 list_del(&new->list);
220 fip->fcf_count--;
221
222 mutex_lock(&ctlr_dev->lock);
223
224 fcf_dev = fcoe_fcf_to_fcf_dev(new);
225 WARN_ON(!fcf_dev);
226 new->fcf_dev = NULL;
227 fcoe_fcf_device_delete(fcf_dev);
228 kfree(new);
229
230 mutex_unlock(&ctlr_dev->lock);
231}
232
163/** 233/**
164 * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller 234 * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
165 * @fip: The FCoE controller whose FCFs are to be reset 235 * @fip: The FCoE controller whose FCFs are to be reset
@@ -173,10 +243,10 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
173 243
174 fip->sel_fcf = NULL; 244 fip->sel_fcf = NULL;
175 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { 245 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
176 list_del(&fcf->list); 246 fcoe_sysfs_fcf_del(fcf);
177 kfree(fcf);
178 } 247 }
179 fip->fcf_count = 0; 248 WARN_ON(fip->fcf_count);
249
180 fip->sel_time = 0; 250 fip->sel_time = 0;
181} 251}
182 252
@@ -717,8 +787,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
717 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); 787 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
718 unsigned long deadline; 788 unsigned long deadline;
719 unsigned long sel_time = 0; 789 unsigned long sel_time = 0;
790 struct list_head del_list;
720 struct fcoe_dev_stats *stats; 791 struct fcoe_dev_stats *stats;
721 792
793 INIT_LIST_HEAD(&del_list);
794
722 stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu()); 795 stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
723 796
724 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { 797 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
@@ -739,10 +812,13 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
739 if (time_after_eq(jiffies, deadline)) { 812 if (time_after_eq(jiffies, deadline)) {
740 if (fip->sel_fcf == fcf) 813 if (fip->sel_fcf == fcf)
741 fip->sel_fcf = NULL; 814 fip->sel_fcf = NULL;
815 /*
816 * Move to delete list so we can call
817 * fcoe_sysfs_fcf_del (which can sleep)
818 * after the put_cpu().
819 */
742 list_del(&fcf->list); 820 list_del(&fcf->list);
743 WARN_ON(!fip->fcf_count); 821 list_add(&fcf->list, &del_list);
744 fip->fcf_count--;
745 kfree(fcf);
746 stats->VLinkFailureCount++; 822 stats->VLinkFailureCount++;
747 } else { 823 } else {
748 if (time_after(next_timer, deadline)) 824 if (time_after(next_timer, deadline))
@@ -753,6 +829,12 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
753 } 829 }
754 } 830 }
755 put_cpu(); 831 put_cpu();
832
833 list_for_each_entry_safe(fcf, next, &del_list, list) {
834 /* Removes fcf from current list */
835 fcoe_sysfs_fcf_del(fcf);
836 }
837
756 if (sel_time && !fip->sel_fcf && !fip->sel_time) { 838 if (sel_time && !fip->sel_fcf && !fip->sel_time) {
757 sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY); 839 sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
758 fip->sel_time = sel_time; 840 fip->sel_time = sel_time;
@@ -903,23 +985,23 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
903{ 985{
904 struct fcoe_fcf *fcf; 986 struct fcoe_fcf *fcf;
905 struct fcoe_fcf new; 987 struct fcoe_fcf new;
906 struct fcoe_fcf *found;
907 unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV); 988 unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
908 int first = 0; 989 int first = 0;
909 int mtu_valid; 990 int mtu_valid;
991 int found = 0;
992 int rc = 0;
910 993
911 if (fcoe_ctlr_parse_adv(fip, skb, &new)) 994 if (fcoe_ctlr_parse_adv(fip, skb, &new))
912 return; 995 return;
913 996
914 mutex_lock(&fip->ctlr_mutex); 997 mutex_lock(&fip->ctlr_mutex);
915 first = list_empty(&fip->fcfs); 998 first = list_empty(&fip->fcfs);
916 found = NULL;
917 list_for_each_entry(fcf, &fip->fcfs, list) { 999 list_for_each_entry(fcf, &fip->fcfs, list) {
918 if (fcf->switch_name == new.switch_name && 1000 if (fcf->switch_name == new.switch_name &&
919 fcf->fabric_name == new.fabric_name && 1001 fcf->fabric_name == new.fabric_name &&
920 fcf->fc_map == new.fc_map && 1002 fcf->fc_map == new.fc_map &&
921 compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) { 1003 compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
922 found = fcf; 1004 found = 1;
923 break; 1005 break;
924 } 1006 }
925 } 1007 }
@@ -931,9 +1013,16 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
931 if (!fcf) 1013 if (!fcf)
932 goto out; 1014 goto out;
933 1015
934 fip->fcf_count++;
935 memcpy(fcf, &new, sizeof(new)); 1016 memcpy(fcf, &new, sizeof(new));
936 list_add(&fcf->list, &fip->fcfs); 1017 fcf->fip = fip;
1018 rc = fcoe_sysfs_fcf_add(fcf);
1019 if (rc) {
1020 printk(KERN_ERR "Failed to allocate sysfs instance "
1021 "for FCF, fab %16.16llx mac %pM\n",
1022 new.fabric_name, new.fcf_mac);
1023 kfree(fcf);
1024 goto out;
1025 }
937 } else { 1026 } else {
938 /* 1027 /*
939 * Update the FCF's keep-alive descriptor flags. 1028 * Update the FCF's keep-alive descriptor flags.
@@ -954,6 +1043,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
954 fcf->fka_period = new.fka_period; 1043 fcf->fka_period = new.fka_period;
955 memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN); 1044 memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
956 } 1045 }
1046
957 mtu_valid = fcoe_ctlr_mtu_valid(fcf); 1047 mtu_valid = fcoe_ctlr_mtu_valid(fcf);
958 fcf->time = jiffies; 1048 fcf->time = jiffies;
959 if (!found) 1049 if (!found)
@@ -996,6 +1086,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
996 time_before(fip->sel_time, fip->timer.expires)) 1086 time_before(fip->sel_time, fip->timer.expires))
997 mod_timer(&fip->timer, fip->sel_time); 1087 mod_timer(&fip->timer, fip->sel_time);
998 } 1088 }
1089
999out: 1090out:
1000 mutex_unlock(&fip->ctlr_mutex); 1091 mutex_unlock(&fip->ctlr_mutex);
1001} 1092}
@@ -2718,9 +2809,9 @@ unlock:
2718 2809
2719/** 2810/**
2720 * fcoe_libfc_config() - Sets up libfc related properties for local port 2811 * fcoe_libfc_config() - Sets up libfc related properties for local port
2721 * @lp: The local port to configure libfc for 2812 * @lport: The local port to configure libfc for
2722 * @fip: The FCoE controller in use by the local port 2813 * @fip: The FCoE controller in use by the local port
2723 * @tt: The libfc function template 2814 * @tt: The libfc function template
2724 * @init_fcp: If non-zero, the FCP portion of libfc should be initialized 2815 * @init_fcp: If non-zero, the FCP portion of libfc should be initialized
2725 * 2816 *
2726 * Returns : 0 for success 2817 * Returns : 0 for success
@@ -2753,3 +2844,43 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
2753 return 0; 2844 return 0;
2754} 2845}
2755EXPORT_SYMBOL_GPL(fcoe_libfc_config); 2846EXPORT_SYMBOL_GPL(fcoe_libfc_config);
2847
2848void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
2849{
2850 struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
2851 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
2852 struct fcoe_fcf *fcf;
2853
2854 mutex_lock(&fip->ctlr_mutex);
2855 mutex_lock(&ctlr_dev->lock);
2856
2857 fcf = fcoe_fcf_device_priv(fcf_dev);
2858 if (fcf)
2859 fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
2860 else
2861 fcf_dev->selected = 0;
2862
2863 mutex_unlock(&ctlr_dev->lock);
2864 mutex_unlock(&fip->ctlr_mutex);
2865}
2866EXPORT_SYMBOL(fcoe_fcf_get_selected);
2867
2868void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
2869{
2870 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
2871
2872 mutex_lock(&ctlr->ctlr_mutex);
2873 switch (ctlr->mode) {
2874 case FIP_MODE_FABRIC:
2875 ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
2876 break;
2877 case FIP_MODE_VN2VN:
2878 ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
2879 break;
2880 default:
2881 ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
2882 break;
2883 }
2884 mutex_unlock(&ctlr->ctlr_mutex);
2885}
2886EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
new file mode 100644
index 000000000000..2bc163198d33
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -0,0 +1,832 @@
1/*
2 * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/etherdevice.h>
24
25#include <scsi/fcoe_sysfs.h>
26
27static atomic_t ctlr_num;
28static atomic_t fcf_num;
29
30/*
31 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
32 * should insulate the loss of a fcf.
33 */
34static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */
35
36module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
37 uint, S_IRUGO|S_IWUSR);
38MODULE_PARM_DESC(fcf_dev_loss_tmo,
39 "Maximum number of seconds that libfcoe should"
40 " insulate the loss of a fcf. Once this value is"
41 " exceeded, the fcf is removed.");
42
43/*
44 * These are used by the fcoe_*_show_function routines, they
45 * are intentionally placed in the .c file as they're not intended
46 * for use throughout the code.
47 */
48#define fcoe_ctlr_id(x) \
49 ((x)->id)
50#define fcoe_ctlr_work_q_name(x) \
51 ((x)->work_q_name)
52#define fcoe_ctlr_work_q(x) \
53 ((x)->work_q)
54#define fcoe_ctlr_devloss_work_q_name(x) \
55 ((x)->devloss_work_q_name)
56#define fcoe_ctlr_devloss_work_q(x) \
57 ((x)->devloss_work_q)
58#define fcoe_ctlr_mode(x) \
59 ((x)->mode)
60#define fcoe_ctlr_fcf_dev_loss_tmo(x) \
61 ((x)->fcf_dev_loss_tmo)
62#define fcoe_ctlr_link_fail(x) \
63 ((x)->lesb.lesb_link_fail)
64#define fcoe_ctlr_vlink_fail(x) \
65 ((x)->lesb.lesb_vlink_fail)
66#define fcoe_ctlr_miss_fka(x) \
67 ((x)->lesb.lesb_miss_fka)
68#define fcoe_ctlr_symb_err(x) \
69 ((x)->lesb.lesb_symb_err)
70#define fcoe_ctlr_err_block(x) \
71 ((x)->lesb.lesb_err_block)
72#define fcoe_ctlr_fcs_error(x) \
73 ((x)->lesb.lesb_fcs_error)
74#define fcoe_fcf_state(x) \
75 ((x)->state)
76#define fcoe_fcf_fabric_name(x) \
77 ((x)->fabric_name)
78#define fcoe_fcf_switch_name(x) \
79 ((x)->switch_name)
80#define fcoe_fcf_fc_map(x) \
81 ((x)->fc_map)
82#define fcoe_fcf_vfid(x) \
83 ((x)->vfid)
84#define fcoe_fcf_mac(x) \
85 ((x)->mac)
86#define fcoe_fcf_priority(x) \
87 ((x)->priority)
88#define fcoe_fcf_fka_period(x) \
89 ((x)->fka_period)
90#define fcoe_fcf_dev_loss_tmo(x) \
91 ((x)->dev_loss_tmo)
92#define fcoe_fcf_selected(x) \
93 ((x)->selected)
94#define fcoe_fcf_vlan_id(x) \
95 ((x)->vlan_id)
96
97/*
98 * dev_loss_tmo attribute
99 */
100static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
101{
102 int ret;
103
104 ret = kstrtoul(buf, 0, val);
105 if (ret || *val < 0)
106 return -EINVAL;
107 /*
108 * Check for overflow; dev_loss_tmo is u32
109 */
110 if (*val > UINT_MAX)
111 return -EINVAL;
112
113 return 0;
114}
115
116static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
117 unsigned long val)
118{
119 if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
120 (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
121 (fcf->state == FCOE_FCF_STATE_DELETED))
122 return -EBUSY;
123 /*
124 * Check for overflow; dev_loss_tmo is u32
125 */
126 if (val > UINT_MAX)
127 return -EINVAL;
128
129 fcoe_fcf_dev_loss_tmo(fcf) = val;
130 return 0;
131}
132
133#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \
134struct device_attribute device_attr_fcoe_##_prefix##_##_name = \
135 __ATTR(_name, _mode, _show, _store)
136
137#define fcoe_ctlr_show_function(field, format_string, sz, cast) \
138static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
139 struct device_attribute *attr, \
140 char *buf) \
141{ \
142 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
143 if (ctlr->f->get_fcoe_ctlr_##field) \
144 ctlr->f->get_fcoe_ctlr_##field(ctlr); \
145 return snprintf(buf, sz, format_string, \
146 cast fcoe_ctlr_##field(ctlr)); \
147}
148
149#define fcoe_fcf_show_function(field, format_string, sz, cast) \
150static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
151 struct device_attribute *attr, \
152 char *buf) \
153{ \
154 struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
155 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \
156 if (ctlr->f->get_fcoe_fcf_##field) \
157 ctlr->f->get_fcoe_fcf_##field(fcf); \
158 return snprintf(buf, sz, format_string, \
159 cast fcoe_fcf_##field(fcf)); \
160}
161
162#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \
163static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
164 struct device_attribute *attr, \
165 char *buf) \
166{ \
167 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
168 return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
169}
170
171#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \
172static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
173 struct device_attribute *attr, \
174 char *buf) \
175{ \
176 struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
177 return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
178}
179
180#define fcoe_ctlr_private_rd_attr(field, format_string, sz) \
181 fcoe_ctlr_private_show_function(field, format_string, sz, ) \
182 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
183 show_fcoe_ctlr_device_##field, NULL)
184
185#define fcoe_ctlr_rd_attr(field, format_string, sz) \
186 fcoe_ctlr_show_function(field, format_string, sz, ) \
187 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
188 show_fcoe_ctlr_device_##field, NULL)
189
190#define fcoe_fcf_rd_attr(field, format_string, sz) \
191 fcoe_fcf_show_function(field, format_string, sz, ) \
192 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
193 show_fcoe_fcf_device_##field, NULL)
194
195#define fcoe_fcf_private_rd_attr(field, format_string, sz) \
196 fcoe_fcf_private_show_function(field, format_string, sz, ) \
197 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
198 show_fcoe_fcf_device_##field, NULL)
199
200#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \
201 fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
202 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
203 show_fcoe_ctlr_device_##field, NULL)
204
205#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \
206 fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
207 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
208 show_fcoe_fcf_device_##field, NULL)
209
210#define fcoe_enum_name_search(title, table_type, table) \
211static const char *get_fcoe_##title##_name(enum table_type table_key) \
212{ \
213 int i; \
214 char *name = NULL; \
215 \
216 for (i = 0; i < ARRAY_SIZE(table); i++) { \
217 if (table[i].value == table_key) { \
218 name = table[i].name; \
219 break; \
220 } \
221 } \
222 return name; \
223}
224
225static struct {
226 enum fcf_state value;
227 char *name;
228} fcf_state_names[] = {
229 { FCOE_FCF_STATE_UNKNOWN, "Unknown" },
230 { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
231 { FCOE_FCF_STATE_CONNECTED, "Connected" },
232};
233fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
234#define FCOE_FCF_STATE_MAX_NAMELEN 50
235
236static ssize_t show_fcf_state(struct device *dev,
237 struct device_attribute *attr,
238 char *buf)
239{
240 struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
241 const char *name;
242 name = get_fcoe_fcf_state_name(fcf->state);
243 if (!name)
244 return -EINVAL;
245 return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
246}
247static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
248
/* Human-readable names for each FIP connection mode (sysfs "mode" file). */
 249static struct {
 250 enum fip_conn_type value;
 251 char *name;
 252} fip_conn_type_names[] = {
 253 { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
 254 { FIP_CONN_TYPE_FABRIC, "Fabric" },
 255 { FIP_CONN_TYPE_VN2VN, "VN2VN" },
 256};
/* Expands to get_fcoe_ctlr_mode_name(); NULL for unlisted modes. */
 257fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
/* Buffer bound used when snprintf'ing a mode name into sysfs output. */
 258#define FCOE_CTLR_MODE_MAX_NAMELEN 50
259
260static ssize_t show_ctlr_mode(struct device *dev,
261 struct device_attribute *attr,
262 char *buf)
263{
264 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
265 const char *name;
266
267 if (ctlr->f->get_fcoe_ctlr_mode)
268 ctlr->f->get_fcoe_ctlr_mode(ctlr);
269
270 name = get_fcoe_ctlr_mode_name(ctlr->mode);
271 if (!name)
272 return -EINVAL;
273 return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
274 "%s\n", name);
275}
276static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
277 show_ctlr_mode, NULL);
278
279static ssize_t
280store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
281 struct device_attribute *attr,
282 const char *buf, size_t count)
283{
284 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
285 struct fcoe_fcf_device *fcf;
286 unsigned long val;
287 int rc;
288
289 rc = fcoe_str_to_dev_loss(buf, &val);
290 if (rc)
291 return rc;
292
293 fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
294 mutex_lock(&ctlr->lock);
295 list_for_each_entry(fcf, &ctlr->fcfs, peers)
296 fcoe_fcf_set_dev_loss_tmo(fcf, val);
297 mutex_unlock(&ctlr->lock);
298 return count;
299}
300fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
301static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
302 show_fcoe_ctlr_device_fcf_dev_loss_tmo,
303 store_private_fcoe_ctlr_fcf_dev_loss_tmo);
304
 305/* Link Error Status Block (LESB) */
 306fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
 307fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
 308fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
 309fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
 310fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
 311fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);
 312
/*
 * Per-FCF read-only attributes. The "_cast" variants widen the 64-bit
 * WWN fields to unsigned long long for the 0x%llx format; mac uses the
 * kernel's %pM MAC-address format extension.
 */
 313fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
 314fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
 315fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
 316fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
 317fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
 318fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
 319fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
 320fcoe_fcf_rd_attr(selected, "%u\n", 20);
 321fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);
323fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
324static ssize_t
325store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
326 const char *buf, size_t count)
327{
328 struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
329 unsigned long val;
330 int rc;
331
332 rc = fcoe_str_to_dev_loss(buf, &val);
333 if (rc)
334 return rc;
335
336 rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
337 if (rc)
338 return rc;
339 return count;
340}
341static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
342 show_fcoe_fcf_device_dev_loss_tmo,
343 store_fcoe_fcf_dev_loss_tmo);
344
/* LESB statistics, exposed in a "lesb" subdirectory of the ctlr device. */
 345static struct attribute *fcoe_ctlr_lesb_attrs[] = {
 346 &device_attr_fcoe_ctlr_link_fail.attr,
 347 &device_attr_fcoe_ctlr_vlink_fail.attr,
 348 &device_attr_fcoe_ctlr_miss_fka.attr,
 349 &device_attr_fcoe_ctlr_symb_err.attr,
 350 &device_attr_fcoe_ctlr_err_block.attr,
 351 &device_attr_fcoe_ctlr_fcs_error.attr,
 352 NULL,
 353};
 354
 355static struct attribute_group fcoe_ctlr_lesb_attr_group = {
 356 .name = "lesb",
 357 .attrs = fcoe_ctlr_lesb_attrs,
 358};
 359
/* Top-level ctlr attributes (no subdirectory). */
 360static struct attribute *fcoe_ctlr_attrs[] = {
 361 &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
 362 &device_attr_fcoe_ctlr_mode.attr,
 363 NULL,
 364};
 365
 366static struct attribute_group fcoe_ctlr_attr_group = {
 367 .attrs = fcoe_ctlr_attrs,
 368};
 369
/* All groups installed on a ctlr via fcoe_ctlr_device_type.groups. */
 370static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
 371 &fcoe_ctlr_attr_group,
 372 &fcoe_ctlr_lesb_attr_group,
 373 NULL,
 374};
 375
/* Per-FCF attributes, installed via fcoe_fcf_device_type.groups. */
 376static struct attribute *fcoe_fcf_attrs[] = {
 377 &device_attr_fcoe_fcf_fabric_name.attr,
 378 &device_attr_fcoe_fcf_switch_name.attr,
 379 &device_attr_fcoe_fcf_dev_loss_tmo.attr,
 380 &device_attr_fcoe_fcf_fc_map.attr,
 381 &device_attr_fcoe_fcf_vfid.attr,
 382 &device_attr_fcoe_fcf_mac.attr,
 383 &device_attr_fcoe_fcf_priority.attr,
 384 &device_attr_fcoe_fcf_fka_period.attr,
 385 &device_attr_fcoe_fcf_state.attr,
 386 &device_attr_fcoe_fcf_selected.attr,
 387 &device_attr_fcoe_fcf_vlan_id.attr,
 388 NULL
 389};
 390
 391static struct attribute_group fcoe_fcf_attr_group = {
 392 .attrs = fcoe_fcf_attrs,
 393};
 394
 395static const struct attribute_group *fcoe_fcf_attr_groups[] = {
 396 &fcoe_fcf_attr_group,
 397 NULL,
 398};
399
400struct bus_type fcoe_bus_type;
401
402static int fcoe_bus_match(struct device *dev,
403 struct device_driver *drv)
404{
405 if (dev->bus == &fcoe_bus_type)
406 return 1;
407 return 0;
408}
409
/**
 * fcoe_ctlr_device_release() - Release the FIP ctlr memory
 * @dev: Pointer to the FIP ctlr's embedded device
 *
 * Invoked by the driver core when the last reference to the
 * embedded device is dropped; frees the containing fcoe_ctlr_device.
 */
static void fcoe_ctlr_device_release(struct device *dev)
{
	kfree(dev_to_ctlr(dev));
}
421
/**
 * fcoe_fcf_device_release() - Release the FIP fcf memory
 * @dev: Pointer to the fcf's embedded device
 *
 * Invoked by the driver core when the last reference to the
 * embedded device is dropped; frees the containing fcoe_fcf_device.
 */
static void fcoe_fcf_device_release(struct device *dev)
{
	kfree(dev_to_fcf(dev));
}
433
/*
 * Device type for fcoe_ctlr_device: installs the ctlr attribute groups
 * and routes the final put_device() into fcoe_ctlr_device_release().
 */
 434struct device_type fcoe_ctlr_device_type = {
 435 .name = "fcoe_ctlr",
 436 .groups = fcoe_ctlr_attr_groups,
 437 .release = fcoe_ctlr_device_release,
 438};
 439
/* Device type for fcoe_fcf_device; analogous to the ctlr type above. */
 440struct device_type fcoe_fcf_device_type = {
 441 .name = "fcoe_fcf",
 442 .groups = fcoe_fcf_attr_groups,
 443 .release = fcoe_fcf_device_release,
 444};
 445
/* The fcoe bus itself; registered from fcoe_sysfs_setup(). */
 446struct bus_type fcoe_bus_type = {
 447 .name = "fcoe",
 448 .match = &fcoe_bus_match,
 449};
450
451/**
452 * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
453 * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
454 */
455void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
456{
457 if (!fcoe_ctlr_work_q(ctlr)) {
458 printk(KERN_ERR
459 "ERROR: FIP Ctlr '%d' attempted to flush work, "
460 "when no workqueue created.\n", ctlr->id);
461 dump_stack();
462 return;
463 }
464
465 flush_workqueue(fcoe_ctlr_work_q(ctlr));
466}
467
468/**
469 * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
470 * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
471 * @work: Work to queue for execution
472 *
473 * Return value:
474 * 1 on success / 0 already queued / < 0 for error
475 */
476int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
477 struct work_struct *work)
478{
479 if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
480 printk(KERN_ERR
481 "ERROR: FIP Ctlr '%d' attempted to queue work, "
482 "when no workqueue created.\n", ctlr->id);
483 dump_stack();
484
485 return -EINVAL;
486 }
487
488 return queue_work(fcoe_ctlr_work_q(ctlr), work);
489}
490
491/**
492 * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
493 * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
494 */
495void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
496{
497 if (!fcoe_ctlr_devloss_work_q(ctlr)) {
498 printk(KERN_ERR
499 "ERROR: FIP Ctlr '%d' attempted to flush work, "
500 "when no workqueue created.\n", ctlr->id);
501 dump_stack();
502 return;
503 }
504
505 flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
506}
507
508/**
509 * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
510 * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
511 * @work: Work to queue for execution
512 * @delay: jiffies to delay the work queuing
513 *
514 * Return value:
515 * 1 on success / 0 already queued / < 0 for error
516 */
517int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
518 struct delayed_work *work,
519 unsigned long delay)
520{
521 if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
522 printk(KERN_ERR
523 "ERROR: FIP Ctlr '%d' attempted to queue work, "
524 "when no workqueue created.\n", ctlr->id);
525 dump_stack();
526
527 return -EINVAL;
528 }
529
530 return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
531}
532
533static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
534 struct fcoe_fcf_device *old)
535{
536 if (new->switch_name == old->switch_name &&
537 new->fabric_name == old->fabric_name &&
538 new->fc_map == old->fc_map &&
539 compare_ether_addr(new->mac, old->mac) == 0)
540 return 1;
541 return 0;
542}
543
544/**
545 * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
546 * @parent: The parent device to which the fcoe_ctlr instance
547 * should be attached
548 * @f: The LLD's FCoE sysfs function template pointer
549 * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
550 *
551 * This routine allocates a FIP ctlr object with some additional memory
552 * for the LLD. The FIP ctlr is initialized, added to sysfs and then
553 * attributes are added to it.
554 */
555struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
556 struct fcoe_sysfs_function_template *f,
557 int priv_size)
558{
559 struct fcoe_ctlr_device *ctlr;
560 int error = 0;
561
562 ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
563 GFP_KERNEL);
564 if (!ctlr)
565 goto out;
566
567 ctlr->id = atomic_inc_return(&ctlr_num) - 1;
568 ctlr->f = f;
569 INIT_LIST_HEAD(&ctlr->fcfs);
570 mutex_init(&ctlr->lock);
571 ctlr->dev.parent = parent;
572 ctlr->dev.bus = &fcoe_bus_type;
573 ctlr->dev.type = &fcoe_ctlr_device_type;
574
575 ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
576
577 snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
578 "ctlr_wq_%d", ctlr->id);
579 ctlr->work_q = create_singlethread_workqueue(
580 ctlr->work_q_name);
581 if (!ctlr->work_q)
582 goto out_del;
583
584 snprintf(ctlr->devloss_work_q_name,
585 sizeof(ctlr->devloss_work_q_name),
586 "ctlr_dl_wq_%d", ctlr->id);
587 ctlr->devloss_work_q = create_singlethread_workqueue(
588 ctlr->devloss_work_q_name);
589 if (!ctlr->devloss_work_q)
590 goto out_del_q;
591
592 dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
593 error = device_register(&ctlr->dev);
594 if (error)
595 goto out_del_q2;
596
597 return ctlr;
598
599out_del_q2:
600 destroy_workqueue(ctlr->devloss_work_q);
601 ctlr->devloss_work_q = NULL;
602out_del_q:
603 destroy_workqueue(ctlr->work_q);
604 ctlr->work_q = NULL;
605out_del:
606 kfree(ctlr);
607out:
608 return NULL;
609}
610EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
611
 612/**
 613 * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
 614 * @ctlr: A pointer to the ctlr to be deleted
 615 *
 616 * Deletes a FIP ctlr and any fcfs attached
 617 * to it. Deleting fcfs will cause their children
 618 * to be deleted as well.
 619 *
 620 * The ctlr is detached from sysfs and its resources
 621 * are freed (work q), but the memory is not freed
 622 * until its last reference is released.
 623 *
 624 * This routine expects no locks to be held before
 625 * calling.
 626 *
 627 * TODO: Currently there are no callbacks to clean up LLD data
 628 * for a fcoe_fcf_device. LLDs must keep this in mind as they need
 629 * to clean up each of their LLD data for all fcoe_fcf_device before
 630 * calling fcoe_ctlr_device_delete.
 631 */
 632void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
 633{
 634 struct fcoe_fcf_device *fcf, *next;
 635 /* Remove any attached fcfs */
 636 mutex_lock(&ctlr->lock);
 637 list_for_each_entry_safe(fcf, next,
 638 &ctlr->fcfs, peers) {
 639 list_del(&fcf->peers);
 640 fcf->state = FCOE_FCF_STATE_DELETED;
 641 fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
 642 }
 643 mutex_unlock(&ctlr->lock);
 644
/* Wait for the queued fcf deletions to complete before tearing down. */
 645 fcoe_ctlr_device_flush_work(ctlr);
 646
 647 destroy_workqueue(ctlr->devloss_work_q);
 648 ctlr->devloss_work_q = NULL;
 649 destroy_workqueue(ctlr->work_q);
 650 ctlr->work_q = NULL;
 651
/* Memory is released by fcoe_ctlr_device_release() on the final put. */
 652 device_unregister(&ctlr->dev);
 653}
 654EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);
655
656/**
657 * fcoe_fcf_device_final_delete() - Final delete routine
658 * @work: The FIP fcf's embedded work struct
659 *
660 * It is expected that the fcf has been removed from
661 * the FIP ctlr's list before calling this routine.
662 */
663static void fcoe_fcf_device_final_delete(struct work_struct *work)
664{
665 struct fcoe_fcf_device *fcf =
666 container_of(work, struct fcoe_fcf_device, delete_work);
667 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
668
669 /*
670 * Cancel any outstanding timers. These should really exist
671 * only when rmmod'ing the LLDD and we're asking for
672 * immediate termination of the rports
673 */
674 if (!cancel_delayed_work(&fcf->dev_loss_work))
675 fcoe_ctlr_device_flush_devloss(ctlr);
676
677 device_unregister(&fcf->dev);
678}
679
680/**
681 * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
682 * @work: The FIP fcf's embedded work struct
683 *
684 * Removes the fcf from the FIP ctlr's list of fcfs and
685 * queues the final deletion.
686 */
687static void fip_timeout_deleted_fcf(struct work_struct *work)
688{
689 struct fcoe_fcf_device *fcf =
690 container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
691 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
692
693 mutex_lock(&ctlr->lock);
694
695 /*
696 * If the fcf is deleted or reconnected before the timer
697 * fires the devloss queue will be flushed, but the state will
698 * either be CONNECTED or DELETED. If that is the case we
699 * cancel deleting the fcf.
700 */
701 if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
702 goto out;
703
704 dev_printk(KERN_ERR, &fcf->dev,
705 "FIP fcf connection time out: removing fcf\n");
706
707 list_del(&fcf->peers);
708 fcf->state = FCOE_FCF_STATE_DELETED;
709 fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
710
711out:
712 mutex_unlock(&ctlr->lock);
713}
714
715/**
716 * fcoe_fcf_device_delete() - Delete a FIP fcf
717 * @fcf: Pointer to the fcf which is to be deleted
718 *
719 * Queues the FIP fcf on the devloss workqueue
720 *
721 * Expects the ctlr_attrs mutex to be held for fcf
722 * state change.
723 */
724void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
725{
726 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
727 int timeout = fcf->dev_loss_tmo;
728
729 if (fcf->state != FCOE_FCF_STATE_CONNECTED)
730 return;
731
732 fcf->state = FCOE_FCF_STATE_DISCONNECTED;
733
734 /*
735 * FCF will only be re-connected by the LLD calling
736 * fcoe_fcf_device_add, and it should be setting up
737 * priv then.
738 */
739 fcf->priv = NULL;
740
741 fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
742 timeout * HZ);
743}
744EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);
745
746/**
747 * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
748 * @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent
749 * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
750 *
751 * Expects to be called with the ctlr->lock held
752 */
753struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
754 struct fcoe_fcf_device *new_fcf)
755{
756 struct fcoe_fcf_device *fcf;
757 int error = 0;
758
759 list_for_each_entry(fcf, &ctlr->fcfs, peers) {
760 if (fcoe_fcf_device_match(new_fcf, fcf)) {
761 if (fcf->state == FCOE_FCF_STATE_CONNECTED)
762 return fcf;
763
764 fcf->state = FCOE_FCF_STATE_CONNECTED;
765
766 if (!cancel_delayed_work(&fcf->dev_loss_work))
767 fcoe_ctlr_device_flush_devloss(ctlr);
768
769 return fcf;
770 }
771 }
772
773 fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
774 if (unlikely(!fcf))
775 goto out;
776
777 INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
778 INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);
779
780 fcf->dev.parent = &ctlr->dev;
781 fcf->dev.bus = &fcoe_bus_type;
782 fcf->dev.type = &fcoe_fcf_device_type;
783 fcf->id = atomic_inc_return(&fcf_num) - 1;
784 fcf->state = FCOE_FCF_STATE_UNKNOWN;
785
786 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
787
788 dev_set_name(&fcf->dev, "fcf_%d", fcf->id);
789
790 fcf->fabric_name = new_fcf->fabric_name;
791 fcf->switch_name = new_fcf->switch_name;
792 fcf->fc_map = new_fcf->fc_map;
793 fcf->vfid = new_fcf->vfid;
794 memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
795 fcf->priority = new_fcf->priority;
796 fcf->fka_period = new_fcf->fka_period;
797 fcf->selected = new_fcf->selected;
798
799 error = device_register(&fcf->dev);
800 if (error)
801 goto out_del;
802
803 fcf->state = FCOE_FCF_STATE_CONNECTED;
804 list_add_tail(&fcf->peers, &ctlr->fcfs);
805
806 return fcf;
807
808out_del:
809 kfree(fcf);
810out:
811 return NULL;
812}
813EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
814
815int __init fcoe_sysfs_setup(void)
816{
817 int error;
818
819 atomic_set(&ctlr_num, 0);
820 atomic_set(&fcf_num, 0);
821
822 error = bus_register(&fcoe_bus_type);
823 if (error)
824 return error;
825
826 return 0;
827}
828
829void __exit fcoe_sysfs_teardown(void)
830{
831 bus_unregister(&fcoe_bus_type);
832}
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 710e149d41b6..b46f43dced78 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -815,9 +815,17 @@ out_nodev:
815 */ 815 */
816static int __init libfcoe_init(void) 816static int __init libfcoe_init(void)
817{ 817{
818 fcoe_transport_init(); 818 int rc = 0;
819 819
820 return 0; 820 rc = fcoe_transport_init();
821 if (rc)
822 return rc;
823
824 rc = fcoe_sysfs_setup();
825 if (rc)
826 fcoe_transport_exit();
827
828 return rc;
821} 829}
822module_init(libfcoe_init); 830module_init(libfcoe_init);
823 831
@@ -826,6 +834,7 @@ module_init(libfcoe_init);
826 */ 834 */
827static void __exit libfcoe_exit(void) 835static void __exit libfcoe_exit(void)
828{ 836{
837 fcoe_sysfs_teardown();
829 fcoe_transport_exit(); 838 fcoe_transport_exit();
830} 839}
831module_exit(libfcoe_exit); 840module_exit(libfcoe_exit);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 441d88ad99a7..d109cc3a17b6 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -139,12 +139,12 @@ static void sas_ata_task_done(struct sas_task *task)
139 if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD || 139 if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
140 ((stat->stat == SAM_STAT_CHECK_CONDITION && 140 ((stat->stat == SAM_STAT_CHECK_CONDITION &&
141 dev->sata_dev.command_set == ATAPI_COMMAND_SET))) { 141 dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
142 ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf); 142 memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
143 143
144 if (!link->sactive) { 144 if (!link->sactive) {
145 qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command); 145 qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
146 } else { 146 } else {
147 link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command); 147 link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
148 if (unlikely(link->eh_info.err_mask)) 148 if (unlikely(link->eh_info.err_mask))
149 qc->flags |= ATA_QCFLAG_FAILED; 149 qc->flags |= ATA_QCFLAG_FAILED;
150 } 150 }
@@ -161,8 +161,8 @@ static void sas_ata_task_done(struct sas_task *task)
161 qc->flags |= ATA_QCFLAG_FAILED; 161 qc->flags |= ATA_QCFLAG_FAILED;
162 } 162 }
163 163
164 dev->sata_dev.tf.feature = 0x04; /* status err */ 164 dev->sata_dev.fis[3] = 0x04; /* status err */
165 dev->sata_dev.tf.command = ATA_ERR; 165 dev->sata_dev.fis[2] = ATA_ERR;
166 } 166 }
167 } 167 }
168 168
@@ -269,7 +269,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
269{ 269{
270 struct domain_device *dev = qc->ap->private_data; 270 struct domain_device *dev = qc->ap->private_data;
271 271
272 memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf)); 272 ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
273 return true; 273 return true;
274} 274}
275 275
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6102ef2cb2d8..9d46fcbe7755 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1792,7 +1792,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1792static inline u8 1792static inline u8
1793_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) 1793_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
1794{ 1794{
1795 return ioc->cpu_msix_table[smp_processor_id()]; 1795 return ioc->cpu_msix_table[raw_smp_processor_id()];
1796} 1796}
1797 1797
1798/** 1798/**
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 6208d562890d..317a7fdc3b82 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -25,3 +25,12 @@ config SCSI_QLA_FC
25 Firmware images can be retrieved from: 25 Firmware images can be retrieved from:
26 26
27 ftp://ftp.qlogic.com/outgoing/linux/firmware/ 27 ftp://ftp.qlogic.com/outgoing/linux/firmware/
28
29config TCM_QLA2XXX
30 tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
31 depends on SCSI_QLA_FC && TARGET_CORE
32 select LIBFC
33 select BTREE
34 default n
35 ---help---
36 Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 5df782f4a097..dce7d788cdc9 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,5 +1,6 @@
1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ 2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
3 qla_nx.o 3 qla_nx.o qla_target.o
4 4
5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o 5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
6obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5926f5a87ea8..5ab953029f8d 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/kthread.h> 10#include <linux/kthread.h>
10#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
@@ -576,6 +577,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
576 scsi_block_requests(vha->host); 577 scsi_block_requests(vha->host);
577 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 578 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
578 if (IS_QLA82XX(ha)) { 579 if (IS_QLA82XX(ha)) {
580 ha->flags.isp82xx_no_md_cap = 1;
579 qla82xx_idc_lock(ha); 581 qla82xx_idc_lock(ha);
580 qla82xx_set_reset_owner(vha); 582 qla82xx_set_reset_owner(vha);
581 qla82xx_idc_unlock(ha); 583 qla82xx_idc_unlock(ha);
@@ -585,7 +587,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
585 scsi_unblock_requests(vha->host); 587 scsi_unblock_requests(vha->host);
586 break; 588 break;
587 case 0x2025d: 589 case 0x2025d:
588 if (!IS_QLA81XX(ha)) 590 if (!IS_QLA81XX(ha) || !IS_QLA8031(ha))
589 return -EPERM; 591 return -EPERM;
590 592
591 ql_log(ql_log_info, vha, 0x706f, 593 ql_log(ql_log_info, vha, 0x706f,
@@ -1105,9 +1107,8 @@ qla2x00_total_isp_aborts_show(struct device *dev,
1105 struct device_attribute *attr, char *buf) 1107 struct device_attribute *attr, char *buf)
1106{ 1108{
1107 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1109 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1108 struct qla_hw_data *ha = vha->hw;
1109 return snprintf(buf, PAGE_SIZE, "%d\n", 1110 return snprintf(buf, PAGE_SIZE, "%d\n",
1110 ha->qla_stats.total_isp_aborts); 1111 vha->qla_stats.total_isp_aborts);
1111} 1112}
1112 1113
1113static ssize_t 1114static ssize_t
@@ -1154,7 +1155,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1154 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1155 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1155 struct qla_hw_data *ha = vha->hw; 1156 struct qla_hw_data *ha = vha->hw;
1156 1157
1157 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 1158 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1158 return snprintf(buf, PAGE_SIZE, "\n"); 1159 return snprintf(buf, PAGE_SIZE, "\n");
1159 1160
1160 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1161 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1537,7 +1538,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1537 dma_addr_t stats_dma; 1538 dma_addr_t stats_dma;
1538 struct fc_host_statistics *pfc_host_stat; 1539 struct fc_host_statistics *pfc_host_stat;
1539 1540
1540 pfc_host_stat = &ha->fc_host_stat; 1541 pfc_host_stat = &vha->fc_host_stat;
1541 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 1542 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1542 1543
1543 if (test_bit(UNLOADING, &vha->dpc_flags)) 1544 if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -1580,8 +1581,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1580 pfc_host_stat->dumped_frames = stats->dumped_frames; 1581 pfc_host_stat->dumped_frames = stats->dumped_frames;
1581 pfc_host_stat->nos_count = stats->nos_rcvd; 1582 pfc_host_stat->nos_count = stats->nos_rcvd;
1582 } 1583 }
1583 pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20; 1584 pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
1584 pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20; 1585 pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
1585 1586
1586done_free: 1587done_free:
1587 dma_pool_free(ha->s_dma_pool, stats, stats_dma); 1588 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1737,6 +1738,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1737 fc_host_supported_speeds(vha->host) = 1738 fc_host_supported_speeds(vha->host) =
1738 fc_host_supported_speeds(base_vha->host); 1739 fc_host_supported_speeds(base_vha->host);
1739 1740
1741 qlt_vport_create(vha, ha);
1740 qla24xx_vport_disable(fc_vport, disable); 1742 qla24xx_vport_disable(fc_vport, disable);
1741 1743
1742 if (ha->flags.cpu_affinity_enabled) { 1744 if (ha->flags.cpu_affinity_enabled) {
@@ -1951,12 +1953,16 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
1951 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; 1953 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1952 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 1954 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1953 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 1955 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1954 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 1956 fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
1957 (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
1955 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; 1958 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
1956 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; 1959 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
1957 1960
1958 if (IS_CNA_CAPABLE(ha)) 1961 if (IS_CNA_CAPABLE(ha))
1959 speed = FC_PORTSPEED_10GBIT; 1962 speed = FC_PORTSPEED_10GBIT;
1963 else if (IS_QLA2031(ha))
1964 speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
1965 FC_PORTSPEED_4GBIT;
1960 else if (IS_QLA25XX(ha)) 1966 else if (IS_QLA25XX(ha))
1961 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | 1967 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
1962 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 1968 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index bc3cc6d91117..c68883806c54 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -297,7 +297,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
297 297
298 /* Initialize all required fields of fcport */ 298 /* Initialize all required fields of fcport */
299 fcport->vha = vha; 299 fcport->vha = vha;
300 fcport->vp_idx = vha->vp_idx;
301 fcport->d_id.b.al_pa = 300 fcport->d_id.b.al_pa =
302 bsg_job->request->rqst_data.h_els.port_id[0]; 301 bsg_job->request->rqst_data.h_els.port_id[0];
303 fcport->d_id.b.area = 302 fcport->d_id.b.area =
@@ -483,7 +482,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
483 482
484 /* Initialize all required fields of fcport */ 483 /* Initialize all required fields of fcport */
485 fcport->vha = vha; 484 fcport->vha = vha;
486 fcport->vp_idx = vha->vp_idx;
487 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0]; 485 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
488 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1]; 486 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
489 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2]; 487 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
@@ -544,7 +542,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
544 int rval = 0; 542 int rval = 0;
545 struct qla_hw_data *ha = vha->hw; 543 struct qla_hw_data *ha = vha->hw;
546 544
547 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 545 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
548 goto done_set_internal; 546 goto done_set_internal;
549 547
550 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); 548 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -586,7 +584,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
586 uint16_t new_config[4]; 584 uint16_t new_config[4];
587 struct qla_hw_data *ha = vha->hw; 585 struct qla_hw_data *ha = vha->hw;
588 586
589 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 587 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
590 goto done_reset_internal; 588 goto done_reset_internal;
591 589
592 memset(new_config, 0 , sizeof(new_config)); 590 memset(new_config, 0 , sizeof(new_config));
@@ -710,8 +708,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
710 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 708 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
711 709
712 if ((ha->current_topology == ISP_CFG_F || 710 if ((ha->current_topology == ISP_CFG_F ||
713 (atomic_read(&vha->loop_state) == LOOP_DOWN) || 711 ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
714 ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
715 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 712 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
716 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 713 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
717 elreq.options == EXTERNAL_LOOPBACK) { 714 elreq.options == EXTERNAL_LOOPBACK) {
@@ -1402,6 +1399,9 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1402 if (rval) 1399 if (rval)
1403 return rval; 1400 return rval;
1404 1401
1402 /* Set the isp82xx_no_md_cap not to capture minidump */
1403 ha->flags.isp82xx_no_md_cap = 1;
1404
1405 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1405 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1406 bsg_job->request_payload.sg_cnt, ha->optrom_buffer, 1406 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1407 ha->optrom_region_size); 1407 ha->optrom_region_size);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 62324a1d5573..fdee5611f3e2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,27 +11,31 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa | 14 * | Module Init and Probe | 0x0122 | 0x4b,0xba,0xfa |
15 * | Mailbox commands | 0x113e | 0x112c-0x112e | 15 * | Mailbox commands | 0x1140 | 0x111a-0x111b |
16 * | | | 0x112c-0x112e |
16 * | | | 0x113a | 17 * | | | 0x113a |
17 * | Device Discovery | 0x2086 | 0x2020-0x2022 | 18 * | Device Discovery | 0x2086 | 0x2020-0x2022 |
18 * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 | 19 * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 |
19 * | | | 0x302d-0x302e | 20 * | | | 0x302d-0x302e |
20 * | DPC Thread | 0x401c | | 21 * | DPC Thread | 0x401c | 0x4002,0x4013 |
21 * | Async Events | 0x505d | 0x502b-0x502f | 22 * | Async Events | 0x505f | 0x502b-0x502f |
22 * | | | 0x5047,0x5052 | 23 * | | | 0x5047,0x5052 |
23 * | Timer Routines | 0x6011 | 0x600e-0x600f | 24 * | Timer Routines | 0x6011 | |
24 * | User Space Interactions | 0x709f | 0x7018,0x702e, | 25 * | User Space Interactions | 0x709f | 0x7018,0x702e, |
25 * | | | 0x7039,0x7045, | 26 * | | | 0x7039,0x7045, |
26 * | | | 0x7073-0x7075, | 27 * | | | 0x7073-0x7075, |
27 * | | | 0x708c | 28 * | | | 0x708c |
28 * | Task Management | 0x803c | 0x8025-0x8026 | 29 * | Task Management | 0x803c | 0x8025-0x8026 |
29 * | | | 0x800b,0x8039 | 30 * | | | 0x800b,0x8039 |
30 * | AER/EEH | 0x900f | | 31 * | AER/EEH | 0x9011 | |
31 * | Virtual Port | 0xa007 | | 32 * | Virtual Port | 0xa007 | |
32 * | ISP82XX Specific | 0xb054 | 0xb053 | 33 * | ISP82XX Specific | 0xb054 | 0xb024 |
33 * | MultiQ | 0xc00c | | 34 * | MultiQ | 0xc00c | |
34 * | Misc | 0xd010 | | 35 * | Misc | 0xd010 | |
36 * | Target Mode | 0xe06f | |
37 * | Target Mode Management | 0xf071 | |
38 * | Target Mode Task Management | 0x1000b | |
35 * ---------------------------------------------------------------------- 39 * ----------------------------------------------------------------------
36 */ 40 */
37 41
@@ -379,6 +383,54 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
379} 383}
380 384
381static inline void * 385static inline void *
386qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
387 uint32_t **last_chain)
388{
389 struct qla2xxx_mqueue_chain *q;
390 struct qla2xxx_mqueue_header *qh;
391 uint32_t num_queues;
392 int que;
393 struct {
394 int length;
395 void *ring;
396 } aq, *aqp;
397
398 if (!ha->tgt.atio_q_length)
399 return ptr;
400
401 num_queues = 1;
402 aqp = &aq;
403 aqp->length = ha->tgt.atio_q_length;
404 aqp->ring = ha->tgt.atio_ring;
405
406 for (que = 0; que < num_queues; que++) {
407 /* aqp = ha->atio_q_map[que]; */
408 q = ptr;
409 *last_chain = &q->type;
410 q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
411 q->chain_size = htonl(
412 sizeof(struct qla2xxx_mqueue_chain) +
413 sizeof(struct qla2xxx_mqueue_header) +
414 (aqp->length * sizeof(request_t)));
415 ptr += sizeof(struct qla2xxx_mqueue_chain);
416
417 /* Add header. */
418 qh = ptr;
419 qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
420 qh->number = htonl(que);
421 qh->size = htonl(aqp->length * sizeof(request_t));
422 ptr += sizeof(struct qla2xxx_mqueue_header);
423
424 /* Add data. */
425 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
426
427 ptr += aqp->length * sizeof(request_t);
428 }
429
430 return ptr;
431}
432
433static inline void *
382qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 434qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
383{ 435{
384 struct qla2xxx_mqueue_chain *q; 436 struct qla2xxx_mqueue_chain *q;
@@ -873,6 +925,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
873 struct qla24xx_fw_dump *fw; 925 struct qla24xx_fw_dump *fw;
874 uint32_t ext_mem_cnt; 926 uint32_t ext_mem_cnt;
875 void *nxt; 927 void *nxt;
928 void *nxt_chain;
929 uint32_t *last_chain = NULL;
876 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 930 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
877 931
878 if (IS_QLA82XX(ha)) 932 if (IS_QLA82XX(ha))
@@ -1091,6 +1145,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1091 1145
1092 qla24xx_copy_eft(ha, nxt); 1146 qla24xx_copy_eft(ha, nxt);
1093 1147
1148 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1149 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1150 if (last_chain) {
1151 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1152 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1153 }
1154
1155 /* Adjust valid length. */
1156 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1157
1094qla24xx_fw_dump_failed_0: 1158qla24xx_fw_dump_failed_0:
1095 qla2xxx_dump_post_process(base_vha, rval); 1159 qla2xxx_dump_post_process(base_vha, rval);
1096 1160
@@ -1399,6 +1463,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1399 /* Chain entries -- started with MQ. */ 1463 /* Chain entries -- started with MQ. */
1400 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1464 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1401 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 1465 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1466 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1402 if (last_chain) { 1467 if (last_chain) {
1403 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1468 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1404 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1469 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -1717,6 +1782,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1717 /* Chain entries -- started with MQ. */ 1782 /* Chain entries -- started with MQ. */
1718 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1783 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1719 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 1784 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1785 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1720 if (last_chain) { 1786 if (last_chain) {
1721 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1787 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1722 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1788 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -2218,6 +2284,7 @@ copy_queue:
2218 /* Chain entries -- started with MQ. */ 2284 /* Chain entries -- started with MQ. */
2219 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 2285 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2220 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 2286 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2287 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2221 if (last_chain) { 2288 if (last_chain) {
2222 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 2289 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
2223 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 2290 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2157bdf1569a..f278df8cce0f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -244,6 +244,7 @@ struct qla2xxx_mqueue_header {
244 uint32_t queue; 244 uint32_t queue;
245#define TYPE_REQUEST_QUEUE 0x1 245#define TYPE_REQUEST_QUEUE 0x1
246#define TYPE_RESPONSE_QUEUE 0x2 246#define TYPE_RESPONSE_QUEUE 0x2
247#define TYPE_ATIO_QUEUE 0x3
247 uint32_t number; 248 uint32_t number;
248 uint32_t size; 249 uint32_t size;
249}; 250};
@@ -339,3 +340,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
339#define ql_dbg_misc 0x00010000 /* For dumping everything that is not 340#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
340 * not covered by upper categories 341 * not covered by upper categories
341 */ 342 */
343#define ql_dbg_verbose 0x00008000 /* More verbosity for each level
344 * This is to be used with other levels where
345 * more verbosity is required. It might not
346 * be applicable to all the levels.
347 */
348#define ql_dbg_tgt 0x00004000 /* Target mode */
349#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
350#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a2443031dbe7..39007f53aec0 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -186,6 +186,7 @@
186#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 186#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
187#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 187#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
188#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ 188#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
189#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
189 190
190struct req_que; 191struct req_que;
191 192
@@ -1234,11 +1235,27 @@ typedef struct {
1234 * ISP queue - response queue entry definition. 1235 * ISP queue - response queue entry definition.
1235 */ 1236 */
1236typedef struct { 1237typedef struct {
1237 uint8_t data[60]; 1238 uint8_t entry_type; /* Entry type. */
1239 uint8_t entry_count; /* Entry count. */
1240 uint8_t sys_define; /* System defined. */
1241 uint8_t entry_status; /* Entry Status. */
1242 uint32_t handle; /* System defined handle */
1243 uint8_t data[52];
1238 uint32_t signature; 1244 uint32_t signature;
1239#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */ 1245#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
1240} response_t; 1246} response_t;
1241 1247
1248/*
1249 * ISP queue - ATIO queue entry definition.
1250 */
1251struct atio {
1252 uint8_t entry_type; /* Entry type. */
1253 uint8_t entry_count; /* Entry count. */
1254 uint8_t data[58];
1255 uint32_t signature;
1256#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
1257};
1258
1242typedef union { 1259typedef union {
1243 uint16_t extended; 1260 uint16_t extended;
1244 struct { 1261 struct {
@@ -1719,11 +1736,13 @@ typedef struct fc_port {
1719 struct fc_rport *rport, *drport; 1736 struct fc_rport *rport, *drport;
1720 u32 supported_classes; 1737 u32 supported_classes;
1721 1738
1722 uint16_t vp_idx;
1723 uint8_t fc4_type; 1739 uint8_t fc4_type;
1724 uint8_t scan_state; 1740 uint8_t scan_state;
1725} fc_port_t; 1741} fc_port_t;
1726 1742
1743#define QLA_FCPORT_SCAN_NONE 0
1744#define QLA_FCPORT_SCAN_FOUND 1
1745
1727/* 1746/*
1728 * Fibre channel port/lun states. 1747 * Fibre channel port/lun states.
1729 */ 1748 */
@@ -1747,6 +1766,7 @@ static const char * const port_state_str[] = {
1747#define FCF_LOGIN_NEEDED BIT_1 1766#define FCF_LOGIN_NEEDED BIT_1
1748#define FCF_FCP2_DEVICE BIT_2 1767#define FCF_FCP2_DEVICE BIT_2
1749#define FCF_ASYNC_SENT BIT_3 1768#define FCF_ASYNC_SENT BIT_3
1769#define FCF_CONF_COMP_SUPPORTED BIT_4
1750 1770
1751/* No loop ID flag. */ 1771/* No loop ID flag. */
1752#define FC_NO_LOOP_ID 0x1000 1772#define FC_NO_LOOP_ID 0x1000
@@ -2419,6 +2439,40 @@ struct qlfc_fw {
2419 uint32_t len; 2439 uint32_t len;
2420}; 2440};
2421 2441
2442struct qlt_hw_data {
2443 /* Protected by hw lock */
2444 uint32_t enable_class_2:1;
2445 uint32_t enable_explicit_conf:1;
2446 uint32_t ini_mode_force_reverse:1;
2447 uint32_t node_name_set:1;
2448
2449 dma_addr_t atio_dma; /* Physical address. */
2450 struct atio *atio_ring; /* Base virtual address */
2451 struct atio *atio_ring_ptr; /* Current address. */
2452 uint16_t atio_ring_index; /* Current index. */
2453 uint16_t atio_q_length;
2454
2455 void *target_lport_ptr;
2456 struct qla_tgt_func_tmpl *tgt_ops;
2457 struct qla_tgt *qla_tgt;
2458 struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
2459 uint16_t current_handle;
2460
2461 struct qla_tgt_vp_map *tgt_vp_map;
2462 struct mutex tgt_mutex;
2463 struct mutex tgt_host_action_mutex;
2464
2465 int saved_set;
2466 uint16_t saved_exchange_count;
2467 uint32_t saved_firmware_options_1;
2468 uint32_t saved_firmware_options_2;
2469 uint32_t saved_firmware_options_3;
2470 uint8_t saved_firmware_options[2];
2471 uint8_t saved_add_firmware_options[2];
2472
2473 uint8_t tgt_node_name[WWN_SIZE];
2474};
2475
2422/* 2476/*
2423 * Qlogic host adapter specific data structure. 2477 * Qlogic host adapter specific data structure.
2424*/ 2478*/
@@ -2460,7 +2514,9 @@ struct qla_hw_data {
2460 uint32_t thermal_supported:1; 2514 uint32_t thermal_supported:1;
2461 uint32_t isp82xx_reset_hdlr_active:1; 2515 uint32_t isp82xx_reset_hdlr_active:1;
2462 uint32_t isp82xx_reset_owner:1; 2516 uint32_t isp82xx_reset_owner:1;
2463 /* 28 bits */ 2517 uint32_t isp82xx_no_md_cap:1;
2518 uint32_t host_shutting_down:1;
2519 /* 30 bits */
2464 } flags; 2520 } flags;
2465 2521
2466 /* This spinlock is used to protect "io transactions", you must 2522 /* This spinlock is used to protect "io transactions", you must
@@ -2804,7 +2860,6 @@ struct qla_hw_data {
2804 /* ISP2322: red, green, amber. */ 2860 /* ISP2322: red, green, amber. */
2805 uint16_t zio_mode; 2861 uint16_t zio_mode;
2806 uint16_t zio_timer; 2862 uint16_t zio_timer;
2807 struct fc_host_statistics fc_host_stat;
2808 2863
2809 struct qla_msix_entry *msix_entries; 2864 struct qla_msix_entry *msix_entries;
2810 2865
@@ -2817,7 +2872,6 @@ struct qla_hw_data {
2817 int cur_vport_count; 2872 int cur_vport_count;
2818 2873
2819 struct qla_chip_state_84xx *cs84xx; 2874 struct qla_chip_state_84xx *cs84xx;
2820 struct qla_statistics qla_stats;
2821 struct isp_operations *isp_ops; 2875 struct isp_operations *isp_ops;
2822 struct workqueue_struct *wq; 2876 struct workqueue_struct *wq;
2823 struct qlfc_fw fw_buf; 2877 struct qlfc_fw fw_buf;
@@ -2863,6 +2917,8 @@ struct qla_hw_data {
2863 dma_addr_t md_tmplt_hdr_dma; 2917 dma_addr_t md_tmplt_hdr_dma;
2864 void *md_dump; 2918 void *md_dump;
2865 uint32_t md_dump_size; 2919 uint32_t md_dump_size;
2920
2921 struct qlt_hw_data tgt;
2866}; 2922};
2867 2923
2868/* 2924/*
@@ -2920,6 +2976,7 @@ typedef struct scsi_qla_host {
2920#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ 2976#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
2921#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ 2977#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
2922#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ 2978#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
2979#define SCR_PENDING 21 /* SCR in target mode */
2923 2980
2924 uint32_t device_flags; 2981 uint32_t device_flags;
2925#define SWITCH_FOUND BIT_0 2982#define SWITCH_FOUND BIT_0
@@ -2979,10 +3036,21 @@ typedef struct scsi_qla_host {
2979 struct req_que *req; 3036 struct req_que *req;
2980 int fw_heartbeat_counter; 3037 int fw_heartbeat_counter;
2981 int seconds_since_last_heartbeat; 3038 int seconds_since_last_heartbeat;
3039 struct fc_host_statistics fc_host_stat;
3040 struct qla_statistics qla_stats;
2982 3041
2983 atomic_t vref_count; 3042 atomic_t vref_count;
2984} scsi_qla_host_t; 3043} scsi_qla_host_t;
2985 3044
3045#define SET_VP_IDX 1
3046#define SET_AL_PA 2
3047#define RESET_VP_IDX 3
3048#define RESET_AL_PA 4
3049struct qla_tgt_vp_map {
3050 uint8_t idx;
3051 scsi_qla_host_t *vha;
3052};
3053
2986/* 3054/*
2987 * Macros to help code, maintain, etc. 3055 * Macros to help code, maintain, etc.
2988 */ 3056 */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9f065804bd12..9eacd2df111b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -175,6 +175,7 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
175/* 175/*
176 * Global Function Prototypes in qla_iocb.c source file. 176 * Global Function Prototypes in qla_iocb.c source file.
177 */ 177 */
178
178extern uint16_t qla2x00_calc_iocbs_32(uint16_t); 179extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
179extern uint16_t qla2x00_calc_iocbs_64(uint16_t); 180extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
180extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); 181extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -188,6 +189,8 @@ extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
188extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); 189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
189extern int qla24xx_dif_start_scsi(srb_t *); 190extern int qla24xx_dif_start_scsi(srb_t *);
190 191
192extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
193extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
191 194
192/* 195/*
193 * Global Function Prototypes in qla_mbx.c source file. 196 * Global Function Prototypes in qla_mbx.c source file.
@@ -239,6 +242,9 @@ extern int
239qla2x00_init_firmware(scsi_qla_host_t *, uint16_t); 242qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
240 243
241extern int 244extern int
245qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
246
247extern int
242qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t); 248qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
243 249
244extern int 250extern int
@@ -383,6 +389,8 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
383extern void qla2x00_free_irqs(scsi_qla_host_t *); 389extern void qla2x00_free_irqs(scsi_qla_host_t *);
384 390
385extern int qla2x00_get_data_rate(scsi_qla_host_t *); 391extern int qla2x00_get_data_rate(scsi_qla_host_t *);
392extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
393
386/* 394/*
387 * Global Function Prototypes in qla_sup.c source file. 395 * Global Function Prototypes in qla_sup.c source file.
388 */ 396 */
@@ -546,6 +554,7 @@ extern void qla2x00_sp_free(void *, void *);
546extern void qla2x00_sp_timeout(unsigned long); 554extern void qla2x00_sp_timeout(unsigned long);
547extern void qla2x00_bsg_job_done(void *, void *, int); 555extern void qla2x00_bsg_job_done(void *, void *, int);
548extern void qla2x00_bsg_sp_free(void *, void *); 556extern void qla2x00_bsg_sp_free(void *, void *);
557extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
549 558
550/* Interrupt related */ 559/* Interrupt related */
551extern irqreturn_t qla82xx_intr_handler(int, void *); 560extern irqreturn_t qla82xx_intr_handler(int, void *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 3128f80441f5..05260d25fe46 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); 10static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
10static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); 11static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -556,7 +557,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
556 ct_req->req.rff_id.port_id[1] = vha->d_id.b.area; 557 ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
557 ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa; 558 ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
558 559
559 ct_req->req.rff_id.fc4_feature = BIT_1; 560 qlt_rff_id(vha, ct_req);
561
560 ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ 562 ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
561 563
562 /* Execute MS IOCB */ 564 /* Execute MS IOCB */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b9465643396b..ca5084743135 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -17,6 +17,9 @@
17#include <asm/prom.h> 17#include <asm/prom.h>
18#endif 18#endif
19 19
20#include <target/target_core_base.h>
21#include "qla_target.h"
22
20/* 23/*
21* QLogic ISP2x00 Hardware Support Function Prototypes. 24* QLogic ISP2x00 Hardware Support Function Prototypes.
22*/ 25*/
@@ -518,7 +521,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
518 return QLA_FUNCTION_FAILED; 521 return QLA_FUNCTION_FAILED;
519 } 522 }
520 } 523 }
521 rval = qla2x00_init_rings(vha); 524
525 if (qla_ini_mode_enabled(vha))
526 rval = qla2x00_init_rings(vha);
527
522 ha->flags.chip_reset_done = 1; 528 ha->flags.chip_reset_done = 1;
523 529
524 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { 530 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -1233,6 +1239,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1233 mq_size += ha->max_rsp_queues * 1239 mq_size += ha->max_rsp_queues *
1234 (rsp->length * sizeof(response_t)); 1240 (rsp->length * sizeof(response_t));
1235 } 1241 }
1242 if (ha->tgt.atio_q_length)
1243 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
1236 /* Allocate memory for Fibre Channel Event Buffer. */ 1244 /* Allocate memory for Fibre Channel Event Buffer. */
1237 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 1245 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
1238 goto try_eft; 1246 goto try_eft;
@@ -1696,6 +1704,12 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1696 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); 1704 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1697 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); 1705 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1698 1706
1707 /* Setup ATIO queue dma pointers for target mode */
1708 icb->atio_q_inpointer = __constant_cpu_to_le16(0);
1709 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
1710 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
1711 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
1712
1699 if (ha->mqenable || IS_QLA83XX(ha)) { 1713 if (ha->mqenable || IS_QLA83XX(ha)) {
1700 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); 1714 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1701 icb->rid = __constant_cpu_to_le16(rid); 1715 icb->rid = __constant_cpu_to_le16(rid);
@@ -1739,6 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1739 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0); 1753 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
1740 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0); 1754 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1741 } 1755 }
1756 qlt_24xx_config_rings(vha, reg);
1757
1742 /* PCI posting */ 1758 /* PCI posting */
1743 RD_REG_DWORD(&ioreg->hccr); 1759 RD_REG_DWORD(&ioreg->hccr);
1744} 1760}
@@ -1794,6 +1810,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1794 1810
1795 spin_unlock(&ha->vport_slock); 1811 spin_unlock(&ha->vport_slock);
1796 1812
1813 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
1814 ha->tgt.atio_ring_index = 0;
1815 /* Initialize ATIO queue entries */
1816 qlt_init_atio_q_entries(vha);
1817
1797 ha->isp_ops->config_rings(vha); 1818 ha->isp_ops->config_rings(vha);
1798 1819
1799 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1820 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2051,6 +2072,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2051 vha->d_id.b.area = area; 2072 vha->d_id.b.area = area;
2052 vha->d_id.b.al_pa = al_pa; 2073 vha->d_id.b.al_pa = al_pa;
2053 2074
2075 spin_lock(&ha->vport_slock);
2076 qlt_update_vp_map(vha, SET_AL_PA);
2077 spin_unlock(&ha->vport_slock);
2078
2054 if (!vha->flags.init_done) 2079 if (!vha->flags.init_done)
2055 ql_log(ql_log_info, vha, 0x2010, 2080 ql_log(ql_log_info, vha, 0x2010,
2056 "Topology - %s, Host Loop address 0x%x.\n", 2081 "Topology - %s, Host Loop address 0x%x.\n",
@@ -2185,7 +2210,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2185 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { 2210 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2186 /* Reset NVRAM data. */ 2211 /* Reset NVRAM data. */
2187 ql_log(ql_log_warn, vha, 0x0064, 2212 ql_log(ql_log_warn, vha, 0x0064,
2188 "Inconisistent NVRAM " 2213 "Inconsistent NVRAM "
2189 "detected: checksum=0x%x id=%c version=0x%x.\n", 2214 "detected: checksum=0x%x id=%c version=0x%x.\n",
2190 chksum, nv->id[0], nv->nvram_version); 2215 chksum, nv->id[0], nv->nvram_version);
2191 ql_log(ql_log_warn, vha, 0x0065, 2216 ql_log(ql_log_warn, vha, 0x0065,
@@ -2270,7 +2295,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2270 if (IS_QLA23XX(ha)) { 2295 if (IS_QLA23XX(ha)) {
2271 nv->firmware_options[0] |= BIT_2; 2296 nv->firmware_options[0] |= BIT_2;
2272 nv->firmware_options[0] &= ~BIT_3; 2297 nv->firmware_options[0] &= ~BIT_3;
2273 nv->firmware_options[0] &= ~BIT_6; 2298 nv->special_options[0] &= ~BIT_6;
2274 nv->add_firmware_options[1] |= BIT_5 | BIT_4; 2299 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
2275 2300
2276 if (IS_QLA2300(ha)) { 2301 if (IS_QLA2300(ha)) {
@@ -2467,14 +2492,21 @@ qla2x00_rport_del(void *data)
2467{ 2492{
2468 fc_port_t *fcport = data; 2493 fc_port_t *fcport = data;
2469 struct fc_rport *rport; 2494 struct fc_rport *rport;
2495 scsi_qla_host_t *vha = fcport->vha;
2470 unsigned long flags; 2496 unsigned long flags;
2471 2497
2472 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2498 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2473 rport = fcport->drport ? fcport->drport: fcport->rport; 2499 rport = fcport->drport ? fcport->drport: fcport->rport;
2474 fcport->drport = NULL; 2500 fcport->drport = NULL;
2475 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2501 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2476 if (rport) 2502 if (rport) {
2477 fc_remote_port_delete(rport); 2503 fc_remote_port_delete(rport);
2504 /*
2505 * Release the target mode FC NEXUS in qla_target.c code
2506 * if target mod is enabled.
2507 */
2508 qlt_fc_port_deleted(vha, fcport);
2509 }
2478} 2510}
2479 2511
2480/** 2512/**
@@ -2495,11 +2527,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2495 2527
2496 /* Setup fcport template structure. */ 2528 /* Setup fcport template structure. */
2497 fcport->vha = vha; 2529 fcport->vha = vha;
2498 fcport->vp_idx = vha->vp_idx;
2499 fcport->port_type = FCT_UNKNOWN; 2530 fcport->port_type = FCT_UNKNOWN;
2500 fcport->loop_id = FC_NO_LOOP_ID; 2531 fcport->loop_id = FC_NO_LOOP_ID;
2501 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 2532 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
2502 fcport->supported_classes = FC_COS_UNSPECIFIED; 2533 fcport->supported_classes = FC_COS_UNSPECIFIED;
2534 fcport->scan_state = QLA_FCPORT_SCAN_NONE;
2503 2535
2504 return fcport; 2536 return fcport;
2505} 2537}
@@ -2726,7 +2758,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2726 new_fcport->d_id.b.area = area; 2758 new_fcport->d_id.b.area = area;
2727 new_fcport->d_id.b.al_pa = al_pa; 2759 new_fcport->d_id.b.al_pa = al_pa;
2728 new_fcport->loop_id = loop_id; 2760 new_fcport->loop_id = loop_id;
2729 new_fcport->vp_idx = vha->vp_idx;
2730 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 2761 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2731 if (rval2 != QLA_SUCCESS) { 2762 if (rval2 != QLA_SUCCESS) {
2732 ql_dbg(ql_dbg_disc, vha, 0x201a, 2763 ql_dbg(ql_dbg_disc, vha, 0x201a,
@@ -2760,10 +2791,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2760 2791
2761 if (!found) { 2792 if (!found) {
2762 /* New device, add to fcports list. */ 2793 /* New device, add to fcports list. */
2763 if (vha->vp_idx) {
2764 new_fcport->vha = vha;
2765 new_fcport->vp_idx = vha->vp_idx;
2766 }
2767 list_add_tail(&new_fcport->list, &vha->vp_fcports); 2794 list_add_tail(&new_fcport->list, &vha->vp_fcports);
2768 2795
2769 /* Allocate a new replacement fcport. */ 2796 /* Allocate a new replacement fcport. */
@@ -2800,8 +2827,6 @@ cleanup_allocation:
2800static void 2827static void
2801qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2828qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2802{ 2829{
2803#define LS_UNKNOWN 2
2804 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2805 char *link_speed; 2830 char *link_speed;
2806 int rval; 2831 int rval;
2807 uint16_t mb[4]; 2832 uint16_t mb[4];
@@ -2829,11 +2854,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2829 fcport->port_name[6], fcport->port_name[7], rval, 2854 fcport->port_name[6], fcport->port_name[7], rval,
2830 fcport->fp_speed, mb[0], mb[1]); 2855 fcport->fp_speed, mb[0], mb[1]);
2831 } else { 2856 } else {
2832 link_speed = link_speeds[LS_UNKNOWN]; 2857 link_speed = qla2x00_get_link_speed_str(ha);
2833 if (fcport->fp_speed < 5)
2834 link_speed = link_speeds[fcport->fp_speed];
2835 else if (fcport->fp_speed == 0x13)
2836 link_speed = link_speeds[5];
2837 ql_dbg(ql_dbg_disc, vha, 0x2005, 2858 ql_dbg(ql_dbg_disc, vha, 0x2005,
2838 "iIDMA adjusted to %s GB/s " 2859 "iIDMA adjusted to %s GB/s "
2839 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed, 2860 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
@@ -2864,6 +2885,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2864 "Unable to allocate fc remote port.\n"); 2885 "Unable to allocate fc remote port.\n");
2865 return; 2886 return;
2866 } 2887 }
2888 /*
2889 * Create target mode FC NEXUS in qla_target.c if target mode is
2890 * enabled..
2891 */
2892 qlt_fc_port_added(vha, fcport);
2893
2867 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2894 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2868 *((fc_port_t **)rport->dd_data) = fcport; 2895 *((fc_port_t **)rport->dd_data) = fcport;
2869 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2896 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
@@ -2921,7 +2948,7 @@ static int
2921qla2x00_configure_fabric(scsi_qla_host_t *vha) 2948qla2x00_configure_fabric(scsi_qla_host_t *vha)
2922{ 2949{
2923 int rval; 2950 int rval;
2924 fc_port_t *fcport, *fcptemp; 2951 fc_port_t *fcport;
2925 uint16_t next_loopid; 2952 uint16_t next_loopid;
2926 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2953 uint16_t mb[MAILBOX_REGISTER_COUNT];
2927 uint16_t loop_id; 2954 uint16_t loop_id;
@@ -2959,7 +2986,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2959 0xfc, mb, BIT_1|BIT_0); 2986 0xfc, mb, BIT_1|BIT_0);
2960 if (rval != QLA_SUCCESS) { 2987 if (rval != QLA_SUCCESS) {
2961 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2988 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2962 return rval; 2989 break;
2963 } 2990 }
2964 if (mb[0] != MBS_COMMAND_COMPLETE) { 2991 if (mb[0] != MBS_COMMAND_COMPLETE) {
2965 ql_dbg(ql_dbg_disc, vha, 0x2042, 2992 ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -2991,21 +3018,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2991 } 3018 }
2992 } 3019 }
2993 3020
2994#define QLA_FCPORT_SCAN 1
2995#define QLA_FCPORT_FOUND 2
2996
2997 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2998 fcport->scan_state = QLA_FCPORT_SCAN;
2999 }
3000
3001 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 3021 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3002 if (rval != QLA_SUCCESS) 3022 if (rval != QLA_SUCCESS)
3003 break; 3023 break;
3004 3024
3005 /* 3025 /* Add new ports to existing port list */
3006 * Logout all previous fabric devices marked lost, except 3026 list_splice_tail_init(&new_fcports, &vha->vp_fcports);
3007 * FCP2 devices. 3027
3008 */ 3028 /* Starting free loop ID. */
3029 next_loopid = ha->min_external_loopid;
3030
3009 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3031 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3010 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3032 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3011 break; 3033 break;
@@ -3013,7 +3035,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3013 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 3035 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3014 continue; 3036 continue;
3015 3037
3016 if (fcport->scan_state == QLA_FCPORT_SCAN && 3038 /* Logout lost/gone fabric devices (non-FCP2) */
3039 if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
3017 atomic_read(&fcport->state) == FCS_ONLINE) { 3040 atomic_read(&fcport->state) == FCS_ONLINE) {
3018 qla2x00_mark_device_lost(vha, fcport, 3041 qla2x00_mark_device_lost(vha, fcport,
3019 ql2xplogiabsentdevice, 0); 3042 ql2xplogiabsentdevice, 0);
@@ -3026,78 +3049,30 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3026 fcport->d_id.b.domain, 3049 fcport->d_id.b.domain,
3027 fcport->d_id.b.area, 3050 fcport->d_id.b.area,
3028 fcport->d_id.b.al_pa); 3051 fcport->d_id.b.al_pa);
3029 fcport->loop_id = FC_NO_LOOP_ID;
3030 } 3052 }
3031 }
3032 }
3033
3034 /* Starting free loop ID. */
3035 next_loopid = ha->min_external_loopid;
3036
3037 /*
3038 * Scan through our port list and login entries that need to be
3039 * logged in.
3040 */
3041 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3042 if (atomic_read(&vha->loop_down_timer) ||
3043 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3044 break;
3045
3046 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3047 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3048 continue; 3053 continue;
3049
3050 if (fcport->loop_id == FC_NO_LOOP_ID) {
3051 fcport->loop_id = next_loopid;
3052 rval = qla2x00_find_new_loop_id(
3053 base_vha, fcport);
3054 if (rval != QLA_SUCCESS) {
3055 /* Ran out of IDs to use */
3056 break;
3057 }
3058 } 3054 }
3059 /* Login and update database */ 3055 fcport->scan_state = QLA_FCPORT_SCAN_NONE;
3060 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3056
3061 } 3057 /* Login fabric devices that need a login */
3062 3058 if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
3063 /* Exit if out of loop IDs. */ 3059 atomic_read(&vha->loop_down_timer) == 0) {
3064 if (rval != QLA_SUCCESS) { 3060 if (fcport->loop_id == FC_NO_LOOP_ID) {
3065 break; 3061 fcport->loop_id = next_loopid;
3066 } 3062 rval = qla2x00_find_new_loop_id(
3067 3063 base_vha, fcport);
3068 /* 3064 if (rval != QLA_SUCCESS) {
3069 * Login and add the new devices to our port list. 3065 /* Ran out of IDs to use */
3070 */ 3066 continue;
3071 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 3067 }
3072 if (atomic_read(&vha->loop_down_timer) || 3068 }
3073 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3074 break;
3075
3076 /* Find a new loop ID to use. */
3077 fcport->loop_id = next_loopid;
3078 rval = qla2x00_find_new_loop_id(base_vha, fcport);
3079 if (rval != QLA_SUCCESS) {
3080 /* Ran out of IDs to use */
3081 break;
3082 } 3069 }
3083 3070
3084 /* Login and update database */ 3071 /* Login and update database */
3085 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3072 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3086
3087 if (vha->vp_idx) {
3088 fcport->vha = vha;
3089 fcport->vp_idx = vha->vp_idx;
3090 }
3091 list_move_tail(&fcport->list, &vha->vp_fcports);
3092 } 3073 }
3093 } while (0); 3074 } while (0);
3094 3075
3095 /* Free all new device structures not processed. */
3096 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3097 list_del(&fcport->list);
3098 kfree(fcport);
3099 }
3100
3101 if (rval) { 3076 if (rval) {
3102 ql_dbg(ql_dbg_disc, vha, 0x2068, 3077 ql_dbg(ql_dbg_disc, vha, 0x2068,
3103 "Configure fabric error exit rval=%d.\n", rval); 3078 "Configure fabric error exit rval=%d.\n", rval);
@@ -3287,7 +3262,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3287 WWN_SIZE)) 3262 WWN_SIZE))
3288 continue; 3263 continue;
3289 3264
3290 fcport->scan_state = QLA_FCPORT_FOUND; 3265 fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
3291 3266
3292 found++; 3267 found++;
3293 3268
@@ -3595,6 +3570,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3595 if (mb[10] & BIT_1) 3570 if (mb[10] & BIT_1)
3596 fcport->supported_classes |= FC_COS_CLASS3; 3571 fcport->supported_classes |= FC_COS_CLASS3;
3597 3572
3573 if (IS_FWI2_CAPABLE(ha)) {
3574 if (mb[10] & BIT_7)
3575 fcport->flags |=
3576 FCF_CONF_COMP_SUPPORTED;
3577 }
3578
3598 rval = QLA_SUCCESS; 3579 rval = QLA_SUCCESS;
3599 break; 3580 break;
3600 } else if (mb[0] == MBS_LOOP_ID_USED) { 3581 } else if (mb[0] == MBS_LOOP_ID_USED) {
@@ -3841,7 +3822,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3841 vha->flags.online = 0; 3822 vha->flags.online = 0;
3842 ha->flags.chip_reset_done = 0; 3823 ha->flags.chip_reset_done = 0;
3843 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3824 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3844 ha->qla_stats.total_isp_aborts++; 3825 vha->qla_stats.total_isp_aborts++;
3845 3826
3846 ql_log(ql_log_info, vha, 0x00af, 3827 ql_log(ql_log_info, vha, 0x00af,
3847 "Performing ISP error recovery - ha=%p.\n", ha); 3828 "Performing ISP error recovery - ha=%p.\n", ha);
@@ -4066,6 +4047,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4066 struct qla_hw_data *ha = vha->hw; 4047 struct qla_hw_data *ha = vha->hw;
4067 struct req_que *req = ha->req_q_map[0]; 4048 struct req_que *req = ha->req_q_map[0];
4068 struct rsp_que *rsp = ha->rsp_q_map[0]; 4049 struct rsp_que *rsp = ha->rsp_q_map[0];
4050 unsigned long flags;
4069 4051
4070 /* If firmware needs to be loaded */ 4052 /* If firmware needs to be loaded */
4071 if (qla2x00_isp_firmware(vha)) { 4053 if (qla2x00_isp_firmware(vha)) {
@@ -4090,6 +4072,16 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4090 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4072 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4091 4073
4092 vha->flags.online = 1; 4074 vha->flags.online = 1;
4075
4076 /*
4077 * Process any ATIO queue entries that came in
4078 * while we weren't online.
4079 */
4080 spin_lock_irqsave(&ha->hardware_lock, flags);
4081 if (qla_tgt_mode_enabled(vha))
4082 qlt_24xx_process_atio_queue(vha);
4083 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4084
4093 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 4085 /* Wait at most MAX_TARGET RSCNs for a stable link. */
4094 wait_time = 256; 4086 wait_time = 256;
4095 do { 4087 do {
@@ -4279,7 +4271,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4279 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 4271 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4280 /* Reset NVRAM data. */ 4272 /* Reset NVRAM data. */
4281 ql_log(ql_log_warn, vha, 0x006b, 4273 ql_log(ql_log_warn, vha, 0x006b,
4282 "Inconisistent NVRAM detected: checksum=0x%x id=%c " 4274 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
4283 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); 4275 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
4284 ql_log(ql_log_warn, vha, 0x006c, 4276 ql_log(ql_log_warn, vha, 0x006c,
4285 "Falling back to functioning (yet invalid -- WWPN) " 4277 "Falling back to functioning (yet invalid -- WWPN) "
@@ -4330,6 +4322,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4330 rval = 1; 4322 rval = 1;
4331 } 4323 }
4332 4324
4325 if (!qla_ini_mode_enabled(vha)) {
4326 /* Don't enable full login after initial LIP */
4327 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
4328 /* Don't enable LIP full login for initiator */
4329 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
4330 }
4331
4332 qlt_24xx_config_nvram_stage1(vha, nv);
4333
4333 /* Reset Initialization control block */ 4334 /* Reset Initialization control block */
4334 memset(icb, 0, ha->init_cb_size); 4335 memset(icb, 0, ha->init_cb_size);
4335 4336
@@ -4357,8 +4358,10 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4357 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 4358 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4358 "QLA2462"); 4359 "QLA2462");
4359 4360
4360 /* Use alternate WWN? */ 4361 qlt_24xx_config_nvram_stage2(vha, icb);
4362
4361 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 4363 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4364 /* Use alternate WWN? */
4362 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 4365 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4363 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 4366 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4364 } 4367 }
@@ -5029,7 +5032,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5029 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 5032 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5030 /* Reset NVRAM data. */ 5033 /* Reset NVRAM data. */
5031 ql_log(ql_log_info, vha, 0x0073, 5034 ql_log(ql_log_info, vha, 0x0073,
5032 "Inconisistent NVRAM detected: checksum=0x%x id=%c " 5035 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
5033 "version=0x%x.\n", chksum, nv->id[0], 5036 "version=0x%x.\n", chksum, nv->id[0],
5034 le16_to_cpu(nv->nvram_version)); 5037 le16_to_cpu(nv->nvram_version));
5035 ql_log(ql_log_info, vha, 0x0074, 5038 ql_log(ql_log_info, vha, 0x0074,
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index eac950924497..70dbf53d9e0f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/blkdev.h> 10#include <linux/blkdev.h>
10#include <linux/delay.h> 11#include <linux/delay.h>
@@ -23,18 +24,17 @@ qla2x00_get_cmd_direction(srb_t *sp)
23{ 24{
24 uint16_t cflags; 25 uint16_t cflags;
25 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 26 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
27 struct scsi_qla_host *vha = sp->fcport->vha;
26 28
27 cflags = 0; 29 cflags = 0;
28 30
29 /* Set transfer direction */ 31 /* Set transfer direction */
30 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 32 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
31 cflags = CF_WRITE; 33 cflags = CF_WRITE;
32 sp->fcport->vha->hw->qla_stats.output_bytes += 34 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
33 scsi_bufflen(cmd);
34 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
35 cflags = CF_READ; 36 cflags = CF_READ;
36 sp->fcport->vha->hw->qla_stats.input_bytes += 37 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
37 scsi_bufflen(cmd);
38 } 38 }
39 return (cflags); 39 return (cflags);
40} 40}
@@ -385,9 +385,10 @@ qla2x00_start_scsi(srb_t *sp)
385 else 385 else
386 req->cnt = req->length - 386 req->cnt = req->length -
387 (req->ring_index - cnt); 387 (req->ring_index - cnt);
388 /* If still no head room then bail out */
389 if (req->cnt < (req_cnt + 2))
390 goto queuing_error;
388 } 391 }
389 if (req->cnt < (req_cnt + 2))
390 goto queuing_error;
391 392
392 /* Build command packet */ 393 /* Build command packet */
393 req->current_outstanding_cmd = handle; 394 req->current_outstanding_cmd = handle;
@@ -470,7 +471,7 @@ queuing_error:
470/** 471/**
471 * qla2x00_start_iocbs() - Execute the IOCB command 472 * qla2x00_start_iocbs() - Execute the IOCB command
472 */ 473 */
473static void 474void
474qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) 475qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
475{ 476{
476 struct qla_hw_data *ha = vha->hw; 477 struct qla_hw_data *ha = vha->hw;
@@ -571,6 +572,29 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
571 return (ret); 572 return (ret);
572} 573}
573 574
575/*
576 * qla2x00_issue_marker
577 *
578 * Issue marker
579 * Caller CAN have hardware lock held as specified by ha_locked parameter.
580 * Might release it, then reaquire.
581 */
582int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
583{
584 if (ha_locked) {
585 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
586 MK_SYNC_ALL) != QLA_SUCCESS)
587 return QLA_FUNCTION_FAILED;
588 } else {
589 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
590 MK_SYNC_ALL) != QLA_SUCCESS)
591 return QLA_FUNCTION_FAILED;
592 }
593 vha->marker_needed = 0;
594
595 return QLA_SUCCESS;
596}
597
574/** 598/**
575 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and 599 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
576 * Continuation Type 1 IOCBs to allocate. 600 * Continuation Type 1 IOCBs to allocate.
@@ -629,11 +653,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
629 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 653 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
630 cmd_pkt->control_flags = 654 cmd_pkt->control_flags =
631 __constant_cpu_to_le16(CF_WRITE_DATA); 655 __constant_cpu_to_le16(CF_WRITE_DATA);
632 ha->qla_stats.output_bytes += scsi_bufflen(cmd); 656 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
633 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 657 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
634 cmd_pkt->control_flags = 658 cmd_pkt->control_flags =
635 __constant_cpu_to_le16(CF_READ_DATA); 659 __constant_cpu_to_le16(CF_READ_DATA);
636 ha->qla_stats.input_bytes += scsi_bufflen(cmd); 660 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
637 } 661 }
638 662
639 cur_seg = scsi_sglist(cmd); 663 cur_seg = scsi_sglist(cmd);
@@ -745,13 +769,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
745 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 769 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
746 cmd_pkt->task_mgmt_flags = 770 cmd_pkt->task_mgmt_flags =
747 __constant_cpu_to_le16(TMF_WRITE_DATA); 771 __constant_cpu_to_le16(TMF_WRITE_DATA);
748 sp->fcport->vha->hw->qla_stats.output_bytes += 772 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
749 scsi_bufflen(cmd);
750 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 773 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
751 cmd_pkt->task_mgmt_flags = 774 cmd_pkt->task_mgmt_flags =
752 __constant_cpu_to_le16(TMF_READ_DATA); 775 __constant_cpu_to_le16(TMF_READ_DATA);
753 sp->fcport->vha->hw->qla_stats.input_bytes += 776 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
754 scsi_bufflen(cmd);
755 } 777 }
756 778
757 /* One DSD is available in the Command Type 3 IOCB */ 779 /* One DSD is available in the Command Type 3 IOCB */
@@ -1245,7 +1267,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1245 return QLA_SUCCESS; 1267 return QLA_SUCCESS;
1246 } 1268 }
1247 1269
1248 cmd_pkt->vp_index = sp->fcport->vp_idx; 1270 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1249 1271
1250 /* Set transfer direction */ 1272 /* Set transfer direction */
1251 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 1273 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1502,9 +1524,9 @@ qla24xx_start_scsi(srb_t *sp)
1502 else 1524 else
1503 req->cnt = req->length - 1525 req->cnt = req->length -
1504 (req->ring_index - cnt); 1526 (req->ring_index - cnt);
1527 if (req->cnt < (req_cnt + 2))
1528 goto queuing_error;
1505 } 1529 }
1506 if (req->cnt < (req_cnt + 2))
1507 goto queuing_error;
1508 1530
1509 /* Build command packet. */ 1531 /* Build command packet. */
1510 req->current_outstanding_cmd = handle; 1532 req->current_outstanding_cmd = handle;
@@ -1527,7 +1549,7 @@ qla24xx_start_scsi(srb_t *sp)
1527 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1549 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1528 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1550 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1529 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1551 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1530 cmd_pkt->vp_index = sp->fcport->vp_idx; 1552 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1531 1553
1532 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1554 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1533 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1555 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -1717,11 +1739,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
1717 else 1739 else
1718 req->cnt = req->length - 1740 req->cnt = req->length -
1719 (req->ring_index - cnt); 1741 (req->ring_index - cnt);
1742 if (req->cnt < (req_cnt + 2))
1743 goto queuing_error;
1720 } 1744 }
1721 1745
1722 if (req->cnt < (req_cnt + 2))
1723 goto queuing_error;
1724
1725 status |= QDSS_GOT_Q_SPACE; 1746 status |= QDSS_GOT_Q_SPACE;
1726 1747
1727 /* Build header part of command packet (excluding the OPCODE). */ 1748 /* Build header part of command packet (excluding the OPCODE). */
@@ -1898,7 +1919,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1898 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1919 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1899 logio->port_id[1] = sp->fcport->d_id.b.area; 1920 logio->port_id[1] = sp->fcport->d_id.b.area;
1900 logio->port_id[2] = sp->fcport->d_id.b.domain; 1921 logio->port_id[2] = sp->fcport->d_id.b.domain;
1901 logio->vp_index = sp->fcport->vp_idx; 1922 logio->vp_index = sp->fcport->vha->vp_idx;
1902} 1923}
1903 1924
1904static void 1925static void
@@ -1922,7 +1943,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1922 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 1943 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1923 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 1944 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1924 sp->fcport->d_id.b.al_pa); 1945 sp->fcport->d_id.b.al_pa);
1925 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); 1946 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1926} 1947}
1927 1948
1928static void 1949static void
@@ -1935,7 +1956,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1935 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1956 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1936 logio->port_id[1] = sp->fcport->d_id.b.area; 1957 logio->port_id[1] = sp->fcport->d_id.b.area;
1937 logio->port_id[2] = sp->fcport->d_id.b.domain; 1958 logio->port_id[2] = sp->fcport->d_id.b.domain;
1938 logio->vp_index = sp->fcport->vp_idx; 1959 logio->vp_index = sp->fcport->vha->vp_idx;
1939} 1960}
1940 1961
1941static void 1962static void
@@ -1952,7 +1973,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1952 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 1973 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1953 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 1974 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1954 sp->fcport->d_id.b.al_pa); 1975 sp->fcport->d_id.b.al_pa);
1955 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); 1976 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1956 /* Implicit: mbx->mbx10 = 0. */ 1977 /* Implicit: mbx->mbx10 = 0. */
1957} 1978}
1958 1979
@@ -1962,7 +1983,7 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1962 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1983 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1963 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); 1984 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1964 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1985 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1965 logio->vp_index = sp->fcport->vp_idx; 1986 logio->vp_index = sp->fcport->vha->vp_idx;
1966} 1987}
1967 1988
1968static void 1989static void
@@ -1983,7 +2004,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1983 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); 2004 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1984 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); 2005 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1985 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); 2006 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1986 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); 2007 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1987} 2008}
1988 2009
1989static void 2010static void
@@ -2009,7 +2030,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2009 tsk->port_id[0] = fcport->d_id.b.al_pa; 2030 tsk->port_id[0] = fcport->d_id.b.al_pa;
2010 tsk->port_id[1] = fcport->d_id.b.area; 2031 tsk->port_id[1] = fcport->d_id.b.area;
2011 tsk->port_id[2] = fcport->d_id.b.domain; 2032 tsk->port_id[2] = fcport->d_id.b.domain;
2012 tsk->vp_index = fcport->vp_idx; 2033 tsk->vp_index = fcport->vha->vp_idx;
2013 2034
2014 if (flags == TCF_LUN_RESET) { 2035 if (flags == TCF_LUN_RESET) {
2015 int_to_scsilun(lun, &tsk->lun); 2036 int_to_scsilun(lun, &tsk->lun);
@@ -2030,7 +2051,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2030 els_iocb->handle = sp->handle; 2051 els_iocb->handle = sp->handle;
2031 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2052 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2032 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); 2053 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2033 els_iocb->vp_index = sp->fcport->vp_idx; 2054 els_iocb->vp_index = sp->fcport->vha->vp_idx;
2034 els_iocb->sof_type = EST_SOFI3; 2055 els_iocb->sof_type = EST_SOFI3;
2035 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2056 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2036 2057
@@ -2160,7 +2181,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2160 ct_iocb->handle = sp->handle; 2181 ct_iocb->handle = sp->handle;
2161 2182
2162 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2183 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2163 ct_iocb->vp_index = sp->fcport->vp_idx; 2184 ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2164 ct_iocb->comp_status = __constant_cpu_to_le16(0); 2185 ct_iocb->comp_status = __constant_cpu_to_le16(0);
2165 2186
2166 ct_iocb->cmd_dsd_count = 2187 ct_iocb->cmd_dsd_count =
@@ -2343,11 +2364,10 @@ sufficient_dsds:
2343 else 2364 else
2344 req->cnt = req->length - 2365 req->cnt = req->length -
2345 (req->ring_index - cnt); 2366 (req->ring_index - cnt);
2367 if (req->cnt < (req_cnt + 2))
2368 goto queuing_error;
2346 } 2369 }
2347 2370
2348 if (req->cnt < (req_cnt + 2))
2349 goto queuing_error;
2350
2351 ctx = sp->u.scmd.ctx = 2371 ctx = sp->u.scmd.ctx =
2352 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2372 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2353 if (!ctx) { 2373 if (!ctx) {
@@ -2362,7 +2382,7 @@ sufficient_dsds:
2362 if (!ctx->fcp_cmnd) { 2382 if (!ctx->fcp_cmnd) {
2363 ql_log(ql_log_fatal, vha, 0x3011, 2383 ql_log(ql_log_fatal, vha, 0x3011,
2364 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); 2384 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2365 goto queuing_error_fcp_cmnd; 2385 goto queuing_error;
2366 } 2386 }
2367 2387
2368 /* Initialize the DSD list and dma handle */ 2388 /* Initialize the DSD list and dma handle */
@@ -2400,7 +2420,7 @@ sufficient_dsds:
2400 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2420 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2401 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2421 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2402 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2422 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2403 cmd_pkt->vp_index = sp->fcport->vp_idx; 2423 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2404 2424
2405 /* Build IOCB segments */ 2425 /* Build IOCB segments */
2406 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 2426 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@@ -2489,7 +2509,7 @@ sufficient_dsds:
2489 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2509 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2490 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2510 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2491 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2511 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2492 cmd_pkt->vp_index = sp->fcport->vp_idx; 2512 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2493 2513
2494 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2514 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2495 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 2515 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ce42288049b5..6f67a9d4998b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/delay.h> 10#include <linux/delay.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
@@ -309,6 +310,28 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
309 "IDC failed to post ACK.\n"); 310 "IDC failed to post ACK.\n");
310} 311}
311 312
313#define LS_UNKNOWN 2
314char *
315qla2x00_get_link_speed_str(struct qla_hw_data *ha)
316{
317 static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
318 char *link_speed;
319 int fw_speed = ha->link_data_rate;
320
321 if (IS_QLA2100(ha) || IS_QLA2200(ha))
322 link_speed = link_speeds[0];
323 else if (fw_speed == 0x13)
324 link_speed = link_speeds[6];
325 else {
326 link_speed = link_speeds[LS_UNKNOWN];
327 if (fw_speed < 6)
328 link_speed =
329 link_speeds[fw_speed];
330 }
331
332 return link_speed;
333}
334
312/** 335/**
313 * qla2x00_async_event() - Process aynchronous events. 336 * qla2x00_async_event() - Process aynchronous events.
314 * @ha: SCSI driver HA context 337 * @ha: SCSI driver HA context
@@ -317,9 +340,6 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
317void 340void
318qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 341qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
319{ 342{
320#define LS_UNKNOWN 2
321 static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
322 char *link_speed;
323 uint16_t handle_cnt; 343 uint16_t handle_cnt;
324 uint16_t cnt, mbx; 344 uint16_t cnt, mbx;
325 uint32_t handles[5]; 345 uint32_t handles[5];
@@ -454,8 +474,8 @@ skip_rio:
454 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 474 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
455 ql_dbg(ql_dbg_async, vha, 0x5008, 475 ql_dbg(ql_dbg_async, vha, 0x5008,
456 "Asynchronous WAKEUP_THRES.\n"); 476 "Asynchronous WAKEUP_THRES.\n");
457 break;
458 477
478 break;
459 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 479 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
460 ql_dbg(ql_dbg_async, vha, 0x5009, 480 ql_dbg(ql_dbg_async, vha, 0x5009,
461 "LIP occurred (%x).\n", mb[1]); 481 "LIP occurred (%x).\n", mb[1]);
@@ -479,20 +499,14 @@ skip_rio:
479 break; 499 break;
480 500
481 case MBA_LOOP_UP: /* Loop Up Event */ 501 case MBA_LOOP_UP: /* Loop Up Event */
482 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 502 if (IS_QLA2100(ha) || IS_QLA2200(ha))
483 link_speed = link_speeds[0];
484 ha->link_data_rate = PORT_SPEED_1GB; 503 ha->link_data_rate = PORT_SPEED_1GB;
485 } else { 504 else
486 link_speed = link_speeds[LS_UNKNOWN];
487 if (mb[1] < 6)
488 link_speed = link_speeds[mb[1]];
489 else if (mb[1] == 0x13)
490 link_speed = link_speeds[6];
491 ha->link_data_rate = mb[1]; 505 ha->link_data_rate = mb[1];
492 }
493 506
494 ql_dbg(ql_dbg_async, vha, 0x500a, 507 ql_dbg(ql_dbg_async, vha, 0x500a,
495 "LOOP UP detected (%s Gbps).\n", link_speed); 508 "LOOP UP detected (%s Gbps).\n",
509 qla2x00_get_link_speed_str(ha));
496 510
497 vha->flags.management_server_logged_in = 0; 511 vha->flags.management_server_logged_in = 0;
498 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 512 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -638,6 +652,8 @@ skip_rio:
638 ql_dbg(ql_dbg_async, vha, 0x5010, 652 ql_dbg(ql_dbg_async, vha, 0x5010,
639 "Port unavailable %04x %04x %04x.\n", 653 "Port unavailable %04x %04x %04x.\n",
640 mb[1], mb[2], mb[3]); 654 mb[1], mb[2], mb[3]);
655 ql_log(ql_log_warn, vha, 0x505e,
656 "Link is offline.\n");
641 657
642 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 658 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
643 atomic_set(&vha->loop_state, LOOP_DOWN); 659 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -670,12 +686,17 @@ skip_rio:
670 ql_dbg(ql_dbg_async, vha, 0x5011, 686 ql_dbg(ql_dbg_async, vha, 0x5011,
671 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 687 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
672 mb[1], mb[2], mb[3]); 688 mb[1], mb[2], mb[3]);
689
690 qlt_async_event(mb[0], vha, mb);
673 break; 691 break;
674 } 692 }
675 693
676 ql_dbg(ql_dbg_async, vha, 0x5012, 694 ql_dbg(ql_dbg_async, vha, 0x5012,
677 "Port database changed %04x %04x %04x.\n", 695 "Port database changed %04x %04x %04x.\n",
678 mb[1], mb[2], mb[3]); 696 mb[1], mb[2], mb[3]);
697 ql_log(ql_log_warn, vha, 0x505f,
698 "Link is operational (%s Gbps).\n",
699 qla2x00_get_link_speed_str(ha));
679 700
680 /* 701 /*
681 * Mark all devices as missing so we will login again. 702 * Mark all devices as missing so we will login again.
@@ -684,8 +705,13 @@ skip_rio:
684 705
685 qla2x00_mark_all_devices_lost(vha, 1); 706 qla2x00_mark_all_devices_lost(vha, 1);
686 707
708 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
709 set_bit(SCR_PENDING, &vha->dpc_flags);
710
687 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 711 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
688 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 712 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
713
714 qlt_async_event(mb[0], vha, mb);
689 break; 715 break;
690 716
691 case MBA_RSCN_UPDATE: /* State Change Registration */ 717 case MBA_RSCN_UPDATE: /* State Change Registration */
@@ -807,6 +833,8 @@ skip_rio:
807 mb[0], mb[1], mb[2], mb[3]); 833 mb[0], mb[1], mb[2], mb[3]);
808 } 834 }
809 835
836 qlt_async_event(mb[0], vha, mb);
837
810 if (!vha->vp_idx && ha->num_vhosts) 838 if (!vha->vp_idx && ha->num_vhosts)
811 qla2x00_alert_all_vps(rsp, mb); 839 qla2x00_alert_all_vps(rsp, mb);
812} 840}
@@ -1172,6 +1200,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1172 } else if (iop[0] & BIT_5) 1200 } else if (iop[0] & BIT_5)
1173 fcport->port_type = FCT_INITIATOR; 1201 fcport->port_type = FCT_INITIATOR;
1174 1202
1203 if (iop[0] & BIT_7)
1204 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1205
1175 if (logio->io_parameter[7] || logio->io_parameter[8]) 1206 if (logio->io_parameter[7] || logio->io_parameter[8])
1176 fcport->supported_classes |= FC_COS_CLASS2; 1207 fcport->supported_classes |= FC_COS_CLASS2;
1177 if (logio->io_parameter[9] || logio->io_parameter[10]) 1208 if (logio->io_parameter[9] || logio->io_parameter[10])
@@ -1986,6 +2017,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1986 2017
1987 if (pkt->entry_status != 0) { 2018 if (pkt->entry_status != 0) {
1988 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2019 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2020
2021 (void)qlt_24xx_process_response_error(vha, pkt);
2022
1989 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2023 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1990 wmb(); 2024 wmb();
1991 continue; 2025 continue;
@@ -2016,6 +2050,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2016 case ELS_IOCB_TYPE: 2050 case ELS_IOCB_TYPE:
2017 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2051 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2018 break; 2052 break;
2053 case ABTS_RECV_24XX:
2054 /* ensure that the ATIO queue is empty */
2055 qlt_24xx_process_atio_queue(vha);
2056 case ABTS_RESP_24XX:
2057 case CTIO_TYPE7:
2058 case NOTIFY_ACK_TYPE:
2059 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2060 break;
2019 case MARKER_TYPE: 2061 case MARKER_TYPE:
2020 /* Do nothing in this case, this check is to prevent it 2062 /* Do nothing in this case, this check is to prevent it
2021 * from falling into default case 2063 * from falling into default case
@@ -2168,6 +2210,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
2168 case 0x14: 2210 case 0x14:
2169 qla24xx_process_response_queue(vha, rsp); 2211 qla24xx_process_response_queue(vha, rsp);
2170 break; 2212 break;
2213 case 0x1C: /* ATIO queue updated */
2214 qlt_24xx_process_atio_queue(vha);
2215 break;
2216 case 0x1D: /* ATIO and response queues updated */
2217 qlt_24xx_process_atio_queue(vha);
2218 qla24xx_process_response_queue(vha, rsp);
2219 break;
2171 default: 2220 default:
2172 ql_dbg(ql_dbg_async, vha, 0x504f, 2221 ql_dbg(ql_dbg_async, vha, 0x504f,
2173 "Unrecognized interrupt type (%d).\n", stat * 0xff); 2222 "Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2312,6 +2361,13 @@ qla24xx_msix_default(int irq, void *dev_id)
2312 case 0x14: 2361 case 0x14:
2313 qla24xx_process_response_queue(vha, rsp); 2362 qla24xx_process_response_queue(vha, rsp);
2314 break; 2363 break;
2364 case 0x1C: /* ATIO queue updated */
2365 qlt_24xx_process_atio_queue(vha);
2366 break;
2367 case 0x1D: /* ATIO and response queues updated */
2368 qlt_24xx_process_atio_queue(vha);
2369 qla24xx_process_response_queue(vha, rsp);
2370 break;
2315 default: 2371 default:
2316 ql_dbg(ql_dbg_async, vha, 0x5051, 2372 ql_dbg(ql_dbg_async, vha, 0x5051,
2317 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2373 "Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -2564,7 +2620,15 @@ void
2564qla2x00_free_irqs(scsi_qla_host_t *vha) 2620qla2x00_free_irqs(scsi_qla_host_t *vha)
2565{ 2621{
2566 struct qla_hw_data *ha = vha->hw; 2622 struct qla_hw_data *ha = vha->hw;
2567 struct rsp_que *rsp = ha->rsp_q_map[0]; 2623 struct rsp_que *rsp;
2624
2625 /*
2626 * We need to check that ha->rsp_q_map is valid in case we are called
2627 * from a probe failure context.
2628 */
2629 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
2630 return;
2631 rsp = ha->rsp_q_map[0];
2568 2632
2569 if (ha->flags.msix_enabled) 2633 if (ha->flags.msix_enabled)
2570 qla24xx_disable_msix(ha); 2634 qla24xx_disable_msix(ha);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b4a23394a7bd..d5ce92c0a8fc 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/delay.h> 10#include <linux/delay.h>
10#include <linux/gfp.h> 11#include <linux/gfp.h>
@@ -270,11 +271,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
270 ictrl = RD_REG_WORD(&reg->isp.ictrl); 271 ictrl = RD_REG_WORD(&reg->isp.ictrl);
271 } 272 }
272 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, 273 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
273 "MBX Command timeout for cmd %x.\n", command); 274 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
274 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a, 275 "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
275 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
276 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
277 "mb[0] = 0x%x.\n", mb0);
278 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); 276 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
279 277
280 /* 278 /*
@@ -320,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
320 CRB_NIU_XG_PAUSE_CTL_P1); 318 CRB_NIU_XG_PAUSE_CTL_P1);
321 } 319 }
322 ql_log(ql_log_info, base_vha, 0x101c, 320 ql_log(ql_log_info, base_vha, 0x101c,
323 "Mailbox cmd timeout occured, cmd=0x%x, " 321 "Mailbox cmd timeout occurred, cmd=0x%x, "
324 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " 322 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
325 "abort.\n", command, mcp->mb[0], 323 "abort.\n", command, mcp->mb[0],
326 ha->flags.eeh_busy); 324 ha->flags.eeh_busy);
@@ -345,7 +343,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
345 CRB_NIU_XG_PAUSE_CTL_P1); 343 CRB_NIU_XG_PAUSE_CTL_P1);
346 } 344 }
347 ql_log(ql_log_info, base_vha, 0x101e, 345 ql_log(ql_log_info, base_vha, 0x101e,
348 "Mailbox cmd timeout occured, cmd=0x%x, " 346 "Mailbox cmd timeout occurred, cmd=0x%x, "
349 "mb[0]=0x%x. Scheduling ISP abort ", 347 "mb[0]=0x%x. Scheduling ISP abort ",
350 command, mcp->mb[0]); 348 command, mcp->mb[0]);
351 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 349 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
@@ -390,7 +388,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
390 mbx_cmd_t mc; 388 mbx_cmd_t mc;
391 mbx_cmd_t *mcp = &mc; 389 mbx_cmd_t *mcp = &mc;
392 390
393 ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__); 391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
392 "Entered %s.\n", __func__);
394 393
395 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 394 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
396 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 395 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -424,7 +423,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
424 ql_dbg(ql_dbg_mbx, vha, 0x1023, 423 ql_dbg(ql_dbg_mbx, vha, 0x1023,
425 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 424 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
426 } else { 425 } else {
427 ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__); 426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
427 "Done %s.\n", __func__);
428 } 428 }
429 429
430 return rval; 430 return rval;
@@ -454,7 +454,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
454 mbx_cmd_t mc; 454 mbx_cmd_t mc;
455 mbx_cmd_t *mcp = &mc; 455 mbx_cmd_t *mcp = &mc;
456 456
457 ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__); 457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
458 "Entered %s.\n", __func__);
458 459
459 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 460 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
460 mcp->out_mb = MBX_0; 461 mcp->out_mb = MBX_0;
@@ -489,10 +490,11 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
489 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 490 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
490 } else { 491 } else {
491 if (IS_FWI2_CAPABLE(ha)) { 492 if (IS_FWI2_CAPABLE(ha)) {
492 ql_dbg(ql_dbg_mbx, vha, 0x1027, 493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
493 "Done exchanges=%x.\n", mcp->mb[1]); 494 "Done exchanges=%x.\n", mcp->mb[1]);
494 } else { 495 } else {
495 ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__); 496 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
497 "Done %s.\n", __func__);
496 } 498 }
497 } 499 }
498 500
@@ -523,7 +525,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
523 mbx_cmd_t *mcp = &mc; 525 mbx_cmd_t *mcp = &mc;
524 struct qla_hw_data *ha = vha->hw; 526 struct qla_hw_data *ha = vha->hw;
525 527
526 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__); 528 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
529 "Entered %s.\n", __func__);
527 530
528 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 531 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
529 mcp->out_mb = MBX_0; 532 mcp->out_mb = MBX_0;
@@ -561,11 +564,11 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
561 ha->fw_attributes_h = mcp->mb[15]; 564 ha->fw_attributes_h = mcp->mb[15];
562 ha->fw_attributes_ext[0] = mcp->mb[16]; 565 ha->fw_attributes_ext[0] = mcp->mb[16];
563 ha->fw_attributes_ext[1] = mcp->mb[17]; 566 ha->fw_attributes_ext[1] = mcp->mb[17];
564 ql_dbg(ql_dbg_mbx, vha, 0x1139, 567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
565 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", 568 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
566 __func__, mcp->mb[15], mcp->mb[6]); 569 __func__, mcp->mb[15], mcp->mb[6]);
567 } else 570 } else
568 ql_dbg(ql_dbg_mbx, vha, 0x112f, 571 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
569 "%s: FwAttributes [Upper] invalid, MB6:%04x\n", 572 "%s: FwAttributes [Upper] invalid, MB6:%04x\n",
570 __func__, mcp->mb[6]); 573 __func__, mcp->mb[6]);
571 } 574 }
@@ -576,7 +579,8 @@ failed:
576 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); 579 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
577 } else { 580 } else {
578 /*EMPTY*/ 581 /*EMPTY*/
579 ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__); 582 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
583 "Done %s.\n", __func__);
580 } 584 }
581 return rval; 585 return rval;
582} 586}
@@ -602,7 +606,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
602 mbx_cmd_t mc; 606 mbx_cmd_t mc;
603 mbx_cmd_t *mcp = &mc; 607 mbx_cmd_t *mcp = &mc;
604 608
605 ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__); 609 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
610 "Entered %s.\n", __func__);
606 611
607 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 612 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
608 mcp->out_mb = MBX_0; 613 mcp->out_mb = MBX_0;
@@ -620,7 +625,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
620 fwopts[2] = mcp->mb[2]; 625 fwopts[2] = mcp->mb[2];
621 fwopts[3] = mcp->mb[3]; 626 fwopts[3] = mcp->mb[3];
622 627
623 ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__); 628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
629 "Done %s.\n", __func__);
624 } 630 }
625 631
626 return rval; 632 return rval;
@@ -648,7 +654,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
648 mbx_cmd_t mc; 654 mbx_cmd_t mc;
649 mbx_cmd_t *mcp = &mc; 655 mbx_cmd_t *mcp = &mc;
650 656
651 ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__); 657 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
658 "Entered %s.\n", __func__);
652 659
653 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 660 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
654 mcp->mb[1] = fwopts[1]; 661 mcp->mb[1] = fwopts[1];
@@ -676,7 +683,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
676 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); 683 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
677 } else { 684 } else {
678 /*EMPTY*/ 685 /*EMPTY*/
679 ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__); 686 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
687 "Done %s.\n", __func__);
680 } 688 }
681 689
682 return rval; 690 return rval;
@@ -704,7 +712,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
704 mbx_cmd_t mc; 712 mbx_cmd_t mc;
705 mbx_cmd_t *mcp = &mc; 713 mbx_cmd_t *mcp = &mc;
706 714
707 ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__); 715 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
716 "Entered %s.\n", __func__);
708 717
709 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 718 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
710 mcp->mb[1] = 0xAAAA; 719 mcp->mb[1] = 0xAAAA;
@@ -734,7 +743,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
734 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); 743 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
735 } else { 744 } else {
736 /*EMPTY*/ 745 /*EMPTY*/
737 ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__); 746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
747 "Done %s.\n", __func__);
738 } 748 }
739 749
740 return rval; 750 return rval;
@@ -762,7 +772,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
762 mbx_cmd_t mc; 772 mbx_cmd_t mc;
763 mbx_cmd_t *mcp = &mc; 773 mbx_cmd_t *mcp = &mc;
764 774
765 ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__); 775 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
776 "Entered %s.\n", __func__);
766 777
767 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 778 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
768 mcp->out_mb = MBX_0; 779 mcp->out_mb = MBX_0;
@@ -787,7 +798,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
787 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? 798 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
788 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); 799 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
789 } else { 800 } else {
790 ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__); 801 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
802 "Done %s.\n", __func__);
791 } 803 }
792 804
793 return rval; 805 return rval;
@@ -819,7 +831,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
819 mbx_cmd_t mc; 831 mbx_cmd_t mc;
820 mbx_cmd_t *mcp = &mc; 832 mbx_cmd_t *mcp = &mc;
821 833
822 ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__); 834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
835 "Entered %s.\n", __func__);
823 836
824 mcp->mb[0] = MBC_IOCB_COMMAND_A64; 837 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
825 mcp->mb[1] = 0; 838 mcp->mb[1] = 0;
@@ -842,7 +855,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
842 /* Mask reserved bits. */ 855 /* Mask reserved bits. */
843 sts_entry->entry_status &= 856 sts_entry->entry_status &=
844 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; 857 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
845 ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__); 858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
859 "Done %s.\n", __func__);
846 } 860 }
847 861
848 return rval; 862 return rval;
@@ -884,7 +898,8 @@ qla2x00_abort_command(srb_t *sp)
884 struct req_que *req = vha->req; 898 struct req_que *req = vha->req;
885 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 899 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
886 900
887 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__); 901 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
902 "Entered %s.\n", __func__);
888 903
889 spin_lock_irqsave(&ha->hardware_lock, flags); 904 spin_lock_irqsave(&ha->hardware_lock, flags);
890 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 905 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -915,7 +930,8 @@ qla2x00_abort_command(srb_t *sp)
915 if (rval != QLA_SUCCESS) { 930 if (rval != QLA_SUCCESS) {
916 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); 931 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
917 } else { 932 } else {
918 ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__); 933 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
934 "Done %s.\n", __func__);
919 } 935 }
920 936
921 return rval; 937 return rval;
@@ -934,7 +950,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
934 l = l; 950 l = l;
935 vha = fcport->vha; 951 vha = fcport->vha;
936 952
937 ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__); 953 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
954 "Entered %s.\n", __func__);
938 955
939 req = vha->hw->req_q_map[0]; 956 req = vha->hw->req_q_map[0];
940 rsp = req->rsp; 957 rsp = req->rsp;
@@ -955,7 +972,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
955 mcp->flags = 0; 972 mcp->flags = 0;
956 rval = qla2x00_mailbox_command(vha, mcp); 973 rval = qla2x00_mailbox_command(vha, mcp);
957 if (rval != QLA_SUCCESS) { 974 if (rval != QLA_SUCCESS) {
958 ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval); 975 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
976 "Failed=%x.\n", rval);
959 } 977 }
960 978
961 /* Issue marker IOCB. */ 979 /* Issue marker IOCB. */
@@ -965,7 +983,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
965 ql_dbg(ql_dbg_mbx, vha, 0x1040, 983 ql_dbg(ql_dbg_mbx, vha, 0x1040,
966 "Failed to issue marker IOCB (%x).\n", rval2); 984 "Failed to issue marker IOCB (%x).\n", rval2);
967 } else { 985 } else {
968 ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__); 986 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
987 "Done %s.\n", __func__);
969 } 988 }
970 989
971 return rval; 990 return rval;
@@ -983,7 +1002,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
983 1002
984 vha = fcport->vha; 1003 vha = fcport->vha;
985 1004
986 ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__); 1005 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1006 "Entered %s.\n", __func__);
987 1007
988 req = vha->hw->req_q_map[0]; 1008 req = vha->hw->req_q_map[0];
989 rsp = req->rsp; 1009 rsp = req->rsp;
@@ -1012,7 +1032,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
1012 ql_dbg(ql_dbg_mbx, vha, 0x1044, 1032 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1013 "Failed to issue marker IOCB (%x).\n", rval2); 1033 "Failed to issue marker IOCB (%x).\n", rval2);
1014 } else { 1034 } else {
1015 ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__); 1035 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1036 "Done %s.\n", __func__);
1016 } 1037 }
1017 1038
1018 return rval; 1039 return rval;
@@ -1046,7 +1067,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1046 mbx_cmd_t mc; 1067 mbx_cmd_t mc;
1047 mbx_cmd_t *mcp = &mc; 1068 mbx_cmd_t *mcp = &mc;
1048 1069
1049 ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__); 1070 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1071 "Entered %s.\n", __func__);
1050 1072
1051 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 1073 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1052 mcp->mb[9] = vha->vp_idx; 1074 mcp->mb[9] = vha->vp_idx;
@@ -1074,7 +1096,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1074 /*EMPTY*/ 1096 /*EMPTY*/
1075 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); 1097 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1076 } else { 1098 } else {
1077 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__); 1099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1100 "Done %s.\n", __func__);
1078 1101
1079 if (IS_CNA_CAPABLE(vha->hw)) { 1102 if (IS_CNA_CAPABLE(vha->hw)) {
1080 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1103 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1115,7 +1138,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1115 mbx_cmd_t mc; 1138 mbx_cmd_t mc;
1116 mbx_cmd_t *mcp = &mc; 1139 mbx_cmd_t *mcp = &mc;
1117 1140
1118 ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__); 1141 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1142 "Entered %s.\n", __func__);
1119 1143
1120 mcp->mb[0] = MBC_GET_RETRY_COUNT; 1144 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1121 mcp->out_mb = MBX_0; 1145 mcp->out_mb = MBX_0;
@@ -1138,7 +1162,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1138 *tov = ratov; 1162 *tov = ratov;
1139 } 1163 }
1140 1164
1141 ql_dbg(ql_dbg_mbx, vha, 0x104b, 1165 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1142 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); 1166 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1143 } 1167 }
1144 1168
@@ -1170,7 +1194,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1170 mbx_cmd_t *mcp = &mc; 1194 mbx_cmd_t *mcp = &mc;
1171 struct qla_hw_data *ha = vha->hw; 1195 struct qla_hw_data *ha = vha->hw;
1172 1196
1173 ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__); 1197 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1198 "Entered %s.\n", __func__);
1174 1199
1175 if (IS_QLA82XX(ha) && ql2xdbwr) 1200 if (IS_QLA82XX(ha) && ql2xdbwr)
1176 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, 1201 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1213,9 +1238,100 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1213 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); 1238 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1214 } else { 1239 } else {
1215 /*EMPTY*/ 1240 /*EMPTY*/
1216 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__); 1241 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1242 "Done %s.\n", __func__);
1243 }
1244
1245 return rval;
1246}
1247
1248/*
1249 * qla2x00_get_node_name_list
1250 * Issue get node name list mailbox command, kmalloc()
1251 * and return the resulting list. Caller must kfree() it!
1252 *
1253 * Input:
1254 * ha = adapter state pointer.
1255 * out_data = resulting list
1256 * out_len = length of the resulting list
1257 *
1258 * Returns:
1259 * qla2x00 local function return status code.
1260 *
1261 * Context:
1262 * Kernel context.
1263 */
1264int
1265qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
1266{
1267 struct qla_hw_data *ha = vha->hw;
1268 struct qla_port_24xx_data *list = NULL;
1269 void *pmap;
1270 mbx_cmd_t mc;
1271 dma_addr_t pmap_dma;
1272 ulong dma_size;
1273 int rval, left;
1274
1275 left = 1;
1276 while (left > 0) {
1277 dma_size = left * sizeof(*list);
1278 pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
1279 &pmap_dma, GFP_KERNEL);
1280 if (!pmap) {
1281 ql_log(ql_log_warn, vha, 0x113f,
1282 "%s(%ld): DMA Alloc failed of %ld\n",
1283 __func__, vha->host_no, dma_size);
1284 rval = QLA_MEMORY_ALLOC_FAILED;
1285 goto out;
1286 }
1287
1288 mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
1289 mc.mb[1] = BIT_1 | BIT_3;
1290 mc.mb[2] = MSW(pmap_dma);
1291 mc.mb[3] = LSW(pmap_dma);
1292 mc.mb[6] = MSW(MSD(pmap_dma));
1293 mc.mb[7] = LSW(MSD(pmap_dma));
1294 mc.mb[8] = dma_size;
1295 mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
1296 mc.in_mb = MBX_0|MBX_1;
1297 mc.tov = 30;
1298 mc.flags = MBX_DMA_IN;
1299
1300 rval = qla2x00_mailbox_command(vha, &mc);
1301 if (rval != QLA_SUCCESS) {
1302 if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
1303 (mc.mb[1] == 0xA)) {
1304 left += le16_to_cpu(mc.mb[2]) /
1305 sizeof(struct qla_port_24xx_data);
1306 goto restart;
1307 }
1308 goto out_free;
1309 }
1310
1311 left = 0;
1312
1313 list = kzalloc(dma_size, GFP_KERNEL);
1314 if (!list) {
1315 ql_log(ql_log_warn, vha, 0x1140,
1316 "%s(%ld): failed to allocate node names list "
1317 "structure.\n", __func__, vha->host_no);
1318 rval = QLA_MEMORY_ALLOC_FAILED;
1319 goto out_free;
1320 }
1321
1322 memcpy(list, pmap, dma_size);
1323restart:
1324 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1217 } 1325 }
1218 1326
1327 *out_data = list;
1328 *out_len = dma_size;
1329
1330out:
1331 return rval;
1332
1333out_free:
1334 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1219 return rval; 1335 return rval;
1220} 1336}
1221 1337
@@ -1246,7 +1362,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1246 dma_addr_t pd_dma; 1362 dma_addr_t pd_dma;
1247 struct qla_hw_data *ha = vha->hw; 1363 struct qla_hw_data *ha = vha->hw;
1248 1364
1249 ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__); 1365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1366 "Entered %s.\n", __func__);
1250 1367
1251 pd24 = NULL; 1368 pd24 = NULL;
1252 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1369 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -1326,6 +1443,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1326 fcport->port_type = FCT_INITIATOR; 1443 fcport->port_type = FCT_INITIATOR;
1327 else 1444 else
1328 fcport->port_type = FCT_TARGET; 1445 fcport->port_type = FCT_TARGET;
1446
1447 /* Passback COS information. */
1448 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1449 FC_COS_CLASS2 : FC_COS_CLASS3;
1450
1451 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1452 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1329 } else { 1453 } else {
1330 uint64_t zero = 0; 1454 uint64_t zero = 0;
1331 1455
@@ -1378,7 +1502,8 @@ gpd_error_out:
1378 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 1502 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1379 mcp->mb[0], mcp->mb[1]); 1503 mcp->mb[0], mcp->mb[1]);
1380 } else { 1504 } else {
1381 ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__); 1505 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1506 "Done %s.\n", __func__);
1382 } 1507 }
1383 1508
1384 return rval; 1509 return rval;
@@ -1407,7 +1532,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1407 mbx_cmd_t mc; 1532 mbx_cmd_t mc;
1408 mbx_cmd_t *mcp = &mc; 1533 mbx_cmd_t *mcp = &mc;
1409 1534
1410 ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__); 1535 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1536 "Entered %s.\n", __func__);
1411 1537
1412 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1538 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1413 mcp->out_mb = MBX_0; 1539 mcp->out_mb = MBX_0;
@@ -1433,7 +1559,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1433 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 1559 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1434 } else { 1560 } else {
1435 /*EMPTY*/ 1561 /*EMPTY*/
1436 ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__); 1562 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
1563 "Done %s.\n", __func__);
1437 } 1564 }
1438 1565
1439 return rval; 1566 return rval;
@@ -1465,7 +1592,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1465 mbx_cmd_t mc; 1592 mbx_cmd_t mc;
1466 mbx_cmd_t *mcp = &mc; 1593 mbx_cmd_t *mcp = &mc;
1467 1594
1468 ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__); 1595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
1596 "Entered %s.\n", __func__);
1469 1597
1470 mcp->mb[0] = MBC_GET_PORT_NAME; 1598 mcp->mb[0] = MBC_GET_PORT_NAME;
1471 mcp->mb[9] = vha->vp_idx; 1599 mcp->mb[9] = vha->vp_idx;
@@ -1499,7 +1627,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1499 name[7] = LSB(mcp->mb[7]); 1627 name[7] = LSB(mcp->mb[7]);
1500 } 1628 }
1501 1629
1502 ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__); 1630 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
1631 "Done %s.\n", __func__);
1503 } 1632 }
1504 1633
1505 return rval; 1634 return rval;
@@ -1527,7 +1656,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1527 mbx_cmd_t mc; 1656 mbx_cmd_t mc;
1528 mbx_cmd_t *mcp = &mc; 1657 mbx_cmd_t *mcp = &mc;
1529 1658
1530 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__); 1659 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
1660 "Entered %s.\n", __func__);
1531 1661
1532 if (IS_CNA_CAPABLE(vha->hw)) { 1662 if (IS_CNA_CAPABLE(vha->hw)) {
1533 /* Logout across all FCFs. */ 1663 /* Logout across all FCFs. */
@@ -1564,7 +1694,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1564 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 1694 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1565 } else { 1695 } else {
1566 /*EMPTY*/ 1696 /*EMPTY*/
1567 ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__); 1697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
1698 "Done %s.\n", __func__);
1568 } 1699 }
1569 1700
1570 return rval; 1701 return rval;
@@ -1596,9 +1727,10 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1596 mbx_cmd_t mc; 1727 mbx_cmd_t mc;
1597 mbx_cmd_t *mcp = &mc; 1728 mbx_cmd_t *mcp = &mc;
1598 1729
1599 ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__); 1730 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
1731 "Entered %s.\n", __func__);
1600 1732
1601 ql_dbg(ql_dbg_mbx, vha, 0x105e, 1733 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
1602 "Retry cnt=%d ratov=%d total tov=%d.\n", 1734 "Retry cnt=%d ratov=%d total tov=%d.\n",
1603 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 1735 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1604 1736
@@ -1622,7 +1754,8 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1622 rval, mcp->mb[0], mcp->mb[1]); 1754 rval, mcp->mb[0], mcp->mb[1]);
1623 } else { 1755 } else {
1624 /*EMPTY*/ 1756 /*EMPTY*/
1625 ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__); 1757 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
1758 "Done %s.\n", __func__);
1626 } 1759 }
1627 1760
1628 return rval; 1761 return rval;
@@ -1641,7 +1774,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1641 struct req_que *req; 1774 struct req_que *req;
1642 struct rsp_que *rsp; 1775 struct rsp_que *rsp;
1643 1776
1644 ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__); 1777 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
1778 "Entered %s.\n", __func__);
1645 1779
1646 if (ha->flags.cpu_affinity_enabled) 1780 if (ha->flags.cpu_affinity_enabled)
1647 req = ha->req_q_map[0]; 1781 req = ha->req_q_map[0];
@@ -1715,7 +1849,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1715 break; 1849 break;
1716 } 1850 }
1717 } else { 1851 } else {
1718 ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__); 1852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
1853 "Done %s.\n", __func__);
1719 1854
1720 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1855 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1721 1856
@@ -1733,6 +1868,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1733 mb[10] |= BIT_0; /* Class 2. */ 1868 mb[10] |= BIT_0; /* Class 2. */
1734 if (lg->io_parameter[9] || lg->io_parameter[10]) 1869 if (lg->io_parameter[9] || lg->io_parameter[10])
1735 mb[10] |= BIT_1; /* Class 3. */ 1870 mb[10] |= BIT_1; /* Class 3. */
1871 if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
1872 mb[10] |= BIT_7; /* Confirmed Completion
1873 * Allowed
1874 */
1736 } 1875 }
1737 1876
1738 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1877 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1770,7 +1909,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1770 mbx_cmd_t *mcp = &mc; 1909 mbx_cmd_t *mcp = &mc;
1771 struct qla_hw_data *ha = vha->hw; 1910 struct qla_hw_data *ha = vha->hw;
1772 1911
1773 ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__); 1912 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
1913 "Entered %s.\n", __func__);
1774 1914
1775 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1915 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1776 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1916 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1818,7 +1958,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1818 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 1958 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
1819 } else { 1959 } else {
1820 /*EMPTY*/ 1960 /*EMPTY*/
1821 ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__); 1961 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
1962 "Done %s.\n", __func__);
1822 } 1963 }
1823 1964
1824 return rval; 1965 return rval;
@@ -1849,7 +1990,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1849 mbx_cmd_t *mcp = &mc; 1990 mbx_cmd_t *mcp = &mc;
1850 struct qla_hw_data *ha = vha->hw; 1991 struct qla_hw_data *ha = vha->hw;
1851 1992
1852 ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__); 1993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
1994 "Entered %s.\n", __func__);
1853 1995
1854 if (IS_FWI2_CAPABLE(ha)) 1996 if (IS_FWI2_CAPABLE(ha))
1855 return qla24xx_login_fabric(vha, fcport->loop_id, 1997 return qla24xx_login_fabric(vha, fcport->loop_id,
@@ -1891,7 +2033,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1891 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2033 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
1892 } else { 2034 } else {
1893 /*EMPTY*/ 2035 /*EMPTY*/
1894 ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__); 2036 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2037 "Done %s.\n", __func__);
1895 } 2038 }
1896 2039
1897 return (rval); 2040 return (rval);
@@ -1908,7 +2051,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1908 struct req_que *req; 2051 struct req_que *req;
1909 struct rsp_que *rsp; 2052 struct rsp_que *rsp;
1910 2053
1911 ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__); 2054 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2055 "Entered %s.\n", __func__);
1912 2056
1913 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2057 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1914 if (lg == NULL) { 2058 if (lg == NULL) {
@@ -1952,7 +2096,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1952 le32_to_cpu(lg->io_parameter[1])); 2096 le32_to_cpu(lg->io_parameter[1]));
1953 } else { 2097 } else {
1954 /*EMPTY*/ 2098 /*EMPTY*/
1955 ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__); 2099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2100 "Done %s.\n", __func__);
1956 } 2101 }
1957 2102
1958 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2103 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1984,7 +2129,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1984 mbx_cmd_t mc; 2129 mbx_cmd_t mc;
1985 mbx_cmd_t *mcp = &mc; 2130 mbx_cmd_t *mcp = &mc;
1986 2131
1987 ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__); 2132 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2133 "Entered %s.\n", __func__);
1988 2134
1989 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2135 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1990 mcp->out_mb = MBX_1|MBX_0; 2136 mcp->out_mb = MBX_1|MBX_0;
@@ -2007,7 +2153,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2007 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2153 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2008 } else { 2154 } else {
2009 /*EMPTY*/ 2155 /*EMPTY*/
2010 ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__); 2156 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2157 "Done %s.\n", __func__);
2011 } 2158 }
2012 2159
2013 return rval; 2160 return rval;
@@ -2035,7 +2182,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
2035 mbx_cmd_t mc; 2182 mbx_cmd_t mc;
2036 mbx_cmd_t *mcp = &mc; 2183 mbx_cmd_t *mcp = &mc;
2037 2184
2038 ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__); 2185 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2186 "Entered %s.\n", __func__);
2039 2187
2040 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2188 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2041 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; 2189 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2052,7 +2200,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
2052 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2200 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2053 } else { 2201 } else {
2054 /*EMPTY*/ 2202 /*EMPTY*/
2055 ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__); 2203 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2204 "Done %s.\n", __func__);
2056 } 2205 }
2057 2206
2058 return rval; 2207 return rval;
@@ -2078,7 +2227,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2078 mbx_cmd_t mc; 2227 mbx_cmd_t mc;
2079 mbx_cmd_t *mcp = &mc; 2228 mbx_cmd_t *mcp = &mc;
2080 2229
2081 ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__); 2230 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2231 "Entered %s.\n", __func__);
2082 2232
2083 if (id_list == NULL) 2233 if (id_list == NULL)
2084 return QLA_FUNCTION_FAILED; 2234 return QLA_FUNCTION_FAILED;
@@ -2110,7 +2260,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2110 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2260 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2111 } else { 2261 } else {
2112 *entries = mcp->mb[1]; 2262 *entries = mcp->mb[1];
2113 ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__); 2263 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2264 "Done %s.\n", __func__);
2114 } 2265 }
2115 2266
2116 return rval; 2267 return rval;
@@ -2138,7 +2289,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2138 mbx_cmd_t mc; 2289 mbx_cmd_t mc;
2139 mbx_cmd_t *mcp = &mc; 2290 mbx_cmd_t *mcp = &mc;
2140 2291
2141 ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__); 2292 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2293 "Entered %s.\n", __func__);
2142 2294
2143 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2295 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2144 mcp->out_mb = MBX_0; 2296 mcp->out_mb = MBX_0;
@@ -2154,7 +2306,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2154 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2306 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2155 "Failed mb[0]=%x.\n", mcp->mb[0]); 2307 "Failed mb[0]=%x.\n", mcp->mb[0]);
2156 } else { 2308 } else {
2157 ql_dbg(ql_dbg_mbx, vha, 0x107e, 2309 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2158 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2310 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2159 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2311 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2160 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2312 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
@@ -2201,7 +2353,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2201 dma_addr_t pmap_dma; 2353 dma_addr_t pmap_dma;
2202 struct qla_hw_data *ha = vha->hw; 2354 struct qla_hw_data *ha = vha->hw;
2203 2355
2204 ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__); 2356 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2357 "Entered %s.\n", __func__);
2205 2358
2206 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2359 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2207 if (pmap == NULL) { 2360 if (pmap == NULL) {
@@ -2224,7 +2377,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2224 rval = qla2x00_mailbox_command(vha, mcp); 2377 rval = qla2x00_mailbox_command(vha, mcp);
2225 2378
2226 if (rval == QLA_SUCCESS) { 2379 if (rval == QLA_SUCCESS) {
2227 ql_dbg(ql_dbg_mbx, vha, 0x1081, 2380 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2228 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 2381 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2229 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 2382 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2230 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 2383 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
@@ -2238,7 +2391,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2238 if (rval != QLA_SUCCESS) { 2391 if (rval != QLA_SUCCESS) {
2239 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 2392 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2240 } else { 2393 } else {
2241 ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__); 2394 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2395 "Done %s.\n", __func__);
2242 } 2396 }
2243 2397
2244 return rval; 2398 return rval;
@@ -2267,7 +2421,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2267 uint32_t *siter, *diter, dwords; 2421 uint32_t *siter, *diter, dwords;
2268 struct qla_hw_data *ha = vha->hw; 2422 struct qla_hw_data *ha = vha->hw;
2269 2423
2270 ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__); 2424 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2425 "Entered %s.\n", __func__);
2271 2426
2272 mcp->mb[0] = MBC_GET_LINK_STATUS; 2427 mcp->mb[0] = MBC_GET_LINK_STATUS;
2273 mcp->mb[2] = MSW(stats_dma); 2428 mcp->mb[2] = MSW(stats_dma);
@@ -2301,7 +2456,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2301 rval = QLA_FUNCTION_FAILED; 2456 rval = QLA_FUNCTION_FAILED;
2302 } else { 2457 } else {
2303 /* Copy over data -- firmware data is LE. */ 2458 /* Copy over data -- firmware data is LE. */
2304 ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__); 2459 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2460 "Done %s.\n", __func__);
2305 dwords = offsetof(struct link_statistics, unused1) / 4; 2461 dwords = offsetof(struct link_statistics, unused1) / 4;
2306 siter = diter = &stats->link_fail_cnt; 2462 siter = diter = &stats->link_fail_cnt;
2307 while (dwords--) 2463 while (dwords--)
@@ -2324,7 +2480,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2324 mbx_cmd_t *mcp = &mc; 2480 mbx_cmd_t *mcp = &mc;
2325 uint32_t *siter, *diter, dwords; 2481 uint32_t *siter, *diter, dwords;
2326 2482
2327 ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__); 2483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2484 "Entered %s.\n", __func__);
2328 2485
2329 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; 2486 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2330 mcp->mb[2] = MSW(stats_dma); 2487 mcp->mb[2] = MSW(stats_dma);
@@ -2346,7 +2503,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2346 "Failed mb[0]=%x.\n", mcp->mb[0]); 2503 "Failed mb[0]=%x.\n", mcp->mb[0]);
2347 rval = QLA_FUNCTION_FAILED; 2504 rval = QLA_FUNCTION_FAILED;
2348 } else { 2505 } else {
2349 ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__); 2506 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
2507 "Done %s.\n", __func__);
2350 /* Copy over data -- firmware data is LE. */ 2508 /* Copy over data -- firmware data is LE. */
2351 dwords = sizeof(struct link_statistics) / 4; 2509 dwords = sizeof(struct link_statistics) / 4;
2352 siter = diter = &stats->link_fail_cnt; 2510 siter = diter = &stats->link_fail_cnt;
@@ -2375,7 +2533,8 @@ qla24xx_abort_command(srb_t *sp)
2375 struct qla_hw_data *ha = vha->hw; 2533 struct qla_hw_data *ha = vha->hw;
2376 struct req_que *req = vha->req; 2534 struct req_que *req = vha->req;
2377 2535
2378 ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__); 2536 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
2537 "Entered %s.\n", __func__);
2379 2538
2380 spin_lock_irqsave(&ha->hardware_lock, flags); 2539 spin_lock_irqsave(&ha->hardware_lock, flags);
2381 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2540 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2404,7 +2563,7 @@ qla24xx_abort_command(srb_t *sp)
2404 abt->port_id[0] = fcport->d_id.b.al_pa; 2563 abt->port_id[0] = fcport->d_id.b.al_pa;
2405 abt->port_id[1] = fcport->d_id.b.area; 2564 abt->port_id[1] = fcport->d_id.b.area;
2406 abt->port_id[2] = fcport->d_id.b.domain; 2565 abt->port_id[2] = fcport->d_id.b.domain;
2407 abt->vp_index = fcport->vp_idx; 2566 abt->vp_index = fcport->vha->vp_idx;
2408 2567
2409 abt->req_que_no = cpu_to_le16(req->id); 2568 abt->req_que_no = cpu_to_le16(req->id);
2410 2569
@@ -2423,7 +2582,8 @@ qla24xx_abort_command(srb_t *sp)
2423 le16_to_cpu(abt->nport_handle)); 2582 le16_to_cpu(abt->nport_handle));
2424 rval = QLA_FUNCTION_FAILED; 2583 rval = QLA_FUNCTION_FAILED;
2425 } else { 2584 } else {
2426 ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__); 2585 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
2586 "Done %s.\n", __func__);
2427 } 2587 }
2428 2588
2429 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 2589 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2455,7 +2615,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2455 ha = vha->hw; 2615 ha = vha->hw;
2456 req = vha->req; 2616 req = vha->req;
2457 2617
2458 ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__); 2618 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
2619 "Entered %s.\n", __func__);
2459 2620
2460 if (ha->flags.cpu_affinity_enabled) 2621 if (ha->flags.cpu_affinity_enabled)
2461 rsp = ha->rsp_q_map[tag + 1]; 2622 rsp = ha->rsp_q_map[tag + 1];
@@ -2478,7 +2639,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2478 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 2639 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2479 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 2640 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
2480 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 2641 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
2481 tsk->p.tsk.vp_index = fcport->vp_idx; 2642 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
2482 if (type == TCF_LUN_RESET) { 2643 if (type == TCF_LUN_RESET) {
2483 int_to_scsilun(l, &tsk->p.tsk.lun); 2644 int_to_scsilun(l, &tsk->p.tsk.lun);
2484 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 2645 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
@@ -2504,7 +2665,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2504 } else if (le16_to_cpu(sts->scsi_status) & 2665 } else if (le16_to_cpu(sts->scsi_status) &
2505 SS_RESPONSE_INFO_LEN_VALID) { 2666 SS_RESPONSE_INFO_LEN_VALID) {
2506 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2667 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2507 ql_dbg(ql_dbg_mbx, vha, 0x1097, 2668 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
2508 "Ignoring inconsistent data length -- not enough " 2669 "Ignoring inconsistent data length -- not enough "
2509 "response info (%d).\n", 2670 "response info (%d).\n",
2510 le32_to_cpu(sts->rsp_data_len)); 2671 le32_to_cpu(sts->rsp_data_len));
@@ -2523,7 +2684,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2523 ql_dbg(ql_dbg_mbx, vha, 0x1099, 2684 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2524 "Failed to issue marker IOCB (%x).\n", rval2); 2685 "Failed to issue marker IOCB (%x).\n", rval2);
2525 } else { 2686 } else {
2526 ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__); 2687 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
2688 "Done %s.\n", __func__);
2527 } 2689 }
2528 2690
2529 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 2691 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2564,7 +2726,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2564 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 2726 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2565 return QLA_FUNCTION_FAILED; 2727 return QLA_FUNCTION_FAILED;
2566 2728
2567 ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__); 2729 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
2730 "Entered %s.\n", __func__);
2568 2731
2569 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 2732 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2570 mcp->out_mb = MBX_0; 2733 mcp->out_mb = MBX_0;
@@ -2576,7 +2739,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2576 if (rval != QLA_SUCCESS) { 2739 if (rval != QLA_SUCCESS) {
2577 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 2740 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2578 } else { 2741 } else {
2579 ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__); 2742 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
2743 "Done %s.\n", __func__);
2580 } 2744 }
2581 2745
2582 return rval; 2746 return rval;
@@ -2596,7 +2760,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2596 mbx_cmd_t mc; 2760 mbx_cmd_t mc;
2597 mbx_cmd_t *mcp = &mc; 2761 mbx_cmd_t *mcp = &mc;
2598 2762
2599 ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__); 2763 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
2764 "Entered %s.\n", __func__);
2600 2765
2601 mcp->mb[0] = MBC_SERDES_PARAMS; 2766 mcp->mb[0] = MBC_SERDES_PARAMS;
2602 mcp->mb[1] = BIT_0; 2767 mcp->mb[1] = BIT_0;
@@ -2615,7 +2780,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2615 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 2780 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2616 } else { 2781 } else {
2617 /*EMPTY*/ 2782 /*EMPTY*/
2618 ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__); 2783 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
2784 "Done %s.\n", __func__);
2619 } 2785 }
2620 2786
2621 return rval; 2787 return rval;
@@ -2631,7 +2797,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2631 if (!IS_FWI2_CAPABLE(vha->hw)) 2797 if (!IS_FWI2_CAPABLE(vha->hw))
2632 return QLA_FUNCTION_FAILED; 2798 return QLA_FUNCTION_FAILED;
2633 2799
2634 ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__); 2800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
2801 "Entered %s.\n", __func__);
2635 2802
2636 mcp->mb[0] = MBC_STOP_FIRMWARE; 2803 mcp->mb[0] = MBC_STOP_FIRMWARE;
2637 mcp->mb[1] = 0; 2804 mcp->mb[1] = 0;
@@ -2646,7 +2813,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2646 if (mcp->mb[0] == MBS_INVALID_COMMAND) 2813 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2647 rval = QLA_INVALID_COMMAND; 2814 rval = QLA_INVALID_COMMAND;
2648 } else { 2815 } else {
2649 ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__); 2816 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
2817 "Done %s.\n", __func__);
2650 } 2818 }
2651 2819
2652 return rval; 2820 return rval;
@@ -2660,7 +2828,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2660 mbx_cmd_t mc; 2828 mbx_cmd_t mc;
2661 mbx_cmd_t *mcp = &mc; 2829 mbx_cmd_t *mcp = &mc;
2662 2830
2663 ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__); 2831 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
2832 "Entered %s.\n", __func__);
2664 2833
2665 if (!IS_FWI2_CAPABLE(vha->hw)) 2834 if (!IS_FWI2_CAPABLE(vha->hw))
2666 return QLA_FUNCTION_FAILED; 2835 return QLA_FUNCTION_FAILED;
@@ -2686,7 +2855,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2686 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2855 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2687 rval, mcp->mb[0], mcp->mb[1]); 2856 rval, mcp->mb[0], mcp->mb[1]);
2688 } else { 2857 } else {
2689 ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__); 2858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
2859 "Done %s.\n", __func__);
2690 } 2860 }
2691 2861
2692 return rval; 2862 return rval;
@@ -2699,7 +2869,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2699 mbx_cmd_t mc; 2869 mbx_cmd_t mc;
2700 mbx_cmd_t *mcp = &mc; 2870 mbx_cmd_t *mcp = &mc;
2701 2871
2702 ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__); 2872 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
2873 "Entered %s.\n", __func__);
2703 2874
2704 if (!IS_FWI2_CAPABLE(vha->hw)) 2875 if (!IS_FWI2_CAPABLE(vha->hw))
2705 return QLA_FUNCTION_FAILED; 2876 return QLA_FUNCTION_FAILED;
@@ -2719,7 +2890,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2719 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2890 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2720 rval, mcp->mb[0], mcp->mb[1]); 2891 rval, mcp->mb[0], mcp->mb[1]);
2721 } else { 2892 } else {
2722 ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__); 2893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
2894 "Done %s.\n", __func__);
2723 } 2895 }
2724 2896
2725 return rval; 2897 return rval;
@@ -2733,7 +2905,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2733 mbx_cmd_t mc; 2905 mbx_cmd_t mc;
2734 mbx_cmd_t *mcp = &mc; 2906 mbx_cmd_t *mcp = &mc;
2735 2907
2736 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__); 2908 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
2909 "Entered %s.\n", __func__);
2737 2910
2738 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 2911 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
2739 !IS_QLA83XX(vha->hw)) 2912 !IS_QLA83XX(vha->hw))
@@ -2764,7 +2937,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2764 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2937 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2765 rval, mcp->mb[0], mcp->mb[1]); 2938 rval, mcp->mb[0], mcp->mb[1]);
2766 } else { 2939 } else {
2767 ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__); 2940 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
2941 "Done %s.\n", __func__);
2768 2942
2769 if (mb) 2943 if (mb)
2770 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 2944 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2782,7 +2956,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2782 mbx_cmd_t mc; 2956 mbx_cmd_t mc;
2783 mbx_cmd_t *mcp = &mc; 2957 mbx_cmd_t *mcp = &mc;
2784 2958
2785 ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__); 2959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
2960 "Entered %s.\n", __func__);
2786 2961
2787 if (!IS_FWI2_CAPABLE(vha->hw)) 2962 if (!IS_FWI2_CAPABLE(vha->hw))
2788 return QLA_FUNCTION_FAILED; 2963 return QLA_FUNCTION_FAILED;
@@ -2804,7 +2979,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2804 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2979 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2805 rval, mcp->mb[0], mcp->mb[1]); 2980 rval, mcp->mb[0], mcp->mb[1]);
2806 } else { 2981 } else {
2807 ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__); 2982 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
2983 "Done %s.\n", __func__);
2808 2984
2809 if (wr) 2985 if (wr)
2810 *wr = (uint64_t) mcp->mb[5] << 48 | 2986 *wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2829,7 +3005,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2829 mbx_cmd_t mc; 3005 mbx_cmd_t mc;
2830 mbx_cmd_t *mcp = &mc; 3006 mbx_cmd_t *mcp = &mc;
2831 3007
2832 ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__); 3008 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3009 "Entered %s.\n", __func__);
2833 3010
2834 if (!IS_IIDMA_CAPABLE(vha->hw)) 3011 if (!IS_IIDMA_CAPABLE(vha->hw))
2835 return QLA_FUNCTION_FAILED; 3012 return QLA_FUNCTION_FAILED;
@@ -2854,7 +3031,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2854 if (rval != QLA_SUCCESS) { 3031 if (rval != QLA_SUCCESS) {
2855 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3032 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
2856 } else { 3033 } else {
2857 ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__); 3034 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3035 "Done %s.\n", __func__);
2858 if (port_speed) 3036 if (port_speed)
2859 *port_speed = mcp->mb[3]; 3037 *port_speed = mcp->mb[3];
2860 } 3038 }
@@ -2870,7 +3048,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2870 mbx_cmd_t mc; 3048 mbx_cmd_t mc;
2871 mbx_cmd_t *mcp = &mc; 3049 mbx_cmd_t *mcp = &mc;
2872 3050
2873 ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__); 3051 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3052 "Entered %s.\n", __func__);
2874 3053
2875 if (!IS_IIDMA_CAPABLE(vha->hw)) 3054 if (!IS_IIDMA_CAPABLE(vha->hw))
2876 return QLA_FUNCTION_FAILED; 3055 return QLA_FUNCTION_FAILED;
@@ -2897,9 +3076,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2897 } 3076 }
2898 3077
2899 if (rval != QLA_SUCCESS) { 3078 if (rval != QLA_SUCCESS) {
2900 ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval); 3079 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3080 "Failed=%x.\n", rval);
2901 } else { 3081 } else {
2902 ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__); 3082 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3083 "Done %s.\n", __func__);
2903 } 3084 }
2904 3085
2905 return rval; 3086 return rval;
@@ -2915,24 +3096,25 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2915 scsi_qla_host_t *vp; 3096 scsi_qla_host_t *vp;
2916 unsigned long flags; 3097 unsigned long flags;
2917 3098
2918 ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__); 3099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3100 "Entered %s.\n", __func__);
2919 3101
2920 if (rptid_entry->entry_status != 0) 3102 if (rptid_entry->entry_status != 0)
2921 return; 3103 return;
2922 3104
2923 if (rptid_entry->format == 0) { 3105 if (rptid_entry->format == 0) {
2924 ql_dbg(ql_dbg_mbx, vha, 0x10b7, 3106 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
2925 "Format 0 : Number of VPs setup %d, number of " 3107 "Format 0 : Number of VPs setup %d, number of "
2926 "VPs acquired %d.\n", 3108 "VPs acquired %d.\n",
2927 MSB(le16_to_cpu(rptid_entry->vp_count)), 3109 MSB(le16_to_cpu(rptid_entry->vp_count)),
2928 LSB(le16_to_cpu(rptid_entry->vp_count))); 3110 LSB(le16_to_cpu(rptid_entry->vp_count)));
2929 ql_dbg(ql_dbg_mbx, vha, 0x10b8, 3111 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
2930 "Primary port id %02x%02x%02x.\n", 3112 "Primary port id %02x%02x%02x.\n",
2931 rptid_entry->port_id[2], rptid_entry->port_id[1], 3113 rptid_entry->port_id[2], rptid_entry->port_id[1],
2932 rptid_entry->port_id[0]); 3114 rptid_entry->port_id[0]);
2933 } else if (rptid_entry->format == 1) { 3115 } else if (rptid_entry->format == 1) {
2934 vp_idx = LSB(stat); 3116 vp_idx = LSB(stat);
2935 ql_dbg(ql_dbg_mbx, vha, 0x10b9, 3117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
2936 "Format 1: VP[%d] enabled - status %d - with " 3118 "Format 1: VP[%d] enabled - status %d - with "
2937 "port id %02x%02x%02x.\n", vp_idx, MSB(stat), 3119 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
2938 rptid_entry->port_id[2], rptid_entry->port_id[1], 3120 rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2999,7 +3181,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2999 3181
3000 /* This can be called by the parent */ 3182 /* This can be called by the parent */
3001 3183
3002 ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__); 3184 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3185 "Entered %s.\n", __func__);
3003 3186
3004 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 3187 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3005 if (!vpmod) { 3188 if (!vpmod) {
@@ -3015,6 +3198,9 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3015 vpmod->vp_count = 1; 3198 vpmod->vp_count = 1;
3016 vpmod->vp_index1 = vha->vp_idx; 3199 vpmod->vp_index1 = vha->vp_idx;
3017 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 3200 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
3201
3202 qlt_modify_vp_config(vha, vpmod);
3203
3018 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 3204 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3019 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 3205 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3020 vpmod->entry_count = 1; 3206 vpmod->entry_count = 1;
@@ -3035,7 +3221,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3035 rval = QLA_FUNCTION_FAILED; 3221 rval = QLA_FUNCTION_FAILED;
3036 } else { 3222 } else {
3037 /* EMPTY */ 3223 /* EMPTY */
3038 ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__); 3224 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
3225 "Done %s.\n", __func__);
3039 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 3226 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3040 } 3227 }
3041 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 3228 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3069,7 +3256,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3069 int vp_index = vha->vp_idx; 3256 int vp_index = vha->vp_idx;
3070 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3257 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3071 3258
3072 ql_dbg(ql_dbg_mbx, vha, 0x10c1, 3259 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
3073 "Entered %s enabling index %d.\n", __func__, vp_index); 3260 "Entered %s enabling index %d.\n", __func__, vp_index);
3074 3261
3075 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 3262 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
@@ -3112,7 +3299,8 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3112 le16_to_cpu(vce->comp_status)); 3299 le16_to_cpu(vce->comp_status));
3113 rval = QLA_FUNCTION_FAILED; 3300 rval = QLA_FUNCTION_FAILED;
3114 } else { 3301 } else {
3115 ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__); 3302 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
3303 "Done %s.\n", __func__);
3116 } 3304 }
3117 3305
3118 dma_pool_free(ha->s_dma_pool, vce, vce_dma); 3306 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3149,14 +3337,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3149 mbx_cmd_t mc; 3337 mbx_cmd_t mc;
3150 mbx_cmd_t *mcp = &mc; 3338 mbx_cmd_t *mcp = &mc;
3151 3339
3152 ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__); 3340 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
3153 3341 "Entered %s.\n", __func__);
3154 /*
3155 * This command is implicitly executed by firmware during login for the
3156 * physical hosts
3157 */
3158 if (vp_idx == 0)
3159 return QLA_FUNCTION_FAILED;
3160 3342
3161 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 3343 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
3162 mcp->mb[1] = format; 3344 mcp->mb[1] = format;
@@ -3185,7 +3367,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3185 mbx_cmd_t mc; 3367 mbx_cmd_t mc;
3186 mbx_cmd_t *mcp = &mc; 3368 mbx_cmd_t *mcp = &mc;
3187 3369
3188 ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__); 3370 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
3371 "Entered %s.\n", __func__);
3189 3372
3190 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 3373 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3191 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 3374 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3219,7 +3402,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3219 ql_dbg(ql_dbg_mbx, vha, 0x1008, 3402 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3220 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3403 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3221 } else { 3404 } else {
3222 ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__); 3405 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
3406 "Done %s.\n", __func__);
3223 } 3407 }
3224 3408
3225 return rval; 3409 return rval;
@@ -3244,7 +3428,8 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3244 unsigned long flags; 3428 unsigned long flags;
3245 struct qla_hw_data *ha = vha->hw; 3429 struct qla_hw_data *ha = vha->hw;
3246 3430
3247 ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__); 3431 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
3432 "Entered %s.\n", __func__);
3248 3433
3249 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 3434 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3250 if (mn == NULL) { 3435 if (mn == NULL) {
@@ -3285,7 +3470,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3285 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 3470 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3286 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 3471 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3287 le16_to_cpu(mn->p.rsp.failure_code) : 0; 3472 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3288 ql_dbg(ql_dbg_mbx, vha, 0x10ce, 3473 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
3289 "cs=%x fc=%x.\n", status[0], status[1]); 3474 "cs=%x fc=%x.\n", status[0], status[1]);
3290 3475
3291 if (status[0] != CS_COMPLETE) { 3476 if (status[0] != CS_COMPLETE) {
@@ -3299,7 +3484,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3299 retry = 1; 3484 retry = 1;
3300 } 3485 }
3301 } else { 3486 } else {
3302 ql_dbg(ql_dbg_mbx, vha, 0x10d0, 3487 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
3303 "Firmware updated to %x.\n", 3488 "Firmware updated to %x.\n",
3304 le32_to_cpu(mn->p.rsp.fw_ver)); 3489 le32_to_cpu(mn->p.rsp.fw_ver));
3305 3490
@@ -3316,9 +3501,11 @@ verify_done:
3316 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 3501 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3317 3502
3318 if (rval != QLA_SUCCESS) { 3503 if (rval != QLA_SUCCESS) {
3319 ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval); 3504 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
3505 "Failed=%x.\n", rval);
3320 } else { 3506 } else {
3321 ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__); 3507 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
3508 "Done %s.\n", __func__);
3322 } 3509 }
3323 3510
3324 return rval; 3511 return rval;
@@ -3334,7 +3521,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3334 struct device_reg_25xxmq __iomem *reg; 3521 struct device_reg_25xxmq __iomem *reg;
3335 struct qla_hw_data *ha = vha->hw; 3522 struct qla_hw_data *ha = vha->hw;
3336 3523
3337 ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__); 3524 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
3525 "Entered %s.\n", __func__);
3338 3526
3339 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3527 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3340 mcp->mb[1] = req->options; 3528 mcp->mb[1] = req->options;
@@ -3388,7 +3576,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3388 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 3576 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3389 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3577 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3390 } else { 3578 } else {
3391 ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__); 3579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
3580 "Done %s.\n", __func__);
3392 } 3581 }
3393 3582
3394 return rval; 3583 return rval;
@@ -3404,7 +3593,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3404 struct device_reg_25xxmq __iomem *reg; 3593 struct device_reg_25xxmq __iomem *reg;
3405 struct qla_hw_data *ha = vha->hw; 3594 struct qla_hw_data *ha = vha->hw;
3406 3595
3407 ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__); 3596 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
3597 "Entered %s.\n", __func__);
3408 3598
3409 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3599 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3410 mcp->mb[1] = rsp->options; 3600 mcp->mb[1] = rsp->options;
@@ -3456,7 +3646,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3456 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 3646 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3457 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3647 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3458 } else { 3648 } else {
3459 ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__); 3649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
3650 "Done %s.\n", __func__);
3460 } 3651 }
3461 3652
3462 return rval; 3653 return rval;
@@ -3469,7 +3660,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3469 mbx_cmd_t mc; 3660 mbx_cmd_t mc;
3470 mbx_cmd_t *mcp = &mc; 3661 mbx_cmd_t *mcp = &mc;
3471 3662
3472 ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__); 3663 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
3664 "Entered %s.\n", __func__);
3473 3665
3474 mcp->mb[0] = MBC_IDC_ACK; 3666 mcp->mb[0] = MBC_IDC_ACK;
3475 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 3667 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3483,7 +3675,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3483 ql_dbg(ql_dbg_mbx, vha, 0x10da, 3675 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3484 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3676 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3485 } else { 3677 } else {
3486 ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__); 3678 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
3679 "Done %s.\n", __func__);
3487 } 3680 }
3488 3681
3489 return rval; 3682 return rval;
@@ -3496,7 +3689,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3496 mbx_cmd_t mc; 3689 mbx_cmd_t mc;
3497 mbx_cmd_t *mcp = &mc; 3690 mbx_cmd_t *mcp = &mc;
3498 3691
3499 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__); 3692 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
3693 "Entered %s.\n", __func__);
3500 3694
3501 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3695 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3502 return QLA_FUNCTION_FAILED; 3696 return QLA_FUNCTION_FAILED;
@@ -3514,7 +3708,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3514 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3708 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3515 rval, mcp->mb[0], mcp->mb[1]); 3709 rval, mcp->mb[0], mcp->mb[1]);
3516 } else { 3710 } else {
3517 ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__); 3711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
3712 "Done %s.\n", __func__);
3518 *sector_size = mcp->mb[1]; 3713 *sector_size = mcp->mb[1];
3519 } 3714 }
3520 3715
@@ -3531,7 +3726,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3531 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3726 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3532 return QLA_FUNCTION_FAILED; 3727 return QLA_FUNCTION_FAILED;
3533 3728
3534 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__); 3729 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
3730 "Entered %s.\n", __func__);
3535 3731
3536 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3732 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3537 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 3733 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3547,7 +3743,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3547 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3743 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3548 rval, mcp->mb[0], mcp->mb[1]); 3744 rval, mcp->mb[0], mcp->mb[1]);
3549 } else { 3745 } else {
3550 ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__); 3746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
3747 "Done %s.\n", __func__);
3551 } 3748 }
3552 3749
3553 return rval; 3750 return rval;
@@ -3563,7 +3760,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3563 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3760 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3564 return QLA_FUNCTION_FAILED; 3761 return QLA_FUNCTION_FAILED;
3565 3762
3566 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__); 3763 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
3764 "Entered %s.\n", __func__);
3567 3765
3568 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3766 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3569 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 3767 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3582,7 +3780,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3582 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 3780 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3583 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 3781 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3584 } else { 3782 } else {
3585 ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__); 3783 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
3784 "Done %s.\n", __func__);
3586 } 3785 }
3587 3786
3588 return rval; 3787 return rval;
@@ -3595,7 +3794,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3595 mbx_cmd_t mc; 3794 mbx_cmd_t mc;
3596 mbx_cmd_t *mcp = &mc; 3795 mbx_cmd_t *mcp = &mc;
3597 3796
3598 ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__); 3797 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
3798 "Entered %s.\n", __func__);
3599 3799
3600 mcp->mb[0] = MBC_RESTART_MPI_FW; 3800 mcp->mb[0] = MBC_RESTART_MPI_FW;
3601 mcp->out_mb = MBX_0; 3801 mcp->out_mb = MBX_0;
@@ -3609,7 +3809,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3609 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3809 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3610 rval, mcp->mb[0], mcp->mb[1]); 3810 rval, mcp->mb[0], mcp->mb[1]);
3611 } else { 3811 } else {
3612 ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__); 3812 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
3813 "Done %s.\n", __func__);
3613 } 3814 }
3614 3815
3615 return rval; 3816 return rval;
@@ -3624,7 +3825,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3624 mbx_cmd_t *mcp = &mc; 3825 mbx_cmd_t *mcp = &mc;
3625 struct qla_hw_data *ha = vha->hw; 3826 struct qla_hw_data *ha = vha->hw;
3626 3827
3627 ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__); 3828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
3829 "Entered %s.\n", __func__);
3628 3830
3629 if (!IS_FWI2_CAPABLE(ha)) 3831 if (!IS_FWI2_CAPABLE(ha))
3630 return QLA_FUNCTION_FAILED; 3832 return QLA_FUNCTION_FAILED;
@@ -3654,7 +3856,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3654 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 3856 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3655 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3857 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3656 } else { 3858 } else {
3657 ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__); 3859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
3860 "Done %s.\n", __func__);
3658 } 3861 }
3659 3862
3660 return rval; 3863 return rval;
@@ -3669,7 +3872,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3669 mbx_cmd_t *mcp = &mc; 3872 mbx_cmd_t *mcp = &mc;
3670 struct qla_hw_data *ha = vha->hw; 3873 struct qla_hw_data *ha = vha->hw;
3671 3874
3672 ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__); 3875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
3876 "Entered %s.\n", __func__);
3673 3877
3674 if (!IS_FWI2_CAPABLE(ha)) 3878 if (!IS_FWI2_CAPABLE(ha))
3675 return QLA_FUNCTION_FAILED; 3879 return QLA_FUNCTION_FAILED;
@@ -3699,7 +3903,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3699 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 3903 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
3700 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3904 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3701 } else { 3905 } else {
3702 ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__); 3906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
3907 "Done %s.\n", __func__);
3703 } 3908 }
3704 3909
3705 return rval; 3910 return rval;
@@ -3713,7 +3918,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3713 mbx_cmd_t mc; 3918 mbx_cmd_t mc;
3714 mbx_cmd_t *mcp = &mc; 3919 mbx_cmd_t *mcp = &mc;
3715 3920
3716 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__); 3921 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
3922 "Entered %s.\n", __func__);
3717 3923
3718 if (!IS_CNA_CAPABLE(vha->hw)) 3924 if (!IS_CNA_CAPABLE(vha->hw))
3719 return QLA_FUNCTION_FAILED; 3925 return QLA_FUNCTION_FAILED;
@@ -3735,7 +3941,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3735 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 3941 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3736 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 3942 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3737 } else { 3943 } else {
3738 ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__); 3944 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
3945 "Done %s.\n", __func__);
3739 3946
3740 3947
3741 *actual_size = mcp->mb[2] << 2; 3948 *actual_size = mcp->mb[2] << 2;
@@ -3752,7 +3959,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3752 mbx_cmd_t mc; 3959 mbx_cmd_t mc;
3753 mbx_cmd_t *mcp = &mc; 3960 mbx_cmd_t *mcp = &mc;
3754 3961
3755 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__); 3962 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
3963 "Entered %s.\n", __func__);
3756 3964
3757 if (!IS_CNA_CAPABLE(vha->hw)) 3965 if (!IS_CNA_CAPABLE(vha->hw))
3758 return QLA_FUNCTION_FAILED; 3966 return QLA_FUNCTION_FAILED;
@@ -3775,7 +3983,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3775 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 3983 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3776 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 3984 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3777 } else { 3985 } else {
3778 ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__); 3986 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
3987 "Done %s.\n", __func__);
3779 } 3988 }
3780 3989
3781 return rval; 3990 return rval;
@@ -3788,7 +3997,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3788 mbx_cmd_t mc; 3997 mbx_cmd_t mc;
3789 mbx_cmd_t *mcp = &mc; 3998 mbx_cmd_t *mcp = &mc;
3790 3999
3791 ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__); 4000 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4001 "Entered %s.\n", __func__);
3792 4002
3793 if (!IS_FWI2_CAPABLE(vha->hw)) 4003 if (!IS_FWI2_CAPABLE(vha->hw))
3794 return QLA_FUNCTION_FAILED; 4004 return QLA_FUNCTION_FAILED;
@@ -3805,7 +4015,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3805 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 4015 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
3806 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4016 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3807 } else { 4017 } else {
3808 ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__); 4018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4019 "Done %s.\n", __func__);
3809 *data = mcp->mb[3] << 16 | mcp->mb[2]; 4020 *data = mcp->mb[3] << 16 | mcp->mb[2];
3810 } 4021 }
3811 4022
@@ -3821,7 +4032,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3821 mbx_cmd_t *mcp = &mc; 4032 mbx_cmd_t *mcp = &mc;
3822 uint32_t iter_cnt = 0x1; 4033 uint32_t iter_cnt = 0x1;
3823 4034
3824 ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__); 4035 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4036 "Entered %s.\n", __func__);
3825 4037
3826 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4038 memset(mcp->mb, 0 , sizeof(mcp->mb));
3827 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 4039 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3865,7 +4077,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3865 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 4077 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3866 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 4078 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
3867 } else { 4079 } else {
3868 ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__); 4080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4081 "Done %s.\n", __func__);
3869 } 4082 }
3870 4083
3871 /* Copy mailbox information */ 4084 /* Copy mailbox information */
@@ -3882,7 +4095,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3882 mbx_cmd_t *mcp = &mc; 4095 mbx_cmd_t *mcp = &mc;
3883 struct qla_hw_data *ha = vha->hw; 4096 struct qla_hw_data *ha = vha->hw;
3884 4097
3885 ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__); 4098 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4099 "Entered %s.\n", __func__);
3886 4100
3887 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4101 memset(mcp->mb, 0 , sizeof(mcp->mb));
3888 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 4102 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3926,7 +4140,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3926 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4140 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3927 rval, mcp->mb[0], mcp->mb[1]); 4141 rval, mcp->mb[0], mcp->mb[1]);
3928 } else { 4142 } else {
3929 ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__); 4143 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
4144 "Done %s.\n", __func__);
3930 } 4145 }
3931 4146
3932 /* Copy mailbox information */ 4147 /* Copy mailbox information */
@@ -3941,7 +4156,7 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3941 mbx_cmd_t mc; 4156 mbx_cmd_t mc;
3942 mbx_cmd_t *mcp = &mc; 4157 mbx_cmd_t *mcp = &mc;
3943 4158
3944 ql_dbg(ql_dbg_mbx, vha, 0x10fd, 4159 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
3945 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 4160 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
3946 4161
3947 mcp->mb[0] = MBC_ISP84XX_RESET; 4162 mcp->mb[0] = MBC_ISP84XX_RESET;
@@ -3955,7 +4170,8 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3955 if (rval != QLA_SUCCESS) 4170 if (rval != QLA_SUCCESS)
3956 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 4171 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
3957 else 4172 else
3958 ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__); 4173 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
4174 "Done %s.\n", __func__);
3959 4175
3960 return rval; 4176 return rval;
3961} 4177}
@@ -3967,7 +4183,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3967 mbx_cmd_t mc; 4183 mbx_cmd_t mc;
3968 mbx_cmd_t *mcp = &mc; 4184 mbx_cmd_t *mcp = &mc;
3969 4185
3970 ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__); 4186 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
4187 "Entered %s.\n", __func__);
3971 4188
3972 if (!IS_FWI2_CAPABLE(vha->hw)) 4189 if (!IS_FWI2_CAPABLE(vha->hw))
3973 return QLA_FUNCTION_FAILED; 4190 return QLA_FUNCTION_FAILED;
@@ -3986,7 +4203,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3986 ql_dbg(ql_dbg_mbx, vha, 0x1101, 4203 ql_dbg(ql_dbg_mbx, vha, 0x1101,
3987 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4204 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3988 } else { 4205 } else {
3989 ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__); 4206 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
4207 "Done %s.\n", __func__);
3990 } 4208 }
3991 4209
3992 return rval; 4210 return rval;
@@ -4003,7 +4221,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
4003 4221
4004 rval = QLA_SUCCESS; 4222 rval = QLA_SUCCESS;
4005 4223
4006 ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__); 4224 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
4225 "Entered %s.\n", __func__);
4007 4226
4008 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 4227 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
4009 4228
@@ -4046,7 +4265,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
4046 ql_dbg(ql_dbg_mbx, vha, 0x1104, 4265 ql_dbg(ql_dbg_mbx, vha, 0x1104,
4047 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 4266 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
4048 } else { 4267 } else {
4049 ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__); 4268 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
4269 "Done %s.\n", __func__);
4050 } 4270 }
4051 4271
4052 return rval; 4272 return rval;
@@ -4060,7 +4280,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
4060 mbx_cmd_t *mcp = &mc; 4280 mbx_cmd_t *mcp = &mc;
4061 struct qla_hw_data *ha = vha->hw; 4281 struct qla_hw_data *ha = vha->hw;
4062 4282
4063 ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__); 4283 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
4284 "Entered %s.\n", __func__);
4064 4285
4065 if (!IS_FWI2_CAPABLE(ha)) 4286 if (!IS_FWI2_CAPABLE(ha))
4066 return QLA_FUNCTION_FAILED; 4287 return QLA_FUNCTION_FAILED;
@@ -4078,7 +4299,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
4078 ql_dbg(ql_dbg_mbx, vha, 0x1107, 4299 ql_dbg(ql_dbg_mbx, vha, 0x1107,
4079 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4300 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4080 } else { 4301 } else {
4081 ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__); 4302 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
4303 "Done %s.\n", __func__);
4082 if (mcp->mb[1] != 0x7) 4304 if (mcp->mb[1] != 0x7)
4083 ha->link_data_rate = mcp->mb[1]; 4305 ha->link_data_rate = mcp->mb[1];
4084 } 4306 }
@@ -4094,7 +4316,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4094 mbx_cmd_t *mcp = &mc; 4316 mbx_cmd_t *mcp = &mc;
4095 struct qla_hw_data *ha = vha->hw; 4317 struct qla_hw_data *ha = vha->hw;
4096 4318
4097 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__); 4319 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
4320 "Entered %s.\n", __func__);
4098 4321
4099 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 4322 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
4100 return QLA_FUNCTION_FAILED; 4323 return QLA_FUNCTION_FAILED;
@@ -4113,7 +4336,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4113 /* Copy all bits to preserve original value */ 4336 /* Copy all bits to preserve original value */
4114 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 4337 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4115 4338
4116 ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__); 4339 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
4340 "Done %s.\n", __func__);
4117 } 4341 }
4118 return rval; 4342 return rval;
4119} 4343}
@@ -4125,7 +4349,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4125 mbx_cmd_t mc; 4349 mbx_cmd_t mc;
4126 mbx_cmd_t *mcp = &mc; 4350 mbx_cmd_t *mcp = &mc;
4127 4351
4128 ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__); 4352 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
4353 "Entered %s.\n", __func__);
4129 4354
4130 mcp->mb[0] = MBC_SET_PORT_CONFIG; 4355 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4131 /* Copy all bits to preserve original setting */ 4356 /* Copy all bits to preserve original setting */
@@ -4140,7 +4365,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4140 ql_dbg(ql_dbg_mbx, vha, 0x110d, 4365 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4141 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4366 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4142 } else 4367 } else
4143 ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__); 4368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
4369 "Done %s.\n", __func__);
4144 4370
4145 return rval; 4371 return rval;
4146} 4372}
@@ -4155,7 +4381,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4155 mbx_cmd_t *mcp = &mc; 4381 mbx_cmd_t *mcp = &mc;
4156 struct qla_hw_data *ha = vha->hw; 4382 struct qla_hw_data *ha = vha->hw;
4157 4383
4158 ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__); 4384 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
4385 "Entered %s.\n", __func__);
4159 4386
4160 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 4387 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4161 return QLA_FUNCTION_FAILED; 4388 return QLA_FUNCTION_FAILED;
@@ -4183,7 +4410,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4183 if (rval != QLA_SUCCESS) { 4410 if (rval != QLA_SUCCESS) {
4184 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 4411 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4185 } else { 4412 } else {
4186 ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__); 4413 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
4414 "Done %s.\n", __func__);
4187 } 4415 }
4188 4416
4189 return rval; 4417 return rval;
@@ -4196,7 +4424,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4196 uint8_t byte; 4424 uint8_t byte;
4197 struct qla_hw_data *ha = vha->hw; 4425 struct qla_hw_data *ha = vha->hw;
4198 4426
4199 ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__); 4427 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
4428 "Entered %s.\n", __func__);
4200 4429
4201 /* Integer part */ 4430 /* Integer part */
4202 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); 4431 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
@@ -4216,7 +4445,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4216 } 4445 }
4217 *frac = (byte >> 6) * 25; 4446 *frac = (byte >> 6) * 25;
4218 4447
4219 ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__); 4448 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
4449 "Done %s.\n", __func__);
4220fail: 4450fail:
4221 return rval; 4451 return rval;
4222} 4452}
@@ -4229,7 +4459,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4229 mbx_cmd_t mc; 4459 mbx_cmd_t mc;
4230 mbx_cmd_t *mcp = &mc; 4460 mbx_cmd_t *mcp = &mc;
4231 4461
4232 ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__); 4462 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
4463 "Entered %s.\n", __func__);
4233 4464
4234 if (!IS_FWI2_CAPABLE(ha)) 4465 if (!IS_FWI2_CAPABLE(ha))
4235 return QLA_FUNCTION_FAILED; 4466 return QLA_FUNCTION_FAILED;
@@ -4248,7 +4479,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4248 ql_dbg(ql_dbg_mbx, vha, 0x1016, 4479 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4249 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4480 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4250 } else { 4481 } else {
4251 ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__); 4482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
4483 "Done %s.\n", __func__);
4252 } 4484 }
4253 4485
4254 return rval; 4486 return rval;
@@ -4262,7 +4494,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4262 mbx_cmd_t mc; 4494 mbx_cmd_t mc;
4263 mbx_cmd_t *mcp = &mc; 4495 mbx_cmd_t *mcp = &mc;
4264 4496
4265 ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__); 4497 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
4498 "Entered %s.\n", __func__);
4266 4499
4267 if (!IS_QLA82XX(ha)) 4500 if (!IS_QLA82XX(ha))
4268 return QLA_FUNCTION_FAILED; 4501 return QLA_FUNCTION_FAILED;
@@ -4281,7 +4514,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4281 ql_dbg(ql_dbg_mbx, vha, 0x100c, 4514 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4282 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4515 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4283 } else { 4516 } else {
4284 ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__); 4517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
4518 "Done %s.\n", __func__);
4285 } 4519 }
4286 4520
4287 return rval; 4521 return rval;
@@ -4295,7 +4529,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4295 mbx_cmd_t *mcp = &mc; 4529 mbx_cmd_t *mcp = &mc;
4296 int rval = QLA_FUNCTION_FAILED; 4530 int rval = QLA_FUNCTION_FAILED;
4297 4531
4298 ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__); 4532 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
4533 "Entered %s.\n", __func__);
4299 4534
4300 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4535 memset(mcp->mb, 0 , sizeof(mcp->mb));
4301 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 4536 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
@@ -4318,7 +4553,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4318 (mcp->mb[1] << 16) | mcp->mb[0], 4553 (mcp->mb[1] << 16) | mcp->mb[0],
4319 (mcp->mb[3] << 16) | mcp->mb[2]); 4554 (mcp->mb[3] << 16) | mcp->mb[2]);
4320 } else { 4555 } else {
4321 ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__); 4556 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
4557 "Done %s.\n", __func__);
4322 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 4558 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
4323 if (!ha->md_template_size) { 4559 if (!ha->md_template_size) {
4324 ql_dbg(ql_dbg_mbx, vha, 0x1122, 4560 ql_dbg(ql_dbg_mbx, vha, 0x1122,
@@ -4337,7 +4573,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
4337 mbx_cmd_t *mcp = &mc; 4573 mbx_cmd_t *mcp = &mc;
4338 int rval = QLA_FUNCTION_FAILED; 4574 int rval = QLA_FUNCTION_FAILED;
4339 4575
4340 ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__); 4576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
4577 "Entered %s.\n", __func__);
4341 4578
4342 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 4579 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
4343 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 4580 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
@@ -4372,7 +4609,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
4372 ((mcp->mb[1] << 16) | mcp->mb[0]), 4609 ((mcp->mb[1] << 16) | mcp->mb[0]),
4373 ((mcp->mb[3] << 16) | mcp->mb[2])); 4610 ((mcp->mb[3] << 16) | mcp->mb[2]));
4374 } else 4611 } else
4375 ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__); 4612 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
4613 "Done %s.\n", __func__);
4376 return rval; 4614 return rval;
4377} 4615}
4378 4616
@@ -4387,7 +4625,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4387 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 4625 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4388 return QLA_FUNCTION_FAILED; 4626 return QLA_FUNCTION_FAILED;
4389 4627
4390 ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__); 4628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
4629 "Entered %s.\n", __func__);
4391 4630
4392 memset(mcp, 0, sizeof(mbx_cmd_t)); 4631 memset(mcp, 0, sizeof(mbx_cmd_t));
4393 mcp->mb[0] = MBC_SET_LED_CONFIG; 4632 mcp->mb[0] = MBC_SET_LED_CONFIG;
@@ -4412,7 +4651,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4412 ql_dbg(ql_dbg_mbx, vha, 0x1134, 4651 ql_dbg(ql_dbg_mbx, vha, 0x1134,
4413 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4652 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4414 } else { 4653 } else {
4415 ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__); 4654 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
4655 "Done %s.\n", __func__);
4416 } 4656 }
4417 4657
4418 return rval; 4658 return rval;
@@ -4429,7 +4669,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4429 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 4669 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4430 return QLA_FUNCTION_FAILED; 4670 return QLA_FUNCTION_FAILED;
4431 4671
4432 ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__); 4672 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
4673 "Entered %s.\n", __func__);
4433 4674
4434 memset(mcp, 0, sizeof(mbx_cmd_t)); 4675 memset(mcp, 0, sizeof(mbx_cmd_t));
4435 mcp->mb[0] = MBC_GET_LED_CONFIG; 4676 mcp->mb[0] = MBC_GET_LED_CONFIG;
@@ -4454,7 +4695,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4454 led_cfg[4] = mcp->mb[5]; 4695 led_cfg[4] = mcp->mb[5];
4455 led_cfg[5] = mcp->mb[6]; 4696 led_cfg[5] = mcp->mb[6];
4456 } 4697 }
4457 ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__); 4698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
4699 "Done %s.\n", __func__);
4458 } 4700 }
4459 4701
4460 return rval; 4702 return rval;
@@ -4471,7 +4713,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4471 if (!IS_QLA82XX(ha)) 4713 if (!IS_QLA82XX(ha))
4472 return QLA_FUNCTION_FAILED; 4714 return QLA_FUNCTION_FAILED;
4473 4715
4474 ql_dbg(ql_dbg_mbx, vha, 0x1127, 4716 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
4475 "Entered %s.\n", __func__); 4717 "Entered %s.\n", __func__);
4476 4718
4477 memset(mcp, 0, sizeof(mbx_cmd_t)); 4719 memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4491,7 +4733,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4491 ql_dbg(ql_dbg_mbx, vha, 0x1128, 4733 ql_dbg(ql_dbg_mbx, vha, 0x1128,
4492 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4734 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4493 } else { 4735 } else {
4494 ql_dbg(ql_dbg_mbx, vha, 0x1129, 4736 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
4495 "Done %s.\n", __func__); 4737 "Done %s.\n", __func__);
4496 } 4738 }
4497 4739
@@ -4509,7 +4751,8 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
4509 if (!IS_QLA83XX(ha)) 4751 if (!IS_QLA83XX(ha))
4510 return QLA_FUNCTION_FAILED; 4752 return QLA_FUNCTION_FAILED;
4511 4753
4512 ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__); 4754 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
4755 "Entered %s.\n", __func__);
4513 4756
4514 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 4757 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
4515 mcp->mb[1] = LSW(reg); 4758 mcp->mb[1] = LSW(reg);
@@ -4527,7 +4770,7 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
4527 ql_dbg(ql_dbg_mbx, vha, 0x1131, 4770 ql_dbg(ql_dbg_mbx, vha, 0x1131,
4528 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4771 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4529 } else { 4772 } else {
4530 ql_dbg(ql_dbg_mbx, vha, 0x1132, 4773 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
4531 "Done %s.\n", __func__); 4774 "Done %s.\n", __func__);
4532 } 4775 }
4533 4776
@@ -4543,13 +4786,14 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
4543 mbx_cmd_t *mcp = &mc; 4786 mbx_cmd_t *mcp = &mc;
4544 4787
4545 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 4788 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4546 ql_dbg(ql_dbg_mbx, vha, 0x113b, 4789 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
4547 "Implicit LOGO Unsupported.\n"); 4790 "Implicit LOGO Unsupported.\n");
4548 return QLA_FUNCTION_FAILED; 4791 return QLA_FUNCTION_FAILED;
4549 } 4792 }
4550 4793
4551 4794
4552 ql_dbg(ql_dbg_mbx, vha, 0x113c, "Done %s.\n", __func__); 4795 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
4796 "Entering %s.\n", __func__);
4553 4797
4554 /* Perform Implicit LOGO. */ 4798 /* Perform Implicit LOGO. */
4555 mcp->mb[0] = MBC_PORT_LOGOUT; 4799 mcp->mb[0] = MBC_PORT_LOGOUT;
@@ -4564,7 +4808,8 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
4564 ql_dbg(ql_dbg_mbx, vha, 0x113d, 4808 ql_dbg(ql_dbg_mbx, vha, 0x113d,
4565 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4809 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4566 else 4810 else
4567 ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__); 4811 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
4812 "Done %s.\n", __func__);
4568 4813
4569 return rval; 4814 return rval;
4570} 4815}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index aa062a1b0ca4..3e8b32419e68 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -6,6 +6,7 @@
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_gbl.h" 8#include "qla_gbl.h"
9#include "qla_target.h"
9 10
10#include <linux/moduleparam.h> 11#include <linux/moduleparam.h>
11#include <linux/vmalloc.h> 12#include <linux/vmalloc.h>
@@ -49,6 +50,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
49 50
50 spin_lock_irqsave(&ha->vport_slock, flags); 51 spin_lock_irqsave(&ha->vport_slock, flags);
51 list_add_tail(&vha->list, &ha->vp_list); 52 list_add_tail(&vha->list, &ha->vp_list);
53
54 qlt_update_vp_map(vha, SET_VP_IDX);
55
52 spin_unlock_irqrestore(&ha->vport_slock, flags); 56 spin_unlock_irqrestore(&ha->vport_slock, flags);
53 57
54 mutex_unlock(&ha->vport_lock); 58 mutex_unlock(&ha->vport_lock);
@@ -79,6 +83,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
79 spin_lock_irqsave(&ha->vport_slock, flags); 83 spin_lock_irqsave(&ha->vport_slock, flags);
80 } 84 }
81 list_del(&vha->list); 85 list_del(&vha->list);
86 qlt_update_vp_map(vha, RESET_VP_IDX);
82 spin_unlock_irqrestore(&ha->vport_slock, flags); 87 spin_unlock_irqrestore(&ha->vport_slock, flags);
83 88
84 vp_id = vha->vp_idx; 89 vp_id = vha->vp_idx;
@@ -134,7 +139,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
134 list_for_each_entry(fcport, &vha->vp_fcports, list) { 139 list_for_each_entry(fcport, &vha->vp_fcports, list) {
135 ql_dbg(ql_dbg_vport, vha, 0xa001, 140 ql_dbg(ql_dbg_vport, vha, 0xa001,
136 "Marking port dead, loop_id=0x%04x : %x.\n", 141 "Marking port dead, loop_id=0x%04x : %x.\n",
137 fcport->loop_id, fcport->vp_idx); 142 fcport->loop_id, fcport->vha->vp_idx);
138 143
139 qla2x00_mark_device_lost(vha, fcport, 0, 0); 144 qla2x00_mark_device_lost(vha, fcport, 0, 0);
140 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 145 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -150,6 +155,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
150 atomic_set(&vha->loop_state, LOOP_DOWN); 155 atomic_set(&vha->loop_state, LOOP_DOWN);
151 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 156 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
152 157
158 /* Remove port id from vp target map */
159 qlt_update_vp_map(vha, RESET_AL_PA);
160
153 qla2x00_mark_vp_devices_dead(vha); 161 qla2x00_mark_vp_devices_dead(vha);
154 atomic_set(&vha->vp_state, VP_FAILED); 162 atomic_set(&vha->vp_state, VP_FAILED);
155 vha->flags.management_server_logged_in = 0; 163 vha->flags.management_server_logged_in = 0;
@@ -295,10 +303,8 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
295static int 303static int
296qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 304qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
297{ 305{
298 ql_dbg(ql_dbg_dpc, vha, 0x4012, 306 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
299 "Entering %s.\n", __func__); 307 "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
300 ql_dbg(ql_dbg_dpc, vha, 0x4013,
301 "vp_flags: 0x%lx.\n", vha->vp_flags);
302 308
303 qla2x00_do_work(vha); 309 qla2x00_do_work(vha);
304 310
@@ -348,7 +354,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
348 } 354 }
349 } 355 }
350 356
351 ql_dbg(ql_dbg_dpc, vha, 0x401c, 357 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
352 "Exiting %s.\n", __func__); 358 "Exiting %s.\n", __func__);
353 return 0; 359 return 0;
354} 360}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de722a933438..caf627ba7fa8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1190,12 +1190,12 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1190 } 1190 }
1191 1191
1192 /* Offset in flash = lower 16 bits 1192 /* Offset in flash = lower 16 bits
1193 * Number of enteries = upper 16 bits 1193 * Number of entries = upper 16 bits
1194 */ 1194 */
1195 offset = n & 0xffffU; 1195 offset = n & 0xffffU;
1196 n = (n >> 16) & 0xffffU; 1196 n = (n >> 16) & 0xffffU;
1197 1197
1198 /* number of addr/value pair should not exceed 1024 enteries */ 1198 /* number of addr/value pair should not exceed 1024 entries */
1199 if (n >= 1024) { 1199 if (n >= 1024) {
1200 ql_log(ql_log_fatal, vha, 0x0071, 1200 ql_log(ql_log_fatal, vha, 0x0071,
1201 "Card flash not initialized:n=0x%x.\n", n); 1201 "Card flash not initialized:n=0x%x.\n", n);
@@ -2050,7 +2050,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
2050 2050
2051 rsp = (struct rsp_que *) dev_id; 2051 rsp = (struct rsp_que *) dev_id;
2052 if (!rsp) { 2052 if (!rsp) {
2053 ql_log(ql_log_info, NULL, 0xb054, 2053 ql_log(ql_log_info, NULL, 0xb053,
2054 "%s: NULL response queue pointer.\n", __func__); 2054 "%s: NULL response queue pointer.\n", __func__);
2055 return IRQ_NONE; 2055 return IRQ_NONE;
2056 } 2056 }
@@ -2446,7 +2446,7 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2446 2446
2447 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { 2447 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2448 ql_log(ql_log_info, vha, 0x00a1, 2448 ql_log(ql_log_info, vha, 0x00a1,
2449 "Firmware loaded successully from flash.\n"); 2449 "Firmware loaded successfully from flash.\n");
2450 return QLA_SUCCESS; 2450 return QLA_SUCCESS;
2451 } else { 2451 } else {
2452 ql_log(ql_log_warn, vha, 0x0108, 2452 ql_log(ql_log_warn, vha, 0x0108,
@@ -2461,7 +2461,7 @@ try_blob_fw:
2461 blob = ha->hablob = qla2x00_request_firmware(vha); 2461 blob = ha->hablob = qla2x00_request_firmware(vha);
2462 if (!blob) { 2462 if (!blob) {
2463 ql_log(ql_log_fatal, vha, 0x00a3, 2463 ql_log(ql_log_fatal, vha, 0x00a3,
2464 "Firmware image not preset.\n"); 2464 "Firmware image not present.\n");
2465 goto fw_load_failed; 2465 goto fw_load_failed;
2466 } 2466 }
2467 2467
@@ -2689,7 +2689,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
2689 if (!optrom) { 2689 if (!optrom) {
2690 ql_log(ql_log_warn, vha, 0xb01b, 2690 ql_log(ql_log_warn, vha, 0xb01b,
2691 "Unable to allocate memory " 2691 "Unable to allocate memory "
2692 "for optron burst write (%x KB).\n", 2692 "for optrom burst write (%x KB).\n",
2693 OPTROM_BURST_SIZE / 1024); 2693 OPTROM_BURST_SIZE / 1024);
2694 } 2694 }
2695 } 2695 }
@@ -2960,9 +2960,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
2960 * changing the state to DEV_READY 2960 * changing the state to DEV_READY
2961 */ 2961 */
2962 ql_log(ql_log_info, vha, 0xb023, 2962 ql_log(ql_log_info, vha, 0xb023,
2963 "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME); 2963 "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
2964 ql_log(ql_log_info, vha, 0xb024, 2964 "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
2965 "DRV_ACTIVE:%d DRV_STATE:%d.\n",
2966 drv_active, drv_state); 2965 drv_active, drv_state);
2967 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2966 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2968 QLA82XX_DEV_READY); 2967 QLA82XX_DEV_READY);
@@ -3129,7 +3128,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3129 if (ql2xmdenable) { 3128 if (ql2xmdenable) {
3130 if (qla82xx_md_collect(vha)) 3129 if (qla82xx_md_collect(vha))
3131 ql_log(ql_log_warn, vha, 0xb02c, 3130 ql_log(ql_log_warn, vha, 0xb02c,
3132 "Not able to collect minidump.\n"); 3131 "Minidump not collected.\n");
3133 } else 3132 } else
3134 ql_log(ql_log_warn, vha, 0xb04f, 3133 ql_log(ql_log_warn, vha, 0xb04f,
3135 "Minidump disabled.\n"); 3134 "Minidump disabled.\n");
@@ -3160,11 +3159,11 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
3160 "Firmware version differs " 3159 "Firmware version differs "
3161 "Previous version: %d:%d:%d - " 3160 "Previous version: %d:%d:%d - "
3162 "New version: %d:%d:%d\n", 3161 "New version: %d:%d:%d\n",
3162 fw_major_version, fw_minor_version,
3163 fw_subminor_version,
3163 ha->fw_major_version, 3164 ha->fw_major_version,
3164 ha->fw_minor_version, 3165 ha->fw_minor_version,
3165 ha->fw_subminor_version, 3166 ha->fw_subminor_version);
3166 fw_major_version, fw_minor_version,
3167 fw_subminor_version);
3168 /* Release MiniDump resources */ 3167 /* Release MiniDump resources */
3169 qla82xx_md_free(vha); 3168 qla82xx_md_free(vha);
3170 /* ALlocate MiniDump resources */ 3169 /* ALlocate MiniDump resources */
@@ -3325,6 +3324,30 @@ exit:
3325 return rval; 3324 return rval;
3326} 3325}
3327 3326
3327static int qla82xx_check_temp(scsi_qla_host_t *vha)
3328{
3329 uint32_t temp, temp_state, temp_val;
3330 struct qla_hw_data *ha = vha->hw;
3331
3332 temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
3333 temp_state = qla82xx_get_temp_state(temp);
3334 temp_val = qla82xx_get_temp_val(temp);
3335
3336 if (temp_state == QLA82XX_TEMP_PANIC) {
3337 ql_log(ql_log_warn, vha, 0x600e,
3338 "Device temperature %d degrees C exceeds "
3339 " maximum allowed. Hardware has been shut down.\n",
3340 temp_val);
3341 return 1;
3342 } else if (temp_state == QLA82XX_TEMP_WARN) {
3343 ql_log(ql_log_warn, vha, 0x600f,
3344 "Device temperature %d degrees C exceeds "
3345 "operating range. Immediate action needed.\n",
3346 temp_val);
3347 }
3348 return 0;
3349}
3350
3328void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) 3351void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
3329{ 3352{
3330 struct qla_hw_data *ha = vha->hw; 3353 struct qla_hw_data *ha = vha->hw;
@@ -3347,18 +3370,20 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3347 /* don't poll if reset is going on */ 3370 /* don't poll if reset is going on */
3348 if (!ha->flags.isp82xx_reset_hdlr_active) { 3371 if (!ha->flags.isp82xx_reset_hdlr_active) {
3349 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3372 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3350 if (dev_state == QLA82XX_DEV_NEED_RESET && 3373 if (qla82xx_check_temp(vha)) {
3374 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3375 ha->flags.isp82xx_fw_hung = 1;
3376 qla82xx_clear_pending_mbx(vha);
3377 } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
3351 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3378 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3352 ql_log(ql_log_warn, vha, 0x6001, 3379 ql_log(ql_log_warn, vha, 0x6001,
3353 "Adapter reset needed.\n"); 3380 "Adapter reset needed.\n");
3354 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3381 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3355 qla2xxx_wake_dpc(vha);
3356 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3382 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3357 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { 3383 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3358 ql_log(ql_log_warn, vha, 0x6002, 3384 ql_log(ql_log_warn, vha, 0x6002,
3359 "Quiescent needed.\n"); 3385 "Quiescent needed.\n");
3360 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3386 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3361 qla2xxx_wake_dpc(vha);
3362 } else { 3387 } else {
3363 if (qla82xx_check_fw_alive(vha)) { 3388 if (qla82xx_check_fw_alive(vha)) {
3364 ql_dbg(ql_dbg_timer, vha, 0x6011, 3389 ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3398,7 +3423,6 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3398 set_bit(ISP_ABORT_NEEDED, 3423 set_bit(ISP_ABORT_NEEDED,
3399 &vha->dpc_flags); 3424 &vha->dpc_flags);
3400 } 3425 }
3401 qla2xxx_wake_dpc(vha);
3402 ha->flags.isp82xx_fw_hung = 1; 3426 ha->flags.isp82xx_fw_hung = 1;
3403 ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n"); 3427 ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
3404 qla82xx_clear_pending_mbx(vha); 3428 qla82xx_clear_pending_mbx(vha);
@@ -4113,6 +4137,14 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
4113 goto md_failed; 4137 goto md_failed;
4114 } 4138 }
4115 4139
4140 if (ha->flags.isp82xx_no_md_cap) {
4141 ql_log(ql_log_warn, vha, 0xb054,
4142 "Forced reset from application, "
4143 "ignore minidump capture\n");
4144 ha->flags.isp82xx_no_md_cap = 0;
4145 goto md_failed;
4146 }
4147
4116 if (qla82xx_validate_template_chksum(vha)) { 4148 if (qla82xx_validate_template_chksum(vha)) {
4117 ql_log(ql_log_info, vha, 0xb039, 4149 ql_log(ql_log_info, vha, 0xb039,
4118 "Template checksum validation error\n"); 4150 "Template checksum validation error\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 4ac50e274661..6eb210e3cc63 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -26,6 +26,7 @@
26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) 27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
29#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
29#define QLA82XX_DMA_SHIFT_VALUE 0x55555555 30#define QLA82XX_DMA_SHIFT_VALUE 0x55555555
30 31
31#define QLA82XX_HW_H0_CH_HUB_ADR 0x05 32#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
@@ -561,7 +562,6 @@
561#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158)) 562#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
562#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg)) 563#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
563 564
564#define PCIE_CHICKEN3 (0x120c8)
565#define PCIE_SETUP_FUNCTION (0x12040) 565#define PCIE_SETUP_FUNCTION (0x12040)
566#define PCIE_SETUP_FUNCTION2 (0x12048) 566#define PCIE_SETUP_FUNCTION2 (0x12048)
567 567
@@ -1178,4 +1178,16 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
1178#define CRB_NIU_XG_PAUSE_CTL_P0 0x1 1178#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
1179#define CRB_NIU_XG_PAUSE_CTL_P1 0x8 1179#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
1180 1180
1181#define qla82xx_get_temp_val(x) ((x) >> 16)
1182#define qla82xx_get_temp_state(x) ((x) & 0xffff)
1183#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
1184
1185/*
1186 * Temperature control.
1187 */
1188enum {
1189 QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
1190 QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
1191 QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
1192};
1181#endif 1193#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c9c56a8427f3..6d1d873a20e2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,12 +13,13 @@
13#include <linux/mutex.h> 13#include <linux/mutex.h>
14#include <linux/kobject.h> 14#include <linux/kobject.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16
17#include <scsi/scsi_tcq.h> 16#include <scsi/scsi_tcq.h>
18#include <scsi/scsicam.h> 17#include <scsi/scsicam.h>
19#include <scsi/scsi_transport.h> 18#include <scsi/scsi_transport.h>
20#include <scsi/scsi_transport_fc.h> 19#include <scsi/scsi_transport_fc.h>
21 20
21#include "qla_target.h"
22
22/* 23/*
23 * Driver version 24 * Driver version
24 */ 25 */
@@ -40,6 +41,12 @@ static struct kmem_cache *ctx_cachep;
40 */ 41 */
41int ql_errlev = ql_log_all; 42int ql_errlev = ql_log_all;
42 43
44int ql2xenableclass2;
45module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
46MODULE_PARM_DESC(ql2xenableclass2,
47 "Specify if Class 2 operations are supported from the very "
48 "beginning. Default is 0 - class 2 not supported.");
49
43int ql2xlogintimeout = 20; 50int ql2xlogintimeout = 20;
44module_param(ql2xlogintimeout, int, S_IRUGO); 51module_param(ql2xlogintimeout, int, S_IRUGO);
45MODULE_PARM_DESC(ql2xlogintimeout, 52MODULE_PARM_DESC(ql2xlogintimeout,
@@ -255,6 +262,8 @@ struct scsi_host_template qla2xxx_driver_template = {
255 262
256 .max_sectors = 0xFFFF, 263 .max_sectors = 0xFFFF,
257 .shost_attrs = qla2x00_host_attrs, 264 .shost_attrs = qla2x00_host_attrs,
265
266 .supported_mode = MODE_INITIATOR,
258}; 267};
259 268
260static struct scsi_transport_template *qla2xxx_transport_template = NULL; 269static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -306,7 +315,8 @@ static void qla2x00_free_fw_dump(struct qla_hw_data *);
306static void qla2x00_mem_free(struct qla_hw_data *); 315static void qla2x00_mem_free(struct qla_hw_data *);
307 316
308/* -------------------------------------------------------------------------- */ 317/* -------------------------------------------------------------------------- */
309static int qla2x00_alloc_queues(struct qla_hw_data *ha) 318static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
319 struct rsp_que *rsp)
310{ 320{
311 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 321 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
312 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, 322 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
@@ -324,6 +334,12 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
324 "Unable to allocate memory for response queue ptrs.\n"); 334 "Unable to allocate memory for response queue ptrs.\n");
325 goto fail_rsp_map; 335 goto fail_rsp_map;
326 } 336 }
337 /*
338 * Make sure we record at least the request and response queue zero in
339 * case we need to free them if part of the probe fails.
340 */
341 ha->rsp_q_map[0] = rsp;
342 ha->req_q_map[0] = req;
327 set_bit(0, ha->rsp_qid_map); 343 set_bit(0, ha->rsp_qid_map);
328 set_bit(0, ha->req_qid_map); 344 set_bit(0, ha->req_qid_map);
329 return 1; 345 return 1;
@@ -642,12 +658,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
642 658
643 if (ha->flags.eeh_busy) { 659 if (ha->flags.eeh_busy) {
644 if (ha->flags.pci_channel_io_perm_failure) { 660 if (ha->flags.pci_channel_io_perm_failure) {
645 ql_dbg(ql_dbg_io, vha, 0x3001, 661 ql_dbg(ql_dbg_aer, vha, 0x9010,
646 "PCI Channel IO permanent failure, exiting " 662 "PCI Channel IO permanent failure, exiting "
647 "cmd=%p.\n", cmd); 663 "cmd=%p.\n", cmd);
648 cmd->result = DID_NO_CONNECT << 16; 664 cmd->result = DID_NO_CONNECT << 16;
649 } else { 665 } else {
650 ql_dbg(ql_dbg_io, vha, 0x3002, 666 ql_dbg(ql_dbg_aer, vha, 0x9011,
651 "EEH_Busy, Requeuing the cmd=%p.\n", cmd); 667 "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
652 cmd->result = DID_REQUEUE << 16; 668 cmd->result = DID_REQUEUE << 16;
653 } 669 }
@@ -657,7 +673,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
657 rval = fc_remote_port_chkready(rport); 673 rval = fc_remote_port_chkready(rport);
658 if (rval) { 674 if (rval) {
659 cmd->result = rval; 675 cmd->result = rval;
660 ql_dbg(ql_dbg_io, vha, 0x3003, 676 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
661 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 677 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
662 cmd, rval); 678 cmd, rval);
663 goto qc24_fail_command; 679 goto qc24_fail_command;
@@ -1136,7 +1152,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1136 ret = FAILED; 1152 ret = FAILED;
1137 1153
1138 ql_log(ql_log_info, vha, 0x8012, 1154 ql_log(ql_log_info, vha, 0x8012,
1139 "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun); 1155 "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
1140 1156
1141 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1157 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1142 ql_log(ql_log_fatal, vha, 0x8013, 1158 ql_log(ql_log_fatal, vha, 0x8013,
@@ -2180,6 +2196,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2180 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2196 ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2181 "Memory allocated for ha=%p.\n", ha); 2197 "Memory allocated for ha=%p.\n", ha);
2182 ha->pdev = pdev; 2198 ha->pdev = pdev;
2199 ha->tgt.enable_class_2 = ql2xenableclass2;
2183 2200
2184 /* Clear our data area */ 2201 /* Clear our data area */
2185 ha->bars = bars; 2202 ha->bars = bars;
@@ -2243,6 +2260,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2243 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2260 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2244 req_length = REQUEST_ENTRY_CNT_24XX; 2261 req_length = REQUEST_ENTRY_CNT_24XX;
2245 rsp_length = RESPONSE_ENTRY_CNT_2300; 2262 rsp_length = RESPONSE_ENTRY_CNT_2300;
2263 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2246 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2264 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2247 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2265 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2248 ha->gid_list_info_size = 8; 2266 ha->gid_list_info_size = 8;
@@ -2258,6 +2276,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2258 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2276 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2259 req_length = REQUEST_ENTRY_CNT_24XX; 2277 req_length = REQUEST_ENTRY_CNT_24XX;
2260 rsp_length = RESPONSE_ENTRY_CNT_2300; 2278 rsp_length = RESPONSE_ENTRY_CNT_2300;
2279 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2261 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2280 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2262 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2281 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2263 ha->gid_list_info_size = 8; 2282 ha->gid_list_info_size = 8;
@@ -2417,6 +2436,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2417 host->max_cmd_len, host->max_channel, host->max_lun, 2436 host->max_cmd_len, host->max_channel, host->max_lun,
2418 host->transportt, sht->vendor_id); 2437 host->transportt, sht->vendor_id);
2419 2438
2439que_init:
2440 /* Alloc arrays of request and response ring ptrs */
2441 if (!qla2x00_alloc_queues(ha, req, rsp)) {
2442 ql_log(ql_log_fatal, base_vha, 0x003d,
2443 "Failed to allocate memory for queue pointers..."
2444 "aborting.\n");
2445 goto probe_init_failed;
2446 }
2447
2448 qlt_probe_one_stage1(base_vha, ha);
2449
2420 /* Set up the irqs */ 2450 /* Set up the irqs */
2421 ret = qla2x00_request_irqs(ha, rsp); 2451 ret = qla2x00_request_irqs(ha, rsp);
2422 if (ret) 2452 if (ret)
@@ -2424,20 +2454,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2424 2454
2425 pci_save_state(pdev); 2455 pci_save_state(pdev);
2426 2456
2427 /* Alloc arrays of request and response ring ptrs */ 2457 /* Assign back pointers */
2428que_init:
2429 if (!qla2x00_alloc_queues(ha)) {
2430 ql_log(ql_log_fatal, base_vha, 0x003d,
2431 "Failed to allocate memory for queue pointers.. aborting.\n");
2432 goto probe_init_failed;
2433 }
2434
2435 ha->rsp_q_map[0] = rsp;
2436 ha->req_q_map[0] = req;
2437 rsp->req = req; 2458 rsp->req = req;
2438 req->rsp = rsp; 2459 req->rsp = rsp;
2439 set_bit(0, ha->req_qid_map); 2460
2440 set_bit(0, ha->rsp_qid_map);
2441 /* FWI2-capable only. */ 2461 /* FWI2-capable only. */
2442 req->req_q_in = &ha->iobase->isp24.req_q_in; 2462 req->req_q_in = &ha->iobase->isp24.req_q_in;
2443 req->req_q_out = &ha->iobase->isp24.req_q_out; 2463 req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2514,6 +2534,14 @@ que_init:
2514 ql_dbg(ql_dbg_init, base_vha, 0x00ee, 2534 ql_dbg(ql_dbg_init, base_vha, 0x00ee,
2515 "DPC thread started successfully.\n"); 2535 "DPC thread started successfully.\n");
2516 2536
2537 /*
2538 * If we're not coming up in initiator mode, we might sit for
2539 * a while without waking up the dpc thread, which leads to a
2540 * stuck process warning. So just kick the dpc once here and
2541 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
2542 */
2543 qla2xxx_wake_dpc(base_vha);
2544
2517skip_dpc: 2545skip_dpc:
2518 list_add_tail(&base_vha->list, &ha->vp_list); 2546 list_add_tail(&base_vha->list, &ha->vp_list);
2519 base_vha->host->irq = ha->pdev->irq; 2547 base_vha->host->irq = ha->pdev->irq;
@@ -2559,7 +2587,11 @@ skip_dpc:
2559 ql_dbg(ql_dbg_init, base_vha, 0x00f2, 2587 ql_dbg(ql_dbg_init, base_vha, 0x00f2,
2560 "Init done and hba is online.\n"); 2588 "Init done and hba is online.\n");
2561 2589
2562 scsi_scan_host(host); 2590 if (qla_ini_mode_enabled(base_vha))
2591 scsi_scan_host(host);
2592 else
2593 ql_dbg(ql_dbg_init, base_vha, 0x0122,
2594 "skipping scsi_scan_host() for non-initiator port\n");
2563 2595
2564 qla2x00_alloc_sysfs_attr(base_vha); 2596 qla2x00_alloc_sysfs_attr(base_vha);
2565 2597
@@ -2577,11 +2609,17 @@ skip_dpc:
2577 base_vha->host_no, 2609 base_vha->host_no,
2578 ha->isp_ops->fw_version_str(base_vha, fw_str)); 2610 ha->isp_ops->fw_version_str(base_vha, fw_str));
2579 2611
2612 qlt_add_target(ha, base_vha);
2613
2580 return 0; 2614 return 0;
2581 2615
2582probe_init_failed: 2616probe_init_failed:
2583 qla2x00_free_req_que(ha, req); 2617 qla2x00_free_req_que(ha, req);
2618 ha->req_q_map[0] = NULL;
2619 clear_bit(0, ha->req_qid_map);
2584 qla2x00_free_rsp_que(ha, rsp); 2620 qla2x00_free_rsp_que(ha, rsp);
2621 ha->rsp_q_map[0] = NULL;
2622 clear_bit(0, ha->rsp_qid_map);
2585 ha->max_req_queues = ha->max_rsp_queues = 0; 2623 ha->max_req_queues = ha->max_rsp_queues = 0;
2586 2624
2587probe_failed: 2625probe_failed:
@@ -2621,6 +2659,22 @@ probe_out:
2621} 2659}
2622 2660
2623static void 2661static void
2662qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
2663{
2664 struct qla_hw_data *ha = vha->hw;
2665 struct task_struct *t = ha->dpc_thread;
2666
2667 if (ha->dpc_thread == NULL)
2668 return;
2669 /*
2670 * qla2xxx_wake_dpc checks for ->dpc_thread
2671 * so we need to zero it out.
2672 */
2673 ha->dpc_thread = NULL;
2674 kthread_stop(t);
2675}
2676
2677static void
2624qla2x00_shutdown(struct pci_dev *pdev) 2678qla2x00_shutdown(struct pci_dev *pdev)
2625{ 2679{
2626 scsi_qla_host_t *vha; 2680 scsi_qla_host_t *vha;
@@ -2663,9 +2717,18 @@ qla2x00_remove_one(struct pci_dev *pdev)
2663 struct qla_hw_data *ha; 2717 struct qla_hw_data *ha;
2664 unsigned long flags; 2718 unsigned long flags;
2665 2719
2720 /*
2721 * If the PCI device is disabled that means that probe failed and any
2722 * resources should be have cleaned up on probe exit.
2723 */
2724 if (!atomic_read(&pdev->enable_cnt))
2725 return;
2726
2666 base_vha = pci_get_drvdata(pdev); 2727 base_vha = pci_get_drvdata(pdev);
2667 ha = base_vha->hw; 2728 ha = base_vha->hw;
2668 2729
2730 ha->flags.host_shutting_down = 1;
2731
2669 mutex_lock(&ha->vport_lock); 2732 mutex_lock(&ha->vport_lock);
2670 while (ha->cur_vport_count) { 2733 while (ha->cur_vport_count) {
2671 struct Scsi_Host *scsi_host; 2734 struct Scsi_Host *scsi_host;
@@ -2719,6 +2782,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
2719 ha->dpc_thread = NULL; 2782 ha->dpc_thread = NULL;
2720 kthread_stop(t); 2783 kthread_stop(t);
2721 } 2784 }
2785 qlt_remove_target(ha, base_vha);
2722 2786
2723 qla2x00_free_sysfs_attr(base_vha); 2787 qla2x00_free_sysfs_attr(base_vha);
2724 2788
@@ -2770,17 +2834,7 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2770 if (vha->timer_active) 2834 if (vha->timer_active)
2771 qla2x00_stop_timer(vha); 2835 qla2x00_stop_timer(vha);
2772 2836
2773 /* Kill the kernel thread for this host */ 2837 qla2x00_stop_dpc_thread(vha);
2774 if (ha->dpc_thread) {
2775 struct task_struct *t = ha->dpc_thread;
2776
2777 /*
2778 * qla2xxx_wake_dpc checks for ->dpc_thread
2779 * so we need to zero it out.
2780 */
2781 ha->dpc_thread = NULL;
2782 kthread_stop(t);
2783 }
2784 2838
2785 qla25xx_delete_queues(vha); 2839 qla25xx_delete_queues(vha);
2786 2840
@@ -2842,8 +2896,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2842 spin_unlock_irqrestore(vha->host->host_lock, flags); 2896 spin_unlock_irqrestore(vha->host->host_lock, flags);
2843 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 2897 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2844 qla2xxx_wake_dpc(base_vha); 2898 qla2xxx_wake_dpc(base_vha);
2845 } else 2899 } else {
2846 fc_remote_port_delete(rport); 2900 fc_remote_port_delete(rport);
2901 qlt_fc_port_deleted(vha, fcport);
2902 }
2847} 2903}
2848 2904
2849/* 2905/*
@@ -2859,7 +2915,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2859 int do_login, int defer) 2915 int do_login, int defer)
2860{ 2916{
2861 if (atomic_read(&fcport->state) == FCS_ONLINE && 2917 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2862 vha->vp_idx == fcport->vp_idx) { 2918 vha->vp_idx == fcport->vha->vp_idx) {
2863 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 2919 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2864 qla2x00_schedule_rport_del(vha, fcport, defer); 2920 qla2x00_schedule_rport_del(vha, fcport, defer);
2865 } 2921 }
@@ -2908,7 +2964,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2908 fc_port_t *fcport; 2964 fc_port_t *fcport;
2909 2965
2910 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2966 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2911 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx) 2967 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
2912 continue; 2968 continue;
2913 2969
2914 /* 2970 /*
@@ -2921,7 +2977,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2921 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 2977 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2922 if (defer) 2978 if (defer)
2923 qla2x00_schedule_rport_del(vha, fcport, defer); 2979 qla2x00_schedule_rport_del(vha, fcport, defer);
2924 else if (vha->vp_idx == fcport->vp_idx) 2980 else if (vha->vp_idx == fcport->vha->vp_idx)
2925 qla2x00_schedule_rport_del(vha, fcport, defer); 2981 qla2x00_schedule_rport_del(vha, fcport, defer);
2926 } 2982 }
2927 } 2983 }
@@ -2946,10 +3002,13 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2946 if (!ha->init_cb) 3002 if (!ha->init_cb)
2947 goto fail; 3003 goto fail;
2948 3004
3005 if (qlt_mem_alloc(ha) < 0)
3006 goto fail_free_init_cb;
3007
2949 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, 3008 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
2950 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); 3009 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
2951 if (!ha->gid_list) 3010 if (!ha->gid_list)
2952 goto fail_free_init_cb; 3011 goto fail_free_tgt_mem;
2953 3012
2954 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 3013 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2955 if (!ha->srb_mempool) 3014 if (!ha->srb_mempool)
@@ -3167,6 +3226,8 @@ fail_free_gid_list:
3167 ha->gid_list_dma); 3226 ha->gid_list_dma);
3168 ha->gid_list = NULL; 3227 ha->gid_list = NULL;
3169 ha->gid_list_dma = 0; 3228 ha->gid_list_dma = 0;
3229fail_free_tgt_mem:
3230 qlt_mem_free(ha);
3170fail_free_init_cb: 3231fail_free_init_cb:
3171 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 3232 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
3172 ha->init_cb_dma); 3233 ha->init_cb_dma);
@@ -3282,6 +3343,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3282 if (ha->ctx_mempool) 3343 if (ha->ctx_mempool)
3283 mempool_destroy(ha->ctx_mempool); 3344 mempool_destroy(ha->ctx_mempool);
3284 3345
3346 qlt_mem_free(ha);
3347
3285 if (ha->init_cb) 3348 if (ha->init_cb)
3286 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 3349 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
3287 ha->init_cb, ha->init_cb_dma); 3350 ha->init_cb, ha->init_cb_dma);
@@ -3311,6 +3374,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3311 3374
3312 ha->gid_list = NULL; 3375 ha->gid_list = NULL;
3313 ha->gid_list_dma = 0; 3376 ha->gid_list_dma = 0;
3377
3378 ha->tgt.atio_ring = NULL;
3379 ha->tgt.atio_dma = 0;
3380 ha->tgt.tgt_vp_map = NULL;
3314} 3381}
3315 3382
3316struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 3383struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3671,10 +3738,9 @@ qla2x00_do_dpc(void *data)
3671 3738
3672 ha->dpc_active = 1; 3739 ha->dpc_active = 1;
3673 3740
3674 ql_dbg(ql_dbg_dpc, base_vha, 0x4001, 3741 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
3675 "DPC handler waking up.\n"); 3742 "DPC handler waking up, dpc_flags=0x%lx.\n",
3676 ql_dbg(ql_dbg_dpc, base_vha, 0x4002, 3743 base_vha->dpc_flags);
3677 "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
3678 3744
3679 qla2x00_do_work(base_vha); 3745 qla2x00_do_work(base_vha);
3680 3746
@@ -3740,6 +3806,16 @@ qla2x00_do_dpc(void *data)
3740 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 3806 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3741 } 3807 }
3742 3808
3809 if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
3810 int ret;
3811 ret = qla2x00_send_change_request(base_vha, 0x3, 0);
3812 if (ret != QLA_SUCCESS)
3813 ql_log(ql_log_warn, base_vha, 0x121,
3814 "Failed to enable receiving of RSCN "
3815 "requests: 0x%x.\n", ret);
3816 clear_bit(SCR_PENDING, &base_vha->dpc_flags);
3817 }
3818
3743 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 3819 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3744 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 3820 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
3745 "Quiescence mode scheduled.\n"); 3821 "Quiescence mode scheduled.\n");
@@ -4457,6 +4533,21 @@ qla2x00_module_init(void)
4457 return -ENOMEM; 4533 return -ENOMEM;
4458 } 4534 }
4459 4535
4536 /* Initialize target kmem_cache and mem_pools */
4537 ret = qlt_init();
4538 if (ret < 0) {
4539 kmem_cache_destroy(srb_cachep);
4540 return ret;
4541 } else if (ret > 0) {
4542 /*
4543 * If initiator mode is explictly disabled by qlt_init(),
4544 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
4545 * performing scsi_scan_target() during LOOP UP event.
4546 */
4547 qla2xxx_transport_functions.disable_target_scan = 1;
4548 qla2xxx_transport_vport_functions.disable_target_scan = 1;
4549 }
4550
4460 /* Derive version string. */ 4551 /* Derive version string. */
4461 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 4552 strcpy(qla2x00_version_str, QLA2XXX_VERSION);
4462 if (ql2xextended_error_logging) 4553 if (ql2xextended_error_logging)
@@ -4468,6 +4559,7 @@ qla2x00_module_init(void)
4468 kmem_cache_destroy(srb_cachep); 4559 kmem_cache_destroy(srb_cachep);
4469 ql_log(ql_log_fatal, NULL, 0x0002, 4560 ql_log(ql_log_fatal, NULL, 0x0002,
4470 "fc_attach_transport failed...Failing load!.\n"); 4561 "fc_attach_transport failed...Failing load!.\n");
4562 qlt_exit();
4471 return -ENODEV; 4563 return -ENODEV;
4472 } 4564 }
4473 4565
@@ -4481,6 +4573,7 @@ qla2x00_module_init(void)
4481 fc_attach_transport(&qla2xxx_transport_vport_functions); 4573 fc_attach_transport(&qla2xxx_transport_vport_functions);
4482 if (!qla2xxx_transport_vport_template) { 4574 if (!qla2xxx_transport_vport_template) {
4483 kmem_cache_destroy(srb_cachep); 4575 kmem_cache_destroy(srb_cachep);
4576 qlt_exit();
4484 fc_release_transport(qla2xxx_transport_template); 4577 fc_release_transport(qla2xxx_transport_template);
4485 ql_log(ql_log_fatal, NULL, 0x0004, 4578 ql_log(ql_log_fatal, NULL, 0x0004,
4486 "fc_attach_transport vport failed...Failing load!.\n"); 4579 "fc_attach_transport vport failed...Failing load!.\n");
@@ -4492,6 +4585,7 @@ qla2x00_module_init(void)
4492 ret = pci_register_driver(&qla2xxx_pci_driver); 4585 ret = pci_register_driver(&qla2xxx_pci_driver);
4493 if (ret) { 4586 if (ret) {
4494 kmem_cache_destroy(srb_cachep); 4587 kmem_cache_destroy(srb_cachep);
4588 qlt_exit();
4495 fc_release_transport(qla2xxx_transport_template); 4589 fc_release_transport(qla2xxx_transport_template);
4496 fc_release_transport(qla2xxx_transport_vport_template); 4590 fc_release_transport(qla2xxx_transport_vport_template);
4497 ql_log(ql_log_fatal, NULL, 0x0006, 4591 ql_log(ql_log_fatal, NULL, 0x0006,
@@ -4511,6 +4605,7 @@ qla2x00_module_exit(void)
4511 pci_unregister_driver(&qla2xxx_pci_driver); 4605 pci_unregister_driver(&qla2xxx_pci_driver);
4512 qla2x00_release_firmware(); 4606 qla2x00_release_firmware();
4513 kmem_cache_destroy(srb_cachep); 4607 kmem_cache_destroy(srb_cachep);
4608 qlt_exit();
4514 if (ctx_cachep) 4609 if (ctx_cachep)
4515 kmem_cache_destroy(ctx_cachep); 4610 kmem_cache_destroy(ctx_cachep);
4516 fc_release_transport(qla2xxx_transport_template); 4611 fc_release_transport(qla2xxx_transport_template);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
new file mode 100644
index 000000000000..5b30132960c7
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -0,0 +1,4963 @@
1/*
2 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
3 *
4 * based on qla2x00t.c code:
5 *
6 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
7 * Copyright (C) 2004 - 2005 Leonid Stoljar
8 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
9 * Copyright (C) 2006 - 2010 ID7 Ltd.
10 *
11 * Forward port and refactoring to modern qla2xxx and target/configfs
12 *
13 * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation, version 2
18 * of the License.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/types.h>
29#include <linux/blkdev.h>
30#include <linux/interrupt.h>
31#include <linux/pci.h>
32#include <linux/delay.h>
33#include <linux/list.h>
34#include <linux/workqueue.h>
35#include <asm/unaligned.h>
36#include <scsi/scsi.h>
37#include <scsi/scsi_host.h>
38#include <scsi/scsi_tcq.h>
39#include <target/target_core_base.h>
40#include <target/target_core_fabric.h>
41
42#include "qla_def.h"
43#include "qla_target.h"
44
45static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
46module_param(qlini_mode, charp, S_IRUGO);
47MODULE_PARM_DESC(qlini_mode,
48 "Determines when initiator mode will be enabled. Possible values: "
49 "\"exclusive\" - initiator mode will be enabled on load, "
50 "disabled on enabling target mode and then on disabling target mode "
51 "enabled back; "
52 "\"disabled\" - initiator mode will never be enabled; "
53 "\"enabled\" (default) - initiator mode will always stay enabled.");
54
55static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
56
57/*
58 * From scsi/fc/fc_fcp.h
59 */
60enum fcp_resp_rsp_codes {
61 FCP_TMF_CMPL = 0,
62 FCP_DATA_LEN_INVALID = 1,
63 FCP_CMND_FIELDS_INVALID = 2,
64 FCP_DATA_PARAM_MISMATCH = 3,
65 FCP_TMF_REJECTED = 4,
66 FCP_TMF_FAILED = 5,
67 FCP_TMF_INVALID_LUN = 9,
68};
69
70/*
71 * fc_pri_ta from scsi/fc/fc_fcp.h
72 */
73#define FCP_PTA_SIMPLE 0 /* simple task attribute */
74#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
75#define FCP_PTA_ORDERED 2 /* ordered task attribute */
76#define FCP_PTA_ACA 4 /* auto. contigent allegiance */
77#define FCP_PTA_MASK 7 /* mask for task attribute field */
78#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
79#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
80
81/*
82 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
83 * must be called under HW lock and could unlock/lock it inside.
84 * It isn't an issue, since in the current implementation on the time when
85 * those functions are called:
86 *
87 * - Either context is IRQ and only IRQ handler can modify HW data,
88 * including rings related fields,
89 *
90 * - Or access to target mode variables from struct qla_tgt doesn't
91 * cross those functions boundaries, except tgt_stop, which
92 * additionally protected by irq_cmd_count.
93 */
94/* Predefs for callbacks handed to qla2xxx LLD */
95static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
96 struct atio_from_isp *pkt);
97static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
98static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
99 int fn, void *iocb, int flags);
100static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
101 *cmd, struct atio_from_isp *atio, int ha_locked);
102static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
103 struct qla_tgt_srr_imm *imm, int ha_lock);
104/*
105 * Global Variables
106 */
107static struct kmem_cache *qla_tgt_cmd_cachep;
108static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
109static mempool_t *qla_tgt_mgmt_cmd_mempool;
110static struct workqueue_struct *qla_tgt_wq;
111static DEFINE_MUTEX(qla_tgt_mutex);
112static LIST_HEAD(qla_tgt_glist);
113
114/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
115static struct qla_tgt_sess *qlt_find_sess_by_port_name(
116 struct qla_tgt *tgt,
117 const uint8_t *port_name)
118{
119 struct qla_tgt_sess *sess;
120
121 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
122 if (!memcmp(sess->port_name, port_name, WWN_SIZE))
123 return sess;
124 }
125
126 return NULL;
127}
128
129/* Might release hw lock, then reaquire!! */
130static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
131{
132 /* Send marker if required */
133 if (unlikely(vha->marker_needed != 0)) {
134 int rc = qla2x00_issue_marker(vha, vha_locked);
135 if (rc != QLA_SUCCESS) {
136 ql_dbg(ql_dbg_tgt, vha, 0xe03d,
137 "qla_target(%d): issue_marker() failed\n",
138 vha->vp_idx);
139 }
140 return rc;
141 }
142 return QLA_SUCCESS;
143}
144
145static inline
146struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
147 uint8_t *d_id)
148{
149 struct qla_hw_data *ha = vha->hw;
150 uint8_t vp_idx;
151
152 if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
153 return NULL;
154
155 if (vha->d_id.b.al_pa == d_id[2])
156 return vha;
157
158 BUG_ON(ha->tgt.tgt_vp_map == NULL);
159 vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
160 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
161 return ha->tgt.tgt_vp_map[vp_idx].vha;
162
163 return NULL;
164}
165
166static inline
167struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
168 uint16_t vp_idx)
169{
170 struct qla_hw_data *ha = vha->hw;
171
172 if (vha->vp_idx == vp_idx)
173 return vha;
174
175 BUG_ON(ha->tgt.tgt_vp_map == NULL);
176 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
177 return ha->tgt.tgt_vp_map[vp_idx].vha;
178
179 return NULL;
180}
181
/*
 * Route an incoming ATIO to the scsi_qla_host (base or virtual port) it is
 * addressed to, then hand it to qlt_24xx_atio_pkt() on that host.
 * Unknown entry types and unresolvable destinations are logged and dropped.
 */
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		/* Command ATIO: route by the 24-bit destination FC address. */
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		/*
		 * Immediate notify: route by vp_index when one is given;
		 * 0xFF vp_index / 0xFFFF nport_handle means "this host".
		 */
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}
235
236void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
237{
238 switch (pkt->entry_type) {
239 case CTIO_TYPE7:
240 {
241 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
242 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
243 entry->vp_index);
244 if (unlikely(!host)) {
245 ql_dbg(ql_dbg_tgt, vha, 0xe041,
246 "qla_target(%d): Response pkt (CTIO_TYPE7) "
247 "received, with unknown vp_index %d\n",
248 vha->vp_idx, entry->vp_index);
249 break;
250 }
251 qlt_response_pkt(host, pkt);
252 break;
253 }
254
255 case IMMED_NOTIFY_TYPE:
256 {
257 struct scsi_qla_host *host = vha;
258 struct imm_ntfy_from_isp *entry =
259 (struct imm_ntfy_from_isp *)pkt;
260
261 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
262 if (unlikely(!host)) {
263 ql_dbg(ql_dbg_tgt, vha, 0xe042,
264 "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
265 "received, with unknown vp_index %d\n",
266 vha->vp_idx, entry->u.isp24.vp_index);
267 break;
268 }
269 qlt_response_pkt(host, pkt);
270 break;
271 }
272
273 case NOTIFY_ACK_TYPE:
274 {
275 struct scsi_qla_host *host = vha;
276 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
277
278 if (0xFF != entry->u.isp24.vp_index) {
279 host = qlt_find_host_by_vp_idx(vha,
280 entry->u.isp24.vp_index);
281 if (unlikely(!host)) {
282 ql_dbg(ql_dbg_tgt, vha, 0xe043,
283 "qla_target(%d): Response "
284 "pkt (NOTIFY_ACK_TYPE) "
285 "received, with unknown "
286 "vp_index %d\n", vha->vp_idx,
287 entry->u.isp24.vp_index);
288 break;
289 }
290 }
291 qlt_response_pkt(host, pkt);
292 break;
293 }
294
295 case ABTS_RECV_24XX:
296 {
297 struct abts_recv_from_24xx *entry =
298 (struct abts_recv_from_24xx *)pkt;
299 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
300 entry->vp_index);
301 if (unlikely(!host)) {
302 ql_dbg(ql_dbg_tgt, vha, 0xe044,
303 "qla_target(%d): Response pkt "
304 "(ABTS_RECV_24XX) received, with unknown "
305 "vp_index %d\n", vha->vp_idx, entry->vp_index);
306 break;
307 }
308 qlt_response_pkt(host, pkt);
309 break;
310 }
311
312 case ABTS_RESP_24XX:
313 {
314 struct abts_resp_to_24xx *entry =
315 (struct abts_resp_to_24xx *)pkt;
316 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
317 entry->vp_index);
318 if (unlikely(!host)) {
319 ql_dbg(ql_dbg_tgt, vha, 0xe045,
320 "qla_target(%d): Response pkt "
321 "(ABTS_RECV_24XX) received, with unknown "
322 "vp_index %d\n", vha->vp_idx, entry->vp_index);
323 break;
324 }
325 qlt_response_pkt(host, pkt);
326 break;
327 }
328
329 default:
330 qlt_response_pkt(vha, pkt);
331 break;
332 }
333
334}
335
/*
 * Deferred teardown of a qla_tgt_sess, run from the global workqueue after
 * qlt_unreg_sess() has unlinked the session from all lists.  Releases the
 * fabric-module nexus (if one was created), frees the session memory, and
 * wakes anyone in wait_event(tgt->waitQ, ...) once the last session is gone.
 */
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 *
	 * NOTE(review): sess_count is decremented here without
	 * ha->hardware_lock, while test_tgt_sess_count() reads it under the
	 * lock -- confirm this is safe (single-workqueue serialization?).
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}
363
/* ha->hardware_lock supposed to be held on entry */
/*
 * Unlink a session from the target's bookkeeping and schedule its
 * asynchronous destruction (qlt_free_session_done).  Safe to call under the
 * hardware lock because the actual teardown is deferred to a work item.
 */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	/* Drop the fcport's back-reference to this session first. */
	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	/* If deletion was pending, also remove from the del list. */
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
379
/* ha->hardware_lock supposed to be held on entry */
/*
 * Translate a firmware reset-class IOCB into a task-management request
 * (mcmd) against the session identified by the IOCB's nport_handle.
 * Returns 0 on success, -ESRCH when no matching session exists, or the
 * result of qlt_issue_task_mgmt().  Global resets (loop_id 0xFFFF) are
 * currently not handled (see the #if 0 block).
 */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	/* The same IOCB is viewed both ways; only one layout is valid. */
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0 /* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port "
	    "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
	    "mcmd %x, loop_id %d)\n", vha->host_no, sess,
	    sess->port_name[0], sess->port_name[1],
	    sess->port_name[2], sess->port_name[3],
	    sess->port_name[4], sess->port_name[5],
	    sess->port_name[6], sess->port_name[7],
	    mcmd, loop_id);

	/* LUN only meaningful for the ATIO view of the IOCB. */
	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
448
449/* ha->hardware_lock supposed to be held on entry */
450static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
451 bool immediate)
452{
453 struct qla_tgt *tgt = sess->tgt;
454 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
455
456 if (sess->deleted)
457 return;
458
459 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
460 "Scheduling sess %p for deletion\n", sess);
461 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
462 sess->deleted = 1;
463
464 if (immediate)
465 dev_loss_tmo = 0;
466
467 sess->expires = jiffies + dev_loss_tmo * HZ;
468
469 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
470 "qla_target(%d): session for port %02x:%02x:%02x:"
471 "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
472 "deletion in %u secs (expires: %lu) immed: %d\n",
473 sess->vha->vp_idx,
474 sess->port_name[0], sess->port_name[1],
475 sess->port_name[2], sess->port_name[3],
476 sess->port_name[4], sess->port_name[5],
477 sess->port_name[6], sess->port_name[7],
478 sess->loop_id, dev_loss_tmo, sess->expires, immediate);
479
480 if (immediate)
481 schedule_delayed_work(&tgt->sess_del_work, 0);
482 else
483 schedule_delayed_work(&tgt->sess_del_work,
484 jiffies - sess->expires);
485}
486
487/* ha->hardware_lock supposed to be held on entry */
488static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
489{
490 struct qla_tgt_sess *sess;
491
492 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
493 qlt_schedule_sess_for_deletion(sess, true);
494
495 /* At this point tgt could be already dead */
496}
497
498static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
499 uint16_t *loop_id)
500{
501 struct qla_hw_data *ha = vha->hw;
502 dma_addr_t gid_list_dma;
503 struct gid_list_info *gid_list;
504 char *id_iter;
505 int res, rc, i;
506 uint16_t entries;
507
508 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
509 &gid_list_dma, GFP_KERNEL);
510 if (!gid_list) {
511 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
512 "qla_target(%d): DMA Alloc failed of %u\n",
513 vha->vp_idx, qla2x00_gid_list_size(ha));
514 return -ENOMEM;
515 }
516
517 /* Get list of logged in devices */
518 rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
519 if (rc != QLA_SUCCESS) {
520 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
521 "qla_target(%d): get_id_list() failed: %x\n",
522 vha->vp_idx, rc);
523 res = -1;
524 goto out_free_id_list;
525 }
526
527 id_iter = (char *)gid_list;
528 res = -1;
529 for (i = 0; i < entries; i++) {
530 struct gid_list_info *gid = (struct gid_list_info *)id_iter;
531 if ((gid->al_pa == s_id[2]) &&
532 (gid->area == s_id[1]) &&
533 (gid->domain == s_id[0])) {
534 *loop_id = le16_to_cpu(gid->loop_id);
535 res = 0;
536 break;
537 }
538 id_iter += ha->gid_list_info_size;
539 }
540
541out_free_id_list:
542 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
543 gid_list, gid_list_dma);
544 return res;
545}
546
547static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
548 struct qla_tgt_sess *sess)
549{
550 struct qla_hw_data *ha = vha->hw;
551 struct qla_port_24xx_data *pmap24;
552 bool res, found = false;
553 int rc, i;
554 uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
555 uint16_t entries;
556 void *pmap;
557 int pmap_len;
558 fc_port_t *fcport;
559 int global_resets;
560
561retry:
562 global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
563
564 rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
565 if (rc != QLA_SUCCESS) {
566 res = false;
567 goto out;
568 }
569
570 pmap24 = pmap;
571 entries = pmap_len/sizeof(*pmap24);
572
573 for (i = 0; i < entries; ++i) {
574 if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
575 loop_id = le16_to_cpu(pmap24[i].loop_id);
576 found = true;
577 break;
578 }
579 }
580
581 kfree(pmap);
582
583 if (!found) {
584 res = false;
585 goto out;
586 }
587
588 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
589 "qlt_check_fcport_exist(): loop_id %d", loop_id);
590
591 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
592 if (fcport == NULL) {
593 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
594 "qla_target(%d): Allocation of tmp FC port failed",
595 vha->vp_idx);
596 res = false;
597 goto out;
598 }
599
600 fcport->loop_id = loop_id;
601
602 rc = qla2x00_get_port_database(vha, fcport, 0);
603 if (rc != QLA_SUCCESS) {
604 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
605 "qla_target(%d): Failed to retrieve fcport "
606 "information -- get_port_database() returned %x "
607 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
608 res = false;
609 goto out_free_fcport;
610 }
611
612 if (global_resets !=
613 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
614 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
615 "qla_target(%d): global reset during session discovery"
616 " (counter was %d, new %d), retrying",
617 vha->vp_idx, global_resets,
618 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
619 goto retry;
620 }
621
622 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
623 "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
624 "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
625 sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
626 fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
627
628 sess->s_id = fcport->d_id;
629 sess->loop_id = fcport->loop_id;
630 sess->conf_compl_supported = !!(fcport->flags &
631 FCF_CONF_COMP_SUPPORTED);
632
633 res = true;
634
635out_free_fcport:
636 kfree(fcport);
637
638out:
639 return res;
640}
641
/* ha->hardware_lock supposed to be held on entry */
/* Revive a session that was queued for deletion: unlink it from the
 * del_sess_list and clear the deleted flag.  Only valid for sessions
 * actually marked deleted. */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}
650
651static void qlt_del_sess_work_fn(struct delayed_work *work)
652{
653 struct qla_tgt *tgt = container_of(work, struct qla_tgt,
654 sess_del_work);
655 struct scsi_qla_host *vha = tgt->vha;
656 struct qla_hw_data *ha = vha->hw;
657 struct qla_tgt_sess *sess;
658 unsigned long flags;
659
660 spin_lock_irqsave(&ha->hardware_lock, flags);
661 while (!list_empty(&tgt->del_sess_list)) {
662 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
663 del_list_entry);
664 if (time_after_eq(jiffies, sess->expires)) {
665 bool cancel;
666
667 qlt_undelete_sess(sess);
668
669 spin_unlock_irqrestore(&ha->hardware_lock, flags);
670 cancel = qlt_check_fcport_exist(vha, sess);
671
672 if (cancel) {
673 if (sess->deleted) {
674 /*
675 * sess was again deleted while we were
676 * discovering it
677 */
678 spin_lock_irqsave(&ha->hardware_lock,
679 flags);
680 continue;
681 }
682
683 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
684 "qla_target(%d): cancel deletion of "
685 "session for port %02x:%02x:%02x:%02x:%02x:"
686 "%02x:%02x:%02x (loop ID %d), because "
687 " it isn't deleted by firmware",
688 vha->vp_idx, sess->port_name[0],
689 sess->port_name[1], sess->port_name[2],
690 sess->port_name[3], sess->port_name[4],
691 sess->port_name[5], sess->port_name[6],
692 sess->port_name[7], sess->loop_id);
693 } else {
694 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
695 "Timeout: sess %p about to be deleted\n",
696 sess);
697 ha->tgt.tgt_ops->shutdown_sess(sess);
698 ha->tgt.tgt_ops->put_sess(sess);
699 }
700
701 spin_lock_irqsave(&ha->hardware_lock, flags);
702 } else {
703 schedule_delayed_work(&tgt->sess_del_work,
704 jiffies - sess->expires);
705 break;
706 }
707 }
708 spin_unlock_irqrestore(&ha->hardware_lock, flags);
709}
710
/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
/*
 * Find-or-create a target session for fcport.  If a session with the same
 * WWPN already exists it is refreshed (and undeleted if necessary) and
 * returned with an extra sess_kref; otherwise a new session is allocated,
 * validated against the fabric module ACLs, and added to the target's
 * session list.  Returns NULL on allocation or ACL failure.  May sleep;
 * must be called without ha->hardware_lock (takes it internally).
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			/* Pending deletion is cancelled by a re-appearance. */
			if (sess->deleted)
				qlt_undelete_sess(sess);

			/* Extra reference handed back to the caller. */
			kref_get(&sess->se_sess->sess_kref);
			sess->s_id = fcport->d_id;
			sess->loop_id = fcport->loop_id;
			sess->conf_compl_supported = !!(fcport->flags &
			    FCF_CONF_COMP_SUPPORTED);
			/* A non-local sighting upgrades a local session. */
			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, "
		    "all commands from port %02x:%02x:%02x:%02x:"
		    "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
		    fcport->port_name[0], fcport->port_name[1],
		    fcport->port_name[2], fcport->port_name[3],
		    fcport->port_name[4], fcport->port_name[5],
		    fcport->port_name[6], fcport->port_name[7]);

		return NULL;
	}
	sess->tgt = ha->tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, ha->tgt.qla_tgt);

	/* Big-endian S_ID for the fabric module: domain, area, AL_PA. */
	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explict NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reaquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = !!(fcport->flags &
	    FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
	ha->tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
	    "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
	    " completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name[0],
	    fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
	    fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
	    fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
	    sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
	    "" : "not ");

	return sess;
}
822
823/*
824 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
825 */
826void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
827{
828 struct qla_hw_data *ha = vha->hw;
829 struct qla_tgt *tgt = ha->tgt.qla_tgt;
830 struct qla_tgt_sess *sess;
831 unsigned long flags;
832
833 if (!vha->hw->tgt.tgt_ops)
834 return;
835
836 if (!tgt || (fcport->port_type != FCT_INITIATOR))
837 return;
838
839 spin_lock_irqsave(&ha->hardware_lock, flags);
840 if (tgt->tgt_stop) {
841 spin_unlock_irqrestore(&ha->hardware_lock, flags);
842 return;
843 }
844 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
845 if (!sess) {
846 spin_unlock_irqrestore(&ha->hardware_lock, flags);
847
848 mutex_lock(&ha->tgt.tgt_mutex);
849 sess = qlt_create_sess(vha, fcport, false);
850 mutex_unlock(&ha->tgt.tgt_mutex);
851
852 spin_lock_irqsave(&ha->hardware_lock, flags);
853 } else {
854 kref_get(&sess->se_sess->sess_kref);
855
856 if (sess->deleted) {
857 qlt_undelete_sess(sess);
858
859 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
860 "qla_target(%u): %ssession for port %02x:"
861 "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
862 "reappeared\n", vha->vp_idx, sess->local ? "local "
863 : "", sess->port_name[0], sess->port_name[1],
864 sess->port_name[2], sess->port_name[3],
865 sess->port_name[4], sess->port_name[5],
866 sess->port_name[6], sess->port_name[7],
867 sess->loop_id);
868
869 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
870 "Reappeared sess %p\n", sess);
871 }
872 sess->s_id = fcport->d_id;
873 sess->loop_id = fcport->loop_id;
874 sess->conf_compl_supported = !!(fcport->flags &
875 FCF_CONF_COMP_SUPPORTED);
876 }
877
878 if (sess && sess->local) {
879 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
880 "qla_target(%u): local session for "
881 "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
882 "(loop ID %d) became global\n", vha->vp_idx,
883 fcport->port_name[0], fcport->port_name[1],
884 fcport->port_name[2], fcport->port_name[3],
885 fcport->port_name[4], fcport->port_name[5],
886 fcport->port_name[6], fcport->port_name[7],
887 sess->loop_id);
888 sess->local = 0;
889 }
890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
891
892 ha->tgt.tgt_ops->put_sess(sess);
893}
894
895void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
896{
897 struct qla_hw_data *ha = vha->hw;
898 struct qla_tgt *tgt = ha->tgt.qla_tgt;
899 struct qla_tgt_sess *sess;
900 unsigned long flags;
901
902 if (!vha->hw->tgt.tgt_ops)
903 return;
904
905 if (!tgt || (fcport->port_type != FCT_INITIATOR))
906 return;
907
908 spin_lock_irqsave(&ha->hardware_lock, flags);
909 if (tgt->tgt_stop) {
910 spin_unlock_irqrestore(&ha->hardware_lock, flags);
911 return;
912 }
913 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
914 if (!sess) {
915 spin_unlock_irqrestore(&ha->hardware_lock, flags);
916 return;
917 }
918
919 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
920
921 sess->local = 1;
922 qlt_schedule_sess_for_deletion(sess, false);
923 spin_unlock_irqrestore(&ha->hardware_lock, flags);
924}
925
926static inline int test_tgt_sess_count(struct qla_tgt *tgt)
927{
928 struct qla_hw_data *ha = tgt->ha;
929 unsigned long flags;
930 int res;
931 /*
932 * We need to protect against race, when tgt is freed before or
933 * inside wake_up()
934 */
935 spin_lock_irqsave(&ha->hardware_lock, flags);
936 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
937 "tgt %p, empty(sess_list)=%d sess_count=%d\n",
938 tgt, list_empty(&tgt->sess_list), tgt->sess_count);
939 res = (tgt->sess_count == 0);
940 spin_unlock_irqrestore(&ha->hardware_lock, flags);
941
942 return res;
943}
944
/* Called by tcm_qla2xxx configfs code */
/*
 * First stage of target shutdown: mark the target stopping, schedule every
 * session for deletion, flush the deletion and session works, then wait for
 * all sessions to drain before disabling target mode on the HBA.  Must be
 * followed by qlt_stop_phase2().  May sleep.
 */
void qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	/* Re-entry is a caller bug; log a stack for diagnosis. */
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	/* Let the deletion work reap everything queued above. */
	flush_delayed_work_sync(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	/* Drain sess_works_list; re-check under the lock after each flush. */
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
}
EXPORT_SYMBOL(qlt_stop_phase1);
999
/* Called by tcm_qla2xxx configfs code */
/*
 * Phase 2 of target shutdown: wait for in-flight IRQ-context commands to
 * drain (polled with udelay, dropping the hardware lock between polls),
 * then transition tgt_stop -> tgt_stopped.  A repeat call once stopped
 * is logged with a stack dump and ignored.
 */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Release the lock between polls so the IRQ path can progress. */
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
1033
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
/*
 * Final teardown: run phase 2 if it has not happened yet, detach the
 * target from the HA, and free it.  @tgt must not be used afterwards.
 */
void qlt_release(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;

	if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	ha->tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
1049
1050/* ha->hardware_lock supposed to be held on entry */
1051static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1052 const void *param, unsigned int param_size)
1053{
1054 struct qla_tgt_sess_work_param *prm;
1055 unsigned long flags;
1056
1057 prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
1058 if (!prm) {
1059 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1060 "qla_target(%d): Unable to create session "
1061 "work, command will be refused", 0);
1062 return -ENOMEM;
1063 }
1064
1065 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1066 "Scheduling work (type %d, prm %p)"
1067 " to find session for param %p (size %d, tgt %p)\n",
1068 type, prm, param, param_size, tgt);
1069
1070 prm->type = type;
1071 memcpy(&prm->tm_iocb, param, param_size);
1072
1073 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1074 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1075 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1076
1077 schedule_work(&tgt->sess_work);
1078
1079 return 0;
1080}
1081
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
/*
 * Build and send a NOTIFY_ACK IOCB answering the immediate notify @ntfy.
 * Most fields are mirrored back from the incoming notify; the SRR fields
 * come from the caller.  Marker or IOCB-allocation failure is logged and
 * the acknowledgement silently dropped.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/* Firmware will report completion of this ack; count it. */
	if (ha->tgt.qla_tgt != NULL)
		ha->tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		/* For ELS notifies, carry only the PUREX IOCB flag back. */
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	/*
	 * NOTE(review): add_flags, resp_code and resp_code_valid are not
	 * used in this body — confirm against callers whether that is
	 * intentional.
	 */

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	qla2x00_start_iocbs(vha, vha->req);
}
1139
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
/*
 * Build and send an ABTS response IOCB: BA_ACC when @status is
 * FCP_TMF_CMPL, otherwise BA_RJT (reason "unable to perform").
 * @ids_reversed indicates the s_id/d_id in @abts are already in response
 * orientation and should be copied through unswapped; otherwise they are
 * swapped.  Marker or allocation failure is logged and the response
 * dropped.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	/* Copy the 24-bit F_CTL value into the header byte-by-byte. */
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		/* Normal case: reply goes back to the sender. */
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	ha->tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}
1216
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
/*
 * Retry path when the firmware reports back on an ABTS response we sent:
 * terminate the exchange with a CTIO7 (no completion handling needed —
 * QLA_TGT_SKIP_HANDLE) and then resend a BA_ACC.  Note that @entry's ID
 * fields are in reversed (response) orientation, hence ids_reversed=true
 * in the final call.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * We've got on entrance firmware's response on by us generated
	 * ABTS response. So, in it ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}
1264
/* ha->hardware_lock supposed to be held on entry */
/*
 * Hand an incoming ABTS for an established session to the fabric module
 * as a TMR_ABORT_TASK.  A copy of the original ABTS IOCB is kept in the
 * mgmt command so the eventual response can mirror it.
 *
 * Returns 0 on success, -ENOMEM if the mgmt command cannot be allocated,
 * or -EFAULT if tgt_ops->handle_tmr() refuses the request.
 */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int rc;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	/* GFP_ATOMIC: caller holds the hardware lock. */
	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
1301
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
/*
 * Entry point for a received ABTS.  Rejects Abort Sequence requests and
 * unknown exchange addresses outright; otherwise looks up the session by
 * S_ID and dispatches to __qlt_24xx_handle_abts().  If no session exists
 * yet, the ABTS is deferred via qlt_sched_sess_work().  Any failure is
 * answered with a BA_RJT (FCP_TMF_REJECTED).
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	/* fcp_hdr_le stores the S_ID bytes reversed; flip for the lookup. */
	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existant session\n",
		    vha->vp_idx);
		/* Defer: the session may still be in the process of login. */
		rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}
1363
1364/*
1365 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1366 */
1367static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1368 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
1369{
1370 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
1371 struct ctio7_to_24xx *ctio;
1372
1373 ql_dbg(ql_dbg_tgt, ha, 0xe008,
1374 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
1375 ha, atio, resp_code);
1376
1377 /* Send marker if required */
1378 if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
1379 return;
1380
1381 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
1382 if (ctio == NULL) {
1383 ql_dbg(ql_dbg_tgt, ha, 0xe04c,
1384 "qla_target(%d): %s failed: unable to allocate "
1385 "request packet\n", ha->vp_idx, __func__);
1386 return;
1387 }
1388
1389 ctio->entry_type = CTIO_TYPE7;
1390 ctio->entry_count = 1;
1391 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1392 ctio->nport_handle = mcmd->sess->loop_id;
1393 ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
1394 ctio->vp_index = ha->vp_idx;
1395 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
1396 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
1397 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1398 ctio->exchange_addr = atio->u.isp24.exchange_addr;
1399 ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
1400 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1401 CTIO7_FLAGS_SEND_STATUS);
1402 ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
1403 ctio->u.status1.scsi_status =
1404 __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
1405 ctio->u.status1.response_len = __constant_cpu_to_le16(8);
1406 ((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);
1407
1408 qla2x00_start_iocbs(ha, ha->req);
1409}
1410
/*
 * Release a management command back to its mempool.  Called by the
 * fabric module (via tgt_ops->free_mcmd) once the TM response is done.
 */
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
1416
/* callback from target fabric module code */
/*
 * Transmit the response for a completed task-management request.  Under
 * ha->hardware_lock, send either a NOTIFY_ACK (when the mcmd was flagged
 * QLA24XX_MGMT_SEND_NACK), an ABTS response (TMR_ABORT_TASK), or a CTIO
 * carrying the TM response code — then release the mcmd through
 * tgt_ops->free_mcmd().
 */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
1452
/* No locks */
/*
 * DMA-map the command's scatter-gather list and add to prm->req_cnt the
 * number of extra request-ring entries the segment list will occupy.
 * Returns 0 on success (command marked sg_mapped; undo with
 * qlt_unmap_sg()), -1 if pci_map_sg() fails.
 */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	/*
	 * If there are more segments than the command IOCB can carry
	 * (datasegs_per_cmd), account for the continuation entries needed
	 * to hold the remainder (datasegs_per_cont segments each).
	 */
	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
1486
/*
 * Undo the DMA mapping made by qlt_pci_map_calc_cnt().  BUGs if the
 * command was never mapped.
 */
static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!cmd->sg_mapped);
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;
}
1496
1497static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
1498 uint32_t req_cnt)
1499{
1500 struct qla_hw_data *ha = vha->hw;
1501 device_reg_t __iomem *reg = ha->iobase;
1502 uint32_t cnt;
1503
1504 if (vha->req->cnt < (req_cnt + 2)) {
1505 cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
1506
1507 ql_dbg(ql_dbg_tgt, vha, 0xe00a,
1508 "Request ring circled: cnt=%d, vha->->ring_index=%d, "
1509 "vha->req->cnt=%d, req_cnt=%d\n", cnt,
1510 vha->req->ring_index, vha->req->cnt, req_cnt);
1511 if (vha->req->ring_index < cnt)
1512 vha->req->cnt = cnt - vha->req->ring_index;
1513 else
1514 vha->req->cnt = vha->req->length -
1515 (vha->req->ring_index - cnt);
1516 }
1517
1518 if (unlikely(vha->req->cnt < (req_cnt + 2))) {
1519 ql_dbg(ql_dbg_tgt, vha, 0xe00b,
1520 "qla_target(%d): There is no room in the "
1521 "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
1522 "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
1523 vha->req->cnt, req_cnt);
1524 return -EAGAIN;
1525 }
1526 vha->req->cnt -= req_cnt;
1527
1528 return 0;
1529}
1530
1531/*
1532 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1533 */
1534static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
1535{
1536 /* Adjust ring index. */
1537 vha->req->ring_index++;
1538 if (vha->req->ring_index == vha->req->length) {
1539 vha->req->ring_index = 0;
1540 vha->req->ring_ptr = vha->req->ring;
1541 } else {
1542 vha->req->ring_ptr++;
1543 }
1544 return (cont_entry_t *)vha->req->ring_ptr;
1545}
1546
/* ha->hardware_lock supposed to be held on entry */
/*
 * Allocate the next free command completion handle, searching circularly
 * from the last handle issued.  Handle 0 (QLA_TGT_NULL_HANDLE) and
 * QLA_TGT_SKIP_HANDLE are reserved and skipped; a full wrap back to the
 * starting point means every slot is busy and QLA_TGT_NULL_HANDLE is
 * returned.  On success the search position (current_handle) advances.
 */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > MAX_OUTSTANDING_COMMANDS)
			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			/* Came all the way around: no free slot. */
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}
1575
/* ha->hardware_lock supposed to be held on entry */
/*
 * Initialize a CTIO type 7 at the current request-ring position for
 * prm->cmd: allocate a completion handle (registering the command in
 * ha->tgt.cmds[]) and mirror initiator ID, exchange address, attributes,
 * OX_ID and data offset from the original ATIO.
 *
 * Returns 0 on success or -EAGAIN when no completion handle is free.
 */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	/* Initiator ID is the ATIO's S_ID, byte-reversed. */
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));
	return 0;
}
1621
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
/*
 * Emit CONTINUE_TYPE (32-bit) or CONTINUE_A64_TYPE (64-bit) IOCBs for
 * the data segments that did not fit in the command IOCB, consuming
 * prm->sg / prm->seg_cnt as it goes.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			/* Address (low, then high if 64-bit), then length. */
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}
1688
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
/*
 * Load the data-segment descriptors (address/length pairs, 32- or 64-bit
 * addressing) into the CTIO7 at prm->pkt, then spill any remaining
 * segments into continuation IOCBs.  With no segments, both descriptor
 * words are zeroed.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		/* Address (low, then high if 64-bit), then length. */
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
		    prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
		    prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	/* Remaining segments go into continuation IOCBs. */
	qlt_load_cont_data_segments(prm, vha);
}
1751
1752static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
1753{
1754 return cmd->bufflen > 0;
1755}
1756
/*
 * Called without ha->hardware_lock held
 */
/*
 * Prepare a response transmission: fill @prm from @cmd, DMA-map the data
 * when QLA_TGT_XMIT_DATA is requested, fold residual under/overflow into
 * the SCSI status, and compute *full_req_cnt (one extra entry when a
 * separate status packet will be needed).
 *
 * Returns 0 on success, QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED when the
 * command was already aborted (exchange terminated; cmd may be freed!),
 * -EFAULT on marker failure, or -EAGAIN when DMA mapping fails.
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
	    vha->vp_idx, cmd->tag);

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	/* Fold residual counts into the SCSI status. */
	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}
1851
1852static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
1853 struct qla_tgt_cmd *cmd, int sending_sense)
1854{
1855 if (ha->tgt.enable_class_2)
1856 return 0;
1857
1858 if (sending_sense)
1859 return cmd->conf_compl_supported;
1860 else
1861 return ha->tgt.enable_explicit_conf &&
1862 cmd->conf_compl_supported;
1863}
1864
1865#ifdef CONFIG_QLA_TGT_DEBUG_SRR
1866/*
1867 * Original taken from the XFS code
1868 */
/*
 * Original taken from the XFS code
 */
/*
 * Minimal-standard Lehmer PRNG (Park-Miller: multiplier 16807, modulus
 * 2^31 - 1) evaluated with Schrage's decomposition (127773, 2836) to
 * avoid overflow.  State is global, seeded from jiffies on first use,
 * and protected by a local spinlock.  Debug-only (SRR fault injection).
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}
1895
/*
 * Debug-only SRR fault injection: with small random probability, shrink
 * a read (DMA_FROM_DEVICE) command's buffer from the tail (truncating
 * bufflen/sg_cnt) or from the head (shifting the data offset, possibly
 * suppressing QLA_TGT_XMIT_DATA entirely) to provoke the SRR recovery
 * path.  No-op for write commands.
 */
static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here..
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	/* ~1% chance: cut the buffer tail at a random segment boundary. */
	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	/* ~1% chance: cut the buffer head at a random byte offset. */
	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
1949#else
/* SRR debug disabled: no-op stub. */
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
1952#endif
1953
/*
 * Fill the status portion of a CTIO7 from @prm: residual, SCSI status,
 * optional explicit-confirm flags, and — when the sense buffer is valid —
 * switch from status mode 0 to mode 1 and copy the sense data into the
 * IOCB as big-endian 32-bit words.
 */
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	/* Sense cannot exceed the space available in the IOCB. */
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |=
	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= __constant_cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if (prm->cmd->se_cmd.scsi_status != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		/* Status mode 1: sense data rides in the IOCB itself. */
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
#if 0
		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
			static int q;
			if (q < 10) {
				ql_dbg(ql_dbg_tgt, vha, 0xe04f,
				    "qla_target(%d): %d bytes of sense "
				    "lost", prm->tgt->ha->vp_idx,
				    prm->sense_buffer_len % 4);
				q++;
			}
		}
#endif
	} else {
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
2019
/*
 * Callback to set up the response for xmit_type of QLA_TGT_XMIT_DATA
 * and/or QLA_TGT_XMIT_STATUS for >= 24xx silicon.
 *
 * Builds and fires a CTIO type 7 carrying the data segments and/or the
 * SCSI status for @cmd.  Returns 0 on success (including the case where
 * the command was aborted during pre-processing) or a negative errno.
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	/* Debug builds may mangle the transfer here to provoke SRRs. */
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
	    "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		/* An abort during pre-xmit is not an error for the caller. */
		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
			return 0;

		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unmap_unlock;


	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			/* Status fits into the same CTIO as the data. */
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= __constant_cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    __constant_cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
			    (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_tgt, vha, 0xe019,
			    "Building additional status packet\n");

			memcpy(ctio, pkt, sizeof(*ctio));
			ctio->entry_count = 1;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */

	ql_dbg(ql_dbg_tgt, vha, 0xe01a,
	    "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
	    pkt, scsi_status);

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
2138
/*
 * Ask the initiator to send the write data for @cmd: maps the SG list,
 * builds a data-out CTIO type 7 and fires it, leaving the command in
 * QLA_TGT_STATE_NEED_DATA.  Returns 0 on success or a negative errno.
 */
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
	    (int)vha->vp_idx);

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unlock_free_unmap;
	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);
	qlt_load_data_segments(&prm, vha);

	/* Completion of the data transfer arrives via a CTIO later. */
	cmd->state = QLA_TGT_STATE_NEED_DATA;

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
2196
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
/*
 * Build and fire a TERMINATE-exchange CTIO type 7 for @atio.  Returns 1
 * when @cmd was in a terminable state (caller then owns freeing it),
 * 0 when not, or -ENOMEM if no request packet was available.
 */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	/* s_id is stored in reversed byte order in the FCP header. */
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
2255
2256static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2257 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2258{
2259 unsigned long flags;
2260 int rc;
2261
2262 if (qlt_issue_marker(vha, ha_locked) < 0)
2263 return;
2264
2265 if (ha_locked) {
2266 rc = __qlt_send_term_exchange(vha, cmd, atio);
2267 goto done;
2268 }
2269 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2270 rc = __qlt_send_term_exchange(vha, cmd, atio);
2271 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2272done:
2273 if (rc == 1) {
2274 if (!ha_locked && !in_interrupt())
2275 msleep(250); /* just in case */
2276
2277 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2278 }
2279}
2280
/*
 * Release a target command back to its slab cache.  The SG list must
 * already be DMA-unmapped (BUG otherwise); an SG list cloned by the SRR
 * offset code (cmd->free_sg) is owned by the command and freed here.
 */
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	BUG_ON(cmd->sg_mapped);

	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);
	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
2290
/* ha->hardware_lock supposed to be held on entry */
/*
 * Queue a CTIO-side SRR (sequence retransmission request) entry and pair
 * it with its immediate-notify counterpart.  SRR processing needs both
 * halves: if the matching IMM entry already arrived (imm_srr_id caught up
 * with ctio_srr_id) the srr_work is scheduled; if it should exist but
 * does not, or allocation fails, the corresponding entries are torn down.
 * Returns 0 on success, -EINVAL on inconsistency, -ENOMEM on allocation
 * failure.
 */
static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, void *ctio)
{
	struct qla_tgt_srr_ctio *sc;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_srr_imm *imm;

	tgt->ctio_srr_id++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
	    "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);

	if (!ctio) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
		    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
		    vha->vp_idx);
		return -EINVAL;
	}

	sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
	if (sc != NULL) {
		sc->cmd = cmd;
		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		sc->srr_id = tgt->ctio_srr_id;
		list_add_tail(&sc->srr_list_entry,
		    &tgt->srr_ctio_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			/* The IMM half should already be queued; find it. */
			int found = 0;
			list_for_each_entry(imm, &tgt->srr_imm_list,
			    srr_list_entry) {
				if (imm->srr_id == sc->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR IMM, deleting CTIO "
				    "SRR %p\n", vha->vp_idx,
				    tgt->ctio_srr_id, sc);
				list_del(&sc->srr_list_entry);
				spin_unlock(&tgt->srr_lock);

				kfree(sc);
				return -EINVAL;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_imm *ti;

		/* No CTIO entry: drop any already-queued IMM counterpart. */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
		    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
		    vha->vp_idx);
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (imm->srr_id == tgt->ctio_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
				    "IMM SRR %p deleted (id %d)\n",
				    imm, imm->srr_id);
				list_del(&imm->srr_list_entry);
				qlt_reject_free_srr_imm(vha, imm, 1);
			}
		}
		spin_unlock(&tgt->srr_lock);

		return -ENOMEM;
	}

	return 0;
}
2374
2375/*
2376 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2377 */
2378static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
2379 struct qla_tgt_cmd *cmd, uint32_t status)
2380{
2381 int term = 0;
2382
2383 if (ctio != NULL) {
2384 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
2385 term = !(c->flags &
2386 __constant_cpu_to_le16(OF_TERM_EXCH));
2387 } else
2388 term = 1;
2389
2390 if (term)
2391 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2392
2393 return term;
2394}
2395
2396/* ha->hardware_lock supposed to be held on entry */
2397static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
2398 uint32_t handle)
2399{
2400 struct qla_hw_data *ha = vha->hw;
2401
2402 handle--;
2403 if (ha->tgt.cmds[handle] != NULL) {
2404 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
2405 ha->tgt.cmds[handle] = NULL;
2406 return cmd;
2407 } else
2408 return NULL;
2409}
2410
/* ha->hardware_lock supposed to be held on entry */
/*
 * Resolve a CTIO completion handle to its qla_tgt_cmd.  Strips the
 * driver-internal marker bits first, then validates the handle range and
 * looks the command up (claiming its slot).  Returns NULL for skip
 * handles, out-of-range or unknown handles, and for NULL handles (which
 * CTIO7 cannot carry a loop ID for).
 */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;

	/* Clear out internal marks */
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);

	if (handle != QLA_TGT_NULL_HANDLE) {
		if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
			    "SKIP_HANDLE CTIO\n");
			return NULL;
		}
		/* handle-1 is actually used */
		if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}
		cmd = qlt_get_cmd(vha, handle);
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to "
			    "find the command with handle %x\n", vha->vp_idx,
			    handle);
			return NULL;
		}
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}
2452
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 *
 * Core CTIO completion handler: resolves the handle to a command,
 * unmaps its SG list, classifies error statuses (possibly terminating
 * the exchange or queueing SRR handling), then advances the command's
 * state machine — PROCESSED commands are freed, NEED_DATA commands get
 * their write data handed up to the fabric layer.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
	uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct target_core_fabric_ops *tfo;
	struct qla_tgt_cmd *cmd;

	ql_dbg(ql_dbg_tgt, vha, 0xe01e,
	    "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
	    vha->vp_idx, ctio, status, handle);

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	tfo = se_cmd->se_tfo;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with PORT LOGGED "
			    "OUT (29) or PORT UNAVAILABLE (28) status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_SRR_RECEIVED:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
			    "qla_target(%d): CTIO with SRR_RECEIVED"
			    " status %x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			/* SRR path takes ownership: don't finish here. */
			if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
				break;
			else
				return;

		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status "
			    "0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

		/* NEED_DATA errors are reported through handle_data() below. */
		if (cmd->state != QLA_TGT_STATE_NEED_DATA)
			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
				return;
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		int rx_status = 0;

		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (unlikely(status != CTIO_SUCCESS))
			rx_status = -EIO;
		else
			cmd->write_data_transferred = 1;

		ql_dbg(ql_dbg_tgt, vha, 0xe020,
		    "Data received, context %x, rx_status %d\n",
		    0x0, rx_status);

		/* handle_data() owns the command from here on. */
		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
	} else {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
2570
2571/* ha->hardware_lock supposed to be held on entry */
2572/* called via callback from qla2xxx */
2573void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
2574{
2575 struct qla_hw_data *ha = vha->hw;
2576 struct qla_tgt *tgt = ha->tgt.qla_tgt;
2577
2578 if (likely(tgt == NULL)) {
2579 ql_dbg(ql_dbg_tgt, vha, 0xe021,
2580 "CTIO, but target mode not enabled"
2581 " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
2582 return;
2583 }
2584
2585 tgt->irq_cmd_count++;
2586 qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
2587 tgt->irq_cmd_count--;
2588}
2589
2590static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
2591 uint8_t task_codes)
2592{
2593 int fcp_task_attr;
2594
2595 switch (task_codes) {
2596 case ATIO_SIMPLE_QUEUE:
2597 fcp_task_attr = MSG_SIMPLE_TAG;
2598 break;
2599 case ATIO_HEAD_OF_QUEUE:
2600 fcp_task_attr = MSG_HEAD_TAG;
2601 break;
2602 case ATIO_ORDERED_QUEUE:
2603 fcp_task_attr = MSG_ORDERED_TAG;
2604 break;
2605 case ATIO_ACA_QUEUE:
2606 fcp_task_attr = MSG_ACA_TAG;
2607 break;
2608 case ATIO_UNTAGGED:
2609 fcp_task_attr = MSG_SIMPLE_TAG;
2610 break;
2611 default:
2612 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
2613 "qla_target: unknown task code %x, use ORDERED instead\n",
2614 task_codes);
2615 fcp_task_attr = MSG_ORDERED_TAG;
2616 break;
2617 }
2618
2619 return fcp_task_attr;
2620}
2621
2622static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
2623 uint8_t *);
/*
 * Process context for I/O path into tcm_qla2xxx code
 *
 * Work-queue handler for a newly received command: finds (or, for an
 * unknown s_id, creates) the session, decodes LUN/CDB/direction/length
 * from the saved ATIO and hands the command to the fabric layer.  On any
 * failure the exchange is terminated and the command freed here.
 */
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_sess *sess = NULL;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

	if (tgt->tgt_stop)
		goto out_term;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	/* Do kref_get() before dropping qla_hw_data->hardware_lock. */
	if (sess)
		kref_get(&sess->se_sess->sess_kref);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (unlikely(!sess)) {
		uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
		    "qla_target(%d): Unable to find wwn login"
		    " (s_id %x:%x:%x), trying to create it manually\n",
		    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

		if (atio->u.raw.entry_count > 1) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
			    "Dropping multy entry cmd %p\n", cmd);
			goto out_term;
		}

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has an extra creation ref. */
		mutex_unlock(&ha->tgt.tgt_mutex);

		if (!sess)
			goto out_term;
	}

	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->tag = atio->u.isp24.exchange_addr;
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	/* Both rddata and wrdata set means a bidirectional command. */
	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	/* Data length follows any additional CDB bytes in the FCP_CMND. */
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	ql_dbg(ql_dbg_tgt, vha, 0xe022,
	    "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
	    cmd, cmd->unpacked_lun, cmd->tag);

	ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
	 */
	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
}
2727
2728/* ha->hardware_lock supposed to be held on entry */
2729static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
2730 struct atio_from_isp *atio)
2731{
2732 struct qla_hw_data *ha = vha->hw;
2733 struct qla_tgt *tgt = ha->tgt.qla_tgt;
2734 struct qla_tgt_cmd *cmd;
2735
2736 if (unlikely(tgt->tgt_stop)) {
2737 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
2738 "New command while device %p is shutting down\n", tgt);
2739 return -EFAULT;
2740 }
2741
2742 cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
2743 if (!cmd) {
2744 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
2745 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
2746 return -ENOMEM;
2747 }
2748
2749 INIT_LIST_HEAD(&cmd->cmd_list);
2750
2751 memcpy(&cmd->atio, atio, sizeof(*atio));
2752 cmd->state = QLA_TGT_STATE_NEW;
2753 cmd->tgt = ha->tgt.qla_tgt;
2754 cmd->vha = vha;
2755
2756 INIT_WORK(&cmd->work, qlt_do_work);
2757 queue_work(qla_tgt_wq, &cmd->work);
2758 return 0;
2759
2760}
2761
/* ha->hardware_lock supposed to be held on entry */
/*
 * Allocate a management command for task-management function @fn on
 * @lun, translate the QLA_TGT_* function code to the core TMR_* value
 * and hand it to the fabric layer.  Returns 0 on success, -ENOMEM on
 * allocation failure, -ENOSYS for unknown functions, -EFAULT when
 * handle_tmr() rejects the request.
 */
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int res;
	uint8_t tmr_func;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	/* Keep the originating IOCB so the NOTIFY ACK can reference it. */
	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;

	switch (fn) {
	case QLA_TGT_CLEAR_ACA:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
		    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_ACA;
		break;

	case QLA_TGT_TARGET_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
		    "qla_target(%d): TARGET_RESET received\n",
		    sess->vha->vp_idx);
		tmr_func = TMR_TARGET_WARM_RESET;
		break;

	case QLA_TGT_LUN_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
		tmr_func = TMR_LUN_RESET;
		break;

	case QLA_TGT_CLEAR_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
		    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_TASK_SET;
		break;

	case QLA_TGT_ABORT_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
		    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_ABORT_TASK_SET;
		break;
#if 0
	case QLA_TGT_ABORT_ALL:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
		    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_ABORT_ALL_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
		    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
		    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
		    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
		tmr_func = 0;
		break;
#endif
	default:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
		    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
		    sess->vha->vp_idx, fn);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -ENOSYS;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
2868
2869/* ha->hardware_lock supposed to be held on entry */
2870static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
2871{
2872 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2873 struct qla_hw_data *ha = vha->hw;
2874 struct qla_tgt *tgt;
2875 struct qla_tgt_sess *sess;
2876 uint32_t lun, unpacked_lun;
2877 int lun_size, fn;
2878
2879 tgt = ha->tgt.qla_tgt;
2880
2881 lun = a->u.isp24.fcp_cmnd.lun;
2882 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
2883 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
2884 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
2885 a->u.isp24.fcp_hdr.s_id);
2886 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2887
2888 if (!sess) {
2889 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
2890 "qla_target(%d): task mgmt fn 0x%x for "
2891 "non-existant session\n", vha->vp_idx, fn);
2892 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
2893 sizeof(struct atio_from_isp));
2894 }
2895
2896 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
2897}
2898
/* ha->hardware_lock supposed to be held on entry */
/*
 * Issue a TMR_ABORT_TASK for the task identified by the immediate-notify
 * IOCB's sequence ID, on @sess.  Returns 0 on success, -ENOMEM on
 * allocation failure, -EFAULT when the fabric layer rejects the TMR.
 */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	uint32_t lun, unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	/* Keep the original IOCB for the later NOTIFY ACK. */
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
2937
2938/* ha->hardware_lock supposed to be held on entry */
2939static int qlt_abort_task(struct scsi_qla_host *vha,
2940 struct imm_ntfy_from_isp *iocb)
2941{
2942 struct qla_hw_data *ha = vha->hw;
2943 struct qla_tgt_sess *sess;
2944 int loop_id;
2945
2946 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
2947
2948 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
2949 if (sess == NULL) {
2950 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
2951 "qla_target(%d): task abort for unexisting "
2952 "session\n", vha->vp_idx);
2953 return qlt_sched_sess_work(ha->tgt.qla_tgt,
2954 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
2955 }
2956
2957 return __qlt_abort_task(vha, iocb, sess);
2958}
2959
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 *
 * Handle an ELS frame delivered via immediate notify on >= 24xx chips.
 * Login/logout-type ELS (PLOGI/FLOGI/PRLI/LOGO/PRLO) and unknown opcodes
 * reset the nexus; PDISC/ADISC ack any pending link-reinit IOCB and
 * request a NOTIFY ACK (return 1).
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	int res = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
	    "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
	    " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
	    iocb->u.isp24.status_subcode);

	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
	case ELS_FLOGI:
	case ELS_PRLI:
	case ELS_LOGO:
	case ELS_PRLO:
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = ha->tgt.qla_tgt;
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		res = 1; /* send notify ack */
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	return res;
}
3006
/*
 * Shift the command's data scatterlist forward by @offset bytes so that a
 * retransmission (SRR) can start from a non-zero relative offset.  A new
 * scatterlist is allocated that begins @offset bytes into the original
 * one, and the command takes ownership of it (cmd->free_sg = 1).
 *
 * Returns 0 on success, negative on error.  NOTE: the unconditional
 * return below (see FIXME) currently rejects every call with a non-zero
 * offset, leaving the remainder of this function unreachable until the
 * code has been tested.
 */
static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
	struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
	size_t first_offset = 0, rem_offset = offset, tmp = 0;
	int i, sg_srr_cnt, bufflen = 0;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
	    "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
	    "cmd->sg_cnt: %u, direction: %d\n",
	    cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);

	/*
	 * FIXME: Reject non zero SRR relative offset until we can test
	 * this code properly.
	 */
	pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
	return -1;

	if (!cmd->sg || !cmd->sg_cnt) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
		    "Missing cmd->sg or zero cmd->sg_cnt in"
		    " qla_tgt_set_data_offset\n");
		return -EINVAL;
	}
	/*
	 * Walk the current cmd->sg list until we locate the new sg_srr_start
	 */
	for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
		    "sg[%d]: %p page: %p, length: %d, offset: %d\n",
		    i, sg, sg_page(sg), sg->length, sg->offset);

		if ((sg->length + tmp) > offset) {
			/* Offset lands inside this entry; rem_offset is the
			 * residue within it. */
			first_offset = rem_offset;
			sg_srr_start = sg;
			ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
			    "Found matching sg[%d], using %p as sg_srr_start, "
			    "and using first_offset: %zu\n", i, sg,
			    first_offset);
			break;
		}
		tmp += sg->length;
		rem_offset -= sg->length;
	}

	if (!sg_srr_start) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
		    "Unable to locate sg_srr_start for offset: %u\n", offset);
		return -EINVAL;
	}
	sg_srr_cnt = (cmd->sg_cnt - i);

	/* Allocate the trimmed scatterlist that will replace cmd->sg. */
	sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
	if (!sg_srr) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
		    "Unable to allocate sgp\n");
		return -ENOMEM;
	}
	sg_init_table(sg_srr, sg_srr_cnt);
	sgp = &sg_srr[0];
	/*
	 * Walk the remaining list for sg_srr_start, mapping to the newly
	 * allocated sg_srr taking first_offset into account.
	 */
	for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
		if (first_offset) {
			/* First entry begins first_offset bytes into the page. */
			sg_set_page(sgp, sg_page(sg),
			    (sg->length - first_offset), first_offset);
			first_offset = 0;
		} else {
			sg_set_page(sgp, sg_page(sg), sg->length, 0);
		}
		bufflen += sgp->length;

		sgp = sg_next(sgp);
		if (!sgp)
			break;
	}

	/* Publish the new list; free_sg marks that we own the allocation. */
	cmd->sg = sg_srr;
	cmd->sg_cnt = sg_srr_cnt;
	cmd->bufflen = bufflen;
	cmd->offset += offset;
	cmd->free_sg = 1;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
	    cmd->sg_cnt);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
	    cmd->bufflen);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
	    cmd->offset);

	/* Sanity checks against arithmetic underflow above. */
	if (cmd->sg_cnt < 0)
		BUG();

	if (cmd->bufflen < 0)
		BUG();

	return 0;
}
3108
3109static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
3110 uint32_t srr_rel_offs, int *xmit_type)
3111{
3112 int res = 0, rel_offs;
3113
3114 rel_offs = srr_rel_offs - cmd->offset;
3115 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
3116 srr_rel_offs, rel_offs);
3117
3118 *xmit_type = QLA_TGT_XMIT_ALL;
3119
3120 if (rel_offs < 0) {
3121 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
3122 "qla_target(%d): SRR rel_offs (%d) < 0",
3123 cmd->vha->vp_idx, rel_offs);
3124 res = -1;
3125 } else if (rel_offs == cmd->bufflen)
3126 *xmit_type = QLA_TGT_XMIT_STATUS;
3127 else if (rel_offs > 0)
3128 res = qlt_set_data_offset(cmd, rel_offs);
3129
3130 return res;
3131}
3132
/*
 * Process one matched SRR pair (immediate notify + CTIO) for a command.
 * Depending on srr_ui, accept the SRR and retransmit status and/or data
 * from the requested relative offset, or reject it and terminate the
 * exchange.  No locks held on entry (thread context); ha->hardware_lock
 * is taken internally around the notify acks.
 */
static void qlt_handle_srr(struct scsi_qla_host *vha,
	struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
{
	struct imm_ntfy_from_isp *ntfy =
	    (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_cmd *cmd = sctio->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;
	int xmit_type = 0, resp = 0;
	uint32_t offset;
	uint16_t srr_ui;

	offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
	srr_ui = ntfy->u.isp24.srr_ui;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
	    cmd, srr_ui);

	switch (srr_ui) {
	case SRR_IU_STATUS:
		/* Initiator only wants the status IU again: accept, resend. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_notify_ack(vha, ntfy,
		    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		xmit_type = QLA_TGT_XMIT_STATUS;
		resp = 1;
		break;
	case SRR_IU_DATA_IN:
		/* Retransmit read data; only valid with GOOD SCSI status. */
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
			    "Unable to process SRR_IU_DATA_IN due to"
			    " missing cmd->sg, state: %d\n", cmd->state);
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02a,
			    "Rejecting SRR_IU_DATA_IN with non GOOD "
			    "scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			/* Trim the SG list to the requested offset, then ack. */
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			resp = 1;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
			    "qla_target(%d): SRR for in data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	case SRR_IU_DATA_OUT:
		/* Initiator resends write data: accept and re-arm the transfer. */
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
			    "Unable to process SRR_IU_DATA_OUT due to"
			    " missing cmd->sg\n");
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02b,
			    "Rejecting SRR_IU_DATA_OUT"
			    " with non GOOD scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			/* Re-issue the data-out transfer unless only status
			 * remains to be sent. */
			if (xmit_type & QLA_TGT_XMIT_DATA)
				qlt_rdy_to_xfer(cmd);
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
			    "qla_target(%d): SRR for out data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
		    "qla_target(%d): Unknown srr_ui value %x",
		    vha->vp_idx, srr_ui);
		goto out_reject;
	}

	/* Transmit response in case of status and data-in cases */
	if (resp)
		qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);

	return;

out_reject:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
	if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		/* NOTE(review): commands still awaiting data are flipped back
		 * to DATA_IN instead of being terminated — presumably so the
		 * normal completion path reclaims them; confirm. */
		cmd->state = QLA_TGT_STATE_DATA_IN;
		dump_stack();
	} else
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
3255
3256static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
3257 struct qla_tgt_srr_imm *imm, int ha_locked)
3258{
3259 struct qla_hw_data *ha = vha->hw;
3260 unsigned long flags = 0;
3261
3262 if (!ha_locked)
3263 spin_lock_irqsave(&ha->hardware_lock, flags);
3264
3265 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
3266 NOTIFY_ACK_SRR_FLAGS_REJECT,
3267 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3268 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3269
3270 if (!ha_locked)
3271 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3272
3273 kfree(imm);
3274}
3275
/*
 * Workqueue handler that pairs queued CTIO SRRs with their matching
 * immediate-notify SRRs (by srr_id) and processes each pair via
 * qlt_handle_srr().  Duplicate immediate notifies for one CTIO are
 * rejected and freed.  The scan restarts from the top of the list after
 * each processed pair, because srr_lock is dropped while handling it.
 */
static void qlt_handle_srr_work(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_tgt_srr_ctio *sctio;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
	    tgt);

restart:
	spin_lock_irqsave(&tgt->srr_lock, flags);
	list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
		struct qla_tgt_srr_imm *imm, *i, *ti;
		struct qla_tgt_cmd *cmd;
		struct se_cmd *se_cmd;

		imm = NULL;
		/* Find the IMM SRR with the same id; reject any duplicates. */
		list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (i->srr_id == sctio->srr_id) {
				list_del(&i->srr_list_entry);
				if (imm) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
					    "qla_target(%d): There must be "
					    "only one IMM SRR per CTIO SRR "
					    "(IMM SRR %p, id %d, CTIO %p\n",
					    vha->vp_idx, i, i->srr_id, sctio);
					qlt_reject_free_srr_imm(tgt->vha, i, 0);
				} else
					imm = i;
			}
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
		    "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
		    sctio->srr_id);

		if (imm == NULL) {
			/* Matching IMM not arrived yet; leave the CTIO queued. */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
			    "Not found matching IMM for SRR CTIO (id %d)\n",
			    sctio->srr_id);
			continue;
		} else
			list_del(&sctio->srr_list_entry);

		/* Drop the lock for the (sleeping) SRR handling below. */
		spin_unlock_irqrestore(&tgt->srr_lock, flags);

		cmd = sctio->cmd;
		/*
		 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
		 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
		 * logic..
		 */
		cmd->offset = 0;
		if (cmd->free_sg) {
			kfree(cmd->sg);
			cmd->sg = NULL;
			cmd->free_sg = 0;
		}
		se_cmd = &cmd->se_cmd;

		cmd->sg_cnt = se_cmd->t_data_nents;
		cmd->sg = se_cmd->t_data_sg;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
		    "SRR cmd %p (se_cmd %p, tag %d, op %x), "
		    "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
		    se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);

		qlt_handle_srr(vha, sctio, imm);

		kfree(imm);
		kfree(sctio);
		/* List may have changed while unlocked: rescan from the top. */
		goto restart;
	}
	spin_unlock_irqrestore(&tgt->srr_lock, flags);
}
3354
/*
 * Queue an immediate-notify SRR for later pairing with its CTIO SRR.
 * If the matching CTIO already arrived (imm_srr_id == ctio_srr_id) the
 * srr_work is scheduled; if it should have but did not, or if allocation
 * fails, the SRR is rejected and any stale CTIO SRRs are terminated.
 * ha->hardware_lock supposed to be held on entry.
 */
static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt_srr_imm *imm;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_srr_ctio *sctio;

	tgt->imm_srr_id++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
	    vha->vp_idx);

	imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
	if (imm != NULL) {
		memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		imm->srr_id = tgt->imm_srr_id;
		list_add_tail(&imm->srr_list_entry,
		    &tgt->srr_imm_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
		    "IMM NTFY SRR %p added (id %d, ui %x)\n",
		    imm, imm->srr_id, iocb->u.isp24.srr_ui);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			/* Counters match: the CTIO SRR should be queued too. */
			list_for_each_entry(sctio, &tgt->srr_ctio_list,
			    srr_list_entry) {
				if (sctio->srr_id == imm->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				/* Counters agree but no CTIO present: the pair
				 * can never complete, so reject this IMM. */
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR CTIO, deleting IMM "
				    "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
				    imm);
				list_del(&imm->srr_list_entry);

				kfree(imm);

				spin_unlock(&tgt->srr_lock);
				goto out_reject;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_ctio *ts;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
		    "qla_target(%d): Unable to allocate SRR IMM "
		    "entry, SRR request will be rejected\n", vha->vp_idx);

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		/* Drop the CTIO SRR this IMM would have paired with. */
		list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
		    srr_list_entry) {
			if (sctio->srr_id == tgt->imm_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
				    "CTIO SRR %p deleted (id %d)\n",
				    sctio, sctio->srr_id);
				list_del(&sctio->srr_list_entry);
				qlt_send_term_exchange(vha, sctio->cmd,
				    &sctio->cmd->atio, 1);
				kfree(sctio);
			}
		}
		spin_unlock(&tgt->srr_lock);
		goto out_reject;
	}

	return;

out_reject:
	qlt_send_notify_ack(vha, iocb, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
}
3443
/*
 * Dispatch an immediate-notify IOCB from the firmware (LIP, logout,
 * ABTS, ELS, SRR, ...).  Handlers that return 0 take ownership of the
 * notify ack; otherwise the ack is sent at the bottom of this function.
 * ha->hardware_lock supposed to be held on entry.  Might drop it, then
 * reacquire.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = ha->tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		/* Ack any previously parked LINK REINIT before saving this one. */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		/* SRR handling sends its own (accept or reject) ack. */
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}
3581
/*
 * Build and issue a status-mode-1 CTIO7 that completes the given ATIO
 * with the supplied SCSI @status (e.g. BUSY / TASK SET FULL) without a
 * backing command.  If no session exists for the initiator S_ID, the
 * exchange is terminated instead.  This function sends busy to ISP 2xxx
 * or 24xx.  ha->hardware_lock supposed to be held on entry.  Might drop
 * it, then reacquire.
 */
static void qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct qla_tgt_sess *sess = NULL;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (!sess) {
		qlt_send_term_exchange(vha, NULL, atio, 1);
		return;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	pkt->entry_count = 1;
	/* Skip completion processing for this handle. */
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	/* S_ID bytes are stored reversed relative to the FCP header. */
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit conformation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
	/* Residual is the FCP_DL field following the additional CDB bytes. */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
		atio->u.isp24.fcp_cmnd.add_cdb_len]);
	/* NOTE(review): scsi_status is little-endian here while
	 * SS_RESIDUAL_UNDER is OR-ed in directly — looks fine on LE hosts;
	 * confirm for big-endian. */
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
}
3640
/*
 * Dispatch one ATIO ring entry from the 24xx firmware: a new SCSI
 * command or task-management function (ATIO_TYPE7), or an immediate
 * notify delivered on the ATIO ring.  On failure to start a command,
 * replies with BUSY / QUEUE FULL or terminates the exchange.
 * ha->hardware_lock supposed to be held on entry; called via callback
 * from qla2xxx.
 */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	ql_dbg(ql_dbg_tgt, vha, 0xe02c,
	    "qla_target(%d): ATIO pkt %p: type %02x count %02x",
	    vha->vp_idx, atio, atio->u.raw.entry_type,
	    atio->u.raw.entry_count);
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	tgt->irq_cmd_count++;	/* balanced by the decrement at the end */

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		ql_dbg(ql_dbg_tgt, vha, 0xe02d,
		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
		    "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
		    atio->u.isp24.fcp_cmnd.rddata,
		    atio->u.isp24.fcp_cmnd.wrdata,
		    atio->u.isp24.fcp_cmnd.add_cdb_len,
		    be32_to_cpu(get_unaligned((uint32_t *)
			&atio->u.isp24.fcp_cmnd.add_cdb[
			atio->u.isp24.fcp_cmnd.add_cdb_len])),
		    atio->u.isp24.fcp_hdr.s_id[0],
		    atio->u.isp24.fcp_hdr.s_id[1],
		    atio->u.isp24.fcp_hdr.s_id[2]);

		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe058,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			break;
		}
		/* Zero task_mgmt_flags means a normal SCSI command. */
		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
			rc = qlt_handle_cmd_for_atio(vha, atio);
		else
			rc = qlt_handle_task_mgmt(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
3743
/*
 * Dispatch one response-ring entry relevant to target mode: CTIO
 * completions, 2xxx ACCEPT/CONTINUE TGT IO entries, immediate notifies,
 * notify-ack and ABTS traffic.  ha->hardware_lock supposed to be held on
 * entry; called via callback from qla2xxx.
 */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe02f,
	    "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
	    "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
	    pkt->entry_count, pkt->entry_status, pkt->handle);

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	tgt->irq_cmd_count++;	/* balanced by the decrement at the end */

	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
		    vha->vp_idx);
		/* entry_status is folded into the high 16 bits of status. */
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		/* 2xxx-series: new command delivered on the response ring. */
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		ql_dbg(ql_dbg_tgt, vha, 0xe031,
		    "ACCEPT_TGT_IO instance %d status %04x "
		    "lun %04x read/write %d data_length %04x "
		    "target_id %02x rx_id %04x\n ", vha->vp_idx,
		    le16_to_cpu(atio->u.isp2x.status),
		    le16_to_cpu(atio->u.isp2x.lun),
		    atio->u.isp2x.execution_codes,
		    le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
		    atio), atio->u.isp2x.rx_id);
		if (atio->u.isp2x.status !=
		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe032,
		    "FCP CDB: 0x%02x, sizeof(cdb): %lu",
		    atio->u.isp2x.cdb[0], (unsigned long
		    int)sizeof(atio->u.isp2x.cdb));

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe033,
		    "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
				(struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: aborted
					 * exchange not terminated, i.e.
					 * response for the aborted command was
					 * sent between the abort request was
					 * received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitely
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
3944
/*
 * Target-mode hook for firmware async (mailbox) events: system errors,
 * loop up/down, LIP, RSCN and port updates.  Mostly logs; LOOP_UP also
 * releases a deferred LINK REINIT ack.  ha->hardware_lock supposed to be
 * held on entry.  Might drop it, then reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int login_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	/* These two events are not relevant on 2100-series adapters. */
	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	tgt->irq_cmd_count++;	/* balanced by the decrement at the end */

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occured", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occured "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		/* Link is back: ack any LINK REINIT that was held pending. */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occured "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occured: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
		    "qla_target(%d): Async event %#x occured: "
		    "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;
	}

	tgt->irq_cmd_count--;
}
4046
4047static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4048 uint16_t loop_id)
4049{
4050 fc_port_t *fcport;
4051 int rc;
4052
4053 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
4054 if (!fcport) {
4055 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
4056 "qla_target(%d): Allocation of tmp FC port failed",
4057 vha->vp_idx);
4058 return NULL;
4059 }
4060
4061 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
4062
4063 fcport->loop_id = loop_id;
4064
4065 rc = qla2x00_get_port_database(vha, fcport, 0);
4066 if (rc != QLA_SUCCESS) {
4067 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
4068 "qla_target(%d): Failed to retrieve fcport "
4069 "information -- get_port_database() returned %x "
4070 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
4071 kfree(fcport);
4072 return NULL;
4073 }
4074
4075 return fcport;
4076}
4077
4078/* Must be called under tgt_mutex */
4079static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
4080 uint8_t *s_id)
4081{
4082 struct qla_hw_data *ha = vha->hw;
4083 struct qla_tgt_sess *sess = NULL;
4084 fc_port_t *fcport = NULL;
4085 int rc, global_resets;
4086 uint16_t loop_id = 0;
4087
4088retry:
4089 global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
4090
4091 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
4092 if (rc != 0) {
4093 if ((s_id[0] == 0xFF) &&
4094 (s_id[1] == 0xFC)) {
4095 /*
4096 * This is Domain Controller, so it should be
4097 * OK to drop SCSI commands from it.
4098 */
4099 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
4100 "Unable to find initiator with S_ID %x:%x:%x",
4101 s_id[0], s_id[1], s_id[2]);
4102 } else
4103 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
4104 "qla_target(%d): Unable to find "
4105 "initiator with S_ID %x:%x:%x",
4106 vha->vp_idx, s_id[0], s_id[1],
4107 s_id[2]);
4108 return NULL;
4109 }
4110
4111 fcport = qlt_get_port_database(vha, loop_id);
4112 if (!fcport)
4113 return NULL;
4114
4115 if (global_resets !=
4116 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
4117 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
4118 "qla_target(%d): global reset during session discovery "
4119 "(counter was %d, new %d), retrying", vha->vp_idx,
4120 global_resets,
4121 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
4122 goto retry;
4123 }
4124
4125 sess = qlt_create_sess(vha, fcport, true);
4126
4127 kfree(fcport);
4128 return sess;
4129}
4130
4131static void qlt_abort_work(struct qla_tgt *tgt,
4132 struct qla_tgt_sess_work_param *prm)
4133{
4134 struct scsi_qla_host *vha = tgt->vha;
4135 struct qla_hw_data *ha = vha->hw;
4136 struct qla_tgt_sess *sess = NULL;
4137 unsigned long flags;
4138 uint32_t be_s_id;
4139 uint8_t s_id[3];
4140 int rc;
4141
4142 spin_lock_irqsave(&ha->hardware_lock, flags);
4143
4144 if (tgt->tgt_stop)
4145 goto out_term;
4146
4147 s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
4148 s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
4149 s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
4150
4151 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4152 (unsigned char *)&be_s_id);
4153 if (!sess) {
4154 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4155
4156 mutex_lock(&ha->tgt.tgt_mutex);
4157 sess = qlt_make_local_sess(vha, s_id);
4158 /* sess has got an extra creation ref */
4159 mutex_unlock(&ha->tgt.tgt_mutex);
4160
4161 spin_lock_irqsave(&ha->hardware_lock, flags);
4162 if (!sess)
4163 goto out_term;
4164 } else {
4165 kref_get(&sess->se_sess->sess_kref);
4166 }
4167
4168 if (tgt->tgt_stop)
4169 goto out_term;
4170
4171 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
4172 if (rc != 0)
4173 goto out_term;
4174 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4175
4176 ha->tgt.tgt_ops->put_sess(sess);
4177 return;
4178
4179out_term:
4180 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
4181 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4182 if (sess)
4183 ha->tgt.tgt_ops->put_sess(sess);
4184}
4185
4186static void qlt_tmr_work(struct qla_tgt *tgt,
4187 struct qla_tgt_sess_work_param *prm)
4188{
4189 struct atio_from_isp *a = &prm->tm_iocb2;
4190 struct scsi_qla_host *vha = tgt->vha;
4191 struct qla_hw_data *ha = vha->hw;
4192 struct qla_tgt_sess *sess = NULL;
4193 unsigned long flags;
4194 uint8_t *s_id = NULL; /* to hide compiler warnings */
4195 int rc;
4196 uint32_t lun, unpacked_lun;
4197 int lun_size, fn;
4198 void *iocb;
4199
4200 spin_lock_irqsave(&ha->hardware_lock, flags);
4201
4202 if (tgt->tgt_stop)
4203 goto out_term;
4204
4205 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
4206 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
4207 if (!sess) {
4208 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4209
4210 mutex_lock(&ha->tgt.tgt_mutex);
4211 sess = qlt_make_local_sess(vha, s_id);
4212 /* sess has got an extra creation ref */
4213 mutex_unlock(&ha->tgt.tgt_mutex);
4214
4215 spin_lock_irqsave(&ha->hardware_lock, flags);
4216 if (!sess)
4217 goto out_term;
4218 } else {
4219 kref_get(&sess->se_sess->sess_kref);
4220 }
4221
4222 iocb = a;
4223 lun = a->u.isp24.fcp_cmnd.lun;
4224 lun_size = sizeof(lun);
4225 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4226 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
4227
4228 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4229 if (rc != 0)
4230 goto out_term;
4231 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4232
4233 ha->tgt.tgt_ops->put_sess(sess);
4234 return;
4235
4236out_term:
4237 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
4238 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4239 if (sess)
4240 ha->tgt.tgt_ops->put_sess(sess);
4241}
4242
/*
 * Workqueue handler that drains tgt->sess_works_list, dispatching each
 * queued item to qlt_abort_work() or qlt_tmr_work() according to
 * prm->type, and freeing the item afterwards.
 */
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		/*
		 * The handlers below sleep (mutex_lock) and take
		 * hardware_lock, so the list lock must be dropped around
		 * the dispatch.
		 */
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}
4283
/*
 * Allocate and initialize the struct qla_tgt for a physical HBA, hook
 * it into ha->tgt.qla_tgt and the global qla_tgt_glist, and advertise
 * MODE_TARGET on the SCSI host template.
 *
 * Returns 0 on success or -ENOMEM.  Must be called under
 * tgt_host_action_mutex.
 */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p)", base_vha->host_no, ha);

	/* A target must not already be registered on this HBA. */
	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	/* NOTE(review): cast silences the work-fn prototype mismatch for
	 * qlt_del_sess_work_fn; relies on delayed_work embedding work. */
	INIT_DELAYED_WORK(&tgt->sess_del_work,
	    (void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	ha->tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	/* Make the target visible to qlt_lport_register(). */
	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}
4340
/*
 * Unhook the HBA's qla_tgt from the global list and release it.
 * A no-op (returning 0) if no target was ever registered.
 *
 * Must be called under tgt_host_action_mutex.
 */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!ha->tgt.qla_tgt)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_del(&ha->tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(ha->tgt.qla_tgt);

	return 0;
}
4357
4358static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
4359 unsigned char *b)
4360{
4361 int i;
4362
4363 pr_debug("qla2xxx HW vha->node_name: ");
4364 for (i = 0; i < WWN_SIZE; i++)
4365 pr_debug("%02x ", vha->node_name[i]);
4366 pr_debug("\n");
4367 pr_debug("qla2xxx HW vha->port_name: ");
4368 for (i = 0; i < WWN_SIZE; i++)
4369 pr_debug("%02x ", vha->port_name[i]);
4370 pr_debug("\n");
4371
4372 pr_debug("qla2xxx passed configfs WWPN: ");
4373 put_unaligned_be64(wwpn, b);
4374 for (i = 0; i < WWN_SIZE; i++)
4375 pr_debug("%02x ", b[i]);
4376 pr_debug("\n");
4377}
4378
/**
 * qlt_lport_register - register lport with external module
 *
 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
 * @wwpn: Passed FC target WWPN
 * @callback: lport initialization callback for tcm_qla2xxx code
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 */
/*
 * Walk the global target list looking for an HBA whose port name
 * matches @wwpn, install the caller's ops/lport pointer on it and run
 * @callback.  Returns the callback's result, or -ENODEV if no matching
 * unclaimed HBA is found.  On callback failure the ops/lport pointers
 * are rolled back, but the scsi_host reference taken for the match is
 * intentionally kept until qlt_lport_deregister().
 */
int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
	int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		/* Skip HBAs already claimed by another fabric module. */
		if (ha->tgt.tgt_ops != NULL)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		/* active_mode is changed under hardware_lock elsewhere. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, wwpn, b);

		/* Not the requested port: drop the host ref, keep looking. */
		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		/*
		 * Setup passed parameters ahead of invoking callback
		 */
		ha->tgt.tgt_ops = qla_tgt_ops;
		ha->tgt.target_lport_ptr = target_lport_ptr;
		rc = (*callback)(vha);
		if (rc != 0) {
			ha->tgt.tgt_ops = NULL;
			ha->tgt.target_lport_ptr = NULL;
		}
		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
4452
/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
/*
 * Undo qlt_lport_register(): clear the fabric-module hooks and release
 * the scsi_host reference that registration took.
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	ha->tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);
4473
4474/* Must be called under HW lock */
4475void qlt_set_mode(struct scsi_qla_host *vha)
4476{
4477 struct qla_hw_data *ha = vha->hw;
4478
4479 switch (ql2x_ini_mode) {
4480 case QLA2XXX_INI_MODE_DISABLED:
4481 case QLA2XXX_INI_MODE_EXCLUSIVE:
4482 vha->host->active_mode = MODE_TARGET;
4483 break;
4484 case QLA2XXX_INI_MODE_ENABLED:
4485 vha->host->active_mode |= MODE_TARGET;
4486 break;
4487 default:
4488 break;
4489 }
4490
4491 if (ha->tgt.ini_mode_force_reverse)
4492 qla_reverse_ini_mode(vha);
4493}
4494
4495/* Must be called under HW lock */
4496void qlt_clear_mode(struct scsi_qla_host *vha)
4497{
4498 struct qla_hw_data *ha = vha->hw;
4499
4500 switch (ql2x_ini_mode) {
4501 case QLA2XXX_INI_MODE_DISABLED:
4502 vha->host->active_mode = MODE_UNKNOWN;
4503 break;
4504 case QLA2XXX_INI_MODE_EXCLUSIVE:
4505 vha->host->active_mode = MODE_INITIATOR;
4506 break;
4507 case QLA2XXX_INI_MODE_ENABLED:
4508 vha->host->active_mode &= ~MODE_TARGET;
4509 break;
4510 default:
4511 break;
4512 }
4513
4514 if (ha->tgt.ini_mode_force_reverse)
4515 qla_reverse_ini_mode(vha);
4516}
4517
/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * Mark the target as running, enable MODE_TARGET on the host, then
 * reset the adapter (ISP_ABORT) so it comes back up with target mode
 * enabled, and wait for it to be online again.
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Trigger a full ISP reset via the DPC thread and wait for it. */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}
EXPORT_SYMBOL(qlt_enable_vha);
4548
/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter: clear MODE_TARGET from
 * the host, trigger an ISP reset via the DPC thread and wait for the
 * HBA to come back online.
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}
4577
/*
 * Called from qla_init.c:qla24xx_vport_create() contex to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	mutex_init(&ha->tgt.tgt_mutex);
	mutex_init(&ha->tgt.tgt_host_action_mutex);

	/* New vport starts with target mode off until explicitly enabled. */
	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
}
4602
4603void
4604qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
4605{
4606 /*
4607 * FC-4 Feature bit 0 indicates target functionality to the name server.
4608 */
4609 if (qla_tgt_mode_enabled(vha)) {
4610 if (qla_ini_mode_enabled(vha))
4611 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
4612 else
4613 ct_req->req.rff_id.fc4_feature = BIT_0;
4614 } else if (qla_ini_mode_enabled(vha)) {
4615 ct_req->req.rff_id.fc4_feature = BIT_1;
4616 }
4617}
4618
4619/*
4620 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
4621 * @ha: HA context
4622 *
4623 * Beginning of ATIO ring has initialization control block already built
4624 * by nvram config routine.
4625 *
4626 * Returns 0 on success.
4627 */
4628void
4629qlt_init_atio_q_entries(struct scsi_qla_host *vha)
4630{
4631 struct qla_hw_data *ha = vha->hw;
4632 uint16_t cnt;
4633 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
4634
4635 if (!qla_tgt_mode_enabled(vha))
4636 return;
4637
4638 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
4639 pkt->u.raw.signature = ATIO_PROCESSED;
4640 pkt++;
4641 }
4642
4643}
4644
/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @ha: SCSI driver HA context
 *
 * Consume ring entries until one still carrying the ATIO_PROCESSED
 * signature is reached, dispatch each packet to the per-vport handler,
 * re-stamp consumed entries and publish the new out-index to the chip.
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		/* A packet may span multiple ring entries. */
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		/* Advance past all entries of this packet, wrapping the
		 * ring, and mark each one consumed. */
		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		/* Order signature writes before the index update below. */
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
}
4683
/*
 * Reset the ATIO queue in/out pointers in the chip registers during
 * ring (re)configuration.  The multiqueue variant is not implemented
 * yet (see FIXME); the #if 0 block sketches the intended register
 * accesses.
 */
void
qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
{
	struct qla_hw_data *ha = vha->hw;

/* FIXME: atio_q in/out for ha->mqenable=1..? */
	if (ha->mqenable) {
#if 0
		WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
		RD_REG_DWORD(&reg->isp25mq.atio_q_out);
#endif
	} else {
		/* Setup APTIO registers for target mode */
		WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
		/* Read back to flush the posted writes. */
		RD_REG_DWORD(&reg->isp24.atio_q_out);
	}
}
4703
/*
 * Adjust the 24xx NVRAM image for target mode before it is handed to
 * the firmware: when target mode is enabled, save the stock option
 * words once and overlay the target-specific bits; when disabled,
 * restore whatever was saved.
 */
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		/* Max out the exchange count for target workloads. */
		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		/* NOTE(review): duplicate comment — host_p BIT_10 presumably
		 * has its own meaning; confirm against firmware docs. */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		/* Initiator-only: nothing more to tweak. */
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}
4770
/*
 * Second-stage init-control-block fixup: if the user configured a
 * target node name, copy it in and set the option bit (BIT_14) that
 * tells the firmware to use the provided node name.
 */
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}
4782
4783int
4784qlt_24xx_process_response_error(struct scsi_qla_host *vha,
4785 struct sts_entry_24xx *pkt)
4786{
4787 switch (pkt->entry_type) {
4788 case ABTS_RECV_24XX:
4789 case ABTS_RESP_24XX:
4790 case CTIO_TYPE7:
4791 case NOTIFY_ACK_TYPE:
4792 return 1;
4793 default:
4794 return 0;
4795 }
4796}
4797
/*
 * Adjust a vport-config IOCB for the configured modes.  NOTE(review):
 * options_idx1 bit semantics are firmware-defined; clearing BIT_5
 * appears to enable target mode and clearing BIT_4 to disable
 * initiator mode — confirm against the ISP24xx IOCB reference.
 */
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}
4808
/*
 * Early probe-time target setup for the physical HBA: initialize the
 * target mutexes and start with target mode cleared.  No-op when the
 * driver is built/loaded without target mode.
 */
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	mutex_init(&ha->tgt.tgt_mutex);
	mutex_init(&ha->tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}
4819
4820int
4821qlt_mem_alloc(struct qla_hw_data *ha)
4822{
4823 if (!QLA_TGT_MODE_ENABLED())
4824 return 0;
4825
4826 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
4827 MAX_MULTI_ID_FABRIC, GFP_KERNEL);
4828 if (!ha->tgt.tgt_vp_map)
4829 return -ENOMEM;
4830
4831 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
4832 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
4833 &ha->tgt.atio_dma, GFP_KERNEL);
4834 if (!ha->tgt.atio_ring) {
4835 kfree(ha->tgt.tgt_vp_map);
4836 return -ENOMEM;
4837 }
4838 return 0;
4839}
4840
4841void
4842qlt_mem_free(struct qla_hw_data *ha)
4843{
4844 if (!QLA_TGT_MODE_ENABLED())
4845 return;
4846
4847 if (ha->tgt.atio_ring) {
4848 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
4849 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
4850 ha->tgt.atio_dma);
4851 }
4852 kfree(ha->tgt.tgt_vp_map);
4853}
4854
/* vport_slock to be held by the caller */
/*
 * Maintain the tgt_vp_map lookup table: SET_/RESET_VP_IDX bind or
 * clear the vha pointer for a vport index, SET_/RESET_AL_PA bind or
 * clear the vport index for an arbitrated-loop physical address.
 */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}
4877
4878static int __init qlt_parse_ini_mode(void)
4879{
4880 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
4881 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
4882 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
4883 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
4884 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
4885 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
4886 else
4887 return false;
4888
4889 return true;
4890}
4891
/*
 * Module-init hook for the target add-on: parse the qlini_mode
 * parameter, then (if target mode is compiled in) create the command
 * and management-command caches, the management-command mempool and
 * the target workqueue.
 *
 * Returns 1 when initiator mode is being disabled, 0 on plain success,
 * negative errno on failure (goto-cleanup unwinds in reverse order).
 */
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
	    NULL);
	if (!qla_tgt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06c,
		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		ret = -ENOMEM;
		goto out;
	}

	/* Pre-reserve 25 mgmt commands so TMFs survive memory pressure. */
	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
out:
	kmem_cache_destroy(qla_tgt_cmd_cachep);
	return ret;
}
4953
/*
 * Module-exit counterpart of qlt_init(): tear down the workqueue,
 * mempool and caches in reverse order of creation.
 */
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	kmem_cache_destroy(qla_tgt_cmd_cachep);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
new file mode 100644
index 000000000000..170af1571214
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -0,0 +1,1003 @@
1/*
2 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
3 * Copyright (C) 2004 - 2005 Leonid Stoljar
4 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
5 * Copyright (C) 2007 - 2010 ID7 Ltd.
6 *
7 * Forward port and refactoring to modern qla2xxx and target/configfs
8 *
9 * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * Additional file for the target driver support.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version 2
16 * of the License, or (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 */
23/*
24 * This is the global def file that is useful for including from the
25 * target portion.
26 */
27
28#ifndef __QLA_TARGET_H
29#define __QLA_TARGET_H
30
31#include "qla_def.h"
32
33/*
34 * Must be changed on any change in any initiator visible interfaces or
35 * data in the target add-on
36 */
37#define QLA2XXX_TARGET_MAGIC 269
38
39/*
40 * Must be changed on any change in any target visible interfaces or
41 * data in the initiator
42 */
43#define QLA2XXX_INITIATOR_MAGIC 57222
44
45#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive"
46#define QLA2XXX_INI_MODE_STR_DISABLED "disabled"
47#define QLA2XXX_INI_MODE_STR_ENABLED "enabled"
48
49#define QLA2XXX_INI_MODE_EXCLUSIVE 0
50#define QLA2XXX_INI_MODE_DISABLED 1
51#define QLA2XXX_INI_MODE_ENABLED 2
52
53#define QLA2XXX_COMMAND_COUNT_INIT 250
54#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
55
56/*
57 * Used to mark which completion handles (for RIO Status's) are for CTIO's
58 * vs. regular (non-target) info. This is checked for in
59 * qla2x00_process_response_queue() to see if a handle coming back in a
60 * multi-complete should come to the tgt driver or be handled there by qla2xxx
61 */
62#define CTIO_COMPLETION_HANDLE_MARK BIT_29
63#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
64#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
65#endif
66#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
67
68/* Used to mark CTIO as intermediate */
69#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30
70
71#ifndef OF_SS_MODE_0
72/*
73 * ISP target entries - Flags bit definitions.
74 */
75#define OF_SS_MODE_0 0
76#define OF_SS_MODE_1 1
77#define OF_SS_MODE_2 2
78#define OF_SS_MODE_3 3
79
80#define OF_EXPL_CONF BIT_5 /* Explicit Confirmation Requested */
81#define OF_DATA_IN BIT_6 /* Data in to initiator */
82 /* (data from target to initiator) */
83#define OF_DATA_OUT BIT_7 /* Data out from initiator */
84 /* (data from initiator to target) */
85#define OF_NO_DATA (BIT_7 | BIT_6)
86#define OF_INC_RC BIT_8 /* Increment command resource count */
87#define OF_FAST_POST BIT_9 /* Enable mailbox fast posting. */
88#define OF_CONF_REQ BIT_13 /* Confirmation Requested */
89#define OF_TERM_EXCH BIT_14 /* Terminate exchange */
90#define OF_SSTS BIT_15 /* Send SCSI status */
91#endif
92
93#ifndef QLA_TGT_DATASEGS_PER_CMD32
94#define QLA_TGT_DATASEGS_PER_CMD32 3
95#define QLA_TGT_DATASEGS_PER_CONT32 7
96#define QLA_TGT_MAX_SG32(ql) \
97 (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \
98 QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0)
99
100#define QLA_TGT_DATASEGS_PER_CMD64 2
101#define QLA_TGT_DATASEGS_PER_CONT64 5
102#define QLA_TGT_MAX_SG64(ql) \
103 (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \
104 QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0)
105#endif
106
107#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX
108#define QLA_TGT_DATASEGS_PER_CMD_24XX 1
109#define QLA_TGT_DATASEGS_PER_CONT_24XX 5
110#define QLA_TGT_MAX_SG_24XX(ql) \
111 (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
112 QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
113#endif
114#endif
115
116#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
117 ? le16_to_cpu((iocb)->u.isp2x.target.extended) \
118 : (uint16_t)(iocb)->u.isp2x.target.id.standard)
119
120#ifndef IMMED_NOTIFY_TYPE
121#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
122/*
123 * ISP queue - immediate notify entry structure definition.
124 * This is sent by the ISP to the Target driver.
125 * This IOCB would have report of events sent by the
126 * initiator, that needs to be handled by the target
127 * driver immediately.
128 */
/*
 * Hardware-defined wire layout shared with the ISP firmware — do not
 * reorder, resize or repack fields.  u.isp2x is the pre-24xx format,
 * u.isp24 the 24xx+ format.
 */
struct imm_ntfy_from_isp {
	uint8_t	 entry_type;		    /* Entry type. */
	uint8_t	 entry_count;		    /* Entry count. */
	uint8_t	 sys_define;		    /* System defined. */
	uint8_t	 entry_status;		    /* Entry Status. */
	union {
		struct {
			uint32_t sys_define_2; /* System defined. */
			target_id_t target;
			uint16_t lun;
			uint8_t  target_id;
			uint8_t  reserved_1;
			uint16_t status_modifier;
			uint16_t status;
			uint16_t task_flags;
			uint16_t seq_id;
			uint16_t srr_rx_id;
			uint32_t srr_rel_offs;
			/* SRR information unit type — one of SRR_IU_*. */
			uint16_t srr_ui;
#define SRR_IU_DATA_IN	0x1
#define SRR_IU_DATA_OUT	0x5
#define SRR_IU_STATUS	0x7
			uint16_t srr_ox_id;
			uint8_t reserved_2[28];
		} isp2x;
		struct {
			uint32_t reserved;
			uint16_t nport_handle;
			uint16_t reserved_2;
			uint16_t flags;
#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO   BIT_1
#define NOTIFY24XX_FLAGS_PUREX_IOCB     BIT_0
			uint16_t srr_rx_id;
			uint16_t status;
			uint8_t  status_subcode;
			uint8_t  reserved_3;
			uint32_t exchange_address;
			uint32_t srr_rel_offs;
			uint16_t srr_ui;
			uint16_t srr_ox_id;
			uint8_t  reserved_4[19];
			uint8_t  vp_index;
			uint32_t reserved_5;
			uint8_t  port_id[3];
			uint8_t  reserved_6;
		} isp24;
	} u;
	uint16_t reserved_7;
	uint16_t ox_id;
} __packed;
179#endif
180
181#ifndef NOTIFY_ACK_TYPE
182#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */
183/*
184 * ISP queue - notify acknowledge entry structure definition.
185 * This is sent to the ISP from the target driver.
186 */
/*
 * Hardware-defined wire layout sent to the ISP firmware — do not
 * reorder, resize or repack fields.  u.isp2x is the pre-24xx format,
 * u.isp24 the 24xx+ format.
 */
struct nack_to_isp {
	uint8_t	 entry_type;		    /* Entry type. */
	uint8_t	 entry_count;		    /* Entry count. */
	uint8_t	 sys_define;		    /* System defined. */
	uint8_t	 entry_status;		    /* Entry Status. */
	union {
		struct {
			uint32_t sys_define_2; /* System defined. */
			target_id_t target;
			uint8_t	 target_id;
			uint8_t	 reserved_1;
			uint16_t flags;
			uint16_t resp_code;
			uint16_t status;
			uint16_t task_flags;
			uint16_t seq_id;
			uint16_t srr_rx_id;
			uint32_t srr_rel_offs;
			uint16_t srr_ui;
			/* SRR accept/reject — see NOTIFY_ACK_SRR_FLAGS_*. */
			uint16_t srr_flags;
			uint16_t srr_reject_code;
			uint8_t  srr_reject_vendor_uniq;
			uint8_t  srr_reject_code_expl;
			uint8_t  reserved_2[24];
		} isp2x;
		struct {
			uint32_t handle;
			uint16_t nport_handle;
			uint16_t reserved_1;
			uint16_t flags;
			uint16_t srr_rx_id;
			uint16_t status;
			uint8_t  status_subcode;
			uint8_t  reserved_3;
			uint32_t exchange_address;
			uint32_t srr_rel_offs;
			uint16_t srr_ui;
			uint16_t srr_flags;
			uint8_t  reserved_4[19];
			uint8_t  vp_index;
			uint8_t  srr_reject_vendor_uniq;
			uint8_t  srr_reject_code_expl;
			uint8_t  srr_reject_code;
			uint8_t  reserved_5[5];
		} isp24;
	} u;
	uint8_t  reserved[2];
	uint16_t ox_id;
} __packed;
236#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
237#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
238
239#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
240
241#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0
242#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
243
244#define NOTIFY_ACK_SUCCESS 0x01
245#endif
246
247#ifndef ACCEPT_TGT_IO_TYPE
248#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
249#endif
250
251#ifndef CONTINUE_TGT_IO_TYPE
252#define CONTINUE_TGT_IO_TYPE 0x17
253/*
254 * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure.
255 * This structure is sent to the ISP 2xxx from target driver.
256 */
struct ctio_to_2xxx {
	uint8_t	 entry_type;		/* Entry type. */
	uint8_t	 entry_count;		/* Entry count. */
	uint8_t	 sys_define;		/* System defined. */
	uint8_t	 entry_status;		/* Entry Status. */
	uint32_t handle;		/* System defined handle */
	target_id_t target;
	uint16_t rx_id;
	uint16_t flags;
	uint16_t status;
	uint16_t timeout;		/* 0 = 30 seconds, 0xFFFF = disable */
	uint16_t dseg_count;		/* Data segment count. */
	uint32_t relative_offset;
	uint32_t residual;
	uint16_t reserved_1[3];
	uint16_t scsi_status;
	uint32_t transfer_length;
	/* Up to three inline 32-bit address/length data segments. */
	uint32_t dseg_0_address;	/* Data segment 0 address. */
	uint32_t dseg_0_length;		/* Data segment 0 length. */
	uint32_t dseg_1_address;	/* Data segment 1 address. */
	uint32_t dseg_1_length;		/* Data segment 1 length. */
	uint32_t dseg_2_address;	/* Data segment 2 address. */
	uint32_t dseg_2_length;		/* Data segment 2 length. */
} __packed;
281#define ATIO_PATH_INVALID 0x07
282#define ATIO_CANT_PROV_CAP 0x16
283#define ATIO_CDB_VALID 0x3D
284
285#define ATIO_EXEC_READ BIT_1
286#define ATIO_EXEC_WRITE BIT_0
287#endif
288
289#ifndef CTIO_A64_TYPE
290#define CTIO_A64_TYPE 0x1F
291#define CTIO_SUCCESS 0x01
292#define CTIO_ABORTED 0x02
293#define CTIO_INVALID_RX_ID 0x08
294#define CTIO_TIMEOUT 0x0B
295#define CTIO_LIP_RESET 0x0E
296#define CTIO_TARGET_RESET 0x17
297#define CTIO_PORT_UNAVAILABLE 0x28
298#define CTIO_PORT_LOGGED_OUT 0x29
299#define CTIO_PORT_CONF_CHANGED 0x2A
300#define CTIO_SRR_RECEIVED 0x45
301#endif
302
303#ifndef CTIO_RET_TYPE
304#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
305#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
306
/*
 * FC frame header as carried inside ATIO type-7 entries
 * (see struct atio_from_isp.u.isp24).
 */
struct fcp_hdr {
	uint8_t  r_ctl;
	uint8_t  d_id[3];
	uint8_t  cs_ctl;
	uint8_t  s_id[3];
	uint8_t  type;
	uint8_t  f_ctl[3];
	uint8_t  seq_id;
	uint8_t  df_ctl;
	uint16_t seq_cnt;
	uint16_t ox_id;
	uint16_t rx_id;
	uint32_t parameter;
} __packed;
321
/*
 * FC frame header variant with byte-swapped field order within each
 * 32-bit word relative to struct fcp_hdr; used by the ABTS IOCBs
 * (see struct abts_recv_from_24xx / struct abts_resp_to_24xx).
 */
struct fcp_hdr_le {
	uint8_t  d_id[3];
	uint8_t  r_ctl;
	uint8_t  s_id[3];
	uint8_t  cs_ctl;
	uint8_t  f_ctl[3];
	uint8_t  type;
	uint16_t seq_cnt;
	uint8_t  df_ctl;
	uint8_t  seq_id;
	uint16_t rx_id;
	uint16_t ox_id;
	uint32_t parameter;
} __packed;
336
337#define F_CTL_EXCH_CONTEXT_RESP BIT_23
338#define F_CTL_SEQ_CONTEXT_RESIP BIT_22
339#define F_CTL_LAST_SEQ BIT_20
340#define F_CTL_END_SEQ BIT_19
341#define F_CTL_SEQ_INITIATIVE BIT_16
342
343#define R_CTL_BASIC_LINK_SERV 0x80
344#define R_CTL_B_ACC 0x4
345#define R_CTL_B_RJT 0x5
346
/* FCP_CMND IU payload delivered inside an ATIO type-7 entry. */
struct atio7_fcp_cmnd {
	uint64_t lun;
	uint8_t  cmnd_ref;
	uint8_t  task_attr:3;
	uint8_t  reserved:5;
	uint8_t  task_mgmt_flags;	/* FCP_CMND_TASK_MGMT_* below */
#define FCP_CMND_TASK_MGMT_CLEAR_ACA		6
#define FCP_CMND_TASK_MGMT_TARGET_RESET		5
#define FCP_CMND_TASK_MGMT_LU_RESET		4
#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET	2
#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET	1
	uint8_t  wrdata:1;
	uint8_t  rddata:1;
	uint8_t  add_cdb_len:6;		/* length of add_cdb, in 4-byte words */
	uint8_t  cdb[16];
	/*
	 * add_cdb is optional and can be absent from struct atio7_fcp_cmnd.
	 * Size 4 only to make sizeof(struct atio7_fcp_cmnd) be as expected by
	 * BUILD_BUG_ON in qlt_init().
	 */
	uint8_t  add_cdb[4];
	/* uint32_t data_length; */
} __packed;
370
371/*
372 * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure.
373 * This is sent from the ISP to the target driver.
374 */
struct atio_from_isp {
	union {
		/* Layout used by ISP 2xxx chips. */
		struct {
			uint16_t entry_hdr;
			uint8_t  sys_define;   /* System defined. */
			uint8_t  entry_status; /* Entry Status.   */
			uint32_t sys_define_2; /* System defined. */
			target_id_t target;
			uint16_t rx_id;
			uint16_t flags;
			uint16_t status;
			uint8_t  command_ref;
			uint8_t  task_codes;
			uint8_t  task_flags;
			uint8_t  execution_codes;
			uint8_t  cdb[MAX_CMDSZ];
			uint32_t data_length;
			uint16_t lun;
			uint8_t  initiator_port_name[WWN_SIZE]; /* on qla23xx */
			uint16_t reserved_32[6];
			uint16_t ox_id;
		} isp2x;
		/* Layout used by ISP 24xx chips (ATIO type 7). */
		struct {
			uint16_t entry_hdr;
			uint8_t  fcp_cmnd_len_low;
			uint8_t  fcp_cmnd_len_high:4;
			uint8_t  attr:4;
			uint32_t exchange_addr;
#define ATIO_EXCHANGE_ADDRESS_UNKNOWN	0xFFFFFFFF
			struct fcp_hdr fcp_hdr;
			struct atio7_fcp_cmnd fcp_cmnd;
		} isp24;
		/* Raw byte-level view of the entry. */
		struct {
			uint8_t  entry_type;	/* Entry type. */
			uint8_t  entry_count;	/* Entry count. */
			uint8_t  data[58];
			uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD		/* Signature */
		} raw;
	} u;
} __packed;
416
417#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
418
419/*
 * ISP queue - Continue Target I/O (CTIO) type 7 entry (for 24xx) structure.
421 * This structure is sent to the ISP 24xx from the target driver.
422 */
423
struct ctio7_to_24xx {
	uint8_t	 entry_type;		    /* Entry type. */
	uint8_t	 entry_count;		    /* Entry count. */
	uint8_t	 sys_define;		    /* System defined. */
	uint8_t	 entry_status;		    /* Entry Status. */
	uint32_t handle;		    /* System defined handle */
	uint16_t nport_handle;
#define CTIO7_NHANDLE_UNRECOGNIZED	0xFFFF
	uint16_t timeout;
	uint16_t dseg_count;		    /* Data segment count. */
	uint8_t  vp_index;
	uint8_t  add_flags;
	uint8_t  initiator_id[3];
	uint8_t  reserved;
	uint32_t exchange_addr;
	union {
		/*
		 * Status mode 0 layout (CTIO7_FLAGS_STATUS_MODE_0):
		 * carries data-transfer parameters.
		 */
		struct {
			uint16_t reserved1;
			uint16_t flags;
			uint32_t residual;
			uint16_t ox_id;
			uint16_t scsi_status;
			uint32_t relative_offset;
			uint32_t reserved2;
			uint32_t transfer_length;
			uint32_t reserved3;
			/* Data segment 0 address. */
			uint32_t dseg_0_address[2];
			/* Data segment 0 length. */
			uint32_t dseg_0_length;
		} status0;
		/*
		 * Status mode 1 layout (CTIO7_FLAGS_STATUS_MODE_1):
		 * carries inline sense/response data.
		 */
		struct {
			uint16_t sense_length;
			uint16_t flags;
			uint32_t residual;
			uint16_t ox_id;
			uint16_t scsi_status;
			uint16_t response_len;
			uint16_t reserved;
			uint8_t  sense_data[24];
		} status1;
	} u;
} __packed;
467
468/*
469 * ISP queue - CTIO type 7 from ISP 24xx to target driver
470 * returned entry structure.
471 */
struct ctio7_from_24xx {
	uint8_t	 entry_type;		    /* Entry type. */
	uint8_t	 entry_count;		    /* Entry count. */
	uint8_t	 sys_define;		    /* System defined. */
	uint8_t	 entry_status;		    /* Entry Status. */
	uint32_t handle;		    /* System defined handle */
	uint16_t status;		    /* CTIO_* completion status */
	uint16_t timeout;
	uint16_t dseg_count;		    /* Data segment count. */
	uint8_t  vp_index;
	uint8_t  reserved1[5];
	uint32_t exchange_address;
	uint16_t reserved2;
	uint16_t flags;
	uint32_t residual;
	uint16_t ox_id;
	uint16_t reserved3;
	uint32_t relative_offset;
	uint8_t  reserved4[24];
} __packed;
492
493/* CTIO7 flags values */
494#define CTIO7_FLAGS_SEND_STATUS BIT_15
495#define CTIO7_FLAGS_TERMINATE BIT_14
496#define CTIO7_FLAGS_CONFORM_REQ BIT_13
497#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
498#define CTIO7_FLAGS_STATUS_MODE_0 0
499#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
500#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
501#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
502#define CTIO7_FLAGS_DSD_PTR BIT_2
503#define CTIO7_FLAGS_DATA_IN BIT_1
504#define CTIO7_FLAGS_DATA_OUT BIT_0
505
506#define ELS_PLOGI 0x3
507#define ELS_FLOGI 0x4
508#define ELS_LOGO 0x5
509#define ELS_PRLI 0x20
510#define ELS_PRLO 0x21
511#define ELS_TPRLO 0x24
512#define ELS_PDISC 0x50
513#define ELS_ADISC 0x52
514
515/*
516 * ISP queue - ABTS received/response entries structure definition for 24xx.
517 */
518#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */
#define ABTS_RESP_24XX		0x55 /* ABTS response (for 24xx) */
520
521/*
522 * ISP queue - ABTS received IOCB entry structure definition for 24xx.
523 * The ABTS BLS received from the wire is sent to the
524 * target driver by the ISP 24xx.
525 * The IOCB is placed on the response queue.
526 */
struct abts_recv_from_24xx {
	uint8_t	 entry_type;		    /* Entry type. */
	uint8_t	 entry_count;		    /* Entry count. */
	uint8_t	 sys_define;		    /* System defined. */
	uint8_t	 entry_status;		    /* Entry Status. */
	uint8_t  reserved_1[6];
	uint16_t nport_handle;
	uint8_t  reserved_2[2];
	uint8_t  vp_index;
	uint8_t  reserved_3:4;
	uint8_t  sof_type:4;
	uint32_t exchange_address;
	struct fcp_hdr_le fcp_hdr_le;	    /* header of the received ABTS frame */
	uint8_t  reserved_4[16];
	uint32_t exchange_addr_to_abort;    /* exchange the initiator wants aborted */
} __packed;
543
544#define ABTS_PARAM_ABORT_SEQ BIT_0
545
/* BA_ACC (basic accept) payload used in struct abts_resp_to_24xx. */
struct ba_acc_le {
	uint16_t reserved;
	uint8_t  seq_id_last;
	uint8_t  seq_id_valid;		/* SEQ_ID_VALID or SEQ_ID_INVALID */
#define SEQ_ID_VALID	0x80
#define SEQ_ID_INVALID	0x00
	uint16_t rx_id;
	uint16_t ox_id;
	uint16_t high_seq_cnt;
	uint16_t low_seq_cnt;
} __packed;
557
/* BA_RJT (basic reject) payload used in struct abts_resp_to_24xx. */
struct ba_rjt_le {
	uint8_t  vendor_uniq;
	uint8_t  reason_expl;
	uint8_t  reason_code;		/* BA_RJT_REASON_CODE_* below */
#define BA_RJT_REASON_CODE_INVALID_COMMAND	0x1
#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM	0x9
	uint8_t  reserved;
} __packed;
566
567/*
568 * ISP queue - ABTS Response IOCB entry structure definition for 24xx.
569 * The ABTS response to the ABTS received is sent by the
570 * target driver to the ISP 24xx.
571 * The IOCB is placed on the request queue.
572 */
struct abts_resp_to_24xx {
	uint8_t	 entry_type;		    /* Entry type. */
	uint8_t	 entry_count;		    /* Entry count. */
	uint8_t	 sys_define;		    /* System defined. */
	uint8_t	 entry_status;		    /* Entry Status. */
	uint32_t handle;
	uint16_t reserved_1;
	uint16_t nport_handle;
	uint16_t control_flags;
#define ABTS_CONTR_FLG_TERM_EXCHG	BIT_0
	uint8_t  vp_index;
	uint8_t  reserved_3:4;
	uint8_t  sof_type:4;
	uint32_t exchange_address;
	struct fcp_hdr_le fcp_hdr_le;
	/* Either a BA_ACC or a BA_RJT reply payload. */
	union {
		struct ba_acc_le ba_acct;
		struct ba_rjt_le ba_rjt;
	} __packed payload;
	uint32_t reserved_4;
	uint32_t exchange_addr_to_abort;
} __packed;
595
596/*
597 * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure.
598 * The ABTS response with completion status to the ABTS response
599 * (sent by the target driver to the ISP 24xx) is sent by the
600 * ISP24xx firmware to the target driver.
601 * The IOCB is placed on the response queue.
602 */
struct abts_resp_from_24xx_fw {
	uint8_t	 entry_type;		    /* Entry type. */
	uint8_t	 entry_count;		    /* Entry count. */
	uint8_t	 sys_define;		    /* System defined. */
	uint8_t	 entry_status;		    /* Entry Status. */
	uint32_t handle;
	uint16_t compl_status;		    /* ABTS_RESP_COMPL_* below */
#define ABTS_RESP_COMPL_SUCCESS		0
#define ABTS_RESP_COMPL_SUBCODE_ERROR	0x31
	uint16_t nport_handle;
	uint16_t reserved_1;
	uint8_t  reserved_2;
	uint8_t  reserved_3:4;
	uint8_t  sof_type:4;
	uint32_t exchange_address;
	struct fcp_hdr_le fcp_hdr_le;
	uint8_t reserved_4[8];
	uint32_t error_subcode1;	    /* valid on ABTS_RESP_COMPL_SUBCODE_ERROR */
#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM	0x1E
	uint32_t error_subcode2;
	uint32_t exchange_addr_to_abort;
} __packed;
625
626/********************************************************************\
627 * Type Definitions used by initiator & target halves
628\********************************************************************/
629
630struct qla_tgt_mgmt_cmd;
631struct qla_tgt_sess;
632
633/*
634 * This structure provides a template of function calls that the
635 * target driver (from within qla_target.c) can issue to the
636 * target module (tcm_qla2xxx).
637 */
struct qla_tgt_func_tmpl {

	/* Hand a newly arrived SCSI command to the fabric module. */
	int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
			unsigned char *, uint32_t, int, int, int);
	/* Called once write data for a command has arrived. */
	void (*handle_data)(struct qla_tgt_cmd *);
	/* Hand a task-management request to the fabric module. */
	int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
			uint32_t);
	void (*free_cmd)(struct qla_tgt_cmd *);
	void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
	void (*free_session)(struct qla_tgt_sess *);

	/* ACL check for a connecting initiator node. */
	int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
					void *, uint8_t *, uint16_t);
	/* Session lookup, by FC loop id or by FC S_ID respectively. */
	struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
						const uint16_t);
	struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
						const uint8_t *);
	void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
	void (*put_sess)(struct qla_tgt_sess *);
	void (*shutdown_sess)(struct qla_tgt_sess *);
};
659
660int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
661
662#include <target/target_core_base.h>
663
664#define QLA_TGT_TIMEOUT 10 /* in seconds */
665
666#define QLA_TGT_MAX_HW_PENDING_TIME 60 /* in seconds */
667
668/* Immediate notify status constants */
669#define IMM_NTFY_LIP_RESET 0x000E
670#define IMM_NTFY_LIP_LINK_REINIT 0x000F
671#define IMM_NTFY_IOCB_OVERFLOW 0x0016
672#define IMM_NTFY_ABORT_TASK 0x0020
673#define IMM_NTFY_PORT_LOGOUT 0x0029
674#define IMM_NTFY_PORT_CONFIG 0x002A
675#define IMM_NTFY_GLBL_TPRLO 0x002D
676#define IMM_NTFY_GLBL_LOGO 0x002E
677#define IMM_NTFY_RESOURCE 0x0034
678#define IMM_NTFY_MSG_RX 0x0036
679#define IMM_NTFY_SRR 0x0045
680#define IMM_NTFY_ELS 0x0046
681
682/* Immediate notify task flags */
683#define IMM_NTFY_TASK_MGMT_SHIFT 8
684
685#define QLA_TGT_CLEAR_ACA 0x40
686#define QLA_TGT_TARGET_RESET 0x20
687#define QLA_TGT_LUN_RESET 0x10
688#define QLA_TGT_CLEAR_TS 0x04
689#define QLA_TGT_ABORT_TS 0x02
690#define QLA_TGT_ABORT_ALL_SESS 0xFFFF
691#define QLA_TGT_ABORT_ALL 0xFFFE
692#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
693#define QLA_TGT_NEXUS_LOSS 0xFFFC
694
695/* Notify Acknowledge flags */
696#define NOTIFY_ACK_RES_COUNT BIT_8
697#define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5
698#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
699
700/* Command's states */
701#define QLA_TGT_STATE_NEW 0 /* New command + target processing */
702#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
703#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
704#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
705#define QLA_TGT_STATE_ABORTED 4 /* Command aborted */
706
707/* Special handles */
708#define QLA_TGT_NULL_HANDLE 0
709#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
710
711/* ATIO task_codes field */
712#define ATIO_SIMPLE_QUEUE 0
713#define ATIO_HEAD_OF_QUEUE 1
714#define ATIO_ORDERED_QUEUE 2
715#define ATIO_ACA_QUEUE 4
716#define ATIO_UNTAGGED 5
717
718/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
719#define FC_TM_SUCCESS 0
720#define FC_TM_BAD_FCP_DATA 1
721#define FC_TM_BAD_CMD 2
722#define FC_TM_FCP_DATA_MISMATCH 3
723#define FC_TM_REJECT 4
724#define FC_TM_FAILED 5
725
726/*
727 * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
728 * terminated, so no more actions is needed and success should be returned
729 * to target.
730 */
731#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
732
733#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
734#define pci_dma_lo32(a) (a & 0xffffffff)
735#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
736#else
737#define pci_dma_lo32(a) (a & 0xffffffff)
738#define pci_dma_hi32(a) 0
739#endif
740
741#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \
742 (((const uint8_t *)(sense))[0] & 0x70) == 0x70)
743
/* Minimal per-port identification (WWPN + loop id) for 24xx adapters. */
struct qla_port_24xx_data {
	uint8_t  port_name[WWN_SIZE];
	uint16_t loop_id;
	uint16_t reserved;
};
749
/* Per-host target-mode state. */
struct qla_tgt {
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;

	/*
	 * To sync between IRQ handlers and qlt_target_release(). Needed,
	 * because req_pkt() can drop/reacquire HW lock inside. Protected by
	 * HW lock.
	 */
	int irq_cmd_count;

	int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;

	/* Target's flags, serialized by pha->hardware_lock */
	unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
	unsigned int link_reinit_iocb_pending:1;

	/*
	 * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
	 * OR hardware_lock for reading.
	 */
	int tgt_stop; /* the target mode driver is being stopped */
	int tgt_stopped; /* the target mode driver has been stopped */

	/* Count of sessions referring to qla_tgt. Protected by hardware_lock. */
	int sess_count;

	/* Protected by hardware_lock. Addition also protected by tgt_mutex. */
	struct list_head sess_list;

	/* Protected by hardware_lock */
	struct list_head del_sess_list;
	struct delayed_work sess_del_work;

	spinlock_t sess_work_lock;
	struct list_head sess_works_list;
	struct work_struct sess_work;

	/* Saved link-reinit notify IOCB (see link_reinit_iocb_pending). */
	struct imm_ntfy_from_isp link_reinit_iocb;
	wait_queue_head_t waitQ;
	int notify_ack_expected;
	int abts_resp_expected;
	int modify_lun_expected;

	/* SRR (sequence retransmission request) bookkeeping. */
	int ctio_srr_id;
	int imm_srr_id;
	spinlock_t srr_lock;
	struct list_head srr_ctio_list;
	struct list_head srr_imm_list;
	struct work_struct srr_work;

	atomic_t tgt_global_resets_count;

	struct list_head tgt_list_entry;
};
805
806/*
 * Equivalent to IT Nexus (Initiator-Target)
808 */
struct qla_tgt_sess {
	uint16_t loop_id;
	port_id_t s_id;

	unsigned int conf_compl_supported:1;
	unsigned int deleted:1;		/* queued on del_sess_list */
	unsigned int local:1;

	struct se_session *se_sess;	/* backing target-core session */
	struct scsi_qla_host *vha;
	struct qla_tgt *tgt;

	struct list_head sess_list_entry;
	unsigned long expires;		/* used with del_list_entry */
	struct list_head del_list_entry;

	uint8_t port_name[WWN_SIZE];
	struct work_struct free_work;
};
828
/* Target-mode SCSI command context, wrapping a target-core se_cmd. */
struct qla_tgt_cmd {
	struct qla_tgt_sess *sess;
	int state;			/* QLA_TGT_STATE_* */
	struct se_cmd se_cmd;
	struct work_struct free_work;
	struct work_struct work;
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];

	/* to save extra sess dereferences */
	unsigned int conf_compl_supported:1;
	unsigned int sg_mapped:1;
	unsigned int free_sg:1;
	unsigned int aborted:1; /* Needed in case of SRR */
	unsigned int write_data_transferred:1;

	struct scatterlist *sg;	/* cmd data buffer SG vector */
	int sg_cnt;		/* SG segments count */
	int bufflen;		/* cmd buffer length */
	int offset;
	uint32_t tag;
	uint32_t unpacked_lun;
	enum dma_data_direction dma_data_direction;

	uint16_t loop_id;	/* to save extra sess dereferences */
	struct qla_tgt *tgt;	/* to save extra sess dereferences */
	struct scsi_qla_host *vha;
	struct list_head cmd_list;

	/* Original ATIO that delivered this command. */
	struct atio_from_isp atio;
};
860
/* Deferred-work item queued on qla_tgt->sess_works_list. */
struct qla_tgt_sess_work_param {
	struct list_head sess_works_list_entry;

#define QLA_TGT_SESS_WORK_ABORT	1
#define QLA_TGT_SESS_WORK_TM	2
	int type;			/* QLA_TGT_SESS_WORK_* */

	/* Saved copy of the IOCB that triggered the work, keyed by type. */
	union {
		struct abts_recv_from_24xx abts;
		struct imm_ntfy_from_isp tm_iocb;
		struct atio_from_isp tm_iocb2;
	};
};
874
/* Task-management command context. */
struct qla_tgt_mgmt_cmd {
	uint8_t tmr_func;		/* requested TM function */
	uint8_t fc_tm_rsp;		/* FC_TM_* response code */
	struct qla_tgt_sess *sess;
	struct se_cmd se_cmd;
	struct work_struct free_work;
	unsigned int flags;
#define QLA24XX_MGMT_SEND_NACK	1
	/* Saved copy of the IOCB that carried the request. */
	union {
		struct atio_from_isp atio;
		struct imm_ntfy_from_isp imm_ntfy;
		struct abts_recv_from_24xx abts;
	} __packed orig_iocb;
};
889
/* Scratch parameter block used while building a response for a command. */
struct qla_tgt_prm {
	struct qla_tgt_cmd *cmd;
	struct qla_tgt *tgt;
	void *pkt;			/* IOCB packet being built */
	struct scatterlist *sg;	/* cmd data buffer SG vector */
	int seg_cnt;
	int req_cnt;			/* request-queue entries needed */
	uint16_t rq_result;
	uint16_t scsi_status;
	unsigned char *sense_buffer;
	int sense_buffer_len;
	int residual;
	int add_status_pkt;
};
904
/* SRR immediate-notify entry queued on qla_tgt->srr_imm_list. */
struct qla_tgt_srr_imm {
	struct list_head srr_list_entry;
	int srr_id;
	struct imm_ntfy_from_isp imm_ntfy;
};
910
/* SRR CTIO entry queued on qla_tgt->srr_ctio_list. */
struct qla_tgt_srr_ctio {
	struct list_head srr_list_entry;
	int srr_id;
	struct qla_tgt_cmd *cmd;
};
916
917#define QLA_TGT_XMIT_DATA 1
918#define QLA_TGT_XMIT_STATUS 2
919#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
920
921
922extern struct qla_tgt_data qla_target;
923/*
924 * Internal function prototypes
925 */
926void qlt_disable_vha(struct scsi_qla_host *);
927
928/*
929 * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
930 */
931extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
932extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
933extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
934 int (*callback)(struct scsi_qla_host *), void *);
935extern void qlt_lport_deregister(struct scsi_qla_host *);
936extern void qlt_unreg_sess(struct qla_tgt_sess *);
937extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
938extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
939extern void qlt_set_mode(struct scsi_qla_host *ha);
940extern void qlt_clear_mode(struct scsi_qla_host *ha);
941extern int __init qlt_init(void);
942extern void qlt_exit(void);
943extern void qlt_update_vp_map(struct scsi_qla_host *, int);
944
945/*
946 * This macro is used during early initializations when host->active_mode
947 * is not set. Right now, ha value is ignored.
948 */
949#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
950
951static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
952{
953 return ha->host->active_mode & MODE_TARGET;
954}
955
956static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
957{
958 return ha->host->active_mode & MODE_INITIATOR;
959}
960
961static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
962{
963 if (ha->host->active_mode & MODE_INITIATOR)
964 ha->host->active_mode &= ~MODE_INITIATOR;
965 else
966 ha->host->active_mode |= MODE_INITIATOR;
967}
968
969/*
970 * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
971 */
972extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
973 struct atio_from_isp *);
974extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
975extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
976extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
977extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
978extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
979extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
980extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
981extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
982extern void qlt_enable_vha(struct scsi_qla_host *);
983extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
984extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
985extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
986extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
987extern void qlt_24xx_config_rings(struct scsi_qla_host *,
988 device_reg_t __iomem *);
989extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
990 struct nvram_24xx *);
991extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
992 struct init_cb_24xx *);
993extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
994 struct sts_entry_24xx *);
995extern void qlt_modify_vp_config(struct scsi_qla_host *,
996 struct vp_config_entry_24xx *);
997extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
998extern int qlt_mem_alloc(struct qla_hw_data *);
999extern void qlt_mem_free(struct qla_hw_data *);
1000extern void qlt_stop_phase1(struct qla_tgt *);
1001extern void qlt_stop_phase2(struct qla_tgt *);
1002
1003#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644
index 000000000000..4752f65a9272
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -0,0 +1,1904 @@
1/*******************************************************************************
2 * This file contains tcm implementation using v4 configfs fabric infrastructure
3 * for QLogic target mode HBAs
4 *
 * (c) Copyright 2010-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL)
8 * version 2.
9 *
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11 *
12 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
13 * the TCM_FC / Open-FCoE.org fabric module.
14 *
15 * Copyright (c) 2010 Cisco Systems, Inc
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 ****************************************************************************/
27
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <generated/utsrelease.h>
32#include <linux/utsname.h>
33#include <linux/init.h>
34#include <linux/list.h>
35#include <linux/slab.h>
36#include <linux/kthread.h>
37#include <linux/types.h>
38#include <linux/string.h>
39#include <linux/configfs.h>
40#include <linux/ctype.h>
41#include <asm/unaligned.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_host.h>
44#include <scsi/scsi_device.h>
45#include <scsi/scsi_cmnd.h>
46#include <target/target_core_base.h>
47#include <target/target_core_fabric.h>
48#include <target/target_core_fabric_configfs.h>
49#include <target/target_core_configfs.h>
50#include <target/configfs_macros.h>
51
52#include "qla_def.h"
53#include "qla_target.h"
54#include "tcm_qla2xxx.h"
55
56struct workqueue_struct *tcm_qla2xxx_free_wq;
57struct workqueue_struct *tcm_qla2xxx_cmd_wq;
58
/* configfs TPG attribute callback that unconditionally reports "true". */
static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
{
	(void)se_tpg;	/* unused */
	return 1;
}
63
/* configfs TPG attribute callback that unconditionally reports "false". */
static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
{
	(void)se_tpg;	/* unused */
	return 0;
}
68
69/*
70 * Parse WWN.
71 * If strict, we require lower-case hex and colon separators to be sure
72 * the name is the same as what would be generated by ft_format_wwn()
73 * so the name and wwn are mapped one-to-one.
74 */
75static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
76{
77 const char *cp;
78 char c;
79 u32 nibble;
80 u32 byte = 0;
81 u32 pos = 0;
82 u32 err;
83
84 *wwn = 0;
85 for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
86 c = *cp;
87 if (c == '\n' && cp[1] == '\0')
88 continue;
89 if (strict && pos++ == 2 && byte++ < 7) {
90 pos = 0;
91 if (c == ':')
92 continue;
93 err = 1;
94 goto fail;
95 }
96 if (c == '\0') {
97 err = 2;
98 if (strict && byte != 8)
99 goto fail;
100 return cp - name;
101 }
102 err = 3;
103 if (isdigit(c))
104 nibble = c - '0';
105 else if (isxdigit(c) && (islower(c) || !strict))
106 nibble = tolower(c) - 'a' + 10;
107 else
108 goto fail;
109 *wwn = (*wwn << 4) | nibble;
110 }
111 err = 4;
112fail:
113 pr_debug("err %u len %zu pos %u byte %u\n",
114 err, cp - name, pos, byte);
115 return -1;
116}
117
118static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
119{
120 u8 b[8];
121
122 put_unaligned_be64(wwn, b);
123 return snprintf(buf, len,
124 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
125 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
126}
127
/* Fabric name reported to the target core for physical ports. */
static char *tcm_qla2xxx_get_fabric_name(void)
{
	char *name = "qla2xxx";

	return name;
}
132
133/*
134 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
135 */
136static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
137{
138 unsigned int i, j;
139 u8 wwn[8];
140
141 memset(wwn, 0, sizeof(wwn));
142
143 /* Validate and store the new name */
144 for (i = 0, j = 0; i < 16; i++) {
145 int value;
146
147 value = hex_to_bin(*ns++);
148 if (value >= 0)
149 j = (j << 4) | value;
150 else
151 return -EINVAL;
152
153 if (i % 2) {
154 wwn[i/2] = j & 0xff;
155 j = 0;
156 }
157 }
158
159 *nm = wwn_to_u64(wwn);
160 return 0;
161}
162
163/*
164 * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
165 * store_fc_host_vport_create()
166 */
167static int tcm_qla2xxx_npiv_parse_wwn(
168 const char *name,
169 size_t count,
170 u64 *wwpn,
171 u64 *wwnn)
172{
173 unsigned int cnt = count;
174 int rc;
175
176 *wwpn = 0;
177 *wwnn = 0;
178
179 /* count may include a LF at end of string */
180 if (name[cnt-1] == '\n')
181 cnt--;
182
183 /* validate we have enough characters for WWPN */
184 if ((cnt != (16+1+16)) || (name[16] != ':'))
185 return -EINVAL;
186
187 rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
188 if (rc != 0)
189 return rc;
190
191 rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
192 if (rc != 0)
193 return rc;
194
195 return 0;
196}
197
198static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
199 u64 wwpn, u64 wwnn)
200{
201 u8 b[8], b2[8];
202
203 put_unaligned_be64(wwpn, b);
204 put_unaligned_be64(wwnn, b2);
205 return snprintf(buf, len,
206 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
207 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
208 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
209 b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
210}
211
/* Fabric name reported to the target core for NPIV virtual ports. */
static char *tcm_qla2xxx_npiv_get_fabric_name(void)
{
	char *name = "qla2xxx_npiv";

	return name;
}
216
217static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
218{
219 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
220 struct tcm_qla2xxx_tpg, se_tpg);
221 struct tcm_qla2xxx_lport *lport = tpg->lport;
222 u8 proto_id;
223
224 switch (lport->lport_proto_id) {
225 case SCSI_PROTOCOL_FCP:
226 default:
227 proto_id = fc_get_fabric_proto_ident(se_tpg);
228 break;
229 }
230
231 return proto_id;
232}
233
234static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
235{
236 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
237 struct tcm_qla2xxx_tpg, se_tpg);
238 struct tcm_qla2xxx_lport *lport = tpg->lport;
239
240 return &lport->lport_name[0];
241}
242
243static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
244{
245 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
246 struct tcm_qla2xxx_tpg, se_tpg);
247 struct tcm_qla2xxx_lport *lport = tpg->lport;
248
249 return &lport->lport_npiv_name[0];
250}
251
252static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
253{
254 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
255 struct tcm_qla2xxx_tpg, se_tpg);
256 return tpg->lport_tpgt;
257}
258
259static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
260{
261 return 1;
262}
263
/*
 * Build the SPC-3 PR TransportID for @se_nacl into @buf.  FCP-only
 * fabric, so all protocol IDs defer to the common FC helper.
 * Returns the number of TransportID bytes written.
 */
static u32 tcm_qla2xxx_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	int ret = 0;

	switch (lport->lport_proto_id) {
	case SCSI_PROTOCOL_FCP:
	default:
		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
		break;
	}

	return ret;
}
286
/*
 * Return the length of the SPC-3 PR TransportID that
 * tcm_qla2xxx_get_pr_transport_id() would emit for @se_nacl.
 */
static u32 tcm_qla2xxx_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	int ret = 0;

	switch (lport->lport_proto_id) {
	case SCSI_PROTOCOL_FCP:
	default:
		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
		break;
	}

	return ret;
}
308
/*
 * Parse a PR-OUT TransportID from @buf, returning a pointer to the
 * initiator identifier within it.  FCP-only, so defers to the common
 * FC parser for every protocol ID.
 */
static char *tcm_qla2xxx_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	char *tid = NULL;

	switch (lport->lport_proto_id) {
	case SCSI_PROTOCOL_FCP:
	default:
		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
		break;
	}

	return tid;
}
330
331static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
332{
333 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
334 struct tcm_qla2xxx_tpg, se_tpg);
335
336 return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
337}
338
339static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
340{
341 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
342 struct tcm_qla2xxx_tpg, se_tpg);
343
344 return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
345}
346
347static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
348{
349 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
350 struct tcm_qla2xxx_tpg, se_tpg);
351
352 return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
353}
354
355static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
356{
357 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
358 struct tcm_qla2xxx_tpg, se_tpg);
359
360 return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
361}
362
363static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
364 struct se_portal_group *se_tpg)
365{
366 struct tcm_qla2xxx_nacl *nacl;
367
368 nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
369 if (!nacl) {
370 pr_err("Unable to alocate struct tcm_qla2xxx_nacl\n");
371 return NULL;
372 }
373
374 return &nacl->se_node_acl;
375}
376
377static void tcm_qla2xxx_release_fabric_acl(
378 struct se_portal_group *se_tpg,
379 struct se_node_acl *se_nacl)
380{
381 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
382 struct tcm_qla2xxx_nacl, se_node_acl);
383 kfree(nacl);
384}
385
386static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
387{
388 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
389 struct tcm_qla2xxx_tpg, se_tpg);
390
391 return tpg->lport_tpgt;
392}
393
/* Workqueue deferral: release a TMR mgmt command in process context. */
static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
			struct qla_tgt_mgmt_cmd, free_work);

	transport_generic_free_cmd(&mcmd->se_cmd, 0);
}
401
/*
 * Called from qla_target_template->free_mcmd(), and will call
 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
 * release callback. qla_hw_data->hardware_lock is expected to be held,
 * which is why the actual free is punted to tcm_qla2xxx_free_wq.
 */
static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
	queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
}
412
/* Workqueue deferral: release a SCSI command in process context. */
static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	transport_generic_free_cmd(&cmd->se_cmd, 0);
}
419
/*
 * Called from qla_target_template->free_cmd(), and will call
 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
 * release callback. qla_hw_data->hardware_lock is expected to be held,
 * which is why the actual free is punted to tcm_qla2xxx_free_wq.
 */
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
430
/*
 * Called from struct target_core_fabric_ops->check_stop_free() context.
 * Drops the session-level kref taken with TARGET_SCF_ACK_KREF; returns
 * whatever target_put_sess_cmd() reports.
 */
static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
438
/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
 * fabric descriptor @se_cmd command to release.
 * TMR commands are routed to qlt_free_mcmd(), regular SCSI commands to
 * qlt_free_cmd().
 */
static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd;

	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
				struct qla_tgt_mgmt_cmd, se_cmd);
		qlt_free_mcmd(mcmd);
		return;
	}

	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
	qlt_free_cmd(cmd);
}
456
/*
 * Mark all outstanding session commands as waiting, under
 * hardware_lock to serialize against the interrupt-time I/O path.
 * Returns 1 so the core proceeds with session shutdown.
 */
static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct scsi_qla_host *vha;
	unsigned long flags;

	BUG_ON(!sess);
	vha = sess->vha;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	target_sess_cmd_list_set_waiting(se_sess);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return 1;
}
472
/*
 * Core-initiated session close: unregister the qla_tgt_sess under
 * hardware_lock, as qlt_unreg_sess() expects.
 */
static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct scsi_qla_host *vha;
	unsigned long flags;

	BUG_ON(!sess);
	vha = sess->vha;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_unreg_sess(sess);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
486
/* No per-session index is tracked for this fabric. */
static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
{
	return 0;
}
491
492/*
493 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
494 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
495 * that data is coming from the target (eg handling a READ). However,
496 * this is just the opposite of what we have to tell the DMA mapping
497 * layer -- eg when handling a READ, the HBA will have to DMA the data
498 * out of memory so it can send it to the initiator, which means we
499 * need to use DMA_TO_DEVICE when we map the data.
500 */
501static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
502{
503 if (se_cmd->se_cmd_flags & SCF_BIDI)
504 return DMA_BIDIRECTIONAL;
505
506 switch (se_cmd->data_direction) {
507 case DMA_TO_DEVICE:
508 return DMA_FROM_DEVICE;
509 case DMA_FROM_DEVICE:
510 return DMA_TO_DEVICE;
511 case DMA_NONE:
512 default:
513 return DMA_NONE;
514 }
515}
516
/*
 * Prepare a FCP WRITE: copy length/SGL/direction from the se_cmd into
 * the LLD command and ask the HBA to start receiving data.
 */
static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;

	/*
	 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
	 */
	return qlt_rdy_to_xfer(cmd);
}
534
/*
 * Wait (up to 3000 jiffies) for an in-flight WRITE's CTIO abort to be
 * posted by hardware before allowing shutdown to continue.  Always
 * returns 0; the wait is best-effort.
 */
static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
{
	unsigned long flags;
	/*
	 * Check for WRITE_PENDING status to determine if we need to wait for
	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
	 */
	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
						3000);
		return 0;
	}
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

	return 0;
}
554
/* No default node attributes need to be applied for this fabric. */
static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
{
}
559
560static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
561{
562 struct qla_tgt_cmd *cmd = container_of(se_cmd,
563 struct qla_tgt_cmd, se_cmd);
564
565 return cmd->tag;
566}
567
/* No fabric-private command state is exposed to the core. */
static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}
572
/*
 * Called from process context in qla_target.c:qlt_do_work() code.
 * Submits the incoming FCP command to the target core via
 * target_submit_cmd() with TARGET_SCF_ACK_KREF (dropped later in
 * tcm_qla2xxx_check_stop_free()).  Returns -EINVAL when the command
 * has no attached session.
 */
static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
	unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
	int data_dir, int bidi)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct se_session *se_sess;
	struct qla_tgt_sess *sess;
	int flags = TARGET_SCF_ACK_KREF;

	if (bidi)
		flags |= TARGET_SCF_BIDI_OP;

	sess = cmd->sess;
	if (!sess) {
		pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
		return -EINVAL;
	}

	se_sess = sess->se_sess;
	if (!se_sess) {
		pr_err("Unable to locate active struct se_session\n");
		return -EINVAL;
	}

	return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
				cmd->unpacked_lun, data_length, fcp_task_attr,
				data_dir, flags);
}
604
605static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
606{
607 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
608
609 /*
610 * Ensure that the complete FCP WRITE payload has been received.
611 * Otherwise return an exception via CHECK_CONDITION status.
612 */
613 if (!cmd->write_data_transferred) {
614 /*
615 * Check if se_cmd has already been aborted via LUN_RESET, and
616 * waiting upon completion in tcm_qla2xxx_write_pending_status()
617 */
618 if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
619 complete(&cmd->se_cmd.t_transport_stop_comp);
620 return;
621 }
622
623 cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
624 transport_generic_request_failure(&cmd->se_cmd);
625 return;
626 }
627
628 return target_execute_cmd(&cmd->se_cmd);
629}
630
/*
 * Called from qla_target.c:qlt_do_ctio_completion().
 * Defers WRITE-data completion handling to process context on
 * tcm_qla2xxx_free_wq.
 */
static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
639
/*
 * Called from qla_target.c:qlt_issue_task_mgmt().
 * Submits the TMR to the target core; GFP_ATOMIC because this can be
 * reached from interrupt-time dispatch.
 */
static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
	uint8_t tmr_func, uint32_t tag)
{
	struct qla_tgt_sess *sess = mcmd->sess;
	struct se_cmd *se_cmd = &mcmd->se_cmd;

	return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
			tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
}
652
/*
 * Queue READ data plus SCSI status back to the initiator in a single
 * CTIO via the LLD response ring.
 */
static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
	/* Propagate an abort-in-progress so the LLD can skip the transfer. */
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;
	cmd->offset = 0;

	/*
	 * Now queue completed DATA_IN the qla2xxx LLD and response ring
	 */
	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
				se_cmd->scsi_status);
}
672
/*
 * Queue a status-only response (no data phase).  For a failed READ the
 * residual is forced to the full expected length so the initiator sees
 * an underflow instead of a data transfer.
 */
static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);
	int xmit_type = QLA_TGT_XMIT_STATUS;

	cmd->bufflen = se_cmd->data_length;
	cmd->sg = NULL;
	cmd->sg_cnt = 0;
	cmd->offset = 0;
	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);

	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		/*
		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
		 * for qla_tgt_xmit_response LLD code
		 */
		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
		se_cmd->residual_count = se_cmd->data_length;

		cmd->bufflen = 0;
	}
	/*
	 * Now queue status response to qla2xxx LLD code and response ring
	 */
	return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
}
701
/*
 * Translate the core's TMR completion code into an FC TM response code
 * and queue it back to the initiator via the LLD.  Always returns 0.
 */
static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
				struct qla_tgt_mgmt_cmd, se_cmd);

	pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
			mcmd, se_tmr->function, se_tmr->response);
	/*
	 * Do translation between TCM TM response codes and
	 * QLA2xxx FC TM response codes.
	 */
	switch (se_tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		mcmd->fc_tm_rsp = FC_TM_SUCCESS;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
		mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
		break;
	case TMR_FUNCTION_REJECTED:
		mcmd->fc_tm_rsp = FC_TM_REJECT;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
	default:
		mcmd->fc_tm_rsp = FC_TM_FAILED;
		break;
	}
	/*
	 * Queue the TM response to QLA2xxx LLD to build a
	 * CTIO response packet.
	 */
	qlt_xmit_tm_rsp(mcmd);

	return 0;
}
737
/* No fabric-specific sense header is prepended by qla2xxx. */
static u16 tcm_qla2xxx_get_fabric_sense_len(void)
{
	return 0;
}
742
/* No fabric-specific sense length adjustment is needed for qla2xxx. */
static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
					u32 sense_length)
{
	return 0;
}
748
/* Local pointer to allocated TCM configfs fabric module */
struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;

/* Forward declaration; defined after the by-s_id/by-loop_id helpers below. */
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
			struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held.
 * Removes the NodeACL from the lport's port_id btree and clears the
 * S_ID/loop_id lookup entries before session teardown proceeds.
 */
static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
{
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct se_portal_group *se_tpg = se_nacl->se_tpg;
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
				struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
				struct tcm_qla2xxx_nacl, se_node_acl);
	void *node;

	pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);

	node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
	/* A non-NULL entry that isn't our ACL means the maps are corrupt. */
	WARN_ON(node && (node != se_nacl));

	pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
	    se_nacl, nacl->nport_wwnn, nacl->nport_id);
	/*
	 * Now clear the se_nacl and session pointers from our HW lport lookup
	 * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
	 *
	 * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
	 * target_wait_for_sess_cmds() before the session waits for outstanding
	 * I/O to complete, to avoid a race between session shutdown execution
	 * and incoming ATIOs or TMRs picking up a stale se_node_act reference.
	 */
	tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}
787
/* Final kref release: unregister the qla_tgt_sess behind this se_session. */
static void tcm_qla2xxx_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);

	qlt_unreg_sess(se_sess->fabric_sess_ptr);
}
795
/*
 * Drop a session reference; the kref_put is done under hardware_lock so
 * a final release (qlt_unreg_sess) runs with the lock held as required.
 */
static void tcm_qla2xxx_put_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct qla_hw_data *ha = sess->vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
806
/* qla_target callback: drop a reference on the LLD session's se_session. */
static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
	tcm_qla2xxx_put_session(sess->se_sess);
}
811
/* qla_target callback: begin shutdown of the LLD session's se_session. */
static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
	tcm_qla2xxx_shutdown_session(sess->se_sess);
}
816
/*
 * configfs: create an explicit NodeACL under this TPG from a WWN
 * string.  Parses @name as a WWNN and registers the ACL with the core.
 */
static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
	struct se_portal_group *se_tpg,
	struct config_group *group,
	const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_qla2xxx_nacl *nacl;
	u64 wwnn;
	u32 qla2xxx_nexus_depth;

	if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
		return ERR_PTR(-EINVAL);

	se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);
/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
	qla2xxx_nexus_depth = 1;

	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, qla2xxx_nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	nacl->nport_wwnn = wwnn;
	tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);

	return se_nacl;
}
855
/* configfs: remove a NodeACL and free the wrapping tcm_qla2xxx_nacl. */
static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct se_portal_group *se_tpg = se_acl->se_tpg;
	struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
				struct tcm_qla2xxx_nacl, se_node_acl);

	core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
	kfree(nacl);
}
865
/* Start items for tcm_qla2xxx_tpg_attrib_cit */

/*
 * Generate configfs show/store handlers for a TPG attribute.  The store
 * side parses an unsigned long from userspace and hands it to the
 * corresponding tcm_qla2xxx_set_attrib_##name() validator.
 */
#define DEF_QLA_TPG_ATTRIB(name)					\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_show_##name(			\
	struct se_portal_group *se_tpg,					\
	char *page)							\
{									\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
									\
	return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name);	\
}									\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(			\
	struct se_portal_group *se_tpg,					\
	const char *page,						\
	size_t count)							\
{									\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
	unsigned long val;						\
	int ret;							\
									\
	ret = kstrtoul(page, 0, &val);					\
	if (ret < 0) {							\
		pr_err("kstrtoul() failed with"				\
				" ret: %d\n", ret);			\
		return -EINVAL;						\
	}								\
	ret = tcm_qla2xxx_set_attrib_##name(tpg, val);			\
									\
	return (!ret) ? count : -EINVAL;				\
}

/*
 * Generate a boolean setter that accepts only 0 or 1 and writes the
 * value into the TPG's tpg_attrib struct.
 */
#define DEF_QLA_TPG_ATTR_BOOL(_name)					\
									\
static int tcm_qla2xxx_set_attrib_##_name(				\
	struct tcm_qla2xxx_tpg *tpg,					\
	unsigned long val)						\
{									\
	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;		\
									\
	if ((val != 0) && (val != 1)) {					\
		pr_err("Illegal boolean value %lu\n", val);		\
		return -EINVAL;						\
	}								\
									\
	a->_name = val;							\
	return 0;							\
}

/* Wire the generated handlers into a configfs attribute definition. */
#define QLA_TPG_ATTR(_name, _mode) \
	TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
920
/*
 * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
 */
DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
DEF_QLA_TPG_ATTRIB(generate_node_acls);
QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_attrib_s_cache_dynamic_acls
 */
DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
 */
DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
 */
DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);

/* All per-TPG attrib configfs attributes, terminated with NULL. */
static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
	&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
	&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
	&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
	&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
	NULL,
};

/* End items for tcm_qla2xxx_tpg_attrib_cit */
956
957/* End items for tcm_qla2xxx_tpg_attrib_cit */
958
959static ssize_t tcm_qla2xxx_tpg_show_enable(
960 struct se_portal_group *se_tpg,
961 char *page)
962{
963 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
964 struct tcm_qla2xxx_tpg, se_tpg);
965
966 return snprintf(page, PAGE_SIZE, "%d\n",
967 atomic_read(&tpg->lport_tpg_enabled));
968}
969
970static ssize_t tcm_qla2xxx_tpg_store_enable(
971 struct se_portal_group *se_tpg,
972 const char *page,
973 size_t count)
974{
975 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
976 struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
977 struct tcm_qla2xxx_lport, lport_wwn);
978 struct scsi_qla_host *vha = lport->qla_vha;
979 struct qla_hw_data *ha = vha->hw;
980 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
981 struct tcm_qla2xxx_tpg, se_tpg);
982 unsigned long op;
983 int rc;
984
985 rc = kstrtoul(page, 0, &op);
986 if (rc < 0) {
987 pr_err("kstrtoul() returned %d\n", rc);
988 return -EINVAL;
989 }
990 if ((op != 1) && (op != 0)) {
991 pr_err("Illegal value for tpg_enable: %lu\n", op);
992 return -EINVAL;
993 }
994
995 if (op) {
996 atomic_set(&tpg->lport_tpg_enabled, 1);
997 qlt_enable_vha(vha);
998 } else {
999 if (!ha->tgt.qla_tgt) {
1000 pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
1001 return -ENODEV;
1002 }
1003 atomic_set(&tpg->lport_tpg_enabled, 0);
1004 qlt_stop_phase1(ha->tgt.qla_tgt);
1005 }
1006
1007 return count;
1008}
1009
/* Generate the base-TPG "enable" configfs attribute (root read/write). */
TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);

/* Base TPG configfs attributes, terminated with NULL. */
static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
	&tcm_qla2xxx_tpg_enable.attr,
	NULL,
};
1016
/*
 * configfs: create a TPG ("tpgt_N") under the physical-port wwn.  In
 * non-NPIV mode only tpgt_1 is permitted, and the lport keeps a direct
 * pointer to it in lport->tpg_1.
 */
static struct se_portal_group *tcm_qla2xxx_make_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	if (!lport->qla_npiv_vp && (tpgt != 1)) {
		pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
		return ERR_PTR(-ENOSYS);
	}

	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	tpg->lport = lport;
	tpg->lport_tpgt = tpgt;
	/*
	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
	 * NodeACLs
	 */
	QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
	QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
	QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;

	ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		/* NOTE(review): returns NULL, not ERR_PTR(ret), on
		 * registration failure -- confirm callers treat NULL
		 * as failure before changing. */
		return NULL;
	}
	/*
	 * Setup local TPG=1 pointer for non NPIV mode.
	 */
	if (lport->qla_npiv_vp == NULL)
		lport->tpg_1 = tpg;

	return &tpg->se_tpg;
}
1067
/*
 * configfs: tear down a TPG.  Shuts down target mode for the HBA first
 * (if still active), then deregisters and frees the TPG.
 */
static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	struct scsi_qla_host *vha = lport->qla_vha;
	struct qla_hw_data *ha = vha->hw;
	/*
	 * Call into qla2x_target.c LLD logic to shutdown the active
	 * FC Nexuses and disable target mode operation for this qla_hw_data
	 */
	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
		qlt_stop_phase1(ha->tgt.qla_tgt);

	core_tpg_deregister(se_tpg);
	/*
	 * Clear local TPG=1 pointer for non NPIV mode.
	 */
	if (lport->qla_npiv_vp == NULL)
		lport->tpg_1 = NULL;

	kfree(tpg);
}
1091
/*
 * configfs: create a TPG under an NPIV wwn.  Unlike the physical-port
 * variant, any tpgt value is accepted and no tpg_1 shortcut is kept.
 */
static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	tpg->lport = lport;
	tpg->lport_tpgt = tpgt;

	ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		/* NOTE(review): returns NULL (not ERR_PTR) on failure,
		 * mirroring tcm_qla2xxx_make_tpg(). */
		return NULL;
	}
	return &tpg->se_tpg;
}
1124
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held.
 * Look up the active qla_tgt_sess for a 24-bit FC S_ID (3 big-endian
 * bytes) via the lport's btree.  Returns NULL when no mapping exists.
 */
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
	scsi_qla_host_t *vha,
	const uint8_t *s_id)
{
	struct qla_hw_data *ha = vha->hw;
	struct tcm_qla2xxx_lport *lport;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_nacl *nacl;
	u32 key;

	lport = ha->tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return NULL;
	}

	/* Pack the 3 S_ID bytes (domain, area, al_pa) into the btree key. */
	key = (((unsigned long)s_id[0] << 16) |
	       ((unsigned long)s_id[1] << 8) |
	       (unsigned long)s_id[2]);
	pr_debug("find_sess_by_s_id: 0x%06x\n", key);

	se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
	if (!se_nacl) {
		pr_debug("Unable to locate s_id: 0x%06x\n", key);
		return NULL;
	}
	pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
	    se_nacl, se_nacl->initiatorname);

	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	if (!nacl->qla_tgt_sess) {
		pr_err("Unable to locate struct qla_tgt_sess\n");
		return NULL;
	}

	return nacl->qla_tgt_sess;
}
1166
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held.
 * Install, replace, or clear (when @new_se_nacl is NULL) the S_ID ->
 * se_node_acl mapping in the lport btree, and keep nacl->qla_tgt_sess
 * and qla_tgt_sess->se_sess in sync with it.
 */
static void tcm_qla2xxx_set_sess_by_s_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct qla_tgt_sess *qla_tgt_sess,
	uint8_t *s_id)
{
	u32 key;
	void *slot;
	int rc;

	key = (((unsigned long)s_id[0] << 16) |
	       ((unsigned long)s_id[1] << 8) |
	       (unsigned long)s_id[2]);
	pr_debug("set_sess_by_s_id: %06x\n", key);

	slot = btree_lookup32(&lport->lport_fcport_map, key);
	if (!slot) {
		/* No existing mapping: insert one (or nothing to wipe). */
		if (new_se_nacl) {
			pr_debug("Setting up new fc_port entry to new_se_nacl\n");
			nacl->nport_id = key;
			rc = btree_insert32(&lport->lport_fcport_map, key,
					new_se_nacl, GFP_ATOMIC);
			if (rc)
				/* NOTE(review): raw printk here vs pr_err
				 * elsewhere in this file. */
				printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
				    (int)key);
		} else {
			pr_debug("Wiping nonexisting fc_port entry\n");
		}

		qla_tgt_sess->se_sess = se_sess;
		nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (nacl->qla_tgt_sess) {
		/* Existing mapping with an active session: clear or replace. */
		if (new_se_nacl == NULL) {
			pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
			btree_remove32(&lport->lport_fcport_map, key);
			nacl->qla_tgt_sess = NULL;
			return;
		}
		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
		btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
		qla_tgt_sess->se_sess = se_sess;
		nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	/* Existing mapping but no active session on the nacl. */
	if (new_se_nacl == NULL) {
		pr_debug("Clearing existing fc_port entry\n");
		btree_remove32(&lport->lport_fcport_map, key);
		return;
	}

	pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
	btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
	qla_tgt_sess->se_sess = se_sess;
	nacl->qla_tgt_sess = qla_tgt_sess;

	pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}
1234
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held.
 * Look up the active qla_tgt_sess via the flat loop_id -> se_node_acl
 * array kept on the lport.  Returns NULL when no mapping exists.
 */
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
	scsi_qla_host_t *vha,
	const uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	struct tcm_qla2xxx_lport *lport;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_nacl *nacl;
	struct tcm_qla2xxx_fc_loopid *fc_loopid;

	lport = ha->tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return NULL;
	}

	pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);

	fc_loopid = lport->lport_loopid_map + loop_id;
	se_nacl = fc_loopid->se_nacl;
	if (!se_nacl) {
		pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
		    loop_id);
		return NULL;
	}

	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);

	if (!nacl->qla_tgt_sess) {
		pr_err("Unable to locate struct qla_tgt_sess\n");
		return NULL;
	}

	return nacl->qla_tgt_sess;
}
1274
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held.
 * Install, replace, or clear (when @new_se_nacl is NULL) the loop_id ->
 * se_node_acl mapping in the lport's loop_id array, mirroring the
 * three-way logic of tcm_qla2xxx_set_sess_by_s_id().
 */
static void tcm_qla2xxx_set_sess_by_loop_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct qla_tgt_sess *qla_tgt_sess,
	uint16_t loop_id)
{
	struct se_node_acl *saved_nacl;
	struct tcm_qla2xxx_fc_loopid *fc_loopid;

	pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);

	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
			lport->lport_loopid_map)[loop_id];

	saved_nacl = fc_loopid->se_nacl;
	if (!saved_nacl) {
		/* No existing mapping: install the new one. */
		pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (qla_tgt_sess->se_sess != se_sess)
			qla_tgt_sess->se_sess = se_sess;
		if (nacl->qla_tgt_sess != qla_tgt_sess)
			nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (nacl->qla_tgt_sess) {
		/* Existing mapping with an active session: clear or replace. */
		if (new_se_nacl == NULL) {
			pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
			fc_loopid->se_nacl = NULL;
			nacl->qla_tgt_sess = NULL;
			return;
		}

		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (qla_tgt_sess->se_sess != se_sess)
			qla_tgt_sess->se_sess = se_sess;
		if (nacl->qla_tgt_sess != qla_tgt_sess)
			nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	/* Existing mapping but no active session on the nacl. */
	if (new_se_nacl == NULL) {
		pr_debug("Clearing fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = NULL;
		return;
	}

	pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
	fc_loopid->se_nacl = new_se_nacl;
	if (qla_tgt_sess->se_sess != se_sess)
		qla_tgt_sess->se_sess = se_sess;
	if (nacl->qla_tgt_sess != qla_tgt_sess)
		nacl->qla_tgt_sess = qla_tgt_sess;

	pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}
1338
/*
 * Should always be called with qla_hw_data->hardware_lock held.
 * Clears both lookup entries (S_ID btree and loop_id array) for @sess
 * by invoking the setters with a NULL new_se_nacl.
 */
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
		struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
{
	struct se_session *se_sess = sess->se_sess;
	unsigned char be_sid[3];

	/* Re-pack the S_ID into wire (big-endian) byte order. */
	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;

	tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
				sess, be_sid);
	tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
				sess, sess->loop_id);
}
1357
1358static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
1359{
1360 struct qla_tgt *tgt = sess->tgt;
1361 struct qla_hw_data *ha = tgt->ha;
1362 struct se_session *se_sess;
1363 struct se_node_acl *se_nacl;
1364 struct tcm_qla2xxx_lport *lport;
1365 struct tcm_qla2xxx_nacl *nacl;
1366
1367 BUG_ON(in_interrupt());
1368
1369 se_sess = sess->se_sess;
1370 if (!se_sess) {
1371 pr_err("struct qla_tgt_sess->se_sess is NULL\n");
1372 dump_stack();
1373 return;
1374 }
1375 se_nacl = se_sess->se_node_acl;
1376 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1377
1378 lport = ha->tgt.target_lport_ptr;
1379 if (!lport) {
1380 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1381 dump_stack();
1382 return;
1383 }
1384 target_wait_for_sess_cmds(se_sess, 0);
1385
1386 transport_deregister_session_configfs(sess->se_sess);
1387 transport_deregister_session(sess->se_sess);
1388}
1389
1390/*
1391 * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
1392 * to locate struct se_node_acl
1393 */
1394static int tcm_qla2xxx_check_initiator_node_acl(
1395 scsi_qla_host_t *vha,
1396 unsigned char *fc_wwpn,
1397 void *qla_tgt_sess,
1398 uint8_t *s_id,
1399 uint16_t loop_id)
1400{
1401 struct qla_hw_data *ha = vha->hw;
1402 struct tcm_qla2xxx_lport *lport;
1403 struct tcm_qla2xxx_tpg *tpg;
1404 struct tcm_qla2xxx_nacl *nacl;
1405 struct se_portal_group *se_tpg;
1406 struct se_node_acl *se_nacl;
1407 struct se_session *se_sess;
1408 struct qla_tgt_sess *sess = qla_tgt_sess;
1409 unsigned char port_name[36];
1410 unsigned long flags;
1411
1412 lport = ha->tgt.target_lport_ptr;
1413 if (!lport) {
1414 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1415 dump_stack();
1416 return -EINVAL;
1417 }
1418 /*
1419 * Locate the TPG=1 reference..
1420 */
1421 tpg = lport->tpg_1;
1422 if (!tpg) {
1423 pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
1424 return -EINVAL;
1425 }
1426 se_tpg = &tpg->se_tpg;
1427
1428 se_sess = transport_init_session();
1429 if (IS_ERR(se_sess)) {
1430 pr_err("Unable to initialize struct se_session\n");
1431 return PTR_ERR(se_sess);
1432 }
1433 /*
1434 * Format the FCP Initiator port_name into colon seperated values to
1435 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
1436 */
1437 memset(&port_name, 0, 36);
1438 snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1439 fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
1440 fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
1441 /*
1442 * Locate our struct se_node_acl either from an explict NodeACL created
1443 * via ConfigFS, or via running in TPG demo mode.
1444 */
1445 se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
1446 port_name);
1447 if (!se_sess->se_node_acl) {
1448 transport_free_session(se_sess);
1449 return -EINVAL;
1450 }
1451 se_nacl = se_sess->se_node_acl;
1452 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1453 /*
1454 * And now setup the new se_nacl and session pointers into our HW lport
1455 * mappings for fabric S_ID and LOOP_ID.
1456 */
1457 spin_lock_irqsave(&ha->hardware_lock, flags);
1458 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
1459 qla_tgt_sess, s_id);
1460 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
1461 qla_tgt_sess, loop_id);
1462 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1463 /*
1464 * Finally register the new FC Nexus with TCM
1465 */
1466 __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
1467
1468 return 0;
1469}
1470
1471/*
1472 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
1473 */
1474static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
1475 .handle_cmd = tcm_qla2xxx_handle_cmd,
1476 .handle_data = tcm_qla2xxx_handle_data,
1477 .handle_tmr = tcm_qla2xxx_handle_tmr,
1478 .free_cmd = tcm_qla2xxx_free_cmd,
1479 .free_mcmd = tcm_qla2xxx_free_mcmd,
1480 .free_session = tcm_qla2xxx_free_session,
1481 .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
1482 .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
1483 .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
1484 .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
1485 .put_sess = tcm_qla2xxx_put_sess,
1486 .shutdown_sess = tcm_qla2xxx_shutdown_sess,
1487};
1488
1489static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
1490{
1491 int rc;
1492
1493 rc = btree_init32(&lport->lport_fcport_map);
1494 if (rc) {
1495 pr_err("Unable to initialize lport->lport_fcport_map btree\n");
1496 return rc;
1497 }
1498
1499 lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
1500 65536);
1501 if (!lport->lport_loopid_map) {
1502 pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
1503 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1504 btree_destroy32(&lport->lport_fcport_map);
1505 return -ENOMEM;
1506 }
1507 memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
1508 * 65536);
1509 pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
1510 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1511 return 0;
1512}
1513
1514static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
1515{
1516 struct qla_hw_data *ha = vha->hw;
1517 struct tcm_qla2xxx_lport *lport;
1518 /*
1519 * Setup local pointer to vha, NPIV VP pointer (if present) and
1520 * vha->tcm_lport pointer
1521 */
1522 lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
1523 lport->qla_vha = vha;
1524
1525 return 0;
1526}
1527
1528static struct se_wwn *tcm_qla2xxx_make_lport(
1529 struct target_fabric_configfs *tf,
1530 struct config_group *group,
1531 const char *name)
1532{
1533 struct tcm_qla2xxx_lport *lport;
1534 u64 wwpn;
1535 int ret = -ENODEV;
1536
1537 if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
1538 return ERR_PTR(-EINVAL);
1539
1540 lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
1541 if (!lport) {
1542 pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
1543 return ERR_PTR(-ENOMEM);
1544 }
1545 lport->lport_wwpn = wwpn;
1546 tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
1547 wwpn);
1548
1549 ret = tcm_qla2xxx_init_lport(lport);
1550 if (ret != 0)
1551 goto out;
1552
1553 ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
1554 tcm_qla2xxx_lport_register_cb, lport);
1555 if (ret != 0)
1556 goto out_lport;
1557
1558 return &lport->lport_wwn;
1559out_lport:
1560 vfree(lport->lport_loopid_map);
1561 btree_destroy32(&lport->lport_fcport_map);
1562out:
1563 kfree(lport);
1564 return ERR_PTR(ret);
1565}
1566
/*
 * configfs fabric_drop_wwn callback: finish qla_tgt shutdown, detach
 * the lport from the LLD, and free the lookup tables allocated by
 * tcm_qla2xxx_init_lport().
 */
static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_node_acl *node;
	u32 key = 0;

	/*
	 * Call into qla2x_target.c LLD logic to complete the
	 * shutdown of struct qla_tgt after the call to
	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
	 */
	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
		qlt_stop_phase2(ha->tgt.qla_tgt);

	qlt_lport_deregister(vha);

	/* Free the loop ID array, then drain and destroy the S_ID btree. */
	vfree(lport->lport_loopid_map);
	btree_for_each_safe32(&lport->lport_fcport_map, key, node)
		btree_remove32(&lport->lport_fcport_map, key);
	btree_destroy32(&lport->lport_fcport_map);
	kfree(lport);
}
1592
/*
 * configfs fabric_make_wwn callback for the NPIV fabric: parse the
 * combined 'WWPN+WWNN' name and allocate an lport.
 *
 * NPIV vport creation is not implemented yet (see FIXME below), so this
 * currently always fails with -ENOSYS after the allocation succeeds.
 */
static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 npiv_wwpn, npiv_wwnn;
	int ret;

	if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
				&npiv_wwpn, &npiv_wwnn) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_npiv_wwpn = npiv_wwpn;
	lport->lport_npiv_wwnn = npiv_wwnn;
	tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
			TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);

/* FIXME: tcm_qla2xxx_npiv_make_lport */
	ret = -ENOSYS;
	if (ret != 0)
		goto out;

	return &lport->lport_wwn;
out:
	kfree(lport);
	return ERR_PTR(ret);
}
1626
1627static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
1628{
1629 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1630 struct tcm_qla2xxx_lport, lport_wwn);
1631 struct scsi_qla_host *vha = lport->qla_vha;
1632 struct Scsi_Host *sh = vha->host;
1633 /*
1634 * Notify libfc that we want to release the lport->npiv_vport
1635 */
1636 fc_vport_terminate(lport->npiv_vport);
1637
1638 scsi_host_put(sh);
1639 kfree(lport);
1640}
1641
1642
1643static ssize_t tcm_qla2xxx_wwn_show_attr_version(
1644 struct target_fabric_configfs *tf,
1645 char *page)
1646{
1647 return sprintf(page,
1648 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
1649 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
1650 utsname()->machine);
1651}
1652
1653TF_WWN_ATTR_RO(tcm_qla2xxx, version);
1654
1655static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
1656 &tcm_qla2xxx_wwn_version.attr,
1657 NULL,
1658};
1659
/*
 * target_core fabric ops for the base (non-NPIV) qla2xxx fabric.
 * Demo-mode policy is driven by the per-TPG attrib callbacks.
 */
static struct target_core_fabric_ops tcm_qla2xxx_ops = {
	.get_fabric_name		= tcm_qla2xxx_get_fabric_name,
	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident,
	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag			= tcm_qla2xxx_get_tag,
	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,
	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
					tcm_qla2xxx_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
					tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free		= tcm_qla2xxx_check_stop_free,
	.release_cmd			= tcm_qla2xxx_release_cmd,
	.put_session			= tcm_qla2xxx_put_session,
	.shutdown_session		= tcm_qla2xxx_shutdown_session,
	.close_session			= tcm_qla2xxx_close_session,
	.sess_get_index			= tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_qla2xxx_write_pending,
	.write_pending_status		= tcm_qla2xxx_write_pending_status,
	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
	.get_task_tag			= tcm_qla2xxx_get_task_tag,
	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
	.queue_data_in			= tcm_qla2xxx_queue_data_in,
	.queue_status			= tcm_qla2xxx_queue_status,
	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
	.get_fabric_sense_len		= tcm_qla2xxx_get_fabric_sense_len,
	.set_fabric_sense_len		= tcm_qla2xxx_set_fabric_sense_len,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_qla2xxx_make_lport,
	.fabric_drop_wwn		= tcm_qla2xxx_drop_lport,
	.fabric_make_tpg		= tcm_qla2xxx_make_tpg,
	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
	.fabric_post_link		= NULL,
	.fabric_pre_unlink		= NULL,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_make_nodeacl		= tcm_qla2xxx_make_nodeacl,
	.fabric_drop_nodeacl		= tcm_qla2xxx_drop_nodeacl,
};
1711
/*
 * target_core fabric ops for the NPIV qla2xxx fabric.  Differs from
 * tcm_qla2xxx_ops in fixed demo-mode policy (check_true/check_false
 * instead of per-TPG attribs) and NPIV-specific wwn/tpg callbacks.
 */
static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
	.get_fabric_name		= tcm_qla2xxx_npiv_get_fabric_name,
	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident,
	.tpg_get_wwn			= tcm_qla2xxx_npiv_get_fabric_wwn,
	.tpg_get_tag			= tcm_qla2xxx_get_tag,
	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,
	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
	.tpg_check_demo_mode		= tcm_qla2xxx_check_false,
	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_true,
	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
	.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_true,
	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
	.release_cmd			= tcm_qla2xxx_release_cmd,
	.put_session			= tcm_qla2xxx_put_session,
	.shutdown_session		= tcm_qla2xxx_shutdown_session,
	.close_session			= tcm_qla2xxx_close_session,
	.sess_get_index			= tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_qla2xxx_write_pending,
	.write_pending_status		= tcm_qla2xxx_write_pending_status,
	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
	.get_task_tag			= tcm_qla2xxx_get_task_tag,
	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
	.queue_data_in			= tcm_qla2xxx_queue_data_in,
	.queue_status			= tcm_qla2xxx_queue_status,
	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
	.get_fabric_sense_len		= tcm_qla2xxx_get_fabric_sense_len,
	.set_fabric_sense_len		= tcm_qla2xxx_set_fabric_sense_len,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_qla2xxx_npiv_make_lport,
	.fabric_drop_wwn		= tcm_qla2xxx_npiv_drop_lport,
	.fabric_make_tpg		= tcm_qla2xxx_npiv_make_tpg,
	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
	.fabric_post_link		= NULL,
	.fabric_pre_unlink		= NULL,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_make_nodeacl		= tcm_qla2xxx_make_nodeacl,
	.fabric_drop_nodeacl		= tcm_qla2xxx_drop_nodeacl,
};
1760
/*
 * Register both the base "qla2xxx" and the "qla2xxx_npiv" fabrics with
 * the target core, then allocate the session-free and command
 * workqueues.  Called once from module init.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tcm_qla2xxx_register_configfs(void)
{
	struct target_fabric_configfs *fabric, *npiv_fabric;
	int ret;

	pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
	    utsname()->machine);
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
	 */
	fabric->tf_ops = tcm_qla2xxx_ops;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
						tcm_qla2xxx_tpg_attrib_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the fabric for use within TCM
	 *
	 * NOTE(review): on failure here, 'fabric' allocated by
	 * target_fabric_configfs_init() does not appear to be released
	 * before returning — confirm against the target core API whether
	 * a target_fabric_configfs_free() call is required.
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
		return ret;
	}
	/*
	 * Setup our local pointer to *fabric
	 */
	tcm_qla2xxx_fabric_configfs = fabric;
	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");

	/*
	 * Register the top level struct config_item_type for NPIV with TCM core
	 */
	npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
	if (IS_ERR(npiv_fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		ret = PTR_ERR(npiv_fabric);
		goto out_fabric;
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
	 */
	npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
	/*
	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
	 */
	TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the npiv_fabric for use within TCM
	 *
	 * NOTE(review): if this fails, the goto below deregisters only the
	 * base fabric; npiv_fabric itself looks leaked — verify.
	 */
	ret = target_fabric_configfs_register(npiv_fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
		goto out_fabric;
	}
	/*
	 * Setup our local pointer to *npiv_fabric
	 */
	tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");

	tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
						WQ_MEM_RECLAIM, 0);
	if (!tcm_qla2xxx_free_wq) {
		ret = -ENOMEM;
		goto out_fabric_npiv;
	}

	tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
	if (!tcm_qla2xxx_cmd_wq) {
		ret = -ENOMEM;
		goto out_free_wq;
	}

	return 0;

out_free_wq:
	destroy_workqueue(tcm_qla2xxx_free_wq);
out_fabric_npiv:
	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
out_fabric:
	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
	return ret;
}
1870
/*
 * Undo tcm_qla2xxx_register_configfs(): destroy the workqueues first so
 * no deferred work runs during fabric teardown, then deregister both
 * fabrics from the target core.
 */
static void tcm_qla2xxx_deregister_configfs(void)
{
	destroy_workqueue(tcm_qla2xxx_cmd_wq);
	destroy_workqueue(tcm_qla2xxx_free_wq);

	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
	tcm_qla2xxx_fabric_configfs = NULL;
	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");

	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
	tcm_qla2xxx_npiv_fabric_configfs = NULL;
	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
}
1884
1885static int __init tcm_qla2xxx_init(void)
1886{
1887 int ret;
1888
1889 ret = tcm_qla2xxx_register_configfs();
1890 if (ret < 0)
1891 return ret;
1892
1893 return 0;
1894}
1895
/* Module exit point: tear down both configfs fabrics and workqueues. */
static void __exit tcm_qla2xxx_exit(void)
{
	tcm_qla2xxx_deregister_configfs();
}
1900
/* Standard module metadata and init/exit hookup. */
MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
MODULE_LICENSE("GPL");
module_init(tcm_qla2xxx_init);
module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644
index 000000000000..825498103352
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -0,0 +1,82 @@
#include <target/target_core_base.h>
#include <linux/btree.h>

#define TCM_QLA2XXX_VERSION	"v0.1"
/* length of ASCII WWPNs including pad */
#define TCM_QLA2XXX_NAMELEN	32
/* length of ASCII NPIV 'WWPN+WWNN' including pad */
#define TCM_QLA2XXX_NPIV_NAMELEN 66

#include "qla_target.h"

struct tcm_qla2xxx_nacl {
	/* From libfc struct fc_rport->port_id */
	u32 nport_id;
	/* Binary World Wide unique Node Name for remote FC Initiator Nport */
	u64 nport_wwnn;
	/* ASCII formatted WWPN for FC Initiator Nport */
	char nport_name[TCM_QLA2XXX_NAMELEN];
	/* Pointer to qla_tgt_sess */
	struct qla_tgt_sess *qla_tgt_sess;
	/* Pointer to TCM FC nexus */
	struct se_session *nport_nexus;
	/* Returned by tcm_qla2xxx_make_nodeacl() */
	struct se_node_acl se_node_acl;
};

/* Per-TPG demo/production mode policy knobs, set via configfs. */
struct tcm_qla2xxx_tpg_attrib {
	int generate_node_acls;
	int cache_dynamic_acls;
	int demo_mode_write_protect;
	int prod_mode_write_protect;
};

struct tcm_qla2xxx_tpg {
	/* FC lport target portal group tag for TCM */
	u16 lport_tpgt;
	/* Atomic bit to determine TPG active status */
	atomic_t lport_tpg_enabled;
	/* Pointer back to tcm_qla2xxx_lport */
	struct tcm_qla2xxx_lport *lport;
	/* Used by tcm_qla2xxx_tpg_attrib_cit */
	struct tcm_qla2xxx_tpg_attrib tpg_attrib;
	/* Returned by tcm_qla2xxx_make_tpg() */
	struct se_portal_group se_tpg;
};

#define QLA_TPG_ATTRIB(tpg)	(&(tpg)->tpg_attrib)

/* One slot per 16-bit FC loop ID in lport->lport_loopid_map. */
struct tcm_qla2xxx_fc_loopid {
	struct se_node_acl *se_nacl;
};

struct tcm_qla2xxx_lport {
	/* SCSI protocol the lport is providing */
	u8 lport_proto_id;
	/* Binary World Wide unique Port Name for FC Target Lport */
	u64 lport_wwpn;
	/* Binary World Wide unique Port Name for FC NPIV Target Lport */
	u64 lport_npiv_wwpn;
	/* Binary World Wide unique Node Name for FC NPIV Target Lport */
	u64 lport_npiv_wwnn;
	/* ASCII formatted WWPN for FC Target Lport */
	char lport_name[TCM_QLA2XXX_NAMELEN];
	/* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
	char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
	/* map for fc_port pointers in 24-bit FC Port ID space */
	struct btree_head32 lport_fcport_map;
	/* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
	struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
	/* Pointer to struct scsi_qla_host from qla2xxx LLD */
	struct scsi_qla_host *qla_vha;
	/* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
	struct scsi_qla_host *qla_npiv_vp;
	/* Embedded struct qla_tgt for this lport (not a pointer) */
	struct qla_tgt lport_qla_tgt;
	/* Pointer to struct fc_vport for NPIV vport from libfc */
	struct fc_vport *npiv_vport;
	/* Pointer to TPG=1 for non NPIV mode */
	struct tcm_qla2xxx_tpg *tpg_1;
	/* Returned by tcm_qla2xxx_make_lport() */
	struct se_wwn lport_wwn;
};
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 0b0a7d42137d..c681b2a355e1 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -9,6 +9,140 @@
9#include "ql4_glbl.h" 9#include "ql4_glbl.h"
10#include "ql4_dbg.h" 10#include "ql4_dbg.h"
11 11
/*
 * sysfs binary 'fw_dump' read handler: stream the captured 82xx
 * firmware minidump to userspace, but only while the dump-reading
 * flag is set (armed via the write handler below).
 */
static ssize_t
qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
			     struct bin_attribute *ba, char *buf, loff_t off,
			     size_t count)
{
	struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
					       struct device, kobj)));

	/* Minidump support is 8022-specific. */
	if (!is_qla8022(ha))
		return -EINVAL;

	if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
		return 0;

	return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
				       ha->fw_dump_size);
}
29
/*
 * sysfs binary 'fw_dump' write handler: control minidump state.
 *   "0" - clear dump flags and reload the minidump template
 *   "1" - arm reading of a previously captured dump
 *   "2" - request an HBA reset (sets NEED_RESET if device is READY)
 * Any other value is silently ignored; returns @count on success.
 */
static ssize_t
qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
			      struct bin_attribute *ba, char *buf, loff_t off,
			      size_t count)
{
	struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
					       struct device, kobj)));
	uint32_t dev_state;
	long reading;
	int ret = 0;

	if (!is_qla8022(ha))
		return -EINVAL;

	/* Only whole writes from offset 0 are meaningful. */
	if (off != 0)
		return ret;

	/*
	 * Truncate input to a single character before parsing.
	 * NOTE(review): assumes buf has at least 2 bytes available —
	 * sysfs buffers are page sized, so this holds for bin attrs here.
	 */
	buf[1] = 0;
	ret = kstrtol(buf, 10, &reading);
	if (ret) {
		ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
			   __func__, ret);
		return ret;
	}

	switch (reading) {
	case 0:
		/* clear dump collection flags */
		if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
			clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
			/* Reload minidump template */
			qla4xxx_alloc_fw_dump(ha);
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "Firmware template reloaded\n"));
		}
		break;
	case 1:
		/* Set flag to read dump */
		if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
		    !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
			set_bit(AF_82XX_DUMP_READING, &ha->flags);
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "Raw firmware dump ready for read on (%ld).\n",
					  ha->host_no));
		}
		break;
	case 2:
		/* Reset HBA */
		qla4_8xxx_idc_lock(ha);
		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
		if (dev_state == QLA82XX_DEV_READY) {
			ql4_printk(KERN_INFO, ha,
				   "%s: Setting Need reset, reset_owner is 0x%x.\n",
				   __func__, ha->func_num);
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
					QLA82XX_DEV_NEED_RESET);
			set_bit(AF_82XX_RST_OWNER, &ha->flags);
		} else
			ql4_printk(KERN_INFO, ha,
				   "%s: Reset not performed as device state is 0x%x\n",
				   __func__, dev_state);

		qla4_8xxx_idc_unlock(ha);
		break;
	default:
		/* do nothing */
		break;
	}

	return count;
}
101
/* Root-read/write binary sysfs attribute exposing the fw minidump. */
static struct bin_attribute sysfs_fw_dump_attr = {
	.attr = {
		.name = "fw_dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla4_8xxx_sysfs_read_fw_dump,
	.write = qla4_8xxx_sysfs_write_fw_dump,
};

/* NULL-name terminated table of binary attributes to (un)register. */
static struct sysfs_entry {
	char *name;
	struct bin_attribute *attr;
} bin_file_entries[] = {
	{ "fw_dump", &sysfs_fw_dump_attr },
	{ NULL },
};
119
120void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
121{
122 struct Scsi_Host *host = ha->host;
123 struct sysfs_entry *iter;
124 int ret;
125
126 for (iter = bin_file_entries; iter->name; iter++) {
127 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
128 iter->attr);
129 if (ret)
130 ql4_printk(KERN_ERR, ha,
131 "Unable to create sysfs %s binary attribute (%d).\n",
132 iter->name, ret);
133 }
134}
135
136void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
137{
138 struct Scsi_Host *host = ha->host;
139 struct sysfs_entry *iter;
140
141 for (iter = bin_file_entries; iter->name; iter++)
142 sysfs_remove_bin_file(&host->shost_gendev.kobj,
143 iter->attr);
144}
145
12/* Scsi_Host attributes. */ 146/* Scsi_Host attributes. */
13static ssize_t 147static ssize_t
14qla4xxx_fw_version_show(struct device *dev, 148qla4xxx_fw_version_show(struct device *dev,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 7f2492e88be7..96a5616a8fda 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -398,6 +398,16 @@ struct isp_operations {
398 int (*get_sys_info) (struct scsi_qla_host *); 398 int (*get_sys_info) (struct scsi_qla_host *);
399}; 399};
400 400
401struct ql4_mdump_size_table {
402 uint32_t size;
403 uint32_t size_cmask_02;
404 uint32_t size_cmask_04;
405 uint32_t size_cmask_08;
406 uint32_t size_cmask_10;
407 uint32_t size_cmask_FF;
408 uint32_t version;
409};
410
401/*qla4xxx ipaddress configuration details */ 411/*qla4xxx ipaddress configuration details */
402struct ipaddress_config { 412struct ipaddress_config {
403 uint16_t ipv4_options; 413 uint16_t ipv4_options;
@@ -485,6 +495,10 @@ struct scsi_qla_host {
485#define AF_EEH_BUSY 20 /* 0x00100000 */ 495#define AF_EEH_BUSY 20 /* 0x00100000 */
486#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */ 496#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
487#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */ 497#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
498#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
499#define AF_82XX_RST_OWNER 25 /* 0x02000000 */
500#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
501
488 unsigned long dpc_flags; 502 unsigned long dpc_flags;
489 503
490#define DPC_RESET_HA 1 /* 0x00000002 */ 504#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -662,6 +676,11 @@ struct scsi_qla_host {
662 676
663 uint32_t nx_dev_init_timeout; 677 uint32_t nx_dev_init_timeout;
664 uint32_t nx_reset_timeout; 678 uint32_t nx_reset_timeout;
679 void *fw_dump;
680 uint32_t fw_dump_size;
681 uint32_t fw_dump_capture_mask;
682 void *fw_dump_tmplt_hdr;
683 uint32_t fw_dump_tmplt_size;
665 684
666 struct completion mbx_intr_comp; 685 struct completion mbx_intr_comp;
667 686
@@ -936,4 +955,7 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
936#define PROCESS_ALL_AENS 0 955#define PROCESS_ALL_AENS 0
937#define FLUSH_DDB_CHANGED_AENS 1 956#define FLUSH_DDB_CHANGED_AENS 1
938 957
958/* Defines for udev events */
959#define QL4_UEVENT_CODE_FW_DUMP 0
960
939#endif /*_QLA4XXX_H */ 961#endif /*_QLA4XXX_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 210cd1d64475..7240948fb929 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -385,6 +385,11 @@ struct qla_flt_region {
385#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091 385#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
386#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092 386#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
387#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093 387#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
388#define MBOX_CMD_MINIDUMP 0x0129
389
390/* Minidump subcommand */
391#define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00
392#define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01
388 393
389/* Mailbox 1 */ 394/* Mailbox 1 */
390#define FW_STATE_READY 0x0000 395#define FW_STATE_READY 0x0000
@@ -1190,4 +1195,27 @@ struct ql_iscsi_stats {
1190 uint8_t reserved2[264]; /* 0x0308 - 0x040F */ 1195 uint8_t reserved2[264]; /* 0x0308 - 0x040F */
1191}; 1196};
1192 1197
1198#define QLA82XX_DBG_STATE_ARRAY_LEN 16
1199#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
1200#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
1201
1202struct qla4_8xxx_minidump_template_hdr {
1203 uint32_t entry_type;
1204 uint32_t first_entry_offset;
1205 uint32_t size_of_template;
1206 uint32_t capture_debug_level;
1207 uint32_t num_of_entries;
1208 uint32_t version;
1209 uint32_t driver_timestamp;
1210 uint32_t checksum;
1211
1212 uint32_t driver_capture_mask;
1213 uint32_t driver_info_word2;
1214 uint32_t driver_info_word3;
1215 uint32_t driver_info_word4;
1216
1217 uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
1218 uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
1219};
1220
1193#endif /* _QLA4X_FW_H */ 1221#endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 910536667cf5..20b49d019043 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -196,10 +196,18 @@ int qla4xxx_bsg_request(struct bsg_job *bsg_job);
196int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job); 196int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
197 197
198void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry); 198void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
199int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
200 dma_addr_t phys_addr);
201int qla4xxx_req_template_size(struct scsi_qla_host *ha);
202void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
203void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
204void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
199 205
200extern int ql4xextended_error_logging; 206extern int ql4xextended_error_logging;
201extern int ql4xdontresethba; 207extern int ql4xdontresethba;
202extern int ql4xenablemsix; 208extern int ql4xenablemsix;
209extern int ql4xmdcapmask;
210extern int ql4xenablemd;
203 211
204extern struct device_attribute *qla4xxx_host_attrs[]; 212extern struct device_attribute *qla4xxx_host_attrs[];
205#endif /* _QLA4x_GBL_H */ 213#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 90ee5d8fa731..bf36723b84e1 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -277,6 +277,94 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
277 return ipv4_wait|ipv6_wait; 277 return ipv4_wait|ipv6_wait;
278} 278}
279 279
280/**
281 * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
282 * @ha: pointer to host adapter structure.
283 **/
284void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
285{
286 int status;
287 uint32_t capture_debug_level;
288 int hdr_entry_bit, k;
289 void *md_tmp;
290 dma_addr_t md_tmp_dma;
291 struct qla4_8xxx_minidump_template_hdr *md_hdr;
292
293 if (ha->fw_dump) {
294 ql4_printk(KERN_WARNING, ha,
295 "Firmware dump previously allocated.\n");
296 return;
297 }
298
299 status = qla4xxx_req_template_size(ha);
300 if (status != QLA_SUCCESS) {
301 ql4_printk(KERN_INFO, ha,
302 "scsi%ld: Failed to get template size\n",
303 ha->host_no);
304 return;
305 }
306
307 clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
308
309 /* Allocate memory for saving the template */
310 md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
311 &md_tmp_dma, GFP_KERNEL);
312
313 /* Request template */
314 status = qla4xxx_get_minidump_template(ha, md_tmp_dma);
315 if (status != QLA_SUCCESS) {
316 ql4_printk(KERN_INFO, ha,
317 "scsi%ld: Failed to get minidump template\n",
318 ha->host_no);
319 goto alloc_cleanup;
320 }
321
322 md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
323
324 capture_debug_level = md_hdr->capture_debug_level;
325
326 /* Get capture mask based on module loadtime setting. */
327 if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
328 ha->fw_dump_capture_mask = ql4xmdcapmask;
329 else
330 ha->fw_dump_capture_mask = capture_debug_level;
331
332 md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
333
334 DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n",
335 md_hdr->num_of_entries));
336 DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n",
337 ha->fw_dump_tmplt_size));
338 DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n",
339 ha->fw_dump_capture_mask));
340
341 /* Calculate fw_dump_size */
342 for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
343 hdr_entry_bit <<= 1, k++) {
344 if (hdr_entry_bit & ha->fw_dump_capture_mask)
345 ha->fw_dump_size += md_hdr->capture_size_array[k];
346 }
347
348 /* Total firmware dump size including command header */
349 ha->fw_dump_size += ha->fw_dump_tmplt_size;
350 ha->fw_dump = vmalloc(ha->fw_dump_size);
351 if (!ha->fw_dump)
352 goto alloc_cleanup;
353
354 DEBUG2(ql4_printk(KERN_INFO, ha,
355 "Minidump Tempalate Size = 0x%x KB\n",
356 ha->fw_dump_tmplt_size));
357 DEBUG2(ql4_printk(KERN_INFO, ha,
358 "Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
359
360 memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
361 ha->fw_dump_tmplt_hdr = ha->fw_dump;
362
363alloc_cleanup:
364 dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
365 md_tmp, md_tmp_dma);
366}
367
280static int qla4xxx_fw_ready(struct scsi_qla_host *ha) 368static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
281{ 369{
282 uint32_t timeout_count; 370 uint32_t timeout_count;
@@ -445,9 +533,13 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
445 "control block\n", ha->host_no, __func__)); 533 "control block\n", ha->host_no, __func__));
446 return status; 534 return status;
447 } 535 }
536
448 if (!qla4xxx_fw_ready(ha)) 537 if (!qla4xxx_fw_ready(ha))
449 return status; 538 return status;
450 539
540 if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
541 qla4xxx_alloc_fw_dump(ha);
542
451 return qla4xxx_get_firmware_status(ha); 543 return qla4xxx_get_firmware_status(ha);
452} 544}
453 545
@@ -884,8 +976,8 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
884 switch (state) { 976 switch (state) {
885 case DDB_DS_SESSION_ACTIVE: 977 case DDB_DS_SESSION_ACTIVE:
886 case DDB_DS_DISCOVERY: 978 case DDB_DS_DISCOVERY:
887 ddb_entry->unblock_sess(ddb_entry->sess);
888 qla4xxx_update_session_conn_param(ha, ddb_entry); 979 qla4xxx_update_session_conn_param(ha, ddb_entry);
980 ddb_entry->unblock_sess(ddb_entry->sess);
889 status = QLA_SUCCESS; 981 status = QLA_SUCCESS;
890 break; 982 break;
891 case DDB_DS_SESSION_FAILED: 983 case DDB_DS_SESSION_FAILED:
@@ -897,6 +989,7 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
897 } 989 }
898 break; 990 break;
899 case DDB_DS_SESSION_ACTIVE: 991 case DDB_DS_SESSION_ACTIVE:
992 case DDB_DS_DISCOVERY:
900 switch (state) { 993 switch (state) {
901 case DDB_DS_SESSION_FAILED: 994 case DDB_DS_SESSION_FAILED:
902 /* 995 /*
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 7ac21dabbf22..cab8f665a41f 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -51,25 +51,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
51 } 51 }
52 } 52 }
53 53
54 if (is_qla8022(ha)) {
55 if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
56 DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
57 "prematurely completing mbx cmd as firmware "
58 "recovery detected\n", ha->host_no, __func__));
59 return status;
60 }
61 /* Do not send any mbx cmd if h/w is in failed state*/
62 qla4_8xxx_idc_lock(ha);
63 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
64 qla4_8xxx_idc_unlock(ha);
65 if (dev_state == QLA82XX_DEV_FAILED) {
66 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
67 "failed state, do not send any mailbox commands\n",
68 ha->host_no, __func__);
69 return status;
70 }
71 }
72
73 if ((is_aer_supported(ha)) && 54 if ((is_aer_supported(ha)) &&
74 (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) { 55 (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
75 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, " 56 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
@@ -96,6 +77,25 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
96 msleep(10); 77 msleep(10);
97 } 78 }
98 79
80 if (is_qla8022(ha)) {
81 if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
82 DEBUG2(ql4_printk(KERN_WARNING, ha,
83 "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
84 ha->host_no, __func__));
85 goto mbox_exit;
86 }
87 /* Do not send any mbx cmd if h/w is in failed state*/
88 qla4_8xxx_idc_lock(ha);
89 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
90 qla4_8xxx_idc_unlock(ha);
91 if (dev_state == QLA82XX_DEV_FAILED) {
92 ql4_printk(KERN_WARNING, ha,
93 "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
94 ha->host_no, __func__);
95 goto mbox_exit;
96 }
97 }
98
99 spin_lock_irqsave(&ha->hardware_lock, flags); 99 spin_lock_irqsave(&ha->hardware_lock, flags);
100 100
101 ha->mbox_status_count = outCount; 101 ha->mbox_status_count = outCount;
@@ -270,6 +270,79 @@ mbox_exit:
270 return status; 270 return status;
271} 271}
272 272
273/**
274 * qla4xxx_get_minidump_template - Get the firmware template
275 * @ha: Pointer to host adapter structure.
276 * @phys_addr: dma address for template
277 *
278 * Obtain the minidump template from firmware during initialization
279 * as it may not be available when minidump is desired.
280 **/
281int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
282 dma_addr_t phys_addr)
283{
284 uint32_t mbox_cmd[MBOX_REG_COUNT];
285 uint32_t mbox_sts[MBOX_REG_COUNT];
286 int status;
287
288 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
289 memset(&mbox_sts, 0, sizeof(mbox_sts));
290
291 mbox_cmd[0] = MBOX_CMD_MINIDUMP;
292 mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
293 mbox_cmd[2] = LSDW(phys_addr);
294 mbox_cmd[3] = MSDW(phys_addr);
295 mbox_cmd[4] = ha->fw_dump_tmplt_size;
296 mbox_cmd[5] = 0;
297
298 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
299 &mbox_sts[0]);
300 if (status != QLA_SUCCESS) {
301 DEBUG2(ql4_printk(KERN_INFO, ha,
302 "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
303 ha->host_no, __func__, mbox_cmd[0],
304 mbox_sts[0], mbox_sts[1]));
305 }
306 return status;
307}
308
309/**
310 * qla4xxx_req_template_size - Get minidump template size from firmware.
311 * @ha: Pointer to host adapter structure.
312 **/
313int qla4xxx_req_template_size(struct scsi_qla_host *ha)
314{
315 uint32_t mbox_cmd[MBOX_REG_COUNT];
316 uint32_t mbox_sts[MBOX_REG_COUNT];
317 int status;
318
319 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
320 memset(&mbox_sts, 0, sizeof(mbox_sts));
321
322 mbox_cmd[0] = MBOX_CMD_MINIDUMP;
323 mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
324
325 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
326 &mbox_sts[0]);
327 if (status == QLA_SUCCESS) {
328 ha->fw_dump_tmplt_size = mbox_sts[1];
329 DEBUG2(ql4_printk(KERN_INFO, ha,
330 "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
331 __func__, mbox_sts[0], mbox_sts[1],
332 mbox_sts[2], mbox_sts[3], mbox_sts[4],
333 mbox_sts[5], mbox_sts[6], mbox_sts[7]));
334 if (ha->fw_dump_tmplt_size == 0)
335 status = QLA_ERROR;
336 } else {
337 ql4_printk(KERN_WARNING, ha,
338 "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
339 __func__, mbox_sts[0], mbox_sts[1]);
340 status = QLA_ERROR;
341 }
342
343 return status;
344}
345
273void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha) 346void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
274{ 347{
275 set_bit(AF_FW_RECOVERY, &ha->flags); 348 set_bit(AF_FW_RECOVERY, &ha->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e1e46b6dac75..228b67020d2c 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -7,6 +7,7 @@
7#include <linux/delay.h> 7#include <linux/delay.h>
8#include <linux/io.h> 8#include <linux/io.h>
9#include <linux/pci.h> 9#include <linux/pci.h>
10#include <linux/ratelimit.h>
10#include "ql4_def.h" 11#include "ql4_def.h"
11#include "ql4_glbl.h" 12#include "ql4_glbl.h"
12 13
@@ -420,6 +421,38 @@ qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
420 return data; 421 return data;
421} 422}
422 423
424/* Minidump related functions */
425static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
426 u32 data, uint8_t flag)
427{
428 uint32_t win_read, off_value, rval = QLA_SUCCESS;
429
430 off_value = off & 0xFFFF0000;
431 writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
432
433 /* Read back value to make sure write has gone through before trying
434 * to use it.
435 */
436 win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
437 if (win_read != off_value) {
438 DEBUG2(ql4_printk(KERN_INFO, ha,
439 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
440 __func__, off_value, win_read, off));
441 return QLA_ERROR;
442 }
443
444 off_value = off & 0x0000FFFF;
445
446 if (flag)
447 writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
448 ha->nx_pcibase));
449 else
450 rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
451 ha->nx_pcibase));
452
453 return rval;
454}
455
423#define CRB_WIN_LOCK_TIMEOUT 100000000 456#define CRB_WIN_LOCK_TIMEOUT 100000000
424 457
425int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha) 458int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
@@ -1252,9 +1285,9 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
1252 } 1285 }
1253 1286
1254 if (j >= MAX_CTL_CHECK) { 1287 if (j >= MAX_CTL_CHECK) {
1255 if (printk_ratelimit()) 1288 printk_ratelimited(KERN_ERR
1256 ql4_printk(KERN_ERR, ha, 1289 "%s: failed to read through agent\n",
1257 "failed to read through agent\n"); 1290 __func__);
1258 break; 1291 break;
1259 } 1292 }
1260 1293
@@ -1390,7 +1423,8 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
1390 if (j >= MAX_CTL_CHECK) { 1423 if (j >= MAX_CTL_CHECK) {
1391 if (printk_ratelimit()) 1424 if (printk_ratelimit())
1392 ql4_printk(KERN_ERR, ha, 1425 ql4_printk(KERN_ERR, ha,
1393 "failed to write through agent\n"); 1426 "%s: failed to read through agent\n",
1427 __func__);
1394 ret = -1; 1428 ret = -1;
1395 break; 1429 break;
1396 } 1430 }
@@ -1462,6 +1496,8 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
1462 1496
1463 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1497 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
1464 drv_active |= (1 << (ha->func_num * 4)); 1498 drv_active |= (1 << (ha->func_num * 4));
1499 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
1500 __func__, ha->host_no, drv_active);
1465 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 1501 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
1466} 1502}
1467 1503
@@ -1472,6 +1508,8 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
1472 1508
1473 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1509 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
1474 drv_active &= ~(1 << (ha->func_num * 4)); 1510 drv_active &= ~(1 << (ha->func_num * 4));
1511 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
1512 __func__, ha->host_no, drv_active);
1475 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 1513 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
1476} 1514}
1477 1515
@@ -1497,6 +1535,8 @@ qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
1497 1535
1498 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1536 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
1499 drv_state |= (1 << (ha->func_num * 4)); 1537 drv_state |= (1 << (ha->func_num * 4));
1538 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
1539 __func__, ha->host_no, drv_state);
1500 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 1540 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
1501} 1541}
1502 1542
@@ -1507,6 +1547,8 @@ qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
1507 1547
1508 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1548 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
1509 drv_state &= ~(1 << (ha->func_num * 4)); 1549 drv_state &= ~(1 << (ha->func_num * 4));
1550 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
1551 __func__, ha->host_no, drv_state);
1510 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 1552 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
1511} 1553}
1512 1554
@@ -1601,6 +1643,629 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
1601 qla4_8xxx_rom_unlock(ha); 1643 qla4_8xxx_rom_unlock(ha);
1602} 1644}
1603 1645
1646static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
1647 struct qla82xx_minidump_entry_hdr *entry_hdr,
1648 uint32_t **d_ptr)
1649{
1650 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
1651 struct qla82xx_minidump_entry_crb *crb_hdr;
1652 uint32_t *data_ptr = *d_ptr;
1653
1654 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1655 crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
1656 r_addr = crb_hdr->addr;
1657 r_stride = crb_hdr->crb_strd.addr_stride;
1658 loop_cnt = crb_hdr->op_count;
1659
1660 for (i = 0; i < loop_cnt; i++) {
1661 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
1662 *data_ptr++ = cpu_to_le32(r_addr);
1663 *data_ptr++ = cpu_to_le32(r_value);
1664 r_addr += r_stride;
1665 }
1666 *d_ptr = data_ptr;
1667}
1668
1669static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1670 struct qla82xx_minidump_entry_hdr *entry_hdr,
1671 uint32_t **d_ptr)
1672{
1673 uint32_t addr, r_addr, c_addr, t_r_addr;
1674 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
1675 unsigned long p_wait, w_time, p_mask;
1676 uint32_t c_value_w, c_value_r;
1677 struct qla82xx_minidump_entry_cache *cache_hdr;
1678 int rval = QLA_ERROR;
1679 uint32_t *data_ptr = *d_ptr;
1680
1681 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1682 cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
1683
1684 loop_count = cache_hdr->op_count;
1685 r_addr = cache_hdr->read_addr;
1686 c_addr = cache_hdr->control_addr;
1687 c_value_w = cache_hdr->cache_ctrl.write_value;
1688
1689 t_r_addr = cache_hdr->tag_reg_addr;
1690 t_value = cache_hdr->addr_ctrl.init_tag_value;
1691 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
1692 p_wait = cache_hdr->cache_ctrl.poll_wait;
1693 p_mask = cache_hdr->cache_ctrl.poll_mask;
1694
1695 for (i = 0; i < loop_count; i++) {
1696 qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
1697
1698 if (c_value_w)
1699 qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
1700
1701 if (p_mask) {
1702 w_time = jiffies + p_wait;
1703 do {
1704 c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
1705 0, 0);
1706 if ((c_value_r & p_mask) == 0) {
1707 break;
1708 } else if (time_after_eq(jiffies, w_time)) {
1709 /* capturing dump failed */
1710 return rval;
1711 }
1712 } while (1);
1713 }
1714
1715 addr = r_addr;
1716 for (k = 0; k < r_cnt; k++) {
1717 r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
1718 *data_ptr++ = cpu_to_le32(r_value);
1719 addr += cache_hdr->read_ctrl.read_addr_stride;
1720 }
1721
1722 t_value += cache_hdr->addr_ctrl.tag_value_stride;
1723 }
1724 *d_ptr = data_ptr;
1725 return QLA_SUCCESS;
1726}
1727
1728static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1729 struct qla82xx_minidump_entry_hdr *entry_hdr)
1730{
1731 struct qla82xx_minidump_entry_crb *crb_entry;
1732 uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
1733 uint32_t crb_addr;
1734 unsigned long wtime;
1735 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
1736 int i;
1737
1738 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1739 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
1740 ha->fw_dump_tmplt_hdr;
1741 crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
1742
1743 crb_addr = crb_entry->addr;
1744 for (i = 0; i < crb_entry->op_count; i++) {
1745 opcode = crb_entry->crb_ctrl.opcode;
1746 if (opcode & QLA82XX_DBG_OPCODE_WR) {
1747 qla4_8xxx_md_rw_32(ha, crb_addr,
1748 crb_entry->value_1, 1);
1749 opcode &= ~QLA82XX_DBG_OPCODE_WR;
1750 }
1751 if (opcode & QLA82XX_DBG_OPCODE_RW) {
1752 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1753 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
1754 opcode &= ~QLA82XX_DBG_OPCODE_RW;
1755 }
1756 if (opcode & QLA82XX_DBG_OPCODE_AND) {
1757 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1758 read_value &= crb_entry->value_2;
1759 opcode &= ~QLA82XX_DBG_OPCODE_AND;
1760 if (opcode & QLA82XX_DBG_OPCODE_OR) {
1761 read_value |= crb_entry->value_3;
1762 opcode &= ~QLA82XX_DBG_OPCODE_OR;
1763 }
1764 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
1765 }
1766 if (opcode & QLA82XX_DBG_OPCODE_OR) {
1767 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1768 read_value |= crb_entry->value_3;
1769 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
1770 opcode &= ~QLA82XX_DBG_OPCODE_OR;
1771 }
1772 if (opcode & QLA82XX_DBG_OPCODE_POLL) {
1773 poll_time = crb_entry->crb_strd.poll_timeout;
1774 wtime = jiffies + poll_time;
1775 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1776
1777 do {
1778 if ((read_value & crb_entry->value_2) ==
1779 crb_entry->value_1)
1780 break;
1781 else if (time_after_eq(jiffies, wtime)) {
1782 /* capturing dump failed */
1783 rval = QLA_ERROR;
1784 break;
1785 } else
1786 read_value = qla4_8xxx_md_rw_32(ha,
1787 crb_addr, 0, 0);
1788 } while (1);
1789 opcode &= ~QLA82XX_DBG_OPCODE_POLL;
1790 }
1791
1792 if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
1793 if (crb_entry->crb_strd.state_index_a) {
1794 index = crb_entry->crb_strd.state_index_a;
1795 addr = tmplt_hdr->saved_state_array[index];
1796 } else {
1797 addr = crb_addr;
1798 }
1799
1800 read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
1801 index = crb_entry->crb_ctrl.state_index_v;
1802 tmplt_hdr->saved_state_array[index] = read_value;
1803 opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
1804 }
1805
1806 if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
1807 if (crb_entry->crb_strd.state_index_a) {
1808 index = crb_entry->crb_strd.state_index_a;
1809 addr = tmplt_hdr->saved_state_array[index];
1810 } else {
1811 addr = crb_addr;
1812 }
1813
1814 if (crb_entry->crb_ctrl.state_index_v) {
1815 index = crb_entry->crb_ctrl.state_index_v;
1816 read_value =
1817 tmplt_hdr->saved_state_array[index];
1818 } else {
1819 read_value = crb_entry->value_1;
1820 }
1821
1822 qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
1823 opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
1824 }
1825
1826 if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
1827 index = crb_entry->crb_ctrl.state_index_v;
1828 read_value = tmplt_hdr->saved_state_array[index];
1829 read_value <<= crb_entry->crb_ctrl.shl;
1830 read_value >>= crb_entry->crb_ctrl.shr;
1831 if (crb_entry->value_2)
1832 read_value &= crb_entry->value_2;
1833 read_value |= crb_entry->value_3;
1834 read_value += crb_entry->value_1;
1835 tmplt_hdr->saved_state_array[index] = read_value;
1836 opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
1837 }
1838 crb_addr += crb_entry->crb_strd.addr_stride;
1839 }
1840 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
1841 return rval;
1842}
1843
1844static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
1845 struct qla82xx_minidump_entry_hdr *entry_hdr,
1846 uint32_t **d_ptr)
1847{
1848 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
1849 struct qla82xx_minidump_entry_rdocm *ocm_hdr;
1850 uint32_t *data_ptr = *d_ptr;
1851
1852 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1853 ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
1854 r_addr = ocm_hdr->read_addr;
1855 r_stride = ocm_hdr->read_addr_stride;
1856 loop_cnt = ocm_hdr->op_count;
1857
1858 DEBUG2(ql4_printk(KERN_INFO, ha,
1859 "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
1860 __func__, r_addr, r_stride, loop_cnt));
1861
1862 for (i = 0; i < loop_cnt; i++) {
1863 r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
1864 *data_ptr++ = cpu_to_le32(r_value);
1865 r_addr += r_stride;
1866 }
1867 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
1868 __func__, (loop_cnt * sizeof(uint32_t))));
1869 *d_ptr = data_ptr;
1870}
1871
1872static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
1873 struct qla82xx_minidump_entry_hdr *entry_hdr,
1874 uint32_t **d_ptr)
1875{
1876 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
1877 struct qla82xx_minidump_entry_mux *mux_hdr;
1878 uint32_t *data_ptr = *d_ptr;
1879
1880 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1881 mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
1882 r_addr = mux_hdr->read_addr;
1883 s_addr = mux_hdr->select_addr;
1884 s_stride = mux_hdr->select_value_stride;
1885 s_value = mux_hdr->select_value;
1886 loop_cnt = mux_hdr->op_count;
1887
1888 for (i = 0; i < loop_cnt; i++) {
1889 qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
1890 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
1891 *data_ptr++ = cpu_to_le32(s_value);
1892 *data_ptr++ = cpu_to_le32(r_value);
1893 s_value += s_stride;
1894 }
1895 *d_ptr = data_ptr;
1896}
1897
1898static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
1899 struct qla82xx_minidump_entry_hdr *entry_hdr,
1900 uint32_t **d_ptr)
1901{
1902 uint32_t addr, r_addr, c_addr, t_r_addr;
1903 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
1904 uint32_t c_value_w;
1905 struct qla82xx_minidump_entry_cache *cache_hdr;
1906 uint32_t *data_ptr = *d_ptr;
1907
1908 cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
1909 loop_count = cache_hdr->op_count;
1910 r_addr = cache_hdr->read_addr;
1911 c_addr = cache_hdr->control_addr;
1912 c_value_w = cache_hdr->cache_ctrl.write_value;
1913
1914 t_r_addr = cache_hdr->tag_reg_addr;
1915 t_value = cache_hdr->addr_ctrl.init_tag_value;
1916 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
1917
1918 for (i = 0; i < loop_count; i++) {
1919 qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
1920 qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
1921 addr = r_addr;
1922 for (k = 0; k < r_cnt; k++) {
1923 r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
1924 *data_ptr++ = cpu_to_le32(r_value);
1925 addr += cache_hdr->read_ctrl.read_addr_stride;
1926 }
1927 t_value += cache_hdr->addr_ctrl.tag_value_stride;
1928 }
1929 *d_ptr = data_ptr;
1930}
1931
1932static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
1933 struct qla82xx_minidump_entry_hdr *entry_hdr,
1934 uint32_t **d_ptr)
1935{
1936 uint32_t s_addr, r_addr;
1937 uint32_t r_stride, r_value, r_cnt, qid = 0;
1938 uint32_t i, k, loop_cnt;
1939 struct qla82xx_minidump_entry_queue *q_hdr;
1940 uint32_t *data_ptr = *d_ptr;
1941
1942 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1943 q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
1944 s_addr = q_hdr->select_addr;
1945 r_cnt = q_hdr->rd_strd.read_addr_cnt;
1946 r_stride = q_hdr->rd_strd.read_addr_stride;
1947 loop_cnt = q_hdr->op_count;
1948
1949 for (i = 0; i < loop_cnt; i++) {
1950 qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
1951 r_addr = q_hdr->read_addr;
1952 for (k = 0; k < r_cnt; k++) {
1953 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
1954 *data_ptr++ = cpu_to_le32(r_value);
1955 r_addr += r_stride;
1956 }
1957 qid += q_hdr->q_strd.queue_id_stride;
1958 }
1959 *d_ptr = data_ptr;
1960}
1961
1962#define MD_DIRECT_ROM_WINDOW 0x42110030
1963#define MD_DIRECT_ROM_READ_BASE 0x42150000
1964
1965static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
1966 struct qla82xx_minidump_entry_hdr *entry_hdr,
1967 uint32_t **d_ptr)
1968{
1969 uint32_t r_addr, r_value;
1970 uint32_t i, loop_cnt;
1971 struct qla82xx_minidump_entry_rdrom *rom_hdr;
1972 uint32_t *data_ptr = *d_ptr;
1973
1974 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1975 rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
1976 r_addr = rom_hdr->read_addr;
1977 loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
1978
1979 DEBUG2(ql4_printk(KERN_INFO, ha,
1980 "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
1981 __func__, r_addr, loop_cnt));
1982
1983 for (i = 0; i < loop_cnt; i++) {
1984 qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
1985 (r_addr & 0xFFFF0000), 1);
1986 r_value = qla4_8xxx_md_rw_32(ha,
1987 MD_DIRECT_ROM_READ_BASE +
1988 (r_addr & 0x0000FFFF), 0, 0);
1989 *data_ptr++ = cpu_to_le32(r_value);
1990 r_addr += sizeof(uint32_t);
1991 }
1992 *d_ptr = data_ptr;
1993}
1994
1995#define MD_MIU_TEST_AGT_CTRL 0x41000090
1996#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
1997#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
1998
1999static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2000 struct qla82xx_minidump_entry_hdr *entry_hdr,
2001 uint32_t **d_ptr)
2002{
2003 uint32_t r_addr, r_value, r_data;
2004 uint32_t i, j, loop_cnt;
2005 struct qla82xx_minidump_entry_rdmem *m_hdr;
2006 unsigned long flags;
2007 uint32_t *data_ptr = *d_ptr;
2008
2009 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2010 m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
2011 r_addr = m_hdr->read_addr;
2012 loop_cnt = m_hdr->read_data_size/16;
2013
2014 DEBUG2(ql4_printk(KERN_INFO, ha,
2015 "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
2016 __func__, r_addr, m_hdr->read_data_size));
2017
2018 if (r_addr & 0xf) {
2019 DEBUG2(ql4_printk(KERN_INFO, ha,
2020 "[%s]: Read addr 0x%x not 16 bytes alligned\n",
2021 __func__, r_addr));
2022 return QLA_ERROR;
2023 }
2024
2025 if (m_hdr->read_data_size % 16) {
2026 DEBUG2(ql4_printk(KERN_INFO, ha,
2027 "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
2028 __func__, m_hdr->read_data_size));
2029 return QLA_ERROR;
2030 }
2031
2032 DEBUG2(ql4_printk(KERN_INFO, ha,
2033 "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
2034 __func__, r_addr, m_hdr->read_data_size, loop_cnt));
2035
2036 write_lock_irqsave(&ha->hw_lock, flags);
2037 for (i = 0; i < loop_cnt; i++) {
2038 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
2039 r_value = 0;
2040 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
2041 r_value = MIU_TA_CTL_ENABLE;
2042 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
2043 r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
2044 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
2045
2046 for (j = 0; j < MAX_CTL_CHECK; j++) {
2047 r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
2048 0, 0);
2049 if ((r_value & MIU_TA_CTL_BUSY) == 0)
2050 break;
2051 }
2052
2053 if (j >= MAX_CTL_CHECK) {
2054 printk_ratelimited(KERN_ERR
2055 "%s: failed to read through agent\n",
2056 __func__);
2057 write_unlock_irqrestore(&ha->hw_lock, flags);
2058 return QLA_SUCCESS;
2059 }
2060
2061 for (j = 0; j < 4; j++) {
2062 r_data = qla4_8xxx_md_rw_32(ha,
2063 MD_MIU_TEST_AGT_RDDATA[j],
2064 0, 0);
2065 *data_ptr++ = cpu_to_le32(r_data);
2066 }
2067
2068 r_addr += 16;
2069 }
2070 write_unlock_irqrestore(&ha->hw_lock, flags);
2071
2072 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
2073 __func__, (loop_cnt * 16)));
2074
2075 *d_ptr = data_ptr;
2076 return QLA_SUCCESS;
2077}
2078
2079static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
2080 struct qla82xx_minidump_entry_hdr *entry_hdr,
2081 int index)
2082{
2083 entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
2084 DEBUG2(ql4_printk(KERN_INFO, ha,
2085 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
2086 ha->host_no, index, entry_hdr->entry_type,
2087 entry_hdr->d_ctrl.entry_capture_mask));
2088}
2089
2090/**
2091 * qla82xx_collect_md_data - Retrieve firmware minidump data.
2092 * @ha: pointer to adapter structure
2093 **/
2094static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2095{
2096 int num_entry_hdr = 0;
2097 struct qla82xx_minidump_entry_hdr *entry_hdr;
2098 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
2099 uint32_t *data_ptr;
2100 uint32_t data_collected = 0;
2101 int i, rval = QLA_ERROR;
2102 uint64_t now;
2103 uint32_t timestamp;
2104
2105 if (!ha->fw_dump) {
2106 ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
2107 __func__, ha->host_no);
2108 return rval;
2109 }
2110
2111 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
2112 ha->fw_dump_tmplt_hdr;
2113 data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
2114 ha->fw_dump_tmplt_size);
2115 data_collected += ha->fw_dump_tmplt_size;
2116
2117 num_entry_hdr = tmplt_hdr->num_of_entries;
2118 ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
2119 __func__, data_ptr);
2120 ql4_printk(KERN_INFO, ha,
2121 "[%s]: no of entry headers in Template: 0x%x\n",
2122 __func__, num_entry_hdr);
2123 ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
2124 __func__, ha->fw_dump_capture_mask);
2125 ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
2126 __func__, ha->fw_dump_size, ha->fw_dump_size);
2127
2128 /* Update current timestamp before taking dump */
2129 now = get_jiffies_64();
2130 timestamp = (u32)(jiffies_to_msecs(now) / 1000);
2131 tmplt_hdr->driver_timestamp = timestamp;
2132
2133 entry_hdr = (struct qla82xx_minidump_entry_hdr *)
2134 (((uint8_t *)ha->fw_dump_tmplt_hdr) +
2135 tmplt_hdr->first_entry_offset);
2136
2137 /* Walk through the entry headers - validate/perform required action */
2138 for (i = 0; i < num_entry_hdr; i++) {
2139 if (data_collected >= ha->fw_dump_size) {
2140 ql4_printk(KERN_INFO, ha,
2141 "Data collected: [0x%x], Total Dump size: [0x%x]\n",
2142 data_collected, ha->fw_dump_size);
2143 return rval;
2144 }
2145
2146 if (!(entry_hdr->d_ctrl.entry_capture_mask &
2147 ha->fw_dump_capture_mask)) {
2148 entry_hdr->d_ctrl.driver_flags |=
2149 QLA82XX_DBG_SKIPPED_FLAG;
2150 goto skip_nxt_entry;
2151 }
2152
2153 DEBUG2(ql4_printk(KERN_INFO, ha,
2154 "Data collected: [0x%x], Dump size left:[0x%x]\n",
2155 data_collected,
2156 (ha->fw_dump_size - data_collected)));
2157
2158 /* Decode the entry type and take required action to capture
2159 * debug data
2160 */
2161 switch (entry_hdr->entry_type) {
2162 case QLA82XX_RDEND:
2163 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2164 break;
2165 case QLA82XX_CNTRL:
2166 rval = qla4_8xxx_minidump_process_control(ha,
2167 entry_hdr);
2168 if (rval != QLA_SUCCESS) {
2169 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2170 goto md_failed;
2171 }
2172 break;
2173 case QLA82XX_RDCRB:
2174 qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
2175 &data_ptr);
2176 break;
2177 case QLA82XX_RDMEM:
2178 rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
2179 &data_ptr);
2180 if (rval != QLA_SUCCESS) {
2181 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2182 goto md_failed;
2183 }
2184 break;
2185 case QLA82XX_BOARD:
2186 case QLA82XX_RDROM:
2187 qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
2188 &data_ptr);
2189 break;
2190 case QLA82XX_L2DTG:
2191 case QLA82XX_L2ITG:
2192 case QLA82XX_L2DAT:
2193 case QLA82XX_L2INS:
2194 rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
2195 &data_ptr);
2196 if (rval != QLA_SUCCESS) {
2197 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2198 goto md_failed;
2199 }
2200 break;
2201 case QLA82XX_L1DAT:
2202 case QLA82XX_L1INS:
2203 qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
2204 &data_ptr);
2205 break;
2206 case QLA82XX_RDOCM:
2207 qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
2208 &data_ptr);
2209 break;
2210 case QLA82XX_RDMUX:
2211 qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
2212 &data_ptr);
2213 break;
2214 case QLA82XX_QUEUE:
2215 qla4_8xxx_minidump_process_queue(ha, entry_hdr,
2216 &data_ptr);
2217 break;
2218 case QLA82XX_RDNOP:
2219 default:
2220 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2221 break;
2222 }
2223
2224 data_collected = (uint8_t *)data_ptr -
2225 ((uint8_t *)((uint8_t *)ha->fw_dump +
2226 ha->fw_dump_tmplt_size));
2227skip_nxt_entry:
2228 /* next entry in the template */
2229 entry_hdr = (struct qla82xx_minidump_entry_hdr *)
2230 (((uint8_t *)entry_hdr) +
2231 entry_hdr->entry_size);
2232 }
2233
2234 if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
2235 ql4_printk(KERN_INFO, ha,
2236 "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
2237 data_collected, ha->fw_dump_size);
2238 goto md_failed;
2239 }
2240
2241 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
2242 __func__, i));
2243md_failed:
2244 return rval;
2245}
2246
2247/**
2248 * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
2249 * @ha: pointer to adapter structure
2250 **/
2251static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
2252{
2253 char event_string[40];
2254 char *envp[] = { event_string, NULL };
2255
2256 switch (code) {
2257 case QL4_UEVENT_CODE_FW_DUMP:
2258 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
2259 ha->host_no);
2260 break;
2261 default:
2262 /*do nothing*/
2263 break;
2264 }
2265
2266 kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
2267}
2268
1604/** 2269/**
1605 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw 2270 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
1606 * @ha: pointer to adapter structure 2271 * @ha: pointer to adapter structure
@@ -1659,6 +2324,15 @@ dev_initialize:
1659 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION); 2324 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
1660 2325
1661 qla4_8xxx_idc_unlock(ha); 2326 qla4_8xxx_idc_unlock(ha);
2327 if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
2328 !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
2329 if (!qla4_8xxx_collect_md_data(ha)) {
2330 qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
2331 } else {
2332 ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
2333 clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
2334 }
2335 }
1662 rval = qla4_8xxx_try_start_fw(ha); 2336 rval = qla4_8xxx_try_start_fw(ha);
1663 qla4_8xxx_idc_lock(ha); 2337 qla4_8xxx_idc_lock(ha);
1664 2338
@@ -1686,6 +2360,7 @@ static void
1686qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha) 2360qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1687{ 2361{
1688 uint32_t dev_state, drv_state, drv_active; 2362 uint32_t dev_state, drv_state, drv_active;
2363 uint32_t active_mask = 0xFFFFFFFF;
1689 unsigned long reset_timeout; 2364 unsigned long reset_timeout;
1690 2365
1691 ql4_printk(KERN_INFO, ha, 2366 ql4_printk(KERN_INFO, ha,
@@ -1697,7 +2372,14 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1697 qla4_8xxx_idc_lock(ha); 2372 qla4_8xxx_idc_lock(ha);
1698 } 2373 }
1699 2374
1700 qla4_8xxx_set_rst_ready(ha); 2375 if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
2376 DEBUG2(ql4_printk(KERN_INFO, ha,
2377 "%s(%ld): reset acknowledged\n",
2378 __func__, ha->host_no));
2379 qla4_8xxx_set_rst_ready(ha);
2380 } else {
2381 active_mask = (~(1 << (ha->func_num * 4)));
2382 }
1701 2383
1702 /* wait for 10 seconds for reset ack from all functions */ 2384 /* wait for 10 seconds for reset ack from all functions */
1703 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); 2385 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
@@ -1709,12 +2391,24 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1709 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", 2391 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
1710 __func__, ha->host_no, drv_state, drv_active); 2392 __func__, ha->host_no, drv_state, drv_active);
1711 2393
1712 while (drv_state != drv_active) { 2394 while (drv_state != (drv_active & active_mask)) {
1713 if (time_after_eq(jiffies, reset_timeout)) { 2395 if (time_after_eq(jiffies, reset_timeout)) {
1714 printk("%s: RESET TIMEOUT!\n", DRIVER_NAME); 2396 ql4_printk(KERN_INFO, ha,
2397 "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
2398 DRIVER_NAME, drv_state, drv_active);
1715 break; 2399 break;
1716 } 2400 }
1717 2401
2402 /*
2403 * When reset_owner times out, check which functions
2404 * acked/did not ack
2405 */
2406 if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
2407 ql4_printk(KERN_INFO, ha,
2408 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
2409 __func__, ha->host_no, drv_state,
2410 drv_active);
2411 }
1718 qla4_8xxx_idc_unlock(ha); 2412 qla4_8xxx_idc_unlock(ha);
1719 msleep(1000); 2413 msleep(1000);
1720 qla4_8xxx_idc_lock(ha); 2414 qla4_8xxx_idc_lock(ha);
@@ -1723,14 +2417,18 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1723 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2417 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
1724 } 2418 }
1725 2419
2420 /* Clear RESET OWNER as we are not going to use it any further */
2421 clear_bit(AF_82XX_RST_OWNER, &ha->flags);
2422
1726 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2423 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1727 ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state, 2424 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
1728 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 2425 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
1729 2426
1730 /* Force to DEV_COLD unless someone else is starting a reset */ 2427 /* Force to DEV_COLD unless someone else is starting a reset */
1731 if (dev_state != QLA82XX_DEV_INITIALIZING) { 2428 if (dev_state != QLA82XX_DEV_INITIALIZING) {
1732 ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); 2429 ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
1733 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 2430 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
2431 qla4_8xxx_set_rst_ready(ha);
1734 } 2432 }
1735} 2433}
1736 2434
@@ -1765,8 +2463,9 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
1765 } 2463 }
1766 2464
1767 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2465 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1768 ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, 2466 DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
1769 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 2467 dev_state, dev_state < MAX_STATES ?
2468 qdev_state[dev_state] : "Unknown"));
1770 2469
1771 /* wait for 30 seconds for device to go ready */ 2470 /* wait for 30 seconds for device to go ready */
1772 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 2471 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -1775,15 +2474,19 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
1775 while (1) { 2474 while (1) {
1776 2475
1777 if (time_after_eq(jiffies, dev_init_timeout)) { 2476 if (time_after_eq(jiffies, dev_init_timeout)) {
1778 ql4_printk(KERN_WARNING, ha, "Device init failed!\n"); 2477 ql4_printk(KERN_WARNING, ha,
2478 "%s: Device Init Failed 0x%x = %s\n",
2479 DRIVER_NAME,
2480 dev_state, dev_state < MAX_STATES ?
2481 qdev_state[dev_state] : "Unknown");
1779 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2482 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
1780 QLA82XX_DEV_FAILED); 2483 QLA82XX_DEV_FAILED);
1781 } 2484 }
1782 2485
1783 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2486 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1784 ql4_printk(KERN_INFO, ha, 2487 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
1785 "2:Device state is 0x%x = %s\n", dev_state, 2488 dev_state, dev_state < MAX_STATES ?
1786 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 2489 qdev_state[dev_state] : "Unknown");
1787 2490
1788 /* NOTE: Make sure idc unlocked upon exit of switch statement */ 2491 /* NOTE: Make sure idc unlocked upon exit of switch statement */
1789 switch (dev_state) { 2492 switch (dev_state) {
@@ -2184,6 +2887,7 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
2184 ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); 2887 ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
2185 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2888 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2186 QLA82XX_DEV_NEED_RESET); 2889 QLA82XX_DEV_NEED_RESET);
2890 set_bit(AF_82XX_RST_OWNER, &ha->flags);
2187 } else 2891 } else
2188 ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n"); 2892 ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
2189 2893
@@ -2195,8 +2899,10 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
2195 qla4_8xxx_clear_rst_ready(ha); 2899 qla4_8xxx_clear_rst_ready(ha);
2196 qla4_8xxx_idc_unlock(ha); 2900 qla4_8xxx_idc_unlock(ha);
2197 2901
2198 if (rval == QLA_SUCCESS) 2902 if (rval == QLA_SUCCESS) {
2903 ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n");
2199 clear_bit(AF_FW_RECOVERY, &ha->flags); 2904 clear_bit(AF_FW_RECOVERY, &ha->flags);
2905 }
2200 2906
2201 return rval; 2907 return rval;
2202} 2908}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index dc7500e47b8b..30258479f100 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -792,4 +792,196 @@ struct crb_addr_pair {
792#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0) 792#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
793#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4) 793#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
794 794
795/* Minidump related */
796
797/* Entry Type Defines */
798#define QLA82XX_RDNOP 0
799#define QLA82XX_RDCRB 1
800#define QLA82XX_RDMUX 2
801#define QLA82XX_QUEUE 3
802#define QLA82XX_BOARD 4
803#define QLA82XX_RDOCM 6
804#define QLA82XX_PREGS 7
805#define QLA82XX_L1DTG 8
806#define QLA82XX_L1ITG 9
807#define QLA82XX_L1DAT 11
808#define QLA82XX_L1INS 12
809#define QLA82XX_L2DTG 21
810#define QLA82XX_L2ITG 22
811#define QLA82XX_L2DAT 23
812#define QLA82XX_L2INS 24
813#define QLA82XX_RDROM 71
814#define QLA82XX_RDMEM 72
815#define QLA82XX_CNTRL 98
816#define QLA82XX_RDEND 255
817
818/* Opcodes for Control Entries.
819 * These Flags are bit fields.
820 */
821#define QLA82XX_DBG_OPCODE_WR 0x01
822#define QLA82XX_DBG_OPCODE_RW 0x02
823#define QLA82XX_DBG_OPCODE_AND 0x04
824#define QLA82XX_DBG_OPCODE_OR 0x08
825#define QLA82XX_DBG_OPCODE_POLL 0x10
826#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
827#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
828#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
829
830/* Driver Flags */
831#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
832#define QLA82XX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
833 * mismatch */
834
835/* Driver_code is for driver to write some info about the entry
836 * currently not used.
837 */
838struct qla82xx_minidump_entry_hdr {
839 uint32_t entry_type;
840 uint32_t entry_size;
841 uint32_t entry_capture_size;
842 struct {
843 uint8_t entry_capture_mask;
844 uint8_t entry_code;
845 uint8_t driver_code;
846 uint8_t driver_flags;
847 } d_ctrl;
848};
849
850/* Read CRB entry header */
851struct qla82xx_minidump_entry_crb {
852 struct qla82xx_minidump_entry_hdr h;
853 uint32_t addr;
854 struct {
855 uint8_t addr_stride;
856 uint8_t state_index_a;
857 uint16_t poll_timeout;
858 } crb_strd;
859 uint32_t data_size;
860 uint32_t op_count;
861
862 struct {
863 uint8_t opcode;
864 uint8_t state_index_v;
865 uint8_t shl;
866 uint8_t shr;
867 } crb_ctrl;
868
869 uint32_t value_1;
870 uint32_t value_2;
871 uint32_t value_3;
872};
873
874struct qla82xx_minidump_entry_cache {
875 struct qla82xx_minidump_entry_hdr h;
876 uint32_t tag_reg_addr;
877 struct {
878 uint16_t tag_value_stride;
879 uint16_t init_tag_value;
880 } addr_ctrl;
881 uint32_t data_size;
882 uint32_t op_count;
883 uint32_t control_addr;
884 struct {
885 uint16_t write_value;
886 uint8_t poll_mask;
887 uint8_t poll_wait;
888 } cache_ctrl;
889 uint32_t read_addr;
890 struct {
891 uint8_t read_addr_stride;
892 uint8_t read_addr_cnt;
893 uint16_t rsvd_1;
894 } read_ctrl;
895};
896
897/* Read OCM */
898struct qla82xx_minidump_entry_rdocm {
899 struct qla82xx_minidump_entry_hdr h;
900 uint32_t rsvd_0;
901 uint32_t rsvd_1;
902 uint32_t data_size;
903 uint32_t op_count;
904 uint32_t rsvd_2;
905 uint32_t rsvd_3;
906 uint32_t read_addr;
907 uint32_t read_addr_stride;
908};
909
910/* Read Memory */
911struct qla82xx_minidump_entry_rdmem {
912 struct qla82xx_minidump_entry_hdr h;
913 uint32_t rsvd[6];
914 uint32_t read_addr;
915 uint32_t read_data_size;
916};
917
918/* Read ROM */
919struct qla82xx_minidump_entry_rdrom {
920 struct qla82xx_minidump_entry_hdr h;
921 uint32_t rsvd[6];
922 uint32_t read_addr;
923 uint32_t read_data_size;
924};
925
926/* Mux entry */
927struct qla82xx_minidump_entry_mux {
928 struct qla82xx_minidump_entry_hdr h;
929 uint32_t select_addr;
930 uint32_t rsvd_0;
931 uint32_t data_size;
932 uint32_t op_count;
933 uint32_t select_value;
934 uint32_t select_value_stride;
935 uint32_t read_addr;
936 uint32_t rsvd_1;
937};
938
939/* Queue entry */
940struct qla82xx_minidump_entry_queue {
941 struct qla82xx_minidump_entry_hdr h;
942 uint32_t select_addr;
943 struct {
944 uint16_t queue_id_stride;
945 uint16_t rsvd_0;
946 } q_strd;
947 uint32_t data_size;
948 uint32_t op_count;
949 uint32_t rsvd_1;
950 uint32_t rsvd_2;
951 uint32_t read_addr;
952 struct {
953 uint8_t read_addr_stride;
954 uint8_t read_addr_cnt;
955 uint16_t rsvd_3;
956 } rd_strd;
957};
958
959#define QLA82XX_MINIDUMP_OCM0_SIZE (256 * 1024)
960#define QLA82XX_MINIDUMP_L1C_SIZE (256 * 1024)
961#define QLA82XX_MINIDUMP_L2C_SIZE 1572864
962#define QLA82XX_MINIDUMP_COMMON_STR_SIZE 0
963#define QLA82XX_MINIDUMP_FCOE_STR_SIZE 0
964#define QLA82XX_MINIDUMP_MEM_SIZE 0
965#define QLA82XX_MAX_ENTRY_HDR 4
966
967struct qla82xx_minidump {
968 uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
969 uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
970 uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
971 uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
972 uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
973 uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
974};
975
976#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
977#define RQST_TMPLT_SIZE 0x0
978#define RQST_TMPLT 0x1
979#define MD_DIRECT_ROM_WINDOW 0x42110030
980#define MD_DIRECT_ROM_READ_BASE 0x42150000
981#define MD_MIU_TEST_AGT_CTRL 0x41000090
982#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
983#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
984
985static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
986 0x410000AC, 0x410000B8, 0x410000BC };
795#endif 987#endif
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ee47820c30a6..cd15678f9ada 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -68,12 +68,34 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
68 " Maximum queue depth to report for target devices.\n" 68 " Maximum queue depth to report for target devices.\n"
69 "\t\t Default: 32."); 69 "\t\t Default: 32.");
70 70
71static int ql4xqfulltracking = 1;
72module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
73MODULE_PARM_DESC(ql4xqfulltracking,
74 " Enable or disable dynamic tracking and adjustment of\n"
75 "\t\t scsi device queue depth.\n"
76 "\t\t 0 - Disable.\n"
77 "\t\t 1 - Enable. (Default)");
78
71static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 79static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
72module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 80module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
73MODULE_PARM_DESC(ql4xsess_recovery_tmo, 81MODULE_PARM_DESC(ql4xsess_recovery_tmo,
74 " Target Session Recovery Timeout.\n" 82 " Target Session Recovery Timeout.\n"
75 "\t\t Default: 120 sec."); 83 "\t\t Default: 120 sec.");
76 84
85int ql4xmdcapmask = 0x1F;
86module_param(ql4xmdcapmask, int, S_IRUGO);
87MODULE_PARM_DESC(ql4xmdcapmask,
88 " Set the Minidump driver capture mask level.\n"
89 "\t\t Default is 0x1F.\n"
90 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
91
92int ql4xenablemd = 1;
93module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
94MODULE_PARM_DESC(ql4xenablemd,
95 " Set to enable minidump.\n"
96 "\t\t 0 - disable minidump\n"
97 "\t\t 1 - enable minidump (Default)");
98
77static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 99static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
78/* 100/*
79 * SCSI host template entry points 101 * SCSI host template entry points
@@ -140,6 +162,8 @@ static int qla4xxx_slave_configure(struct scsi_device *device);
140static void qla4xxx_slave_destroy(struct scsi_device *sdev); 162static void qla4xxx_slave_destroy(struct scsi_device *sdev);
141static umode_t ql4_attr_is_visible(int param_type, int param); 163static umode_t ql4_attr_is_visible(int param_type, int param);
142static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 164static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
165static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
166 int reason);
143 167
144static struct qla4_8xxx_legacy_intr_set legacy_intr[] = 168static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
145 QLA82XX_LEGACY_INTR_CONFIG; 169 QLA82XX_LEGACY_INTR_CONFIG;
@@ -159,6 +183,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
159 .slave_configure = qla4xxx_slave_configure, 183 .slave_configure = qla4xxx_slave_configure,
160 .slave_alloc = qla4xxx_slave_alloc, 184 .slave_alloc = qla4xxx_slave_alloc,
161 .slave_destroy = qla4xxx_slave_destroy, 185 .slave_destroy = qla4xxx_slave_destroy,
186 .change_queue_depth = qla4xxx_change_queue_depth,
162 187
163 .this_id = -1, 188 .this_id = -1,
164 .cmd_per_lun = 3, 189 .cmd_per_lun = 3,
@@ -1555,19 +1580,53 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1555 struct iscsi_session *sess; 1580 struct iscsi_session *sess;
1556 struct ddb_entry *ddb_entry; 1581 struct ddb_entry *ddb_entry;
1557 struct scsi_qla_host *ha; 1582 struct scsi_qla_host *ha;
1558 unsigned long flags; 1583 unsigned long flags, wtime;
1584 struct dev_db_entry *fw_ddb_entry = NULL;
1585 dma_addr_t fw_ddb_entry_dma;
1586 uint32_t ddb_state;
1587 int ret;
1559 1588
1560 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 1589 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1561 sess = cls_sess->dd_data; 1590 sess = cls_sess->dd_data;
1562 ddb_entry = sess->dd_data; 1591 ddb_entry = sess->dd_data;
1563 ha = ddb_entry->ha; 1592 ha = ddb_entry->ha;
1564 1593
1594 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1595 &fw_ddb_entry_dma, GFP_KERNEL);
1596 if (!fw_ddb_entry) {
1597 ql4_printk(KERN_ERR, ha,
1598 "%s: Unable to allocate dma buffer\n", __func__);
1599 goto destroy_session;
1600 }
1601
1602 wtime = jiffies + (HZ * LOGOUT_TOV);
1603 do {
1604 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
1605 fw_ddb_entry, fw_ddb_entry_dma,
1606 NULL, NULL, &ddb_state, NULL,
1607 NULL, NULL);
1608 if (ret == QLA_ERROR)
1609 goto destroy_session;
1610
1611 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
1612 (ddb_state == DDB_DS_SESSION_FAILED))
1613 goto destroy_session;
1614
1615 schedule_timeout_uninterruptible(HZ);
1616 } while ((time_after(wtime, jiffies)));
1617
1618destroy_session:
1565 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 1619 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1566 1620
1567 spin_lock_irqsave(&ha->hardware_lock, flags); 1621 spin_lock_irqsave(&ha->hardware_lock, flags);
1568 qla4xxx_free_ddb(ha, ddb_entry); 1622 qla4xxx_free_ddb(ha, ddb_entry);
1569 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1623 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1624
1570 iscsi_session_teardown(cls_sess); 1625 iscsi_session_teardown(cls_sess);
1626
1627 if (fw_ddb_entry)
1628 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1629 fw_ddb_entry, fw_ddb_entry_dma);
1571} 1630}
1572 1631
1573static struct iscsi_cls_conn * 1632static struct iscsi_cls_conn *
@@ -2220,6 +2279,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2220 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, 2279 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2221 ha->queues_dma); 2280 ha->queues_dma);
2222 2281
2282 if (ha->fw_dump)
2283 vfree(ha->fw_dump);
2284
2223 ha->queues_len = 0; 2285 ha->queues_len = 0;
2224 ha->queues = NULL; 2286 ha->queues = NULL;
2225 ha->queues_dma = 0; 2287 ha->queues_dma = 0;
@@ -2229,6 +2291,8 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2229 ha->response_dma = 0; 2291 ha->response_dma = 0;
2230 ha->shadow_regs = NULL; 2292 ha->shadow_regs = NULL;
2231 ha->shadow_regs_dma = 0; 2293 ha->shadow_regs_dma = 0;
2294 ha->fw_dump = NULL;
2295 ha->fw_dump_size = 0;
2232 2296
2233 /* Free srb pool. */ 2297 /* Free srb pool. */
2234 if (ha->srb_mempool) 2298 if (ha->srb_mempool)
@@ -5023,6 +5087,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5023 5087
5024 set_bit(AF_INIT_DONE, &ha->flags); 5088 set_bit(AF_INIT_DONE, &ha->flags);
5025 5089
5090 qla4_8xxx_alloc_sysfs_attr(ha);
5091
5026 printk(KERN_INFO 5092 printk(KERN_INFO
5027 " QLogic iSCSI HBA Driver version: %s\n" 5093 " QLogic iSCSI HBA Driver version: %s\n"
5028 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 5094 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -5149,6 +5215,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
5149 iscsi_boot_destroy_kset(ha->boot_kset); 5215 iscsi_boot_destroy_kset(ha->boot_kset);
5150 5216
5151 qla4xxx_destroy_fw_ddb_session(ha); 5217 qla4xxx_destroy_fw_ddb_session(ha);
5218 qla4_8xxx_free_sysfs_attr(ha);
5152 5219
5153 scsi_remove_host(ha->host); 5220 scsi_remove_host(ha->host);
5154 5221
@@ -5217,6 +5284,15 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
5217 scsi_deactivate_tcq(sdev, 1); 5284 scsi_deactivate_tcq(sdev, 1);
5218} 5285}
5219 5286
5287static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
5288 int reason)
5289{
5290 if (!ql4xqfulltracking)
5291 return -EOPNOTSUPP;
5292
5293 return iscsi_change_queue_depth(sdev, qdepth, reason);
5294}
5295
5220/** 5296/**
5221 * qla4xxx_del_from_active_array - returns an active srb 5297 * qla4xxx_del_from_active_array - returns an active srb
5222 * @ha: Pointer to host adapter structure. 5298 * @ha: Pointer to host adapter structure.
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 97b30c108e36..cc1cc3518b87 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k16" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k17"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 61c82a345f82..bbbc9c918d4c 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -90,11 +90,9 @@ unsigned int scsi_logging_level;
90EXPORT_SYMBOL(scsi_logging_level); 90EXPORT_SYMBOL(scsi_logging_level);
91#endif 91#endif
92 92
93#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD) 93/* sd, scsi core and power management need to coordinate flushing async actions */
94/* sd and scsi_pm need to coordinate flushing async actions */
95LIST_HEAD(scsi_sd_probe_domain); 94LIST_HEAD(scsi_sd_probe_domain);
96EXPORT_SYMBOL(scsi_sd_probe_domain); 95EXPORT_SYMBOL(scsi_sd_probe_domain);
97#endif
98 96
99/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. 97/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
100 * You may not alter any existing entry (although adding new ones is 98 * You may not alter any existing entry (although adding new ones is
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62ddfd31d4ce..6dfb9785d345 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1378,16 +1378,19 @@ static int scsi_lld_busy(struct request_queue *q)
1378{ 1378{
1379 struct scsi_device *sdev = q->queuedata; 1379 struct scsi_device *sdev = q->queuedata;
1380 struct Scsi_Host *shost; 1380 struct Scsi_Host *shost;
1381 struct scsi_target *starget;
1382 1381
1383 if (!sdev) 1382 if (!sdev)
1384 return 0; 1383 return 0;
1385 1384
1386 shost = sdev->host; 1385 shost = sdev->host;
1387 starget = scsi_target(sdev);
1388 1386
1389 if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) || 1387 /*
1390 scsi_target_is_busy(starget) || scsi_device_is_busy(sdev)) 1388 * Ignore host/starget busy state.
1389 * Since block layer does not have a concept of fairness across
1390 * multiple queues, congestion of host/starget needs to be handled
1391 * in SCSI layer.
1392 */
1393 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1391 return 1; 1394 return 1;
1392 1395
1393 return 0; 1396 return 0;
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index c77628afbf9f..8818dd681c19 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -486,6 +486,10 @@ void
486scsi_netlink_init(void) 486scsi_netlink_init(void)
487{ 487{
488 int error; 488 int error;
489 struct netlink_kernel_cfg cfg = {
490 .input = scsi_nl_rcv_msg,
491 .groups = SCSI_NL_GRP_CNT,
492 };
489 493
490 INIT_LIST_HEAD(&scsi_nl_drivers); 494 INIT_LIST_HEAD(&scsi_nl_drivers);
491 495
@@ -497,8 +501,7 @@ scsi_netlink_init(void)
497 } 501 }
498 502
499 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT, 503 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
500 SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL, 504 THIS_MODULE, &cfg);
501 THIS_MODULE);
502 if (!scsi_nl_sock) { 505 if (!scsi_nl_sock) {
503 printk(KERN_ERR "%s: register of receive handler failed\n", 506 printk(KERN_ERR "%s: register of receive handler failed\n",
504 __func__); 507 __func__);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index f661a41fa4c6..d4201ded3b22 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -24,8 +24,11 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
24 err = scsi_device_quiesce(to_scsi_device(dev)); 24 err = scsi_device_quiesce(to_scsi_device(dev));
25 if (err == 0) { 25 if (err == 0) {
26 drv = dev->driver; 26 drv = dev->driver;
27 if (drv && drv->suspend) 27 if (drv && drv->suspend) {
28 err = drv->suspend(dev, msg); 28 err = drv->suspend(dev, msg);
29 if (err)
30 scsi_device_resume(to_scsi_device(dev));
31 }
29 } 32 }
30 dev_dbg(dev, "scsi suspend: %d\n", err); 33 dev_dbg(dev, "scsi suspend: %d\n", err);
31 return err; 34 return err;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 01b03744f1f9..2e5fe584aad3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
147 147
148 do { 148 do {
149 if (list_empty(&scanning_hosts)) 149 if (list_empty(&scanning_hosts))
150 return 0; 150 goto out;
151 /* If we can't get memory immediately, that's OK. Just 151 /* If we can't get memory immediately, that's OK. Just
152 * sleep a little. Even if we never get memory, the async 152 * sleep a little. Even if we never get memory, the async
153 * scans will finish eventually. 153 * scans will finish eventually.
@@ -179,8 +179,11 @@ int scsi_complete_async_scans(void)
179 } 179 }
180 done: 180 done:
181 spin_unlock(&async_scan_lock); 181 spin_unlock(&async_scan_lock);
182
183 kfree(data); 182 kfree(data);
183
184 out:
185 async_synchronize_full_domain(&scsi_sd_probe_domain);
186
184 return 0; 187 return 0;
185} 188}
186 189
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 1cf640e575da..6042954d8f3b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2936,7 +2936,10 @@ EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
2936static __init int iscsi_transport_init(void) 2936static __init int iscsi_transport_init(void)
2937{ 2937{
2938 int err; 2938 int err;
2939 2939 struct netlink_kernel_cfg cfg = {
2940 .groups = 1,
2941 .input = iscsi_if_rx,
2942 };
2940 printk(KERN_INFO "Loading iSCSI transport class v%s.\n", 2943 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
2941 ISCSI_TRANSPORT_VERSION); 2944 ISCSI_TRANSPORT_VERSION);
2942 2945
@@ -2966,8 +2969,8 @@ static __init int iscsi_transport_init(void)
2966 if (err) 2969 if (err)
2967 goto unregister_conn_class; 2970 goto unregister_conn_class;
2968 2971
2969 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, 2972 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI,
2970 NULL, THIS_MODULE); 2973 THIS_MODULE, &cfg);
2971 if (!nls) { 2974 if (!nls) {
2972 err = -ENOBUFS; 2975 err = -ENOBUFS;
2973 goto unregister_session_class; 2976 goto unregister_session_class;
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
index 74708fcaf82f..072734538876 100644
--- a/drivers/scsi/scsi_wait_scan.c
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -12,7 +12,7 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <scsi/scsi_scan.h> 15#include "scsi_priv.h"
16 16
17static int __init wait_scan_init(void) 17static int __init wait_scan_init(void)
18{ 18{
@@ -22,11 +22,6 @@ static int __init wait_scan_init(void)
22 * and might not yet have reached the scsi async scanning 22 * and might not yet have reached the scsi async scanning
23 */ 23 */
24 wait_for_device_probe(); 24 wait_for_device_probe();
25 /*
26 * and then we wait for the actual asynchronous scsi scan
27 * to finish.
28 */
29 scsi_complete_async_scans();
30 return 0; 25 return 0;
31} 26}
32 27
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6f0a4c612b3b..6f72b80121a0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1899,6 +1899,8 @@ static int sd_try_rc16_first(struct scsi_device *sdp)
1899{ 1899{
1900 if (sdp->host->max_cmd_len < 16) 1900 if (sdp->host->max_cmd_len < 16)
1901 return 0; 1901 return 0;
1902 if (sdp->try_rc_10_first)
1903 return 0;
1902 if (sdp->scsi_level > SCSI_SPC_2) 1904 if (sdp->scsi_level > SCSI_SPC_2)
1903 return 1; 1905 return 1;
1904 if (scsi_device_protection(sdp)) 1906 if (scsi_device_protection(sdp))
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4e010b727818..6a4fd00117ca 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1836,7 +1836,7 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1836 err = pci_request_regions(pdev, UFSHCD); 1836 err = pci_request_regions(pdev, UFSHCD);
1837 if (err < 0) { 1837 if (err < 0) {
1838 dev_err(&pdev->dev, "request regions failed\n"); 1838 dev_err(&pdev->dev, "request regions failed\n");
1839 goto out_disable; 1839 goto out_host_put;
1840 } 1840 }
1841 1841
1842 hba->mmio_base = pci_ioremap_bar(pdev, 0); 1842 hba->mmio_base = pci_ioremap_bar(pdev, 0);
@@ -1925,8 +1925,9 @@ out_iounmap:
1925 iounmap(hba->mmio_base); 1925 iounmap(hba->mmio_base);
1926out_release_regions: 1926out_release_regions:
1927 pci_release_regions(pdev); 1927 pci_release_regions(pdev);
1928out_disable: 1928out_host_put:
1929 scsi_host_put(host); 1929 scsi_host_put(host);
1930out_disable:
1930 pci_clear_master(pdev); 1931 pci_clear_master(pdev);
1931 pci_disable_device(pdev); 1932 pci_disable_device(pdev);
1932out_error: 1933out_error: