aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-05-11 18:24:22 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-05-11 18:24:22 -0400
commit4c4445013f792f82855079ac377bf5d75af4581c (patch)
treefbdc80d7bd7cf3c40f85048b6f5e9c1357ca50d6 /drivers/scsi
parentac4e01093f6d7b051c5d6a3e61ea5337774ac36a (diff)
parent297b8a07347555f0d2fafa4a1ddfc332d2d4afa9 (diff)
Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull second SCSI update from James "Jaj B" Bottomley: "This is the final round of SCSI patches for the merge window. It consists mostly of driver updates (bnx2fc, ibmfc, fnic, lpfc, be2iscsi, pm80xx, qla4x and ipr). There's also the power management updates that complete the patches in Jens' tree, an iscsi refcounting problem fix from the last pull, some dif handling in scsi_debug fixes, a few nice code cleanups and an error handling busy bug fix." * tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (92 commits) [SCSI] qla2xxx: Update firmware link in Kconfig file. [SCSI] iscsi class, qla4xxx: fix sess/conn refcounting when find fns are used [SCSI] sas: unify the pointlessly separated enums sas_dev_type and sas_device_type [SCSI] pm80xx: thermal, sas controller config and error handling update [SCSI] pm80xx: NCQ error handling changes [SCSI] pm80xx: WWN Modification for PM8081/88/89 controllers [SCSI] pm80xx: Changed module name and debug messages update [SCSI] pm80xx: Firmware flash memory free fix, with addition of new memory region for it [SCSI] pm80xx: SPC new firmware changes for device id 0x8081 alone [SCSI] pm80xx: Added SPCv/ve specific hardware functionalities and relevant changes in common files [SCSI] pm80xx: MSI-X implementation for using 64 interrupts [SCSI] pm80xx: Updated common functions common for SPC and SPCv/ve [SCSI] pm80xx: Multiple inbound/outbound queue configuration [SCSI] pm80xx: Added SPCv/ve specific ids, variables and modify for SPC [SCSI] lpfc: fix up Kconfig dependencies [SCSI] Handle MLQUEUE busy response in scsi_send_eh_cmnd [SCSI] sd: change to auto suspend mode [SCSI] sd: use REQ_PM in sd's runtime suspend operation [SCSI] qla4xxx: Fix iocb_cnt calculation in qla4xxx_send_mbox_iocb() [SCSI] ufs: Correct the expected data transfersize ...
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dev.c24
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c2
-rw-r--r--drivers/scsi/be2iscsi/be.h2
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c172
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h27
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c70
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c375
-rw-r--r--drivers/scsi/be2iscsi/be_main.h29
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c43
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h35
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c55
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c9
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c2
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h2
-rw-r--r--drivers/scsi/csiostor/csio_rnode.h2
-rw-r--r--drivers/scsi/fnic/fnic.h34
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c564
-rw-r--r--drivers/scsi/fnic/fnic_fip.h68
-rw-r--r--drivers/scsi/fnic/fnic_main.c51
-rw-r--r--drivers/scsi/fnic/vnic_dev.c10
-rw-r--r--drivers/scsi/fnic/vnic_dev.h2
-rw-r--r--drivers/scsi/fnic/vnic_devcmd.h67
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c85
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h7
-rw-r--r--drivers/scsi/ipr.c16
-rw-r--r--drivers/scsi/ipr.h2
-rw-r--r--drivers/scsi/isci/remote_device.c4
-rw-r--r--drivers/scsi/isci/remote_device.h2
-rw-r--r--drivers/scsi/isci/request.c6
-rw-r--r--drivers/scsi/isci/task.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c18
-rw-r--r--drivers/scsi/libsas/sas_discover.c34
-rw-r--r--drivers/scsi/libsas/sas_expander.c110
-rw-r--r--drivers/scsi/libsas/sas_internal.h10
-rw-r--r--drivers/scsi/libsas/sas_port.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h24
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c166
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c39
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c113
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c91
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c607
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c798
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c106
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h21
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.h1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c3
-rw-r--r--drivers/scsi/mvsas/mv_init.c2
-rw-r--r--drivers/scsi/mvsas/mv_sas.c16
-rw-r--r--drivers/scsi/mvsas/mv_sas.h4
-rw-r--r--drivers/scsi/pm8001/Makefile7
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c74
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h34
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c817
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c383
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c119
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h181
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c4130
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h1523
-rw-r--r--drivers/scsi/qla2xxx/Kconfig4
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c1
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c45
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_debug.c97
-rw-r--r--drivers/scsi/scsi_error.c37
-rw-r--r--drivers/scsi/scsi_lib.c9
-rw-r--r--drivers/scsi/scsi_pm.c84
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c94
-rw-r--r--drivers/scsi/sd.c42
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/sd_dif.c8
-rw-r--r--drivers/scsi/ufs/Kconfig11
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c217
-rw-r--r--drivers/scsi/ufs/ufshcd.c2
93 files changed, 10533 insertions, 1448 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index db95c547c09d..86af29f53bbe 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1353,6 +1353,8 @@ config SCSI_LPFC
1353 tristate "Emulex LightPulse Fibre Channel Support" 1353 tristate "Emulex LightPulse Fibre Channel Support"
1354 depends on PCI && SCSI 1354 depends on PCI && SCSI
1355 select SCSI_FC_ATTRS 1355 select SCSI_FC_ATTRS
1356 select GENERIC_CSUM
1357 select CRC_T10DIF
1356 help 1358 help
1357 This lpfc driver supports the Emulex LightPulse 1359 This lpfc driver supports the Emulex LightPulse
1358 Family of Fibre Channel PCI host adapters. 1360 Family of Fibre Channel PCI host adapters.
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 64136c56e706..33072388ea16 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev)
84 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; 84 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
85 int ddb = (int) (unsigned long) dev->lldd_dev; 85 int ddb = (int) (unsigned long) dev->lldd_dev;
86 86
87 if (dev->dev_type == SATA_PM_PORT) 87 if (dev->dev_type == SAS_SATA_PM_PORT)
88 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); 88 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
89 else if (dev->tproto) 89 else if (dev->tproto)
90 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); 90 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
@@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev)
116 int ddb = (int) (unsigned long) dev->lldd_dev; 116 int ddb = (int) (unsigned long) dev->lldd_dev;
117 u32 qdepth = 0; 117 u32 qdepth = 0;
118 118
119 if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) { 119 if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) {
120 if (ata_id_has_ncq(ata_dev->id)) 120 if (ata_id_has_ncq(ata_dev->id))
121 qdepth = ata_id_queue_depth(ata_dev->id); 121 qdepth = ata_id_queue_depth(ata_dev->id);
122 asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK, 122 asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
@@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev)
140 int ddb = (int) (unsigned long) dev->lldd_dev; 140 int ddb = (int) (unsigned long) dev->lldd_dev;
141 141
142 asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); 142 asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
143 if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || 143 if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
144 dev->dev_type == SATA_PM_PORT) { 144 dev->dev_type == SAS_SATA_PM_PORT) {
145 struct dev_to_host_fis *fis = (struct dev_to_host_fis *) 145 struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
146 dev->frame_rcvd; 146 dev->frame_rcvd;
147 asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status); 147 asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
@@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
174 asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask); 174 asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
175 if (dev->port->oob_mode != SATA_OOB_MODE) { 175 if (dev->port->oob_mode != SATA_OOB_MODE) {
176 flags |= OPEN_REQUIRED; 176 flags |= OPEN_REQUIRED;
177 if ((dev->dev_type == SATA_DEV) || 177 if ((dev->dev_type == SAS_SATA_DEV) ||
178 (dev->tproto & SAS_PROTOCOL_STP)) { 178 (dev->tproto & SAS_PROTOCOL_STP)) {
179 struct smp_resp *rps_resp = &dev->sata_dev.rps_resp; 179 struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
180 if (rps_resp->frame_type == SMP_RESPONSE && 180 if (rps_resp->frame_type == SMP_RESPONSE &&
@@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev)
188 } else { 188 } else {
189 flags |= CONCURRENT_CONN_SUPP; 189 flags |= CONCURRENT_CONN_SUPP;
190 if (!dev->parent && 190 if (!dev->parent &&
191 (dev->dev_type == EDGE_DEV || 191 (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
192 dev->dev_type == FANOUT_DEV)) 192 dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE))
193 asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, 193 asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
194 4); 194 4);
195 else 195 else
@@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
198 asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1); 198 asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
199 } 199 }
200 } 200 }
201 if (dev->dev_type == SATA_PM) 201 if (dev->dev_type == SAS_SATA_PM)
202 flags |= SATA_MULTIPORT; 202 flags |= SATA_MULTIPORT;
203 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags); 203 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
204 204
@@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
211 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF); 211 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
212 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); 212 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
213 213
214 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 214 if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
215 i = asd_init_sata(dev); 215 i = asd_init_sata(dev);
216 if (i < 0) { 216 if (i < 0) {
217 asd_free_ddb(asd_ha, ddb); 217 asd_free_ddb(asd_ha, ddb);
@@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
219 } 219 }
220 } 220 }
221 221
222 if (dev->dev_type == SAS_END_DEV) { 222 if (dev->dev_type == SAS_END_DEVICE) {
223 struct sas_end_device *rdev = rphy_to_end_device(dev->rphy); 223 struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
224 if (rdev->I_T_nexus_loss_timeout > 0) 224 if (rdev->I_T_nexus_loss_timeout > 0)
225 asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, 225 asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
@@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev)
328 328
329 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); 329 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
330 switch (dev->dev_type) { 330 switch (dev->dev_type) {
331 case SATA_PM: 331 case SAS_SATA_PM:
332 res = asd_init_sata_pm_ddb(dev); 332 res = asd_init_sata_pm_ddb(dev);
333 break; 333 break;
334 case SATA_PM_PORT: 334 case SAS_SATA_PM_PORT:
335 res = asd_init_sata_pm_port_ddb(dev); 335 res = asd_init_sata_pm_port_ddb(dev);
336 break; 336 break;
337 default: 337 default:
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 81b736c76fff..4df867e07b20 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy)
74 74
75 memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); 75 memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
76 76
77 phy->identify_frame->dev_type = SAS_END_DEV; 77 phy->identify_frame->dev_type = SAS_END_DEVICE;
78 if (phy->sas_phy.role & PHY_ROLE_INITIATOR) 78 if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
79 phy->identify_frame->initiator_bits = phy->sas_phy.iproto; 79 phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
80 if (phy->sas_phy.role & PHY_ROLE_TARGET) 80 if (phy->sas_phy.role & PHY_ROLE_TARGET)
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index cf9040933da6..d4c35df3d4ae 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
184 struct sas_phy *phy = sas_get_local_phy(dev); 184 struct sas_phy *phy = sas_get_local_phy(dev);
185 /* Standard mandates link reset for ATA (type 0) and 185 /* Standard mandates link reset for ATA (type 0) and
186 * hard reset for SSP (type 1) */ 186 * hard reset for SSP (type 1) */
187 int reset_type = (dev->dev_type == SATA_DEV || 187 int reset_type = (dev->dev_type == SAS_SATA_DEV ||
188 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; 188 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
189 189
190 asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); 190 asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index f1733dfa3ae2..777e7c0bbb4b 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 5c87768c109c..e66aa7c11a8a 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -155,6 +155,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
155 uint16_t status = 0, addl_status = 0, wrb_num = 0; 155 uint16_t status = 0, addl_status = 0, wrb_num = 0;
156 struct be_mcc_wrb *temp_wrb; 156 struct be_mcc_wrb *temp_wrb;
157 struct be_cmd_req_hdr *ioctl_hdr; 157 struct be_cmd_req_hdr *ioctl_hdr;
158 struct be_cmd_resp_hdr *ioctl_resp_hdr;
158 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; 159 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
159 160
160 if (beiscsi_error(phba)) 161 if (beiscsi_error(phba))
@@ -204,6 +205,12 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
204 ioctl_hdr->subsystem, 205 ioctl_hdr->subsystem,
205 ioctl_hdr->opcode, 206 ioctl_hdr->opcode,
206 status, addl_status); 207 status, addl_status);
208
209 if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
210 ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
211 if (ioctl_resp_hdr->response_length)
212 goto release_mcc_tag;
213 }
207 rc = -EAGAIN; 214 rc = -EAGAIN;
208 } 215 }
209 216
@@ -267,6 +274,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
267 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 274 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
268 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); 275 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
269 struct be_cmd_req_hdr *hdr = embedded_payload(wrb); 276 struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
277 struct be_cmd_resp_hdr *resp_hdr;
270 278
271 be_dws_le_to_cpu(compl, 4); 279 be_dws_le_to_cpu(compl, 4);
272 280
@@ -284,6 +292,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
284 hdr->subsystem, hdr->opcode, 292 hdr->subsystem, hdr->opcode,
285 compl_status, extd_status); 293 compl_status, extd_status);
286 294
295 if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
296 resp_hdr = (struct be_cmd_resp_hdr *) hdr;
297 if (resp_hdr->response_length)
298 return 0;
299 }
287 return -EBUSY; 300 return -EBUSY;
288 } 301 }
289 return 0; 302 return 0;
@@ -335,30 +348,26 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
335void beiscsi_async_link_state_process(struct beiscsi_hba *phba, 348void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
336 struct be_async_event_link_state *evt) 349 struct be_async_event_link_state *evt)
337{ 350{
338 switch (evt->port_link_status) { 351 if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
339 case ASYNC_EVENT_LINK_DOWN: 352 ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
353 (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
354 phba->state = BE_ADAPTER_LINK_DOWN;
355
340 beiscsi_log(phba, KERN_ERR, 356 beiscsi_log(phba, KERN_ERR,
341 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, 357 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
342 "BC_%d : Link Down on Physical Port %d\n", 358 "BC_%d : Link Down on Port %d\n",
343 evt->physical_port); 359 evt->physical_port);
344 360
345 phba->state |= BE_ADAPTER_LINK_DOWN;
346 iscsi_host_for_each_session(phba->shost, 361 iscsi_host_for_each_session(phba->shost,
347 be2iscsi_fail_session); 362 be2iscsi_fail_session);
348 break; 363 } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
349 case ASYNC_EVENT_LINK_UP: 364 ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
365 (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
350 phba->state = BE_ADAPTER_UP; 366 phba->state = BE_ADAPTER_UP;
367
351 beiscsi_log(phba, KERN_ERR, 368 beiscsi_log(phba, KERN_ERR,
352 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, 369 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
353 "BC_%d : Link UP on Physical Port %d\n", 370 "BC_%d : Link UP on Port %d\n",
354 evt->physical_port);
355 break;
356 default:
357 beiscsi_log(phba, KERN_ERR,
358 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
359 "BC_%d : Unexpected Async Notification %d on"
360 "Physical Port %d\n",
361 evt->port_link_status,
362 evt->physical_port); 371 evt->physical_port);
363 } 372 }
364} 373}
@@ -479,7 +488,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
479{ 488{
480 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; 489 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
481 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); 490 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
482 int wait = 0; 491 uint32_t wait = 0;
483 u32 ready; 492 u32 ready;
484 493
485 do { 494 do {
@@ -527,6 +536,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
527 struct be_mcc_compl *compl = &mbox->compl; 536 struct be_mcc_compl *compl = &mbox->compl;
528 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); 537 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
529 538
539 status = be_mbox_db_ready_wait(ctrl);
540 if (status)
541 return status;
542
530 val &= ~MPU_MAILBOX_DB_RDY_MASK; 543 val &= ~MPU_MAILBOX_DB_RDY_MASK;
531 val |= MPU_MAILBOX_DB_HI_MASK; 544 val |= MPU_MAILBOX_DB_HI_MASK;
532 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; 545 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -580,6 +593,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
580 struct be_mcc_compl *compl = &mbox->compl; 593 struct be_mcc_compl *compl = &mbox->compl;
581 struct be_ctrl_info *ctrl = &phba->ctrl; 594 struct be_ctrl_info *ctrl = &phba->ctrl;
582 595
596 status = be_mbox_db_ready_wait(ctrl);
597 if (status)
598 return status;
599
583 val |= MPU_MAILBOX_DB_HI_MASK; 600 val |= MPU_MAILBOX_DB_HI_MASK;
584 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ 601 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
585 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; 602 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -732,6 +749,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
732 return status; 749 return status;
733} 750}
734 751
752/**
753 * be_cmd_fw_initialize()- Initialize FW
754 * @ctrl: Pointer to function control structure
755 *
756 * Send FW initialize pattern for the function.
757 *
758 * return
759 * Success: 0
760 * Failure: Non-Zero value
761 **/
735int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) 762int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
736{ 763{
737 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 764 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -762,6 +789,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
762 return status; 789 return status;
763} 790}
764 791
792/**
793 * be_cmd_fw_uninit()- Uinitialize FW
794 * @ctrl: Pointer to function control structure
795 *
796 * Send FW uninitialize pattern for the function
797 *
798 * return
799 * Success: 0
800 * Failure: Non-Zero value
801 **/
802int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
803{
804 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
805 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
806 int status;
807 u8 *endian_check;
808
809 spin_lock(&ctrl->mbox_lock);
810 memset(wrb, 0, sizeof(*wrb));
811
812 endian_check = (u8 *) wrb;
813 *endian_check++ = 0xFF;
814 *endian_check++ = 0xAA;
815 *endian_check++ = 0xBB;
816 *endian_check++ = 0xFF;
817 *endian_check++ = 0xFF;
818 *endian_check++ = 0xCC;
819 *endian_check++ = 0xDD;
820 *endian_check = 0xFF;
821
822 be_dws_cpu_to_le(wrb, sizeof(*wrb));
823
824 status = be_mbox_notify(ctrl);
825 if (status)
826 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
827 "BC_%d : be_cmd_fw_uninit Failed\n");
828
829 spin_unlock(&ctrl->mbox_lock);
830 return status;
831}
832
765int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, 833int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
766 struct be_queue_info *cq, struct be_queue_info *eq, 834 struct be_queue_info *cq, struct be_queue_info *eq,
767 bool sol_evts, bool no_delay, int coalesce_wm) 835 bool sol_evts, bool no_delay, int coalesce_wm)
@@ -783,20 +851,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
783 OPCODE_COMMON_CQ_CREATE, sizeof(*req)); 851 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
784 852
785 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 853 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
786 if (chip_skh_r(ctrl->pdev)) { 854 if (is_chip_be2_be3r(phba)) {
787 req->hdr.version = MBX_CMD_VER2;
788 req->page_size = 1;
789 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
790 ctxt, coalesce_wm);
791 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
792 ctxt, no_delay);
793 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
794 __ilog2_u32(cq->len / 256));
795 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
796 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
797 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
798 AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
799 } else {
800 AMAP_SET_BITS(struct amap_cq_context, coalescwm, 855 AMAP_SET_BITS(struct amap_cq_context, coalescwm,
801 ctxt, coalesce_wm); 856 ctxt, coalesce_wm);
802 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); 857 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
@@ -809,6 +864,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
809 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); 864 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
810 AMAP_SET_BITS(struct amap_cq_context, func, ctxt, 865 AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
811 PCI_FUNC(ctrl->pdev->devfn)); 866 PCI_FUNC(ctrl->pdev->devfn));
867 } else {
868 req->hdr.version = MBX_CMD_VER2;
869 req->page_size = 1;
870 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
871 ctxt, coalesce_wm);
872 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
873 ctxt, no_delay);
874 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
875 __ilog2_u32(cq->len / 256));
876 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
877 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
878 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
879 AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
812 } 880 }
813 881
814 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 882 be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -949,6 +1017,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
949 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 1017 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
950 struct be_defq_create_req *req = embedded_payload(wrb); 1018 struct be_defq_create_req *req = embedded_payload(wrb);
951 struct be_dma_mem *q_mem = &dq->dma_mem; 1019 struct be_dma_mem *q_mem = &dq->dma_mem;
1020 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
952 void *ctxt = &req->context; 1021 void *ctxt = &req->context;
953 int status; 1022 int status;
954 1023
@@ -961,17 +1030,36 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
961 OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req)); 1030 OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
962 1031
963 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1032 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
964 AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0); 1033
965 AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt, 1034 if (is_chip_be2_be3r(phba)) {
966 1); 1035 AMAP_SET_BITS(struct amap_be_default_pdu_context,
967 AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt, 1036 rx_pdid, ctxt, 0);
968 PCI_FUNC(ctrl->pdev->devfn)); 1037 AMAP_SET_BITS(struct amap_be_default_pdu_context,
969 AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt, 1038 rx_pdid_valid, ctxt, 1);
970 be_encoded_q_len(length / sizeof(struct phys_addr))); 1039 AMAP_SET_BITS(struct amap_be_default_pdu_context,
971 AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size, 1040 pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
972 ctxt, entry_size); 1041 AMAP_SET_BITS(struct amap_be_default_pdu_context,
973 AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt, 1042 ring_size, ctxt,
974 cq->id); 1043 be_encoded_q_len(length /
1044 sizeof(struct phys_addr)));
1045 AMAP_SET_BITS(struct amap_be_default_pdu_context,
1046 default_buffer_size, ctxt, entry_size);
1047 AMAP_SET_BITS(struct amap_be_default_pdu_context,
1048 cq_id_recv, ctxt, cq->id);
1049 } else {
1050 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1051 rx_pdid, ctxt, 0);
1052 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1053 rx_pdid_valid, ctxt, 1);
1054 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1055 ring_size, ctxt,
1056 be_encoded_q_len(length /
1057 sizeof(struct phys_addr)));
1058 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1059 default_buffer_size, ctxt, entry_size);
1060 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1061 cq_id_recv, ctxt, cq->id);
1062 }
975 1063
976 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1064 be_dws_cpu_to_le(ctxt, sizeof(req->context));
977 1065
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 23397d51ac54..99073086dfe0 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -52,6 +52,10 @@ struct be_mcc_wrb {
52 52
53/* Completion Status */ 53/* Completion Status */
54#define MCC_STATUS_SUCCESS 0x0 54#define MCC_STATUS_SUCCESS 0x0
55#define MCC_STATUS_FAILED 0x1
56#define MCC_STATUS_ILLEGAL_REQUEST 0x2
57#define MCC_STATUS_ILLEGAL_FIELD 0x3
58#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
55 59
56#define CQE_STATUS_COMPL_MASK 0xFFFF 60#define CQE_STATUS_COMPL_MASK 0xFFFF
57#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ 61#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
@@ -118,7 +122,8 @@ struct be_async_event_trailer {
118 122
119enum { 123enum {
120 ASYNC_EVENT_LINK_DOWN = 0x0, 124 ASYNC_EVENT_LINK_DOWN = 0x0,
121 ASYNC_EVENT_LINK_UP = 0x1 125 ASYNC_EVENT_LINK_UP = 0x1,
126 ASYNC_EVENT_LOGICAL = 0x2
122}; 127};
123 128
124/** 129/**
@@ -130,6 +135,9 @@ struct be_async_event_link_state {
130 u8 port_link_status; 135 u8 port_link_status;
131 u8 port_duplex; 136 u8 port_duplex;
132 u8 port_speed; 137 u8 port_speed;
138#define BEISCSI_PHY_LINK_FAULT_NONE 0x00
139#define BEISCSI_PHY_LINK_FAULT_LOCAL 0x01
140#define BEISCSI_PHY_LINK_FAULT_REMOTE 0x02
133 u8 port_fault; 141 u8 port_fault;
134 u8 rsvd0[7]; 142 u8 rsvd0[7];
135 struct be_async_event_trailer trailer; 143 struct be_async_event_trailer trailer;
@@ -697,6 +705,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
697 uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va); 705 uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
698/*ISCSI Functuions */ 706/*ISCSI Functuions */
699int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); 707int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
708int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
700 709
701struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); 710struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
702struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); 711struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
@@ -751,6 +760,18 @@ struct amap_be_default_pdu_context {
751 u8 rsvd4[32]; /* dword 3 */ 760 u8 rsvd4[32]; /* dword 3 */
752} __packed; 761} __packed;
753 762
763struct amap_default_pdu_context_ext {
764 u8 rsvd0[16]; /* dword 0 */
765 u8 ring_size[4]; /* dword 0 */
766 u8 rsvd1[12]; /* dword 0 */
767 u8 rsvd2[22]; /* dword 1 */
768 u8 rx_pdid[9]; /* dword 1 */
769 u8 rx_pdid_valid; /* dword 1 */
770 u8 default_buffer_size[16]; /* dword 2 */
771 u8 cq_id_recv[16]; /* dword 2 */
772 u8 rsvd3[32]; /* dword 3 */
773} __packed;
774
754struct be_defq_create_req { 775struct be_defq_create_req {
755 struct be_cmd_req_hdr hdr; 776 struct be_cmd_req_hdr hdr;
756 u16 num_pages; 777 u16 num_pages;
@@ -896,7 +917,7 @@ struct amap_it_dmsg_cqe_v2 {
896 * stack to notify the 917 * stack to notify the
897 * controller of a posted Work Request Block 918 * controller of a posted Work Request Block
898 */ 919 */
899#define DB_WRB_POST_CID_MASK 0x3FF /* bits 0 - 9 */ 920#define DB_WRB_POST_CID_MASK 0xFFFF /* bits 0 - 16 */
900#define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 9 */ 921#define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 9 */
901 922
902#define DB_DEF_PDU_WRB_INDEX_SHIFT 16 923#define DB_DEF_PDU_WRB_INDEX_SHIFT 16
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 9014690fe841..ef36be003f67 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -161,7 +161,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
161 struct beiscsi_conn *beiscsi_conn, 161 struct beiscsi_conn *beiscsi_conn,
162 unsigned int cid) 162 unsigned int cid)
163{ 163{
164 if (phba->conn_table[cid]) { 164 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
165
166 if (phba->conn_table[cri_index]) {
165 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 167 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
166 "BS_%d : Connection table already occupied. Detected clash\n"); 168 "BS_%d : Connection table already occupied. Detected clash\n");
167 169
@@ -169,9 +171,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
169 } else { 171 } else {
170 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 172 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
171 "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n", 173 "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
172 cid, beiscsi_conn); 174 cri_index, beiscsi_conn);
173 175
174 phba->conn_table[cid] = beiscsi_conn; 176 phba->conn_table[cri_index] = beiscsi_conn;
175 } 177 }
176 return 0; 178 return 0;
177} 179}
@@ -990,9 +992,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
990static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) 992static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
991{ 993{
992 struct beiscsi_hba *phba = beiscsi_ep->phba; 994 struct beiscsi_hba *phba = beiscsi_ep->phba;
995 struct beiscsi_conn *beiscsi_conn;
993 996
994 beiscsi_put_cid(phba, beiscsi_ep->ep_cid); 997 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
995 beiscsi_ep->phba = NULL; 998 beiscsi_ep->phba = NULL;
999 phba->ep_array[BE_GET_CRI_FROM_CID
1000 (beiscsi_ep->ep_cid)] = NULL;
1001
1002 /**
1003 * Check if any connection resource allocated by driver
1004 * is to be freed.This case occurs when target redirection
1005 * or connection retry is done.
1006 **/
1007 if (!beiscsi_ep->conn)
1008 return;
1009
1010 beiscsi_conn = beiscsi_ep->conn;
1011 if (beiscsi_conn->login_in_progress) {
1012 beiscsi_free_mgmt_task_handles(beiscsi_conn,
1013 beiscsi_conn->task);
1014 beiscsi_conn->login_in_progress = 0;
1015 }
996} 1016}
997 1017
998/** 1018/**
@@ -1009,7 +1029,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1009{ 1029{
1010 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; 1030 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
1011 struct beiscsi_hba *phba = beiscsi_ep->phba; 1031 struct beiscsi_hba *phba = beiscsi_ep->phba;
1012 struct be_mcc_wrb *wrb;
1013 struct tcp_connect_and_offload_out *ptcpcnct_out; 1032 struct tcp_connect_and_offload_out *ptcpcnct_out;
1014 struct be_dma_mem nonemb_cmd; 1033 struct be_dma_mem nonemb_cmd;
1015 unsigned int tag; 1034 unsigned int tag;
@@ -1029,15 +1048,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1029 "BS_%d : In beiscsi_open_conn, ep_cid=%d\n", 1048 "BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
1030 beiscsi_ep->ep_cid); 1049 beiscsi_ep->ep_cid);
1031 1050
1032 phba->ep_array[beiscsi_ep->ep_cid - 1051 phba->ep_array[BE_GET_CRI_FROM_CID
1033 phba->fw_config.iscsi_cid_start] = ep; 1052 (beiscsi_ep->ep_cid)] = ep;
1034 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
1035 phba->params.cxns_per_ctrl * 2)) {
1036
1037 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1038 "BS_%d : Failed in allocate iscsi cid\n");
1039 goto free_ep;
1040 }
1041 1053
1042 beiscsi_ep->cid_vld = 0; 1054 beiscsi_ep->cid_vld = 0;
1043 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 1055 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -1049,24 +1061,24 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1049 "BS_%d : Failed to allocate memory for" 1061 "BS_%d : Failed to allocate memory for"
1050 " mgmt_open_connection\n"); 1062 " mgmt_open_connection\n");
1051 1063
1052 beiscsi_put_cid(phba, beiscsi_ep->ep_cid); 1064 beiscsi_free_ep(beiscsi_ep);
1053 return -ENOMEM; 1065 return -ENOMEM;
1054 } 1066 }
1055 nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); 1067 nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
1056 memset(nonemb_cmd.va, 0, nonemb_cmd.size); 1068 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
1057 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); 1069 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
1058 if (!tag) { 1070 if (tag <= 0) {
1059 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 1071 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1060 "BS_%d : mgmt_open_connection Failed for cid=%d\n", 1072 "BS_%d : mgmt_open_connection Failed for cid=%d\n",
1061 beiscsi_ep->ep_cid); 1073 beiscsi_ep->ep_cid);
1062 1074
1063 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
1064 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1075 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
1065 nonemb_cmd.va, nonemb_cmd.dma); 1076 nonemb_cmd.va, nonemb_cmd.dma);
1077 beiscsi_free_ep(beiscsi_ep);
1066 return -EAGAIN; 1078 return -EAGAIN;
1067 } 1079 }
1068 1080
1069 ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL); 1081 ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
1070 if (ret) { 1082 if (ret) {
1071 beiscsi_log(phba, KERN_ERR, 1083 beiscsi_log(phba, KERN_ERR,
1072 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 1084 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -1074,10 +1086,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1074 1086
1075 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1087 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
1076 nonemb_cmd.va, nonemb_cmd.dma); 1088 nonemb_cmd.va, nonemb_cmd.dma);
1077 goto free_ep; 1089 beiscsi_free_ep(beiscsi_ep);
1090 return -EBUSY;
1078 } 1091 }
1079 1092
1080 ptcpcnct_out = embedded_payload(wrb); 1093 ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
1081 beiscsi_ep = ep->dd_data; 1094 beiscsi_ep = ep->dd_data;
1082 beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; 1095 beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
1083 beiscsi_ep->cid_vld = 1; 1096 beiscsi_ep->cid_vld = 1;
@@ -1087,10 +1100,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1087 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1100 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
1088 nonemb_cmd.va, nonemb_cmd.dma); 1101 nonemb_cmd.va, nonemb_cmd.dma);
1089 return 0; 1102 return 0;
1090
1091free_ep:
1092 beiscsi_free_ep(beiscsi_ep);
1093 return -EBUSY;
1094} 1103}
1095 1104
1096/** 1105/**
@@ -1119,6 +1128,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1119 return ERR_PTR(ret); 1128 return ERR_PTR(ret);
1120 } 1129 }
1121 1130
1131 if (beiscsi_error(phba)) {
1132 ret = -EIO;
1133 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1134 "BS_%d : The FW state Not Stable!!!\n");
1135 return ERR_PTR(ret);
1136 }
1137
1122 if (phba->state != BE_ADAPTER_UP) { 1138 if (phba->state != BE_ADAPTER_UP) {
1123 ret = -EBUSY; 1139 ret = -EBUSY;
1124 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 1140 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
@@ -1201,8 +1217,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
1201static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba, 1217static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
1202 unsigned int cid) 1218 unsigned int cid)
1203{ 1219{
1204 if (phba->conn_table[cid]) 1220 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
1205 phba->conn_table[cid] = NULL; 1221
1222 if (phba->conn_table[cri_index])
1223 phba->conn_table[cri_index] = NULL;
1206 else { 1224 else {
1207 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1225 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1208 "BS_%d : Connection table Not occupied.\n"); 1226 "BS_%d : Connection table Not occupied.\n");
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 38eab7232159..31ddc8494398 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 4e2733d23003..d24a2867bc21 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -153,10 +153,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
153 153
154DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); 154DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
155DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); 155DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
156DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
157DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
156struct device_attribute *beiscsi_attrs[] = { 158struct device_attribute *beiscsi_attrs[] = {
157 &dev_attr_beiscsi_log_enable, 159 &dev_attr_beiscsi_log_enable,
158 &dev_attr_beiscsi_drvr_ver, 160 &dev_attr_beiscsi_drvr_ver,
159 &dev_attr_beiscsi_adapter_family, 161 &dev_attr_beiscsi_adapter_family,
162 &dev_attr_beiscsi_fw_ver,
163 &dev_attr_beiscsi_active_cid_count,
160 NULL, 164 NULL,
161}; 165};
162 166
@@ -702,7 +706,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
702 + BE2_TMFS 706 + BE2_TMFS
703 + BE2_NOPOUT_REQ)); 707 + BE2_NOPOUT_REQ));
704 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; 708 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
705 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2; 709 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
706 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; 710 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
707 phba->params.num_sge_per_io = BE2_SGE; 711 phba->params.num_sge_per_io = BE2_SGE;
708 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 712 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -1032,7 +1036,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba,
1032static unsigned int 1036static unsigned int
1033beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, 1037beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
1034 struct beiscsi_hba *phba, 1038 struct beiscsi_hba *phba,
1035 unsigned short cid,
1036 struct pdu_base *ppdu, 1039 struct pdu_base *ppdu,
1037 unsigned long pdu_len, 1040 unsigned long pdu_len,
1038 void *pbuffer, unsigned long buf_len) 1041 void *pbuffer, unsigned long buf_len)
@@ -1144,9 +1147,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
1144 struct hwi_wrb_context *pwrb_context; 1147 struct hwi_wrb_context *pwrb_context;
1145 struct hwi_controller *phwi_ctrlr; 1148 struct hwi_controller *phwi_ctrlr;
1146 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; 1149 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
1150 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
1147 1151
1148 phwi_ctrlr = phba->phwi_ctrlr; 1152 phwi_ctrlr = phba->phwi_ctrlr;
1149 pwrb_context = &phwi_ctrlr->wrb_context[cid]; 1153 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1150 if (pwrb_context->wrb_handles_available >= 2) { 1154 if (pwrb_context->wrb_handles_available >= 2) {
1151 pwrb_handle = pwrb_context->pwrb_handle_base[ 1155 pwrb_handle = pwrb_context->pwrb_handle_base[
1152 pwrb_context->alloc_index]; 1156 pwrb_context->alloc_index];
@@ -1322,8 +1326,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
1322 hdr->t2retain = 0; 1326 hdr->t2retain = 0;
1323 hdr->flags = csol_cqe->i_flags; 1327 hdr->flags = csol_cqe->i_flags;
1324 hdr->response = csol_cqe->i_resp; 1328 hdr->response = csol_cqe->i_resp;
1325 hdr->exp_cmdsn = csol_cqe->exp_cmdsn; 1329 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1326 hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1); 1330 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1331 csol_cqe->cmd_wnd - 1);
1327 1332
1328 hdr->dlength[0] = 0; 1333 hdr->dlength[0] = 0;
1329 hdr->dlength[1] = 0; 1334 hdr->dlength[1] = 0;
@@ -1346,9 +1351,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
1346 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 1351 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
1347 hdr->flags = csol_cqe->i_flags; 1352 hdr->flags = csol_cqe->i_flags;
1348 hdr->response = csol_cqe->i_resp; 1353 hdr->response = csol_cqe->i_resp;
1349 hdr->exp_cmdsn = csol_cqe->exp_cmdsn; 1354 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1350 hdr->max_cmdsn = (csol_cqe->exp_cmdsn + 1355 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1351 csol_cqe->cmd_wnd - 1); 1356 csol_cqe->cmd_wnd - 1);
1352 1357
1353 hdr->itt = io_task->libiscsi_itt; 1358 hdr->itt = io_task->libiscsi_itt;
1354 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1359 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -1363,35 +1368,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1363 struct hwi_controller *phwi_ctrlr; 1368 struct hwi_controller *phwi_ctrlr;
1364 struct iscsi_task *task; 1369 struct iscsi_task *task;
1365 struct beiscsi_io_task *io_task; 1370 struct beiscsi_io_task *io_task;
1366 struct iscsi_conn *conn = beiscsi_conn->conn; 1371 uint16_t wrb_index, cid, cri_index;
1367 struct iscsi_session *session = conn->session;
1368 uint16_t wrb_index, cid;
1369 1372
1370 phwi_ctrlr = phba->phwi_ctrlr; 1373 phwi_ctrlr = phba->phwi_ctrlr;
1371 if (chip_skh_r(phba->pcidev)) { 1374 if (is_chip_be2_be3r(phba)) {
1372 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1375 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1373 wrb_idx, psol); 1376 wrb_idx, psol);
1374 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1377 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1375 cid, psol); 1378 cid, psol);
1376 } else { 1379 } else {
1377 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1380 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1378 wrb_idx, psol); 1381 wrb_idx, psol);
1379 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1382 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1380 cid, psol); 1383 cid, psol);
1381 } 1384 }
1382 1385
1383 pwrb_context = &phwi_ctrlr->wrb_context[ 1386 cri_index = BE_GET_CRI_FROM_CID(cid);
1384 cid - phba->fw_config.iscsi_cid_start]; 1387 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1385 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; 1388 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
1386 task = pwrb_handle->pio_handle; 1389 task = pwrb_handle->pio_handle;
1387 1390
1388 io_task = task->dd_data; 1391 io_task = task->dd_data;
1389 spin_lock_bh(&phba->mgmt_sgl_lock); 1392 memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
1390 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 1393 iscsi_put_task(task);
1391 spin_unlock_bh(&phba->mgmt_sgl_lock);
1392 spin_lock_bh(&session->lock);
1393 free_wrb_handle(phba, pwrb_context, pwrb_handle);
1394 spin_unlock_bh(&session->lock);
1395} 1394}
1396 1395
1397static void 1396static void
@@ -1406,8 +1405,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1406 hdr = (struct iscsi_nopin *)task->hdr; 1405 hdr = (struct iscsi_nopin *)task->hdr;
1407 hdr->flags = csol_cqe->i_flags; 1406 hdr->flags = csol_cqe->i_flags;
1408 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1407 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1409 hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn + 1408 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1410 csol_cqe->cmd_wnd - 1); 1409 csol_cqe->cmd_wnd - 1);
1411 1410
1412 hdr->opcode = ISCSI_OP_NOOP_IN; 1411 hdr->opcode = ISCSI_OP_NOOP_IN;
1413 hdr->itt = io_task->libiscsi_itt; 1412 hdr->itt = io_task->libiscsi_itt;
@@ -1418,7 +1417,26 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1418 struct sol_cqe *psol, 1417 struct sol_cqe *psol,
1419 struct common_sol_cqe *csol_cqe) 1418 struct common_sol_cqe *csol_cqe)
1420{ 1419{
1421 if (chip_skh_r(phba->pcidev)) { 1420 if (is_chip_be2_be3r(phba)) {
1421 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
1422 i_exp_cmd_sn, psol);
1423 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
1424 i_res_cnt, psol);
1425 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
1426 i_cmd_wnd, psol);
1427 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
1428 wrb_index, psol);
1429 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
1430 cid, psol);
1431 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1432 hw_sts, psol);
1433 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
1434 i_resp, psol);
1435 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1436 i_sts, psol);
1437 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
1438 i_flags, psol);
1439 } else {
1422 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1440 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1423 i_exp_cmd_sn, psol); 1441 i_exp_cmd_sn, psol);
1424 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1442 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
@@ -1429,7 +1447,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1429 cid, psol); 1447 cid, psol);
1430 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1448 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1431 hw_sts, psol); 1449 hw_sts, psol);
1432 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, 1450 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1433 i_cmd_wnd, psol); 1451 i_cmd_wnd, psol);
1434 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1452 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1435 cmd_cmpl, psol)) 1453 cmd_cmpl, psol))
@@ -1445,25 +1463,6 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1445 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1463 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1446 o, psol)) 1464 o, psol))
1447 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; 1465 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
1448 } else {
1449 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
1450 i_exp_cmd_sn, psol);
1451 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
1452 i_res_cnt, psol);
1453 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
1454 i_cmd_wnd, psol);
1455 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
1456 wrb_index, psol);
1457 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
1458 cid, psol);
1459 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1460 hw_sts, psol);
1461 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
1462 i_resp, psol);
1463 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1464 i_sts, psol);
1465 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
1466 i_flags, psol);
1467 } 1466 }
1468} 1467}
1469 1468
@@ -1480,14 +1479,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1480 struct iscsi_conn *conn = beiscsi_conn->conn; 1479 struct iscsi_conn *conn = beiscsi_conn->conn;
1481 struct iscsi_session *session = conn->session; 1480 struct iscsi_session *session = conn->session;
1482 struct common_sol_cqe csol_cqe = {0}; 1481 struct common_sol_cqe csol_cqe = {0};
1482 uint16_t cri_index = 0;
1483 1483
1484 phwi_ctrlr = phba->phwi_ctrlr; 1484 phwi_ctrlr = phba->phwi_ctrlr;
1485 1485
1486 /* Copy the elements to a common structure */ 1486 /* Copy the elements to a common structure */
1487 adapter_get_sol_cqe(phba, psol, &csol_cqe); 1487 adapter_get_sol_cqe(phba, psol, &csol_cqe);
1488 1488
1489 pwrb_context = &phwi_ctrlr->wrb_context[ 1489 cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
1490 csol_cqe.cid - phba->fw_config.iscsi_cid_start]; 1490 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1491 1491
1492 pwrb_handle = pwrb_context->pwrb_handle_basestd[ 1492 pwrb_handle = pwrb_context->pwrb_handle_basestd[
1493 csol_cqe.wrb_index]; 1493 csol_cqe.wrb_index];
@@ -1561,15 +1561,15 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
1561 unsigned char is_header = 0; 1561 unsigned char is_header = 0;
1562 unsigned int index, dpl; 1562 unsigned int index, dpl;
1563 1563
1564 if (chip_skh_r(phba->pcidev)) { 1564 if (is_chip_be2_be3r(phba)) {
1565 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1565 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1566 dpl, pdpdu_cqe); 1566 dpl, pdpdu_cqe);
1567 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1567 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1568 index, pdpdu_cqe); 1568 index, pdpdu_cqe);
1569 } else { 1569 } else {
1570 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1570 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1571 dpl, pdpdu_cqe); 1571 dpl, pdpdu_cqe);
1572 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1572 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1573 index, pdpdu_cqe); 1573 index, pdpdu_cqe);
1574 } 1574 }
1575 1575
@@ -1613,8 +1613,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
1613 1613
1614 WARN_ON(!pasync_handle); 1614 WARN_ON(!pasync_handle);
1615 1615
1616 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid - 1616 pasync_handle->cri =
1617 phba->fw_config.iscsi_cid_start; 1617 BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
1618 pasync_handle->is_header = is_header; 1618 pasync_handle->is_header = is_header;
1619 pasync_handle->buffer_len = dpl; 1619 pasync_handle->buffer_len = dpl;
1620 *pcq_index = index; 1620 *pcq_index = index;
@@ -1856,8 +1856,6 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1856 } 1856 }
1857 1857
1858 status = beiscsi_process_async_pdu(beiscsi_conn, phba, 1858 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1859 (beiscsi_conn->beiscsi_conn_cid -
1860 phba->fw_config.iscsi_cid_start),
1861 phdr, hdr_len, pfirst_buffer, 1859 phdr, hdr_len, pfirst_buffer,
1862 offset); 1860 offset);
1863 1861
@@ -2011,6 +2009,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2011 unsigned int num_processed = 0; 2009 unsigned int num_processed = 0;
2012 unsigned int tot_nump = 0; 2010 unsigned int tot_nump = 0;
2013 unsigned short code = 0, cid = 0; 2011 unsigned short code = 0, cid = 0;
2012 uint16_t cri_index = 0;
2014 struct beiscsi_conn *beiscsi_conn; 2013 struct beiscsi_conn *beiscsi_conn;
2015 struct beiscsi_endpoint *beiscsi_ep; 2014 struct beiscsi_endpoint *beiscsi_ep;
2016 struct iscsi_endpoint *ep; 2015 struct iscsi_endpoint *ep;
@@ -2028,7 +2027,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2028 32] & CQE_CODE_MASK); 2027 32] & CQE_CODE_MASK);
2029 2028
2030 /* Get the CID */ 2029 /* Get the CID */
2031 if (chip_skh_r(phba->pcidev)) { 2030 if (is_chip_be2_be3r(phba)) {
2031 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
2032 } else {
2032 if ((code == DRIVERMSG_NOTIFY) || 2033 if ((code == DRIVERMSG_NOTIFY) ||
2033 (code == UNSOL_HDR_NOTIFY) || 2034 (code == UNSOL_HDR_NOTIFY) ||
2034 (code == UNSOL_DATA_NOTIFY)) 2035 (code == UNSOL_DATA_NOTIFY))
@@ -2038,10 +2039,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2038 else 2039 else
2039 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 2040 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
2040 cid, sol); 2041 cid, sol);
2041 } else 2042 }
2042 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
2043 2043
2044 ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start]; 2044 cri_index = BE_GET_CRI_FROM_CID(cid);
2045 ep = phba->ep_array[cri_index];
2045 beiscsi_ep = ep->dd_data; 2046 beiscsi_ep = ep->dd_data;
2046 beiscsi_conn = beiscsi_ep->conn; 2047 beiscsi_conn = beiscsi_ep->conn;
2047 2048
@@ -2191,7 +2192,7 @@ void beiscsi_process_all_cqs(struct work_struct *work)
2191 2192
2192static int be_iopoll(struct blk_iopoll *iop, int budget) 2193static int be_iopoll(struct blk_iopoll *iop, int budget)
2193{ 2194{
2194 static unsigned int ret; 2195 unsigned int ret;
2195 struct beiscsi_hba *phba; 2196 struct beiscsi_hba *phba;
2196 struct be_eq_obj *pbe_eq; 2197 struct be_eq_obj *pbe_eq;
2197 2198
@@ -2416,11 +2417,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2416 /* Check for the data_count */ 2417 /* Check for the data_count */
2417 dsp_value = (task->data_count) ? 1 : 0; 2418 dsp_value = (task->data_count) ? 1 : 0;
2418 2419
2419 if (chip_skh_r(phba->pcidev)) 2420 if (is_chip_be2_be3r(phba))
2420 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2421 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
2421 pwrb, dsp_value); 2422 pwrb, dsp_value);
2422 else 2423 else
2423 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2424 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
2424 pwrb, dsp_value); 2425 pwrb, dsp_value);
2425 2426
2426 /* Map addr only if there is data_count */ 2427 /* Map addr only if there is data_count */
@@ -2538,8 +2539,9 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2538 2539
2539static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2540static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2540{ 2541{
2541 struct be_mem_descriptor *mem_descr;
2542 dma_addr_t bus_add; 2542 dma_addr_t bus_add;
2543 struct hwi_controller *phwi_ctrlr;
2544 struct be_mem_descriptor *mem_descr;
2543 struct mem_array *mem_arr, *mem_arr_orig; 2545 struct mem_array *mem_arr, *mem_arr_orig;
2544 unsigned int i, j, alloc_size, curr_alloc_size; 2546 unsigned int i, j, alloc_size, curr_alloc_size;
2545 2547
@@ -2547,9 +2549,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2547 if (!phba->phwi_ctrlr) 2549 if (!phba->phwi_ctrlr)
2548 return -ENOMEM; 2550 return -ENOMEM;
2549 2551
2552 /* Allocate memory for wrb_context */
2553 phwi_ctrlr = phba->phwi_ctrlr;
2554 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
2555 phba->params.cxns_per_ctrl,
2556 GFP_KERNEL);
2557 if (!phwi_ctrlr->wrb_context)
2558 return -ENOMEM;
2559
2550 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2560 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2551 GFP_KERNEL); 2561 GFP_KERNEL);
2552 if (!phba->init_mem) { 2562 if (!phba->init_mem) {
2563 kfree(phwi_ctrlr->wrb_context);
2553 kfree(phba->phwi_ctrlr); 2564 kfree(phba->phwi_ctrlr);
2554 return -ENOMEM; 2565 return -ENOMEM;
2555 } 2566 }
@@ -2558,6 +2569,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2558 GFP_KERNEL); 2569 GFP_KERNEL);
2559 if (!mem_arr_orig) { 2570 if (!mem_arr_orig) {
2560 kfree(phba->init_mem); 2571 kfree(phba->init_mem);
2572 kfree(phwi_ctrlr->wrb_context);
2561 kfree(phba->phwi_ctrlr); 2573 kfree(phba->phwi_ctrlr);
2562 return -ENOMEM; 2574 return -ENOMEM;
2563 } 2575 }
@@ -2628,6 +2640,7 @@ free_mem:
2628 } 2640 }
2629 kfree(mem_arr_orig); 2641 kfree(mem_arr_orig);
2630 kfree(phba->init_mem); 2642 kfree(phba->init_mem);
2643 kfree(phba->phwi_ctrlr->wrb_context);
2631 kfree(phba->phwi_ctrlr); 2644 kfree(phba->phwi_ctrlr);
2632 return -ENOMEM; 2645 return -ENOMEM;
2633} 2646}
@@ -2666,6 +2679,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2666static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2679static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2667{ 2680{
2668 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2681 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2682 struct hwi_context_memory *phwi_ctxt;
2669 struct wrb_handle *pwrb_handle = NULL; 2683 struct wrb_handle *pwrb_handle = NULL;
2670 struct hwi_controller *phwi_ctrlr; 2684 struct hwi_controller *phwi_ctrlr;
2671 struct hwi_wrb_context *pwrb_context; 2685 struct hwi_wrb_context *pwrb_context;
@@ -2680,7 +2694,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2680 mem_descr_wrb += HWI_MEM_WRB; 2694 mem_descr_wrb += HWI_MEM_WRB;
2681 phwi_ctrlr = phba->phwi_ctrlr; 2695 phwi_ctrlr = phba->phwi_ctrlr;
2682 2696
2683 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 2697 /* Allocate memory for WRBQ */
2698 phwi_ctxt = phwi_ctrlr->phwi_ctxt;
2699 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
2700 phba->fw_config.iscsi_cid_count,
2701 GFP_KERNEL);
2702 if (!phwi_ctxt->be_wrbq) {
2703 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2704 "BM_%d : WRBQ Mem Alloc Failed\n");
2705 return -ENOMEM;
2706 }
2707
2708 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2684 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2709 pwrb_context = &phwi_ctrlr->wrb_context[index];
2685 pwrb_context->pwrb_handle_base = 2710 pwrb_context->pwrb_handle_base =
2686 kzalloc(sizeof(struct wrb_handle *) * 2711 kzalloc(sizeof(struct wrb_handle *) *
@@ -2723,7 +2748,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2723 } 2748 }
2724 } 2749 }
2725 idx = 0; 2750 idx = 0;
2726 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 2751 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2727 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2752 pwrb_context = &phwi_ctrlr->wrb_context[index];
2728 if (!num_cxn_wrb) { 2753 if (!num_cxn_wrb) {
2729 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2754 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
@@ -2752,7 +2777,7 @@ init_wrb_hndl_failed:
2752 return -ENOMEM; 2777 return -ENOMEM;
2753} 2778}
2754 2779
2755static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2780static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2756{ 2781{
2757 struct hwi_controller *phwi_ctrlr; 2782 struct hwi_controller *phwi_ctrlr;
2758 struct hba_parameters *p = &phba->params; 2783 struct hba_parameters *p = &phba->params;
@@ -2770,6 +2795,15 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2770 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; 2795 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2771 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2796 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2772 2797
2798 pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
2799 phba->fw_config.iscsi_cid_count,
2800 GFP_KERNEL);
2801 if (!pasync_ctx->async_entry) {
2802 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2803 "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
2804 return -ENOMEM;
2805 }
2806
2773 pasync_ctx->num_entries = p->asyncpdus_per_ctrl; 2807 pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
2774 pasync_ctx->buffer_size = p->defpdu_hdr_sz; 2808 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
2775 2809
@@ -2934,6 +2968,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2934 pasync_ctx->async_header.ep_read_ptr = -1; 2968 pasync_ctx->async_header.ep_read_ptr = -1;
2935 pasync_ctx->async_data.host_write_ptr = 0; 2969 pasync_ctx->async_data.host_write_ptr = 0;
2936 pasync_ctx->async_data.ep_read_ptr = -1; 2970 pasync_ctx->async_data.ep_read_ptr = -1;
2971
2972 return 0;
2937} 2973}
2938 2974
2939static int 2975static int
@@ -3293,6 +3329,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3293 void *wrb_vaddr; 3329 void *wrb_vaddr;
3294 struct be_dma_mem sgl; 3330 struct be_dma_mem sgl;
3295 struct be_mem_descriptor *mem_descr; 3331 struct be_mem_descriptor *mem_descr;
3332 struct hwi_wrb_context *pwrb_context;
3296 int status; 3333 int status;
3297 3334
3298 idx = 0; 3335 idx = 0;
@@ -3351,8 +3388,9 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3351 kfree(pwrb_arr); 3388 kfree(pwrb_arr);
3352 return status; 3389 return status;
3353 } 3390 }
3354 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. 3391 pwrb_context = &phwi_ctrlr->wrb_context[i];
3355 id; 3392 pwrb_context->cid = phwi_context->be_wrbq[i].id;
3393 BE_SET_CID_TO_CRI(i, pwrb_context->cid);
3356 } 3394 }
3357 kfree(pwrb_arr); 3395 kfree(pwrb_arr);
3358 return 0; 3396 return 0;
@@ -3365,7 +3403,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
3365 struct hwi_wrb_context *pwrb_context; 3403 struct hwi_wrb_context *pwrb_context;
3366 3404
3367 phwi_ctrlr = phba->phwi_ctrlr; 3405 phwi_ctrlr = phba->phwi_ctrlr;
3368 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 3406 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
3369 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3407 pwrb_context = &phwi_ctrlr->wrb_context[index];
3370 kfree(pwrb_context->pwrb_handle_base); 3408 kfree(pwrb_context->pwrb_handle_base);
3371 kfree(pwrb_context->pwrb_handle_basestd); 3409 kfree(pwrb_context->pwrb_handle_basestd);
@@ -3394,6 +3432,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
3394 struct be_ctrl_info *ctrl = &phba->ctrl; 3432 struct be_ctrl_info *ctrl = &phba->ctrl;
3395 struct hwi_controller *phwi_ctrlr; 3433 struct hwi_controller *phwi_ctrlr;
3396 struct hwi_context_memory *phwi_context; 3434 struct hwi_context_memory *phwi_context;
3435 struct hwi_async_pdu_context *pasync_ctx;
3397 int i, eq_num; 3436 int i, eq_num;
3398 3437
3399 phwi_ctrlr = phba->phwi_ctrlr; 3438 phwi_ctrlr = phba->phwi_ctrlr;
@@ -3403,6 +3442,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
3403 if (q->created) 3442 if (q->created)
3404 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3443 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3405 } 3444 }
3445 kfree(phwi_context->be_wrbq);
3406 free_wrb_handles(phba); 3446 free_wrb_handles(phba);
3407 3447
3408 q = &phwi_context->be_def_hdrq; 3448 q = &phwi_context->be_def_hdrq;
@@ -3430,6 +3470,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
3430 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3470 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3431 } 3471 }
3432 be_mcc_queues_destroy(phba); 3472 be_mcc_queues_destroy(phba);
3473
3474 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
3475 kfree(pasync_ctx->async_entry);
3476 be_cmd_fw_uninit(ctrl);
3433} 3477}
3434 3478
3435static int be_mcc_queues_create(struct beiscsi_hba *phba, 3479static int be_mcc_queues_create(struct beiscsi_hba *phba,
@@ -3607,7 +3651,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
3607 if (beiscsi_init_wrb_handle(phba)) 3651 if (beiscsi_init_wrb_handle(phba))
3608 return -ENOMEM; 3652 return -ENOMEM;
3609 3653
3610 hwi_init_async_pdu_ctx(phba); 3654 if (hwi_init_async_pdu_ctx(phba)) {
3655 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3656 "BM_%d : hwi_init_async_pdu_ctx failed\n");
3657 return -ENOMEM;
3658 }
3659
3611 if (hwi_init_port(phba) != 0) { 3660 if (hwi_init_port(phba) != 0) {
3612 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3661 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3613 "BM_%d : hwi_init_controller failed\n"); 3662 "BM_%d : hwi_init_controller failed\n");
@@ -3637,6 +3686,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
3637 mem_descr++; 3686 mem_descr++;
3638 } 3687 }
3639 kfree(phba->init_mem); 3688 kfree(phba->init_mem);
3689 kfree(phba->phwi_ctrlr->wrb_context);
3640 kfree(phba->phwi_ctrlr); 3690 kfree(phba->phwi_ctrlr);
3641} 3691}
3642 3692
@@ -3769,7 +3819,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3769 3819
3770static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 3820static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3771{ 3821{
3772 int i, new_cid; 3822 int i;
3773 3823
3774 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, 3824 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3775 GFP_KERNEL); 3825 GFP_KERNEL);
@@ -3780,19 +3830,33 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3780 return -ENOMEM; 3830 return -ENOMEM;
3781 } 3831 }
3782 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 3832 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3783 phba->params.cxns_per_ctrl * 2, GFP_KERNEL); 3833 phba->params.cxns_per_ctrl, GFP_KERNEL);
3784 if (!phba->ep_array) { 3834 if (!phba->ep_array) {
3785 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3835 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3786 "BM_%d : Failed to allocate memory in " 3836 "BM_%d : Failed to allocate memory in "
3787 "hba_setup_cid_tbls\n"); 3837 "hba_setup_cid_tbls\n");
3788 kfree(phba->cid_array); 3838 kfree(phba->cid_array);
3839 phba->cid_array = NULL;
3789 return -ENOMEM; 3840 return -ENOMEM;
3790 } 3841 }
3791 new_cid = phba->fw_config.iscsi_cid_start; 3842
3792 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3843 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
3793 phba->cid_array[i] = new_cid; 3844 phba->params.cxns_per_ctrl, GFP_KERNEL);
3794 new_cid += 2; 3845 if (!phba->conn_table) {
3846 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3847 "BM_%d : Failed to allocate memory in"
3848 "hba_setup_cid_tbls\n");
3849
3850 kfree(phba->cid_array);
3851 kfree(phba->ep_array);
3852 phba->cid_array = NULL;
3853 phba->ep_array = NULL;
3854 return -ENOMEM;
3795 } 3855 }
3856
3857 for (i = 0; i < phba->params.cxns_per_ctrl; i++)
3858 phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
3859
3796 phba->avlbl_cids = phba->params.cxns_per_ctrl; 3860 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3797 return 0; 3861 return 0;
3798} 3862}
@@ -4062,6 +4126,53 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
4062 kfree(phba->eh_sgl_hndl_base); 4126 kfree(phba->eh_sgl_hndl_base);
4063 kfree(phba->cid_array); 4127 kfree(phba->cid_array);
4064 kfree(phba->ep_array); 4128 kfree(phba->ep_array);
4129 kfree(phba->conn_table);
4130}
4131
4132/**
4133 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
4134 * @beiscsi_conn: ptr to the conn to be cleaned up
4135 * @task: ptr to iscsi_task resource to be freed.
4136 *
4137 * Free driver mgmt resources binded to CXN.
4138 **/
4139void
4140beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4141 struct iscsi_task *task)
4142{
4143 struct beiscsi_io_task *io_task;
4144 struct beiscsi_hba *phba = beiscsi_conn->phba;
4145 struct hwi_wrb_context *pwrb_context;
4146 struct hwi_controller *phwi_ctrlr;
4147 uint16_t cri_index = BE_GET_CRI_FROM_CID(
4148 beiscsi_conn->beiscsi_conn_cid);
4149
4150 phwi_ctrlr = phba->phwi_ctrlr;
4151 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4152
4153 io_task = task->dd_data;
4154
4155 if (io_task->pwrb_handle) {
4156 memset(io_task->pwrb_handle->pwrb, 0,
4157 sizeof(struct iscsi_wrb));
4158 free_wrb_handle(phba, pwrb_context,
4159 io_task->pwrb_handle);
4160 io_task->pwrb_handle = NULL;
4161 }
4162
4163 if (io_task->psgl_handle) {
4164 spin_lock_bh(&phba->mgmt_sgl_lock);
4165 free_mgmt_sgl_handle(phba,
4166 io_task->psgl_handle);
4167 io_task->psgl_handle = NULL;
4168 spin_unlock_bh(&phba->mgmt_sgl_lock);
4169 }
4170
4171 if (io_task->mtask_addr)
4172 pci_unmap_single(phba->pcidev,
4173 io_task->mtask_addr,
4174 io_task->mtask_data_count,
4175 PCI_DMA_TODEVICE);
4065} 4176}
4066 4177
4067/** 4178/**
@@ -4078,10 +4189,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
4078 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4189 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4079 struct hwi_wrb_context *pwrb_context; 4190 struct hwi_wrb_context *pwrb_context;
4080 struct hwi_controller *phwi_ctrlr; 4191 struct hwi_controller *phwi_ctrlr;
4192 uint16_t cri_index = BE_GET_CRI_FROM_CID(
4193 beiscsi_conn->beiscsi_conn_cid);
4081 4194
4082 phwi_ctrlr = phba->phwi_ctrlr; 4195 phwi_ctrlr = phba->phwi_ctrlr;
4083 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid 4196 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4084 - phba->fw_config.iscsi_cid_start];
4085 4197
4086 if (io_task->cmd_bhs) { 4198 if (io_task->cmd_bhs) {
4087 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4199 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -4103,27 +4215,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
4103 io_task->psgl_handle = NULL; 4215 io_task->psgl_handle = NULL;
4104 } 4216 }
4105 } else { 4217 } else {
4106 if (!beiscsi_conn->login_in_progress) { 4218 if (!beiscsi_conn->login_in_progress)
4107 if (io_task->pwrb_handle) { 4219 beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
4108 free_wrb_handle(phba, pwrb_context,
4109 io_task->pwrb_handle);
4110 io_task->pwrb_handle = NULL;
4111 }
4112 if (io_task->psgl_handle) {
4113 spin_lock(&phba->mgmt_sgl_lock);
4114 free_mgmt_sgl_handle(phba,
4115 io_task->psgl_handle);
4116 spin_unlock(&phba->mgmt_sgl_lock);
4117 io_task->psgl_handle = NULL;
4118 }
4119 if (io_task->mtask_addr) {
4120 pci_unmap_single(phba->pcidev,
4121 io_task->mtask_addr,
4122 io_task->mtask_data_count,
4123 PCI_DMA_TODEVICE);
4124 io_task->mtask_addr = 0;
4125 }
4126 }
4127 } 4220 }
4128} 4221}
4129 4222
@@ -4146,15 +4239,14 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
4146 beiscsi_cleanup_task(task); 4239 beiscsi_cleanup_task(task);
4147 spin_unlock_bh(&session->lock); 4240 spin_unlock_bh(&session->lock);
4148 4241
4149 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid - 4242 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
4150 phba->fw_config.iscsi_cid_start));
4151 4243
4152 /* Check for the adapter family */ 4244 /* Check for the adapter family */
4153 if (chip_skh_r(phba->pcidev)) 4245 if (is_chip_be2_be3r(phba))
4154 beiscsi_offload_cxn_v2(params, pwrb_handle);
4155 else
4156 beiscsi_offload_cxn_v0(params, pwrb_handle, 4246 beiscsi_offload_cxn_v0(params, pwrb_handle,
4157 phba->init_mem); 4247 phba->init_mem);
4248 else
4249 beiscsi_offload_cxn_v2(params, pwrb_handle);
4158 4250
4159 be_dws_le_to_cpu(pwrb_handle->pwrb, 4251 be_dws_le_to_cpu(pwrb_handle->pwrb,
4160 sizeof(struct iscsi_target_context_update_wrb)); 4252 sizeof(struct iscsi_target_context_update_wrb));
@@ -4194,6 +4286,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4194 struct hwi_wrb_context *pwrb_context; 4286 struct hwi_wrb_context *pwrb_context;
4195 struct hwi_controller *phwi_ctrlr; 4287 struct hwi_controller *phwi_ctrlr;
4196 itt_t itt; 4288 itt_t itt;
4289 uint16_t cri_index = 0;
4197 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4290 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4198 dma_addr_t paddr; 4291 dma_addr_t paddr;
4199 4292
@@ -4223,8 +4316,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4223 goto free_hndls; 4316 goto free_hndls;
4224 } 4317 }
4225 io_task->pwrb_handle = alloc_wrb_handle(phba, 4318 io_task->pwrb_handle = alloc_wrb_handle(phba,
4226 beiscsi_conn->beiscsi_conn_cid - 4319 beiscsi_conn->beiscsi_conn_cid);
4227 phba->fw_config.iscsi_cid_start);
4228 if (!io_task->pwrb_handle) { 4320 if (!io_task->pwrb_handle) {
4229 beiscsi_log(phba, KERN_ERR, 4321 beiscsi_log(phba, KERN_ERR,
4230 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4322 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4236,6 +4328,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4236 } else { 4328 } else {
4237 io_task->scsi_cmnd = NULL; 4329 io_task->scsi_cmnd = NULL;
4238 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4330 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
4331 beiscsi_conn->task = task;
4239 if (!beiscsi_conn->login_in_progress) { 4332 if (!beiscsi_conn->login_in_progress) {
4240 spin_lock(&phba->mgmt_sgl_lock); 4333 spin_lock(&phba->mgmt_sgl_lock);
4241 io_task->psgl_handle = (struct sgl_handle *) 4334 io_task->psgl_handle = (struct sgl_handle *)
@@ -4257,8 +4350,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4257 io_task->psgl_handle; 4350 io_task->psgl_handle;
4258 io_task->pwrb_handle = 4351 io_task->pwrb_handle =
4259 alloc_wrb_handle(phba, 4352 alloc_wrb_handle(phba,
4260 beiscsi_conn->beiscsi_conn_cid - 4353 beiscsi_conn->beiscsi_conn_cid);
4261 phba->fw_config.iscsi_cid_start);
4262 if (!io_task->pwrb_handle) { 4354 if (!io_task->pwrb_handle) {
4263 beiscsi_log(phba, KERN_ERR, 4355 beiscsi_log(phba, KERN_ERR,
4264 BEISCSI_LOG_IO | 4356 BEISCSI_LOG_IO |
@@ -4278,7 +4370,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4278 io_task->pwrb_handle = 4370 io_task->pwrb_handle =
4279 beiscsi_conn->plogin_wrb_handle; 4371 beiscsi_conn->plogin_wrb_handle;
4280 } 4372 }
4281 beiscsi_conn->task = task;
4282 } else { 4373 } else {
4283 spin_lock(&phba->mgmt_sgl_lock); 4374 spin_lock(&phba->mgmt_sgl_lock);
4284 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4375 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
@@ -4295,8 +4386,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4295 } 4386 }
4296 io_task->pwrb_handle = 4387 io_task->pwrb_handle =
4297 alloc_wrb_handle(phba, 4388 alloc_wrb_handle(phba,
4298 beiscsi_conn->beiscsi_conn_cid - 4389 beiscsi_conn->beiscsi_conn_cid);
4299 phba->fw_config.iscsi_cid_start);
4300 if (!io_task->pwrb_handle) { 4390 if (!io_task->pwrb_handle) {
4301 beiscsi_log(phba, KERN_ERR, 4391 beiscsi_log(phba, KERN_ERR,
4302 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4392 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4324,12 +4414,13 @@ free_io_hndls:
4324free_mgmt_hndls: 4414free_mgmt_hndls:
4325 spin_lock(&phba->mgmt_sgl_lock); 4415 spin_lock(&phba->mgmt_sgl_lock);
4326 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4416 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4417 io_task->psgl_handle = NULL;
4327 spin_unlock(&phba->mgmt_sgl_lock); 4418 spin_unlock(&phba->mgmt_sgl_lock);
4328free_hndls: 4419free_hndls:
4329 phwi_ctrlr = phba->phwi_ctrlr; 4420 phwi_ctrlr = phba->phwi_ctrlr;
4330 pwrb_context = &phwi_ctrlr->wrb_context[ 4421 cri_index = BE_GET_CRI_FROM_CID(
4331 beiscsi_conn->beiscsi_conn_cid - 4422 beiscsi_conn->beiscsi_conn_cid);
4332 phba->fw_config.iscsi_cid_start]; 4423 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4333 if (io_task->pwrb_handle) 4424 if (io_task->pwrb_handle)
4334 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4425 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4335 io_task->pwrb_handle = NULL; 4426 io_task->pwrb_handle = NULL;
@@ -4351,7 +4442,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
4351 unsigned int doorbell = 0; 4442 unsigned int doorbell = 0;
4352 4443
4353 pwrb = io_task->pwrb_handle->pwrb; 4444 pwrb = io_task->pwrb_handle->pwrb;
4354 memset(pwrb, 0, sizeof(*pwrb));
4355 4445
4356 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; 4446 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4357 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4447 io_task->bhs_len = sizeof(struct be_cmd_bhs);
@@ -4465,19 +4555,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
4465 pwrb = io_task->pwrb_handle->pwrb; 4555 pwrb = io_task->pwrb_handle->pwrb;
4466 memset(pwrb, 0, sizeof(*pwrb)); 4556 memset(pwrb, 0, sizeof(*pwrb));
4467 4557
4468 if (chip_skh_r(phba->pcidev)) { 4558 if (is_chip_be2_be3r(phba)) {
4469 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4470 be32_to_cpu(task->cmdsn));
4471 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4472 io_task->pwrb_handle->wrb_index);
4473 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4474 io_task->psgl_handle->sgl_index);
4475 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
4476 task->data_count);
4477 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4478 io_task->pwrb_handle->nxt_wrb_index);
4479 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
4480 } else {
4481 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4559 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4482 be32_to_cpu(task->cmdsn)); 4560 be32_to_cpu(task->cmdsn));
4483 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4561 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
@@ -4489,6 +4567,18 @@ static int beiscsi_mtask(struct iscsi_task *task)
4489 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4567 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4490 io_task->pwrb_handle->nxt_wrb_index); 4568 io_task->pwrb_handle->nxt_wrb_index);
4491 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 4569 pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
4570 } else {
4571 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4572 be32_to_cpu(task->cmdsn));
4573 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4574 io_task->pwrb_handle->wrb_index);
4575 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4576 io_task->psgl_handle->sgl_index);
4577 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
4578 task->data_count);
4579 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4580 io_task->pwrb_handle->nxt_wrb_index);
4581 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
4492 } 4582 }
4493 4583
4494 4584
@@ -4501,19 +4591,19 @@ static int beiscsi_mtask(struct iscsi_task *task)
4501 case ISCSI_OP_NOOP_OUT: 4591 case ISCSI_OP_NOOP_OUT:
4502 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 4592 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4503 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4593 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4504 if (chip_skh_r(phba->pcidev)) 4594 if (is_chip_be2_be3r(phba))
4505 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4595 AMAP_SET_BITS(struct amap_iscsi_wrb,
4506 dmsg, pwrb, 1); 4596 dmsg, pwrb, 1);
4507 else 4597 else
4508 AMAP_SET_BITS(struct amap_iscsi_wrb, 4598 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4509 dmsg, pwrb, 1); 4599 dmsg, pwrb, 1);
4510 } else { 4600 } else {
4511 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 4601 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
4512 if (chip_skh_r(phba->pcidev)) 4602 if (is_chip_be2_be3r(phba))
4513 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4603 AMAP_SET_BITS(struct amap_iscsi_wrb,
4514 dmsg, pwrb, 0); 4604 dmsg, pwrb, 0);
4515 else 4605 else
4516 AMAP_SET_BITS(struct amap_iscsi_wrb, 4606 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4517 dmsg, pwrb, 0); 4607 dmsg, pwrb, 0);
4518 } 4608 }
4519 hwi_write_buffer(pwrb, task); 4609 hwi_write_buffer(pwrb, task);
@@ -4540,9 +4630,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
4540 } 4630 }
4541 4631
4542 /* Set the task type */ 4632 /* Set the task type */
4543 io_task->wrb_type = (chip_skh_r(phba->pcidev)) ? 4633 io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
4544 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) : 4634 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
4545 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb); 4635 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
4546 4636
4547 doorbell |= cid & DB_WRB_POST_CID_MASK; 4637 doorbell |= cid & DB_WRB_POST_CID_MASK;
4548 doorbell |= (io_task->pwrb_handle->wrb_index & 4638 doorbell |= (io_task->pwrb_handle->wrb_index &
@@ -4834,6 +4924,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
4834 case OC_SKH_ID1: 4924 case OC_SKH_ID1:
4835 phba->generation = BE_GEN4; 4925 phba->generation = BE_GEN4;
4836 phba->iotask_fn = beiscsi_iotask_v2; 4926 phba->iotask_fn = beiscsi_iotask_v2;
4927 break;
4837 default: 4928 default:
4838 phba->generation = 0; 4929 phba->generation = 0;
4839 } 4930 }
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 5946577d79d6..2c06ef3c02ac 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -36,7 +36,7 @@
36 36
37#include "be.h" 37#include "be.h"
38#define DRV_NAME "be2iscsi" 38#define DRV_NAME "be2iscsi"
39#define BUILD_STR "10.0.272.0" 39#define BUILD_STR "10.0.467.0"
40#define BE_NAME "Emulex OneConnect" \ 40#define BE_NAME "Emulex OneConnect" \
41 "Open-iSCSI Driver version" BUILD_STR 41 "Open-iSCSI Driver version" BUILD_STR
42#define DRV_DESC BE_NAME " " "Driver" 42#define DRV_DESC BE_NAME " " "Driver"
@@ -66,8 +66,9 @@
66 66
67#define MAX_CPUS 64 67#define MAX_CPUS 64
68#define BEISCSI_MAX_NUM_CPUS 7 68#define BEISCSI_MAX_NUM_CPUS 7
69#define OC_SKH_MAX_NUM_CPUS 63 69#define OC_SKH_MAX_NUM_CPUS 31
70 70
71#define BEISCSI_VER_STRLEN 32
71 72
72#define BEISCSI_SGLIST_ELEMENTS 30 73#define BEISCSI_SGLIST_ELEMENTS 30
73 74
@@ -265,7 +266,9 @@ struct invalidate_command_table {
265 unsigned short cid; 266 unsigned short cid;
266} __packed; 267} __packed;
267 268
268#define chip_skh_r(pdev) (pdev->device == OC_SKH_ID1) 269#define chip_be2(phba) (phba->generation == BE_GEN2)
270#define chip_be3_r(phba) (phba->generation == BE_GEN3)
271#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
269struct beiscsi_hba { 272struct beiscsi_hba {
270 struct hba_parameters params; 273 struct hba_parameters params;
271 struct hwi_controller *phwi_ctrlr; 274 struct hwi_controller *phwi_ctrlr;
@@ -304,10 +307,15 @@ struct beiscsi_hba {
304 unsigned short avlbl_cids; 307 unsigned short avlbl_cids;
305 unsigned short cid_alloc; 308 unsigned short cid_alloc;
306 unsigned short cid_free; 309 unsigned short cid_free;
307 struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2];
308 struct list_head hba_queue; 310 struct list_head hba_queue;
311#define BE_MAX_SESSION 2048
312#define BE_SET_CID_TO_CRI(cri_index, cid) \
313 (phba->cid_to_cri_map[cid] = cri_index)
314#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
315 unsigned short cid_to_cri_map[BE_MAX_SESSION];
309 unsigned short *cid_array; 316 unsigned short *cid_array;
310 struct iscsi_endpoint **ep_array; 317 struct iscsi_endpoint **ep_array;
318 struct beiscsi_conn **conn_table;
311 struct iscsi_boot_kset *boot_kset; 319 struct iscsi_boot_kset *boot_kset;
312 struct Scsi_Host *shost; 320 struct Scsi_Host *shost;
313 struct iscsi_iface *ipv4_iface; 321 struct iscsi_iface *ipv4_iface;
@@ -339,6 +347,7 @@ struct beiscsi_hba {
339 struct delayed_work beiscsi_hw_check_task; 347 struct delayed_work beiscsi_hw_check_task;
340 348
341 u8 mac_address[ETH_ALEN]; 349 u8 mac_address[ETH_ALEN];
350 char fw_ver_str[BEISCSI_VER_STRLEN];
342 char wq_name[20]; 351 char wq_name[20];
343 struct workqueue_struct *wq; /* The actuak work queue */ 352 struct workqueue_struct *wq; /* The actuak work queue */
344 struct be_ctrl_info ctrl; 353 struct be_ctrl_info ctrl;
@@ -563,7 +572,7 @@ struct hwi_async_pdu_context {
563 * This is a varying size list! Do not add anything 572 * This is a varying size list! Do not add anything
564 * after this entry!! 573 * after this entry!!
565 */ 574 */
566 struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2]; 575 struct hwi_async_entry *async_entry;
567}; 576};
568 577
569#define PDUCQE_CODE_MASK 0x0000003F 578#define PDUCQE_CODE_MASK 0x0000003F
@@ -749,6 +758,8 @@ void
749free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); 758free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
750 759
751void beiscsi_process_all_cqs(struct work_struct *work); 760void beiscsi_process_all_cqs(struct work_struct *work);
761void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
762 struct iscsi_task *task);
752 763
753static inline bool beiscsi_error(struct beiscsi_hba *phba) 764static inline bool beiscsi_error(struct beiscsi_hba *phba)
754{ 765{
@@ -933,7 +944,7 @@ struct hwi_controller {
933 struct sgl_handle *psgl_handle_base; 944 struct sgl_handle *psgl_handle_base;
934 unsigned int wrb_mem_index; 945 unsigned int wrb_mem_index;
935 946
936 struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2]; 947 struct hwi_wrb_context *wrb_context;
937 struct mcc_wrb *pmcc_wrb_base; 948 struct mcc_wrb *pmcc_wrb_base;
938 struct be_ring default_pdu_hdr; 949 struct be_ring default_pdu_hdr;
939 struct be_ring default_pdu_data; 950 struct be_ring default_pdu_data;
@@ -970,9 +981,7 @@ struct hwi_context_memory {
970 struct be_queue_info be_def_hdrq; 981 struct be_queue_info be_def_hdrq;
971 struct be_queue_info be_def_dataq; 982 struct be_queue_info be_def_dataq;
972 983
973 struct be_queue_info be_wrbq[BE2_MAX_SESSIONS]; 984 struct be_queue_info *be_wrbq;
974 struct be_mcc_wrb_context *pbe_mcc_context;
975
976 struct hwi_async_pdu_context *pasync_ctx; 985 struct hwi_async_pdu_context *pasync_ctx;
977}; 986};
978 987
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 55cc9902263d..245a9595a93a 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -368,6 +368,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
368 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 368 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
369 "BM_%d : phba->fw_config.iscsi_features = %d\n", 369 "BM_%d : phba->fw_config.iscsi_features = %d\n",
370 phba->fw_config.iscsi_features); 370 phba->fw_config.iscsi_features);
371 memcpy(phba->fw_ver_str, resp->params.hba_attribs.
372 firmware_version_string, BEISCSI_VER_STRLEN);
371 } else 373 } else
372 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 374 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
373 "BG_%d : Failed in mgmt_check_supported_fw\n"); 375 "BG_%d : Failed in mgmt_check_supported_fw\n");
@@ -1260,6 +1262,45 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr,
1260} 1262}
1261 1263
1262/** 1264/**
1265 * beiscsi_fw_ver_disp()- Display Firmware Version
1266 * @dev: ptr to device not used.
1267 * @attr: device attribute, not used.
1268 * @buf: contains formatted text Firmware version
1269 *
1270 * return
1271 * size of the formatted string
1272 **/
1273ssize_t
1274beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
1275 char *buf)
1276{
1277 struct Scsi_Host *shost = class_to_shost(dev);
1278 struct beiscsi_hba *phba = iscsi_host_priv(shost);
1279
1280 return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str);
1281}
1282
1283/**
1284 * beiscsi_active_cid_disp()- Display Sessions Active
1285 * @dev: ptr to device not used.
1286 * @attr: device attribute, not used.
1287 * @buf: contains formatted text Session Count
1288 *
1289 * return
1290 * size of the formatted string
1291 **/
1292ssize_t
1293beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
1294 char *buf)
1295{
1296 struct Scsi_Host *shost = class_to_shost(dev);
1297 struct beiscsi_hba *phba = iscsi_host_priv(shost);
1298
1299 return snprintf(buf, PAGE_SIZE, "%d\n",
1300 (phba->params.cxns_per_ctrl - phba->avlbl_cids));
1301}
1302
1303/**
1263 * beiscsi_adap_family_disp()- Display adapter family. 1304 * beiscsi_adap_family_disp()- Display adapter family.
1264 * @dev: ptr to device to get priv structure 1305 * @dev: ptr to device to get priv structure
1265 * @attr: device attribute, not used. 1306 * @attr: device attribute, not used.
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 2e4968add799..04af7e74fe48 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -156,25 +156,25 @@ union invalidate_commands_params {
156} __packed; 156} __packed;
157 157
158struct mgmt_hba_attributes { 158struct mgmt_hba_attributes {
159 u8 flashrom_version_string[32]; 159 u8 flashrom_version_string[BEISCSI_VER_STRLEN];
160 u8 manufacturer_name[32]; 160 u8 manufacturer_name[BEISCSI_VER_STRLEN];
161 u32 supported_modes; 161 u32 supported_modes;
162 u8 seeprom_version_lo; 162 u8 seeprom_version_lo;
163 u8 seeprom_version_hi; 163 u8 seeprom_version_hi;
164 u8 rsvd0[2]; 164 u8 rsvd0[2];
165 u32 fw_cmd_data_struct_version; 165 u32 fw_cmd_data_struct_version;
166 u32 ep_fw_data_struct_version; 166 u32 ep_fw_data_struct_version;
167 u32 future_reserved[12]; 167 u8 ncsi_version_string[12];
168 u32 default_extended_timeout; 168 u32 default_extended_timeout;
169 u8 controller_model_number[32]; 169 u8 controller_model_number[BEISCSI_VER_STRLEN];
170 u8 controller_description[64]; 170 u8 controller_description[64];
171 u8 controller_serial_number[32]; 171 u8 controller_serial_number[BEISCSI_VER_STRLEN];
172 u8 ip_version_string[32]; 172 u8 ip_version_string[BEISCSI_VER_STRLEN];
173 u8 firmware_version_string[32]; 173 u8 firmware_version_string[BEISCSI_VER_STRLEN];
174 u8 bios_version_string[32]; 174 u8 bios_version_string[BEISCSI_VER_STRLEN];
175 u8 redboot_version_string[32]; 175 u8 redboot_version_string[BEISCSI_VER_STRLEN];
176 u8 driver_version_string[32]; 176 u8 driver_version_string[BEISCSI_VER_STRLEN];
177 u8 fw_on_flash_version_string[32]; 177 u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN];
178 u32 functionalities_supported; 178 u32 functionalities_supported;
179 u16 max_cdblength; 179 u16 max_cdblength;
180 u8 asic_revision; 180 u8 asic_revision;
@@ -190,7 +190,8 @@ struct mgmt_hba_attributes {
190 u32 firmware_post_status; 190 u32 firmware_post_status;
191 u32 hba_mtu[8]; 191 u32 hba_mtu[8];
192 u8 iscsi_features; 192 u8 iscsi_features;
193 u8 future_u8[3]; 193 u8 asic_generation;
194 u8 future_u8[2];
194 u32 future_u32[3]; 195 u32 future_u32[3];
195} __packed; 196} __packed;
196 197
@@ -207,7 +208,7 @@ struct mgmt_controller_attributes {
207 u64 unique_identifier; 208 u64 unique_identifier;
208 u8 netfilters; 209 u8 netfilters;
209 u8 rsvd0[3]; 210 u8 rsvd0[3];
210 u8 future_u32[4]; 211 u32 future_u32[4];
211} __packed; 212} __packed;
212 213
213struct be_mgmt_controller_attributes { 214struct be_mgmt_controller_attributes {
@@ -311,6 +312,12 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
311ssize_t beiscsi_drvr_ver_disp(struct device *dev, 312ssize_t beiscsi_drvr_ver_disp(struct device *dev,
312 struct device_attribute *attr, char *buf); 313 struct device_attribute *attr, char *buf);
313 314
315ssize_t beiscsi_fw_ver_disp(struct device *dev,
316 struct device_attribute *attr, char *buf);
317
318ssize_t beiscsi_active_cid_disp(struct device *dev,
319 struct device_attribute *attr, char *buf);
320
314ssize_t beiscsi_adap_family_disp(struct device *dev, 321ssize_t beiscsi_adap_family_disp(struct device *dev,
315 struct device_attribute *attr, char *buf); 322 struct device_attribute *attr, char *buf);
316 323
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 11596b2c4702..08b22a901c25 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -2,7 +2,7 @@
2#define _BNX2FC_H_ 2#define _BNX2FC_H_
3/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. 3/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
4 * 4 *
5 * Copyright (c) 2008 - 2011 Broadcom Corporation 5 * Copyright (c) 2008 - 2013 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -64,10 +64,12 @@
64#include "bnx2fc_constants.h" 64#include "bnx2fc_constants.h"
65 65
66#define BNX2FC_NAME "bnx2fc" 66#define BNX2FC_NAME "bnx2fc"
67#define BNX2FC_VERSION "1.0.13" 67#define BNX2FC_VERSION "1.0.14"
68 68
69#define PFX "bnx2fc: " 69#define PFX "bnx2fc: "
70 70
71#define BCM_CHIP_LEN 16
72
71#define BNX2X_DOORBELL_PCI_BAR 2 73#define BNX2X_DOORBELL_PCI_BAR 2
72 74
73#define BNX2FC_MAX_BD_LEN 0xffff 75#define BNX2FC_MAX_BD_LEN 0xffff
@@ -241,6 +243,8 @@ struct bnx2fc_hba {
241 int wait_for_link_down; 243 int wait_for_link_down;
242 int num_ofld_sess; 244 int num_ofld_sess;
243 struct list_head vports; 245 struct list_head vports;
246
247 char chip_num[BCM_CHIP_LEN];
244}; 248};
245 249
246struct bnx2fc_interface { 250struct bnx2fc_interface {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index bdbbb13b8534..b1c9a4f8caee 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -3,7 +3,7 @@
3 * This file contains helper routines that handle ELS requests 3 * This file contains helper routines that handle ELS requests
4 * and responses. 4 * and responses.
5 * 5 *
6 * Copyright (c) 2008 - 2011 Broadcom Corporation 6 * Copyright (c) 2008 - 2013 Broadcom Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 7dffec1e5715..69ac55495c1d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -3,7 +3,7 @@
3 * cnic modules to create FCoE instances, send/receive non-offloaded 3 * cnic modules to create FCoE instances, send/receive non-offloaded
4 * FIP/FCoE packets, listen to link events etc. 4 * FIP/FCoE packets, listen to link events etc.
5 * 5 *
6 * Copyright (c) 2008 - 2011 Broadcom Corporation 6 * Copyright (c) 2008 - 2013 Broadcom Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
22 22
23#define DRV_MODULE_NAME "bnx2fc" 23#define DRV_MODULE_NAME "bnx2fc"
24#define DRV_MODULE_VERSION BNX2FC_VERSION 24#define DRV_MODULE_VERSION BNX2FC_VERSION
25#define DRV_MODULE_RELDATE "Dec 21, 2012" 25#define DRV_MODULE_RELDATE "Mar 08, 2013"
26 26
27 27
28static char version[] = 28static char version[] =
@@ -679,6 +679,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
679{ 679{
680 struct fcoe_port *port = lport_priv(lport); 680 struct fcoe_port *port = lport_priv(lport);
681 struct bnx2fc_interface *interface = port->priv; 681 struct bnx2fc_interface *interface = port->priv;
682 struct bnx2fc_hba *hba = interface->hba;
682 struct Scsi_Host *shost = lport->host; 683 struct Scsi_Host *shost = lport->host;
683 int rc = 0; 684 int rc = 0;
684 685
@@ -699,8 +700,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
699 } 700 }
700 if (!lport->vport) 701 if (!lport->vport)
701 fc_host_max_npiv_vports(lport->host) = USHRT_MAX; 702 fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
702 sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", 703 snprintf(fc_host_symbolic_name(lport->host), 256,
703 BNX2FC_NAME, BNX2FC_VERSION, 704 "%s (Broadcom %s) v%s over %s",
705 BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
704 interface->netdev->name); 706 interface->netdev->name);
705 707
706 return 0; 708 return 0;
@@ -1656,23 +1658,60 @@ mem_err:
1656static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) 1658static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
1657{ 1659{
1658 struct cnic_dev *cnic; 1660 struct cnic_dev *cnic;
1661 struct pci_dev *pdev;
1659 1662
1660 if (!hba->cnic) { 1663 if (!hba->cnic) {
1661 printk(KERN_ERR PFX "cnic is NULL\n"); 1664 printk(KERN_ERR PFX "cnic is NULL\n");
1662 return -ENODEV; 1665 return -ENODEV;
1663 } 1666 }
1664 cnic = hba->cnic; 1667 cnic = hba->cnic;
1665 hba->pcidev = cnic->pcidev; 1668 pdev = hba->pcidev = cnic->pcidev;
1666 if (hba->pcidev) 1669 if (!hba->pcidev)
1667 pci_dev_get(hba->pcidev); 1670 return -ENODEV;
1668 1671
1672 switch (pdev->device) {
1673 case PCI_DEVICE_ID_NX2_57710:
1674 strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
1675 break;
1676 case PCI_DEVICE_ID_NX2_57711:
1677 strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
1678 break;
1679 case PCI_DEVICE_ID_NX2_57712:
1680 case PCI_DEVICE_ID_NX2_57712_MF:
1681 case PCI_DEVICE_ID_NX2_57712_VF:
1682 strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
1683 break;
1684 case PCI_DEVICE_ID_NX2_57800:
1685 case PCI_DEVICE_ID_NX2_57800_MF:
1686 case PCI_DEVICE_ID_NX2_57800_VF:
1687 strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
1688 break;
1689 case PCI_DEVICE_ID_NX2_57810:
1690 case PCI_DEVICE_ID_NX2_57810_MF:
1691 case PCI_DEVICE_ID_NX2_57810_VF:
1692 strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
1693 break;
1694 case PCI_DEVICE_ID_NX2_57840:
1695 case PCI_DEVICE_ID_NX2_57840_MF:
1696 case PCI_DEVICE_ID_NX2_57840_VF:
1697 case PCI_DEVICE_ID_NX2_57840_2_20:
1698 case PCI_DEVICE_ID_NX2_57840_4_10:
1699 strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
1700 break;
1701 default:
1702 pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
1703 break;
1704 }
1705 pci_dev_get(hba->pcidev);
1669 return 0; 1706 return 0;
1670} 1707}
1671 1708
1672static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) 1709static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
1673{ 1710{
1674 if (hba->pcidev) 1711 if (hba->pcidev) {
1712 hba->chip_num[0] = '\0';
1675 pci_dev_put(hba->pcidev); 1713 pci_dev_put(hba->pcidev);
1714 }
1676 hba->pcidev = NULL; 1715 hba->pcidev = NULL;
1677} 1716}
1678 1717
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 50510ffe1bf5..c0d035a8f8f9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -2,7 +2,7 @@
2 * This file contains the code that low level functions that interact 2 * This file contains the code that low level functions that interact
3 * with 57712 FCoE firmware. 3 * with 57712 FCoE firmware.
4 * 4 *
5 * Copyright (c) 2008 - 2011 Broadcom Corporation 5 * Copyright (c) 2008 - 2013 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
126 fcoe_init3.error_bit_map_lo = 0xffffffff; 126 fcoe_init3.error_bit_map_lo = 0xffffffff;
127 fcoe_init3.error_bit_map_hi = 0xffffffff; 127 fcoe_init3.error_bit_map_hi = 0xffffffff;
128 128
129 fcoe_init3.perf_config = 1; 129 /*
130 * enable both cached connection and cached tasks
131 * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
132 */
133 fcoe_init3.perf_config = 3;
130 134
131 kwqe_arr[0] = (struct kwqe *) &fcoe_init1; 135 kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
132 kwqe_arr[1] = (struct kwqe *) &fcoe_init2; 136 kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 723a9a8ba5ee..575142e92d9c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1,7 +1,7 @@
1/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. 1/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
2 * IO manager and SCSI IO processing. 2 * IO manager and SCSI IO processing.
3 * 3 *
4 * Copyright (c) 2008 - 2011 Broadcom Corporation 4 * Copyright (c) 2008 - 2013 Broadcom Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -1270,8 +1270,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1270 1270
1271 spin_lock_bh(&tgt->tgt_lock); 1271 spin_lock_bh(&tgt->tgt_lock);
1272 io_req->wait_for_comp = 0; 1272 io_req->wait_for_comp = 0;
1273 if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, 1273 if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
1274 &io_req->req_flags))) { 1274 BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
1275 rc = SUCCESS;
1276 } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1277 &io_req->req_flags))) {
1275 /* Let the scsi-ml try to recover this command */ 1278 /* Let the scsi-ml try to recover this command */
1276 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", 1279 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
1277 io_req->xid); 1280 io_req->xid);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index c57a3bb8a9fb..4d93177dfb53 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -2,7 +2,7 @@
2 * Handles operations such as session offload/upload etc, and manages 2 * Handles operations such as session offload/upload etc, and manages
3 * session resources such as connection id and qp resources. 3 * session resources such as connection id and qp resources.
4 * 4 *
5 * Copyright (c) 2008 - 2011 Broadcom Corporation 5 * Copyright (c) 2008 - 2013 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
index 0f9c04175b11..372a67d122d3 100644
--- a/drivers/scsi/csiostor/csio_lnode.h
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -114,7 +114,7 @@ struct csio_lnode_stats {
114 uint32_t n_rnode_match; /* matched rnode */ 114 uint32_t n_rnode_match; /* matched rnode */
115 uint32_t n_dev_loss_tmo; /* Device loss timeout */ 115 uint32_t n_dev_loss_tmo; /* Device loss timeout */
116 uint32_t n_fdmi_err; /* fdmi err */ 116 uint32_t n_fdmi_err; /* fdmi err */
117 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ 117 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */
118 enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */ 118 enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
119 uint32_t n_rnode_alloc; /* rnode allocated */ 119 uint32_t n_rnode_alloc; /* rnode allocated */
120 uint32_t n_rnode_free; /* rnode freed */ 120 uint32_t n_rnode_free; /* rnode freed */
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h
index 65940096a80d..433434221222 100644
--- a/drivers/scsi/csiostor/csio_rnode.h
+++ b/drivers/scsi/csiostor/csio_rnode.h
@@ -63,7 +63,7 @@ struct csio_rnode_stats {
63 uint32_t n_err_nomem; /* error nomem */ 63 uint32_t n_err_nomem; /* error nomem */
64 uint32_t n_evt_unexp; /* unexpected event */ 64 uint32_t n_evt_unexp; /* unexpected event */
65 uint32_t n_evt_drop; /* unexpected event */ 65 uint32_t n_evt_drop; /* unexpected event */
66 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ 66 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */
67 enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ 67 enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
68 uint32_t n_lun_rst; /* Number of resets of 68 uint32_t n_lun_rst; /* Number of resets of
69 * of LUNs under this 69 * of LUNs under this
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 98436c363035..b6d1f92ed33c 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -38,7 +38,7 @@
38 38
39#define DRV_NAME "fnic" 39#define DRV_NAME "fnic"
40#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 40#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
41#define DRV_VERSION "1.5.0.2" 41#define DRV_VERSION "1.5.0.22"
42#define PFX DRV_NAME ": " 42#define PFX DRV_NAME ": "
43#define DFX DRV_NAME "%d: " 43#define DFX DRV_NAME "%d: "
44 44
@@ -192,6 +192,18 @@ enum fnic_state {
192 192
193struct mempool; 193struct mempool;
194 194
195enum fnic_evt {
196 FNIC_EVT_START_VLAN_DISC = 1,
197 FNIC_EVT_START_FCF_DISC = 2,
198 FNIC_EVT_MAX,
199};
200
201struct fnic_event {
202 struct list_head list;
203 struct fnic *fnic;
204 enum fnic_evt event;
205};
206
195/* Per-instance private data structure */ 207/* Per-instance private data structure */
196struct fnic { 208struct fnic {
197 struct fc_lport *lport; 209 struct fc_lport *lport;
@@ -254,6 +266,18 @@ struct fnic {
254 struct sk_buff_head frame_queue; 266 struct sk_buff_head frame_queue;
255 struct sk_buff_head tx_queue; 267 struct sk_buff_head tx_queue;
256 268
269 /*** FIP related data members -- start ***/
270 void (*set_vlan)(struct fnic *, u16 vlan);
271 struct work_struct fip_frame_work;
272 struct sk_buff_head fip_frame_queue;
273 struct timer_list fip_timer;
274 struct list_head vlans;
275 spinlock_t vlans_lock;
276
277 struct work_struct event_work;
278 struct list_head evlist;
279 /*** FIP related data members -- end ***/
280
257 /* copy work queue cache line section */ 281 /* copy work queue cache line section */
258 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; 282 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
259 /* completion queue cache line section */ 283 /* completion queue cache line section */
@@ -278,6 +302,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
278} 302}
279 303
280extern struct workqueue_struct *fnic_event_queue; 304extern struct workqueue_struct *fnic_event_queue;
305extern struct workqueue_struct *fnic_fip_queue;
281extern struct device_attribute *fnic_attrs[]; 306extern struct device_attribute *fnic_attrs[];
282 307
283void fnic_clear_intr_mode(struct fnic *fnic); 308void fnic_clear_intr_mode(struct fnic *fnic);
@@ -289,6 +314,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *);
289void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); 314void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
290void fnic_handle_frame(struct work_struct *work); 315void fnic_handle_frame(struct work_struct *work);
291void fnic_handle_link(struct work_struct *work); 316void fnic_handle_link(struct work_struct *work);
317void fnic_handle_event(struct work_struct *work);
292int fnic_rq_cmpl_handler(struct fnic *fnic, int); 318int fnic_rq_cmpl_handler(struct fnic *fnic, int);
293int fnic_alloc_rq_frame(struct vnic_rq *rq); 319int fnic_alloc_rq_frame(struct vnic_rq *rq);
294void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); 320void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
@@ -321,6 +347,12 @@ void fnic_handle_link_event(struct fnic *fnic);
321 347
322int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); 348int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
323 349
350void fnic_handle_fip_frame(struct work_struct *work);
351void fnic_handle_fip_event(struct fnic *fnic);
352void fnic_fcoe_reset_vlans(struct fnic *fnic);
353void fnic_fcoe_evlist_free(struct fnic *fnic);
354extern void fnic_handle_fip_timer(struct fnic *fnic);
355
324static inline int 356static inline int
325fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) 357fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
326{ 358{
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 483eb9dbe663..006fa92a02df 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -31,12 +31,20 @@
31#include <scsi/libfc.h> 31#include <scsi/libfc.h>
32#include "fnic_io.h" 32#include "fnic_io.h"
33#include "fnic.h" 33#include "fnic.h"
34#include "fnic_fip.h"
34#include "cq_enet_desc.h" 35#include "cq_enet_desc.h"
35#include "cq_exch_desc.h" 36#include "cq_exch_desc.h"
36 37
38static u8 fcoe_all_fcfs[ETH_ALEN];
39struct workqueue_struct *fnic_fip_queue;
37struct workqueue_struct *fnic_event_queue; 40struct workqueue_struct *fnic_event_queue;
38 41
39static void fnic_set_eth_mode(struct fnic *); 42static void fnic_set_eth_mode(struct fnic *);
43static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
44static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
45static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
46static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
47static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
40 48
41void fnic_handle_link(struct work_struct *work) 49void fnic_handle_link(struct work_struct *work)
42{ 50{
@@ -69,6 +77,11 @@ void fnic_handle_link(struct work_struct *work)
69 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 77 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
70 "link down\n"); 78 "link down\n");
71 fcoe_ctlr_link_down(&fnic->ctlr); 79 fcoe_ctlr_link_down(&fnic->ctlr);
80 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
81 /* start FCoE VLAN discovery */
82 fnic_fcoe_send_vlan_req(fnic);
83 return;
84 }
72 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 85 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
73 "link up\n"); 86 "link up\n");
74 fcoe_ctlr_link_up(&fnic->ctlr); 87 fcoe_ctlr_link_up(&fnic->ctlr);
@@ -79,6 +92,11 @@ void fnic_handle_link(struct work_struct *work)
79 } else if (fnic->link_status) { 92 } else if (fnic->link_status) {
80 /* DOWN -> UP */ 93 /* DOWN -> UP */
81 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 94 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
95 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
96 /* start FCoE VLAN discovery */
97 fnic_fcoe_send_vlan_req(fnic);
98 return;
99 }
82 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); 100 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
83 fcoe_ctlr_link_up(&fnic->ctlr); 101 fcoe_ctlr_link_up(&fnic->ctlr);
84 } else { 102 } else {
@@ -128,6 +146,441 @@ void fnic_handle_frame(struct work_struct *work)
128 } 146 }
129} 147}
130 148
149void fnic_fcoe_evlist_free(struct fnic *fnic)
150{
151 struct fnic_event *fevt = NULL;
152 struct fnic_event *next = NULL;
153 unsigned long flags;
154
155 spin_lock_irqsave(&fnic->fnic_lock, flags);
156 if (list_empty(&fnic->evlist)) {
157 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
158 return;
159 }
160
161 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
162 list_del(&fevt->list);
163 kfree(fevt);
164 }
165 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
166}
167
168void fnic_handle_event(struct work_struct *work)
169{
170 struct fnic *fnic = container_of(work, struct fnic, event_work);
171 struct fnic_event *fevt = NULL;
172 struct fnic_event *next = NULL;
173 unsigned long flags;
174
175 spin_lock_irqsave(&fnic->fnic_lock, flags);
176 if (list_empty(&fnic->evlist)) {
177 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
178 return;
179 }
180
181 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
182 if (fnic->stop_rx_link_events) {
183 list_del(&fevt->list);
184 kfree(fevt);
185 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
186 return;
187 }
188 /*
189 * If we're in a transitional state, just re-queue and return.
190 * The queue will be serviced when we get to a stable state.
191 */
192 if (fnic->state != FNIC_IN_FC_MODE &&
193 fnic->state != FNIC_IN_ETH_MODE) {
194 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
195 return;
196 }
197
198 list_del(&fevt->list);
199 switch (fevt->event) {
200 case FNIC_EVT_START_VLAN_DISC:
201 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
202 fnic_fcoe_send_vlan_req(fnic);
203 spin_lock_irqsave(&fnic->fnic_lock, flags);
204 break;
205 case FNIC_EVT_START_FCF_DISC:
206 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
207 "Start FCF Discovery\n");
208 fnic_fcoe_start_fcf_disc(fnic);
209 break;
210 default:
211 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
212 "Unknown event 0x%x\n", fevt->event);
213 break;
214 }
215 kfree(fevt);
216 }
217 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
218}
219
220/**
221 * Check if the Received FIP FLOGI frame is rejected
222 * @fip: The FCoE controller that received the frame
223 * @skb: The received FIP frame
224 *
225 * Returns non-zero if the frame is rejected with unsupported cmd with
226 * insufficient resource els explanation.
227 */
228static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
229 struct sk_buff *skb)
230{
231 struct fc_lport *lport = fip->lp;
232 struct fip_header *fiph;
233 struct fc_frame_header *fh = NULL;
234 struct fip_desc *desc;
235 struct fip_encaps *els;
236 enum fip_desc_type els_dtype = 0;
237 u16 op;
238 u8 els_op;
239 u8 sub;
240
241 size_t els_len = 0;
242 size_t rlen;
243 size_t dlen = 0;
244
245 if (skb_linearize(skb))
246 return 0;
247
248 if (skb->len < sizeof(*fiph))
249 return 0;
250
251 fiph = (struct fip_header *)skb->data;
252 op = ntohs(fiph->fip_op);
253 sub = fiph->fip_subcode;
254
255 if (op != FIP_OP_LS)
256 return 0;
257
258 if (sub != FIP_SC_REP)
259 return 0;
260
261 rlen = ntohs(fiph->fip_dl_len) * 4;
262 if (rlen + sizeof(*fiph) > skb->len)
263 return 0;
264
265 desc = (struct fip_desc *)(fiph + 1);
266 dlen = desc->fip_dlen * FIP_BPW;
267
268 if (desc->fip_dtype == FIP_DT_FLOGI) {
269
270 shost_printk(KERN_DEBUG, lport->host,
271 " FIP TYPE FLOGI: fab name:%llx "
272 "vfid:%d map:%x\n",
273 fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
274 fip->sel_fcf->fc_map);
275 if (dlen < sizeof(*els) + sizeof(*fh) + 1)
276 return 0;
277
278 els_len = dlen - sizeof(*els);
279 els = (struct fip_encaps *)desc;
280 fh = (struct fc_frame_header *)(els + 1);
281 els_dtype = desc->fip_dtype;
282
283 if (!fh)
284 return 0;
285
286 /*
287 * ELS command code, reason and explanation should be = Reject,
288 * unsupported command and insufficient resource
289 */
290 els_op = *(u8 *)(fh + 1);
291 if (els_op == ELS_LS_RJT) {
292 shost_printk(KERN_INFO, lport->host,
293 "Flogi Request Rejected by Switch\n");
294 return 1;
295 }
296 shost_printk(KERN_INFO, lport->host,
297 "Flogi Request Accepted by Switch\n");
298 }
299 return 0;
300}
301
302static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
303{
304 struct fcoe_ctlr *fip = &fnic->ctlr;
305 struct sk_buff *skb;
306 char *eth_fr;
307 int fr_len;
308 struct fip_vlan *vlan;
309 u64 vlan_tov;
310
311 fnic_fcoe_reset_vlans(fnic);
312 fnic->set_vlan(fnic, 0);
313 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
314 "Sending VLAN request...\n");
315 skb = dev_alloc_skb(sizeof(struct fip_vlan));
316 if (!skb)
317 return;
318
319 fr_len = sizeof(*vlan);
320 eth_fr = (char *)skb->data;
321 vlan = (struct fip_vlan *)eth_fr;
322
323 memset(vlan, 0, sizeof(*vlan));
324 memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
325 memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
326 vlan->eth.h_proto = htons(ETH_P_FIP);
327
328 vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
329 vlan->fip.fip_op = htons(FIP_OP_VLAN);
330 vlan->fip.fip_subcode = FIP_SC_VL_REQ;
331 vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
332
333 vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
334 vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
335 memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
336
337 vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
338 vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
339 put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
340
341 skb_put(skb, sizeof(*vlan));
342 skb->protocol = htons(ETH_P_FIP);
343 skb_reset_mac_header(skb);
344 skb_reset_network_header(skb);
345 fip->send(fip, skb);
346
347 /* set a timer so that we can retry if there no response */
348 vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
349 mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
350}
351
352static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
353{
354 struct fcoe_ctlr *fip = &fnic->ctlr;
355 struct fip_header *fiph;
356 struct fip_desc *desc;
357 u16 vid;
358 size_t rlen;
359 size_t dlen;
360 struct fcoe_vlan *vlan;
361 u64 sol_time;
362 unsigned long flags;
363
364 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
365 "Received VLAN response...\n");
366
367 fiph = (struct fip_header *) skb->data;
368
369 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
370 "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
371 ntohs(fiph->fip_op), fiph->fip_subcode);
372
373 rlen = ntohs(fiph->fip_dl_len) * 4;
374 fnic_fcoe_reset_vlans(fnic);
375 spin_lock_irqsave(&fnic->vlans_lock, flags);
376 desc = (struct fip_desc *)(fiph + 1);
377 while (rlen > 0) {
378 dlen = desc->fip_dlen * FIP_BPW;
379 switch (desc->fip_dtype) {
380 case FIP_DT_VLAN:
381 vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
382 shost_printk(KERN_INFO, fnic->lport->host,
383 "process_vlan_resp: FIP VLAN %d\n", vid);
384 vlan = kmalloc(sizeof(*vlan),
385 GFP_ATOMIC);
386 if (!vlan) {
387 /* retry from timer */
388 spin_unlock_irqrestore(&fnic->vlans_lock,
389 flags);
390 goto out;
391 }
392 memset(vlan, 0, sizeof(struct fcoe_vlan));
393 vlan->vid = vid & 0x0fff;
394 vlan->state = FIP_VLAN_AVAIL;
395 list_add_tail(&vlan->list, &fnic->vlans);
396 break;
397 }
398 desc = (struct fip_desc *)((char *)desc + dlen);
399 rlen -= dlen;
400 }
401
402 /* any VLAN descriptors present ? */
403 if (list_empty(&fnic->vlans)) {
404 /* retry from timer */
405 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
406 "No VLAN descriptors in FIP VLAN response\n");
407 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
408 goto out;
409 }
410
411 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
412 fnic->set_vlan(fnic, vlan->vid);
413 vlan->state = FIP_VLAN_SENT; /* sent now */
414 vlan->sol_count++;
415 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
416
417 /* start the solicitation */
418 fcoe_ctlr_link_up(fip);
419
420 sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
421 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
422out:
423 return;
424}
425
426static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
427{
428 unsigned long flags;
429 struct fcoe_vlan *vlan;
430 u64 sol_time;
431
432 spin_lock_irqsave(&fnic->vlans_lock, flags);
433 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
434 fnic->set_vlan(fnic, vlan->vid);
435 vlan->state = FIP_VLAN_SENT; /* sent now */
436 vlan->sol_count = 1;
437 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
438
439 /* start the solicitation */
440 fcoe_ctlr_link_up(&fnic->ctlr);
441
442 sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
443 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
444}
445
446static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
447{
448 unsigned long flags;
449 struct fcoe_vlan *fvlan;
450
451 spin_lock_irqsave(&fnic->vlans_lock, flags);
452 if (list_empty(&fnic->vlans)) {
453 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
454 return -EINVAL;
455 }
456
457 fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
458 if (fvlan->state == FIP_VLAN_USED) {
459 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
460 return 0;
461 }
462
463 if (fvlan->state == FIP_VLAN_SENT) {
464 fvlan->state = FIP_VLAN_USED;
465 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
466 return 0;
467 }
468 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
469 return -EINVAL;
470}
471
472static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
473{
474 struct fnic_event *fevt;
475 unsigned long flags;
476
477 fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
478 if (!fevt)
479 return;
480
481 fevt->fnic = fnic;
482 fevt->event = ev;
483
484 spin_lock_irqsave(&fnic->fnic_lock, flags);
485 list_add_tail(&fevt->list, &fnic->evlist);
486 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
487
488 schedule_work(&fnic->event_work);
489}
490
491static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
492{
493 struct fip_header *fiph;
494 int ret = 1;
495 u16 op;
496 u8 sub;
497
498 if (!skb || !(skb->data))
499 return -1;
500
501 if (skb_linearize(skb))
502 goto drop;
503
504 fiph = (struct fip_header *)skb->data;
505 op = ntohs(fiph->fip_op);
506 sub = fiph->fip_subcode;
507
508 if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
509 goto drop;
510
511 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
512 goto drop;
513
514 if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
515 if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
516 goto drop;
517 /* pass it on to fcoe */
518 ret = 1;
519 } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
520 /* set the vlan as used */
521 fnic_fcoe_process_vlan_resp(fnic, skb);
522 ret = 0;
523 } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
524 /* received CVL request, restart vlan disc */
525 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
526 /* pass it on to fcoe */
527 ret = 1;
528 }
529drop:
530 return ret;
531}
532
533void fnic_handle_fip_frame(struct work_struct *work)
534{
535 struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
536 unsigned long flags;
537 struct sk_buff *skb;
538 struct ethhdr *eh;
539
540 while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
541 spin_lock_irqsave(&fnic->fnic_lock, flags);
542 if (fnic->stop_rx_link_events) {
543 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
544 dev_kfree_skb(skb);
545 return;
546 }
547 /*
548 * If we're in a transitional state, just re-queue and return.
549 * The queue will be serviced when we get to a stable state.
550 */
551 if (fnic->state != FNIC_IN_FC_MODE &&
552 fnic->state != FNIC_IN_ETH_MODE) {
553 skb_queue_head(&fnic->fip_frame_queue, skb);
554 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
555 return;
556 }
557 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
558 eh = (struct ethhdr *)skb->data;
559 if (eh->h_proto == htons(ETH_P_FIP)) {
560 skb_pull(skb, sizeof(*eh));
561 if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
562 dev_kfree_skb(skb);
563 continue;
564 }
565 /*
566 * If there's FLOGI rejects - clear all
567 * fcf's & restart from scratch
568 */
569 if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
570 shost_printk(KERN_INFO, fnic->lport->host,
571 "Trigger a Link down - VLAN Disc\n");
572 fcoe_ctlr_link_down(&fnic->ctlr);
573 /* start FCoE VLAN discovery */
574 fnic_fcoe_send_vlan_req(fnic);
575 dev_kfree_skb(skb);
576 continue;
577 }
578 fcoe_ctlr_recv(&fnic->ctlr, skb);
579 continue;
580 }
581 }
582}
583
131/** 584/**
132 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. 585 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
133 * @fnic: fnic instance. 586 * @fnic: fnic instance.
@@ -150,8 +603,14 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
150 skb_reset_mac_header(skb); 603 skb_reset_mac_header(skb);
151 } 604 }
152 if (eh->h_proto == htons(ETH_P_FIP)) { 605 if (eh->h_proto == htons(ETH_P_FIP)) {
153 skb_pull(skb, sizeof(*eh)); 606 if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
154 fcoe_ctlr_recv(&fnic->ctlr, skb); 607 printk(KERN_ERR "Dropped FIP frame, as firmware "
608 "uses non-FIP mode, Enable FIP "
609 "using UCSM\n");
610 goto drop;
611 }
612 skb_queue_tail(&fnic->fip_frame_queue, skb);
613 queue_work(fnic_fip_queue, &fnic->fip_frame_work);
155 return 1; /* let caller know packet was used */ 614 return 1; /* let caller know packet was used */
156 } 615 }
157 if (eh->h_proto != htons(ETH_P_FCOE)) 616 if (eh->h_proto != htons(ETH_P_FCOE))
@@ -720,3 +1179,104 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
720 dev_kfree_skb(fp_skb(fp)); 1179 dev_kfree_skb(fp_skb(fp));
721 buf->os_buf = NULL; 1180 buf->os_buf = NULL;
722} 1181}
1182
1183void fnic_fcoe_reset_vlans(struct fnic *fnic)
1184{
1185 unsigned long flags;
1186 struct fcoe_vlan *vlan;
1187 struct fcoe_vlan *next;
1188
1189 /*
1190 * indicate a link down to fcoe so that all fcf's are free'd
1191 * might not be required since we did this before sending vlan
1192 * discovery request
1193 */
1194 spin_lock_irqsave(&fnic->vlans_lock, flags);
1195 if (!list_empty(&fnic->vlans)) {
1196 list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
1197 list_del(&vlan->list);
1198 kfree(vlan);
1199 }
1200 }
1201 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1202}
1203
1204void fnic_handle_fip_timer(struct fnic *fnic)
1205{
1206 unsigned long flags;
1207 struct fcoe_vlan *vlan;
1208 u64 sol_time;
1209
1210 spin_lock_irqsave(&fnic->fnic_lock, flags);
1211 if (fnic->stop_rx_link_events) {
1212 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1213 return;
1214 }
1215 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1216
1217 if (fnic->ctlr.mode == FIP_ST_NON_FIP)
1218 return;
1219
1220 spin_lock_irqsave(&fnic->vlans_lock, flags);
1221 if (list_empty(&fnic->vlans)) {
1222 /* no vlans available, try again */
1223 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1224 "Start VLAN Discovery\n");
1225 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1226 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1227 return;
1228 }
1229
1230 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
1231 shost_printk(KERN_DEBUG, fnic->lport->host,
1232 "fip_timer: vlan %d state %d sol_count %d\n",
1233 vlan->vid, vlan->state, vlan->sol_count);
1234 switch (vlan->state) {
1235 case FIP_VLAN_USED:
1236 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1237 "FIP VLAN is selected for FC transaction\n");
1238 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1239 break;
1240 case FIP_VLAN_FAILED:
1241 /* if all vlans are in failed state, restart vlan disc */
1242 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1243 "Start VLAN Discovery\n");
1244 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1245 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1246 break;
1247 case FIP_VLAN_SENT:
1248 if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
1249 /*
1250 * no response on this vlan, remove from the list.
1251 * Try the next vlan
1252 */
1253 shost_printk(KERN_INFO, fnic->lport->host,
1254 "Dequeue this VLAN ID %d from list\n",
1255 vlan->vid);
1256 list_del(&vlan->list);
1257 kfree(vlan);
1258 vlan = NULL;
1259 if (list_empty(&fnic->vlans)) {
1260 /* we exhausted all vlans, restart vlan disc */
1261 spin_unlock_irqrestore(&fnic->vlans_lock,
1262 flags);
1263 shost_printk(KERN_INFO, fnic->lport->host,
1264 "fip_timer: vlan list empty, "
1265 "trigger vlan disc\n");
1266 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1267 return;
1268 }
1269 /* check the next vlan */
1270 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
1271 list);
1272 fnic->set_vlan(fnic, vlan->vid);
1273 vlan->state = FIP_VLAN_SENT; /* sent now */
1274 }
1275 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1276 vlan->sol_count++;
1277 sol_time = jiffies + msecs_to_jiffies
1278 (FCOE_CTLR_START_DELAY);
1279 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
1280 break;
1281 }
1282}
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
new file mode 100644
index 000000000000..87e74c2ab971
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fip.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#ifndef _FNIC_FIP_H_
20#define _FNIC_FIP_H_
21
22
23#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */
24#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */
25#define FCOE_CTLR_MAX_SOL 8
26
27#define FINC_MAX_FLOGI_REJECTS 8
28
29/*
30 * FIP_DT_VLAN descriptor.
31 */
32struct fip_vlan_desc {
33 struct fip_desc fd_desc;
34 __be16 fd_vlan;
35} __attribute__((packed));
36
37struct vlan {
38 __be16 vid;
39 __be16 type;
40};
41
42/*
43 * VLAN entry.
44 */
45struct fcoe_vlan {
46 struct list_head list;
47 u16 vid; /* vlan ID */
48 u16 sol_count; /* no. of sols sent */
49 u16 state; /* state */
50};
51
52enum fip_vlan_state {
53 FIP_VLAN_AVAIL = 0, /* don't do anything */
54 FIP_VLAN_SENT = 1, /* sent */
55 FIP_VLAN_USED = 2, /* succeed */
56 FIP_VLAN_FAILED = 3, /* failed to response */
57};
58
59struct fip_vlan {
60 struct ethhdr eth;
61 struct fip_header fip;
62 struct {
63 struct fip_mac_desc mac;
64 struct fip_wwn_desc wwnn;
65 } desc;
66};
67
68#endif /* __FINC_FIP_H_ */
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index d601ac543c52..5f09d1814d26 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -39,6 +39,7 @@
39#include "vnic_intr.h" 39#include "vnic_intr.h"
40#include "vnic_stats.h" 40#include "vnic_stats.h"
41#include "fnic_io.h" 41#include "fnic_io.h"
42#include "fnic_fip.h"
42#include "fnic.h" 43#include "fnic.h"
43 44
44#define PCI_DEVICE_ID_CISCO_FNIC 0x0045 45#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
@@ -292,6 +293,13 @@ static void fnic_notify_timer(unsigned long data)
292 round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); 293 round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
293} 294}
294 295
296static void fnic_fip_notify_timer(unsigned long data)
297{
298 struct fnic *fnic = (struct fnic *)data;
299
300 fnic_handle_fip_timer(fnic);
301}
302
295static void fnic_notify_timer_start(struct fnic *fnic) 303static void fnic_notify_timer_start(struct fnic *fnic)
296{ 304{
297 switch (vnic_dev_get_intr_mode(fnic->vdev)) { 305 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
@@ -403,6 +411,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
403 return fnic->data_src_addr; 411 return fnic->data_src_addr;
404} 412}
405 413
414static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
415{
416 u16 old_vlan;
417 old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
418}
419
406static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 420static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
407{ 421{
408 struct Scsi_Host *host; 422 struct Scsi_Host *host;
@@ -620,7 +634,29 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
620 vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); 634 vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
621 vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); 635 vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
622 vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); 636 vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
637 fnic->set_vlan = fnic_set_vlan;
623 fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); 638 fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
639 setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
640 (unsigned long)fnic);
641 spin_lock_init(&fnic->vlans_lock);
642 INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
643 INIT_WORK(&fnic->event_work, fnic_handle_event);
644 skb_queue_head_init(&fnic->fip_frame_queue);
645 spin_lock_irqsave(&fnic_list_lock, flags);
646 if (!fnic_fip_queue) {
647 fnic_fip_queue =
648 create_singlethread_workqueue("fnic_fip_q");
649 if (!fnic_fip_queue) {
650 spin_unlock_irqrestore(&fnic_list_lock, flags);
651 printk(KERN_ERR PFX "fnic FIP work queue "
652 "create failed\n");
653 err = -ENOMEM;
654 goto err_out_free_max_pool;
655 }
656 }
657 spin_unlock_irqrestore(&fnic_list_lock, flags);
658 INIT_LIST_HEAD(&fnic->evlist);
659 INIT_LIST_HEAD(&fnic->vlans);
624 } else { 660 } else {
625 shost_printk(KERN_INFO, fnic->lport->host, 661 shost_printk(KERN_INFO, fnic->lport->host,
626 "firmware uses non-FIP mode\n"); 662 "firmware uses non-FIP mode\n");
@@ -807,6 +843,13 @@ static void fnic_remove(struct pci_dev *pdev)
807 skb_queue_purge(&fnic->frame_queue); 843 skb_queue_purge(&fnic->frame_queue);
808 skb_queue_purge(&fnic->tx_queue); 844 skb_queue_purge(&fnic->tx_queue);
809 845
846 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
847 del_timer_sync(&fnic->fip_timer);
848 skb_queue_purge(&fnic->fip_frame_queue);
849 fnic_fcoe_reset_vlans(fnic);
850 fnic_fcoe_evlist_free(fnic);
851 }
852
810 /* 853 /*
811 * Log off the fabric. This stops all remote ports, dns port, 854 * Log off the fabric. This stops all remote ports, dns port,
812 * logs off the fabric. This flushes all rport, disc, lport work 855 * logs off the fabric. This flushes all rport, disc, lport work
@@ -889,8 +932,8 @@ static int __init fnic_init_module(void)
889 len = sizeof(struct fnic_sgl_list); 932 len = sizeof(struct fnic_sgl_list);
890 fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create 933 fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
891 ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, 934 ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
892 SLAB_HWCACHE_ALIGN, 935 SLAB_HWCACHE_ALIGN,
893 NULL); 936 NULL);
894 if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { 937 if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
895 printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); 938 printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
896 err = -ENOMEM; 939 err = -ENOMEM;
@@ -951,6 +994,10 @@ static void __exit fnic_cleanup_module(void)
951{ 994{
952 pci_unregister_driver(&fnic_driver); 995 pci_unregister_driver(&fnic_driver);
953 destroy_workqueue(fnic_event_queue); 996 destroy_workqueue(fnic_event_queue);
997 if (fnic_fip_queue) {
998 flush_workqueue(fnic_fip_queue);
999 destroy_workqueue(fnic_fip_queue);
1000 }
954 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); 1001 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
955 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); 1002 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
956 kmem_cache_destroy(fnic_io_req_cache); 1003 kmem_cache_destroy(fnic_io_req_cache);
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index b576be734e2e..9795d6f3e197 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
584 return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); 584 return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
585} 585}
586 586
587u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
588{
589 u64 a0 = new_default_vlan, a1 = 0;
590 int wait = 1000;
591 int old_vlan = 0;
592
593 old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
594 return (u16)old_vlan;
595}
596
587int vnic_dev_link_status(struct vnic_dev *vdev) 597int vnic_dev_link_status(struct vnic_dev *vdev)
588{ 598{
589 if (vdev->linkstatus) 599 if (vdev->linkstatus)
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
index f9935a8a5a09..40d4195f562b 100644
--- a/drivers/scsi/fnic/vnic_dev.h
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev);
148int vnic_dev_open(struct vnic_dev *vdev, int arg); 148int vnic_dev_open(struct vnic_dev *vdev, int arg);
149int vnic_dev_open_done(struct vnic_dev *vdev, int *done); 149int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
150int vnic_dev_init(struct vnic_dev *vdev, int arg); 150int vnic_dev_init(struct vnic_dev *vdev, int arg);
151u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev,
152 u16 new_default_vlan);
151int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); 153int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
152int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); 154int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
153void vnic_dev_set_intr_mode(struct vnic_dev *vdev, 155void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index 7c9ccbd4134b..3e2fcbda6aed 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -196,6 +196,73 @@ enum vnic_devcmd_cmd {
196 196
197 /* undo initialize of virtual link */ 197 /* undo initialize of virtual link */
198 CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), 198 CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
199
200 /* check fw capability of a cmd:
201 * in: (u32)a0=cmd
202 * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
203 CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
204
205 /* persistent binding info
206 * in: (u64)a0=paddr of arg
207 * (u32)a1=CMD_PERBI_XXX */
208 CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
209
210 /* Interrupt Assert Register functionality
211 * in: (u16)a0=interrupt number to assert
212 */
213 CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
214
215 /* initiate hangreset, like softreset after hang detected */
216 CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
217
218 /* hangreset status:
219 * out: a0=0 reset complete, a0=1 reset in progress */
220 CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
221
222 /*
223 * Set hw ingress packet vlan rewrite mode:
224 * in: (u32)a0=new vlan rewrite mode
225 * out: (u32)a0=old vlan rewrite mode */
226 CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
227
228 /*
229 * in: (u16)a0=bdf of target vnic
230 * (u32)a1=cmd to proxy
231 * a2-a15=args to cmd in a1
232 * out: (u32)a0=status of proxied cmd
233 * a1-a15=out args of proxied cmd */
234 CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
235
236 /*
237 * As for BY_BDF except a0 is index of hvnlink subordinate vnic
238 * or SR-IOV virtual vnic
239 */
240 CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
241
242 /*
243 * For HPP toggle:
244 * adapter-info-get
245 * in: (u64)a0=phsical address of buffer passed in from caller.
246 * (u16)a1=size of buffer specified in a0.
247 * out: (u64)a0=phsical address of buffer passed in from caller.
248 * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
249 * 0 if no VIF-CONFIG-INFO TLV was ever received. */
250 CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
251
252 /*
253 * INT13 API: (u64)a0=paddr to vnic_int13_params struct
254 * (u32)a1=INT13_CMD_xxx
255 */
256 CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
257
258 /*
259 * Set default vlan:
260 * in: (u16)a0=new default vlan
261 * (u16)a1=zero for overriding vlan with param a0,
262 * non-zero for resetting vlan to the default
263 * out: (u16)a0=old default vlan
264 */
265 CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
199}; 266};
200 267
201/* flags for CMD_OPEN */ 268/* flags for CMD_OPEN */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index cc82d0f322b6..4e31caa21ddf 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2179 return 0; 2179 return 0;
2180 } 2180 }
2181 2181
2182 if (vhost->state == IBMVFC_ACTIVE) { 2182 if (vhost->logged_in) {
2183 evt = ibmvfc_get_event(vhost); 2183 evt = ibmvfc_get_event(vhost);
2184 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); 2184 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2185 2185
@@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2190 tmf->common.length = sizeof(*tmf); 2190 tmf->common.length = sizeof(*tmf);
2191 tmf->scsi_id = rport->port_id; 2191 tmf->scsi_id = rport->port_id;
2192 int_to_scsilun(sdev->lun, &tmf->lun); 2192 int_to_scsilun(sdev->lun, &tmf->lun);
2193 tmf->flags = (type | IBMVFC_TMF_LUA_VALID); 2193 if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS))
2194 type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
2195 if (vhost->state == IBMVFC_ACTIVE)
2196 tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
2197 else
2198 tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID);
2194 tmf->cancel_key = (unsigned long)sdev->hostdata; 2199 tmf->cancel_key = (unsigned long)sdev->hostdata;
2195 tmf->my_cancel_key = (unsigned long)starget->hostdata; 2200 tmf->my_cancel_key = (unsigned long)starget->hostdata;
2196 2201
@@ -2327,7 +2332,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2327 timeout = wait_for_completion_timeout(&evt->comp, timeout); 2332 timeout = wait_for_completion_timeout(&evt->comp, timeout);
2328 2333
2329 if (!timeout) { 2334 if (!timeout) {
2330 rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); 2335 rc = ibmvfc_cancel_all(sdev, 0);
2331 if (!rc) { 2336 if (!rc) {
2332 rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); 2337 rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2333 if (rc == SUCCESS) 2338 if (rc == SUCCESS)
@@ -2383,24 +2388,30 @@ out:
2383 * @cmd: scsi command to abort 2388 * @cmd: scsi command to abort
2384 * 2389 *
2385 * Returns: 2390 * Returns:
2386 * SUCCESS / FAILED 2391 * SUCCESS / FAST_IO_FAIL / FAILED
2387 **/ 2392 **/
2388static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) 2393static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2389{ 2394{
2390 struct scsi_device *sdev = cmd->device; 2395 struct scsi_device *sdev = cmd->device;
2391 struct ibmvfc_host *vhost = shost_priv(sdev->host); 2396 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2392 int cancel_rc, abort_rc; 2397 int cancel_rc, block_rc;
2393 int rc = FAILED; 2398 int rc = FAILED;
2394 2399
2395 ENTER; 2400 ENTER;
2396 fc_block_scsi_eh(cmd); 2401 block_rc = fc_block_scsi_eh(cmd);
2397 ibmvfc_wait_while_resetting(vhost); 2402 ibmvfc_wait_while_resetting(vhost);
2398 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); 2403 if (block_rc != FAST_IO_FAIL) {
2399 abort_rc = ibmvfc_abort_task_set(sdev); 2404 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2405 ibmvfc_abort_task_set(sdev);
2406 } else
2407 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2400 2408
2401 if (!cancel_rc && !abort_rc) 2409 if (!cancel_rc)
2402 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); 2410 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2403 2411
2412 if (block_rc == FAST_IO_FAIL && rc != FAILED)
2413 rc = FAST_IO_FAIL;
2414
2404 LEAVE; 2415 LEAVE;
2405 return rc; 2416 return rc;
2406} 2417}
@@ -2410,29 +2421,47 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2410 * @cmd: scsi command struct 2421 * @cmd: scsi command struct
2411 * 2422 *
2412 * Returns: 2423 * Returns:
2413 * SUCCESS / FAILED 2424 * SUCCESS / FAST_IO_FAIL / FAILED
2414 **/ 2425 **/
2415static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) 2426static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2416{ 2427{
2417 struct scsi_device *sdev = cmd->device; 2428 struct scsi_device *sdev = cmd->device;
2418 struct ibmvfc_host *vhost = shost_priv(sdev->host); 2429 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2419 int cancel_rc, reset_rc; 2430 int cancel_rc, block_rc, reset_rc = 0;
2420 int rc = FAILED; 2431 int rc = FAILED;
2421 2432
2422 ENTER; 2433 ENTER;
2423 fc_block_scsi_eh(cmd); 2434 block_rc = fc_block_scsi_eh(cmd);
2424 ibmvfc_wait_while_resetting(vhost); 2435 ibmvfc_wait_while_resetting(vhost);
2425 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); 2436 if (block_rc != FAST_IO_FAIL) {
2426 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); 2437 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2438 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2439 } else
2440 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2427 2441
2428 if (!cancel_rc && !reset_rc) 2442 if (!cancel_rc && !reset_rc)
2429 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); 2443 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2430 2444
2445 if (block_rc == FAST_IO_FAIL && rc != FAILED)
2446 rc = FAST_IO_FAIL;
2447
2431 LEAVE; 2448 LEAVE;
2432 return rc; 2449 return rc;
2433} 2450}
2434 2451
2435/** 2452/**
2453 * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
2454 * @sdev: scsi device struct
2455 * @data: return code
2456 *
2457 **/
2458static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2459{
2460 unsigned long *rc = data;
2461 *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2462}
2463
2464/**
2436 * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function 2465 * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
2437 * @sdev: scsi device struct 2466 * @sdev: scsi device struct
2438 * @data: return code 2467 * @data: return code
@@ -2449,26 +2478,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2449 * @cmd: scsi command struct 2478 * @cmd: scsi command struct
2450 * 2479 *
2451 * Returns: 2480 * Returns:
2452 * SUCCESS / FAILED 2481 * SUCCESS / FAST_IO_FAIL / FAILED
2453 **/ 2482 **/
2454static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) 2483static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2455{ 2484{
2456 struct scsi_device *sdev = cmd->device; 2485 struct scsi_device *sdev = cmd->device;
2457 struct ibmvfc_host *vhost = shost_priv(sdev->host); 2486 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2458 struct scsi_target *starget = scsi_target(sdev); 2487 struct scsi_target *starget = scsi_target(sdev);
2459 int reset_rc; 2488 int block_rc;
2489 int reset_rc = 0;
2460 int rc = FAILED; 2490 int rc = FAILED;
2461 unsigned long cancel_rc = 0; 2491 unsigned long cancel_rc = 0;
2462 2492
2463 ENTER; 2493 ENTER;
2464 fc_block_scsi_eh(cmd); 2494 block_rc = fc_block_scsi_eh(cmd);
2465 ibmvfc_wait_while_resetting(vhost); 2495 ibmvfc_wait_while_resetting(vhost);
2466 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); 2496 if (block_rc != FAST_IO_FAIL) {
2467 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); 2497 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2498 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2499 } else
2500 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
2468 2501
2469 if (!cancel_rc && !reset_rc) 2502 if (!cancel_rc && !reset_rc)
2470 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target); 2503 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2471 2504
2505 if (block_rc == FAST_IO_FAIL && rc != FAILED)
2506 rc = FAST_IO_FAIL;
2507
2472 LEAVE; 2508 LEAVE;
2473 return rc; 2509 return rc;
2474} 2510}
@@ -2480,12 +2516,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2480 **/ 2516 **/
2481static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) 2517static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2482{ 2518{
2483 int rc; 2519 int rc, block_rc;
2484 struct ibmvfc_host *vhost = shost_priv(cmd->device->host); 2520 struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2485 2521
2486 fc_block_scsi_eh(cmd); 2522 block_rc = fc_block_scsi_eh(cmd);
2487 dev_err(vhost->dev, "Resetting connection due to error recovery\n"); 2523 dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2488 rc = ibmvfc_issue_fc_host_lip(vhost->host); 2524 rc = ibmvfc_issue_fc_host_lip(vhost->host);
2525
2526 if (block_rc == FAST_IO_FAIL)
2527 return FAST_IO_FAIL;
2528
2489 return rc ? FAILED : SUCCESS; 2529 return rc ? FAILED : SUCCESS;
2490} 2530}
2491 2531
@@ -2509,8 +2549,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2509 dev_rport = starget_to_rport(scsi_target(sdev)); 2549 dev_rport = starget_to_rport(scsi_target(sdev));
2510 if (dev_rport != rport) 2550 if (dev_rport != rport)
2511 continue; 2551 continue;
2512 ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); 2552 ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2513 ibmvfc_abort_task_set(sdev);
2514 } 2553 }
2515 2554
2516 rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport); 2555 rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 3be8af624e6f..017a5290e8c1 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.10" 32#define IBMVFC_DRIVER_VERSION "1.0.11"
33#define IBMVFC_DRIVER_DATE "(August 24, 2012)" 33#define IBMVFC_DRIVER_DATE "(April 12, 2013)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 60 35#define IBMVFC_DEFAULT_TIMEOUT 60
36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45 36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp {
208 u16 error; 208 u16 error;
209 u32 flags; 209 u32 flags;
210#define IBMVFC_NATIVE_FC 0x01 210#define IBMVFC_NATIVE_FC 0x01
211#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
212 u32 reserved; 211 u32 reserved;
213 u64 capabilities; 212 u64 capabilities;
214#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 213#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
214#define IBMVFC_CAN_SUPPRESS_ABTS 0x10
215 u32 max_cmds; 215 u32 max_cmds;
216 u32 scsi_id_sz; 216 u32 scsi_id_sz;
217 u64 max_dma_len; 217 u64 max_dma_len;
@@ -351,6 +351,7 @@ struct ibmvfc_tmf {
351#define IBMVFC_TMF_LUN_RESET 0x10 351#define IBMVFC_TMF_LUN_RESET 0x10
352#define IBMVFC_TMF_TGT_RESET 0x20 352#define IBMVFC_TMF_TGT_RESET 0x20
353#define IBMVFC_TMF_LUA_VALID 0x40 353#define IBMVFC_TMF_LUA_VALID 0x40
354#define IBMVFC_TMF_SUPPRESS_ABTS 0x80
354 u32 cancel_key; 355 u32 cancel_key;
355 u32 my_cancel_key; 356 u32 my_cancel_key;
356 u32 pad; 357 u32 pad;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2197b57fb225..82a3c1ec8706 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4777 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; 4777 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4778 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4778 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4779 4779
4780 if (!ioa_cfg->in_reset_reload) { 4780 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4781 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); 4781 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4782 dev_err(&ioa_cfg->pdev->dev, 4782 dev_err(&ioa_cfg->pdev->dev,
4783 "Adapter being reset as a result of error recovery.\n"); 4783 "Adapter being reset as a result of error recovery.\n");
@@ -6421,7 +6421,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6421{ 6421{
6422 u32 ioadl_flags = 0; 6422 u32 ioadl_flags = 0;
6423 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6423 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6424 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 6424 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6425 struct ipr_ioadl64_desc *last_ioadl64 = NULL; 6425 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6426 int len = qc->nbytes; 6426 int len = qc->nbytes;
6427 struct scatterlist *sg; 6427 struct scatterlist *sg;
@@ -6441,7 +6441,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6441 ioarcb->ioadl_len = 6441 ioarcb->ioadl_len =
6442 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 6442 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6443 ioarcb->u.sis64_addr_data.data_ioadl_addr = 6443 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6444 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl)); 6444 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6445 6445
6446 for_each_sg(qc->sg, sg, qc->n_elem, si) { 6446 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6447 ioadl64->flags = cpu_to_be32(ioadl_flags); 6447 ioadl64->flags = cpu_to_be32(ioadl_flags);
@@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6739static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) 6739static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6740{ 6740{
6741 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6741 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6742 int i;
6742 6743
6743 ENTER; 6744 ENTER;
6744 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 6745 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
@@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6750 6751
6751 ioa_cfg->in_reset_reload = 0; 6752 ioa_cfg->in_reset_reload = 0;
6752 ioa_cfg->reset_retries = 0; 6753 ioa_cfg->reset_retries = 0;
6754 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6755 spin_lock(&ioa_cfg->hrrq[i]._lock);
6756 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6757 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6758 }
6759 wmb();
6760
6753 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6761 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6754 wake_up_all(&ioa_cfg->reset_wait_q); 6762 wake_up_all(&ioa_cfg->reset_wait_q);
6755 LEAVE; 6763 LEAVE;
@@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
8651 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8659 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8652 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) 8660 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8653 ioa_cfg->sdt_state = ABORT_DUMP; 8661 ioa_cfg->sdt_state = ABORT_DUMP;
8654 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; 8662 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8655 ioa_cfg->in_ioa_bringdown = 1; 8663 ioa_cfg->in_ioa_bringdown = 1;
8656 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8664 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8657 spin_lock(&ioa_cfg->hrrq[i]._lock); 8665 spin_lock(&ioa_cfg->hrrq[i]._lock);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 21a6ff1ed5c6..a1fb840596ef 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -552,7 +552,7 @@ struct ipr_ioarcb_ata_regs { /* 22 bytes */
552 u8 hob_lbam; 552 u8 hob_lbam;
553 u8 hob_lbah; 553 u8 hob_lbah;
554 u8 ctl; 554 u8 ctl;
555}__attribute__ ((packed, aligned(4))); 555}__attribute__ ((packed, aligned(2)));
556 556
557struct ipr_ioadl_desc { 557struct ipr_ioadl_desc {
558 __be32 flags_and_data_len; 558 __be32 flags_and_data_len;
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index c3aa6c5457b9..96a26f454673 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s
1085 struct isci_host *ihost = idev->owning_port->owning_controller; 1085 struct isci_host *ihost = idev->owning_port->owning_controller;
1086 struct domain_device *dev = idev->domain_dev; 1086 struct domain_device *dev = idev->domain_dev;
1087 1087
1088 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { 1088 if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
1089 sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); 1089 sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
1090 } else if (dev_is_expander(dev)) { 1090 } else if (dev_is_expander(dev)) {
1091 sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); 1091 sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
@@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm
1098 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1098 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1099 struct domain_device *dev = idev->domain_dev; 1099 struct domain_device *dev = idev->domain_dev;
1100 1100
1101 if (dev->dev_type == SAS_END_DEV) { 1101 if (dev->dev_type == SAS_END_DEVICE) {
1102 struct isci_host *ihost = idev->owning_port->owning_controller; 1102 struct isci_host *ihost = idev->owning_port->owning_controller;
1103 1103
1104 isci_remote_device_not_ready(ihost, idev, 1104 isci_remote_device_not_ready(ihost, idev,
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 7674caae1d88..47a013fffae7 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte
297 297
298static inline bool dev_is_expander(struct domain_device *dev) 298static inline bool dev_is_expander(struct domain_device *dev)
299{ 299{
300 return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; 300 return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
301} 301}
302 302
303static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) 303static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 9594ab62702b..e3e3bcbd5a9f 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2978,7 +2978,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2978 /* all unaccelerated request types (non ssp or ncq) handled with 2978 /* all unaccelerated request types (non ssp or ncq) handled with
2979 * substates 2979 * substates
2980 */ 2980 */
2981 if (!task && dev->dev_type == SAS_END_DEV) { 2981 if (!task && dev->dev_type == SAS_END_DEVICE) {
2982 state = SCI_REQ_TASK_WAIT_TC_COMP; 2982 state = SCI_REQ_TASK_WAIT_TC_COMP;
2983 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 2983 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2984 state = SCI_REQ_SMP_WAIT_RESP; 2984 state = SCI_REQ_SMP_WAIT_RESP;
@@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost,
3101 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 3101 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3102 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 3102 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3103 3103
3104 if (dev->dev_type == SAS_END_DEV) 3104 if (dev->dev_type == SAS_END_DEVICE)
3105 /* pass */; 3105 /* pass */;
3106 else if (dev_is_sata(dev)) 3106 else if (dev_is_sata(dev))
3107 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 3107 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
@@ -3125,7 +3125,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost,
3125 /* Build the common part of the request */ 3125 /* Build the common part of the request */
3126 sci_general_request_construct(ihost, idev, ireq); 3126 sci_general_request_construct(ihost, idev, ireq);
3127 3127
3128 if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) { 3128 if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
3129 set_bit(IREQ_TMF, &ireq->flags); 3129 set_bit(IREQ_TMF, &ireq->flags);
3130 memset(ireq->tc, 0, sizeof(struct scu_task_context)); 3130 memset(ireq->tc, 0, sizeof(struct scu_task_context));
3131 3131
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index b6f19a1db780..9bb020ac089c 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
250 } 250 }
251 251
252 /* XXX convert to get this from task->tproto like other drivers */ 252 /* XXX convert to get this from task->tproto like other drivers */
253 if (dev->dev_type == SAS_END_DEV) { 253 if (dev->dev_type == SAS_END_DEVICE) {
254 isci_tmf->proto = SAS_PROTOCOL_SSP; 254 isci_tmf->proto = SAS_PROTOCOL_SSP;
255 status = sci_task_request_construct_ssp(ireq); 255 status = sci_task_request_construct_ssp(ireq);
256 if (status != SCI_SUCCESS) 256 if (status != SCI_SUCCESS)
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index bdb81cda8401..161c98efade9 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
285 if (phy->attached_tproto & SAS_PROTOCOL_STP) 285 if (phy->attached_tproto & SAS_PROTOCOL_STP)
286 dev->tproto = phy->attached_tproto; 286 dev->tproto = phy->attached_tproto;
287 if (phy->attached_sata_dev) 287 if (phy->attached_sata_dev)
288 dev->tproto |= SATA_DEV; 288 dev->tproto |= SAS_SATA_DEV;
289 289
290 if (phy->attached_dev_type == SATA_PENDING) 290 if (phy->attached_dev_type == SAS_SATA_PENDING)
291 dev->dev_type = SATA_PENDING; 291 dev->dev_type = SAS_SATA_PENDING;
292 else { 292 else {
293 int res; 293 int res;
294 294
295 dev->dev_type = SATA_DEV; 295 dev->dev_type = SAS_SATA_DEV;
296 res = sas_get_report_phy_sata(dev->parent, phy->phy_id, 296 res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
297 &dev->sata_dev.rps_resp); 297 &dev->sata_dev.rps_resp);
298 if (res) { 298 if (res) {
@@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
314 int res; 314 int res;
315 315
316 /* we weren't pending, so successfully end the reset sequence now */ 316 /* we weren't pending, so successfully end the reset sequence now */
317 if (dev->dev_type != SATA_PENDING) 317 if (dev->dev_type != SAS_SATA_PENDING)
318 return 1; 318 return 1;
319 319
320 /* hmmm, if this succeeds do we need to repost the domain_device to the 320 /* hmmm, if this succeeds do we need to repost the domain_device to the
@@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link)
348 return 0; 348 return 0;
349 349
350 switch (ex_phy->attached_dev_type) { 350 switch (ex_phy->attached_dev_type) {
351 case SATA_PENDING: 351 case SAS_SATA_PENDING:
352 return 0; 352 return 0;
353 case SAS_END_DEV: 353 case SAS_END_DEVICE:
354 if (ex_phy->attached_sata_dev) 354 if (ex_phy->attached_sata_dev)
355 return sas_ata_clear_pending(dev, ex_phy); 355 return sas_ata_clear_pending(dev, ex_phy);
356 default: 356 default:
@@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev)
631 struct dev_to_host_fis *fis = 631 struct dev_to_host_fis *fis =
632 (struct dev_to_host_fis *) dev->frame_rcvd; 632 (struct dev_to_host_fis *) dev->frame_rcvd;
633 633
634 if (dev->dev_type == SATA_PENDING) 634 if (dev->dev_type == SAS_SATA_PENDING)
635 return; 635 return;
636 636
637 if ((fis->sector_count == 1 && /* ATA */ 637 if ((fis->sector_count == 1 && /* ATA */
@@ -797,7 +797,7 @@ int sas_discover_sata(struct domain_device *dev)
797{ 797{
798 int res; 798 int res;
799 799
800 if (dev->dev_type == SATA_PM) 800 if (dev->dev_type == SAS_SATA_PM)
801 return -ENODEV; 801 return -ENODEV;
802 802
803 sas_get_ata_command_set(dev); 803 sas_get_ata_command_set(dev);
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index a0c3003e0c7d..62b58d38ce2e 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -39,11 +39,11 @@
39void sas_init_dev(struct domain_device *dev) 39void sas_init_dev(struct domain_device *dev)
40{ 40{
41 switch (dev->dev_type) { 41 switch (dev->dev_type) {
42 case SAS_END_DEV: 42 case SAS_END_DEVICE:
43 INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node); 43 INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
44 break; 44 break;
45 case EDGE_DEV: 45 case SAS_EDGE_EXPANDER_DEVICE:
46 case FANOUT_DEV: 46 case SAS_FANOUT_EXPANDER_DEVICE:
47 INIT_LIST_HEAD(&dev->ex_dev.children); 47 INIT_LIST_HEAD(&dev->ex_dev.children);
48 mutex_init(&dev->ex_dev.cmd_mutex); 48 mutex_init(&dev->ex_dev.cmd_mutex);
49 break; 49 break;
@@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port)
93 if (fis->interrupt_reason == 1 && fis->lbal == 1 && 93 if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
94 fis->byte_count_low==0x69 && fis->byte_count_high == 0x96 94 fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
95 && (fis->device & ~0x10) == 0) 95 && (fis->device & ~0x10) == 0)
96 dev->dev_type = SATA_PM; 96 dev->dev_type = SAS_SATA_PM;
97 else 97 else
98 dev->dev_type = SATA_DEV; 98 dev->dev_type = SAS_SATA_DEV;
99 dev->tproto = SAS_PROTOCOL_SATA; 99 dev->tproto = SAS_PROTOCOL_SATA;
100 } else { 100 } else {
101 struct sas_identify_frame *id = 101 struct sas_identify_frame *id =
@@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
109 109
110 dev->port = port; 110 dev->port = port;
111 switch (dev->dev_type) { 111 switch (dev->dev_type) {
112 case SATA_DEV: 112 case SAS_SATA_DEV:
113 rc = sas_ata_init(dev); 113 rc = sas_ata_init(dev);
114 if (rc) { 114 if (rc) {
115 rphy = NULL; 115 rphy = NULL;
116 break; 116 break;
117 } 117 }
118 /* fall through */ 118 /* fall through */
119 case SAS_END_DEV: 119 case SAS_END_DEVICE:
120 rphy = sas_end_device_alloc(port->port); 120 rphy = sas_end_device_alloc(port->port);
121 break; 121 break;
122 case EDGE_DEV: 122 case SAS_EDGE_EXPANDER_DEVICE:
123 rphy = sas_expander_alloc(port->port, 123 rphy = sas_expander_alloc(port->port,
124 SAS_EDGE_EXPANDER_DEVICE); 124 SAS_EDGE_EXPANDER_DEVICE);
125 break; 125 break;
126 case FANOUT_DEV: 126 case SAS_FANOUT_EXPANDER_DEVICE:
127 rphy = sas_expander_alloc(port->port, 127 rphy = sas_expander_alloc(port->port,
128 SAS_FANOUT_EXPANDER_DEVICE); 128 SAS_FANOUT_EXPANDER_DEVICE);
129 break; 129 break;
@@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
156 dev->rphy = rphy; 156 dev->rphy = rphy;
157 get_device(&dev->rphy->dev); 157 get_device(&dev->rphy->dev);
158 158
159 if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) 159 if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE)
160 list_add_tail(&dev->disco_list_node, &port->disco_list); 160 list_add_tail(&dev->disco_list_node, &port->disco_list);
161 else { 161 else {
162 spin_lock_irq(&port->dev_list_lock); 162 spin_lock_irq(&port->dev_list_lock);
@@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref)
315 dev->phy = NULL; 315 dev->phy = NULL;
316 316
317 /* remove the phys and ports, everything else should be gone */ 317 /* remove the phys and ports, everything else should be gone */
318 if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) 318 if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
319 kfree(dev->ex_dev.ex_phy); 319 kfree(dev->ex_dev.ex_phy);
320 320
321 if (dev_is_sata(dev) && dev->sata_dev.ap) { 321 if (dev_is_sata(dev) && dev->sata_dev.ap) {
@@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
343 spin_unlock_irq(&port->dev_list_lock); 343 spin_unlock_irq(&port->dev_list_lock);
344 344
345 spin_lock_irq(&ha->lock); 345 spin_lock_irq(&ha->lock);
346 if (dev->dev_type == SAS_END_DEV && 346 if (dev->dev_type == SAS_END_DEVICE &&
347 !list_empty(&dev->ssp_dev.eh_list_node)) { 347 !list_empty(&dev->ssp_dev.eh_list_node)) {
348 list_del_init(&dev->ssp_dev.eh_list_node); 348 list_del_init(&dev->ssp_dev.eh_list_node);
349 ha->eh_active--; 349 ha->eh_active--;
@@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work)
457 task_pid_nr(current)); 457 task_pid_nr(current));
458 458
459 switch (dev->dev_type) { 459 switch (dev->dev_type) {
460 case SAS_END_DEV: 460 case SAS_END_DEVICE:
461 error = sas_discover_end_dev(dev); 461 error = sas_discover_end_dev(dev);
462 break; 462 break;
463 case EDGE_DEV: 463 case SAS_EDGE_EXPANDER_DEVICE:
464 case FANOUT_DEV: 464 case SAS_FANOUT_EXPANDER_DEVICE:
465 error = sas_discover_root_expander(dev); 465 error = sas_discover_root_expander(dev);
466 break; 466 break;
467 case SATA_DEV: 467 case SAS_SATA_DEV:
468 case SATA_PM: 468 case SAS_SATA_PM:
469#ifdef CONFIG_SCSI_SAS_ATA 469#ifdef CONFIG_SCSI_SAS_ATA
470 error = sas_discover_sata(dev); 470 error = sas_discover_sata(dev);
471 break; 471 break;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index f42b0e15410f..446b85110a1f 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
183 } 183 }
184} 184}
185 185
186static enum sas_dev_type to_dev_type(struct discover_resp *dr) 186static enum sas_device_type to_dev_type(struct discover_resp *dr)
187{ 187{
188 /* This is detecting a failure to transmit initial dev to host 188 /* This is detecting a failure to transmit initial dev to host
189 * FIS as described in section J.5 of sas-2 r16 189 * FIS as described in section J.5 of sas-2 r16
190 */ 190 */
191 if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev && 191 if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev &&
192 dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) 192 dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
193 return SATA_PENDING; 193 return SAS_SATA_PENDING;
194 else 194 else
195 return dr->attached_dev_type; 195 return dr->attached_dev_type;
196} 196}
197 197
198static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) 198static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
199{ 199{
200 enum sas_dev_type dev_type; 200 enum sas_device_type dev_type;
201 enum sas_linkrate linkrate; 201 enum sas_linkrate linkrate;
202 u8 sas_addr[SAS_ADDR_SIZE]; 202 u8 sas_addr[SAS_ADDR_SIZE];
203 struct smp_resp *resp = rsp; 203 struct smp_resp *resp = rsp;
@@ -238,7 +238,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
238 /* Handle vacant phy - rest of dr data is not valid so skip it */ 238 /* Handle vacant phy - rest of dr data is not valid so skip it */
239 if (phy->phy_state == PHY_VACANT) { 239 if (phy->phy_state == PHY_VACANT) {
240 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 240 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
241 phy->attached_dev_type = NO_DEVICE; 241 phy->attached_dev_type = SAS_PHY_UNUSED;
242 if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { 242 if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
243 phy->phy_id = phy_id; 243 phy->phy_id = phy_id;
244 goto skip; 244 goto skip;
@@ -259,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
259 /* help some expanders that fail to zero sas_address in the 'no 259 /* help some expanders that fail to zero sas_address in the 'no
260 * device' case 260 * device' case
261 */ 261 */
262 if (phy->attached_dev_type == NO_DEVICE || 262 if (phy->attached_dev_type == SAS_PHY_UNUSED ||
263 phy->linkrate < SAS_LINK_RATE_1_5_GBPS) 263 phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
264 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 264 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
265 else 265 else
@@ -292,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
292 292
293 out: 293 out:
294 switch (phy->attached_dev_type) { 294 switch (phy->attached_dev_type) {
295 case SATA_PENDING: 295 case SAS_SATA_PENDING:
296 type = "stp pending"; 296 type = "stp pending";
297 break; 297 break;
298 case NO_DEVICE: 298 case SAS_PHY_UNUSED:
299 type = "no device"; 299 type = "no device";
300 break; 300 break;
301 case SAS_END_DEV: 301 case SAS_END_DEVICE:
302 if (phy->attached_iproto) { 302 if (phy->attached_iproto) {
303 if (phy->attached_tproto) 303 if (phy->attached_tproto)
304 type = "host+target"; 304 type = "host+target";
@@ -311,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
311 type = "ssp"; 311 type = "ssp";
312 } 312 }
313 break; 313 break;
314 case EDGE_DEV: 314 case SAS_EDGE_EXPANDER_DEVICE:
315 case FANOUT_DEV: 315 case SAS_FANOUT_EXPANDER_DEVICE:
316 type = "smp"; 316 type = "smp";
317 break; 317 break;
318 default: 318 default:
@@ -833,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev(
833 } else 833 } else
834#endif 834#endif
835 if (phy->attached_tproto & SAS_PROTOCOL_SSP) { 835 if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
836 child->dev_type = SAS_END_DEV; 836 child->dev_type = SAS_END_DEVICE;
837 rphy = sas_end_device_alloc(phy->port); 837 rphy = sas_end_device_alloc(phy->port);
838 /* FIXME: error handling */ 838 /* FIXME: error handling */
839 if (unlikely(!rphy)) 839 if (unlikely(!rphy))
@@ -932,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander(
932 932
933 933
934 switch (phy->attached_dev_type) { 934 switch (phy->attached_dev_type) {
935 case EDGE_DEV: 935 case SAS_EDGE_EXPANDER_DEVICE:
936 rphy = sas_expander_alloc(phy->port, 936 rphy = sas_expander_alloc(phy->port,
937 SAS_EDGE_EXPANDER_DEVICE); 937 SAS_EDGE_EXPANDER_DEVICE);
938 break; 938 break;
939 case FANOUT_DEV: 939 case SAS_FANOUT_EXPANDER_DEVICE:
940 rphy = sas_expander_alloc(phy->port, 940 rphy = sas_expander_alloc(phy->port,
941 SAS_FANOUT_EXPANDER_DEVICE); 941 SAS_FANOUT_EXPANDER_DEVICE);
942 break; 942 break;
@@ -1013,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1013 if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) 1013 if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
1014 sas_ex_disable_port(dev, ex_phy->attached_sas_addr); 1014 sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
1015 1015
1016 if (ex_phy->attached_dev_type == NO_DEVICE) { 1016 if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) {
1017 if (ex_phy->routing_attr == DIRECT_ROUTING) { 1017 if (ex_phy->routing_attr == DIRECT_ROUTING) {
1018 memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 1018 memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
1019 sas_configure_routing(dev, ex_phy->attached_sas_addr); 1019 sas_configure_routing(dev, ex_phy->attached_sas_addr);
@@ -1022,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1022 } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) 1022 } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
1023 return 0; 1023 return 0;
1024 1024
1025 if (ex_phy->attached_dev_type != SAS_END_DEV && 1025 if (ex_phy->attached_dev_type != SAS_END_DEVICE &&
1026 ex_phy->attached_dev_type != FANOUT_DEV && 1026 ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
1027 ex_phy->attached_dev_type != EDGE_DEV && 1027 ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
1028 ex_phy->attached_dev_type != SATA_PENDING) { 1028 ex_phy->attached_dev_type != SAS_SATA_PENDING) {
1029 SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " 1029 SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
1030 "phy 0x%x\n", ex_phy->attached_dev_type, 1030 "phy 0x%x\n", ex_phy->attached_dev_type,
1031 SAS_ADDR(dev->sas_addr), 1031 SAS_ADDR(dev->sas_addr),
@@ -1049,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1049 } 1049 }
1050 1050
1051 switch (ex_phy->attached_dev_type) { 1051 switch (ex_phy->attached_dev_type) {
1052 case SAS_END_DEV: 1052 case SAS_END_DEVICE:
1053 case SATA_PENDING: 1053 case SAS_SATA_PENDING:
1054 child = sas_ex_discover_end_dev(dev, phy_id); 1054 child = sas_ex_discover_end_dev(dev, phy_id);
1055 break; 1055 break;
1056 case FANOUT_DEV: 1056 case SAS_FANOUT_EXPANDER_DEVICE:
1057 if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { 1057 if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
1058 SAS_DPRINTK("second fanout expander %016llx phy 0x%x " 1058 SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
1059 "attached to ex %016llx phy 0x%x\n", 1059 "attached to ex %016llx phy 0x%x\n",
@@ -1067,7 +1067,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1067 memcpy(dev->port->disc.fanout_sas_addr, 1067 memcpy(dev->port->disc.fanout_sas_addr,
1068 ex_phy->attached_sas_addr, SAS_ADDR_SIZE); 1068 ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
1069 /* fallthrough */ 1069 /* fallthrough */
1070 case EDGE_DEV: 1070 case SAS_EDGE_EXPANDER_DEVICE:
1071 child = sas_ex_discover_expander(dev, phy_id); 1071 child = sas_ex_discover_expander(dev, phy_id);
1072 break; 1072 break;
1073 default: 1073 default:
@@ -1111,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
1111 phy->phy_state == PHY_NOT_PRESENT) 1111 phy->phy_state == PHY_NOT_PRESENT)
1112 continue; 1112 continue;
1113 1113
1114 if ((phy->attached_dev_type == EDGE_DEV || 1114 if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1115 phy->attached_dev_type == FANOUT_DEV) && 1115 phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
1116 phy->routing_attr == SUBTRACTIVE_ROUTING) { 1116 phy->routing_attr == SUBTRACTIVE_ROUTING) {
1117 1117
1118 memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); 1118 memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
@@ -1130,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
1130 u8 sub_addr[8] = {0, }; 1130 u8 sub_addr[8] = {0, };
1131 1131
1132 list_for_each_entry(child, &ex->children, siblings) { 1132 list_for_each_entry(child, &ex->children, siblings) {
1133 if (child->dev_type != EDGE_DEV && 1133 if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
1134 child->dev_type != FANOUT_DEV) 1134 child->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
1135 continue; 1135 continue;
1136 if (sub_addr[0] == 0) { 1136 if (sub_addr[0] == 0) {
1137 sas_find_sub_addr(child, sub_addr); 1137 sas_find_sub_addr(child, sub_addr);
@@ -1208,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
1208 int i; 1208 int i;
1209 u8 *sub_sas_addr = NULL; 1209 u8 *sub_sas_addr = NULL;
1210 1210
1211 if (dev->dev_type != EDGE_DEV) 1211 if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE)
1212 return 0; 1212 return 0;
1213 1213
1214 for (i = 0; i < ex->num_phys; i++) { 1214 for (i = 0; i < ex->num_phys; i++) {
@@ -1218,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
1218 phy->phy_state == PHY_NOT_PRESENT) 1218 phy->phy_state == PHY_NOT_PRESENT)
1219 continue; 1219 continue;
1220 1220
1221 if ((phy->attached_dev_type == FANOUT_DEV || 1221 if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
1222 phy->attached_dev_type == EDGE_DEV) && 1222 phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) &&
1223 phy->routing_attr == SUBTRACTIVE_ROUTING) { 1223 phy->routing_attr == SUBTRACTIVE_ROUTING) {
1224 1224
1225 if (!sub_sas_addr) 1225 if (!sub_sas_addr)
@@ -1245,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
1245 struct ex_phy *child_phy) 1245 struct ex_phy *child_phy)
1246{ 1246{
1247 static const char *ex_type[] = { 1247 static const char *ex_type[] = {
1248 [EDGE_DEV] = "edge", 1248 [SAS_EDGE_EXPANDER_DEVICE] = "edge",
1249 [FANOUT_DEV] = "fanout", 1249 [SAS_FANOUT_EXPANDER_DEVICE] = "fanout",
1250 }; 1250 };
1251 struct domain_device *parent = child->parent; 1251 struct domain_device *parent = child->parent;
1252 1252
@@ -1321,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child)
1321 if (!child->parent) 1321 if (!child->parent)
1322 return 0; 1322 return 0;
1323 1323
1324 if (child->parent->dev_type != EDGE_DEV && 1324 if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
1325 child->parent->dev_type != FANOUT_DEV) 1325 child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
1326 return 0; 1326 return 0;
1327 1327
1328 parent_ex = &child->parent->ex_dev; 1328 parent_ex = &child->parent->ex_dev;
@@ -1341,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child)
1341 child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; 1341 child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
1342 1342
1343 switch (child->parent->dev_type) { 1343 switch (child->parent->dev_type) {
1344 case EDGE_DEV: 1344 case SAS_EDGE_EXPANDER_DEVICE:
1345 if (child->dev_type == FANOUT_DEV) { 1345 if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
1346 if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || 1346 if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
1347 child_phy->routing_attr != TABLE_ROUTING) { 1347 child_phy->routing_attr != TABLE_ROUTING) {
1348 sas_print_parent_topology_bug(child, parent_phy, child_phy); 1348 sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1366,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child)
1366 } 1366 }
1367 } 1367 }
1368 break; 1368 break;
1369 case FANOUT_DEV: 1369 case SAS_FANOUT_EXPANDER_DEVICE:
1370 if (parent_phy->routing_attr != TABLE_ROUTING || 1370 if (parent_phy->routing_attr != TABLE_ROUTING ||
1371 child_phy->routing_attr != SUBTRACTIVE_ROUTING) { 1371 child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
1372 sas_print_parent_topology_bug(child, parent_phy, child_phy); 1372 sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1619,8 +1619,8 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
1619 struct domain_device *dev; 1619 struct domain_device *dev;
1620 1620
1621 list_for_each_entry(dev, &port->dev_list, dev_list_node) { 1621 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
1622 if (dev->dev_type == EDGE_DEV || 1622 if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1623 dev->dev_type == FANOUT_DEV) { 1623 dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
1624 struct sas_expander_device *ex = 1624 struct sas_expander_device *ex =
1625 rphy_to_expander_device(dev->rphy); 1625 rphy_to_expander_device(dev->rphy);
1626 1626
@@ -1720,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
1720} 1720}
1721 1721
1722static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, 1722static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
1723 u8 *sas_addr, enum sas_dev_type *type) 1723 u8 *sas_addr, enum sas_device_type *type)
1724{ 1724{
1725 int res; 1725 int res;
1726 struct smp_resp *disc_resp; 1726 struct smp_resp *disc_resp;
@@ -1849,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
1849 SAS_DPRINTK("Expander phys DID NOT change\n"); 1849 SAS_DPRINTK("Expander phys DID NOT change\n");
1850 } 1850 }
1851 list_for_each_entry(ch, &ex->children, siblings) { 1851 list_for_each_entry(ch, &ex->children, siblings) {
1852 if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { 1852 if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
1853 res = sas_find_bcast_dev(ch, src_dev); 1853 res = sas_find_bcast_dev(ch, src_dev);
1854 if (*src_dev) 1854 if (*src_dev)
1855 return res; 1855 return res;
@@ -1866,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi
1866 1866
1867 list_for_each_entry_safe(child, n, &ex->children, siblings) { 1867 list_for_each_entry_safe(child, n, &ex->children, siblings) {
1868 set_bit(SAS_DEV_GONE, &child->state); 1868 set_bit(SAS_DEV_GONE, &child->state);
1869 if (child->dev_type == EDGE_DEV || 1869 if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1870 child->dev_type == FANOUT_DEV) 1870 child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
1871 sas_unregister_ex_tree(port, child); 1871 sas_unregister_ex_tree(port, child);
1872 else 1872 else
1873 sas_unregister_dev(port, child); 1873 sas_unregister_dev(port, child);
@@ -1887,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1887 if (SAS_ADDR(child->sas_addr) == 1887 if (SAS_ADDR(child->sas_addr) ==
1888 SAS_ADDR(phy->attached_sas_addr)) { 1888 SAS_ADDR(phy->attached_sas_addr)) {
1889 set_bit(SAS_DEV_GONE, &child->state); 1889 set_bit(SAS_DEV_GONE, &child->state);
1890 if (child->dev_type == EDGE_DEV || 1890 if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1891 child->dev_type == FANOUT_DEV) 1891 child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
1892 sas_unregister_ex_tree(parent->port, child); 1892 sas_unregister_ex_tree(parent->port, child);
1893 else 1893 else
1894 sas_unregister_dev(parent->port, child); 1894 sas_unregister_dev(parent->port, child);
@@ -1916,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root,
1916 int res = 0; 1916 int res = 0;
1917 1917
1918 list_for_each_entry(child, &ex_root->children, siblings) { 1918 list_for_each_entry(child, &ex_root->children, siblings) {
1919 if (child->dev_type == EDGE_DEV || 1919 if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1920 child->dev_type == FANOUT_DEV) { 1920 child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
1921 struct sas_expander_device *ex = 1921 struct sas_expander_device *ex =
1922 rphy_to_expander_device(child->rphy); 1922 rphy_to_expander_device(child->rphy);
1923 1923
@@ -1970,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
1970 list_for_each_entry(child, &dev->ex_dev.children, siblings) { 1970 list_for_each_entry(child, &dev->ex_dev.children, siblings) {
1971 if (SAS_ADDR(child->sas_addr) == 1971 if (SAS_ADDR(child->sas_addr) ==
1972 SAS_ADDR(ex_phy->attached_sas_addr)) { 1972 SAS_ADDR(ex_phy->attached_sas_addr)) {
1973 if (child->dev_type == EDGE_DEV || 1973 if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1974 child->dev_type == FANOUT_DEV) 1974 child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
1975 res = sas_discover_bfs_by_root(child); 1975 res = sas_discover_bfs_by_root(child);
1976 break; 1976 break;
1977 } 1977 }
@@ -1979,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
1979 return res; 1979 return res;
1980} 1980}
1981 1981
1982static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old) 1982static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
1983{ 1983{
1984 if (old == new) 1984 if (old == new)
1985 return true; 1985 return true;
1986 1986
1987 /* treat device directed resets as flutter, if we went 1987 /* treat device directed resets as flutter, if we went
1988 * SAS_END_DEV to SATA_PENDING the link needs recovery 1988 * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery
1989 */ 1989 */
1990 if ((old == SATA_PENDING && new == SAS_END_DEV) || 1990 if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) ||
1991 (old == SAS_END_DEV && new == SATA_PENDING)) 1991 (old == SAS_END_DEVICE && new == SAS_SATA_PENDING))
1992 return true; 1992 return true;
1993 1993
1994 return false; 1994 return false;
@@ -1998,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
1998{ 1998{
1999 struct expander_device *ex = &dev->ex_dev; 1999 struct expander_device *ex = &dev->ex_dev;
2000 struct ex_phy *phy = &ex->ex_phy[phy_id]; 2000 struct ex_phy *phy = &ex->ex_phy[phy_id];
2001 enum sas_dev_type type = NO_DEVICE; 2001 enum sas_device_type type = SAS_PHY_UNUSED;
2002 u8 sas_addr[8]; 2002 u8 sas_addr[8];
2003 int res; 2003 int res;
2004 2004
@@ -2032,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
2032 2032
2033 sas_ex_phy_discover(dev, phy_id); 2033 sas_ex_phy_discover(dev, phy_id);
2034 2034
2035 if (ata_dev && phy->attached_dev_type == SATA_PENDING) 2035 if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
2036 action = ", needs recovery"; 2036 action = ", needs recovery";
2037 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", 2037 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
2038 SAS_ADDR(dev->sas_addr), phy_id, action); 2038 SAS_ADDR(dev->sas_addr), phy_id, action);
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 1de67964e5a1..7e7ba83f0a21 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
131 rphy->identify.initiator_port_protocols = dev->iproto; 131 rphy->identify.initiator_port_protocols = dev->iproto;
132 rphy->identify.target_port_protocols = dev->tproto; 132 rphy->identify.target_port_protocols = dev->tproto;
133 switch (dev->dev_type) { 133 switch (dev->dev_type) {
134 case SATA_DEV: 134 case SAS_SATA_DEV:
135 /* FIXME: need sata device type */ 135 /* FIXME: need sata device type */
136 case SAS_END_DEV: 136 case SAS_END_DEVICE:
137 case SATA_PENDING: 137 case SAS_SATA_PENDING:
138 rphy->identify.device_type = SAS_END_DEVICE; 138 rphy->identify.device_type = SAS_END_DEVICE;
139 break; 139 break;
140 case EDGE_DEV: 140 case SAS_EDGE_EXPANDER_DEVICE:
141 rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE; 141 rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
142 break; 142 break;
143 case FANOUT_DEV: 143 case SAS_FANOUT_EXPANDER_DEVICE:
144 rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE; 144 rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
145 break; 145 break;
146 default: 146 default:
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 1398b714c018..d3c5297c6c89 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
69 continue; 69 continue;
70 } 70 }
71 71
72 if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { 72 if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
73 dev->ex_dev.ex_change_count = -1; 73 dev->ex_dev.ex_change_count = -1;
74 for (i = 0; i < dev->ex_dev.num_phys; i++) { 74 for (i = 0; i < dev->ex_dev.num_phys; i++) {
75 struct ex_phy *phy = &dev->ex_dev.ex_phy[i]; 75 struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 7706c99ec8bb..bcc56cac4fd8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -46,10 +46,15 @@ struct lpfc_sli2_slim;
46#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi 46#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
47 cmnd for menlo needs nearly twice as for firmware 47 cmnd for menlo needs nearly twice as for firmware
48 downloads using bsg */ 48 downloads using bsg */
49#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ 49
50#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */
51#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
52#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
50#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ 53#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
54#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
55#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
56
51#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ 57#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
52#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
53#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 58#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
54#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 59#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
55#define LPFC_VNAME_LEN 100 /* vport symbolic name length */ 60#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
@@ -66,8 +71,10 @@ struct lpfc_sli2_slim;
66 * queue depths when there are driver resource error or Firmware 71 * queue depths when there are driver resource error or Firmware
67 * resource error. 72 * resource error.
68 */ 73 */
69#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */ 74/* 1 Second */
70#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */ 75#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
76/* 5 minutes */
77#define QUEUE_RAMP_UP_INTERVAL (msecs_to_jiffies(1000 * 300))
71 78
72/* Number of exchanges reserved for discovery to complete */ 79/* Number of exchanges reserved for discovery to complete */
73#define LPFC_DISC_IOCB_BUFF_COUNT 20 80#define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -671,6 +678,7 @@ struct lpfc_hba {
671 uint32_t lmt; 678 uint32_t lmt;
672 679
673 uint32_t fc_topology; /* link topology, from LINK INIT */ 680 uint32_t fc_topology; /* link topology, from LINK INIT */
681 uint32_t fc_topology_changed; /* link topology, from LINK INIT */
674 682
675 struct lpfc_stats fc_stat; 683 struct lpfc_stats fc_stat;
676 684
@@ -701,9 +709,11 @@ struct lpfc_hba {
701 uint32_t cfg_poll_tmo; 709 uint32_t cfg_poll_tmo;
702 uint32_t cfg_use_msi; 710 uint32_t cfg_use_msi;
703 uint32_t cfg_fcp_imax; 711 uint32_t cfg_fcp_imax;
712 uint32_t cfg_fcp_cpu_map;
704 uint32_t cfg_fcp_wq_count; 713 uint32_t cfg_fcp_wq_count;
705 uint32_t cfg_fcp_eq_count; 714 uint32_t cfg_fcp_eq_count;
706 uint32_t cfg_fcp_io_channel; 715 uint32_t cfg_fcp_io_channel;
716 uint32_t cfg_total_seg_cnt;
707 uint32_t cfg_sg_seg_cnt; 717 uint32_t cfg_sg_seg_cnt;
708 uint32_t cfg_prot_sg_seg_cnt; 718 uint32_t cfg_prot_sg_seg_cnt;
709 uint32_t cfg_sg_dma_buf_size; 719 uint32_t cfg_sg_dma_buf_size;
@@ -804,8 +814,10 @@ struct lpfc_hba {
804 uint64_t bg_reftag_err_cnt; 814 uint64_t bg_reftag_err_cnt;
805 815
806 /* fastpath list. */ 816 /* fastpath list. */
807 spinlock_t scsi_buf_list_lock; 817 spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */
808 struct list_head lpfc_scsi_buf_list; 818 spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */
819 struct list_head lpfc_scsi_buf_list_get;
820 struct list_head lpfc_scsi_buf_list_put;
809 uint32_t total_scsi_bufs; 821 uint32_t total_scsi_bufs;
810 struct list_head lpfc_iocb_list; 822 struct list_head lpfc_iocb_list;
811 uint32_t total_iocbq_bufs; 823 uint32_t total_iocbq_bufs;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 9290713af253..3c5625b8b1f4 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
674 int i; 674 int i;
675 int rc; 675 int rc;
676 676
677 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
678 return 0;
679
677 init_completion(&online_compl); 680 init_completion(&online_compl);
678 rc = lpfc_workq_post_event(phba, &status, &online_compl, 681 rc = lpfc_workq_post_event(phba, &status, &online_compl,
679 LPFC_EVT_OFFLINE_PREP); 682 LPFC_EVT_OFFLINE_PREP);
@@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba)
741 int status = 0; 744 int status = 0;
742 int rc; 745 int rc;
743 746
744 if (!phba->cfg_enable_hba_reset) 747 if ((!phba->cfg_enable_hba_reset) ||
748 (phba->pport->fc_flag & FC_OFFLINE_MODE))
745 return -EACCES; 749 return -EACCES;
746 750
747 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 751 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
895 pci_disable_sriov(pdev); 899 pci_disable_sriov(pdev);
896 phba->cfg_sriov_nr_virtfn = 0; 900 phba->cfg_sriov_nr_virtfn = 0;
897 } 901 }
902
898 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 903 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
899 904
900 if (status != 0) 905 if (status != 0)
@@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
2801 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 2806 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2802 "3054 lpfc_topology changed from %d to %d\n", 2807 "3054 lpfc_topology changed from %d to %d\n",
2803 prev_val, val); 2808 prev_val, val);
2809 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
2810 phba->fc_topology_changed = 1;
2804 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 2811 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
2805 if (err) { 2812 if (err) {
2806 phba->cfg_topology = prev_val; 2813 phba->cfg_topology = prev_val;
@@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
3792static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR, 3799static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
3793 lpfc_fcp_imax_show, lpfc_fcp_imax_store); 3800 lpfc_fcp_imax_show, lpfc_fcp_imax_store);
3794 3801
3802/**
3803 * lpfc_state_show - Display current driver CPU affinity
3804 * @dev: class converted to a Scsi_host structure.
3805 * @attr: device attribute, not used.
3806 * @buf: on return contains text describing the state of the link.
3807 *
3808 * Returns: size of formatted string.
3809 **/
3810static ssize_t
3811lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
3812 char *buf)
3813{
3814 struct Scsi_Host *shost = class_to_shost(dev);
3815 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
3816 struct lpfc_hba *phba = vport->phba;
3817 struct lpfc_vector_map_info *cpup;
3818 int idx, len = 0;
3819
3820 if ((phba->sli_rev != LPFC_SLI_REV4) ||
3821 (phba->intr_type != MSIX))
3822 return len;
3823
3824 switch (phba->cfg_fcp_cpu_map) {
3825 case 0:
3826 len += snprintf(buf + len, PAGE_SIZE-len,
3827 "fcp_cpu_map: No mapping (%d)\n",
3828 phba->cfg_fcp_cpu_map);
3829 return len;
3830 case 1:
3831 len += snprintf(buf + len, PAGE_SIZE-len,
3832 "fcp_cpu_map: HBA centric mapping (%d): "
3833 "%d online CPUs\n",
3834 phba->cfg_fcp_cpu_map,
3835 phba->sli4_hba.num_online_cpu);
3836 break;
3837 case 2:
3838 len += snprintf(buf + len, PAGE_SIZE-len,
3839 "fcp_cpu_map: Driver centric mapping (%d): "
3840 "%d online CPUs\n",
3841 phba->cfg_fcp_cpu_map,
3842 phba->sli4_hba.num_online_cpu);
3843 break;
3844 }
3845
3846 cpup = phba->sli4_hba.cpu_map;
3847 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
3848 if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
3849 len += snprintf(buf + len, PAGE_SIZE-len,
3850 "CPU %02d io_chan %02d "
3851 "physid %d coreid %d\n",
3852 idx, cpup->channel_id, cpup->phys_id,
3853 cpup->core_id);
3854 else
3855 len += snprintf(buf + len, PAGE_SIZE-len,
3856 "CPU %02d io_chan %02d "
3857 "physid %d coreid %d IRQ %d\n",
3858 idx, cpup->channel_id, cpup->phys_id,
3859 cpup->core_id, cpup->irq);
3860
3861 cpup++;
3862 }
3863 return len;
3864}
3865
3866/**
3867 * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
3868 * @dev: class device that is converted into a Scsi_host.
3869 * @attr: device attribute, not used.
3870 * @buf: one or more lpfc_polling_flags values.
3871 * @count: not used.
3872 *
3873 * Returns:
3874 * -EINVAL - Not implemented yet.
3875 **/
3876static ssize_t
3877lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
3878 const char *buf, size_t count)
3879{
3880 int status = -EINVAL;
3881 return status;
3882}
3883
3884/*
3885# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
3886# for the HBA.
3887#
3888# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
3889# 0 - Do not affinitze IRQ vectors
3890# 1 - Affintize HBA vectors with respect to each HBA
3891# (start with CPU0 for each HBA)
3892# 2 - Affintize HBA vectors with respect to the entire driver
3893# (round robin thru all CPUs across all HBAs)
3894*/
3895static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
3896module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
3897MODULE_PARM_DESC(lpfc_fcp_cpu_map,
3898 "Defines how to map CPUs to IRQ vectors per HBA");
3899
3900/**
3901 * lpfc_fcp_cpu_map_init - Set the initial sr-iov virtual function enable
3902 * @phba: lpfc_hba pointer.
3903 * @val: link speed value.
3904 *
3905 * Description:
3906 * If val is in a valid range [0-2], then affinitze the adapter's
3907 * MSIX vectors.
3908 *
3909 * Returns:
3910 * zero if val saved.
3911 * -EINVAL val out of range
3912 **/
3913static int
3914lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
3915{
3916 if (phba->sli_rev != LPFC_SLI_REV4) {
3917 phba->cfg_fcp_cpu_map = 0;
3918 return 0;
3919 }
3920
3921 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
3922 phba->cfg_fcp_cpu_map = val;
3923 return 0;
3924 }
3925
3926 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3927 "3326 fcp_cpu_map: %d out of range, using default\n",
3928 val);
3929 phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
3930
3931 return 0;
3932}
3933
3934static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
3935 lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
3936
3795/* 3937/*
3796# lpfc_fcp_class: Determines FC class to use for the FCP protocol. 3938# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
3797# Value range is [2,3]. Default value is 3. 3939# Value range is [2,3]. Default value is 3.
@@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
4009# 0 = disabled (default) 4151# 0 = disabled (default)
4010# 1 = enabled 4152# 1 = enabled
4011# Value range is [0,1]. Default value is 0. 4153# Value range is [0,1]. Default value is 0.
4154#
4155# This feature in under investigation and may be supported in the future.
4012*/ 4156*/
4013unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF; 4157unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
4014 4158
4015module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
4016MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
4017
4018/* 4159/*
4019# lpfc_prot_mask: i 4160# lpfc_prot_mask: i
4020# - Bit mask of host protection capabilities used to register with the 4161# - Bit mask of host protection capabilities used to register with the
@@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery,
4071 4212
4072/* 4213/*
4073 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count 4214 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
4074 * This value can be set to values between 64 and 256. The default value is 4215 * This value can be set to values between 64 and 4096. The default value is
4075 * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer 4216 * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
4076 * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE). 4217 * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
4218 * Because of the additional overhead involved in setting up T10-DIF,
4219 * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
4220 * and will be limited to 512 if BlockGuard is enabled under SLI3.
4077 */ 4221 */
4078LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, 4222LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
4079 LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); 4223 LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
4080 4224
4081LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT, 4225/*
4082 LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT, 4226 * This parameter will be depricated, the driver cannot limit the
4083 "Max Protection Scatter Gather Segment Count"); 4227 * protection data s/g list.
4228 */
4229LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
4230 LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
4231 "Max Protection Scatter Gather Segment Count");
4084 4232
4085struct device_attribute *lpfc_hba_attrs[] = { 4233struct device_attribute *lpfc_hba_attrs[] = {
4086 &dev_attr_bg_info, 4234 &dev_attr_bg_info,
@@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
4141 &dev_attr_lpfc_poll_tmo, 4289 &dev_attr_lpfc_poll_tmo,
4142 &dev_attr_lpfc_use_msi, 4290 &dev_attr_lpfc_use_msi,
4143 &dev_attr_lpfc_fcp_imax, 4291 &dev_attr_lpfc_fcp_imax,
4292 &dev_attr_lpfc_fcp_cpu_map,
4144 &dev_attr_lpfc_fcp_wq_count, 4293 &dev_attr_lpfc_fcp_wq_count,
4145 &dev_attr_lpfc_fcp_eq_count, 4294 &dev_attr_lpfc_fcp_eq_count,
4146 &dev_attr_lpfc_fcp_io_channel, 4295 &dev_attr_lpfc_fcp_io_channel,
@@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
5123 lpfc_enable_rrq_init(phba, lpfc_enable_rrq); 5272 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
5124 lpfc_use_msi_init(phba, lpfc_use_msi); 5273 lpfc_use_msi_init(phba, lpfc_use_msi);
5125 lpfc_fcp_imax_init(phba, lpfc_fcp_imax); 5274 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
5275 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
5126 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); 5276 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
5127 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); 5277 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
5128 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); 5278 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 888666892004..094be2cad65b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
219 unsigned int transfer_bytes, bytes_copied = 0; 219 unsigned int transfer_bytes, bytes_copied = 0;
220 unsigned int sg_offset, dma_offset; 220 unsigned int sg_offset, dma_offset;
221 unsigned char *dma_address, *sg_address; 221 unsigned char *dma_address, *sg_address;
222 struct scatterlist *sgel;
223 LIST_HEAD(temp_list); 222 LIST_HEAD(temp_list);
224 223 struct sg_mapping_iter miter;
224 unsigned long flags;
225 unsigned int sg_flags = SG_MITER_ATOMIC;
226 bool sg_valid;
225 227
226 list_splice_init(&dma_buffers->list, &temp_list); 228 list_splice_init(&dma_buffers->list, &temp_list);
227 list_add(&dma_buffers->list, &temp_list); 229 list_add(&dma_buffers->list, &temp_list);
228 sg_offset = 0; 230 sg_offset = 0;
229 sgel = bsg_buffers->sg_list; 231 if (to_buffers)
232 sg_flags |= SG_MITER_FROM_SG;
233 else
234 sg_flags |= SG_MITER_TO_SG;
235 sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
236 sg_flags);
237 local_irq_save(flags);
238 sg_valid = sg_miter_next(&miter);
230 list_for_each_entry(mp, &temp_list, list) { 239 list_for_each_entry(mp, &temp_list, list) {
231 dma_offset = 0; 240 dma_offset = 0;
232 while (bytes_to_transfer && sgel && 241 while (bytes_to_transfer && sg_valid &&
233 (dma_offset < LPFC_BPL_SIZE)) { 242 (dma_offset < LPFC_BPL_SIZE)) {
234 dma_address = mp->virt + dma_offset; 243 dma_address = mp->virt + dma_offset;
235 if (sg_offset) { 244 if (sg_offset) {
236 /* Continue previous partial transfer of sg */ 245 /* Continue previous partial transfer of sg */
237 sg_address = sg_virt(sgel) + sg_offset; 246 sg_address = miter.addr + sg_offset;
238 transfer_bytes = sgel->length - sg_offset; 247 transfer_bytes = miter.length - sg_offset;
239 } else { 248 } else {
240 sg_address = sg_virt(sgel); 249 sg_address = miter.addr;
241 transfer_bytes = sgel->length; 250 transfer_bytes = miter.length;
242 } 251 }
243 if (bytes_to_transfer < transfer_bytes) 252 if (bytes_to_transfer < transfer_bytes)
244 transfer_bytes = bytes_to_transfer; 253 transfer_bytes = bytes_to_transfer;
@@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
252 sg_offset += transfer_bytes; 261 sg_offset += transfer_bytes;
253 bytes_to_transfer -= transfer_bytes; 262 bytes_to_transfer -= transfer_bytes;
254 bytes_copied += transfer_bytes; 263 bytes_copied += transfer_bytes;
255 if (sg_offset >= sgel->length) { 264 if (sg_offset >= miter.length) {
256 sg_offset = 0; 265 sg_offset = 0;
257 sgel = sg_next(sgel); 266 sg_valid = sg_miter_next(&miter);
258 } 267 }
259 } 268 }
260 } 269 }
270 sg_miter_stop(&miter);
271 local_irq_restore(flags);
261 list_del_init(&dma_buffers->list); 272 list_del_init(&dma_buffers->list);
262 list_splice(&temp_list, &dma_buffers->list); 273 list_splice(&temp_list, &dma_buffers->list);
263 return bytes_copied; 274 return bytes_copied;
@@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
471 cmdiocbq->context1 = dd_data; 482 cmdiocbq->context1 = dd_data;
472 cmdiocbq->context2 = cmp; 483 cmdiocbq->context2 = cmp;
473 cmdiocbq->context3 = bmp; 484 cmdiocbq->context3 = bmp;
485 cmdiocbq->context_un.ndlp = ndlp;
474 dd_data->type = TYPE_IOCB; 486 dd_data->type = TYPE_IOCB;
475 dd_data->set_job = job; 487 dd_data->set_job = job;
476 dd_data->context_un.iocb.cmdiocbq = cmdiocbq; 488 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1508 ctiocb->context1 = dd_data; 1520 ctiocb->context1 = dd_data;
1509 ctiocb->context2 = cmp; 1521 ctiocb->context2 = cmp;
1510 ctiocb->context3 = bmp; 1522 ctiocb->context3 = bmp;
1523 ctiocb->context_un.ndlp = ndlp;
1511 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; 1524 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1512 1525
1513 dd_data->type = TYPE_IOCB; 1526 dd_data->type = TYPE_IOCB;
@@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2576 evt->wait_time_stamp = jiffies; 2589 evt->wait_time_stamp = jiffies;
2577 time_left = wait_event_interruptible_timeout( 2590 time_left = wait_event_interruptible_timeout(
2578 evt->wq, !list_empty(&evt->events_to_see), 2591 evt->wq, !list_empty(&evt->events_to_see),
2579 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 2592 msecs_to_jiffies(1000 *
2593 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
2580 if (list_empty(&evt->events_to_see)) 2594 if (list_empty(&evt->events_to_see))
2581 ret_val = (time_left) ? -EINTR : -ETIMEDOUT; 2595 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2582 else { 2596 else {
@@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
3151 evt->waiting = 1; 3165 evt->waiting = 1;
3152 time_left = wait_event_interruptible_timeout( 3166 time_left = wait_event_interruptible_timeout(
3153 evt->wq, !list_empty(&evt->events_to_see), 3167 evt->wq, !list_empty(&evt->events_to_see),
3154 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 3168 msecs_to_jiffies(1000 *
3169 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
3155 evt->waiting = 0; 3170 evt->waiting = 0;
3156 if (list_empty(&evt->events_to_see)) { 3171 if (list_empty(&evt->events_to_see)) {
3157 rc = (time_left) ? -EINTR : -ETIMEDOUT; 3172 rc = (time_left) ? -EINTR : -ETIMEDOUT;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 7631893ae005..d41456e5f814 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
470void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); 470void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
471uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); 471uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
472int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); 472int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
473void lpfc_sli4_offline_eratt(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 7bff3a19af56..ae1a07c57cae 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
1811 if (init_utsname()->nodename[0] != '\0') 1811 if (init_utsname()->nodename[0] != '\0')
1812 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); 1812 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
1813 else 1813 else
1814 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); 1814 mod_timer(&vport->fc_fdmitmo, jiffies +
1815 msecs_to_jiffies(1000 * 60));
1815 } 1816 }
1816 return; 1817 return;
1817} 1818}
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index bbed8471bf0b..3cae0a92e8bd 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -29,6 +29,7 @@
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h> 30#include <scsi/scsi_transport_fc.h>
31 31
32
32#include "lpfc_hw4.h" 33#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 34#include "lpfc_hw.h"
34#include "lpfc_sli.h" 35#include "lpfc_sli.h"
@@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
238 239
239 icmd->un.elsreq64.remoteID = did; /* DID */ 240 icmd->un.elsreq64.remoteID = did; /* DID */
240 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 241 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
241 icmd->ulpTimeout = phba->fc_ratov * 2; 242 if (elscmd == ELS_CMD_FLOGI)
243 icmd->ulpTimeout = FF_DEF_RATOV * 2;
244 else
245 icmd->ulpTimeout = phba->fc_ratov * 2;
242 } else { 246 } else {
243 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); 247 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
244 icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys); 248 icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
@@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
308 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 312 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
309 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 313 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
310 "0116 Xmit ELS command x%x to remote " 314 "0116 Xmit ELS command x%x to remote "
311 "NPORT x%x I/O tag: x%x, port state: x%x\n", 315 "NPORT x%x I/O tag: x%x, port state:x%x"
316 " fc_flag:x%x\n",
312 elscmd, did, elsiocb->iotag, 317 elscmd, did, elsiocb->iotag,
313 vport->port_state); 318 vport->port_state,
319 vport->fc_flag);
314 } else { 320 } else {
315 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 321 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
316 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 322 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
317 "0117 Xmit ELS response x%x to remote " 323 "0117 Xmit ELS response x%x to remote "
318 "NPORT x%x I/O tag: x%x, size: x%x\n", 324 "NPORT x%x I/O tag: x%x, size: x%x "
325 "port_state x%x fc_flag x%x\n",
319 elscmd, ndlp->nlp_DID, elsiocb->iotag, 326 elscmd, ndlp->nlp_DID, elsiocb->iotag,
320 cmdSize); 327 cmdSize, vport->port_state,
328 vport->fc_flag);
321 } 329 }
322 return elsiocb; 330 return elsiocb;
323 331
@@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
909 spin_lock_irq(shost->host_lock); 917 spin_lock_irq(shost->host_lock);
910 vport->fc_flag |= FC_PT2PT; 918 vport->fc_flag |= FC_PT2PT;
911 spin_unlock_irq(shost->host_lock); 919 spin_unlock_irq(shost->host_lock);
920 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
921 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
922 lpfc_unregister_fcf_prep(phba);
923
924 /* The FC_VFI_REGISTERED flag will get clear in the cmpl
925 * handler for unreg_vfi, but if we don't force the
926 * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
927 * built with the update bit set instead of just the vp bit to
928 * change the Nport ID. We need to have the vp set and the
929 * Upd cleared on topology changes.
930 */
931 spin_lock_irq(shost->host_lock);
932 vport->fc_flag &= ~FC_VFI_REGISTERED;
933 spin_unlock_irq(shost->host_lock);
934 phba->fc_topology_changed = 0;
935 lpfc_issue_reg_vfi(vport);
936 }
912 937
913 /* Start discovery - this should just do CLEAR_LA */ 938 /* Start discovery - this should just do CLEAR_LA */
914 lpfc_disc_start(vport); 939 lpfc_disc_start(vport);
@@ -1030,9 +1055,19 @@ stop_rr_fcf_flogi:
1030 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 1055 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1031 if ((phba->sli_rev == LPFC_SLI_REV4) && 1056 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1032 (!(vport->fc_flag & FC_VFI_REGISTERED) || 1057 (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1033 (vport->fc_prevDID != vport->fc_myDID))) { 1058 (vport->fc_prevDID != vport->fc_myDID) ||
1034 if (vport->fc_flag & FC_VFI_REGISTERED) 1059 phba->fc_topology_changed)) {
1035 lpfc_sli4_unreg_all_rpis(vport); 1060 if (vport->fc_flag & FC_VFI_REGISTERED) {
1061 if (phba->fc_topology_changed) {
1062 lpfc_unregister_fcf_prep(phba);
1063 spin_lock_irq(shost->host_lock);
1064 vport->fc_flag &= ~FC_VFI_REGISTERED;
1065 spin_unlock_irq(shost->host_lock);
1066 phba->fc_topology_changed = 0;
1067 } else {
1068 lpfc_sli4_unreg_all_rpis(vport);
1069 }
1070 }
1036 lpfc_issue_reg_vfi(vport); 1071 lpfc_issue_reg_vfi(vport);
1037 lpfc_nlp_put(ndlp); 1072 lpfc_nlp_put(ndlp);
1038 goto out; 1073 goto out;
@@ -1054,10 +1089,11 @@ stop_rr_fcf_flogi:
1054 1089
1055 /* FLOGI completes successfully */ 1090 /* FLOGI completes successfully */
1056 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1091 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1057 "0101 FLOGI completes successfully " 1092 "0101 FLOGI completes successfully, I/O tag:x%x, "
1058 "Data: x%x x%x x%x x%x\n", 1093 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
1059 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 1094 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
1060 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); 1095 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1096 vport->port_state, vport->fc_flag);
1061 1097
1062 if (vport->port_state == LPFC_FLOGI) { 1098 if (vport->port_state == LPFC_FLOGI) {
1063 /* 1099 /*
@@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5047 struct ls_rjt stat; 5083 struct ls_rjt stat;
5048 uint32_t cmd, did; 5084 uint32_t cmd, did;
5049 int rc; 5085 int rc;
5086 uint32_t fc_flag = 0;
5087 uint32_t port_state = 0;
5050 5088
5051 cmd = *lp++; 5089 cmd = *lp++;
5052 sp = (struct serv_parm *) lp; 5090 sp = (struct serv_parm *) lp;
@@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5113 * will be. 5151 * will be.
5114 */ 5152 */
5115 vport->fc_myDID = PT2PT_LocalID; 5153 vport->fc_myDID = PT2PT_LocalID;
5116 } 5154 } else
5155 vport->fc_myDID = PT2PT_RemoteID;
5117 5156
5118 /* 5157 /*
5119 * The vport state should go to LPFC_FLOGI only 5158 * The vport state should go to LPFC_FLOGI only
5120 * AFTER we issue a FLOGI, not receive one. 5159 * AFTER we issue a FLOGI, not receive one.
5121 */ 5160 */
5122 spin_lock_irq(shost->host_lock); 5161 spin_lock_irq(shost->host_lock);
5162 fc_flag = vport->fc_flag;
5163 port_state = vport->port_state;
5123 vport->fc_flag |= FC_PT2PT; 5164 vport->fc_flag |= FC_PT2PT;
5124 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 5165 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
5166 vport->port_state = LPFC_FLOGI;
5125 spin_unlock_irq(shost->host_lock); 5167 spin_unlock_irq(shost->host_lock);
5168 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5169 "3311 Rcv Flogi PS x%x new PS x%x "
5170 "fc_flag x%x new fc_flag x%x\n",
5171 port_state, vport->port_state,
5172 fc_flag, vport->fc_flag);
5126 5173
5127 /* 5174 /*
5128 * We temporarily set fc_myDID to make it look like we are 5175 * We temporarily set fc_myDID to make it look like we are
@@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
6241 } 6288 }
6242 6289
6243 if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) 6290 if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
6244 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 6291 mod_timer(&vport->els_tmofunc,
6292 jiffies + msecs_to_jiffies(1000 * timeout));
6245} 6293}
6246 6294
6247/** 6295/**
@@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6612 /* ELS command <elsCmd> received from NPORT <did> */ 6660 /* ELS command <elsCmd> received from NPORT <did> */
6613 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6661 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6614 "0112 ELS command x%x received from NPORT x%x " 6662 "0112 ELS command x%x received from NPORT x%x "
6615 "Data: x%x\n", cmd, did, vport->port_state); 6663 "Data: x%x x%x x%x x%x\n",
6664 cmd, did, vport->port_state, vport->fc_flag,
6665 vport->fc_myDID, vport->fc_prevDID);
6616 switch (cmd) { 6666 switch (cmd) {
6617 case ELS_CMD_PLOGI: 6667 case ELS_CMD_PLOGI:
6618 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6668 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6621 6671
6622 phba->fc_stat.elsRcvPLOGI++; 6672 phba->fc_stat.elsRcvPLOGI++;
6623 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 6673 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
6674 if (phba->sli_rev == LPFC_SLI_REV4 &&
6675 (phba->pport->fc_flag & FC_PT2PT)) {
6676 vport->fc_prevDID = vport->fc_myDID;
6677 /* Our DID needs to be updated before registering
6678 * the vfi. This is done in lpfc_rcv_plogi but
6679 * that is called after the reg_vfi.
6680 */
6681 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
6682 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6683 "3312 Remote port assigned DID x%x "
6684 "%x\n", vport->fc_myDID,
6685 vport->fc_prevDID);
6686 }
6624 6687
6625 lpfc_send_els_event(vport, ndlp, payload); 6688 lpfc_send_els_event(vport, ndlp, payload);
6626 6689
@@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6630 rjt_exp = LSEXP_NOTHING_MORE; 6693 rjt_exp = LSEXP_NOTHING_MORE;
6631 break; 6694 break;
6632 } 6695 }
6696 shost = lpfc_shost_from_vport(vport);
6633 if (vport->port_state < LPFC_DISC_AUTH) { 6697 if (vport->port_state < LPFC_DISC_AUTH) {
6634 if (!(phba->pport->fc_flag & FC_PT2PT) || 6698 if (!(phba->pport->fc_flag & FC_PT2PT) ||
6635 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 6699 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6641 * another NPort and the other side has initiated 6705 * another NPort and the other side has initiated
6642 * the PLOGI before responding to our FLOGI. 6706 * the PLOGI before responding to our FLOGI.
6643 */ 6707 */
6708 if (phba->sli_rev == LPFC_SLI_REV4 &&
6709 (phba->fc_topology_changed ||
6710 vport->fc_myDID != vport->fc_prevDID)) {
6711 lpfc_unregister_fcf_prep(phba);
6712 spin_lock_irq(shost->host_lock);
6713 vport->fc_flag &= ~FC_VFI_REGISTERED;
6714 spin_unlock_irq(shost->host_lock);
6715 phba->fc_topology_changed = 0;
6716 lpfc_issue_reg_vfi(vport);
6717 }
6644 } 6718 }
6645 6719
6646 shost = lpfc_shost_from_vport(vport);
6647 spin_lock_irq(shost->host_lock); 6720 spin_lock_irq(shost->host_lock);
6648 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 6721 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
6649 spin_unlock_irq(shost->host_lock); 6722 spin_unlock_irq(shost->host_lock);
@@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
7002 spin_lock_irq(shost->host_lock); 7075 spin_lock_irq(shost->host_lock);
7003 if (vport->fc_flag & FC_DISC_DELAYED) { 7076 if (vport->fc_flag & FC_DISC_DELAYED) {
7004 spin_unlock_irq(shost->host_lock); 7077 spin_unlock_irq(shost->host_lock);
7078 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
7079 "3334 Delay fc port discovery for %d seconds\n",
7080 phba->fc_ratov);
7005 mod_timer(&vport->delayed_disc_tmo, 7081 mod_timer(&vport->delayed_disc_tmo,
7006 jiffies + HZ * phba->fc_ratov); 7082 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
7007 return; 7083 return;
7008 } 7084 }
7009 spin_unlock_irq(shost->host_lock); 7085 spin_unlock_irq(shost->host_lock);
@@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
7287 return; 7363 return;
7288 7364
7289 shost = lpfc_shost_from_vport(phba->pport); 7365 shost = lpfc_shost_from_vport(phba->pport);
7290 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 7366 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
7291 spin_lock_irq(shost->host_lock); 7367 spin_lock_irq(shost->host_lock);
7292 ndlp->nlp_flag |= NLP_DELAY_TMO; 7368 ndlp->nlp_flag |= NLP_DELAY_TMO;
7293 spin_unlock_irq(shost->host_lock); 7369 spin_unlock_irq(shost->host_lock);
@@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
7791 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 7867 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7792 /* Start a timer to unblock fabric iocbs after 100ms */ 7868 /* Start a timer to unblock fabric iocbs after 100ms */
7793 if (!blocked) 7869 if (!blocked)
7794 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); 7870 mod_timer(&phba->fabric_block_timer,
7871 jiffies + msecs_to_jiffies(100));
7795 7872
7796 return; 7873 return;
7797} 7874}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 326e05a65a73..0f6e2548f35d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
160 if (!list_empty(&evtp->evt_listp)) 160 if (!list_empty(&evtp->evt_listp))
161 return; 161 return;
162 162
163 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
164
163 spin_lock_irq(&phba->hbalock); 165 spin_lock_irq(&phba->hbalock);
164 /* We need to hold the node by incrementing the reference 166 /* We need to hold the node by incrementing the reference
165 * count until this queued work is done 167 * count until this queued work is done
166 */ 168 */
167 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
168 if (evtp->evt_arg1) { 169 if (evtp->evt_arg1) {
169 evtp->evt = LPFC_EVT_DEV_LOSS; 170 evtp->evt = LPFC_EVT_DEV_LOSS;
170 list_add_tail(&evtp->evt_listp, &phba->work_list); 171 list_add_tail(&evtp->evt_listp, &phba->work_list);
@@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba)
1008 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 1009 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1009 lpfc_linkup_port(vports[i]); 1010 lpfc_linkup_port(vports[i]);
1010 lpfc_destroy_vport_work_array(phba, vports); 1011 lpfc_destroy_vport_work_array(phba, vports);
1011 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1012 (phba->sli_rev < LPFC_SLI_REV4))
1013 lpfc_issue_clear_la(phba, phba->pport);
1014 1012
1015 return 0; 1013 return 0;
1016} 1014}
@@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1436 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1434 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1437 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1435 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1438 phba->hba_flag &= ~FCF_TS_INPROG; 1436 phba->hba_flag &= ~FCF_TS_INPROG;
1439 if (phba->pport->port_state != LPFC_FLOGI) { 1437 if (phba->pport->port_state != LPFC_FLOGI &&
1438 phba->pport->fc_flag & FC_FABRIC) {
1440 phba->hba_flag |= FCF_RR_INPROG; 1439 phba->hba_flag |= FCF_RR_INPROG;
1441 spin_unlock_irq(&phba->hbalock); 1440 spin_unlock_irq(&phba->hbalock);
1442 lpfc_initial_flogi(phba->pport); 1441 lpfc_initial_flogi(phba->pport);
@@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2270 spin_unlock_irq(&phba->hbalock); 2269 spin_unlock_irq(&phba->hbalock);
2271 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2270 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2272 "2836 New FCF matches in-use " 2271 "2836 New FCF matches in-use "
2273 "FCF (x%x)\n", 2272 "FCF (x%x), port_state:x%x, "
2274 phba->fcf.current_rec.fcf_indx); 2273 "fc_flag:x%x\n",
2274 phba->fcf.current_rec.fcf_indx,
2275 phba->pport->port_state,
2276 phba->pport->fc_flag);
2275 goto out; 2277 goto out;
2276 } else 2278 } else
2277 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 2279 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
@@ -2796,7 +2798,19 @@ void
2796lpfc_issue_init_vpi(struct lpfc_vport *vport) 2798lpfc_issue_init_vpi(struct lpfc_vport *vport)
2797{ 2799{
2798 LPFC_MBOXQ_t *mboxq; 2800 LPFC_MBOXQ_t *mboxq;
2799 int rc; 2801 int rc, vpi;
2802
2803 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
2804 vpi = lpfc_alloc_vpi(vport->phba);
2805 if (!vpi) {
2806 lpfc_printf_vlog(vport, KERN_ERR,
2807 LOG_MBOX,
2808 "3303 Failed to obtain vport vpi\n");
2809 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2810 return;
2811 }
2812 vport->vpi = vpi;
2813 }
2800 2814
2801 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); 2815 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2802 if (!mboxq) { 2816 if (!mboxq) {
@@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2894 goto out_free_mem; 2908 goto out_free_mem;
2895 } 2909 }
2896 2910
2897 /* If the VFI is already registered, there is nothing else to do */ 2911 /* If the VFI is already registered, there is nothing else to do
2912 * Unless this was a VFI update and we are in PT2PT mode, then
2913 * we should drop through to set the port state to ready.
2914 */
2898 if (vport->fc_flag & FC_VFI_REGISTERED) 2915 if (vport->fc_flag & FC_VFI_REGISTERED)
2899 goto out_free_mem; 2916 if (!(phba->sli_rev == LPFC_SLI_REV4 &&
2917 vport->fc_flag & FC_PT2PT))
2918 goto out_free_mem;
2900 2919
2901 /* The VPI is implicitly registered when the VFI is registered */ 2920 /* The VPI is implicitly registered when the VFI is registered */
2902 spin_lock_irq(shost->host_lock); 2921 spin_lock_irq(shost->host_lock);
@@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2913 goto out_free_mem; 2932 goto out_free_mem;
2914 } 2933 }
2915 2934
2935 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2936 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
2937 "alpacnt:%d LinkState:%x topology:%x\n",
2938 vport->port_state, vport->fc_flag, vport->fc_myDID,
2939 vport->phba->alpa_map[0],
2940 phba->link_state, phba->fc_topology);
2941
2916 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2942 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
2917 /* 2943 /*
2918 * For private loop or for NPort pt2pt, 2944 * For private loop or for NPort pt2pt,
@@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2925 /* Use loop map to make discovery list */ 2951 /* Use loop map to make discovery list */
2926 lpfc_disc_list_loopmap(vport); 2952 lpfc_disc_list_loopmap(vport);
2927 /* Start discovery */ 2953 /* Start discovery */
2928 lpfc_disc_start(vport); 2954 if (vport->fc_flag & FC_PT2PT)
2955 vport->port_state = LPFC_VPORT_READY;
2956 else
2957 lpfc_disc_start(vport);
2929 } else { 2958 } else {
2930 lpfc_start_fdiscs(phba); 2959 lpfc_start_fdiscs(phba);
2931 lpfc_do_scr_ns_plogi(phba, vport); 2960 lpfc_do_scr_ns_plogi(phba, vport);
@@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3007 break; 3036 break;
3008 } 3037 }
3009 3038
3039 if (phba->fc_topology &&
3040 phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
3041 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3042 "3314 Toplogy changed was 0x%x is 0x%x\n",
3043 phba->fc_topology,
3044 bf_get(lpfc_mbx_read_top_topology, la));
3045 phba->fc_topology_changed = 1;
3046 }
3047
3010 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); 3048 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3011 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 3049 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
3012 3050
@@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
4235 tmo, vport->port_state, vport->fc_flag); 4273 tmo, vport->port_state, vport->fc_flag);
4236 } 4274 }
4237 4275
4238 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo); 4276 mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
4239 spin_lock_irq(shost->host_lock); 4277 spin_lock_irq(shost->host_lock);
4240 vport->fc_flag |= FC_DISC_TMO; 4278 vport->fc_flag |= FC_DISC_TMO;
4241 spin_unlock_irq(shost->host_lock); 4279 spin_unlock_irq(shost->host_lock);
@@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport)
4949 uint32_t clear_la_pending; 4987 uint32_t clear_la_pending;
4950 int did_changed; 4988 int did_changed;
4951 4989
4952 if (!lpfc_is_link_up(phba)) 4990 if (!lpfc_is_link_up(phba)) {
4991 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
4992 "3315 Link is not up %x\n",
4993 phba->link_state);
4953 return; 4994 return;
4995 }
4954 4996
4955 if (phba->link_state == LPFC_CLEAR_LA) 4997 if (phba->link_state == LPFC_CLEAR_LA)
4956 clear_la_pending = 1; 4998 clear_la_pending = 1;
@@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport)
4983 if (num_sent) 5025 if (num_sent)
4984 return; 5026 return;
4985 5027
4986 /* Register the VPI for SLI3, NON-NPIV only. */ 5028 /* Register the VPI for SLI3, NPIV only. */
4987 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 5029 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4988 !(vport->fc_flag & FC_PT2PT) && 5030 !(vport->fc_flag & FC_PT2PT) &&
4989 !(vport->fc_flag & FC_RSCN_MODE) && 5031 !(vport->fc_flag & FC_RSCN_MODE) &&
4990 (phba->sli_rev < LPFC_SLI_REV4)) { 5032 (phba->sli_rev < LPFC_SLI_REV4)) {
5033 if (vport->port_type == LPFC_PHYSICAL_PORT)
5034 lpfc_issue_clear_la(phba, vport);
4991 lpfc_issue_reg_vpi(phba, vport); 5035 lpfc_issue_reg_vpi(phba, vport);
4992 return; 5036 return;
4993 } 5037 }
@@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5410 if (vport->cfg_fdmi_on == 1) 5454 if (vport->cfg_fdmi_on == 1)
5411 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); 5455 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
5412 else 5456 else
5413 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); 5457 mod_timer(&vport->fc_fdmitmo,
5458 jiffies + msecs_to_jiffies(1000 * 60));
5414 5459
5415 /* decrement the node reference count held for this callback 5460 /* decrement the node reference count held for this callback
5416 * function. 5461 * function.
@@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
5855 struct lpfc_vport **vports; 5900 struct lpfc_vport **vports;
5856 struct lpfc_nodelist *ndlp; 5901 struct lpfc_nodelist *ndlp;
5857 struct Scsi_Host *shost; 5902 struct Scsi_Host *shost;
5858 int i, rc; 5903 int i = 0, rc;
5859 5904
5860 /* Unregister RPIs */ 5905 /* Unregister RPIs */
5861 if (lpfc_fcf_inuse(phba)) 5906 if (lpfc_fcf_inuse(phba))
@@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
5883 spin_unlock_irq(shost->host_lock); 5928 spin_unlock_irq(shost->host_lock);
5884 } 5929 }
5885 lpfc_destroy_vport_work_array(phba, vports); 5930 lpfc_destroy_vport_work_array(phba, vports);
5931 if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
5932 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
5933 if (ndlp)
5934 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
5935 lpfc_cleanup_pending_mbox(phba->pport);
5936 if (phba->sli_rev == LPFC_SLI_REV4)
5937 lpfc_sli4_unreg_all_rpis(phba->pport);
5938 lpfc_mbx_unreg_vpi(phba->pport);
5939 shost = lpfc_shost_from_vport(phba->pport);
5940 spin_lock_irq(shost->host_lock);
5941 phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
5942 phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
5943 spin_unlock_irq(shost->host_lock);
5944 }
5886 5945
5887 /* Cleanup any outstanding ELS commands */ 5946 /* Cleanup any outstanding ELS commands */
5888 lpfc_els_flush_all_cmd(phba); 5947 lpfc_els_flush_all_cmd(phba);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index e8c476031703..83700c18f468 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type {
1667#define BG_OP_IN_CSUM_OUT_CSUM 0x5 1667#define BG_OP_IN_CSUM_OUT_CSUM 0x5
1668#define BG_OP_IN_CRC_OUT_CSUM 0x6 1668#define BG_OP_IN_CRC_OUT_CSUM 0x6
1669#define BG_OP_IN_CSUM_OUT_CRC 0x7 1669#define BG_OP_IN_CSUM_OUT_CRC 0x7
1670#define BG_OP_RAW_MODE 0x8
1670 1671
1671struct lpfc_pde5 { 1672struct lpfc_pde5 {
1672 uint32_t word0; 1673 uint32_t word0;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1dd2f6f0a127..713a4613ec3a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -200,6 +200,11 @@ struct lpfc_sli_intf {
200#define LPFC_MAX_IMAX 5000000 200#define LPFC_MAX_IMAX 5000000
201#define LPFC_DEF_IMAX 50000 201#define LPFC_DEF_IMAX 50000
202 202
203#define LPFC_MIN_CPU_MAP 0
204#define LPFC_MAX_CPU_MAP 2
205#define LPFC_HBA_CPU_MAP 1
206#define LPFC_DRIVER_CPU_MAP 2 /* Default */
207
203/* PORT_CAPABILITIES constants. */ 208/* PORT_CAPABILITIES constants. */
204#define LPFC_MAX_SUPPORTED_PAGES 8 209#define LPFC_MAX_SUPPORTED_PAGES 8
205 210
@@ -621,7 +626,7 @@ struct lpfc_register {
621#define lpfc_sliport_status_rdy_SHIFT 23 626#define lpfc_sliport_status_rdy_SHIFT 23
622#define lpfc_sliport_status_rdy_MASK 0x1 627#define lpfc_sliport_status_rdy_MASK 0x1
623#define lpfc_sliport_status_rdy_WORD word0 628#define lpfc_sliport_status_rdy_WORD word0
624#define MAX_IF_TYPE_2_RESETS 1000 629#define MAX_IF_TYPE_2_RESETS 6
625 630
626#define LPFC_CTL_PORT_CTL_OFFSET 0x408 631#define LPFC_CTL_PORT_CTL_OFFSET 0x408
627#define lpfc_sliport_ctrl_end_SHIFT 30 632#define lpfc_sliport_ctrl_end_SHIFT 30
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 90b8b0515e23..cb465b253910 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -33,6 +33,7 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/firmware.h> 34#include <linux/firmware.h>
35#include <linux/miscdevice.h> 35#include <linux/miscdevice.h>
36#include <linux/percpu.h>
36 37
37#include <scsi/scsi.h> 38#include <scsi/scsi.h>
38#include <scsi/scsi_device.h> 39#include <scsi/scsi_device.h>
@@ -58,6 +59,9 @@ char *_dump_buf_dif;
58unsigned long _dump_buf_dif_order; 59unsigned long _dump_buf_dif_order;
59spinlock_t _dump_buf_lock; 60spinlock_t _dump_buf_lock;
60 61
62/* Used when mapping IRQ vectors in a driver centric manner */
63uint16_t lpfc_used_cpu[LPFC_MAX_CPU];
64
61static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 65static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
62static int lpfc_post_rcv_buf(struct lpfc_hba *); 66static int lpfc_post_rcv_buf(struct lpfc_hba *);
63static int lpfc_sli4_queue_verify(struct lpfc_hba *); 67static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
541 545
542 /* Set up ring-0 (ELS) timer */ 546 /* Set up ring-0 (ELS) timer */
543 timeout = phba->fc_ratov * 2; 547 timeout = phba->fc_ratov * 2;
544 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 548 mod_timer(&vport->els_tmofunc,
549 jiffies + msecs_to_jiffies(1000 * timeout));
545 /* Set up heart beat (HB) timer */ 550 /* Set up heart beat (HB) timer */
546 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 551 mod_timer(&phba->hb_tmofunc,
552 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
547 phba->hb_outstanding = 0; 553 phba->hb_outstanding = 0;
548 phba->last_completion_time = jiffies; 554 phba->last_completion_time = jiffies;
549 /* Set up error attention (ERATT) polling timer */ 555 /* Set up error attention (ERATT) polling timer */
550 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 556 mod_timer(&phba->eratt_poll,
557 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
551 558
552 if (phba->hba_flag & LINK_DISABLED) { 559 if (phba->hba_flag & LINK_DISABLED) {
553 lpfc_printf_log(phba, 560 lpfc_printf_log(phba,
@@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
908 psb->pCmd = NULL; 915 psb->pCmd = NULL;
909 psb->status = IOSTAT_SUCCESS; 916 psb->status = IOSTAT_SUCCESS;
910 } 917 }
911 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 918 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
912 list_splice(&aborts, &phba->lpfc_scsi_buf_list); 919 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
913 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 920 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
914 return 0; 921 return 0;
915} 922}
916 923
@@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1021 !(phba->link_state == LPFC_HBA_ERROR) && 1028 !(phba->link_state == LPFC_HBA_ERROR) &&
1022 !(phba->pport->load_flag & FC_UNLOADING)) 1029 !(phba->pport->load_flag & FC_UNLOADING))
1023 mod_timer(&phba->hb_tmofunc, 1030 mod_timer(&phba->hb_tmofunc,
1024 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1031 jiffies +
1032 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1025 return; 1033 return;
1026} 1034}
1027 1035
@@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1064 1072
1065 spin_lock_irq(&phba->pport->work_port_lock); 1073 spin_lock_irq(&phba->pport->work_port_lock);
1066 1074
1067 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 1075 if (time_after(phba->last_completion_time +
1068 jiffies)) { 1076 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1077 jiffies)) {
1069 spin_unlock_irq(&phba->pport->work_port_lock); 1078 spin_unlock_irq(&phba->pport->work_port_lock);
1070 if (!phba->hb_outstanding) 1079 if (!phba->hb_outstanding)
1071 mod_timer(&phba->hb_tmofunc, 1080 mod_timer(&phba->hb_tmofunc,
1072 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1081 jiffies +
1082 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1073 else 1083 else
1074 mod_timer(&phba->hb_tmofunc, 1084 mod_timer(&phba->hb_tmofunc,
1075 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1085 jiffies +
1086 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1076 return; 1087 return;
1077 } 1088 }
1078 spin_unlock_irq(&phba->pport->work_port_lock); 1089 spin_unlock_irq(&phba->pport->work_port_lock);
@@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1104 if (!pmboxq) { 1115 if (!pmboxq) {
1105 mod_timer(&phba->hb_tmofunc, 1116 mod_timer(&phba->hb_tmofunc,
1106 jiffies + 1117 jiffies +
1107 HZ * LPFC_HB_MBOX_INTERVAL); 1118 msecs_to_jiffies(1000 *
1119 LPFC_HB_MBOX_INTERVAL));
1108 return; 1120 return;
1109 } 1121 }
1110 1122
@@ -1120,7 +1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1120 phba->mbox_mem_pool); 1132 phba->mbox_mem_pool);
1121 mod_timer(&phba->hb_tmofunc, 1133 mod_timer(&phba->hb_tmofunc,
1122 jiffies + 1134 jiffies +
1123 HZ * LPFC_HB_MBOX_INTERVAL); 1135 msecs_to_jiffies(1000 *
1136 LPFC_HB_MBOX_INTERVAL));
1124 return; 1137 return;
1125 } 1138 }
1126 phba->skipped_hb = 0; 1139 phba->skipped_hb = 0;
@@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1136 phba->skipped_hb = jiffies; 1149 phba->skipped_hb = jiffies;
1137 1150
1138 mod_timer(&phba->hb_tmofunc, 1151 mod_timer(&phba->hb_tmofunc,
1139 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1152 jiffies +
1153 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1140 return; 1154 return;
1141 } else { 1155 } else {
1142 /* 1156 /*
@@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1150 jiffies_to_msecs(jiffies 1164 jiffies_to_msecs(jiffies
1151 - phba->last_completion_time)); 1165 - phba->last_completion_time));
1152 mod_timer(&phba->hb_tmofunc, 1166 mod_timer(&phba->hb_tmofunc,
1153 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1167 jiffies +
1168 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1154 } 1169 }
1155 } 1170 }
1156} 1171}
@@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
1191 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1206 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1192 * other than Port Error 6 has been detected. 1207 * other than Port Error 6 has been detected.
1193 **/ 1208 **/
1194static void 1209void
1195lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1210lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1196{ 1211{
1197 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1212 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
@@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba)
2633 struct lpfc_vport *vport; 2648 struct lpfc_vport *vport;
2634 struct lpfc_vport **vports; 2649 struct lpfc_vport **vports;
2635 int i; 2650 int i;
2651 bool vpis_cleared = false;
2636 2652
2637 if (!phba) 2653 if (!phba)
2638 return 0; 2654 return 0;
@@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba)
2656 lpfc_unblock_mgmt_io(phba); 2672 lpfc_unblock_mgmt_io(phba);
2657 return 1; 2673 return 1;
2658 } 2674 }
2675 spin_lock_irq(&phba->hbalock);
2676 if (!phba->sli4_hba.max_cfg_param.vpi_used)
2677 vpis_cleared = true;
2678 spin_unlock_irq(&phba->hbalock);
2659 } else { 2679 } else {
2660 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2680 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2661 lpfc_unblock_mgmt_io(phba); 2681 lpfc_unblock_mgmt_io(phba);
@@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba)
2672 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2692 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2673 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2693 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2674 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2694 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2675 if (phba->sli_rev == LPFC_SLI_REV4) 2695 if (phba->sli_rev == LPFC_SLI_REV4) {
2676 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2696 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2697 if ((vpis_cleared) &&
2698 (vports[i]->port_type !=
2699 LPFC_PHYSICAL_PORT))
2700 vports[i]->vpi = 0;
2701 }
2677 spin_unlock_irq(shost->host_lock); 2702 spin_unlock_irq(shost->host_lock);
2678 } 2703 }
2679 lpfc_destroy_vport_work_array(phba, vports); 2704 lpfc_destroy_vport_work_array(phba, vports);
@@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2833 struct lpfc_iocbq *io, *io_next; 2858 struct lpfc_iocbq *io, *io_next;
2834 2859
2835 spin_lock_irq(&phba->hbalock); 2860 spin_lock_irq(&phba->hbalock);
2861
2836 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2862 /* Release all the lpfc_scsi_bufs maintained by this host. */
2837 spin_lock(&phba->scsi_buf_list_lock); 2863
2838 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2864 spin_lock(&phba->scsi_buf_list_put_lock);
2865 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
2866 list) {
2839 list_del(&sb->list); 2867 list_del(&sb->list);
2840 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2868 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2841 sb->dma_handle); 2869 sb->dma_handle);
2842 kfree(sb); 2870 kfree(sb);
2843 phba->total_scsi_bufs--; 2871 phba->total_scsi_bufs--;
2844 } 2872 }
2845 spin_unlock(&phba->scsi_buf_list_lock); 2873 spin_unlock(&phba->scsi_buf_list_put_lock);
2874
2875 spin_lock(&phba->scsi_buf_list_get_lock);
2876 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
2877 list) {
2878 list_del(&sb->list);
2879 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2880 sb->dma_handle);
2881 kfree(sb);
2882 phba->total_scsi_bufs--;
2883 }
2884 spin_unlock(&phba->scsi_buf_list_get_lock);
2846 2885
2847 /* Release all the lpfc_iocbq entries maintained by this host. */ 2886 /* Release all the lpfc_iocbq entries maintained by this host. */
2848 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2887 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2978 phba->sli4_hba.scsi_xri_cnt, 3017 phba->sli4_hba.scsi_xri_cnt,
2979 phba->sli4_hba.scsi_xri_max); 3018 phba->sli4_hba.scsi_xri_max);
2980 3019
2981 spin_lock_irq(&phba->scsi_buf_list_lock); 3020 spin_lock_irq(&phba->scsi_buf_list_get_lock);
2982 list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list); 3021 spin_lock_irq(&phba->scsi_buf_list_put_lock);
2983 spin_unlock_irq(&phba->scsi_buf_list_lock); 3022 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3023 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3024 spin_unlock_irq(&phba->scsi_buf_list_put_lock);
3025 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
2984 3026
2985 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { 3027 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
2986 /* max scsi xri shrinked below the allocated scsi buffers */ 3028 /* max scsi xri shrinked below the allocated scsi buffers */
@@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2994 psb->dma_handle); 3036 psb->dma_handle);
2995 kfree(psb); 3037 kfree(psb);
2996 } 3038 }
2997 spin_lock_irq(&phba->scsi_buf_list_lock); 3039 spin_lock_irq(&phba->scsi_buf_list_get_lock);
2998 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; 3040 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
2999 spin_unlock_irq(&phba->scsi_buf_list_lock); 3041 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3000 } 3042 }
3001 3043
3002 /* update xris associated to remaining allocated scsi buffers */ 3044 /* update xris associated to remaining allocated scsi buffers */
@@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3014 psb->cur_iocbq.sli4_lxritag = lxri; 3056 psb->cur_iocbq.sli4_lxritag = lxri;
3015 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3057 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3016 } 3058 }
3017 spin_lock_irq(&phba->scsi_buf_list_lock); 3059 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3018 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list); 3060 spin_lock_irq(&phba->scsi_buf_list_put_lock);
3019 spin_unlock_irq(&phba->scsi_buf_list_lock); 3061 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3062 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3063 spin_unlock_irq(&phba->scsi_buf_list_put_lock);
3064 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3020 3065
3021 return 0; 3066 return 0;
3022 3067
@@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3197 stat = 1; 3242 stat = 1;
3198 goto finished; 3243 goto finished;
3199 } 3244 }
3200 if (time >= 30 * HZ) { 3245 if (time >= msecs_to_jiffies(30 * 1000)) {
3201 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3246 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3202 "0461 Scanning longer than 30 " 3247 "0461 Scanning longer than 30 "
3203 "seconds. Continuing initialization\n"); 3248 "seconds. Continuing initialization\n");
3204 stat = 1; 3249 stat = 1;
3205 goto finished; 3250 goto finished;
3206 } 3251 }
3207 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 3252 if (time >= msecs_to_jiffies(15 * 1000) &&
3253 phba->link_state <= LPFC_LINK_DOWN) {
3208 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3254 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3209 "0465 Link down longer than 15 " 3255 "0465 Link down longer than 15 "
3210 "seconds. Continuing initialization\n"); 3256 "seconds. Continuing initialization\n");
@@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3216 goto finished; 3262 goto finished;
3217 if (vport->num_disc_nodes || vport->fc_prli_sent) 3263 if (vport->num_disc_nodes || vport->fc_prli_sent)
3218 goto finished; 3264 goto finished;
3219 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 3265 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
3220 goto finished; 3266 goto finished;
3221 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 3267 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3222 goto finished; 3268 goto finished;
@@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4215 * If there are other active VLinks present, 4261 * If there are other active VLinks present,
4216 * re-instantiate the Vlink using FDISC. 4262 * re-instantiate the Vlink using FDISC.
4217 */ 4263 */
4218 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 4264 mod_timer(&ndlp->nlp_delayfunc,
4265 jiffies + msecs_to_jiffies(1000));
4219 shost = lpfc_shost_from_vport(vport); 4266 shost = lpfc_shost_from_vport(vport);
4220 spin_lock_irq(shost->host_lock); 4267 spin_lock_irq(shost->host_lock);
4221 ndlp->nlp_flag |= NLP_DELAY_TMO; 4268 ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -4707,23 +4754,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4707 return -ENOMEM; 4754 return -ENOMEM;
4708 4755
4709 /* 4756 /*
4710 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4757 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
4711 * used to create the sg_dma_buf_pool must be dynamically calculated. 4758 * used to create the sg_dma_buf_pool must be dynamically calculated.
4712 * 2 segments are added since the IOCB needs a command and response bde.
4713 */ 4759 */
4714 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4715 sizeof(struct fcp_rsp) +
4716 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4717 4760
4761 /* Initialize the host templates the configured values. */
4762 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4763 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4764
4765 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
4718 if (phba->cfg_enable_bg) { 4766 if (phba->cfg_enable_bg) {
4719 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 4767 /*
4720 phba->cfg_sg_dma_buf_size += 4768 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
4721 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 4769 * the FCP rsp, and a BDE for each. Sice we have no control
4770 * over how many protection data segments the SCSI Layer
4771 * will hand us (ie: there could be one for every block
4772 * in the IO), we just allocate enough BDEs to accomidate
4773 * our max amount and we need to limit lpfc_sg_seg_cnt to
4774 * minimize the risk of running out.
4775 */
4776 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4777 sizeof(struct fcp_rsp) +
4778 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
4779
4780 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
4781 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
4782
4783 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
4784 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4785 } else {
4786 /*
4787 * The scsi_buf for a regular I/O will hold the FCP cmnd,
4788 * the FCP rsp, a BDE for each, and a BDE for up to
4789 * cfg_sg_seg_cnt data segments.
4790 */
4791 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4792 sizeof(struct fcp_rsp) +
4793 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4794
4795 /* Total BDEs in BPL for scsi_sg_list */
4796 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
4722 } 4797 }
4723 4798
4724 /* Also reinitialize the host templates with new values. */ 4799 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4725 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4800 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
4726 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4801 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
4802 phba->cfg_total_seg_cnt);
4727 4803
4728 phba->max_vpi = LPFC_MAX_VPI; 4804 phba->max_vpi = LPFC_MAX_VPI;
4729 /* This will be set to correct value after config_port mbox */ 4805 /* This will be set to correct value after config_port mbox */
@@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4789static int 4865static int
4790lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 4866lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4791{ 4867{
4868 struct lpfc_vector_map_info *cpup;
4792 struct lpfc_sli *psli; 4869 struct lpfc_sli *psli;
4793 LPFC_MBOXQ_t *mboxq; 4870 LPFC_MBOXQ_t *mboxq;
4794 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 4871 int rc, i, hbq_count, max_buf_size;
4795 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4872 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4796 struct lpfc_mqe *mqe; 4873 struct lpfc_mqe *mqe;
4797 int longs, sli_family; 4874 int longs;
4798 int sges_per_segment;
4799 4875
4800 /* Before proceed, wait for POST done and device ready */ 4876 /* Before proceed, wait for POST done and device ready */
4801 rc = lpfc_sli4_post_status_check(phba); 4877 rc = lpfc_sli4_post_status_check(phba);
@@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4863 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4939 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4864 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4940 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4865 4941
4866 /* With BlockGuard we can have multiple SGEs per Data Segemnt */
4867 sges_per_segment = 1;
4868 if (phba->cfg_enable_bg)
4869 sges_per_segment = 2;
4870
4871 /* 4942 /*
4872 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 4943 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
4873 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. 4944 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
@@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4878 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 4949 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4879 if (!phba->sli.ring) 4950 if (!phba->sli.ring)
4880 return -ENOMEM; 4951 return -ENOMEM;
4952
4881 /* 4953 /*
4882 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4954 * It doesn't matter what family our adapter is in, we are
4955 * limited to 2 Pages, 512 SGEs, for our SGL.
4956 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
4957 */
4958 max_buf_size = (2 * SLI4_PAGE_SIZE);
4959 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
4960 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
4961
4962 /*
4963 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
4883 * used to create the sg_dma_buf_pool must be dynamically calculated. 4964 * used to create the sg_dma_buf_pool must be dynamically calculated.
4884 * 2 segments are added since the IOCB needs a command and response bde.
4885 * To insure that the scsi sgl does not cross a 4k page boundary only
4886 * sgl sizes of must be a power of 2.
4887 */ 4965 */
4888 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 4966
4889 (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) * 4967 if (phba->cfg_enable_bg) {
4890 sizeof(struct sli4_sge))); 4968 /*
4891 4969 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
4892 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 4970 * the FCP rsp, and a SGE for each. Sice we have no control
4893 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; 4971 * over how many protection data segments the SCSI Layer
4894 switch (sli_family) { 4972 * will hand us (ie: there could be one for every block
4895 case LPFC_SLI_INTF_FAMILY_BE2: 4973 * in the IO), we just allocate enough SGEs to accomidate
4896 case LPFC_SLI_INTF_FAMILY_BE3: 4974 * our max amount and we need to limit lpfc_sg_seg_cnt to
4897 /* There is a single hint for BE - 2 pages per BPL. */ 4975 * minimize the risk of running out.
4898 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == 4976 */
4899 LPFC_SLI_INTF_SLI_HINT1_1) 4977 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4900 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; 4978 sizeof(struct fcp_rsp) + max_buf_size;
4901 break; 4979
4902 case LPFC_SLI_INTF_FAMILY_LNCR_A0: 4980 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
4903 case LPFC_SLI_INTF_FAMILY_LNCR_B0: 4981 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
4904 default: 4982
4905 break; 4983 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
4984 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
4985 } else {
4986 /*
4987 * The scsi_buf for a regular I/O will hold the FCP cmnd,
4988 * the FCP rsp, a SGE for each, and a SGE for up to
4989 * cfg_sg_seg_cnt data segments.
4990 */
4991 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4992 sizeof(struct fcp_rsp) +
4993 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
4994
4995 /* Total SGEs for scsi_sg_list */
4996 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
4997 /*
4998 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
4999 * to post 1 page for the SGL.
5000 */
4906 } 5001 }
4907 5002
4908 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; 5003 /* Initialize the host templates with the updated values. */
4909 dma_buf_size < max_buf_size && buf_size > dma_buf_size; 5004 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4910 dma_buf_size = dma_buf_size << 1) 5005 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4911 ; 5006
4912 if (dma_buf_size == max_buf_size) 5007 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
4913 phba->cfg_sg_seg_cnt = (dma_buf_size - 5008 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
4914 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - 5009 else
4915 (2 * sizeof(struct sli4_sge))) / 5010 phba->cfg_sg_dma_buf_size =
4916 sizeof(struct sli4_sge); 5011 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
4917 phba->cfg_sg_dma_buf_size = dma_buf_size; 5012
5013 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5014 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5015 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5016 phba->cfg_total_seg_cnt);
4918 5017
4919 /* Initialize buffer queue management fields */ 5018 /* Initialize buffer queue management fields */
4920 hbq_count = lpfc_sli_hbq_count(); 5019 hbq_count = lpfc_sli_hbq_count();
@@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5104 goto out_free_fcp_eq_hdl; 5203 goto out_free_fcp_eq_hdl;
5105 } 5204 }
5106 5205
5206 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
5207 phba->sli4_hba.num_present_cpu),
5208 GFP_KERNEL);
5209 if (!phba->sli4_hba.cpu_map) {
5210 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5211 "3327 Failed allocate memory for msi-x "
5212 "interrupt vector mapping\n");
5213 rc = -ENOMEM;
5214 goto out_free_msix;
5215 }
5216 /* Initialize io channels for round robin */
5217 cpup = phba->sli4_hba.cpu_map;
5218 rc = 0;
5219 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
5220 cpup->channel_id = rc;
5221 rc++;
5222 if (rc >= phba->cfg_fcp_io_channel)
5223 rc = 0;
5224 }
5225
5107 /* 5226 /*
5108 * Enable sr-iov virtual functions if supported and configured 5227 * Enable sr-iov virtual functions if supported and configured
5109 * through the module parameter. 5228 * through the module parameter.
@@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5123 5242
5124 return 0; 5243 return 0;
5125 5244
5245out_free_msix:
5246 kfree(phba->sli4_hba.msix_entries);
5126out_free_fcp_eq_hdl: 5247out_free_fcp_eq_hdl:
5127 kfree(phba->sli4_hba.fcp_eq_hdl); 5248 kfree(phba->sli4_hba.fcp_eq_hdl);
5128out_free_fcf_rr_bmask: 5249out_free_fcf_rr_bmask:
@@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5152{ 5273{
5153 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 5274 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5154 5275
5276 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
5277 kfree(phba->sli4_hba.cpu_map);
5278 phba->sli4_hba.num_present_cpu = 0;
5279 phba->sli4_hba.num_online_cpu = 0;
5280
5155 /* Free memory allocated for msi-x interrupt vector entries */ 5281 /* Free memory allocated for msi-x interrupt vector entries */
5156 kfree(phba->sli4_hba.msix_entries); 5282 kfree(phba->sli4_hba.msix_entries);
5157 5283
@@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5260 init_waitqueue_head(&phba->work_waitq); 5386 init_waitqueue_head(&phba->work_waitq);
5261 5387
5262 /* Initialize the scsi buffer list used by driver for scsi IO */ 5388 /* Initialize the scsi buffer list used by driver for scsi IO */
5263 spin_lock_init(&phba->scsi_buf_list_lock); 5389 spin_lock_init(&phba->scsi_buf_list_get_lock);
5264 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 5390 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5391 spin_lock_init(&phba->scsi_buf_list_put_lock);
5392 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5265 5393
5266 /* Initialize the fabric iocb list */ 5394 /* Initialize the fabric iocb list */
5267 INIT_LIST_HEAD(&phba->fabric_iocb_list); 5395 INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6696 int cfg_fcp_io_channel; 6824 int cfg_fcp_io_channel;
6697 uint32_t cpu; 6825 uint32_t cpu;
6698 uint32_t i = 0; 6826 uint32_t i = 0;
6827 uint32_t j = 0;
6699 6828
6700 6829
6701 /* 6830 /*
@@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6706 /* Sanity check on HBA EQ parameters */ 6835 /* Sanity check on HBA EQ parameters */
6707 cfg_fcp_io_channel = phba->cfg_fcp_io_channel; 6836 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6708 6837
6709 /* It doesn't make sense to have more io channels then CPUs */ 6838 /* It doesn't make sense to have more io channels then online CPUs */
6710 for_each_online_cpu(cpu) { 6839 for_each_present_cpu(cpu) {
6711 i++; 6840 if (cpu_online(cpu))
6841 i++;
6842 j++;
6712 } 6843 }
6844 phba->sli4_hba.num_online_cpu = i;
6845 phba->sli4_hba.num_present_cpu = j;
6846
6713 if (i < cfg_fcp_io_channel) { 6847 if (i < cfg_fcp_io_channel) {
6714 lpfc_printf_log(phba, 6848 lpfc_printf_log(phba,
6715 KERN_ERR, LOG_INIT, 6849 KERN_ERR, LOG_INIT,
6716 "3188 Reducing IO channels to match number of " 6850 "3188 Reducing IO channels to match number of "
6717 "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); 6851 "online CPUs: from %d to %d\n",
6852 cfg_fcp_io_channel, i);
6718 cfg_fcp_io_channel = i; 6853 cfg_fcp_io_channel = i;
6719 } 6854 }
6720 6855
@@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7743 7878
7744out: 7879out:
7745 /* Catch the not-ready port failure after a port reset. */ 7880 /* Catch the not-ready port failure after a port reset. */
7746 if (num_resets >= MAX_IF_TYPE_2_RESETS) 7881 if (num_resets >= MAX_IF_TYPE_2_RESETS) {
7882 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7883 "3317 HBA not functional: IP Reset Failed "
7884 "after (%d) retries, try: "
7885 "echo fw_reset > board_mode\n", num_resets);
7747 rc = -ENODEV; 7886 rc = -ENODEV;
7887 }
7748 7888
7749 return rc; 7889 return rc;
7750} 7890}
@@ -8209,6 +8349,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
8209} 8349}
8210 8350
8211/** 8351/**
8352 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
8353 * @phba: pointer to lpfc hba data structure.
8354 *
8355 * Find next available CPU to use for IRQ to CPU affinity.
8356 */
8357static int
8358lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
8359{
8360 struct lpfc_vector_map_info *cpup;
8361 int cpu;
8362
8363 cpup = phba->sli4_hba.cpu_map;
8364 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8365 /* CPU must be online */
8366 if (cpu_online(cpu)) {
8367 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8368 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
8369 (cpup->phys_id == phys_id)) {
8370 return cpu;
8371 }
8372 }
8373 cpup++;
8374 }
8375
8376 /*
8377 * If we get here, we have used ALL CPUs for the specific
8378 * phys_id. Now we need to clear out lpfc_used_cpu and start
8379 * reusing CPUs.
8380 */
8381
8382 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8383 if (lpfc_used_cpu[cpu] == phys_id)
8384 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
8385 }
8386
8387 cpup = phba->sli4_hba.cpu_map;
8388 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8389 /* CPU must be online */
8390 if (cpu_online(cpu)) {
8391 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8392 (cpup->phys_id == phys_id)) {
8393 return cpu;
8394 }
8395 }
8396 cpup++;
8397 }
8398 return LPFC_VECTOR_MAP_EMPTY;
8399}
8400
8401/**
8402 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
8403 * @phba: pointer to lpfc hba data structure.
8404 * @vectors: number of HBA vectors
8405 *
8406 * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
8407 * affinization across multple physical CPUs (numa nodes).
8408 * In addition, this routine will assign an IO channel for each CPU
8409 * to use when issuing I/Os.
8410 */
8411static int
8412lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8413{
8414 int i, idx, saved_chann, used_chann, cpu, phys_id;
8415 int max_phys_id, num_io_channel, first_cpu;
8416 struct lpfc_vector_map_info *cpup;
8417#ifdef CONFIG_X86
8418 struct cpuinfo_x86 *cpuinfo;
8419#endif
8420 struct cpumask *mask;
8421 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
8422
8423 /* If there is no mapping, just return */
8424 if (!phba->cfg_fcp_cpu_map)
8425 return 1;
8426
8427 /* Init cpu_map array */
8428 memset(phba->sli4_hba.cpu_map, 0xff,
8429 (sizeof(struct lpfc_vector_map_info) *
8430 phba->sli4_hba.num_present_cpu));
8431
8432 max_phys_id = 0;
8433 phys_id = 0;
8434 num_io_channel = 0;
8435 first_cpu = LPFC_VECTOR_MAP_EMPTY;
8436
8437 /* Update CPU map with physical id and core id of each CPU */
8438 cpup = phba->sli4_hba.cpu_map;
8439 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8440#ifdef CONFIG_X86
8441 cpuinfo = &cpu_data(cpu);
8442 cpup->phys_id = cpuinfo->phys_proc_id;
8443 cpup->core_id = cpuinfo->cpu_core_id;
8444#else
8445 /* No distinction between CPUs for other platforms */
8446 cpup->phys_id = 0;
8447 cpup->core_id = 0;
8448#endif
8449
8450 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8451 "3328 CPU physid %d coreid %d\n",
8452 cpup->phys_id, cpup->core_id);
8453
8454 if (cpup->phys_id > max_phys_id)
8455 max_phys_id = cpup->phys_id;
8456 cpup++;
8457 }
8458
8459 /* Now associate the HBA vectors with specific CPUs */
8460 for (idx = 0; idx < vectors; idx++) {
8461 cpup = phba->sli4_hba.cpu_map;
8462 cpu = lpfc_find_next_cpu(phba, phys_id);
8463 if (cpu == LPFC_VECTOR_MAP_EMPTY) {
8464
8465 /* Try for all phys_id's */
8466 for (i = 1; i < max_phys_id; i++) {
8467 phys_id++;
8468 if (phys_id > max_phys_id)
8469 phys_id = 0;
8470 cpu = lpfc_find_next_cpu(phba, phys_id);
8471 if (cpu == LPFC_VECTOR_MAP_EMPTY)
8472 continue;
8473 goto found;
8474 }
8475
8476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8477 "3329 Cannot set affinity:"
8478 "Error mapping vector %d (%d)\n",
8479 idx, vectors);
8480 return 0;
8481 }
8482found:
8483 cpup += cpu;
8484 if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
8485 lpfc_used_cpu[cpu] = phys_id;
8486
8487 /* Associate vector with selected CPU */
8488 cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
8489
8490 /* Associate IO channel with selected CPU */
8491 cpup->channel_id = idx;
8492 num_io_channel++;
8493
8494 if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
8495 first_cpu = cpu;
8496
8497 /* Now affinitize to the selected CPU */
8498 mask = &cpup->maskbits;
8499 cpumask_clear(mask);
8500 cpumask_set_cpu(cpu, mask);
8501 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
8502 vector, mask);
8503
8504 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8505 "3330 Set Affinity: CPU %d channel %d "
8506 "irq %d (%x)\n",
8507 cpu, cpup->channel_id,
8508 phba->sli4_hba.msix_entries[idx].vector, i);
8509
8510 /* Spread vector mapping across multple physical CPU nodes */
8511 phys_id++;
8512 if (phys_id > max_phys_id)
8513 phys_id = 0;
8514 }
8515
8516 /*
8517 * Finally fill in the IO channel for any remaining CPUs.
8518 * At this point, all IO channels have been assigned to a specific
8519 * MSIx vector, mapped to a specific CPU.
8520 * Base the remaining IO channel assigned, to IO channels already
8521 * assigned to other CPUs on the same phys_id.
8522 */
8523 for (i = 0; i <= max_phys_id; i++) {
8524 /*
8525 * If there are no io channels already mapped to
8526 * this phys_id, just round robin thru the io_channels.
8527 * Setup chann[] for round robin.
8528 */
8529 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8530 chann[idx] = idx;
8531
8532 saved_chann = 0;
8533 used_chann = 0;
8534
8535 /*
8536 * First build a list of IO channels already assigned
8537 * to this phys_id before reassigning the same IO
8538 * channels to the remaining CPUs.
8539 */
8540 cpup = phba->sli4_hba.cpu_map;
8541 cpu = first_cpu;
8542 cpup += cpu;
8543 for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
8544 idx++) {
8545 if (cpup->phys_id == i) {
8546 /*
8547 * Save any IO channels that are
8548 * already mapped to this phys_id.
8549 */
8550 if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
8551 chann[saved_chann] =
8552 cpup->channel_id;
8553 saved_chann++;
8554 goto out;
8555 }
8556
8557 /* See if we are using round-robin */
8558 if (saved_chann == 0)
8559 saved_chann =
8560 phba->cfg_fcp_io_channel;
8561
8562 /* Associate next IO channel with CPU */
8563 cpup->channel_id = chann[used_chann];
8564 num_io_channel++;
8565 used_chann++;
8566 if (used_chann == saved_chann)
8567 used_chann = 0;
8568
8569 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8570 "3331 Set IO_CHANN "
8571 "CPU %d channel %d\n",
8572 idx, cpup->channel_id);
8573 }
8574out:
8575 cpu++;
8576 if (cpu >= phba->sli4_hba.num_present_cpu) {
8577 cpup = phba->sli4_hba.cpu_map;
8578 cpu = 0;
8579 } else {
8580 cpup++;
8581 }
8582 }
8583 }
8584
8585 if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
8586 cpup = phba->sli4_hba.cpu_map;
8587 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
8588 if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
8589 cpup->channel_id = 0;
8590 num_io_channel++;
8591
8592 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8593 "3332 Assign IO_CHANN "
8594 "CPU %d channel %d\n",
8595 idx, cpup->channel_id);
8596 }
8597 cpup++;
8598 }
8599 }
8600
8601 /* Sanity check */
8602 if (num_io_channel != phba->sli4_hba.num_present_cpu)
8603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8604 "3333 Set affinity mismatch:"
8605 "%d chann != %d cpus: %d vactors\n",
8606 num_io_channel, phba->sli4_hba.num_present_cpu,
8607 vectors);
8608
8609 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
8610 return 1;
8611}
8612
8613
8614/**
8212 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 8615 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
8213 * @phba: pointer to lpfc hba data structure. 8616 * @phba: pointer to lpfc hba data structure.
8214 * 8617 *
@@ -8259,9 +8662,7 @@ enable_msix_vectors:
8259 phba->sli4_hba.msix_entries[index].vector, 8662 phba->sli4_hba.msix_entries[index].vector,
8260 phba->sli4_hba.msix_entries[index].entry); 8663 phba->sli4_hba.msix_entries[index].entry);
8261 8664
8262 /* 8665 /* Assign MSI-X vectors to interrupt handlers */
8263 * Assign MSI-X vectors to interrupt handlers
8264 */
8265 for (index = 0; index < vectors; index++) { 8666 for (index = 0; index < vectors; index++) {
8266 memset(&phba->sli4_hba.handler_name[index], 0, 16); 8667 memset(&phba->sli4_hba.handler_name[index], 0, 16);
8267 sprintf((char *)&phba->sli4_hba.handler_name[index], 8668 sprintf((char *)&phba->sli4_hba.handler_name[index],
@@ -8289,6 +8690,8 @@ enable_msix_vectors:
8289 phba->cfg_fcp_io_channel, vectors); 8690 phba->cfg_fcp_io_channel, vectors);
8290 phba->cfg_fcp_io_channel = vectors; 8691 phba->cfg_fcp_io_channel = vectors;
8291 } 8692 }
8693
8694 lpfc_sli4_set_affinity(phba, vectors);
8292 return rc; 8695 return rc;
8293 8696
8294cfg_fail_out: 8697cfg_fail_out:
@@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
9213 /* Block all SCSI devices' I/Os on the host */ 9616 /* Block all SCSI devices' I/Os on the host */
9214 lpfc_scsi_dev_block(phba); 9617 lpfc_scsi_dev_block(phba);
9215 9618
9619 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9620 lpfc_sli_flush_fcp_rings(phba);
9621
9216 /* stop all timers */ 9622 /* stop all timers */
9217 lpfc_stop_hba_timers(phba); 9623 lpfc_stop_hba_timers(phba);
9218 9624
9219 /* Disable interrupt and pci device */ 9625 /* Disable interrupt and pci device */
9220 lpfc_sli_disable_intr(phba); 9626 lpfc_sli_disable_intr(phba);
9221 pci_disable_device(phba->pcidev); 9627 pci_disable_device(phba->pcidev);
9222
9223 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9224 lpfc_sli_flush_fcp_rings(phba);
9225} 9628}
9226 9629
9227/** 9630/**
@@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9966 /* Block all SCSI devices' I/Os on the host */ 10369 /* Block all SCSI devices' I/Os on the host */
9967 lpfc_scsi_dev_block(phba); 10370 lpfc_scsi_dev_block(phba);
9968 10371
10372 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
10373 lpfc_sli_flush_fcp_rings(phba);
10374
9969 /* stop all timers */ 10375 /* stop all timers */
9970 lpfc_stop_hba_timers(phba); 10376 lpfc_stop_hba_timers(phba);
9971 10377
@@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9973 lpfc_sli4_disable_intr(phba); 10379 lpfc_sli4_disable_intr(phba);
9974 lpfc_sli4_queue_destroy(phba); 10380 lpfc_sli4_queue_destroy(phba);
9975 pci_disable_device(phba->pcidev); 10381 pci_disable_device(phba->pcidev);
9976
9977 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9978 lpfc_sli_flush_fcp_rings(phba);
9979} 10382}
9980 10383
9981/** 10384/**
@@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = {
10535static int __init 10938static int __init
10536lpfc_init(void) 10939lpfc_init(void)
10537{ 10940{
10941 int cpu;
10538 int error = 0; 10942 int error = 0;
10539 10943
10540 printk(LPFC_MODULE_DESC "\n"); 10944 printk(LPFC_MODULE_DESC "\n");
@@ -10561,6 +10965,11 @@ lpfc_init(void)
10561 return -ENOMEM; 10965 return -ENOMEM;
10562 } 10966 }
10563 } 10967 }
10968
10969 /* Initialize in case vector mapping is needed */
10970 for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++)
10971 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
10972
10564 error = pci_register_driver(&lpfc_driver); 10973 error = pci_register_driver(&lpfc_driver);
10565 if (error) { 10974 if (error) {
10566 fc_release_transport(lpfc_transport_template); 10975 fc_release_transport(lpfc_transport_template);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index baf53e6c2bd1..2a4e5d21eab2 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -37,6 +37,7 @@
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ 37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_FIP 0x00020000 /* FIP events */ 38#define LOG_FIP 0x00020000 /* FIP events */
39#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ 39#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
40#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */
40#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 41#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
41 42
42#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 43#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index a7a9fa468308..41363db7d426 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
2149 2149
2150 /* Only FC supports upd bit */ 2150 /* Only FC supports upd bit */
2151 if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) && 2151 if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
2152 (vport->fc_flag & FC_VFI_REGISTERED)) { 2152 (vport->fc_flag & FC_VFI_REGISTERED) &&
2153 (!phba->fc_topology_changed)) {
2153 bf_set(lpfc_reg_vfi_vp, reg_vfi, 0); 2154 bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
2154 bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); 2155 bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
2155 } 2156 }
2156 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, 2157 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
2157 "3134 Register VFI, mydid:x%x, fcfi:%d, " 2158 "3134 Register VFI, mydid:x%x, fcfi:%d, "
2158 " vfi:%d, vpi:%d, fc_pname:%x%x\n", 2159 " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
2160 " port_state:x%x topology chg:%d\n",
2159 vport->fc_myDID, 2161 vport->fc_myDID,
2160 phba->fcf.fcfi, 2162 phba->fcf.fcfi,
2161 phba->sli4_hba.vfi_ids[vport->vfi], 2163 phba->sli4_hba.vfi_ids[vport->vfi],
2162 phba->vpi_ids[vport->vpi], 2164 phba->vpi_ids[vport->vpi],
2163 reg_vfi->wwn[0], reg_vfi->wwn[1]); 2165 reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
2166 vport->port_state, phba->fc_topology_changed);
2164} 2167}
2165 2168
2166/** 2169/**
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cd86069a0ba8..812d0cd7c86d 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
65 int i; 65 int i;
66 66
67 if (phba->sli_rev == LPFC_SLI_REV4) 67 if (phba->sli_rev == LPFC_SLI_REV4) {
68 /* Calculate alignment */
69 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
70 i = phba->cfg_sg_dma_buf_size;
71 else
72 i = SLI4_PAGE_SIZE;
73
68 phba->lpfc_scsi_dma_buf_pool = 74 phba->lpfc_scsi_dma_buf_pool =
69 pci_pool_create("lpfc_scsi_dma_buf_pool", 75 pci_pool_create("lpfc_scsi_dma_buf_pool",
70 phba->pcidev, 76 phba->pcidev,
71 phba->cfg_sg_dma_buf_size, 77 phba->cfg_sg_dma_buf_size,
72 phba->cfg_sg_dma_buf_size, 78 i,
73 0); 79 0);
74 else 80 } else {
75 phba->lpfc_scsi_dma_buf_pool = 81 phba->lpfc_scsi_dma_buf_pool =
76 pci_pool_create("lpfc_scsi_dma_buf_pool", 82 pci_pool_create("lpfc_scsi_dma_buf_pool",
77 phba->pcidev, phba->cfg_sg_dma_buf_size, 83 phba->pcidev, phba->cfg_sg_dma_buf_size,
78 align, 0); 84 align, 0);
85 }
86
79 if (!phba->lpfc_scsi_dma_buf_pool) 87 if (!phba->lpfc_scsi_dma_buf_pool)
80 goto fail; 88 goto fail;
81 89
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 82f4d3542289..31e9b92f5a9b 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
332 332
333 /* PLOGI chkparm OK */ 333 /* PLOGI chkparm OK */
334 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 334 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
335 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", 335 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
336 "x%x x%x x%x\n",
336 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, 337 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
337 ndlp->nlp_rpi); 338 ndlp->nlp_rpi, vport->port_state,
339 vport->fc_flag);
338 340
339 if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) 341 if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
340 ndlp->nlp_fcp_info |= CLASS2; 342 ndlp->nlp_fcp_info |= CLASS2;
@@ -574,7 +576,7 @@ out:
574 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 576 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
575 577
576 /* 1 sec timeout */ 578 /* 1 sec timeout */
577 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 579 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
578 580
579 spin_lock_irq(shost->host_lock); 581 spin_lock_irq(shost->host_lock);
580 ndlp->nlp_flag |= NLP_DELAY_TMO; 582 ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
631 * If there are other active VLinks present, 633 * If there are other active VLinks present,
632 * re-instantiate the Vlink using FDISC. 634 * re-instantiate the Vlink using FDISC.
633 */ 635 */
634 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 636 mod_timer(&ndlp->nlp_delayfunc,
637 jiffies + msecs_to_jiffies(1000));
635 spin_lock_irq(shost->host_lock); 638 spin_lock_irq(shost->host_lock);
636 ndlp->nlp_flag |= NLP_DELAY_TMO; 639 ndlp->nlp_flag |= NLP_DELAY_TMO;
637 spin_unlock_irq(shost->host_lock); 640 spin_unlock_irq(shost->host_lock);
@@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
648 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 651 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
649 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 652 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
650 /* Only try to re-login if this is NOT a Fabric Node */ 653 /* Only try to re-login if this is NOT a Fabric Node */
651 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 654 mod_timer(&ndlp->nlp_delayfunc,
655 jiffies + msecs_to_jiffies(1000 * 1));
652 spin_lock_irq(shost->host_lock); 656 spin_lock_irq(shost->host_lock);
653 ndlp->nlp_flag |= NLP_DELAY_TMO; 657 ndlp->nlp_flag |= NLP_DELAY_TMO;
654 spin_unlock_irq(shost->host_lock); 658 spin_unlock_irq(shost->host_lock);
@@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
969 } 973 }
970 974
971 /* Put ndlp in npr state set plogi timer for 1 sec */ 975 /* Put ndlp in npr state set plogi timer for 1 sec */
972 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 976 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
973 spin_lock_irq(shost->host_lock); 977 spin_lock_irq(shost->host_lock);
974 ndlp->nlp_flag |= NLP_DELAY_TMO; 978 ndlp->nlp_flag |= NLP_DELAY_TMO;
975 spin_unlock_irq(shost->host_lock); 979 spin_unlock_irq(shost->host_lock);
@@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1303 if ((irsp->ulpStatus) || 1307 if ((irsp->ulpStatus) ||
1304 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { 1308 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1305 /* 1 sec timeout */ 1309 /* 1 sec timeout */
1306 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 1310 mod_timer(&ndlp->nlp_delayfunc,
1311 jiffies + msecs_to_jiffies(1000));
1307 spin_lock_irq(shost->host_lock); 1312 spin_lock_irq(shost->host_lock);
1308 ndlp->nlp_flag |= NLP_DELAY_TMO; 1313 ndlp->nlp_flag |= NLP_DELAY_TMO;
1309 spin_unlock_irq(shost->host_lock); 1314 spin_unlock_irq(shost->host_lock);
@@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1509 } 1514 }
1510 1515
1511 /* Put ndlp in npr state set plogi timer for 1 sec */ 1516 /* Put ndlp in npr state set plogi timer for 1 sec */
1512 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1517 mod_timer(&ndlp->nlp_delayfunc,
1518 jiffies + msecs_to_jiffies(1000 * 1));
1513 spin_lock_irq(shost->host_lock); 1519 spin_lock_irq(shost->host_lock);
1514 ndlp->nlp_flag |= NLP_DELAY_TMO; 1520 ndlp->nlp_flag |= NLP_DELAY_TMO;
1515 spin_unlock_irq(shost->host_lock); 1521 spin_unlock_irq(shost->host_lock);
@@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2145 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 2151 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2146 2152
2147 if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { 2153 if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2148 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 2154 mod_timer(&ndlp->nlp_delayfunc,
2155 jiffies + msecs_to_jiffies(1000 * 1));
2149 spin_lock_irq(shost->host_lock); 2156 spin_lock_irq(shost->host_lock);
2150 ndlp->nlp_flag |= NLP_DELAY_TMO; 2157 ndlp->nlp_flag |= NLP_DELAY_TMO;
2151 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2158 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 74b8710e1e90..8523b278ec9d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -24,6 +24,8 @@
24#include <linux/export.h> 24#include <linux/export.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <asm/unaligned.h> 26#include <asm/unaligned.h>
27#include <linux/crc-t10dif.h>
28#include <net/checksum.h>
27 29
28#include <scsi/scsi.h> 30#include <scsi/scsi.h>
29#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
@@ -48,7 +50,7 @@
48#define LPFC_RESET_WAIT 2 50#define LPFC_RESET_WAIT 2
49#define LPFC_ABORT_WAIT 2 51#define LPFC_ABORT_WAIT 2
50 52
51int _dump_buf_done; 53int _dump_buf_done = 1;
52 54
53static char *dif_op_str[] = { 55static char *dif_op_str[] = {
54 "PROT_NORMAL", 56 "PROT_NORMAL",
@@ -66,6 +68,10 @@ struct scsi_dif_tuple {
66 __be32 ref_tag; /* Target LBA or indirect LBA */ 68 __be32 ref_tag; /* Target LBA or indirect LBA */
67}; 69};
68 70
71#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK)
72#define scsi_prot_flagged(sc, flg) sc
73#endif
74
69static void 75static void
70lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 76lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
71static void 77static void
@@ -534,7 +540,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
534 dma_addr_t pdma_phys_fcp_rsp; 540 dma_addr_t pdma_phys_fcp_rsp;
535 dma_addr_t pdma_phys_bpl; 541 dma_addr_t pdma_phys_bpl;
536 uint16_t iotag; 542 uint16_t iotag;
537 int bcnt; 543 int bcnt, bpl_size;
544
545 bpl_size = phba->cfg_sg_dma_buf_size -
546 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
547
548 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
549 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
550 num_to_alloc, phba->cfg_sg_dma_buf_size,
551 (int)sizeof(struct fcp_cmnd),
552 (int)sizeof(struct fcp_rsp), bpl_size);
538 553
539 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 554 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
540 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 555 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -759,7 +774,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
759 struct list_head *post_sblist, int sb_count) 774 struct list_head *post_sblist, int sb_count)
760{ 775{
761 struct lpfc_scsi_buf *psb, *psb_next; 776 struct lpfc_scsi_buf *psb, *psb_next;
762 int status; 777 int status, sgl_size;
763 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; 778 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
764 dma_addr_t pdma_phys_bpl1; 779 dma_addr_t pdma_phys_bpl1;
765 int last_xritag = NO_XRI; 780 int last_xritag = NO_XRI;
@@ -771,6 +786,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
771 if (sb_count <= 0) 786 if (sb_count <= 0)
772 return -EINVAL; 787 return -EINVAL;
773 788
789 sgl_size = phba->cfg_sg_dma_buf_size -
790 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
791
774 list_for_each_entry_safe(psb, psb_next, post_sblist, list) { 792 list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
775 list_del_init(&psb->list); 793 list_del_init(&psb->list);
776 block_cnt++; 794 block_cnt++;
@@ -803,7 +821,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
803 post_cnt = block_cnt; 821 post_cnt = block_cnt;
804 } else if (block_cnt == 1) { 822 } else if (block_cnt == 1) {
805 /* last single sgl with non-contiguous xri */ 823 /* last single sgl with non-contiguous xri */
806 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 824 if (sgl_size > SGL_PAGE_SIZE)
807 pdma_phys_bpl1 = psb->dma_phys_bpl + 825 pdma_phys_bpl1 = psb->dma_phys_bpl +
808 SGL_PAGE_SIZE; 826 SGL_PAGE_SIZE;
809 else 827 else
@@ -885,9 +903,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
885 int num_posted, rc = 0; 903 int num_posted, rc = 0;
886 904
887 /* get all SCSI buffers need to repost to a local list */ 905 /* get all SCSI buffers need to repost to a local list */
888 spin_lock_irq(&phba->scsi_buf_list_lock); 906 spin_lock_irq(&phba->scsi_buf_list_get_lock);
889 list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist); 907 spin_lock_irq(&phba->scsi_buf_list_put_lock);
890 spin_unlock_irq(&phba->scsi_buf_list_lock); 908 list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
909 list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
910 spin_unlock_irq(&phba->scsi_buf_list_put_lock);
911 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
891 912
892 /* post the list of scsi buffer sgls to port if available */ 913 /* post the list of scsi buffer sgls to port if available */
893 if (!list_empty(&post_sblist)) { 914 if (!list_empty(&post_sblist)) {
@@ -923,13 +944,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
923 IOCB_t *iocb; 944 IOCB_t *iocb;
924 dma_addr_t pdma_phys_fcp_cmd; 945 dma_addr_t pdma_phys_fcp_cmd;
925 dma_addr_t pdma_phys_fcp_rsp; 946 dma_addr_t pdma_phys_fcp_rsp;
926 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; 947 dma_addr_t pdma_phys_bpl;
927 uint16_t iotag, lxri = 0; 948 uint16_t iotag, lxri = 0;
928 int bcnt, num_posted; 949 int bcnt, num_posted, sgl_size;
929 LIST_HEAD(prep_sblist); 950 LIST_HEAD(prep_sblist);
930 LIST_HEAD(post_sblist); 951 LIST_HEAD(post_sblist);
931 LIST_HEAD(scsi_sblist); 952 LIST_HEAD(scsi_sblist);
932 953
954 sgl_size = phba->cfg_sg_dma_buf_size -
955 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
956
957 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
958 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
959 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
960 (int)sizeof(struct fcp_cmnd),
961 (int)sizeof(struct fcp_rsp));
962
933 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 963 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
934 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 964 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
935 if (!psb) 965 if (!psb)
@@ -948,6 +978,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
948 } 978 }
949 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 979 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
950 980
981 /* Page alignment is CRITICAL, double check to be sure */
982 if (((unsigned long)(psb->data) &
983 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
984 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
985 psb->data, psb->dma_handle);
986 kfree(psb);
987 break;
988 }
989
951 /* Allocate iotag for psb->cur_iocbq. */ 990 /* Allocate iotag for psb->cur_iocbq. */
952 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 991 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
953 if (iotag == 0) { 992 if (iotag == 0) {
@@ -968,17 +1007,14 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
968 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 1007 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
969 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 1008 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
970 psb->fcp_bpl = psb->data; 1009 psb->fcp_bpl = psb->data;
971 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) 1010 psb->fcp_cmnd = (psb->data + sgl_size);
972 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
973 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + 1011 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
974 sizeof(struct fcp_cmnd)); 1012 sizeof(struct fcp_cmnd));
975 1013
976 /* Initialize local short-hand pointers. */ 1014 /* Initialize local short-hand pointers. */
977 sgl = (struct sli4_sge *)psb->fcp_bpl; 1015 sgl = (struct sli4_sge *)psb->fcp_bpl;
978 pdma_phys_bpl = psb->dma_handle; 1016 pdma_phys_bpl = psb->dma_handle;
979 pdma_phys_fcp_cmd = 1017 pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
980 (psb->dma_handle + phba->cfg_sg_dma_buf_size)
981 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
982 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); 1018 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
983 1019
984 /* 1020 /*
@@ -1020,17 +1056,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
1020 iocb->ulpLe = 1; 1056 iocb->ulpLe = 1;
1021 iocb->ulpClass = CLASS3; 1057 iocb->ulpClass = CLASS3;
1022 psb->cur_iocbq.context1 = psb; 1058 psb->cur_iocbq.context1 = psb;
1023 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
1024 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
1025 else
1026 pdma_phys_bpl1 = 0;
1027 psb->dma_phys_bpl = pdma_phys_bpl; 1059 psb->dma_phys_bpl = pdma_phys_bpl;
1028 1060
1029 /* add the scsi buffer to a post list */ 1061 /* add the scsi buffer to a post list */
1030 list_add_tail(&psb->list, &post_sblist); 1062 list_add_tail(&psb->list, &post_sblist);
1031 spin_lock_irq(&phba->scsi_buf_list_lock); 1063 spin_lock_irq(&phba->scsi_buf_list_get_lock);
1032 phba->sli4_hba.scsi_xri_cnt++; 1064 phba->sli4_hba.scsi_xri_cnt++;
1033 spin_unlock_irq(&phba->scsi_buf_list_lock); 1065 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
1034 } 1066 }
1035 lpfc_printf_log(phba, KERN_INFO, LOG_BG, 1067 lpfc_printf_log(phba, KERN_INFO, LOG_BG,
1036 "3021 Allocate %d out of %d requested new SCSI " 1068 "3021 Allocate %d out of %d requested new SCSI "
@@ -1079,17 +1111,23 @@ static struct lpfc_scsi_buf*
1079lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1111lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1080{ 1112{
1081 struct lpfc_scsi_buf * lpfc_cmd = NULL; 1113 struct lpfc_scsi_buf * lpfc_cmd = NULL;
1082 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; 1114 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
1083 unsigned long iflag = 0; 1115 unsigned long gflag = 0;
1084 1116 unsigned long pflag = 0;
1085 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 1117
1086 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); 1118 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
1087 if (lpfc_cmd) { 1119 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
1088 lpfc_cmd->seg_cnt = 0; 1120 list);
1089 lpfc_cmd->nonsg_phys = 0; 1121 if (!lpfc_cmd) {
1090 lpfc_cmd->prot_seg_cnt = 0; 1122 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
1123 list_splice(&phba->lpfc_scsi_buf_list_put,
1124 &phba->lpfc_scsi_buf_list_get);
1125 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1126 list_remove_head(scsi_buf_list_get, lpfc_cmd,
1127 struct lpfc_scsi_buf, list);
1128 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
1091 } 1129 }
1092 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 1130 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
1093 return lpfc_cmd; 1131 return lpfc_cmd;
1094} 1132}
1095/** 1133/**
@@ -1107,28 +1145,39 @@ static struct lpfc_scsi_buf*
1107lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1145lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1108{ 1146{
1109 struct lpfc_scsi_buf *lpfc_cmd ; 1147 struct lpfc_scsi_buf *lpfc_cmd ;
1110 unsigned long iflag = 0; 1148 unsigned long gflag = 0;
1149 unsigned long pflag = 0;
1111 int found = 0; 1150 int found = 0;
1112 1151
1113 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 1152 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
1114 list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, 1153 list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) {
1115 list) {
1116 if (lpfc_test_rrq_active(phba, ndlp, 1154 if (lpfc_test_rrq_active(phba, ndlp,
1117 lpfc_cmd->cur_iocbq.sli4_lxritag)) 1155 lpfc_cmd->cur_iocbq.sli4_lxritag))
1118 continue; 1156 continue;
1119 list_del(&lpfc_cmd->list); 1157 list_del(&lpfc_cmd->list);
1120 found = 1; 1158 found = 1;
1121 lpfc_cmd->seg_cnt = 0;
1122 lpfc_cmd->nonsg_phys = 0;
1123 lpfc_cmd->prot_seg_cnt = 0;
1124 break; 1159 break;
1125 } 1160 }
1126 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, 1161 if (!found) {
1127 iflag); 1162 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
1163 list_splice(&phba->lpfc_scsi_buf_list_put,
1164 &phba->lpfc_scsi_buf_list_get);
1165 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1166 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
1167 list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get,
1168 list) {
1169 if (lpfc_test_rrq_active(
1170 phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
1171 continue;
1172 list_del(&lpfc_cmd->list);
1173 found = 1;
1174 break;
1175 }
1176 }
1177 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
1128 if (!found) 1178 if (!found)
1129 return NULL; 1179 return NULL;
1130 else 1180 return lpfc_cmd;
1131 return lpfc_cmd;
1132} 1181}
1133/** 1182/**
1134 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 1183 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1160,10 +1209,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1160{ 1209{
1161 unsigned long iflag = 0; 1210 unsigned long iflag = 0;
1162 1211
1163 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 1212 psb->seg_cnt = 0;
1213 psb->nonsg_phys = 0;
1214 psb->prot_seg_cnt = 0;
1215
1216 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1164 psb->pCmd = NULL; 1217 psb->pCmd = NULL;
1165 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); 1218 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1166 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 1219 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1220 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1167} 1221}
1168 1222
1169/** 1223/**
@@ -1181,6 +1235,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1181{ 1235{
1182 unsigned long iflag = 0; 1236 unsigned long iflag = 0;
1183 1237
1238 psb->seg_cnt = 0;
1239 psb->nonsg_phys = 0;
1240 psb->prot_seg_cnt = 0;
1241
1184 if (psb->exch_busy) { 1242 if (psb->exch_busy) {
1185 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, 1243 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
1186 iflag); 1244 iflag);
@@ -1190,11 +1248,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1190 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, 1248 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
1191 iflag); 1249 iflag);
1192 } else { 1250 } else {
1193
1194 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
1195 psb->pCmd = NULL; 1251 psb->pCmd = NULL;
1196 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); 1252 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1197 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 1253 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1254 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1255 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1198 } 1256 }
1199} 1257}
1200 1258
@@ -1268,6 +1326,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1268 "dma_map_sg. Config %d, seg_cnt %d\n", 1326 "dma_map_sg. Config %d, seg_cnt %d\n",
1269 __func__, phba->cfg_sg_seg_cnt, 1327 __func__, phba->cfg_sg_seg_cnt,
1270 lpfc_cmd->seg_cnt); 1328 lpfc_cmd->seg_cnt);
1329 lpfc_cmd->seg_cnt = 0;
1271 scsi_dma_unmap(scsi_cmnd); 1330 scsi_dma_unmap(scsi_cmnd);
1272 return 1; 1331 return 1;
1273 } 1332 }
@@ -2013,9 +2072,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2013 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 2072 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2014 bf_set(pde6_optx, pde6, txop); 2073 bf_set(pde6_optx, pde6, txop);
2015 bf_set(pde6_oprx, pde6, rxop); 2074 bf_set(pde6_oprx, pde6, rxop);
2075
2076 /*
2077 * We only need to check the data on READs, for WRITEs
2078 * protection data is automatically generated, not checked.
2079 */
2016 if (datadir == DMA_FROM_DEVICE) { 2080 if (datadir == DMA_FROM_DEVICE) {
2017 bf_set(pde6_ce, pde6, checking); 2081 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2018 bf_set(pde6_re, pde6, checking); 2082 bf_set(pde6_ce, pde6, checking);
2083 else
2084 bf_set(pde6_ce, pde6, 0);
2085
2086 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2087 bf_set(pde6_re, pde6, checking);
2088 else
2089 bf_set(pde6_re, pde6, 0);
2019 } 2090 }
2020 bf_set(pde6_ai, pde6, 1); 2091 bf_set(pde6_ai, pde6, 1);
2021 bf_set(pde6_ae, pde6, 0); 2092 bf_set(pde6_ae, pde6, 0);
@@ -2145,6 +2216,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2145 2216
2146 split_offset = 0; 2217 split_offset = 0;
2147 do { 2218 do {
2219 /* Check to see if we ran out of space */
2220 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
2221 return num_bde + 3;
2222
2148 /* setup PDE5 with what we have */ 2223 /* setup PDE5 with what we have */
2149 pde5 = (struct lpfc_pde5 *) bpl; 2224 pde5 = (struct lpfc_pde5 *) bpl;
2150 memset(pde5, 0, sizeof(struct lpfc_pde5)); 2225 memset(pde5, 0, sizeof(struct lpfc_pde5));
@@ -2164,8 +2239,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2164 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 2239 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2165 bf_set(pde6_optx, pde6, txop); 2240 bf_set(pde6_optx, pde6, txop);
2166 bf_set(pde6_oprx, pde6, rxop); 2241 bf_set(pde6_oprx, pde6, rxop);
2167 bf_set(pde6_ce, pde6, checking); 2242
2168 bf_set(pde6_re, pde6, checking); 2243 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2244 bf_set(pde6_ce, pde6, checking);
2245 else
2246 bf_set(pde6_ce, pde6, 0);
2247
2248 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2249 bf_set(pde6_re, pde6, checking);
2250 else
2251 bf_set(pde6_re, pde6, 0);
2252
2169 bf_set(pde6_ai, pde6, 1); 2253 bf_set(pde6_ai, pde6, 1);
2170 bf_set(pde6_ae, pde6, 0); 2254 bf_set(pde6_ae, pde6, 0);
2171 bf_set(pde6_apptagval, pde6, 0); 2255 bf_set(pde6_apptagval, pde6, 0);
@@ -2213,6 +2297,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2213 pgdone = 0; 2297 pgdone = 0;
2214 subtotal = 0; /* total bytes processed for current prot grp */ 2298 subtotal = 0; /* total bytes processed for current prot grp */
2215 while (!pgdone) { 2299 while (!pgdone) {
2300 /* Check to see if we ran out of space */
2301 if (num_bde >= phba->cfg_total_seg_cnt)
2302 return num_bde + 1;
2303
2216 if (!sgde) { 2304 if (!sgde) {
2217 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2305 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2218 "9065 BLKGRD:%s Invalid data segment\n", 2306 "9065 BLKGRD:%s Invalid data segment\n",
@@ -2324,7 +2412,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2324 struct sli4_sge_diseed *diseed = NULL; 2412 struct sli4_sge_diseed *diseed = NULL;
2325 dma_addr_t physaddr; 2413 dma_addr_t physaddr;
2326 int i = 0, num_sge = 0, status; 2414 int i = 0, num_sge = 0, status;
2327 int datadir = sc->sc_data_direction;
2328 uint32_t reftag; 2415 uint32_t reftag;
2329 unsigned blksize; 2416 unsigned blksize;
2330 uint8_t txop, rxop; 2417 uint8_t txop, rxop;
@@ -2362,13 +2449,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2362 diseed->ref_tag = cpu_to_le32(reftag); 2449 diseed->ref_tag = cpu_to_le32(reftag);
2363 diseed->ref_tag_tran = diseed->ref_tag; 2450 diseed->ref_tag_tran = diseed->ref_tag;
2364 2451
2452 /*
2453 * We only need to check the data on READs, for WRITEs
2454 * protection data is automatically generated, not checked.
2455 */
2456 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2457 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2458 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2459 else
2460 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2461
2462 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2463 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2464 else
2465 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2466 }
2467
2365 /* setup DISEED with the rest of the info */ 2468 /* setup DISEED with the rest of the info */
2366 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2469 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2367 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2470 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2368 if (datadir == DMA_FROM_DEVICE) { 2471
2369 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2370 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2371 }
2372 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2472 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2373 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2473 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2374 2474
@@ -2497,6 +2597,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2497 2597
2498 split_offset = 0; 2598 split_offset = 0;
2499 do { 2599 do {
2600 /* Check to see if we ran out of space */
2601 if (num_sge >= (phba->cfg_total_seg_cnt - 2))
2602 return num_sge + 3;
2603
2500 /* setup DISEED with what we have */ 2604 /* setup DISEED with what we have */
2501 diseed = (struct sli4_sge_diseed *) sgl; 2605 diseed = (struct sli4_sge_diseed *) sgl;
2502 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2606 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
@@ -2506,11 +2610,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2506 diseed->ref_tag = cpu_to_le32(reftag); 2610 diseed->ref_tag = cpu_to_le32(reftag);
2507 diseed->ref_tag_tran = diseed->ref_tag; 2611 diseed->ref_tag_tran = diseed->ref_tag;
2508 2612
2613 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) {
2614 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2615
2616 } else {
2617 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2618 /*
2619 * When in this mode, the hardware will replace
2620 * the guard tag from the host with a
2621 * newly generated good CRC for the wire.
2622 * Switch to raw mode here to avoid this
2623 * behavior. What the host sends gets put on the wire.
2624 */
2625 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2626 txop = BG_OP_RAW_MODE;
2627 rxop = BG_OP_RAW_MODE;
2628 }
2629 }
2630
2631
2632 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2633 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2634 else
2635 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2636
2509 /* setup DISEED with the rest of the info */ 2637 /* setup DISEED with the rest of the info */
2510 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2638 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2511 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2639 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2512 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2640
2513 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2514 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2641 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2515 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2642 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2516 2643
@@ -2556,6 +2683,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2556 pgdone = 0; 2683 pgdone = 0;
2557 subtotal = 0; /* total bytes processed for current prot grp */ 2684 subtotal = 0; /* total bytes processed for current prot grp */
2558 while (!pgdone) { 2685 while (!pgdone) {
2686 /* Check to see if we ran out of space */
2687 if (num_sge >= phba->cfg_total_seg_cnt)
2688 return num_sge + 1;
2689
2559 if (!sgde) { 2690 if (!sgde) {
2560 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2691 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2561 "9086 BLKGRD:%s Invalid data segment\n", 2692 "9086 BLKGRD:%s Invalid data segment\n",
@@ -2670,6 +2801,47 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2670} 2801}
2671 2802
2672/** 2803/**
2804 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2805 * @phba: The Hba for which this call is being executed.
2806 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2807 *
2808 * Adjust the data length to account for how much data
2809 * is actually on the wire.
2810 *
2811 * returns the adjusted data length
2812 **/
2813static int
2814lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2815 struct lpfc_scsi_buf *lpfc_cmd)
2816{
2817 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2818 int fcpdl;
2819
2820 fcpdl = scsi_bufflen(sc);
2821
2822 /* Check if there is protection data on the wire */
2823 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2824 /* Read */
2825 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2826 return fcpdl;
2827
2828 } else {
2829 /* Write */
2830 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2831 return fcpdl;
2832 }
2833
2834 /*
2835 * If we are in DIF Type 1 mode every data block has a 8 byte
2836 * DIF (trailer) attached to it. Must ajust FCP data length.
2837 */
2838 if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI))
2839 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2840
2841 return fcpdl;
2842}
2843
2844/**
2673 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 2845 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2674 * @phba: The Hba for which this call is being executed. 2846 * @phba: The Hba for which this call is being executed.
2675 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 2847 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
@@ -2689,8 +2861,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2689 uint32_t num_bde = 0; 2861 uint32_t num_bde = 0;
2690 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 2862 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2691 int prot_group_type = 0; 2863 int prot_group_type = 0;
2692 int diflen, fcpdl; 2864 int fcpdl;
2693 unsigned blksize;
2694 2865
2695 /* 2866 /*
2696 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd 2867 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2711,28 +2882,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2711 return 1; 2882 return 1;
2712 2883
2713 lpfc_cmd->seg_cnt = datasegcnt; 2884 lpfc_cmd->seg_cnt = datasegcnt;
2714 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 2885
2715 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2886 /* First check if data segment count from SCSI Layer is good */
2716 "9067 BLKGRD: %s: Too many sg segments" 2887 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
2717 " from dma_map_sg. Config %d, seg_cnt" 2888 goto err;
2718 " %d\n",
2719 __func__, phba->cfg_sg_seg_cnt,
2720 lpfc_cmd->seg_cnt);
2721 scsi_dma_unmap(scsi_cmnd);
2722 return 1;
2723 }
2724 2889
2725 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 2890 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2726 2891
2727 switch (prot_group_type) { 2892 switch (prot_group_type) {
2728 case LPFC_PG_TYPE_NO_DIF: 2893 case LPFC_PG_TYPE_NO_DIF:
2894
2895 /* Here we need to add a PDE5 and PDE6 to the count */
2896 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
2897 goto err;
2898
2729 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 2899 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2730 datasegcnt); 2900 datasegcnt);
2731 /* we should have 2 or more entries in buffer list */ 2901 /* we should have 2 or more entries in buffer list */
2732 if (num_bde < 2) 2902 if (num_bde < 2)
2733 goto err; 2903 goto err;
2734 break; 2904 break;
2735 case LPFC_PG_TYPE_DIF_BUF:{ 2905
2906 case LPFC_PG_TYPE_DIF_BUF:
2736 /* 2907 /*
2737 * This type indicates that protection buffers are 2908 * This type indicates that protection buffers are
2738 * passed to the driver, so that needs to be prepared 2909 * passed to the driver, so that needs to be prepared
@@ -2747,31 +2918,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2747 } 2918 }
2748 2919
2749 lpfc_cmd->prot_seg_cnt = protsegcnt; 2920 lpfc_cmd->prot_seg_cnt = protsegcnt;
2750 if (lpfc_cmd->prot_seg_cnt 2921
2751 > phba->cfg_prot_sg_seg_cnt) { 2922 /*
2752 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2923 * There is a minimun of 4 BPLs used for every
2753 "9068 BLKGRD: %s: Too many prot sg " 2924 * protection data segment.
2754 "segments from dma_map_sg. Config %d," 2925 */
2755 "prot_seg_cnt %d\n", __func__, 2926 if ((lpfc_cmd->prot_seg_cnt * 4) >
2756 phba->cfg_prot_sg_seg_cnt, 2927 (phba->cfg_total_seg_cnt - 2))
2757 lpfc_cmd->prot_seg_cnt); 2928 goto err;
2758 dma_unmap_sg(&phba->pcidev->dev,
2759 scsi_prot_sglist(scsi_cmnd),
2760 scsi_prot_sg_count(scsi_cmnd),
2761 datadir);
2762 scsi_dma_unmap(scsi_cmnd);
2763 return 1;
2764 }
2765 2929
2766 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 2930 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2767 datasegcnt, protsegcnt); 2931 datasegcnt, protsegcnt);
2768 /* we should have 3 or more entries in buffer list */ 2932 /* we should have 3 or more entries in buffer list */
2769 if (num_bde < 3) 2933 if ((num_bde < 3) ||
2934 (num_bde > phba->cfg_total_seg_cnt))
2770 goto err; 2935 goto err;
2771 break; 2936 break;
2772 } 2937
2773 case LPFC_PG_TYPE_INVALID: 2938 case LPFC_PG_TYPE_INVALID:
2774 default: 2939 default:
2940 scsi_dma_unmap(scsi_cmnd);
2941 lpfc_cmd->seg_cnt = 0;
2942
2775 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2943 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2776 "9022 Unexpected protection group %i\n", 2944 "9022 Unexpected protection group %i\n",
2777 prot_group_type); 2945 prot_group_type);
@@ -2790,18 +2958,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2790 iocb_cmd->ulpBdeCount = 1; 2958 iocb_cmd->ulpBdeCount = 1;
2791 iocb_cmd->ulpLe = 1; 2959 iocb_cmd->ulpLe = 1;
2792 2960
2793 fcpdl = scsi_bufflen(scsi_cmnd); 2961 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2794
2795 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
2796 /*
2797 * We are in DIF Type 1 mode
2798 * Every data block has a 8 byte DIF (trailer)
2799 * attached to it. Must ajust FCP data length
2800 */
2801 blksize = lpfc_cmd_blksize(scsi_cmnd);
2802 diflen = (fcpdl / blksize) * 8;
2803 fcpdl += diflen;
2804 }
2805 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 2962 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2806 2963
2807 /* 2964 /*
@@ -2812,14 +2969,234 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2812 2969
2813 return 0; 2970 return 0;
2814err: 2971err:
2972 if (lpfc_cmd->seg_cnt)
2973 scsi_dma_unmap(scsi_cmnd);
2974 if (lpfc_cmd->prot_seg_cnt)
2975 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2976 scsi_prot_sg_count(scsi_cmnd),
2977 scsi_cmnd->sc_data_direction);
2978
2815 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2979 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2816 "9023 Could not setup all needed BDE's" 2980 "9023 Cannot setup S/G List for HBA"
2817 "prot_group_type=%d, num_bde=%d\n", 2981 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2982 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2983 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2818 prot_group_type, num_bde); 2984 prot_group_type, num_bde);
2985
2986 lpfc_cmd->seg_cnt = 0;
2987 lpfc_cmd->prot_seg_cnt = 0;
2819 return 1; 2988 return 1;
2820} 2989}
2821 2990
2822/* 2991/*
2992 * This function calcuates the T10 DIF guard tag
2993 * on the specified data using a CRC algorithmn
2994 * using crc_t10dif.
2995 */
2996uint16_t
2997lpfc_bg_crc(uint8_t *data, int count)
2998{
2999 uint16_t crc = 0;
3000 uint16_t x;
3001
3002 crc = crc_t10dif(data, count);
3003 x = cpu_to_be16(crc);
3004 return x;
3005}
3006
3007/*
3008 * This function calcuates the T10 DIF guard tag
3009 * on the specified data using a CSUM algorithmn
3010 * using ip_compute_csum.
3011 */
3012uint16_t
3013lpfc_bg_csum(uint8_t *data, int count)
3014{
3015 uint16_t ret;
3016
3017 ret = ip_compute_csum(data, count);
3018 return ret;
3019}
3020
3021/*
3022 * This function examines the protection data to try to determine
3023 * what type of T10-DIF error occurred.
3024 */
3025void
3026lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3027{
3028 struct scatterlist *sgpe; /* s/g prot entry */
3029 struct scatterlist *sgde; /* s/g data entry */
3030 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3031 struct scsi_dif_tuple *src = NULL;
3032 uint8_t *data_src = NULL;
3033 uint16_t guard_tag, guard_type;
3034 uint16_t start_app_tag, app_tag;
3035 uint32_t start_ref_tag, ref_tag;
3036 int prot, protsegcnt;
3037 int err_type, len, data_len;
3038 int chk_ref, chk_app, chk_guard;
3039 uint16_t sum;
3040 unsigned blksize;
3041
3042 err_type = BGS_GUARD_ERR_MASK;
3043 sum = 0;
3044 guard_tag = 0;
3045
3046 /* First check to see if there is protection data to examine */
3047 prot = scsi_get_prot_op(cmd);
3048 if ((prot == SCSI_PROT_READ_STRIP) ||
3049 (prot == SCSI_PROT_WRITE_INSERT) ||
3050 (prot == SCSI_PROT_NORMAL))
3051 goto out;
3052
3053 /* Currently the driver just supports ref_tag and guard_tag checking */
3054 chk_ref = 1;
3055 chk_app = 0;
3056 chk_guard = 0;
3057
3058 /* Setup a ptr to the protection data provided by the SCSI host */
3059 sgpe = scsi_prot_sglist(cmd);
3060 protsegcnt = lpfc_cmd->prot_seg_cnt;
3061
3062 if (sgpe && protsegcnt) {
3063
3064 /*
3065 * We will only try to verify guard tag if the segment
3066 * data length is a multiple of the blksize.
3067 */
3068 sgde = scsi_sglist(cmd);
3069 blksize = lpfc_cmd_blksize(cmd);
3070 data_src = (uint8_t *)sg_virt(sgde);
3071 data_len = sgde->length;
3072 if ((data_len & (blksize - 1)) == 0)
3073 chk_guard = 1;
3074 guard_type = scsi_host_get_guard(cmd->device->host);
3075
3076 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
3077 start_app_tag = src->app_tag;
3078 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3079 len = sgpe->length;
3080 while (src && protsegcnt) {
3081 while (len) {
3082
3083 /*
3084 * First check to see if a protection data
3085 * check is valid
3086 */
3087 if ((src->ref_tag == 0xffffffff) ||
3088 (src->app_tag == 0xffff)) {
3089 start_ref_tag++;
3090 goto skipit;
3091 }
3092
3093 /* App Tag checking */
3094 app_tag = src->app_tag;
3095 if (chk_app && (app_tag != start_app_tag)) {
3096 err_type = BGS_APPTAG_ERR_MASK;
3097 goto out;
3098 }
3099
3100 /* Reference Tag checking */
3101 ref_tag = be32_to_cpu(src->ref_tag);
3102 if (chk_ref && (ref_tag != start_ref_tag)) {
3103 err_type = BGS_REFTAG_ERR_MASK;
3104 goto out;
3105 }
3106 start_ref_tag++;
3107
3108 /* Guard Tag checking */
3109 if (chk_guard) {
3110 guard_tag = src->guard_tag;
3111 if (guard_type == SHOST_DIX_GUARD_IP)
3112 sum = lpfc_bg_csum(data_src,
3113 blksize);
3114 else
3115 sum = lpfc_bg_crc(data_src,
3116 blksize);
3117 if ((guard_tag != sum)) {
3118 err_type = BGS_GUARD_ERR_MASK;
3119 goto out;
3120 }
3121 }
3122skipit:
3123 len -= sizeof(struct scsi_dif_tuple);
3124 if (len < 0)
3125 len = 0;
3126 src++;
3127
3128 data_src += blksize;
3129 data_len -= blksize;
3130
3131 /*
3132 * Are we at the end of the Data segment?
3133 * The data segment is only used for Guard
3134 * tag checking.
3135 */
3136 if (chk_guard && (data_len == 0)) {
3137 chk_guard = 0;
3138 sgde = sg_next(sgde);
3139 if (!sgde)
3140 goto out;
3141
3142 data_src = (uint8_t *)sg_virt(sgde);
3143 data_len = sgde->length;
3144 if ((data_len & (blksize - 1)) == 0)
3145 chk_guard = 1;
3146 }
3147 }
3148
3149 /* Goto the next Protection data segment */
3150 sgpe = sg_next(sgpe);
3151 if (sgpe) {
3152 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3153 len = sgpe->length;
3154 } else {
3155 src = NULL;
3156 }
3157 protsegcnt--;
3158 }
3159 }
3160out:
3161 if (err_type == BGS_GUARD_ERR_MASK) {
3162 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3163 0x10, 0x1);
3164 cmd->result = DRIVER_SENSE << 24
3165 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3166 phba->bg_guard_err_cnt++;
3167 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3168 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
3169 (unsigned long)scsi_get_lba(cmd),
3170 sum, guard_tag);
3171
3172 } else if (err_type == BGS_REFTAG_ERR_MASK) {
3173 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3174 0x10, 0x3);
3175 cmd->result = DRIVER_SENSE << 24
3176 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3177
3178 phba->bg_reftag_err_cnt++;
3179 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3180 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
3181 (unsigned long)scsi_get_lba(cmd),
3182 ref_tag, start_ref_tag);
3183
3184 } else if (err_type == BGS_APPTAG_ERR_MASK) {
3185 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3186 0x10, 0x2);
3187 cmd->result = DRIVER_SENSE << 24
3188 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3189
3190 phba->bg_apptag_err_cnt++;
3191 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3192 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
3193 (unsigned long)scsi_get_lba(cmd),
3194 app_tag, start_app_tag);
3195 }
3196}
3197
3198
3199/*
2823 * This function checks for BlockGuard errors detected by 3200 * This function checks for BlockGuard errors detected by
2824 * the HBA. In case of errors, the ASC/ASCQ fields in the 3201 * the HBA. In case of errors, the ASC/ASCQ fields in the
2825 * sense buffer will be set accordingly, paired with 3202 * sense buffer will be set accordingly, paired with
@@ -2842,12 +3219,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2842 uint32_t bgstat = bgf->bgstat; 3219 uint32_t bgstat = bgf->bgstat;
2843 uint64_t failing_sector = 0; 3220 uint64_t failing_sector = 0;
2844 3221
2845 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
2846 " 0x%x lba 0x%llx blk cnt 0x%x "
2847 "bgstat=0x%x bghm=0x%x\n",
2848 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
2849 blk_rq_sectors(cmd->request), bgstat, bghm);
2850
2851 spin_lock(&_dump_buf_lock); 3222 spin_lock(&_dump_buf_lock);
2852 if (!_dump_buf_done) { 3223 if (!_dump_buf_done) {
2853 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" 3224 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
@@ -2870,18 +3241,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2870 3241
2871 if (lpfc_bgs_get_invalid_prof(bgstat)) { 3242 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2872 cmd->result = ScsiResult(DID_ERROR, 0); 3243 cmd->result = ScsiResult(DID_ERROR, 0);
2873 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" 3244 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2874 " BlockGuard profile. bgstat:0x%x\n", 3245 "9072 BLKGRD: Invalid BG Profile in cmd"
2875 bgstat); 3246 " 0x%x lba 0x%llx blk cnt 0x%x "
3247 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3248 (unsigned long long)scsi_get_lba(cmd),
3249 blk_rq_sectors(cmd->request), bgstat, bghm);
2876 ret = (-1); 3250 ret = (-1);
2877 goto out; 3251 goto out;
2878 } 3252 }
2879 3253
2880 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 3254 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2881 cmd->result = ScsiResult(DID_ERROR, 0); 3255 cmd->result = ScsiResult(DID_ERROR, 0);
2882 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " 3256 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2883 "Invalid BlockGuard DIF Block. bgstat:0x%x\n", 3257 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
2884 bgstat); 3258 " 0x%x lba 0x%llx blk cnt 0x%x "
3259 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3260 (unsigned long long)scsi_get_lba(cmd),
3261 blk_rq_sectors(cmd->request), bgstat, bghm);
2885 ret = (-1); 3262 ret = (-1);
2886 goto out; 3263 goto out;
2887 } 3264 }
@@ -2894,8 +3271,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2894 cmd->result = DRIVER_SENSE << 24 3271 cmd->result = DRIVER_SENSE << 24
2895 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 3272 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2896 phba->bg_guard_err_cnt++; 3273 phba->bg_guard_err_cnt++;
2897 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3274 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2898 "9055 BLKGRD: guard_tag error\n"); 3275 "9055 BLKGRD: Guard Tag error in cmd"
3276 " 0x%x lba 0x%llx blk cnt 0x%x "
3277 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3278 (unsigned long long)scsi_get_lba(cmd),
3279 blk_rq_sectors(cmd->request), bgstat, bghm);
2899 } 3280 }
2900 3281
2901 if (lpfc_bgs_get_reftag_err(bgstat)) { 3282 if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -2907,8 +3288,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2907 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 3288 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2908 3289
2909 phba->bg_reftag_err_cnt++; 3290 phba->bg_reftag_err_cnt++;
2910 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3291 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2911 "9056 BLKGRD: ref_tag error\n"); 3292 "9056 BLKGRD: Ref Tag error in cmd"
3293 " 0x%x lba 0x%llx blk cnt 0x%x "
3294 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3295 (unsigned long long)scsi_get_lba(cmd),
3296 blk_rq_sectors(cmd->request), bgstat, bghm);
2912 } 3297 }
2913 3298
2914 if (lpfc_bgs_get_apptag_err(bgstat)) { 3299 if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -2920,8 +3305,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2920 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 3305 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2921 3306
2922 phba->bg_apptag_err_cnt++; 3307 phba->bg_apptag_err_cnt++;
2923 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3308 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2924 "9061 BLKGRD: app_tag error\n"); 3309 "9061 BLKGRD: App Tag error in cmd"
3310 " 0x%x lba 0x%llx blk cnt 0x%x "
3311 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3312 (unsigned long long)scsi_get_lba(cmd),
3313 blk_rq_sectors(cmd->request), bgstat, bghm);
2925 } 3314 }
2926 3315
2927 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3316 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -2960,11 +3349,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2960 3349
2961 if (!ret) { 3350 if (!ret) {
2962 /* No error was reported - problem in FW? */ 3351 /* No error was reported - problem in FW? */
2963 cmd->result = ScsiResult(DID_ERROR, 0); 3352 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2964 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3353 "9057 BLKGRD: Unknown error in cmd"
2965 "9057 BLKGRD: Unknown error reported!\n"); 3354 " 0x%x lba 0x%llx blk cnt 0x%x "
3355 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3356 (unsigned long long)scsi_get_lba(cmd),
3357 blk_rq_sectors(cmd->request), bgstat, bghm);
3358
3359 /* Calcuate what type of error it was */
3360 lpfc_calc_bg_err(phba, lpfc_cmd);
2966 } 3361 }
2967
2968out: 3362out:
2969 return ret; 3363 return ret;
2970} 3364}
@@ -3028,6 +3422,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3028 "dma_map_sg. Config %d, seg_cnt %d\n", 3422 "dma_map_sg. Config %d, seg_cnt %d\n",
3029 __func__, phba->cfg_sg_seg_cnt, 3423 __func__, phba->cfg_sg_seg_cnt,
3030 lpfc_cmd->seg_cnt); 3424 lpfc_cmd->seg_cnt);
3425 lpfc_cmd->seg_cnt = 0;
3031 scsi_dma_unmap(scsi_cmnd); 3426 scsi_dma_unmap(scsi_cmnd);
3032 return 1; 3427 return 1;
3033 } 3428 }
@@ -3094,45 +3489,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3094} 3489}
3095 3490
3096/** 3491/**
3097 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
3098 * @phba: The Hba for which this call is being executed.
3099 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
3100 *
3101 * Adjust the data length to account for how much data
3102 * is actually on the wire.
3103 *
3104 * returns the adjusted data length
3105 **/
3106static int
3107lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
3108 struct lpfc_scsi_buf *lpfc_cmd)
3109{
3110 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
3111 int diflen, fcpdl;
3112 unsigned blksize;
3113
3114 fcpdl = scsi_bufflen(sc);
3115
3116 /* Check if there is protection data on the wire */
3117 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
3118 /* Read */
3119 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
3120 return fcpdl;
3121
3122 } else {
3123 /* Write */
3124 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
3125 return fcpdl;
3126 }
3127
3128 /* If protection data on the wire, adjust the count accordingly */
3129 blksize = lpfc_cmd_blksize(sc);
3130 diflen = (fcpdl / blksize) * 8;
3131 fcpdl += diflen;
3132 return fcpdl;
3133}
3134
3135/**
3136 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3492 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3137 * @phba: The Hba for which this call is being executed. 3493 * @phba: The Hba for which this call is being executed.
3138 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3494 * @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -3149,14 +3505,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3149 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3505 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3150 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl); 3506 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3151 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 3507 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3152 uint32_t num_bde = 0; 3508 uint32_t num_sge = 0;
3153 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 3509 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3154 int prot_group_type = 0; 3510 int prot_group_type = 0;
3155 int fcpdl; 3511 int fcpdl;
3156 3512
3157 /* 3513 /*
3158 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd 3514 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3159 * fcp_rsp regions to the first data bde entry 3515 * fcp_rsp regions to the first data sge entry
3160 */ 3516 */
3161 if (scsi_sg_count(scsi_cmnd)) { 3517 if (scsi_sg_count(scsi_cmnd)) {
3162 /* 3518 /*
@@ -3179,28 +3535,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3179 3535
3180 sgl += 1; 3536 sgl += 1;
3181 lpfc_cmd->seg_cnt = datasegcnt; 3537 lpfc_cmd->seg_cnt = datasegcnt;
3182 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3538
3183 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3539 /* First check if data segment count from SCSI Layer is good */
3184 "9087 BLKGRD: %s: Too many sg segments" 3540 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
3185 " from dma_map_sg. Config %d, seg_cnt" 3541 goto err;
3186 " %d\n",
3187 __func__, phba->cfg_sg_seg_cnt,
3188 lpfc_cmd->seg_cnt);
3189 scsi_dma_unmap(scsi_cmnd);
3190 return 1;
3191 }
3192 3542
3193 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 3543 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3194 3544
3195 switch (prot_group_type) { 3545 switch (prot_group_type) {
3196 case LPFC_PG_TYPE_NO_DIF: 3546 case LPFC_PG_TYPE_NO_DIF:
3197 num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, 3547 /* Here we need to add a DISEED to the count */
3548 if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
3549 goto err;
3550
3551 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3198 datasegcnt); 3552 datasegcnt);
3553
3199 /* we should have 2 or more entries in buffer list */ 3554 /* we should have 2 or more entries in buffer list */
3200 if (num_bde < 2) 3555 if (num_sge < 2)
3201 goto err; 3556 goto err;
3202 break; 3557 break;
3203 case LPFC_PG_TYPE_DIF_BUF:{ 3558
3559 case LPFC_PG_TYPE_DIF_BUF:
3204 /* 3560 /*
3205 * This type indicates that protection buffers are 3561 * This type indicates that protection buffers are
3206 * passed to the driver, so that needs to be prepared 3562 * passed to the driver, so that needs to be prepared
@@ -3215,31 +3571,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3215 } 3571 }
3216 3572
3217 lpfc_cmd->prot_seg_cnt = protsegcnt; 3573 lpfc_cmd->prot_seg_cnt = protsegcnt;
3218 if (lpfc_cmd->prot_seg_cnt 3574 /*
3219 > phba->cfg_prot_sg_seg_cnt) { 3575 * There is a minimun of 3 SGEs used for every
3220 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3576 * protection data segment.
3221 "9088 BLKGRD: %s: Too many prot sg " 3577 */
3222 "segments from dma_map_sg. Config %d," 3578 if ((lpfc_cmd->prot_seg_cnt * 3) >
3223 "prot_seg_cnt %d\n", __func__, 3579 (phba->cfg_total_seg_cnt - 2))
3224 phba->cfg_prot_sg_seg_cnt, 3580 goto err;
3225 lpfc_cmd->prot_seg_cnt);
3226 dma_unmap_sg(&phba->pcidev->dev,
3227 scsi_prot_sglist(scsi_cmnd),
3228 scsi_prot_sg_count(scsi_cmnd),
3229 datadir);
3230 scsi_dma_unmap(scsi_cmnd);
3231 return 1;
3232 }
3233 3581
3234 num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, 3582 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3235 datasegcnt, protsegcnt); 3583 datasegcnt, protsegcnt);
3584
3236 /* we should have 3 or more entries in buffer list */ 3585 /* we should have 3 or more entries in buffer list */
3237 if (num_bde < 3) 3586 if ((num_sge < 3) ||
3587 (num_sge > phba->cfg_total_seg_cnt))
3238 goto err; 3588 goto err;
3239 break; 3589 break;
3240 } 3590
3241 case LPFC_PG_TYPE_INVALID: 3591 case LPFC_PG_TYPE_INVALID:
3242 default: 3592 default:
3593 scsi_dma_unmap(scsi_cmnd);
3594 lpfc_cmd->seg_cnt = 0;
3595
3243 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 3596 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3244 "9083 Unexpected protection group %i\n", 3597 "9083 Unexpected protection group %i\n",
3245 prot_group_type); 3598 prot_group_type);
@@ -3263,7 +3616,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3263 } 3616 }
3264 3617
3265 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 3618 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3266
3267 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 3619 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3268 3620
3269 /* 3621 /*
@@ -3274,10 +3626,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3274 3626
3275 return 0; 3627 return 0;
3276err: 3628err:
3629 if (lpfc_cmd->seg_cnt)
3630 scsi_dma_unmap(scsi_cmnd);
3631 if (lpfc_cmd->prot_seg_cnt)
3632 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3633 scsi_prot_sg_count(scsi_cmnd),
3634 scsi_cmnd->sc_data_direction);
3635
3277 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 3636 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3278 "9084 Could not setup all needed BDE's" 3637 "9084 Cannot setup S/G List for HBA"
3279 "prot_group_type=%d, num_bde=%d\n", 3638 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3280 prot_group_type, num_bde); 3639 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3640 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3641 prot_group_type, num_sge);
3642
3643 lpfc_cmd->seg_cnt = 0;
3644 lpfc_cmd->prot_seg_cnt = 0;
3281 return 1; 3645 return 1;
3282} 3646}
3283 3647
@@ -4357,7 +4721,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4357 4721
4358 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 4722 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4359 if (vport->phba->cfg_enable_bg) { 4723 if (vport->phba->cfg_enable_bg) {
4360 lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, 4724 lpfc_printf_vlog(vport,
4725 KERN_INFO, LOG_SCSI_CMD,
4361 "9033 BLKGRD: rcvd %s cmd:x%x " 4726 "9033 BLKGRD: rcvd %s cmd:x%x "
4362 "sector x%llx cnt %u pt %x\n", 4727 "sector x%llx cnt %u pt %x\n",
4363 dif_op_str[scsi_get_prot_op(cmnd)], 4728 dif_op_str[scsi_get_prot_op(cmnd)],
@@ -4369,7 +4734,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4369 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 4734 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4370 } else { 4735 } else {
4371 if (vport->phba->cfg_enable_bg) { 4736 if (vport->phba->cfg_enable_bg) {
4372 lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, 4737 lpfc_printf_vlog(vport,
4738 KERN_INFO, LOG_SCSI_CMD,
4373 "9038 BLKGRD: rcvd PROT_NORMAL cmd: " 4739 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4374 "x%x sector x%llx cnt %u pt %x\n", 4740 "x%x sector x%llx cnt %u pt %x\n",
4375 cmnd->cmnd[0], 4741 cmnd->cmnd[0],
@@ -4542,7 +4908,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4542 /* Wait for abort to complete */ 4908 /* Wait for abort to complete */
4543 wait_event_timeout(waitq, 4909 wait_event_timeout(waitq,
4544 (lpfc_cmd->pCmd != cmnd), 4910 (lpfc_cmd->pCmd != cmnd),
4545 (2*vport->cfg_devloss_tmo*HZ)); 4911 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4546 lpfc_cmd->waitq = NULL; 4912 lpfc_cmd->waitq = NULL;
4547 4913
4548 if (lpfc_cmd->pCmd == cmnd) { 4914 if (lpfc_cmd->pCmd == cmnd) {
@@ -5012,16 +5378,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5012 struct lpfc_hba *phba = vport->phba; 5378 struct lpfc_hba *phba = vport->phba;
5013 int rc, ret = SUCCESS; 5379 int rc, ret = SUCCESS;
5014 5380
5381 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5382 "3172 SCSI layer issued Host Reset Data:\n");
5383
5015 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 5384 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5016 lpfc_offline(phba); 5385 lpfc_offline(phba);
5017 rc = lpfc_sli_brdrestart(phba); 5386 rc = lpfc_sli_brdrestart(phba);
5018 if (rc) 5387 if (rc)
5019 ret = FAILED; 5388 ret = FAILED;
5020 lpfc_online(phba); 5389 rc = lpfc_online(phba);
5390 if (rc)
5391 ret = FAILED;
5021 lpfc_unblock_mgmt_io(phba); 5392 lpfc_unblock_mgmt_io(phba);
5022 5393
5023 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 5394 if (ret == FAILED) {
5024 "3172 SCSI layer issued Host Reset Data: x%x\n", ret); 5395 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5396 "3323 Failed host reset, bring it offline\n");
5397 lpfc_sli4_offline_eratt(phba);
5398 }
5025 return ret; 5399 return ret;
5026} 5400}
5027 5401
@@ -5088,11 +5462,11 @@ lpfc_slave_alloc(struct scsi_device *sdev)
5088 } 5462 }
5089 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); 5463 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
5090 if (num_to_alloc != num_allocated) { 5464 if (num_to_alloc != num_allocated) {
5091 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5465 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5092 "0708 Allocation request of %d " 5466 "0708 Allocation request of %d "
5093 "command buffers did not succeed. " 5467 "command buffers did not succeed. "
5094 "Allocated %d buffers.\n", 5468 "Allocated %d buffers.\n",
5095 num_to_alloc, num_allocated); 5469 num_to_alloc, num_allocated);
5096 } 5470 }
5097 if (num_allocated > 0) 5471 if (num_allocated > 0)
5098 phba->total_scsi_bufs += num_allocated; 5472 phba->total_scsi_bufs += num_allocated;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 35dd17eb0f27..572579f87de4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
667 667
668 spin_lock_irqsave(&phba->hbalock, iflags); 668 spin_lock_irqsave(&phba->hbalock, iflags);
669 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 669 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
670 next_time = jiffies + HZ * (phba->fc_ratov + 1); 670 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
671 list_for_each_entry_safe(rrq, nextrrq, 671 list_for_each_entry_safe(rrq, nextrrq,
672 &phba->active_rrq_list, list) { 672 &phba->active_rrq_list, list) {
673 if (time_after(jiffies, rrq->rrq_stop_time)) 673 if (time_after(jiffies, rrq->rrq_stop_time))
@@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
782 return; 782 return;
783 spin_lock_irqsave(&phba->hbalock, iflags); 783 spin_lock_irqsave(&phba->hbalock, iflags);
784 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 784 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
785 next_time = jiffies + HZ * (phba->fc_ratov * 2); 785 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
786 list_splice_init(&phba->active_rrq_list, &rrq_list); 786 list_splice_init(&phba->active_rrq_list, &rrq_list);
787 spin_unlock_irqrestore(&phba->hbalock, iflags); 787 spin_unlock_irqrestore(&phba->hbalock, iflags);
788 788
@@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
878 else 878 else
879 rrq->send_rrq = 0; 879 rrq->send_rrq = 0;
880 rrq->xritag = xritag; 880 rrq->xritag = xritag;
881 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 881 rrq->rrq_stop_time = jiffies +
882 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
882 rrq->ndlp = ndlp; 883 rrq->ndlp = ndlp;
883 rrq->nlp_DID = ndlp->nlp_DID; 884 rrq->nlp_DID = ndlp->nlp_DID;
884 rrq->vport = ndlp->vport; 885 rrq->vport = ndlp->vport;
@@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
926 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 927 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
927 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) 928 !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
928 ndlp = piocbq->context_un.ndlp; 929 ndlp = piocbq->context_un.ndlp;
929 else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) && 930 else if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
930 (piocbq->iocb_flag & LPFC_IO_LIBDFC))
931 ndlp = piocbq->context_un.ndlp; 931 ndlp = piocbq->context_un.ndlp;
932 else 932 else
933 ndlp = piocbq->context1; 933 ndlp = piocbq->context1;
@@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1339 BUG(); 1339 BUG();
1340 else 1340 else
1341 mod_timer(&piocb->vport->els_tmofunc, 1341 mod_timer(&piocb->vport->els_tmofunc,
1342 jiffies + HZ * (phba->fc_ratov << 1)); 1342 jiffies +
1343 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1343 } 1344 }
1344 1345
1345 1346
@@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2340 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2341 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2341 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2342 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2342 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2343 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2343 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 2344 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2345 "x%x x%x x%x\n",
2344 pmb->vport ? pmb->vport->vpi : 0, 2346 pmb->vport ? pmb->vport->vpi : 0,
2345 pmbox->mbxCommand, 2347 pmbox->mbxCommand,
2346 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2348 lpfc_sli_config_mbox_subsys_get(phba, pmb),
@@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2354 pmbox->un.varWords[4], 2356 pmbox->un.varWords[4],
2355 pmbox->un.varWords[5], 2357 pmbox->un.varWords[5],
2356 pmbox->un.varWords[6], 2358 pmbox->un.varWords[6],
2357 pmbox->un.varWords[7]); 2359 pmbox->un.varWords[7],
2360 pmbox->un.varWords[8],
2361 pmbox->un.varWords[9],
2362 pmbox->un.varWords[10]);
2358 2363
2359 if (pmb->mbox_cmpl) 2364 if (pmb->mbox_cmpl)
2360 pmb->mbox_cmpl(phba,pmb); 2365 pmb->mbox_cmpl(phba,pmb);
@@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr)
2908 lpfc_worker_wake_up(phba); 2913 lpfc_worker_wake_up(phba);
2909 else 2914 else
2910 /* Restart the timer for next eratt poll */ 2915 /* Restart the timer for next eratt poll */
2911 mod_timer(&phba->eratt_poll, jiffies + 2916 mod_timer(&phba->eratt_poll,
2912 HZ * LPFC_ERATT_POLL_INTERVAL); 2917 jiffies +
2918 msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
2913 return; 2919 return;
2914} 2920}
2915 2921
@@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5511 list_del_init(&rsrc_blk->list); 5517 list_del_init(&rsrc_blk->list);
5512 kfree(rsrc_blk); 5518 kfree(rsrc_blk);
5513 } 5519 }
5520 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5514 break; 5521 break;
5515 case LPFC_RSC_TYPE_FCOE_XRI: 5522 case LPFC_RSC_TYPE_FCOE_XRI:
5516 kfree(phba->sli4_hba.xri_bmask); 5523 kfree(phba->sli4_hba.xri_bmask);
@@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5811 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5818 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5812 } else { 5819 } else {
5813 kfree(phba->vpi_bmask); 5820 kfree(phba->vpi_bmask);
5821 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5814 kfree(phba->vpi_ids); 5822 kfree(phba->vpi_ids);
5815 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5823 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5816 kfree(phba->sli4_hba.xri_bmask); 5824 kfree(phba->sli4_hba.xri_bmask);
@@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5992 struct lpfc_sglq *sglq_entry = NULL; 6000 struct lpfc_sglq *sglq_entry = NULL;
5993 struct lpfc_sglq *sglq_entry_next = NULL; 6001 struct lpfc_sglq *sglq_entry_next = NULL;
5994 struct lpfc_sglq *sglq_entry_first = NULL; 6002 struct lpfc_sglq *sglq_entry_first = NULL;
5995 int status, post_cnt = 0, num_posted = 0, block_cnt = 0; 6003 int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
5996 int last_xritag = NO_XRI; 6004 int last_xritag = NO_XRI;
5997 LIST_HEAD(prep_sgl_list); 6005 LIST_HEAD(prep_sgl_list);
5998 LIST_HEAD(blck_sgl_list); 6006 LIST_HEAD(blck_sgl_list);
@@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6004 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); 6012 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
6005 spin_unlock_irq(&phba->hbalock); 6013 spin_unlock_irq(&phba->hbalock);
6006 6014
6015 total_cnt = phba->sli4_hba.els_xri_cnt;
6007 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6016 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6008 &allc_sgl_list, list) { 6017 &allc_sgl_list, list) {
6009 list_del_init(&sglq_entry->list); 6018 list_del_init(&sglq_entry->list);
@@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6055 sglq_entry->sli4_xritag); 6064 sglq_entry->sli4_xritag);
6056 list_add_tail(&sglq_entry->list, 6065 list_add_tail(&sglq_entry->list,
6057 &free_sgl_list); 6066 &free_sgl_list);
6058 spin_lock_irq(&phba->hbalock); 6067 total_cnt--;
6059 phba->sli4_hba.els_xri_cnt--;
6060 spin_unlock_irq(&phba->hbalock);
6061 } 6068 }
6062 } 6069 }
6063 } 6070 }
@@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6085 (sglq_entry_first->sli4_xritag + 6092 (sglq_entry_first->sli4_xritag +
6086 post_cnt - 1)); 6093 post_cnt - 1));
6087 list_splice_init(&blck_sgl_list, &free_sgl_list); 6094 list_splice_init(&blck_sgl_list, &free_sgl_list);
6088 spin_lock_irq(&phba->hbalock); 6095 total_cnt -= post_cnt;
6089 phba->sli4_hba.els_xri_cnt -= post_cnt;
6090 spin_unlock_irq(&phba->hbalock);
6091 } 6096 }
6092 6097
6093 /* don't reset xirtag due to hole in xri block */ 6098 /* don't reset xirtag due to hole in xri block */
@@ -6097,6 +6102,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6097 /* reset els sgl post count for next round of posting */ 6102 /* reset els sgl post count for next round of posting */
6098 post_cnt = 0; 6103 post_cnt = 0;
6099 } 6104 }
6105 /* update the number of XRIs posted for ELS */
6106 phba->sli4_hba.els_xri_cnt = total_cnt;
6100 6107
6101 /* free the els sgls failed to post */ 6108 /* free the els sgls failed to post */
6102 lpfc_free_sgl_list(phba, &free_sgl_list); 6109 lpfc_free_sgl_list(phba, &free_sgl_list);
@@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6446 6453
6447 /* Start the ELS watchdog timer */ 6454 /* Start the ELS watchdog timer */
6448 mod_timer(&vport->els_tmofunc, 6455 mod_timer(&vport->els_tmofunc,
6449 jiffies + HZ * (phba->fc_ratov * 2)); 6456 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
6450 6457
6451 /* Start heart beat timer */ 6458 /* Start heart beat timer */
6452 mod_timer(&phba->hb_tmofunc, 6459 mod_timer(&phba->hb_tmofunc,
6453 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 6460 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
6454 phba->hb_outstanding = 0; 6461 phba->hb_outstanding = 0;
6455 phba->last_completion_time = jiffies; 6462 phba->last_completion_time = jiffies;
6456 6463
6457 /* Start error attention (ERATT) polling timer */ 6464 /* Start error attention (ERATT) polling timer */
6458 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 6465 mod_timer(&phba->eratt_poll,
6466 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
6459 6467
6460 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 6468 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6461 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 6469 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6822 goto out_not_finished; 6830 goto out_not_finished;
6823 } 6831 }
6824 /* timeout active mbox command */ 6832 /* timeout active mbox command */
6825 mod_timer(&psli->mbox_tmo, (jiffies + 6833 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6826 (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); 6834 1000);
6835 mod_timer(&psli->mbox_tmo, jiffies + timeout);
6827 } 6836 }
6828 6837
6829 /* Mailbox cmd <cmd> issue */ 6838 /* Mailbox cmd <cmd> issue */
@@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7496 7505
7497 /* Start timer for the mbox_tmo and log some mailbox post messages */ 7506 /* Start timer for the mbox_tmo and log some mailbox post messages */
7498 mod_timer(&psli->mbox_tmo, (jiffies + 7507 mod_timer(&psli->mbox_tmo, (jiffies +
7499 (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); 7508 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
7500 7509
7501 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7510 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7502 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 7511 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7914static inline uint32_t 7923static inline uint32_t
7915lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 7924lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
7916{ 7925{
7917 int i; 7926 struct lpfc_vector_map_info *cpup;
7918 7927 int chann, cpu;
7919 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
7920 i = smp_processor_id();
7921 else
7922 i = atomic_add_return(1, &phba->fcp_qidx);
7923 7928
7924 i = (i % phba->cfg_fcp_io_channel); 7929 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
7925 return i; 7930 cpu = smp_processor_id();
7931 if (cpu < phba->sli4_hba.num_present_cpu) {
7932 cpup = phba->sli4_hba.cpu_map;
7933 cpup += cpu;
7934 return cpup->channel_id;
7935 }
7936 chann = cpu;
7937 }
7938 chann = atomic_add_return(1, &phba->fcp_qidx);
7939 chann = (chann % phba->cfg_fcp_io_channel);
7940 return chann;
7926} 7941}
7927 7942
7928/** 7943/**
@@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8444 8459
8445 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8460 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8446 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8461 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8462 if (unlikely(!phba->sli4_hba.fcp_wq))
8463 return IOCB_ERROR;
8447 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8464 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8448 &wqe)) 8465 &wqe))
8449 return IOCB_ERROR; 8466 return IOCB_ERROR;
8450 } else { 8467 } else {
8468 if (unlikely(!phba->sli4_hba.els_wq))
8469 return IOCB_ERROR;
8451 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 8470 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8452 return IOCB_ERROR; 8471 return IOCB_ERROR;
8453 } 8472 }
@@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
10003 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 10022 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
10004 SLI_IOCB_RET_IOCB); 10023 SLI_IOCB_RET_IOCB);
10005 if (retval == IOCB_SUCCESS) { 10024 if (retval == IOCB_SUCCESS) {
10006 timeout_req = timeout * HZ; 10025 timeout_req = msecs_to_jiffies(timeout * 1000);
10007 timeleft = wait_event_timeout(done_q, 10026 timeleft = wait_event_timeout(done_q,
10008 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 10027 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
10009 timeout_req); 10028 timeout_req);
@@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
10108 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 10127 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
10109 wait_event_interruptible_timeout(done_q, 10128 wait_event_interruptible_timeout(done_q,
10110 pmboxq->mbox_flag & LPFC_MBX_WAKE, 10129 pmboxq->mbox_flag & LPFC_MBX_WAKE,
10111 timeout * HZ); 10130 msecs_to_jiffies(timeout * 1000));
10112 10131
10113 spin_lock_irqsave(&phba->hbalock, flag); 10132 spin_lock_irqsave(&phba->hbalock, flag);
10114 pmboxq->context1 = NULL; 10133 pmboxq->context1 = NULL;
@@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12899 } 12918 }
12900 wq->db_regaddr = bar_memmap_p + db_offset; 12919 wq->db_regaddr = bar_memmap_p + db_offset;
12901 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12920 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12902 "3264 WQ[%d]: barset:x%x, offset:x%x\n", 12921 "3264 WQ[%d]: barset:x%x, offset:x%x, "
12903 wq->queue_id, pci_barset, db_offset); 12922 "format:x%x\n", wq->queue_id, pci_barset,
12923 db_offset, wq->db_format);
12904 } else { 12924 } else {
12905 wq->db_format = LPFC_DB_LIST_FORMAT; 12925 wq->db_format = LPFC_DB_LIST_FORMAT;
12906 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 12926 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
@@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13120 } 13140 }
13121 hrq->db_regaddr = bar_memmap_p + db_offset; 13141 hrq->db_regaddr = bar_memmap_p + db_offset;
13122 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13142 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13123 "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n", 13143 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
13124 hrq->queue_id, pci_barset, db_offset); 13144 "format:x%x\n", hrq->queue_id, pci_barset,
13145 db_offset, hrq->db_format);
13125 } else { 13146 } else {
13126 hrq->db_format = LPFC_DB_RING_FORMAT; 13147 hrq->db_format = LPFC_DB_RING_FORMAT;
13127 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 13148 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
13971 } 13992 }
13972 13993
13973 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 13994 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
13974 "2538 Received frame rctl:%s type:%s " 13995 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
13975 "Frame Data:%08x %08x %08x %08x %08x %08x\n", 13996 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
13976 rctl_names[fc_hdr->fh_r_ctl], 13997 rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
13977 type_names[fc_hdr->fh_type], 13998 type_names[fc_hdr->fh_type], fc_hdr->fh_type,
13978 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 13999 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
13979 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 14000 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
13980 be32_to_cpu(header[4]), be32_to_cpu(header[5])); 14001 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
14002 be32_to_cpu(header[6]));
13981 return 0; 14003 return 0;
13982drop: 14004drop:
13983 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 14005 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index be02b59ea279..67af460184ba 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -346,11 +346,6 @@ struct lpfc_bmbx {
346#define SLI4_CT_VFI 2 346#define SLI4_CT_VFI 2
347#define SLI4_CT_FCFI 3 347#define SLI4_CT_FCFI 3
348 348
349#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
350#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000
351#define LPFC_SLI4_MIN_BUF_SIZE 0x400
352#define LPFC_SLI4_MAX_BUF_SIZE 0x20000
353
354/* 349/*
355 * SLI4 specific data structures 350 * SLI4 specific data structures
356 */ 351 */
@@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info {
440 435
441#define LPFC_SLI4_HANDLER_NAME_SZ 16 436#define LPFC_SLI4_HANDLER_NAME_SZ 16
442 437
438/* Used for IRQ vector to CPU mapping */
439struct lpfc_vector_map_info {
440 uint16_t phys_id;
441 uint16_t core_id;
442 uint16_t irq;
443 uint16_t channel_id;
444 struct cpumask maskbits;
445};
446#define LPFC_VECTOR_MAP_EMPTY 0xffff
447#define LPFC_MAX_CPU 256
448
443/* SLI4 HBA data structure entries */ 449/* SLI4 HBA data structure entries */
444struct lpfc_sli4_hba { 450struct lpfc_sli4_hba {
445 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for 451 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -573,6 +579,11 @@ struct lpfc_sli4_hba {
573 struct lpfc_iov iov; 579 struct lpfc_iov iov;
574 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 580 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
575 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ 581 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
582
583 /* CPU to vector mapping information */
584 struct lpfc_vector_map_info *cpu_map;
585 uint16_t num_online_cpu;
586 uint16_t num_present_cpu;
576}; 587};
577 588
578enum lpfc_sge_type { 589enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 664cd04f7cd8..a38dc3b16969 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.38" 21#define LPFC_DRIVER_VERSION "8.3.39"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 0fe188e66000..e28e431564b0 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
80 } 80 }
81} 81}
82 82
83static int 83int
84lpfc_alloc_vpi(struct lpfc_hba *phba) 84lpfc_alloc_vpi(struct lpfc_hba *phba)
85{ 85{
86 unsigned long vpi; 86 unsigned long vpi;
@@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
568 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; 568 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
569 struct lpfc_hba *phba = vport->phba; 569 struct lpfc_hba *phba = vport->phba;
570 long timeout; 570 long timeout;
571 bool ns_ndlp_referenced = false;
571 572
572 if (vport->port_type == LPFC_PHYSICAL_PORT) { 573 if (vport->port_type == LPFC_PHYSICAL_PORT) {
573 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 574 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
628 629
629 lpfc_debugfs_terminate(vport); 630 lpfc_debugfs_terminate(vport);
630 631
632 /*
633 * The call to fc_remove_host might release the NameServer ndlp. Since
634 * we might need to use the ndlp to send the DA_ID CT command,
635 * increment the reference for the NameServer ndlp to prevent it from
636 * being released.
637 */
638 ndlp = lpfc_findnode_did(vport, NameServer_DID);
639 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
640 lpfc_nlp_get(ndlp);
641 ns_ndlp_referenced = true;
642 }
643
631 /* Remove FC host and then SCSI host with the vport */ 644 /* Remove FC host and then SCSI host with the vport */
632 fc_remove_host(lpfc_shost_from_vport(vport)); 645 fc_remove_host(lpfc_shost_from_vport(vport));
633 scsi_remove_host(lpfc_shost_from_vport(vport)); 646 scsi_remove_host(lpfc_shost_from_vport(vport));
@@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
734 lpfc_discovery_wait(vport); 747 lpfc_discovery_wait(vport);
735 748
736skip_logo: 749skip_logo:
750
751 /*
752 * If the NameServer ndlp has been incremented to allow the DA_ID CT
753 * command to be sent, decrement the ndlp now.
754 */
755 if (ns_ndlp_referenced) {
756 ndlp = lpfc_findnode_did(vport, NameServer_DID);
757 lpfc_nlp_put(ndlp);
758 }
759
737 lpfc_cleanup(vport); 760 lpfc_cleanup(vport);
738 lpfc_sli_host_down(vport); 761 lpfc_sli_host_down(vport);
739 762
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 90828340acea..6b2c94eb8134 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
90int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); 90int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
91struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); 91struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
92void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); 92void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
93int lpfc_alloc_vpi(struct lpfc_hba *phba);
93 94
94/* 95/*
95 * queuecommand VPORT-specific return codes. Specified in the host byte code. 96 * queuecommand VPORT-specific return codes. Specified in the host byte code.
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 7c90d57b867e..3a9ddae86f1f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -4931,11 +4931,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
4931 printk(KERN_ERR "megaraid_sas: timed out while" 4931 printk(KERN_ERR "megaraid_sas: timed out while"
4932 "waiting for HBA to recover\n"); 4932 "waiting for HBA to recover\n");
4933 error = -ENODEV; 4933 error = -ENODEV;
4934 goto out_kfree_ioc; 4934 goto out_up;
4935 } 4935 }
4936 spin_unlock_irqrestore(&instance->hba_lock, flags); 4936 spin_unlock_irqrestore(&instance->hba_lock, flags);
4937 4937
4938 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 4938 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
4939 out_up:
4939 up(&instance->ioctl_sem); 4940 up(&instance->ioctl_sem);
4940 4941
4941 out_kfree_ioc: 4942 out_kfree_ioc:
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 74550922ad55..7b7381d7671f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
254 } 254 }
255 for (i = 0; i < MVS_MAX_DEVICES; i++) { 255 for (i = 0; i < MVS_MAX_DEVICES; i++) {
256 mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; 256 mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
257 mvi->devices[i].dev_type = NO_DEVICE; 257 mvi->devices[i].dev_type = SAS_PHY_UNUSED;
258 mvi->devices[i].device_id = i; 258 mvi->devices[i].device_id = i;
259 mvi->devices[i].dev_status = MVS_DEV_NORMAL; 259 mvi->devices[i].dev_status = MVS_DEV_NORMAL;
260 init_timer(&mvi->devices[i].timer); 260 init_timer(&mvi->devices[i].timer);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 532110f4562a..c9e244984e30 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -706,7 +706,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
706 return 0; 706 return 0;
707} 707}
708 708
709#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) 709#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
710static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, 710static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
711 struct mvs_tmf_task *tmf, int *pass) 711 struct mvs_tmf_task *tmf, int *pass)
712{ 712{
@@ -726,7 +726,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
726 * libsas will use dev->port, should 726 * libsas will use dev->port, should
727 * not call task_done for sata 727 * not call task_done for sata
728 */ 728 */
729 if (dev->dev_type != SATA_DEV) 729 if (dev->dev_type != SAS_SATA_DEV)
730 task->task_done(task); 730 task->task_done(task);
731 return rc; 731 return rc;
732 } 732 }
@@ -1159,10 +1159,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1159 phy->identify.device_type = 1159 phy->identify.device_type =
1160 phy->att_dev_info & PORT_DEV_TYPE_MASK; 1160 phy->att_dev_info & PORT_DEV_TYPE_MASK;
1161 1161
1162 if (phy->identify.device_type == SAS_END_DEV) 1162 if (phy->identify.device_type == SAS_END_DEVICE)
1163 phy->identify.target_port_protocols = 1163 phy->identify.target_port_protocols =
1164 SAS_PROTOCOL_SSP; 1164 SAS_PROTOCOL_SSP;
1165 else if (phy->identify.device_type != NO_DEVICE) 1165 else if (phy->identify.device_type != SAS_PHY_UNUSED)
1166 phy->identify.target_port_protocols = 1166 phy->identify.target_port_protocols =
1167 SAS_PROTOCOL_SMP; 1167 SAS_PROTOCOL_SMP;
1168 if (oob_done) 1168 if (oob_done)
@@ -1260,7 +1260,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1260{ 1260{
1261 u32 dev; 1261 u32 dev;
1262 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { 1262 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1263 if (mvi->devices[dev].dev_type == NO_DEVICE) { 1263 if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
1264 mvi->devices[dev].device_id = dev; 1264 mvi->devices[dev].device_id = dev;
1265 return &mvi->devices[dev]; 1265 return &mvi->devices[dev];
1266 } 1266 }
@@ -1278,7 +1278,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev)
1278 u32 id = mvi_dev->device_id; 1278 u32 id = mvi_dev->device_id;
1279 memset(mvi_dev, 0, sizeof(*mvi_dev)); 1279 memset(mvi_dev, 0, sizeof(*mvi_dev));
1280 mvi_dev->device_id = id; 1280 mvi_dev->device_id = id;
1281 mvi_dev->dev_type = NO_DEVICE; 1281 mvi_dev->dev_type = SAS_PHY_UNUSED;
1282 mvi_dev->dev_status = MVS_DEV_NORMAL; 1282 mvi_dev->dev_status = MVS_DEV_NORMAL;
1283 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; 1283 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1284} 1284}
@@ -1480,7 +1480,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1480{ 1480{
1481 int rc; 1481 int rc;
1482 struct sas_phy *phy = sas_get_local_phy(dev); 1482 struct sas_phy *phy = sas_get_local_phy(dev);
1483 int reset_type = (dev->dev_type == SATA_DEV || 1483 int reset_type = (dev->dev_type == SAS_SATA_DEV ||
1484 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; 1484 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1485 rc = sas_phy_reset(phy, reset_type); 1485 rc = sas_phy_reset(phy, reset_type);
1486 sas_put_local_phy(phy); 1486 sas_put_local_phy(phy);
@@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task)
1629 1629
1630 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1630 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1631 task->task_proto & SAS_PROTOCOL_STP) { 1631 task->task_proto & SAS_PROTOCOL_STP) {
1632 if (SATA_DEV == dev->dev_type) { 1632 if (SAS_SATA_DEV == dev->dev_type) {
1633 struct mvs_slot_info *slot = task->lldd_task; 1633 struct mvs_slot_info *slot = task->lldd_task;
1634 u32 slot_idx = (u32)(slot - mvi->slot_info); 1634 u32 slot_idx = (u32)(slot - mvi->slot_info);
1635 mv_dprintk("mvs_abort_task() mvi=%p task=%p " 1635 mv_dprintk("mvs_abort_task() mvi=%p task=%p "
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 9f3cc13a5ce7..60e2fb7f2dca 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -67,7 +67,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch;
67extern struct kmem_cache *mvs_task_list_cache; 67extern struct kmem_cache *mvs_task_list_cache;
68 68
69#define DEV_IS_EXPANDER(type) \ 69#define DEV_IS_EXPANDER(type) \
70 ((type == EDGE_DEV) || (type == FANOUT_DEV)) 70 ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
71 71
72#define bit(n) ((u64)1 << n) 72#define bit(n) ((u64)1 << n)
73 73
@@ -241,7 +241,7 @@ struct mvs_phy {
241 241
242struct mvs_device { 242struct mvs_device {
243 struct list_head dev_entry; 243 struct list_head dev_entry;
244 enum sas_dev_type dev_type; 244 enum sas_device_type dev_type;
245 struct mvs_info *mvi_info; 245 struct mvs_info *mvi_info;
246 struct domain_device *sas_device; 246 struct domain_device *sas_device;
247 struct timer_list timer; 247 struct timer_list timer;
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
index 52f04296171c..ce4cd87c7c66 100644
--- a/drivers/scsi/pm8001/Makefile
+++ b/drivers/scsi/pm8001/Makefile
@@ -4,9 +4,10 @@
4# Copyright (C) 2008-2009 USI Co., Ltd. 4# Copyright (C) 2008-2009 USI Co., Ltd.
5 5
6 6
7obj-$(CONFIG_SCSI_PM8001) += pm8001.o 7obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
8pm8001-y += pm8001_init.o \ 8pm80xx-y += pm8001_init.o \
9 pm8001_sas.o \ 9 pm8001_sas.o \
10 pm8001_ctl.o \ 10 pm8001_ctl.o \
11 pm8001_hwi.o 11 pm8001_hwi.o \
12 pm80xx_hwi.o
12 13
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 45bc197bc22f..d99f41c2ca13 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver 2 * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
3 * 3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd. 4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved. 5 * All rights reserved.
@@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
58 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 58 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
59 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 59 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
60 60
61 return snprintf(buf, PAGE_SIZE, "%d\n", 61 if (pm8001_ha->chip_id == chip_8001) {
62 pm8001_ha->main_cfg_tbl.interface_rev); 62 return snprintf(buf, PAGE_SIZE, "%d\n",
63 pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
64 } else {
65 return snprintf(buf, PAGE_SIZE, "%d\n",
66 pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
67 }
63} 68}
64static 69static
65DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); 70DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
@@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
78 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 83 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
79 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 84 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
80 85
81 return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", 86 if (pm8001_ha->chip_id == chip_8001) {
82 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24), 87 return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
83 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16), 88 (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
84 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8), 89 (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
85 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev)); 90 (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
91 (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
92 } else {
93 return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
94 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
95 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
96 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
97 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
98 }
86} 99}
87static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); 100static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
88/** 101/**
@@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
99 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 112 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
100 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 113 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
101 114
102 return snprintf(buf, PAGE_SIZE, "%d\n", 115 if (pm8001_ha->chip_id == chip_8001) {
103 pm8001_ha->main_cfg_tbl.max_out_io); 116 return snprintf(buf, PAGE_SIZE, "%d\n",
117 pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
118 } else {
119 return snprintf(buf, PAGE_SIZE, "%d\n",
120 pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
121 }
104} 122}
105static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL); 123static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
106/** 124/**
@@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
117 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 135 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
118 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 136 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
119 137
120 return snprintf(buf, PAGE_SIZE, "%04d\n", 138 if (pm8001_ha->chip_id == chip_8001) {
121 (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16)); 139 return snprintf(buf, PAGE_SIZE, "%04d\n",
140 (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)
141 );
142 } else {
143 return snprintf(buf, PAGE_SIZE, "%04d\n",
144 (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)
145 );
146 }
122} 147}
123static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); 148static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
124/** 149/**
@@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
136 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 161 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
137 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 162 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
138 163
139 return snprintf(buf, PAGE_SIZE, "%04d\n", 164 if (pm8001_ha->chip_id == chip_8001) {
140 pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF); 165 return snprintf(buf, PAGE_SIZE, "%04d\n",
166 pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF
167 );
168 } else {
169 return snprintf(buf, PAGE_SIZE, "%04d\n",
170 pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF
171 );
172 }
141} 173}
142static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); 174static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
143 175
@@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
173 struct Scsi_Host *shost = class_to_shost(cdev); 205 struct Scsi_Host *shost = class_to_shost(cdev);
174 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 206 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
175 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 207 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
176 mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25; 208 /* fe000000 means supports SAS2.1 */
209 if (pm8001_ha->chip_id == chip_8001)
210 mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag &
211 0xfe000000)>>25;
212 else
213 /* fe000000 means supports SAS2.1 */
214 mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag &
215 0xfe000000)>>25;
177 return show_sas_spec_support_status(mode, buf); 216 return show_sas_spec_support_status(mode, buf);
178} 217}
179static DEVICE_ATTR(sas_spec_support, S_IRUGO, 218static DEVICE_ATTR(sas_spec_support, S_IRUGO,
@@ -361,10 +400,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
361 goto out; 400 goto out;
362 } 401 }
363 payload = (struct pm8001_ioctl_payload *)ioctlbuffer; 402 payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
364 memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data, 403 memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
365 pm8001_ha->fw_image->size); 404 pm8001_ha->fw_image->size);
366 payload->length = pm8001_ha->fw_image->size; 405 payload->length = pm8001_ha->fw_image->size;
367 payload->id = 0; 406 payload->id = 0;
407 payload->minor_function = 0x1;
368 pm8001_ha->nvmd_completion = &completion; 408 pm8001_ha->nvmd_completion = &completion;
369 ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); 409 ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
370 wait_for_completion(&completion); 410 wait_for_completion(&completion);
@@ -411,7 +451,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
411 payload->length = 1024*16; 451 payload->length = 1024*16;
412 payload->id = 0; 452 payload->id = 0;
413 fwControl = 453 fwControl =
414 (struct fw_control_info *)payload->func_specific; 454 (struct fw_control_info *)&payload->func_specific;
415 fwControl->len = IOCTL_BUF_SIZE; /* IN */ 455 fwControl->len = IOCTL_BUF_SIZE; /* IN */
416 fwControl->size = partitionSize + HEADER_LEN;/* IN */ 456 fwControl->size = partitionSize + HEADER_LEN;/* IN */
417 fwControl->retcode = 0;/* OUT */ 457 fwControl->retcode = 0;/* OUT */
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index c3d20c8d4abe..479c5a7a863a 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver 2 * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
3 * 3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd. 4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved. 5 * All rights reserved.
@@ -43,9 +43,12 @@
43 43
44enum chip_flavors { 44enum chip_flavors {
45 chip_8001, 45 chip_8001,
46 chip_8008,
47 chip_8009,
48 chip_8018,
49 chip_8019
46}; 50};
47#define USI_MAX_MEMCNT 9 51
48#define PM8001_MAX_DMA_SG SG_ALL
49enum phy_speed { 52enum phy_speed {
50 PHY_SPEED_15 = 0x01, 53 PHY_SPEED_15 = 0x01,
51 PHY_SPEED_30 = 0x02, 54 PHY_SPEED_30 = 0x02,
@@ -69,23 +72,34 @@ enum port_type {
69#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */ 72#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */
70#define PM8001_MAX_INB_NUM 1 73#define PM8001_MAX_INB_NUM 1
71#define PM8001_MAX_OUTB_NUM 1 74#define PM8001_MAX_OUTB_NUM 1
75#define PM8001_MAX_SPCV_INB_NUM 1
76#define PM8001_MAX_SPCV_OUTB_NUM 4
72#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */ 77#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */
73 78
79/* Inbound/Outbound queue size */
80#define IOMB_SIZE_SPC 64
81#define IOMB_SIZE_SPCV 128
82
74/* unchangeable hardware details */ 83/* unchangeable hardware details */
75#define PM8001_MAX_PHYS 8 /* max. possible phys */ 84#define PM8001_MAX_PHYS 16 /* max. possible phys */
76#define PM8001_MAX_PORTS 8 /* max. possible ports */ 85#define PM8001_MAX_PORTS 16 /* max. possible ports */
77#define PM8001_MAX_DEVICES 1024 /* max supported device */ 86#define PM8001_MAX_DEVICES 2048 /* max supported device */
87#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */
78 88
89#define USI_MAX_MEMCNT_BASE 5
90#define IB (USI_MAX_MEMCNT_BASE + 1)
91#define CI (IB + PM8001_MAX_SPCV_INB_NUM)
92#define OB (CI + PM8001_MAX_SPCV_INB_NUM)
93#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM)
94#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM)
95#define PM8001_MAX_DMA_SG SG_ALL
79enum memory_region_num { 96enum memory_region_num {
80 AAP1 = 0x0, /* application acceleration processor */ 97 AAP1 = 0x0, /* application acceleration processor */
81 IOP, /* IO processor */ 98 IOP, /* IO processor */
82 CI, /* consumer index */
83 PI, /* producer index */
84 IB, /* inbound queue */
85 OB, /* outbound queue */
86 NVMD, /* NVM device */ 99 NVMD, /* NVM device */
87 DEV_MEM, /* memory for devices */ 100 DEV_MEM, /* memory for devices */
88 CCB_MEM, /* memory for command control block */ 101 CCB_MEM, /* memory for command control block */
102 FW_FLASH /* memory for fw flash update */
89}; 103};
90#define PM8001_EVENT_LOG_SIZE (128 * 1024) 104#define PM8001_EVENT_LOG_SIZE (128 * 1024)
91 105
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index b8dd05074abb..69dd49c05f1e 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -50,32 +50,39 @@
50static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) 50static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
51{ 51{
52 void __iomem *address = pm8001_ha->main_cfg_tbl_addr; 52 void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
53 pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00); 53 pm8001_ha->main_cfg_tbl.pm8001_tbl.signature =
54 pm8001_ha->main_cfg_tbl.interface_rev = pm8001_mr32(address, 0x04); 54 pm8001_mr32(address, 0x00);
55 pm8001_ha->main_cfg_tbl.firmware_rev = pm8001_mr32(address, 0x08); 55 pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev =
56 pm8001_ha->main_cfg_tbl.max_out_io = pm8001_mr32(address, 0x0C); 56 pm8001_mr32(address, 0x04);
57 pm8001_ha->main_cfg_tbl.max_sgl = pm8001_mr32(address, 0x10); 57 pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev =
58 pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14); 58 pm8001_mr32(address, 0x08);
59 pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18); 59 pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io =
60 pm8001_ha->main_cfg_tbl.inbound_queue_offset = 60 pm8001_mr32(address, 0x0C);
61 pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl =
62 pm8001_mr32(address, 0x10);
63 pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag =
64 pm8001_mr32(address, 0x14);
65 pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset =
66 pm8001_mr32(address, 0x18);
67 pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset =
61 pm8001_mr32(address, MAIN_IBQ_OFFSET); 68 pm8001_mr32(address, MAIN_IBQ_OFFSET);
62 pm8001_ha->main_cfg_tbl.outbound_queue_offset = 69 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset =
63 pm8001_mr32(address, MAIN_OBQ_OFFSET); 70 pm8001_mr32(address, MAIN_OBQ_OFFSET);
64 pm8001_ha->main_cfg_tbl.hda_mode_flag = 71 pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag =
65 pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); 72 pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
66 73
67 /* read analog Setting offset from the configuration table */ 74 /* read analog Setting offset from the configuration table */
68 pm8001_ha->main_cfg_tbl.anolog_setup_table_offset = 75 pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset =
69 pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); 76 pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
70 77
71 /* read Error Dump Offset and Length */ 78 /* read Error Dump Offset and Length */
72 pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 = 79 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 =
73 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); 80 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
74 pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 = 81 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 =
75 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); 82 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
76 pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 = 83 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 =
77 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); 84 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
78 pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 = 85 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 =
79 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); 86 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
80} 87}
81 88
@@ -86,31 +93,56 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
86static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) 93static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
87{ 94{
88 void __iomem *address = pm8001_ha->general_stat_tbl_addr; 95 void __iomem *address = pm8001_ha->general_stat_tbl_addr;
89 pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00); 96 pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate =
90 pm8001_ha->gs_tbl.iq_freeze_state0 = pm8001_mr32(address, 0x04); 97 pm8001_mr32(address, 0x00);
91 pm8001_ha->gs_tbl.iq_freeze_state1 = pm8001_mr32(address, 0x08); 98 pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 =
92 pm8001_ha->gs_tbl.msgu_tcnt = pm8001_mr32(address, 0x0C); 99 pm8001_mr32(address, 0x04);
93 pm8001_ha->gs_tbl.iop_tcnt = pm8001_mr32(address, 0x10); 100 pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 =
94 pm8001_ha->gs_tbl.reserved = pm8001_mr32(address, 0x14); 101 pm8001_mr32(address, 0x08);
95 pm8001_ha->gs_tbl.phy_state[0] = pm8001_mr32(address, 0x18); 102 pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt =
96 pm8001_ha->gs_tbl.phy_state[1] = pm8001_mr32(address, 0x1C); 103 pm8001_mr32(address, 0x0C);
97 pm8001_ha->gs_tbl.phy_state[2] = pm8001_mr32(address, 0x20); 104 pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt =
98 pm8001_ha->gs_tbl.phy_state[3] = pm8001_mr32(address, 0x24); 105 pm8001_mr32(address, 0x10);
99 pm8001_ha->gs_tbl.phy_state[4] = pm8001_mr32(address, 0x28); 106 pm8001_ha->gs_tbl.pm8001_tbl.rsvd =
100 pm8001_ha->gs_tbl.phy_state[5] = pm8001_mr32(address, 0x2C); 107 pm8001_mr32(address, 0x14);
101 pm8001_ha->gs_tbl.phy_state[6] = pm8001_mr32(address, 0x30); 108 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] =
102 pm8001_ha->gs_tbl.phy_state[7] = pm8001_mr32(address, 0x34); 109 pm8001_mr32(address, 0x18);
103 pm8001_ha->gs_tbl.reserved1 = pm8001_mr32(address, 0x38); 110 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] =
104 pm8001_ha->gs_tbl.reserved2 = pm8001_mr32(address, 0x3C); 111 pm8001_mr32(address, 0x1C);
105 pm8001_ha->gs_tbl.reserved3 = pm8001_mr32(address, 0x40); 112 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] =
106 pm8001_ha->gs_tbl.recover_err_info[0] = pm8001_mr32(address, 0x44); 113 pm8001_mr32(address, 0x20);
107 pm8001_ha->gs_tbl.recover_err_info[1] = pm8001_mr32(address, 0x48); 114 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] =
108 pm8001_ha->gs_tbl.recover_err_info[2] = pm8001_mr32(address, 0x4C); 115 pm8001_mr32(address, 0x24);
109 pm8001_ha->gs_tbl.recover_err_info[3] = pm8001_mr32(address, 0x50); 116 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] =
110 pm8001_ha->gs_tbl.recover_err_info[4] = pm8001_mr32(address, 0x54); 117 pm8001_mr32(address, 0x28);
111 pm8001_ha->gs_tbl.recover_err_info[5] = pm8001_mr32(address, 0x58); 118 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] =
112 pm8001_ha->gs_tbl.recover_err_info[6] = pm8001_mr32(address, 0x5C); 119 pm8001_mr32(address, 0x2C);
113 pm8001_ha->gs_tbl.recover_err_info[7] = pm8001_mr32(address, 0x60); 120 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] =
121 pm8001_mr32(address, 0x30);
122 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] =
123 pm8001_mr32(address, 0x34);
124 pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val =
125 pm8001_mr32(address, 0x38);
126 pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] =
127 pm8001_mr32(address, 0x3C);
128 pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] =
129 pm8001_mr32(address, 0x40);
130 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] =
131 pm8001_mr32(address, 0x44);
132 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] =
133 pm8001_mr32(address, 0x48);
134 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] =
135 pm8001_mr32(address, 0x4C);
136 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] =
137 pm8001_mr32(address, 0x50);
138 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] =
139 pm8001_mr32(address, 0x54);
140 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] =
141 pm8001_mr32(address, 0x58);
142 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] =
143 pm8001_mr32(address, 0x5C);
144 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] =
145 pm8001_mr32(address, 0x60);
114} 146}
115 147
116/** 148/**
@@ -119,10 +151,9 @@ static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
119 */ 151 */
120static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) 152static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
121{ 153{
122 int inbQ_num = 1;
123 int i; 154 int i;
124 void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; 155 void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
125 for (i = 0; i < inbQ_num; i++) { 156 for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
126 u32 offset = i * 0x20; 157 u32 offset = i * 0x20;
127 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = 158 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
128 get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); 159 get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -137,10 +168,9 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
137 */ 168 */
138static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) 169static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
139{ 170{
140 int outbQ_num = 1;
141 int i; 171 int i;
142 void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; 172 void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
143 for (i = 0; i < outbQ_num; i++) { 173 for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
144 u32 offset = i * 0x24; 174 u32 offset = i * 0x24;
145 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = 175 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
146 get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); 176 get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -155,54 +185,57 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
155 */ 185 */
156static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) 186static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
157{ 187{
158 int qn = 1;
159 int i; 188 int i;
160 u32 offsetib, offsetob; 189 u32 offsetib, offsetob;
161 void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; 190 void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
162 void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; 191 void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
163 192
164 pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd = 0; 193 pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0;
165 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 = 0; 194 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0;
166 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7 = 0; 195 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0;
167 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3 = 0; 196 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0;
168 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7 = 0; 197 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0;
169 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3 = 0; 198 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 =
170 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7 = 0; 199 0;
171 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3 = 0; 200 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 =
172 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7 = 0; 201 0;
173 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3 = 0; 202 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
174 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7 = 0; 203 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
175 204 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0;
176 pm8001_ha->main_cfg_tbl.upper_event_log_addr = 205 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0;
206
207 pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr =
177 pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; 208 pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
178 pm8001_ha->main_cfg_tbl.lower_event_log_addr = 209 pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr =
179 pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; 210 pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
180 pm8001_ha->main_cfg_tbl.event_log_size = PM8001_EVENT_LOG_SIZE; 211 pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size =
181 pm8001_ha->main_cfg_tbl.event_log_option = 0x01; 212 PM8001_EVENT_LOG_SIZE;
182 pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr = 213 pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01;
214 pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr =
183 pm8001_ha->memoryMap.region[IOP].phys_addr_hi; 215 pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
184 pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr = 216 pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr =
185 pm8001_ha->memoryMap.region[IOP].phys_addr_lo; 217 pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
186 pm8001_ha->main_cfg_tbl.iop_event_log_size = PM8001_EVENT_LOG_SIZE; 218 pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size =
187 pm8001_ha->main_cfg_tbl.iop_event_log_option = 0x01; 219 PM8001_EVENT_LOG_SIZE;
188 pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01; 220 pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01;
189 for (i = 0; i < qn; i++) { 221 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01;
222 for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
190 pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = 223 pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
191 PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); 224 PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
192 pm8001_ha->inbnd_q_tbl[i].upper_base_addr = 225 pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
193 pm8001_ha->memoryMap.region[IB].phys_addr_hi; 226 pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
194 pm8001_ha->inbnd_q_tbl[i].lower_base_addr = 227 pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
195 pm8001_ha->memoryMap.region[IB].phys_addr_lo; 228 pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
196 pm8001_ha->inbnd_q_tbl[i].base_virt = 229 pm8001_ha->inbnd_q_tbl[i].base_virt =
197 (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr; 230 (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
198 pm8001_ha->inbnd_q_tbl[i].total_length = 231 pm8001_ha->inbnd_q_tbl[i].total_length =
199 pm8001_ha->memoryMap.region[IB].total_len; 232 pm8001_ha->memoryMap.region[IB + i].total_len;
200 pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = 233 pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
201 pm8001_ha->memoryMap.region[CI].phys_addr_hi; 234 pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
202 pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = 235 pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
203 pm8001_ha->memoryMap.region[CI].phys_addr_lo; 236 pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
204 pm8001_ha->inbnd_q_tbl[i].ci_virt = 237 pm8001_ha->inbnd_q_tbl[i].ci_virt =
205 pm8001_ha->memoryMap.region[CI].virt_ptr; 238 pm8001_ha->memoryMap.region[CI + i].virt_ptr;
206 offsetib = i * 0x20; 239 offsetib = i * 0x20;
207 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = 240 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
208 get_pci_bar_index(pm8001_mr32(addressib, 241 get_pci_bar_index(pm8001_mr32(addressib,
@@ -212,25 +245,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
212 pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; 245 pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
213 pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; 246 pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
214 } 247 }
215 for (i = 0; i < qn; i++) { 248 for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
216 pm8001_ha->outbnd_q_tbl[i].element_size_cnt = 249 pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
217 PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); 250 PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
218 pm8001_ha->outbnd_q_tbl[i].upper_base_addr = 251 pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
219 pm8001_ha->memoryMap.region[OB].phys_addr_hi; 252 pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
220 pm8001_ha->outbnd_q_tbl[i].lower_base_addr = 253 pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
221 pm8001_ha->memoryMap.region[OB].phys_addr_lo; 254 pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
222 pm8001_ha->outbnd_q_tbl[i].base_virt = 255 pm8001_ha->outbnd_q_tbl[i].base_virt =
223 (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr; 256 (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
224 pm8001_ha->outbnd_q_tbl[i].total_length = 257 pm8001_ha->outbnd_q_tbl[i].total_length =
225 pm8001_ha->memoryMap.region[OB].total_len; 258 pm8001_ha->memoryMap.region[OB + i].total_len;
226 pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = 259 pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
227 pm8001_ha->memoryMap.region[PI].phys_addr_hi; 260 pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
228 pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = 261 pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
229 pm8001_ha->memoryMap.region[PI].phys_addr_lo; 262 pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
230 pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = 263 pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay =
231 0 | (10 << 16) | (0 << 24); 264 0 | (10 << 16) | (i << 24);
232 pm8001_ha->outbnd_q_tbl[i].pi_virt = 265 pm8001_ha->outbnd_q_tbl[i].pi_virt =
233 pm8001_ha->memoryMap.region[PI].virt_ptr; 266 pm8001_ha->memoryMap.region[PI + i].virt_ptr;
234 offsetob = i * 0x24; 267 offsetob = i * 0x24;
235 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = 268 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
236 get_pci_bar_index(pm8001_mr32(addressob, 269 get_pci_bar_index(pm8001_mr32(addressob,
@@ -250,42 +283,51 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
250{ 283{
251 void __iomem *address = pm8001_ha->main_cfg_tbl_addr; 284 void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
252 pm8001_mw32(address, 0x24, 285 pm8001_mw32(address, 0x24,
253 pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd); 286 pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd);
254 pm8001_mw32(address, 0x28, 287 pm8001_mw32(address, 0x28,
255 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3); 288 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3);
256 pm8001_mw32(address, 0x2C, 289 pm8001_mw32(address, 0x2C,
257 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7); 290 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7);
258 pm8001_mw32(address, 0x30, 291 pm8001_mw32(address, 0x30,
259 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3); 292 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3);
260 pm8001_mw32(address, 0x34, 293 pm8001_mw32(address, 0x34,
261 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7); 294 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7);
262 pm8001_mw32(address, 0x38, 295 pm8001_mw32(address, 0x38,
263 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3); 296 pm8001_ha->main_cfg_tbl.pm8001_tbl.
297 outbound_tgt_ITNexus_event_pid0_3);
264 pm8001_mw32(address, 0x3C, 298 pm8001_mw32(address, 0x3C,
265 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7); 299 pm8001_ha->main_cfg_tbl.pm8001_tbl.
300 outbound_tgt_ITNexus_event_pid4_7);
266 pm8001_mw32(address, 0x40, 301 pm8001_mw32(address, 0x40,
267 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3); 302 pm8001_ha->main_cfg_tbl.pm8001_tbl.
303 outbound_tgt_ssp_event_pid0_3);
268 pm8001_mw32(address, 0x44, 304 pm8001_mw32(address, 0x44,
269 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7); 305 pm8001_ha->main_cfg_tbl.pm8001_tbl.
306 outbound_tgt_ssp_event_pid4_7);
270 pm8001_mw32(address, 0x48, 307 pm8001_mw32(address, 0x48,
271 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3); 308 pm8001_ha->main_cfg_tbl.pm8001_tbl.
309 outbound_tgt_smp_event_pid0_3);
272 pm8001_mw32(address, 0x4C, 310 pm8001_mw32(address, 0x4C,
273 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7); 311 pm8001_ha->main_cfg_tbl.pm8001_tbl.
312 outbound_tgt_smp_event_pid4_7);
274 pm8001_mw32(address, 0x50, 313 pm8001_mw32(address, 0x50,
275 pm8001_ha->main_cfg_tbl.upper_event_log_addr); 314 pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr);
276 pm8001_mw32(address, 0x54, 315 pm8001_mw32(address, 0x54,
277 pm8001_ha->main_cfg_tbl.lower_event_log_addr); 316 pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr);
278 pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size); 317 pm8001_mw32(address, 0x58,
279 pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option); 318 pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size);
319 pm8001_mw32(address, 0x5C,
320 pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option);
280 pm8001_mw32(address, 0x60, 321 pm8001_mw32(address, 0x60,
281 pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr); 322 pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr);
282 pm8001_mw32(address, 0x64, 323 pm8001_mw32(address, 0x64,
283 pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr); 324 pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr);
284 pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size); 325 pm8001_mw32(address, 0x68,
326 pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size);
285 pm8001_mw32(address, 0x6C, 327 pm8001_mw32(address, 0x6C,
286 pm8001_ha->main_cfg_tbl.iop_event_log_option); 328 pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option);
287 pm8001_mw32(address, 0x70, 329 pm8001_mw32(address, 0x70,
288 pm8001_ha->main_cfg_tbl.fatal_err_interrupt); 330 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt);
289} 331}
290 332
291/** 333/**
@@ -597,6 +639,19 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
597 */ 639 */
598static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) 640static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
599{ 641{
642 u8 i = 0;
643 u16 deviceid;
644 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
645 /* 8081 controllers need BAR shift to access MPI space
646 * as this is shared with BIOS data */
647 if (deviceid == 0x8081) {
648 if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
649 PM8001_FAIL_DBG(pm8001_ha,
650 pm8001_printk("Shift Bar4 to 0x%x failed\n",
651 GSM_SM_BASE));
652 return -1;
653 }
654 }
600 /* check the firmware status */ 655 /* check the firmware status */
601 if (-1 == check_fw_ready(pm8001_ha)) { 656 if (-1 == check_fw_ready(pm8001_ha)) {
602 PM8001_FAIL_DBG(pm8001_ha, 657 PM8001_FAIL_DBG(pm8001_ha,
@@ -613,11 +668,16 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
613 read_outbnd_queue_table(pm8001_ha); 668 read_outbnd_queue_table(pm8001_ha);
614 /* update main config table ,inbound table and outbound table */ 669 /* update main config table ,inbound table and outbound table */
615 update_main_config_table(pm8001_ha); 670 update_main_config_table(pm8001_ha);
616 update_inbnd_queue_table(pm8001_ha, 0); 671 for (i = 0; i < PM8001_MAX_INB_NUM; i++)
617 update_outbnd_queue_table(pm8001_ha, 0); 672 update_inbnd_queue_table(pm8001_ha, i);
618 mpi_set_phys_g3_with_ssc(pm8001_ha, 0); 673 for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
619 /* 7->130ms, 34->500ms, 119->1.5s */ 674 update_outbnd_queue_table(pm8001_ha, i);
620 mpi_set_open_retry_interval_reg(pm8001_ha, 119); 675 /* 8081 controller donot require these operations */
676 if (deviceid != 0x8081) {
677 mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
678 /* 7->130ms, 34->500ms, 119->1.5s */
679 mpi_set_open_retry_interval_reg(pm8001_ha, 119);
680 }
621 /* notify firmware update finished and check initialization status */ 681 /* notify firmware update finished and check initialization status */
622 if (0 == mpi_init_check(pm8001_ha)) { 682 if (0 == mpi_init_check(pm8001_ha)) {
623 PM8001_INIT_DBG(pm8001_ha, 683 PM8001_INIT_DBG(pm8001_ha,
@@ -639,6 +699,16 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
639 u32 max_wait_count; 699 u32 max_wait_count;
640 u32 value; 700 u32 value;
641 u32 gst_len_mpistate; 701 u32 gst_len_mpistate;
702 u16 deviceid;
703 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
704 if (deviceid == 0x8081) {
705 if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
706 PM8001_FAIL_DBG(pm8001_ha,
707 pm8001_printk("Shift Bar4 to 0x%x failed\n",
708 GSM_SM_BASE));
709 return -1;
710 }
711 }
642 init_pci_device_addresses(pm8001_ha); 712 init_pci_device_addresses(pm8001_ha);
643 /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the 713 /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
644 table is stop */ 714 table is stop */
@@ -740,14 +810,14 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
740 * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all 810 * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
741 * the FW register status to the originated status. 811 * the FW register status to the originated status.
742 * @pm8001_ha: our hba card information 812 * @pm8001_ha: our hba card information
743 * @signature: signature in host scratch pad0 register.
744 */ 813 */
745static int 814static int
746pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature) 815pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
747{ 816{
748 u32 regVal, toggleVal; 817 u32 regVal, toggleVal;
749 u32 max_wait_count; 818 u32 max_wait_count;
750 u32 regVal1, regVal2, regVal3; 819 u32 regVal1, regVal2, regVal3;
820 u32 signature = 0x252acbcd; /* for host scratch pad0 */
751 unsigned long flags; 821 unsigned long flags;
752 822
753 /* step1: Check FW is ready for soft reset */ 823 /* step1: Check FW is ready for soft reset */
@@ -1113,7 +1183,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
1113 * pm8001_chip_iounmap - which maped when initialized. 1183 * pm8001_chip_iounmap - which maped when initialized.
1114 * @pm8001_ha: our hba card information 1184 * @pm8001_ha: our hba card information
1115 */ 1185 */
1116static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) 1186void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
1117{ 1187{
1118 s8 bar, logical = 0; 1188 s8 bar, logical = 0;
1119 for (bar = 0; bar < 6; bar++) { 1189 for (bar = 0; bar < 6; bar++) {
@@ -1192,7 +1262,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
1192 * @pm8001_ha: our hba card information 1262 * @pm8001_ha: our hba card information
1193 */ 1263 */
1194static void 1264static void
1195pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) 1265pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
1196{ 1266{
1197#ifdef PM8001_USE_MSIX 1267#ifdef PM8001_USE_MSIX
1198 pm8001_chip_msix_interrupt_enable(pm8001_ha, 0); 1268 pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
@@ -1207,7 +1277,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
1207 * @pm8001_ha: our hba card information 1277 * @pm8001_ha: our hba card information
1208 */ 1278 */
1209static void 1279static void
1210pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) 1280pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
1211{ 1281{
1212#ifdef PM8001_USE_MSIX 1282#ifdef PM8001_USE_MSIX
1213 pm8001_chip_msix_interrupt_disable(pm8001_ha, 0); 1283 pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
@@ -1218,12 +1288,13 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
1218} 1288}
1219 1289
1220/** 1290/**
1221 * mpi_msg_free_get- get the free message buffer for transfer inbound queue. 1291 * pm8001_mpi_msg_free_get - get the free message buffer for transfer
1292 * inbound queue.
1222 * @circularQ: the inbound queue we want to transfer to HBA. 1293 * @circularQ: the inbound queue we want to transfer to HBA.
1223 * @messageSize: the message size of this transfer, normally it is 64 bytes 1294 * @messageSize: the message size of this transfer, normally it is 64 bytes
1224 * @messagePtr: the pointer to message. 1295 * @messagePtr: the pointer to message.
1225 */ 1296 */
1226static int mpi_msg_free_get(struct inbound_queue_table *circularQ, 1297int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
1227 u16 messageSize, void **messagePtr) 1298 u16 messageSize, void **messagePtr)
1228{ 1299{
1229 u32 offset, consumer_index; 1300 u32 offset, consumer_index;
@@ -1231,7 +1302,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
1231 u8 bcCount = 1; /* only support single buffer */ 1302 u8 bcCount = 1; /* only support single buffer */
1232 1303
1233 /* Checks is the requested message size can be allocated in this queue*/ 1304 /* Checks is the requested message size can be allocated in this queue*/
1234 if (messageSize > 64) { 1305 if (messageSize > IOMB_SIZE_SPCV) {
1235 *messagePtr = NULL; 1306 *messagePtr = NULL;
1236 return -1; 1307 return -1;
1237 } 1308 }
@@ -1245,7 +1316,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
1245 return -1; 1316 return -1;
1246 } 1317 }
1247 /* get memory IOMB buffer address */ 1318 /* get memory IOMB buffer address */
1248 offset = circularQ->producer_idx * 64; 1319 offset = circularQ->producer_idx * messageSize;
1249 /* increment to next bcCount element */ 1320 /* increment to next bcCount element */
1250 circularQ->producer_idx = (circularQ->producer_idx + bcCount) 1321 circularQ->producer_idx = (circularQ->producer_idx + bcCount)
1251 % PM8001_MPI_QUEUE; 1322 % PM8001_MPI_QUEUE;
@@ -1257,29 +1328,30 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
1257} 1328}
1258 1329
1259/** 1330/**
1260 * mpi_build_cmd- build the message queue for transfer, update the PI to FW 1331 * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to
1261 * to tell the fw to get this message from IOMB. 1332 * FW to tell the fw to get this message from IOMB.
1262 * @pm8001_ha: our hba card information 1333 * @pm8001_ha: our hba card information
1263 * @circularQ: the inbound queue we want to transfer to HBA. 1334 * @circularQ: the inbound queue we want to transfer to HBA.
1264 * @opCode: the operation code represents commands which LLDD and fw recognized. 1335 * @opCode: the operation code represents commands which LLDD and fw recognized.
1265 * @payload: the command payload of each operation command. 1336 * @payload: the command payload of each operation command.
1266 */ 1337 */
1267static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, 1338int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
1268 struct inbound_queue_table *circularQ, 1339 struct inbound_queue_table *circularQ,
1269 u32 opCode, void *payload) 1340 u32 opCode, void *payload, u32 responseQueue)
1270{ 1341{
1271 u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; 1342 u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
1272 u32 responseQueue = 0;
1273 void *pMessage; 1343 void *pMessage;
1274 1344
1275 if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) { 1345 if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
1346 &pMessage) < 0) {
1276 PM8001_IO_DBG(pm8001_ha, 1347 PM8001_IO_DBG(pm8001_ha,
1277 pm8001_printk("No free mpi buffer\n")); 1348 pm8001_printk("No free mpi buffer\n"));
1278 return -1; 1349 return -1;
1279 } 1350 }
1280 BUG_ON(!payload); 1351 BUG_ON(!payload);
1281 /*Copy to the payload*/ 1352 /*Copy to the payload*/
1282 memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr))); 1353 memcpy(pMessage, payload, (pm8001_ha->iomb_size -
1354 sizeof(struct mpi_msg_hdr)));
1283 1355
1284 /*Build the header*/ 1356 /*Build the header*/
1285 Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) 1357 Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
@@ -1291,12 +1363,13 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
1291 pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, 1363 pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
1292 circularQ->pi_offset, circularQ->producer_idx); 1364 circularQ->pi_offset, circularQ->producer_idx);
1293 PM8001_IO_DBG(pm8001_ha, 1365 PM8001_IO_DBG(pm8001_ha,
1294 pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx, 1366 pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
1295 circularQ->consumer_index)); 1367 responseQueue, opCode, circularQ->producer_idx,
1368 circularQ->consumer_index));
1296 return 0; 1369 return 0;
1297} 1370}
1298 1371
1299static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, 1372u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
1300 struct outbound_queue_table *circularQ, u8 bc) 1373 struct outbound_queue_table *circularQ, u8 bc)
1301{ 1374{
1302 u32 producer_index; 1375 u32 producer_index;
@@ -1305,7 +1378,7 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
1305 1378
1306 msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr)); 1379 msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
1307 pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + 1380 pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
1308 circularQ->consumer_idx * 64); 1381 circularQ->consumer_idx * pm8001_ha->iomb_size);
1309 if (pOutBoundMsgHeader != msgHeader) { 1382 if (pOutBoundMsgHeader != msgHeader) {
1310 PM8001_FAIL_DBG(pm8001_ha, 1383 PM8001_FAIL_DBG(pm8001_ha,
1311 pm8001_printk("consumer_idx = %d msgHeader = %p\n", 1384 pm8001_printk("consumer_idx = %d msgHeader = %p\n",
@@ -1336,13 +1409,14 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
1336} 1409}
1337 1410
1338/** 1411/**
1339 * mpi_msg_consume- get the MPI message from outbound queue message table. 1412 * pm8001_mpi_msg_consume- get the MPI message from outbound queue
1413 * message table.
1340 * @pm8001_ha: our hba card information 1414 * @pm8001_ha: our hba card information
1341 * @circularQ: the outbound queue table. 1415 * @circularQ: the outbound queue table.
1342 * @messagePtr1: the message contents of this outbound message. 1416 * @messagePtr1: the message contents of this outbound message.
1343 * @pBC: the message size. 1417 * @pBC: the message size.
1344 */ 1418 */
1345static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, 1419u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
1346 struct outbound_queue_table *circularQ, 1420 struct outbound_queue_table *circularQ,
1347 void **messagePtr1, u8 *pBC) 1421 void **messagePtr1, u8 *pBC)
1348{ 1422{
@@ -1356,7 +1430,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
1356 /*Get the pointer to the circular queue buffer element*/ 1430 /*Get the pointer to the circular queue buffer element*/
1357 msgHeader = (struct mpi_msg_hdr *) 1431 msgHeader = (struct mpi_msg_hdr *)
1358 (circularQ->base_virt + 1432 (circularQ->base_virt +
1359 circularQ->consumer_idx * 64); 1433 circularQ->consumer_idx * pm8001_ha->iomb_size);
1360 /* read header */ 1434 /* read header */
1361 header_tmp = pm8001_read_32(msgHeader); 1435 header_tmp = pm8001_read_32(msgHeader);
1362 msgHeader_tmp = cpu_to_le32(header_tmp); 1436 msgHeader_tmp = cpu_to_le32(header_tmp);
@@ -1416,7 +1490,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
1416 return MPI_IO_STATUS_BUSY; 1490 return MPI_IO_STATUS_BUSY;
1417} 1491}
1418 1492
1419static void pm8001_work_fn(struct work_struct *work) 1493void pm8001_work_fn(struct work_struct *work)
1420{ 1494{
1421 struct pm8001_work *pw = container_of(work, struct pm8001_work, work); 1495 struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
1422 struct pm8001_device *pm8001_dev; 1496 struct pm8001_device *pm8001_dev;
@@ -1431,7 +1505,7 @@ static void pm8001_work_fn(struct work_struct *work)
1431 pm8001_dev = pw->data; /* Most stash device structure */ 1505 pm8001_dev = pw->data; /* Most stash device structure */
1432 if ((pm8001_dev == NULL) 1506 if ((pm8001_dev == NULL)
1433 || ((pw->handler != IO_XFER_ERROR_BREAK) 1507 || ((pw->handler != IO_XFER_ERROR_BREAK)
1434 && (pm8001_dev->dev_type == NO_DEVICE))) { 1508 && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
1435 kfree(pw); 1509 kfree(pw);
1436 return; 1510 return;
1437 } 1511 }
@@ -1596,7 +1670,7 @@ static void pm8001_work_fn(struct work_struct *work)
1596 } break; 1670 } break;
1597 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: 1671 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1598 dev = pm8001_dev->sas_device; 1672 dev = pm8001_dev->sas_device;
1599 pm8001_I_T_nexus_reset(dev); 1673 pm8001_I_T_nexus_event_handler(dev);
1600 break; 1674 break;
1601 case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: 1675 case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
1602 dev = pm8001_dev->sas_device; 1676 dev = pm8001_dev->sas_device;
@@ -1614,7 +1688,7 @@ static void pm8001_work_fn(struct work_struct *work)
1614 kfree(pw); 1688 kfree(pw);
1615} 1689}
1616 1690
1617static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, 1691int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
1618 int handler) 1692 int handler)
1619{ 1693{
1620 struct pm8001_work *pw; 1694 struct pm8001_work *pw;
@@ -1633,6 +1707,123 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
1633 return ret; 1707 return ret;
1634} 1708}
1635 1709
1710static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
1711 struct pm8001_device *pm8001_ha_dev)
1712{
1713 int res;
1714 u32 ccb_tag;
1715 struct pm8001_ccb_info *ccb;
1716 struct sas_task *task = NULL;
1717 struct task_abort_req task_abort;
1718 struct inbound_queue_table *circularQ;
1719 u32 opc = OPC_INB_SATA_ABORT;
1720 int ret;
1721
1722 if (!pm8001_ha_dev) {
1723 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
1724 return;
1725 }
1726
1727 task = sas_alloc_slow_task(GFP_ATOMIC);
1728
1729 if (!task) {
1730 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
1731 "allocate task\n"));
1732 return;
1733 }
1734
1735 task->task_done = pm8001_task_done;
1736
1737 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
1738 if (res)
1739 return;
1740
1741 ccb = &pm8001_ha->ccb_info[ccb_tag];
1742 ccb->device = pm8001_ha_dev;
1743 ccb->ccb_tag = ccb_tag;
1744 ccb->task = task;
1745
1746 circularQ = &pm8001_ha->inbnd_q_tbl[0];
1747
1748 memset(&task_abort, 0, sizeof(task_abort));
1749 task_abort.abort_all = cpu_to_le32(1);
1750 task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
1751 task_abort.tag = cpu_to_le32(ccb_tag);
1752
1753 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
1754
1755}
1756
1757static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
1758 struct pm8001_device *pm8001_ha_dev)
1759{
1760 struct sata_start_req sata_cmd;
1761 int res;
1762 u32 ccb_tag;
1763 struct pm8001_ccb_info *ccb;
1764 struct sas_task *task = NULL;
1765 struct host_to_dev_fis fis;
1766 struct domain_device *dev;
1767 struct inbound_queue_table *circularQ;
1768 u32 opc = OPC_INB_SATA_HOST_OPSTART;
1769
1770 task = sas_alloc_slow_task(GFP_ATOMIC);
1771
1772 if (!task) {
1773 PM8001_FAIL_DBG(pm8001_ha,
1774 pm8001_printk("cannot allocate task !!!\n"));
1775 return;
1776 }
1777 task->task_done = pm8001_task_done;
1778
1779 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
1780 if (res) {
1781 PM8001_FAIL_DBG(pm8001_ha,
1782 pm8001_printk("cannot allocate tag !!!\n"));
1783 return;
1784 }
1785
1786 /* allocate domain device by ourselves as libsas
1787 * is not going to provide any
1788 */
1789 dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
1790 if (!dev) {
1791 PM8001_FAIL_DBG(pm8001_ha,
1792 pm8001_printk("Domain device cannot be allocated\n"));
1793 sas_free_task(task);
1794 return;
1795 } else {
1796 task->dev = dev;
1797 task->dev->lldd_dev = pm8001_ha_dev;
1798 }
1799
1800 ccb = &pm8001_ha->ccb_info[ccb_tag];
1801 ccb->device = pm8001_ha_dev;
1802 ccb->ccb_tag = ccb_tag;
1803 ccb->task = task;
1804 pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
1805 pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
1806
1807 memset(&sata_cmd, 0, sizeof(sata_cmd));
1808 circularQ = &pm8001_ha->inbnd_q_tbl[0];
1809
1810 /* construct read log FIS */
1811 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1812 fis.fis_type = 0x27;
1813 fis.flags = 0x80;
1814 fis.command = ATA_CMD_READ_LOG_EXT;
1815 fis.lbal = 0x10;
1816 fis.sector_count = 0x1;
1817
1818 sata_cmd.tag = cpu_to_le32(ccb_tag);
1819 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
1820 sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
1821 memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
1822
1823 res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
1824
1825}
1826
1636/** 1827/**
1637 * mpi_ssp_completion- process the event that FW response to the SSP request. 1828 * mpi_ssp_completion- process the event that FW response to the SSP request.
1638 * @pm8001_ha: our hba card information 1829 * @pm8001_ha: our hba card information
@@ -1867,7 +2058,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
1867 break; 2058 break;
1868 } 2059 }
1869 PM8001_IO_DBG(pm8001_ha, 2060 PM8001_IO_DBG(pm8001_ha,
1870 pm8001_printk("scsi_status = %x \n ", 2061 pm8001_printk("scsi_status = %x\n ",
1871 psspPayload->ssp_resp_iu.status)); 2062 psspPayload->ssp_resp_iu.status));
1872 spin_lock_irqsave(&t->task_state_lock, flags); 2063 spin_lock_irqsave(&t->task_state_lock, flags);
1873 t->task_state_flags &= ~SAS_TASK_STATE_PENDING; 2064 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
@@ -2096,16 +2287,44 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2096 status = le32_to_cpu(psataPayload->status); 2287 status = le32_to_cpu(psataPayload->status);
2097 tag = le32_to_cpu(psataPayload->tag); 2288 tag = le32_to_cpu(psataPayload->tag);
2098 2289
2290 if (!tag) {
2291 PM8001_FAIL_DBG(pm8001_ha,
2292 pm8001_printk("tag null\n"));
2293 return;
2294 }
2099 ccb = &pm8001_ha->ccb_info[tag]; 2295 ccb = &pm8001_ha->ccb_info[tag];
2100 param = le32_to_cpu(psataPayload->param); 2296 param = le32_to_cpu(psataPayload->param);
2101 t = ccb->task; 2297 if (ccb) {
2298 t = ccb->task;
2299 pm8001_dev = ccb->device;
2300 } else {
2301 PM8001_FAIL_DBG(pm8001_ha,
2302 pm8001_printk("ccb null\n"));
2303 return;
2304 }
2305
2306 if (t) {
2307 if (t->dev && (t->dev->lldd_dev))
2308 pm8001_dev = t->dev->lldd_dev;
2309 } else {
2310 PM8001_FAIL_DBG(pm8001_ha,
2311 pm8001_printk("task null\n"));
2312 return;
2313 }
2314
2315 if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
2316 && unlikely(!t || !t->lldd_task || !t->dev)) {
2317 PM8001_FAIL_DBG(pm8001_ha,
2318 pm8001_printk("task or dev null\n"));
2319 return;
2320 }
2321
2102 ts = &t->task_status; 2322 ts = &t->task_status;
2103 pm8001_dev = ccb->device; 2323 if (!ts) {
2104 if (status)
2105 PM8001_FAIL_DBG(pm8001_ha, 2324 PM8001_FAIL_DBG(pm8001_ha,
2106 pm8001_printk("sata IO status 0x%x\n", status)); 2325 pm8001_printk("ts null\n"));
2107 if (unlikely(!t || !t->lldd_task || !t->dev))
2108 return; 2326 return;
2327 }
2109 2328
2110 switch (status) { 2329 switch (status) {
2111 case IO_SUCCESS: 2330 case IO_SUCCESS:
@@ -2113,6 +2332,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2113 if (param == 0) { 2332 if (param == 0) {
2114 ts->resp = SAS_TASK_COMPLETE; 2333 ts->resp = SAS_TASK_COMPLETE;
2115 ts->stat = SAM_STAT_GOOD; 2334 ts->stat = SAM_STAT_GOOD;
2335 /* check if response is for SEND READ LOG */
2336 if (pm8001_dev &&
2337 (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
2338 /* set new bit for abort_all */
2339 pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
2340 /* clear bit for read log */
2341 pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
2342 pm8001_send_abort_all(pm8001_ha, pm8001_dev);
2343 /* Free the tag */
2344 pm8001_tag_free(pm8001_ha, tag);
2345 sas_free_task(t);
2346 return;
2347 }
2116 } else { 2348 } else {
2117 u8 len; 2349 u8 len;
2118 ts->resp = SAS_TASK_COMPLETE; 2350 ts->resp = SAS_TASK_COMPLETE;
@@ -2424,6 +2656,29 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2424 unsigned long flags; 2656 unsigned long flags;
2425 2657
2426 ccb = &pm8001_ha->ccb_info[tag]; 2658 ccb = &pm8001_ha->ccb_info[tag];
2659
2660 if (ccb) {
2661 t = ccb->task;
2662 pm8001_dev = ccb->device;
2663 } else {
2664 PM8001_FAIL_DBG(pm8001_ha,
2665 pm8001_printk("No CCB !!!. returning\n"));
2666 }
2667 if (event)
2668 PM8001_FAIL_DBG(pm8001_ha,
2669 pm8001_printk("SATA EVENT 0x%x\n", event));
2670
2671 /* Check if this is NCQ error */
2672 if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
2673 /* find device using device id */
2674 pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
2675 /* send read log extension */
2676 if (pm8001_dev)
2677 pm8001_send_read_log(pm8001_ha, pm8001_dev);
2678 return;
2679 }
2680
2681 ccb = &pm8001_ha->ccb_info[tag];
2427 t = ccb->task; 2682 t = ccb->task;
2428 pm8001_dev = ccb->device; 2683 pm8001_dev = ccb->device;
2429 if (event) 2684 if (event)
@@ -2432,9 +2687,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2432 if (unlikely(!t || !t->lldd_task || !t->dev)) 2687 if (unlikely(!t || !t->lldd_task || !t->dev))
2433 return; 2688 return;
2434 ts = &t->task_status; 2689 ts = &t->task_status;
2435 PM8001_IO_DBG(pm8001_ha, 2690 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2436 pm8001_printk("port_id = %x,device_id = %x\n", 2691 "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
2437 port_id, dev_id)); 2692 port_id, dev_id, tag, event));
2438 switch (event) { 2693 switch (event) {
2439 case IO_OVERFLOW: 2694 case IO_OVERFLOW:
2440 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); 2695 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
@@ -2822,8 +3077,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2822 } 3077 }
2823} 3078}
2824 3079
2825static void 3080void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
2826mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3081 void *piomb)
2827{ 3082{
2828 struct set_dev_state_resp *pPayload = 3083 struct set_dev_state_resp *pPayload =
2829 (struct set_dev_state_resp *)(piomb + 4); 3084 (struct set_dev_state_resp *)(piomb + 4);
@@ -2843,8 +3098,7 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2843 pm8001_ccb_free(pm8001_ha, tag); 3098 pm8001_ccb_free(pm8001_ha, tag);
2844} 3099}
2845 3100
2846static void 3101void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2847mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2848{ 3102{
2849 struct get_nvm_data_resp *pPayload = 3103 struct get_nvm_data_resp *pPayload =
2850 (struct get_nvm_data_resp *)(piomb + 4); 3104 (struct get_nvm_data_resp *)(piomb + 4);
@@ -2863,8 +3117,8 @@ mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2863 pm8001_ccb_free(pm8001_ha, tag); 3117 pm8001_ccb_free(pm8001_ha, tag);
2864} 3118}
2865 3119
2866static void 3120void
2867mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3121pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2868{ 3122{
2869 struct fw_control_ex *fw_control_context; 3123 struct fw_control_ex *fw_control_context;
2870 struct get_nvm_data_resp *pPayload = 3124 struct get_nvm_data_resp *pPayload =
@@ -2925,7 +3179,7 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2925 pm8001_ccb_free(pm8001_ha, tag); 3179 pm8001_ccb_free(pm8001_ha, tag);
2926} 3180}
2927 3181
2928static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) 3182int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
2929{ 3183{
2930 struct local_phy_ctl_resp *pPayload = 3184 struct local_phy_ctl_resp *pPayload =
2931 (struct local_phy_ctl_resp *)(piomb + 4); 3185 (struct local_phy_ctl_resp *)(piomb + 4);
@@ -2954,7 +3208,7 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
2954 * while receive a broadcast(change) primitive just tell the sas 3208 * while receive a broadcast(change) primitive just tell the sas
2955 * layer to discover the changed domain rather than the whole domain. 3209 * layer to discover the changed domain rather than the whole domain.
2956 */ 3210 */
2957static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) 3211void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
2958{ 3212{
2959 struct pm8001_phy *phy = &pm8001_ha->phy[i]; 3213 struct pm8001_phy *phy = &pm8001_ha->phy[i];
2960 struct asd_sas_phy *sas_phy = &phy->sas_phy; 3214 struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -2988,7 +3242,7 @@ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
2988} 3242}
2989 3243
2990/* Get the link rate speed */ 3244/* Get the link rate speed */
2991static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) 3245void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
2992{ 3246{
2993 struct sas_phy *sas_phy = phy->sas_phy.phy; 3247 struct sas_phy *sas_phy = phy->sas_phy.phy;
2994 3248
@@ -3025,7 +3279,7 @@ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
3025 * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame 3279 * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
3026 * buffer. 3280 * buffer.
3027 */ 3281 */
3028static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, 3282void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
3029 u8 *sas_addr) 3283 u8 *sas_addr)
3030{ 3284{
3031 if (phy->sas_phy.frame_rcvd[0] == 0x34 3285 if (phy->sas_phy.frame_rcvd[0] == 0x34
@@ -3067,7 +3321,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
3067 ((phyId & 0x0F) << 4) | (port_id & 0x0F)); 3321 ((phyId & 0x0F) << 4) | (port_id & 0x0F));
3068 payload.param0 = cpu_to_le32(param0); 3322 payload.param0 = cpu_to_le32(param0);
3069 payload.param1 = cpu_to_le32(param1); 3323 payload.param1 = cpu_to_le32(param1);
3070 mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 3324 pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
3071} 3325}
3072 3326
3073static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, 3327static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3112,19 +3366,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
3112 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, 3366 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
3113 PHY_NOTIFY_ENABLE_SPINUP); 3367 PHY_NOTIFY_ENABLE_SPINUP);
3114 port->port_attached = 1; 3368 port->port_attached = 1;
3115 get_lrate_mode(phy, link_rate); 3369 pm8001_get_lrate_mode(phy, link_rate);
3116 break; 3370 break;
3117 case SAS_EDGE_EXPANDER_DEVICE: 3371 case SAS_EDGE_EXPANDER_DEVICE:
3118 PM8001_MSG_DBG(pm8001_ha, 3372 PM8001_MSG_DBG(pm8001_ha,
3119 pm8001_printk("expander device.\n")); 3373 pm8001_printk("expander device.\n"));
3120 port->port_attached = 1; 3374 port->port_attached = 1;
3121 get_lrate_mode(phy, link_rate); 3375 pm8001_get_lrate_mode(phy, link_rate);
3122 break; 3376 break;
3123 case SAS_FANOUT_EXPANDER_DEVICE: 3377 case SAS_FANOUT_EXPANDER_DEVICE:
3124 PM8001_MSG_DBG(pm8001_ha, 3378 PM8001_MSG_DBG(pm8001_ha,
3125 pm8001_printk("fanout expander device.\n")); 3379 pm8001_printk("fanout expander device.\n"));
3126 port->port_attached = 1; 3380 port->port_attached = 1;
3127 get_lrate_mode(phy, link_rate); 3381 pm8001_get_lrate_mode(phy, link_rate);
3128 break; 3382 break;
3129 default: 3383 default:
3130 PM8001_MSG_DBG(pm8001_ha, 3384 PM8001_MSG_DBG(pm8001_ha,
@@ -3179,7 +3433,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
3179 " phy id = %d\n", port_id, phy_id)); 3433 " phy id = %d\n", port_id, phy_id));
3180 port->port_state = portstate; 3434 port->port_state = portstate;
3181 port->port_attached = 1; 3435 port->port_attached = 1;
3182 get_lrate_mode(phy, link_rate); 3436 pm8001_get_lrate_mode(phy, link_rate);
3183 phy->phy_type |= PORT_TYPE_SATA; 3437 phy->phy_type |= PORT_TYPE_SATA;
3184 phy->phy_attached = 1; 3438 phy->phy_attached = 1;
3185 phy->sas_phy.oob_mode = SATA_OOB_MODE; 3439 phy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -3189,7 +3443,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
3189 sizeof(struct dev_to_host_fis)); 3443 sizeof(struct dev_to_host_fis));
3190 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); 3444 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
3191 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; 3445 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
3192 phy->identify.device_type = SATA_DEV; 3446 phy->identify.device_type = SAS_SATA_DEV;
3193 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); 3447 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
3194 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); 3448 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
3195 pm8001_bytes_dmaed(pm8001_ha, phy_id); 3449 pm8001_bytes_dmaed(pm8001_ha, phy_id);
@@ -3260,7 +3514,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3260} 3514}
3261 3515
3262/** 3516/**
3263 * mpi_reg_resp -process register device ID response. 3517 * pm8001_mpi_reg_resp -process register device ID response.
3264 * @pm8001_ha: our hba card information 3518 * @pm8001_ha: our hba card information
3265 * @piomb: IO message buffer 3519 * @piomb: IO message buffer
3266 * 3520 *
@@ -3269,7 +3523,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3269 * has assigned, from now,inter-communication with FW is no longer using the 3523 * has assigned, from now,inter-communication with FW is no longer using the
3270 * SAS address, use device ID which FW assigned. 3524 * SAS address, use device ID which FW assigned.
3271 */ 3525 */
3272static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3526int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3273{ 3527{
3274 u32 status; 3528 u32 status;
3275 u32 device_id; 3529 u32 device_id;
@@ -3331,7 +3585,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3331 return 0; 3585 return 0;
3332} 3586}
3333 3587
3334static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3588int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3335{ 3589{
3336 u32 status; 3590 u32 status;
3337 u32 device_id; 3591 u32 device_id;
@@ -3347,8 +3601,13 @@ static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3347 return 0; 3601 return 0;
3348} 3602}
3349 3603
3350static int 3604/**
3351mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3605 * fw_flash_update_resp - Response from FW for flash update command.
3606 * @pm8001_ha: our hba card information
3607 * @piomb: IO message buffer
3608 */
3609int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
3610 void *piomb)
3352{ 3611{
3353 u32 status; 3612 u32 status;
3354 struct fw_control_ex fw_control_context; 3613 struct fw_control_ex fw_control_context;
@@ -3403,10 +3662,6 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3403 break; 3662 break;
3404 } 3663 }
3405 ccb->fw_control_context->fw_control->retcode = status; 3664 ccb->fw_control_context->fw_control->retcode = status;
3406 pci_free_consistent(pm8001_ha->pdev,
3407 fw_control_context.len,
3408 fw_control_context.virtAddr,
3409 fw_control_context.phys_addr);
3410 complete(pm8001_ha->nvmd_completion); 3665 complete(pm8001_ha->nvmd_completion);
3411 ccb->task = NULL; 3666 ccb->task = NULL;
3412 ccb->ccb_tag = 0xFFFFFFFF; 3667 ccb->ccb_tag = 0xFFFFFFFF;
@@ -3414,8 +3669,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3414 return 0; 3669 return 0;
3415} 3670}
3416 3671
3417static int 3672int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
3418mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
3419{ 3673{
3420 u32 status; 3674 u32 status;
3421 int i; 3675 int i;
@@ -3431,8 +3685,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
3431 return 0; 3685 return 0;
3432} 3686}
3433 3687
3434static int 3688int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3435mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3436{ 3689{
3437 struct sas_task *t; 3690 struct sas_task *t;
3438 struct pm8001_ccb_info *ccb; 3691 struct pm8001_ccb_info *ccb;
@@ -3440,19 +3693,29 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3440 u32 status ; 3693 u32 status ;
3441 u32 tag, scp; 3694 u32 tag, scp;
3442 struct task_status_struct *ts; 3695 struct task_status_struct *ts;
3696 struct pm8001_device *pm8001_dev;
3443 3697
3444 struct task_abort_resp *pPayload = 3698 struct task_abort_resp *pPayload =
3445 (struct task_abort_resp *)(piomb + 4); 3699 (struct task_abort_resp *)(piomb + 4);
3446 3700
3447 status = le32_to_cpu(pPayload->status); 3701 status = le32_to_cpu(pPayload->status);
3448 tag = le32_to_cpu(pPayload->tag); 3702 tag = le32_to_cpu(pPayload->tag);
3703 if (!tag) {
3704 PM8001_FAIL_DBG(pm8001_ha,
3705 pm8001_printk(" TAG NULL. RETURNING !!!"));
3706 return -1;
3707 }
3708
3449 scp = le32_to_cpu(pPayload->scp); 3709 scp = le32_to_cpu(pPayload->scp);
3450 ccb = &pm8001_ha->ccb_info[tag]; 3710 ccb = &pm8001_ha->ccb_info[tag];
3451 t = ccb->task; 3711 t = ccb->task;
3452 PM8001_IO_DBG(pm8001_ha, 3712 pm8001_dev = ccb->device; /* retrieve device */
3453 pm8001_printk(" status = 0x%x\n", status)); 3713
3454 if (t == NULL) 3714 if (!t) {
3715 PM8001_FAIL_DBG(pm8001_ha,
3716 pm8001_printk(" TASK NULL. RETURNING !!!"));
3455 return -1; 3717 return -1;
3718 }
3456 ts = &t->task_status; 3719 ts = &t->task_status;
3457 if (status != 0) 3720 if (status != 0)
3458 PM8001_FAIL_DBG(pm8001_ha, 3721 PM8001_FAIL_DBG(pm8001_ha,
@@ -3476,7 +3739,15 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3476 spin_unlock_irqrestore(&t->task_state_lock, flags); 3739 spin_unlock_irqrestore(&t->task_state_lock, flags);
3477 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 3740 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3478 mb(); 3741 mb();
3479 t->task_done(t); 3742
3743 if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t) {
3744 pm8001_tag_free(pm8001_ha, tag);
3745 sas_free_task(t);
3746 /* clear the flag */
3747 pm8001_dev->id &= 0xBFFFFFFF;
3748 } else
3749 t->task_done(t);
3750
3480 return 0; 3751 return 0;
3481} 3752}
3482 3753
@@ -3727,17 +3998,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3727 case OPC_OUB_LOCAL_PHY_CNTRL: 3998 case OPC_OUB_LOCAL_PHY_CNTRL:
3728 PM8001_MSG_DBG(pm8001_ha, 3999 PM8001_MSG_DBG(pm8001_ha,
3729 pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); 4000 pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
3730 mpi_local_phy_ctl(pm8001_ha, piomb); 4001 pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
3731 break; 4002 break;
3732 case OPC_OUB_DEV_REGIST: 4003 case OPC_OUB_DEV_REGIST:
3733 PM8001_MSG_DBG(pm8001_ha, 4004 PM8001_MSG_DBG(pm8001_ha,
3734 pm8001_printk("OPC_OUB_DEV_REGIST\n")); 4005 pm8001_printk("OPC_OUB_DEV_REGIST\n"));
3735 mpi_reg_resp(pm8001_ha, piomb); 4006 pm8001_mpi_reg_resp(pm8001_ha, piomb);
3736 break; 4007 break;
3737 case OPC_OUB_DEREG_DEV: 4008 case OPC_OUB_DEREG_DEV:
3738 PM8001_MSG_DBG(pm8001_ha, 4009 PM8001_MSG_DBG(pm8001_ha,
3739 pm8001_printk("unregister the device\n")); 4010 pm8001_printk("unregister the device\n"));
3740 mpi_dereg_resp(pm8001_ha, piomb); 4011 pm8001_mpi_dereg_resp(pm8001_ha, piomb);
3741 break; 4012 break;
3742 case OPC_OUB_GET_DEV_HANDLE: 4013 case OPC_OUB_GET_DEV_HANDLE:
3743 PM8001_MSG_DBG(pm8001_ha, 4014 PM8001_MSG_DBG(pm8001_ha,
@@ -3775,7 +4046,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3775 case OPC_OUB_FW_FLASH_UPDATE: 4046 case OPC_OUB_FW_FLASH_UPDATE:
3776 PM8001_MSG_DBG(pm8001_ha, 4047 PM8001_MSG_DBG(pm8001_ha,
3777 pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); 4048 pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
3778 mpi_fw_flash_update_resp(pm8001_ha, piomb); 4049 pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
3779 break; 4050 break;
3780 case OPC_OUB_GPIO_RESPONSE: 4051 case OPC_OUB_GPIO_RESPONSE:
3781 PM8001_MSG_DBG(pm8001_ha, 4052 PM8001_MSG_DBG(pm8001_ha,
@@ -3788,17 +4059,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3788 case OPC_OUB_GENERAL_EVENT: 4059 case OPC_OUB_GENERAL_EVENT:
3789 PM8001_MSG_DBG(pm8001_ha, 4060 PM8001_MSG_DBG(pm8001_ha,
3790 pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); 4061 pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
3791 mpi_general_event(pm8001_ha, piomb); 4062 pm8001_mpi_general_event(pm8001_ha, piomb);
3792 break; 4063 break;
3793 case OPC_OUB_SSP_ABORT_RSP: 4064 case OPC_OUB_SSP_ABORT_RSP:
3794 PM8001_MSG_DBG(pm8001_ha, 4065 PM8001_MSG_DBG(pm8001_ha,
3795 pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); 4066 pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
3796 mpi_task_abort_resp(pm8001_ha, piomb); 4067 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3797 break; 4068 break;
3798 case OPC_OUB_SATA_ABORT_RSP: 4069 case OPC_OUB_SATA_ABORT_RSP:
3799 PM8001_MSG_DBG(pm8001_ha, 4070 PM8001_MSG_DBG(pm8001_ha,
3800 pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); 4071 pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
3801 mpi_task_abort_resp(pm8001_ha, piomb); 4072 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3802 break; 4073 break;
3803 case OPC_OUB_SAS_DIAG_MODE_START_END: 4074 case OPC_OUB_SAS_DIAG_MODE_START_END:
3804 PM8001_MSG_DBG(pm8001_ha, 4075 PM8001_MSG_DBG(pm8001_ha,
@@ -3823,17 +4094,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3823 case OPC_OUB_SMP_ABORT_RSP: 4094 case OPC_OUB_SMP_ABORT_RSP:
3824 PM8001_MSG_DBG(pm8001_ha, 4095 PM8001_MSG_DBG(pm8001_ha,
3825 pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); 4096 pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
3826 mpi_task_abort_resp(pm8001_ha, piomb); 4097 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3827 break; 4098 break;
3828 case OPC_OUB_GET_NVMD_DATA: 4099 case OPC_OUB_GET_NVMD_DATA:
3829 PM8001_MSG_DBG(pm8001_ha, 4100 PM8001_MSG_DBG(pm8001_ha,
3830 pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); 4101 pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
3831 mpi_get_nvmd_resp(pm8001_ha, piomb); 4102 pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
3832 break; 4103 break;
3833 case OPC_OUB_SET_NVMD_DATA: 4104 case OPC_OUB_SET_NVMD_DATA:
3834 PM8001_MSG_DBG(pm8001_ha, 4105 PM8001_MSG_DBG(pm8001_ha,
3835 pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); 4106 pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
3836 mpi_set_nvmd_resp(pm8001_ha, piomb); 4107 pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
3837 break; 4108 break;
3838 case OPC_OUB_DEVICE_HANDLE_REMOVAL: 4109 case OPC_OUB_DEVICE_HANDLE_REMOVAL:
3839 PM8001_MSG_DBG(pm8001_ha, 4110 PM8001_MSG_DBG(pm8001_ha,
@@ -3842,7 +4113,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3842 case OPC_OUB_SET_DEVICE_STATE: 4113 case OPC_OUB_SET_DEVICE_STATE:
3843 PM8001_MSG_DBG(pm8001_ha, 4114 PM8001_MSG_DBG(pm8001_ha,
3844 pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); 4115 pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
3845 mpi_set_dev_state_resp(pm8001_ha, piomb); 4116 pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
3846 break; 4117 break;
3847 case OPC_OUB_GET_DEVICE_STATE: 4118 case OPC_OUB_GET_DEVICE_STATE:
3848 PM8001_MSG_DBG(pm8001_ha, 4119 PM8001_MSG_DBG(pm8001_ha,
@@ -3864,7 +4135,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3864 } 4135 }
3865} 4136}
3866 4137
3867static int process_oq(struct pm8001_hba_info *pm8001_ha) 4138static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
3868{ 4139{
3869 struct outbound_queue_table *circularQ; 4140 struct outbound_queue_table *circularQ;
3870 void *pMsg1 = NULL; 4141 void *pMsg1 = NULL;
@@ -3873,14 +4144,15 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
3873 unsigned long flags; 4144 unsigned long flags;
3874 4145
3875 spin_lock_irqsave(&pm8001_ha->lock, flags); 4146 spin_lock_irqsave(&pm8001_ha->lock, flags);
3876 circularQ = &pm8001_ha->outbnd_q_tbl[0]; 4147 circularQ = &pm8001_ha->outbnd_q_tbl[vec];
3877 do { 4148 do {
3878 ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); 4149 ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
3879 if (MPI_IO_STATUS_SUCCESS == ret) { 4150 if (MPI_IO_STATUS_SUCCESS == ret) {
3880 /* process the outbound message */ 4151 /* process the outbound message */
3881 process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); 4152 process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
3882 /* free the message from the outbound circular buffer */ 4153 /* free the message from the outbound circular buffer */
3883 mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc); 4154 pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
4155 circularQ, bc);
3884 } 4156 }
3885 if (MPI_IO_STATUS_BUSY == ret) { 4157 if (MPI_IO_STATUS_BUSY == ret) {
3886 /* Update the producer index from SPC */ 4158 /* Update the producer index from SPC */
@@ -3903,7 +4175,7 @@ static const u8 data_dir_flags[] = {
3903 [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ 4175 [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
3904 [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ 4176 [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
3905}; 4177};
3906static void 4178void
3907pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) 4179pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
3908{ 4180{
3909 int i; 4181 int i;
@@ -3978,7 +4250,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
3978 smp_cmd.long_smp_req.long_resp_size = 4250 smp_cmd.long_smp_req.long_resp_size =
3979 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); 4251 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
3980 build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); 4252 build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
3981 mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd); 4253 pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
3982 return 0; 4254 return 0;
3983 4255
3984err_out_2: 4256err_out_2:
@@ -4042,7 +4314,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
4042 ssp_cmd.len = cpu_to_le32(task->total_xfer_len); 4314 ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
4043 ssp_cmd.esgl = 0; 4315 ssp_cmd.esgl = 0;
4044 } 4316 }
4045 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd); 4317 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
4046 return ret; 4318 return ret;
4047} 4319}
4048 4320
@@ -4060,6 +4332,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
4060 u32 ATAP = 0x0; 4332 u32 ATAP = 0x0;
4061 u32 dir; 4333 u32 dir;
4062 struct inbound_queue_table *circularQ; 4334 struct inbound_queue_table *circularQ;
4335 unsigned long flags;
4063 u32 opc = OPC_INB_SATA_HOST_OPSTART; 4336 u32 opc = OPC_INB_SATA_HOST_OPSTART;
4064 memset(&sata_cmd, 0, sizeof(sata_cmd)); 4337 memset(&sata_cmd, 0, sizeof(sata_cmd));
4065 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4338 circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -4080,8 +4353,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
4080 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); 4353 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
4081 } 4354 }
4082 } 4355 }
4083 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) 4356 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
4357 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
4084 ncg_tag = hdr_tag; 4358 ncg_tag = hdr_tag;
4359 }
4085 dir = data_dir_flags[task->data_dir] << 8; 4360 dir = data_dir_flags[task->data_dir] << 8;
4086 sata_cmd.tag = cpu_to_le32(tag); 4361 sata_cmd.tag = cpu_to_le32(tag);
4087 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); 4362 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -4112,7 +4387,55 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
4112 sata_cmd.len = cpu_to_le32(task->total_xfer_len); 4387 sata_cmd.len = cpu_to_le32(task->total_xfer_len);
4113 sata_cmd.esgl = 0; 4388 sata_cmd.esgl = 0;
4114 } 4389 }
4115 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); 4390
4391 /* Check for read log for failed drive and return */
4392 if (sata_cmd.sata_fis.command == 0x2f) {
4393 if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
4394 (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
4395 (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
4396 struct task_status_struct *ts;
4397
4398 pm8001_ha_dev->id &= 0xDFFFFFFF;
4399 ts = &task->task_status;
4400
4401 spin_lock_irqsave(&task->task_state_lock, flags);
4402 ts->resp = SAS_TASK_COMPLETE;
4403 ts->stat = SAM_STAT_GOOD;
4404 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
4405 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
4406 task->task_state_flags |= SAS_TASK_STATE_DONE;
4407 if (unlikely((task->task_state_flags &
4408 SAS_TASK_STATE_ABORTED))) {
4409 spin_unlock_irqrestore(&task->task_state_lock,
4410 flags);
4411 PM8001_FAIL_DBG(pm8001_ha,
4412 pm8001_printk("task 0x%p resp 0x%x "
4413 " stat 0x%x but aborted by upper layer "
4414 "\n", task, ts->resp, ts->stat));
4415 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
4416 } else if (task->uldd_task) {
4417 spin_unlock_irqrestore(&task->task_state_lock,
4418 flags);
4419 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
4420 mb();/* ditto */
4421 spin_unlock_irq(&pm8001_ha->lock);
4422 task->task_done(task);
4423 spin_lock_irq(&pm8001_ha->lock);
4424 return 0;
4425 } else if (!task->uldd_task) {
4426 spin_unlock_irqrestore(&task->task_state_lock,
4427 flags);
4428 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
4429 mb();/*ditto*/
4430 spin_unlock_irq(&pm8001_ha->lock);
4431 task->task_done(task);
4432 spin_lock_irq(&pm8001_ha->lock);
4433 return 0;
4434 }
4435 }
4436 }
4437
4438 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
4116 return ret; 4439 return ret;
4117} 4440}
4118 4441
@@ -4142,12 +4465,12 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
4142 payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | 4465 payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
4143 LINKMODE_AUTO | LINKRATE_15 | 4466 LINKMODE_AUTO | LINKRATE_15 |
4144 LINKRATE_30 | LINKRATE_60 | phy_id); 4467 LINKRATE_30 | LINKRATE_60 | phy_id);
4145 payload.sas_identify.dev_type = SAS_END_DEV; 4468 payload.sas_identify.dev_type = SAS_END_DEVICE;
4146 payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; 4469 payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
4147 memcpy(payload.sas_identify.sas_addr, 4470 memcpy(payload.sas_identify.sas_addr,
4148 pm8001_ha->sas_addr, SAS_ADDR_SIZE); 4471 pm8001_ha->sas_addr, SAS_ADDR_SIZE);
4149 payload.sas_identify.phy_id = phy_id; 4472 payload.sas_identify.phy_id = phy_id;
4150 ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); 4473 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
4151 return ret; 4474 return ret;
4152} 4475}
4153 4476
@@ -4157,7 +4480,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
4157 * @num: the inbound queue number 4480 * @num: the inbound queue number
4158 * @phy_id: the phy id which we wanted to start up. 4481 * @phy_id: the phy id which we wanted to start up.
4159 */ 4482 */
4160static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, 4483int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
4161 u8 phy_id) 4484 u8 phy_id)
4162{ 4485{
4163 struct phy_stop_req payload; 4486 struct phy_stop_req payload;
@@ -4169,12 +4492,12 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
4169 memset(&payload, 0, sizeof(payload)); 4492 memset(&payload, 0, sizeof(payload));
4170 payload.tag = cpu_to_le32(tag); 4493 payload.tag = cpu_to_le32(tag);
4171 payload.phy_id = cpu_to_le32(phy_id); 4494 payload.phy_id = cpu_to_le32(phy_id);
4172 ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); 4495 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
4173 return ret; 4496 return ret;
4174} 4497}
4175 4498
4176/** 4499/**
4177 * see comments on mpi_reg_resp. 4500 * see comments on pm8001_mpi_reg_resp.
4178 */ 4501 */
4179static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, 4502static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
4180 struct pm8001_device *pm8001_dev, u32 flag) 4503 struct pm8001_device *pm8001_dev, u32 flag)
@@ -4204,11 +4527,11 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
4204 if (flag == 1) 4527 if (flag == 1)
4205 stp_sspsmp_sata = 0x02; /*direct attached sata */ 4528 stp_sspsmp_sata = 0x02; /*direct attached sata */
4206 else { 4529 else {
4207 if (pm8001_dev->dev_type == SATA_DEV) 4530 if (pm8001_dev->dev_type == SAS_SATA_DEV)
4208 stp_sspsmp_sata = 0x00; /* stp*/ 4531 stp_sspsmp_sata = 0x00; /* stp*/
4209 else if (pm8001_dev->dev_type == SAS_END_DEV || 4532 else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
4210 pm8001_dev->dev_type == EDGE_DEV || 4533 pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
4211 pm8001_dev->dev_type == FANOUT_DEV) 4534 pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
4212 stp_sspsmp_sata = 0x01; /*ssp or smp*/ 4535 stp_sspsmp_sata = 0x01; /*ssp or smp*/
4213 } 4536 }
4214 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) 4537 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
@@ -4228,14 +4551,14 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
4228 cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); 4551 cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
4229 memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, 4552 memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
4230 SAS_ADDR_SIZE); 4553 SAS_ADDR_SIZE);
4231 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4554 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4232 return rc; 4555 return rc;
4233} 4556}
4234 4557
4235/** 4558/**
4236 * see comments on mpi_reg_resp. 4559 * see comments on pm8001_mpi_reg_resp.
4237 */ 4560 */
4238static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, 4561int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
4239 u32 device_id) 4562 u32 device_id)
4240{ 4563{
4241 struct dereg_dev_req payload; 4564 struct dereg_dev_req payload;
@@ -4249,7 +4572,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
4249 payload.device_id = cpu_to_le32(device_id); 4572 payload.device_id = cpu_to_le32(device_id);
4250 PM8001_MSG_DBG(pm8001_ha, 4573 PM8001_MSG_DBG(pm8001_ha,
4251 pm8001_printk("unregister device device_id = %d\n", device_id)); 4574 pm8001_printk("unregister device device_id = %d\n", device_id));
4252 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4575 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4253 return ret; 4576 return ret;
4254} 4577}
4255 4578
@@ -4272,7 +4595,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
4272 payload.tag = cpu_to_le32(1); 4595 payload.tag = cpu_to_le32(1);
4273 payload.phyop_phyid = 4596 payload.phyop_phyid =
4274 cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); 4597 cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
4275 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4598 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4276 return ret; 4599 return ret;
4277} 4600}
4278 4601
@@ -4296,11 +4619,11 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
4296 * @stat: stat. 4619 * @stat: stat.
4297 */ 4620 */
4298static irqreturn_t 4621static irqreturn_t
4299pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha) 4622pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
4300{ 4623{
4301 pm8001_chip_interrupt_disable(pm8001_ha); 4624 pm8001_chip_interrupt_disable(pm8001_ha, vec);
4302 process_oq(pm8001_ha); 4625 process_oq(pm8001_ha, vec);
4303 pm8001_chip_interrupt_enable(pm8001_ha); 4626 pm8001_chip_interrupt_enable(pm8001_ha, vec);
4304 return IRQ_HANDLED; 4627 return IRQ_HANDLED;
4305} 4628}
4306 4629
@@ -4322,7 +4645,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
4322 task_abort.device_id = cpu_to_le32(dev_id); 4645 task_abort.device_id = cpu_to_le32(dev_id);
4323 task_abort.tag = cpu_to_le32(cmd_tag); 4646 task_abort.tag = cpu_to_le32(cmd_tag);
4324 } 4647 }
4325 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort); 4648 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
4326 return ret; 4649 return ret;
4327} 4650}
4328 4651
@@ -4331,16 +4654,17 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
4331 * @task: the task we wanted to aborted. 4654 * @task: the task we wanted to aborted.
4332 * @flag: the abort flag. 4655 * @flag: the abort flag.
4333 */ 4656 */
4334static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, 4657int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
4335 struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag) 4658 struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
4336{ 4659{
4337 u32 opc, device_id; 4660 u32 opc, device_id;
4338 int rc = TMF_RESP_FUNC_FAILED; 4661 int rc = TMF_RESP_FUNC_FAILED;
4339 PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag" 4662 PM8001_EH_DBG(pm8001_ha,
4340 " = %x", cmd_tag, task_tag)); 4663 pm8001_printk("cmd_tag = %x, abort task tag = 0x%x",
4341 if (pm8001_dev->dev_type == SAS_END_DEV) 4664 cmd_tag, task_tag));
4665 if (pm8001_dev->dev_type == SAS_END_DEVICE)
4342 opc = OPC_INB_SSP_ABORT; 4666 opc = OPC_INB_SSP_ABORT;
4343 else if (pm8001_dev->dev_type == SATA_DEV) 4667 else if (pm8001_dev->dev_type == SAS_SATA_DEV)
4344 opc = OPC_INB_SATA_ABORT; 4668 opc = OPC_INB_SATA_ABORT;
4345 else 4669 else
4346 opc = OPC_INB_SMP_ABORT;/* SMP */ 4670 opc = OPC_INB_SMP_ABORT;/* SMP */
@@ -4358,7 +4682,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
4358 * @ccb: the ccb information. 4682 * @ccb: the ccb information.
4359 * @tmf: task management function. 4683 * @tmf: task management function.
4360 */ 4684 */
4361static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, 4685int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
4362 struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) 4686 struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
4363{ 4687{
4364 struct sas_task *task = ccb->task; 4688 struct sas_task *task = ccb->task;
@@ -4376,11 +4700,11 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
4376 memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); 4700 memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
4377 sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); 4701 sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
4378 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4702 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4379 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); 4703 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
4380 return ret; 4704 return ret;
4381} 4705}
4382 4706
4383static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, 4707int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4384 void *payload) 4708 void *payload)
4385{ 4709{
4386 u32 opc = OPC_INB_GET_NVMD_DATA; 4710 u32 opc = OPC_INB_GET_NVMD_DATA;
@@ -4397,7 +4721,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4397 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); 4721 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4398 if (!fw_control_context) 4722 if (!fw_control_context)
4399 return -ENOMEM; 4723 return -ENOMEM;
4400 fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; 4724 fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
4401 fw_control_context->len = ioctl_payload->length; 4725 fw_control_context->len = ioctl_payload->length;
4402 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4726 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4403 memset(&nvmd_req, 0, sizeof(nvmd_req)); 4727 memset(&nvmd_req, 0, sizeof(nvmd_req));
@@ -4456,11 +4780,11 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4456 default: 4780 default:
4457 break; 4781 break;
4458 } 4782 }
4459 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); 4783 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
4460 return rc; 4784 return rc;
4461} 4785}
4462 4786
4463static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, 4787int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4464 void *payload) 4788 void *payload)
4465{ 4789{
4466 u32 opc = OPC_INB_SET_NVMD_DATA; 4790 u32 opc = OPC_INB_SET_NVMD_DATA;
@@ -4479,7 +4803,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4479 return -ENOMEM; 4803 return -ENOMEM;
4480 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4804 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4481 memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, 4805 memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
4482 ioctl_payload->func_specific, 4806 &ioctl_payload->func_specific,
4483 ioctl_payload->length); 4807 ioctl_payload->length);
4484 memset(&nvmd_req, 0, sizeof(nvmd_req)); 4808 memset(&nvmd_req, 0, sizeof(nvmd_req));
4485 rc = pm8001_tag_alloc(pm8001_ha, &tag); 4809 rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -4536,7 +4860,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4536 default: 4860 default:
4537 break; 4861 break;
4538 } 4862 }
4539 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); 4863 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
4540 return rc; 4864 return rc;
4541} 4865}
4542 4866
@@ -4545,7 +4869,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4545 * @pm8001_ha: our hba card information. 4869 * @pm8001_ha: our hba card information.
4546 * @fw_flash_updata_info: firmware flash update param 4870 * @fw_flash_updata_info: firmware flash update param
4547 */ 4871 */
4548static int 4872int
4549pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, 4873pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
4550 void *fw_flash_updata_info, u32 tag) 4874 void *fw_flash_updata_info, u32 tag)
4551{ 4875{
@@ -4567,11 +4891,11 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
4567 cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr))); 4891 cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
4568 payload.sgl_addr_hi = 4892 payload.sgl_addr_hi =
4569 cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr))); 4893 cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
4570 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4894 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4571 return ret; 4895 return ret;
4572} 4896}
4573 4897
4574static int 4898int
4575pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, 4899pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4576 void *payload) 4900 void *payload)
4577{ 4901{
@@ -4581,29 +4905,14 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4581 int rc; 4905 int rc;
4582 u32 tag; 4906 u32 tag;
4583 struct pm8001_ccb_info *ccb; 4907 struct pm8001_ccb_info *ccb;
4584 void *buffer = NULL; 4908 void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
4585 dma_addr_t phys_addr; 4909 dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
4586 u32 phys_addr_hi;
4587 u32 phys_addr_lo;
4588 struct pm8001_ioctl_payload *ioctl_payload = payload; 4910 struct pm8001_ioctl_payload *ioctl_payload = payload;
4589 4911
4590 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); 4912 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4591 if (!fw_control_context) 4913 if (!fw_control_context)
4592 return -ENOMEM; 4914 return -ENOMEM;
4593 fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; 4915 fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
4594 if (fw_control->len != 0) {
4595 if (pm8001_mem_alloc(pm8001_ha->pdev,
4596 (void **)&buffer,
4597 &phys_addr,
4598 &phys_addr_hi,
4599 &phys_addr_lo,
4600 fw_control->len, 0) != 0) {
4601 PM8001_FAIL_DBG(pm8001_ha,
4602 pm8001_printk("Mem alloc failure\n"));
4603 kfree(fw_control_context);
4604 return -ENOMEM;
4605 }
4606 }
4607 memcpy(buffer, fw_control->buffer, fw_control->len); 4916 memcpy(buffer, fw_control->buffer, fw_control->len);
4608 flash_update_info.sgl.addr = cpu_to_le64(phys_addr); 4917 flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
4609 flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); 4918 flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -4613,6 +4922,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4613 flash_update_info.total_image_len = fw_control->size; 4922 flash_update_info.total_image_len = fw_control->size;
4614 fw_control_context->fw_control = fw_control; 4923 fw_control_context->fw_control = fw_control;
4615 fw_control_context->virtAddr = buffer; 4924 fw_control_context->virtAddr = buffer;
4925 fw_control_context->phys_addr = phys_addr;
4616 fw_control_context->len = fw_control->len; 4926 fw_control_context->len = fw_control->len;
4617 rc = pm8001_tag_alloc(pm8001_ha, &tag); 4927 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4618 if (rc) { 4928 if (rc) {
@@ -4627,7 +4937,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4627 return rc; 4937 return rc;
4628} 4938}
4629 4939
4630static int 4940int
4631pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, 4941pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
4632 struct pm8001_device *pm8001_dev, u32 state) 4942 struct pm8001_device *pm8001_dev, u32 state)
4633{ 4943{
@@ -4648,7 +4958,7 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
4648 payload.tag = cpu_to_le32(tag); 4958 payload.tag = cpu_to_le32(tag);
4649 payload.device_id = cpu_to_le32(pm8001_dev->device_id); 4959 payload.device_id = cpu_to_le32(pm8001_dev->device_id);
4650 payload.nds = cpu_to_le32(state); 4960 payload.nds = cpu_to_le32(state);
4651 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4961 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4652 return rc; 4962 return rc;
4653 4963
4654} 4964}
@@ -4673,7 +4983,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
4673 payload.SSAHOLT = cpu_to_le32(0xd << 25); 4983 payload.SSAHOLT = cpu_to_le32(0xd << 25);
4674 payload.sata_hol_tmo = cpu_to_le32(80); 4984 payload.sata_hol_tmo = cpu_to_le32(80);
4675 payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); 4985 payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
4676 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4986 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4677 return rc; 4987 return rc;
4678 4988
4679} 4989}
@@ -4706,4 +5016,3 @@ const struct pm8001_dispatch pm8001_8001_dispatch = {
4706 .set_dev_state_req = pm8001_chip_set_dev_state_req, 5016 .set_dev_state_req = pm8001_chip_set_dev_state_req,
4707 .sas_re_init_req = pm8001_chip_sas_re_initialization, 5017 .sas_re_init_req = pm8001_chip_sas_re_initialization,
4708}; 5018};
4709
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index d437309cb1e1..d7c1e2034226 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -131,6 +131,8 @@
131#define LINKRATE_30 (0x02 << 8) 131#define LINKRATE_30 (0x02 << 8)
132#define LINKRATE_60 (0x04 << 8) 132#define LINKRATE_60 (0x04 << 8)
133 133
134/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
135#define GSM_SM_BASE 0x4F0000
134struct mpi_msg_hdr{ 136struct mpi_msg_hdr{
135 __le32 header; /* Bits [11:0] - Message operation code */ 137 __le32 header; /* Bits [11:0] - Message operation code */
136 /* Bits [15:12] - Message Category */ 138 /* Bits [15:12] - Message Category */
@@ -298,7 +300,7 @@ struct local_phy_ctl_resp {
298 300
299 301
300#define OP_BITS 0x0000FF00 302#define OP_BITS 0x0000FF00
301#define ID_BITS 0x0000000F 303#define ID_BITS 0x000000FF
302 304
303/* 305/*
304 * brief the data structure of PORT Control Command 306 * brief the data structure of PORT Control Command
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3d5e522e00fc..e4b9bc7f5410 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver 2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 * 3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd. 4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved. 5 * All rights reserved.
@@ -44,8 +44,16 @@
44 44
45static struct scsi_transport_template *pm8001_stt; 45static struct scsi_transport_template *pm8001_stt;
46 46
47/**
48 * chip info structure to identify chip key functionality as
49 * encryption available/not, no of ports, hw specific function ref
50 */
47static const struct pm8001_chip_info pm8001_chips[] = { 51static const struct pm8001_chip_info pm8001_chips[] = {
48 [chip_8001] = { 8, &pm8001_8001_dispatch,}, 52 [chip_8001] = {0, 8, &pm8001_8001_dispatch,},
53 [chip_8008] = {0, 8, &pm8001_80xx_dispatch,},
54 [chip_8009] = {1, 8, &pm8001_80xx_dispatch,},
55 [chip_8018] = {0, 16, &pm8001_80xx_dispatch,},
56 [chip_8019] = {1, 16, &pm8001_80xx_dispatch,},
49}; 57};
50static int pm8001_id; 58static int pm8001_id;
51 59
@@ -155,37 +163,75 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
155} 163}
156 164
157#ifdef PM8001_USE_TASKLET 165#ifdef PM8001_USE_TASKLET
166
167/**
168 * tasklet for 64 msi-x interrupt handler
169 * @opaque: the passed general host adapter struct
170 * Note: pm8001_tasklet is common for pm8001 & pm80xx
171 */
158static void pm8001_tasklet(unsigned long opaque) 172static void pm8001_tasklet(unsigned long opaque)
159{ 173{
160 struct pm8001_hba_info *pm8001_ha; 174 struct pm8001_hba_info *pm8001_ha;
175 u32 vec;
161 pm8001_ha = (struct pm8001_hba_info *)opaque; 176 pm8001_ha = (struct pm8001_hba_info *)opaque;
162 if (unlikely(!pm8001_ha)) 177 if (unlikely(!pm8001_ha))
163 BUG_ON(1); 178 BUG_ON(1);
164 PM8001_CHIP_DISP->isr(pm8001_ha); 179 vec = pm8001_ha->int_vector;
180 PM8001_CHIP_DISP->isr(pm8001_ha, vec);
181}
182#endif
183
184static struct pm8001_hba_info *outq_to_hba(u8 *outq)
185{
186 return container_of((outq - *outq), struct pm8001_hba_info, outq[0]);
165} 187}
188
189/**
190 * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
191 * It obtains the vector number and calls the equivalent bottom
192 * half or services directly.
193 * @opaque: the passed outbound queue/vector. Host structure is
194 * retrieved from the same.
195 */
196static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
197{
198 struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque);
199 u8 outq = *(u8 *)opaque;
200 irqreturn_t ret = IRQ_HANDLED;
201 if (unlikely(!pm8001_ha))
202 return IRQ_NONE;
203 if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
204 return IRQ_NONE;
205 pm8001_ha->int_vector = outq;
206#ifdef PM8001_USE_TASKLET
207 tasklet_schedule(&pm8001_ha->tasklet);
208#else
209 ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq);
166#endif 210#endif
211 return ret;
212}
167 213
214/**
215 * pm8001_interrupt_handler_intx - main INTx interrupt handler.
216 * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure.
217 */
168 218
169 /** 219static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
170 * pm8001_interrupt - when HBA originate a interrupt,we should invoke this
171 * dispatcher to handle each case.
172 * @irq: irq number.
173 * @opaque: the passed general host adapter struct
174 */
175static irqreturn_t pm8001_interrupt(int irq, void *opaque)
176{ 220{
177 struct pm8001_hba_info *pm8001_ha; 221 struct pm8001_hba_info *pm8001_ha;
178 irqreturn_t ret = IRQ_HANDLED; 222 irqreturn_t ret = IRQ_HANDLED;
179 struct sas_ha_struct *sha = opaque; 223 struct sas_ha_struct *sha = dev_id;
180 pm8001_ha = sha->lldd_ha; 224 pm8001_ha = sha->lldd_ha;
181 if (unlikely(!pm8001_ha)) 225 if (unlikely(!pm8001_ha))
182 return IRQ_NONE; 226 return IRQ_NONE;
183 if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) 227 if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
184 return IRQ_NONE; 228 return IRQ_NONE;
229
230 pm8001_ha->int_vector = 0;
185#ifdef PM8001_USE_TASKLET 231#ifdef PM8001_USE_TASKLET
186 tasklet_schedule(&pm8001_ha->tasklet); 232 tasklet_schedule(&pm8001_ha->tasklet);
187#else 233#else
188 ret = PM8001_CHIP_DISP->isr(pm8001_ha); 234 ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
189#endif 235#endif
190 return ret; 236 return ret;
191} 237}
@@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque)
195 * @pm8001_ha:our hba structure. 241 * @pm8001_ha:our hba structure.
196 * 242 *
197 */ 243 */
198static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) 244static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
245 const struct pci_device_id *ent)
199{ 246{
200 int i; 247 int i;
201 spin_lock_init(&pm8001_ha->lock); 248 spin_lock_init(&pm8001_ha->lock);
249 PM8001_INIT_DBG(pm8001_ha,
250 pm8001_printk("pm8001_alloc: PHY:%x\n",
251 pm8001_ha->chip->n_phy));
202 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 252 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
203 pm8001_phy_init(pm8001_ha, i); 253 pm8001_phy_init(pm8001_ha, i);
204 pm8001_ha->port[i].wide_port_phymap = 0; 254 pm8001_ha->port[i].wide_port_phymap = 0;
@@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
222 pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; 272 pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
223 pm8001_ha->memoryMap.region[IOP].alignment = 32; 273 pm8001_ha->memoryMap.region[IOP].alignment = 32;
224 274
225 /* MPI Memory region 3 for consumer Index of inbound queues */ 275 for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
226 pm8001_ha->memoryMap.region[CI].num_elements = 1; 276 /* MPI Memory region 3 for consumer Index of inbound queues */
227 pm8001_ha->memoryMap.region[CI].element_size = 4; 277 pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
228 pm8001_ha->memoryMap.region[CI].total_len = 4; 278 pm8001_ha->memoryMap.region[CI+i].element_size = 4;
229 pm8001_ha->memoryMap.region[CI].alignment = 4; 279 pm8001_ha->memoryMap.region[CI+i].total_len = 4;
230 280 pm8001_ha->memoryMap.region[CI+i].alignment = 4;
231 /* MPI Memory region 4 for producer Index of outbound queues */ 281
232 pm8001_ha->memoryMap.region[PI].num_elements = 1; 282 if ((ent->driver_data) != chip_8001) {
233 pm8001_ha->memoryMap.region[PI].element_size = 4; 283 /* MPI Memory region 5 inbound queues */
234 pm8001_ha->memoryMap.region[PI].total_len = 4; 284 pm8001_ha->memoryMap.region[IB+i].num_elements =
235 pm8001_ha->memoryMap.region[PI].alignment = 4; 285 PM8001_MPI_QUEUE;
236 286 pm8001_ha->memoryMap.region[IB+i].element_size = 128;
237 /* MPI Memory region 5 inbound queues */ 287 pm8001_ha->memoryMap.region[IB+i].total_len =
238 pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE; 288 PM8001_MPI_QUEUE * 128;
239 pm8001_ha->memoryMap.region[IB].element_size = 64; 289 pm8001_ha->memoryMap.region[IB+i].alignment = 128;
240 pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64; 290 } else {
241 pm8001_ha->memoryMap.region[IB].alignment = 64; 291 pm8001_ha->memoryMap.region[IB+i].num_elements =
242 292 PM8001_MPI_QUEUE;
243 /* MPI Memory region 6 outbound queues */ 293 pm8001_ha->memoryMap.region[IB+i].element_size = 64;
244 pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE; 294 pm8001_ha->memoryMap.region[IB+i].total_len =
245 pm8001_ha->memoryMap.region[OB].element_size = 64; 295 PM8001_MPI_QUEUE * 64;
246 pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64; 296 pm8001_ha->memoryMap.region[IB+i].alignment = 64;
247 pm8001_ha->memoryMap.region[OB].alignment = 64; 297 }
298 }
299
300 for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
301 /* MPI Memory region 4 for producer Index of outbound queues */
302 pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
303 pm8001_ha->memoryMap.region[PI+i].element_size = 4;
304 pm8001_ha->memoryMap.region[PI+i].total_len = 4;
305 pm8001_ha->memoryMap.region[PI+i].alignment = 4;
306
307 if (ent->driver_data != chip_8001) {
308 /* MPI Memory region 6 Outbound queues */
309 pm8001_ha->memoryMap.region[OB+i].num_elements =
310 PM8001_MPI_QUEUE;
311 pm8001_ha->memoryMap.region[OB+i].element_size = 128;
312 pm8001_ha->memoryMap.region[OB+i].total_len =
313 PM8001_MPI_QUEUE * 128;
314 pm8001_ha->memoryMap.region[OB+i].alignment = 128;
315 } else {
316 /* MPI Memory region 6 Outbound queues */
317 pm8001_ha->memoryMap.region[OB+i].num_elements =
318 PM8001_MPI_QUEUE;
319 pm8001_ha->memoryMap.region[OB+i].element_size = 64;
320 pm8001_ha->memoryMap.region[OB+i].total_len =
321 PM8001_MPI_QUEUE * 64;
322 pm8001_ha->memoryMap.region[OB+i].alignment = 64;
323 }
248 324
325 }
249 /* Memory region write DMA*/ 326 /* Memory region write DMA*/
250 pm8001_ha->memoryMap.region[NVMD].num_elements = 1; 327 pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
251 pm8001_ha->memoryMap.region[NVMD].element_size = 4096; 328 pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
@@ -264,6 +341,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
264 pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB * 341 pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
265 sizeof(struct pm8001_ccb_info); 342 sizeof(struct pm8001_ccb_info);
266 343
344 /* Memory region for fw flash */
345 pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
346
267 for (i = 0; i < USI_MAX_MEMCNT; i++) { 347 for (i = 0; i < USI_MAX_MEMCNT; i++) {
268 if (pm8001_mem_alloc(pm8001_ha->pdev, 348 if (pm8001_mem_alloc(pm8001_ha->pdev,
269 &pm8001_ha->memoryMap.region[i].virt_ptr, 349 &pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -281,7 +361,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
281 361
282 pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr; 362 pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
283 for (i = 0; i < PM8001_MAX_DEVICES; i++) { 363 for (i = 0; i < PM8001_MAX_DEVICES; i++) {
284 pm8001_ha->devices[i].dev_type = NO_DEVICE; 364 pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
285 pm8001_ha->devices[i].id = i; 365 pm8001_ha->devices[i].id = i;
286 pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; 366 pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
287 pm8001_ha->devices[i].running_req = 0; 367 pm8001_ha->devices[i].running_req = 0;
@@ -339,10 +419,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
339 ioremap(pm8001_ha->io_mem[logicalBar].membase, 419 ioremap(pm8001_ha->io_mem[logicalBar].membase,
340 pm8001_ha->io_mem[logicalBar].memsize); 420 pm8001_ha->io_mem[logicalBar].memsize);
341 PM8001_INIT_DBG(pm8001_ha, 421 PM8001_INIT_DBG(pm8001_ha,
342 pm8001_printk("PCI: bar %d, logicalBar %d " 422 pm8001_printk("PCI: bar %d, logicalBar %d ",
343 "virt_addr=%lx,len=%d\n", bar, logicalBar, 423 bar, logicalBar));
344 (unsigned long) 424 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
345 pm8001_ha->io_mem[logicalBar].memvirtaddr, 425 "base addr %llx virt_addr=%llx len=%d\n",
426 (u64)pm8001_ha->io_mem[logicalBar].membase,
427 (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr,
346 pm8001_ha->io_mem[logicalBar].memsize)); 428 pm8001_ha->io_mem[logicalBar].memsize));
347 } else { 429 } else {
348 pm8001_ha->io_mem[logicalBar].membase = 0; 430 pm8001_ha->io_mem[logicalBar].membase = 0;
@@ -361,8 +443,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
361 * @shost: scsi host struct which has been initialized before. 443 * @shost: scsi host struct which has been initialized before.
362 */ 444 */
363static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, 445static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
364 u32 chip_id, 446 const struct pci_device_id *ent,
365 struct Scsi_Host *shost) 447 struct Scsi_Host *shost)
448
366{ 449{
367 struct pm8001_hba_info *pm8001_ha; 450 struct pm8001_hba_info *pm8001_ha;
368 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 451 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -374,7 +457,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
374 457
375 pm8001_ha->pdev = pdev; 458 pm8001_ha->pdev = pdev;
376 pm8001_ha->dev = &pdev->dev; 459 pm8001_ha->dev = &pdev->dev;
377 pm8001_ha->chip_id = chip_id; 460 pm8001_ha->chip_id = ent->driver_data;
378 pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id]; 461 pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
379 pm8001_ha->irq = pdev->irq; 462 pm8001_ha->irq = pdev->irq;
380 pm8001_ha->sas = sha; 463 pm8001_ha->sas = sha;
@@ -382,12 +465,22 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
382 pm8001_ha->id = pm8001_id++; 465 pm8001_ha->id = pm8001_id++;
383 pm8001_ha->logging_level = 0x01; 466 pm8001_ha->logging_level = 0x01;
384 sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); 467 sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
468 /* IOMB size is 128 for 8088/89 controllers */
469 if (pm8001_ha->chip_id != chip_8001)
470 pm8001_ha->iomb_size = IOMB_SIZE_SPCV;
471 else
472 pm8001_ha->iomb_size = IOMB_SIZE_SPC;
473
385#ifdef PM8001_USE_TASKLET 474#ifdef PM8001_USE_TASKLET
475 /**
476 * default tasklet for non msi-x interrupt handler/first msi-x
477 * interrupt handler
478 **/
386 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, 479 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
387 (unsigned long)pm8001_ha); 480 (unsigned long)pm8001_ha);
388#endif 481#endif
389 pm8001_ioremap(pm8001_ha); 482 pm8001_ioremap(pm8001_ha);
390 if (!pm8001_alloc(pm8001_ha)) 483 if (!pm8001_alloc(pm8001_ha, ent))
391 return pm8001_ha; 484 return pm8001_ha;
392 pm8001_free(pm8001_ha); 485 pm8001_free(pm8001_ha);
393 return NULL; 486 return NULL;
@@ -512,21 +605,50 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
512 */ 605 */
513static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) 606static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
514{ 607{
515 u8 i; 608 u8 i, j;
516#ifdef PM8001_READ_VPD 609#ifdef PM8001_READ_VPD
610 /* For new SPC controllers WWN is stored in flash vpd
611 * For SPC/SPCve controllers WWN is stored in EEPROM
612 * For Older SPC WWN is stored in NVMD
613 */
517 DECLARE_COMPLETION_ONSTACK(completion); 614 DECLARE_COMPLETION_ONSTACK(completion);
518 struct pm8001_ioctl_payload payload; 615 struct pm8001_ioctl_payload payload;
616 u16 deviceid;
617 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
519 pm8001_ha->nvmd_completion = &completion; 618 pm8001_ha->nvmd_completion = &completion;
520 payload.minor_function = 0; 619
521 payload.length = 128; 620 if (pm8001_ha->chip_id == chip_8001) {
522 payload.func_specific = kzalloc(128, GFP_KERNEL); 621 if (deviceid == 0x8081) {
622 payload.minor_function = 4;
623 payload.length = 4096;
624 } else {
625 payload.minor_function = 0;
626 payload.length = 128;
627 }
628 } else {
629 payload.minor_function = 1;
630 payload.length = 4096;
631 }
632 payload.offset = 0;
633 payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
523 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); 634 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
524 wait_for_completion(&completion); 635 wait_for_completion(&completion);
636
637 for (i = 0, j = 0; i <= 7; i++, j++) {
638 if (pm8001_ha->chip_id == chip_8001) {
639 if (deviceid == 0x8081)
640 pm8001_ha->sas_addr[j] =
641 payload.func_specific[0x704 + i];
642 } else
643 pm8001_ha->sas_addr[j] =
644 payload.func_specific[0x804 + i];
645 }
646
525 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 647 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
526 memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, 648 memcpy(&pm8001_ha->phy[i].dev_sas_addr,
527 SAS_ADDR_SIZE); 649 pm8001_ha->sas_addr, SAS_ADDR_SIZE);
528 PM8001_INIT_DBG(pm8001_ha, 650 PM8001_INIT_DBG(pm8001_ha,
529 pm8001_printk("phy %d sas_addr = %016llx \n", i, 651 pm8001_printk("phy %d sas_addr = %016llx\n", i,
530 pm8001_ha->phy[i].dev_sas_addr)); 652 pm8001_ha->phy[i].dev_sas_addr));
531 } 653 }
532#else 654#else
@@ -547,31 +669,50 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
547 * @chip_info: our ha struct. 669 * @chip_info: our ha struct.
548 * @irq_handler: irq_handler 670 * @irq_handler: irq_handler
549 */ 671 */
550static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, 672static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
551 irq_handler_t irq_handler)
552{ 673{
553 u32 i = 0, j = 0; 674 u32 i = 0, j = 0;
554 u32 number_of_intr = 1; 675 u32 number_of_intr;
555 int flag = 0; 676 int flag = 0;
556 u32 max_entry; 677 u32 max_entry;
557 int rc; 678 int rc;
679 static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
680
681 /* SPCv controllers supports 64 msi-x */
682 if (pm8001_ha->chip_id == chip_8001) {
683 number_of_intr = 1;
684 flag |= IRQF_DISABLED;
685 } else {
686 number_of_intr = PM8001_MAX_MSIX_VEC;
687 flag &= ~IRQF_SHARED;
688 flag |= IRQF_DISABLED;
689 }
690
558 max_entry = sizeof(pm8001_ha->msix_entries) / 691 max_entry = sizeof(pm8001_ha->msix_entries) /
559 sizeof(pm8001_ha->msix_entries[0]); 692 sizeof(pm8001_ha->msix_entries[0]);
560 flag |= IRQF_DISABLED;
561 for (i = 0; i < max_entry ; i++) 693 for (i = 0; i < max_entry ; i++)
562 pm8001_ha->msix_entries[i].entry = i; 694 pm8001_ha->msix_entries[i].entry = i;
563 rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries, 695 rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
564 number_of_intr); 696 number_of_intr);
565 pm8001_ha->number_of_intr = number_of_intr; 697 pm8001_ha->number_of_intr = number_of_intr;
566 if (!rc) { 698 if (!rc) {
699 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
700 "pci_enable_msix request ret:%d no of intr %d\n",
701 rc, pm8001_ha->number_of_intr));
702
703 for (i = 0; i < number_of_intr; i++)
704 pm8001_ha->outq[i] = i;
705
567 for (i = 0; i < number_of_intr; i++) { 706 for (i = 0; i < number_of_intr; i++) {
707 snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
708 DRV_NAME"%d", i);
568 if (request_irq(pm8001_ha->msix_entries[i].vector, 709 if (request_irq(pm8001_ha->msix_entries[i].vector,
569 irq_handler, flag, DRV_NAME, 710 pm8001_interrupt_handler_msix, flag,
570 SHOST_TO_SAS_HA(pm8001_ha->shost))) { 711 intr_drvname[i], &pm8001_ha->outq[i])) {
571 for (j = 0; j < i; j++) 712 for (j = 0; j < i; j++)
572 free_irq( 713 free_irq(
573 pm8001_ha->msix_entries[j].vector, 714 pm8001_ha->msix_entries[j].vector,
574 SHOST_TO_SAS_HA(pm8001_ha->shost)); 715 &pm8001_ha->outq[j]);
575 pci_disable_msix(pm8001_ha->pdev); 716 pci_disable_msix(pm8001_ha->pdev);
576 break; 717 break;
577 } 718 }
@@ -588,22 +729,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
588static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) 729static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
589{ 730{
590 struct pci_dev *pdev; 731 struct pci_dev *pdev;
591 irq_handler_t irq_handler = pm8001_interrupt;
592 int rc; 732 int rc;
593 733
594 pdev = pm8001_ha->pdev; 734 pdev = pm8001_ha->pdev;
595 735
596#ifdef PM8001_USE_MSIX 736#ifdef PM8001_USE_MSIX
597 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) 737 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
598 return pm8001_setup_msix(pm8001_ha, irq_handler); 738 return pm8001_setup_msix(pm8001_ha);
599 else 739 else {
740 PM8001_INIT_DBG(pm8001_ha,
741 pm8001_printk("MSIX not supported!!!\n"));
600 goto intx; 742 goto intx;
743 }
601#endif 744#endif
602 745
603intx: 746intx:
604 /* initialize the INT-X interrupt */ 747 /* initialize the INT-X interrupt */
605 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, 748 rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
606 SHOST_TO_SAS_HA(pm8001_ha->shost)); 749 DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
607 return rc; 750 return rc;
608} 751}
609 752
@@ -621,12 +764,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
621{ 764{
622 unsigned int rc; 765 unsigned int rc;
623 u32 pci_reg; 766 u32 pci_reg;
767 u8 i = 0;
624 struct pm8001_hba_info *pm8001_ha; 768 struct pm8001_hba_info *pm8001_ha;
625 struct Scsi_Host *shost = NULL; 769 struct Scsi_Host *shost = NULL;
626 const struct pm8001_chip_info *chip; 770 const struct pm8001_chip_info *chip;
627 771
628 dev_printk(KERN_INFO, &pdev->dev, 772 dev_printk(KERN_INFO, &pdev->dev,
629 "pm8001: driver version %s\n", DRV_VERSION); 773 "pm80xx: driver version %s\n", DRV_VERSION);
630 rc = pci_enable_device(pdev); 774 rc = pci_enable_device(pdev);
631 if (rc) 775 if (rc)
632 goto err_out_enable; 776 goto err_out_enable;
@@ -665,25 +809,39 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
665 goto err_out_free; 809 goto err_out_free;
666 } 810 }
667 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); 811 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
668 pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost); 812 /* ent->driver variable is used to differentiate between controllers */
813 pm8001_ha = pm8001_pci_alloc(pdev, ent, shost);
669 if (!pm8001_ha) { 814 if (!pm8001_ha) {
670 rc = -ENOMEM; 815 rc = -ENOMEM;
671 goto err_out_free; 816 goto err_out_free;
672 } 817 }
673 list_add_tail(&pm8001_ha->list, &hba_list); 818 list_add_tail(&pm8001_ha->list, &hba_list);
674 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); 819 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
675 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); 820 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
676 if (rc) 821 if (rc) {
822 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
823 "chip_init failed [ret: %d]\n", rc));
677 goto err_out_ha_free; 824 goto err_out_ha_free;
825 }
678 826
679 rc = scsi_add_host(shost, &pdev->dev); 827 rc = scsi_add_host(shost, &pdev->dev);
680 if (rc) 828 if (rc)
681 goto err_out_ha_free; 829 goto err_out_ha_free;
682 rc = pm8001_request_irq(pm8001_ha); 830 rc = pm8001_request_irq(pm8001_ha);
683 if (rc) 831 if (rc) {
832 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
833 "pm8001_request_irq failed [ret: %d]\n", rc));
684 goto err_out_shost; 834 goto err_out_shost;
835 }
836
837 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
838 if (pm8001_ha->chip_id != chip_8001) {
839 for (i = 1; i < pm8001_ha->number_of_intr; i++)
840 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
841 /* setup thermal configuration. */
842 pm80xx_set_thermal_config(pm8001_ha);
843 }
685 844
686 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
687 pm8001_init_sas_add(pm8001_ha); 845 pm8001_init_sas_add(pm8001_ha);
688 pm8001_post_sas_ha_init(shost, chip); 846 pm8001_post_sas_ha_init(shost, chip);
689 rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); 847 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
@@ -719,14 +877,15 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
719 sas_remove_host(pm8001_ha->shost); 877 sas_remove_host(pm8001_ha->shost);
720 list_del(&pm8001_ha->list); 878 list_del(&pm8001_ha->list);
721 scsi_remove_host(pm8001_ha->shost); 879 scsi_remove_host(pm8001_ha->shost);
722 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); 880 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
723 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); 881 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
724 882
725#ifdef PM8001_USE_MSIX 883#ifdef PM8001_USE_MSIX
726 for (i = 0; i < pm8001_ha->number_of_intr; i++) 884 for (i = 0; i < pm8001_ha->number_of_intr; i++)
727 synchronize_irq(pm8001_ha->msix_entries[i].vector); 885 synchronize_irq(pm8001_ha->msix_entries[i].vector);
728 for (i = 0; i < pm8001_ha->number_of_intr; i++) 886 for (i = 0; i < pm8001_ha->number_of_intr; i++)
729 free_irq(pm8001_ha->msix_entries[i].vector, sha); 887 free_irq(pm8001_ha->msix_entries[i].vector,
888 &pm8001_ha->outq[i]);
730 pci_disable_msix(pdev); 889 pci_disable_msix(pdev);
731#else 890#else
732 free_irq(pm8001_ha->irq, sha); 891 free_irq(pm8001_ha->irq, sha);
@@ -763,13 +922,14 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
763 printk(KERN_ERR " PCI PM not supported\n"); 922 printk(KERN_ERR " PCI PM not supported\n");
764 return -ENODEV; 923 return -ENODEV;
765 } 924 }
766 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); 925 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
767 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); 926 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
768#ifdef PM8001_USE_MSIX 927#ifdef PM8001_USE_MSIX
769 for (i = 0; i < pm8001_ha->number_of_intr; i++) 928 for (i = 0; i < pm8001_ha->number_of_intr; i++)
770 synchronize_irq(pm8001_ha->msix_entries[i].vector); 929 synchronize_irq(pm8001_ha->msix_entries[i].vector);
771 for (i = 0; i < pm8001_ha->number_of_intr; i++) 930 for (i = 0; i < pm8001_ha->number_of_intr; i++)
772 free_irq(pm8001_ha->msix_entries[i].vector, sha); 931 free_irq(pm8001_ha->msix_entries[i].vector,
932 &pm8001_ha->outq[i]);
773 pci_disable_msix(pdev); 933 pci_disable_msix(pdev);
774#else 934#else
775 free_irq(pm8001_ha->irq, sha); 935 free_irq(pm8001_ha->irq, sha);
@@ -798,6 +958,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
798 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 958 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
799 struct pm8001_hba_info *pm8001_ha; 959 struct pm8001_hba_info *pm8001_ha;
800 int rc; 960 int rc;
961 u8 i = 0;
801 u32 device_state; 962 u32 device_state;
802 pm8001_ha = sha->lldd_ha; 963 pm8001_ha = sha->lldd_ha;
803 device_state = pdev->current_state; 964 device_state = pdev->current_state;
@@ -820,19 +981,33 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
820 if (rc) 981 if (rc)
821 goto err_out_disable; 982 goto err_out_disable;
822 983
823 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); 984 /* chip soft rst only for spc */
985 if (pm8001_ha->chip_id == chip_8001) {
986 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
987 PM8001_INIT_DBG(pm8001_ha,
988 pm8001_printk("chip soft reset successful\n"));
989 }
824 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); 990 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
825 if (rc) 991 if (rc)
826 goto err_out_disable; 992 goto err_out_disable;
827 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); 993
994 /* disable all the interrupt bits */
995 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
996
828 rc = pm8001_request_irq(pm8001_ha); 997 rc = pm8001_request_irq(pm8001_ha);
829 if (rc) 998 if (rc)
830 goto err_out_disable; 999 goto err_out_disable;
831 #ifdef PM8001_USE_TASKLET 1000#ifdef PM8001_USE_TASKLET
1001 /* default tasklet for non msi-x interrupt handler/first msi-x
1002 * interrupt handler */
832 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, 1003 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
833 (unsigned long)pm8001_ha); 1004 (unsigned long)pm8001_ha);
834 #endif 1005#endif
835 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); 1006 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
1007 if (pm8001_ha->chip_id != chip_8001) {
1008 for (i = 1; i < pm8001_ha->number_of_intr; i++)
1009 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
1010 }
836 scsi_unblock_requests(pm8001_ha->shost); 1011 scsi_unblock_requests(pm8001_ha->shost);
837 return 0; 1012 return 0;
838 1013
@@ -843,14 +1018,45 @@ err_out_enable:
843 return rc; 1018 return rc;
844} 1019}
845 1020
1021/* update of pci device, vendor id and driver data with
1022 * unique value for each of the controller
1023 */
846static struct pci_device_id pm8001_pci_table[] = { 1024static struct pci_device_id pm8001_pci_table[] = {
847 { 1025 { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
848 PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
849 },
850 { 1026 {
851 PCI_DEVICE(0x117c, 0x0042), 1027 PCI_DEVICE(0x117c, 0x0042),
852 .driver_data = chip_8001 1028 .driver_data = chip_8001
853 }, 1029 },
1030 /* Support for SPC/SPCv/SPCve controllers */
1031 { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
1032 { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
1033 { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 },
1034 { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 },
1035 { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 },
1036 { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 },
1037 { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
1038 { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
1039 { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
1040 { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
1041 PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
1042 { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
1043 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 },
1044 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1045 PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 },
1046 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1047 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 },
1048 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1049 PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 },
1050 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1051 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 },
1052 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1053 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 },
1054 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1055 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 },
1056 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1057 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
1058 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1059 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
854 {} /* terminate list */ 1060 {} /* terminate list */
855}; 1061};
856 1062
@@ -870,7 +1076,7 @@ static int __init pm8001_init(void)
870{ 1076{
871 int rc = -ENOMEM; 1077 int rc = -ENOMEM;
872 1078
873 pm8001_wq = alloc_workqueue("pm8001", 0, 0); 1079 pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
874 if (!pm8001_wq) 1080 if (!pm8001_wq)
875 goto err; 1081 goto err;
876 1082
@@ -902,7 +1108,8 @@ module_init(pm8001_init);
902module_exit(pm8001_exit); 1108module_exit(pm8001_exit);
903 1109
904MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); 1110MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
905MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver"); 1111MODULE_DESCRIPTION(
1112 "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
906MODULE_VERSION(DRV_VERSION); 1113MODULE_VERSION(DRV_VERSION);
907MODULE_LICENSE("GPL"); 1114MODULE_LICENSE("GPL");
908MODULE_DEVICE_TABLE(pci, pm8001_pci_table); 1115MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b961112395d5..a85d73de7c80 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver 2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 * 3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd. 4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved. 5 * All rights reserved.
@@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag)
68 clear_bit(tag, bitmap); 68 clear_bit(tag, bitmap);
69} 69}
70 70
71static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) 71void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
72{ 72{
73 pm8001_tag_clear(pm8001_ha, tag); 73 pm8001_tag_clear(pm8001_ha, tag);
74} 74}
@@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
212 break; 212 break;
213 case PHY_FUNC_GET_EVENTS: 213 case PHY_FUNC_GET_EVENTS:
214 spin_lock_irqsave(&pm8001_ha->lock, flags); 214 spin_lock_irqsave(&pm8001_ha->lock, flags);
215 if (-1 == pm8001_bar4_shift(pm8001_ha, 215 if (pm8001_ha->chip_id == chip_8001) {
216 if (-1 == pm8001_bar4_shift(pm8001_ha,
216 (phy_id < 4) ? 0x30000 : 0x40000)) { 217 (phy_id < 4) ? 0x30000 : 0x40000)) {
217 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 218 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
218 return -EINVAL; 219 return -EINVAL;
220 }
219 } 221 }
220 { 222 {
221 struct sas_phy *phy = sas_phy->phy; 223 struct sas_phy *phy = sas_phy->phy;
@@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
228 phy->loss_of_dword_sync_count = qp[3]; 230 phy->loss_of_dword_sync_count = qp[3];
229 phy->phy_reset_problem_count = qp[4]; 231 phy->phy_reset_problem_count = qp[4];
230 } 232 }
231 pm8001_bar4_shift(pm8001_ha, 0); 233 if (pm8001_ha->chip_id == chip_8001)
234 pm8001_bar4_shift(pm8001_ha, 0);
232 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 235 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
233 return 0; 236 return 0;
234 default: 237 default:
@@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost)
249 struct pm8001_hba_info *pm8001_ha; 252 struct pm8001_hba_info *pm8001_ha;
250 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 253 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
251 pm8001_ha = sha->lldd_ha; 254 pm8001_ha = sha->lldd_ha;
252 PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); 255 /* SAS_RE_INITIALIZATION not available in SPCv/ve */
256 if (pm8001_ha->chip_id == chip_8001)
257 PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
253 for (i = 0; i < pm8001_ha->chip->n_phy; ++i) 258 for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
254 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); 259 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
255} 260}
@@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev)
352 * @tmf: the task management IU 357 * @tmf: the task management IU
353 */ 358 */
354#define DEV_IS_GONE(pm8001_dev) \ 359#define DEV_IS_GONE(pm8001_dev) \
355 ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))) 360 ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
356static int pm8001_task_exec(struct sas_task *task, const int num, 361static int pm8001_task_exec(struct sas_task *task, const int num,
357 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) 362 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
358{ 363{
@@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
370 struct task_status_struct *tsm = &t->task_status; 375 struct task_status_struct *tsm = &t->task_status;
371 tsm->resp = SAS_TASK_UNDELIVERED; 376 tsm->resp = SAS_TASK_UNDELIVERED;
372 tsm->stat = SAS_PHY_DOWN; 377 tsm->stat = SAS_PHY_DOWN;
373 if (dev->dev_type != SATA_DEV) 378 if (dev->dev_type != SAS_SATA_DEV)
374 t->task_done(t); 379 t->task_done(t);
375 return 0; 380 return 0;
376 } 381 }
@@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
548{ 553{
549 u32 dev; 554 u32 dev;
550 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { 555 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
551 if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) { 556 if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
552 pm8001_ha->devices[dev].id = dev; 557 pm8001_ha->devices[dev].id = dev;
553 return &pm8001_ha->devices[dev]; 558 return &pm8001_ha->devices[dev];
554 } 559 }
@@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
560 } 565 }
561 return NULL; 566 return NULL;
562} 567}
568/**
569 * pm8001_find_dev - find a matching pm8001_device
570 * @pm8001_ha: our hba card information
571 */
572struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
573 u32 device_id)
574{
575 u32 dev;
576 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
577 if (pm8001_ha->devices[dev].device_id == device_id)
578 return &pm8001_ha->devices[dev];
579 }
580 if (dev == PM8001_MAX_DEVICES) {
581 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
582 "DEVICE FOUND !!!\n"));
583 }
584 return NULL;
585}
563 586
564static void pm8001_free_dev(struct pm8001_device *pm8001_dev) 587static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
565{ 588{
566 u32 id = pm8001_dev->id; 589 u32 id = pm8001_dev->id;
567 memset(pm8001_dev, 0, sizeof(*pm8001_dev)); 590 memset(pm8001_dev, 0, sizeof(*pm8001_dev));
568 pm8001_dev->id = id; 591 pm8001_dev->id = id;
569 pm8001_dev->dev_type = NO_DEVICE; 592 pm8001_dev->dev_type = SAS_PHY_UNUSED;
570 pm8001_dev->device_id = PM8001_MAX_DEVICES; 593 pm8001_dev->device_id = PM8001_MAX_DEVICES;
571 pm8001_dev->sas_device = NULL; 594 pm8001_dev->sas_device = NULL;
572} 595}
@@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
624 res = -1; 647 res = -1;
625 } 648 }
626 } else { 649 } else {
627 if (dev->dev_type == SATA_DEV) { 650 if (dev->dev_type == SAS_SATA_DEV) {
628 pm8001_device->attached_phy = 651 pm8001_device->attached_phy =
629 dev->rphy->identify.phy_identifier; 652 dev->rphy->identify.phy_identifier;
630 flag = 1; /* directly sata*/ 653 flag = 1; /* directly sata*/
@@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
634 PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); 657 PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
635 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 658 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
636 wait_for_completion(&completion); 659 wait_for_completion(&completion);
637 if (dev->dev_type == SAS_END_DEV) 660 if (dev->dev_type == SAS_END_DEVICE)
638 msleep(50); 661 msleep(50);
639 pm8001_ha->flags = PM8001F_RUN_TIME; 662 pm8001_ha->flags = PM8001F_RUN_TIME;
640 return 0; 663 return 0;
@@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev)
648 return pm8001_dev_found_notify(dev); 671 return pm8001_dev_found_notify(dev);
649} 672}
650 673
651static void pm8001_task_done(struct sas_task *task) 674void pm8001_task_done(struct sas_task *task)
652{ 675{
653 if (!del_timer(&task->slow_task->timer)) 676 if (!del_timer(&task->slow_task->timer))
654 return; 677 return;
@@ -904,7 +927,7 @@ void pm8001_open_reject_retry(
904 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i]; 927 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
905 928
906 pm8001_dev = ccb->device; 929 pm8001_dev = ccb->device;
907 if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)) 930 if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
908 continue; 931 continue;
909 if (!device_to_close) { 932 if (!device_to_close) {
910 uintptr_t d = (uintptr_t)pm8001_dev 933 uintptr_t d = (uintptr_t)pm8001_dev
@@ -995,6 +1018,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
995 return rc; 1018 return rc;
996} 1019}
997 1020
1021/*
1022* This function handle the IT_NEXUS_XXX event or completion
1023* status code for SSP/SATA/SMP I/O request.
1024*/
1025int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
1026{
1027 int rc = TMF_RESP_FUNC_FAILED;
1028 struct pm8001_device *pm8001_dev;
1029 struct pm8001_hba_info *pm8001_ha;
1030 struct sas_phy *phy;
1031 u32 device_id = 0;
1032
1033 if (!dev || !dev->lldd_dev)
1034 return -1;
1035
1036 pm8001_dev = dev->lldd_dev;
1037 device_id = pm8001_dev->device_id;
1038 pm8001_ha = pm8001_find_ha_by_dev(dev);
1039
1040 PM8001_EH_DBG(pm8001_ha,
1041 pm8001_printk("I_T_Nexus handler invoked !!"));
1042
1043 phy = sas_get_local_phy(dev);
1044
1045 if (dev_is_sata(dev)) {
1046 DECLARE_COMPLETION_ONSTACK(completion_setstate);
1047 if (scsi_is_sas_phy_local(phy)) {
1048 rc = 0;
1049 goto out;
1050 }
1051 /* send internal ssp/sata/smp abort command to FW */
1052 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1053 dev, 1, 0);
1054 msleep(100);
1055
1056 /* deregister the target device */
1057 pm8001_dev_gone_notify(dev);
1058 msleep(200);
1059
1060 /*send phy reset to hard reset target */
1061 rc = sas_phy_reset(phy, 1);
1062 msleep(2000);
1063 pm8001_dev->setds_completion = &completion_setstate;
1064
1065 wait_for_completion(&completion_setstate);
1066 } else {
1067 /* send internal ssp/sata/smp abort command to FW */
1068 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1069 dev, 1, 0);
1070 msleep(100);
1071
1072 /* deregister the target device */
1073 pm8001_dev_gone_notify(dev);
1074 msleep(200);
1075
1076 /*send phy reset to hard reset target */
1077 rc = sas_phy_reset(phy, 1);
1078 msleep(2000);
1079 }
1080 PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
1081 pm8001_dev->device_id, rc));
1082out:
1083 sas_put_local_phy(phy);
1084
1085 return rc;
1086}
998/* mandatory SAM-3, the task reset the specified LUN*/ 1087/* mandatory SAM-3, the task reset the specified LUN*/
999int pm8001_lu_reset(struct domain_device *dev, u8 *lun) 1088int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
1000{ 1089{
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 11008205aeb3..570819464d90 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver 2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 * 3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd. 4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved. 5 * All rights reserved.
@@ -57,8 +57,8 @@
57#include <linux/atomic.h> 57#include <linux/atomic.h>
58#include "pm8001_defs.h" 58#include "pm8001_defs.h"
59 59
60#define DRV_NAME "pm8001" 60#define DRV_NAME "pm80xx"
61#define DRV_VERSION "0.1.36" 61#define DRV_VERSION "0.1.37"
62#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */ 62#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */ 63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ 64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
@@ -66,8 +66,8 @@
66#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/ 66#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
67#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ 67#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
68#define PM8001_MSG_LOGGING 0x40 /* misc message logging */ 68#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
69#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\ 69#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \
70 __func__, __LINE__, ## arg) 70 format, __func__, __LINE__, ## arg)
71#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \ 71#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \
72do { \ 72do { \
73 if (unlikely(HBA->logging_level & LEVEL)) \ 73 if (unlikely(HBA->logging_level & LEVEL)) \
@@ -103,11 +103,12 @@ do { \
103#define PM8001_READ_VPD 103#define PM8001_READ_VPD
104 104
105 105
106#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) 106#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
107 107
108#define PM8001_NAME_LENGTH 32/* generic length of strings */ 108#define PM8001_NAME_LENGTH 32/* generic length of strings */
109extern struct list_head hba_list; 109extern struct list_head hba_list;
110extern const struct pm8001_dispatch pm8001_8001_dispatch; 110extern const struct pm8001_dispatch pm8001_8001_dispatch;
111extern const struct pm8001_dispatch pm8001_80xx_dispatch;
111 112
112struct pm8001_hba_info; 113struct pm8001_hba_info;
113struct pm8001_ccb_info; 114struct pm8001_ccb_info;
@@ -131,15 +132,15 @@ struct pm8001_ioctl_payload {
131struct pm8001_dispatch { 132struct pm8001_dispatch {
132 char *name; 133 char *name;
133 int (*chip_init)(struct pm8001_hba_info *pm8001_ha); 134 int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
134 int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature); 135 int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
135 void (*chip_rst)(struct pm8001_hba_info *pm8001_ha); 136 void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
136 int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha); 137 int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
137 void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha); 138 void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
138 irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha); 139 irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
139 u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha); 140 u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
140 int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha); 141 int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
141 void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha); 142 void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
142 void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha); 143 void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
143 void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); 144 void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
144 int (*smp_req)(struct pm8001_hba_info *pm8001_ha, 145 int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
145 struct pm8001_ccb_info *ccb); 146 struct pm8001_ccb_info *ccb);
@@ -173,6 +174,7 @@ struct pm8001_dispatch {
173}; 174};
174 175
175struct pm8001_chip_info { 176struct pm8001_chip_info {
177 u32 encrypt;
176 u32 n_phy; 178 u32 n_phy;
177 const struct pm8001_dispatch *dispatch; 179 const struct pm8001_dispatch *dispatch;
178}; 180};
@@ -204,7 +206,7 @@ struct pm8001_phy {
204}; 206};
205 207
206struct pm8001_device { 208struct pm8001_device {
207 enum sas_dev_type dev_type; 209 enum sas_device_type dev_type;
208 struct domain_device *sas_device; 210 struct domain_device *sas_device;
209 u32 attached_phy; 211 u32 attached_phy;
210 u32 id; 212 u32 id;
@@ -256,7 +258,20 @@ struct mpi_mem_req {
256 struct mpi_mem region[USI_MAX_MEMCNT]; 258 struct mpi_mem region[USI_MAX_MEMCNT];
257}; 259};
258 260
259struct main_cfg_table { 261struct encrypt {
262 u32 cipher_mode;
263 u32 sec_mode;
264 u32 status;
265 u32 flag;
266};
267
268struct sas_phy_attribute_table {
269 u32 phystart1_16[16];
270 u32 outbound_hw_event_pid1_16[16];
271};
272
273union main_cfg_table {
274 struct {
260 u32 signature; 275 u32 signature;
261 u32 interface_rev; 276 u32 interface_rev;
262 u32 firmware_rev; 277 u32 firmware_rev;
@@ -292,19 +307,69 @@ struct main_cfg_table {
292 u32 fatal_err_dump_length1; 307 u32 fatal_err_dump_length1;
293 u32 hda_mode_flag; 308 u32 hda_mode_flag;
294 u32 anolog_setup_table_offset; 309 u32 anolog_setup_table_offset;
310 u32 rsvd[4];
311 } pm8001_tbl;
312
313 struct {
314 u32 signature;
315 u32 interface_rev;
316 u32 firmware_rev;
317 u32 max_out_io;
318 u32 max_sgl;
319 u32 ctrl_cap_flag;
320 u32 gst_offset;
321 u32 inbound_queue_offset;
322 u32 outbound_queue_offset;
323 u32 inbound_q_nppd_hppd;
324 u32 rsvd[8];
325 u32 crc_core_dump;
326 u32 rsvd1;
327 u32 upper_event_log_addr;
328 u32 lower_event_log_addr;
329 u32 event_log_size;
330 u32 event_log_severity;
331 u32 upper_pcs_event_log_addr;
332 u32 lower_pcs_event_log_addr;
333 u32 pcs_event_log_size;
334 u32 pcs_event_log_severity;
335 u32 fatal_err_interrupt;
336 u32 fatal_err_dump_offset0;
337 u32 fatal_err_dump_length0;
338 u32 fatal_err_dump_offset1;
339 u32 fatal_err_dump_length1;
340 u32 gpio_led_mapping;
341 u32 analog_setup_table_offset;
342 u32 int_vec_table_offset;
343 u32 phy_attr_table_offset;
344 u32 port_recovery_timer;
345 u32 interrupt_reassertion_delay;
346 } pm80xx_tbl;
295}; 347};
296struct general_status_table { 348
349union general_status_table {
350 struct {
297 u32 gst_len_mpistate; 351 u32 gst_len_mpistate;
298 u32 iq_freeze_state0; 352 u32 iq_freeze_state0;
299 u32 iq_freeze_state1; 353 u32 iq_freeze_state1;
300 u32 msgu_tcnt; 354 u32 msgu_tcnt;
301 u32 iop_tcnt; 355 u32 iop_tcnt;
302 u32 reserved; 356 u32 rsvd;
303 u32 phy_state[8]; 357 u32 phy_state[8];
304 u32 reserved1; 358 u32 gpio_input_val;
305 u32 reserved2; 359 u32 rsvd1[2];
306 u32 reserved3; 360 u32 recover_err_info[8];
361 } pm8001_tbl;
362 struct {
363 u32 gst_len_mpistate;
364 u32 iq_freeze_state0;
365 u32 iq_freeze_state1;
366 u32 msgu_tcnt;
367 u32 iop_tcnt;
368 u32 rsvd[9];
369 u32 gpio_input_val;
370 u32 rsvd1[2];
307 u32 recover_err_info[8]; 371 u32 recover_err_info[8];
372 } pm80xx_tbl;
308}; 373};
309struct inbound_queue_table { 374struct inbound_queue_table {
310 u32 element_pri_size_cnt; 375 u32 element_pri_size_cnt;
@@ -351,15 +416,21 @@ struct pm8001_hba_info {
351 struct device *dev; 416 struct device *dev;
352 struct pm8001_hba_memspace io_mem[6]; 417 struct pm8001_hba_memspace io_mem[6];
353 struct mpi_mem_req memoryMap; 418 struct mpi_mem_req memoryMap;
419 struct encrypt encrypt_info; /* support encryption */
354 void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ 420 void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
355 void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ 421 void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/
356 void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ 422 void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/
357 void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/ 423 void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
358 void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/ 424 void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
359 struct main_cfg_table main_cfg_tbl; 425 void __iomem *pspa_q_tbl_addr;
360 struct general_status_table gs_tbl; 426 /*MPI SAS PHY attributes Queue Config Table Addr*/
361 struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM]; 427 void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */
362 struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM]; 428 union main_cfg_table main_cfg_tbl;
429 union general_status_table gs_tbl;
430 struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
431 struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
432 struct sas_phy_attribute_table phy_attr_table;
433 /* MPI SAS PHY attributes */
363 u8 sas_addr[SAS_ADDR_SIZE]; 434 u8 sas_addr[SAS_ADDR_SIZE];
364 struct sas_ha_struct *sas;/* SCSI/SAS glue */ 435 struct sas_ha_struct *sas;/* SCSI/SAS glue */
365 struct Scsi_Host *shost; 436 struct Scsi_Host *shost;
@@ -372,10 +443,12 @@ struct pm8001_hba_info {
372 struct pm8001_port port[PM8001_MAX_PHYS]; 443 struct pm8001_port port[PM8001_MAX_PHYS];
373 u32 id; 444 u32 id;
374 u32 irq; 445 u32 irq;
446 u32 iomb_size; /* SPC and SPCV IOMB size */
375 struct pm8001_device *devices; 447 struct pm8001_device *devices;
376 struct pm8001_ccb_info *ccb_info; 448 struct pm8001_ccb_info *ccb_info;
377#ifdef PM8001_USE_MSIX 449#ifdef PM8001_USE_MSIX
378 struct msix_entry msix_entries[16];/*for msi-x interrupt*/ 450 struct msix_entry msix_entries[PM8001_MAX_MSIX_VEC];
451 /*for msi-x interrupt*/
379 int number_of_intr;/*will be used in remove()*/ 452 int number_of_intr;/*will be used in remove()*/
380#endif 453#endif
381#ifdef PM8001_USE_TASKLET 454#ifdef PM8001_USE_TASKLET
@@ -383,7 +456,10 @@ struct pm8001_hba_info {
383#endif 456#endif
384 u32 logging_level; 457 u32 logging_level;
385 u32 fw_status; 458 u32 fw_status;
459 u32 smp_exp_mode;
460 u32 int_vector;
386 const struct firmware *fw_image; 461 const struct firmware *fw_image;
462 u8 outq[PM8001_MAX_MSIX_VEC];
387}; 463};
388 464
389struct pm8001_work { 465struct pm8001_work {
@@ -419,6 +495,9 @@ struct pm8001_fw_image_header {
419#define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 495#define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10
420#define FLASH_UPDATE_DISABLED 0x11 496#define FLASH_UPDATE_DISABLED 0x11
421 497
498#define NCQ_READ_LOG_FLAG 0x80000000
499#define NCQ_ABORT_ALL_FLAG 0x40000000
500#define NCQ_2ND_RLE_FLAG 0x20000000
422/** 501/**
423 * brief param structure for firmware flash update. 502 * brief param structure for firmware flash update.
424 */ 503 */
@@ -484,6 +563,7 @@ int pm8001_dev_found(struct domain_device *dev);
484void pm8001_dev_gone(struct domain_device *dev); 563void pm8001_dev_gone(struct domain_device *dev);
485int pm8001_lu_reset(struct domain_device *dev, u8 *lun); 564int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
486int pm8001_I_T_nexus_reset(struct domain_device *dev); 565int pm8001_I_T_nexus_reset(struct domain_device *dev);
566int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
487int pm8001_query_task(struct sas_task *task); 567int pm8001_query_task(struct sas_task *task);
488void pm8001_open_reject_retry( 568void pm8001_open_reject_retry(
489 struct pm8001_hba_info *pm8001_ha, 569 struct pm8001_hba_info *pm8001_ha,
@@ -493,6 +573,61 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
493 dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, 573 dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
494 u32 mem_size, u32 align); 574 u32 mem_size, u32 align);
495 575
576void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
577int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
578 struct inbound_queue_table *circularQ,
579 u32 opCode, void *payload, u32 responseQueue);
580int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
581 u16 messageSize, void **messagePtr);
582u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
583 struct outbound_queue_table *circularQ, u8 bc);
584u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
585 struct outbound_queue_table *circularQ,
586 void **messagePtr1, u8 *pBC);
587int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
588 struct pm8001_device *pm8001_dev, u32 state);
589int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
590 void *payload);
591int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
592 void *fw_flash_updata_info, u32 tag);
593int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
594int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
595int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
596 struct pm8001_ccb_info *ccb,
597 struct pm8001_tmf_task *tmf);
598int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
599 struct pm8001_device *pm8001_dev,
600 u8 flag, u32 task_tag, u32 cmd_tag);
601int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
602void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
603void pm8001_work_fn(struct work_struct *work);
604int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha,
605 void *data, int handler);
606void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
607 void *piomb);
608void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
609 void *piomb);
610void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
611 void *piomb);
612int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha,
613 void *piomb);
614void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate);
615void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr);
616void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i);
617int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
618int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
619int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
620 void *piomb);
621int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb);
622int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
623struct sas_task *pm8001_alloc_task(void);
624void pm8001_task_done(struct sas_task *task);
625void pm8001_free_task(struct sas_task *task);
626void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
627struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
628 u32 device_id);
629int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
630
496int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); 631int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
497 632
498/* ctl shared API */ 633/* ctl shared API */
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
new file mode 100644
index 000000000000..302514d8157b
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -0,0 +1,4130 @@
1/*
2 * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 PMC-Sierra, Inc.,
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40 #include <linux/slab.h>
41 #include "pm8001_sas.h"
42 #include "pm80xx_hwi.h"
43 #include "pm8001_chips.h"
44 #include "pm8001_ctl.h"
45
46#define SMP_DIRECT 1
47#define SMP_INDIRECT 2
48/**
49 * read_main_config_table - read the configure table and save it.
50 * @pm8001_ha: our hba card information
51 */
52static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
53{
54 void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
55
56 pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature =
57 pm8001_mr32(address, MAIN_SIGNATURE_OFFSET);
58 pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev =
59 pm8001_mr32(address, MAIN_INTERFACE_REVISION);
60 pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev =
61 pm8001_mr32(address, MAIN_FW_REVISION);
62 pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io =
63 pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET);
64 pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl =
65 pm8001_mr32(address, MAIN_MAX_SGL_OFFSET);
66 pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag =
67 pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET);
68 pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset =
69 pm8001_mr32(address, MAIN_GST_OFFSET);
70 pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset =
71 pm8001_mr32(address, MAIN_IBQ_OFFSET);
72 pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset =
73 pm8001_mr32(address, MAIN_OBQ_OFFSET);
74
75 /* read Error Dump Offset and Length */
76 pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 =
77 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
78 pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 =
79 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
80 pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 =
81 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
82 pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 =
83 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
84
85 /* read GPIO LED settings from the configuration table */
86 pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping =
87 pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET);
88
89 /* read analog Setting offset from the configuration table */
90 pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset =
91 pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
92
93 pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset =
94 pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
95 pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
96 pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
97}
98
99/**
100 * read_general_status_table - read the general status table and save it.
101 * @pm8001_ha: our hba card information
102 */
103static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
104{
105 void __iomem *address = pm8001_ha->general_stat_tbl_addr;
106 pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate =
107 pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET);
108 pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 =
109 pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET);
110 pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 =
111 pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET);
112 pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt =
113 pm8001_mr32(address, GST_MSGUTCNT_OFFSET);
114 pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt =
115 pm8001_mr32(address, GST_IOPTCNT_OFFSET);
116 pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val =
117 pm8001_mr32(address, GST_GPIO_INPUT_VAL);
118 pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] =
119 pm8001_mr32(address, GST_RERRINFO_OFFSET0);
120 pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] =
121 pm8001_mr32(address, GST_RERRINFO_OFFSET1);
122 pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] =
123 pm8001_mr32(address, GST_RERRINFO_OFFSET2);
124 pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] =
125 pm8001_mr32(address, GST_RERRINFO_OFFSET3);
126 pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] =
127 pm8001_mr32(address, GST_RERRINFO_OFFSET4);
128 pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] =
129 pm8001_mr32(address, GST_RERRINFO_OFFSET5);
130 pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] =
131 pm8001_mr32(address, GST_RERRINFO_OFFSET6);
132 pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] =
133 pm8001_mr32(address, GST_RERRINFO_OFFSET7);
134}
135/**
136 * read_phy_attr_table - read the phy attribute table and save it.
137 * @pm8001_ha: our hba card information
138 */
139static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha)
140{
141 void __iomem *address = pm8001_ha->pspa_q_tbl_addr;
142 pm8001_ha->phy_attr_table.phystart1_16[0] =
143 pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET);
144 pm8001_ha->phy_attr_table.phystart1_16[1] =
145 pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET);
146 pm8001_ha->phy_attr_table.phystart1_16[2] =
147 pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET);
148 pm8001_ha->phy_attr_table.phystart1_16[3] =
149 pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET);
150 pm8001_ha->phy_attr_table.phystart1_16[4] =
151 pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET);
152 pm8001_ha->phy_attr_table.phystart1_16[5] =
153 pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET);
154 pm8001_ha->phy_attr_table.phystart1_16[6] =
155 pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET);
156 pm8001_ha->phy_attr_table.phystart1_16[7] =
157 pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET);
158 pm8001_ha->phy_attr_table.phystart1_16[8] =
159 pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET);
160 pm8001_ha->phy_attr_table.phystart1_16[9] =
161 pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET);
162 pm8001_ha->phy_attr_table.phystart1_16[10] =
163 pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET);
164 pm8001_ha->phy_attr_table.phystart1_16[11] =
165 pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET);
166 pm8001_ha->phy_attr_table.phystart1_16[12] =
167 pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET);
168 pm8001_ha->phy_attr_table.phystart1_16[13] =
169 pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET);
170 pm8001_ha->phy_attr_table.phystart1_16[14] =
171 pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET);
172 pm8001_ha->phy_attr_table.phystart1_16[15] =
173 pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET);
174
175 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] =
176 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET);
177 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] =
178 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET);
179 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] =
180 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET);
181 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] =
182 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET);
183 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] =
184 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET);
185 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] =
186 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET);
187 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] =
188 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET);
189 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] =
190 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET);
191 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] =
192 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET);
193 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] =
194 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET);
195 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] =
196 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET);
197 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] =
198 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET);
199 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] =
200 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET);
201 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] =
202 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET);
203 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] =
204 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET);
205 pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] =
206 pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET);
207
208}
209
210/**
211 * read_inbnd_queue_table - read the inbound queue table and save it.
212 * @pm8001_ha: our hba card information
213 */
214static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
215{
216 int i;
217 void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
218 for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
219 u32 offset = i * 0x20;
220 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
221 get_pci_bar_index(pm8001_mr32(address,
222 (offset + IB_PIPCI_BAR)));
223 pm8001_ha->inbnd_q_tbl[i].pi_offset =
224 pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET));
225 }
226}
227
228/**
229 * read_outbnd_queue_table - read the outbound queue table and save it.
230 * @pm8001_ha: our hba card information
231 */
232static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
233{
234 int i;
235 void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
236 for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
237 u32 offset = i * 0x24;
238 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
239 get_pci_bar_index(pm8001_mr32(address,
240 (offset + OB_CIPCI_BAR)));
241 pm8001_ha->outbnd_q_tbl[i].ci_offset =
242 pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET));
243 }
244}
245
246/**
247 * init_default_table_values - init the default table.
248 * @pm8001_ha: our hba card information
249 */
250static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
251{
252 int i;
253 u32 offsetib, offsetob;
254 void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
255 void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
256
257 pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr =
258 pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
259 pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr =
260 pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
261 pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size =
262 PM8001_EVENT_LOG_SIZE;
263 pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01;
264 pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr =
265 pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
266 pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr =
267 pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
268 pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size =
269 PM8001_EVENT_LOG_SIZE;
270 pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
271 pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
272
273 /* Disable end to end CRC checking */
274 pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
275
276 for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
277 pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
278 PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
279 pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
280 pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
281 pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
282 pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
283 pm8001_ha->inbnd_q_tbl[i].base_virt =
284 (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
285 pm8001_ha->inbnd_q_tbl[i].total_length =
286 pm8001_ha->memoryMap.region[IB + i].total_len;
287 pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
288 pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
289 pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
290 pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
291 pm8001_ha->inbnd_q_tbl[i].ci_virt =
292 pm8001_ha->memoryMap.region[CI + i].virt_ptr;
293 offsetib = i * 0x20;
294 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
295 get_pci_bar_index(pm8001_mr32(addressib,
296 (offsetib + 0x14)));
297 pm8001_ha->inbnd_q_tbl[i].pi_offset =
298 pm8001_mr32(addressib, (offsetib + 0x18));
299 pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
300 pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
301 }
302 for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
303 pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
304 PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
305 pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
306 pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
307 pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
308 pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
309 pm8001_ha->outbnd_q_tbl[i].base_virt =
310 (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
311 pm8001_ha->outbnd_q_tbl[i].total_length =
312 pm8001_ha->memoryMap.region[OB + i].total_len;
313 pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
314 pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
315 pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
316 pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
317 /* interrupt vector based on oq */
318 pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
319 pm8001_ha->outbnd_q_tbl[i].pi_virt =
320 pm8001_ha->memoryMap.region[PI + i].virt_ptr;
321 offsetob = i * 0x24;
322 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
323 get_pci_bar_index(pm8001_mr32(addressob,
324 offsetob + 0x14));
325 pm8001_ha->outbnd_q_tbl[i].ci_offset =
326 pm8001_mr32(addressob, (offsetob + 0x18));
327 pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
328 pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
329 }
330}
331
332/**
333 * update_main_config_table - update the main default table to the HBA.
334 * @pm8001_ha: our hba card information
335 */
336static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
337{
338 void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
339 pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET,
340 pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd);
341 pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI,
342 pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr);
343 pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO,
344 pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr);
345 pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE,
346 pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
347 pm8001_mw32(address, MAIN_EVENT_LOG_OPTION,
348 pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity);
349 pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI,
350 pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr);
351 pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO,
352 pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr);
353 pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE,
354 pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
355 pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
356 pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
357 pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
358 pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
359 pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
360 pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
361
362 /* SPCv specific */
363 pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF;
364 /* Set GPIOLED to 0x2 for LED indicator */
365 pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
366 pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
367 pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
368
369 pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
370 pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
371 pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
372 pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
373}
374
375/**
376 * update_inbnd_queue_table - update the inbound queue table to the HBA.
377 * @pm8001_ha: our hba card information
378 */
379static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
380 int number)
381{
382 void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
383 u16 offset = number * 0x20;
384 pm8001_mw32(address, offset + IB_PROPERITY_OFFSET,
385 pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
386 pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET,
387 pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
388 pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET,
389 pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
390 pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET,
391 pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
392 pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
393 pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
394}
395
396/**
397 * update_outbnd_queue_table - update the outbound queue table to the HBA.
398 * @pm8001_ha: our hba card information
399 */
400static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
401 int number)
402{
403 void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
404 u16 offset = number * 0x24;
405 pm8001_mw32(address, offset + OB_PROPERITY_OFFSET,
406 pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
407 pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET,
408 pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
409 pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET,
410 pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
411 pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET,
412 pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
413 pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET,
414 pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
415 pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
416 pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
417}
418
419/**
420 * mpi_init_check - check firmware initialization status.
421 * @pm8001_ha: our hba card information
422 */
423static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
424{
425 u32 max_wait_count;
426 u32 value;
427 u32 gst_len_mpistate;
428
429 /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
430 table is updated */
431 pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
432 /* wait until Inbound DoorBell Clear Register toggled */
433 max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */
434 do {
435 udelay(1);
436 value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
437 value &= SPCv_MSGU_CFG_TABLE_UPDATE;
438 } while ((value != 0) && (--max_wait_count));
439
440 if (!max_wait_count)
441 return -1;
442 /* check the MPI-State for initialization upto 100ms*/
443 max_wait_count = 100 * 1000;/* 100 msec */
444 do {
445 udelay(1);
446 gst_len_mpistate =
447 pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
448 GST_GSTLEN_MPIS_OFFSET);
449 } while ((GST_MPI_STATE_INIT !=
450 (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
451 if (!max_wait_count)
452 return -1;
453
454 /* check MPI Initialization error */
455 gst_len_mpistate = gst_len_mpistate >> 16;
456 if (0x0000 != gst_len_mpistate)
457 return -1;
458
459 return 0;
460}
461
462/**
463 * check_fw_ready - The LLDD check if the FW is ready, if not, return error.
464 * @pm8001_ha: our hba card information
465 */
466static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
467{
468 u32 value;
469 u32 max_wait_count;
470 u32 max_wait_time;
471 int ret = 0;
472
473 /* reset / PCIe ready */
474 max_wait_time = max_wait_count = 100 * 1000; /* 100 milli sec */
475 do {
476 udelay(1);
477 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
478 } while ((value == 0xFFFFFFFF) && (--max_wait_count));
479
480 /* check ila status */
481 max_wait_time = max_wait_count = 1000 * 1000; /* 1000 milli sec */
482 do {
483 udelay(1);
484 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
485 } while (((value & SCRATCH_PAD_ILA_READY) !=
486 SCRATCH_PAD_ILA_READY) && (--max_wait_count));
487 if (!max_wait_count)
488 ret = -1;
489 else {
490 PM8001_MSG_DBG(pm8001_ha,
491 pm8001_printk(" ila ready status in %d millisec\n",
492 (max_wait_time - max_wait_count)));
493 }
494
495 /* check RAAE status */
496 max_wait_time = max_wait_count = 1800 * 1000; /* 1800 milli sec */
497 do {
498 udelay(1);
499 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
500 } while (((value & SCRATCH_PAD_RAAE_READY) !=
501 SCRATCH_PAD_RAAE_READY) && (--max_wait_count));
502 if (!max_wait_count)
503 ret = -1;
504 else {
505 PM8001_MSG_DBG(pm8001_ha,
506 pm8001_printk(" raae ready status in %d millisec\n",
507 (max_wait_time - max_wait_count)));
508 }
509
510 /* check iop0 status */
511 max_wait_time = max_wait_count = 600 * 1000; /* 600 milli sec */
512 do {
513 udelay(1);
514 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
515 } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) &&
516 (--max_wait_count));
517 if (!max_wait_count)
518 ret = -1;
519 else {
520 PM8001_MSG_DBG(pm8001_ha,
521 pm8001_printk(" iop0 ready status in %d millisec\n",
522 (max_wait_time - max_wait_count)));
523 }
524
525 /* check iop1 status only for 16 port controllers */
526 if ((pm8001_ha->chip_id != chip_8008) &&
527 (pm8001_ha->chip_id != chip_8009)) {
528 /* 200 milli sec */
529 max_wait_time = max_wait_count = 200 * 1000;
530 do {
531 udelay(1);
532 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
533 } while (((value & SCRATCH_PAD_IOP1_READY) !=
534 SCRATCH_PAD_IOP1_READY) && (--max_wait_count));
535 if (!max_wait_count)
536 ret = -1;
537 else {
538 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
539 "iop1 ready status in %d millisec\n",
540 (max_wait_time - max_wait_count)));
541 }
542 }
543
544 return ret;
545}
546
/*
 * init_pci_device_addresses - locate the MPI configuration tables in BAR space.
 *
 * Scratch pad 0 encodes the PCI BAR (upper 6 bits) and the offset within
 * that BAR (lower 26 bits) of the main configuration table.  The other
 * tables (general status, inbound/outbound queues, interrupt vector table,
 * PHY attribute table) are found via offsets stored inside the main table.
 */
static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *base_addr;
	u32	value;
	u32	offset;
	u32	pcibar;
	u32	pcilogic;

	value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
	offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */

	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
				offset, value));
	pcilogic = (value & 0xFC000000) >> 26;
	pcibar = get_pci_bar_index(pcilogic);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
	pm8001_ha->main_cfg_tbl_addr = base_addr =
		pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
	/* each sub-table address = main table base + 24-bit offset stored
	 * at a fixed location inside the main configuration table */
	pm8001_ha->general_stat_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
					0xFFFFFF);
	pm8001_ha->inbnd_q_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) &
					0xFFFFFF);
	pm8001_ha->outbnd_q_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) &
					0xFFFFFF);
	pm8001_ha->ivt_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) &
					0xFFFFFF);
	pm8001_ha->pspa_q_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
					0xFFFFFF);

	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GST OFFSET 0x%x\n",
			pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("INBND OFFSET 0x%x\n",
			pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("OBND OFFSET 0x%x\n",
			pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("IVT OFFSET 0x%x\n",
			pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("PSPA OFFSET 0x%x\n",
			pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("addr - main cfg %p general status %p\n",
			pm8001_ha->main_cfg_tbl_addr,
			pm8001_ha->general_stat_tbl_addr));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("addr - inbnd %p obnd %p\n",
			pm8001_ha->inbnd_q_tbl_addr,
			pm8001_ha->outbnd_q_tbl_addr));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("addr - pspa %p ivt %p\n",
			pm8001_ha->pspa_q_tbl_addr,
			pm8001_ha->ivt_tbl_addr));
}
611
612/**
613 * pm80xx_set_thermal_config - support the thermal configuration
614 * @pm8001_ha: our hba card information.
615 */
616int
617pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
618{
619 struct set_ctrl_cfg_req payload;
620 struct inbound_queue_table *circularQ;
621 int rc;
622 u32 tag;
623 u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
624
625 memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
626 rc = pm8001_tag_alloc(pm8001_ha, &tag);
627 if (rc)
628 return -1;
629
630 circularQ = &pm8001_ha->inbnd_q_tbl[0];
631 payload.tag = cpu_to_le32(tag);
632 payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
633 (THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
634 payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
635
636 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
637 return rc;
638
639}
640
641/**
642* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol
643* Timer configuration page
644* @pm8001_ha: our hba card information.
645*/
646static int
647pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
648{
649 struct set_ctrl_cfg_req payload;
650 struct inbound_queue_table *circularQ;
651 SASProtocolTimerConfig_t SASConfigPage;
652 int rc;
653 u32 tag;
654 u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
655
656 memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
657 memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
658
659 rc = pm8001_tag_alloc(pm8001_ha, &tag);
660
661 if (rc)
662 return -1;
663
664 circularQ = &pm8001_ha->inbnd_q_tbl[0];
665 payload.tag = cpu_to_le32(tag);
666
667 SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE;
668 SASConfigPage.MST_MSI = 3 << 15;
669 SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO;
670 SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) |
671 (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
672 SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME;
673
674 if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
675 SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
676
677
678 SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) |
679 SAS_OPNRJT_RTRY_INTVL;
680 SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16)
681 | SAS_COPNRJT_RTRY_TMO;
682 SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16)
683 | SAS_COPNRJT_RTRY_THR;
684 SASConfigPage.MAX_AIP = SAS_MAX_AIP;
685
686 PM8001_INIT_DBG(pm8001_ha,
687 pm8001_printk("SASConfigPage.pageCode "
688 "0x%08x\n", SASConfigPage.pageCode));
689 PM8001_INIT_DBG(pm8001_ha,
690 pm8001_printk("SASConfigPage.MST_MSI "
691 " 0x%08x\n", SASConfigPage.MST_MSI));
692 PM8001_INIT_DBG(pm8001_ha,
693 pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO "
694 " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO));
695 PM8001_INIT_DBG(pm8001_ha,
696 pm8001_printk("SASConfigPage.STP_FRM_TMO "
697 " 0x%08x\n", SASConfigPage.STP_FRM_TMO));
698 PM8001_INIT_DBG(pm8001_ha,
699 pm8001_printk("SASConfigPage.STP_IDLE_TMO "
700 " 0x%08x\n", SASConfigPage.STP_IDLE_TMO));
701 PM8001_INIT_DBG(pm8001_ha,
702 pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL "
703 " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL));
704 PM8001_INIT_DBG(pm8001_ha,
705 pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO "
706 " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
707 PM8001_INIT_DBG(pm8001_ha,
708 pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR "
709 " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
710 PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP "
711 " 0x%08x\n", SASConfigPage.MAX_AIP));
712
713 memcpy(&payload.cfg_pg, &SASConfigPage,
714 sizeof(SASProtocolTimerConfig_t));
715
716 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
717
718 return rc;
719}
720
721/**
722 * pm80xx_get_encrypt_info - Check for encryption
723 * @pm8001_ha: our hba card information.
724 */
725static int
726pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
727{
728 u32 scratch3_value;
729 int ret;
730
731 /* Read encryption status from SCRATCH PAD 3 */
732 scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
733
734 if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
735 SCRATCH_PAD3_ENC_READY) {
736 if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
737 pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
738 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
739 SCRATCH_PAD3_SMF_ENABLED)
740 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
741 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
742 SCRATCH_PAD3_SMA_ENABLED)
743 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
744 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
745 SCRATCH_PAD3_SMB_ENABLED)
746 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
747 pm8001_ha->encrypt_info.status = 0;
748 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
749 "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X."
750 "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
751 scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
752 pm8001_ha->encrypt_info.sec_mode,
753 pm8001_ha->encrypt_info.status));
754 ret = 0;
755 } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
756 SCRATCH_PAD3_ENC_DISABLED) {
757 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
758 "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
759 scratch3_value));
760 pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
761 pm8001_ha->encrypt_info.cipher_mode = 0;
762 pm8001_ha->encrypt_info.sec_mode = 0;
763 return 0;
764 } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
765 SCRATCH_PAD3_ENC_DIS_ERR) {
766 pm8001_ha->encrypt_info.status =
767 (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
768 if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
769 pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
770 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
771 SCRATCH_PAD3_SMF_ENABLED)
772 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
773 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
774 SCRATCH_PAD3_SMA_ENABLED)
775 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
776 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
777 SCRATCH_PAD3_SMB_ENABLED)
778 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
779 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
780 "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X."
781 "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
782 scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
783 pm8001_ha->encrypt_info.sec_mode,
784 pm8001_ha->encrypt_info.status));
785 ret = -1;
786 } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
787 SCRATCH_PAD3_ENC_ENA_ERR) {
788
789 pm8001_ha->encrypt_info.status =
790 (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
791 if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
792 pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
793 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
794 SCRATCH_PAD3_SMF_ENABLED)
795 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
796 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
797 SCRATCH_PAD3_SMA_ENABLED)
798 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
799 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
800 SCRATCH_PAD3_SMB_ENABLED)
801 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
802
803 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
804 "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X."
805 "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
806 scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
807 pm8001_ha->encrypt_info.sec_mode,
808 pm8001_ha->encrypt_info.status));
809 ret = -1;
810 }
811 return ret;
812}
813
814/**
815 * pm80xx_encrypt_update - update flash with encryption informtion
816 * @pm8001_ha: our hba card information.
817 */
818static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
819{
820 struct kek_mgmt_req payload;
821 struct inbound_queue_table *circularQ;
822 int rc;
823 u32 tag;
824 u32 opc = OPC_INB_KEK_MANAGEMENT;
825
826 memset(&payload, 0, sizeof(struct kek_mgmt_req));
827 rc = pm8001_tag_alloc(pm8001_ha, &tag);
828 if (rc)
829 return -1;
830
831 circularQ = &pm8001_ha->inbnd_q_tbl[0];
832 payload.tag = cpu_to_le32(tag);
833 /* Currently only one key is used. New KEK index is 1.
834 * Current KEK index is 1. Store KEK to NVRAM is 1.
835 */
836 payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
837 KEK_MGMT_SUBOP_KEYCARDUPDATE);
838
839 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
840
841 return rc;
842}
843
844/**
845 * pm8001_chip_init - the main init function that initialize whole PM8001 chip.
846 * @pm8001_ha: our hba card information
847 */
848static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
849{
850 int ret;
851 u8 i = 0;
852
853 /* check the firmware status */
854 if (-1 == check_fw_ready(pm8001_ha)) {
855 PM8001_FAIL_DBG(pm8001_ha,
856 pm8001_printk("Firmware is not ready!\n"));
857 return -EBUSY;
858 }
859
860 /* Initialize pci space address eg: mpi offset */
861 init_pci_device_addresses(pm8001_ha);
862 init_default_table_values(pm8001_ha);
863 read_main_config_table(pm8001_ha);
864 read_general_status_table(pm8001_ha);
865 read_inbnd_queue_table(pm8001_ha);
866 read_outbnd_queue_table(pm8001_ha);
867 read_phy_attr_table(pm8001_ha);
868
869 /* update main config table ,inbound table and outbound table */
870 update_main_config_table(pm8001_ha);
871 for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
872 update_inbnd_queue_table(pm8001_ha, i);
873 for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
874 update_outbnd_queue_table(pm8001_ha, i);
875
876 /* notify firmware update finished and check initialization status */
877 if (0 == mpi_init_check(pm8001_ha)) {
878 PM8001_INIT_DBG(pm8001_ha,
879 pm8001_printk("MPI initialize successful!\n"));
880 } else
881 return -EBUSY;
882
883 /* send SAS protocol timer configuration page to FW */
884 ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
885
886 /* Check for encryption */
887 if (pm8001_ha->chip->encrypt) {
888 PM8001_INIT_DBG(pm8001_ha,
889 pm8001_printk("Checking for encryption\n"));
890 ret = pm80xx_get_encrypt_info(pm8001_ha);
891 if (ret == -1) {
892 PM8001_INIT_DBG(pm8001_ha,
893 pm8001_printk("Encryption error !!\n"));
894 if (pm8001_ha->encrypt_info.status == 0x81) {
895 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
896 "Encryption enabled with error."
897 "Saving encryption key to flash\n"));
898 pm80xx_encrypt_update(pm8001_ha);
899 }
900 }
901 }
902 return 0;
903}
904
/*
 * mpi_uninit_check - ask the firmware to terminate MPI and wait for it.
 *
 * Rings the inbound doorbell with the CFG_TABLE_RESET bit, waits for the
 * firmware to acknowledge, then polls the general status table until the
 * MPI state reads uninitialized.  Returns 0 on success, -1 on timeout.
 */
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
	u32 max_wait_count;
	u32 value;
	u32 gst_len_mpistate;
	init_pci_device_addresses(pm8001_ha);
	/* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
	table is stop */
	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);

	/* wait until Inbound DoorBell Clear Register toggled */
	max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */
	do {
		udelay(1);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
		value &= SPCv_MSGU_CFG_TABLE_RESET;
	} while ((value != 0) && (--max_wait_count));

	if (!max_wait_count) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("TIMEOUT:IBDB value/=%x\n", value));
		return -1;
	}

	/* check the MPI-State for termination in progress */
	/* wait until Inbound DoorBell Clear Register toggled */
	max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */
	do {
		udelay(1);
		gst_len_mpistate =
			pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
					GST_GSTLEN_MPIS_OFFSET);
		if (GST_MPI_STATE_UNINIT ==
			(gst_len_mpistate & GST_MPI_STATE_MASK))
			break;
	} while (--max_wait_count);
	if (!max_wait_count) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk(" TIME OUT MPI State = 0x%x\n",
				gst_len_mpistate & GST_MPI_STATE_MASK));
		return -1;
	}

	return 0;
}
950
951/**
952 * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
953 * the FW register status to the originated status.
954 * @pm8001_ha: our hba card information
955 */
956
957static int
958pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
959{
960 u32 regval;
961 u32 bootloader_state;
962
963 /* Check if MPI is in ready state to reset */
964 if (mpi_uninit_check(pm8001_ha) != 0) {
965 PM8001_FAIL_DBG(pm8001_ha,
966 pm8001_printk("MPI state is not ready\n"));
967 return -1;
968 }
969
970 /* checked for reset register normal state; 0x0 */
971 regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
972 PM8001_INIT_DBG(pm8001_ha,
973 pm8001_printk("reset register before write : 0x%x\n", regval));
974
975 pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
976 mdelay(500);
977
978 regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
979 PM8001_INIT_DBG(pm8001_ha,
980 pm8001_printk("reset register after write 0x%x\n", regval));
981
982 if ((regval & SPCv_SOFT_RESET_READ_MASK) ==
983 SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) {
984 PM8001_MSG_DBG(pm8001_ha,
985 pm8001_printk(" soft reset successful [regval: 0x%x]\n",
986 regval));
987 } else {
988 PM8001_MSG_DBG(pm8001_ha,
989 pm8001_printk(" soft reset failed [regval: 0x%x]\n",
990 regval));
991
992 /* check bootloader is successfully executed or in HDA mode */
993 bootloader_state =
994 pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
995 SCRATCH_PAD1_BOOTSTATE_MASK;
996
997 if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) {
998 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
999 "Bootloader state - HDA mode SEEPROM\n"));
1000 } else if (bootloader_state ==
1001 SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) {
1002 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
1003 "Bootloader state - HDA mode Bootstrap Pin\n"));
1004 } else if (bootloader_state ==
1005 SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) {
1006 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
1007 "Bootloader state - HDA mode soft reset\n"));
1008 } else if (bootloader_state ==
1009 SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) {
1010 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
1011 "Bootloader state-HDA mode critical error\n"));
1012 }
1013 return -EBUSY;
1014 }
1015
1016 /* check the firmware status after reset */
1017 if (-1 == check_fw_ready(pm8001_ha)) {
1018 PM8001_FAIL_DBG(pm8001_ha,
1019 pm8001_printk("Firmware is not ready!\n"));
1020 return -EBUSY;
1021 }
1022 PM8001_INIT_DBG(pm8001_ha,
1023 pm8001_printk("SPCv soft reset Complete\n"));
1024 return 0;
1025}
1026
1027static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
1028{
1029 u32 i;
1030
1031 PM8001_INIT_DBG(pm8001_ha,
1032 pm8001_printk("chip reset start\n"));
1033
1034 /* do SPCv chip reset. */
1035 pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
1036 PM8001_INIT_DBG(pm8001_ha,
1037 pm8001_printk("SPC soft reset Complete\n"));
1038
1039 /* Check this ..whether delay is required or no */
1040 /* delay 10 usec */
1041 udelay(10);
1042
1043 /* wait for 20 msec until the firmware gets reloaded */
1044 i = 20;
1045 do {
1046 mdelay(1);
1047 } while ((--i) != 0);
1048
1049 PM8001_INIT_DBG(pm8001_ha,
1050 pm8001_printk("chip reset finished\n"));
1051}
1052
1053/**
1054 * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
1055 * @pm8001_ha: our hba card information
1056 */
1057static void
1058pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
1059{
1060 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
1061 pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
1062}
1063
1064/**
1065 * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
1066 * @pm8001_ha: our hba card information
1067 */
1068static void
1069pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
1070{
1071 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
1072}
1073
1074/**
1075 * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
1076 * @pm8001_ha: our hba card information
1077 */
1078static void
1079pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
1080{
1081#ifdef PM8001_USE_MSIX
1082 u32 mask;
1083 mask = (u32)(1 << vec);
1084
1085 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
1086 return;
1087#endif
1088 pm80xx_chip_intx_interrupt_enable(pm8001_ha);
1089
1090}
1091
1092/**
1093 * pm8001_chip_interrupt_disable- disable PM8001 chip interrupt
1094 * @pm8001_ha: our hba card information
1095 */
1096static void
1097pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
1098{
1099#ifdef PM8001_USE_MSIX
1100 u32 mask;
1101 if (vec == 0xFF)
1102 mask = 0xFFFFFFFF;
1103 else
1104 mask = (u32)(1 << vec);
1105 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
1106 return;
1107#endif
1108 pm80xx_chip_intx_interrupt_disable(pm8001_ha);
1109}
1110
1111static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
1112 struct pm8001_device *pm8001_ha_dev)
1113{
1114 int res;
1115 u32 ccb_tag;
1116 struct pm8001_ccb_info *ccb;
1117 struct sas_task *task = NULL;
1118 struct task_abort_req task_abort;
1119 struct inbound_queue_table *circularQ;
1120 u32 opc = OPC_INB_SATA_ABORT;
1121 int ret;
1122
1123 if (!pm8001_ha_dev) {
1124 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
1125 return;
1126 }
1127
1128 task = sas_alloc_slow_task(GFP_ATOMIC);
1129
1130 if (!task) {
1131 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
1132 "allocate task\n"));
1133 return;
1134 }
1135
1136 task->task_done = pm8001_task_done;
1137
1138 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
1139 if (res)
1140 return;
1141
1142 ccb = &pm8001_ha->ccb_info[ccb_tag];
1143 ccb->device = pm8001_ha_dev;
1144 ccb->ccb_tag = ccb_tag;
1145 ccb->task = task;
1146
1147 circularQ = &pm8001_ha->inbnd_q_tbl[0];
1148
1149 memset(&task_abort, 0, sizeof(task_abort));
1150 task_abort.abort_all = cpu_to_le32(1);
1151 task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
1152 task_abort.tag = cpu_to_le32(ccb_tag);
1153
1154 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
1155
1156}
1157
1158static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
1159 struct pm8001_device *pm8001_ha_dev)
1160{
1161 struct sata_start_req sata_cmd;
1162 int res;
1163 u32 ccb_tag;
1164 struct pm8001_ccb_info *ccb;
1165 struct sas_task *task = NULL;
1166 struct host_to_dev_fis fis;
1167 struct domain_device *dev;
1168 struct inbound_queue_table *circularQ;
1169 u32 opc = OPC_INB_SATA_HOST_OPSTART;
1170
1171 task = sas_alloc_slow_task(GFP_ATOMIC);
1172
1173 if (!task) {
1174 PM8001_FAIL_DBG(pm8001_ha,
1175 pm8001_printk("cannot allocate task !!!\n"));
1176 return;
1177 }
1178 task->task_done = pm8001_task_done;
1179
1180 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
1181 if (res) {
1182 PM8001_FAIL_DBG(pm8001_ha,
1183 pm8001_printk("cannot allocate tag !!!\n"));
1184 return;
1185 }
1186
1187 /* allocate domain device by ourselves as libsas
1188 * is not going to provide any
1189 */
1190 dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
1191 if (!dev) {
1192 PM8001_FAIL_DBG(pm8001_ha,
1193 pm8001_printk("Domain device cannot be allocated\n"));
1194 sas_free_task(task);
1195 return;
1196 } else {
1197 task->dev = dev;
1198 task->dev->lldd_dev = pm8001_ha_dev;
1199 }
1200
1201 ccb = &pm8001_ha->ccb_info[ccb_tag];
1202 ccb->device = pm8001_ha_dev;
1203 ccb->ccb_tag = ccb_tag;
1204 ccb->task = task;
1205 pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
1206 pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
1207
1208 memset(&sata_cmd, 0, sizeof(sata_cmd));
1209 circularQ = &pm8001_ha->inbnd_q_tbl[0];
1210
1211 /* construct read log FIS */
1212 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1213 fis.fis_type = 0x27;
1214 fis.flags = 0x80;
1215 fis.command = ATA_CMD_READ_LOG_EXT;
1216 fis.lbal = 0x10;
1217 fis.sector_count = 0x1;
1218
1219 sata_cmd.tag = cpu_to_le32(ccb_tag);
1220 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
1221 sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
1222 memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
1223
1224 res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
1225
1226}
1227
/**
 * mpi_ssp_completion - process a firmware SSP completion IOMB.
 * @pm8001_ha: our hba card information
 * @piomb: the message contents of this outbound message.
 *
 * Called when the firmware has finished an SSP request (for example an
 * I/O request) and has already filled in the scatter/gather data.  The
 * firmware status code is translated into a libsas task_status, the CCB
 * and tag are released, and task_done() is invoked so that whoever is
 * waiting on the task is notified.
 */
static void
mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
{
	struct sas_task *t;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 status;
	u32 param;
	u32 tag;
	struct ssp_completion_resp *psspPayload;
	struct task_status_struct *ts;
	struct ssp_response_iu *iu;
	struct pm8001_device *pm8001_dev;
	/* payload starts 4 bytes into the outbound message body */
	psspPayload = (struct ssp_completion_resp *)(piomb + 4);
	status = le32_to_cpu(psspPayload->status);
	tag = le32_to_cpu(psspPayload->tag);
	ccb = &pm8001_ha->ccb_info[tag];
	if ((status == IO_ABORTED) && ccb->open_retry) {
		/* Being completed by another */
		ccb->open_retry = 0;
		return;
	}
	pm8001_dev = ccb->device;
	param = le32_to_cpu(psspPayload->param);
	t = ccb->task;

	/* IO_UNDERFLOW is handled as a normal (non-failure) status below */
	if (status && status != IO_UNDERFLOW)
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("sas IO status 0x%x\n", status));
	if (unlikely(!t || !t->lldd_task || !t->dev))
		return;
	ts = &t->task_status;
	/* translate the firmware status into libsas resp/stat codes */
	switch (status) {
	case IO_SUCCESS:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_SUCCESS ,param = 0x%x\n",
				param));
		if (param == 0) {
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAM_STAT_GOOD;
		} else {
			/* non-zero param: a response IU was returned */
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PROTO_RESPONSE;
			ts->residual = param;
			iu = &psspPayload->ssp_resp_iu;
			sas_ssp_task_response(pm8001_ha->dev, t, iu);
		}
		if (pm8001_dev)
			pm8001_dev->running_req--;
		break;
	case IO_ABORTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_ABORTED IOMB Tag\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		break;
	case IO_UNDERFLOW:
		/* SSP Completion with error */
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n",
				param));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_UNDERRUN;
		ts->residual = param;
		if (pm8001_dev)
			pm8001_dev->running_req--;
		break;
	case IO_NO_DEVICE:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_NO_DEVICE\n"));
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		break;
	case IO_XFER_ERROR_BREAK:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		/* Force the midlayer to retry */
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_XFER_ERROR_PHY_NOT_READY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
		PM8001_IO_DBG(pm8001_ha,
		pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_EPROTO;
		break;
	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		break;
	case IO_OPEN_CNX_ERROR_BREAK:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	/* all I-T nexus loss flavours get the same treatment */
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		/* only escalate internally-generated (non-ULD) tasks */
		if (!t->uldd_task)
			pm8001_handle_event(pm8001_ha,
				pm8001_dev,
				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
		break;
	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
		break;
	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
			"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
		break;
	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
		break;
	case IO_XFER_ERROR_NAK_RECEIVED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case IO_XFER_ERROR_DMA:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_DMA\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_XFER_ERROR_OFFSET_MISMATCH:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_PORT_IN_RESET:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_PORT_IN_RESET\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_DS_NON_OPERATIONAL:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		if (!t->uldd_task)
			pm8001_handle_event(pm8001_ha,
				pm8001_dev,
				IO_DS_NON_OPERATIONAL);
		break;
	case IO_DS_IN_RECOVERY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_DS_IN_RECOVERY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_TM_TAG_NOT_FOUND:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	default:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("Unknown status 0x%x\n", status));
		/* not allowed case. Therefore, return failed status */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	}
	PM8001_IO_DBG(pm8001_ha,
		pm8001_printk("scsi_status = 0x%x\n ",
		psspPayload->ssp_resp_iu.status));
	/* mark the task done; if upper layer already aborted it, just
	 * free the CCB and tag without calling task_done() */
	spin_lock_irqsave(&t->task_state_lock, flags);
	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
	t->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
			"task 0x%p done with io_status 0x%x resp 0x%x "
			"stat 0x%x but aborted by upper layer!\n",
			t, status, ts->resp, ts->stat));
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	} else {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
		mb();/* in order to force CPU ordering */
		t->task_done(t);
	}
}
1487
1488/*See the comments for mpi_ssp_completion */
1489static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
1490{
1491 struct sas_task *t;
1492 unsigned long flags;
1493 struct task_status_struct *ts;
1494 struct pm8001_ccb_info *ccb;
1495 struct pm8001_device *pm8001_dev;
1496 struct ssp_event_resp *psspPayload =
1497 (struct ssp_event_resp *)(piomb + 4);
1498 u32 event = le32_to_cpu(psspPayload->event);
1499 u32 tag = le32_to_cpu(psspPayload->tag);
1500 u32 port_id = le32_to_cpu(psspPayload->port_id);
1501
1502 ccb = &pm8001_ha->ccb_info[tag];
1503 t = ccb->task;
1504 pm8001_dev = ccb->device;
1505 if (event)
1506 PM8001_FAIL_DBG(pm8001_ha,
1507 pm8001_printk("sas IO status 0x%x\n", event));
1508 if (unlikely(!t || !t->lldd_task || !t->dev))
1509 return;
1510 ts = &t->task_status;
1511 PM8001_IO_DBG(pm8001_ha,
1512 pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
1513 port_id, tag, event));
1514 switch (event) {
1515 case IO_OVERFLOW:
1516 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
1517 ts->resp = SAS_TASK_COMPLETE;
1518 ts->stat = SAS_DATA_OVERRUN;
1519 ts->residual = 0;
1520 if (pm8001_dev)
1521 pm8001_dev->running_req--;
1522 break;
1523 case IO_XFER_ERROR_BREAK:
1524 PM8001_IO_DBG(pm8001_ha,
1525 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
1526 pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
1527 return;
1528 case IO_XFER_ERROR_PHY_NOT_READY:
1529 PM8001_IO_DBG(pm8001_ha,
1530 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
1531 ts->resp = SAS_TASK_COMPLETE;
1532 ts->stat = SAS_OPEN_REJECT;
1533 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1534 break;
1535 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
1536 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
1537 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
1538 ts->resp = SAS_TASK_COMPLETE;
1539 ts->stat = SAS_OPEN_REJECT;
1540 ts->open_rej_reason = SAS_OREJ_EPROTO;
1541 break;
1542 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
1543 PM8001_IO_DBG(pm8001_ha,
1544 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
1545 ts->resp = SAS_TASK_COMPLETE;
1546 ts->stat = SAS_OPEN_REJECT;
1547 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1548 break;
1549 case IO_OPEN_CNX_ERROR_BREAK:
1550 PM8001_IO_DBG(pm8001_ha,
1551 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
1552 ts->resp = SAS_TASK_COMPLETE;
1553 ts->stat = SAS_OPEN_REJECT;
1554 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1555 break;
1556 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1557 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
1558 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
1559 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
1560 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
1561 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
1562 PM8001_IO_DBG(pm8001_ha,
1563 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
1564 ts->resp = SAS_TASK_COMPLETE;
1565 ts->stat = SAS_OPEN_REJECT;
1566 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1567 if (!t->uldd_task)
1568 pm8001_handle_event(pm8001_ha,
1569 pm8001_dev,
1570 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
1571 break;
1572 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
1573 PM8001_IO_DBG(pm8001_ha,
1574 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
1575 ts->resp = SAS_TASK_COMPLETE;
1576 ts->stat = SAS_OPEN_REJECT;
1577 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
1578 break;
1579 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
1580 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
1581 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
1582 ts->resp = SAS_TASK_COMPLETE;
1583 ts->stat = SAS_OPEN_REJECT;
1584 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
1585 break;
1586 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
1587 PM8001_IO_DBG(pm8001_ha,
1588 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
1589 ts->resp = SAS_TASK_COMPLETE;
1590 ts->stat = SAS_OPEN_REJECT;
1591 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
1592 break;
1593 case IO_XFER_ERROR_NAK_RECEIVED:
1594 PM8001_IO_DBG(pm8001_ha,
1595 pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
1596 ts->resp = SAS_TASK_COMPLETE;
1597 ts->stat = SAS_OPEN_REJECT;
1598 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1599 break;
1600 case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
1601 PM8001_IO_DBG(pm8001_ha,
1602 pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
1603 ts->resp = SAS_TASK_COMPLETE;
1604 ts->stat = SAS_NAK_R_ERR;
1605 break;
1606 case IO_XFER_OPEN_RETRY_TIMEOUT:
1607 PM8001_IO_DBG(pm8001_ha,
1608 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
1609 pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
1610 return;
1611 case IO_XFER_ERROR_UNEXPECTED_PHASE:
1612 PM8001_IO_DBG(pm8001_ha,
1613 pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
1614 ts->resp = SAS_TASK_COMPLETE;
1615 ts->stat = SAS_DATA_OVERRUN;
1616 break;
1617 case IO_XFER_ERROR_XFER_RDY_OVERRUN:
1618 PM8001_IO_DBG(pm8001_ha,
1619 pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
1620 ts->resp = SAS_TASK_COMPLETE;
1621 ts->stat = SAS_DATA_OVERRUN;
1622 break;
1623 case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
1624 PM8001_IO_DBG(pm8001_ha,
1625 pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
1626 ts->resp = SAS_TASK_COMPLETE;
1627 ts->stat = SAS_DATA_OVERRUN;
1628 break;
1629 case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
1630 PM8001_IO_DBG(pm8001_ha,
1631 pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
1632 ts->resp = SAS_TASK_COMPLETE;
1633 ts->stat = SAS_DATA_OVERRUN;
1634 break;
1635 case IO_XFER_ERROR_OFFSET_MISMATCH:
1636 PM8001_IO_DBG(pm8001_ha,
1637 pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
1638 ts->resp = SAS_TASK_COMPLETE;
1639 ts->stat = SAS_DATA_OVERRUN;
1640 break;
1641 case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
1642 PM8001_IO_DBG(pm8001_ha,
1643 pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
1644 ts->resp = SAS_TASK_COMPLETE;
1645 ts->stat = SAS_DATA_OVERRUN;
1646 break;
1647 case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
1648 PM8001_IO_DBG(pm8001_ha,
1649 pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
1650 /* TBC: used default set values */
1651 ts->resp = SAS_TASK_COMPLETE;
1652 ts->stat = SAS_DATA_OVERRUN;
1653 break;
1654 case IO_XFER_CMD_FRAME_ISSUED:
1655 PM8001_IO_DBG(pm8001_ha,
1656 pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
1657 return;
1658 default:
1659 PM8001_IO_DBG(pm8001_ha,
1660 pm8001_printk("Unknown status 0x%x\n", event));
1661 /* not allowed case. Therefore, return failed status */
1662 ts->resp = SAS_TASK_COMPLETE;
1663 ts->stat = SAS_DATA_OVERRUN;
1664 break;
1665 }
1666 spin_lock_irqsave(&t->task_state_lock, flags);
1667 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
1668 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
1669 t->task_state_flags |= SAS_TASK_STATE_DONE;
1670 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
1671 spin_unlock_irqrestore(&t->task_state_lock, flags);
1672 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
1673 "task 0x%p done with event 0x%x resp 0x%x "
1674 "stat 0x%x but aborted by upper layer!\n",
1675 t, event, ts->resp, ts->stat));
1676 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1677 } else {
1678 spin_unlock_irqrestore(&t->task_state_lock, flags);
1679 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1680 mb();/* in order to force CPU ordering */
1681 t->task_done(t);
1682 }
1683}
1684
1685/*See the comments for mpi_ssp_completion */
1686static void
1687mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
1688{
1689 struct sas_task *t;
1690 struct pm8001_ccb_info *ccb;
1691 u32 param;
1692 u32 status;
1693 u32 tag;
1694 struct sata_completion_resp *psataPayload;
1695 struct task_status_struct *ts;
1696 struct ata_task_resp *resp ;
1697 u32 *sata_resp;
1698 struct pm8001_device *pm8001_dev;
1699 unsigned long flags;
1700
1701 psataPayload = (struct sata_completion_resp *)(piomb + 4);
1702 status = le32_to_cpu(psataPayload->status);
1703 tag = le32_to_cpu(psataPayload->tag);
1704
1705 if (!tag) {
1706 PM8001_FAIL_DBG(pm8001_ha,
1707 pm8001_printk("tag null\n"));
1708 return;
1709 }
1710 ccb = &pm8001_ha->ccb_info[tag];
1711 param = le32_to_cpu(psataPayload->param);
1712 if (ccb) {
1713 t = ccb->task;
1714 pm8001_dev = ccb->device;
1715 } else {
1716 PM8001_FAIL_DBG(pm8001_ha,
1717 pm8001_printk("ccb null\n"));
1718 return;
1719 }
1720
1721 if (t) {
1722 if (t->dev && (t->dev->lldd_dev))
1723 pm8001_dev = t->dev->lldd_dev;
1724 } else {
1725 PM8001_FAIL_DBG(pm8001_ha,
1726 pm8001_printk("task null\n"));
1727 return;
1728 }
1729
1730 if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
1731 && unlikely(!t || !t->lldd_task || !t->dev)) {
1732 PM8001_FAIL_DBG(pm8001_ha,
1733 pm8001_printk("task or dev null\n"));
1734 return;
1735 }
1736
1737 ts = &t->task_status;
1738 if (!ts) {
1739 PM8001_FAIL_DBG(pm8001_ha,
1740 pm8001_printk("ts null\n"));
1741 return;
1742 }
1743
1744 switch (status) {
1745 case IO_SUCCESS:
1746 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
1747 if (param == 0) {
1748 ts->resp = SAS_TASK_COMPLETE;
1749 ts->stat = SAM_STAT_GOOD;
1750 /* check if response is for SEND READ LOG */
1751 if (pm8001_dev &&
1752 (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
1753 /* set new bit for abort_all */
1754 pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
1755 /* clear bit for read log */
1756 pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
1757 pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
1758 /* Free the tag */
1759 pm8001_tag_free(pm8001_ha, tag);
1760 sas_free_task(t);
1761 return;
1762 }
1763 } else {
1764 u8 len;
1765 ts->resp = SAS_TASK_COMPLETE;
1766 ts->stat = SAS_PROTO_RESPONSE;
1767 ts->residual = param;
1768 PM8001_IO_DBG(pm8001_ha,
1769 pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
1770 param));
1771 sata_resp = &psataPayload->sata_resp[0];
1772 resp = (struct ata_task_resp *)ts->buf;
1773 if (t->ata_task.dma_xfer == 0 &&
1774 t->data_dir == PCI_DMA_FROMDEVICE) {
1775 len = sizeof(struct pio_setup_fis);
1776 PM8001_IO_DBG(pm8001_ha,
1777 pm8001_printk("PIO read len = %d\n", len));
1778 } else if (t->ata_task.use_ncq) {
1779 len = sizeof(struct set_dev_bits_fis);
1780 PM8001_IO_DBG(pm8001_ha,
1781 pm8001_printk("FPDMA len = %d\n", len));
1782 } else {
1783 len = sizeof(struct dev_to_host_fis);
1784 PM8001_IO_DBG(pm8001_ha,
1785 pm8001_printk("other len = %d\n", len));
1786 }
1787 if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
1788 resp->frame_len = len;
1789 memcpy(&resp->ending_fis[0], sata_resp, len);
1790 ts->buf_valid_size = sizeof(*resp);
1791 } else
1792 PM8001_IO_DBG(pm8001_ha,
1793 pm8001_printk("response to large\n"));
1794 }
1795 if (pm8001_dev)
1796 pm8001_dev->running_req--;
1797 break;
1798 case IO_ABORTED:
1799 PM8001_IO_DBG(pm8001_ha,
1800 pm8001_printk("IO_ABORTED IOMB Tag\n"));
1801 ts->resp = SAS_TASK_COMPLETE;
1802 ts->stat = SAS_ABORTED_TASK;
1803 if (pm8001_dev)
1804 pm8001_dev->running_req--;
1805 break;
1806 /* following cases are to do cases */
1807 case IO_UNDERFLOW:
1808 /* SATA Completion with error */
1809 PM8001_IO_DBG(pm8001_ha,
1810 pm8001_printk("IO_UNDERFLOW param = %d\n", param));
1811 ts->resp = SAS_TASK_COMPLETE;
1812 ts->stat = SAS_DATA_UNDERRUN;
1813 ts->residual = param;
1814 if (pm8001_dev)
1815 pm8001_dev->running_req--;
1816 break;
1817 case IO_NO_DEVICE:
1818 PM8001_IO_DBG(pm8001_ha,
1819 pm8001_printk("IO_NO_DEVICE\n"));
1820 ts->resp = SAS_TASK_UNDELIVERED;
1821 ts->stat = SAS_PHY_DOWN;
1822 break;
1823 case IO_XFER_ERROR_BREAK:
1824 PM8001_IO_DBG(pm8001_ha,
1825 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
1826 ts->resp = SAS_TASK_COMPLETE;
1827 ts->stat = SAS_INTERRUPTED;
1828 break;
1829 case IO_XFER_ERROR_PHY_NOT_READY:
1830 PM8001_IO_DBG(pm8001_ha,
1831 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
1832 ts->resp = SAS_TASK_COMPLETE;
1833 ts->stat = SAS_OPEN_REJECT;
1834 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1835 break;
1836 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
1837 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
1838 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
1839 ts->resp = SAS_TASK_COMPLETE;
1840 ts->stat = SAS_OPEN_REJECT;
1841 ts->open_rej_reason = SAS_OREJ_EPROTO;
1842 break;
1843 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
1844 PM8001_IO_DBG(pm8001_ha,
1845 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
1846 ts->resp = SAS_TASK_COMPLETE;
1847 ts->stat = SAS_OPEN_REJECT;
1848 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1849 break;
1850 case IO_OPEN_CNX_ERROR_BREAK:
1851 PM8001_IO_DBG(pm8001_ha,
1852 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
1853 ts->resp = SAS_TASK_COMPLETE;
1854 ts->stat = SAS_OPEN_REJECT;
1855 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
1856 break;
1857 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1858 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
1859 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
1860 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
1861 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
1862 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
1863 PM8001_IO_DBG(pm8001_ha,
1864 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
1865 ts->resp = SAS_TASK_COMPLETE;
1866 ts->stat = SAS_DEV_NO_RESPONSE;
1867 if (!t->uldd_task) {
1868 pm8001_handle_event(pm8001_ha,
1869 pm8001_dev,
1870 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
1871 ts->resp = SAS_TASK_UNDELIVERED;
1872 ts->stat = SAS_QUEUE_FULL;
1873 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1874 mb();/*in order to force CPU ordering*/
1875 spin_unlock_irq(&pm8001_ha->lock);
1876 t->task_done(t);
1877 spin_lock_irq(&pm8001_ha->lock);
1878 return;
1879 }
1880 break;
1881 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
1882 PM8001_IO_DBG(pm8001_ha,
1883 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
1884 ts->resp = SAS_TASK_UNDELIVERED;
1885 ts->stat = SAS_OPEN_REJECT;
1886 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
1887 if (!t->uldd_task) {
1888 pm8001_handle_event(pm8001_ha,
1889 pm8001_dev,
1890 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
1891 ts->resp = SAS_TASK_UNDELIVERED;
1892 ts->stat = SAS_QUEUE_FULL;
1893 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1894 mb();/*ditto*/
1895 spin_unlock_irq(&pm8001_ha->lock);
1896 t->task_done(t);
1897 spin_lock_irq(&pm8001_ha->lock);
1898 return;
1899 }
1900 break;
1901 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
1902 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
1903 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
1904 ts->resp = SAS_TASK_COMPLETE;
1905 ts->stat = SAS_OPEN_REJECT;
1906 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
1907 break;
1908 case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
1909 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
1910 "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"));
1911 ts->resp = SAS_TASK_COMPLETE;
1912 ts->stat = SAS_DEV_NO_RESPONSE;
1913 if (!t->uldd_task) {
1914 pm8001_handle_event(pm8001_ha,
1915 pm8001_dev,
1916 IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
1917 ts->resp = SAS_TASK_UNDELIVERED;
1918 ts->stat = SAS_QUEUE_FULL;
1919 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1920 mb();/* ditto*/
1921 spin_unlock_irq(&pm8001_ha->lock);
1922 t->task_done(t);
1923 spin_lock_irq(&pm8001_ha->lock);
1924 return;
1925 }
1926 break;
1927 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
1928 PM8001_IO_DBG(pm8001_ha,
1929 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
1930 ts->resp = SAS_TASK_COMPLETE;
1931 ts->stat = SAS_OPEN_REJECT;
1932 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
1933 break;
1934 case IO_XFER_ERROR_NAK_RECEIVED:
1935 PM8001_IO_DBG(pm8001_ha,
1936 pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
1937 ts->resp = SAS_TASK_COMPLETE;
1938 ts->stat = SAS_NAK_R_ERR;
1939 break;
1940 case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
1941 PM8001_IO_DBG(pm8001_ha,
1942 pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
1943 ts->resp = SAS_TASK_COMPLETE;
1944 ts->stat = SAS_NAK_R_ERR;
1945 break;
1946 case IO_XFER_ERROR_DMA:
1947 PM8001_IO_DBG(pm8001_ha,
1948 pm8001_printk("IO_XFER_ERROR_DMA\n"));
1949 ts->resp = SAS_TASK_COMPLETE;
1950 ts->stat = SAS_ABORTED_TASK;
1951 break;
1952 case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
1953 PM8001_IO_DBG(pm8001_ha,
1954 pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
1955 ts->resp = SAS_TASK_UNDELIVERED;
1956 ts->stat = SAS_DEV_NO_RESPONSE;
1957 break;
1958 case IO_XFER_ERROR_REJECTED_NCQ_MODE:
1959 PM8001_IO_DBG(pm8001_ha,
1960 pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
1961 ts->resp = SAS_TASK_COMPLETE;
1962 ts->stat = SAS_DATA_UNDERRUN;
1963 break;
1964 case IO_XFER_OPEN_RETRY_TIMEOUT:
1965 PM8001_IO_DBG(pm8001_ha,
1966 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
1967 ts->resp = SAS_TASK_COMPLETE;
1968 ts->stat = SAS_OPEN_TO;
1969 break;
1970 case IO_PORT_IN_RESET:
1971 PM8001_IO_DBG(pm8001_ha,
1972 pm8001_printk("IO_PORT_IN_RESET\n"));
1973 ts->resp = SAS_TASK_COMPLETE;
1974 ts->stat = SAS_DEV_NO_RESPONSE;
1975 break;
1976 case IO_DS_NON_OPERATIONAL:
1977 PM8001_IO_DBG(pm8001_ha,
1978 pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
1979 ts->resp = SAS_TASK_COMPLETE;
1980 ts->stat = SAS_DEV_NO_RESPONSE;
1981 if (!t->uldd_task) {
1982 pm8001_handle_event(pm8001_ha, pm8001_dev,
1983 IO_DS_NON_OPERATIONAL);
1984 ts->resp = SAS_TASK_UNDELIVERED;
1985 ts->stat = SAS_QUEUE_FULL;
1986 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1987 mb();/*ditto*/
1988 spin_unlock_irq(&pm8001_ha->lock);
1989 t->task_done(t);
1990 spin_lock_irq(&pm8001_ha->lock);
1991 return;
1992 }
1993 break;
1994 case IO_DS_IN_RECOVERY:
1995 PM8001_IO_DBG(pm8001_ha,
1996 pm8001_printk("IO_DS_IN_RECOVERY\n"));
1997 ts->resp = SAS_TASK_COMPLETE;
1998 ts->stat = SAS_DEV_NO_RESPONSE;
1999 break;
2000 case IO_DS_IN_ERROR:
2001 PM8001_IO_DBG(pm8001_ha,
2002 pm8001_printk("IO_DS_IN_ERROR\n"));
2003 ts->resp = SAS_TASK_COMPLETE;
2004 ts->stat = SAS_DEV_NO_RESPONSE;
2005 if (!t->uldd_task) {
2006 pm8001_handle_event(pm8001_ha, pm8001_dev,
2007 IO_DS_IN_ERROR);
2008 ts->resp = SAS_TASK_UNDELIVERED;
2009 ts->stat = SAS_QUEUE_FULL;
2010 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2011 mb();/*ditto*/
2012 spin_unlock_irq(&pm8001_ha->lock);
2013 t->task_done(t);
2014 spin_lock_irq(&pm8001_ha->lock);
2015 return;
2016 }
2017 break;
2018 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
2019 PM8001_IO_DBG(pm8001_ha,
2020 pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
2021 ts->resp = SAS_TASK_COMPLETE;
2022 ts->stat = SAS_OPEN_REJECT;
2023 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2024 default:
2025 PM8001_IO_DBG(pm8001_ha,
2026 pm8001_printk("Unknown status 0x%x\n", status));
2027 /* not allowed case. Therefore, return failed status */
2028 ts->resp = SAS_TASK_COMPLETE;
2029 ts->stat = SAS_DEV_NO_RESPONSE;
2030 break;
2031 }
2032 spin_lock_irqsave(&t->task_state_lock, flags);
2033 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2034 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2035 t->task_state_flags |= SAS_TASK_STATE_DONE;
2036 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2037 spin_unlock_irqrestore(&t->task_state_lock, flags);
2038 PM8001_FAIL_DBG(pm8001_ha,
2039 pm8001_printk("task 0x%p done with io_status 0x%x"
2040 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2041 t, status, ts->resp, ts->stat));
2042 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2043 } else if (t->uldd_task) {
2044 spin_unlock_irqrestore(&t->task_state_lock, flags);
2045 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2046 mb();/* ditto */
2047 spin_unlock_irq(&pm8001_ha->lock);
2048 t->task_done(t);
2049 spin_lock_irq(&pm8001_ha->lock);
2050 } else if (!t->uldd_task) {
2051 spin_unlock_irqrestore(&t->task_state_lock, flags);
2052 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2053 mb();/*ditto*/
2054 spin_unlock_irq(&pm8001_ha->lock);
2055 t->task_done(t);
2056 spin_lock_irq(&pm8001_ha->lock);
2057 }
2058}
2059
2060/*See the comments for mpi_ssp_completion */
2061static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2062{
2063 struct sas_task *t;
2064 struct task_status_struct *ts;
2065 struct pm8001_ccb_info *ccb;
2066 struct pm8001_device *pm8001_dev;
2067 struct sata_event_resp *psataPayload =
2068 (struct sata_event_resp *)(piomb + 4);
2069 u32 event = le32_to_cpu(psataPayload->event);
2070 u32 tag = le32_to_cpu(psataPayload->tag);
2071 u32 port_id = le32_to_cpu(psataPayload->port_id);
2072 u32 dev_id = le32_to_cpu(psataPayload->device_id);
2073 unsigned long flags;
2074
2075 ccb = &pm8001_ha->ccb_info[tag];
2076
2077 if (ccb) {
2078 t = ccb->task;
2079 pm8001_dev = ccb->device;
2080 } else {
2081 PM8001_FAIL_DBG(pm8001_ha,
2082 pm8001_printk("No CCB !!!. returning\n"));
2083 return;
2084 }
2085 if (event)
2086 PM8001_FAIL_DBG(pm8001_ha,
2087 pm8001_printk("SATA EVENT 0x%x\n", event));
2088
2089 /* Check if this is NCQ error */
2090 if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
2091 /* find device using device id */
2092 pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
2093 /* send read log extension */
2094 if (pm8001_dev)
2095 pm80xx_send_read_log(pm8001_ha, pm8001_dev);
2096 return;
2097 }
2098
2099 if (unlikely(!t || !t->lldd_task || !t->dev)) {
2100 PM8001_FAIL_DBG(pm8001_ha,
2101 pm8001_printk("task or dev null\n"));
2102 return;
2103 }
2104
2105 ts = &t->task_status;
2106 PM8001_IO_DBG(pm8001_ha,
2107 pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
2108 port_id, tag, event));
2109 switch (event) {
2110 case IO_OVERFLOW:
2111 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
2112 ts->resp = SAS_TASK_COMPLETE;
2113 ts->stat = SAS_DATA_OVERRUN;
2114 ts->residual = 0;
2115 if (pm8001_dev)
2116 pm8001_dev->running_req--;
2117 break;
2118 case IO_XFER_ERROR_BREAK:
2119 PM8001_IO_DBG(pm8001_ha,
2120 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
2121 ts->resp = SAS_TASK_COMPLETE;
2122 ts->stat = SAS_INTERRUPTED;
2123 break;
2124 case IO_XFER_ERROR_PHY_NOT_READY:
2125 PM8001_IO_DBG(pm8001_ha,
2126 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
2127 ts->resp = SAS_TASK_COMPLETE;
2128 ts->stat = SAS_OPEN_REJECT;
2129 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2130 break;
2131 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
2132 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2133 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
2134 ts->resp = SAS_TASK_COMPLETE;
2135 ts->stat = SAS_OPEN_REJECT;
2136 ts->open_rej_reason = SAS_OREJ_EPROTO;
2137 break;
2138 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
2139 PM8001_IO_DBG(pm8001_ha,
2140 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
2141 ts->resp = SAS_TASK_COMPLETE;
2142 ts->stat = SAS_OPEN_REJECT;
2143 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2144 break;
2145 case IO_OPEN_CNX_ERROR_BREAK:
2146 PM8001_IO_DBG(pm8001_ha,
2147 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
2148 ts->resp = SAS_TASK_COMPLETE;
2149 ts->stat = SAS_OPEN_REJECT;
2150 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
2151 break;
2152 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
2153 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
2154 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
2155 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
2156 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
2157 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
2158 PM8001_FAIL_DBG(pm8001_ha,
2159 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
2160 ts->resp = SAS_TASK_UNDELIVERED;
2161 ts->stat = SAS_DEV_NO_RESPONSE;
2162 if (!t->uldd_task) {
2163 pm8001_handle_event(pm8001_ha,
2164 pm8001_dev,
2165 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2166 ts->resp = SAS_TASK_COMPLETE;
2167 ts->stat = SAS_QUEUE_FULL;
2168 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2169 mb();/*ditto*/
2170 spin_unlock_irq(&pm8001_ha->lock);
2171 t->task_done(t);
2172 spin_lock_irq(&pm8001_ha->lock);
2173 return;
2174 }
2175 break;
2176 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
2177 PM8001_IO_DBG(pm8001_ha,
2178 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
2179 ts->resp = SAS_TASK_UNDELIVERED;
2180 ts->stat = SAS_OPEN_REJECT;
2181 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
2182 break;
2183 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2184 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2185 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
2186 ts->resp = SAS_TASK_COMPLETE;
2187 ts->stat = SAS_OPEN_REJECT;
2188 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
2189 break;
2190 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
2191 PM8001_IO_DBG(pm8001_ha,
2192 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
2193 ts->resp = SAS_TASK_COMPLETE;
2194 ts->stat = SAS_OPEN_REJECT;
2195 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
2196 break;
2197 case IO_XFER_ERROR_NAK_RECEIVED:
2198 PM8001_IO_DBG(pm8001_ha,
2199 pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
2200 ts->resp = SAS_TASK_COMPLETE;
2201 ts->stat = SAS_NAK_R_ERR;
2202 break;
2203 case IO_XFER_ERROR_PEER_ABORTED:
2204 PM8001_IO_DBG(pm8001_ha,
2205 pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
2206 ts->resp = SAS_TASK_COMPLETE;
2207 ts->stat = SAS_NAK_R_ERR;
2208 break;
2209 case IO_XFER_ERROR_REJECTED_NCQ_MODE:
2210 PM8001_IO_DBG(pm8001_ha,
2211 pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
2212 ts->resp = SAS_TASK_COMPLETE;
2213 ts->stat = SAS_DATA_UNDERRUN;
2214 break;
2215 case IO_XFER_OPEN_RETRY_TIMEOUT:
2216 PM8001_IO_DBG(pm8001_ha,
2217 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
2218 ts->resp = SAS_TASK_COMPLETE;
2219 ts->stat = SAS_OPEN_TO;
2220 break;
2221 case IO_XFER_ERROR_UNEXPECTED_PHASE:
2222 PM8001_IO_DBG(pm8001_ha,
2223 pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
2224 ts->resp = SAS_TASK_COMPLETE;
2225 ts->stat = SAS_OPEN_TO;
2226 break;
2227 case IO_XFER_ERROR_XFER_RDY_OVERRUN:
2228 PM8001_IO_DBG(pm8001_ha,
2229 pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
2230 ts->resp = SAS_TASK_COMPLETE;
2231 ts->stat = SAS_OPEN_TO;
2232 break;
2233 case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
2234 PM8001_IO_DBG(pm8001_ha,
2235 pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
2236 ts->resp = SAS_TASK_COMPLETE;
2237 ts->stat = SAS_OPEN_TO;
2238 break;
2239 case IO_XFER_ERROR_OFFSET_MISMATCH:
2240 PM8001_IO_DBG(pm8001_ha,
2241 pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
2242 ts->resp = SAS_TASK_COMPLETE;
2243 ts->stat = SAS_OPEN_TO;
2244 break;
2245 case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
2246 PM8001_IO_DBG(pm8001_ha,
2247 pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
2248 ts->resp = SAS_TASK_COMPLETE;
2249 ts->stat = SAS_OPEN_TO;
2250 break;
2251 case IO_XFER_CMD_FRAME_ISSUED:
2252 PM8001_IO_DBG(pm8001_ha,
2253 pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
2254 break;
2255 case IO_XFER_PIO_SETUP_ERROR:
2256 PM8001_IO_DBG(pm8001_ha,
2257 pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
2258 ts->resp = SAS_TASK_COMPLETE;
2259 ts->stat = SAS_OPEN_TO;
2260 break;
2261 case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
2262 PM8001_FAIL_DBG(pm8001_ha,
2263 pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
2264 /* TBC: used default set values */
2265 ts->resp = SAS_TASK_COMPLETE;
2266 ts->stat = SAS_OPEN_TO;
2267 break;
2268 case IO_XFER_DMA_ACTIVATE_TIMEOUT:
2269 PM8001_FAIL_DBG(pm8001_ha,
2270 pm8001_printk("IO_XFR_DMA_ACTIVATE_TIMEOUT\n"));
2271 /* TBC: used default set values */
2272 ts->resp = SAS_TASK_COMPLETE;
2273 ts->stat = SAS_OPEN_TO;
2274 break;
2275 default:
2276 PM8001_IO_DBG(pm8001_ha,
2277 pm8001_printk("Unknown status 0x%x\n", event));
2278 /* not allowed case. Therefore, return failed status */
2279 ts->resp = SAS_TASK_COMPLETE;
2280 ts->stat = SAS_OPEN_TO;
2281 break;
2282 }
2283 spin_lock_irqsave(&t->task_state_lock, flags);
2284 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2285 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2286 t->task_state_flags |= SAS_TASK_STATE_DONE;
2287 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2288 spin_unlock_irqrestore(&t->task_state_lock, flags);
2289 PM8001_FAIL_DBG(pm8001_ha,
2290 pm8001_printk("task 0x%p done with io_status 0x%x"
2291 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2292 t, event, ts->resp, ts->stat));
2293 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2294 } else if (t->uldd_task) {
2295 spin_unlock_irqrestore(&t->task_state_lock, flags);
2296 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2297 mb();/* ditto */
2298 spin_unlock_irq(&pm8001_ha->lock);
2299 t->task_done(t);
2300 spin_lock_irq(&pm8001_ha->lock);
2301 } else if (!t->uldd_task) {
2302 spin_unlock_irqrestore(&t->task_state_lock, flags);
2303 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2304 mb();/*ditto*/
2305 spin_unlock_irq(&pm8001_ha->lock);
2306 t->task_done(t);
2307 spin_lock_irq(&pm8001_ha->lock);
2308 }
2309}
2310
2311/*See the comments for mpi_ssp_completion */
2312static void
2313mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2314{
2315 u32 param, i;
2316 struct sas_task *t;
2317 struct pm8001_ccb_info *ccb;
2318 unsigned long flags;
2319 u32 status;
2320 u32 tag;
2321 struct smp_completion_resp *psmpPayload;
2322 struct task_status_struct *ts;
2323 struct pm8001_device *pm8001_dev;
2324 char *pdma_respaddr = NULL;
2325
2326 psmpPayload = (struct smp_completion_resp *)(piomb + 4);
2327 status = le32_to_cpu(psmpPayload->status);
2328 tag = le32_to_cpu(psmpPayload->tag);
2329
2330 ccb = &pm8001_ha->ccb_info[tag];
2331 param = le32_to_cpu(psmpPayload->param);
2332 t = ccb->task;
2333 ts = &t->task_status;
2334 pm8001_dev = ccb->device;
2335 if (status)
2336 PM8001_FAIL_DBG(pm8001_ha,
2337 pm8001_printk("smp IO status 0x%x\n", status));
2338 if (unlikely(!t || !t->lldd_task || !t->dev))
2339 return;
2340
2341 switch (status) {
2342
2343 case IO_SUCCESS:
2344 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
2345 ts->resp = SAS_TASK_COMPLETE;
2346 ts->stat = SAM_STAT_GOOD;
2347 if (pm8001_dev)
2348 pm8001_dev->running_req--;
2349 if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
2350 PM8001_IO_DBG(pm8001_ha,
2351 pm8001_printk("DIRECT RESPONSE Length:%d\n",
2352 param));
2353 pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
2354 ((u64)sg_dma_address
2355 (&t->smp_task.smp_resp))));
2356 for (i = 0; i < param; i++) {
2357 *(pdma_respaddr+i) = psmpPayload->_r_a[i];
2358 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2359 "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
2360 i, *(pdma_respaddr+i),
2361 psmpPayload->_r_a[i]));
2362 }
2363 }
2364 break;
2365 case IO_ABORTED:
2366 PM8001_IO_DBG(pm8001_ha,
2367 pm8001_printk("IO_ABORTED IOMB\n"));
2368 ts->resp = SAS_TASK_COMPLETE;
2369 ts->stat = SAS_ABORTED_TASK;
2370 if (pm8001_dev)
2371 pm8001_dev->running_req--;
2372 break;
2373 case IO_OVERFLOW:
2374 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
2375 ts->resp = SAS_TASK_COMPLETE;
2376 ts->stat = SAS_DATA_OVERRUN;
2377 ts->residual = 0;
2378 if (pm8001_dev)
2379 pm8001_dev->running_req--;
2380 break;
2381 case IO_NO_DEVICE:
2382 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
2383 ts->resp = SAS_TASK_COMPLETE;
2384 ts->stat = SAS_PHY_DOWN;
2385 break;
2386 case IO_ERROR_HW_TIMEOUT:
2387 PM8001_IO_DBG(pm8001_ha,
2388 pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
2389 ts->resp = SAS_TASK_COMPLETE;
2390 ts->stat = SAM_STAT_BUSY;
2391 break;
2392 case IO_XFER_ERROR_BREAK:
2393 PM8001_IO_DBG(pm8001_ha,
2394 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
2395 ts->resp = SAS_TASK_COMPLETE;
2396 ts->stat = SAM_STAT_BUSY;
2397 break;
2398 case IO_XFER_ERROR_PHY_NOT_READY:
2399 PM8001_IO_DBG(pm8001_ha,
2400 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
2401 ts->resp = SAS_TASK_COMPLETE;
2402 ts->stat = SAM_STAT_BUSY;
2403 break;
2404 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
2405 PM8001_IO_DBG(pm8001_ha,
2406 pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
2407 ts->resp = SAS_TASK_COMPLETE;
2408 ts->stat = SAS_OPEN_REJECT;
2409 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2410 break;
2411 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
2412 PM8001_IO_DBG(pm8001_ha,
2413 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
2414 ts->resp = SAS_TASK_COMPLETE;
2415 ts->stat = SAS_OPEN_REJECT;
2416 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2417 break;
2418 case IO_OPEN_CNX_ERROR_BREAK:
2419 PM8001_IO_DBG(pm8001_ha,
2420 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
2421 ts->resp = SAS_TASK_COMPLETE;
2422 ts->stat = SAS_OPEN_REJECT;
2423 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
2424 break;
2425 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
2426 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
2427 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
2428 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
2429 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
2430 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
2431 PM8001_IO_DBG(pm8001_ha,
2432 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
2433 ts->resp = SAS_TASK_COMPLETE;
2434 ts->stat = SAS_OPEN_REJECT;
2435 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2436 pm8001_handle_event(pm8001_ha,
2437 pm8001_dev,
2438 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2439 break;
2440 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
2441 PM8001_IO_DBG(pm8001_ha,
2442 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
2443 ts->resp = SAS_TASK_COMPLETE;
2444 ts->stat = SAS_OPEN_REJECT;
2445 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
2446 break;
2447 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2448 PM8001_IO_DBG(pm8001_ha, pm8001_printk(\
2449 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
2450 ts->resp = SAS_TASK_COMPLETE;
2451 ts->stat = SAS_OPEN_REJECT;
2452 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
2453 break;
2454 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
2455 PM8001_IO_DBG(pm8001_ha,
2456 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
2457 ts->resp = SAS_TASK_COMPLETE;
2458 ts->stat = SAS_OPEN_REJECT;
2459 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
2460 break;
2461 case IO_XFER_ERROR_RX_FRAME:
2462 PM8001_IO_DBG(pm8001_ha,
2463 pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
2464 ts->resp = SAS_TASK_COMPLETE;
2465 ts->stat = SAS_DEV_NO_RESPONSE;
2466 break;
2467 case IO_XFER_OPEN_RETRY_TIMEOUT:
2468 PM8001_IO_DBG(pm8001_ha,
2469 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
2470 ts->resp = SAS_TASK_COMPLETE;
2471 ts->stat = SAS_OPEN_REJECT;
2472 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2473 break;
2474 case IO_ERROR_INTERNAL_SMP_RESOURCE:
2475 PM8001_IO_DBG(pm8001_ha,
2476 pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
2477 ts->resp = SAS_TASK_COMPLETE;
2478 ts->stat = SAS_QUEUE_FULL;
2479 break;
2480 case IO_PORT_IN_RESET:
2481 PM8001_IO_DBG(pm8001_ha,
2482 pm8001_printk("IO_PORT_IN_RESET\n"));
2483 ts->resp = SAS_TASK_COMPLETE;
2484 ts->stat = SAS_OPEN_REJECT;
2485 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2486 break;
2487 case IO_DS_NON_OPERATIONAL:
2488 PM8001_IO_DBG(pm8001_ha,
2489 pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
2490 ts->resp = SAS_TASK_COMPLETE;
2491 ts->stat = SAS_DEV_NO_RESPONSE;
2492 break;
2493 case IO_DS_IN_RECOVERY:
2494 PM8001_IO_DBG(pm8001_ha,
2495 pm8001_printk("IO_DS_IN_RECOVERY\n"));
2496 ts->resp = SAS_TASK_COMPLETE;
2497 ts->stat = SAS_OPEN_REJECT;
2498 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2499 break;
2500 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
2501 PM8001_IO_DBG(pm8001_ha,
2502 pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
2503 ts->resp = SAS_TASK_COMPLETE;
2504 ts->stat = SAS_OPEN_REJECT;
2505 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2506 break;
2507 default:
2508 PM8001_IO_DBG(pm8001_ha,
2509 pm8001_printk("Unknown status 0x%x\n", status));
2510 ts->resp = SAS_TASK_COMPLETE;
2511 ts->stat = SAS_DEV_NO_RESPONSE;
2512 /* not allowed case. Therefore, return failed status */
2513 break;
2514 }
2515 spin_lock_irqsave(&t->task_state_lock, flags);
2516 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2517 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2518 t->task_state_flags |= SAS_TASK_STATE_DONE;
2519 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2520 spin_unlock_irqrestore(&t->task_state_lock, flags);
2521 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
2522 "task 0x%p done with io_status 0x%x resp 0x%x"
2523 "stat 0x%x but aborted by upper layer!\n",
2524 t, status, ts->resp, ts->stat));
2525 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2526 } else {
2527 spin_unlock_irqrestore(&t->task_state_lock, flags);
2528 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2529 mb();/* in order to force CPU ordering */
2530 t->task_done(t);
2531 }
2532}
2533
2534/**
2535 * pm80xx_hw_event_ack_req- For PM8001,some events need to acknowage to FW.
2536 * @pm8001_ha: our hba card information
2537 * @Qnum: the outbound queue message number.
2538 * @SEA: source of event to ack
2539 * @port_id: port id.
2540 * @phyId: phy id.
2541 * @param0: parameter 0.
2542 * @param1: parameter 1.
2543 */
2544static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
2545 u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
2546{
2547 struct hw_event_ack_req payload;
2548 u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
2549
2550 struct inbound_queue_table *circularQ;
2551
2552 memset((u8 *)&payload, 0, sizeof(payload));
2553 circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
2554 payload.tag = cpu_to_le32(1);
2555 payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
2556 ((phyId & 0xFF) << 24) | (port_id & 0xFF));
2557 payload.param0 = cpu_to_le32(param0);
2558 payload.param1 = cpu_to_le32(param1);
2559 pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
2560}
2561
2562static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
2563 u32 phyId, u32 phy_op);
2564
2565/**
2566 * hw_event_sas_phy_up -FW tells me a SAS phy up event.
2567 * @pm8001_ha: our hba card information
2568 * @piomb: IO message buffer
2569 */
2570static void
2571hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2572{
2573 struct hw_event_resp *pPayload =
2574 (struct hw_event_resp *)(piomb + 4);
2575 u32 lr_status_evt_portid =
2576 le32_to_cpu(pPayload->lr_status_evt_portid);
2577 u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
2578
2579 u8 link_rate =
2580 (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
2581 u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
2582 u8 phy_id =
2583 (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
2584 u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
2585
2586 struct pm8001_port *port = &pm8001_ha->port[port_id];
2587 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
2588 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2589 unsigned long flags;
2590 u8 deviceType = pPayload->sas_identify.dev_type;
2591 port->port_state = portstate;
2592 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
2593 "portid:%d; phyid:%d; linkrate:%d; "
2594 "portstate:%x; devicetype:%x\n",
2595 port_id, phy_id, link_rate, portstate, deviceType));
2596
2597 switch (deviceType) {
2598 case SAS_PHY_UNUSED:
2599 PM8001_MSG_DBG(pm8001_ha,
2600 pm8001_printk("device type no device.\n"));
2601 break;
2602 case SAS_END_DEVICE:
2603 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
2604 pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
2605 PHY_NOTIFY_ENABLE_SPINUP);
2606 port->port_attached = 1;
2607 pm8001_get_lrate_mode(phy, link_rate);
2608 break;
2609 case SAS_EDGE_EXPANDER_DEVICE:
2610 PM8001_MSG_DBG(pm8001_ha,
2611 pm8001_printk("expander device.\n"));
2612 port->port_attached = 1;
2613 pm8001_get_lrate_mode(phy, link_rate);
2614 break;
2615 case SAS_FANOUT_EXPANDER_DEVICE:
2616 PM8001_MSG_DBG(pm8001_ha,
2617 pm8001_printk("fanout expander device.\n"));
2618 port->port_attached = 1;
2619 pm8001_get_lrate_mode(phy, link_rate);
2620 break;
2621 default:
2622 PM8001_MSG_DBG(pm8001_ha,
2623 pm8001_printk("unknown device type(%x)\n", deviceType));
2624 break;
2625 }
2626 phy->phy_type |= PORT_TYPE_SAS;
2627 phy->identify.device_type = deviceType;
2628 phy->phy_attached = 1;
2629 if (phy->identify.device_type == SAS_END_DEVICE)
2630 phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
2631 else if (phy->identify.device_type != SAS_PHY_UNUSED)
2632 phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
2633 phy->sas_phy.oob_mode = SAS_OOB_MODE;
2634 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
2635 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
2636 memcpy(phy->frame_rcvd, &pPayload->sas_identify,
2637 sizeof(struct sas_identify_frame)-4);
2638 phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
2639 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
2640 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
2641 if (pm8001_ha->flags == PM8001F_RUN_TIME)
2642 mdelay(200);/*delay a moment to wait disk to spinup*/
2643 pm8001_bytes_dmaed(pm8001_ha, phy_id);
2644}
2645
2646/**
2647 * hw_event_sata_phy_up -FW tells me a SATA phy up event.
2648 * @pm8001_ha: our hba card information
2649 * @piomb: IO message buffer
2650 */
2651static void
2652hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2653{
2654 struct hw_event_resp *pPayload =
2655 (struct hw_event_resp *)(piomb + 4);
2656 u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
2657 u32 lr_status_evt_portid =
2658 le32_to_cpu(pPayload->lr_status_evt_portid);
2659 u8 link_rate =
2660 (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
2661 u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
2662 u8 phy_id =
2663 (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
2664
2665 u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
2666
2667 struct pm8001_port *port = &pm8001_ha->port[port_id];
2668 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
2669 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2670 unsigned long flags;
2671 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
2672 "port id %d, phy id %d link_rate %d portstate 0x%x\n",
2673 port_id, phy_id, link_rate, portstate));
2674
2675 port->port_state = portstate;
2676 port->port_attached = 1;
2677 pm8001_get_lrate_mode(phy, link_rate);
2678 phy->phy_type |= PORT_TYPE_SATA;
2679 phy->phy_attached = 1;
2680 phy->sas_phy.oob_mode = SATA_OOB_MODE;
2681 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
2682 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
2683 memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
2684 sizeof(struct dev_to_host_fis));
2685 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
2686 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
2687 phy->identify.device_type = SAS_SATA_DEV;
2688 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
2689 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
2690 pm8001_bytes_dmaed(pm8001_ha, phy_id);
2691}
2692
2693/**
2694 * hw_event_phy_down -we should notify the libsas the phy is down.
2695 * @pm8001_ha: our hba card information
2696 * @piomb: IO message buffer
2697 */
2698static void
2699hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
2700{
2701 struct hw_event_resp *pPayload =
2702 (struct hw_event_resp *)(piomb + 4);
2703
2704 u32 lr_status_evt_portid =
2705 le32_to_cpu(pPayload->lr_status_evt_portid);
2706 u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
2707 u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
2708 u8 phy_id =
2709 (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
2710 u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
2711
2712 struct pm8001_port *port = &pm8001_ha->port[port_id];
2713 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2714 port->port_state = portstate;
2715 phy->phy_type = 0;
2716 phy->identify.device_type = 0;
2717 phy->phy_attached = 0;
2718 memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
2719 switch (portstate) {
2720 case PORT_VALID:
2721 break;
2722 case PORT_INVALID:
2723 PM8001_MSG_DBG(pm8001_ha,
2724 pm8001_printk(" PortInvalid portID %d\n", port_id));
2725 PM8001_MSG_DBG(pm8001_ha,
2726 pm8001_printk(" Last phy Down and port invalid\n"));
2727 port->port_attached = 0;
2728 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
2729 port_id, phy_id, 0, 0);
2730 break;
2731 case PORT_IN_RESET:
2732 PM8001_MSG_DBG(pm8001_ha,
2733 pm8001_printk(" Port In Reset portID %d\n", port_id));
2734 break;
2735 case PORT_NOT_ESTABLISHED:
2736 PM8001_MSG_DBG(pm8001_ha,
2737 pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
2738 port->port_attached = 0;
2739 break;
2740 case PORT_LOSTCOMM:
2741 PM8001_MSG_DBG(pm8001_ha,
2742 pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
2743 PM8001_MSG_DBG(pm8001_ha,
2744 pm8001_printk(" Last phy Down and port invalid\n"));
2745 port->port_attached = 0;
2746 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
2747 port_id, phy_id, 0, 0);
2748 break;
2749 default:
2750 port->port_attached = 0;
2751 PM8001_MSG_DBG(pm8001_ha,
2752 pm8001_printk(" phy Down and(default) = 0x%x\n",
2753 portstate));
2754 break;
2755
2756 }
2757}
2758
2759static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2760{
2761 struct phy_start_resp *pPayload =
2762 (struct phy_start_resp *)(piomb + 4);
2763 u32 status =
2764 le32_to_cpu(pPayload->status);
2765 u32 phy_id =
2766 le32_to_cpu(pPayload->phyid);
2767 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2768
2769 PM8001_INIT_DBG(pm8001_ha,
2770 pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
2771 status, phy_id));
2772 if (status == 0) {
2773 phy->phy_state = 1;
2774 if (pm8001_ha->flags == PM8001F_RUN_TIME)
2775 complete(phy->enable_completion);
2776 }
2777 return 0;
2778
2779}
2780
2781/**
2782 * mpi_thermal_hw_event -The hw event has come.
2783 * @pm8001_ha: our hba card information
2784 * @piomb: IO message buffer
2785 */
2786static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
2787{
2788 struct thermal_hw_event *pPayload =
2789 (struct thermal_hw_event *)(piomb + 4);
2790
2791 u32 thermal_event = le32_to_cpu(pPayload->thermal_event);
2792 u32 rht_lht = le32_to_cpu(pPayload->rht_lht);
2793
2794 if (thermal_event & 0x40) {
2795 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2796 "Thermal Event: Local high temperature violated!\n"));
2797 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2798 "Thermal Event: Measured local high temperature %d\n",
2799 ((rht_lht & 0xFF00) >> 8)));
2800 }
2801 if (thermal_event & 0x10) {
2802 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2803 "Thermal Event: Remote high temperature violated!\n"));
2804 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2805 "Thermal Event: Measured remote high temperature %d\n",
2806 ((rht_lht & 0xFF000000) >> 24)));
2807 }
2808 return 0;
2809}
2810
2811/**
2812 * mpi_hw_event -The hw event has come.
2813 * @pm8001_ha: our hba card information
2814 * @piomb: IO message buffer
2815 */
2816static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
2817{
2818 unsigned long flags;
2819 struct hw_event_resp *pPayload =
2820 (struct hw_event_resp *)(piomb + 4);
2821 u32 lr_status_evt_portid =
2822 le32_to_cpu(pPayload->lr_status_evt_portid);
2823 u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
2824 u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
2825 u8 phy_id =
2826 (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
2827 u16 eventType =
2828 (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
2829 u8 status =
2830 (u8)((lr_status_evt_portid & 0x0F000000) >> 24);
2831
2832 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
2833 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2834 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
2835 PM8001_MSG_DBG(pm8001_ha,
2836 pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
2837 port_id, phy_id, eventType, status));
2838
2839 switch (eventType) {
2840
2841 case HW_EVENT_SAS_PHY_UP:
2842 PM8001_MSG_DBG(pm8001_ha,
2843 pm8001_printk("HW_EVENT_PHY_START_STATUS\n"));
2844 hw_event_sas_phy_up(pm8001_ha, piomb);
2845 break;
2846 case HW_EVENT_SATA_PHY_UP:
2847 PM8001_MSG_DBG(pm8001_ha,
2848 pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
2849 hw_event_sata_phy_up(pm8001_ha, piomb);
2850 break;
2851 case HW_EVENT_SATA_SPINUP_HOLD:
2852 PM8001_MSG_DBG(pm8001_ha,
2853 pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
2854 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
2855 break;
2856 case HW_EVENT_PHY_DOWN:
2857 PM8001_MSG_DBG(pm8001_ha,
2858 pm8001_printk("HW_EVENT_PHY_DOWN\n"));
2859 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
2860 phy->phy_attached = 0;
2861 phy->phy_state = 0;
2862 hw_event_phy_down(pm8001_ha, piomb);
2863 break;
2864 case HW_EVENT_PORT_INVALID:
2865 PM8001_MSG_DBG(pm8001_ha,
2866 pm8001_printk("HW_EVENT_PORT_INVALID\n"));
2867 sas_phy_disconnected(sas_phy);
2868 phy->phy_attached = 0;
2869 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
2870 break;
2871 /* the broadcast change primitive received, tell the LIBSAS this event
2872 to revalidate the sas domain*/
2873 case HW_EVENT_BROADCAST_CHANGE:
2874 PM8001_MSG_DBG(pm8001_ha,
2875 pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
2876 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
2877 port_id, phy_id, 1, 0);
2878 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
2879 sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
2880 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
2881 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2882 break;
2883 case HW_EVENT_PHY_ERROR:
2884 PM8001_MSG_DBG(pm8001_ha,
2885 pm8001_printk("HW_EVENT_PHY_ERROR\n"));
2886 sas_phy_disconnected(&phy->sas_phy);
2887 phy->phy_attached = 0;
2888 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
2889 break;
2890 case HW_EVENT_BROADCAST_EXP:
2891 PM8001_MSG_DBG(pm8001_ha,
2892 pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
2893 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
2894 sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
2895 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
2896 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2897 break;
2898 case HW_EVENT_LINK_ERR_INVALID_DWORD:
2899 PM8001_MSG_DBG(pm8001_ha,
2900 pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
2901 pm80xx_hw_event_ack_req(pm8001_ha, 0,
2902 HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
2903 sas_phy_disconnected(sas_phy);
2904 phy->phy_attached = 0;
2905 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
2906 break;
2907 case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
2908 PM8001_MSG_DBG(pm8001_ha,
2909 pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
2910 pm80xx_hw_event_ack_req(pm8001_ha, 0,
2911 HW_EVENT_LINK_ERR_DISPARITY_ERROR,
2912 port_id, phy_id, 0, 0);
2913 sas_phy_disconnected(sas_phy);
2914 phy->phy_attached = 0;
2915 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
2916 break;
2917 case HW_EVENT_LINK_ERR_CODE_VIOLATION:
2918 PM8001_MSG_DBG(pm8001_ha,
2919 pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
2920 pm80xx_hw_event_ack_req(pm8001_ha, 0,
2921 HW_EVENT_LINK_ERR_CODE_VIOLATION,
2922 port_id, phy_id, 0, 0);
2923 sas_phy_disconnected(sas_phy);
2924 phy->phy_attached = 0;
2925 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
2926 break;
2927 case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
2928 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
2929 "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
2930 pm80xx_hw_event_ack_req(pm8001_ha, 0,
2931 HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
2932 port_id, phy_id, 0, 0);
2933 sas_phy_disconnected(sas_phy);
2934 phy->phy_attached = 0;
2935 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
2936 break;
2937 case HW_EVENT_MALFUNCTION:
2938 PM8001_MSG_DBG(pm8001_ha,
2939 pm8001_printk("HW_EVENT_MALFUNCTION\n"));
2940 break;
2941 case HW_EVENT_BROADCAST_SES:
2942 PM8001_MSG_DBG(pm8001_ha,
2943 pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
2944 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
2945 sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
2946 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
2947 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2948 break;
2949 case HW_EVENT_INBOUND_CRC_ERROR:
2950 PM8001_MSG_DBG(pm8001_ha,
2951 pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
2952 pm80xx_hw_event_ack_req(pm8001_ha, 0,
2953 HW_EVENT_INBOUND_CRC_ERROR,
2954 port_id, phy_id, 0, 0);
2955 break;
2956 case HW_EVENT_HARD_RESET_RECEIVED:
2957 PM8001_MSG_DBG(pm8001_ha,
2958 pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
2959 sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
2960 break;
2961 case HW_EVENT_ID_FRAME_TIMEOUT:
2962 PM8001_MSG_DBG(pm8001_ha,
2963 pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
2964 sas_phy_disconnected(sas_phy);
2965 phy->phy_attached = 0;
2966 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
2967 break;
2968 case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
2969 PM8001_MSG_DBG(pm8001_ha,
2970 pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
2971 pm80xx_hw_event_ack_req(pm8001_ha, 0,
2972 HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
2973 port_id, phy_id, 0, 0);
2974 sas_phy_disconnected(sas_phy);
2975 phy->phy_attached = 0;
2976 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
2977 break;
2978 case HW_EVENT_PORT_RESET_TIMER_TMO:
2979 PM8001_MSG_DBG(pm8001_ha,
2980 pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
2981 sas_phy_disconnected(sas_phy);
2982 phy->phy_attached = 0;
2983 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
2984 break;
2985 case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
2986 PM8001_MSG_DBG(pm8001_ha,
2987 pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
2988 pm80xx_hw_event_ack_req(pm8001_ha, 0,
2989 HW_EVENT_PORT_RECOVERY_TIMER_TMO,
2990 port_id, phy_id, 0, 0);
2991 sas_phy_disconnected(sas_phy);
2992 phy->phy_attached = 0;
2993 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
2994 break;
2995 case HW_EVENT_PORT_RECOVER:
2996 PM8001_MSG_DBG(pm8001_ha,
2997 pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
2998 break;
2999 case HW_EVENT_PORT_RESET_COMPLETE:
3000 PM8001_MSG_DBG(pm8001_ha,
3001 pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
3002 break;
3003 case EVENT_BROADCAST_ASYNCH_EVENT:
3004 PM8001_MSG_DBG(pm8001_ha,
3005 pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
3006 break;
3007 default:
3008 PM8001_MSG_DBG(pm8001_ha,
3009 pm8001_printk("Unknown event type 0x%x\n", eventType));
3010 break;
3011 }
3012 return 0;
3013}
3014
3015/**
3016 * mpi_phy_stop_resp - SPCv specific
3017 * @pm8001_ha: our hba card information
3018 * @piomb: IO message buffer
3019 */
3020static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3021{
3022 struct phy_stop_resp *pPayload =
3023 (struct phy_stop_resp *)(piomb + 4);
3024 u32 status =
3025 le32_to_cpu(pPayload->status);
3026 u32 phyid =
3027 le32_to_cpu(pPayload->phyid);
3028 struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
3029 PM8001_MSG_DBG(pm8001_ha,
3030 pm8001_printk("phy:0x%x status:0x%x\n",
3031 phyid, status));
3032 if (status == 0)
3033 phy->phy_state = 0;
3034 return 0;
3035}
3036
3037/**
3038 * mpi_set_controller_config_resp - SPCv specific
3039 * @pm8001_ha: our hba card information
3040 * @piomb: IO message buffer
3041 */
3042static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
3043 void *piomb)
3044{
3045 struct set_ctrl_cfg_resp *pPayload =
3046 (struct set_ctrl_cfg_resp *)(piomb + 4);
3047 u32 status = le32_to_cpu(pPayload->status);
3048 u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
3049
3050 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3051 "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
3052 status, err_qlfr_pgcd));
3053
3054 return 0;
3055}
3056
3057/**
3058 * mpi_get_controller_config_resp - SPCv specific
3059 * @pm8001_ha: our hba card information
3060 * @piomb: IO message buffer
3061 */
3062static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
3063 void *piomb)
3064{
3065 PM8001_MSG_DBG(pm8001_ha,
3066 pm8001_printk(" pm80xx_addition_functionality\n"));
3067
3068 return 0;
3069}
3070
3071/**
3072 * mpi_get_phy_profile_resp - SPCv specific
3073 * @pm8001_ha: our hba card information
3074 * @piomb: IO message buffer
3075 */
3076static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
3077 void *piomb)
3078{
3079 PM8001_MSG_DBG(pm8001_ha,
3080 pm8001_printk(" pm80xx_addition_functionality\n"));
3081
3082 return 0;
3083}
3084
3085/**
3086 * mpi_flash_op_ext_resp - SPCv specific
3087 * @pm8001_ha: our hba card information
3088 * @piomb: IO message buffer
3089 */
3090static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3091{
3092 PM8001_MSG_DBG(pm8001_ha,
3093 pm8001_printk(" pm80xx_addition_functionality\n"));
3094
3095 return 0;
3096}
3097
/**
 * mpi_set_phy_profile_resp - SPCv specific
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Placeholder handler: the SET_PHY_PROFILE response is not parsed yet;
 * only a trace message is emitted.
 */
static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
			void *piomb)
{
	PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" pm80xx_addition_functionality\n"));

	return 0;
}
3111
/**
 * mpi_kek_management_resp - SPCv specific
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Handles the Key-Encryption-Key management response: decodes the
 * status, the key-index/new/current/operation dword and the error
 * qualifier, and logs them.  No state is updated here.
 */
static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
			void *piomb)
{
	/* Payload follows the 4-byte IOMB header. */
	struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4);

	u32 status = le32_to_cpu(pPayload->status);
	u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop);
	u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr);

	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
		"KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
		status, kidx_new_curr_ksop, err_qlfr));

	return 0;
}
3132
/**
 * mpi_dek_management_resp - SPCv specific
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Placeholder handler: the Data-Encryption-Key management response is
 * not parsed yet; only a trace message is emitted.
 */
static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
			void *piomb)
{
	PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" pm80xx_addition_functionality\n"));

	return 0;
}
3146
/**
 * ssp_coalesced_comp_resp - SPCv specific
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Placeholder handler: coalesced SSP completions are not processed yet;
 * only a trace message is emitted.
 */
static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
			void *piomb)
{
	PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" pm80xx_addition_functionality\n"));

	return 0;
}
3160
3161/**
3162 * process_one_iomb - process one outbound Queue memory block
3163 * @pm8001_ha: our hba card information
3164 * @piomb: IO message buffer
3165 */
3166static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3167{
3168 __le32 pHeader = *(__le32 *)piomb;
3169 u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF);
3170
3171 switch (opc) {
3172 case OPC_OUB_ECHO:
3173 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
3174 break;
3175 case OPC_OUB_HW_EVENT:
3176 PM8001_MSG_DBG(pm8001_ha,
3177 pm8001_printk("OPC_OUB_HW_EVENT\n"));
3178 mpi_hw_event(pm8001_ha, piomb);
3179 break;
3180 case OPC_OUB_THERM_HW_EVENT:
3181 PM8001_MSG_DBG(pm8001_ha,
3182 pm8001_printk("OPC_OUB_THERMAL_EVENT\n"));
3183 mpi_thermal_hw_event(pm8001_ha, piomb);
3184 break;
3185 case OPC_OUB_SSP_COMP:
3186 PM8001_MSG_DBG(pm8001_ha,
3187 pm8001_printk("OPC_OUB_SSP_COMP\n"));
3188 mpi_ssp_completion(pm8001_ha, piomb);
3189 break;
3190 case OPC_OUB_SMP_COMP:
3191 PM8001_MSG_DBG(pm8001_ha,
3192 pm8001_printk("OPC_OUB_SMP_COMP\n"));
3193 mpi_smp_completion(pm8001_ha, piomb);
3194 break;
3195 case OPC_OUB_LOCAL_PHY_CNTRL:
3196 PM8001_MSG_DBG(pm8001_ha,
3197 pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
3198 pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
3199 break;
3200 case OPC_OUB_DEV_REGIST:
3201 PM8001_MSG_DBG(pm8001_ha,
3202 pm8001_printk("OPC_OUB_DEV_REGIST\n"));
3203 pm8001_mpi_reg_resp(pm8001_ha, piomb);
3204 break;
3205 case OPC_OUB_DEREG_DEV:
3206 PM8001_MSG_DBG(pm8001_ha,
3207 pm8001_printk("unresgister the deviece\n"));
3208 pm8001_mpi_dereg_resp(pm8001_ha, piomb);
3209 break;
3210 case OPC_OUB_GET_DEV_HANDLE:
3211 PM8001_MSG_DBG(pm8001_ha,
3212 pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
3213 break;
3214 case OPC_OUB_SATA_COMP:
3215 PM8001_MSG_DBG(pm8001_ha,
3216 pm8001_printk("OPC_OUB_SATA_COMP\n"));
3217 mpi_sata_completion(pm8001_ha, piomb);
3218 break;
3219 case OPC_OUB_SATA_EVENT:
3220 PM8001_MSG_DBG(pm8001_ha,
3221 pm8001_printk("OPC_OUB_SATA_EVENT\n"));
3222 mpi_sata_event(pm8001_ha, piomb);
3223 break;
3224 case OPC_OUB_SSP_EVENT:
3225 PM8001_MSG_DBG(pm8001_ha,
3226 pm8001_printk("OPC_OUB_SSP_EVENT\n"));
3227 mpi_ssp_event(pm8001_ha, piomb);
3228 break;
3229 case OPC_OUB_DEV_HANDLE_ARRIV:
3230 PM8001_MSG_DBG(pm8001_ha,
3231 pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
3232 /*This is for target*/
3233 break;
3234 case OPC_OUB_SSP_RECV_EVENT:
3235 PM8001_MSG_DBG(pm8001_ha,
3236 pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
3237 /*This is for target*/
3238 break;
3239 case OPC_OUB_FW_FLASH_UPDATE:
3240 PM8001_MSG_DBG(pm8001_ha,
3241 pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
3242 pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
3243 break;
3244 case OPC_OUB_GPIO_RESPONSE:
3245 PM8001_MSG_DBG(pm8001_ha,
3246 pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
3247 break;
3248 case OPC_OUB_GPIO_EVENT:
3249 PM8001_MSG_DBG(pm8001_ha,
3250 pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
3251 break;
3252 case OPC_OUB_GENERAL_EVENT:
3253 PM8001_MSG_DBG(pm8001_ha,
3254 pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
3255 pm8001_mpi_general_event(pm8001_ha, piomb);
3256 break;
3257 case OPC_OUB_SSP_ABORT_RSP:
3258 PM8001_MSG_DBG(pm8001_ha,
3259 pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
3260 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3261 break;
3262 case OPC_OUB_SATA_ABORT_RSP:
3263 PM8001_MSG_DBG(pm8001_ha,
3264 pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
3265 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3266 break;
3267 case OPC_OUB_SAS_DIAG_MODE_START_END:
3268 PM8001_MSG_DBG(pm8001_ha,
3269 pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
3270 break;
3271 case OPC_OUB_SAS_DIAG_EXECUTE:
3272 PM8001_MSG_DBG(pm8001_ha,
3273 pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
3274 break;
3275 case OPC_OUB_GET_TIME_STAMP:
3276 PM8001_MSG_DBG(pm8001_ha,
3277 pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
3278 break;
3279 case OPC_OUB_SAS_HW_EVENT_ACK:
3280 PM8001_MSG_DBG(pm8001_ha,
3281 pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
3282 break;
3283 case OPC_OUB_PORT_CONTROL:
3284 PM8001_MSG_DBG(pm8001_ha,
3285 pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
3286 break;
3287 case OPC_OUB_SMP_ABORT_RSP:
3288 PM8001_MSG_DBG(pm8001_ha,
3289 pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
3290 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3291 break;
3292 case OPC_OUB_GET_NVMD_DATA:
3293 PM8001_MSG_DBG(pm8001_ha,
3294 pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
3295 pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
3296 break;
3297 case OPC_OUB_SET_NVMD_DATA:
3298 PM8001_MSG_DBG(pm8001_ha,
3299 pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
3300 pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
3301 break;
3302 case OPC_OUB_DEVICE_HANDLE_REMOVAL:
3303 PM8001_MSG_DBG(pm8001_ha,
3304 pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
3305 break;
3306 case OPC_OUB_SET_DEVICE_STATE:
3307 PM8001_MSG_DBG(pm8001_ha,
3308 pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
3309 pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
3310 break;
3311 case OPC_OUB_GET_DEVICE_STATE:
3312 PM8001_MSG_DBG(pm8001_ha,
3313 pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
3314 break;
3315 case OPC_OUB_SET_DEV_INFO:
3316 PM8001_MSG_DBG(pm8001_ha,
3317 pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
3318 break;
3319 /* spcv specifc commands */
3320 case OPC_OUB_PHY_START_RESP:
3321 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3322 "OPC_OUB_PHY_START_RESP opcode:%x\n", opc));
3323 mpi_phy_start_resp(pm8001_ha, piomb);
3324 break;
3325 case OPC_OUB_PHY_STOP_RESP:
3326 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3327 "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc));
3328 mpi_phy_stop_resp(pm8001_ha, piomb);
3329 break;
3330 case OPC_OUB_SET_CONTROLLER_CONFIG:
3331 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3332 "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc));
3333 mpi_set_controller_config_resp(pm8001_ha, piomb);
3334 break;
3335 case OPC_OUB_GET_CONTROLLER_CONFIG:
3336 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3337 "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc));
3338 mpi_get_controller_config_resp(pm8001_ha, piomb);
3339 break;
3340 case OPC_OUB_GET_PHY_PROFILE:
3341 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3342 "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc));
3343 mpi_get_phy_profile_resp(pm8001_ha, piomb);
3344 break;
3345 case OPC_OUB_FLASH_OP_EXT:
3346 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3347 "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc));
3348 mpi_flash_op_ext_resp(pm8001_ha, piomb);
3349 break;
3350 case OPC_OUB_SET_PHY_PROFILE:
3351 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3352 "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc));
3353 mpi_set_phy_profile_resp(pm8001_ha, piomb);
3354 break;
3355 case OPC_OUB_KEK_MANAGEMENT_RESP:
3356 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3357 "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc));
3358 mpi_kek_management_resp(pm8001_ha, piomb);
3359 break;
3360 case OPC_OUB_DEK_MANAGEMENT_RESP:
3361 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3362 "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc));
3363 mpi_dek_management_resp(pm8001_ha, piomb);
3364 break;
3365 case OPC_OUB_SSP_COALESCED_COMP_RESP:
3366 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3367 "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc));
3368 ssp_coalesced_comp_resp(pm8001_ha, piomb);
3369 break;
3370 default:
3371 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3372 "Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
3373 break;
3374 }
3375}
3376
/*
 * process_oq - drain one outbound queue.
 * @pm8001_ha: our hba card information
 * @vec: outbound queue (interrupt vector) number to service
 *
 * Consumes IOMBs from outbound queue @vec until the queue is empty,
 * dispatching each through process_one_iomb() and then freeing the
 * slot back to the circular buffer.  The HBA lock is held for the
 * whole drain.  Returns the last pm8001_mpi_msg_consume() status.
 */
static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
	struct outbound_queue_table *circularQ;
	void *pMsg1 = NULL;
	/* bc (buffer count) is always set by msg_consume on SUCCESS;
	 * uninitialized_var() only silences a false compiler warning. */
	u8 uninitialized_var(bc);
	u32 ret = MPI_IO_STATUS_FAIL;
	unsigned long flags;

	spin_lock_irqsave(&pm8001_ha->lock, flags);
	circularQ = &pm8001_ha->outbnd_q_tbl[vec];
	do {
		ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
		if (MPI_IO_STATUS_SUCCESS == ret) {
			/* process the outbound message; pMsg1 points past
			 * the 4-byte header, so step back to its start */
			process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
			/* free the message from the outbound circular buffer */
			pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
							circularQ, bc);
		}
		if (MPI_IO_STATUS_BUSY == ret) {
			/* Update the producer index from SPC */
			circularQ->producer_index =
				cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
			if (le32_to_cpu(circularQ->producer_index) ==
				circularQ->consumer_idx)
				/* OQ is empty */
				break;
		}
	} while (1);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return ret;
}
3409
/* PCI_DMA_... to our direction translation: maps the kernel's DMA
 * direction codes onto the controller's DATA_DIR_* field encoding
 * used when building IOMB commands. */
static const u8 data_dir_flags[] = {
	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
	[PCI_DMA_TODEVICE]	= DATA_DIR_OUT,/* OUTBOUND */
	[PCI_DMA_FROMDEVICE]	= DATA_DIR_IN,/* INBOUND */
	[PCI_DMA_NONE]		= DATA_DIR_NONE,/* NO TRANSFER */
};
3417
3418static void build_smp_cmd(u32 deviceID, __le32 hTag,
3419 struct smp_req *psmp_cmd, int mode, int length)
3420{
3421 psmp_cmd->tag = hTag;
3422 psmp_cmd->device_id = cpu_to_le32(deviceID);
3423 if (mode == SMP_DIRECT) {
3424 length = length - 4; /* subtract crc */
3425 psmp_cmd->len_ip_ir = cpu_to_le32(length << 16);
3426 } else {
3427 psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
3428 }
3429}
3430
/**
 * pm80xx_chip_smp_req - send a SMP task to FW
 * @pm8001_ha: our hba card information.
 * @ccb: the ccb information this request used.
 *
 * DMA-maps the SMP request/response buffers, builds either a direct or
 * an indirect mode SMP IOMB, and posts it on inbound queue 0.
 * Returns 0 on success or a negative errno; on failure the mapped
 * scatterlists are unmapped via the goto cleanup path.
 */
static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	int elem, rc;
	struct sas_task *task = ccb->task;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len;
	struct smp_req smp_cmd;
	u32 opc;
	struct inbound_queue_table *circularQ;
	char *preq_dma_addr = NULL;
	__le64 tmp_addr;
	u32 i, length;

	memset(&smp_cmd, 0, sizeof(smp_cmd));
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = sg_dma_len(sg_resp);
	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	opc = OPC_INB_SMP_REQUEST;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);

	length = sg_req->length;
	PM8001_IO_DBG(pm8001_ha,
		pm8001_printk("SMP Frame Length %d\n", sg_req->length));
	/* 8-byte frame (header + CRC only) implies direct mode */
	if (!(length - 8))
		pm8001_ha->smp_exp_mode = SMP_DIRECT;
	else
		pm8001_ha->smp_exp_mode = SMP_INDIRECT;

	/* DIRECT MODE support only in spcv/ve */
	/* NOTE(review): this unconditionally overwrites the mode chosen
	 * just above, making the preceding if/else dead code - confirm
	 * whether indirect mode was meant to be disabled permanently. */
	pm8001_ha->smp_exp_mode = SMP_DIRECT;

	/* NOTE(review): phys_to_virt() on a dma_addr_t is only valid when
	 * DMA addresses equal physical addresses (no IOMMU) - verify. */
	tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
	preq_dma_addr = (char *)phys_to_virt(tmp_addr);

	/* INDIRECT MODE command settings. Use DMA */
	if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("SMP REQUEST INDIRECT MODE\n"));
		/* for SPCv indirect mode. Place the top 4 bytes of
		 * SMP Request header here. */
		for (i = 0; i < 4; i++)
			smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
		/* exclude top 4 bytes for SMP req header */
		smp_cmd.long_smp_req.long_req_addr =
			cpu_to_le64((u64)sg_dma_address
				(&task->smp_task.smp_req) - 4);
		/* exclude 4 bytes for SMP req header and CRC */
		smp_cmd.long_smp_req.long_req_size =
			cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
		smp_cmd.long_smp_req.long_resp_addr =
				cpu_to_le64((u64)sg_dma_address
					(&task->smp_task.smp_resp));
		smp_cmd.long_smp_req.long_resp_size =
				cpu_to_le32((u32)sg_dma_len
					(&task->smp_task.smp_resp)-4);
	} else { /* DIRECT MODE */
		smp_cmd.long_smp_req.long_req_addr =
			cpu_to_le64((u64)sg_dma_address
					(&task->smp_task.smp_req));
		smp_cmd.long_smp_req.long_req_size =
			cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
		smp_cmd.long_smp_req.long_resp_addr =
			cpu_to_le64((u64)sg_dma_address
				(&task->smp_task.smp_resp));
		smp_cmd.long_smp_req.long_resp_size =
			cpu_to_le32
			((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
	}
	if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("SMP REQUEST DIRECT MODE\n"));
		/* copy the whole request frame inline into the IOMB:
		 * first 16 bytes into smp_req16[], remainder into smp_req[] */
		for (i = 0; i < length; i++)
			if (i < 16) {
				smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
				PM8001_IO_DBG(pm8001_ha, pm8001_printk(
					"Byte[%d]:%x (DMA data:%x)\n",
					i, smp_cmd.smp_req16[i],
					*(preq_dma_addr)));
			} else {
				smp_cmd.smp_req[i] = *(preq_dma_addr+i);
				PM8001_IO_DBG(pm8001_ha, pm8001_printk(
					"Byte[%d]:%x (DMA data:%x)\n",
					i, smp_cmd.smp_req[i],
					*(preq_dma_addr)));
			}
	}

	build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
				&smp_cmd, pm8001_ha->smp_exp_mode, length);
	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
	return 0;

err_out_2:
	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
			PCI_DMA_FROMDEVICE);
err_out:
	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
			PCI_DMA_TODEVICE);
	return rc;
}
3559
3560static int check_enc_sas_cmd(struct sas_task *task)
3561{
3562 if ((task->ssp_task.cdb[0] == READ_10)
3563 || (task->ssp_task.cdb[0] == WRITE_10)
3564 || (task->ssp_task.cdb[0] == WRITE_VERIFY))
3565 return 1;
3566 else
3567 return 0;
3568}
3569
3570static int check_enc_sat_cmd(struct sas_task *task)
3571{
3572 int ret = 0;
3573 switch (task->ata_task.fis.command) {
3574 case ATA_CMD_FPDMA_READ:
3575 case ATA_CMD_READ_EXT:
3576 case ATA_CMD_READ:
3577 case ATA_CMD_FPDMA_WRITE:
3578 case ATA_CMD_WRITE_EXT:
3579 case ATA_CMD_WRITE:
3580 case ATA_CMD_PIO_READ:
3581 case ATA_CMD_PIO_READ_EXT:
3582 case ATA_CMD_PIO_WRITE:
3583 case ATA_CMD_PIO_WRITE_EXT:
3584 ret = 1;
3585 break;
3586 default:
3587 ret = 0;
3588 break;
3589 }
3590 return ret;
3591}
3592
/**
 * pm80xx_chip_ssp_io_req - send a SSP task to FW
 * @pm8001_ha: our hba card information.
 * @ccb: the ccb information this request used.
 *
 * Builds an SSP initiator IO start IOMB (plain or DIF/encrypted,
 * depending on chip encryption state and the CDB opcode), fills the
 * PRD/SGL from the task's scatterlist, and posts it on inbound queue 0.
 * Returns the status of pm8001_mpi_build_cmd().
 */
static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct ssp_ini_io_start_req ssp_cmd;
	u32 tag = ccb->ccb_tag;
	int ret;
	u64 phys_addr;
	struct inbound_queue_table *circularQ;
	/* NOTE(review): these outbound/inbound queue counters are static,
	 * so they are shared by every HBA instance and updated without
	 * locking - confirm this is safe for the intended queue rotation. */
	static u32 inb;
	static u32 outb;
	u32 opc = OPC_INB_SSPINIIOSTART;
	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
	/* data address domain added for spcv; set to 0 by host,
	 * used internally by controller
	 * 0 for SAS 1.1 and SAS 2.0 compatible TLR
	 */
	ssp_cmd.dad_dir_m_tlr =
		cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
	ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
	ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
	ssp_cmd.tag = cpu_to_le32(tag);
	if (task->ssp_task.enable_first_burst)
		ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
	circularQ = &pm8001_ha->inbnd_q_tbl[0];

	/* Check if encryption is set */
	if (pm8001_ha->chip->encrypt &&
		!(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
			"Encryption enabled.Sending Encrypt SAS command 0x%x\n",
			task->ssp_task.cdb[0]));
		opc = OPC_INB_SSP_INI_DIF_ENC_IO;
		/* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/
		ssp_cmd.dad_dir_m_tlr =	cpu_to_le32
			((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0);

		/* fill in PRD (scatter/gather) table, if any */
		if (task->num_scatter > 1) {
			pm8001_chip_make_sg(task->scatter,
						ccb->n_elem, ccb->buf_prd);
			phys_addr = ccb->ccb_dma_handle +
				offsetof(struct pm8001_ccb_info, buf_prd[0]);
			ssp_cmd.enc_addr_low =
				cpu_to_le32(lower_32_bits(phys_addr));
			ssp_cmd.enc_addr_high =
				cpu_to_le32(upper_32_bits(phys_addr));
			/* bit 31: extended SGL in use */
			ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
		} else if (task->num_scatter == 1) {
			u64 dma_addr = sg_dma_address(task->scatter);
			ssp_cmd.enc_addr_low =
				cpu_to_le32(lower_32_bits(dma_addr));
			ssp_cmd.enc_addr_high =
				cpu_to_le32(upper_32_bits(dma_addr));
			ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
			ssp_cmd.enc_esgl = 0;
		} else if (task->num_scatter == 0) {
			ssp_cmd.enc_addr_low = 0;
			ssp_cmd.enc_addr_high = 0;
			ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
			ssp_cmd.enc_esgl = 0;
		}
		/* XTS mode. All other fields are 0 */
		ssp_cmd.key_cmode = 0x6 << 4;
		/* set tweak values. Should be the start lba */
		ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) |
						(task->ssp_task.cdb[3] << 16) |
						(task->ssp_task.cdb[4] << 8) |
						(task->ssp_task.cdb[5]));
	} else {
		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
			"Sending Normal SAS command 0x%x inb q %x\n",
			task->ssp_task.cdb[0], inb));
		/* fill in PRD (scatter/gather) table, if any */
		if (task->num_scatter > 1) {
			pm8001_chip_make_sg(task->scatter, ccb->n_elem,
					ccb->buf_prd);
			phys_addr = ccb->ccb_dma_handle +
				offsetof(struct pm8001_ccb_info, buf_prd[0]);
			ssp_cmd.addr_low =
				cpu_to_le32(lower_32_bits(phys_addr));
			ssp_cmd.addr_high =
				cpu_to_le32(upper_32_bits(phys_addr));
			/* bit 31: extended SGL in use */
			ssp_cmd.esgl = cpu_to_le32(1<<31);
		} else if (task->num_scatter == 1) {
			u64 dma_addr = sg_dma_address(task->scatter);
			ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
			ssp_cmd.addr_high =
				cpu_to_le32(upper_32_bits(dma_addr));
			ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
			ssp_cmd.esgl = 0;
		} else if (task->num_scatter == 0) {
			ssp_cmd.addr_low = 0;
			ssp_cmd.addr_high = 0;
			ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
			ssp_cmd.esgl = 0;
		}
	}
	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++);

	/* rotate the outb queue */
	outb = outb%PM8001_MAX_SPCV_OUTB_NUM;

	return ret;
}
3709
/*
 * pm80xx_chip_sata_req - send a SATA/STP task to FW.
 * @pm8001_ha: our hba card information.
 * @ccb: the ccb information this request used.
 *
 * Builds a SATA host IO start IOMB (plain or encrypted), choosing the
 * ATA protocol code (ATAP) from the task's transfer mode, fills the
 * PRD/SGL and (for packet devices) the ATAPI CDB, and posts the command
 * on inbound queue 0.  A READ LOG EXT (0x2f) issued against a device
 * flagged for internal NCQ error handling is completed locally instead
 * of being sent to the firmware.
 */
static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
	u32 tag = ccb->ccb_tag;
	int ret;
	/* NOTE(review): static queue counters shared across all HBA
	 * instances, updated without locking - confirm intended. */
	static u32 inb;
	static u32 outb;
	struct sata_start_req sata_cmd;
	u32 hdr_tag, ncg_tag = 0;
	u64 phys_addr;
	u32 ATAP = 0x0;
	u32 dir;
	struct inbound_queue_table *circularQ;
	unsigned long flags;
	u32 opc = OPC_INB_SATA_HOST_OPSTART;
	memset(&sata_cmd, 0, sizeof(sata_cmd));
	circularQ = &pm8001_ha->inbnd_q_tbl[0];

	/* Select the ATA protocol code: 0x04 non-data, 0x05 PIO,
	 * 0x06 DMA, 0x07 FPDMA (NCQ). */
	if (task->data_dir == PCI_DMA_NONE) {
		ATAP = 0x04; /* no data*/
		PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
	} else if (likely(!task->ata_task.device_control_reg_update)) {
		if (task->ata_task.dma_xfer) {
			ATAP = 0x06; /* DMA */
			PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
		} else {
			ATAP = 0x05; /* PIO*/
			PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
		}
		if (task->ata_task.use_ncq &&
			dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
			ATAP = 0x07; /* FPDMA */
			PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
		}
	}
	if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
		/* NCQ tag lives in bits 7:3 of the FIS sector count */
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		ncg_tag = hdr_tag;
	}
	dir = data_dir_flags[task->data_dir] << 8;
	sata_cmd.tag = cpu_to_le32(tag);
	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
	sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);

	sata_cmd.sata_fis = task->ata_task.fis;
	if (likely(!task->ata_task.device_control_reg_update))
		sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
	sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */

	/* Check if encryption is set */
	if (pm8001_ha->chip->encrypt &&
		!(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
			"Encryption enabled.Sending Encrypt SATA cmd 0x%x\n",
			sata_cmd.sata_fis.command));
		opc = OPC_INB_SATA_DIF_ENC_IO;

		/* set encryption bit */
		sata_cmd.ncqtag_atap_dir_m_dad =
			cpu_to_le32(((ncg_tag & 0xff)<<16)|
				((ATAP & 0x3f) << 10) | 0x20 | dir);
							/* dad (bit 0-1) is 0 */
		/* fill in PRD (scatter/gather) table, if any */
		if (task->num_scatter > 1) {
			pm8001_chip_make_sg(task->scatter,
						ccb->n_elem, ccb->buf_prd);
			phys_addr = ccb->ccb_dma_handle +
				offsetof(struct pm8001_ccb_info, buf_prd[0]);
			sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
			sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
			/* bit 31: extended SGL in use */
			sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
		} else if (task->num_scatter == 1) {
			u64 dma_addr = sg_dma_address(task->scatter);
			sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
			sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
			sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
			sata_cmd.enc_esgl = 0;
		} else if (task->num_scatter == 0) {
			sata_cmd.enc_addr_low = 0;
			sata_cmd.enc_addr_high = 0;
			sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
			sata_cmd.enc_esgl = 0;
		}
		/* XTS mode. All other fields are 0 */
		sata_cmd.key_index_mode = 0x6 << 4;
		/* set tweak values. Should be the start lba */
		sata_cmd.twk_val0 =
			cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
					(sata_cmd.sata_fis.lbah << 16) |
					(sata_cmd.sata_fis.lbam << 8) |
					(sata_cmd.sata_fis.lbal));
		sata_cmd.twk_val1 =
			cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) |
					(sata_cmd.sata_fis.lbam_exp));
	} else {
		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
			"Sending Normal SATA command 0x%x inb %x\n",
			sata_cmd.sata_fis.command, inb));
		/* dad (bit 0-1) is 0 */
		sata_cmd.ncqtag_atap_dir_m_dad =
			cpu_to_le32(((ncg_tag & 0xff)<<16) |
					((ATAP & 0x3f) << 10) | dir);

		/* fill in PRD (scatter/gather) table, if any */
		if (task->num_scatter > 1) {
			pm8001_chip_make_sg(task->scatter,
					ccb->n_elem, ccb->buf_prd);
			phys_addr = ccb->ccb_dma_handle +
				offsetof(struct pm8001_ccb_info, buf_prd[0]);
			sata_cmd.addr_low = lower_32_bits(phys_addr);
			sata_cmd.addr_high = upper_32_bits(phys_addr);
			/* bit 31: extended SGL in use */
			sata_cmd.esgl = cpu_to_le32(1 << 31);
		} else if (task->num_scatter == 1) {
			u64 dma_addr = sg_dma_address(task->scatter);
			sata_cmd.addr_low = lower_32_bits(dma_addr);
			sata_cmd.addr_high = upper_32_bits(dma_addr);
			sata_cmd.len = cpu_to_le32(task->total_xfer_len);
			sata_cmd.esgl = 0;
		} else if (task->num_scatter == 0) {
			sata_cmd.addr_low = 0;
			sata_cmd.addr_high = 0;
			sata_cmd.len = cpu_to_le32(task->total_xfer_len);
			sata_cmd.esgl = 0;
		}
		/* scsi cdb: pack the 16-byte ATAPI packet into four LE32s */
		sata_cmd.atapi_scsi_cdb[0] =
			cpu_to_le32(((task->ata_task.atapi_packet[0]) |
			(task->ata_task.atapi_packet[1] << 8) |
			(task->ata_task.atapi_packet[2] << 16) |
			(task->ata_task.atapi_packet[3] << 24)));
		sata_cmd.atapi_scsi_cdb[1] =
			cpu_to_le32(((task->ata_task.atapi_packet[4]) |
			(task->ata_task.atapi_packet[5] << 8) |
			(task->ata_task.atapi_packet[6] << 16) |
			(task->ata_task.atapi_packet[7] << 24)));
		sata_cmd.atapi_scsi_cdb[2] =
			cpu_to_le32(((task->ata_task.atapi_packet[8]) |
			(task->ata_task.atapi_packet[9] << 8) |
			(task->ata_task.atapi_packet[10] << 16) |
			(task->ata_task.atapi_packet[11] << 24)));
		sata_cmd.atapi_scsi_cdb[3] =
			cpu_to_le32(((task->ata_task.atapi_packet[12]) |
			(task->ata_task.atapi_packet[13] << 8) |
			(task->ata_task.atapi_packet[14] << 16) |
			(task->ata_task.atapi_packet[15] << 24)));
	}

	/* Check for read log for failed drive and return: complete a
	 * READ LOG EXT locally when the device is flagged for internal
	 * NCQ error handling instead of forwarding it to firmware. */
	if (sata_cmd.sata_fis.command == 0x2f) {
		if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
			(pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
			(pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
			struct task_status_struct *ts;

			pm8001_ha_dev->id &= 0xDFFFFFFF;
			ts = &task->task_status;

			spin_lock_irqsave(&task->task_state_lock, flags);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAM_STAT_GOOD;
			task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
			task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
			task->task_state_flags |= SAS_TASK_STATE_DONE;
			if (unlikely((task->task_state_flags &
					SAS_TASK_STATE_ABORTED))) {
				spin_unlock_irqrestore(&task->task_state_lock,
							flags);
				PM8001_FAIL_DBG(pm8001_ha,
					pm8001_printk("task 0x%p resp 0x%x "
					" stat 0x%x but aborted by upper layer "
					"\n", task, ts->resp, ts->stat));
				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
				return 0;
			} else if (task->uldd_task) {
				spin_unlock_irqrestore(&task->task_state_lock,
							flags);
				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
				mb();/* ditto */
				/* NOTE(review): dropping/retaking
				 * pm8001_ha->lock here assumes the caller
				 * holds it with irqs disabled - confirm. */
				spin_unlock_irq(&pm8001_ha->lock);
				task->task_done(task);
				spin_lock_irq(&pm8001_ha->lock);
				return 0;
			} else if (!task->uldd_task) {
				spin_unlock_irqrestore(&task->task_state_lock,
							flags);
				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
				mb();/*ditto*/
				spin_unlock_irq(&pm8001_ha->lock);
				task->task_done(task);
				spin_lock_irq(&pm8001_ha->lock);
				return 0;
			}
		}
	}

	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
						&sata_cmd, outb++);

	/* rotate the outb queue */
	outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
	return ret;
}
3915
/**
 * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND
 * @pm8001_ha: our hba card information.
 * @phy_id: the phy id which we wanted to start up.
 *
 * Builds a PHY_START IOMB (auto link mode, 1.5/3/6G rates, spin-up
 * hold disabled) with this HBA's SAS identify frame and posts it on
 * inbound queue 0.  Returns the status of pm8001_mpi_build_cmd().
 */
static int
pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
	struct phy_start_req payload;
	struct inbound_queue_table *circularQ;
	int ret;
	u32 tag = 0x01;
	u32 opcode = OPC_INB_PHYSTART;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	memset(&payload, 0, sizeof(payload));
	payload.tag = cpu_to_le32(tag);

	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
	/*
	 ** [0:7]   PHY Identifier
	 ** [8:11]  link rate 1.5G, 3G, 6G
	 ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
	 ** [14]    0b disable spin up hold; 1b enable spin up hold
	 ** [15]    0b no change in current PHY analog setup 1b enable using SPAST
	 */
	payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
			LINKMODE_AUTO | LINKRATE_15 |
			LINKRATE_30 | LINKRATE_60 | phy_id);
	/* SSC Disable and SAS Analog ST configuration */
	/**
	payload.ase_sh_lm_slr_phyid =
		cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
		LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
		phy_id);
	Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need
	**/

	payload.sas_identify.dev_type = SAS_END_DEVICE;
	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
	memcpy(payload.sas_identify.sas_addr,
		pm8001_ha->sas_addr, SAS_ADDR_SIZE);
	payload.sas_identify.phy_id = phy_id;
	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
	return ret;
}
3963
/**
 * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
 * @pm8001_ha: our hba card information.
 * @phy_id: the phy id which we wanted to stop.
 *
 * Builds a PHY_STOP IOMB for @phy_id and posts it on inbound queue 0.
 * Returns the status of pm8001_mpi_build_cmd().
 */
static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
	u8 phy_id)
{
	struct phy_stop_req payload;
	struct inbound_queue_table *circularQ;
	int ret;
	u32 tag = 0x01;
	u32 opcode = OPC_INB_PHYSTOP;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	memset(&payload, 0, sizeof(payload));
	payload.tag = cpu_to_le32(tag);
	payload.phy_id = cpu_to_le32(phy_id);
	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
	return ret;
}
3985
/**
 * pm80xx_chip_reg_dev_req - register a device with the firmware
 * (see comments on pm8001_mpi_reg_resp for the completion side).
 * @pm8001_ha: our hba card information.
 * @pm8001_dev: device to register.
 * @flag: 1 means the device is direct-attached SATA.
 *
 * Allocates a ccb/tag, builds an OPC_INB_REG_DEV IOMB (device type,
 * negotiated link rate, phy/port id, ITN timeout, SAS address) and
 * posts it on inbound queue 0.  Returns 0 or a negative status.
 */
static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_device *pm8001_dev, u32 flag)
{
	struct reg_dev_req payload;
	u32 opc;
	u32 stp_sspsmp_sata = 0x4;
	struct inbound_queue_table *circularQ;
	u32 linkrate, phy_id;
	int rc, tag = 0xdeadbeef;
	struct pm8001_ccb_info *ccb;
	u8 retryFlag = 0x1;
	u16 firstBurstSize = 0;
	u16 ITNT = 2000;
	struct domain_device *dev = pm8001_dev->sas_device;
	struct domain_device *parent_dev = dev->parent;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];

	memset(&payload, 0, sizeof(payload));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		return rc;
	/* NOTE(review): the tag/ccb allocated here is not released on a
	 * pm8001_mpi_build_cmd() failure below - confirm whether that
	 * path can leak a tag. */
	ccb = &pm8001_ha->ccb_info[tag];
	ccb->device = pm8001_dev;
	ccb->ccb_tag = tag;
	payload.tag = cpu_to_le32(tag);

	/* Device-type field: 0x00 STP, 0x01 SSP/SMP, 0x02 direct SATA */
	if (flag == 1) {
		stp_sspsmp_sata = 0x02; /*direct attached sata */
	} else {
		if (pm8001_dev->dev_type == SAS_SATA_DEV)
			stp_sspsmp_sata = 0x00; /* stp*/
		else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
			pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
			pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
			stp_sspsmp_sata = 0x01; /*ssp or smp*/
	}
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
		phy_id = parent_dev->ex_dev.ex_phy->phy_id;
	else
		phy_id = pm8001_dev->attached_phy;

	opc = OPC_INB_REG_DEV;

	/* use the lower of the device and port link rates */
	linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
			pm8001_dev->sas_device->linkrate : dev->port->linkrate;

	payload.phyid_portid =
		cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) |
		((phy_id & 0xFF) << 8));

	payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
		((linkrate & 0x0F) << 24) |
		((stp_sspsmp_sata & 0x03) << 28));
	payload.firstburstsize_ITNexustimeout =
		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));

	memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
		SAS_ADDR_SIZE);

	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);

	return rc;
}
4052
/**
 * pm80xx_chip_phy_ctl_req - support the local phy operation
 * @pm8001_ha: our hba card information.
 * @phyId: the phy id which we wanted to operate
 * @phy_op: the phy operation code to perform
 *
 * Builds an OPC_INB_LOCAL_PHY_CONTROL IOMB with the operation in
 * bits 15:8 and the phy id in bits 7:0, and posts it on inbound
 * queue 0.  Returns the status of pm8001_mpi_build_cmd().
 */
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
	u32 phyId, u32 phy_op)
{
	struct local_phy_ctl_req payload;
	struct inbound_queue_table *circularQ;
	int ret;
	u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
	memset(&payload, 0, sizeof(payload));
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	payload.tag = cpu_to_le32(1);
	payload.phyop_phyid =
		cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
	return ret;
}
4075
/* Return 1 if the pending interrupt belongs to this HBA, 0 otherwise.
 * Under MSI-X the vector is dedicated, so the interrupt is always
 * ours (the #ifdef early return makes the register read dead code in
 * that configuration); for legacy INTx the ODR register is checked. */
static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
{
	u32 value;
#ifdef PM8001_USE_MSIX
	return 1;
#endif
	value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
	if (value)
		return 1;
	return 0;

}
4088
/**
 * pm80xx_chip_isr - PM80xx isr handler.
 * @pm8001_ha: our hba card information.
 * @vec: interrupt vector / outbound queue number to service.
 *
 * Masks the vector, drains its outbound queue, then re-enables the
 * vector.  Always returns IRQ_HANDLED.
 */
static irqreturn_t
pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
	pm80xx_chip_interrupt_disable(pm8001_ha, vec);
	process_oq(pm8001_ha, vec);
	pm80xx_chip_interrupt_enable(pm8001_ha, vec);
	return IRQ_HANDLED;
}
4103
/* Dispatch table binding the generic pm8001 core to the SPCv/SPCve
 * (80xx) hardware: 80xx-specific hooks where the IOMB formats differ,
 * shared pm8001_* implementations where they do not. */
const struct pm8001_dispatch pm8001_80xx_dispatch = {
	.name			= "pmc80xx",
	.chip_init		= pm80xx_chip_init,
	.chip_soft_rst		= pm80xx_chip_soft_rst,
	.chip_rst		= pm80xx_hw_chip_rst,
	.chip_iounmap		= pm8001_chip_iounmap,
	.isr			= pm80xx_chip_isr,
	.is_our_interupt	= pm80xx_chip_is_our_interupt,
	.isr_process_oq		= process_oq,
	.interrupt_enable	= pm80xx_chip_interrupt_enable,
	.interrupt_disable	= pm80xx_chip_interrupt_disable,
	.make_prd		= pm8001_chip_make_sg,
	.smp_req		= pm80xx_chip_smp_req,
	.ssp_io_req		= pm80xx_chip_ssp_io_req,
	.sata_req		= pm80xx_chip_sata_req,
	.phy_start_req		= pm80xx_chip_phy_start_req,
	.phy_stop_req		= pm80xx_chip_phy_stop_req,
	.reg_dev_req		= pm80xx_chip_reg_dev_req,
	.dereg_dev_req		= pm8001_chip_dereg_dev_req,
	.phy_ctl_req		= pm80xx_chip_phy_ctl_req,
	.task_abort		= pm8001_chip_abort_task,
	.ssp_tm_req		= pm8001_chip_ssp_tm_req,
	.get_nvmd_req		= pm8001_chip_get_nvmd_req,
	.set_nvmd_req		= pm8001_chip_set_nvmd_req,
	.fw_flash_update_req	= pm8001_chip_fw_flash_update_req,
	.set_dev_state_req	= pm8001_chip_set_dev_state_req,
};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
new file mode 100644
index 000000000000..2b760ba75d7b
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -0,0 +1,1523 @@
1/*
2 * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
#ifndef _PMC8001_REG_H_
#define _PMC8001_REG_H_

#include <linux/types.h>
#include <scsi/libsas.h>

/* for Request Opcode of IOMB (inbound queue message) */
#define OPC_INB_ECHO				1	/* 0x000 */
#define OPC_INB_PHYSTART			4	/* 0x004 */
#define OPC_INB_PHYSTOP				5	/* 0x005 */
#define OPC_INB_SSPINIIOSTART			6	/* 0x006 */
#define OPC_INB_SSPINITMSTART			7	/* 0x007 */
/* 0x8 RESV IN SPCv */
#define OPC_INB_RSVD				8	/* 0x008 */
#define OPC_INB_DEV_HANDLE_ACCEPT		9	/* 0x009 */
#define OPC_INB_SSPTGTIOSTART			10	/* 0x00A */
#define OPC_INB_SSPTGTRSPSTART			11	/* 0x00B */
/* 0xC, 0xD, 0xE removed in SPCv */
#define OPC_INB_SSP_ABORT			15	/* 0x00F */
#define OPC_INB_DEREG_DEV_HANDLE		16	/* 0x010 */
#define OPC_INB_GET_DEV_HANDLE			17	/* 0x011 */
#define OPC_INB_SMP_REQUEST			18	/* 0x012 */
/* 0x13 SMP_RESPONSE is removed in SPCv */
#define OPC_INB_SMP_ABORT			20	/* 0x014 */
/* 0x16 RESV IN SPCv */
#define OPC_INB_RSVD1				22	/* 0x016 */
#define OPC_INB_SATA_HOST_OPSTART		23	/* 0x017 */
#define OPC_INB_SATA_ABORT			24	/* 0x018 */
#define OPC_INB_LOCAL_PHY_CONTROL		25	/* 0x019 */
/* 0x1A RESV IN SPCv */
#define OPC_INB_RSVD2				26	/* 0x01A */
#define OPC_INB_FW_FLASH_UPDATE			32	/* 0x020 */
#define OPC_INB_GPIO				34	/* 0x022 */
#define OPC_INB_SAS_DIAG_MODE_START_END		35	/* 0x023 */
#define OPC_INB_SAS_DIAG_EXECUTE		36	/* 0x024 */
/* 0x25 RESV IN SPCv */
#define OPC_INB_RSVD3				37	/* 0x025 */
#define OPC_INB_GET_TIME_STAMP			38	/* 0x026 */
#define OPC_INB_PORT_CONTROL			39	/* 0x027 */
#define OPC_INB_GET_NVMD_DATA			40	/* 0x028 */
#define OPC_INB_SET_NVMD_DATA			41	/* 0x029 */
#define OPC_INB_SET_DEVICE_STATE		42	/* 0x02A */
#define OPC_INB_GET_DEVICE_STATE		43	/* 0x02B */
#define OPC_INB_SET_DEV_INFO			44	/* 0x02C */
/* 0x2D RESV IN SPCv */
#define OPC_INB_RSVD4				45	/* 0x02D */
#define OPC_INB_SGPIO_REGISTER			46	/* 0x02E */
#define OPC_INB_PCIE_DIAG_EXEC			47	/* 0x02F */
#define OPC_INB_SET_CONTROLLER_CONFIG		48	/* 0x030 */
#define OPC_INB_GET_CONTROLLER_CONFIG		49	/* 0x031 */
#define OPC_INB_REG_DEV				50	/* 0x032 */
#define OPC_INB_SAS_HW_EVENT_ACK		51	/* 0x033 */
#define OPC_INB_GET_DEVICE_INFO			52	/* 0x034 */
#define OPC_INB_GET_PHY_PROFILE			53	/* 0x035 */
#define OPC_INB_FLASH_OP_EXT			54	/* 0x036 */
#define OPC_INB_SET_PHY_PROFILE			55	/* 0x037 */
#define OPC_INB_KEK_MANAGEMENT			256	/* 0x100 */
#define OPC_INB_DEK_MANAGEMENT			257	/* 0x101 */
#define OPC_INB_SSP_INI_DIF_ENC_IO		258	/* 0x102 */
#define OPC_INB_SATA_DIF_ENC_IO			259	/* 0x103 */

/* for Response Opcode of IOMB (outbound queue message) */
#define OPC_OUB_ECHO				1	/* 0x001 */
#define OPC_OUB_RSVD				4	/* 0x004 */
#define OPC_OUB_SSP_COMP			5	/* 0x005 */
#define OPC_OUB_SMP_COMP			6	/* 0x006 */
#define OPC_OUB_LOCAL_PHY_CNTRL			7	/* 0x007 */
#define OPC_OUB_RSVD1				10	/* 0x00A */
#define OPC_OUB_DEREG_DEV			11	/* 0x00B */
#define OPC_OUB_GET_DEV_HANDLE			12	/* 0x00C */
#define OPC_OUB_SATA_COMP			13	/* 0x00D */
#define OPC_OUB_SATA_EVENT			14	/* 0x00E */
#define OPC_OUB_SSP_EVENT			15	/* 0x00F */
#define OPC_OUB_RSVD2				16	/* 0x010 */
/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/
#define OPC_OUB_SSP_RECV_EVENT			18	/* 0x012 */
#define OPC_OUB_RSVD3				19	/* 0x013 */
#define OPC_OUB_FW_FLASH_UPDATE			20	/* 0x014 */
#define OPC_OUB_GPIO_RESPONSE			22	/* 0x016 */
#define OPC_OUB_GPIO_EVENT			23	/* 0x017 */
#define OPC_OUB_GENERAL_EVENT			24	/* 0x018 */
#define OPC_OUB_SSP_ABORT_RSP			26	/* 0x01A */
#define OPC_OUB_SATA_ABORT_RSP			27	/* 0x01B */
#define OPC_OUB_SAS_DIAG_MODE_START_END		28	/* 0x01C */
#define OPC_OUB_SAS_DIAG_EXECUTE		29	/* 0x01D */
#define OPC_OUB_GET_TIME_STAMP			30	/* 0x01E */
#define OPC_OUB_RSVD4				31	/* 0x01F */
#define OPC_OUB_PORT_CONTROL			32	/* 0x020 */
#define OPC_OUB_SKIP_ENTRY			33	/* 0x021 */
#define OPC_OUB_SMP_ABORT_RSP			34	/* 0x022 */
#define OPC_OUB_GET_NVMD_DATA			35	/* 0x023 */
#define OPC_OUB_SET_NVMD_DATA			36	/* 0x024 */
#define OPC_OUB_DEVICE_HANDLE_REMOVAL		37	/* 0x025 */
#define OPC_OUB_SET_DEVICE_STATE		38	/* 0x026 */
#define OPC_OUB_GET_DEVICE_STATE		39	/* 0x027 */
#define OPC_OUB_SET_DEV_INFO			40	/* 0x028 */
#define OPC_OUB_RSVD5				41	/* 0x029 */
#define OPC_OUB_HW_EVENT			1792	/* 0x700 */
#define OPC_OUB_DEV_HANDLE_ARRIV		1824	/* 0x720 */
#define OPC_OUB_THERM_HW_EVENT			1840	/* 0x730 */
#define OPC_OUB_SGPIO_RESP			2094	/* 0x82E */
#define OPC_OUB_PCIE_DIAG_EXECUTE		2095	/* 0x82F */
#define OPC_OUB_DEV_REGIST			2098	/* 0x832 */
#define OPC_OUB_SAS_HW_EVENT_ACK		2099	/* 0x833 */
#define OPC_OUB_GET_DEVICE_INFO			2100	/* 0x834 */
/* spcv specific commands */
#define OPC_OUB_PHY_START_RESP			2052	/* 0x804 */
#define OPC_OUB_PHY_STOP_RESP			2053	/* 0x805 */
#define OPC_OUB_SET_CONTROLLER_CONFIG		2096	/* 0x830 */
#define OPC_OUB_GET_CONTROLLER_CONFIG		2097	/* 0x831 */
#define OPC_OUB_GET_PHY_PROFILE			2101	/* 0x835 */
#define OPC_OUB_FLASH_OP_EXT			2102	/* 0x836 */
#define OPC_OUB_SET_PHY_PROFILE			2103	/* 0x837 */
#define OPC_OUB_KEK_MANAGEMENT_RESP		2304	/* 0x900 */
#define OPC_OUB_DEK_MANAGEMENT_RESP		2305	/* 0x901 */
#define OPC_OUB_SSP_COALESCED_COMP_RESP		2306	/* 0x902 */

/* for phy start (fields of phy_start_req.ase_sh_lm_slr_phyid) */
#define SSC_DISABLE_15			(0x01 << 16)
#define SSC_DISABLE_30			(0x02 << 16)
#define SSC_DISABLE_60			(0x04 << 16)
#define SAS_ASE				(0x01 << 15)
#define SPINHOLD_DISABLE		(0x00 << 14)
#define SPINHOLD_ENABLE			(0x01 << 14)
#define LINKMODE_SAS			(0x01 << 12)
#define LINKMODE_DSATA			(0x02 << 12)
#define LINKMODE_AUTO			(0x03 << 12)
#define LINKRATE_15			(0x01 << 8)
#define LINKRATE_30			(0x02 << 8)
#define LINKRATE_60			(0x06 << 8)

/* Thermal related */
#define	THERMAL_ENABLE			0x1
#define	THERMAL_LOG_ENABLE		0x1
#define THERMAL_OP_CODE			0x6
#define LTEMPHIL			 70	/* local high temperature threshold */
#define RTEMPHIL			100	/* remote high temperature threshold */

/* Encryption info (SCRATCH_PAD3 register decoding) */
#define SCRATCH_PAD3_ENC_DISABLED	0x00000000
#define SCRATCH_PAD3_ENC_DIS_ERR	0x00000001
#define SCRATCH_PAD3_ENC_ENA_ERR	0x00000002
#define SCRATCH_PAD3_ENC_READY		0x00000003
#define SCRATCH_PAD3_ENC_MASK		SCRATCH_PAD3_ENC_READY

#define SCRATCH_PAD3_XTS_ENABLED		(1 << 14)
#define SCRATCH_PAD3_SMA_ENABLED		(1 << 4)
#define SCRATCH_PAD3_SMB_ENABLED		(1 << 5)
#define SCRATCH_PAD3_SMF_ENABLED		0
#define SCRATCH_PAD3_SM_MASK			0x000000F0
#define SCRATCH_PAD3_ERR_CODE			0x00FF0000

#define SEC_MODE_SMF				0x0
#define SEC_MODE_SMA				0x100
#define SEC_MODE_SMB				0x200
#define CIPHER_MODE_ECB				0x00000001
#define CIPHER_MODE_XTS				0x00000002
#define KEK_MGMT_SUBOP_KEYCARDUPDATE		0x4

/* SAS protocol timer configuration page */
#define SAS_PROTOCOL_TIMER_CONFIG_PAGE  0x04
#define STP_MCT_TMO                     32
#define SSP_MCT_TMO                     32
#define SAS_MAX_OPEN_TIME				5
#define SMP_MAX_CONN_TIMER              0xFF
#define STP_FRM_TIMER                   0
#define STP_IDLE_TIME                   5 /* 5 us; controller default */
#define SAS_MFD                         0
#define SAS_OPNRJT_RTRY_INTVL           2
#define SAS_DOPNRJT_RTRY_TMO            128
#define SAS_COPNRJT_RTRY_TMO            128

/*
  Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
  Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
  is DOPNRJT_RTRY_TMO
*/
#define SAS_DOPNRJT_RTRY_THR            23438
#define SAS_COPNRJT_RTRY_THR            23438
#define SAS_MAX_AIP                     0x200000
#define IT_NEXUS_TIMEOUT       0x7D0
#define PORT_RECOVERY_TIMEOUT  ((IT_NEXUS_TIMEOUT/100) + 30)
/* Common MPI message header; precedes every IOMB payload. */
struct mpi_msg_hdr {
	__le32 header;	/* Bits [11:0] - Message operation code */
	/* Bits [15:12] - Message Category */
	/* Bits [21:16] - Outboundqueue ID for the
	operation completion message */
	/* Bits [23:22] - Reserved */
	/* Bits [28:24] - Buffer Count, indicates how
	many buffers are allocated for the message */
	/* Bits [30:29] - Reserved */
	/* Bits [31] - Message Valid bit */
} __attribute__((packed, aligned(4)));
235
/*
 * brief the data structure of PHY Start Command
 * use to describe enable the phy (128 bytes)
 */
struct phy_start_req {
	__le32	tag;
	__le32	ase_sh_lm_slr_phyid;
	struct sas_identify_frame sas_identify; /* 28 Bytes */
	__le32 spasti;
	u32	reserved[21];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of PHY Stop Command
 * use to disable the phy (128 bytes)
 */
struct phy_stop_req {
	__le32	tag;
	__le32	phy_id;
	u32	reserved[29];
} __attribute__((packed, aligned(4)));

/* set device bits fis - device to host */
struct set_dev_bits_fis {
	u8	fis_type;	/* 0xA1*/
	u8	n_i_pmport;
	/* b7 : n Bit. Notification bit. If set device needs attention. */
	/* b6 : i Bit. Interrupt Bit */
	/* b5-b4: reserved2 */
	/* b3-b0: PM Port */
	u8	status;
	u8	error;
	u32	_r_a;
} __attribute__ ((packed));

/* PIO setup FIS - device to host */
struct pio_setup_fis {
	u8	fis_type;	/* 0x5f */
	u8	i_d_pmPort;
	/* b7 : reserved */
	/* b6 : i bit. Interrupt bit */
	/* b5 : d bit. data transfer direction. set to 1 for device to host
	xfer */
	/* b4 : reserved */
	/* b3-b0: PM Port */
	u8	status;
	u8	error;
	u8	lbal;
	u8	lbam;
	u8	lbah;
	u8	device;
	u8	lbal_exp;
	u8	lbam_exp;
	u8	lbah_exp;
	u8	_r_a;
	u8	sector_count;
	u8	sector_count_exp;
	u8	_r_b;
	u8	e_status;
	u8	_r_c[2];
	u8	transfer_count;
} __attribute__ ((packed));
297
/*
 * brief the data structure of SATA Completion Response
 * use to describe the sata task response (64 bytes)
 */
struct sata_completion_resp {
	__le32	tag;
	__le32	status;
	__le32	param;
	u32	sata_resp[12];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of SAS HW Event Notification
 * use to alert the host about the hardware event(64 bytes)
 */
/* updated outbound struct for spcv */

struct hw_event_resp {
	__le32	lr_status_evt_portid;
	__le32	evt_param;
	__le32	phyid_npip_portstate;
	struct sas_identify_frame	sas_identify;
	struct dev_to_host_fis	sata_fis;
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure for thermal event notification
 */

struct thermal_hw_event {
	__le32	thermal_event;
	__le32	rht_lht;
} __attribute__((packed, aligned(4)));
331
/*
 * brief the data structure of REGISTER DEVICE Command
 * use to describe MPI REGISTER DEVICE Command (64 bytes)
 */

struct reg_dev_req {
	__le32	tag;
	__le32	phyid_portid;
	__le32	dtype_dlr_mcn_ir_retry;
	__le32	firstburstsize_ITNexustimeout;
	u8	sas_addr[SAS_ADDR_SIZE];
	__le32	upper_device_id;
	u32	reserved[24];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of DEREGISTER DEVICE Command
 * use to request spc to remove all internal resources associated
 * with the device id (64 bytes)
 */

struct dereg_dev_req {
	__le32	tag;
	__le32	device_id;
	u32	reserved[29];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of DEVICE_REGISTRATION Response
 * use to notify the completion of the device registration (64 bytes)
 */
struct dev_reg_resp {
	__le32	tag;
	__le32	status;
	__le32	device_id;
	u32	reserved[12];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of Local PHY Control Command
 * use to issue PHY CONTROL to local phy (64 bytes)
 */
struct local_phy_ctl_req {
	__le32	tag;
	__le32	phyop_phyid;
	u32	reserved1[29];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of Local Phy Control Response
 * use to describe MPI Local Phy Control Response (64 bytes)
 */
struct local_phy_ctl_resp {
	__le32	tag;
	__le32	phyop_phyid;
	__le32	status;
	u32	reserved[12];
} __attribute__((packed, aligned(4)));
390
/* masks for phyop_phyid above: operation and phy id fields */
#define OP_BITS 0x0000FF00
#define ID_BITS 0x000000FF

/*
 * brief the data structure of PORT Control Command
 * use to control port properties (64 bytes)
 */

struct port_ctl_req {
	__le32	tag;
	__le32	portop_portid;
	__le32	param0;
	__le32	param1;
	u32	reserved1[27];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of HW Event Ack Command
 * use to acknowledge receive HW event (64 bytes)
 */
struct hw_event_ack_req {
	__le32	tag;
	__le32	phyid_sea_portid;
	__le32	param0;
	__le32	param1;
	u32	reserved1[27];
} __attribute__((packed, aligned(4)));
418
/*
 * brief the data structure of PHY_START Response Command
 * indicates the completion of PHY_START command (64 bytes)
 */
struct phy_start_resp {
	__le32	tag;
	__le32	status;
	__le32	phyid;
	u32	reserved[12];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of PHY_STOP Response Command
 * indicates the completion of PHY_STOP command (64 bytes)
 */
struct phy_stop_resp {
	__le32	tag;
	__le32	status;
	__le32	phyid;
	u32	reserved[12];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of SSP Completion Response
 * use to indicate a SSP Completion (n bytes)
 */
struct ssp_completion_resp {
	__le32	tag;
	__le32	status;
	__le32	param;
	__le32	ssptag_rescv_rescpad;
	struct ssp_response_iu ssp_resp_iu;
	__le32	residual_count;
} __attribute__((packed, aligned(4)));

/* residual count valid bit in ssptag_rescv_rescpad */
#define SSP_RESCV_BIT	0x00010000
455
/*
 * brief the data structure of SATA EVENT response
 * use to indicate a SATA Completion (64 bytes)
 */
struct sata_event_resp {
	__le32 tag;
	__le32 event;
	__le32 port_id;
	__le32 device_id;
	u32 reserved;
	__le32 event_param0;
	__le32 event_param1;
	__le32 sata_addr_h32;
	__le32 sata_addr_l32;
	__le32 e_udt1_udt0_crc;
	__le32 e_udt5_udt4_udt3_udt2;
	__le32 a_udt1_udt0_crc;
	__le32 a_udt5_udt4_udt3_udt2;
	__le32 hwdevid_diferr;
	__le32 err_framelen_byteoffset;
	__le32 err_dataframe;
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of SSP EVENT response
 * use to indicate a SSP Completion (64 bytes)
 */
struct ssp_event_resp {
	__le32 tag;
	__le32 event;
	__le32 port_id;
	__le32 device_id;
	__le32 ssp_tag;
	__le32 event_param0;
	__le32 event_param1;
	__le32 sas_addr_h32;
	__le32 sas_addr_l32;
	__le32 e_udt1_udt0_crc;
	__le32 e_udt5_udt4_udt3_udt2;
	__le32 a_udt1_udt0_crc;
	__le32 a_udt5_udt4_udt3_udt2;
	__le32 hwdevid_diferr;
	__le32 err_framelen_byteoffset;
	__le32 err_dataframe;
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of General Event Notification Response
 * use to describe MPI General Event Notification Response (64 bytes)
 */
struct general_event_resp {
	__le32	status;
	__le32	inb_IOMB_payload[14];
} __attribute__((packed, aligned(4)));

/* number of __le32 words echoed back in inb_IOMB_payload */
#define GENERAL_EVENT_PAYLOAD	14
#define OPCODE_BITS	0x00000fff
513
/*
 * brief the data structure of SMP Request Command
 * use to describe MPI SMP REQUEST Command (64 bytes)
 */
struct smp_req {
	__le32	tag;
	__le32	device_id;
	__le32	len_ip_ir;
	/* Bits [0] - Indirect response */
	/* Bits [1] - Indirect Payload */
	/* Bits [15:2] - Reserved */
	/* Bits [23:16] - direct payload Len */
	/* Bits [31:24] - Reserved */
	u8	smp_req16[16];
	union {
		u8	smp_req[32];
		struct {
			__le64 long_req_addr;/* sg dma address, LE */
			__le32 long_req_size;/* LE */
			u32	_r_a;
			__le64 long_resp_addr;/* sg dma address, LE */
			__le32 long_resp_size;/* LE */
			u32	_r_b;
		} long_smp_req;/* sequencer extension */
	};
	__le32	rsvd[16];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of SMP Completion Response
 * use to describe MPI SMP Completion Response (64 bytes)
 */
struct smp_completion_resp {
	__le32	tag;
	__le32	status;
	__le32	param;
	u8	_r_a[252];
} __attribute__((packed, aligned(4)));
551
/*
 *brief the data structure of SSP SMP SATA Abort Command
 * use to describe MPI SSP SMP & SATA Abort Command (64 bytes)
 */
struct task_abort_req {
	__le32	tag;
	__le32	device_id;
	__le32	tag_to_abort;
	__le32	abort_all;
	u32	reserved[27];
} __attribute__((packed, aligned(4)));

/* These flags used for SSP SMP & SATA Abort */
#define ABORT_MASK		0x3
#define ABORT_SINGLE		0x0
#define ABORT_ALL		0x1

/**
 * brief the data structure of SSP SATA SMP Abort Response
 * use to describe SSP SMP & SATA Abort Response ( 64 bytes)
 */
struct task_abort_resp {
	__le32	tag;
	__le32	status;
	__le32	scp;
	u32	reserved[12];
} __attribute__((packed, aligned(4)));
579
/**
 * brief the data structure of SAS Diagnostic Start/End Command
 * use to describe MPI SAS Diagnostic Start/End Command (64 bytes)
 */
struct sas_diag_start_end_req {
	__le32	tag;
	__le32	operation_phyid;
	u32	reserved[29];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of SAS Diagnostic Execute Command
 * use to describe MPI SAS Diagnostic Execute Command (64 bytes)
 */
struct sas_diag_execute_req {
	__le32	tag;
	__le32	cmdtype_cmddesc_phyid;
	__le32	pat1_pat2;
	__le32	threshold;
	__le32	codepat_errmsk;
	__le32	pmon;
	__le32	pERF1CTL;
	u32	reserved[24];
} __attribute__((packed, aligned(4)));

#define SAS_DIAG_PARAM_BYTES 24

/*
 * brief the data structure of Set Device State Command
 * use to describe MPI Set Device State Command (64 bytes)
 */
struct set_dev_state_req {
	__le32	tag;
	__le32	device_id;
	__le32	nds;
	u32	reserved[28];
} __attribute__((packed, aligned(4)));
617
/*
 * brief the data structure of SATA Start Command
 * use to describe MPI SATA IO Start Command (64 bytes)
 * Note: This structure is common for normal / encryption I/O
 * NOTE(review): several per-dword comments below look swapped between the
 * normal and encryption cases (e.g. addr_low says "rsvd for enc I/O" while
 * reserved2 says "rsvd for normal I/O") - verify against the SPCv
 * programming manual before relying on them.
 */

struct sata_start_req {
	__le32	tag;
	__le32	device_id;
	__le32	data_len;
	__le32	ncqtag_atap_dir_m_dad;
	struct host_to_dev_fis	sata_fis;
	u32	reserved1;
	u32	reserved2;	/* dword 11. rsvd for normal I/O. */
				/* EPLE Descl for enc I/O */
	u32	addr_low;	/* dword 12. rsvd for enc I/O */
	u32	addr_high;	/* dword 13. reserved for enc I/O */
	__le32	len;		/* dword 14: length for normal I/O. */
				/* EPLE Desch for enc I/O */
	__le32	esgl;		/* dword 15. rsvd for enc I/O */
	__le32	atapi_scsi_cdb[4];	/* dword 16-19. rsvd for enc I/O */
	/* The below fields are reserved for normal I/O */
	__le32	key_index_mode;	/* dword 20 */
	__le32	sector_cnt_enss;/* dword 21 */
	__le32	keytagl;	/* dword 22 */
	__le32	keytagh;	/* dword 23 */
	__le32	twk_val0;	/* dword 24 */
	__le32	twk_val1;	/* dword 25 */
	__le32	twk_val2;	/* dword 26 */
	__le32	twk_val3;	/* dword 27 */
	__le32	enc_addr_low;	/* dword 28. Encryption SGL address high */
	__le32	enc_addr_high;	/* dword 29. Encryption SGL address low */
	__le32	enc_len;	/* dword 30. Encryption length */
	__le32	enc_esgl;	/* dword 31. Encryption esgl bit */
} __attribute__((packed, aligned(4)));
653
/**
 * brief the data structure of SSP INI TM Start Command
 * use to describe MPI SSP INI TM Start Command (64 bytes)
 */
struct ssp_ini_tm_start_req {
	__le32	tag;
	__le32	device_id;
	__le32	relate_tag;
	__le32	tmf;
	u8	lun[8];
	__le32	ds_ads_m;
	u32	reserved[24];
} __attribute__((packed, aligned(4)));

/* SSP information unit embedded in ssp_ini_io_start_req */
struct ssp_info_unit {
	u8	lun[8];/* SCSI Logical Unit Number */
	u8	reserved1;/* reserved */
	u8	efb_prio_attr;
	/* B7 : enabledFirstBurst */
	/* B6-3 : taskPriority */
	/* B2-0 : taskAttribute */
	u8	reserved2;	/* reserved */
	u8	additional_cdb_len;
	/* B7-2 : additional_cdb_len */
	/* B1-0 : reserved */
	u8	cdb[16];/* The SCSI CDB up to 16 bytes length */
} __attribute__((packed, aligned(4)));
681
/**
 * brief the data structure of SSP INI IO Start Command
 * use to describe MPI SSP INI IO Start Command (64 bytes)
 * Note: This structure is common for normal / encryption I/O
 */
struct ssp_ini_io_start_req {
	__le32	tag;
	__le32	device_id;
	__le32	data_len;
	__le32	dad_dir_m_tlr;
	struct ssp_info_unit	ssp_iu;
	__le32	addr_low;	/* dword 12: sgl low for normal I/O. */
				/* epl_descl for encryption I/O */
	__le32	addr_high;	/* dword 13: sgl hi for normal I/O */
				/* dpl_descl for encryption I/O */
	__le32	len;		/* dword 14: len for normal I/O. */
				/* edpl_desch for encryption I/O */
	__le32	esgl;		/* dword 15: ESGL bit for normal I/O. */
				/* user defined tag mask for enc I/O */
	/* The below fields are reserved for normal I/O */
	u8	udt[12];	/* dword 16-18 */
	__le32	sectcnt_ios;	/* dword 19 */
	__le32	key_cmode;	/* dword 20 */
	__le32	ks_enss;	/* dword 21 */
	__le32	keytagl;	/* dword 22 */
	__le32	keytagh;	/* dword 23 */
	__le32	twk_val0;	/* dword 24 */
	__le32	twk_val1;	/* dword 25 */
	__le32	twk_val2;	/* dword 26 */
	__le32	twk_val3;	/* dword 27 */
	__le32	enc_addr_low;	/* dword 28: Encryption sgl addr low */
	__le32	enc_addr_high;	/* dword 29: Encryption sgl addr hi */
	__le32	enc_len;	/* dword 30: Encryption length */
	__le32	enc_esgl;	/* dword 31: ESGL bit for encryption */
} __attribute__((packed, aligned(4)));
717
/**
 * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND
 * use to initiate SSP I/O operation with optional DIF/ENC
 */
struct ssp_dif_enc_io_req {
	__le32	tag;
	__le32	device_id;
	__le32	data_len;
	__le32	dirMTlr;
	__le32	sspiu0;
	__le32	sspiu1;
	__le32	sspiu2;
	__le32	sspiu3;
	__le32	sspiu4;
	__le32	sspiu5;
	__le32	sspiu6;
	__le32	epl_des;
	__le32	dpl_desl_ndplr;
	__le32	dpl_desh;
	__le32	uum_uuv_bss_difbits;
	u8	udt[12];
	__le32	sectcnt_ios;
	__le32	key_cmode;
	__le32	ks_enss;
	__le32	keytagl;
	__le32	keytagh;
	__le32	twk_val0;
	__le32	twk_val1;
	__le32	twk_val2;
	__le32	twk_val3;
	__le32	addr_low;
	__le32	addr_high;
	__le32	len;
	__le32	esgl;
} __attribute__((packed, aligned(4)));
753
/**
 * brief the data structure of Firmware download
 * use to describe MPI FW DOWNLOAD Command (64 bytes)
 */
struct fw_flash_Update_req {
	__le32	tag;
	__le32	cur_image_offset;
	__le32	cur_image_len;
	__le32	total_image_len;
	u32	reserved0[7];
	__le32	sgl_addr_lo;
	__le32	sgl_addr_hi;
	__le32	len;
	__le32	ext_reserved;
	u32	reserved1[16];
} __attribute__((packed, aligned(4)));

#define FWFLASH_IOMB_RESERVED_LEN 0x07
/**
 * brief the data structure of FW_FLASH_UPDATE Response
 * use to describe MPI FW_FLASH_UPDATE Response (64 bytes)
 *
 */
struct fw_flash_Update_resp {
	__le32	tag;
	__le32	status;
	u32	reserved[13];
} __attribute__((packed, aligned(4)));
782
/**
 * brief the data structure of Get NVM Data Command
 * use to get data from NVM in HBA(64 bytes)
 */
struct get_nvm_data_req {
	__le32	tag;
	__le32	len_ir_vpdd;
	__le32	vpd_offset;
	u32	reserved[8];
	__le32	resp_addr_lo;
	__le32	resp_addr_hi;
	__le32	resp_len;
	u32	reserved1[17];
} __attribute__((packed, aligned(4)));

/* same layout as get_nvm_data_req; used to write NVM data */
struct set_nvm_data_req {
	__le32	tag;
	__le32	len_ir_vpdd;
	__le32	vpd_offset;
	u32	reserved[8];
	__le32	resp_addr_lo;
	__le32	resp_addr_hi;
	__le32	resp_len;
	u32	reserved1[17];
} __attribute__((packed, aligned(4)));
808
/**
 * brief the data structure for SET CONTROLLER CONFIG COMMAND
 * use to modify controller configuration
 */
struct set_ctrl_cfg_req {
	__le32	tag;
	__le32	cfg_pg[14];
	u32	reserved[16];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for GET CONTROLLER CONFIG COMMAND
 * use to get controller configuration page
 */
struct get_ctrl_cfg_req {
	__le32	tag;
	__le32	pgcd;
	__le32	int_vec;
	u32	reserved[28];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for KEK_MANAGEMENT COMMAND
 * use for KEK management
 */
struct kek_mgmt_req {
	__le32	tag;
	__le32	new_curidx_ksop;
	u32	reserved;
	__le32	kblob[12];
	u32	reserved1[16];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for DEK_MANAGEMENT COMMAND
 * use for DEK management
 */
struct dek_mgmt_req {
	__le32	tag;
	__le32	kidx_dsop;
	__le32	dekidx;
	__le32	addr_l;
	__le32	addr_h;
	__le32	nent;
	__le32	dbf_tblsize;
	u32	reserved[24];
} __attribute__((packed, aligned(4)));
856
/**
 * brief the data structure for SET PHY PROFILE COMMAND
 * use to set phy specific profile settings
 */
struct set_phy_profile_req {
	__le32	tag;
	__le32	ppc_phyid;
	u32	reserved[29];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for GET PHY PROFILE COMMAND
 * use to retrieve phy specific information
 */
struct get_phy_profile_req {
	__le32	tag;
	__le32	ppc_phyid;
	__le32	profile[29];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for EXT FLASH PARTITION
 * use to manage ext flash partition
 */
struct ext_flash_partition_req {
	__le32	tag;
	__le32	cmd;
	__le32	offset;
	__le32	len;
	u32	reserved[7];
	__le32	addr_low;
	__le32	addr_high;
	__le32	len1;
	__le32	ext;
	u32	reserved1[16];
} __attribute__((packed, aligned(4)));
893
/* NVM device types (len_ir_vpdd device selector) */
#define TWI_DEVICE	0x0
#define C_SEEPROM	0x1
#define VPD_FLASH	0x4
#define AAP1_RDUMP	0x5
#define IOP_RDUMP	0x6
#define EXPAN_ROM	0x7

#define IPMode		0x80000000
#define NVMD_TYPE	0x0000000F
#define NVMD_STAT	0x0000FFFF
#define NVMD_LEN	0xFF000000
/**
 * brief the data structure of Get NVMD Data Response
 * use to describe MPI Get NVMD Data Response (64 bytes)
 */
struct get_nvm_data_resp {
	__le32	tag;
	__le32	ir_tda_bn_dps_das_nvm;
	__le32	dlen_status;
	__le32	nvm_data[12];
} __attribute__((packed, aligned(4)));
915
/**
 * brief the data structure of SAS Diagnostic Start/End Response
 * use to describe MPI SAS Diagnostic Start/End Response (64 bytes)
 *
 */
struct sas_diag_start_end_resp {
	__le32	tag;
	__le32	status;
	u32	reserved[13];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of SAS Diagnostic Execute Response
 * use to describe MPI SAS Diagnostic Execute Response (64 bytes)
 *
 */
struct sas_diag_execute_resp {
	__le32	tag;
	__le32	cmdtype_cmddesc_phyid;
	__le32	Status;
	__le32	ReportData;
	u32	reserved[11];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of Set Device State Response
 * use to describe MPI Set Device State Response (64 bytes)
 *
 */
struct set_dev_state_resp {
	__le32	tag;
	__le32	status;
	__le32	device_id;
	__le32	pds_nds;
	u32	reserved[11];
} __attribute__((packed, aligned(4)));
952
/* new outbound structure for spcv - begins */
/**
 * brief the data structure for SET CONTROLLER CONFIG Response
 * reports the result of a controller configuration change
 */
struct set_ctrl_cfg_resp {
	__le32 tag;
	__le32 status;
	__le32 err_qlfr_pgcd;
	u32 reserved[12];
} __attribute__((packed, aligned(4)));

/* response carrying the requested controller configuration page */
struct get_ctrl_cfg_resp {
	__le32 tag;
	__le32 status;
	__le32 err_qlfr;
	__le32 confg_page[12];
} __attribute__((packed, aligned(4)));

/* response for KEK management operations */
struct kek_mgmt_resp {
	__le32 tag;
	__le32 status;
	__le32 kidx_new_curr_ksop;
	__le32 err_qlfr;
	u32 reserved[11];
} __attribute__((packed, aligned(4)));

/* response for DEK management operations */
struct dek_mgmt_resp {
	__le32 tag;
	__le32 status;
	__le32 kekidx_tbls_dsop;
	__le32 dekidx;
	__le32 err_qlfr;
	u32 reserved[10];
} __attribute__((packed, aligned(4)));

/* response carrying phy-profile data for GET PHY PROFILE */
struct get_phy_profile_resp {
	__le32 tag;
	__le32 status;
	__le32 ppc_phyid;
	__le32 ppc_specific_rsp[12];
} __attribute__((packed, aligned(4)));

/* response for extended flash partition operations */
struct flash_op_ext_resp {
	__le32 tag;
	__le32 cmd;
	__le32 status;
	__le32 epart_size;
	__le32 epart_sect_size;
	u32 reserved[10];
} __attribute__((packed, aligned(4)));

/* response for SET PHY PROFILE */
struct set_phy_profile_resp {
	__le32 tag;
	__le32 status;
	__le32 ppc_phyid;
	__le32 ppc_specific_rsp[12];
} __attribute__((packed, aligned(4)));

/* coalesced SSP completion: up to several tag/ssp_tag pairs per IOMB */
struct ssp_coalesced_comp_resp {
	__le32 coal_cnt;
	__le32 tag0;
	__le32 ssp_tag0;
	__le32 tag1;
	__le32 ssp_tag1;
	__le32 add_tag_ssp_tag[10];
} __attribute__((packed, aligned(4)));

/* new outbound structure for spcv - ends */
1020
/* brief data structure for SAS protocol timer configuration page.
 * Field comments give the dword index within the page.
 */
struct SASProtocolTimerConfig {
	__le32 pageCode;			/* 0 */
	__le32 MST_MSI;				/* 1 */
	__le32 STP_SSP_MCT_TMO;			/* 2 */
	__le32 STP_FRM_TMO;			/* 3 */
	__le32 STP_IDLE_TMO;			/* 4 */
	__le32 OPNRJT_RTRY_INTVL;		/* 5 */
	__le32 Data_Cmd_OPNRJT_RTRY_TMO;	/* 6 */
	__le32 Data_Cmd_OPNRJT_RTRY_THR;	/* 7 */
	__le32 MAX_AIP;				/* 8 */
} __attribute__((packed, aligned(4)));

typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;

/* new/previous device state fields in set_dev_state_resp.pds_nds */
#define NDS_BITS 0x0F
#define PDS_BITS 0xF0
1042
1043/*
1044 * HW Events type
1045 */
1046
1047#define HW_EVENT_RESET_START 0x01
1048#define HW_EVENT_CHIP_RESET_COMPLETE 0x02
1049#define HW_EVENT_PHY_STOP_STATUS 0x03
1050#define HW_EVENT_SAS_PHY_UP 0x04
1051#define HW_EVENT_SATA_PHY_UP 0x05
1052#define HW_EVENT_SATA_SPINUP_HOLD 0x06
1053#define HW_EVENT_PHY_DOWN 0x07
1054#define HW_EVENT_PORT_INVALID 0x08
1055#define HW_EVENT_BROADCAST_CHANGE 0x09
1056#define HW_EVENT_PHY_ERROR 0x0A
1057#define HW_EVENT_BROADCAST_SES 0x0B
1058#define HW_EVENT_INBOUND_CRC_ERROR 0x0C
1059#define HW_EVENT_HARD_RESET_RECEIVED 0x0D
1060#define HW_EVENT_MALFUNCTION 0x0E
1061#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F
1062#define HW_EVENT_BROADCAST_EXP 0x10
1063#define HW_EVENT_PHY_START_STATUS 0x11
1064#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12
1065#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13
1066#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14
1067#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15
1068#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16
1069#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17
1070#define HW_EVENT_PORT_RECOVER 0x18
1071#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19
1072#define HW_EVENT_PORT_RESET_COMPLETE 0x20
1073#define EVENT_BROADCAST_ASYNCH_EVENT 0x21
1074
1075/* port state */
1076#define PORT_NOT_ESTABLISHED 0x00
1077#define PORT_VALID 0x01
1078#define PORT_LOSTCOMM 0x02
1079#define PORT_IN_RESET 0x04
1080#define PORT_3RD_PARTY_RESET 0x07
1081#define PORT_INVALID 0x08
1082
/*
 * SSP/SMP/SATA IO Completion Status values
 */

#define IO_SUCCESS 0x00
#define IO_ABORTED 0x01
#define IO_OVERFLOW 0x02
#define IO_UNDERFLOW 0x03
#define IO_FAILED 0x04
#define IO_ABORT_RESET 0x05
#define IO_NOT_VALID 0x06
#define IO_NO_DEVICE 0x07
#define IO_ILLEGAL_PARAMETER 0x08
#define IO_LINK_FAILURE 0x09
#define IO_PROG_ERROR 0x0A

#define IO_EDC_IN_ERROR 0x0B
#define IO_EDC_OUT_ERROR 0x0C
#define IO_ERROR_HW_TIMEOUT 0x0D
#define IO_XFER_ERROR_BREAK 0x0E
#define IO_XFER_ERROR_PHY_NOT_READY 0x0F
#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10
#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11
#define IO_OPEN_CNX_ERROR_BREAK 0x12
#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13
#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14
#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15
#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16
#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17
/* This error code 0x18 is not used on SPCv */
#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18
#define IO_XFER_ERROR_NAK_RECEIVED 0x19
#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A
#define IO_XFER_ERROR_PEER_ABORTED 0x1B
#define IO_XFER_ERROR_RX_FRAME 0x1C
#define IO_XFER_ERROR_DMA 0x1D
#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E
#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F
#define IO_XFER_ERROR_SATA 0x20

/* This error code 0x22 is not used on SPCv */
#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22
#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21
#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23
#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24
/* This error code 0x25 is not used on SPCv */
#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25
#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26
#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27
#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28
#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30

/* The following error codes 0x31 and 0x32 are not used (obsolete) */
#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31
#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32

#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34
#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35
#define IO_XFER_CMD_FRAME_ISSUED 0x36
#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37
#define IO_PORT_IN_RESET 0x38
#define IO_DS_NON_OPERATIONAL 0x39
#define IO_DS_IN_RECOVERY 0x3A
#define IO_TM_TAG_NOT_FOUND 0x3B
#define IO_XFER_PIO_SETUP_ERROR 0x3C
#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D
#define IO_DS_IN_ERROR 0x3E
#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F
#define IO_ABORT_IN_PROGRESS 0x40
#define IO_ABORT_DELAYED 0x41
#define IO_INVALID_LENGTH 0x42
1154
/********** additional response event values *****************/

#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT 0x43
#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED 0x44
#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO 0x45
#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST 0x46
#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47
#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48
#define IO_DS_INVALID 0x49
/* WARNING: the value is not contiguous from here */
#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52
#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53
#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54
#define MPI_IO_RQE_BUSY_FULL 0x55
#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56
#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME 0x57
#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58

/* MPI-level (non per-IO) error codes */
#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004
#define MPI_ERR_ATAPI_DEVICE_BUSY 0x1024
1175
/* Encryption (DEK/KEK) IO error codes */
#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS 0x2040
/*
 * An encryption IO request failed due to DEK Key Tag mismatch.
 * The key tag supplied in the encryption IOMB does not match with
 * the Key Tag in the referenced DEK Entry.
 */
#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH 0x2041
#define IO_XFR_ERROR_CIPHER_MODE_INVALID 0x2042
/*
 * An encryption I/O request failed because the initial value (IV)
 * in the unwrapped DEK blob didn't match the IV used to unwrap it.
 */
#define IO_XFR_ERROR_DEK_IV_MISMATCH 0x2043
/* An encryption I/O request failed due to an internal RAM ECC or
 * interface error while unwrapping the DEK. */
#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR 0x2044
/* An encryption I/O request failed due to an internal RAM ECC or
 * interface error while unwrapping the DEK. */
#define IO_XFR_ERROR_INTERNAL_RAM 0x2045
/*
 * An encryption I/O request failed
 * because the DEK index specified in the I/O was outside the bounds of
 * the total number of entries in the host DEK table.
 */
/* BUGFIX: the name and value were fused into one token
 * (IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS0x2046), which defined an empty
 * macro under the wrong name; split them so the intended macro exists. */
#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS 0x2046
1201
/* define DIF IO response error status code */
#define IO_XFR_ERROR_DIF_MISMATCH 0x3000
#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH 0x3001
#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH 0x3002
#define IO_XFR_ERROR_DIF_CRC_MISMATCH 0x3003

/* define operator management response status and error qualifier code */
#define OPR_MGMT_OP_NOT_SUPPORTED 0x2060
#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL 0x2061
#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND 0x2062
#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH 0x2063
#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED 0x2064
#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL 0x2022
#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE 0x2023
/***************** additional response event values ***************/

/* WARNING: This error code must always be the last number.
 * If you add error code, modify this code also
 * It is used as an index
 */
/* NOTE(review): 0x2023 collides with
 * OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE above - the two live in
 * different namespaces (operator-mgmt status vs IO completion status),
 * but confirm against the firmware spec before relying on the value. */
#define IO_ERROR_UNKNOWN_GENERIC 0x2023
1223
/* MSGU CONFIGURATION TABLE - register byte offsets within the MSGU BAR */

#define SPCv_MSGU_CFG_TABLE_UPDATE 0x01
#define SPCv_MSGU_CFG_TABLE_RESET 0x02
#define SPCv_MSGU_CFG_TABLE_FREEZE 0x04
#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x08
#define MSGU_IBDB_SET 0x00
#define MSGU_HOST_INT_STATUS 0x08
#define MSGU_HOST_INT_MASK 0x0C
#define MSGU_IOPIB_INT_STATUS 0x18
#define MSGU_IOPIB_INT_MASK 0x1C
#define MSGU_IBDB_CLEAR 0x20

#define MSGU_MSGU_CONTROL 0x24
/* NOTE(review): MSGU_ODR shares offset 0x20 with MSGU_IBDB_CLEAR -
 * presumably intentional register aliasing; confirm against the
 * register map. */
#define MSGU_ODR 0x20
#define MSGU_ODCR 0x28

#define MSGU_ODMR 0x30
#define MSGU_ODMR_U 0x34
#define MSGU_ODMR_CLR 0x38
#define MSGU_ODMR_CLR_U 0x3C
#define MSGU_OD_RSVD 0x40

#define MSGU_SCRATCH_PAD_0 0x44
#define MSGU_SCRATCH_PAD_1 0x48
#define MSGU_SCRATCH_PAD_2 0x4C
#define MSGU_SCRATCH_PAD_3 0x50
#define MSGU_HOST_SCRATCH_PAD_0 0x54
#define MSGU_HOST_SCRATCH_PAD_1 0x58
#define MSGU_HOST_SCRATCH_PAD_2 0x5C
#define MSGU_HOST_SCRATCH_PAD_3 0x60
#define MSGU_HOST_SCRATCH_PAD_4 0x64
#define MSGU_HOST_SCRATCH_PAD_5 0x68
#define MSGU_HOST_SCRATCH_PAD_6 0x6C
#define MSGU_HOST_SCRATCH_PAD_7 0x70

/* bit definition for ODMR register */
#define ODMR_MASK_ALL 0xFFFFFFFF /* mask all interrupt vectors */
#define ODMR_CLEAR_ALL 0 /* unmask all interrupt vectors */
/* bit definition for ODCR register */
#define ODCR_CLEAR_ALL 0xFFFFFFFF /* clear all interrupt vectors */
/* MSIX Interrupts */
#define MSIX_TABLE_OFFSET 0x2000
#define MSIX_TABLE_ELEMENT_SIZE 0x10
#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC
#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + \
				MSIX_INTERRUPT_CONTROL_OFFSET)
#define MSIX_INTERRUPT_DISABLE 0x1
#define MSIX_INTERRUPT_ENABLE 0x0
1276
/* state definition for Scratch Pad1 register */
#define SCRATCH_PAD_RAAE_READY 0x3
#define SCRATCH_PAD_ILA_READY 0xC
#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
#define SCRATCH_PAD_IOP0_READY 0xC00
#define SCRATCH_PAD_IOP1_READY 0x3000

/* boot loader state */
#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */
/* NOTE(review): "SUCESS" misspelling below is part of the public macro
 * name; renaming would break callers, so it is kept as-is. */
#define SCRATCH_PAD1_BOOTSTATE_SUCESS 0x0 /* Load successful */
#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM 0x10 /* HDA SEEPROM */
#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP 0x20 /* HDA BootStrap Pins */
#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET 0x30 /* HDA Soft Reset */
#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR 0x40 /* HDA critical error */
#define SCRATCH_PAD1_BOOTSTATE_R1 0x50 /* Reserved */
#define SCRATCH_PAD1_BOOTSTATE_R2 0x60 /* Reserved */
#define SCRATCH_PAD1_BOOTSTATE_FATAL 0x70 /* Fatal Error */

 /* state definition for Scratch Pad2 register */
#define SCRATCH_PAD2_POR 0x00 /* power on state */
#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */
#define SCRATCH_PAD2_ERR 0x02 /* error state */
#define SCRATCH_PAD2_RDY 0x03 /* ready state */
#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW rdy for soft reset flag */
#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */
#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2
					      Mask, bit1-0 State */
#define SCRATCH_PAD2_RESERVED 0x000003FC /* Scratch Pad2
					    Reserved bit 2 to 9 */

#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */
#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */
1309
/* main configuration offset - byte offset */
#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */
#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */
#define MAIN_FW_REVISION 0x08 /* DWORD 0x02 */
#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */
#define MAIN_MAX_SGL_OFFSET 0x10 /* DWORD 0x04 */
#define MAIN_CNTRL_CAP_OFFSET 0x14 /* DWORD 0x05 */
#define MAIN_GST_OFFSET 0x18 /* DWORD 0x06 */
#define MAIN_IBQ_OFFSET 0x1C /* DWORD 0x07 */
#define MAIN_OBQ_OFFSET 0x20 /* DWORD 0x08 */
#define MAIN_IQNPPD_HPPD_OFFSET 0x24 /* DWORD 0x09 */

/* 0x28 - 0x4C - RSVD */
#define MAIN_EVENT_CRC_CHECK 0x48 /* DWORD 0x12 */
#define MAIN_EVENT_LOG_ADDR_HI 0x50 /* DWORD 0x14 */
#define MAIN_EVENT_LOG_ADDR_LO 0x54 /* DWORD 0x15 */
#define MAIN_EVENT_LOG_BUFF_SIZE 0x58 /* DWORD 0x16 */
#define MAIN_EVENT_LOG_OPTION 0x5C /* DWORD 0x17 */
#define MAIN_PCS_EVENT_LOG_ADDR_HI 0x60 /* DWORD 0x18 */
#define MAIN_PCS_EVENT_LOG_ADDR_LO 0x64 /* DWORD 0x19 */
#define MAIN_PCS_EVENT_LOG_BUFF_SIZE 0x68 /* DWORD 0x1A */
#define MAIN_PCS_EVENT_LOG_OPTION 0x6C /* DWORD 0x1B */
#define MAIN_FATAL_ERROR_INTERRUPT 0x70 /* DWORD 0x1C */
#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */
#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */
#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */
#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */
#define MAIN_GPIO_LED_FLAGS_OFFSET 0x84 /* DWORD 0x21 */
#define MAIN_ANALOG_SETUP_OFFSET 0x88 /* DWORD 0x22 */

#define MAIN_INT_VECTOR_TABLE_OFFSET 0x8C /* DWORD 0x23 */
#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */
#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */
#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */

/* General Status Table offset - byte offset */
#define GST_GSTLEN_MPIS_OFFSET 0x00
#define GST_IQ_FREEZE_STATE0_OFFSET 0x04
#define GST_IQ_FREEZE_STATE1_OFFSET 0x08
#define GST_MSGUTCNT_OFFSET 0x0C
#define GST_IOPTCNT_OFFSET 0x10
/* 0x14 - 0x34 - RSVD */
#define GST_GPIO_INPUT_VAL 0x38
/* 0x3c - 0x40 - RSVD */
#define GST_RERRINFO_OFFSET0 0x44
#define GST_RERRINFO_OFFSET1 0x48
#define GST_RERRINFO_OFFSET2 0x4c
#define GST_RERRINFO_OFFSET3 0x50
#define GST_RERRINFO_OFFSET4 0x54
#define GST_RERRINFO_OFFSET5 0x58
#define GST_RERRINFO_OFFSET6 0x5c
#define GST_RERRINFO_OFFSET7 0x60

/* General Status Table - MPI state */
#define GST_MPI_STATE_UNINIT 0x00
#define GST_MPI_STATE_INIT 0x01
#define GST_MPI_STATE_TERMINATION 0x02
#define GST_MPI_STATE_ERROR 0x03
#define GST_MPI_STATE_MASK 0x07
1369
/* Per SAS PHY Attributes - byte offsets into the PSPA table; "V" is the
 * table base DWORD. Each phy N has a PHYSTATE word followed by an
 * OB_HW_EVENT_PID word. */

#define PSPA_PHYSTATE0_OFFSET 0x00 /* Dword V */
#define PSPA_OB_HW_EVENT_PID0_OFFSET 0x04 /* DWORD V+1 */
#define PSPA_PHYSTATE1_OFFSET 0x08 /* Dword V+2 */
#define PSPA_OB_HW_EVENT_PID1_OFFSET 0x0C /* DWORD V+3 */
#define PSPA_PHYSTATE2_OFFSET 0x10 /* Dword V+4 */
#define PSPA_OB_HW_EVENT_PID2_OFFSET 0x14 /* DWORD V+5 */
#define PSPA_PHYSTATE3_OFFSET 0x18 /* Dword V+6 */
#define PSPA_OB_HW_EVENT_PID3_OFFSET 0x1C /* DWORD V+7 */
#define PSPA_PHYSTATE4_OFFSET 0x20 /* Dword V+8 */
#define PSPA_OB_HW_EVENT_PID4_OFFSET 0x24 /* DWORD V+9 */
#define PSPA_PHYSTATE5_OFFSET 0x28 /* Dword V+10 */
#define PSPA_OB_HW_EVENT_PID5_OFFSET 0x2C /* DWORD V+11 */
#define PSPA_PHYSTATE6_OFFSET 0x30 /* Dword V+12 */
#define PSPA_OB_HW_EVENT_PID6_OFFSET 0x34 /* DWORD V+13 */
#define PSPA_PHYSTATE7_OFFSET 0x38 /* Dword V+14 */
#define PSPA_OB_HW_EVENT_PID7_OFFSET 0x3C /* DWORD V+15 */
#define PSPA_PHYSTATE8_OFFSET 0x40 /* DWORD V+16 */
#define PSPA_OB_HW_EVENT_PID8_OFFSET 0x44 /* DWORD V+17 */
#define PSPA_PHYSTATE9_OFFSET 0x48 /* DWORD V+18 */
#define PSPA_OB_HW_EVENT_PID9_OFFSET 0x4C /* DWORD V+19 */
#define PSPA_PHYSTATE10_OFFSET 0x50 /* DWORD V+20 */
#define PSPA_OB_HW_EVENT_PID10_OFFSET 0x54 /* DWORD V+21 */
#define PSPA_PHYSTATE11_OFFSET 0x58 /* DWORD V+22 */
#define PSPA_OB_HW_EVENT_PID11_OFFSET 0x5C /* DWORD V+23 */
#define PSPA_PHYSTATE12_OFFSET 0x60 /* DWORD V+24 */
#define PSPA_OB_HW_EVENT_PID12_OFFSET 0x64 /* DWORD V+25 */
#define PSPA_PHYSTATE13_OFFSET 0x68 /* DWORD V+26 */
#define PSPA_OB_HW_EVENT_PID13_OFFSET 0x6c /* DWORD V+27 */
#define PSPA_PHYSTATE14_OFFSET 0x70 /* DWORD V+28 */
#define PSPA_OB_HW_EVENT_PID14_OFFSET 0x74 /* DWORD V+29 */
#define PSPA_PHYSTATE15_OFFSET 0x78 /* DWORD V+30 */
#define PSPA_OB_HW_EVENT_PID15_OFFSET 0x7c /* DWORD V+31 */
/* end PSPA */
1405
/* inbound queue configuration offset - byte offset */
/* NOTE(review): "PROPERITY" misspelling below is part of the public
 * macro names (IB_/OB_); kept as-is to avoid breaking callers. */
#define IB_PROPERITY_OFFSET 0x00
#define IB_BASE_ADDR_HI_OFFSET 0x04
#define IB_BASE_ADDR_LO_OFFSET 0x08
#define IB_CI_BASE_ADDR_HI_OFFSET 0x0C
#define IB_CI_BASE_ADDR_LO_OFFSET 0x10
#define IB_PIPCI_BAR 0x14
#define IB_PIPCI_BAR_OFFSET 0x18
#define IB_RESERVED_OFFSET 0x1C

/* outbound queue configuration offset - byte offset */
#define OB_PROPERITY_OFFSET 0x00
#define OB_BASE_ADDR_HI_OFFSET 0x04
#define OB_BASE_ADDR_LO_OFFSET 0x08
#define OB_PI_BASE_ADDR_HI_OFFSET 0x0C
#define OB_PI_BASE_ADDR_LO_OFFSET 0x10
#define OB_CIPCI_BAR 0x14
#define OB_CIPCI_BAR_OFFSET 0x18
#define OB_INTERRUPT_COALES_OFFSET 0x1C
#define OB_DYNAMIC_COALES_OFFSET 0x20
#define OB_PROPERTY_INT_ENABLE 0x40000000
1427
/* NOTE(review): IOP and AAP1 NMI-enable share offset 0x000418 -
 * presumably the same register serves both; confirm against the
 * register map. */
#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418
#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418
/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040
#define PCIE_EVENT_INTERRUPT 0x003044
#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048
#define PCIE_ERROR_INTERRUPT 0x00304C

/* SPCV soft reset */
#define SPC_REG_SOFT_RESET 0x00001000
#define SPCv_NORMAL_RESET_VALUE 0x1

#define SPCv_SOFT_RESET_READ_MASK 0xC0
#define SPCv_SOFT_RESET_NO_RESET 0x0
#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED 0x40
#define SPCv_SOFT_RESET_HDA_MODE_OCCURED 0x80
#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED 0xC0

/* signature definition for host scratch pad0 register */
#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd
/* Signature for Soft Reset */

/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
#define SPC_REG_RESET 0x000000 /* reset register */

/* bit definition for SPC_RESET register */
#define SPC_REG_RESET_OSSP 0x00000001
#define SPC_REG_RESET_RAAE 0x00000002
#define SPC_REG_RESET_PCS_SPBC 0x00000004
#define SPC_REG_RESET_PCS_IOP_SS 0x00000008
#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010
#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020
#define SPC_REG_RESET_PCS_LM 0x00000040
#define SPC_REG_RESET_PCS 0x00000080
#define SPC_REG_RESET_GSM 0x00000100
#define SPC_REG_RESET_DDR2 0x00010000
#define SPC_REG_RESET_BDMA_CORE 0x00020000
#define SPC_REG_RESET_BDMA_SXCBI 0x00040000
#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000
#define SPC_REG_RESET_PCIE_PWR 0x00100000
#define SPC_REG_RESET_PCIE_SFT 0x00200000
#define SPC_REG_RESET_PCS_SXCBI 0x00400000
#define SPC_REG_RESET_LMS_SXCBI 0x00800000
#define SPC_REG_RESET_PMIC_SXCBI 0x01000000
#define SPC_REG_RESET_PMIC_CORE 0x02000000
#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000
#define SPC_REG_RESET_DEVICE 0x80000000
1475
/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
#define SPCV_IBW_AXI_TRANSLATION_LOW 0x001010

#define MBIC_AAP1_ADDR_BASE 0x060000
#define MBIC_IOP_ADDR_BASE 0x070000
#define GSM_ADDR_BASE 0x0700000
/* Dynamic map through Bar4 - 0x00700000 */
#define GSM_CONFIG_RESET 0x00000000
#define RAM_ECC_DB_ERR 0x00000018
#define GSM_READ_ADDR_PARITY_INDIC 0x00000058
#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060
#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068
#define GSM_READ_ADDR_PARITY_CHECK 0x00000038
#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040
#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048

#define RB6_ACCESS_REG 0x6A0000
#define HDAC_EXEC_CMD 0x0002
#define HDA_C_PA 0xcb
#define HDA_SEQ_ID_BITS 0x00ff0000
#define HDA_GSM_OFFSET_BITS 0x00FFFFFF
#define HDA_GSM_CMD_OFFSET_BITS 0x42C0
#define HDA_GSM_RSP_OFFSET_BITS 0x42E0

/* CLEANUP: MBIC_AAP1_ADDR_BASE, MBIC_IOP_ADDR_BASE and GSM_ADDR_BASE
 * were defined a second time here with identical values; the redundant
 * definitions have been removed (values unchanged above). */
#define SPC_TOP_LEVEL_ADDR_BASE 0x000000
#define GSM_CONFIG_RESET_VALUE 0x00003b00
#define GPIO_ADDR_BASE 0x00090000
/* NOTE(review): "0UTPUT" (zero) misspelling is part of the public macro
 * name; kept as-is to avoid breaking callers. */
#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c

/* RB6 offset */
#define SPC_RB6_OFFSET 0x80C0
/* Magic number of soft reset for RB6 */
#define RB6_MAGIC_NUMBER_RST 0x1234

/* Device Register status */
#define DEVREG_SUCCESS 0x00
#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01
#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02
#define DEVREG_FAILURE_INVALID_PHY_ID 0x03
#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04
#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05
#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06
#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07
1522
1523#endif
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 317a7fdc3b82..23d607218ae8 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -24,7 +24,9 @@ config SCSI_QLA_FC
24 24
25 Firmware images can be retrieved from: 25 Firmware images can be retrieved from:
26 26
27 ftp://ftp.qlogic.com/outgoing/linux/firmware/ 27 http://ldriver.qlogic.com/firmware/
28
29 They are also included in the linux-firmware tree as well.
28 30
29config TCM_QLA2XXX 31config TCM_QLA2XXX
30 tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" 32 tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 729b74389f83..937fed8cb038 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -3003,12 +3003,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
3003 3003
3004 /* Set transfer direction */ 3004 /* Set transfer direction */
3005 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 3005 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
3006 lcmd_pkt->cntrl_flags = 3006 lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
3007 __constant_cpu_to_le16(TMF_WRITE_DATA);
3008 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 3007 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
3009 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 3008 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
3010 lcmd_pkt->cntrl_flags = 3009 lcmd_pkt->cntrl_flags = TMF_READ_DATA;
3011 __constant_cpu_to_le16(TMF_READ_DATA);
3012 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 3010 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
3013 } 3011 }
3014 3012
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5307bf86d5e0..ad72c1d85111 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -644,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
644 qla2x00_rel_sp(sp->fcport->vha, sp); 644 qla2x00_rel_sp(sp->fcport->vha, sp);
645} 645}
646 646
647void 647static void
648qla2x00_sp_compl(void *data, void *ptr, int res) 648qla2x00_sp_compl(void *data, void *ptr, int res)
649{ 649{
650 struct qla_hw_data *ha = (struct qla_hw_data *)data; 650 struct qla_hw_data *ha = (struct qla_hw_data *)data;
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 14fec976f634..fad71ed067ec 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
507 mrb->mbox_cmd = in_mbox[0]; 507 mrb->mbox_cmd = in_mbox[0];
508 wmb(); 508 wmb();
509 509
510 ha->iocb_cnt += mrb->iocb_cnt;
510 ha->isp_ops->queue_iocb(ha); 511 ha->isp_ops->queue_iocb(ha);
511exit_mbox_iocb: 512exit_mbox_iocb:
512 spin_unlock_irqrestore(&ha->hardware_lock, flags); 513 spin_unlock_irqrestore(&ha->hardware_lock, flags);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index a47f99957ba8..4d231c12463e 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2216,14 +2216,14 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
2216 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); 2216 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
2217 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); 2217 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
2218 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); 2218 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
2219 fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf); 2219 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
2220 fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf); 2220 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
2221 fw_ddb_entry->ipv4_tos = conn->ipv4_tos; 2221 fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
2222 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); 2222 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
2223 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); 2223 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
2224 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); 2224 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
2225 fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn); 2225 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
2226 fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn); 2226 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
2227 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type); 2227 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
2228 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); 2228 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
2229 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); 2229 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
@@ -5504,9 +5504,9 @@ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
5504 * If this is invoked as a result of a userspace call then the entry is marked 5504 * If this is invoked as a result of a userspace call then the entry is marked
5505 * as nonpersistent using flash_state field. 5505 * as nonpersistent using flash_state field.
5506 **/ 5506 **/
5507int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, 5507static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
5508 struct dev_db_entry *fw_ddb_entry, 5508 struct dev_db_entry *fw_ddb_entry,
5509 uint16_t *idx, int user) 5509 uint16_t *idx, int user)
5510{ 5510{
5511 struct iscsi_bus_flash_session *fnode_sess = NULL; 5511 struct iscsi_bus_flash_session *fnode_sess = NULL;
5512 struct iscsi_bus_flash_conn *fnode_conn = NULL; 5512 struct iscsi_bus_flash_conn *fnode_conn = NULL;
@@ -5605,6 +5605,7 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
5605 ql4_printk(KERN_ERR, ha, 5605 ql4_printk(KERN_ERR, ha,
5606 "%s: A non-persistent entry %s found\n", 5606 "%s: A non-persistent entry %s found\n",
5607 __func__, dev->kobj.name); 5607 __func__, dev->kobj.name);
5608 put_device(dev);
5608 goto exit_ddb_add; 5609 goto exit_ddb_add;
5609 } 5610 }
5610 5611
@@ -6112,8 +6113,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6112 int parent_type, parent_index = 0xffff; 6113 int parent_type, parent_index = 0xffff;
6113 int rc = 0; 6114 int rc = 0;
6114 6115
6115 dev = iscsi_find_flashnode_conn(fnode_sess, NULL, 6116 dev = iscsi_find_flashnode_conn(fnode_sess);
6116 iscsi_is_flashnode_conn_dev);
6117 if (!dev) 6117 if (!dev)
6118 return -EIO; 6118 return -EIO;
6119 6119
@@ -6276,8 +6276,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6276 rc = sprintf(buf, "\n"); 6276 rc = sprintf(buf, "\n");
6277 break; 6277 break;
6278 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 6278 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6279 if ((fnode_sess->discovery_parent_idx) >= 0 && 6279 if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)
6280 (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES))
6281 parent_index = fnode_sess->discovery_parent_idx; 6280 parent_index = fnode_sess->discovery_parent_idx;
6282 6281
6283 rc = sprintf(buf, "%u\n", parent_index); 6282 rc = sprintf(buf, "%u\n", parent_index);
@@ -6287,8 +6286,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6287 parent_type = ISCSI_DISC_PARENT_ISNS; 6286 parent_type = ISCSI_DISC_PARENT_ISNS;
6288 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) 6287 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
6289 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 6288 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6290 else if (fnode_sess->discovery_parent_type >= 0 && 6289 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
6291 fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
6292 parent_type = ISCSI_DISC_PARENT_SENDTGT; 6290 parent_type = ISCSI_DISC_PARENT_SENDTGT;
6293 else 6291 else
6294 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 6292 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
@@ -6349,6 +6347,8 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6349 rc = -ENOSYS; 6347 rc = -ENOSYS;
6350 break; 6348 break;
6351 } 6349 }
6350
6351 put_device(dev);
6352 return rc; 6352 return rc;
6353} 6353}
6354 6354
@@ -6368,20 +6368,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
6368{ 6368{
6369 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 6369 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6370 struct scsi_qla_host *ha = to_qla_host(shost); 6370 struct scsi_qla_host *ha = to_qla_host(shost);
6371 struct dev_db_entry *fw_ddb_entry = NULL;
6372 struct iscsi_flashnode_param_info *fnode_param; 6371 struct iscsi_flashnode_param_info *fnode_param;
6373 struct nlattr *attr; 6372 struct nlattr *attr;
6374 int rc = QLA_ERROR; 6373 int rc = QLA_ERROR;
6375 uint32_t rem = len; 6374 uint32_t rem = len;
6376 6375
6377 fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL);
6378 if (!fw_ddb_entry) {
6379 DEBUG2(ql4_printk(KERN_ERR, ha,
6380 "%s: Unable to allocate ddb buffer\n",
6381 __func__));
6382 return -ENOMEM;
6383 }
6384
6385 nla_for_each_attr(attr, data, len, rem) { 6376 nla_for_each_attr(attr, data, len, rem) {
6386 fnode_param = nla_data(attr); 6377 fnode_param = nla_data(attr);
6387 6378
@@ -6590,16 +6581,11 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
6590 struct dev_db_entry *fw_ddb_entry = NULL; 6581 struct dev_db_entry *fw_ddb_entry = NULL;
6591 dma_addr_t fw_ddb_entry_dma; 6582 dma_addr_t fw_ddb_entry_dma;
6592 uint16_t *ddb_cookie = NULL; 6583 uint16_t *ddb_cookie = NULL;
6593 size_t ddb_size; 6584 size_t ddb_size = 0;
6594 void *pddb = NULL; 6585 void *pddb = NULL;
6595 int target_id; 6586 int target_id;
6596 int rc = 0; 6587 int rc = 0;
6597 6588
6598 if (!fnode_sess) {
6599 rc = -EINVAL;
6600 goto exit_ddb_del;
6601 }
6602
6603 if (fnode_sess->is_boot_target) { 6589 if (fnode_sess->is_boot_target) {
6604 rc = -EPERM; 6590 rc = -EPERM;
6605 DEBUG2(ql4_printk(KERN_ERR, ha, 6591 DEBUG2(ql4_printk(KERN_ERR, ha,
@@ -6631,8 +6617,7 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
6631 6617
6632 dev_db_start_offset += (fnode_sess->target_id * 6618 dev_db_start_offset += (fnode_sess->target_id *
6633 sizeof(*fw_ddb_entry)); 6619 sizeof(*fw_ddb_entry));
6634 dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) - 6620 dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
6635 (void *)fw_ddb_entry;
6636 6621
6637 ddb_size = sizeof(*ddb_cookie); 6622 ddb_size = sizeof(*ddb_cookie);
6638 } 6623 }
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 83e0fec35d56..fe873cf7570d 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.03.00-k8" 8#define QLA4XXX_DRIVER_VERSION "5.03.00-k9"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5add6f4e7928..0a537a0515ca 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1997,24 +1997,39 @@ out:
1997 return ret; 1997 return ret;
1998} 1998}
1999 1999
2000static unsigned int map_state(sector_t lba, unsigned int *num) 2000static unsigned long lba_to_map_index(sector_t lba)
2001{
2002 if (scsi_debug_unmap_alignment) {
2003 lba += scsi_debug_unmap_granularity -
2004 scsi_debug_unmap_alignment;
2005 }
2006 do_div(lba, scsi_debug_unmap_granularity);
2007
2008 return lba;
2009}
2010
2011static sector_t map_index_to_lba(unsigned long index)
2001{ 2012{
2002 unsigned int granularity, alignment, mapped; 2013 return index * scsi_debug_unmap_granularity -
2003 sector_t block, next, end; 2014 scsi_debug_unmap_alignment;
2015}
2004 2016
2005 granularity = scsi_debug_unmap_granularity; 2017static unsigned int map_state(sector_t lba, unsigned int *num)
2006 alignment = granularity - scsi_debug_unmap_alignment; 2018{
2007 block = lba + alignment; 2019 sector_t end;
2008 do_div(block, granularity); 2020 unsigned int mapped;
2021 unsigned long index;
2022 unsigned long next;
2009 2023
2010 mapped = test_bit(block, map_storep); 2024 index = lba_to_map_index(lba);
2025 mapped = test_bit(index, map_storep);
2011 2026
2012 if (mapped) 2027 if (mapped)
2013 next = find_next_zero_bit(map_storep, map_size, block); 2028 next = find_next_zero_bit(map_storep, map_size, index);
2014 else 2029 else
2015 next = find_next_bit(map_storep, map_size, block); 2030 next = find_next_bit(map_storep, map_size, index);
2016 2031
2017 end = next * granularity - scsi_debug_unmap_alignment; 2032 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
2018 *num = end - lba; 2033 *num = end - lba;
2019 2034
2020 return mapped; 2035 return mapped;
@@ -2022,47 +2037,37 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
2022 2037
2023static void map_region(sector_t lba, unsigned int len) 2038static void map_region(sector_t lba, unsigned int len)
2024{ 2039{
2025 unsigned int granularity, alignment;
2026 sector_t end = lba + len; 2040 sector_t end = lba + len;
2027 2041
2028 granularity = scsi_debug_unmap_granularity;
2029 alignment = granularity - scsi_debug_unmap_alignment;
2030
2031 while (lba < end) { 2042 while (lba < end) {
2032 sector_t block, rem; 2043 unsigned long index = lba_to_map_index(lba);
2033
2034 block = lba + alignment;
2035 rem = do_div(block, granularity);
2036 2044
2037 if (block < map_size) 2045 if (index < map_size)
2038 set_bit(block, map_storep); 2046 set_bit(index, map_storep);
2039 2047
2040 lba += granularity - rem; 2048 lba = map_index_to_lba(index + 1);
2041 } 2049 }
2042} 2050}
2043 2051
2044static void unmap_region(sector_t lba, unsigned int len) 2052static void unmap_region(sector_t lba, unsigned int len)
2045{ 2053{
2046 unsigned int granularity, alignment;
2047 sector_t end = lba + len; 2054 sector_t end = lba + len;
2048 2055
2049 granularity = scsi_debug_unmap_granularity;
2050 alignment = granularity - scsi_debug_unmap_alignment;
2051
2052 while (lba < end) { 2056 while (lba < end) {
2053 sector_t block, rem; 2057 unsigned long index = lba_to_map_index(lba);
2054
2055 block = lba + alignment;
2056 rem = do_div(block, granularity);
2057 2058
2058 if (rem == 0 && lba + granularity < end && block < map_size) { 2059 if (lba == map_index_to_lba(index) &&
2059 clear_bit(block, map_storep); 2060 lba + scsi_debug_unmap_granularity <= end &&
2060 if (scsi_debug_lbprz) 2061 index < map_size) {
2062 clear_bit(index, map_storep);
2063 if (scsi_debug_lbprz) {
2061 memset(fake_storep + 2064 memset(fake_storep +
2062 block * scsi_debug_sector_size, 0, 2065 lba * scsi_debug_sector_size, 0,
2063 scsi_debug_sector_size); 2066 scsi_debug_sector_size *
2067 scsi_debug_unmap_granularity);
2068 }
2064 } 2069 }
2065 lba += granularity - rem; 2070 lba = map_index_to_lba(index + 1);
2066 } 2071 }
2067} 2072}
2068 2073
@@ -2089,7 +2094,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2089 2094
2090 write_lock_irqsave(&atomic_rw, iflags); 2095 write_lock_irqsave(&atomic_rw, iflags);
2091 ret = do_device_access(SCpnt, devip, lba, num, 1); 2096 ret = do_device_access(SCpnt, devip, lba, num, 1);
2092 if (scsi_debug_unmap_granularity) 2097 if (scsi_debug_lbp())
2093 map_region(lba, num); 2098 map_region(lba, num);
2094 write_unlock_irqrestore(&atomic_rw, iflags); 2099 write_unlock_irqrestore(&atomic_rw, iflags);
2095 if (-1 == ret) 2100 if (-1 == ret)
@@ -2122,7 +2127,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2122 2127
2123 write_lock_irqsave(&atomic_rw, iflags); 2128 write_lock_irqsave(&atomic_rw, iflags);
2124 2129
2125 if (unmap && scsi_debug_unmap_granularity) { 2130 if (unmap && scsi_debug_lbp()) {
2126 unmap_region(lba, num); 2131 unmap_region(lba, num);
2127 goto out; 2132 goto out;
2128 } 2133 }
@@ -2146,7 +2151,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2146 fake_storep + (lba * scsi_debug_sector_size), 2151 fake_storep + (lba * scsi_debug_sector_size),
2147 scsi_debug_sector_size); 2152 scsi_debug_sector_size);
2148 2153
2149 if (scsi_debug_unmap_granularity) 2154 if (scsi_debug_lbp())
2150 map_region(lba, num); 2155 map_region(lba, num);
2151out: 2156out:
2152 write_unlock_irqrestore(&atomic_rw, iflags); 2157 write_unlock_irqrestore(&atomic_rw, iflags);
@@ -3389,8 +3394,6 @@ static int __init scsi_debug_init(void)
3389 3394
3390 /* Logical Block Provisioning */ 3395 /* Logical Block Provisioning */
3391 if (scsi_debug_lbp()) { 3396 if (scsi_debug_lbp()) {
3392 unsigned int map_bytes;
3393
3394 scsi_debug_unmap_max_blocks = 3397 scsi_debug_unmap_max_blocks =
3395 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); 3398 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3396 3399
@@ -3401,16 +3404,16 @@ static int __init scsi_debug_init(void)
3401 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); 3404 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3402 3405
3403 if (scsi_debug_unmap_alignment && 3406 if (scsi_debug_unmap_alignment &&
3404 scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { 3407 scsi_debug_unmap_granularity <=
3408 scsi_debug_unmap_alignment) {
3405 printk(KERN_ERR 3409 printk(KERN_ERR
3406 "%s: ERR: unmap_granularity < unmap_alignment\n", 3410 "%s: ERR: unmap_granularity <= unmap_alignment\n",
3407 __func__); 3411 __func__);
3408 return -EINVAL; 3412 return -EINVAL;
3409 } 3413 }
3410 3414
3411 map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity); 3415 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3412 map_bytes = map_size >> 3; 3416 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3413 map_storep = vmalloc(map_bytes);
3414 3417
3415 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n", 3418 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3416 map_size); 3419 map_size);
@@ -3421,7 +3424,7 @@ static int __init scsi_debug_init(void)
3421 goto free_vm; 3424 goto free_vm;
3422 } 3425 }
3423 3426
3424 memset(map_storep, 0x0, map_bytes); 3427 bitmap_zero(map_storep, map_size);
3425 3428
3426 /* Map first 1KB for partition table */ 3429 /* Map first 1KB for partition table */
3427 if (scsi_debug_num_parts) 3430 if (scsi_debug_num_parts)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index c1b05a83d403..f43de1e56420 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -25,6 +25,7 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/blkdev.h> 26#include <linux/blkdev.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/jiffies.h>
28 29
29#include <scsi/scsi.h> 30#include <scsi/scsi.h>
30#include <scsi/scsi_cmnd.h> 31#include <scsi/scsi_cmnd.h>
@@ -791,32 +792,48 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
791 struct scsi_device *sdev = scmd->device; 792 struct scsi_device *sdev = scmd->device;
792 struct Scsi_Host *shost = sdev->host; 793 struct Scsi_Host *shost = sdev->host;
793 DECLARE_COMPLETION_ONSTACK(done); 794 DECLARE_COMPLETION_ONSTACK(done);
794 unsigned long timeleft; 795 unsigned long timeleft = timeout;
795 struct scsi_eh_save ses; 796 struct scsi_eh_save ses;
797 const unsigned long stall_for = msecs_to_jiffies(100);
796 int rtn; 798 int rtn;
797 799
800retry:
798 scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); 801 scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
799 shost->eh_action = &done; 802 shost->eh_action = &done;
800 803
801 scsi_log_send(scmd); 804 scsi_log_send(scmd);
802 scmd->scsi_done = scsi_eh_done; 805 scmd->scsi_done = scsi_eh_done;
803 shost->hostt->queuecommand(shost, scmd); 806 rtn = shost->hostt->queuecommand(shost, scmd);
804 807 if (rtn) {
805 timeleft = wait_for_completion_timeout(&done, timeout); 808 if (timeleft > stall_for) {
809 scsi_eh_restore_cmnd(scmd, &ses);
810 timeleft -= stall_for;
811 msleep(jiffies_to_msecs(stall_for));
812 goto retry;
813 }
814 /* signal not to enter either branch of the if () below */
815 timeleft = 0;
816 rtn = NEEDS_RETRY;
817 } else {
818 timeleft = wait_for_completion_timeout(&done, timeout);
819 }
806 820
807 shost->eh_action = NULL; 821 shost->eh_action = NULL;
808 822
809 scsi_log_completion(scmd, SUCCESS); 823 scsi_log_completion(scmd, rtn);
810 824
811 SCSI_LOG_ERROR_RECOVERY(3, 825 SCSI_LOG_ERROR_RECOVERY(3,
812 printk("%s: scmd: %p, timeleft: %ld\n", 826 printk("%s: scmd: %p, timeleft: %ld\n",
813 __func__, scmd, timeleft)); 827 __func__, scmd, timeleft));
814 828
815 /* 829 /*
816 * If there is time left scsi_eh_done got called, and we will 830 * If there is time left scsi_eh_done got called, and we will examine
817 * examine the actual status codes to see whether the command 831 * the actual status codes to see whether the command actually did
818 * actually did complete normally, else tell the host to forget 832 * complete normally, else if we have a zero return and no time left,
819 * about this command. 833 * the command must still be pending, so abort it and return FAILED.
834 * If we never actually managed to issue the command, because
835 * ->queuecommand() kept returning non zero, use the rtn = FAILED
836 * value above (so don't execute either branch of the if)
820 */ 837 */
821 if (timeleft) { 838 if (timeleft) {
822 rtn = scsi_eh_completed_normally(scmd); 839 rtn = scsi_eh_completed_normally(scmd);
@@ -837,7 +854,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
837 rtn = FAILED; 854 rtn = FAILED;
838 break; 855 break;
839 } 856 }
840 } else { 857 } else if (!rtn) {
841 scsi_abort_eh_cmnd(scmd); 858 scsi_abort_eh_cmnd(scmd);
842 rtn = FAILED; 859 rtn = FAILED;
843 } 860 }
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c31187d79343..86d522004a20 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -276,11 +276,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
276} 276}
277EXPORT_SYMBOL(scsi_execute); 277EXPORT_SYMBOL(scsi_execute);
278 278
279 279int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
280int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
281 int data_direction, void *buffer, unsigned bufflen, 280 int data_direction, void *buffer, unsigned bufflen,
282 struct scsi_sense_hdr *sshdr, int timeout, int retries, 281 struct scsi_sense_hdr *sshdr, int timeout, int retries,
283 int *resid) 282 int *resid, int flags)
284{ 283{
285 char *sense = NULL; 284 char *sense = NULL;
286 int result; 285 int result;
@@ -291,14 +290,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
291 return DRIVER_ERROR << 24; 290 return DRIVER_ERROR << 24;
292 } 291 }
293 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, 292 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
294 sense, timeout, retries, 0, resid); 293 sense, timeout, retries, flags, resid);
295 if (sshdr) 294 if (sshdr)
296 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); 295 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
297 296
298 kfree(sense); 297 kfree(sense);
299 return result; 298 return result;
300} 299}
301EXPORT_SYMBOL(scsi_execute_req); 300EXPORT_SYMBOL(scsi_execute_req_flags);
302 301
303/* 302/*
304 * Function: scsi_init_cmd_errh() 303 * Function: scsi_init_cmd_errh()
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 8f6b12cbd224..42539ee2cb11 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -144,33 +144,83 @@ static int scsi_bus_restore(struct device *dev)
144 144
145#ifdef CONFIG_PM_RUNTIME 145#ifdef CONFIG_PM_RUNTIME
146 146
147static int sdev_blk_runtime_suspend(struct scsi_device *sdev,
148 int (*cb)(struct device *))
149{
150 int err;
151
152 err = blk_pre_runtime_suspend(sdev->request_queue);
153 if (err)
154 return err;
155 if (cb)
156 err = cb(&sdev->sdev_gendev);
157 blk_post_runtime_suspend(sdev->request_queue, err);
158
159 return err;
160}
161
162static int sdev_runtime_suspend(struct device *dev)
163{
164 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
165 int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL;
166 struct scsi_device *sdev = to_scsi_device(dev);
167 int err;
168
169 if (sdev->request_queue->dev)
170 return sdev_blk_runtime_suspend(sdev, cb);
171
172 err = scsi_dev_type_suspend(dev, cb);
173 if (err == -EAGAIN)
174 pm_schedule_suspend(dev, jiffies_to_msecs(
175 round_jiffies_up_relative(HZ/10)));
176 return err;
177}
178
147static int scsi_runtime_suspend(struct device *dev) 179static int scsi_runtime_suspend(struct device *dev)
148{ 180{
149 int err = 0; 181 int err = 0;
150 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
151 182
152 dev_dbg(dev, "scsi_runtime_suspend\n"); 183 dev_dbg(dev, "scsi_runtime_suspend\n");
153 if (scsi_is_sdev_device(dev)) { 184 if (scsi_is_sdev_device(dev))
154 err = scsi_dev_type_suspend(dev, 185 err = sdev_runtime_suspend(dev);
155 pm ? pm->runtime_suspend : NULL);
156 if (err == -EAGAIN)
157 pm_schedule_suspend(dev, jiffies_to_msecs(
158 round_jiffies_up_relative(HZ/10)));
159 }
160 186
161 /* Insert hooks here for targets, hosts, and transport classes */ 187 /* Insert hooks here for targets, hosts, and transport classes */
162 188
163 return err; 189 return err;
164} 190}
165 191
166static int scsi_runtime_resume(struct device *dev) 192static int sdev_blk_runtime_resume(struct scsi_device *sdev,
193 int (*cb)(struct device *))
167{ 194{
168 int err = 0; 195 int err = 0;
196
197 blk_pre_runtime_resume(sdev->request_queue);
198 if (cb)
199 err = cb(&sdev->sdev_gendev);
200 blk_post_runtime_resume(sdev->request_queue, err);
201
202 return err;
203}
204
205static int sdev_runtime_resume(struct device *dev)
206{
207 struct scsi_device *sdev = to_scsi_device(dev);
169 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 208 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
209 int (*cb)(struct device *) = pm ? pm->runtime_resume : NULL;
210
211 if (sdev->request_queue->dev)
212 return sdev_blk_runtime_resume(sdev, cb);
213 else
214 return scsi_dev_type_resume(dev, cb);
215}
216
217static int scsi_runtime_resume(struct device *dev)
218{
219 int err = 0;
170 220
171 dev_dbg(dev, "scsi_runtime_resume\n"); 221 dev_dbg(dev, "scsi_runtime_resume\n");
172 if (scsi_is_sdev_device(dev)) 222 if (scsi_is_sdev_device(dev))
173 err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL); 223 err = sdev_runtime_resume(dev);
174 224
175 /* Insert hooks here for targets, hosts, and transport classes */ 225 /* Insert hooks here for targets, hosts, and transport classes */
176 226
@@ -185,10 +235,18 @@ static int scsi_runtime_idle(struct device *dev)
185 235
186 /* Insert hooks here for targets, hosts, and transport classes */ 236 /* Insert hooks here for targets, hosts, and transport classes */
187 237
188 if (scsi_is_sdev_device(dev)) 238 if (scsi_is_sdev_device(dev)) {
189 err = pm_schedule_suspend(dev, 100); 239 struct scsi_device *sdev = to_scsi_device(dev);
190 else 240
241 if (sdev->request_queue->dev) {
242 pm_runtime_mark_last_busy(dev);
243 err = pm_runtime_autosuspend(dev);
244 } else {
245 err = pm_runtime_suspend(dev);
246 }
247 } else {
191 err = pm_runtime_suspend(dev); 248 err = pm_runtime_suspend(dev);
249 }
192 return err; 250 return err;
193} 251}
194 252
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 47799a33d6ca..133926b1bb78 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1019,8 +1019,7 @@ exit_match_index:
1019/** 1019/**
1020 * iscsi_get_flashnode_by_index -finds flashnode session entry by index 1020 * iscsi_get_flashnode_by_index -finds flashnode session entry by index
1021 * @shost: pointer to host data 1021 * @shost: pointer to host data
1022 * @data: pointer to data containing value to use for comparison 1022 * @idx: index to match
1023 * @fn: function pointer that does actual comparison
1024 * 1023 *
1025 * Finds the flashnode session object for the passed index 1024 * Finds the flashnode session object for the passed index
1026 * 1025 *
@@ -1029,13 +1028,13 @@ exit_match_index:
1029 * %NULL on failure 1028 * %NULL on failure
1030 */ 1029 */
1031static struct iscsi_bus_flash_session * 1030static struct iscsi_bus_flash_session *
1032iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data, 1031iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
1033 int (*fn)(struct device *dev, void *data))
1034{ 1032{
1035 struct iscsi_bus_flash_session *fnode_sess = NULL; 1033 struct iscsi_bus_flash_session *fnode_sess = NULL;
1036 struct device *dev; 1034 struct device *dev;
1037 1035
1038 dev = device_find_child(&shost->shost_gendev, data, fn); 1036 dev = device_find_child(&shost->shost_gendev, &idx,
1037 flashnode_match_index);
1039 if (dev) 1038 if (dev)
1040 fnode_sess = iscsi_dev_to_flash_session(dev); 1039 fnode_sess = iscsi_dev_to_flash_session(dev);
1041 1040
@@ -1059,18 +1058,13 @@ struct device *
1059iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, 1058iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
1060 int (*fn)(struct device *dev, void *data)) 1059 int (*fn)(struct device *dev, void *data))
1061{ 1060{
1062 struct device *dev; 1061 return device_find_child(&shost->shost_gendev, data, fn);
1063
1064 dev = device_find_child(&shost->shost_gendev, data, fn);
1065 return dev;
1066} 1062}
1067EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); 1063EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
1068 1064
1069/** 1065/**
1070 * iscsi_find_flashnode_conn - finds flashnode connection entry 1066 * iscsi_find_flashnode_conn - finds flashnode connection entry
1071 * @fnode_sess: pointer to parent flashnode session entry 1067 * @fnode_sess: pointer to parent flashnode session entry
1072 * @data: pointer to data containing value to use for comparison
1073 * @fn: function pointer that does actual comparison
1074 * 1068 *
1075 * Finds the flashnode connection object comparing the data passed using logic 1069 * Finds the flashnode connection object comparing the data passed using logic
1076 * defined in passed function pointer 1070 * defined in passed function pointer
@@ -1080,14 +1074,10 @@ EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
1080 * %NULL on failure 1074 * %NULL on failure
1081 */ 1075 */
1082struct device * 1076struct device *
1083iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess, 1077iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
1084 void *data,
1085 int (*fn)(struct device *dev, void *data))
1086{ 1078{
1087 struct device *dev; 1079 return device_find_child(&fnode_sess->dev, NULL,
1088 1080 iscsi_is_flashnode_conn_dev);
1089 dev = device_find_child(&fnode_sess->dev, data, fn);
1090 return dev;
1091} 1081}
1092EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn); 1082EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
1093 1083
@@ -2808,7 +2798,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
2808 struct iscsi_bus_flash_session *fnode_sess; 2798 struct iscsi_bus_flash_session *fnode_sess;
2809 struct iscsi_bus_flash_conn *fnode_conn; 2799 struct iscsi_bus_flash_conn *fnode_conn;
2810 struct device *dev; 2800 struct device *dev;
2811 uint32_t *idx; 2801 uint32_t idx;
2812 int err = 0; 2802 int err = 0;
2813 2803
2814 if (!transport->set_flashnode_param) { 2804 if (!transport->set_flashnode_param) {
@@ -2824,25 +2814,27 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
2824 goto put_host; 2814 goto put_host;
2825 } 2815 }
2826 2816
2827 idx = &ev->u.set_flashnode.flashnode_idx; 2817 idx = ev->u.set_flashnode.flashnode_idx;
2828 fnode_sess = iscsi_get_flashnode_by_index(shost, idx, 2818 fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
2829 flashnode_match_index);
2830 if (!fnode_sess) { 2819 if (!fnode_sess) {
2831 pr_err("%s could not find flashnode %u for host no %u\n", 2820 pr_err("%s could not find flashnode %u for host no %u\n",
2832 __func__, *idx, ev->u.set_flashnode.host_no); 2821 __func__, idx, ev->u.set_flashnode.host_no);
2833 err = -ENODEV; 2822 err = -ENODEV;
2834 goto put_host; 2823 goto put_host;
2835 } 2824 }
2836 2825
2837 dev = iscsi_find_flashnode_conn(fnode_sess, NULL, 2826 dev = iscsi_find_flashnode_conn(fnode_sess);
2838 iscsi_is_flashnode_conn_dev);
2839 if (!dev) { 2827 if (!dev) {
2840 err = -ENODEV; 2828 err = -ENODEV;
2841 goto put_host; 2829 goto put_sess;
2842 } 2830 }
2843 2831
2844 fnode_conn = iscsi_dev_to_flash_conn(dev); 2832 fnode_conn = iscsi_dev_to_flash_conn(dev);
2845 err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); 2833 err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
2834 put_device(dev);
2835
2836put_sess:
2837 put_device(&fnode_sess->dev);
2846 2838
2847put_host: 2839put_host:
2848 scsi_host_put(shost); 2840 scsi_host_put(shost);
@@ -2891,7 +2883,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
2891{ 2883{
2892 struct Scsi_Host *shost; 2884 struct Scsi_Host *shost;
2893 struct iscsi_bus_flash_session *fnode_sess; 2885 struct iscsi_bus_flash_session *fnode_sess;
2894 uint32_t *idx; 2886 uint32_t idx;
2895 int err = 0; 2887 int err = 0;
2896 2888
2897 if (!transport->del_flashnode) { 2889 if (!transport->del_flashnode) {
@@ -2907,17 +2899,17 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
2907 goto put_host; 2899 goto put_host;
2908 } 2900 }
2909 2901
2910 idx = &ev->u.del_flashnode.flashnode_idx; 2902 idx = ev->u.del_flashnode.flashnode_idx;
2911 fnode_sess = iscsi_get_flashnode_by_index(shost, idx, 2903 fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
2912 flashnode_match_index);
2913 if (!fnode_sess) { 2904 if (!fnode_sess) {
2914 pr_err("%s could not find flashnode %u for host no %u\n", 2905 pr_err("%s could not find flashnode %u for host no %u\n",
2915 __func__, *idx, ev->u.del_flashnode.host_no); 2906 __func__, idx, ev->u.del_flashnode.host_no);
2916 err = -ENODEV; 2907 err = -ENODEV;
2917 goto put_host; 2908 goto put_host;
2918 } 2909 }
2919 2910
2920 err = transport->del_flashnode(fnode_sess); 2911 err = transport->del_flashnode(fnode_sess);
2912 put_device(&fnode_sess->dev);
2921 2913
2922put_host: 2914put_host:
2923 scsi_host_put(shost); 2915 scsi_host_put(shost);
@@ -2933,7 +2925,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
2933 struct iscsi_bus_flash_session *fnode_sess; 2925 struct iscsi_bus_flash_session *fnode_sess;
2934 struct iscsi_bus_flash_conn *fnode_conn; 2926 struct iscsi_bus_flash_conn *fnode_conn;
2935 struct device *dev; 2927 struct device *dev;
2936 uint32_t *idx; 2928 uint32_t idx;
2937 int err = 0; 2929 int err = 0;
2938 2930
2939 if (!transport->login_flashnode) { 2931 if (!transport->login_flashnode) {
@@ -2949,25 +2941,27 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
2949 goto put_host; 2941 goto put_host;
2950 } 2942 }
2951 2943
2952 idx = &ev->u.login_flashnode.flashnode_idx; 2944 idx = ev->u.login_flashnode.flashnode_idx;
2953 fnode_sess = iscsi_get_flashnode_by_index(shost, idx, 2945 fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
2954 flashnode_match_index);
2955 if (!fnode_sess) { 2946 if (!fnode_sess) {
2956 pr_err("%s could not find flashnode %u for host no %u\n", 2947 pr_err("%s could not find flashnode %u for host no %u\n",
2957 __func__, *idx, ev->u.login_flashnode.host_no); 2948 __func__, idx, ev->u.login_flashnode.host_no);
2958 err = -ENODEV; 2949 err = -ENODEV;
2959 goto put_host; 2950 goto put_host;
2960 } 2951 }
2961 2952
2962 dev = iscsi_find_flashnode_conn(fnode_sess, NULL, 2953 dev = iscsi_find_flashnode_conn(fnode_sess);
2963 iscsi_is_flashnode_conn_dev);
2964 if (!dev) { 2954 if (!dev) {
2965 err = -ENODEV; 2955 err = -ENODEV;
2966 goto put_host; 2956 goto put_sess;
2967 } 2957 }
2968 2958
2969 fnode_conn = iscsi_dev_to_flash_conn(dev); 2959 fnode_conn = iscsi_dev_to_flash_conn(dev);
2970 err = transport->login_flashnode(fnode_sess, fnode_conn); 2960 err = transport->login_flashnode(fnode_sess, fnode_conn);
2961 put_device(dev);
2962
2963put_sess:
2964 put_device(&fnode_sess->dev);
2971 2965
2972put_host: 2966put_host:
2973 scsi_host_put(shost); 2967 scsi_host_put(shost);
@@ -2983,7 +2977,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
2983 struct iscsi_bus_flash_session *fnode_sess; 2977 struct iscsi_bus_flash_session *fnode_sess;
2984 struct iscsi_bus_flash_conn *fnode_conn; 2978 struct iscsi_bus_flash_conn *fnode_conn;
2985 struct device *dev; 2979 struct device *dev;
2986 uint32_t *idx; 2980 uint32_t idx;
2987 int err = 0; 2981 int err = 0;
2988 2982
2989 if (!transport->logout_flashnode) { 2983 if (!transport->logout_flashnode) {
@@ -2999,26 +2993,28 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
2999 goto put_host; 2993 goto put_host;
3000 } 2994 }
3001 2995
3002 idx = &ev->u.logout_flashnode.flashnode_idx; 2996 idx = ev->u.logout_flashnode.flashnode_idx;
3003 fnode_sess = iscsi_get_flashnode_by_index(shost, idx, 2997 fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
3004 flashnode_match_index);
3005 if (!fnode_sess) { 2998 if (!fnode_sess) {
3006 pr_err("%s could not find flashnode %u for host no %u\n", 2999 pr_err("%s could not find flashnode %u for host no %u\n",
3007 __func__, *idx, ev->u.logout_flashnode.host_no); 3000 __func__, idx, ev->u.logout_flashnode.host_no);
3008 err = -ENODEV; 3001 err = -ENODEV;
3009 goto put_host; 3002 goto put_host;
3010 } 3003 }
3011 3004
3012 dev = iscsi_find_flashnode_conn(fnode_sess, NULL, 3005 dev = iscsi_find_flashnode_conn(fnode_sess);
3013 iscsi_is_flashnode_conn_dev);
3014 if (!dev) { 3006 if (!dev) {
3015 err = -ENODEV; 3007 err = -ENODEV;
3016 goto put_host; 3008 goto put_sess;
3017 } 3009 }
3018 3010
3019 fnode_conn = iscsi_dev_to_flash_conn(dev); 3011 fnode_conn = iscsi_dev_to_flash_conn(dev);
3020 3012
3021 err = transport->logout_flashnode(fnode_sess, fnode_conn); 3013 err = transport->logout_flashnode(fnode_sess, fnode_conn);
3014 put_device(dev);
3015
3016put_sess:
3017 put_device(&fnode_sess->dev);
3022 3018
3023put_host: 3019put_host:
3024 scsi_host_put(shost); 3020 scsi_host_put(shost);
@@ -3985,8 +3981,10 @@ static __init int iscsi_transport_init(void)
3985 } 3981 }
3986 3982
3987 iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh"); 3983 iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
3988 if (!iscsi_eh_timer_workq) 3984 if (!iscsi_eh_timer_workq) {
3985 err = -ENOMEM;
3989 goto release_nls; 3986 goto release_nls;
3987 }
3990 3988
3991 return 0; 3989 return 0;
3992 3990
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e6689776b4f6..c1c555242d0d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
142 char *buffer_data; 142 char *buffer_data;
143 struct scsi_mode_data data; 143 struct scsi_mode_data data;
144 struct scsi_sense_hdr sshdr; 144 struct scsi_sense_hdr sshdr;
145 const char *temp = "temporary ";
145 int len; 146 int len;
146 147
147 if (sdp->type != TYPE_DISK) 148 if (sdp->type != TYPE_DISK)
@@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
150 * it's not worth the risk */ 151 * it's not worth the risk */
151 return -EINVAL; 152 return -EINVAL;
152 153
154 if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
155 buf += sizeof(temp) - 1;
156 sdkp->cache_override = 1;
157 } else {
158 sdkp->cache_override = 0;
159 }
160
153 for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) { 161 for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
154 len = strlen(sd_cache_types[i]); 162 len = strlen(sd_cache_types[i]);
155 if (strncmp(sd_cache_types[i], buf, len) == 0 && 163 if (strncmp(sd_cache_types[i], buf, len) == 0 &&
@@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
162 return -EINVAL; 170 return -EINVAL;
163 rcd = ct & 0x01 ? 1 : 0; 171 rcd = ct & 0x01 ? 1 : 0;
164 wce = ct & 0x02 ? 1 : 0; 172 wce = ct & 0x02 ? 1 : 0;
173
174 if (sdkp->cache_override) {
175 sdkp->WCE = wce;
176 sdkp->RCD = rcd;
177 return count;
178 }
179
165 if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, 180 if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
166 SD_MAX_RETRIES, &data, NULL)) 181 SD_MAX_RETRIES, &data, NULL))
167 return -EINVAL; 182 return -EINVAL;
@@ -1121,10 +1136,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
1121 1136
1122 sdev = sdkp->device; 1137 sdev = sdkp->device;
1123 1138
1124 retval = scsi_autopm_get_device(sdev);
1125 if (retval)
1126 goto error_autopm;
1127
1128 /* 1139 /*
1129 * If the device is in error recovery, wait until it is done. 1140 * If the device is in error recovery, wait until it is done.
1130 * If the device is offline, then disallow any access to it. 1141 * If the device is offline, then disallow any access to it.
@@ -1169,8 +1180,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
1169 return 0; 1180 return 0;
1170 1181
1171error_out: 1182error_out:
1172 scsi_autopm_put_device(sdev);
1173error_autopm:
1174 scsi_disk_put(sdkp); 1183 scsi_disk_put(sdkp);
1175 return retval; 1184 return retval;
1176} 1185}
@@ -1205,7 +1214,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
1205 * XXX is followed by a "rmmod sd_mod"? 1214 * XXX is followed by a "rmmod sd_mod"?
1206 */ 1215 */
1207 1216
1208 scsi_autopm_put_device(sdev);
1209 scsi_disk_put(sdkp); 1217 scsi_disk_put(sdkp);
1210} 1218}
1211 1219
@@ -1366,14 +1374,9 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
1366 retval = -ENODEV; 1374 retval = -ENODEV;
1367 1375
1368 if (scsi_block_when_processing_errors(sdp)) { 1376 if (scsi_block_when_processing_errors(sdp)) {
1369 retval = scsi_autopm_get_device(sdp);
1370 if (retval)
1371 goto out;
1372
1373 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 1377 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
1374 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, 1378 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
1375 sshdr); 1379 sshdr);
1376 scsi_autopm_put_device(sdp);
1377 } 1380 }
1378 1381
1379 /* failed to execute TUR, assume media not present */ 1382 /* failed to execute TUR, assume media not present */
@@ -1423,8 +1426,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
1423 * Leave the rest of the command zero to indicate 1426 * Leave the rest of the command zero to indicate
1424 * flush everything. 1427 * flush everything.
1425 */ 1428 */
1426 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 1429 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
1427 SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL); 1430 &sshdr, SD_FLUSH_TIMEOUT,
1431 SD_MAX_RETRIES, NULL, REQ_PM);
1428 if (res == 0) 1432 if (res == 0)
1429 break; 1433 break;
1430 } 1434 }
@@ -2318,6 +2322,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2318 int old_rcd = sdkp->RCD; 2322 int old_rcd = sdkp->RCD;
2319 int old_dpofua = sdkp->DPOFUA; 2323 int old_dpofua = sdkp->DPOFUA;
2320 2324
2325
2326 if (sdkp->cache_override)
2327 return;
2328
2321 first_len = 4; 2329 first_len = 4;
2322 if (sdp->skip_ms_page_8) { 2330 if (sdp->skip_ms_page_8) {
2323 if (sdp->type == TYPE_RBC) 2331 if (sdp->type == TYPE_RBC)
@@ -2811,6 +2819,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2811 sdkp->capacity = 0; 2819 sdkp->capacity = 0;
2812 sdkp->media_present = 1; 2820 sdkp->media_present = 1;
2813 sdkp->write_prot = 0; 2821 sdkp->write_prot = 0;
2822 sdkp->cache_override = 0;
2814 sdkp->WCE = 0; 2823 sdkp->WCE = 0;
2815 sdkp->RCD = 0; 2824 sdkp->RCD = 0;
2816 sdkp->ATO = 0; 2825 sdkp->ATO = 0;
@@ -2837,6 +2846,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2837 2846
2838 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 2847 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
2839 sdp->removable ? "removable " : ""); 2848 sdp->removable ? "removable " : "");
2849 blk_pm_runtime_init(sdp->request_queue, dev);
2840 scsi_autopm_put_device(sdp); 2850 scsi_autopm_put_device(sdp);
2841 put_device(&sdkp->dev); 2851 put_device(&sdkp->dev);
2842} 2852}
@@ -3020,8 +3030,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
3020 if (!scsi_device_online(sdp)) 3030 if (!scsi_device_online(sdp))
3021 return -ENODEV; 3031 return -ENODEV;
3022 3032
3023 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 3033 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
3024 SD_TIMEOUT, SD_MAX_RETRIES, NULL); 3034 SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
3025 if (res) { 3035 if (res) {
3026 sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n"); 3036 sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
3027 sd_print_result(sdkp, res); 3037 sd_print_result(sdkp, res);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 74a1e4ca5401..2386aeb41fe8 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -73,6 +73,7 @@ struct scsi_disk {
73 u8 protection_type;/* Data Integrity Field */ 73 u8 protection_type;/* Data Integrity Field */
74 u8 provisioning_mode; 74 u8 provisioning_mode;
75 unsigned ATO : 1; /* state of disk ATO bit */ 75 unsigned ATO : 1; /* state of disk ATO bit */
76 unsigned cache_override : 1; /* temp override of WCE,RCD */
76 unsigned WCE : 1; /* state of disk WCE bit */ 77 unsigned WCE : 1; /* state of disk WCE bit */
77 unsigned RCD : 1; /* state of disk RCD bit, unused */ 78 unsigned RCD : 1; /* state of disk RCD bit, unused */
78 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ 79 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 04998f36e507..6174ca4ea275 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
93 if (sdt->app_tag == 0xffff) 93 if (sdt->app_tag == 0xffff)
94 return 0; 94 return 0;
95 95
96 /* Bad ref tag received from disk */
97 if (sdt->ref_tag == 0xffffffff) {
98 printk(KERN_ERR
99 "%s: bad phys ref tag on sector %lu\n",
100 bix->disk_name, (unsigned long)sector);
101 return -EIO;
102 }
103
104 if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 96 if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
105 printk(KERN_ERR 97 printk(KERN_ERR
106 "%s: ref tag error on sector %lu (rcvd %u)\n", 98 "%s: ref tag error on sector %lu (rcvd %u)\n",
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 0371047c5922..35faf24c6044 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI
57 If you have a controller with this interface, say Y or M here. 57 If you have a controller with this interface, say Y or M here.
58 58
59 If unsure, say N. 59 If unsure, say N.
60
61config SCSI_UFSHCD_PLATFORM
62 tristate "Platform bus based UFS Controller support"
63 depends on SCSI_UFSHCD
64 ---help---
65 This selects the UFS host controller support. Select this if
66 you have an UFS controller on Platform bus.
67
68 If you have a controller with this interface, say Y or M here.
69
70 If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 9eda0dfbd6df..1e5bd48457d6 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,3 +1,4 @@
1# UFSHCD makefile 1# UFSHCD makefile
2obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o 2obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
3obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o 3obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
4obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
new file mode 100644
index 000000000000..03319acd9c72
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -0,0 +1,217 @@
1/*
2 * Universal Flash Storage Host controller Platform bus based glue driver
3 *
4 * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c
5 * Copyright (C) 2011-2013 Samsung India Software Operations
6 *
7 * Authors:
8 * Santosh Yaraganavi <santosh.sy@samsung.com>
9 * Vinayak Holikatti <h.vinayak@samsung.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
15 * See the COPYING file in the top-level directory or visit
16 * <http://www.gnu.org/licenses/gpl-2.0.html>
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * This program is provided "AS IS" and "WITH ALL FAULTS" and
24 * without warranty of any kind. You are solely responsible for
25 * determining the appropriateness of using and distributing
26 * the program and assume all risks associated with your exercise
27 * of rights with respect to the program, including but not limited
28 * to infringement of third party rights, the risks and costs of
29 * program errors, damage to or loss of data, programs or equipment,
30 * and unavailability or interruption of operations. Under no
31 * circumstances will the contributor of this Program be liable for
32 * any damages of any kind arising from your use or distribution of
33 * this program.
34 */
35
36#include "ufshcd.h"
37#include <linux/platform_device.h>
38
#ifdef CONFIG_PM
/**
 * ufshcd_pltfrm_suspend - platform bus suspend callback
 * @dev: pointer to the device being suspended
 *
 * Only masks the controller interrupt for now; the real work
 * (calling ufshcd_suspend and doing bus specific power management)
 * is still TODO.
 *
 * Returns 0
 */
static int ufshcd_pltfrm_suspend(struct device *dev)
{
	struct ufs_hba *host = platform_get_drvdata(to_platform_device(dev));

	/* TODO: call ufshcd_suspend, then do bus specific power management */
	disable_irq(host->irq);
	return 0;
}

/**
 * ufshcd_pltfrm_resume - platform bus resume callback
 * @dev: pointer to the device being resumed
 *
 * Only unmasks the controller interrupt for now; the real work
 * (calling ufshcd_resume and doing bus specific wake up) is still TODO.
 *
 * Returns 0
 */
static int ufshcd_pltfrm_resume(struct device *dev)
{
	struct ufs_hba *host = platform_get_drvdata(to_platform_device(dev));

	/* TODO: call ufshcd_resume, then do bus specific wake up */
	enable_irq(host->irq);
	return 0;
}
#else
#define ufshcd_pltfrm_suspend	NULL
#define ufshcd_pltfrm_resume	NULL
#endif
88
89/**
90 * ufshcd_pltfrm_probe - probe routine of the driver
91 * @pdev: pointer to Platform device handle
92 *
93 * Returns 0 on success, non-zero value on failure
94 */
95static int ufshcd_pltfrm_probe(struct platform_device *pdev)
96{
97 struct ufs_hba *hba;
98 void __iomem *mmio_base;
99 struct resource *mem_res;
100 struct resource *irq_res;
101 resource_size_t mem_size;
102 int err;
103 struct device *dev = &pdev->dev;
104
105 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
106 if (!mem_res) {
107 dev_err(&pdev->dev,
108 "Memory resource not available\n");
109 err = -ENODEV;
110 goto out_error;
111 }
112
113 mem_size = resource_size(mem_res);
114 if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) {
115 dev_err(&pdev->dev,
116 "Cannot reserve the memory resource\n");
117 err = -EBUSY;
118 goto out_error;
119 }
120
121 mmio_base = ioremap_nocache(mem_res->start, mem_size);
122 if (!mmio_base) {
123 dev_err(&pdev->dev, "memory map failed\n");
124 err = -ENOMEM;
125 goto out_release_regions;
126 }
127
128 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
129 if (!irq_res) {
130 dev_err(&pdev->dev, "IRQ resource not available\n");
131 err = -ENODEV;
132 goto out_iounmap;
133 }
134
135 err = dma_set_coherent_mask(dev, dev->coherent_dma_mask);
136 if (err) {
137 dev_err(&pdev->dev, "set dma mask failed\n");
138 goto out_iounmap;
139 }
140
141 err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start);
142 if (err) {
143 dev_err(&pdev->dev, "Intialization failed\n");
144 goto out_iounmap;
145 }
146
147 platform_set_drvdata(pdev, hba);
148
149 return 0;
150
151out_iounmap:
152 iounmap(mmio_base);
153out_release_regions:
154 release_mem_region(mem_res->start, mem_size);
155out_error:
156 return err;
157}
158
159/**
160 * ufshcd_pltfrm_remove - remove platform driver routine
161 * @pdev: pointer to platform device handle
162 *
163 * Returns 0 on success, non-zero value on failure
164 */
165static int ufshcd_pltfrm_remove(struct platform_device *pdev)
166{
167 struct resource *mem_res;
168 resource_size_t mem_size;
169 struct ufs_hba *hba = platform_get_drvdata(pdev);
170
171 disable_irq(hba->irq);
172
173 /* Some buggy controllers raise interrupt after
174 * the resources are removed. So first we unregister the
175 * irq handler and then the resources used by driver
176 */
177
178 free_irq(hba->irq, hba);
179 ufshcd_remove(hba);
180 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
181 if (!mem_res)
182 dev_err(&pdev->dev, "ufshcd: Memory resource not available\n");
183 else {
184 mem_size = resource_size(mem_res);
185 release_mem_region(mem_res->start, mem_size);
186 }
187 platform_set_drvdata(pdev, NULL);
188 return 0;
189}
190
191static const struct of_device_id ufs_of_match[] = {
192 { .compatible = "jedec,ufs-1.1"},
193};
194
195static const struct dev_pm_ops ufshcd_dev_pm_ops = {
196 .suspend = ufshcd_pltfrm_suspend,
197 .resume = ufshcd_pltfrm_resume,
198};
199
200static struct platform_driver ufshcd_pltfrm_driver = {
201 .probe = ufshcd_pltfrm_probe,
202 .remove = ufshcd_pltfrm_remove,
203 .driver = {
204 .name = "ufshcd",
205 .owner = THIS_MODULE,
206 .pm = &ufshcd_dev_pm_ops,
207 .of_match_table = ufs_of_match,
208 },
209};
210
211module_platform_driver(ufshcd_pltfrm_driver);
212
213MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
214MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
215MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver");
216MODULE_LICENSE("GPL");
217MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 60fd40c4e4c2..c32a478df81b 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -478,7 +478,7 @@ static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
478 ucd_cmd_ptr->header.dword_2 = 0; 478 ucd_cmd_ptr->header.dword_2 = 0;
479 479
480 ucd_cmd_ptr->exp_data_transfer_len = 480 ucd_cmd_ptr->exp_data_transfer_len =
481 cpu_to_be32(lrbp->cmd->transfersize); 481 cpu_to_be32(lrbp->cmd->sdb.length);
482 482
483 memcpy(ucd_cmd_ptr->cdb, 483 memcpy(ucd_cmd_ptr->cdb,
484 lrbp->cmd->cmnd, 484 lrbp->cmd->cmnd,