author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-22 14:36:49 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-22 14:36:49 -0400
commit		e6f194d8f6f50da6837af637b2fd839c34185f7a
tree		f3c479a2bc24d49a150ff183e2614ee0f76cb366 /drivers
parent		7578634990fb47cc30083fbd812689aa6deacfc0
parent		b91421749a1840148d8c81637c03c0ace3f35269
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (60 commits)
  [SCSI] libsas: make ATA functions selectable by a config option
  [SCSI] bsg: unexport sg v3 helper functions
  [SCSI] bsg: fix bsg_unregister_queue
  [SCSI] bsg: make class backlinks
  [SCSI] 3w-9xxx: add support for 9690SA
  [SCSI] bsg: fix bsg_register_queue error path
  [SCSI] ESP: Increase ESP_BUS_TIMEOUT to 275.
  [SCSI] libsas: fix scr_read/write users and update the libata documentation
  [SCSI] mpt fusion: update Kconfig help
  [SCSI] scsi_transport_sas: add destructor for bsg
  [SCSI] iscsi_tcp: buggered kmalloc()
  [SCSI] qla2xxx: Update version number to 8.02.00-k2.
  [SCSI] qla2xxx: Add ISP25XX support.
  [SCSI] qla2xxx: Use pci_try_set_mwi().
  [SCSI] qla2xxx: Use PCI-X/PCI-Express read control interfaces.
  [SCSI] qla2xxx: Re-factor isp_operations to static structures.
  [SCSI] qla2xxx: Validate mid-layer 'underflow' during check-condition handling.
  [SCSI] qla2xxx: Correct setting of 'current' and 'supported' speeds during FDMI registration.
  [SCSI] qla2xxx: Generalize iIDMA support.
  [SCSI] qla2xxx: Generalize FW-Interface-2 support.
  ...
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/firewire/fw-sbp2.c | 2
-rw-r--r--	drivers/message/fusion/Kconfig | 1
-rw-r--r--	drivers/message/fusion/mptbase.c | 382
-rw-r--r--	drivers/message/fusion/mptbase.h | 9
-rw-r--r--	drivers/message/fusion/mptfc.c | 3
-rw-r--r--	drivers/message/fusion/mptsas.c | 72
-rw-r--r--	drivers/message/fusion/mptscsih.c | 153
-rw-r--r--	drivers/message/fusion/mptscsih.h | 1
-rw-r--r--	drivers/message/fusion/mptspi.c | 9
-rw-r--r--	drivers/s390/scsi/zfcp_aux.c | 9
-rw-r--r--	drivers/s390/scsi/zfcp_def.h | 1
-rw-r--r--	drivers/s390/scsi/zfcp_erp.c | 3
-rw-r--r--	drivers/s390/scsi/zfcp_fsf.c | 2
-rw-r--r--	drivers/s390/scsi/zfcp_qdio.c | 113
-rw-r--r--	drivers/scsi/3w-9xxx.c | 67
-rw-r--r--	drivers/scsi/3w-9xxx.h | 5
-rw-r--r--	drivers/scsi/Kconfig | 10
-rw-r--r--	drivers/scsi/a4000t.c | 3
-rw-r--r--	drivers/scsi/aacraid/aachba.c | 140
-rw-r--r--	drivers/scsi/aacraid/aacraid.h | 14
-rw-r--r--	drivers/scsi/aacraid/commsup.c | 16
-rw-r--r--	drivers/scsi/aic94xx/aic94xx_dev.c | 2
-rw-r--r--	drivers/scsi/aic94xx/aic94xx_init.c | 3
-rw-r--r--	drivers/scsi/aic94xx/aic94xx_task.c | 20
-rw-r--r--	drivers/scsi/bvme6000_scsi.c | 3
-rw-r--r--	drivers/scsi/esp_scsi.h | 2
-rw-r--r--	drivers/scsi/libsas/Kconfig | 7
-rw-r--r--	drivers/scsi/libsas/Makefile | 1
-rw-r--r--	drivers/scsi/libsas/sas_ata.c | 817
-rw-r--r--	drivers/scsi/libsas/sas_discover.c | 402
-rw-r--r--	drivers/scsi/libsas/sas_expander.c | 230
-rw-r--r--	drivers/scsi/libsas/sas_init.c | 1
-rw-r--r--	drivers/scsi/libsas/sas_internal.h | 3
-rw-r--r--	drivers/scsi/libsas/sas_scsi_host.c | 74
-rw-r--r--	drivers/scsi/mvme16x_scsi.c | 3
-rw-r--r--	drivers/scsi/pcmcia/Kconfig | 7
-rw-r--r--	drivers/scsi/qla2xxx/qla_attr.c | 33
-rw-r--r--	drivers/scsi/qla2xxx/qla_dbg.c | 1114
-rw-r--r--	drivers/scsi/qla2xxx/qla_dbg.h | 38
-rw-r--r--	drivers/scsi/qla2xxx/qla_def.h | 22
-rw-r--r--	drivers/scsi/qla2xxx/qla_fw.h | 36
-rw-r--r--	drivers/scsi/qla2xxx/qla_gbl.h | 6
-rw-r--r--	drivers/scsi/qla2xxx/qla_gs.c | 82
-rw-r--r--	drivers/scsi/qla2xxx/qla_init.c | 136
-rw-r--r--	drivers/scsi/qla2xxx/qla_inline.h | 4
-rw-r--r--	drivers/scsi/qla2xxx/qla_iocb.c | 10
-rw-r--r--	drivers/scsi/qla2xxx/qla_isr.c | 55
-rw-r--r--	drivers/scsi/qla2xxx/qla_mbx.c | 58
-rw-r--r--	drivers/scsi/qla2xxx/qla_os.c | 380
-rw-r--r--	drivers/scsi/qla2xxx/qla_sup.c | 35
-rw-r--r--	drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--	drivers/scsi/scsi_debug.c | 2
-rw-r--r--	drivers/scsi/scsi_sysctl.c | 1
-rw-r--r--	drivers/scsi/scsi_sysfs.c | 16
-rw-r--r--	drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--	drivers/scsi/scsi_transport_sas.c | 125
-rw-r--r--	drivers/scsi/seagate.c | 2
-rw-r--r--	drivers/scsi/sim710.c | 3
-rw-r--r--	drivers/scsi/sr.c | 2
-rw-r--r--	drivers/scsi/wd33c93.c | 4
-rw-r--r--	drivers/scsi/zorro7xx.c | 3
61 files changed, 3525 insertions, 1236 deletions
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index fc984474162c..3e4a369d0057 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -1160,7 +1160,7 @@ static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
 static struct scsi_host_template scsi_driver_template = {
 	.module = THIS_MODULE,
 	.name = "SBP-2 IEEE-1394",
-	.proc_name = (char *)sbp2_driver_name,
+	.proc_name = sbp2_driver_name,
 	.queuecommand = sbp2_scsi_queuecommand,
 	.slave_alloc = sbp2_scsi_slave_alloc,
 	.slave_configure = sbp2_scsi_slave_configure,
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index c88cc75ab49b..4494e0fd36c6 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -37,6 +37,7 @@ config FUSION_FC
 	LSIFC929
 	LSIFC929X
 	LSIFC929XL
+	Brocade FC 410/420
 
 config FUSION_SAS
 	tristate "Fusion MPT ScsiHost drivers for SAS"
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5a10c87239c2..04f75e24dcec 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -161,6 +161,7 @@ static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
 static void	mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
 static void	mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
 static void	mpt_timer_expired(unsigned long data);
+static void	mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
 static int	SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
 static int	SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
 static int	mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
@@ -1131,6 +1132,248 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
 	return -1;
 }
 
+/**
+ *	mpt_get_product_name - returns product string
+ *	@vendor: pci vendor id
+ *	@device: pci device id
+ *	@revision: pci revision id
+ *	@prod_name: string returned
+ *
+ *	Returns product string displayed when driver loads,
+ *	in /proc/mpt/summary and /sysfs/class/scsi_host/host<X>/version_product
+ *
+ **/
+static void
+mpt_get_product_name(u16 vendor, u16 device, u8 revision, char *prod_name)
+{
+	char *product_str = NULL;
+
+	if (vendor == PCI_VENDOR_ID_BROCADE) {
+		switch (device)
+		{
+		case MPI_MANUFACTPAGE_DEVICEID_FC949E:
+			switch (revision)
+			{
+			case 0x00:
+				product_str = "BRE040 A0";
+				break;
+			case 0x01:
+				product_str = "BRE040 A1";
+				break;
+			default:
+				product_str = "BRE040";
+				break;
+			}
+			break;
+		}
+		goto out;
+	}
+
+	switch (device)
+	{
+	case MPI_MANUFACTPAGE_DEVICEID_FC909:
+		product_str = "LSIFC909 B1";
+		break;
+	case MPI_MANUFACTPAGE_DEVICEID_FC919:
+		product_str = "LSIFC919 B0";
+		break;
+	case MPI_MANUFACTPAGE_DEVICEID_FC929:
+		product_str = "LSIFC929 B0";
+		break;
+	case MPI_MANUFACTPAGE_DEVICEID_FC919X:
+		if (revision < 0x80)
+			product_str = "LSIFC919X A0";
+		else
+			product_str = "LSIFC919XL A1";
+		break;
+	case MPI_MANUFACTPAGE_DEVICEID_FC929X:
+		if (revision < 0x80)
+			product_str = "LSIFC929X A0";
+		else
+			product_str = "LSIFC929XL A1";
+		break;
+	case MPI_MANUFACTPAGE_DEVICEID_FC939X:
+		product_str = "LSIFC939X A1";
+		break;
+	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
+		product_str = "LSIFC949X A1";
+		break;
+	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
+		switch (revision)
+		{
+		case 0x00:
+			product_str = "LSIFC949E A0";
+			break;
+		case 0x01:
+			product_str = "LSIFC949E A1";
+			break;
+		default:
+			product_str = "LSIFC949E";
+			break;
+		}
+		break;
+	case MPI_MANUFACTPAGE_DEVID_53C1030:
+		switch (revision)
+		{
+		case 0x00:
+			product_str = "LSI53C1030 A0";
+			break;
+		case 0x01:
+			product_str = "LSI53C1030 B0";
+			break;
+		case 0x03:
+			product_str = "LSI53C1030 B1";
+			break;
+		case 0x07:
+			product_str = "LSI53C1030 B2";
+			break;
+		case 0x08:
+			product_str = "LSI53C1030 C0";
+			break;
+		case 0x80:
+			product_str = "LSI53C1030T A0";
+			break;
+		case 0x83:
+			product_str = "LSI53C1030T A2";
+			break;
+		case 0x87:
+			product_str = "LSI53C1030T A3";
+			break;
+		case 0xc1:
+			product_str = "LSI53C1020A A1";
+			break;
+		default:
+			product_str = "LSI53C1030";
+			break;
+		}
+		break;
+	case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
+		switch (revision)
+		{
+		case 0x03:
+			product_str = "LSI53C1035 A2";
+			break;
+		case 0x04:
+			product_str = "LSI53C1035 B0";
+			break;
+		default:
+			product_str = "LSI53C1035";
+			break;
+		}
+		break;
+	case MPI_MANUFACTPAGE_DEVID_SAS1064:
+		switch (revision)
+		{
+		case 0x00:
+			product_str = "LSISAS1064 A1";
+			break;
+		case 0x01:
+			product_str = "LSISAS1064 A2";
+			break;
+		case 0x02:
+			product_str = "LSISAS1064 A3";
+			break;
+		case 0x03:
+			product_str = "LSISAS1064 A4";
+			break;
+		default:
+			product_str = "LSISAS1064";
+			break;
+		}
+		break;
+	case MPI_MANUFACTPAGE_DEVID_SAS1064E:
+		switch (revision)
+		{
+		case 0x00:
+			product_str = "LSISAS1064E A0";
+			break;
+		case 0x01:
+			product_str = "LSISAS1064E B0";
+			break;
+		case 0x02:
+			product_str = "LSISAS1064E B1";
+			break;
+		case 0x04:
+			product_str = "LSISAS1064E B2";
+			break;
+		case 0x08:
+			product_str = "LSISAS1064E B3";
+			break;
+		default:
+			product_str = "LSISAS1064E";
+			break;
+		}
+		break;
+	case MPI_MANUFACTPAGE_DEVID_SAS1068:
+		switch (revision)
+		{
+		case 0x00:
+			product_str = "LSISAS1068 A0";
+			break;
+		case 0x01:
+			product_str = "LSISAS1068 B0";
+			break;
+		case 0x02:
+			product_str = "LSISAS1068 B1";
+			break;
+		default:
+			product_str = "LSISAS1068";
+			break;
+		}
+		break;
+	case MPI_MANUFACTPAGE_DEVID_SAS1068E:
+		switch (revision)
+		{
+		case 0x00:
+			product_str = "LSISAS1068E A0";
+			break;
+		case 0x01:
+			product_str = "LSISAS1068E B0";
+			break;
+		case 0x02:
+			product_str = "LSISAS1068E B1";
+			break;
+		case 0x04:
+			product_str = "LSISAS1068E B2";
+			break;
+		case 0x08:
+			product_str = "LSISAS1068E B3";
+			break;
+		default:
+			product_str = "LSISAS1068E";
+			break;
+		}
+		break;
+	case MPI_MANUFACTPAGE_DEVID_SAS1078:
+		switch (revision)
+		{
+		case 0x00:
+			product_str = "LSISAS1078 A0";
+			break;
+		case 0x01:
+			product_str = "LSISAS1078 B0";
+			break;
+		case 0x02:
+			product_str = "LSISAS1078 C0";
+			break;
+		case 0x03:
+			product_str = "LSISAS1078 C1";
+			break;
+		case 0x04:
+			product_str = "LSISAS1078 C2";
+			break;
+		default:
+			product_str = "LSISAS1078";
+			break;
+		}
+		break;
+	}
+
+ out:
+	if (product_str)
+		sprintf(prod_name, "%s", product_str);
+}
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mpt_attach - Install a PCI intelligent MPT adapter.
@@ -1274,23 +1517,23 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 		ioc->pio_chip = (SYSIF_REGS __iomem *)pmem;
 	}
 
-	if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC909) {
-		ioc->prod_name = "LSIFC909";
-		ioc->bus_type = FC;
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) {
-		ioc->prod_name = "LSIFC929";
-		ioc->bus_type = FC;
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC919) {
-		ioc->prod_name = "LSIFC919";
-		ioc->bus_type = FC;
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929X) {
-		pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+	mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);
+
+	switch (pdev->device)
+	{
+	case MPI_MANUFACTPAGE_DEVICEID_FC939X:
+	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
+		ioc->errata_flag_1064 = 1;
+	case MPI_MANUFACTPAGE_DEVICEID_FC909:
+	case MPI_MANUFACTPAGE_DEVICEID_FC929:
+	case MPI_MANUFACTPAGE_DEVICEID_FC919:
+	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
 		ioc->bus_type = FC;
+		break;
+
+	case MPI_MANUFACTPAGE_DEVICEID_FC929X:
 		if (revision < XL_929) {
-			ioc->prod_name = "LSIFC929X";
 			/* 929X Chip Fix. Set Split transactions level
 			 * for PCIX. Set MOST bits to zero.
 			 */
@@ -1298,75 +1541,46 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 			pcixcmd &= 0x8F;
 			pci_write_config_byte(pdev, 0x6a, pcixcmd);
 		} else {
-			ioc->prod_name = "LSIFC929XL";
 			/* 929XL Chip Fix. Set MMRBC to 0x08.
 			 */
 			pci_read_config_byte(pdev, 0x6a, &pcixcmd);
 			pcixcmd |= 0x08;
 			pci_write_config_byte(pdev, 0x6a, pcixcmd);
 		}
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC919X) {
-		ioc->prod_name = "LSIFC919X";
 		ioc->bus_type = FC;
+		break;
+
+	case MPI_MANUFACTPAGE_DEVICEID_FC919X:
 		/* 919X Chip Fix. Set Split transactions level
 		 * for PCIX. Set MOST bits to zero.
 		 */
 		pci_read_config_byte(pdev, 0x6a, &pcixcmd);
 		pcixcmd &= 0x8F;
 		pci_write_config_byte(pdev, 0x6a, pcixcmd);
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC939X) {
-		ioc->prod_name = "LSIFC939X";
-		ioc->bus_type = FC;
-		ioc->errata_flag_1064 = 1;
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC949X) {
-		ioc->prod_name = "LSIFC949X";
 		ioc->bus_type = FC;
-		ioc->errata_flag_1064 = 1;
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC949E) {
-		ioc->prod_name = "LSIFC949E";
-		ioc->bus_type = FC;
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVID_53C1030) {
-		ioc->prod_name = "LSI53C1030";
-		ioc->bus_type = SPI;
+		break;
+
+	case MPI_MANUFACTPAGE_DEVID_53C1030:
 		/* 1030 Chip Fix. Disable Split transactions
 		 * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
 		 */
-		pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
 		if (revision < C0_1030) {
 			pci_read_config_byte(pdev, 0x6a, &pcixcmd);
 			pcixcmd &= 0x8F;
 			pci_write_config_byte(pdev, 0x6a, pcixcmd);
 		}
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVID_1030_53C1035) {
-		ioc->prod_name = "LSI53C1035";
+
+	case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
 		ioc->bus_type = SPI;
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064) {
-		ioc->prod_name = "LSISAS1064";
-		ioc->bus_type = SAS;
-		ioc->errata_flag_1064 = 1;
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068) {
-		ioc->prod_name = "LSISAS1068";
-		ioc->bus_type = SAS;
+		break;
+
+	case MPI_MANUFACTPAGE_DEVID_SAS1064:
+	case MPI_MANUFACTPAGE_DEVID_SAS1068:
 		ioc->errata_flag_1064 = 1;
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064E) {
-		ioc->prod_name = "LSISAS1064E";
-		ioc->bus_type = SAS;
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068E) {
-		ioc->prod_name = "LSISAS1068E";
-		ioc->bus_type = SAS;
-	}
-	else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
-		ioc->prod_name = "LSISAS1078";
+
+	case MPI_MANUFACTPAGE_DEVID_SAS1064E:
+	case MPI_MANUFACTPAGE_DEVID_SAS1068E:
+	case MPI_MANUFACTPAGE_DEVID_SAS1078:
 		ioc->bus_type = SAS;
 	}
 
@@ -1880,6 +2094,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 	}
 
 		GetIoUnitPage2(ioc);
+		mpt_get_manufacturing_pg_0(ioc);
 	}
 
 	/*
@@ -2138,8 +2353,8 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
 	int i = 0;
 
 	printk(KERN_INFO "%s: ", ioc->name);
-	if (ioc->prod_name && strlen(ioc->prod_name) > 3)
-		printk("%s: ", ioc->prod_name+3);
+	if (ioc->prod_name)
+		printk("%s: ", ioc->prod_name);
 	printk("Capabilities={");
 
 	if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
@@ -5190,6 +5405,49 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
 	return;
 }
 
+static void
+mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
+{
+	CONFIGPARMS cfg;
+	ConfigPageHeader_t hdr;
+	dma_addr_t buf_dma;
+	ManufacturingPage0_t *pbuf = NULL;
+
+	memset(&cfg, 0 , sizeof(CONFIGPARMS));
+	memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+
+	hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
+	cfg.cfghdr.hdr = &hdr;
+	cfg.physAddr = -1;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.timeout = 10;
+
+	if (mpt_config(ioc, &cfg) != 0)
+		goto out;
+
+	if (!cfg.cfghdr.hdr->PageLength)
+		goto out;
+
+	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+	pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
+	if (!pbuf)
+		goto out;
+
+	cfg.physAddr = buf_dma;
+
+	if (mpt_config(ioc, &cfg) != 0)
+		goto out;
+
+	memcpy(ioc->board_name, pbuf->BoardName, sizeof(ioc->board_name));
+	memcpy(ioc->board_assembly, pbuf->BoardAssembly, sizeof(ioc->board_assembly));
+	memcpy(ioc->board_tracer, pbuf->BoardTracerNumber, sizeof(ioc->board_tracer));
+
+ out:
+
+	if (pbuf)
+		pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
+}
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	SendEventNotification - Send EventNotification (on or off) request to adapter
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 05eb6e528753..98eb9c688e17 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -537,7 +537,14 @@ typedef struct _MPT_ADAPTER
 	int id;			/* Unique adapter id N {0,1,2,...} */
 	int pci_irq;		/* This irq */
 	char name[MPT_NAME_LENGTH];	/* "iocN" */
-	char *prod_name;		/* "LSIFC9x9" */
+	char prod_name[MPT_NAME_LENGTH];	/* "LSIFC9x9" */
+	char board_name[16];
+	char board_assembly[16];
+	char board_tracer[16];
+	u16 nvdata_version_persistent;
+	u16 nvdata_version_default;
+	u8 io_missing_delay;
+	u8 device_missing_delay;
 	SYSIF_REGS __iomem *chip;	/* == c8817000 (mmap) */
 	SYSIF_REGS __iomem *pio_chip;	/* Programmed IO (downloadboot) */
 	u8 bus_type;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index b766445f19aa..f2ebaa9992fe 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -130,6 +130,7 @@ static struct scsi_host_template mptfc_driver_template = {
 	.max_sectors = 8192,
 	.cmd_per_lun = 7,
 	.use_clustering = ENABLE_CLUSTERING,
+	.shost_attrs = mptscsih_host_attrs,
 };
 
 /****************************************************************************
@@ -153,6 +154,8 @@ static struct pci_device_id mptfc_pci_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID },
 	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949E,
 		PCI_ANY_ID, PCI_ANY_ID },
+	{ PCI_VENDOR_ID_BROCADE, MPI_MANUFACTPAGE_DEVICEID_FC949E,
+		PCI_ANY_ID, PCI_ANY_ID },
 	{0}	/* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, mptfc_pci_table);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 9e5424e1871f..d50664640512 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1119,6 +1119,7 @@ static struct scsi_host_template mptsas_driver_template = {
 	.max_sectors = 8192,
 	.cmd_per_lun = 7,
 	.use_clustering = ENABLE_CLUSTERING,
+	.shost_attrs = mptscsih_host_attrs,
 };
 
 static int mptsas_get_linkerrors(struct sas_phy *phy)
@@ -1390,6 +1391,11 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
 		goto out_free_consistent;
 	}
 
+	ioc->nvdata_version_persistent =
+	    le16_to_cpu(buffer->NvdataVersionPersistent);
+	ioc->nvdata_version_default =
+	    le16_to_cpu(buffer->NvdataVersionDefault);
+
 	for (i = 0; i < port_info->num_phys; i++) {
 		mptsas_print_phy_data(&buffer->PhyData[i]);
 		port_info->phy_info[i].phy_id = i;
@@ -1410,6 +1416,63 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
 }
 
 static int
+mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
+{
+	ConfigExtendedPageHeader_t hdr;
+	CONFIGPARMS cfg;
+	SasIOUnitPage1_t *buffer;
+	dma_addr_t dma_handle;
+	int error;
+	u16 device_missing_delay;
+
+	memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
+	memset(&cfg, 0, sizeof(CONFIGPARMS));
+
+	cfg.cfghdr.ehdr = &hdr;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.timeout = 10;
+	cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+	cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+	cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION;
+	cfg.cfghdr.ehdr->PageNumber = 1;
+
+	error = mpt_config(ioc, &cfg);
+	if (error)
+		goto out;
+	if (!hdr.ExtPageLength) {
+		error = -ENXIO;
+		goto out;
+	}
+
+	buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+	    &dma_handle);
+	if (!buffer) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	cfg.physAddr = dma_handle;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+	error = mpt_config(ioc, &cfg);
+	if (error)
+		goto out_free_consistent;
+
+	ioc->io_missing_delay =
+	    le16_to_cpu(buffer->IODeviceMissingDelay);
+	device_missing_delay = le16_to_cpu(buffer->ReportDeviceMissingDelay);
+	ioc->device_missing_delay = (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16) ?
+	    (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16 :
+	    device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+
+ out_free_consistent:
+	pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+	    buffer, dma_handle);
+ out:
+	return error;
+}
+
+static int
 mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
 		u32 form, u32 form_specific)
 {
@@ -1990,6 +2053,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
 	if (error)
 		goto out_free_port_info;
 
+	mptsas_sas_io_unit_pg1(ioc);
 	mutex_lock(&ioc->sas_topology_mutex);
 	ioc->handle = hba->phy_info[0].handle;
 	port_info = mptsas_find_portinfo_by_handle(ioc, ioc->handle);
@@ -3237,6 +3301,8 @@ static struct pci_driver mptsas_driver = {
 static int __init
 mptsas_init(void)
 {
+	int error;
+
 	show_mptmod_ver(my_NAME, my_VERSION);
 
 	mptsas_transport_template =
@@ -3260,7 +3326,11 @@ mptsas_init(void)
3260 ": Registered for IOC reset notifications\n")); 3326 ": Registered for IOC reset notifications\n"));
3261 } 3327 }
3262 3328
3263 return pci_register_driver(&mptsas_driver); 3329 error = pci_register_driver(&mptsas_driver);
3330 if (error)
3331 sas_release_transport(mptsas_transport_template);
3332
3333 return error;
3264} 3334}
3265 3335
3266static void __exit 3336static void __exit
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index d35617376f87..fd3aa2619f42 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -3187,6 +3187,159 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
 	mptscsih_do_cmd(hd, &iocmd);
 }
 
+static ssize_t
+mptscsih_version_fw_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER *ioc = hd->ioc;
+
+	return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+	    (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+	    (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+	    (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+	    ioc->facts.FWVersion.Word & 0x000000FF);
+}
+static CLASS_DEVICE_ATTR(version_fw, S_IRUGO, mptscsih_version_fw_show, NULL);
+
+static ssize_t
+mptscsih_version_bios_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER *ioc = hd->ioc;
+
+	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+	    (ioc->biosVersion & 0xFF000000) >> 24,
+	    (ioc->biosVersion & 0x00FF0000) >> 16,
+	    (ioc->biosVersion & 0x0000FF00) >> 8,
+	    ioc->biosVersion & 0x000000FF);
+}
+static CLASS_DEVICE_ATTR(version_bios, S_IRUGO, mptscsih_version_bios_show, NULL);
+
+static ssize_t
+mptscsih_version_mpi_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER *ioc = hd->ioc;
+
+	return snprintf(buf, PAGE_SIZE, "%03x\n", ioc->facts.MsgVersion);
+}
+static CLASS_DEVICE_ATTR(version_mpi, S_IRUGO, mptscsih_version_mpi_show, NULL);
+
+static ssize_t
+mptscsih_version_product_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER *ioc = hd->ioc;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", ioc->prod_name);
+}
+static CLASS_DEVICE_ATTR(version_product, S_IRUGO,
+    mptscsih_version_product_show, NULL);
+
+static ssize_t
+mptscsih_version_nvdata_persistent_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER *ioc = hd->ioc;
+
+	return snprintf(buf, PAGE_SIZE, "%02xh\n",
+	    ioc->nvdata_version_persistent);
+}
+static CLASS_DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
+    mptscsih_version_nvdata_persistent_show, NULL);
+
+static ssize_t
+mptscsih_version_nvdata_default_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER *ioc = hd->ioc;
+
+	return snprintf(buf, PAGE_SIZE, "%02xh\n",ioc->nvdata_version_default);
+}
+static CLASS_DEVICE_ATTR(version_nvdata_default, S_IRUGO,
+    mptscsih_version_nvdata_default_show, NULL);
+
+static ssize_t
+mptscsih_board_name_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER *ioc = hd->ioc;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_name);
+}
+static CLASS_DEVICE_ATTR(board_name, S_IRUGO, mptscsih_board_name_show, NULL);
+
+static ssize_t
+mptscsih_board_assembly_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER *ioc = hd->ioc;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_assembly);
+}
+static CLASS_DEVICE_ATTR(board_assembly, S_IRUGO,
+    mptscsih_board_assembly_show, NULL);
+
+static ssize_t
+mptscsih_board_tracer_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER *ioc = hd->ioc;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_tracer);
+}
+static CLASS_DEVICE_ATTR(board_tracer, S_IRUGO,
+    mptscsih_board_tracer_show, NULL);
+
+static ssize_t
+mptscsih_io_delay_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER *ioc = hd->ioc;
+
+	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
+}
+static CLASS_DEVICE_ATTR(io_delay, S_IRUGO,
+    mptscsih_io_delay_show, NULL);
+
+static ssize_t
+mptscsih_device_delay_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER *ioc = hd->ioc;
+
+	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
+}
+static CLASS_DEVICE_ATTR(device_delay, S_IRUGO,
+    mptscsih_device_delay_show, NULL);
+
+struct class_device_attribute *mptscsih_host_attrs[] = {
+	&class_device_attr_version_fw,
+	&class_device_attr_version_bios,
+	&class_device_attr_version_mpi,
+	&class_device_attr_version_product,
+	&class_device_attr_version_nvdata_persistent,
+	&class_device_attr_version_nvdata_default,
+	&class_device_attr_board_name,
+	&class_device_attr_board_assembly,
+	&class_device_attr_board_tracer,
+	&class_device_attr_io_delay,
+	&class_device_attr_device_delay,
+	NULL,
+};
+EXPORT_SYMBOL(mptscsih_host_attrs);
+
 EXPORT_SYMBOL(mptscsih_remove);
 EXPORT_SYMBOL(mptscsih_shutdown);
 #ifdef CONFIG_PM
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 8eccdfe5701a..67b088db2f10 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -129,3 +129,4 @@ extern void mptscsih_timer_expired(unsigned long data);
 extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
 extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
+extern struct class_device_attribute *mptscsih_host_attrs[];
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 6b3e0c00952b..947fe2901800 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -821,6 +821,7 @@ static struct scsi_host_template mptspi_driver_template = {
 	.max_sectors = 8192,
 	.cmd_per_lun = 7,
 	.use_clustering = ENABLE_CLUSTERING,
+	.shost_attrs = mptscsih_host_attrs,
 };
 
 static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
@@ -1523,6 +1524,8 @@ static struct pci_driver mptspi_driver = {
 static int __init
 mptspi_init(void)
 {
+	int error;
+
 	show_mptmod_ver(my_NAME, my_VERSION);
 
 	mptspi_transport_template = spi_attach_transport(&mptspi_transport_functions);
@@ -1543,7 +1546,11 @@ mptspi_init(void)
1543 ": Registered for IOC reset notifications\n")); 1546 ": Registered for IOC reset notifications\n"));
1544 } 1547 }
1545 1548
1546 return pci_register_driver(&mptspi_driver); 1549 error = pci_register_driver(&mptspi_driver);
1550 if (error)
1551 spi_release_transport(mptspi_transport_template);
1552
1553 return error;
1547} 1554}
1548 1555
1549/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1556/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9726261c367d..ab5ec1feaf4e 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -1526,15 +1526,12 @@ zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool)
  * zfcp_gid_pn_buffers_free - free buffers for GID_PN nameserver request
  * @gid_pn: pointer to struct zfcp_gid_pn_data which has to be freed
  */
-static void
-zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
+static void zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
 {
-	if ((gid_pn->ct.pool != 0))
+	if (gid_pn->ct.pool)
 		mempool_free(gid_pn, gid_pn->ct.pool);
 	else
 		kfree(gid_pn);
-
-	return;
 }
 
 /**
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 22649639230b..b36dfc40d9fa 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -126,6 +126,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
 #define ZFCP_MIN_OUTPUT_THRESHOLD 1	/* ignored by QDIO layer */
 
 #define QDIO_SCSI_QFMT		1	/* 1 for FSF */
+#define QBUFF_PER_PAGE		(PAGE_SIZE / sizeof(struct qdio_buffer))
 
 /********************* FSF SPECIFIC DEFINES *********************************/
 
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 4e7cb6dc4d34..d8cd75ce2d9a 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1626,7 +1626,7 @@ zfcp_erp_schedule_work(struct zfcp_unit *unit)
 {
 	struct zfcp_erp_add_work *p;
 
-	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
 	if (!p) {
 		ZFCP_LOG_NORMAL("error: Out of resources. Could not register "
 				"the FCP-LUN 0x%Lx connected to "
@@ -1639,7 +1639,6 @@ zfcp_erp_schedule_work(struct zfcp_unit *unit)
 	}
 
 	zfcp_unit_get(unit);
-	memset(p, 0, sizeof(*p));
 	atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
 	INIT_WORK(&p->work, zfcp_erp_scsi_scan);
 	p->unit = unit;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 0eb31e162b15..b240800b78d7 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1930,7 +1930,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
 skip_fsfstatus:
 	send_els->status = retval;
 
-	if (send_els->handler != 0)
+	if (send_els->handler)
 		send_els->handler(send_els->handler_data);
 
 	return retval;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index bdf5782b8a7a..c408badd2ae9 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -47,103 +47,56 @@ static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
 #define ZFCP_LOG_AREA			ZFCP_LOG_AREA_QDIO
 
 /*
- * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
- * array in the adapter struct.
- * Cur_buf is the pointer array and count can be any number of required
- * buffers, the page-fitting arithmetic is done entirely within this funciton.
+ * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
+ * in the adapter struct sbuf is the pointer array.
  *
- * returns: number of buffers allocated
  * locks:   must only be called with zfcp_data.config_sema taken
  */
-static int
-zfcp_qdio_buffers_enqueue(struct qdio_buffer **cur_buf, int count)
+static void
+zfcp_qdio_buffers_dequeue(struct qdio_buffer **sbuf)
 {
-	int buf_pos;
-	int qdio_buffers_per_page;
-	int page_pos = 0;
-	struct qdio_buffer *first_in_page = NULL;
-
-	qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
-	ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
-
-	for (buf_pos = 0; buf_pos < count; buf_pos++) {
-		if (page_pos == 0) {
-			cur_buf[buf_pos] = (struct qdio_buffer *)
-			    get_zeroed_page(GFP_KERNEL);
-			if (cur_buf[buf_pos] == NULL) {
-				ZFCP_LOG_INFO("error: allocation of "
-					      "QDIO buffer failed \n");
-				goto out;
-			}
-			first_in_page = cur_buf[buf_pos];
-		} else {
-			cur_buf[buf_pos] = first_in_page + page_pos;
-		}
-		/* was initialised to zero */
-		page_pos++;
-		page_pos %= qdio_buffers_per_page;
-	}
- out:
-	return buf_pos;
+	int pos;
+
+	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE)
+		free_page((unsigned long) sbuf[pos]);
 }
 
 /*
- * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
- * in the adapter struct cur_buf is the pointer array and count can be any
- * number of buffers in the array that should be freed starting from buffer 0
+ * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
+ * array in the adapter struct.
+ * Cur_buf is the pointer array
  *
+ * returns: zero on success else -ENOMEM
  * locks:   must only be called with zfcp_data.config_sema taken
  */
-static void
-zfcp_qdio_buffers_dequeue(struct qdio_buffer **cur_buf, int count)
+static int
+zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbuf)
 {
-	int buf_pos;
-	int qdio_buffers_per_page;
-
-	qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
-	ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
+	int pos;
 
-	for (buf_pos = 0; buf_pos < count; buf_pos += qdio_buffers_per_page)
-		free_page((unsigned long) cur_buf[buf_pos]);
-	return;
+	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
+		sbuf[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
+		if (!sbuf[pos]) {
+			zfcp_qdio_buffers_dequeue(sbuf);
+			return -ENOMEM;
+		}
+	}
+	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
+		if (pos % QBUFF_PER_PAGE)
+			sbuf[pos] = sbuf[pos - 1] + 1;
+	return 0;
 }
 
 /* locks: must only be called with zfcp_data.config_sema taken */
 int
 zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter)
 {
-	int buffer_count;
-	int retval = 0;
+	int ret;
 
-	buffer_count =
-	    zfcp_qdio_buffers_enqueue(&(adapter->request_queue.buffer[0]),
-				      QDIO_MAX_BUFFERS_PER_Q);
-	if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
-		ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for request "
-			       "queue\n", buffer_count);
-		zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
-					  buffer_count);
-		retval = -ENOMEM;
-		goto out;
-	}
-
-	buffer_count =
-	    zfcp_qdio_buffers_enqueue(&(adapter->response_queue.buffer[0]),
-				      QDIO_MAX_BUFFERS_PER_Q);
-	if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
-		ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for response "
-			       "queue", buffer_count);
-		zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
-					  buffer_count);
-		ZFCP_LOG_TRACE("freeing request_queue buffers\n");
-		zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
-					  QDIO_MAX_BUFFERS_PER_Q);
-		retval = -ENOMEM;
-		goto out;
-	}
- out:
-	return retval;
+	ret = zfcp_qdio_buffers_enqueue(adapter->request_queue.buffer);
+	if (ret)
+		return ret;
+	return zfcp_qdio_buffers_enqueue(adapter->response_queue.buffer);
 }
 
 /* locks: must only be called with zfcp_data.config_sema taken */
148 101
149/* locks: must only be called with zfcp_data.config_sema taken */ 102/* locks: must only be called with zfcp_data.config_sema taken */
@@ -151,12 +104,10 @@ void
 zfcp_qdio_free_queues(struct zfcp_adapter *adapter)
 {
 	ZFCP_LOG_TRACE("freeing request_queue buffers\n");
-	zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
-				  QDIO_MAX_BUFFERS_PER_Q);
+	zfcp_qdio_buffers_dequeue(adapter->request_queue.buffer);
 
 	ZFCP_LOG_TRACE("freeing response_queue buffers\n");
-	zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
-				  QDIO_MAX_BUFFERS_PER_Q);
+	zfcp_qdio_buffers_dequeue(adapter->response_queue.buffer);
 }
 
 int
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 6b49f6a2524d..efd9d8d3a890 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -4,7 +4,7 @@
    Written By: Adam Radford <linuxraid@amcc.com>
    Modifications By: Tom Couch <linuxraid@amcc.com>
 
-   Copyright (C) 2004-2006 Applied Micro Circuits Corporation.
+   Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -69,6 +69,8 @@
    2.26.02.008 - Free irq handler in __twa_shutdown().
                  Serialize reset code.
                  Add support for 9650SE controllers.
+   2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
+   2.26.02.010 - Add support for 9690SA controllers.
 */
 
 #include <linux/module.h>
@@ -92,7 +94,7 @@
 #include "3w-9xxx.h"
 
 /* Globals */
-#define TW_DRIVER_VERSION "2.26.02.008"
+#define TW_DRIVER_VERSION "2.26.02.010"
 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 static unsigned int twa_device_extension_count;
 static int twa_major = -1;
@@ -124,11 +126,11 @@ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
 			      unsigned short *fw_on_ctlr_branch,
 			      unsigned short *fw_on_ctlr_build,
 			      u32 *init_connect_result);
-static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
+static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
-static int twa_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
+static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
@@ -683,7 +685,7 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
 	full_command_packet = &tw_ioctl->firmware_command;
 
 	/* Load request id and sglist for both command types */
-	twa_load_sgl(full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
+	twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
 
 	memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
 
@@ -700,10 +702,10 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
 	if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
 		/* Now we need to reset the board */
 		printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
-		       tw_dev->host->host_no, TW_DRIVER, 0xc,
+		       tw_dev->host->host_no, TW_DRIVER, 0x37,
 		       cmd);
 		retval = TW_IOCTL_ERROR_OS_EIO;
-		twa_reset_device_extension(tw_dev, 1);
+		twa_reset_device_extension(tw_dev);
 		goto out3;
 	}
 
@@ -890,7 +892,9 @@ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
 	}
 
 	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
-		if ((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) || (!test_bit(TW_IN_RESET, &tw_dev->flags)))
+		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
+		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
+		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
 		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
 	}
@@ -935,8 +939,7 @@ static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
 	unsigned long before;
 	int retval = 1;
 
-	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) ||
-	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE)) {
+	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
 		before = jiffies;
 		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
 			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
@@ -1195,7 +1198,6 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
 	u32 status_reg_value;
 	TW_Response_Queue response_que;
 	TW_Command_Full *full_command_packet;
-	TW_Command *command_packet;
 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
 	int handled = 0;
 
@@ -1273,7 +1275,6 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
 			request_id = TW_RESID_OUT(response_que.response_id);
 			full_command_packet = tw_dev->command_packet_virt[request_id];
 			error = 0;
-			command_packet = &full_command_packet->command.oldcommand;
 			/* Check for command packet errors */
 			if (full_command_packet->command.newcommand.status != 0) {
 				if (tw_dev->srb[request_id] != 0) {
@@ -1352,11 +1353,15 @@ twa_interrupt_bail:
 } /* End twa_interrupt() */
 
 /* This function will load the request id and various sgls for ioctls */
-static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
+static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
 {
 	TW_Command *oldcommand;
 	TW_Command_Apache *newcommand;
 	TW_SG_Entry *sgl;
+	unsigned int pae = 0;
+
+	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
+		pae = 1;
 
 	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
 		newcommand = &full_command_packet->command.newcommand;
@@ -1372,12 +1377,14 @@ static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, d
 
 		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
 			/* Load the sg list */
-			sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
+			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
+				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
+			else
+				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
 			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
 			sgl->length = cpu_to_le32(length);
 
-			if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
-				oldcommand->size += 1;
+			oldcommand->size += pae;
 		}
 	}
 } /* End twa_load_sgl() */
@@ -1506,7 +1513,8 @@ static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id,
 	command_que_value = tw_dev->command_packet_phys[request_id];
 
 	/* For 9650SE write low 4 bytes first */
-	if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) {
+	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
+	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
 		command_que_value += TW_COMMAND_OFFSET;
 		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
 	}
@@ -1537,7 +1545,8 @@ static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id,
 		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
 		goto out;
 	} else {
-		if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) {
+		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
+		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
 			/* Now write upper 4 bytes */
 			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
 		} else {
@@ -1561,7 +1570,7 @@ out:
1561} /* End twa_post_command_packet() */ 1570} /* End twa_post_command_packet() */
1562 1571
1563/* This function will reset a device extension */ 1572/* This function will reset a device extension */
1564static int twa_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset) 1573static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1565{ 1574{
1566 int i = 0; 1575 int i = 0;
1567 int retval = 1; 1576 int retval = 1;
@@ -1719,7 +1728,7 @@ static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1719 mutex_lock(&tw_dev->ioctl_lock); 1728 mutex_lock(&tw_dev->ioctl_lock);
1720 1729
1721 /* Now reset the card and some of the device extension data */ 1730 /* Now reset the card and some of the device extension data */
1722 if (twa_reset_device_extension(tw_dev, 0)) { 1731 if (twa_reset_device_extension(tw_dev)) {
1723 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset"); 1732 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1724 goto out; 1733 goto out;
1725 } 1734 }
@@ -2001,11 +2010,14 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
2001 2010
2002 pci_set_master(pdev); 2011 pci_set_master(pdev);
2003 2012
2004 retval = pci_set_dma_mask(pdev, sizeof(dma_addr_t) > 4 ? DMA_64BIT_MASK : DMA_32BIT_MASK); 2013 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
2005 if (retval) { 2014 || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
2006 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask"); 2015 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)
2007 goto out_disable_device; 2016 || pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
2008 } 2017 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2018 retval = -ENODEV;
2019 goto out_disable_device;
2020 }
2009 2021
2010 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension)); 2022 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2011 if (!host) { 2023 if (!host) {
@@ -2053,7 +2065,8 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
2053 goto out_iounmap; 2065 goto out_iounmap;
2054 2066
2055 /* Set host specific parameters */ 2067 /* Set host specific parameters */
2056 if (pdev->device == PCI_DEVICE_ID_3WARE_9650SE) 2068 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2069 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2057 host->max_id = TW_MAX_UNITS_9650SE; 2070 host->max_id = TW_MAX_UNITS_9650SE;
2058 else 2071 else
2059 host->max_id = TW_MAX_UNITS; 2072 host->max_id = TW_MAX_UNITS;
@@ -2160,6 +2173,8 @@ static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 2173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2161 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE, 2174 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 2175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2176 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2163 { } 2178 { }
2164}; 2179};
2165MODULE_DEVICE_TABLE(pci, twa_pci_tbl); 2180MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
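The twa_probe() hunk above replaces the old compile-time DMA mask choice with a runtime negotiation: try the 64-bit streaming and coherent masks first, fall back to 32-bit, and only fail the probe if neither is accepted. A minimal sketch of that pattern using the same PCI DMA calls; the helper name twa_set_dma_mask and its -ENODEV return are illustrative, not part of the driver:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int twa_set_dma_mask(struct pci_dev *pdev)
{
	/* Prefer full 64-bit addressing for both streaming and coherent DMA. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
		return 0;

	/* Fall back to 32-bit addressing if the 64-bit mask is rejected. */
	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
		return 0;

	return -ENODEV;	/* neither mask was accepted */
}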
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 7901517d4513..d14a9479e389 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -4,7 +4,7 @@
4 Written By: Adam Radford <linuxraid@amcc.com> 4 Written By: Adam Radford <linuxraid@amcc.com>
5 Modifications By: Tom Couch <linuxraid@amcc.com> 5 Modifications By: Tom Couch <linuxraid@amcc.com>
6 6
7 Copyright (C) 2004-2006 Applied Micro Circuits Corporation. 7 Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
8 8
9 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by 10 it under the terms of the GNU General Public License as published by
@@ -419,6 +419,9 @@ static twa_message_type twa_error_table[] = {
419#ifndef PCI_DEVICE_ID_3WARE_9650SE 419#ifndef PCI_DEVICE_ID_3WARE_9650SE
420#define PCI_DEVICE_ID_3WARE_9650SE 0x1004 420#define PCI_DEVICE_ID_3WARE_9650SE 0x1004
421#endif 421#endif
422#ifndef PCI_DEVICE_ID_3WARE_9690SA
423#define PCI_DEVICE_ID_3WARE_9690SA 0x1005
424#endif
422 425
423/* Bitmask macros to eliminate bitfields */ 426/* Bitmask macros to eliminate bitfields */
424 427
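With the new PCI ID in place, the 9650SE-or-9690SA test is now repeated in twa_load_sgl(), twa_post_command_packet() and twa_probe(). A hypothetical helper, not present in the driver, that would keep those call sites in sync could look like:

#include <linux/pci.h>

/* Hypothetical: controllers that use the large command-queue register
 * (low dword written first, then the high dword) and the higher unit count. */
static inline int twa_uses_large_queue(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVICE_ID_3WARE_9650SE ||
	       pdev->device == PCI_DEVICE_ID_3WARE_9690SA;
}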
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a947257b8964..d2b3898b750a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -282,7 +282,7 @@ config SCSI_ISCSI_ATTRS
282 282
283config SCSI_SAS_ATTRS 283config SCSI_SAS_ATTRS
284 tristate "SAS Transport Attributes" 284 tristate "SAS Transport Attributes"
285 depends on SCSI 285 depends on SCSI && BLK_DEV_BSG
286 help 286 help
287 If you wish to export transport-specific information about 287 If you wish to export transport-specific information about
288 each attached SAS device to sysfs, say Y. 288 each attached SAS device to sysfs, say Y.
@@ -291,8 +291,12 @@ source "drivers/scsi/libsas/Kconfig"
291 291
292endmenu 292endmenu
293 293
294menu "SCSI low-level drivers" 294menuconfig SCSI_LOWLEVEL
295 bool "SCSI low-level drivers"
295 depends on SCSI!=n 296 depends on SCSI!=n
297 default y
298
299if SCSI_LOWLEVEL
296 300
297config ISCSI_TCP 301config ISCSI_TCP
298 tristate "iSCSI Initiator over TCP/IP" 302 tristate "iSCSI Initiator over TCP/IP"
@@ -1800,7 +1804,7 @@ config SCSI_SRP
1800 To compile this driver as a module, choose M here: the 1804 To compile this driver as a module, choose M here: the
1801 module will be called libsrp. 1805 module will be called libsrp.
1802 1806
1803endmenu 1807endif # SCSI_LOWLEVEL
1804 1808
1805source "drivers/scsi/pcmcia/Kconfig" 1809source "drivers/scsi/pcmcia/Kconfig"
1806 1810
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index 6a5784683ed3..0c758d1452ba 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -79,6 +79,7 @@ static int __devinit a4000t_probe(struct device *dev)
79 goto out_put_host; 79 goto out_put_host;
80 } 80 }
81 81
82 dev_set_drvdata(dev, host);
82 scsi_scan_host(host); 83 scsi_scan_host(host);
83 84
84 return 0; 85 return 0;
@@ -95,7 +96,7 @@ static int __devinit a4000t_probe(struct device *dev)
95 96
96static __devexit int a4000t_device_remove(struct device *dev) 97static __devexit int a4000t_device_remove(struct device *dev)
97{ 98{
98 struct Scsi_Host *host = dev_to_shost(dev); 99 struct Scsi_Host *host = dev_get_drvdata(dev);
99 struct NCR_700_Host_Parameters *hostdata = shost_priv(host); 100 struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
100 101
101 scsi_remove_host(host); 102 scsi_remove_host(host);
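The change above (repeated for bvme6000 further down) stops recovering the Scsi_Host with dev_to_shost(), which assumes the struct device passed in is the host's own, and instead stashes the host pointer in the platform device's driver data at probe time. A condensed sketch of the pairing; example_probe, example_remove and driver_template are placeholder names, and error handling is trimmed:

#include <linux/device.h>
#include <scsi/scsi_host.h>

static int example_probe(struct device *dev)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(&driver_template, 0);	/* private data size omitted for brevity */
	if (!host)
		return -ENOMEM;

	dev_set_drvdata(dev, host);	/* remember the host for remove() */
	scsi_scan_host(host);
	return 0;
}

static int example_remove(struct device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(dev);	/* set in probe() */

	scsi_remove_host(host);
	scsi_host_put(host);
	return 0;
}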
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 0b6fd0b654d2..a26baab09dbf 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -751,6 +751,101 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
751 inqstrcpy ("V1.0", str->prl); 751 inqstrcpy ("V1.0", str->prl);
752} 752}
753 753
754static void get_container_serial_callback(void *context, struct fib * fibptr)
755{
756 struct aac_get_serial_resp * get_serial_reply;
757 struct scsi_cmnd * scsicmd;
758
759 BUG_ON(fibptr == NULL);
760
761 scsicmd = (struct scsi_cmnd *) context;
762 if (!aac_valid_context(scsicmd, fibptr))
763 return;
764
765 get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
766 /* Failure is irrelevant, using default value instead */
767 if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
768 char sp[13];
769 /* EVPD bit set */
770 sp[0] = INQD_PDT_DA;
771 sp[1] = scsicmd->cmnd[2];
772 sp[2] = 0;
773 sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
774 le32_to_cpu(get_serial_reply->uid));
775 aac_internal_transfer(scsicmd, sp, 0, sizeof(sp));
776 }
777
778 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
779
780 aac_fib_complete(fibptr);
781 aac_fib_free(fibptr);
782 scsicmd->scsi_done(scsicmd);
783}
784
785/**
 786 * aac_get_container_serial - get container serial, non-blocking.
787 */
788static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
789{
790 int status;
791 struct aac_get_serial *dinfo;
792 struct fib * cmd_fibcontext;
793 struct aac_dev * dev;
794
795 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
796
797 if (!(cmd_fibcontext = aac_fib_alloc(dev)))
798 return -ENOMEM;
799
800 aac_fib_init(cmd_fibcontext);
801 dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
802
803 dinfo->command = cpu_to_le32(VM_ContainerConfig);
804 dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
805 dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
806
807 status = aac_fib_send(ContainerCommand,
808 cmd_fibcontext,
809 sizeof (struct aac_get_serial),
810 FsaNormal,
811 0, 1,
812 (fib_callback) get_container_serial_callback,
813 (void *) scsicmd);
814
815 /*
 816 * Check that the command was queued to the controller
817 */
818 if (status == -EINPROGRESS) {
819 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
820 return 0;
821 }
822
823 printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
824 aac_fib_complete(cmd_fibcontext);
825 aac_fib_free(cmd_fibcontext);
826 return -1;
827}
828
829/* Function: setinqserial
830 *
831 * Arguments: [1] pointer to void [1] int
832 *
833 * Purpose: Sets SCSI Unit Serial number.
834 * This is a fake. We should read a proper
835 * serial number from the container. <SuSE>But
836 * without docs it's quite hard to do it :-)
837 * So this will have to do in the meantime.</SuSE>
838 */
839
840static int setinqserial(struct aac_dev *dev, void *data, int cid)
841{
842 /*
843 * This breaks array migration.
844 */
845 return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
846 le32_to_cpu(dev->adapter_info.serial[0]), cid);
847}
848
754static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code, 849static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
755 u8 a_sense_code, u8 incorrect_length, 850 u8 a_sense_code, u8 incorrect_length,
756 u8 bit_pointer, u16 field_pointer, 851 u8 bit_pointer, u16 field_pointer,
@@ -1798,6 +1893,49 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1798 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid)); 1893 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
1799 memset(&inq_data, 0, sizeof (struct inquiry_data)); 1894 memset(&inq_data, 0, sizeof (struct inquiry_data));
1800 1895
1896 if (scsicmd->cmnd[1] & 0x1 ) {
1897 char *arr = (char *)&inq_data;
1898
1899 /* EVPD bit set */
1900 arr[0] = (scmd_id(scsicmd) == host->this_id) ?
1901 INQD_PDT_PROC : INQD_PDT_DA;
1902 if (scsicmd->cmnd[2] == 0) {
1903 /* supported vital product data pages */
1904 arr[3] = 2;
1905 arr[4] = 0x0;
1906 arr[5] = 0x80;
1907 arr[1] = scsicmd->cmnd[2];
1908 aac_internal_transfer(scsicmd, &inq_data, 0,
1909 sizeof(inq_data));
1910 scsicmd->result = DID_OK << 16 |
1911 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1912 } else if (scsicmd->cmnd[2] == 0x80) {
1913 /* unit serial number page */
1914 arr[3] = setinqserial(dev, &arr[4],
1915 scmd_id(scsicmd));
1916 arr[1] = scsicmd->cmnd[2];
1917 aac_internal_transfer(scsicmd, &inq_data, 0,
1918 sizeof(inq_data));
1919 return aac_get_container_serial(scsicmd);
1920 } else {
1921 /* vpd page not implemented */
1922 scsicmd->result = DID_OK << 16 |
1923 COMMAND_COMPLETE << 8 |
1924 SAM_STAT_CHECK_CONDITION;
1925 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1926 ILLEGAL_REQUEST,
1927 SENCODE_INVALID_CDB_FIELD,
1928 ASENCODE_NO_SENSE, 0, 7, 2, 0);
1929 memcpy(scsicmd->sense_buffer,
1930 &dev->fsa_dev[cid].sense_data,
1931 (sizeof(dev->fsa_dev[cid].sense_data) >
1932 sizeof(scsicmd->sense_buffer))
1933 ? sizeof(scsicmd->sense_buffer)
1934 : sizeof(dev->fsa_dev[cid].sense_data));
1935 }
1936 scsicmd->scsi_done(scsicmd);
1937 return 0;
1938 }
1801 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */ 1939 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
1802 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ 1940 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
1803 inq_data.inqd_len = 31; 1941 inq_data.inqd_len = 31;
@@ -2070,7 +2208,7 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
2070 } 2208 }
2071 else return -EINVAL; 2209 else return -EINVAL;
2072 2210
2073 qd.valid = fsa_dev_ptr[qd.cnum].valid; 2211 qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
2074 qd.locked = fsa_dev_ptr[qd.cnum].locked; 2212 qd.locked = fsa_dev_ptr[qd.cnum].locked;
2075 qd.deleted = fsa_dev_ptr[qd.cnum].deleted; 2213 qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
2076 2214
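The INQUIRY path added above now answers EVPD requests itself: page 0x00 lists the two supported pages, and page 0x80 returns a unit serial number built by setinqserial() and then, when the firmware answers, overwritten from the asynchronous CT_CID_TO_32BITS_UID reply. For orientation, the bytes that get_container_serial_callback() writes into sp[] follow the usual VPD page 0x80 shape; the struct below is only a sketch of that layout (the driver assembles it by hand):

#include <linux/types.h>

/* SPC-style VPD page 0x80 (unit serial number). */
struct vpd_page80_sketch {
	u8 peripheral;		/* qualifier/device type, e.g. INQD_PDT_DA */
	u8 page_code;		/* 0x80, echoed from CDB byte 2 */
	u8 reserved;
	u8 page_length;		/* number of serial-number bytes that follow */
	u8 serial[8];		/* "%08X" rendering of the 32-bit container UID */
};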
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index f1d3b66af879..400d03403cd5 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1567,6 +1567,20 @@ struct aac_get_name_resp {
1567 u8 data[16]; 1567 u8 data[16];
1568}; 1568};
1569 1569
1570#define CT_CID_TO_32BITS_UID 165
1571struct aac_get_serial {
1572 __le32 command; /* VM_ContainerConfig */
1573 __le32 type; /* CT_CID_TO_32BITS_UID */
1574 __le32 cid;
1575};
1576
1577struct aac_get_serial_resp {
1578 __le32 dummy0;
1579 __le32 dummy1;
1580 __le32 status; /* CT_OK */
1581 __le32 uid;
1582};
1583
1570/* 1584/*
1571 * The following command is sent to shut down each container. 1585 * The following command is sent to shut down each container.
1572 */ 1586 */
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index d510839c0bb2..bb870906b4cf 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -80,7 +80,11 @@ static int fib_map_alloc(struct aac_dev *dev)
80 80
81void aac_fib_map_free(struct aac_dev *dev) 81void aac_fib_map_free(struct aac_dev *dev)
82{ 82{
83 pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa); 83 pci_free_consistent(dev->pdev,
84 dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
85 dev->hw_fib_va, dev->hw_fib_pa);
86 dev->hw_fib_va = NULL;
87 dev->hw_fib_pa = 0;
84} 88}
85 89
86/** 90/**
@@ -1087,8 +1091,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1087 * case. 1091 * case.
1088 */ 1092 */
1089 aac_fib_map_free(aac); 1093 aac_fib_map_free(aac);
1090 aac->hw_fib_va = NULL;
1091 aac->hw_fib_pa = 0;
1092 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 1094 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1093 aac->comm_addr = NULL; 1095 aac->comm_addr = NULL;
1094 aac->comm_phys = 0; 1096 aac->comm_phys = 0;
@@ -1098,12 +1100,12 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1098 kfree(aac->fsa_dev); 1100 kfree(aac->fsa_dev);
1099 aac->fsa_dev = NULL; 1101 aac->fsa_dev = NULL;
1100 if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) { 1102 if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
1101 if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) || 1103 if (((retval = pci_set_dma_mask(aac->pdev, DMA_31BIT_MASK))) ||
1102 ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK)))) 1104 ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_31BIT_MASK))))
1103 goto out; 1105 goto out;
1104 } else { 1106 } else {
1105 if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) || 1107 if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
1106 ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL)))) 1108 ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
1107 goto out; 1109 goto out;
1108 } 1110 }
1109 if ((retval = (*(aac_get_driver_ident(index)->init))(aac))) 1111 if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
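The commsup.c hunk swaps the two masks so that AAC_QUIRK_31BIT adapters really get a 31-bit DMA mask and everything else gets the full 32-bit one; before the change the branches were effectively reversed, since the open-coded 0x7FFFFFFFULL is the 31-bit mask. For reference, the values behind the macros (as defined in <linux/dma-mapping.h> of this era):

/*
 * DMA_31BIT_MASK == 0x000000007fffffffULL   -- AAC_QUIRK_31BIT adapters
 * DMA_32BIT_MASK == 0x00000000ffffffffULL   -- all other adapters
 */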
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index c520e5b41fb5..3dce618bf414 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -126,7 +126,7 @@ static inline int asd_init_sata(struct domain_device *dev)
126 if (w76 & 0x100) /* NCQ? */ 126 if (w76 & 0x100) /* NCQ? */
127 qdepth = (w75 & 0x1F) + 1; 127 qdepth = (w75 & 0x1F) + 1;
128 asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK, 128 asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
129 (1<<qdepth)-1); 129 (1ULL<<qdepth)-1);
130 asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth); 130 asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
131 } 131 }
132 if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || 132 if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
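The one-character fix above matters once a drive advertises a 32-deep NCQ queue: qdepth is (w75 & 0x1F) + 1, so it can reach 32, and shifting a plain int by 32 is undefined (typically the tag mask would come out empty). A minimal illustration; sata_tag_mask is an illustrative name, not a driver function:

#include <linux/types.h>

static u32 sata_tag_mask(unsigned int qdepth)
{
	/* 1ULL keeps the shift in 64 bits; qdepth == 32 yields 0xffffffff. */
	return (1ULL << qdepth) - 1;
}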
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index b8c6810090d5..ab00aecc5466 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -81,6 +81,9 @@ static struct scsi_host_template aic94xx_sht = {
81 .use_clustering = ENABLE_CLUSTERING, 81 .use_clustering = ENABLE_CLUSTERING,
82 .eh_device_reset_handler = sas_eh_device_reset_handler, 82 .eh_device_reset_handler = sas_eh_device_reset_handler,
83 .eh_bus_reset_handler = sas_eh_bus_reset_handler, 83 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
84 .slave_alloc = sas_slave_alloc,
85 .target_destroy = sas_target_destroy,
86 .ioctl = sas_ioctl,
84}; 87};
85 88
86static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha) 89static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha)
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index e2ad5bed9403..d5d8caba3560 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -74,8 +74,13 @@ static inline int asd_map_scatterlist(struct sas_task *task,
74 return 0; 74 return 0;
75 } 75 }
76 76
77 num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter, 77 /* STP tasks come from libata which has already mapped
78 task->data_dir); 78 * the SG list */
79 if (sas_protocol_ata(task->task_proto))
80 num_sg = task->num_scatter;
81 else
82 num_sg = pci_map_sg(asd_ha->pcidev, task->scatter,
83 task->num_scatter, task->data_dir);
79 if (num_sg == 0) 84 if (num_sg == 0)
80 return -ENOMEM; 85 return -ENOMEM;
81 86
@@ -120,8 +125,9 @@ static inline int asd_map_scatterlist(struct sas_task *task,
120 125
121 return 0; 126 return 0;
122err_unmap: 127err_unmap:
123 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, 128 if (sas_protocol_ata(task->task_proto))
124 task->data_dir); 129 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
130 task->data_dir);
125 return res; 131 return res;
126} 132}
127 133
@@ -142,8 +148,9 @@ static inline void asd_unmap_scatterlist(struct asd_ascb *ascb)
142 } 148 }
143 149
144 asd_free_coherent(asd_ha, ascb->sg_arr); 150 asd_free_coherent(asd_ha, ascb->sg_arr);
145 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, 151 if (task->task_proto != SAS_PROTOCOL_STP)
146 task->data_dir); 152 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
153 task->data_dir);
147} 154}
148 155
149/* ---------- Task complete tasklet ---------- */ 156/* ---------- Task complete tasklet ---------- */
@@ -391,7 +398,6 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
391 398
392 scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); 399 scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
393 scb->ata_task.fis = task->ata_task.fis; 400 scb->ata_task.fis = task->ata_task.fis;
394 scb->ata_task.fis.fis_type = 0x27;
395 if (likely(!task->ata_task.device_control_reg_update)) 401 if (likely(!task->ata_task.device_control_reg_update))
396 scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ 402 scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
397 scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */ 403 scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c
index 012cdea7946d..cac354086737 100644
--- a/drivers/scsi/bvme6000_scsi.c
+++ b/drivers/scsi/bvme6000_scsi.c
@@ -74,6 +74,7 @@ bvme6000_probe(struct device *dev)
74 goto out_put_host; 74 goto out_put_host;
75 } 75 }
76 76
77 dev_set_drvdata(dev, host);
77 scsi_scan_host(host); 78 scsi_scan_host(host);
78 79
79 return 0; 80 return 0;
@@ -89,7 +90,7 @@ bvme6000_probe(struct device *dev)
89static __devexit int 90static __devexit int
90bvme6000_device_remove(struct device *dev) 91bvme6000_device_remove(struct device *dev)
91{ 92{
92 struct Scsi_Host *host = dev_to_shost(dev); 93 struct Scsi_Host *host = dev_get_drvdata(dev);
93 struct NCR_700_Host_Parameters *hostdata = shost_priv(host); 94 struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
94 95
95 scsi_remove_host(host); 96 scsi_remove_host(host);
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index d5576d54ce76..856e38b14861 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -220,7 +220,7 @@
220#define ESP_BUSID_RESELID 0x10 220#define ESP_BUSID_RESELID 0x10
221#define ESP_BUSID_CTR32BIT 0x40 221#define ESP_BUSID_CTR32BIT 0x40
222 222
223#define ESP_BUS_TIMEOUT 250 /* In milli-seconds */ 223#define ESP_BUS_TIMEOUT 275 /* In milli-seconds */
224#define ESP_TIMEO_CONST 8192 224#define ESP_TIMEO_CONST 8192
225#define ESP_NEG_DEFP(mhz, cfact) \ 225#define ESP_NEG_DEFP(mhz, cfact) \
226 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) 226 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index aafdc92f8312..3a3c1ac9c6cd 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -30,6 +30,13 @@ config SCSI_SAS_LIBSAS
30 This provides transport specific helpers for SAS drivers which 30 This provides transport specific helpers for SAS drivers which
31 use the domain device construct (like the aic94xxx). 31 use the domain device construct (like the aic94xxx).
32 32
33config SCSI_SAS_ATA
34 bool "ATA support for libsas (requires libata)"
35 depends on SCSI_SAS_LIBSAS && ATA
36 help
37 Builds in ATA support into libsas. Will necessitate
38 the loading of libata along with libsas.
39
33config SCSI_SAS_LIBSAS_DEBUG 40config SCSI_SAS_LIBSAS_DEBUG
34 bool "Compile the SAS Domain Transport Attributes in debug mode" 41 bool "Compile the SAS Domain Transport Attributes in debug mode"
35 default y 42 default y
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index 44d972a3b4bd..fd387b91856e 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -34,3 +34,4 @@ libsas-y += sas_init.o \
34 sas_discover.o \ 34 sas_discover.o \
35 sas_expander.o \ 35 sas_expander.o \
36 sas_scsi_host.o 36 sas_scsi_host.o
37libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
new file mode 100644
index 000000000000..ced2de32c511
--- /dev/null
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -0,0 +1,817 @@
1/*
2 * Support for SATA devices on Serial Attached SCSI (SAS) controllers
3 *
4 * Copyright (C) 2006 IBM Corporation
5 *
6 * Written by: Darrick J. Wong <djwong@us.ibm.com>, IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
21 * USA
22 */
23
24#include <linux/scatterlist.h>
25
26#include <scsi/sas_ata.h>
27#include "sas_internal.h"
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi.h>
32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_sas.h>
34#include "../scsi_sas_internal.h"
35#include "../scsi_transport_api.h"
36#include <scsi/scsi_eh.h>
37
38static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
39{
40 /* Cheesy attempt to translate SAS errors into ATA. Hah! */
41
42 /* transport error */
43 if (ts->resp == SAS_TASK_UNDELIVERED)
44 return AC_ERR_ATA_BUS;
45
46 /* ts->resp == SAS_TASK_COMPLETE */
47 /* task delivered, what happened afterwards? */
48 switch (ts->stat) {
49 case SAS_DEV_NO_RESPONSE:
50 return AC_ERR_TIMEOUT;
51
52 case SAS_INTERRUPTED:
53 case SAS_PHY_DOWN:
54 case SAS_NAK_R_ERR:
55 return AC_ERR_ATA_BUS;
56
57
58 case SAS_DATA_UNDERRUN:
59 /*
60 * Some programs that use the taskfile interface
61 * (smartctl in particular) can cause underrun
62 * problems. Ignore these errors, perhaps at our
63 * peril.
64 */
65 return 0;
66
67 case SAS_DATA_OVERRUN:
68 case SAS_QUEUE_FULL:
69 case SAS_DEVICE_UNKNOWN:
70 case SAS_SG_ERR:
71 return AC_ERR_INVALID;
72
73 case SAM_CHECK_COND:
74 case SAS_OPEN_TO:
75 case SAS_OPEN_REJECT:
76 SAS_DPRINTK("%s: Saw error %d. What to do?\n",
77 __FUNCTION__, ts->stat);
78 return AC_ERR_OTHER;
79
80 case SAS_ABORTED_TASK:
81 return AC_ERR_DEV;
82
83 case SAS_PROTO_RESPONSE:
84 /* This means the ending_fis has the error
85 * value; return 0 here to collect it */
86 return 0;
87 default:
88 return 0;
89 }
90}
91
92static void sas_ata_task_done(struct sas_task *task)
93{
94 struct ata_queued_cmd *qc = task->uldd_task;
95 struct domain_device *dev;
96 struct task_status_struct *stat = &task->task_status;
97 struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
98 struct sas_ha_struct *sas_ha;
99 enum ata_completion_errors ac;
100 unsigned long flags;
101
102 if (!qc)
103 goto qc_already_gone;
104
105 dev = qc->ap->private_data;
106 sas_ha = dev->port->ha;
107
108 spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
109 if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_GOOD) {
110 ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
111 qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
112 dev->sata_dev.sstatus = resp->sstatus;
113 dev->sata_dev.serror = resp->serror;
114 dev->sata_dev.scontrol = resp->scontrol;
115 } else if (stat->stat != SAM_STAT_GOOD) {
116 ac = sas_to_ata_err(stat);
117 if (ac) {
118 SAS_DPRINTK("%s: SAS error %x\n", __FUNCTION__,
119 stat->stat);
120 /* We saw a SAS error. Send a vague error. */
121 qc->err_mask = ac;
122 dev->sata_dev.tf.feature = 0x04; /* status err */
123 dev->sata_dev.tf.command = ATA_ERR;
124 }
125 }
126
127 qc->lldd_task = NULL;
128 if (qc->scsicmd)
129 ASSIGN_SAS_TASK(qc->scsicmd, NULL);
130 ata_qc_complete(qc);
131 spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
132
133 /*
134 * If the sas_task has an ata qc, a scsi_cmnd and the aborted
135 * flag is set, then we must have come in via the libsas EH
136 * functions. When we exit this function, we need to put the
137 * scsi_cmnd on the list of finished errors. The ata_qc_complete
138 * call cleans up the libata side of things but we're protected
139 * from the scsi_cmnd going away because the scsi_cmnd is owned
140 * by the EH, making libata's call to scsi_done a NOP.
141 */
142 spin_lock_irqsave(&task->task_state_lock, flags);
143 if (qc->scsicmd && task->task_state_flags & SAS_TASK_STATE_ABORTED)
144 scsi_eh_finish_cmd(qc->scsicmd, &sas_ha->eh_done_q);
145 spin_unlock_irqrestore(&task->task_state_lock, flags);
146
147qc_already_gone:
148 list_del_init(&task->list);
149 sas_free_task(task);
150}
151
152static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
153{
154 int res;
155 struct sas_task *task;
156 struct domain_device *dev = qc->ap->private_data;
157 struct sas_ha_struct *sas_ha = dev->port->ha;
158 struct Scsi_Host *host = sas_ha->core.shost;
159 struct sas_internal *i = to_sas_internal(host->transportt);
160 struct scatterlist *sg;
161 unsigned int num = 0;
162 unsigned int xfer = 0;
163
164 task = sas_alloc_task(GFP_ATOMIC);
165 if (!task)
166 return AC_ERR_SYSTEM;
167 task->dev = dev;
168 task->task_proto = SAS_PROTOCOL_STP;
169 task->task_done = sas_ata_task_done;
170
171 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
172 qc->tf.command == ATA_CMD_FPDMA_READ) {
173 /* Need to zero out the tag libata assigned us */
174 qc->tf.nsect = 0;
175 }
176
177 ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis);
178 task->uldd_task = qc;
179 if (is_atapi_taskfile(&qc->tf)) {
180 memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
181 task->total_xfer_len = qc->nbytes + qc->pad_len;
182 task->num_scatter = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
183 } else {
184 ata_for_each_sg(sg, qc) {
185 num++;
186 xfer += sg->length;
187 }
188
189 task->total_xfer_len = xfer;
190 task->num_scatter = num;
191 }
192
193 task->data_dir = qc->dma_dir;
194 task->scatter = qc->__sg;
195 task->ata_task.retry_count = 1;
196 task->task_state_flags = SAS_TASK_STATE_PENDING;
197 qc->lldd_task = task;
198
199 switch (qc->tf.protocol) {
200 case ATA_PROT_NCQ:
201 task->ata_task.use_ncq = 1;
202 /* fall through */
203 case ATA_PROT_ATAPI_DMA:
204 case ATA_PROT_DMA:
205 task->ata_task.dma_xfer = 1;
206 break;
207 }
208
209 if (qc->scsicmd)
210 ASSIGN_SAS_TASK(qc->scsicmd, task);
211
212 if (sas_ha->lldd_max_execute_num < 2)
213 res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
214 else
215 res = sas_queue_up(task);
216
217 /* Examine */
218 if (res) {
219 SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
220
221 if (qc->scsicmd)
222 ASSIGN_SAS_TASK(qc->scsicmd, NULL);
223 sas_free_task(task);
224 return AC_ERR_SYSTEM;
225 }
226
227 return 0;
228}
229
230static u8 sas_ata_check_status(struct ata_port *ap)
231{
232 struct domain_device *dev = ap->private_data;
233 return dev->sata_dev.tf.command;
234}
235
236static void sas_ata_phy_reset(struct ata_port *ap)
237{
238 struct domain_device *dev = ap->private_data;
239 struct sas_internal *i =
240 to_sas_internal(dev->port->ha->core.shost->transportt);
241 int res = 0;
242
243 if (i->dft->lldd_I_T_nexus_reset)
244 res = i->dft->lldd_I_T_nexus_reset(dev);
245
246 if (res)
247 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__);
248
249 switch (dev->sata_dev.command_set) {
250 case ATA_COMMAND_SET:
251 SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__);
252 ap->device[0].class = ATA_DEV_ATA;
253 break;
254 case ATAPI_COMMAND_SET:
255 SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__);
256 ap->device[0].class = ATA_DEV_ATAPI;
257 break;
258 default:
259 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
260 __FUNCTION__,
261 dev->sata_dev.command_set);
262 ap->device[0].class = ATA_DEV_UNKNOWN;
263 break;
264 }
265
266 ap->cbl = ATA_CBL_SATA;
267}
268
269static void sas_ata_post_internal(struct ata_queued_cmd *qc)
270{
271 if (qc->flags & ATA_QCFLAG_FAILED)
272 qc->err_mask |= AC_ERR_OTHER;
273
274 if (qc->err_mask) {
275 /*
276 * Find the sas_task and kill it. By this point,
277 * libata has decided to kill the qc, so we needn't
278 * bother with sas_ata_task_done. But we still
279 * ought to abort the task.
280 */
281 struct sas_task *task = qc->lldd_task;
282 unsigned long flags;
283
284 qc->lldd_task = NULL;
285 if (task) {
286 /* Should this be a AT(API) device reset? */
287 spin_lock_irqsave(&task->task_state_lock, flags);
288 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
289 spin_unlock_irqrestore(&task->task_state_lock, flags);
290
291 task->uldd_task = NULL;
292 __sas_task_abort(task);
293 }
294 }
295}
296
297static void sas_ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
298{
299 struct domain_device *dev = ap->private_data;
300 memcpy(tf, &dev->sata_dev.tf, sizeof (*tf));
301}
302
303static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
304 u32 val)
305{
306 struct domain_device *dev = ap->private_data;
307
308 SAS_DPRINTK("STUB %s\n", __FUNCTION__);
309 switch (sc_reg_in) {
310 case SCR_STATUS:
311 dev->sata_dev.sstatus = val;
312 break;
313 case SCR_CONTROL:
314 dev->sata_dev.scontrol = val;
315 break;
316 case SCR_ERROR:
317 dev->sata_dev.serror = val;
318 break;
319 case SCR_ACTIVE:
320 dev->sata_dev.ap->sactive = val;
321 break;
322 default:
323 return -EINVAL;
324 }
325 return 0;
326}
327
328static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
329 u32 *val)
330{
331 struct domain_device *dev = ap->private_data;
332
333 SAS_DPRINTK("STUB %s\n", __FUNCTION__);
334 switch (sc_reg_in) {
335 case SCR_STATUS:
336 *val = dev->sata_dev.sstatus;
337 return 0;
338 case SCR_CONTROL:
339 *val = dev->sata_dev.scontrol;
340 return 0;
341 case SCR_ERROR:
342 *val = dev->sata_dev.serror;
343 return 0;
344 case SCR_ACTIVE:
345 *val = dev->sata_dev.ap->sactive;
346 return 0;
347 default:
348 return -EINVAL;
349 }
350}
351
352static struct ata_port_operations sas_sata_ops = {
353 .port_disable = ata_port_disable,
354 .check_status = sas_ata_check_status,
355 .check_altstatus = sas_ata_check_status,
356 .dev_select = ata_noop_dev_select,
357 .phy_reset = sas_ata_phy_reset,
358 .post_internal_cmd = sas_ata_post_internal,
359 .tf_read = sas_ata_tf_read,
360 .qc_prep = ata_noop_qc_prep,
361 .qc_issue = sas_ata_qc_issue,
362 .port_start = ata_sas_port_start,
363 .port_stop = ata_sas_port_stop,
364 .scr_read = sas_ata_scr_read,
365 .scr_write = sas_ata_scr_write
366};
367
368static struct ata_port_info sata_port_info = {
369 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
370 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
371 .pio_mask = 0x1f, /* PIO0-4 */
372 .mwdma_mask = 0x07, /* MWDMA0-2 */
373 .udma_mask = ATA_UDMA6,
374 .port_ops = &sas_sata_ops
375};
376
377int sas_ata_init_host_and_port(struct domain_device *found_dev,
378 struct scsi_target *starget)
379{
380 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
381 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
382 struct ata_port *ap;
383
384 ata_host_init(&found_dev->sata_dev.ata_host,
385 &ha->pcidev->dev,
386 sata_port_info.flags,
387 &sas_sata_ops);
388 ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
389 &sata_port_info,
390 shost);
391 if (!ap) {
392 SAS_DPRINTK("ata_sas_port_alloc failed.\n");
393 return -ENODEV;
394 }
395
396 ap->private_data = found_dev;
397 ap->cbl = ATA_CBL_SATA;
398 ap->scsi_host = shost;
399 found_dev->sata_dev.ap = ap;
400
401 return 0;
402}
403
404void sas_ata_task_abort(struct sas_task *task)
405{
406 struct ata_queued_cmd *qc = task->uldd_task;
407 struct completion *waiting;
408
409 /* Bounce SCSI-initiated commands to the SCSI EH */
410 if (qc->scsicmd) {
411 scsi_req_abort_cmd(qc->scsicmd);
412 scsi_schedule_eh(qc->scsicmd->device->host);
413 return;
414 }
415
416 /* Internal command, fake a timeout and complete. */
417 qc->flags &= ~ATA_QCFLAG_ACTIVE;
418 qc->flags |= ATA_QCFLAG_FAILED;
419 qc->err_mask |= AC_ERR_TIMEOUT;
420 waiting = qc->private_data;
421 complete(waiting);
422}
423
424static void sas_task_timedout(unsigned long _task)
425{
426 struct sas_task *task = (void *) _task;
427 unsigned long flags;
428
429 spin_lock_irqsave(&task->task_state_lock, flags);
430 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
431 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
432 spin_unlock_irqrestore(&task->task_state_lock, flags);
433
434 complete(&task->completion);
435}
436
437static void sas_disc_task_done(struct sas_task *task)
438{
439 if (!del_timer(&task->timer))
440 return;
441 complete(&task->completion);
442}
443
444#define SAS_DEV_TIMEOUT 10
445
446/**
447 * sas_execute_task -- Basic task processing for discovery
448 * @task: the task to be executed
449 * @buffer: pointer to buffer to do I/O
450 * @size: size of @buffer
451 * @pci_dma_dir: PCI_DMA_...
452 */
453static int sas_execute_task(struct sas_task *task, void *buffer, int size,
454 int pci_dma_dir)
455{
456 int res = 0;
457 struct scatterlist *scatter = NULL;
458 struct task_status_struct *ts = &task->task_status;
459 int num_scatter = 0;
460 int retries = 0;
461 struct sas_internal *i =
462 to_sas_internal(task->dev->port->ha->core.shost->transportt);
463
464 if (pci_dma_dir != PCI_DMA_NONE) {
465 scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
466 if (!scatter)
467 goto out;
468
469 sg_init_one(scatter, buffer, size);
470 num_scatter = 1;
471 }
472
473 task->task_proto = task->dev->tproto;
474 task->scatter = scatter;
475 task->num_scatter = num_scatter;
476 task->total_xfer_len = size;
477 task->data_dir = pci_dma_dir;
478 task->task_done = sas_disc_task_done;
479 if (pci_dma_dir != PCI_DMA_NONE &&
480 sas_protocol_ata(task->task_proto)) {
481 task->num_scatter = pci_map_sg(task->dev->port->ha->pcidev,
482 task->scatter,
483 task->num_scatter,
484 task->data_dir);
485 }
486
487 for (retries = 0; retries < 5; retries++) {
488 task->task_state_flags = SAS_TASK_STATE_PENDING;
489 init_completion(&task->completion);
490
491 task->timer.data = (unsigned long) task;
492 task->timer.function = sas_task_timedout;
493 task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
494 add_timer(&task->timer);
495
496 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
497 if (res) {
498 del_timer(&task->timer);
499 SAS_DPRINTK("executing SAS discovery task failed:%d\n",
500 res);
501 goto ex_err;
502 }
503 wait_for_completion(&task->completion);
504 res = -ETASK;
505 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
506 int res2;
507 SAS_DPRINTK("task aborted, flags:0x%x\n",
508 task->task_state_flags);
509 res2 = i->dft->lldd_abort_task(task);
510 SAS_DPRINTK("came back from abort task\n");
511 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
512 if (res2 == TMF_RESP_FUNC_COMPLETE)
513 continue; /* Retry the task */
514 else
515 goto ex_err;
516 }
517 }
518 if (task->task_status.stat == SAM_BUSY ||
519 task->task_status.stat == SAM_TASK_SET_FULL ||
520 task->task_status.stat == SAS_QUEUE_FULL) {
521 SAS_DPRINTK("task: q busy, sleeping...\n");
522 schedule_timeout_interruptible(HZ);
523 } else if (task->task_status.stat == SAM_CHECK_COND) {
524 struct scsi_sense_hdr shdr;
525
526 if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
527 &shdr)) {
528 SAS_DPRINTK("couldn't normalize sense\n");
529 continue;
530 }
531 if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
532 (shdr.sense_key == 2 && shdr.asc == 4 &&
533 shdr.ascq == 1)) {
534 SAS_DPRINTK("device %016llx LUN: %016llx "
535 "powering up or not ready yet, "
536 "sleeping...\n",
537 SAS_ADDR(task->dev->sas_addr),
538 SAS_ADDR(task->ssp_task.LUN));
539
540 schedule_timeout_interruptible(5*HZ);
541 } else if (shdr.sense_key == 1) {
542 res = 0;
543 break;
544 } else if (shdr.sense_key == 5) {
545 break;
546 } else {
547 SAS_DPRINTK("dev %016llx LUN: %016llx "
548 "sense key:0x%x ASC:0x%x ASCQ:0x%x"
549 "\n",
550 SAS_ADDR(task->dev->sas_addr),
551 SAS_ADDR(task->ssp_task.LUN),
552 shdr.sense_key,
553 shdr.asc, shdr.ascq);
554 }
555 } else if (task->task_status.resp != SAS_TASK_COMPLETE ||
556 task->task_status.stat != SAM_GOOD) {
557 SAS_DPRINTK("task finished with resp:0x%x, "
558 "stat:0x%x\n",
559 task->task_status.resp,
560 task->task_status.stat);
561 goto ex_err;
562 } else {
563 res = 0;
564 break;
565 }
566 }
567ex_err:
568 if (pci_dma_dir != PCI_DMA_NONE) {
569 if (sas_protocol_ata(task->task_proto))
570 pci_unmap_sg(task->dev->port->ha->pcidev,
571 task->scatter, task->num_scatter,
572 task->data_dir);
573 kfree(scatter);
574 }
575out:
576 return res;
577}
578
579/* ---------- SATA ---------- */
580
581static void sas_get_ata_command_set(struct domain_device *dev)
582{
583 struct dev_to_host_fis *fis =
584 (struct dev_to_host_fis *) dev->frame_rcvd;
585
586 if ((fis->sector_count == 1 && /* ATA */
587 fis->lbal == 1 &&
588 fis->lbam == 0 &&
589 fis->lbah == 0 &&
590 fis->device == 0)
591 ||
592 (fis->sector_count == 0 && /* CE-ATA (mATA) */
593 fis->lbal == 0 &&
594 fis->lbam == 0xCE &&
595 fis->lbah == 0xAA &&
596 (fis->device & ~0x10) == 0))
597
598 dev->sata_dev.command_set = ATA_COMMAND_SET;
599
600 else if ((fis->interrupt_reason == 1 && /* ATAPI */
601 fis->lbal == 1 &&
602 fis->byte_count_low == 0x14 &&
603 fis->byte_count_high == 0xEB &&
604 (fis->device & ~0x10) == 0))
605
606 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
607
608 else if ((fis->sector_count == 1 && /* SEMB */
609 fis->lbal == 1 &&
610 fis->lbam == 0x3C &&
611 fis->lbah == 0xC3 &&
612 fis->device == 0)
613 ||
614 (fis->interrupt_reason == 1 && /* SATA PM */
615 fis->lbal == 1 &&
616 fis->byte_count_low == 0x69 &&
617 fis->byte_count_high == 0x96 &&
618 (fis->device & ~0x10) == 0))
619
620 /* Treat it as a superset? */
621 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
622}
623
624/**
625 * sas_issue_ata_cmd -- Basic SATA command processing for discovery
626 * @dev: the device to send the command to
627 * @command: the command register
628 * @features: the features register
629 * @buffer: pointer to buffer to do I/O
630 * @size: size of @buffer
631 * @pci_dma_dir: PCI_DMA_...
632 */
633static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
634 u8 features, void *buffer, int size,
635 int pci_dma_dir)
636{
637 int res = 0;
638 struct sas_task *task;
639 struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
640 &dev->frame_rcvd[0];
641
642 res = -ENOMEM;
643 task = sas_alloc_task(GFP_KERNEL);
644 if (!task)
645 goto out;
646
647 task->dev = dev;
648
649 task->ata_task.fis.fis_type = 0x27;
650 task->ata_task.fis.command = command;
651 task->ata_task.fis.features = features;
652 task->ata_task.fis.device = d2h_fis->device;
653 task->ata_task.retry_count = 1;
654
655 res = sas_execute_task(task, buffer, size, pci_dma_dir);
656
657 sas_free_task(task);
658out:
659 return res;
660}
661
662static void sas_sata_propagate_sas_addr(struct domain_device *dev)
663{
664 unsigned long flags;
665 struct asd_sas_port *port = dev->port;
666 struct asd_sas_phy *phy;
667
668 BUG_ON(dev->parent);
669
670 memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
671 spin_lock_irqsave(&port->phy_list_lock, flags);
672 list_for_each_entry(phy, &port->phy_list, port_phy_el)
673 memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
674 spin_unlock_irqrestore(&port->phy_list_lock, flags);
675}
676
677#define ATA_IDENTIFY_DEV 0xEC
678#define ATA_IDENTIFY_PACKET_DEV 0xA1
679#define ATA_SET_FEATURES 0xEF
680#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
681
682/**
683 * sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV)
684 * @dev: STP/SATA device of interest (ATA/ATAPI)
685 *
686 * The LLDD has already been notified of this device, so that we can
687 * send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY
688 * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
689 * performance for this device.
690 */
691static int sas_discover_sata_dev(struct domain_device *dev)
692{
693 int res;
694 __le16 *identify_x;
695 u8 command;
696
697 identify_x = kzalloc(512, GFP_KERNEL);
698 if (!identify_x)
699 return -ENOMEM;
700
701 if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
702 dev->sata_dev.identify_device = identify_x;
703 command = ATA_IDENTIFY_DEV;
704 } else {
705 dev->sata_dev.identify_packet_device = identify_x;
706 command = ATA_IDENTIFY_PACKET_DEV;
707 }
708
709 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
710 PCI_DMA_FROMDEVICE);
711 if (res)
712 goto out_err;
713
714 /* lives on the media? */
715 if (le16_to_cpu(identify_x[0]) & 4) {
716 /* incomplete response */
717 SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
718 "dev %llx\n", SAS_ADDR(dev->sas_addr));
719 if (!le16_to_cpu(identify_x[83] & (1<<6)))
720 goto cont1;
721 res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
722 ATA_FEATURE_PUP_STBY_SPIN_UP,
723 NULL, 0, PCI_DMA_NONE);
724 if (res)
725 goto cont1;
726
727 schedule_timeout_interruptible(5*HZ); /* More time? */
728 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
729 PCI_DMA_FROMDEVICE);
730 if (res)
731 goto out_err;
732 }
733cont1:
734 /* Get WWN */
735 if (dev->port->oob_mode != SATA_OOB_MODE) {
736 memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
737 SAS_ADDR_SIZE);
738 } else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
739 (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
740 == 0x5000) {
741 int i;
742
743 for (i = 0; i < 4; i++) {
744 dev->sas_addr[2*i] =
745 (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
746 dev->sas_addr[2*i+1] =
747 le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
748 }
749 }
750 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
751 if (!dev->parent)
752 sas_sata_propagate_sas_addr(dev);
753
754 /* XXX Hint: register this SATA device with SATL.
755 When this returns, dev->sata_dev->lu is alive and
756 present.
757 sas_satl_register_dev(dev);
758 */
759
760 sas_fill_in_rphy(dev, dev->rphy);
761
762 return 0;
763out_err:
764 dev->sata_dev.identify_packet_device = NULL;
765 dev->sata_dev.identify_device = NULL;
766 kfree(identify_x);
767 return res;
768}
769
770static int sas_discover_sata_pm(struct domain_device *dev)
771{
772 return -ENODEV;
773}
774
775/**
776 * sas_discover_sata -- discover an STP/SATA domain device
777 * @dev: pointer to struct domain_device of interest
778 *
779 * First we notify the LLDD of this device, so we can send frames to
780 * it. Then depending on the type of device we call the appropriate
781 * discover functions. Once device discover is done, we notify the
782 * LLDD so that it can fine-tune its parameters for the device, by
783 * removing it and then adding it. That is, the second time around,
784 * the driver would have certain fields, that it is looking at, set.
785 * Finally we initialize the kobj so that the device can be added to
786 * the system at registration time. Devices directly attached to a HA
787 * port, have no parents. All other devices do, and should have their
788 * "parent" pointer set appropriately before calling this function.
789 */
790int sas_discover_sata(struct domain_device *dev)
791{
792 int res;
793
794 sas_get_ata_command_set(dev);
795
796 res = sas_notify_lldd_dev_found(dev);
797 if (res)
798 return res;
799
800 switch (dev->dev_type) {
801 case SATA_DEV:
802 res = sas_discover_sata_dev(dev);
803 break;
804 case SATA_PM:
805 res = sas_discover_sata_pm(dev);
806 break;
807 default:
808 break;
809 }
810 sas_notify_lldd_dev_gone(dev);
811 if (!res) {
812 sas_notify_lldd_dev_found(dev);
813 res = sas_rphy_add(dev->rphy);
814 }
815
816 return res;
817}
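Both the qc-issue path (via ata_tf_to_fis()) and the discovery helper sas_issue_ata_cmd() above end up with a Register Host-to-Device FIS in task->ata_task.fis: fis_type 0x27, with bit 7 of the flags byte acting as the C bit that tells the device to load the command register (the aic94xx hunk earlier sets that bit and clears the PM port field). A sketch of the leading bytes of that FIS per the SATA spec; the struct name is illustrative and only the fields used here are shown:

#include <linux/types.h>

struct h2d_register_fis_sketch {
	u8 fis_type;	/* 0x27: Register FIS, host to device */
	u8 flags;	/* bits 0-3: PM port (kept 0 here); bit 7: C, load command reg */
	u8 command;	/* e.g. 0xEC ATA_IDENTIFY_DEV or 0xA1 ATA_IDENTIFY_PACKET_DEV */
	u8 features;	/* e.g. ATA_FEATURE_PUP_STBY_SPIN_UP with ATA_SET_FEATURES */
	/* LBA low/mid/high, device and sector-count registers follow. */
};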
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index a65598b1e536..6ac9f61d006a 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -55,149 +55,6 @@ void sas_init_dev(struct domain_device *dev)
55 } 55 }
56} 56}
57 57
58static void sas_task_timedout(unsigned long _task)
59{
60 struct sas_task *task = (void *) _task;
61 unsigned long flags;
62
63 spin_lock_irqsave(&task->task_state_lock, flags);
64 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
65 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
66 spin_unlock_irqrestore(&task->task_state_lock, flags);
67
68 complete(&task->completion);
69}
70
71static void sas_disc_task_done(struct sas_task *task)
72{
73 if (!del_timer(&task->timer))
74 return;
75 complete(&task->completion);
76}
77
78#define SAS_DEV_TIMEOUT 10
79
80/**
81 * sas_execute_task -- Basic task processing for discovery
82 * @task: the task to be executed
83 * @buffer: pointer to buffer to do I/O
84 * @size: size of @buffer
85 * @pci_dma_dir: PCI_DMA_...
86 */
87static int sas_execute_task(struct sas_task *task, void *buffer, int size,
88 int pci_dma_dir)
89{
90 int res = 0;
91 struct scatterlist *scatter = NULL;
92 struct task_status_struct *ts = &task->task_status;
93 int num_scatter = 0;
94 int retries = 0;
95 struct sas_internal *i =
96 to_sas_internal(task->dev->port->ha->core.shost->transportt);
97
98 if (pci_dma_dir != PCI_DMA_NONE) {
99 scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
100 if (!scatter)
101 goto out;
102
103 sg_init_one(scatter, buffer, size);
104 num_scatter = 1;
105 }
106
107 task->task_proto = task->dev->tproto;
108 task->scatter = scatter;
109 task->num_scatter = num_scatter;
110 task->total_xfer_len = size;
111 task->data_dir = pci_dma_dir;
112 task->task_done = sas_disc_task_done;
113
114 for (retries = 0; retries < 5; retries++) {
115 task->task_state_flags = SAS_TASK_STATE_PENDING;
116 init_completion(&task->completion);
117
118 task->timer.data = (unsigned long) task;
119 task->timer.function = sas_task_timedout;
120 task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
121 add_timer(&task->timer);
122
123 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
124 if (res) {
125 del_timer(&task->timer);
126 SAS_DPRINTK("executing SAS discovery task failed:%d\n",
127 res);
128 goto ex_err;
129 }
130 wait_for_completion(&task->completion);
131 res = -ETASK;
132 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
133 int res2;
134 SAS_DPRINTK("task aborted, flags:0x%x\n",
135 task->task_state_flags);
136 res2 = i->dft->lldd_abort_task(task);
137 SAS_DPRINTK("came back from abort task\n");
138 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
139 if (res2 == TMF_RESP_FUNC_COMPLETE)
140 continue; /* Retry the task */
141 else
142 goto ex_err;
143 }
144 }
145 if (task->task_status.stat == SAM_BUSY ||
146 task->task_status.stat == SAM_TASK_SET_FULL ||
147 task->task_status.stat == SAS_QUEUE_FULL) {
148 SAS_DPRINTK("task: q busy, sleeping...\n");
149 schedule_timeout_interruptible(HZ);
150 } else if (task->task_status.stat == SAM_CHECK_COND) {
151 struct scsi_sense_hdr shdr;
152
153 if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
154 &shdr)) {
155 SAS_DPRINTK("couldn't normalize sense\n");
156 continue;
157 }
158 if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
159 (shdr.sense_key == 2 && shdr.asc == 4 &&
160 shdr.ascq == 1)) {
161 SAS_DPRINTK("device %016llx LUN: %016llx "
162 "powering up or not ready yet, "
163 "sleeping...\n",
164 SAS_ADDR(task->dev->sas_addr),
165 SAS_ADDR(task->ssp_task.LUN));
166
167 schedule_timeout_interruptible(5*HZ);
168 } else if (shdr.sense_key == 1) {
169 res = 0;
170 break;
171 } else if (shdr.sense_key == 5) {
172 break;
173 } else {
174 SAS_DPRINTK("dev %016llx LUN: %016llx "
175 "sense key:0x%x ASC:0x%x ASCQ:0x%x"
176 "\n",
177 SAS_ADDR(task->dev->sas_addr),
178 SAS_ADDR(task->ssp_task.LUN),
179 shdr.sense_key,
180 shdr.asc, shdr.ascq);
181 }
182 } else if (task->task_status.resp != SAS_TASK_COMPLETE ||
183 task->task_status.stat != SAM_GOOD) {
184 SAS_DPRINTK("task finished with resp:0x%x, "
185 "stat:0x%x\n",
186 task->task_status.resp,
187 task->task_status.stat);
188 goto ex_err;
189 } else {
190 res = 0;
191 break;
192 }
193 }
194ex_err:
195 if (pci_dma_dir != PCI_DMA_NONE)
196 kfree(scatter);
197out:
198 return res;
199}
200
201/* ---------- Domain device discovery ---------- */ 58/* ---------- Domain device discovery ---------- */
202 59
203/** 60/**
@@ -255,6 +112,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
255 112
256 switch (dev->dev_type) { 113 switch (dev->dev_type) {
257 case SAS_END_DEV: 114 case SAS_END_DEV:
115 case SATA_DEV:
258 rphy = sas_end_device_alloc(port->port); 116 rphy = sas_end_device_alloc(port->port);
259 break; 117 break;
260 case EDGE_DEV: 118 case EDGE_DEV:
@@ -265,7 +123,6 @@ static int sas_get_port_device(struct asd_sas_port *port)
265 rphy = sas_expander_alloc(port->port, 123 rphy = sas_expander_alloc(port->port,
266 SAS_FANOUT_EXPANDER_DEVICE); 124 SAS_FANOUT_EXPANDER_DEVICE);
267 break; 125 break;
268 case SATA_DEV:
269 default: 126 default:
270 printk("ERROR: Unidentified device type %d\n", dev->dev_type); 127 printk("ERROR: Unidentified device type %d\n", dev->dev_type);
271 rphy = NULL; 128 rphy = NULL;
@@ -292,207 +149,15 @@ static int sas_get_port_device(struct asd_sas_port *port)
292 port->disc.max_level = 0; 149 port->disc.max_level = 0;
293 150
294 dev->rphy = rphy; 151 dev->rphy = rphy;
295 spin_lock(&port->dev_list_lock); 152 spin_lock_irq(&port->dev_list_lock);
296 list_add_tail(&dev->dev_list_node, &port->dev_list); 153 list_add_tail(&dev->dev_list_node, &port->dev_list);
297 spin_unlock(&port->dev_list_lock); 154 spin_unlock_irq(&port->dev_list_lock);
298 155
299 return 0; 156 return 0;
300} 157}
301 158
302/* ---------- Discover and Revalidate ---------- */ 159/* ---------- Discover and Revalidate ---------- */
303 160
304/* ---------- SATA ---------- */
305
306static void sas_get_ata_command_set(struct domain_device *dev)
307{
308 struct dev_to_host_fis *fis =
309 (struct dev_to_host_fis *) dev->frame_rcvd;
310
311 if ((fis->sector_count == 1 && /* ATA */
312 fis->lbal == 1 &&
313 fis->lbam == 0 &&
314 fis->lbah == 0 &&
315 fis->device == 0)
316 ||
317 (fis->sector_count == 0 && /* CE-ATA (mATA) */
318 fis->lbal == 0 &&
319 fis->lbam == 0xCE &&
320 fis->lbah == 0xAA &&
321 (fis->device & ~0x10) == 0))
322
323 dev->sata_dev.command_set = ATA_COMMAND_SET;
324
325 else if ((fis->interrupt_reason == 1 && /* ATAPI */
326 fis->lbal == 1 &&
327 fis->byte_count_low == 0x14 &&
328 fis->byte_count_high == 0xEB &&
329 (fis->device & ~0x10) == 0))
330
331 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
332
333 else if ((fis->sector_count == 1 && /* SEMB */
334 fis->lbal == 1 &&
335 fis->lbam == 0x3C &&
336 fis->lbah == 0xC3 &&
337 fis->device == 0)
338 ||
339 (fis->interrupt_reason == 1 && /* SATA PM */
340 fis->lbal == 1 &&
341 fis->byte_count_low == 0x69 &&
342 fis->byte_count_high == 0x96 &&
343 (fis->device & ~0x10) == 0))
344
345 /* Treat it as a superset? */
346 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
347}
348
349/**
350 * sas_issue_ata_cmd -- Basic SATA command processing for discovery
351 * @dev: the device to send the command to
352 * @command: the command register
353 * @features: the features register
354 * @buffer: pointer to buffer to do I/O
355 * @size: size of @buffer
356 * @pci_dma_dir: PCI_DMA_...
357 */
358static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
359 u8 features, void *buffer, int size,
360 int pci_dma_dir)
361{
362 int res = 0;
363 struct sas_task *task;
364 struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
365 &dev->frame_rcvd[0];
366
367 res = -ENOMEM;
368 task = sas_alloc_task(GFP_KERNEL);
369 if (!task)
370 goto out;
371
372 task->dev = dev;
373
374 task->ata_task.fis.command = command;
375 task->ata_task.fis.features = features;
376 task->ata_task.fis.device = d2h_fis->device;
377 task->ata_task.retry_count = 1;
378
379 res = sas_execute_task(task, buffer, size, pci_dma_dir);
380
381 sas_free_task(task);
382out:
383 return res;
384}
385
386static void sas_sata_propagate_sas_addr(struct domain_device *dev)
387{
388 unsigned long flags;
389 struct asd_sas_port *port = dev->port;
390 struct asd_sas_phy *phy;
391
392 BUG_ON(dev->parent);
393
394 memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
395 spin_lock_irqsave(&port->phy_list_lock, flags);
396 list_for_each_entry(phy, &port->phy_list, port_phy_el)
397 memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
398 spin_unlock_irqrestore(&port->phy_list_lock, flags);
399}
400
401#define ATA_IDENTIFY_DEV 0xEC
402#define ATA_IDENTIFY_PACKET_DEV 0xA1
403#define ATA_SET_FEATURES 0xEF
404#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
405
406/**
407 * sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV)
408 * @dev: STP/SATA device of interest (ATA/ATAPI)
409 *
410 * The LLDD has already been notified of this device, so that we can
411 * send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY
412 * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
413 * performance for this device.
414 */
415static int sas_discover_sata_dev(struct domain_device *dev)
416{
417 int res;
418 __le16 *identify_x;
419 u8 command;
420
421 identify_x = kzalloc(512, GFP_KERNEL);
422 if (!identify_x)
423 return -ENOMEM;
424
425 if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
426 dev->sata_dev.identify_device = identify_x;
427 command = ATA_IDENTIFY_DEV;
428 } else {
429 dev->sata_dev.identify_packet_device = identify_x;
430 command = ATA_IDENTIFY_PACKET_DEV;
431 }
432
433 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
434 PCI_DMA_FROMDEVICE);
435 if (res)
436 goto out_err;
437
438 /* lives on the media? */
439 if (le16_to_cpu(identify_x[0]) & 4) {
440 /* incomplete response */
441 SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
442 "dev %llx\n", SAS_ADDR(dev->sas_addr));
443 if (!le16_to_cpu(identify_x[83] & (1<<6)))
444 goto cont1;
445 res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
446 ATA_FEATURE_PUP_STBY_SPIN_UP,
447 NULL, 0, PCI_DMA_NONE);
448 if (res)
449 goto cont1;
450
451 schedule_timeout_interruptible(5*HZ); /* More time? */
452 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
453 PCI_DMA_FROMDEVICE);
454 if (res)
455 goto out_err;
456 }
457cont1:
458 /* Get WWN */
459 if (dev->port->oob_mode != SATA_OOB_MODE) {
460 memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
461 SAS_ADDR_SIZE);
462 } else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
463 (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
464 == 0x5000) {
465 int i;
466
467 for (i = 0; i < 4; i++) {
468 dev->sas_addr[2*i] =
469 (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
470 dev->sas_addr[2*i+1] =
471 le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
472 }
473 }
474 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
475 if (!dev->parent)
476 sas_sata_propagate_sas_addr(dev);
477
478 /* XXX Hint: register this SATA device with SATL.
479 When this returns, dev->sata_dev->lu is alive and
480 present.
481 sas_satl_register_dev(dev);
482 */
483 return 0;
484out_err:
485 dev->sata_dev.identify_packet_device = NULL;
486 dev->sata_dev.identify_device = NULL;
487 kfree(identify_x);
488 return res;
489}
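
For reference (an illustration, not from the patch): ATA IDENTIFY words 108-111 carry the 64-bit NAA-format world wide name, and the check above only trusts them when the top nibble of word 108 is 5 (NAA IEEE Registered). The copy loop stores each word high byte first, so identify words 0x5000 0xC500 0x1234 0x5678 would yield the SAS address 50:00:c5:00:12:34:56:78.
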
490
491static int sas_discover_sata_pm(struct domain_device *dev)
492{
493 return -ENODEV;
494}
495
496int sas_notify_lldd_dev_found(struct domain_device *dev) 161int sas_notify_lldd_dev_found(struct domain_device *dev)
497{ 162{
498 int res = 0; 163 int res = 0;
@@ -525,60 +190,6 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
525 190
526/* ---------- Common/dispatchers ---------- */ 191/* ---------- Common/dispatchers ---------- */
527 192
528/**
529 * sas_discover_sata -- discover an STP/SATA domain device
530 * @dev: pointer to struct domain_device of interest
531 *
532 * First we notify the LLDD of this device, so we can send frames to
533 * it. Then depending on the type of device we call the appropriate
534 * discover functions. Once device discovery is done, we notify the
535 * LLDD so that it can fine-tune its parameters for the device by
536 * removing it and then adding it back. That is, the second time
537 * around, the driver sees the fields it cares about already set.
538 * Finally we initialize the kobj so that the device can be added to
539 * the system at registration time. Devices directly attached to an HA
540 * port have no parents. All other devices do, and should have their
541 * "parent" pointer set appropriately before calling this function.
542 */
543int sas_discover_sata(struct domain_device *dev)
544{
545 int res;
546
547 sas_get_ata_command_set(dev);
548
549 res = sas_notify_lldd_dev_found(dev);
550 if (res)
551 goto out_err2;
552
553 switch (dev->dev_type) {
554 case SATA_DEV:
555 res = sas_discover_sata_dev(dev);
556 break;
557 case SATA_PM:
558 res = sas_discover_sata_pm(dev);
559 break;
560 default:
561 break;
562 }
563 if (res)
564 goto out_err;
565
566 sas_notify_lldd_dev_gone(dev);
567 res = sas_notify_lldd_dev_found(dev);
568 if (res)
569 goto out_err2;
570
571 res = sas_rphy_add(dev->rphy);
572 if (res)
573 goto out_err;
574
575 return res;
576
577out_err:
578 sas_notify_lldd_dev_gone(dev);
579out_err2:
580 return res;
581}
582 193
583/** 194/**
584 * sas_discover_end_dev -- discover an end device (SSP, etc) 195 * sas_discover_end_dev -- discover an end device (SSP, etc)
@@ -685,11 +296,14 @@ static void sas_discover_domain(struct work_struct *work)
685 case FANOUT_DEV: 296 case FANOUT_DEV:
686 error = sas_discover_root_expander(dev); 297 error = sas_discover_root_expander(dev);
687 break; 298 break;
299#ifdef CONFIG_SCSI_SAS_ATA
688 case SATA_DEV: 300 case SATA_DEV:
689 case SATA_PM: 301 case SATA_PM:
690 error = sas_discover_sata(dev); 302 error = sas_discover_sata(dev);
691 break; 303 break;
304#endif
692 default: 305 default:
306 error = -ENXIO;
693 SAS_DPRINTK("unhandled device %d\n", dev->dev_type); 307 SAS_DPRINTK("unhandled device %d\n", dev->dev_type);
694 break; 308 break;
695 } 309 }
@@ -698,9 +312,9 @@ static void sas_discover_domain(struct work_struct *work)
698 sas_rphy_free(dev->rphy); 312 sas_rphy_free(dev->rphy);
699 dev->rphy = NULL; 313 dev->rphy = NULL;
700 314
701 spin_lock(&port->dev_list_lock); 315 spin_lock_irq(&port->dev_list_lock);
702 list_del_init(&dev->dev_list_node); 316 list_del_init(&dev->dev_list_node);
703 spin_unlock(&port->dev_list_lock); 317 spin_unlock_irq(&port->dev_list_lock);
704 318
705 kfree(dev); /* not kobject_register-ed yet */ 319 kfree(dev); /* not kobject_register-ed yet */
706 port->port_dev = NULL; 320 port->port_dev = NULL;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 23e90c5f8f35..b500f0c1449c 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/scatterlist.h> 25#include <linux/scatterlist.h>
26#include <linux/blkdev.h>
26 27
27#include "sas_internal.h" 28#include "sas_internal.h"
28 29
@@ -36,14 +37,6 @@ static int sas_configure_phy(struct domain_device *dev, int phy_id,
36 u8 *sas_addr, int include); 37 u8 *sas_addr, int include);
37static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr); 38static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
38 39
39#if 0
40/* FIXME: smp needs to migrate into the sas class */
41static ssize_t smp_portal_read(struct kobject *, struct bin_attribute *,
42 char *, loff_t, size_t);
43static ssize_t smp_portal_write(struct kobject *, struct bin_attribute *,
44 char *, loff_t, size_t);
45#endif
46
47/* ---------- SMP task management ---------- */ 40/* ---------- SMP task management ---------- */
48 41
49static void smp_task_timedout(unsigned long _task) 42static void smp_task_timedout(unsigned long _task)
@@ -220,6 +213,36 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
220#define DISCOVER_REQ_SIZE 16 213#define DISCOVER_REQ_SIZE 16
221#define DISCOVER_RESP_SIZE 56 214#define DISCOVER_RESP_SIZE 56
222 215
216static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
217 u8 *disc_resp, int single)
218{
219 int i, res;
220
221 disc_req[9] = single;
222 for (i = 1 ; i < 3; i++) {
223 struct discover_resp *dr;
224
225 res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
226 disc_resp, DISCOVER_RESP_SIZE);
227 if (res)
228 return res;
229 /* This is detecting a failure to transmit the initial
230 * dev to host FIS as described in section G.5 of
231 * sas-2 r 04b */
232 dr = &((struct smp_resp *)disc_resp)->disc;
233 if (!(dr->attached_dev_type == 0 &&
234 dr->attached_sata_dev))
235 break;
236 /* In order to generate the dev to host FIS, we
237 * send a link reset to the expander port */
238 sas_smp_phy_control(dev, single, PHY_FUNC_LINK_RESET, NULL);
239 /* Wait for the reset to trigger the negotiation */
240 msleep(500);
241 }
242 sas_set_ex_phy(dev, single, disc_resp);
243 return 0;
244}
245
223static int sas_ex_phy_discover(struct domain_device *dev, int single) 246static int sas_ex_phy_discover(struct domain_device *dev, int single)
224{ 247{
225 struct expander_device *ex = &dev->ex_dev; 248 struct expander_device *ex = &dev->ex_dev;
@@ -240,23 +263,15 @@ static int sas_ex_phy_discover(struct domain_device *dev, int single)
240 disc_req[1] = SMP_DISCOVER; 263 disc_req[1] = SMP_DISCOVER;
241 264
242 if (0 <= single && single < ex->num_phys) { 265 if (0 <= single && single < ex->num_phys) {
243 disc_req[9] = single; 266 res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single);
244 res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
245 disc_resp, DISCOVER_RESP_SIZE);
246 if (res)
247 goto out_err;
248 sas_set_ex_phy(dev, single, disc_resp);
249 } else { 267 } else {
250 int i; 268 int i;
251 269
252 for (i = 0; i < ex->num_phys; i++) { 270 for (i = 0; i < ex->num_phys; i++) {
253 disc_req[9] = i; 271 res = sas_ex_phy_discover_helper(dev, disc_req,
254 res = smp_execute_task(dev, disc_req, 272 disc_resp, i);
255 DISCOVER_REQ_SIZE, disc_resp,
256 DISCOVER_RESP_SIZE);
257 if (res) 273 if (res)
258 goto out_err; 274 goto out_err;
259 sas_set_ex_phy(dev, i, disc_resp);
260 } 275 }
261 } 276 }
262out_err: 277out_err:
@@ -520,6 +535,8 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
520 535
521} 536}
522 537
538#ifdef CONFIG_SCSI_SAS_ATA
539
523#define RPS_REQ_SIZE 16 540#define RPS_REQ_SIZE 16
524#define RPS_RESP_SIZE 60 541#define RPS_RESP_SIZE 60
525 542
@@ -529,6 +546,7 @@ static int sas_get_report_phy_sata(struct domain_device *dev,
529{ 546{
530 int res; 547 int res;
531 u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE); 548 u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
549 u8 *resp = (u8 *)rps_resp;
532 550
533 if (!rps_req) 551 if (!rps_req)
534 return -ENOMEM; 552 return -ENOMEM;
@@ -539,9 +557,30 @@ static int sas_get_report_phy_sata(struct domain_device *dev,
539 res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE, 557 res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
540 rps_resp, RPS_RESP_SIZE); 558 rps_resp, RPS_RESP_SIZE);
541 559
 560 /* 0x34 is the FIS type for the D2H FIS. There's a potential
 561 * standards cockup here. sas-2 explicitly specifies that the FIS
 562 * should be encoded so that the FIS type is in resp[24].
 563 * However, some expanders endian-reverse this. Undo the
 564 * reversal here. */
565 if (!res && resp[27] == 0x34 && resp[24] != 0x34) {
566 int i;
567
568 for (i = 0; i < 5; i++) {
569 int j = 24 + (i*4);
570 u8 a, b;
571 a = resp[j + 0];
572 b = resp[j + 1];
573 resp[j + 0] = resp[j + 3];
574 resp[j + 1] = resp[j + 2];
575 resp[j + 2] = b;
576 resp[j + 3] = a;
577 }
578 }
579
542 kfree(rps_req); 580 kfree(rps_req);
543 return 0; 581 return res;
544} 582}
583#endif
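
An aside, not part of the patch: the shuffle above is simply a 32-bit byte swap applied to each of the five dwords of the embedded FIS, so an equivalent sketch using the kernel's swab32() helper (assuming a 4-byte-aligned response buffer) would be:

/* Illustrative equivalent of the un-reversal loop above. */
static void sas_unreverse_rps_fis(u8 *resp)
{
	int i;

	for (i = 24; i < 24 + 20; i += 4)
		*(u32 *)&resp[i] = swab32(*(u32 *)&resp[i]);
}
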
545 584
546static void sas_ex_get_linkrate(struct domain_device *parent, 585static void sas_ex_get_linkrate(struct domain_device *parent,
547 struct domain_device *child, 586 struct domain_device *child,
@@ -609,6 +648,7 @@ static struct domain_device *sas_ex_discover_end_dev(
609 } 648 }
610 sas_ex_get_linkrate(parent, child, phy); 649 sas_ex_get_linkrate(parent, child, phy);
611 650
651#ifdef CONFIG_SCSI_SAS_ATA
612 if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) { 652 if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
613 child->dev_type = SATA_DEV; 653 child->dev_type = SATA_DEV;
614 if (phy->attached_tproto & SAS_PROTO_STP) 654 if (phy->attached_tproto & SAS_PROTO_STP)
@@ -625,16 +665,30 @@ static struct domain_device *sas_ex_discover_end_dev(
625 } 665 }
626 memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis, 666 memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
627 sizeof(struct dev_to_host_fis)); 667 sizeof(struct dev_to_host_fis));
668
669 rphy = sas_end_device_alloc(phy->port);
670 if (unlikely(!rphy))
671 goto out_free;
672
628 sas_init_dev(child); 673 sas_init_dev(child);
674
675 child->rphy = rphy;
676
677 spin_lock_irq(&parent->port->dev_list_lock);
678 list_add_tail(&child->dev_list_node, &parent->port->dev_list);
679 spin_unlock_irq(&parent->port->dev_list_lock);
680
629 res = sas_discover_sata(child); 681 res = sas_discover_sata(child);
630 if (res) { 682 if (res) {
631 SAS_DPRINTK("sas_discover_sata() for device %16llx at " 683 SAS_DPRINTK("sas_discover_sata() for device %16llx at "
632 "%016llx:0x%x returned 0x%x\n", 684 "%016llx:0x%x returned 0x%x\n",
633 SAS_ADDR(child->sas_addr), 685 SAS_ADDR(child->sas_addr),
634 SAS_ADDR(parent->sas_addr), phy_id, res); 686 SAS_ADDR(parent->sas_addr), phy_id, res);
635 goto out_free; 687 goto out_list_del;
636 } 688 }
637 } else if (phy->attached_tproto & SAS_PROTO_SSP) { 689 } else
690#endif
691 if (phy->attached_tproto & SAS_PROTO_SSP) {
638 child->dev_type = SAS_END_DEV; 692 child->dev_type = SAS_END_DEV;
639 rphy = sas_end_device_alloc(phy->port); 693 rphy = sas_end_device_alloc(phy->port);
640 /* FIXME: error handling */ 694 /* FIXME: error handling */
@@ -646,9 +700,9 @@ static struct domain_device *sas_ex_discover_end_dev(
646 child->rphy = rphy; 700 child->rphy = rphy;
647 sas_fill_in_rphy(child, rphy); 701 sas_fill_in_rphy(child, rphy);
648 702
649 spin_lock(&parent->port->dev_list_lock); 703 spin_lock_irq(&parent->port->dev_list_lock);
650 list_add_tail(&child->dev_list_node, &parent->port->dev_list); 704 list_add_tail(&child->dev_list_node, &parent->port->dev_list);
651 spin_unlock(&parent->port->dev_list_lock); 705 spin_unlock_irq(&parent->port->dev_list_lock);
652 706
653 res = sas_discover_end_dev(child); 707 res = sas_discover_end_dev(child);
654 if (res) { 708 if (res) {
@@ -662,6 +716,7 @@ static struct domain_device *sas_ex_discover_end_dev(
662 SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", 716 SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
663 phy->attached_tproto, SAS_ADDR(parent->sas_addr), 717 phy->attached_tproto, SAS_ADDR(parent->sas_addr),
664 phy_id); 718 phy_id);
719 goto out_free;
665 } 720 }
666 721
667 list_add_tail(&child->siblings, &parent_ex->children); 722 list_add_tail(&child->siblings, &parent_ex->children);
@@ -761,9 +816,9 @@ static struct domain_device *sas_ex_discover_expander(
761 sas_fill_in_rphy(child, rphy); 816 sas_fill_in_rphy(child, rphy);
762 sas_rphy_add(rphy); 817 sas_rphy_add(rphy);
763 818
764 spin_lock(&parent->port->dev_list_lock); 819 spin_lock_irq(&parent->port->dev_list_lock);
765 list_add_tail(&child->dev_list_node, &parent->port->dev_list); 820 list_add_tail(&child->dev_list_node, &parent->port->dev_list);
766 spin_unlock(&parent->port->dev_list_lock); 821 spin_unlock_irq(&parent->port->dev_list_lock);
767 822
768 res = sas_discover_expander(child); 823 res = sas_discover_expander(child);
769 if (res) { 824 if (res) {
@@ -1359,30 +1414,6 @@ static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr)
1359 return 0; 1414 return 0;
1360} 1415}
1361 1416
1362#if 0
1363#define SMP_BIN_ATTR_NAME "smp_portal"
1364
1365static void sas_ex_smp_hook(struct domain_device *dev)
1366{
1367 struct expander_device *ex_dev = &dev->ex_dev;
1368 struct bin_attribute *bin_attr = &ex_dev->smp_bin_attr;
1369
1370 memset(bin_attr, 0, sizeof(*bin_attr));
1371
1372 bin_attr->attr.name = SMP_BIN_ATTR_NAME;
1373 bin_attr->attr.mode = 0600;
1374
1375 bin_attr->size = 0;
1376 bin_attr->private = NULL;
1377 bin_attr->read = smp_portal_read;
1378 bin_attr->write= smp_portal_write;
1379 bin_attr->mmap = NULL;
1380
1381 ex_dev->smp_portal_pid = -1;
1382 init_MUTEX(&ex_dev->smp_sema);
1383}
1384#endif
1385
1386/** 1417/**
1387 * sas_discover_expander -- expander discovery 1418 * sas_discover_expander -- expander discovery
1388 * @ex: pointer to expander domain device 1419 * @ex: pointer to expander domain device
@@ -1844,76 +1875,49 @@ out:
1844 return res; 1875 return res;
1845} 1876}
1846 1877
1847#if 0 1878int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1848/* ---------- SMP portal ---------- */ 1879 struct request *req)
1849
1850static ssize_t smp_portal_write(struct kobject *kobj,
1851 struct bin_attribute *bin_attr,
1852 char *buf, loff_t offs, size_t size)
1853{ 1880{
1854 struct domain_device *dev = to_dom_device(kobj); 1881 struct domain_device *dev;
1855 struct expander_device *ex = &dev->ex_dev; 1882 int ret, type = rphy->identify.device_type;
1856 1883 struct request *rsp = req->next_rq;
1857 if (offs != 0)
1858 return -EFBIG;
1859 else if (size == 0)
1860 return 0;
1861 1884
1862 down_interruptible(&ex->smp_sema); 1885 if (!rsp) {
1863 if (ex->smp_req) 1886 printk("%s: space for a smp response is missing\n",
1864 kfree(ex->smp_req); 1887 __FUNCTION__);
1865 ex->smp_req = kzalloc(size, GFP_USER); 1888 return -EINVAL;
1866 if (!ex->smp_req) {
1867 up(&ex->smp_sema);
1868 return -ENOMEM;
1869 } 1889 }
1870 memcpy(ex->smp_req, buf, size);
1871 ex->smp_req_size = size;
1872 ex->smp_portal_pid = current->pid;
1873 up(&ex->smp_sema);
1874 1890
 1875 return size; 1891 /* seems aic94xx doesn't support SMP to the host itself */
1876} 1892 if (!rphy) {
1877 1893 printk("%s: can we send a smp request to a host?\n",
1878static ssize_t smp_portal_read(struct kobject *kobj, 1894 __FUNCTION__);
1879 struct bin_attribute *bin_attr, 1895 return -EINVAL;
1880 char *buf, loff_t offs, size_t size) 1896 }
1881{
1882 struct domain_device *dev = to_dom_device(kobj);
1883 struct expander_device *ex = &dev->ex_dev;
1884 u8 *smp_resp;
1885 int res = -EINVAL;
1886
1887 /* XXX: sysfs gives us an offset of 0x10 or 0x8 while in fact
1888 * it should be 0.
1889 */
1890 1897
1891 down_interruptible(&ex->smp_sema); 1898 if (type != SAS_EDGE_EXPANDER_DEVICE &&
1892 if (!ex->smp_req || ex->smp_portal_pid != current->pid) 1899 type != SAS_FANOUT_EXPANDER_DEVICE) {
1893 goto out; 1900 printk("%s: can we send a smp request to a device?\n",
1901 __FUNCTION__);
1902 return -EINVAL;
1903 }
1894 1904
1895 res = 0; 1905 dev = sas_find_dev_by_rphy(rphy);
1896 if (size == 0) 1906 if (!dev) {
 1897 goto out; 1907 printk("%s: failed to find a domain_device\n", __FUNCTION__);
1908 return -EINVAL;
1909 }
1898 1910
1899 res = -ENOMEM; 1911 /* do we need to support multiple segments? */
1900 smp_resp = alloc_smp_resp(size); 1912 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1901 if (!smp_resp) 1913 printk("%s: multiple segments req %u %u, rsp %u %u\n",
1902 goto out; 1914 __FUNCTION__, req->bio->bi_vcnt, req->data_len,
1903 res = smp_execute_task(dev, ex->smp_req, ex->smp_req_size, 1915 rsp->bio->bi_vcnt, rsp->data_len);
1904 smp_resp, size); 1916 return -EINVAL;
1905 if (!res) {
1906 memcpy(buf, smp_resp, size);
1907 res = size;
1908 } 1917 }
1909 1918
1910 kfree(smp_resp); 1919 ret = smp_execute_task(dev, bio_data(req->bio), req->data_len,
1911out: 1920 bio_data(rsp->bio), rsp->data_len);
1912 kfree(ex->smp_req); 1921
1913 ex->smp_req = NULL; 1922 return ret;
1914 ex->smp_req_size = 0;
1915 ex->smp_portal_pid = -1;
1916 up(&ex->smp_sema);
1917 return res;
1918} 1923}
1919#endif
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 1396c83b0c9c..9cd5abe9e714 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -259,6 +259,7 @@ static struct sas_function_template sft = {
259 .phy_reset = sas_phy_reset, 259 .phy_reset = sas_phy_reset,
260 .set_phy_speed = sas_set_phy_speed, 260 .set_phy_speed = sas_set_phy_speed,
261 .get_linkerrors = sas_get_linkerrors, 261 .get_linkerrors = sas_get_linkerrors,
262 .smp_handler = sas_smp_handler,
262}; 263};
263 264
264struct scsi_transport_template * 265struct scsi_transport_template *
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index a78638df2018..2b8213b1832d 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -39,6 +39,9 @@
39#define SAS_DPRINTK(fmt, ...) 39#define SAS_DPRINTK(fmt, ...)
40#endif 40#endif
41 41
42#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
43#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
44
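
Hoisting these two macros into sas_internal.h lets sas_ata.c share the convention that sas_scsi_host.c already uses; illustratively (the helper names here are made up):

/* Illustrative only: pairing a sas_task with its scsi_cmnd. */
static inline void example_bind(struct scsi_cmnd *cmd, struct sas_task *task)
{
	ASSIGN_SAS_TASK(cmd, task);	/* stashed in cmd->host_scribble */
}

static inline struct sas_task *example_lookup(struct scsi_cmnd *cmd)
{
	return TO_SAS_TASK(cmd);	/* recovered at completion or abort */
}
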
42void sas_scsi_recover_host(struct Scsi_Host *shost); 45void sas_scsi_recover_host(struct Scsi_Host *shost);
43 46
44int sas_show_class(enum sas_class class, char *buf); 47int sas_show_class(enum sas_class class, char *buf);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9c5342e7a69c..7663841eb4cf 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -34,6 +34,7 @@
34#include <scsi/scsi_eh.h> 34#include <scsi/scsi_eh.h>
35#include <scsi/scsi_transport.h> 35#include <scsi/scsi_transport.h>
36#include <scsi/scsi_transport_sas.h> 36#include <scsi/scsi_transport_sas.h>
37#include <scsi/sas_ata.h>
37#include "../scsi_sas_internal.h" 38#include "../scsi_sas_internal.h"
38#include "../scsi_transport_api.h" 39#include "../scsi_transport_api.h"
39#include "../scsi_priv.h" 40#include "../scsi_priv.h"
@@ -42,12 +43,10 @@
42#include <linux/blkdev.h> 43#include <linux/blkdev.h>
43#include <linux/freezer.h> 44#include <linux/freezer.h>
44#include <linux/scatterlist.h> 45#include <linux/scatterlist.h>
46#include <linux/libata.h>
45 47
46/* ---------- SCSI Host glue ---------- */ 48/* ---------- SCSI Host glue ---------- */
47 49
48#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
49#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
50
51static void sas_scsi_task_done(struct sas_task *task) 50static void sas_scsi_task_done(struct sas_task *task)
52{ 51{
53 struct task_status_struct *ts = &task->task_status; 52 struct task_status_struct *ts = &task->task_status;
@@ -172,7 +171,7 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
172 return task; 171 return task;
173} 172}
174 173
175static int sas_queue_up(struct sas_task *task) 174int sas_queue_up(struct sas_task *task)
176{ 175{
177 struct sas_ha_struct *sas_ha = task->dev->port->ha; 176 struct sas_ha_struct *sas_ha = task->dev->port->ha;
178 struct scsi_core *core = &sas_ha->core; 177 struct scsi_core *core = &sas_ha->core;
@@ -213,6 +212,16 @@ int sas_queuecommand(struct scsi_cmnd *cmd,
213 struct sas_ha_struct *sas_ha = dev->port->ha; 212 struct sas_ha_struct *sas_ha = dev->port->ha;
214 struct sas_task *task; 213 struct sas_task *task;
215 214
215 if (dev_is_sata(dev)) {
216 unsigned long flags;
217
218 spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
219 res = ata_sas_queuecmd(cmd, scsi_done,
220 dev->sata_dev.ap);
221 spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
222 goto out;
223 }
224
216 res = -ENOMEM; 225 res = -ENOMEM;
217 task = sas_create_task(cmd, dev, GFP_ATOMIC); 226 task = sas_create_task(cmd, dev, GFP_ATOMIC);
218 if (!task) 227 if (!task)
@@ -684,6 +693,16 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
684 return EH_NOT_HANDLED; 693 return EH_NOT_HANDLED;
685} 694}
686 695
696int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
697{
698 struct domain_device *dev = sdev_to_domain_dev(sdev);
699
700 if (dev_is_sata(dev))
701 return ata_scsi_ioctl(sdev, cmd, arg);
702
703 return -EINVAL;
704}
705
687struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy) 706struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
688{ 707{
689 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent); 708 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
@@ -723,10 +742,17 @@ static inline struct domain_device *sas_find_target(struct scsi_target *starget)
723int sas_target_alloc(struct scsi_target *starget) 742int sas_target_alloc(struct scsi_target *starget)
724{ 743{
725 struct domain_device *found_dev = sas_find_target(starget); 744 struct domain_device *found_dev = sas_find_target(starget);
745 int res;
726 746
727 if (!found_dev) 747 if (!found_dev)
728 return -ENODEV; 748 return -ENODEV;
729 749
750 if (dev_is_sata(found_dev)) {
751 res = sas_ata_init_host_and_port(found_dev, starget);
752 if (res)
753 return res;
754 }
755
730 starget->hostdata = found_dev; 756 starget->hostdata = found_dev;
731 return 0; 757 return 0;
732} 758}
@@ -741,6 +767,11 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
741 767
742 BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE); 768 BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
743 769
770 if (dev_is_sata(dev)) {
771 ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
772 return 0;
773 }
774
744 sas_ha = dev->port->ha; 775 sas_ha = dev->port->ha;
745 776
746 sas_read_port_mode_page(scsi_dev); 777 sas_read_port_mode_page(scsi_dev);
@@ -764,6 +795,10 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
764 795
765void sas_slave_destroy(struct scsi_device *scsi_dev) 796void sas_slave_destroy(struct scsi_device *scsi_dev)
766{ 797{
798 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
799
800 if (dev_is_sata(dev))
801 ata_port_disable(dev->sata_dev.ap);
767} 802}
768 803
769int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth) 804int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth)
@@ -980,10 +1015,38 @@ void sas_task_abort(struct sas_task *task)
980 return; 1015 return;
981 } 1016 }
982 1017
1018 if (dev_is_sata(task->dev)) {
1019 sas_ata_task_abort(task);
1020 return;
1021 }
1022
983 scsi_req_abort_cmd(sc); 1023 scsi_req_abort_cmd(sc);
984 scsi_schedule_eh(sc->device->host); 1024 scsi_schedule_eh(sc->device->host);
985} 1025}
986 1026
1027int sas_slave_alloc(struct scsi_device *scsi_dev)
1028{
1029 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
1030
1031 if (dev_is_sata(dev))
1032 return ata_sas_port_init(dev->sata_dev.ap);
1033
1034 return 0;
1035}
1036
1037void sas_target_destroy(struct scsi_target *starget)
1038{
1039 struct domain_device *found_dev = sas_find_target(starget);
1040
1041 if (!found_dev)
1042 return;
1043
1044 if (dev_is_sata(found_dev))
1045 ata_sas_port_destroy(found_dev->sata_dev.ap);
1046
1047 return;
1048}
1049
987EXPORT_SYMBOL_GPL(sas_queuecommand); 1050EXPORT_SYMBOL_GPL(sas_queuecommand);
988EXPORT_SYMBOL_GPL(sas_target_alloc); 1051EXPORT_SYMBOL_GPL(sas_target_alloc);
989EXPORT_SYMBOL_GPL(sas_slave_configure); 1052EXPORT_SYMBOL_GPL(sas_slave_configure);
@@ -997,3 +1060,6 @@ EXPORT_SYMBOL_GPL(sas_phy_reset);
997EXPORT_SYMBOL_GPL(sas_phy_enable); 1060EXPORT_SYMBOL_GPL(sas_phy_enable);
998EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler); 1061EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
999EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler); 1062EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
1063EXPORT_SYMBOL_GPL(sas_slave_alloc);
1064EXPORT_SYMBOL_GPL(sas_target_destroy);
1065EXPORT_SYMBOL_GPL(sas_ioctl);
diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c
index d6ef22a941c4..1bdddad48571 100644
--- a/drivers/scsi/mvme16x_scsi.c
+++ b/drivers/scsi/mvme16x_scsi.c
@@ -89,6 +89,7 @@ mvme16x_probe(struct device *dev)
89 out_be32(0xfff4202c, v); 89 out_be32(0xfff4202c, v);
90 } 90 }
91 91
92 dev_set_drvdata(dev, host);
92 scsi_scan_host(host); 93 scsi_scan_host(host);
93 94
94 return 0; 95 return 0;
@@ -104,7 +105,7 @@ mvme16x_probe(struct device *dev)
104static __devexit int 105static __devexit int
105mvme16x_device_remove(struct device *dev) 106mvme16x_device_remove(struct device *dev)
106{ 107{
107 struct Scsi_Host *host = dev_to_shost(dev); 108 struct Scsi_Host *host = dev_get_drvdata(dev);
108 struct NCR_700_Host_Parameters *hostdata = shost_priv(host); 109 struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
109 110
110 /* Disable scsi chip ints */ 111 /* Disable scsi chip ints */
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index 7dd787f6ab27..fa481b515ead 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -2,9 +2,12 @@
2# PCMCIA SCSI adapter configuration 2# PCMCIA SCSI adapter configuration
3# 3#
4 4
5menu "PCMCIA SCSI adapter support" 5menuconfig SCSI_LOWLEVEL_PCMCIA
6 bool "PCMCIA SCSI adapter support"
6 depends on SCSI!=n && PCMCIA!=n 7 depends on SCSI!=n && PCMCIA!=n
7 8
9if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA
10
8config PCMCIA_AHA152X 11config PCMCIA_AHA152X
9 tristate "Adaptec AHA152X PCMCIA support" 12 tristate "Adaptec AHA152X PCMCIA support"
10 depends on !64BIT 13 depends on !64BIT
@@ -77,4 +80,4 @@ config PCMCIA_SYM53C500
77 To compile this driver as a module, choose M here: the 80 To compile this driver as a module, choose M here: the
78 module will be called sym53c500_cs. 81 module will be called sym53c500_cs.
79 82
80endmenu 83endif # SCSI_LOWLEVEL_PCMCIA
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3eb2208675ae..1612f9200a52 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -98,7 +98,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
98 98
99 /* Read NVRAM. */ 99 /* Read NVRAM. */
100 spin_lock_irqsave(&ha->hardware_lock, flags); 100 spin_lock_irqsave(&ha->hardware_lock, flags);
101 ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->nvram_base, 101 ha->isp_ops->read_nvram(ha, (uint8_t *)buf, ha->nvram_base,
102 ha->nvram_size); 102 ha->nvram_size);
103 spin_unlock_irqrestore(&ha->hardware_lock, flags); 103 spin_unlock_irqrestore(&ha->hardware_lock, flags);
104 104
@@ -119,7 +119,7 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
119 return 0; 119 return 0;
120 120
121 /* Checksum NVRAM. */ 121 /* Checksum NVRAM. */
122 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 122 if (IS_FWI2_CAPABLE(ha)) {
123 uint32_t *iter; 123 uint32_t *iter;
124 uint32_t chksum; 124 uint32_t chksum;
125 125
@@ -143,7 +143,7 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
143 143
144 /* Write NVRAM. */ 144 /* Write NVRAM. */
145 spin_lock_irqsave(&ha->hardware_lock, flags); 145 spin_lock_irqsave(&ha->hardware_lock, flags);
146 ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count); 146 ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count);
147 spin_unlock_irqrestore(&ha->hardware_lock, flags); 147 spin_unlock_irqrestore(&ha->hardware_lock, flags);
148 148
149 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 149 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
@@ -206,7 +206,7 @@ static struct bin_attribute sysfs_optrom_attr = {
206 .name = "optrom", 206 .name = "optrom",
207 .mode = S_IRUSR | S_IWUSR, 207 .mode = S_IRUSR | S_IWUSR,
208 }, 208 },
209 .size = OPTROM_SIZE_24XX, 209 .size = 0,
210 .read = qla2x00_sysfs_read_optrom, 210 .read = qla2x00_sysfs_read_optrom,
211 .write = qla2x00_sysfs_write_optrom, 211 .write = qla2x00_sysfs_write_optrom,
212}; 212};
@@ -252,7 +252,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
252 } 252 }
253 253
254 memset(ha->optrom_buffer, 0, ha->optrom_size); 254 memset(ha->optrom_buffer, 0, ha->optrom_size);
255 ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0, 255 ha->isp_ops->read_optrom(ha, ha->optrom_buffer, 0,
256 ha->optrom_size); 256 ha->optrom_size);
257 break; 257 break;
258 case 2: 258 case 2:
@@ -275,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
275 if (ha->optrom_state != QLA_SWRITING) 275 if (ha->optrom_state != QLA_SWRITING)
276 break; 276 break;
277 277
278 ha->isp_ops.write_optrom(ha, ha->optrom_buffer, 0, 278 ha->isp_ops->write_optrom(ha, ha->optrom_buffer, 0,
279 ha->optrom_size); 279 ha->optrom_size);
280 break; 280 break;
281 } 281 }
@@ -305,7 +305,8 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
305 305
306 /* Read NVRAM. */ 306 /* Read NVRAM. */
307 spin_lock_irqsave(&ha->hardware_lock, flags); 307 spin_lock_irqsave(&ha->hardware_lock, flags);
308 ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->vpd_base, ha->vpd_size); 308 ha->isp_ops->read_nvram(ha, (uint8_t *)buf, ha->vpd_base,
309 ha->vpd_size);
309 spin_unlock_irqrestore(&ha->hardware_lock, flags); 310 spin_unlock_irqrestore(&ha->hardware_lock, flags);
310 311
311 return ha->vpd_size; 312 return ha->vpd_size;
@@ -325,7 +326,7 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
325 326
326 /* Write NVRAM. */ 327 /* Write NVRAM. */
327 spin_lock_irqsave(&ha->hardware_lock, flags); 328 spin_lock_irqsave(&ha->hardware_lock, flags);
328 ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count); 329 ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count);
329 spin_unlock_irqrestore(&ha->hardware_lock, flags); 330 spin_unlock_irqrestore(&ha->hardware_lock, flags);
330 331
331 return count; 332 return count;
@@ -410,7 +411,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
410 int ret; 411 int ret;
411 412
412 for (iter = bin_file_entries; iter->name; iter++) { 413 for (iter = bin_file_entries; iter->name; iter++) {
413 if (iter->is4GBp_only && (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))) 414 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
414 continue; 415 continue;
415 416
416 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 417 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -429,7 +430,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
429 struct sysfs_entry *iter; 430 struct sysfs_entry *iter;
430 431
431 for (iter = bin_file_entries; iter->name; iter++) { 432 for (iter = bin_file_entries; iter->name; iter++) {
432 if (iter->is4GBp_only && (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))) 433 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
433 continue; 434 continue;
434 435
435 sysfs_remove_bin_file(&host->shost_gendev.kobj, 436 sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -437,7 +438,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
437 } 438 }
438 439
439 if (ha->beacon_blink_led == 1) 440 if (ha->beacon_blink_led == 1)
440 ha->isp_ops.beacon_off(ha); 441 ha->isp_ops->beacon_off(ha);
441} 442}
442 443
443/* Scsi_Host attributes. */ 444/* Scsi_Host attributes. */
@@ -455,7 +456,7 @@ qla2x00_fw_version_show(struct class_device *cdev, char *buf)
455 char fw_str[30]; 456 char fw_str[30];
456 457
457 return snprintf(buf, PAGE_SIZE, "%s\n", 458 return snprintf(buf, PAGE_SIZE, "%s\n",
458 ha->isp_ops.fw_version_str(ha, fw_str)); 459 ha->isp_ops->fw_version_str(ha, fw_str));
459} 460}
460 461
461static ssize_t 462static ssize_t
@@ -507,7 +508,7 @@ qla2x00_pci_info_show(struct class_device *cdev, char *buf)
507 char pci_info[30]; 508 char pci_info[30];
508 509
509 return snprintf(buf, PAGE_SIZE, "%s\n", 510 return snprintf(buf, PAGE_SIZE, "%s\n",
510 ha->isp_ops.pci_info_str(ha, pci_info)); 511 ha->isp_ops->pci_info_str(ha, pci_info));
511} 512}
512 513
513static ssize_t 514static ssize_t
@@ -652,9 +653,9 @@ qla2x00_beacon_store(struct class_device *cdev, const char *buf,
652 return -EINVAL; 653 return -EINVAL;
653 654
654 if (val) 655 if (val)
655 rval = ha->isp_ops.beacon_on(ha); 656 rval = ha->isp_ops->beacon_on(ha);
656 else 657 else
657 rval = ha->isp_ops.beacon_off(ha); 658 rval = ha->isp_ops->beacon_off(ha);
658 659
659 if (rval != QLA_SUCCESS) 660 if (rval != QLA_SUCCESS)
660 count = 0; 661 count = 0;
@@ -898,7 +899,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
898 pfc_host_stat = &ha->fc_host_stat; 899 pfc_host_stat = &ha->fc_host_stat;
899 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 900 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
900 901
901 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 902 if (IS_FWI2_CAPABLE(ha)) {
902 rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf, 903 rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf,
903 sizeof(stat_buf) / 4, mb_stat); 904 sizeof(stat_buf) / 4, mb_stat);
904 } else if (atomic_read(&ha->loop_state) == LOOP_READY && 905 } else if (atomic_read(&ha->loop_state) == LOOP_READY &&
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 996c47a63074..563d18f4ff50 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -37,6 +37,121 @@ qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
37 return ptr + (ha->response_q_length * sizeof(response_t)); 37 return ptr + (ha->response_q_length * sizeof(response_t));
38} 38}
39 39
40static int
41qla2xxx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
42 uint32_t cram_size, uint32_t *ext_mem, void **nxt)
43{
44 int rval;
45 uint32_t cnt, stat, timer, risc_address, ext_mem_cnt;
46 uint16_t mb[4];
47 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
48
49 rval = QLA_SUCCESS;
50 risc_address = ext_mem_cnt = 0;
51 memset(mb, 0, sizeof(mb));
52
53 /* Code RAM. */
54 risc_address = 0x20000;
55 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
56 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
57
58 for (cnt = 0; cnt < cram_size / 4 && rval == QLA_SUCCESS;
59 cnt++, risc_address++) {
60 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
61 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
62 RD_REG_WORD(&reg->mailbox8);
63 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
64
65 for (timer = 6000000; timer; timer--) {
66 /* Check for pending interrupts. */
67 stat = RD_REG_DWORD(&reg->host_status);
68 if (stat & HSRX_RISC_INT) {
69 stat &= 0xff;
70
71 if (stat == 0x1 || stat == 0x2 ||
72 stat == 0x10 || stat == 0x11) {
73 set_bit(MBX_INTERRUPT,
74 &ha->mbx_cmd_flags);
75
76 mb[0] = RD_REG_WORD(&reg->mailbox0);
77 mb[2] = RD_REG_WORD(&reg->mailbox2);
78 mb[3] = RD_REG_WORD(&reg->mailbox3);
79
80 WRT_REG_DWORD(&reg->hccr,
81 HCCRX_CLR_RISC_INT);
82 RD_REG_DWORD(&reg->hccr);
83 break;
84 }
85
86 /* Clear this intr; it wasn't a mailbox intr */
87 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
88 RD_REG_DWORD(&reg->hccr);
89 }
90 udelay(5);
91 }
92
93 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
94 rval = mb[0] & MBS_MASK;
95 code_ram[cnt] = htonl((mb[3] << 16) | mb[2]);
96 } else {
97 rval = QLA_FUNCTION_FAILED;
98 }
99 }
100
101 if (rval == QLA_SUCCESS) {
102 /* External Memory. */
103 risc_address = 0x100000;
104 ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1;
105 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
106 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
107 }
108 for (cnt = 0; cnt < ext_mem_cnt && rval == QLA_SUCCESS;
109 cnt++, risc_address++) {
110 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
111 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
112 RD_REG_WORD(&reg->mailbox8);
113 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
114
115 for (timer = 6000000; timer; timer--) {
116 /* Check for pending interrupts. */
117 stat = RD_REG_DWORD(&reg->host_status);
118 if (stat & HSRX_RISC_INT) {
119 stat &= 0xff;
120
121 if (stat == 0x1 || stat == 0x2 ||
122 stat == 0x10 || stat == 0x11) {
123 set_bit(MBX_INTERRUPT,
124 &ha->mbx_cmd_flags);
125
126 mb[0] = RD_REG_WORD(&reg->mailbox0);
127 mb[2] = RD_REG_WORD(&reg->mailbox2);
128 mb[3] = RD_REG_WORD(&reg->mailbox3);
129
130 WRT_REG_DWORD(&reg->hccr,
131 HCCRX_CLR_RISC_INT);
132 RD_REG_DWORD(&reg->hccr);
133 break;
134 }
135
136 /* Clear this intr; it wasn't a mailbox intr */
137 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
138 RD_REG_DWORD(&reg->hccr);
139 }
140 udelay(5);
141 }
142
143 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
144 rval = mb[0] & MBS_MASK;
145 ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]);
146 } else {
147 rval = QLA_FUNCTION_FAILED;
148 }
149 }
150
151 *nxt = rval == QLA_SUCCESS ? &ext_mem[cnt]: NULL;
152 return rval;
153}
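
Side note, not part of the patch: the per-word polling budget in both loops above is 6,000,000 iterations × 5 µs of udelay(), i.e. roughly 30 seconds as an upper bound; in practice the loop exits as soon as one of the handled host_status values (0x1, 0x2, 0x10, 0x11) indicates the mailbox data is ready.
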
154
40/** 155/**
41 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. 156 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
42 * @ha: HA context 157 * @ha: HA context
@@ -633,11 +748,10 @@ void
633qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 748qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
634{ 749{
635 int rval; 750 int rval;
636 uint32_t cnt, timer; 751 uint32_t cnt;
637 uint32_t risc_address; 752 uint32_t risc_address;
638 uint16_t mb[4], wd; 753 uint16_t mb0, wd;
639 754
640 uint32_t stat;
641 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 755 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
642 uint32_t __iomem *dmp_reg; 756 uint32_t __iomem *dmp_reg;
643 uint32_t *iter_reg; 757 uint32_t *iter_reg;
@@ -645,10 +759,9 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
645 unsigned long flags; 759 unsigned long flags;
646 struct qla24xx_fw_dump *fw; 760 struct qla24xx_fw_dump *fw;
647 uint32_t ext_mem_cnt; 761 uint32_t ext_mem_cnt;
648 void *eft; 762 void *nxt;
649 763
650 risc_address = ext_mem_cnt = 0; 764 risc_address = ext_mem_cnt = 0;
651 memset(mb, 0, sizeof(mb));
652 flags = 0; 765 flags = 0;
653 766
654 if (!hardware_locked) 767 if (!hardware_locked)
@@ -701,250 +814,236 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
701 /* Shadow registers. */ 814 /* Shadow registers. */
702 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70); 815 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
703 RD_REG_DWORD(&reg->iobase_addr); 816 RD_REG_DWORD(&reg->iobase_addr);
704 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 817 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
705 WRT_REG_DWORD(dmp_reg, 0xB0000000); 818 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
706 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 819
707 fw->shadow_reg[0] = htonl(RD_REG_DWORD(dmp_reg)); 820 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
708 821 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
709 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 822
710 WRT_REG_DWORD(dmp_reg, 0xB0100000); 823 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
711 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 824 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
712 fw->shadow_reg[1] = htonl(RD_REG_DWORD(dmp_reg)); 825
713 826 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
714 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 827 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
715 WRT_REG_DWORD(dmp_reg, 0xB0200000); 828
716 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 829 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
717 fw->shadow_reg[2] = htonl(RD_REG_DWORD(dmp_reg)); 830 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
718 831
719 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 832 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
720 WRT_REG_DWORD(dmp_reg, 0xB0300000); 833 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
721 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 834
722 fw->shadow_reg[3] = htonl(RD_REG_DWORD(dmp_reg)); 835 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
723 836 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
724 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
725 WRT_REG_DWORD(dmp_reg, 0xB0400000);
726 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
727 fw->shadow_reg[4] = htonl(RD_REG_DWORD(dmp_reg));
728
729 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
730 WRT_REG_DWORD(dmp_reg, 0xB0500000);
731 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
732 fw->shadow_reg[5] = htonl(RD_REG_DWORD(dmp_reg));
733
734 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
735 WRT_REG_DWORD(dmp_reg, 0xB0600000);
736 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
737 fw->shadow_reg[6] = htonl(RD_REG_DWORD(dmp_reg));
738 837
739 /* Mailbox registers. */ 838 /* Mailbox registers. */
740 mbx_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 839 mbx_reg = &reg->mailbox0;
741 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 840 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
742 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); 841 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
743 842
744 /* Transfer sequence registers. */ 843 /* Transfer sequence registers. */
745 iter_reg = fw->xseq_gp_reg; 844 iter_reg = fw->xseq_gp_reg;
746 WRT_REG_DWORD(&reg->iobase_addr, 0xBF00); 845 WRT_REG_DWORD(&reg->iobase_addr, 0xBF00);
747 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 846 dmp_reg = &reg->iobase_window;
748 for (cnt = 0; cnt < 16; cnt++) 847 for (cnt = 0; cnt < 16; cnt++)
749 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 848 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
750 849
751 WRT_REG_DWORD(&reg->iobase_addr, 0xBF10); 850 WRT_REG_DWORD(&reg->iobase_addr, 0xBF10);
752 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 851 dmp_reg = &reg->iobase_window;
753 for (cnt = 0; cnt < 16; cnt++) 852 for (cnt = 0; cnt < 16; cnt++)
754 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 853 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
755 854
756 WRT_REG_DWORD(&reg->iobase_addr, 0xBF20); 855 WRT_REG_DWORD(&reg->iobase_addr, 0xBF20);
757 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 856 dmp_reg = &reg->iobase_window;
758 for (cnt = 0; cnt < 16; cnt++) 857 for (cnt = 0; cnt < 16; cnt++)
759 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 858 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
760 859
761 WRT_REG_DWORD(&reg->iobase_addr, 0xBF30); 860 WRT_REG_DWORD(&reg->iobase_addr, 0xBF30);
762 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 861 dmp_reg = &reg->iobase_window;
763 for (cnt = 0; cnt < 16; cnt++) 862 for (cnt = 0; cnt < 16; cnt++)
764 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 863 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
765 864
766 WRT_REG_DWORD(&reg->iobase_addr, 0xBF40); 865 WRT_REG_DWORD(&reg->iobase_addr, 0xBF40);
767 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 866 dmp_reg = &reg->iobase_window;
768 for (cnt = 0; cnt < 16; cnt++) 867 for (cnt = 0; cnt < 16; cnt++)
769 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 868 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
770 869
771 WRT_REG_DWORD(&reg->iobase_addr, 0xBF50); 870 WRT_REG_DWORD(&reg->iobase_addr, 0xBF50);
772 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 871 dmp_reg = &reg->iobase_window;
773 for (cnt = 0; cnt < 16; cnt++) 872 for (cnt = 0; cnt < 16; cnt++)
774 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 873 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
775 874
776 WRT_REG_DWORD(&reg->iobase_addr, 0xBF60); 875 WRT_REG_DWORD(&reg->iobase_addr, 0xBF60);
777 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 876 dmp_reg = &reg->iobase_window;
778 for (cnt = 0; cnt < 16; cnt++) 877 for (cnt = 0; cnt < 16; cnt++)
779 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 878 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
780 879
781 WRT_REG_DWORD(&reg->iobase_addr, 0xBF70); 880 WRT_REG_DWORD(&reg->iobase_addr, 0xBF70);
782 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 881 dmp_reg = &reg->iobase_window;
783 for (cnt = 0; cnt < 16; cnt++) 882 for (cnt = 0; cnt < 16; cnt++)
784 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 883 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
785 884
786 WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0); 885 WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0);
787 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 886 dmp_reg = &reg->iobase_window;
788 for (cnt = 0; cnt < sizeof(fw->xseq_0_reg) / 4; cnt++) 887 for (cnt = 0; cnt < sizeof(fw->xseq_0_reg) / 4; cnt++)
789 fw->xseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 888 fw->xseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
790 889
791 WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0); 890 WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0);
792 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 891 dmp_reg = &reg->iobase_window;
793 for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++) 892 for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++)
794 fw->xseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 893 fw->xseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
795 894
796 /* Receive sequence registers. */ 895 /* Receive sequence registers. */
797 iter_reg = fw->rseq_gp_reg; 896 iter_reg = fw->rseq_gp_reg;
798 WRT_REG_DWORD(&reg->iobase_addr, 0xFF00); 897 WRT_REG_DWORD(&reg->iobase_addr, 0xFF00);
799 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 898 dmp_reg = &reg->iobase_window;
800 for (cnt = 0; cnt < 16; cnt++) 899 for (cnt = 0; cnt < 16; cnt++)
801 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 900 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
802 901
803 WRT_REG_DWORD(&reg->iobase_addr, 0xFF10); 902 WRT_REG_DWORD(&reg->iobase_addr, 0xFF10);
804 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 903 dmp_reg = &reg->iobase_window;
805 for (cnt = 0; cnt < 16; cnt++) 904 for (cnt = 0; cnt < 16; cnt++)
806 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 905 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
807 906
808 WRT_REG_DWORD(&reg->iobase_addr, 0xFF20); 907 WRT_REG_DWORD(&reg->iobase_addr, 0xFF20);
809 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 908 dmp_reg = &reg->iobase_window;
810 for (cnt = 0; cnt < 16; cnt++) 909 for (cnt = 0; cnt < 16; cnt++)
811 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 910 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
812 911
813 WRT_REG_DWORD(&reg->iobase_addr, 0xFF30); 912 WRT_REG_DWORD(&reg->iobase_addr, 0xFF30);
814 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 913 dmp_reg = &reg->iobase_window;
815 for (cnt = 0; cnt < 16; cnt++) 914 for (cnt = 0; cnt < 16; cnt++)
816 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 915 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
817 916
818 WRT_REG_DWORD(&reg->iobase_addr, 0xFF40); 917 WRT_REG_DWORD(&reg->iobase_addr, 0xFF40);
819 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 918 dmp_reg = &reg->iobase_window;
820 for (cnt = 0; cnt < 16; cnt++) 919 for (cnt = 0; cnt < 16; cnt++)
821 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 920 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
822 921
823 WRT_REG_DWORD(&reg->iobase_addr, 0xFF50); 922 WRT_REG_DWORD(&reg->iobase_addr, 0xFF50);
824 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 923 dmp_reg = &reg->iobase_window;
825 for (cnt = 0; cnt < 16; cnt++) 924 for (cnt = 0; cnt < 16; cnt++)
826 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 925 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
827 926
828 WRT_REG_DWORD(&reg->iobase_addr, 0xFF60); 927 WRT_REG_DWORD(&reg->iobase_addr, 0xFF60);
829 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 928 dmp_reg = &reg->iobase_window;
830 for (cnt = 0; cnt < 16; cnt++) 929 for (cnt = 0; cnt < 16; cnt++)
831 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 930 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
832 931
833 WRT_REG_DWORD(&reg->iobase_addr, 0xFF70); 932 WRT_REG_DWORD(&reg->iobase_addr, 0xFF70);
834 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 933 dmp_reg = &reg->iobase_window;
835 for (cnt = 0; cnt < 16; cnt++) 934 for (cnt = 0; cnt < 16; cnt++)
836 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 935 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
837 936
838 WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0); 937 WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0);
839 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 938 dmp_reg = &reg->iobase_window;
840 for (cnt = 0; cnt < sizeof(fw->rseq_0_reg) / 4; cnt++) 939 for (cnt = 0; cnt < sizeof(fw->rseq_0_reg) / 4; cnt++)
841 fw->rseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 940 fw->rseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
842 941
843 WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0); 942 WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0);
844 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 943 dmp_reg = &reg->iobase_window;
845 for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++) 944 for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++)
846 fw->rseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 945 fw->rseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
847 946
848 WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0); 947 WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0);
849 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 948 dmp_reg = &reg->iobase_window;
850 for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++) 949 for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++)
851 fw->rseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 950 fw->rseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
852 951
853 /* Command DMA registers. */ 952 /* Command DMA registers. */
854 WRT_REG_DWORD(&reg->iobase_addr, 0x7100); 953 WRT_REG_DWORD(&reg->iobase_addr, 0x7100);
855 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 954 dmp_reg = &reg->iobase_window;
856 for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++) 955 for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++)
857 fw->cmd_dma_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 956 fw->cmd_dma_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
858 957
859 /* Queues. */ 958 /* Queues. */
860 iter_reg = fw->req0_dma_reg; 959 iter_reg = fw->req0_dma_reg;
861 WRT_REG_DWORD(&reg->iobase_addr, 0x7200); 960 WRT_REG_DWORD(&reg->iobase_addr, 0x7200);
862 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 961 dmp_reg = &reg->iobase_window;
863 for (cnt = 0; cnt < 8; cnt++) 962 for (cnt = 0; cnt < 8; cnt++)
864 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 963 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
865 964
866 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 965 dmp_reg = &reg->iobase_q;
867 for (cnt = 0; cnt < 7; cnt++) 966 for (cnt = 0; cnt < 7; cnt++)
868 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 967 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
869 968
870 iter_reg = fw->resp0_dma_reg; 969 iter_reg = fw->resp0_dma_reg;
871 WRT_REG_DWORD(&reg->iobase_addr, 0x7300); 970 WRT_REG_DWORD(&reg->iobase_addr, 0x7300);
872 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 971 dmp_reg = &reg->iobase_window;
873 for (cnt = 0; cnt < 8; cnt++) 972 for (cnt = 0; cnt < 8; cnt++)
874 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 973 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
875 974
876 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 975 dmp_reg = &reg->iobase_q;
877 for (cnt = 0; cnt < 7; cnt++) 976 for (cnt = 0; cnt < 7; cnt++)
878 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 977 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
879 978
880 iter_reg = fw->req1_dma_reg; 979 iter_reg = fw->req1_dma_reg;
881 WRT_REG_DWORD(&reg->iobase_addr, 0x7400); 980 WRT_REG_DWORD(&reg->iobase_addr, 0x7400);
882 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 981 dmp_reg = &reg->iobase_window;
883 for (cnt = 0; cnt < 8; cnt++) 982 for (cnt = 0; cnt < 8; cnt++)
884 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 983 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
885 984
886 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 985 dmp_reg = &reg->iobase_q;
887 for (cnt = 0; cnt < 7; cnt++) 986 for (cnt = 0; cnt < 7; cnt++)
888 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 987 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
889 988
890 /* Transmit DMA registers. */ 989 /* Transmit DMA registers. */
891 iter_reg = fw->xmt0_dma_reg; 990 iter_reg = fw->xmt0_dma_reg;
892 WRT_REG_DWORD(&reg->iobase_addr, 0x7600); 991 WRT_REG_DWORD(&reg->iobase_addr, 0x7600);
893 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 992 dmp_reg = &reg->iobase_window;
894 for (cnt = 0; cnt < 16; cnt++) 993 for (cnt = 0; cnt < 16; cnt++)
895 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 994 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
896 995
897 WRT_REG_DWORD(&reg->iobase_addr, 0x7610); 996 WRT_REG_DWORD(&reg->iobase_addr, 0x7610);
898 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 997 dmp_reg = &reg->iobase_window;
899 for (cnt = 0; cnt < 16; cnt++) 998 for (cnt = 0; cnt < 16; cnt++)
900 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 999 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
901 1000
902 iter_reg = fw->xmt1_dma_reg; 1001 iter_reg = fw->xmt1_dma_reg;
903 WRT_REG_DWORD(&reg->iobase_addr, 0x7620); 1002 WRT_REG_DWORD(&reg->iobase_addr, 0x7620);
904 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1003 dmp_reg = &reg->iobase_window;
905 for (cnt = 0; cnt < 16; cnt++) 1004 for (cnt = 0; cnt < 16; cnt++)
906 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1005 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
907 1006
908 WRT_REG_DWORD(&reg->iobase_addr, 0x7630); 1007 WRT_REG_DWORD(&reg->iobase_addr, 0x7630);
909 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1008 dmp_reg = &reg->iobase_window;
910 for (cnt = 0; cnt < 16; cnt++) 1009 for (cnt = 0; cnt < 16; cnt++)
911 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1010 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
912 1011
913 iter_reg = fw->xmt2_dma_reg; 1012 iter_reg = fw->xmt2_dma_reg;
914 WRT_REG_DWORD(&reg->iobase_addr, 0x7640); 1013 WRT_REG_DWORD(&reg->iobase_addr, 0x7640);
915 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1014 dmp_reg = &reg->iobase_window;
916 for (cnt = 0; cnt < 16; cnt++) 1015 for (cnt = 0; cnt < 16; cnt++)
917 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1016 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
918 1017
919 WRT_REG_DWORD(&reg->iobase_addr, 0x7650); 1018 WRT_REG_DWORD(&reg->iobase_addr, 0x7650);
920 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1019 dmp_reg = &reg->iobase_window;
921 for (cnt = 0; cnt < 16; cnt++) 1020 for (cnt = 0; cnt < 16; cnt++)
922 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1021 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
923 1022
924 iter_reg = fw->xmt3_dma_reg; 1023 iter_reg = fw->xmt3_dma_reg;
925 WRT_REG_DWORD(&reg->iobase_addr, 0x7660); 1024 WRT_REG_DWORD(&reg->iobase_addr, 0x7660);
926 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1025 dmp_reg = &reg->iobase_window;
927 for (cnt = 0; cnt < 16; cnt++) 1026 for (cnt = 0; cnt < 16; cnt++)
928 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1027 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
929 1028
930 WRT_REG_DWORD(&reg->iobase_addr, 0x7670); 1029 WRT_REG_DWORD(&reg->iobase_addr, 0x7670);
931 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1030 dmp_reg = &reg->iobase_window;
932 for (cnt = 0; cnt < 16; cnt++) 1031 for (cnt = 0; cnt < 16; cnt++)
933 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1032 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
934 1033
935 iter_reg = fw->xmt4_dma_reg; 1034 iter_reg = fw->xmt4_dma_reg;
936 WRT_REG_DWORD(&reg->iobase_addr, 0x7680); 1035 WRT_REG_DWORD(&reg->iobase_addr, 0x7680);
937 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1036 dmp_reg = &reg->iobase_window;
938 for (cnt = 0; cnt < 16; cnt++) 1037 for (cnt = 0; cnt < 16; cnt++)
939 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1038 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
940 1039
941 WRT_REG_DWORD(&reg->iobase_addr, 0x7690); 1040 WRT_REG_DWORD(&reg->iobase_addr, 0x7690);
942 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1041 dmp_reg = &reg->iobase_window;
943 for (cnt = 0; cnt < 16; cnt++) 1042 for (cnt = 0; cnt < 16; cnt++)
944 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1043 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
945 1044
946 WRT_REG_DWORD(&reg->iobase_addr, 0x76A0); 1045 WRT_REG_DWORD(&reg->iobase_addr, 0x76A0);
947 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1046 dmp_reg = &reg->iobase_window;
948 for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++) 1047 for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++)
949 fw->xmt_data_dma_reg[cnt] = 1048 fw->xmt_data_dma_reg[cnt] =
950 htonl(RD_REG_DWORD(dmp_reg++)); 1049 htonl(RD_REG_DWORD(dmp_reg++));
@@ -952,221 +1051,221 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
952 /* Receive DMA registers. */ 1051 /* Receive DMA registers. */
953 iter_reg = fw->rcvt0_data_dma_reg; 1052 iter_reg = fw->rcvt0_data_dma_reg;
954 WRT_REG_DWORD(&reg->iobase_addr, 0x7700); 1053 WRT_REG_DWORD(&reg->iobase_addr, 0x7700);
955 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1054 dmp_reg = &reg->iobase_window;
956 for (cnt = 0; cnt < 16; cnt++) 1055 for (cnt = 0; cnt < 16; cnt++)
957 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1056 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
958 1057
959 WRT_REG_DWORD(&reg->iobase_addr, 0x7710); 1058 WRT_REG_DWORD(&reg->iobase_addr, 0x7710);
960 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1059 dmp_reg = &reg->iobase_window;
961 for (cnt = 0; cnt < 16; cnt++) 1060 for (cnt = 0; cnt < 16; cnt++)
962 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1061 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
963 1062
964 iter_reg = fw->rcvt1_data_dma_reg; 1063 iter_reg = fw->rcvt1_data_dma_reg;
965 WRT_REG_DWORD(&reg->iobase_addr, 0x7720); 1064 WRT_REG_DWORD(&reg->iobase_addr, 0x7720);
966 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1065 dmp_reg = &reg->iobase_window;
967 for (cnt = 0; cnt < 16; cnt++) 1066 for (cnt = 0; cnt < 16; cnt++)
968 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1067 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
969 1068
970 WRT_REG_DWORD(&reg->iobase_addr, 0x7730); 1069 WRT_REG_DWORD(&reg->iobase_addr, 0x7730);
971 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1070 dmp_reg = &reg->iobase_window;
972 for (cnt = 0; cnt < 16; cnt++) 1071 for (cnt = 0; cnt < 16; cnt++)
973 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1072 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
974 1073
975 /* RISC registers. */ 1074 /* RISC registers. */
976 iter_reg = fw->risc_gp_reg; 1075 iter_reg = fw->risc_gp_reg;
977 WRT_REG_DWORD(&reg->iobase_addr, 0x0F00); 1076 WRT_REG_DWORD(&reg->iobase_addr, 0x0F00);
978 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1077 dmp_reg = &reg->iobase_window;
979 for (cnt = 0; cnt < 16; cnt++) 1078 for (cnt = 0; cnt < 16; cnt++)
980 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1079 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
981 1080
982 WRT_REG_DWORD(&reg->iobase_addr, 0x0F10); 1081 WRT_REG_DWORD(&reg->iobase_addr, 0x0F10);
983 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1082 dmp_reg = &reg->iobase_window;
984 for (cnt = 0; cnt < 16; cnt++) 1083 for (cnt = 0; cnt < 16; cnt++)
985 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1084 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
986 1085
987 WRT_REG_DWORD(&reg->iobase_addr, 0x0F20); 1086 WRT_REG_DWORD(&reg->iobase_addr, 0x0F20);
988 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1087 dmp_reg = &reg->iobase_window;
989 for (cnt = 0; cnt < 16; cnt++) 1088 for (cnt = 0; cnt < 16; cnt++)
990 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1089 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
991 1090
992 WRT_REG_DWORD(&reg->iobase_addr, 0x0F30); 1091 WRT_REG_DWORD(&reg->iobase_addr, 0x0F30);
993 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1092 dmp_reg = &reg->iobase_window;
994 for (cnt = 0; cnt < 16; cnt++) 1093 for (cnt = 0; cnt < 16; cnt++)
995 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1094 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
996 1095
997 WRT_REG_DWORD(&reg->iobase_addr, 0x0F40); 1096 WRT_REG_DWORD(&reg->iobase_addr, 0x0F40);
998 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1097 dmp_reg = &reg->iobase_window;
999 for (cnt = 0; cnt < 16; cnt++) 1098 for (cnt = 0; cnt < 16; cnt++)
1000 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1099 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1001 1100
1002 WRT_REG_DWORD(&reg->iobase_addr, 0x0F50); 1101 WRT_REG_DWORD(&reg->iobase_addr, 0x0F50);
1003 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1102 dmp_reg = &reg->iobase_window;
1004 for (cnt = 0; cnt < 16; cnt++) 1103 for (cnt = 0; cnt < 16; cnt++)
1005 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1104 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1006 1105
1007 WRT_REG_DWORD(&reg->iobase_addr, 0x0F60); 1106 WRT_REG_DWORD(&reg->iobase_addr, 0x0F60);
1008 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1107 dmp_reg = &reg->iobase_window;
1009 for (cnt = 0; cnt < 16; cnt++) 1108 for (cnt = 0; cnt < 16; cnt++)
1010 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1109 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1011 1110
1012 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70); 1111 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1013 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1112 dmp_reg = &reg->iobase_window;
1014 for (cnt = 0; cnt < 16; cnt++) 1113 for (cnt = 0; cnt < 16; cnt++)
1015 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1114 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1016 1115
1017 /* Local memory controller registers. */ 1116 /* Local memory controller registers. */
1018 iter_reg = fw->lmc_reg; 1117 iter_reg = fw->lmc_reg;
1019 WRT_REG_DWORD(&reg->iobase_addr, 0x3000); 1118 WRT_REG_DWORD(&reg->iobase_addr, 0x3000);
1020 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1119 dmp_reg = &reg->iobase_window;
1021 for (cnt = 0; cnt < 16; cnt++) 1120 for (cnt = 0; cnt < 16; cnt++)
1022 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1121 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1023 1122
1024 WRT_REG_DWORD(&reg->iobase_addr, 0x3010); 1123 WRT_REG_DWORD(&reg->iobase_addr, 0x3010);
1025 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1124 dmp_reg = &reg->iobase_window;
1026 for (cnt = 0; cnt < 16; cnt++) 1125 for (cnt = 0; cnt < 16; cnt++)
1027 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1126 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1028 1127
1029 WRT_REG_DWORD(&reg->iobase_addr, 0x3020); 1128 WRT_REG_DWORD(&reg->iobase_addr, 0x3020);
1030 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1129 dmp_reg = &reg->iobase_window;
1031 for (cnt = 0; cnt < 16; cnt++) 1130 for (cnt = 0; cnt < 16; cnt++)
1032 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1131 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1033 1132
1034 WRT_REG_DWORD(&reg->iobase_addr, 0x3030); 1133 WRT_REG_DWORD(&reg->iobase_addr, 0x3030);
1035 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1134 dmp_reg = &reg->iobase_window;
1036 for (cnt = 0; cnt < 16; cnt++) 1135 for (cnt = 0; cnt < 16; cnt++)
1037 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1136 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1038 1137
1039 WRT_REG_DWORD(&reg->iobase_addr, 0x3040); 1138 WRT_REG_DWORD(&reg->iobase_addr, 0x3040);
1040 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1139 dmp_reg = &reg->iobase_window;
1041 for (cnt = 0; cnt < 16; cnt++) 1140 for (cnt = 0; cnt < 16; cnt++)
1042 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1141 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1043 1142
1044 WRT_REG_DWORD(&reg->iobase_addr, 0x3050); 1143 WRT_REG_DWORD(&reg->iobase_addr, 0x3050);
1045 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1144 dmp_reg = &reg->iobase_window;
1046 for (cnt = 0; cnt < 16; cnt++) 1145 for (cnt = 0; cnt < 16; cnt++)
1047 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1146 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1048 1147
1049 WRT_REG_DWORD(&reg->iobase_addr, 0x3060); 1148 WRT_REG_DWORD(&reg->iobase_addr, 0x3060);
1050 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1149 dmp_reg = &reg->iobase_window;
1051 for (cnt = 0; cnt < 16; cnt++) 1150 for (cnt = 0; cnt < 16; cnt++)
1052 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1151 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1053 1152
1054 /* Fibre Protocol Module registers. */ 1153 /* Fibre Protocol Module registers. */
1055 iter_reg = fw->fpm_hdw_reg; 1154 iter_reg = fw->fpm_hdw_reg;
1056 WRT_REG_DWORD(&reg->iobase_addr, 0x4000); 1155 WRT_REG_DWORD(&reg->iobase_addr, 0x4000);
1057 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1156 dmp_reg = &reg->iobase_window;
1058 for (cnt = 0; cnt < 16; cnt++) 1157 for (cnt = 0; cnt < 16; cnt++)
1059 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1158 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1060 1159
1061 WRT_REG_DWORD(&reg->iobase_addr, 0x4010); 1160 WRT_REG_DWORD(&reg->iobase_addr, 0x4010);
1062 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1161 dmp_reg = &reg->iobase_window;
1063 for (cnt = 0; cnt < 16; cnt++) 1162 for (cnt = 0; cnt < 16; cnt++)
1064 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1163 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1065 1164
1066 WRT_REG_DWORD(&reg->iobase_addr, 0x4020); 1165 WRT_REG_DWORD(&reg->iobase_addr, 0x4020);
1067 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1166 dmp_reg = &reg->iobase_window;
1068 for (cnt = 0; cnt < 16; cnt++) 1167 for (cnt = 0; cnt < 16; cnt++)
1069 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1168 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1070 1169
1071 WRT_REG_DWORD(&reg->iobase_addr, 0x4030); 1170 WRT_REG_DWORD(&reg->iobase_addr, 0x4030);
1072 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1171 dmp_reg = &reg->iobase_window;
1073 for (cnt = 0; cnt < 16; cnt++) 1172 for (cnt = 0; cnt < 16; cnt++)
1074 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1173 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1075 1174
1076 WRT_REG_DWORD(&reg->iobase_addr, 0x4040); 1175 WRT_REG_DWORD(&reg->iobase_addr, 0x4040);
1077 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1176 dmp_reg = &reg->iobase_window;
1078 for (cnt = 0; cnt < 16; cnt++) 1177 for (cnt = 0; cnt < 16; cnt++)
1079 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1178 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1080 1179
1081 WRT_REG_DWORD(&reg->iobase_addr, 0x4050); 1180 WRT_REG_DWORD(&reg->iobase_addr, 0x4050);
1082 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1181 dmp_reg = &reg->iobase_window;
1083 for (cnt = 0; cnt < 16; cnt++) 1182 for (cnt = 0; cnt < 16; cnt++)
1084 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1183 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1085 1184
1086 WRT_REG_DWORD(&reg->iobase_addr, 0x4060); 1185 WRT_REG_DWORD(&reg->iobase_addr, 0x4060);
1087 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1186 dmp_reg = &reg->iobase_window;
1088 for (cnt = 0; cnt < 16; cnt++) 1187 for (cnt = 0; cnt < 16; cnt++)
1089 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1188 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1090 1189
1091 WRT_REG_DWORD(&reg->iobase_addr, 0x4070); 1190 WRT_REG_DWORD(&reg->iobase_addr, 0x4070);
1092 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1191 dmp_reg = &reg->iobase_window;
1093 for (cnt = 0; cnt < 16; cnt++) 1192 for (cnt = 0; cnt < 16; cnt++)
1094 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1193 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1095 1194
1096 WRT_REG_DWORD(&reg->iobase_addr, 0x4080); 1195 WRT_REG_DWORD(&reg->iobase_addr, 0x4080);
1097 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1196 dmp_reg = &reg->iobase_window;
1098 for (cnt = 0; cnt < 16; cnt++) 1197 for (cnt = 0; cnt < 16; cnt++)
1099 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1198 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1100 1199
1101 WRT_REG_DWORD(&reg->iobase_addr, 0x4090); 1200 WRT_REG_DWORD(&reg->iobase_addr, 0x4090);
1102 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1201 dmp_reg = &reg->iobase_window;
1103 for (cnt = 0; cnt < 16; cnt++) 1202 for (cnt = 0; cnt < 16; cnt++)
1104 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1203 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1105 1204
1106 WRT_REG_DWORD(&reg->iobase_addr, 0x40A0); 1205 WRT_REG_DWORD(&reg->iobase_addr, 0x40A0);
1107 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1206 dmp_reg = &reg->iobase_window;
1108 for (cnt = 0; cnt < 16; cnt++) 1207 for (cnt = 0; cnt < 16; cnt++)
1109 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1208 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1110 1209
1111 WRT_REG_DWORD(&reg->iobase_addr, 0x40B0); 1210 WRT_REG_DWORD(&reg->iobase_addr, 0x40B0);
1112 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1211 dmp_reg = &reg->iobase_window;
1113 for (cnt = 0; cnt < 16; cnt++) 1212 for (cnt = 0; cnt < 16; cnt++)
1114 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1213 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1115 1214
1116 /* Frame Buffer registers. */ 1215 /* Frame Buffer registers. */
1117 iter_reg = fw->fb_hdw_reg; 1216 iter_reg = fw->fb_hdw_reg;
1118 WRT_REG_DWORD(&reg->iobase_addr, 0x6000); 1217 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
1119 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1218 dmp_reg = &reg->iobase_window;
1120 for (cnt = 0; cnt < 16; cnt++) 1219 for (cnt = 0; cnt < 16; cnt++)
1121 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1220 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1122 1221
1123 WRT_REG_DWORD(&reg->iobase_addr, 0x6010); 1222 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
1124 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1223 dmp_reg = &reg->iobase_window;
1125 for (cnt = 0; cnt < 16; cnt++) 1224 for (cnt = 0; cnt < 16; cnt++)
1126 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1225 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1127 1226
1128 WRT_REG_DWORD(&reg->iobase_addr, 0x6020); 1227 WRT_REG_DWORD(&reg->iobase_addr, 0x6020);
1129 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1228 dmp_reg = &reg->iobase_window;
1130 for (cnt = 0; cnt < 16; cnt++) 1229 for (cnt = 0; cnt < 16; cnt++)
1131 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1230 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1132 1231
1133 WRT_REG_DWORD(&reg->iobase_addr, 0x6030); 1232 WRT_REG_DWORD(&reg->iobase_addr, 0x6030);
1134 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1233 dmp_reg = &reg->iobase_window;
1135 for (cnt = 0; cnt < 16; cnt++) 1234 for (cnt = 0; cnt < 16; cnt++)
1136 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1235 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1137 1236
1138 WRT_REG_DWORD(&reg->iobase_addr, 0x6040); 1237 WRT_REG_DWORD(&reg->iobase_addr, 0x6040);
1139 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1238 dmp_reg = &reg->iobase_window;
1140 for (cnt = 0; cnt < 16; cnt++) 1239 for (cnt = 0; cnt < 16; cnt++)
1141 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1240 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1142 1241
1143 WRT_REG_DWORD(&reg->iobase_addr, 0x6100); 1242 WRT_REG_DWORD(&reg->iobase_addr, 0x6100);
1144 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1243 dmp_reg = &reg->iobase_window;
1145 for (cnt = 0; cnt < 16; cnt++) 1244 for (cnt = 0; cnt < 16; cnt++)
1146 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1245 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1147 1246
1148 WRT_REG_DWORD(&reg->iobase_addr, 0x6130); 1247 WRT_REG_DWORD(&reg->iobase_addr, 0x6130);
1149 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1248 dmp_reg = &reg->iobase_window;
1150 for (cnt = 0; cnt < 16; cnt++) 1249 for (cnt = 0; cnt < 16; cnt++)
1151 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1250 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1152 1251
1153 WRT_REG_DWORD(&reg->iobase_addr, 0x6150); 1252 WRT_REG_DWORD(&reg->iobase_addr, 0x6150);
1154 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1253 dmp_reg = &reg->iobase_window;
1155 for (cnt = 0; cnt < 16; cnt++) 1254 for (cnt = 0; cnt < 16; cnt++)
1156 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1255 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1157 1256
1158 WRT_REG_DWORD(&reg->iobase_addr, 0x6170); 1257 WRT_REG_DWORD(&reg->iobase_addr, 0x6170);
1159 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1258 dmp_reg = &reg->iobase_window;
1160 for (cnt = 0; cnt < 16; cnt++) 1259 for (cnt = 0; cnt < 16; cnt++)
1161 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1260 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1162 1261
1163 WRT_REG_DWORD(&reg->iobase_addr, 0x6190); 1262 WRT_REG_DWORD(&reg->iobase_addr, 0x6190);
1164 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1263 dmp_reg = &reg->iobase_window;
1165 for (cnt = 0; cnt < 16; cnt++) 1264 for (cnt = 0; cnt < 16; cnt++)
1166 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1265 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1167 1266
1168 WRT_REG_DWORD(&reg->iobase_addr, 0x61B0); 1267 WRT_REG_DWORD(&reg->iobase_addr, 0x61B0);
1169 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1268 dmp_reg = &reg->iobase_window;
1170 for (cnt = 0; cnt < 16; cnt++) 1269 for (cnt = 0; cnt < 16; cnt++)
1171 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1270 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1172 1271
@@ -1187,10 +1286,10 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1187 1286
1188 udelay(100); 1287 udelay(100);
1189 /* Wait for firmware to complete NVRAM accesses. */ 1288 /* Wait for firmware to complete NVRAM accesses. */
1190 mb[0] = (uint32_t) RD_REG_WORD(&reg->mailbox0); 1289 mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1191 for (cnt = 10000 ; cnt && mb[0]; cnt--) { 1290 for (cnt = 10000 ; cnt && mb0; cnt--) {
1192 udelay(5); 1291 udelay(5);
1193 mb[0] = (uint32_t) RD_REG_WORD(&reg->mailbox0); 1292 mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1194 barrier(); 1293 barrier();
1195 } 1294 }
1196 1295
@@ -1214,110 +1313,717 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1214 rval = QLA_FUNCTION_TIMEOUT; 1313 rval = QLA_FUNCTION_TIMEOUT;
1215 } 1314 }
1216 1315
1217 /* Memory. */ 1316 if (rval == QLA_SUCCESS)
1317 rval = qla2xxx_dump_memory(ha, fw->code_ram,
1318 sizeof(fw->code_ram), fw->ext_mem, &nxt);
1319
1218 if (rval == QLA_SUCCESS) { 1320 if (rval == QLA_SUCCESS) {
1219 /* Code RAM. */ 1321 nxt = qla2xxx_copy_queues(ha, nxt);
1220 risc_address = 0x20000; 1322 if (ha->eft)
1221 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED); 1323 memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
1222 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1223 } 1324 }
1224 for (cnt = 0; cnt < sizeof(fw->code_ram) / 4 && rval == QLA_SUCCESS;
1225 cnt++, risc_address++) {
1226 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
1227 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
1228 RD_REG_WORD(&reg->mailbox8);
1229 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
1230 1325
1231 for (timer = 6000000; timer; timer--) { 1326 if (rval != QLA_SUCCESS) {
1232 /* Check for pending interrupts. */ 1327 qla_printk(KERN_WARNING, ha,
1233 stat = RD_REG_DWORD(&reg->host_status); 1328 "Failed to dump firmware (%x)!!!\n", rval);
1234 if (stat & HSRX_RISC_INT) { 1329 ha->fw_dumped = 0;
1235 stat &= 0xff;
1236 1330
1237 if (stat == 0x1 || stat == 0x2 || 1331 } else {
1238 stat == 0x10 || stat == 0x11) { 1332 qla_printk(KERN_INFO, ha,
1239 set_bit(MBX_INTERRUPT, 1333 "Firmware dump saved to temp buffer (%ld/%p).\n",
1240 &ha->mbx_cmd_flags); 1334 ha->host_no, ha->fw_dump);
1335 ha->fw_dumped = 1;
1336 }
1241 1337
1242 mb[0] = RD_REG_WORD(&reg->mailbox0); 1338qla24xx_fw_dump_failed:
1243 mb[2] = RD_REG_WORD(&reg->mailbox2); 1339 if (!hardware_locked)
1244 mb[3] = RD_REG_WORD(&reg->mailbox3); 1340 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1341}
1245 1342
1246 WRT_REG_DWORD(&reg->hccr, 1343void
1247 HCCRX_CLR_RISC_INT); 1344qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1248 RD_REG_DWORD(&reg->hccr); 1345{
1249 break; 1346 int rval;
1250 } 1347 uint32_t cnt;
1348 uint32_t risc_address;
1349 uint16_t mb0, wd;
1251 1350
1252 /* Clear this intr; it wasn't a mailbox intr */ 1351 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1253 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1352 uint32_t __iomem *dmp_reg;
1254 RD_REG_DWORD(&reg->hccr); 1353 uint32_t *iter_reg;
1255 } 1354 uint16_t __iomem *mbx_reg;
1256 udelay(5); 1355 unsigned long flags;
1257 } 1356 struct qla25xx_fw_dump *fw;
1357 uint32_t ext_mem_cnt;
1358 void *nxt;
1258 1359
1259 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 1360 risc_address = ext_mem_cnt = 0;
1260 rval = mb[0] & MBS_MASK; 1361 flags = 0;
1261 fw->code_ram[cnt] = htonl((mb[3] << 16) | mb[2]); 1362
1262 } else { 1363 if (!hardware_locked)
1263 rval = QLA_FUNCTION_FAILED; 1364 spin_lock_irqsave(&ha->hardware_lock, flags);
1365
1366 if (!ha->fw_dump) {
1367 qla_printk(KERN_WARNING, ha,
1368 "No buffer available for dump!!!\n");
1369 goto qla25xx_fw_dump_failed;
1370 }
1371
1372 if (ha->fw_dumped) {
1373 qla_printk(KERN_WARNING, ha,
1374 "Firmware has been previously dumped (%p) -- ignoring "
1375 "request...\n", ha->fw_dump);
1376 goto qla25xx_fw_dump_failed;
1377 }
1378 fw = &ha->fw_dump->isp.isp25;
1379 qla2xxx_prep_dump(ha, ha->fw_dump);
1380
1381 rval = QLA_SUCCESS;
1382 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1383
1384 /* Pause RISC. */
1385 if ((RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0) {
1386 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET |
1387 HCCRX_CLR_HOST_INT);
1388 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
1389 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
1390 for (cnt = 30000;
1391 (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 &&
1392 rval == QLA_SUCCESS; cnt--) {
1393 if (cnt)
1394 udelay(100);
1395 else
1396 rval = QLA_FUNCTION_TIMEOUT;
1264 } 1397 }
1265 } 1398 }
1266 1399
1267 if (rval == QLA_SUCCESS) { 1400 if (rval == QLA_SUCCESS) {
1268 /* External Memory. */ 1401 /* Host interface registers. */
1269 risc_address = 0x100000; 1402 dmp_reg = (uint32_t __iomem *)(reg + 0);
1270 ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1; 1403 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1271 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED); 1404 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1272 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1273 }
1274 for (cnt = 0; cnt < ext_mem_cnt && rval == QLA_SUCCESS;
1275 cnt++, risc_address++) {
1276 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
1277 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
1278 RD_REG_WORD(&reg->mailbox8);
1279 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
1280 1405
1281 for (timer = 6000000; timer; timer--) { 1406 /* Disable interrupts. */
1282 /* Check for pending interrupts. */ 1407 WRT_REG_DWORD(&reg->ictrl, 0);
1283 stat = RD_REG_DWORD(&reg->host_status); 1408 RD_REG_DWORD(&reg->ictrl);
1284 if (stat & HSRX_RISC_INT) {
1285 stat &= 0xff;
1286 1409
1287 if (stat == 0x1 || stat == 0x2 || 1410 /* Shadow registers. */
1288 stat == 0x10 || stat == 0x11) { 1411 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1289 set_bit(MBX_INTERRUPT, 1412 RD_REG_DWORD(&reg->iobase_addr);
1290 &ha->mbx_cmd_flags); 1413 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1414 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1291 1415
1292 mb[0] = RD_REG_WORD(&reg->mailbox0); 1416 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1293 mb[2] = RD_REG_WORD(&reg->mailbox2); 1417 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1294 mb[3] = RD_REG_WORD(&reg->mailbox3);
1295 1418
1296 WRT_REG_DWORD(&reg->hccr, 1419 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1297 HCCRX_CLR_RISC_INT); 1420 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1298 RD_REG_DWORD(&reg->hccr);
1299 break;
1300 }
1301 1421
1302 /* Clear this intr; it wasn't a mailbox intr */ 1422 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1303 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1423 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1304 RD_REG_DWORD(&reg->hccr); 1424
1305 } 1425 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1426 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1427
1428 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1429 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1430
1431 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1432 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1433
1434 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1435 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1436
1437 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1438 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1439
1440 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1441 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1442
1443 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1444 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1445
1446 /* RISC I/O register. */
1447 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1448 RD_REG_DWORD(&reg->iobase_addr);
1449 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1450
1451 /* Mailbox registers. */
1452 mbx_reg = &reg->mailbox0;
1453 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1454 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1455
1456 /* Transfer sequence registers. */
1457 iter_reg = fw->xseq_gp_reg;
1458 WRT_REG_DWORD(&reg->iobase_addr, 0xBF00);
1459 dmp_reg = &reg->iobase_window;
1460 for (cnt = 0; cnt < 16; cnt++)
1461 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1462
1463 WRT_REG_DWORD(&reg->iobase_addr, 0xBF10);
1464 dmp_reg = &reg->iobase_window;
1465 for (cnt = 0; cnt < 16; cnt++)
1466 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1467
1468 WRT_REG_DWORD(&reg->iobase_addr, 0xBF20);
1469 dmp_reg = &reg->iobase_window;
1470 for (cnt = 0; cnt < 16; cnt++)
1471 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1472
1473 WRT_REG_DWORD(&reg->iobase_addr, 0xBF30);
1474 dmp_reg = &reg->iobase_window;
1475 for (cnt = 0; cnt < 16; cnt++)
1476 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1477
1478 WRT_REG_DWORD(&reg->iobase_addr, 0xBF40);
1479 dmp_reg = &reg->iobase_window;
1480 for (cnt = 0; cnt < 16; cnt++)
1481 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1482
1483 WRT_REG_DWORD(&reg->iobase_addr, 0xBF50);
1484 dmp_reg = &reg->iobase_window;
1485 for (cnt = 0; cnt < 16; cnt++)
1486 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1487
1488 WRT_REG_DWORD(&reg->iobase_addr, 0xBF60);
1489 dmp_reg = &reg->iobase_window;
1490 for (cnt = 0; cnt < 16; cnt++)
1491 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1492
1493 WRT_REG_DWORD(&reg->iobase_addr, 0xBF70);
1494 dmp_reg = &reg->iobase_window;
1495 for (cnt = 0; cnt < 16; cnt++)
1496 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1497
1498 iter_reg = fw->xseq_0_reg;
1499 WRT_REG_DWORD(&reg->iobase_addr, 0xBFC0);
1500 dmp_reg = &reg->iobase_window;
1501 for (cnt = 0; cnt < 16; cnt++)
1502 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1503
1504 WRT_REG_DWORD(&reg->iobase_addr, 0xBFD0);
1505 dmp_reg = &reg->iobase_window;
1506 for (cnt = 0; cnt < 16; cnt++)
1507 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1508
1509 WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0);
1510 dmp_reg = &reg->iobase_window;
1511 for (cnt = 0; cnt < 16; cnt++)
1512 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1513
1514 WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0);
1515 dmp_reg = &reg->iobase_window;
1516 for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++)
1517 fw->xseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1518
1519 /* Receive sequence registers. */
1520 iter_reg = fw->rseq_gp_reg;
1521 WRT_REG_DWORD(&reg->iobase_addr, 0xFF00);
1522 dmp_reg = &reg->iobase_window;
1523 for (cnt = 0; cnt < 16; cnt++)
1524 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1525
1526 WRT_REG_DWORD(&reg->iobase_addr, 0xFF10);
1527 dmp_reg = &reg->iobase_window;
1528 for (cnt = 0; cnt < 16; cnt++)
1529 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1530
1531 WRT_REG_DWORD(&reg->iobase_addr, 0xFF20);
1532 dmp_reg = &reg->iobase_window;
1533 for (cnt = 0; cnt < 16; cnt++)
1534 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1535
1536 WRT_REG_DWORD(&reg->iobase_addr, 0xFF30);
1537 dmp_reg = &reg->iobase_window;
1538 for (cnt = 0; cnt < 16; cnt++)
1539 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1540
1541 WRT_REG_DWORD(&reg->iobase_addr, 0xFF40);
1542 dmp_reg = &reg->iobase_window;
1543 for (cnt = 0; cnt < 16; cnt++)
1544 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1545
1546 WRT_REG_DWORD(&reg->iobase_addr, 0xFF50);
1547 dmp_reg = &reg->iobase_window;
1548 for (cnt = 0; cnt < 16; cnt++)
1549 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1550
1551 WRT_REG_DWORD(&reg->iobase_addr, 0xFF60);
1552 dmp_reg = &reg->iobase_window;
1553 for (cnt = 0; cnt < 16; cnt++)
1554 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1555
1556 WRT_REG_DWORD(&reg->iobase_addr, 0xFF70);
1557 dmp_reg = &reg->iobase_window;
1558 for (cnt = 0; cnt < 16; cnt++)
1559 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1560
1561 iter_reg = fw->rseq_0_reg;
1562 WRT_REG_DWORD(&reg->iobase_addr, 0xFFC0);
1563 dmp_reg = &reg->iobase_window;
1564 for (cnt = 0; cnt < 16; cnt++)
1565 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1566
1567 WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0);
1568 dmp_reg = &reg->iobase_window;
1569 for (cnt = 0; cnt < 16; cnt++)
1570 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1571
1572 WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0);
1573 dmp_reg = &reg->iobase_window;
1574 for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++)
1575 fw->rseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1576
1577 WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0);
1578 dmp_reg = &reg->iobase_window;
1579 for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++)
1580 fw->rseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1581
1582 /* Auxiliary sequence registers. */
1583 iter_reg = fw->aseq_gp_reg;
1584 WRT_REG_DWORD(&reg->iobase_addr, 0xB000);
1585 dmp_reg = &reg->iobase_window;
1586 for (cnt = 0; cnt < 16; cnt++)
1587 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1588
1589 WRT_REG_DWORD(&reg->iobase_addr, 0xB010);
1590 dmp_reg = &reg->iobase_window;
1591 for (cnt = 0; cnt < 16; cnt++)
1592 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1593
1594 WRT_REG_DWORD(&reg->iobase_addr, 0xB020);
1595 dmp_reg = &reg->iobase_window;
1596 for (cnt = 0; cnt < 16; cnt++)
1597 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1598
1599 WRT_REG_DWORD(&reg->iobase_addr, 0xB030);
1600 dmp_reg = &reg->iobase_window;
1601 for (cnt = 0; cnt < 16; cnt++)
1602 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1603
1604 WRT_REG_DWORD(&reg->iobase_addr, 0xB040);
1605 dmp_reg = &reg->iobase_window;
1606 for (cnt = 0; cnt < 16; cnt++)
1607 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1608
1609 WRT_REG_DWORD(&reg->iobase_addr, 0xB050);
1610 dmp_reg = &reg->iobase_window;
1611 for (cnt = 0; cnt < 16; cnt++)
1612 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1613
1614 WRT_REG_DWORD(&reg->iobase_addr, 0xB060);
1615 dmp_reg = &reg->iobase_window;
1616 for (cnt = 0; cnt < 16; cnt++)
1617 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1618
1619 WRT_REG_DWORD(&reg->iobase_addr, 0xB070);
1620 dmp_reg = &reg->iobase_window;
1621 for (cnt = 0; cnt < 16; cnt++)
1622 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1623
1624 iter_reg = fw->aseq_0_reg;
1625 WRT_REG_DWORD(&reg->iobase_addr, 0xB0C0);
1626 dmp_reg = &reg->iobase_window;
1627 for (cnt = 0; cnt < 16; cnt++)
1628 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1629
1630 WRT_REG_DWORD(&reg->iobase_addr, 0xB0D0);
1631 dmp_reg = &reg->iobase_window;
1632 for (cnt = 0; cnt < 16; cnt++)
1633 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1634
1635 WRT_REG_DWORD(&reg->iobase_addr, 0xB0E0);
1636 dmp_reg = &reg->iobase_window;
1637 for (cnt = 0; cnt < sizeof(fw->aseq_1_reg) / 4; cnt++)
1638 fw->aseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1639
1640 WRT_REG_DWORD(&reg->iobase_addr, 0xB0F0);
1641 dmp_reg = &reg->iobase_window;
1642 for (cnt = 0; cnt < sizeof(fw->aseq_2_reg) / 4; cnt++)
1643 fw->aseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1644
1645 /* Command DMA registers. */
1646 WRT_REG_DWORD(&reg->iobase_addr, 0x7100);
1647 dmp_reg = &reg->iobase_window;
1648 for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++)
1649 fw->cmd_dma_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1650
1651 /* Queues. */
1652 iter_reg = fw->req0_dma_reg;
1653 WRT_REG_DWORD(&reg->iobase_addr, 0x7200);
1654 dmp_reg = &reg->iobase_window;
1655 for (cnt = 0; cnt < 8; cnt++)
1656 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1657
1658 dmp_reg = &reg->iobase_q;
1659 for (cnt = 0; cnt < 7; cnt++)
1660 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1661
1662 iter_reg = fw->resp0_dma_reg;
1663 WRT_REG_DWORD(&reg->iobase_addr, 0x7300);
1664 dmp_reg = &reg->iobase_window;
1665 for (cnt = 0; cnt < 8; cnt++)
1666 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1667
1668 dmp_reg = &reg->iobase_q;
1669 for (cnt = 0; cnt < 7; cnt++)
1670 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1671
1672 iter_reg = fw->req1_dma_reg;
1673 WRT_REG_DWORD(&reg->iobase_addr, 0x7400);
1674 dmp_reg = &reg->iobase_window;
1675 for (cnt = 0; cnt < 8; cnt++)
1676 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1677
1678 dmp_reg = &reg->iobase_q;
1679 for (cnt = 0; cnt < 7; cnt++)
1680 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1681
1682 /* Transmit DMA registers. */
1683 iter_reg = fw->xmt0_dma_reg;
1684 WRT_REG_DWORD(&reg->iobase_addr, 0x7600);
1685 dmp_reg = &reg->iobase_window;
1686 for (cnt = 0; cnt < 16; cnt++)
1687 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1688
1689 WRT_REG_DWORD(&reg->iobase_addr, 0x7610);
1690 dmp_reg = &reg->iobase_window;
1691 for (cnt = 0; cnt < 16; cnt++)
1692 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1693
1694 iter_reg = fw->xmt1_dma_reg;
1695 WRT_REG_DWORD(&reg->iobase_addr, 0x7620);
1696 dmp_reg = &reg->iobase_window;
1697 for (cnt = 0; cnt < 16; cnt++)
1698 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1699
1700 WRT_REG_DWORD(&reg->iobase_addr, 0x7630);
1701 dmp_reg = &reg->iobase_window;
1702 for (cnt = 0; cnt < 16; cnt++)
1703 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1704
1705 iter_reg = fw->xmt2_dma_reg;
1706 WRT_REG_DWORD(&reg->iobase_addr, 0x7640);
1707 dmp_reg = &reg->iobase_window;
1708 for (cnt = 0; cnt < 16; cnt++)
1709 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1710
1711 WRT_REG_DWORD(&reg->iobase_addr, 0x7650);
1712 dmp_reg = &reg->iobase_window;
1713 for (cnt = 0; cnt < 16; cnt++)
1714 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1715
1716 iter_reg = fw->xmt3_dma_reg;
1717 WRT_REG_DWORD(&reg->iobase_addr, 0x7660);
1718 dmp_reg = &reg->iobase_window;
1719 for (cnt = 0; cnt < 16; cnt++)
1720 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1721
1722 WRT_REG_DWORD(&reg->iobase_addr, 0x7670);
1723 dmp_reg = &reg->iobase_window;
1724 for (cnt = 0; cnt < 16; cnt++)
1725 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1726
1727 iter_reg = fw->xmt4_dma_reg;
1728 WRT_REG_DWORD(&reg->iobase_addr, 0x7680);
1729 dmp_reg = &reg->iobase_window;
1730 for (cnt = 0; cnt < 16; cnt++)
1731 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1732
1733 WRT_REG_DWORD(&reg->iobase_addr, 0x7690);
1734 dmp_reg = &reg->iobase_window;
1735 for (cnt = 0; cnt < 16; cnt++)
1736 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1737
1738 WRT_REG_DWORD(&reg->iobase_addr, 0x76A0);
1739 dmp_reg = &reg->iobase_window;
1740 for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++)
1741 fw->xmt_data_dma_reg[cnt] =
1742 htonl(RD_REG_DWORD(dmp_reg++));
1743
1744 /* Receive DMA registers. */
1745 iter_reg = fw->rcvt0_data_dma_reg;
1746 WRT_REG_DWORD(&reg->iobase_addr, 0x7700);
1747 dmp_reg = &reg->iobase_window;
1748 for (cnt = 0; cnt < 16; cnt++)
1749 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1750
1751 WRT_REG_DWORD(&reg->iobase_addr, 0x7710);
1752 dmp_reg = &reg->iobase_window;
1753 for (cnt = 0; cnt < 16; cnt++)
1754 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1755
1756 iter_reg = fw->rcvt1_data_dma_reg;
1757 WRT_REG_DWORD(&reg->iobase_addr, 0x7720);
1758 dmp_reg = &reg->iobase_window;
1759 for (cnt = 0; cnt < 16; cnt++)
1760 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1761
1762 WRT_REG_DWORD(&reg->iobase_addr, 0x7730);
1763 dmp_reg = &reg->iobase_window;
1764 for (cnt = 0; cnt < 16; cnt++)
1765 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1766
1767 /* RISC registers. */
1768 iter_reg = fw->risc_gp_reg;
1769 WRT_REG_DWORD(&reg->iobase_addr, 0x0F00);
1770 dmp_reg = &reg->iobase_window;
1771 for (cnt = 0; cnt < 16; cnt++)
1772 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1773
1774 WRT_REG_DWORD(&reg->iobase_addr, 0x0F10);
1775 dmp_reg = &reg->iobase_window;
1776 for (cnt = 0; cnt < 16; cnt++)
1777 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1778
1779 WRT_REG_DWORD(&reg->iobase_addr, 0x0F20);
1780 dmp_reg = &reg->iobase_window;
1781 for (cnt = 0; cnt < 16; cnt++)
1782 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1783
1784 WRT_REG_DWORD(&reg->iobase_addr, 0x0F30);
1785 dmp_reg = &reg->iobase_window;
1786 for (cnt = 0; cnt < 16; cnt++)
1787 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1788
1789 WRT_REG_DWORD(&reg->iobase_addr, 0x0F40);
1790 dmp_reg = &reg->iobase_window;
1791 for (cnt = 0; cnt < 16; cnt++)
1792 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1793
1794 WRT_REG_DWORD(&reg->iobase_addr, 0x0F50);
1795 dmp_reg = &reg->iobase_window;
1796 for (cnt = 0; cnt < 16; cnt++)
1797 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1798
1799 WRT_REG_DWORD(&reg->iobase_addr, 0x0F60);
1800 dmp_reg = &reg->iobase_window;
1801 for (cnt = 0; cnt < 16; cnt++)
1802 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1803
1804 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1805 dmp_reg = &reg->iobase_window;
1806 for (cnt = 0; cnt < 16; cnt++)
1807 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1808
1809 /* Local memory controller registers. */
1810 iter_reg = fw->lmc_reg;
1811 WRT_REG_DWORD(&reg->iobase_addr, 0x3000);
1812 dmp_reg = &reg->iobase_window;
1813 for (cnt = 0; cnt < 16; cnt++)
1814 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1815
1816 WRT_REG_DWORD(&reg->iobase_addr, 0x3010);
1817 dmp_reg = &reg->iobase_window;
1818 for (cnt = 0; cnt < 16; cnt++)
1819 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1820
1821 WRT_REG_DWORD(&reg->iobase_addr, 0x3020);
1822 dmp_reg = &reg->iobase_window;
1823 for (cnt = 0; cnt < 16; cnt++)
1824 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1825
1826 WRT_REG_DWORD(&reg->iobase_addr, 0x3030);
1827 dmp_reg = &reg->iobase_window;
1828 for (cnt = 0; cnt < 16; cnt++)
1829 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1830
1831 WRT_REG_DWORD(&reg->iobase_addr, 0x3040);
1832 dmp_reg = &reg->iobase_window;
1833 for (cnt = 0; cnt < 16; cnt++)
1834 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1835
1836 WRT_REG_DWORD(&reg->iobase_addr, 0x3050);
1837 dmp_reg = &reg->iobase_window;
1838 for (cnt = 0; cnt < 16; cnt++)
1839 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1840
1841 WRT_REG_DWORD(&reg->iobase_addr, 0x3060);
1842 dmp_reg = &reg->iobase_window;
1843 for (cnt = 0; cnt < 16; cnt++)
1844 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1845
1846 WRT_REG_DWORD(&reg->iobase_addr, 0x3070);
1847 dmp_reg = &reg->iobase_window;
1848 for (cnt = 0; cnt < 16; cnt++)
1849 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1850
1851 /* Fibre Protocol Module registers. */
1852 iter_reg = fw->fpm_hdw_reg;
1853 WRT_REG_DWORD(&reg->iobase_addr, 0x4000);
1854 dmp_reg = &reg->iobase_window;
1855 for (cnt = 0; cnt < 16; cnt++)
1856 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1857
1858 WRT_REG_DWORD(&reg->iobase_addr, 0x4010);
1859 dmp_reg = &reg->iobase_window;
1860 for (cnt = 0; cnt < 16; cnt++)
1861 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1862
1863 WRT_REG_DWORD(&reg->iobase_addr, 0x4020);
1864 dmp_reg = &reg->iobase_window;
1865 for (cnt = 0; cnt < 16; cnt++)
1866 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1867
1868 WRT_REG_DWORD(&reg->iobase_addr, 0x4030);
1869 dmp_reg = &reg->iobase_window;
1870 for (cnt = 0; cnt < 16; cnt++)
1871 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1872
1873 WRT_REG_DWORD(&reg->iobase_addr, 0x4040);
1874 dmp_reg = &reg->iobase_window;
1875 for (cnt = 0; cnt < 16; cnt++)
1876 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1877
1878 WRT_REG_DWORD(&reg->iobase_addr, 0x4050);
1879 dmp_reg = &reg->iobase_window;
1880 for (cnt = 0; cnt < 16; cnt++)
1881 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1882
1883 WRT_REG_DWORD(&reg->iobase_addr, 0x4060);
1884 dmp_reg = &reg->iobase_window;
1885 for (cnt = 0; cnt < 16; cnt++)
1886 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1887
1888 WRT_REG_DWORD(&reg->iobase_addr, 0x4070);
1889 dmp_reg = &reg->iobase_window;
1890 for (cnt = 0; cnt < 16; cnt++)
1891 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1892
1893 WRT_REG_DWORD(&reg->iobase_addr, 0x4080);
1894 dmp_reg = &reg->iobase_window;
1895 for (cnt = 0; cnt < 16; cnt++)
1896 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1897
1898 WRT_REG_DWORD(&reg->iobase_addr, 0x4090);
1899 dmp_reg = &reg->iobase_window;
1900 for (cnt = 0; cnt < 16; cnt++)
1901 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1902
1903 WRT_REG_DWORD(&reg->iobase_addr, 0x40A0);
1904 dmp_reg = &reg->iobase_window;
1905 for (cnt = 0; cnt < 16; cnt++)
1906 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1907
1908 WRT_REG_DWORD(&reg->iobase_addr, 0x40B0);
1909 dmp_reg = &reg->iobase_window;
1910 for (cnt = 0; cnt < 16; cnt++)
1911 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1912
1913 /* Frame Buffer registers. */
1914 iter_reg = fw->fb_hdw_reg;
1915 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
1916 dmp_reg = &reg->iobase_window;
1917 for (cnt = 0; cnt < 16; cnt++)
1918 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1919
1920 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
1921 dmp_reg = &reg->iobase_window;
1922 for (cnt = 0; cnt < 16; cnt++)
1923 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1924
1925 WRT_REG_DWORD(&reg->iobase_addr, 0x6020);
1926 dmp_reg = &reg->iobase_window;
1927 for (cnt = 0; cnt < 16; cnt++)
1928 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1929
1930 WRT_REG_DWORD(&reg->iobase_addr, 0x6030);
1931 dmp_reg = &reg->iobase_window;
1932 for (cnt = 0; cnt < 16; cnt++)
1933 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1934
1935 WRT_REG_DWORD(&reg->iobase_addr, 0x6040);
1936 dmp_reg = &reg->iobase_window;
1937 for (cnt = 0; cnt < 16; cnt++)
1938 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1939
1940 WRT_REG_DWORD(&reg->iobase_addr, 0x6100);
1941 dmp_reg = &reg->iobase_window;
1942 for (cnt = 0; cnt < 16; cnt++)
1943 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1944
1945 WRT_REG_DWORD(&reg->iobase_addr, 0x6130);
1946 dmp_reg = &reg->iobase_window;
1947 for (cnt = 0; cnt < 16; cnt++)
1948 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1949
1950 WRT_REG_DWORD(&reg->iobase_addr, 0x6150);
1951 dmp_reg = &reg->iobase_window;
1952 for (cnt = 0; cnt < 16; cnt++)
1953 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1954
1955 WRT_REG_DWORD(&reg->iobase_addr, 0x6170);
1956 dmp_reg = &reg->iobase_window;
1957 for (cnt = 0; cnt < 16; cnt++)
1958 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1959
1960 WRT_REG_DWORD(&reg->iobase_addr, 0x6190);
1961 dmp_reg = &reg->iobase_window;
1962 for (cnt = 0; cnt < 16; cnt++)
1963 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1964
1965 WRT_REG_DWORD(&reg->iobase_addr, 0x61B0);
1966 dmp_reg = &reg->iobase_window;
1967 for (cnt = 0; cnt < 16; cnt++)
1968 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1969
1970 WRT_REG_DWORD(&reg->iobase_addr, 0x6F00);
1971 dmp_reg = &reg->iobase_window;
1972 for (cnt = 0; cnt < 16; cnt++)
1973 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1974
1975 /* Reset RISC. */
1976 WRT_REG_DWORD(&reg->ctrl_status,
1977 CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
1978 for (cnt = 0; cnt < 30000; cnt++) {
1979 if ((RD_REG_DWORD(&reg->ctrl_status) &
1980 CSRX_DMA_ACTIVE) == 0)
1981 break;
1982
1983 udelay(10);
1984 }
1985
1986 WRT_REG_DWORD(&reg->ctrl_status,
1987 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
1988 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
1989
1990 udelay(100);
1991 /* Wait for firmware to complete NVRAM accesses. */
1992 mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1993 for (cnt = 10000 ; cnt && mb0; cnt--) {
1306 udelay(5); 1994 udelay(5);
1995 mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1996 barrier();
1307 } 1997 }
1308 1998
1309 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 1999 /* Wait for soft-reset to complete. */
1310 rval = mb[0] & MBS_MASK; 2000 for (cnt = 0; cnt < 30000; cnt++) {
1311 fw->ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]); 2001 if ((RD_REG_DWORD(&reg->ctrl_status) &
1312 } else { 2002 CSRX_ISP_SOFT_RESET) == 0)
1313 rval = QLA_FUNCTION_FAILED; 2003 break;
2004
2005 udelay(10);
1314 } 2006 }
2007 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2008 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
1315 } 2009 }
1316 2010
2011 for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 &&
2012 rval == QLA_SUCCESS; cnt--) {
2013 if (cnt)
2014 udelay(100);
2015 else
2016 rval = QLA_FUNCTION_TIMEOUT;
2017 }
2018
2019 if (rval == QLA_SUCCESS)
2020 rval = qla2xxx_dump_memory(ha, fw->code_ram,
2021 sizeof(fw->code_ram), fw->ext_mem, &nxt);
2022
1317 if (rval == QLA_SUCCESS) { 2023 if (rval == QLA_SUCCESS) {
1318 eft = qla2xxx_copy_queues(ha, &fw->ext_mem[cnt]); 2024 nxt = qla2xxx_copy_queues(ha, nxt);
1319 if (ha->eft) 2025 if (ha->eft)
1320 memcpy(eft, ha->eft, ntohl(ha->fw_dump->eft_size)); 2026 memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
1321 } 2027 }
1322 2028
1323 if (rval != QLA_SUCCESS) { 2029 if (rval != QLA_SUCCESS) {
@@ -1332,7 +2038,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1332 ha->fw_dumped = 1; 2038 ha->fw_dumped = 1;
1333 } 2039 }
1334 2040
1335qla24xx_fw_dump_failed: 2041qla25xx_fw_dump_failed:
1336 if (!hardware_locked) 2042 if (!hardware_locked)
1337 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2043 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1338} 2044}
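
Editor's note: every register capture in the two dump routines above follows the same window pattern — write the bank base address to reg->iobase_addr, then read the 16 (or 8) dwords back through reg->iobase_window, the location the old ISP24xx code reached with the raw ((uint8_t __iomem *)reg + 0xC0) cast, converting each value to big-endian for the dump image. A minimal sketch of that access pattern; the helper name and signature are hypothetical (the patch itself still open-codes the loop at every bank):

    /*
     * Illustrative only: this helper is not part of the patch;
     * qla24xx_fw_dump() and qla25xx_fw_dump() repeat the loop per bank.
     */
    static void
    qla_dump_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
        uint32_t count, uint32_t *buf)
    {
            uint32_t __iomem *dmp_reg;
            uint32_t cnt;

            /* Select which bank of chip registers the window exposes. */
            WRT_REG_DWORD(&reg->iobase_addr, iobase);

            /* Read the bank through the window; dump data is big-endian. */
            dmp_reg = &reg->iobase_window;
            for (cnt = 0; cnt < count; cnt++)
                    *buf++ = htonl(RD_REG_DWORD(dmp_reg++));
    }

With such a helper, the xmt0 capture above would collapse to qla_dump_read_window(reg, 0x7600, 16, fw->xmt0_dma_reg) plus the 0x7610 bank. The memory portion changes differently: the open-coded MBC_READ_RAM_EXTENDED mailbox loops are replaced by one qla2xxx_dump_memory() call, whose nxt output then feeds qla2xxx_copy_queues() and the EFT copy.
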
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 49dffeb78512..cca4b0d8253e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -213,6 +213,43 @@ struct qla24xx_fw_dump {
213 uint32_t ext_mem[1]; 213 uint32_t ext_mem[1];
214}; 214};
215 215
216struct qla25xx_fw_dump {
217 uint32_t host_status;
218 uint32_t host_reg[32];
219 uint32_t shadow_reg[11];
220 uint32_t risc_io_reg;
221 uint16_t mailbox_reg[32];
222 uint32_t xseq_gp_reg[128];
223 uint32_t xseq_0_reg[48];
224 uint32_t xseq_1_reg[16];
225 uint32_t rseq_gp_reg[128];
226 uint32_t rseq_0_reg[32];
227 uint32_t rseq_1_reg[16];
228 uint32_t rseq_2_reg[16];
229 uint32_t aseq_gp_reg[128];
230 uint32_t aseq_0_reg[32];
231 uint32_t aseq_1_reg[16];
232 uint32_t aseq_2_reg[16];
233 uint32_t cmd_dma_reg[16];
234 uint32_t req0_dma_reg[15];
235 uint32_t resp0_dma_reg[15];
236 uint32_t req1_dma_reg[15];
237 uint32_t xmt0_dma_reg[32];
238 uint32_t xmt1_dma_reg[32];
239 uint32_t xmt2_dma_reg[32];
240 uint32_t xmt3_dma_reg[32];
241 uint32_t xmt4_dma_reg[32];
242 uint32_t xmt_data_dma_reg[16];
243 uint32_t rcvt0_data_dma_reg[32];
244 uint32_t rcvt1_data_dma_reg[32];
245 uint32_t risc_gp_reg[128];
246 uint32_t lmc_reg[128];
247 uint32_t fpm_hdw_reg[192];
248 uint32_t fb_hdw_reg[192];
249 uint32_t code_ram[0x2000];
250 uint32_t ext_mem[1];
251};
252
216#define EFT_NUM_BUFFERS 4 253#define EFT_NUM_BUFFERS 4
217#define EFT_BYTES_PER_BUFFER 0x4000 254#define EFT_BYTES_PER_BUFFER 0x4000
218#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS)) 255#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@@ -246,5 +283,6 @@ struct qla2xxx_fw_dump {
246 struct qla2100_fw_dump isp21; 283 struct qla2100_fw_dump isp21;
247 struct qla2300_fw_dump isp23; 284 struct qla2300_fw_dump isp23;
248 struct qla24xx_fw_dump isp24; 285 struct qla24xx_fw_dump isp24;
286 struct qla25xx_fw_dump isp25;
249 } isp; 287 } isp;
250}; 288};
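
Editor's note: the array sizes in struct qla25xx_fw_dump mirror the banks captured by qla25xx_fw_dump() above. For example, xseq_gp_reg[128] holds the eight 16-dword banks read at 0xBF00–0xBF70, fpm_hdw_reg[192] the twelve banks at 0x4000–0x40B0, and lmc_reg[128] the eight banks at 0x3000–0x3070 — one bank more than the ISP24xx path, which stops at 0x3060. Likewise fb_hdw_reg[192] includes the 0x6F00 frame-buffer bank that the ISP24xx dump does not read.
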
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a1ca590ba447..0c9f36c8a248 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1711,6 +1711,14 @@ struct ct_fdmi_hba_attributes {
1711#define FDMI_PORT_OS_DEVICE_NAME 5 1711#define FDMI_PORT_OS_DEVICE_NAME 5
1712#define FDMI_PORT_HOST_NAME 6 1712#define FDMI_PORT_HOST_NAME 6
1713 1713
1714#define FDMI_PORT_SPEED_1GB 0x1
1715#define FDMI_PORT_SPEED_2GB 0x2
1716#define FDMI_PORT_SPEED_10GB 0x4
1717#define FDMI_PORT_SPEED_4GB 0x8
1718#define FDMI_PORT_SPEED_8GB 0x10
1719#define FDMI_PORT_SPEED_16GB 0x20
1720#define FDMI_PORT_SPEED_UNKNOWN 0x8000
1721
1714struct ct_fdmi_port_attr { 1722struct ct_fdmi_port_attr {
1715 uint16_t type; 1723 uint16_t type;
1716 uint16_t len; 1724 uint16_t len;
@@ -2201,6 +2209,7 @@ typedef struct scsi_qla_host {
2201#define SWITCH_FOUND BIT_3 2209#define SWITCH_FOUND BIT_3
2202#define DFLG_NO_CABLE BIT_4 2210#define DFLG_NO_CABLE BIT_4
2203 2211
2212#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
2204 uint32_t device_type; 2213 uint32_t device_type;
2205#define DT_ISP2100 BIT_0 2214#define DT_ISP2100 BIT_0
2206#define DT_ISP2200 BIT_1 2215#define DT_ISP2200 BIT_1
@@ -2213,8 +2222,11 @@ typedef struct scsi_qla_host {
2213#define DT_ISP2432 BIT_8 2222#define DT_ISP2432 BIT_8
2214#define DT_ISP5422 BIT_9 2223#define DT_ISP5422 BIT_9
2215#define DT_ISP5432 BIT_10 2224#define DT_ISP5432 BIT_10
2216#define DT_ISP_LAST (DT_ISP5432 << 1) 2225#define DT_ISP2532 BIT_11
2226#define DT_ISP_LAST (DT_ISP2532 << 1)
2217 2227
2228#define DT_IIDMA BIT_26
2229#define DT_FWI2 BIT_27
2218#define DT_ZIO_SUPPORTED BIT_28 2230#define DT_ZIO_SUPPORTED BIT_28
2219#define DT_OEM_001 BIT_29 2231#define DT_OEM_001 BIT_29
2220#define DT_ISP2200A BIT_30 2232#define DT_ISP2200A BIT_30
@@ -2232,12 +2244,16 @@ typedef struct scsi_qla_host {
2232#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432) 2244#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
2233#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422) 2245#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
2234#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432) 2246#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
2247#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
2235 2248
2236#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ 2249#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2237 IS_QLA6312(ha) || IS_QLA6322(ha)) 2250 IS_QLA6312(ha) || IS_QLA6322(ha))
2238#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha)) 2251#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
2239#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha)) 2252#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
2253#define IS_QLA25XX(ha) (IS_QLA2532(ha))
2240 2254
2255#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2256#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
2241#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED) 2257#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
2242#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001) 2258#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
2243#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) 2259#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
@@ -2274,7 +2290,7 @@ typedef struct scsi_qla_host {
2274 uint16_t rsp_ring_index; /* Current index. */ 2290 uint16_t rsp_ring_index; /* Current index. */
2275 uint16_t response_q_length; 2291 uint16_t response_q_length;
2276 2292
2277 struct isp_operations isp_ops; 2293 struct isp_operations *isp_ops;
2278 2294
2279 /* Outstandings ISP commands. */ 2295 /* Outstandings ISP commands. */
2280 srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS]; 2296 srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
@@ -2298,6 +2314,7 @@ typedef struct scsi_qla_host {
2298#define PORT_SPEED_1GB 0x00 2314#define PORT_SPEED_1GB 0x00
2299#define PORT_SPEED_2GB 0x01 2315#define PORT_SPEED_2GB 0x01
2300#define PORT_SPEED_4GB 0x03 2316#define PORT_SPEED_4GB 0x03
2317#define PORT_SPEED_8GB 0x04
2301 uint16_t link_data_rate; /* F/W operating speed */ 2318 uint16_t link_data_rate; /* F/W operating speed */
2302 2319
2303 uint8_t current_topology; 2320 uint8_t current_topology;
@@ -2564,6 +2581,7 @@ typedef struct scsi_qla_host {
2564#define OPTROM_SIZE_2300 0x20000 2581#define OPTROM_SIZE_2300 0x20000
2565#define OPTROM_SIZE_2322 0x100000 2582#define OPTROM_SIZE_2322 0x100000
2566#define OPTROM_SIZE_24XX 0x100000 2583#define OPTROM_SIZE_24XX 0x100000
2584#define OPTROM_SIZE_25XX 0x200000
2567 2585
2568#include "qla_gbl.h" 2586#include "qla_gbl.h"
2569#include "qla_dbg.h" 2587#include "qla_dbg.h"
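
Editor's note: turning isp_ops into a pointer switches call sites from member access to a dereference, presumably so that hosts of the same ISP family can point at one shared isp_operations table instead of each carrying an embedded copy. A hypothetical call site, assuming a pci_config method along the lines of the helpers declared in qla_gbl.h below:

    ha->isp_ops.pci_config(ha);     /* before: per-host embedded struct */
    ha->isp_ops->pci_config(ha);    /* after: dereference a shared table */
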
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 63a11fef5d1b..99fe49618d61 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -8,14 +8,17 @@
 #define __QLA_FW_H
 
 #define MBS_CHECKSUM_ERROR	0x4010
+#define MBS_INVALID_PRODUCT_KEY	0x4020
 
 /*
  * Firmware Options.
  */
 #define FO1_ENABLE_PUREX	BIT_10
 #define FO1_DISABLE_LED_CTRL	BIT_6
+#define FO1_ENABLE_8016		BIT_0
 #define FO2_ENABLE_SEL_CLASS2	BIT_5
 #define FO3_NO_ABTS_ON_LINKDOWN	BIT_14
+#define FO3_HOLD_STS_IOCB	BIT_12
 
 /*
  * Port Database structure definition for ISP 24xx.
@@ -341,7 +344,9 @@ struct init_cb_24xx {
	 * BIT 10 = Reserved
	 * BIT 11 = Enable FC-SP Security
	 * BIT 12 = FC Tape Enable
-	 * BIT 13-31 = Reserved
+	 * BIT 13 = Reserved
+	 * BIT 14 = Enable Target PRLI Control
+	 * BIT 15-31 = Reserved
	 */
	uint32_t firmware_options_2;
 
@@ -363,7 +368,8 @@ struct init_cb_24xx {
	 * BIT 13 = Data Rate bit 0
	 * BIT 14 = Data Rate bit 1
	 * BIT 15 = Data Rate bit 2
-	 * BIT 16-31 = Reserved
+	 * BIT 16 = Enable 75 ohm Termination Select
+	 * BIT 17-31 = Reserved
	 */
	uint32_t firmware_options_3;
 
@@ -435,6 +441,7 @@ struct cmd_type_7 {
 #define TMF_LUN_RESET		BIT_12
 #define TMF_CLEAR_TASK_SET	BIT_10
 #define TMF_ABORT_TASK_SET	BIT_9
+#define TMF_DSD_LIST_ENABLE	BIT_2
 #define TMF_READ_DATA		BIT_1
 #define TMF_WRITE_DATA		BIT_0
 
@@ -589,7 +596,7 @@ struct els_entry_24xx {
 #define EST_SOFI3		(1 << 4)
 #define EST_SOFI2		(3 << 4)
 
-	uint32_t rx_xchg_address[2];	/* Receive exchange address. */
+	uint32_t rx_xchg_address;	/* Receive exchange address. */
	uint16_t rx_dsd_count;
 
	uint8_t opcode;
@@ -650,6 +657,7 @@ struct logio_entry_24xx {
 
	uint16_t control_flags;		/* Control flags. */
					/* Modifiers. */
+#define LCF_INCLUDE_SNS		BIT_10	/* Include SNS (FFFFFC) during LOGO. */
 #define LCF_FCP2_OVERRIDE	BIT_9	/* Set/Reset word 3 of PRLI. */
 #define LCF_CLASS_2		BIT_8	/* Enable class 2 during PLOGI. */
 #define LCF_FREE_NPORT		BIT_7	/* Release NPORT handle after LOGO. */
@@ -779,6 +787,15 @@ struct device_reg_24xx {
 #define FA_RISC_CODE_ADDR	0x20000
 #define FA_RISC_CODE_SEGMENTS	2
 
+#define FA_FW_AREA_ADDR		0x40000
+#define FA_VPD_NVRAM_ADDR	0x48000
+#define FA_FEATURE_ADDR		0x4C000
+#define FA_FLASH_DESCR_ADDR	0x50000
+#define FA_HW_EVENT_ADDR	0x54000
+#define FA_BOOT_LOG_ADDR	0x58000
+#define FA_FW_DUMP0_ADDR	0x60000
+#define FA_FW_DUMP1_ADDR	0x70000
+
	uint32_t flash_data;		/* Flash/NVRAM BIOS data. */
 
	uint32_t ctrl_status;		/* Control/Status. */
@@ -859,10 +876,13 @@ struct device_reg_24xx {
 #define HCCRX_CLR_RISC_INT	0xA0000000
 
	uint32_t gpiod;			/* GPIO Data register. */
+
					/* LED update mask. */
 #define GPDX_LED_UPDATE_MASK	(BIT_20|BIT_19|BIT_18)
					/* Data update mask. */
 #define GPDX_DATA_UPDATE_MASK	(BIT_17|BIT_16)
+					/* Data update mask. */
+#define GPDX_DATA_UPDATE_2_MASK	(BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
					/* LED control mask. */
 #define GPDX_LED_COLOR_MASK	(BIT_4|BIT_3|BIT_2)
					/* LED bit values. Color names as
@@ -877,6 +897,8 @@ struct device_reg_24xx {
	uint32_t gpioe;			/* GPIO Enable register. */
					/* Enable update mask. */
 #define GPEX_ENABLE_UPDATE_MASK	(BIT_17|BIT_16)
+					/* Enable update mask. */
+#define GPEX_ENABLE_UPDATE_2_MASK	(BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
					/* Enable. */
 #define GPEX_ENABLE		(BIT_1|BIT_0)
 
@@ -916,6 +938,14 @@ struct device_reg_24xx {
	uint16_t mailbox29;
	uint16_t mailbox30;
	uint16_t mailbox31;
+
+	uint32_t iobase_window;
+	uint32_t unused_4[8];		/* Gap. */
+	uint32_t iobase_q;
+	uint32_t unused_5[2];		/* Gap. */
+	uint32_t iobase_select;
+	uint32_t unused_6[2];		/* Gap. */
+	uint32_t iobase_sdata;
 };
 
 /* MID Support ***************************************************************/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b44eff2803ce..aa1e41152283 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -17,6 +17,7 @@ extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
 extern int qla2100_pci_config(struct scsi_qla_host *);
 extern int qla2300_pci_config(struct scsi_qla_host *);
 extern int qla24xx_pci_config(scsi_qla_host_t *);
+extern int qla25xx_pci_config(scsi_qla_host_t *);
 extern void qla2x00_reset_chip(struct scsi_qla_host *);
 extern void qla24xx_reset_chip(struct scsi_qla_host *);
 extern int qla2x00_chip_diag(struct scsi_qla_host *);
@@ -281,6 +282,10 @@ extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
     uint32_t);
 extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
     uint32_t);
+extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+    uint32_t);
+extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+    uint32_t);
 
 extern int qla2x00_beacon_on(struct scsi_qla_host *);
 extern int qla2x00_beacon_off(struct scsi_qla_host *);
@@ -307,6 +312,7 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
 extern void qla2100_fw_dump(scsi_qla_host_t *, int);
 extern void qla2300_fw_dump(scsi_qla_host_t *, int);
 extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla2x00_dump_regs(scsi_qla_host_t *);
 extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
 extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index a086b3f0df65..b06cbb8580d3 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -127,7 +127,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
 		DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
 		    ha->host_no, routine, ms_pkt->entry_status));
 	} else {
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+		if (IS_FWI2_CAPABLE(ha))
 			comp_status = le16_to_cpu(
 			    ((struct ct_entry_24xx *)ms_pkt)->comp_status);
 		else
@@ -180,7 +180,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
 
 	/* Issue GA_NXT */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GA_NXT_REQ_SIZE, GA_NXT_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GA_NXT_REQ_SIZE,
+	    GA_NXT_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GA_NXT_CMD,
@@ -266,7 +267,8 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
 
 	/* Issue GID_PT */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GID_PT_REQ_SIZE, GID_PT_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GID_PT_REQ_SIZE,
+	    GID_PT_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD,
@@ -338,7 +340,7 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GPN_ID */
 		/* Prepare common MS IOCB */
-		ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GPN_ID_REQ_SIZE,
+		ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GPN_ID_REQ_SIZE,
 		    GPN_ID_RSP_SIZE);
 
 		/* Prepare CT request */
@@ -399,7 +401,7 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GNN_ID */
 		/* Prepare common MS IOCB */
-		ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GNN_ID_REQ_SIZE,
+		ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GNN_ID_REQ_SIZE,
 		    GNN_ID_RSP_SIZE);
 
 		/* Prepare CT request */
@@ -473,7 +475,8 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
 
 	/* Issue RFT_ID */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RFT_ID_REQ_SIZE, RFT_ID_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFT_ID_REQ_SIZE,
+	    RFT_ID_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFT_ID_CMD,
@@ -528,7 +531,8 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
 
 	/* Issue RFF_ID */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RFF_ID_REQ_SIZE, RFF_ID_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFF_ID_REQ_SIZE,
+	    RFF_ID_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFF_ID_CMD,
@@ -582,7 +586,8 @@ qla2x00_rnn_id(scsi_qla_host_t *ha)
 
 	/* Issue RNN_ID */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RNN_ID_REQ_SIZE, RNN_ID_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RNN_ID_REQ_SIZE,
+	    RNN_ID_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RNN_ID_CMD,
@@ -645,7 +650,7 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
 	/* Issue RSNN_NN */
 	/* Prepare common MS IOCB */
 	/* Request size adjusted after CT preparation */
-	ms_pkt = ha->isp_ops.prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD,
@@ -1102,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
 	if (ha->flags.management_server_logged_in)
 		return ret;
 
-	ha->isp_ops.fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
+	ha->isp_ops->fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
 	    mb, BIT_1);
 	if (mb[0] != MBS_COMMAND_COMPLETE) {
 		DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
@@ -1198,7 +1203,7 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
 	ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
 	struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
 
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+	if (IS_FWI2_CAPABLE(ha)) {
 		ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
 		ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
 	} else {
@@ -1253,7 +1258,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	/* Issue RHBA */
 	/* Prepare common MS IOCB */
 	/* Request size adjusted after CT preparation */
-	ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
@@ -1373,7 +1378,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	/* Firmware version */
 	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
-	ha->isp_ops.fw_version_str(ha, eiter->a.fw_version);
+	ha->isp_ops->fw_version_str(ha, eiter->a.fw_version);
 	alen = strlen(eiter->a.fw_version);
 	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
 	eiter->len = cpu_to_be16(4 + alen);
@@ -1439,7 +1444,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
 
 	/* Issue RPA */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
 	    DHBA_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -1497,7 +1502,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	/* Issue RPA */
 	/* Prepare common MS IOCB */
 	/* Request size adjusted after CT preparation */
-	ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
@@ -1527,12 +1532,20 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	eiter = (struct ct_fdmi_port_attr *) (entries + size);
 	eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
 	eiter->len = __constant_cpu_to_be16(4 + 4);
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
-		eiter->a.sup_speed = __constant_cpu_to_be32(4);
+	if (IS_QLA25XX(ha))
+		eiter->a.sup_speed = __constant_cpu_to_be32(
+		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
+	else if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+		eiter->a.sup_speed = __constant_cpu_to_be32(
+		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_4GB);
 	else if (IS_QLA23XX(ha))
-		eiter->a.sup_speed = __constant_cpu_to_be32(2);
+		eiter->a.sup_speed =__constant_cpu_to_be32(
+		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB);
 	else
-		eiter->a.sup_speed = __constant_cpu_to_be32(1);
+		eiter->a.sup_speed = __constant_cpu_to_be32(
+		    FDMI_PORT_SPEED_1GB);
 	size += 4 + 4;
 
 	DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no,
@@ -1543,14 +1556,25 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
 	eiter->len = __constant_cpu_to_be16(4 + 4);
 	switch (ha->link_data_rate) {
-	case 0:
-		eiter->a.cur_speed = __constant_cpu_to_be32(1);
+	case PORT_SPEED_1GB:
+		eiter->a.cur_speed =
+		    __constant_cpu_to_be32(FDMI_PORT_SPEED_1GB);
+		break;
+	case PORT_SPEED_2GB:
+		eiter->a.cur_speed =
+		    __constant_cpu_to_be32(FDMI_PORT_SPEED_2GB);
+		break;
+	case PORT_SPEED_4GB:
+		eiter->a.cur_speed =
+		    __constant_cpu_to_be32(FDMI_PORT_SPEED_4GB);
 		break;
-	case 1:
-		eiter->a.cur_speed = __constant_cpu_to_be32(2);
+	case PORT_SPEED_8GB:
+		eiter->a.cur_speed =
+		    __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
 		break;
-	case 3:
-		eiter->a.cur_speed = __constant_cpu_to_be32(4);
+	default:
+		eiter->a.cur_speed =
+		    __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
 		break;
 	}
 	size += 4 + 4;
@@ -1562,7 +1586,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	eiter = (struct ct_fdmi_port_attr *) (entries + size);
 	eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
 	eiter->len = __constant_cpu_to_be16(4 + 4);
-	max_frame_size = IS_QLA24XX(ha) || IS_QLA54XX(ha) ?
+	max_frame_size = IS_FWI2_CAPABLE(ha) ?
 	    (uint32_t) icb24->frame_payload_size:
 	    (uint32_t) ha->init_cb->frame_payload_size;
 	eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
@@ -1678,7 +1702,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
 
-	if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+	if (!IS_IIDMA_CAPABLE(ha))
 		return QLA_FUNCTION_FAILED;
 
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
@@ -1686,7 +1710,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 		memset(list[i].fabric_port_name, 0, WWN_SIZE);
 
 		/* Prepare common MS IOCB */
-		ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GFPN_ID_REQ_SIZE,
+		ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GFPN_ID_REQ_SIZE,
 		    GFPN_ID_RSP_SIZE);
 
 		/* Prepare CT request */
@@ -1786,7 +1810,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
 
-	if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+	if (!IS_IIDMA_CAPABLE(ha))
 		return QLA_FUNCTION_FAILED;
 	if (!ha->flags.gpsc_supported)
 		return QLA_FUNCTION_FAILED;
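The recurring change in the hunks above replaces the open-coded chip checks (IS_QLA24XX(ha) || IS_QLA54XX(ha)) with the feature predicates IS_FWI2_CAPABLE(ha) and IS_IIDMA_CAPABLE(ha). Their exact definitions live in qla_def.h and are not part of this diff; the following is only an assumed sketch of their shape, shown for orientation:

/* Assumed shape only -- the real macros are defined in qla_def.h. */
#define IS_FWI2_CAPABLE(ha)	(IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
				 IS_QLA25XX(ha))
#define IS_IIDMA_CAPABLE(ha)	IS_FWI2_CAPABLE(ha)

Grouping the checks behind one predicate is what lets ISP25xx support be added without revisiting every call site again.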
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index cc6ebb609e98..5ec798c2bf13 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -79,20 +79,20 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
 		set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
 
 	qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
-	rval = ha->isp_ops.pci_config(ha);
+	rval = ha->isp_ops->pci_config(ha);
 	if (rval) {
 		DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
 		    ha->host_no));
 		return (rval);
 	}
 
-	ha->isp_ops.reset_chip(ha);
+	ha->isp_ops->reset_chip(ha);
 
-	ha->isp_ops.get_flash_version(ha, ha->request_ring);
+	ha->isp_ops->get_flash_version(ha, ha->request_ring);
 
 	qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
 
-	ha->isp_ops.nvram_config(ha);
+	ha->isp_ops->nvram_config(ha);
 
 	if (ha->flags.disable_serdes) {
 		/* Mask HBA via NVRAM settings? */
@@ -108,7 +108,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
 	qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
 
 	if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
-		rval = ha->isp_ops.chip_diag(ha);
+		rval = ha->isp_ops->chip_diag(ha);
 		if (rval)
 			return (rval);
 		rval = qla2x00_setup_chip(ha);
@@ -129,14 +129,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
 int
 qla2100_pci_config(scsi_qla_host_t *ha)
 {
-	int ret;
 	uint16_t w;
 	uint32_t d;
 	unsigned long flags;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	pci_set_master(ha->pdev);
-	ret = pci_set_mwi(ha->pdev);
+	pci_try_set_mwi(ha->pdev);
 
 	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
 	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@@ -164,7 +163,6 @@ qla2100_pci_config(scsi_qla_host_t *ha)
 int
 qla2300_pci_config(scsi_qla_host_t *ha)
 {
-	int ret;
 	uint16_t w;
 	uint32_t d;
 	unsigned long flags = 0;
@@ -172,7 +170,7 @@ qla2300_pci_config(scsi_qla_host_t *ha)
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	pci_set_master(ha->pdev);
-	ret = pci_set_mwi(ha->pdev);
+	pci_try_set_mwi(ha->pdev);
 
 	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
 	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@@ -250,15 +248,13 @@ qla2300_pci_config(scsi_qla_host_t *ha)
 int
 qla24xx_pci_config(scsi_qla_host_t *ha)
 {
-	int ret;
 	uint16_t w;
 	uint32_t d;
 	unsigned long flags = 0;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
-	int pcix_cmd_reg, pcie_dctl_reg;
 
 	pci_set_master(ha->pdev);
-	ret = pci_set_mwi(ha->pdev);
+	pci_try_set_mwi(ha->pdev);
 
 	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
 	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@@ -268,28 +264,12 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
 	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
 
 	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
-	pcix_cmd_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX);
-	if (pcix_cmd_reg) {
-		uint16_t pcix_cmd;
-
-		pcix_cmd_reg += PCI_X_CMD;
-		pci_read_config_word(ha->pdev, pcix_cmd_reg, &pcix_cmd);
-		pcix_cmd &= ~PCI_X_CMD_MAX_READ;
-		pcix_cmd |= 0x0008;
-		pci_write_config_word(ha->pdev, pcix_cmd_reg, pcix_cmd);
-	}
+	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
+		pcix_set_mmrbc(ha->pdev, 2048);
 
 	/* PCIe -- adjust Maximum Read Request Size (2048). */
-	pcie_dctl_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
-	if (pcie_dctl_reg) {
-		uint16_t pcie_dctl;
-
-		pcie_dctl_reg += PCI_EXP_DEVCTL;
-		pci_read_config_word(ha->pdev, pcie_dctl_reg, &pcie_dctl);
-		pcie_dctl &= ~PCI_EXP_DEVCTL_READRQ;
-		pcie_dctl |= 0x4000;
-		pci_write_config_word(ha->pdev, pcie_dctl_reg, pcie_dctl);
-	}
+	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+		pcie_set_readrq(ha->pdev, 2048);
 
 	/* Reset expansion ROM address decode enable */
 	pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
@@ -307,6 +287,40 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
 }
 
 /**
+ * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla25xx_pci_config(scsi_qla_host_t *ha)
+{
+	uint16_t w;
+	uint32_t d;
+
+	pci_set_master(ha->pdev);
+	pci_try_set_mwi(ha->pdev);
+
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+	w &= ~PCI_COMMAND_INTX_DISABLE;
+	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+	/* PCIe -- adjust Maximum Read Request Size (2048). */
+	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+		pcie_set_readrq(ha->pdev, 2048);
+
+	/* Reset expansion ROM address decode enable */
+	pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
+	d &= ~PCI_ROM_ADDRESS_ENABLE;
+	pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
+
+	ha->chip_revision = ha->pdev->revision;
+
+	return QLA_SUCCESS;
+}
+
+/**
  * qla2x00_isp_firmware() - Choose firmware image.
  * @ha: HA context
  *
@@ -351,7 +365,7 @@ qla2x00_reset_chip(scsi_qla_host_t *ha)
 	uint32_t cnt;
 	uint16_t cmd;
 
-	ha->isp_ops.disable_intrs(ha);
+	ha->isp_ops->disable_intrs(ha);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
@@ -551,7 +565,7 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
 void
 qla24xx_reset_chip(scsi_qla_host_t *ha)
 {
-	ha->isp_ops.disable_intrs(ha);
+	ha->isp_ops->disable_intrs(ha);
 
 	/* Perform RISC reset. */
 	qla24xx_reset_risc(ha);
@@ -736,8 +750,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
 		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
 		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
 		    sizeof(uint16_t);
-	} else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
-		fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+	} else if (IS_FWI2_CAPABLE(ha)) {
+		fixed_size = IS_QLA25XX(ha) ?
+		    offsetof(struct qla25xx_fw_dump, ext_mem):
+		    offsetof(struct qla24xx_fw_dump, ext_mem);
 		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
 		    sizeof(uint32_t);
 
@@ -879,7 +895,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
 	uint32_t srisc_address = 0;
 
 	/* Load firmware sequences */
-	rval = ha->isp_ops.load_risc(ha, &srisc_address);
+	rval = ha->isp_ops->load_risc(ha, &srisc_address);
 	if (rval == QLA_SUCCESS) {
 		DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
 		    "code.\n", ha->host_no));
@@ -1130,12 +1146,12 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
 	/* Initialize response queue entries */
 	qla2x00_init_response_q_entries(ha);
 
-	ha->isp_ops.config_rings(ha);
+	ha->isp_ops->config_rings(ha);
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	/* Update any ISP specific firmware options before initialization. */
-	ha->isp_ops.update_fw_options(ha);
+	ha->isp_ops->update_fw_options(ha);
 
 	DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no));
 
@@ -1459,7 +1475,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 	ha->nvram_base = 0x80;
 
 	/* Get NVRAM data and calculate checksum. */
-	ha->isp_ops.read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size);
+	ha->isp_ops->read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size);
 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
 		chksum += *ptr++;
 
@@ -2119,7 +2135,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
 	int rval;
 	uint16_t port_speed, mb[6];
 
-	if (!IS_QLA24XX(ha))
+	if (!IS_IIDMA_CAPABLE(ha))
 		return;
 
 	switch (be16_to_cpu(fcport->fp_speed)) {
@@ -2267,7 +2283,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 	scsi_qla_host_t *pha = to_qla_parent(ha);
 
 	/* If FL port exists, then SNS is present */
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+	if (IS_FWI2_CAPABLE(ha))
 		loop_id = NPH_F_PORT;
 	else
 		loop_id = SNS_FL_PORT;
@@ -2294,11 +2310,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 			qla2x00_fdmi_register(ha);
 
 		/* Ensure we are logged into the SNS. */
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+		if (IS_FWI2_CAPABLE(ha))
 			loop_id = NPH_SNS;
 		else
 			loop_id = SIMPLE_NAME_SERVER;
-		ha->isp_ops.fabric_login(ha, loop_id, 0xff, 0xff,
+		ha->isp_ops->fabric_login(ha, loop_id, 0xff, 0xff,
 		    0xfc, mb, BIT_1 | BIT_0);
 		if (mb[0] != MBS_COMMAND_COMPLETE) {
 			DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2355,7 +2371,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 			    (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
 			    fcport->port_type != FCT_INITIATOR &&
 			    fcport->port_type != FCT_BROADCAST) {
-				ha->isp_ops.fabric_logout(ha,
+				ha->isp_ops->fabric_logout(ha,
 				    fcport->loop_id,
 				    fcport->d_id.b.domain,
 				    fcport->d_id.b.area,
@@ -2664,7 +2680,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
 		    (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
 		    fcport->port_type != FCT_INITIATOR &&
 		    fcport->port_type != FCT_BROADCAST) {
-			ha->isp_ops.fabric_logout(ha, fcport->loop_id,
+			ha->isp_ops->fabric_logout(ha, fcport->loop_id,
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa);
 			fcport->loop_id = FC_NO_LOOP_ID;
@@ -2919,7 +2935,7 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 			opts |= BIT_1;
 		rval = qla2x00_get_port_database(ha, fcport, opts);
 		if (rval != QLA_SUCCESS) {
-			ha->isp_ops.fabric_logout(ha, fcport->loop_id,
+			ha->isp_ops->fabric_logout(ha, fcport->loop_id,
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa);
 			qla2x00_mark_device_lost(ha, fcport, 1, 0);
@@ -2964,7 +2980,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa));
 
 		/* Login fcport on switch. */
-		ha->isp_ops.fabric_login(ha, fcport->loop_id,
+		ha->isp_ops->fabric_login(ha, fcport->loop_id,
 		    fcport->d_id.b.domain, fcport->d_id.b.area,
 		    fcport->d_id.b.al_pa, mb, BIT_0);
 		if (mb[0] == MBS_PORT_ID_USED) {
@@ -3032,7 +3048,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 			 * dead.
 			 */
 			*next_loopid = fcport->loop_id;
-			ha->isp_ops.fabric_logout(ha, fcport->loop_id,
+			ha->isp_ops->fabric_logout(ha, fcport->loop_id,
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa);
 			qla2x00_mark_device_lost(ha, fcport, 1, 0);
@@ -3050,7 +3066,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 			    fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
 
 			*next_loopid = fcport->loop_id;
-			ha->isp_ops.fabric_logout(ha, fcport->loop_id,
+			ha->isp_ops->fabric_logout(ha, fcport->loop_id,
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa);
 			fcport->loop_id = FC_NO_LOOP_ID;
@@ -3206,7 +3222,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 
 		qla_printk(KERN_INFO, ha,
 		    "Performing ISP error recovery - ha= %p.\n", ha);
-		ha->isp_ops.reset_chip(ha);
+		ha->isp_ops->reset_chip(ha);
 
 		atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
@@ -3232,9 +3248,9 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 		}
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		ha->isp_ops.get_flash_version(ha, ha->request_ring);
+		ha->isp_ops->get_flash_version(ha, ha->request_ring);
 
-		ha->isp_ops.nvram_config(ha);
+		ha->isp_ops->nvram_config(ha);
 
 		if (!qla2x00_restart_isp(ha)) {
 			clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
@@ -3249,7 +3265,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 
 			ha->flags.online = 1;
 
-			ha->isp_ops.enable_intrs(ha);
+			ha->isp_ops->enable_intrs(ha);
 
 			ha->isp_abort_cnt = 0;
 			clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
@@ -3274,7 +3290,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 					 * The next call disables the board
 					 * completely.
 					 */
-					ha->isp_ops.reset_adapter(ha);
+					ha->isp_ops->reset_adapter(ha);
 					ha->flags.online = 0;
 					clear_bit(ISP_ABORT_RETRY,
 					    &ha->dpc_flags);
@@ -3331,7 +3347,7 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
 	/* If firmware needs to be loaded */
 	if (qla2x00_isp_firmware(ha)) {
 		ha->flags.online = 0;
-		if (!(status = ha->isp_ops.chip_diag(ha))) {
+		if (!(status = ha->isp_ops->chip_diag(ha))) {
 			if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 				status = qla2x00_setup_chip(ha);
 				goto done;
@@ -3423,7 +3439,7 @@ qla2x00_reset_adapter(scsi_qla_host_t *ha)
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	ha->flags.online = 0;
-	ha->isp_ops.disable_intrs(ha);
+	ha->isp_ops->disable_intrs(ha);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -3440,7 +3456,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	ha->flags.online = 0;
-	ha->isp_ops.disable_intrs(ha);
+	ha->isp_ops->disable_intrs(ha);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
@@ -3498,7 +3514,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 
 	/* Get NVRAM data and calculate checksum. */
 	dptr = (uint32_t *)nv;
-	ha->isp_ops.read_nvram(ha, (uint8_t *)dptr, ha->nvram_base,
+	ha->isp_ops->read_nvram(ha, (uint8_t *)dptr, ha->nvram_base,
 	    ha->nvram_size);
 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
 		chksum += le32_to_cpu(*dptr++);
@@ -4012,7 +4028,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
 {
 	int ret, retries;
 
-	if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+	if (!IS_FWI2_CAPABLE(ha))
 		return;
 	if (!ha->fw_major_version)
 		return;
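The PCI configuration hunks above drop the hand-rolled PCI-X/PCIe capability walking in favour of the generic helpers pci_try_set_mwi(), pcix_set_mmrbc() and pcie_set_readrq(). A stand-alone sketch of that calling pattern follows; the function name is invented for illustration, only the helper calls themselves are taken from the diff:

#include <linux/pci.h>

/* Hypothetical helper showing the pattern used by the pci_config hunks above. */
static void example_tune_pci(struct pci_dev *pdev)
{
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);		/* best effort; return value need not be checked */

	if (pci_find_capability(pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(pdev, 2048);	/* PCI-X maximum memory read byte count */
	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
		pcie_set_readrq(pdev, 2048);	/* PCIe maximum read request size */
}

Letting the PCI core do the capability lookups is also why the local pcix_cmd_reg/pcie_dctl_reg bookkeeping and the unused ret from pci_set_mwi() disappear from the qla2xxx functions.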
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index d3023338628f..8e3b04464cff 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -104,7 +104,7 @@ static __inline__ void qla2x00_poll(scsi_qla_host_t *);
 static inline void
 qla2x00_poll(scsi_qla_host_t *ha)
 {
-	ha->isp_ops.intr_handler(0, ha);
+	ha->isp_ops->intr_handler(0, ha);
 }
 
 static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *);
@@ -163,7 +163,7 @@ static inline int qla2x00_is_reserved_id(scsi_qla_host_t *, uint16_t);
 static inline int
 qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id)
 {
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+	if (IS_FWI2_CAPABLE(ha))
 		return (loop_id > NPH_LAST_HANDLE);
 
 	return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
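Every ha->isp_ops.foo() call site in this series becomes ha->isp_ops->foo(): the per-ISP method table is no longer embedded in the host structure but referenced through a pointer to a shared static table chosen at probe time. A reduced sketch of that shape, using method names visible elsewhere in this diff; the struct and variable names below are illustrative, not the driver's actual definitions:

/* Illustrative only; the real struct isp_operations in qla_def.h has many more methods. */
struct example_isp_ops {
	int	(*pci_config)(scsi_qla_host_t *);
	void	(*reset_chip)(scsi_qla_host_t *);
	void	(*fw_dump)(scsi_qla_host_t *, int);
	irqreturn_t (*intr_handler)(int, void *);
};

static struct example_isp_ops example_qla24xx_isp_ops = {
	.pci_config	= qla24xx_pci_config,
	.reset_chip	= qla24xx_reset_chip,
	.fw_dump	= qla24xx_fw_dump,
	.intr_handler	= qla24xx_intr_handler,
};

With a pointer, probe code can simply bind ha->isp_ops to the matching table, and the tables themselves become shared data rather than being copied into every scsi_qla_host_t.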
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c71863ff5489..3a5e78cb6b3f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -326,7 +326,7 @@ qla2x00_start_scsi(srb_t *sp)
 	tot_dsds = nseg;
 
 	/* Calculate the number of request entries needed. */
-	req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
+	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
 	if (ha->req_q_cnt < (req_cnt + 2)) {
 		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
 		if (ha->req_ring_index < cnt)
@@ -364,7 +364,7 @@ qla2x00_start_scsi(srb_t *sp)
 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 
 	/* Build IOCB segments */
-	ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);
+	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
 
 	/* Set total data segment count. */
 	cmd_pkt->entry_count = (uint8_t)req_cnt;
@@ -432,7 +432,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
 	mrk->entry_type = MARKER_TYPE;
 	mrk->modifier = type;
 	if (type != MK_SYNC_ALL) {
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+		if (IS_FWI2_CAPABLE(ha)) {
 			mrk24 = (struct mrk_entry_24xx *) mrk;
 			mrk24->nport_handle = cpu_to_le16(loop_id);
 			mrk24->lun[1] = LSB(lun);
@@ -487,7 +487,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
 	for (timer = HZ; timer; timer--) {
 		if ((req_cnt + 2) >= ha->req_q_cnt) {
 			/* Calculate number of free request entries. */
-			if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+			if (IS_FWI2_CAPABLE(ha))
 				cnt = (uint16_t)RD_REG_DWORD(
 				    &reg->isp24.req_q_out);
 			else
@@ -561,7 +561,7 @@ qla2x00_isp_cmd(scsi_qla_host_t *ha)
 		ha->request_ring_ptr++;
 
 	/* Set chip new ring index. */
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+	if (IS_FWI2_CAPABLE(ha)) {
 		WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
 		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
 	} else {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0ba4c8d37879..b8f226ae2633 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -143,7 +143,7 @@ qla2300_intr_handler(int irq, void *dev_id)
 			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
 			RD_REG_WORD(&reg->hccr);
 
-			ha->isp_ops.fw_dump(ha, 1);
+			ha->isp_ops->fw_dump(ha, 1);
 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
 			break;
 		} else if ((stat & HSR_RISC_INT) == 0)
@@ -247,7 +247,7 @@ void
 qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 {
 #define LS_UNKNOWN	2
-	static char *link_speeds[5] = { "1", "2", "?", "4", "10" };
+	static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
 	char *link_speed;
 	uint16_t handle_cnt;
 	uint16_t cnt;
@@ -334,9 +334,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
 		    mb[1], mb[2], mb[3]);
 
-		ha->isp_ops.fw_dump(ha, 1);
+		ha->isp_ops->fw_dump(ha, 1);
 
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+		if (IS_FWI2_CAPABLE(ha)) {
 			if (mb[1] == 0 && mb[2] == 0) {
 				qla_printk(KERN_ERR, ha,
 				    "Unrecoverable Hardware Error: adapter "
@@ -601,7 +601,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 		    "scsi(%ld): [R|Z]IO update completion.\n",
 		    ha->host_no));
 
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+		if (IS_FWI2_CAPABLE(ha))
 			qla24xx_process_response_queue(ha);
 		else
 			qla2x00_process_response_queue(ha);
@@ -823,7 +823,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 
 	sts = (sts_entry_t *) pkt;
 	sts24 = (struct sts_entry_24xx *) pkt;
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+	if (IS_FWI2_CAPABLE(ha)) {
 		comp_status = le16_to_cpu(sts24->comp_status);
 		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
 	} else {
@@ -872,7 +872,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 	fcport = sp->fcport;
 
 	sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+	if (IS_FWI2_CAPABLE(ha)) {
 		sense_len = le32_to_cpu(sts24->sense_len);
 		rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
 		resid_len = le32_to_cpu(sts24->rsp_residual_count);
@@ -891,7 +891,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 	/* Check for any FCP transport errors. */
 	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
 		/* Sense data lies beyond any FCP RESPONSE data. */
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+		if (IS_FWI2_CAPABLE(ha))
 			sense_data += rsp_info_len;
 		if (rsp_info_len > 3 && rsp_info[3]) {
 			DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
@@ -990,7 +990,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 	case CS_DATA_UNDERRUN:
 		resid = resid_len;
 		/* Use F/W calculated residual length. */
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+		if (IS_FWI2_CAPABLE(ha))
 			resid = fw_resid_len;
 
 		if (scsi_status & SS_RESIDUAL_UNDER) {
@@ -1062,6 +1062,25 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 			    cp->device->id, cp->device->lun, cp,
 			    cp->serial_number));
 
+			/*
+			 * In case of a Underrun condition, set both the lscsi
+			 * status and the completion status to appropriate
+			 * values.
+			 */
+			if (resid &&
+			    ((unsigned)(cp->request_bufflen - resid) <
+			     cp->underflow)) {
+				DEBUG2(qla_printk(KERN_INFO, ha,
+				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
+				    "detected (%x of %x bytes)...returning "
+				    "error status.\n", ha->host_no,
+				    cp->device->channel, cp->device->id,
+				    cp->device->lun, resid,
+				    cp->request_bufflen));
+
+				cp->result = DID_ERROR << 16 | lscsi_status;
+			}
+
 			if (sense_len)
 				DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
 				    CMD_ACTUAL_SNSLEN(cp)));
@@ -1166,7 +1185,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 	case CS_TIMEOUT:
 		cp->result = DID_BUS_BUSY << 16;
 
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+		if (IS_FWI2_CAPABLE(ha)) {
 			DEBUG2(printk(KERN_INFO
 			    "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
 			    "0x%x-0x%x\n", ha->host_no, cp->device->channel,
@@ -1235,7 +1254,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
 	}
 
 	/* Move sense data. */
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+	if (IS_FWI2_CAPABLE(ha))
 		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
 	memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
 	DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
@@ -1483,7 +1502,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
 
 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
 			    "Dumping firmware!\n", hccr);
-			ha->isp_ops.fw_dump(ha, 1);
+			ha->isp_ops->fw_dump(ha, 1);
 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
 			break;
 		} else if ((stat & HSRX_RISC_INT) == 0)
@@ -1617,7 +1636,7 @@ qla24xx_msix_default(int irq, void *dev_id)
 
 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
 			    "Dumping firmware!\n", hccr);
-			ha->isp_ops.fw_dump(ha, 1);
+			ha->isp_ops->fw_dump(ha, 1);
 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
 			break;
 		} else if ((stat & HSRX_RISC_INT) == 0)
@@ -1739,11 +1758,11 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
 	int ret;
 
 	/* If possible, enable MSI-X. */
-	if (!IS_QLA2432(ha))
+	if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
 		goto skip_msix;
 
-	if (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
-	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes)) {
+	if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
+	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
 		DEBUG2(qla_printk(KERN_WARNING, ha,
 		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
 		    ha->chip_revision, ha->fw_attributes));
@@ -1762,7 +1781,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
 	    "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
 skip_msix:
 
-	if (!IS_QLA24XX(ha))
+	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha))
 		goto skip_msi;
 
 	ret = pci_enable_msi(ha->pdev);
@@ -1772,7 +1791,7 @@ skip_msix:
 	}
 skip_msi:
 
-	ret = request_irq(ha->pdev->irq, ha->isp_ops.intr_handler,
+	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
 	    IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
 	if (!ret) {
 		ha->flags.inta_enabled = 1;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2cd0cff25928..d3746ec80a85 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -90,7 +90,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
 	/* Load mailbox registers. */
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+	if (IS_FWI2_CAPABLE(ha))
 		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
 	else
 		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@@ -154,7 +154,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 
 		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
 
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+		if (IS_FWI2_CAPABLE(ha))
 			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
 		else
 			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -175,7 +175,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 		DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
 		    ha->host_no, command));
 
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+		if (IS_FWI2_CAPABLE(ha))
 			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
 		else
 			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -228,7 +228,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
 		uint16_t mb0;
 		uint32_t ictrl;
 
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+		if (IS_FWI2_CAPABLE(ha)) {
 			mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
 			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
 		} else {
@@ -322,7 +322,7 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
 
 	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
 
-	if (MSW(risc_addr) || IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
 		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
 		mcp->mb[8] = MSW(risc_addr);
 		mcp->out_mb = MBX_8|MBX_0;
@@ -336,7 +336,7 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
 	mcp->mb[6] = MSW(MSD(req_dma));
 	mcp->mb[7] = LSW(MSD(req_dma));
 	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+	if (IS_FWI2_CAPABLE(ha)) {
 		mcp->mb[4] = MSW(risc_code_size);
 		mcp->mb[5] = LSW(risc_code_size);
 		mcp->out_mb |= MBX_5|MBX_4;
@@ -387,7 +387,7 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
 	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_0;
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+	if (IS_FWI2_CAPABLE(ha)) {
 		mcp->mb[1] = MSW(risc_addr);
 		mcp->mb[2] = LSW(risc_addr);
 		mcp->mb[3] = 0;
@@ -410,7 +410,7 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
 		    ha->host_no, rval, mcp->mb[0]));
 	} else {
-		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+		if (IS_FWI2_CAPABLE(ha)) {
 			DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
 			    __func__, ha->host_no, mcp->mb[1]));
 		} else {
@@ -551,7 +551,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
 	mcp->mb[3] = fwopts[3];
 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_0;
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+	if (IS_FWI2_CAPABLE(ha)) {
 		mcp->in_mb |= MBX_1;
 	} else {
 		mcp->mb[10] = fwopts[10];
@@ -664,7 +664,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
 	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_0;
-	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+	if (IS_FWI2_CAPABLE(ha)) {
 		mcp->mb[1] = MSW(risc_addr);
 		mcp->mb[2] = LSW(risc_addr);
 		mcp->out_mb |= MBX_2|MBX_1;
@@ -681,8 +681,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
-		    ha->host_no, rval, (IS_QLA24XX(ha) || IS_QLA54XX(ha) ?
-		    (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])));
+		    ha->host_no, rval, IS_FWI2_CAPABLE(ha) ?
+		    (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
 	} else {
 		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
 	}
@@ -739,7 +739,7 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void* buffer, dma_addr_t phys_addr,
 
 		/* Mask reserved bits. */
 		sts_entry->entry_status &=
-		    IS_QLA24XX(ha) || IS_QLA54XX(ha) ? RF_MASK_24XX :RF_MASK;
+		    IS_FWI2_CAPABLE(ha) ? RF_MASK_24XX :RF_MASK;
 	}
 
 	return rval;
@@ -1085,7 +1085,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
 	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
 
 	mcp->mb[0] = MBC_GET_PORT_DATABASE;
-	if (opt != 0 && !IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
 		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
 	mcp->mb[2] = MSW(pd_dma);
 	mcp->mb[3] = LSW(pd_dma);
@@ -1094,7 +1094,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1094 mcp->mb[9] = ha->vp_idx; 1094 mcp->mb[9] = ha->vp_idx;
1095 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 1095 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1096 mcp->in_mb = MBX_0; 1096 mcp->in_mb = MBX_0;
1097 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1097 if (IS_FWI2_CAPABLE(ha)) {
1098 mcp->mb[1] = fcport->loop_id; 1098 mcp->mb[1] = fcport->loop_id;
1099 mcp->mb[10] = opt; 1099 mcp->mb[10] = opt;
1100 mcp->out_mb |= MBX_10|MBX_1; 1100 mcp->out_mb |= MBX_10|MBX_1;
@@ -1107,15 +1107,15 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1107 mcp->mb[1] = fcport->loop_id << 8 | opt; 1107 mcp->mb[1] = fcport->loop_id << 8 | opt;
1108 mcp->out_mb |= MBX_1; 1108 mcp->out_mb |= MBX_1;
1109 } 1109 }
1110 mcp->buf_size = (IS_QLA24XX(ha) || IS_QLA54XX(ha) ? 1110 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1111 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE); 1111 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1112 mcp->flags = MBX_DMA_IN; 1112 mcp->flags = MBX_DMA_IN;
1113 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1113 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1114 rval = qla2x00_mailbox_command(ha, mcp); 1114 rval = qla2x00_mailbox_command(ha, mcp);
1115 if (rval != QLA_SUCCESS) 1115 if (rval != QLA_SUCCESS)
1116 goto gpd_error_out; 1116 goto gpd_error_out;
1117 1117
1118 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1118 if (IS_FWI2_CAPABLE(ha)) {
1119 pd24 = (struct port_database_24xx *) pd; 1119 pd24 = (struct port_database_24xx *) pd;
1120 1120
1121 /* Check for logged in state. */ 1121 /* Check for logged in state. */
@@ -1333,7 +1333,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
1333 1333
1334 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1334 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1335 1335
1336 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1336 if (IS_FWI2_CAPABLE(ha)) {
1337 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1337 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1338 mcp->mb[1] = BIT_6; 1338 mcp->mb[1] = BIT_6;
1339 mcp->mb[2] = 0; 1339 mcp->mb[2] = 0;
@@ -1637,7 +1637,7 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1637 mbx_cmd_t mc; 1637 mbx_cmd_t mc;
1638 mbx_cmd_t *mcp = &mc; 1638 mbx_cmd_t *mcp = &mc;
1639 1639
1640 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 1640 if (IS_FWI2_CAPABLE(ha))
1641 return qla24xx_login_fabric(ha, fcport->loop_id, 1641 return qla24xx_login_fabric(ha, fcport->loop_id,
1642 fcport->d_id.b.domain, fcport->d_id.b.area, 1642 fcport->d_id.b.domain, fcport->d_id.b.area,
1643 fcport->d_id.b.al_pa, mb_ret, opt); 1643 fcport->d_id.b.al_pa, mb_ret, opt);
@@ -1821,7 +1821,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
1821 ha->host_no)); 1821 ha->host_no));
1822 1822
1823 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1823 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1824 mcp->mb[1] = IS_QLA24XX(ha) || IS_QLA54XX(ha) ? BIT_3: 0; 1824 mcp->mb[1] = IS_FWI2_CAPABLE(ha) ? BIT_3: 0;
1825 mcp->mb[2] = 0; 1825 mcp->mb[2] = 0;
1826 mcp->mb[3] = 0; 1826 mcp->mb[3] = 0;
1827 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1827 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1871,7 +1871,7 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1871 1871
1872 mcp->mb[0] = MBC_GET_ID_LIST; 1872 mcp->mb[0] = MBC_GET_ID_LIST;
1873 mcp->out_mb = MBX_0; 1873 mcp->out_mb = MBX_0;
1874 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1874 if (IS_FWI2_CAPABLE(ha)) {
1875 mcp->mb[2] = MSW(id_list_dma); 1875 mcp->mb[2] = MSW(id_list_dma);
1876 mcp->mb[3] = LSW(id_list_dma); 1876 mcp->mb[3] = LSW(id_list_dma);
1877 mcp->mb[6] = MSW(MSD(id_list_dma)); 1877 mcp->mb[6] = MSW(MSD(id_list_dma));
@@ -2063,7 +2063,7 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2063 mcp->mb[7] = LSW(MSD(stat_buf_dma)); 2063 mcp->mb[7] = LSW(MSD(stat_buf_dma));
2064 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2064 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2065 mcp->in_mb = MBX_0; 2065 mcp->in_mb = MBX_0;
2066 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 2066 if (IS_FWI2_CAPABLE(ha)) {
2067 mcp->mb[1] = loop_id; 2067 mcp->mb[1] = loop_id;
2068 mcp->mb[4] = 0; 2068 mcp->mb[4] = 0;
2069 mcp->mb[10] = 0; 2069 mcp->mb[10] = 0;
@@ -2334,7 +2334,7 @@ qla2x00_system_error(scsi_qla_host_t *ha)
2334 mbx_cmd_t mc; 2334 mbx_cmd_t mc;
2335 mbx_cmd_t *mcp = &mc; 2335 mbx_cmd_t *mcp = &mc;
2336 2336
2337 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 2337 if (!IS_FWI2_CAPABLE(ha))
2338 return QLA_FUNCTION_FAILED; 2338 return QLA_FUNCTION_FAILED;
2339 2339
2340 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2340 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2444,7 +2444,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *ha)
2444 mbx_cmd_t mc; 2444 mbx_cmd_t mc;
2445 mbx_cmd_t *mcp = &mc; 2445 mbx_cmd_t *mcp = &mc;
2446 2446
2447 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 2447 if (!IS_FWI2_CAPABLE(ha))
2448 return QLA_FUNCTION_FAILED; 2448 return QLA_FUNCTION_FAILED;
2449 2449
2450 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2450 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2474,7 +2474,7 @@ qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma,
2474 mbx_cmd_t mc; 2474 mbx_cmd_t mc;
2475 mbx_cmd_t *mcp = &mc; 2475 mbx_cmd_t *mcp = &mc;
2476 2476
2477 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 2477 if (!IS_FWI2_CAPABLE(ha))
2478 return QLA_FUNCTION_FAILED; 2478 return QLA_FUNCTION_FAILED;
2479 2479
2480 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2480 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2514,7 +2514,7 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
2514 mbx_cmd_t mc; 2514 mbx_cmd_t mc;
2515 mbx_cmd_t *mcp = &mc; 2515 mbx_cmd_t *mcp = &mc;
2516 2516
2517 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 2517 if (!IS_FWI2_CAPABLE(ha))
2518 return QLA_FUNCTION_FAILED; 2518 return QLA_FUNCTION_FAILED;
2519 2519
2520 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2520 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2552,7 +2552,7 @@ qla2x00_get_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2552 mbx_cmd_t mc; 2552 mbx_cmd_t mc;
2553 mbx_cmd_t *mcp = &mc; 2553 mbx_cmd_t *mcp = &mc;
2554 2554
2555 if (!IS_QLA24XX(ha)) 2555 if (!IS_IIDMA_CAPABLE(ha))
2556 return QLA_FUNCTION_FAILED; 2556 return QLA_FUNCTION_FAILED;
2557 2557
2558 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2558 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2595,7 +2595,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2595 mbx_cmd_t mc; 2595 mbx_cmd_t mc;
2596 mbx_cmd_t *mcp = &mc; 2596 mbx_cmd_t *mcp = &mc;
2597 2597
2598 if (!IS_QLA24XX(ha)) 2598 if (!IS_IIDMA_CAPABLE(ha))
2599 return QLA_FUNCTION_FAILED; 2599 return QLA_FUNCTION_FAILED;
2600 2600
2601 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2601 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
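The qla_mbx.c hunks above replace the open-coded chip checks (IS_QLA24XX(ha) || IS_QLA54XX(ha), and plain IS_QLA24XX(ha)) with the capability macros IS_FWI2_CAPABLE() and IS_IIDMA_CAPABLE(), which key off the DT_FWI2 and DT_IIDMA bits that qla2x00_set_isp_flags() sets in the qla_os.c hunk further down. The macro definitions themselves live in qla_def.h (touched per the diffstat but not shown in this view); a minimal sketch of the likely shape, assuming they simply test device_type bits:

	/* Sketch only -- the real definitions are in qla_def.h, not in this hunk. */
	#define IS_FWI2_CAPABLE(ha)	((ha)->device_type & DT_FWI2)
	#define IS_IIDMA_CAPABLE(ha)	((ha)->device_type & DT_IIDMA)

With the capability expressed as a flag, supporting a new FWI2-class part (the ISP2532/25XX added here) only requires setting the DT_* bits at probe time instead of editing every chip-type test in the mailbox routines.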
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 92376f9dfdd5..c488996cb958 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -265,6 +265,8 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
265 strcpy(str, "PCIe ("); 265 strcpy(str, "PCIe (");
266 if (lspeed == 1) 266 if (lspeed == 1)
267 strcat(str, "2.5Gb/s "); 267 strcat(str, "2.5Gb/s ");
268 else if (lspeed == 2)
269 strcat(str, "5.0Gb/s ");
268 else 270 else
269 strcat(str, "<unknown> "); 271 strcat(str, "<unknown> ");
270 snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); 272 snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
@@ -343,6 +345,12 @@ qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
343 strcat(str, "[IP] "); 345 strcat(str, "[IP] ");
344 if (ha->fw_attributes & BIT_2) 346 if (ha->fw_attributes & BIT_2)
345 strcat(str, "[Multi-ID] "); 347 strcat(str, "[Multi-ID] ");
348 if (ha->fw_attributes & BIT_3)
349 strcat(str, "[SB-2] ");
350 if (ha->fw_attributes & BIT_4)
351 strcat(str, "[T10 CRC] ");
352 if (ha->fw_attributes & BIT_5)
353 strcat(str, "[VI] ");
346 if (ha->fw_attributes & BIT_13) 354 if (ha->fw_attributes & BIT_13)
347 strcat(str, "[Experimental]"); 355 strcat(str, "[Experimental]");
348 return str; 356 return str;
@@ -681,7 +689,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
681 DEBUG3(qla2x00_print_scsi_cmd(cmd)); 689 DEBUG3(qla2x00_print_scsi_cmd(cmd));
682 690
683 spin_unlock_irqrestore(&pha->hardware_lock, flags); 691 spin_unlock_irqrestore(&pha->hardware_lock, flags);
684 if (ha->isp_ops.abort_command(ha, sp)) { 692 if (ha->isp_ops->abort_command(ha, sp)) {
685 DEBUG2(printk("%s(%ld): abort_command " 693 DEBUG2(printk("%s(%ld): abort_command "
686 "mbx failed.\n", __func__, ha->host_no)); 694 "mbx failed.\n", __func__, ha->host_no));
687 } else { 695 } else {
@@ -813,7 +821,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
813#if defined(LOGOUT_AFTER_DEVICE_RESET) 821#if defined(LOGOUT_AFTER_DEVICE_RESET)
814 if (ret == SUCCESS) { 822 if (ret == SUCCESS) {
815 if (fcport->flags & FC_FABRIC_DEVICE) { 823 if (fcport->flags & FC_FABRIC_DEVICE) {
816 ha->isp_ops.fabric_logout(ha, fcport->loop_id); 824 ha->isp_ops->fabric_logout(ha, fcport->loop_id);
817 qla2x00_mark_device_lost(ha, fcport, 0, 0); 825 qla2x00_mark_device_lost(ha, fcport, 0, 0);
818 } 826 }
819 } 827 }
@@ -1105,7 +1113,7 @@ static int
1105qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport) 1113qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
1106{ 1114{
1107 /* Abort Target command will clear Reservation */ 1115 /* Abort Target command will clear Reservation */
1108 return ha->isp_ops.abort_target(reset_fcport); 1116 return ha->isp_ops->abort_target(reset_fcport);
1109} 1117}
1110 1118
1111static int 1119static int
@@ -1184,8 +1192,8 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
1184 !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) { 1192 !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
1185 /* Ok, a 64bit DMA mask is applicable. */ 1193 /* Ok, a 64bit DMA mask is applicable. */
1186 ha->flags.enable_64bit_addressing = 1; 1194 ha->flags.enable_64bit_addressing = 1;
1187 ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_64; 1195 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1188 ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_64; 1196 ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1189 return; 1197 return;
1190 } 1198 }
1191 } 1199 }
@@ -1194,6 +1202,193 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
1194 pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK); 1202 pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK);
1195} 1203}
1196 1204
1205static void
1206qla2x00_enable_intrs(scsi_qla_host_t *ha)
1207{
1208 unsigned long flags = 0;
1209 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1210
1211 spin_lock_irqsave(&ha->hardware_lock, flags);
1212 ha->interrupts_on = 1;
1213 /* enable risc and host interrupts */
1214 WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1215 RD_REG_WORD(&reg->ictrl);
1216 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1217
1218}
1219
1220static void
1221qla2x00_disable_intrs(scsi_qla_host_t *ha)
1222{
1223 unsigned long flags = 0;
1224 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1225
1226 spin_lock_irqsave(&ha->hardware_lock, flags);
1227 ha->interrupts_on = 0;
1228 /* disable risc and host interrupts */
1229 WRT_REG_WORD(&reg->ictrl, 0);
1230 RD_REG_WORD(&reg->ictrl);
1231 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1232}
1233
1234static void
1235qla24xx_enable_intrs(scsi_qla_host_t *ha)
1236{
1237 unsigned long flags = 0;
1238 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1239
1240 spin_lock_irqsave(&ha->hardware_lock, flags);
1241 ha->interrupts_on = 1;
1242 WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1243 RD_REG_DWORD(&reg->ictrl);
1244 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1245}
1246
1247static void
1248qla24xx_disable_intrs(scsi_qla_host_t *ha)
1249{
1250 unsigned long flags = 0;
1251 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1252
1253 spin_lock_irqsave(&ha->hardware_lock, flags);
1254 ha->interrupts_on = 0;
1255 WRT_REG_DWORD(&reg->ictrl, 0);
1256 RD_REG_DWORD(&reg->ictrl);
1257 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1258}
1259
1260static struct isp_operations qla2100_isp_ops = {
1261 .pci_config = qla2100_pci_config,
1262 .reset_chip = qla2x00_reset_chip,
1263 .chip_diag = qla2x00_chip_diag,
1264 .config_rings = qla2x00_config_rings,
1265 .reset_adapter = qla2x00_reset_adapter,
1266 .nvram_config = qla2x00_nvram_config,
1267 .update_fw_options = qla2x00_update_fw_options,
1268 .load_risc = qla2x00_load_risc,
1269 .pci_info_str = qla2x00_pci_info_str,
1270 .fw_version_str = qla2x00_fw_version_str,
1271 .intr_handler = qla2100_intr_handler,
1272 .enable_intrs = qla2x00_enable_intrs,
1273 .disable_intrs = qla2x00_disable_intrs,
1274 .abort_command = qla2x00_abort_command,
1275 .abort_target = qla2x00_abort_target,
1276 .fabric_login = qla2x00_login_fabric,
1277 .fabric_logout = qla2x00_fabric_logout,
1278 .calc_req_entries = qla2x00_calc_iocbs_32,
1279 .build_iocbs = qla2x00_build_scsi_iocbs_32,
1280 .prep_ms_iocb = qla2x00_prep_ms_iocb,
1281 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
1282 .read_nvram = qla2x00_read_nvram_data,
1283 .write_nvram = qla2x00_write_nvram_data,
1284 .fw_dump = qla2100_fw_dump,
1285 .beacon_on = NULL,
1286 .beacon_off = NULL,
1287 .beacon_blink = NULL,
1288 .read_optrom = qla2x00_read_optrom_data,
1289 .write_optrom = qla2x00_write_optrom_data,
1290 .get_flash_version = qla2x00_get_flash_version,
1291};
1292
1293static struct isp_operations qla2300_isp_ops = {
1294 .pci_config = qla2300_pci_config,
1295 .reset_chip = qla2x00_reset_chip,
1296 .chip_diag = qla2x00_chip_diag,
1297 .config_rings = qla2x00_config_rings,
1298 .reset_adapter = qla2x00_reset_adapter,
1299 .nvram_config = qla2x00_nvram_config,
1300 .update_fw_options = qla2x00_update_fw_options,
1301 .load_risc = qla2x00_load_risc,
1302 .pci_info_str = qla2x00_pci_info_str,
1303 .fw_version_str = qla2x00_fw_version_str,
1304 .intr_handler = qla2300_intr_handler,
1305 .enable_intrs = qla2x00_enable_intrs,
1306 .disable_intrs = qla2x00_disable_intrs,
1307 .abort_command = qla2x00_abort_command,
1308 .abort_target = qla2x00_abort_target,
1309 .fabric_login = qla2x00_login_fabric,
1310 .fabric_logout = qla2x00_fabric_logout,
1311 .calc_req_entries = qla2x00_calc_iocbs_32,
1312 .build_iocbs = qla2x00_build_scsi_iocbs_32,
1313 .prep_ms_iocb = qla2x00_prep_ms_iocb,
1314 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
1315 .read_nvram = qla2x00_read_nvram_data,
1316 .write_nvram = qla2x00_write_nvram_data,
1317 .fw_dump = qla2300_fw_dump,
1318 .beacon_on = qla2x00_beacon_on,
1319 .beacon_off = qla2x00_beacon_off,
1320 .beacon_blink = qla2x00_beacon_blink,
1321 .read_optrom = qla2x00_read_optrom_data,
1322 .write_optrom = qla2x00_write_optrom_data,
1323 .get_flash_version = qla2x00_get_flash_version,
1324};
1325
1326static struct isp_operations qla24xx_isp_ops = {
1327 .pci_config = qla24xx_pci_config,
1328 .reset_chip = qla24xx_reset_chip,
1329 .chip_diag = qla24xx_chip_diag,
1330 .config_rings = qla24xx_config_rings,
1331 .reset_adapter = qla24xx_reset_adapter,
1332 .nvram_config = qla24xx_nvram_config,
1333 .update_fw_options = qla24xx_update_fw_options,
1334 .load_risc = qla24xx_load_risc,
1335 .pci_info_str = qla24xx_pci_info_str,
1336 .fw_version_str = qla24xx_fw_version_str,
1337 .intr_handler = qla24xx_intr_handler,
1338 .enable_intrs = qla24xx_enable_intrs,
1339 .disable_intrs = qla24xx_disable_intrs,
1340 .abort_command = qla24xx_abort_command,
1341 .abort_target = qla24xx_abort_target,
1342 .fabric_login = qla24xx_login_fabric,
1343 .fabric_logout = qla24xx_fabric_logout,
1344 .calc_req_entries = NULL,
1345 .build_iocbs = NULL,
1346 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1347 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1348 .read_nvram = qla24xx_read_nvram_data,
1349 .write_nvram = qla24xx_write_nvram_data,
1350 .fw_dump = qla24xx_fw_dump,
1351 .beacon_on = qla24xx_beacon_on,
1352 .beacon_off = qla24xx_beacon_off,
1353 .beacon_blink = qla24xx_beacon_blink,
1354 .read_optrom = qla24xx_read_optrom_data,
1355 .write_optrom = qla24xx_write_optrom_data,
1356 .get_flash_version = qla24xx_get_flash_version,
1357};
1358
1359static struct isp_operations qla25xx_isp_ops = {
1360 .pci_config = qla25xx_pci_config,
1361 .reset_chip = qla24xx_reset_chip,
1362 .chip_diag = qla24xx_chip_diag,
1363 .config_rings = qla24xx_config_rings,
1364 .reset_adapter = qla24xx_reset_adapter,
1365 .nvram_config = qla24xx_nvram_config,
1366 .update_fw_options = qla24xx_update_fw_options,
1367 .load_risc = qla24xx_load_risc,
1368 .pci_info_str = qla24xx_pci_info_str,
1369 .fw_version_str = qla24xx_fw_version_str,
1370 .intr_handler = qla24xx_intr_handler,
1371 .enable_intrs = qla24xx_enable_intrs,
1372 .disable_intrs = qla24xx_disable_intrs,
1373 .abort_command = qla24xx_abort_command,
1374 .abort_target = qla24xx_abort_target,
1375 .fabric_login = qla24xx_login_fabric,
1376 .fabric_logout = qla24xx_fabric_logout,
1377 .calc_req_entries = NULL,
1378 .build_iocbs = NULL,
1379 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1380 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1381 .read_nvram = qla25xx_read_nvram_data,
1382 .write_nvram = qla25xx_write_nvram_data,
1383 .fw_dump = qla25xx_fw_dump,
1384 .beacon_on = qla24xx_beacon_on,
1385 .beacon_off = qla24xx_beacon_off,
1386 .beacon_blink = qla24xx_beacon_blink,
1387 .read_optrom = qla24xx_read_optrom_data,
1388 .write_optrom = qla24xx_write_optrom_data,
1389 .get_flash_version = qla24xx_get_flash_version,
1390};
1391
1197static inline void 1392static inline void
1198qla2x00_set_isp_flags(scsi_qla_host_t *ha) 1393qla2x00_set_isp_flags(scsi_qla_host_t *ha)
1199{ 1394{
@@ -1238,19 +1433,32 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
1238 case PCI_DEVICE_ID_QLOGIC_ISP2422: 1433 case PCI_DEVICE_ID_QLOGIC_ISP2422:
1239 ha->device_type |= DT_ISP2422; 1434 ha->device_type |= DT_ISP2422;
1240 ha->device_type |= DT_ZIO_SUPPORTED; 1435 ha->device_type |= DT_ZIO_SUPPORTED;
1436 ha->device_type |= DT_FWI2;
1437 ha->device_type |= DT_IIDMA;
1241 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1438 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1242 break; 1439 break;
1243 case PCI_DEVICE_ID_QLOGIC_ISP2432: 1440 case PCI_DEVICE_ID_QLOGIC_ISP2432:
1244 ha->device_type |= DT_ISP2432; 1441 ha->device_type |= DT_ISP2432;
1245 ha->device_type |= DT_ZIO_SUPPORTED; 1442 ha->device_type |= DT_ZIO_SUPPORTED;
1443 ha->device_type |= DT_FWI2;
1444 ha->device_type |= DT_IIDMA;
1246 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1445 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1247 break; 1446 break;
1248 case PCI_DEVICE_ID_QLOGIC_ISP5422: 1447 case PCI_DEVICE_ID_QLOGIC_ISP5422:
1249 ha->device_type |= DT_ISP5422; 1448 ha->device_type |= DT_ISP5422;
1449 ha->device_type |= DT_FWI2;
1250 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1450 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1251 break; 1451 break;
1252 case PCI_DEVICE_ID_QLOGIC_ISP5432: 1452 case PCI_DEVICE_ID_QLOGIC_ISP5432:
1253 ha->device_type |= DT_ISP5432; 1453 ha->device_type |= DT_ISP5432;
1454 ha->device_type |= DT_FWI2;
1455 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1456 break;
1457 case PCI_DEVICE_ID_QLOGIC_ISP2532:
1458 ha->device_type |= DT_ISP2532;
1459 ha->device_type |= DT_ZIO_SUPPORTED;
1460 ha->device_type |= DT_FWI2;
1461 ha->device_type |= DT_IIDMA;
1254 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1462 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1255 break; 1463 break;
1256 } 1464 }
@@ -1323,61 +1531,6 @@ iospace_error_exit:
1323} 1531}
1324 1532
1325static void 1533static void
1326qla2x00_enable_intrs(scsi_qla_host_t *ha)
1327{
1328 unsigned long flags = 0;
1329 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1330
1331 spin_lock_irqsave(&ha->hardware_lock, flags);
1332 ha->interrupts_on = 1;
1333 /* enable risc and host interrupts */
1334 WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1335 RD_REG_WORD(&reg->ictrl);
1336 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1337
1338}
1339
1340static void
1341qla2x00_disable_intrs(scsi_qla_host_t *ha)
1342{
1343 unsigned long flags = 0;
1344 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1345
1346 spin_lock_irqsave(&ha->hardware_lock, flags);
1347 ha->interrupts_on = 0;
1348 /* disable risc and host interrupts */
1349 WRT_REG_WORD(&reg->ictrl, 0);
1350 RD_REG_WORD(&reg->ictrl);
1351 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1352}
1353
1354static void
1355qla24xx_enable_intrs(scsi_qla_host_t *ha)
1356{
1357 unsigned long flags = 0;
1358 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1359
1360 spin_lock_irqsave(&ha->hardware_lock, flags);
1361 ha->interrupts_on = 1;
1362 WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1363 RD_REG_DWORD(&reg->ictrl);
1364 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1365}
1366
1367static void
1368qla24xx_disable_intrs(scsi_qla_host_t *ha)
1369{
1370 unsigned long flags = 0;
1371 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1372
1373 spin_lock_irqsave(&ha->hardware_lock, flags);
1374 ha->interrupts_on = 0;
1375 WRT_REG_DWORD(&reg->ictrl, 0);
1376 RD_REG_DWORD(&reg->ictrl);
1377 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1378}
1379
1380static void
1381qla2xxx_scan_start(struct Scsi_Host *shost) 1534qla2xxx_scan_start(struct Scsi_Host *shost)
1382{ 1535{
1383 scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata; 1536 scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
@@ -1422,7 +1575,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1422 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 1575 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
1423 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || 1576 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
1424 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 1577 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
1425 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432) 1578 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
1579 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532)
1426 sht = &qla24xx_driver_template; 1580 sht = &qla24xx_driver_template;
1427 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 1581 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
1428 if (host == NULL) { 1582 if (host == NULL) {
@@ -1466,33 +1620,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1466 ha->max_q_depth = ql2xmaxqdepth; 1620 ha->max_q_depth = ql2xmaxqdepth;
1467 1621
1468 /* Assign ISP specific operations. */ 1622 /* Assign ISP specific operations. */
1469 ha->isp_ops.pci_config = qla2100_pci_config;
1470 ha->isp_ops.reset_chip = qla2x00_reset_chip;
1471 ha->isp_ops.chip_diag = qla2x00_chip_diag;
1472 ha->isp_ops.config_rings = qla2x00_config_rings;
1473 ha->isp_ops.reset_adapter = qla2x00_reset_adapter;
1474 ha->isp_ops.nvram_config = qla2x00_nvram_config;
1475 ha->isp_ops.update_fw_options = qla2x00_update_fw_options;
1476 ha->isp_ops.load_risc = qla2x00_load_risc;
1477 ha->isp_ops.pci_info_str = qla2x00_pci_info_str;
1478 ha->isp_ops.fw_version_str = qla2x00_fw_version_str;
1479 ha->isp_ops.intr_handler = qla2100_intr_handler;
1480 ha->isp_ops.enable_intrs = qla2x00_enable_intrs;
1481 ha->isp_ops.disable_intrs = qla2x00_disable_intrs;
1482 ha->isp_ops.abort_command = qla2x00_abort_command;
1483 ha->isp_ops.abort_target = qla2x00_abort_target;
1484 ha->isp_ops.fabric_login = qla2x00_login_fabric;
1485 ha->isp_ops.fabric_logout = qla2x00_fabric_logout;
1486 ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_32;
1487 ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_32;
1488 ha->isp_ops.prep_ms_iocb = qla2x00_prep_ms_iocb;
1489 ha->isp_ops.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb;
1490 ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
1491 ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
1492 ha->isp_ops.fw_dump = qla2100_fw_dump;
1493 ha->isp_ops.read_optrom = qla2x00_read_optrom_data;
1494 ha->isp_ops.write_optrom = qla2x00_write_optrom_data;
1495 ha->isp_ops.get_flash_version = qla2x00_get_flash_version;
1496 if (IS_QLA2100(ha)) { 1623 if (IS_QLA2100(ha)) {
1497 host->max_id = MAX_TARGETS_2100; 1624 host->max_id = MAX_TARGETS_2100;
1498 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1625 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
@@ -1501,6 +1628,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1501 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1628 ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
1502 host->sg_tablesize = 32; 1629 host->sg_tablesize = 32;
1503 ha->gid_list_info_size = 4; 1630 ha->gid_list_info_size = 4;
1631 ha->isp_ops = &qla2100_isp_ops;
1504 } else if (IS_QLA2200(ha)) { 1632 } else if (IS_QLA2200(ha)) {
1505 host->max_id = MAX_TARGETS_2200; 1633 host->max_id = MAX_TARGETS_2200;
1506 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1634 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1508,21 +1636,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1508 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1636 ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
1509 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1637 ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
1510 ha->gid_list_info_size = 4; 1638 ha->gid_list_info_size = 4;
1639 ha->isp_ops = &qla2100_isp_ops;
1511 } else if (IS_QLA23XX(ha)) { 1640 } else if (IS_QLA23XX(ha)) {
1512 host->max_id = MAX_TARGETS_2200; 1641 host->max_id = MAX_TARGETS_2200;
1513 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1642 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1514 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1643 ha->request_q_length = REQUEST_ENTRY_CNT_2200;
1515 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1644 ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
1516 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1645 ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
1517 ha->isp_ops.pci_config = qla2300_pci_config;
1518 ha->isp_ops.intr_handler = qla2300_intr_handler;
1519 ha->isp_ops.fw_dump = qla2300_fw_dump;
1520 ha->isp_ops.beacon_on = qla2x00_beacon_on;
1521 ha->isp_ops.beacon_off = qla2x00_beacon_off;
1522 ha->isp_ops.beacon_blink = qla2x00_beacon_blink;
1523 ha->gid_list_info_size = 6; 1646 ha->gid_list_info_size = 6;
1524 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1647 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1525 ha->optrom_size = OPTROM_SIZE_2322; 1648 ha->optrom_size = OPTROM_SIZE_2322;
1649 ha->isp_ops = &qla2300_isp_ops;
1526 } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1650 } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
1527 host->max_id = MAX_TARGETS_2200; 1651 host->max_id = MAX_TARGETS_2200;
1528 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1652 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1531,36 +1655,20 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1531 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1655 ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
1532 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1656 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1533 ha->mgmt_svr_loop_id = 10 + ha->vp_idx; 1657 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1534 ha->isp_ops.pci_config = qla24xx_pci_config;
1535 ha->isp_ops.reset_chip = qla24xx_reset_chip;
1536 ha->isp_ops.chip_diag = qla24xx_chip_diag;
1537 ha->isp_ops.config_rings = qla24xx_config_rings;
1538 ha->isp_ops.reset_adapter = qla24xx_reset_adapter;
1539 ha->isp_ops.nvram_config = qla24xx_nvram_config;
1540 ha->isp_ops.update_fw_options = qla24xx_update_fw_options;
1541 ha->isp_ops.load_risc = qla24xx_load_risc;
1542 ha->isp_ops.pci_info_str = qla24xx_pci_info_str;
1543 ha->isp_ops.fw_version_str = qla24xx_fw_version_str;
1544 ha->isp_ops.intr_handler = qla24xx_intr_handler;
1545 ha->isp_ops.enable_intrs = qla24xx_enable_intrs;
1546 ha->isp_ops.disable_intrs = qla24xx_disable_intrs;
1547 ha->isp_ops.abort_command = qla24xx_abort_command;
1548 ha->isp_ops.abort_target = qla24xx_abort_target;
1549 ha->isp_ops.fabric_login = qla24xx_login_fabric;
1550 ha->isp_ops.fabric_logout = qla24xx_fabric_logout;
1551 ha->isp_ops.prep_ms_iocb = qla24xx_prep_ms_iocb;
1552 ha->isp_ops.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb;
1553 ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
1554 ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
1555 ha->isp_ops.fw_dump = qla24xx_fw_dump;
1556 ha->isp_ops.read_optrom = qla24xx_read_optrom_data;
1557 ha->isp_ops.write_optrom = qla24xx_write_optrom_data;
1558 ha->isp_ops.beacon_on = qla24xx_beacon_on;
1559 ha->isp_ops.beacon_off = qla24xx_beacon_off;
1560 ha->isp_ops.beacon_blink = qla24xx_beacon_blink;
1561 ha->isp_ops.get_flash_version = qla24xx_get_flash_version;
1562 ha->gid_list_info_size = 8; 1658 ha->gid_list_info_size = 8;
1563 ha->optrom_size = OPTROM_SIZE_24XX; 1659 ha->optrom_size = OPTROM_SIZE_24XX;
1660 ha->isp_ops = &qla24xx_isp_ops;
1661 } else if (IS_QLA25XX(ha)) {
1662 host->max_id = MAX_TARGETS_2200;
1663 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1664 ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
1665 ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
1666 ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
1667 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1668 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1669 ha->gid_list_info_size = 8;
1670 ha->optrom_size = OPTROM_SIZE_25XX;
1671 ha->isp_ops = &qla25xx_isp_ops;
1564 } 1672 }
1565 host->can_queue = ha->request_q_length + 128; 1673 host->can_queue = ha->request_q_length + 128;
1566 1674
@@ -1628,11 +1736,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1628 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1736 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
1629 ha->host_no, ha)); 1737 ha->host_no, ha));
1630 1738
1631 ha->isp_ops.disable_intrs(ha); 1739 ha->isp_ops->disable_intrs(ha);
1632 1740
1633 spin_lock_irqsave(&ha->hardware_lock, flags); 1741 spin_lock_irqsave(&ha->hardware_lock, flags);
1634 reg = ha->iobase; 1742 reg = ha->iobase;
1635 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1743 if (IS_FWI2_CAPABLE(ha)) {
1636 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); 1744 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1637 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT); 1745 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1638 } else { 1746 } else {
@@ -1654,7 +1762,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1654 } 1762 }
1655 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1763 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1656 1764
1657 ha->isp_ops.enable_intrs(ha); 1765 ha->isp_ops->enable_intrs(ha);
1658 1766
1659 pci_set_drvdata(pdev, ha); 1767 pci_set_drvdata(pdev, ha);
1660 1768
@@ -1679,9 +1787,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1679 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 1787 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
1680 qla2x00_version_str, ha->model_number, 1788 qla2x00_version_str, ha->model_number,
1681 ha->model_desc ? ha->model_desc: "", pdev->device, 1789 ha->model_desc ? ha->model_desc: "", pdev->device,
1682 ha->isp_ops.pci_info_str(ha, pci_info), pci_name(pdev), 1790 ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev),
1683 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no, 1791 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
1684 ha->isp_ops.fw_version_str(ha, fw_str)); 1792 ha->isp_ops->fw_version_str(ha, fw_str));
1685 1793
1686 return 0; 1794 return 0;
1687 1795
@@ -1747,7 +1855,7 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1747 1855
1748 /* turn-off interrupts on the card */ 1856 /* turn-off interrupts on the card */
1749 if (ha->interrupts_on) 1857 if (ha->interrupts_on)
1750 ha->isp_ops.disable_intrs(ha); 1858 ha->isp_ops->disable_intrs(ha);
1751 1859
1752 qla2x00_mem_free(ha); 1860 qla2x00_mem_free(ha);
1753 1861
@@ -2025,7 +2133,7 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
2025 } 2133 }
2026 memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt)); 2134 memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt));
2027 2135
2028 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 2136 if (IS_FWI2_CAPABLE(ha)) {
2029 /* 2137 /*
2030 * Get consistent memory allocated for SFP 2138 * Get consistent memory allocated for SFP
2031 * block. 2139 * block.
@@ -2305,7 +2413,7 @@ qla2x00_do_dpc(void *data)
2305 if (fcport->flags & FCF_FABRIC_DEVICE) { 2413 if (fcport->flags & FCF_FABRIC_DEVICE) {
2306 if (fcport->flags & 2414 if (fcport->flags &
2307 FCF_TAPE_PRESENT) 2415 FCF_TAPE_PRESENT)
2308 ha->isp_ops.fabric_logout( 2416 ha->isp_ops->fabric_logout(
2309 ha, fcport->loop_id, 2417 ha, fcport->loop_id,
2310 fcport->d_id.b.domain, 2418 fcport->d_id.b.domain,
2311 fcport->d_id.b.area, 2419 fcport->d_id.b.area,
@@ -2385,10 +2493,10 @@ qla2x00_do_dpc(void *data)
2385 } 2493 }
2386 2494
2387 if (!ha->interrupts_on) 2495 if (!ha->interrupts_on)
2388 ha->isp_ops.enable_intrs(ha); 2496 ha->isp_ops->enable_intrs(ha);
2389 2497
2390 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) 2498 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
2391 ha->isp_ops.beacon_blink(ha); 2499 ha->isp_ops->beacon_blink(ha);
2392 2500
2393 qla2x00_do_dpc_all_vps(ha); 2501 qla2x00_do_dpc_all_vps(ha);
2394 2502
@@ -2617,18 +2725,20 @@ qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
2617 2725
2618/* Firmware interface routines. */ 2726/* Firmware interface routines. */
2619 2727
2620#define FW_BLOBS 5 2728#define FW_BLOBS 6
2621#define FW_ISP21XX 0 2729#define FW_ISP21XX 0
2622#define FW_ISP22XX 1 2730#define FW_ISP22XX 1
2623#define FW_ISP2300 2 2731#define FW_ISP2300 2
2624#define FW_ISP2322 3 2732#define FW_ISP2322 3
2625#define FW_ISP24XX 4 2733#define FW_ISP24XX 4
2734#define FW_ISP25XX 5
2626 2735
2627#define FW_FILE_ISP21XX "ql2100_fw.bin" 2736#define FW_FILE_ISP21XX "ql2100_fw.bin"
2628#define FW_FILE_ISP22XX "ql2200_fw.bin" 2737#define FW_FILE_ISP22XX "ql2200_fw.bin"
2629#define FW_FILE_ISP2300 "ql2300_fw.bin" 2738#define FW_FILE_ISP2300 "ql2300_fw.bin"
2630#define FW_FILE_ISP2322 "ql2322_fw.bin" 2739#define FW_FILE_ISP2322 "ql2322_fw.bin"
2631#define FW_FILE_ISP24XX "ql2400_fw.bin" 2740#define FW_FILE_ISP24XX "ql2400_fw.bin"
2741#define FW_FILE_ISP25XX "ql2500_fw.bin"
2632 2742
2633static DECLARE_MUTEX(qla_fw_lock); 2743static DECLARE_MUTEX(qla_fw_lock);
2634 2744
@@ -2638,6 +2748,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
2638 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, 2748 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
2639 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 2749 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
2640 { .name = FW_FILE_ISP24XX, }, 2750 { .name = FW_FILE_ISP24XX, },
2751 { .name = FW_FILE_ISP25XX, },
2641}; 2752};
2642 2753
2643struct fw_blob * 2754struct fw_blob *
@@ -2656,6 +2767,8 @@ qla2x00_request_firmware(scsi_qla_host_t *ha)
2656 blob = &qla_fw_blobs[FW_ISP2322]; 2767 blob = &qla_fw_blobs[FW_ISP2322];
2657 } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 2768 } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
2658 blob = &qla_fw_blobs[FW_ISP24XX]; 2769 blob = &qla_fw_blobs[FW_ISP24XX];
2770 } else if (IS_QLA25XX(ha)) {
2771 blob = &qla_fw_blobs[FW_ISP25XX];
2659 } 2772 }
2660 2773
2661 down(&qla_fw_lock); 2774 down(&qla_fw_lock);
@@ -2699,6 +2812,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
2699 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, 2812 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
2700 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 2813 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
2701 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 2814 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
2815 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
2702 { 0 }, 2816 { 0 },
2703}; 2817};
2704MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 2818MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
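The main structural change in qla_os.c is that ha->isp_ops stops being a per-host struct filled member by member in qla2x00_probe_one() and becomes a pointer to one of the shared static isp_operations tables (qla2100/qla2300/qla24xx/qla25xx_isp_ops), selected once per chip family; every call site switches from ha->isp_ops.fn() to ha->isp_ops->fn(). Below is a minimal standalone sketch of the same pattern with hypothetical names, not driver code (the real tables stay non-const because qla2x00_config_dma_addressing() still patches two entries through the pointer):

	#include <stdio.h>

	struct ops {                        /* stand-in for struct isp_operations */
		void (*enable_intrs)(void *hw);
	};

	static void gen1_enable_intrs(void *hw) { puts("gen1: enable interrupts"); }
	static void gen2_enable_intrs(void *hw) { puts("gen2: enable interrupts"); }

	/* one table per chip family, shared by every adapter instance */
	static struct ops gen1_ops = { .enable_intrs = gen1_enable_intrs };
	static struct ops gen2_ops = { .enable_intrs = gen2_enable_intrs };

	struct adapter {                    /* stand-in for scsi_qla_host_t */
		struct ops *isp_ops;            /* pointer, no longer an embedded copy */
	};

	int main(void)
	{
		struct adapter ha = { .isp_ops = &gen2_ops };  /* chosen at probe time */
		ha.isp_ops->enable_intrs(&ha);                 /* was ha.isp_ops.enable_intrs() */
		return 0;
	}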
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 206bda093da2..a925a3f179f9 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -766,6 +766,29 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
766 return ret; 766 return ret;
767} 767}
768 768
769uint8_t *
770qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
771 uint32_t bytes)
772{
773 uint32_t i;
774 uint32_t *dwptr;
775
776 /* Dword reads to flash. */
777 dwptr = (uint32_t *)buf;
778 for (i = 0; i < bytes >> 2; i++, naddr++)
779 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
780 flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr)));
781
782 return buf;
783}
784
785int
786qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
787 uint32_t bytes)
788{
789 return qla24xx_write_flash_data(ha, (uint32_t *)buf,
790 FA_VPD_NVRAM_ADDR | naddr, bytes >> 2);
791}
769 792
770static inline void 793static inline void
771qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 794qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
@@ -919,7 +942,7 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
919 else 942 else
920 ha->beacon_color_state = QLA_LED_GRN_ON; 943 ha->beacon_color_state = QLA_LED_GRN_ON;
921 944
922 ha->isp_ops.beacon_blink(ha); /* This turns green LED off */ 945 ha->isp_ops->beacon_blink(ha); /* This turns green LED off */
923 946
924 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 947 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
925 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7; 948 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
@@ -1031,7 +1054,7 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
1031 ha->beacon_blink_led = 0; 1054 ha->beacon_blink_led = 0;
1032 ha->beacon_color_state = QLA_LED_ALL_ON; 1055 ha->beacon_color_state = QLA_LED_ALL_ON;
1033 1056
1034 ha->isp_ops.beacon_blink(ha); /* Will flip to all off. */ 1057 ha->isp_ops->beacon_blink(ha); /* Will flip to all off. */
1035 1058
1036 /* Give control back to firmware. */ 1059 /* Give control back to firmware. */
1037 spin_lock_irqsave(&ha->hardware_lock, flags); 1060 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1419,7 +1442,7 @@ qla2x00_suspend_hba(struct scsi_qla_host *ha)
1419 1442
1420 /* Suspend HBA. */ 1443 /* Suspend HBA. */
1421 scsi_block_requests(ha->host); 1444 scsi_block_requests(ha->host);
1422 ha->isp_ops.disable_intrs(ha); 1445 ha->isp_ops->disable_intrs(ha);
1423 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1446 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1424 1447
1425 /* Pause RISC. */ 1448 /* Pause RISC. */
@@ -1705,7 +1728,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1705{ 1728{
1706 /* Suspend HBA. */ 1729 /* Suspend HBA. */
1707 scsi_block_requests(ha->host); 1730 scsi_block_requests(ha->host);
1708 ha->isp_ops.disable_intrs(ha); 1731 ha->isp_ops->disable_intrs(ha);
1709 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1732 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1710 1733
1711 /* Go with read. */ 1734 /* Go with read. */
@@ -1713,7 +1736,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1713 1736
1714 /* Resume HBA. */ 1737 /* Resume HBA. */
1715 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1738 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1716 ha->isp_ops.enable_intrs(ha); 1739 ha->isp_ops->enable_intrs(ha);
1717 scsi_unblock_requests(ha->host); 1740 scsi_unblock_requests(ha->host);
1718 1741
1719 return buf; 1742 return buf;
@@ -1727,7 +1750,7 @@ qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1727 1750
1728 /* Suspend HBA. */ 1751 /* Suspend HBA. */
1729 scsi_block_requests(ha->host); 1752 scsi_block_requests(ha->host);
1730 ha->isp_ops.disable_intrs(ha); 1753 ha->isp_ops->disable_intrs(ha);
1731 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1754 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1732 1755
1733 /* Go with write. */ 1756 /* Go with write. */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index fd2f10a25348..dd1f8ceb79c4 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.00-k1" 10#define QLA2XXX_VERSION "8.02.00-k2"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 4cd9c58efef1..4947dfe625a6 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2875,7 +2875,7 @@ static int __init scsi_debug_init(void)
2875 2875
2876 init_all_queued(); 2876 init_all_queued();
2877 2877
2878 sdebug_driver_template.proc_name = (char *)sdebug_proc_name; 2878 sdebug_driver_template.proc_name = sdebug_proc_name;
2879 2879
2880 host_to_add = scsi_debug_add_host; 2880 host_to_add = scsi_debug_add_host;
2881 scsi_debug_add_host = 0; 2881 scsi_debug_add_host = 0;
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index 6cfaaa2d0c81..63a30f566f3a 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -9,6 +9,7 @@
9#include <linux/sysctl.h> 9#include <linux/sysctl.h>
10 10
11#include "scsi_logging.h" 11#include "scsi_logging.h"
12#include "scsi_priv.h"
12 13
13 14
14static ctl_table scsi_table[] = { 15static ctl_table scsi_table[] = {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ed720863ab97..34cdce6738a6 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -16,6 +16,7 @@
16#include <scsi/scsi_host.h> 16#include <scsi/scsi_host.h>
17#include <scsi/scsi_tcq.h> 17#include <scsi/scsi_tcq.h>
18#include <scsi/scsi_transport.h> 18#include <scsi/scsi_transport.h>
19#include <scsi/scsi_driver.h>
19 20
20#include "scsi_priv.h" 21#include "scsi_priv.h"
21#include "scsi_logging.h" 22#include "scsi_logging.h"
@@ -714,6 +715,7 @@ static int attr_add(struct device *dev, struct device_attribute *attr)
714int scsi_sysfs_add_sdev(struct scsi_device *sdev) 715int scsi_sysfs_add_sdev(struct scsi_device *sdev)
715{ 716{
716 int error, i; 717 int error, i;
718 struct request_queue *rq = sdev->request_queue;
717 719
718 if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0) 720 if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
719 return error; 721 return error;
@@ -733,6 +735,17 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
733 /* take a reference for the sdev_classdev; this is 735 /* take a reference for the sdev_classdev; this is
734 * released by the sdev_class .release */ 736 * released by the sdev_class .release */
735 get_device(&sdev->sdev_gendev); 737 get_device(&sdev->sdev_gendev);
738
739 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL);
740
741 if (error)
742 sdev_printk(KERN_INFO, sdev,
743 "Failed to register bsg queue, errno=%d\n", error);
744
745 /* we're treating error on bsg register as non-fatal, so pretend
746 * nothing went wrong */
747 error = 0;
748
736 if (sdev->host->hostt->sdev_attrs) { 749 if (sdev->host->hostt->sdev_attrs) {
737 for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) { 750 for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
738 error = attr_add(&sdev->sdev_gendev, 751 error = attr_add(&sdev->sdev_gendev,
@@ -779,6 +792,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
779 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) 792 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
780 return; 793 return;
781 794
795 bsg_unregister_queue(sdev->request_queue);
782 class_device_unregister(&sdev->sdev_classdev); 796 class_device_unregister(&sdev->sdev_classdev);
783 transport_remove_device(dev); 797 transport_remove_device(dev);
784 device_del(dev); 798 device_del(dev);
@@ -803,7 +817,7 @@ void scsi_remove_device(struct scsi_device *sdev)
803} 817}
804EXPORT_SYMBOL(scsi_remove_device); 818EXPORT_SYMBOL(scsi_remove_device);
805 819
806void __scsi_remove_target(struct scsi_target *starget) 820static void __scsi_remove_target(struct scsi_target *starget)
807{ 821{
808 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 822 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
809 unsigned long flags; 823 unsigned long flags;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index e8825709797e..47057254850d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2358,7 +2358,7 @@ fc_rport_final_delete(struct work_struct *work)
2358 * Notes: 2358 * Notes:
2359 * This routine assumes no locks are held on entry. 2359 * This routine assumes no locks are held on entry.
2360 **/ 2360 **/
2361struct fc_rport * 2361static struct fc_rport *
2362fc_rport_create(struct Scsi_Host *shost, int channel, 2362fc_rport_create(struct Scsi_Host *shost, int channel,
2363 struct fc_rport_identifiers *ids) 2363 struct fc_rport_identifiers *ids)
2364{ 2364{
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index b2ef71a86292..3120f4b3a11a 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -29,6 +29,8 @@
29#include <linux/err.h> 29#include <linux/err.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/blkdev.h>
33#include <linux/bsg.h>
32 34
33#include <scsi/scsi.h> 35#include <scsi/scsi.h>
34#include <scsi/scsi_device.h> 36#include <scsi/scsi_device.h>
@@ -40,6 +42,7 @@
40struct sas_host_attrs { 42struct sas_host_attrs {
41 struct list_head rphy_list; 43 struct list_head rphy_list;
42 struct mutex lock; 44 struct mutex lock;
45 struct request_queue *q;
43 u32 next_target_id; 46 u32 next_target_id;
44 u32 next_expander_id; 47 u32 next_expander_id;
45 int next_port_id; 48 int next_port_id;
@@ -152,6 +155,106 @@ static struct {
152sas_bitfield_name_search(linkspeed, sas_linkspeed_names) 155sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
153sas_bitfield_name_set(linkspeed, sas_linkspeed_names) 156sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
154 157
158static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
159 struct sas_rphy *rphy)
160{
161 struct request *req;
162 int ret;
163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
164
165 while (!blk_queue_plugged(q)) {
166 req = elv_next_request(q);
167 if (!req)
168 break;
169
170 blkdev_dequeue_request(req);
171
172 spin_unlock_irq(q->queue_lock);
173
174 handler = to_sas_internal(shost->transportt)->f->smp_handler;
175 ret = handler(shost, rphy, req);
176
177 spin_lock_irq(q->queue_lock);
178
179 req->end_io(req, ret);
180 }
181}
182
183static void sas_host_smp_request(struct request_queue *q)
184{
185 sas_smp_request(q, (struct Scsi_Host *)q->queuedata, NULL);
186}
187
188static void sas_non_host_smp_request(struct request_queue *q)
189{
190 struct sas_rphy *rphy = q->queuedata;
191 sas_smp_request(q, rphy_to_shost(rphy), rphy);
192}
193
194static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
195{
196 struct request_queue *q;
197 int error;
198 struct device *dev;
199 char namebuf[BUS_ID_SIZE];
200 const char *name;
201
202 if (!to_sas_internal(shost->transportt)->f->smp_handler) {
203 printk("%s can't handle SMP requests\n", shost->hostt->name);
204 return 0;
205 }
206
207 if (rphy) {
208 q = blk_init_queue(sas_non_host_smp_request, NULL);
209 dev = &rphy->dev;
210 name = dev->bus_id;
211 } else {
212 q = blk_init_queue(sas_host_smp_request, NULL);
213 dev = &shost->shost_gendev;
214 snprintf(namebuf, sizeof(namebuf),
215 "sas_host%d", shost->host_no);
216 name = namebuf;
217 }
218 if (!q)
219 return -ENOMEM;
220
221 error = bsg_register_queue(q, dev, name);
222 if (error) {
223 blk_cleanup_queue(q);
224 return -ENOMEM;
225 }
226
227 if (rphy)
228 rphy->q = q;
229 else
230 to_sas_host_attrs(shost)->q = q;
231
232 if (rphy)
233 q->queuedata = rphy;
234 else
235 q->queuedata = shost;
236
237 set_bit(QUEUE_FLAG_BIDI, &q->queue_flags);
238
239 return 0;
240}
241
242static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
243{
244 struct request_queue *q;
245
246 if (rphy)
247 q = rphy->q;
248 else
249 q = to_sas_host_attrs(shost)->q;
250
251 if (!q)
252 return;
253
254 bsg_unregister_queue(q);
255 blk_cleanup_queue(q);
256}
257
155/* 258/*
156 * SAS host attributes 259 * SAS host attributes
157 */ 260 */
@@ -167,11 +270,26 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
167 sas_host->next_target_id = 0; 270 sas_host->next_target_id = 0;
168 sas_host->next_expander_id = 0; 271 sas_host->next_expander_id = 0;
169 sas_host->next_port_id = 0; 272 sas_host->next_port_id = 0;
273
274 if (sas_bsg_initialize(shost, NULL))
275 dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n",
276 shost->host_no);
277
278 return 0;
279}
280
281static int sas_host_remove(struct transport_container *tc, struct device *dev,
282 struct class_device *cdev)
283{
284 struct Scsi_Host *shost = dev_to_shost(dev);
285
286 sas_bsg_remove(shost, NULL);
287
170 return 0; 288 return 0;
171} 289}
172 290
173static DECLARE_TRANSPORT_CLASS(sas_host_class, 291static DECLARE_TRANSPORT_CLASS(sas_host_class,
174 "sas_host", sas_host_setup, NULL, NULL); 292 "sas_host", sas_host_setup, sas_host_remove, NULL);
175 293
176static int sas_host_match(struct attribute_container *cont, 294static int sas_host_match(struct attribute_container *cont,
177 struct device *dev) 295 struct device *dev)
@@ -1287,6 +1405,9 @@ int sas_rphy_add(struct sas_rphy *rphy)
1287 return error; 1405 return error;
1288 transport_add_device(&rphy->dev); 1406 transport_add_device(&rphy->dev);
1289 transport_configure_device(&rphy->dev); 1407 transport_configure_device(&rphy->dev);
1408 if (sas_bsg_initialize(shost, rphy))
1409 printk("fail to a bsg device %s\n", rphy->dev.bus_id);
1410
1290 1411
1291 mutex_lock(&sas_host->lock); 1412 mutex_lock(&sas_host->lock);
1292 list_add_tail(&rphy->list, &sas_host->rphy_list); 1413 list_add_tail(&rphy->list, &sas_host->rphy_list);
@@ -1329,6 +1450,8 @@ void sas_rphy_free(struct sas_rphy *rphy)
1329 list_del(&rphy->list); 1450 list_del(&rphy->list);
1330 mutex_unlock(&sas_host->lock); 1451 mutex_unlock(&sas_host->lock);
1331 1452
1453 sas_bsg_remove(shost, rphy);
1454
1332 transport_destroy_device(dev); 1455 transport_destroy_device(dev);
1333 1456
1334 put_device(dev); 1457 put_device(dev);
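The new sas_smp_request() path above dispatches each dequeued bsg request to the LLDD through to_sas_internal(shost->transportt)->f->smp_handler, so a driver would opt into SMP passthrough by supplying that callback in the sas_function_template it registers (assuming, as the call site suggests, that smp_handler is a member of that template). A hedged sketch of the driver-side wiring, with made-up names (my_smp_handler, my_sas_ops) and only the hook relevant here; the handler signature mirrors the indirect call in sas_smp_request():

	#include <linux/blkdev.h>
	#include <scsi/scsi_host.h>
	#include <scsi/scsi_transport_sas.h>

	/* Hypothetical LLDD callback: parse the SMP frame carried by req, send it
	 * to the expander or host phy, and place the response in the bidi reply. */
	static int my_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
				  struct request *req)
	{
		return 0;	/* status handed back via req->end_io() by the transport */
	}

	static struct sas_function_template my_sas_ops = {
		/* ... phy reset / linkrate callbacks elided ... */
		.smp_handler	= my_smp_handler,
	};

	/* module init (sketch): sas_attach_transport(&my_sas_ops); */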
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
index ff62e9708e1c..ce80fa9ad815 100644
--- a/drivers/scsi/seagate.c
+++ b/drivers/scsi/seagate.c
@@ -420,7 +420,7 @@ static inline void borken_wait (void)
420#define ULOOP( i ) for (clock = i*8;;) 420#define ULOOP( i ) for (clock = i*8;;)
421#define TIMEOUT (!(clock--)) 421#define TIMEOUT (!(clock--))
422 422
423int __init seagate_st0x_detect (struct scsi_host_template * tpnt) 423static int __init seagate_st0x_detect (struct scsi_host_template * tpnt)
424{ 424{
425 struct Scsi_Host *instance; 425 struct Scsi_Host *instance;
426 int i, j; 426 int i, j;
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 710f19de3d40..d63d229e2323 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -138,6 +138,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
138 goto out_put_host; 138 goto out_put_host;
139 } 139 }
140 140
141 dev_set_drvdata(dev, host);
141 scsi_scan_host(host); 142 scsi_scan_host(host);
142 143
143 return 0; 144 return 0;
@@ -155,7 +156,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
155static __devexit int 156static __devexit int
156sim710_device_remove(struct device *dev) 157sim710_device_remove(struct device *dev)
157{ 158{
158 struct Scsi_Host *host = dev_to_shost(dev); 159 struct Scsi_Host *host = dev_get_drvdata(dev);
159 struct NCR_700_Host_Parameters *hostdata = 160 struct NCR_700_Host_Parameters *hostdata =
160 (struct NCR_700_Host_Parameters *)host->hostdata[0]; 161 (struct NCR_700_Host_Parameters *)host->hostdata[0];
161 162
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 5143c8990845..e7b6a7fde1cb 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -175,7 +175,7 @@ static void scsi_cd_put(struct scsi_cd *cd)
175 * an inode for that to work, and we do not always have one. 175 * an inode for that to work, and we do not always have one.
176 */ 176 */
177 177
178int sr_media_change(struct cdrom_device_info *cdi, int slot) 178static int sr_media_change(struct cdrom_device_info *cdi, int slot)
179{ 179{
180 struct scsi_cd *cd = cdi->handle; 180 struct scsi_cd *cd = cdi->handle;
181 int retval; 181 int retval;
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index fa4e08e508ad..b92ff047af38 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -89,6 +89,8 @@
89#include <scsi/scsi_device.h> 89#include <scsi/scsi_device.h>
90#include <scsi/scsi_host.h> 90#include <scsi/scsi_host.h>
91 91
92#include <asm/irq.h>
93
92#include "wd33c93.h" 94#include "wd33c93.h"
93 95
94#define optimum_sx_per(hostdata) (hostdata)->sx_table[1].period_ns 96#define optimum_sx_per(hostdata) (hostdata)->sx_table[1].period_ns
@@ -1762,7 +1764,7 @@ static char setup_buffer[SETUP_BUFFER_SIZE];
1762static char setup_used[MAX_SETUP_ARGS]; 1764static char setup_used[MAX_SETUP_ARGS];
1763static int done_setup = 0; 1765static int done_setup = 0;
1764 1766
1765int 1767static int
1766wd33c93_setup(char *str) 1768wd33c93_setup(char *str)
1767{ 1769{
1768 int i; 1770 int i;
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index 50703877a585..c822debc2668 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -130,6 +130,7 @@ static int __devinit zorro7xx_init_one(struct zorro_dev *z,
130 goto out_put_host; 130 goto out_put_host;
131 } 131 }
132 132
133 zorro_set_drvdata(z, host);
133 scsi_scan_host(host); 134 scsi_scan_host(host);
134 135
135 return 0; 136 return 0;
@@ -148,7 +149,7 @@ static int __devinit zorro7xx_init_one(struct zorro_dev *z,
148 149
149static __devexit void zorro7xx_remove_one(struct zorro_dev *z) 150static __devexit void zorro7xx_remove_one(struct zorro_dev *z)
150{ 151{
151 struct Scsi_Host *host = dev_to_shost(&z->dev); 152 struct Scsi_Host *host = zorro_get_drvdata(z);
152 struct NCR_700_Host_Parameters *hostdata = shost_priv(host); 153 struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
153 154
154 scsi_remove_host(host); 155 scsi_remove_host(host);