Diffstat (limited to 'drivers/scsi/qla2xxx/qla_os.c')
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 363
 1 file changed, 175 insertions(+), 188 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f9e5b85e84d8..4ed1e4a96b95 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -83,6 +83,9 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
83 "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n" 83 "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
84 "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n" 84 "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
85 "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n" 85 "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
86 "\t\t0x1e400000 - Preferred value for capturing essential "
87 "debug information (equivalent to old "
88 "ql2xextended_error_logging=1).\n"
86 "\t\tDo LOGICAL OR of the value to enable more than one level"); 89 "\t\tDo LOGICAL OR of the value to enable more than one level");
87 90
88int ql2xshiftctondsd = 6; 91int ql2xshiftctondsd = 6;
@@ -199,7 +202,7 @@ int ql2xmdcapmask = 0x1F;
 module_param(ql2xmdcapmask, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xmdcapmask,
     "Set the Minidump driver capture mask level. "
-    "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+    "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

 int ql2xmdenable = 1;
 module_param(ql2xmdenable, int, S_IRUGO);
@@ -847,14 +850,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
     int wait = 0;
     struct qla_hw_data *ha = vha->hw;

-    ql_dbg(ql_dbg_taskm, vha, 0x8000,
-        "Entered %s for cmd=%p.\n", __func__, cmd);
     if (!CMD_SP(cmd))
         return SUCCESS;

     ret = fc_block_scsi_eh(cmd);
-    ql_dbg(ql_dbg_taskm, vha, 0x8001,
-        "Return value of fc_block_scsi_eh=%d.\n", ret);
     if (ret != 0)
         return ret;
     ret = SUCCESS;
@@ -870,7 +869,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
     }

     ql_dbg(ql_dbg_taskm, vha, 0x8002,
-        "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
+        "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n",
+        vha->host_no, id, lun, sp, cmd);

     /* Get a reference to the sp and drop the lock.*/
     sp_get(sp);
@@ -878,10 +878,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
     if (ha->isp_ops->abort_command(sp)) {
         ql_dbg(ql_dbg_taskm, vha, 0x8003,
-            "Abort command mbx failed for cmd=%p.\n", cmd);
+            "Abort command mbx failed cmd=%p.\n", cmd);
     } else {
         ql_dbg(ql_dbg_taskm, vha, 0x8004,
-            "Abort command mbx success.\n");
+            "Abort command mbx success cmd=%p.\n", cmd);
         wait = 1;
     }

@@ -897,13 +897,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
     if (wait) {
         if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
             ql_log(ql_log_warn, vha, 0x8006,
-                "Abort handler timed out for cmd=%p.\n", cmd);
+                "Abort handler timed out cmd=%p.\n", cmd);
             ret = FAILED;
         }
     }

     ql_log(ql_log_info, vha, 0x801c,
-        "Abort command issued -- %d %x.\n", wait, ret);
+        "Abort command issued nexus=%ld:%d:%d -- %d %x.\n",
+        vha->host_no, id, lun, wait, ret);

     return ret;
 }
@@ -972,19 +973,15 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
     int err;

     if (!fcport) {
-        ql_log(ql_log_warn, vha, 0x8007,
-            "fcport is NULL.\n");
         return FAILED;
     }

     err = fc_block_scsi_eh(cmd);
-    ql_dbg(ql_dbg_taskm, vha, 0x8008,
-        "fc_block_scsi_eh ret=%d.\n", err);
     if (err != 0)
         return err;

     ql_log(ql_log_info, vha, 0x8009,
-        "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
+        "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no,
         cmd->device->id, cmd->device->lun, cmd);

     err = 0;
@@ -1009,15 +1006,16 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
     }

     ql_log(ql_log_info, vha, 0x800e,
-        "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
-        cmd->device->id, cmd->device->lun, cmd);
+        "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name,
+        vha->host_no, cmd->device->id, cmd->device->lun, cmd);

     return SUCCESS;

 eh_reset_failed:
     ql_log(ql_log_info, vha, 0x800f,
-        "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
-        reset_errors[err], cmd->device->id, cmd->device->lun);
+        "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name,
+        reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+        cmd);
     return FAILED;
 }

@@ -1068,20 +1066,16 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
     lun = cmd->device->lun;

     if (!fcport) {
-        ql_log(ql_log_warn, vha, 0x8010,
-            "fcport is NULL.\n");
         return ret;
     }

     ret = fc_block_scsi_eh(cmd);
-    ql_dbg(ql_dbg_taskm, vha, 0x8011,
-        "fc_block_scsi_eh ret=%d.\n", ret);
     if (ret != 0)
         return ret;
     ret = FAILED;

     ql_log(ql_log_info, vha, 0x8012,
-        "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
+        "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);

     if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
         ql_log(ql_log_fatal, vha, 0x8013,
@@ -1105,7 +1099,8 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)

 eh_bus_reset_done:
     ql_log(ql_log_warn, vha, 0x802b,
-        "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
+        "BUS RESET %s nexus=%ld:%d:%d.\n",
+        (ret == FAILED) ? "FAILED" : "SUCCEDED", vha->host_no, id, lun);

     return ret;
 }
@@ -1139,20 +1134,16 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
     lun = cmd->device->lun;

     if (!fcport) {
-        ql_log(ql_log_warn, vha, 0x8016,
-            "fcport is NULL.\n");
         return ret;
     }

     ret = fc_block_scsi_eh(cmd);
-    ql_dbg(ql_dbg_taskm, vha, 0x8017,
-        "fc_block_scsi_eh ret=%d.\n", ret);
     if (ret != 0)
         return ret;
     ret = FAILED;

     ql_log(ql_log_info, vha, 0x8018,
-        "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
+        "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);

     if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
         goto eh_host_reset_lock;
@@ -1193,8 +1184,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
     ret = SUCCESS;

 eh_host_reset_lock:
-    qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
-        (ret == FAILED) ? "failed" : "succeeded");
+    ql_log(ql_log_info, vha, 0x8017,
+        "ADAPTER RESET %s nexus=%ld:%d:%d.\n",
+        (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

     return ret;
 }
@@ -1344,10 +1336,8 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
         return;

     ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
-        "Queue depth adjusted-down "
-        "to %d for scsi(%ld:%d:%d:%d).\n",
-        sdev->queue_depth, fcport->vha->host_no,
-        sdev->channel, sdev->id, sdev->lun);
+        "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n",
+        sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
 }

 static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
@@ -1369,10 +1359,8 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
     scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);

     ql_dbg(ql_dbg_io, vha, 0x302a,
-        "Queue depth adjusted-up to %d for "
-        "scsi(%ld:%d:%d:%d).\n",
-        sdev->queue_depth, fcport->vha->host_no,
-        sdev->channel, sdev->id, sdev->lun);
+        "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n",
+        sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
 }

 static int
@@ -1496,6 +1484,118 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }

+static int
+qla2x00_iospace_config(struct qla_hw_data *ha)
+{
+    resource_size_t pio;
+    uint16_t msix;
+    int cpus;
+
+    if (IS_QLA82XX(ha))
+        return qla82xx_iospace_config(ha);
+
+    if (pci_request_selected_regions(ha->pdev, ha->bars,
+        QLA2XXX_DRIVER_NAME)) {
+        ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
+            "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+            pci_name(ha->pdev));
+        goto iospace_error_exit;
+    }
+    if (!(ha->bars & 1))
+        goto skip_pio;
+
+    /* We only need PIO for Flash operations on ISP2312 v2 chips. */
+    pio = pci_resource_start(ha->pdev, 0);
+    if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
+        if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+            ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
+                "Invalid pci I/O region size (%s).\n",
+                pci_name(ha->pdev));
+            pio = 0;
+        }
+    } else {
+        ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
+            "Region #0 no a PIO resource (%s).\n",
+            pci_name(ha->pdev));
+        pio = 0;
+    }
+    ha->pio_address = pio;
+    ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
+        "PIO address=%llu.\n",
+        (unsigned long long)ha->pio_address);
+
+skip_pio:
+    /* Use MMIO operations for all accesses. */
+    if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
+        ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
+            "Region #1 not an MMIO resource (%s), aborting.\n",
+            pci_name(ha->pdev));
+        goto iospace_error_exit;
+    }
+    if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
+        ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
+            "Invalid PCI mem region size (%s), aborting.\n",
+            pci_name(ha->pdev));
+        goto iospace_error_exit;
+    }
+
+    ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
+    if (!ha->iobase) {
+        ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
+            "Cannot remap MMIO (%s), aborting.\n",
+            pci_name(ha->pdev));
+        goto iospace_error_exit;
+    }
+
+    /* Determine queue resources */
+    ha->max_req_queues = ha->max_rsp_queues = 1;
+    if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
+        (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
+        (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+        goto mqiobase_exit;
+
+    ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
+            pci_resource_len(ha->pdev, 3));
+    if (ha->mqiobase) {
+        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
+            "MQIO Base=%p.\n", ha->mqiobase);
+        /* Read MSIX vector size of the board */
+        pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
+        ha->msix_count = msix;
+        /* Max queues are bounded by available msix vectors */
+        /* queue 0 uses two msix vectors */
+        if (ql2xmultique_tag) {
+            cpus = num_online_cpus();
+            ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+                (cpus + 1) : (ha->msix_count - 1);
+            ha->max_req_queues = 2;
+        } else if (ql2xmaxqueues > 1) {
+            ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+                QLA_MQ_SIZE : ql2xmaxqueues;
+            ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
+                "QoS mode set, max no of request queues:%d.\n",
+                ha->max_req_queues);
+            ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
+                "QoS mode set, max no of request queues:%d.\n",
+                ha->max_req_queues);
+        }
+        ql_log_pci(ql_log_info, ha->pdev, 0x001a,
+            "MSI-X vector count: %d.\n", msix);
+    } else
+        ql_log_pci(ql_log_info, ha->pdev, 0x001b,
+            "BAR 3 not enabled.\n");
+
+mqiobase_exit:
+    ha->msix_count = ha->max_rsp_queues + 1;
+    ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
+        "MSIX Count:%d.\n", ha->msix_count);
+    return (0);
+
+iospace_error_exit:
+    return (-ENOMEM);
+}
+
+
 static struct isp_operations qla2100_isp_ops = {
     .pci_config = qla2100_pci_config,
     .reset_chip = qla2x00_reset_chip,
@@ -1530,6 +1630,7 @@ static struct isp_operations qla2100_isp_ops = {
     .get_flash_version = qla2x00_get_flash_version,
     .start_scsi = qla2x00_start_scsi,
     .abort_isp = qla2x00_abort_isp,
+    .iospace_config = qla2x00_iospace_config,
 };

 static struct isp_operations qla2300_isp_ops = {
@@ -1566,6 +1667,7 @@ static struct isp_operations qla2300_isp_ops = {
     .get_flash_version = qla2x00_get_flash_version,
     .start_scsi = qla2x00_start_scsi,
     .abort_isp = qla2x00_abort_isp,
+    .iospace_config = qla2x00_iospace_config,
 };

 static struct isp_operations qla24xx_isp_ops = {
@@ -1602,6 +1704,7 @@ static struct isp_operations qla24xx_isp_ops = {
     .get_flash_version = qla24xx_get_flash_version,
     .start_scsi = qla24xx_start_scsi,
     .abort_isp = qla2x00_abort_isp,
+    .iospace_config = qla2x00_iospace_config,
 };

 static struct isp_operations qla25xx_isp_ops = {
@@ -1638,6 +1741,7 @@ static struct isp_operations qla25xx_isp_ops = {
     .get_flash_version = qla24xx_get_flash_version,
     .start_scsi = qla24xx_dif_start_scsi,
     .abort_isp = qla2x00_abort_isp,
+    .iospace_config = qla2x00_iospace_config,
 };

 static struct isp_operations qla81xx_isp_ops = {
@@ -1674,6 +1778,7 @@ static struct isp_operations qla81xx_isp_ops = {
     .get_flash_version = qla24xx_get_flash_version,
     .start_scsi = qla24xx_dif_start_scsi,
     .abort_isp = qla2x00_abort_isp,
+    .iospace_config = qla2x00_iospace_config,
 };

 static struct isp_operations qla82xx_isp_ops = {
@@ -1710,6 +1815,7 @@ static struct isp_operations qla82xx_isp_ops = {
     .get_flash_version = qla24xx_get_flash_version,
     .start_scsi = qla82xx_start_scsi,
     .abort_isp = qla82xx_abort_isp,
+    .iospace_config = qla82xx_iospace_config,
 };

 static inline void
@@ -1819,121 +1925,10 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
     else
         ha->flags.port0 = 0;
     ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
-        "device_type=0x%x port=%d fw_srisc_address=%p.\n",
+        "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
         ha->device_type, ha->flags.port0, ha->fw_srisc_address);
 }

-static int
-qla2x00_iospace_config(struct qla_hw_data *ha)
-{
-    resource_size_t pio;
-    uint16_t msix;
-    int cpus;
-
-    if (IS_QLA82XX(ha))
-        return qla82xx_iospace_config(ha);
-
-    if (pci_request_selected_regions(ha->pdev, ha->bars,
-        QLA2XXX_DRIVER_NAME)) {
-        ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
-            "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
-            pci_name(ha->pdev));
-        goto iospace_error_exit;
-    }
-    if (!(ha->bars & 1))
-        goto skip_pio;
-
-    /* We only need PIO for Flash operations on ISP2312 v2 chips. */
-    pio = pci_resource_start(ha->pdev, 0);
-    if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
-        if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
-            ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
-                "Invalid pci I/O region size (%s).\n",
-                pci_name(ha->pdev));
-            pio = 0;
-        }
-    } else {
-        ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
-            "Region #0 no a PIO resource (%s).\n",
-            pci_name(ha->pdev));
-        pio = 0;
-    }
-    ha->pio_address = pio;
-    ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
-        "PIO address=%p.\n",
-        ha->pio_address);
-
-skip_pio:
-    /* Use MMIO operations for all accesses. */
-    if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
-        ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
-            "Region #1 not an MMIO resource (%s), aborting.\n",
-            pci_name(ha->pdev));
-        goto iospace_error_exit;
-    }
-    if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
-        ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
-            "Invalid PCI mem region size (%s), aborting.\n",
-            pci_name(ha->pdev));
-        goto iospace_error_exit;
-    }
-
-    ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
-    if (!ha->iobase) {
-        ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
-            "Cannot remap MMIO (%s), aborting.\n",
-            pci_name(ha->pdev));
-        goto iospace_error_exit;
-    }
-
-    /* Determine queue resources */
-    ha->max_req_queues = ha->max_rsp_queues = 1;
-    if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
-        (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
-        (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
-        goto mqiobase_exit;
-
-    ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
-            pci_resource_len(ha->pdev, 3));
-    if (ha->mqiobase) {
-        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
-            "MQIO Base=%p.\n", ha->mqiobase);
-        /* Read MSIX vector size of the board */
-        pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
-        ha->msix_count = msix;
-        /* Max queues are bounded by available msix vectors */
-        /* queue 0 uses two msix vectors */
-        if (ql2xmultique_tag) {
-            cpus = num_online_cpus();
-            ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
-                (cpus + 1) : (ha->msix_count - 1);
-            ha->max_req_queues = 2;
-        } else if (ql2xmaxqueues > 1) {
-            ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-                QLA_MQ_SIZE : ql2xmaxqueues;
-            ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
-                "QoS mode set, max no of request queues:%d.\n",
-                ha->max_req_queues);
-            ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
-                "QoS mode set, max no of request queues:%d.\n",
-                ha->max_req_queues);
-        }
-        ql_log_pci(ql_log_info, ha->pdev, 0x001a,
-            "MSI-X vector count: %d.\n", msix);
-    } else
-        ql_log_pci(ql_log_info, ha->pdev, 0x001b,
-            "BAR 3 not enabled.\n");
-
-mqiobase_exit:
-    ha->msix_count = ha->max_rsp_queues + 1;
-    ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
-        "MSIX Count:%d.\n", ha->msix_count);
-    return (0);
-
-iospace_error_exit:
-    return (-ENOMEM);
-}
-
 static void
 qla2xxx_scan_start(struct Scsi_Host *shost)
 {
@@ -2032,14 +2027,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
         pdev->needs_freset = 1;
     }

-    /* Configure PCI I/O space */
-    ret = qla2x00_iospace_config(ha);
-    if (ret)
-        goto probe_hw_failed;
-
-    ql_log_pci(ql_log_info, pdev, 0x001d,
-        "Found an ISP%04X irq %d iobase 0x%p.\n",
-        pdev->device, pdev->irq, ha->iobase);
     ha->prev_topology = 0;
     ha->init_cb_size = sizeof(init_cb_t);
     ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2152,6 +2139,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2152 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", 2139 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
2153 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, 2140 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
2154 ha->nvram_conf_off, ha->nvram_data_off); 2141 ha->nvram_conf_off, ha->nvram_data_off);
2142
2143 /* Configure PCI I/O space */
2144 ret = ha->isp_ops->iospace_config(ha);
2145 if (ret)
2146 goto probe_hw_failed;
2147
2148 ql_log_pci(ql_log_info, pdev, 0x001d,
2149 "Found an ISP%04X irq %d iobase 0x%p.\n",
2150 pdev->device, pdev->irq, ha->iobase);
2155 mutex_init(&ha->vport_lock); 2151 mutex_init(&ha->vport_lock);
2156 init_completion(&ha->mbx_cmd_comp); 2152 init_completion(&ha->mbx_cmd_comp);
2157 complete(&ha->mbx_cmd_comp); 2153 complete(&ha->mbx_cmd_comp);
@@ -2227,7 +2223,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
     ql_dbg(ql_dbg_init, base_vha, 0x0033,
         "max_id=%d this_id=%d "
         "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
-        "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
+        "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id,
         host->this_id, host->cmd_per_lun, host->unique_id,
         host->max_cmd_len, host->max_channel, host->max_lun,
         host->transportt, sht->vendor_id);
@@ -2382,9 +2378,6 @@ skip_dpc:

     qla2x00_dfs_setup(base_vha);

-    ql_log(ql_log_info, base_vha, 0x00fa,
-        "QLogic Fibre Channed HBA Driver: %s.\n",
-        qla2x00_version_str);
     ql_log(ql_log_info, base_vha, 0x00fb,
         "QLogic %s - %s.\n",
         ha->model_number, ha->model_desc ? ha->model_desc : "");
@@ -2833,7 +2826,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
         if (!ha->sns_cmd)
             goto fail_dma_pool;
         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
-            "sns_cmd.\n", ha->sns_cmd);
+            "sns_cmd: %p.\n", ha->sns_cmd);
     } else {
         /* Get consistent memory allocated for MS IOCB */
         ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -3460,27 +3453,21 @@ qla2x00_do_dpc(void *data)
         schedule();
         __set_current_state(TASK_RUNNING);

-        ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
-            "DPC handler waking up.\n");
-        ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
-            "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
-
-        /* Initialization not yet finished. Don't do anything yet. */
-        if (!base_vha->flags.init_done)
-            continue;
+        if (!base_vha->flags.init_done || ha->flags.mbox_busy)
+            goto end_loop;

         if (ha->flags.eeh_busy) {
             ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
                 "eeh_busy=%d.\n", ha->flags.eeh_busy);
-            continue;
+            goto end_loop;
         }

         ha->dpc_active = 1;

-        if (ha->flags.mbox_busy) {
-            ha->dpc_active = 0;
-            continue;
-        }
+        ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
+            "DPC handler waking up.\n");
+        ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
+            "dpc_flags=0x%lx.\n", base_vha->dpc_flags);

         qla2x00_do_work(base_vha);

@@ -3622,6 +3609,7 @@ qla2x00_do_dpc(void *data)
         qla2x00_do_dpc_all_vps(base_vha);

         ha->dpc_active = 0;
+end_loop:
         set_current_state(TASK_INTERRUPTIBLE);
     } /* End of while(1) */
     __set_current_state(TASK_RUNNING);
@@ -3705,16 +3693,6 @@ qla2x00_sp_free_dma(srb_t *sp)
         sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
     }

-    CMD_SP(cmd) = NULL;
-}
-
-static void
-qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
-{
-    struct scsi_cmnd *cmd = sp->cmd;
-
-    qla2x00_sp_free_dma(sp);
-
     if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
         struct ct6_dsd *ctx = sp->ctx;
         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
@@ -3726,6 +3704,15 @@ qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
         sp->ctx = NULL;
     }

+    CMD_SP(cmd) = NULL;
+}
+
+static void
+qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
+{
+    struct scsi_cmnd *cmd = sp->cmd;
+
+    qla2x00_sp_free_dma(sp);
     mempool_free(sp, ha->srb_mempool);
     cmd->scsi_done(cmd);
 }