Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c      147
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c       376
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h        30
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c        29
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h         2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h       166
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c         2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h         49
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h        39
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c          4
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c      565
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h     28
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c      261
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c       437
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c       224
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c         5
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c        151
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h         17
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c        770
-rw-r--r--  drivers/scsi/qla2xxx/qla_settings.h    2
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c        67
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h     6
22 files changed, 2961 insertions(+), 416 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5ab953029f8d..1c28215f8bed 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -26,7 +26,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
26 struct qla_hw_data *ha = vha->hw; 26 struct qla_hw_data *ha = vha->hw;
27 int rval = 0; 27 int rval = 0;
28 28
29 if (ha->fw_dump_reading == 0) 29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
30 return 0; 30 return 0;
31 31
32 if (IS_QLA82XX(ha)) { 32 if (IS_QLA82XX(ha)) {
@@ -39,9 +39,14 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
39 rval = memory_read_from_buffer(buf, count, 39 rval = memory_read_from_buffer(buf, count,
40 &off, ha->md_dump, ha->md_dump_size); 40 &off, ha->md_dump, ha->md_dump_size);
41 return rval; 41 return rval;
42 } else 42 } else if (ha->mctp_dumped && ha->mctp_dump_reading)
43 return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44 MCTP_DUMP_SIZE);
45 else if (ha->fw_dump_reading)
43 return memory_read_from_buffer(buf, count, &off, ha->fw_dump, 46 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
44 ha->fw_dump_len); 47 ha->fw_dump_len);
48 else
49 return 0;
45} 50}
46 51
47static ssize_t 52static ssize_t
@@ -107,6 +112,22 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
107 if (IS_QLA82XX(ha)) 112 if (IS_QLA82XX(ha))
108 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 113 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
109 break; 114 break;
115 case 6:
116 if (!ha->mctp_dump_reading)
117 break;
118 ql_log(ql_log_info, vha, 0x70c1,
119 "MCTP dump cleared on (%ld).\n", vha->host_no);
120 ha->mctp_dump_reading = 0;
121 ha->mctp_dumped = 0;
122 break;
123 case 7:
124 if (ha->mctp_dumped && !ha->mctp_dump_reading) {
125 ha->mctp_dump_reading = 1;
126 ql_log(ql_log_info, vha, 0x70c2,
127 "Raw mctp dump ready for read on (%ld).\n",
128 vha->host_no);
129 }
130 break;
110 } 131 }
111 return count; 132 return count;
112} 133}
@@ -564,6 +585,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
564 struct qla_hw_data *ha = vha->hw; 585 struct qla_hw_data *ha = vha->hw;
565 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 586 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
566 int type; 587 int type;
588 uint32_t idc_control;
567 589
568 if (off != 0) 590 if (off != 0)
569 return -EINVAL; 591 return -EINVAL;
@@ -587,22 +609,36 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
587 scsi_unblock_requests(vha->host); 609 scsi_unblock_requests(vha->host);
588 break; 610 break;
589 case 0x2025d: 611 case 0x2025d:
590 if (!IS_QLA81XX(ha) || !IS_QLA8031(ha)) 612 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
591 return -EPERM; 613 return -EPERM;
592 614
593 ql_log(ql_log_info, vha, 0x706f, 615 ql_log(ql_log_info, vha, 0x706f,
594 "Issuing MPI reset.\n"); 616 "Issuing MPI reset.\n");
595 617
596 /* Make sure FC side is not in reset */ 618 if (IS_QLA83XX(ha)) {
597 qla2x00_wait_for_hba_online(vha); 619 uint32_t idc_control;
598 620
599 /* Issue MPI reset */ 621 qla83xx_idc_lock(vha, 0);
600 scsi_block_requests(vha->host); 622 __qla83xx_get_idc_control(vha, &idc_control);
601 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) 623 idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
602 ql_log(ql_log_warn, vha, 0x7070, 624 __qla83xx_set_idc_control(vha, idc_control);
603 "MPI reset failed.\n"); 625 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
604 scsi_unblock_requests(vha->host); 626 QLA8XXX_DEV_NEED_RESET);
605 break; 627 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
628 qla83xx_idc_unlock(vha, 0);
629 break;
630 } else {
631 /* Make sure FC side is not in reset */
632 qla2x00_wait_for_hba_online(vha);
633
634 /* Issue MPI reset */
635 scsi_block_requests(vha->host);
636 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
637 ql_log(ql_log_warn, vha, 0x7070,
638 "MPI reset failed.\n");
639 scsi_unblock_requests(vha->host);
640 break;
641 }
606 case 0x2025e: 642 case 0x2025e:
607 if (!IS_QLA82XX(ha) || vha != base_vha) { 643 if (!IS_QLA82XX(ha) || vha != base_vha) {
608 ql_log(ql_log_info, vha, 0x7071, 644 ql_log(ql_log_info, vha, 0x7071,
@@ -616,6 +652,29 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
616 qla2xxx_wake_dpc(vha); 652 qla2xxx_wake_dpc(vha);
617 qla2x00_wait_for_fcoe_ctx_reset(vha); 653 qla2x00_wait_for_fcoe_ctx_reset(vha);
618 break; 654 break;
655 case 0x2025f:
656 if (!IS_QLA8031(ha))
657 return -EPERM;
658 ql_log(ql_log_info, vha, 0x70bc,
659 "Disabling Reset by IDC control\n");
660 qla83xx_idc_lock(vha, 0);
661 __qla83xx_get_idc_control(vha, &idc_control);
662 idc_control |= QLA83XX_IDC_RESET_DISABLED;
663 __qla83xx_set_idc_control(vha, idc_control);
664 qla83xx_idc_unlock(vha, 0);
665 break;
666 case 0x20260:
667 if (!IS_QLA8031(ha))
668 return -EPERM;
669 ql_log(ql_log_info, vha, 0x70bd,
670 "Enabling Reset by IDC control\n");
671 qla83xx_idc_lock(vha, 0);
672 __qla83xx_get_idc_control(vha, &idc_control);
673 idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
674 __qla83xx_set_idc_control(vha, idc_control);
675 qla83xx_idc_unlock(vha, 0);
676 break;
677
619 } 678 }
620 return count; 679 return count;
621} 680}
@@ -1251,6 +1310,49 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1251 state[1], state[2], state[3], state[4]); 1310 state[1], state[2], state[3], state[4]);
1252} 1311}
1253 1312
1313static ssize_t
1314qla2x00_diag_requests_show(struct device *dev,
1315 struct device_attribute *attr, char *buf)
1316{
1317 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1318
1319 if (!IS_BIDI_CAPABLE(vha->hw))
1320 return snprintf(buf, PAGE_SIZE, "\n");
1321
1322 return snprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1323}
1324
1325static ssize_t
1326qla2x00_diag_megabytes_show(struct device *dev,
1327 struct device_attribute *attr, char *buf)
1328{
1329 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1330
1331 if (!IS_BIDI_CAPABLE(vha->hw))
1332 return snprintf(buf, PAGE_SIZE, "\n");
1333
1334 return snprintf(buf, PAGE_SIZE, "%llu\n",
1335 vha->bidi_stats.transfer_bytes >> 20);
1336}
1337
1338static ssize_t
1339qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1340 char *buf)
1341{
1342 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1343 struct qla_hw_data *ha = vha->hw;
1344 uint32_t size;
1345
1346 if (!ha->fw_dumped)
1347 size = 0;
1348 else if (IS_QLA82XX(ha))
1349 size = ha->md_template_size + ha->md_dump_size;
1350 else
1351 size = ha->fw_dump_len;
1352
1353 return snprintf(buf, PAGE_SIZE, "%d\n", size);
1354}
1355
1254static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 1356static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1255static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 1357static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1256static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 1358static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1289,6 +1391,9 @@ static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1289static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); 1391static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1290static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); 1392static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1291static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL); 1393static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1394static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
1395static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
1396static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
1292 1397
1293struct device_attribute *qla2x00_host_attrs[] = { 1398struct device_attribute *qla2x00_host_attrs[] = {
1294 &dev_attr_driver_version, 1399 &dev_attr_driver_version,
@@ -1318,6 +1423,9 @@ struct device_attribute *qla2x00_host_attrs[] = {
1318 &dev_attr_fw_state, 1423 &dev_attr_fw_state,
1319 &dev_attr_optrom_gold_fw_version, 1424 &dev_attr_optrom_gold_fw_version,
1320 &dev_attr_thermal_temp, 1425 &dev_attr_thermal_temp,
1426 &dev_attr_diag_requests,
1427 &dev_attr_diag_megabytes,
1428 &dev_attr_fw_dump_size,
1321 NULL, 1429 NULL,
1322}; 1430};
1323 1431
@@ -1704,7 +1812,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1704 1812
1705 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 1813 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
1706 if (ha->fw_attributes & BIT_4) { 1814 if (ha->fw_attributes & BIT_4) {
1707 int prot = 0; 1815 int prot = 0, guard;
1708 vha->flags.difdix_supported = 1; 1816 vha->flags.difdix_supported = 1;
1709 ql_dbg(ql_dbg_user, vha, 0x7082, 1817 ql_dbg(ql_dbg_user, vha, 0x7082,
1710 "Registered for DIF/DIX type 1 and 3 protection.\n"); 1818 "Registered for DIF/DIX type 1 and 3 protection.\n");
@@ -1717,7 +1825,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1717 | SHOST_DIX_TYPE1_PROTECTION 1825 | SHOST_DIX_TYPE1_PROTECTION
1718 | SHOST_DIX_TYPE2_PROTECTION 1826 | SHOST_DIX_TYPE2_PROTECTION
1719 | SHOST_DIX_TYPE3_PROTECTION); 1827 | SHOST_DIX_TYPE3_PROTECTION);
1720 scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC); 1828
1829 guard = SHOST_DIX_GUARD_CRC;
1830
1831 if (IS_PI_IPGUARD_CAPABLE(ha) &&
1832 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
1833 guard |= SHOST_DIX_GUARD_IP;
1834
1835 scsi_host_set_guard(vha->host, guard);
1721 } else 1836 } else
1722 vha->flags.difdix_supported = 0; 1837 vha->flags.difdix_supported = 0;
1723 } 1838 }
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c68883806c54..2f9bddd3c616 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -530,13 +530,13 @@ done_unmap_sg:
530done: 530done:
531 return rval; 531 return rval;
532} 532}
533 533/*
534/* Set the port configuration to enable the 534 * Set the port configuration to enable the internal or external loopback
535 * internal loopback on ISP81XX 535 * depending on the loopback mode.
536 */ 536 */
537static inline int 537static inline int
538qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, 538qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
539 uint16_t *new_config) 539 uint16_t *new_config, uint16_t mode)
540{ 540{
541 int ret = 0; 541 int ret = 0;
542 int rval = 0; 542 int rval = 0;
@@ -545,8 +545,14 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
545 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 545 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
546 goto done_set_internal; 546 goto done_set_internal;
547 547
548 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); 548 if (mode == INTERNAL_LOOPBACK)
549 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; 549 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
550 else if (mode == EXTERNAL_LOOPBACK)
551 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
552 ql_dbg(ql_dbg_user, vha, 0x70be,
553 "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
554
555 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
550 556
551 ha->notify_dcbx_comp = 1; 557 ha->notify_dcbx_comp = 1;
552 ret = qla81xx_set_port_config(vha, new_config); 558 ret = qla81xx_set_port_config(vha, new_config);
@@ -562,9 +568,17 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
562 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { 568 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
563 ql_dbg(ql_dbg_user, vha, 0x7022, 569 ql_dbg(ql_dbg_user, vha, 0x7022,
564 "State change notification not received.\n"); 570 "State change notification not received.\n");
565 } else 571 rval = -EINVAL;
566 ql_dbg(ql_dbg_user, vha, 0x7023, 572 } else {
567 "State change received.\n"); 573 if (ha->flags.idc_compl_status) {
574 ql_dbg(ql_dbg_user, vha, 0x70c3,
575 "Bad status in IDC Completion AEN\n");
576 rval = -EINVAL;
577 ha->flags.idc_compl_status = 0;
578 } else
579 ql_dbg(ql_dbg_user, vha, 0x7023,
580 "State change received.\n");
581 }
568 582
569 ha->notify_dcbx_comp = 0; 583 ha->notify_dcbx_comp = 0;
570 584
@@ -572,11 +586,9 @@ done_set_internal:
572 return rval; 586 return rval;
573} 587}
574 588
575/* Set the port configuration to disable the 589/* Disable loopback mode */
576 * internal loopback on ISP81XX
577 */
578static inline int 590static inline int
579qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, 591qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
580 int wait) 592 int wait)
581{ 593{
582 int ret = 0; 594 int ret = 0;
@@ -589,8 +601,12 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
589 601
590 memset(new_config, 0 , sizeof(new_config)); 602 memset(new_config, 0 , sizeof(new_config));
591 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == 603 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
592 ENABLE_INTERNAL_LOOPBACK) { 604 ENABLE_INTERNAL_LOOPBACK ||
605 (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
606 ENABLE_EXTERNAL_LOOPBACK) {
593 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK; 607 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
608 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
609 (new_config[0] & INTERNAL_LOOPBACK_MASK));
594 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; 610 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
595 611
596 ha->notify_dcbx_comp = wait; 612 ha->notify_dcbx_comp = wait;
@@ -707,7 +723,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
707 723
708 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 724 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
709 725
710 if ((ha->current_topology == ISP_CFG_F || 726 if (atomic_read(&vha->loop_state) == LOOP_READY &&
727 (ha->current_topology == ISP_CFG_F ||
711 ((IS_QLA81XX(ha) || IS_QLA8031(ha)) && 728 ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
712 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 729 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
713 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 730 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
@@ -729,30 +746,24 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
729 goto done_free_dma_req; 746 goto done_free_dma_req;
730 } 747 }
731 748
732 if (elreq.options != EXTERNAL_LOOPBACK) { 749 ql_dbg(ql_dbg_user, vha, 0x70c0,
733 ql_dbg(ql_dbg_user, vha, 0x7020, 750 "elreq.options=%04x\n", elreq.options);
734 "Internal: current port config = %x\n", 751
735 config[0]); 752 if (elreq.options == EXTERNAL_LOOPBACK)
736 if (qla81xx_set_internal_loopback(vha, config, 753 if (IS_QLA8031(ha))
737 new_config)) { 754 rval = qla81xx_set_loopback_mode(vha,
738 ql_log(ql_log_warn, vha, 0x7024, 755 config, new_config, elreq.options);
739 "Internal loopback failed.\n"); 756 else
740 bsg_job->reply->result = 757 rval = qla81xx_reset_loopback_mode(vha,
741 (DID_ERROR << 16); 758 config, 1);
742 rval = -EPERM; 759 else
743 goto done_free_dma_req; 760 rval = qla81xx_set_loopback_mode(vha, config,
744 } 761 new_config, elreq.options);
745 } else { 762
746 /* For external loopback to work 763 if (rval) {
747 * ensure internal loopback is disabled 764 bsg_job->reply->result = (DID_ERROR << 16);
748 */ 765 rval = -EPERM;
749 if (qla81xx_reset_internal_loopback(vha, 766 goto done_free_dma_req;
750 config, 1)) {
751 bsg_job->reply->result =
752 (DID_ERROR << 16);
753 rval = -EPERM;
754 goto done_free_dma_req;
755 }
756 } 767 }
757 768
758 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 769 type = "FC_BSG_HST_VENDOR_LOOPBACK";
@@ -766,7 +777,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
766 /* Revert back to original port config 777 /* Revert back to original port config
767 * Also clear internal loopback 778 * Also clear internal loopback
768 */ 779 */
769 qla81xx_reset_internal_loopback(vha, 780 qla81xx_reset_loopback_mode(vha,
770 new_config, 0); 781 new_config, 0);
771 } 782 }
772 783
@@ -1364,7 +1375,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1364 struct qla_hw_data *ha = vha->hw; 1375 struct qla_hw_data *ha = vha->hw;
1365 int rval = 0; 1376 int rval = 0;
1366 1377
1367 if (ha->flags.isp82xx_reset_hdlr_active) 1378 if (ha->flags.nic_core_reset_hdlr_active)
1368 return -EBUSY; 1379 return -EBUSY;
1369 1380
1370 rval = qla2x00_optrom_setup(bsg_job, vha, 0); 1381 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
@@ -1560,6 +1571,276 @@ done:
1560} 1571}
1561 1572
1562static int 1573static int
1574qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
1575{
1576 struct Scsi_Host *host = bsg_job->shost;
1577 scsi_qla_host_t *vha = shost_priv(host);
1578 struct qla_hw_data *ha = vha->hw;
1579 int rval = 0;
1580 uint8_t bsg[DMA_POOL_SIZE];
1581 struct qla_i2c_access *i2c = (void *)bsg;
1582 dma_addr_t sfp_dma;
1583 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1584 if (!sfp) {
1585 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1586 EXT_STATUS_NO_MEMORY;
1587 goto done;
1588 }
1589
1590 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1591 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1592
1593 memcpy(sfp, i2c->buffer, i2c->length);
1594 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1595 i2c->device, i2c->offset, i2c->length, i2c->option);
1596
1597 if (rval) {
1598 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1599 EXT_STATUS_MAILBOX;
1600 goto dealloc;
1601 }
1602
1603 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1604
1605dealloc:
1606 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1607
1608done:
1609 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1610 bsg_job->reply->result = DID_OK << 16;
1611 bsg_job->job_done(bsg_job);
1612
1613 return 0;
1614}
1615
1616static int
1617qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
1618{
1619 struct Scsi_Host *host = bsg_job->shost;
1620 scsi_qla_host_t *vha = shost_priv(host);
1621 struct qla_hw_data *ha = vha->hw;
1622 int rval = 0;
1623 uint8_t bsg[DMA_POOL_SIZE];
1624 struct qla_i2c_access *i2c = (void *)bsg;
1625 dma_addr_t sfp_dma;
1626 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1627 if (!sfp) {
1628 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1629 EXT_STATUS_NO_MEMORY;
1630 goto done;
1631 }
1632
1633 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1634 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1635
1636 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1637 i2c->device, i2c->offset, i2c->length, i2c->option);
1638
1639 if (rval) {
1640 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1641 EXT_STATUS_MAILBOX;
1642 goto dealloc;
1643 }
1644
1645 memcpy(i2c->buffer, sfp, i2c->length);
1646 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1647 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1648
1649 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1650
1651dealloc:
1652 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1653
1654done:
1655 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1656 bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
1657 bsg_job->reply->result = DID_OK << 16;
1658 bsg_job->job_done(bsg_job);
1659
1660 return 0;
1661}
1662
1663static int
1664qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
1665{
1666 struct Scsi_Host *host = bsg_job->shost;
1667 scsi_qla_host_t *vha = shost_priv(host);
1668 struct qla_hw_data *ha = vha->hw;
1669 uint16_t thread_id;
1670 uint32_t rval = EXT_STATUS_OK;
1671 uint16_t req_sg_cnt = 0;
1672 uint16_t rsp_sg_cnt = 0;
1673 uint16_t nextlid = 0;
1674 uint32_t tot_dsds;
1675 srb_t *sp = NULL;
1676 uint32_t req_data_len = 0;
1677 uint32_t rsp_data_len = 0;
1678
1679 /* Check the type of the adapter */
1680 if (!IS_BIDI_CAPABLE(ha)) {
1681 ql_log(ql_log_warn, vha, 0x70a0,
1682 "This adapter is not supported\n");
1683 rval = EXT_STATUS_NOT_SUPPORTED;
1684 goto done;
1685 }
1686
1687 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1688 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1689 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1690 rval = EXT_STATUS_BUSY;
1691 goto done;
1692 }
1693
1694 /* Check if host is online */
1695 if (!vha->flags.online) {
1696 ql_log(ql_log_warn, vha, 0x70a1,
1697 "Host is not online\n");
1698 rval = EXT_STATUS_DEVICE_OFFLINE;
1699 goto done;
1700 }
1701
1702 /* Check if cable is plugged in or not */
1703 if (vha->device_flags & DFLG_NO_CABLE) {
1704 ql_log(ql_log_warn, vha, 0x70a2,
1705 "Cable is unplugged...\n");
1706 rval = EXT_STATUS_INVALID_CFG;
1707 goto done;
1708 }
1709
1710 /* Check if the switch is connected or not */
1711 if (ha->current_topology != ISP_CFG_F) {
1712 ql_log(ql_log_warn, vha, 0x70a3,
1713 "Host is not connected to the switch\n");
1714 rval = EXT_STATUS_INVALID_CFG;
1715 goto done;
1716 }
1717
1718 /* Check if operating mode is P2P */
1719 if (ha->operating_mode != P2P) {
1720 ql_log(ql_log_warn, vha, 0x70a4,
1721 "Host is operating mode is not P2p\n");
1722 rval = EXT_STATUS_INVALID_CFG;
1723 goto done;
1724 }
1725
1726 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1727
1728 mutex_lock(&ha->selflogin_lock);
1729 if (vha->self_login_loop_id == 0) {
1730 /* Initialize all required fields of fcport */
1731 vha->bidir_fcport.vha = vha;
1732 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1733 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1734 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1735 vha->bidir_fcport.loop_id = vha->loop_id;
1736
1737 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1738 ql_log(ql_log_warn, vha, 0x70a7,
1739 "Failed to login port %06X for bidirectional IOCB\n",
1740 vha->bidir_fcport.d_id.b24);
1741 mutex_unlock(&ha->selflogin_lock);
1742 rval = EXT_STATUS_MAILBOX;
1743 goto done;
1744 }
1745 vha->self_login_loop_id = nextlid - 1;
1746
1747 }
1748 /* Assign the self login loop id to fcport */
1749 mutex_unlock(&ha->selflogin_lock);
1750
1751 vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1752
1753 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1754 bsg_job->request_payload.sg_list,
1755 bsg_job->request_payload.sg_cnt,
1756 DMA_TO_DEVICE);
1757
1758 if (!req_sg_cnt) {
1759 rval = EXT_STATUS_NO_MEMORY;
1760 goto done;
1761 }
1762
1763 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1764 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1765 DMA_FROM_DEVICE);
1766
1767 if (!rsp_sg_cnt) {
1768 rval = EXT_STATUS_NO_MEMORY;
1769 goto done_unmap_req_sg;
1770 }
1771
1772 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1773 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1774 ql_dbg(ql_dbg_user, vha, 0x70a9,
1775 "Dma mapping resulted in different sg counts "
1776 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1777 "%x dma_reply_sg_cnt: %x]\n",
1778 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1779 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1780 rval = EXT_STATUS_NO_MEMORY;
1781 goto done_unmap_sg;
1782 }
1783
1784 if (req_data_len != rsp_data_len) {
1785 rval = EXT_STATUS_BUSY;
1786 ql_log(ql_log_warn, vha, 0x70aa,
1787 "req_data_len != rsp_data_len\n");
1788 goto done_unmap_sg;
1789 }
1790
1791 req_data_len = bsg_job->request_payload.payload_len;
1792 rsp_data_len = bsg_job->reply_payload.payload_len;
1793
1794
1795 /* Alloc SRB structure */
1796 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1797 if (!sp) {
1798 ql_dbg(ql_dbg_user, vha, 0x70ac,
1799 "Alloc SRB structure failed\n");
1800 rval = EXT_STATUS_NO_MEMORY;
1801 goto done_unmap_sg;
1802 }
1803
1804 /*Populate srb->ctx with bidir ctx*/
1805 sp->u.bsg_job = bsg_job;
1806 sp->free = qla2x00_bsg_sp_free;
1807 sp->type = SRB_BIDI_CMD;
1808 sp->done = qla2x00_bsg_job_done;
1809
1810 /* Add the read and write sg count */
1811 tot_dsds = rsp_sg_cnt + req_sg_cnt;
1812
1813 rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1814 if (rval != EXT_STATUS_OK)
1815 goto done_free_srb;
1816 /* the bsg request will be completed in the interrupt handler */
1817 return rval;
1818
1819done_free_srb:
1820 mempool_free(sp, ha->srb_mempool);
1821done_unmap_sg:
1822 dma_unmap_sg(&ha->pdev->dev,
1823 bsg_job->reply_payload.sg_list,
1824 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1825done_unmap_req_sg:
1826 dma_unmap_sg(&ha->pdev->dev,
1827 bsg_job->request_payload.sg_list,
1828 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1829done:
1830
1831 /* Return an error vendor specific response
1832 * and complete the bsg request
1833 */
1834 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1835 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1836 bsg_job->reply->reply_payload_rcv_len = 0;
1837 bsg_job->reply->result = (DID_OK) << 16;
1838 bsg_job->job_done(bsg_job);
1839 /* Always retrun success, vendor rsp carries correct status */
1840 return 0;
1841}
1842
1843static int
1563qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) 1844qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1564{ 1845{
1565 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { 1846 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -1596,6 +1877,15 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1596 case QL_VND_WRITE_FRU_STATUS: 1877 case QL_VND_WRITE_FRU_STATUS:
1597 return qla2x00_write_fru_status(bsg_job); 1878 return qla2x00_write_fru_status(bsg_job);
1598 1879
1880 case QL_VND_WRITE_I2C:
1881 return qla2x00_write_i2c(bsg_job);
1882
1883 case QL_VND_READ_I2C:
1884 return qla2x00_read_i2c(bsg_job);
1885
1886 case QL_VND_DIAG_IO_CMD:
1887 return qla24xx_process_bidir_cmd(bsg_job);
1888
1599 default: 1889 default:
1600 bsg_job->reply->result = (DID_ERROR << 16); 1890 bsg_job->reply->result = (DID_ERROR << 16);
1601 bsg_job->job_done(bsg_job); 1891 bsg_job->job_done(bsg_job);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 70caa63a8930..37b8b7ba7421 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -19,21 +19,41 @@
19#define QL_VND_SET_FRU_VERSION 0x0B 19#define QL_VND_SET_FRU_VERSION 0x0B
20#define QL_VND_READ_FRU_STATUS 0x0C 20#define QL_VND_READ_FRU_STATUS 0x0C
21#define QL_VND_WRITE_FRU_STATUS 0x0D 21#define QL_VND_WRITE_FRU_STATUS 0x0D
22#define QL_VND_DIAG_IO_CMD 0x0A
23#define QL_VND_WRITE_I2C 0x10
24#define QL_VND_READ_I2C 0x11
22 25
23/* BSG Vendor specific subcode returns */ 26/* BSG Vendor specific subcode returns */
24#define EXT_STATUS_OK 0 27#define EXT_STATUS_OK 0
25#define EXT_STATUS_ERR 1 28#define EXT_STATUS_ERR 1
29#define EXT_STATUS_BUSY 2
26#define EXT_STATUS_INVALID_PARAM 6 30#define EXT_STATUS_INVALID_PARAM 6
31#define EXT_STATUS_DATA_OVERRUN 7
32#define EXT_STATUS_DATA_UNDERRUN 8
27#define EXT_STATUS_MAILBOX 11 33#define EXT_STATUS_MAILBOX 11
28#define EXT_STATUS_NO_MEMORY 17 34#define EXT_STATUS_NO_MEMORY 17
35#define EXT_STATUS_DEVICE_OFFLINE 22
36
37/*
38 * To support bidirectional iocb
39 * BSG Vendor specific returns
40 */
41#define EXT_STATUS_NOT_SUPPORTED 27
42#define EXT_STATUS_INVALID_CFG 28
43#define EXT_STATUS_DMA_ERR 29
44#define EXT_STATUS_TIMEOUT 30
45#define EXT_STATUS_THREAD_FAILED 31
46#define EXT_STATUS_DATA_CMP_FAILED 32
29 47
30/* BSG definations for interpreting CommandSent field */ 48/* BSG definations for interpreting CommandSent field */
31#define INT_DEF_LB_LOOPBACK_CMD 0 49#define INT_DEF_LB_LOOPBACK_CMD 0
32#define INT_DEF_LB_ECHO_CMD 1 50#define INT_DEF_LB_ECHO_CMD 1
33 51
34/* Loopback related definations */ 52/* Loopback related definations */
53#define INTERNAL_LOOPBACK 0xF1
35#define EXTERNAL_LOOPBACK 0xF2 54#define EXTERNAL_LOOPBACK 0xF2
36#define ENABLE_INTERNAL_LOOPBACK 0x02 55#define ENABLE_INTERNAL_LOOPBACK 0x02
56#define ENABLE_EXTERNAL_LOOPBACK 0x04
37#define INTERNAL_LOOPBACK_MASK 0x000E 57#define INTERNAL_LOOPBACK_MASK 0x000E
38#define MAX_ELS_FRAME_PAYLOAD 252 58#define MAX_ELS_FRAME_PAYLOAD 252
39#define ELS_OPCODE_BYTE 0x10 59#define ELS_OPCODE_BYTE 0x10
@@ -183,4 +203,12 @@ struct qla_status_reg {
183 uint8_t reserved[7]; 203 uint8_t reserved[7];
184} __packed; 204} __packed;
185 205
206struct qla_i2c_access {
207 uint16_t device;
208 uint16_t offset;
209 uint16_t option;
210 uint16_t length;
211 uint8_t buffer[0x40];
212} __packed;
213
186#endif 214#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index fdee5611f3e2..44efe3cc79e6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -11,26 +11,31 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0122 | 0x4b,0xba,0xfa | 14 * | Module Init and Probe | 0x0124 | 0x4b,0xba,0xfa |
15 * | Mailbox commands | 0x1140 | 0x111a-0x111b | 15 * | Mailbox commands | 0x114f | 0x111a-0x111b |
16 * | | | 0x112c-0x112e | 16 * | | | 0x112c-0x112e |
17 * | | | 0x113a | 17 * | | | 0x113a |
18 * | Device Discovery | 0x2086 | 0x2020-0x2022 | 18 * | Device Discovery | 0x2087 | 0x2020-0x2022, |
19 * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 | 19 * | | | 0x2016 |
20 * | Queue Command and IO tracing | 0x3030 | 0x3006-0x300b |
21 * | | | 0x3027-0x3028 |
20 * | | | 0x302d-0x302e | 22 * | | | 0x302d-0x302e |
21 * | DPC Thread | 0x401c | 0x4002,0x4013 | 23 * | DPC Thread | 0x401d | 0x4002,0x4013 |
22 * | Async Events | 0x505f | 0x502b-0x502f | 24 * | Async Events | 0x5071 | 0x502b-0x502f |
23 * | | | 0x5047,0x5052 | 25 * | | | 0x5047,0x5052 |
24 * | Timer Routines | 0x6011 | | 26 * | Timer Routines | 0x6011 | |
25 * | User Space Interactions | 0x709f | 0x7018,0x702e, | 27 * | User Space Interactions | 0x70c3 | 0x7018,0x702e, |
26 * | | | 0x7039,0x7045, | 28 * | | | 0x7039,0x7045, |
27 * | | | 0x7073-0x7075, | 29 * | | | 0x7073-0x7075, |
28 * | | | 0x708c | 30 * | | | 0x708c, |
31 * | | | 0x70a5,0x70a6, |
32 * | | | 0x70a8,0x70ab, |
33 * | | | 0x70ad-0x70ae |
29 * | Task Management | 0x803c | 0x8025-0x8026 | 34 * | Task Management | 0x803c | 0x8025-0x8026 |
30 * | | | 0x800b,0x8039 | 35 * | | | 0x800b,0x8039 |
31 * | AER/EEH | 0x9011 | | 36 * | AER/EEH | 0x9011 | |
32 * | Virtual Port | 0xa007 | | 37 * | Virtual Port | 0xa007 | |
33 * | ISP82XX Specific | 0xb054 | 0xb024 | 38 * | ISP82XX Specific | 0xb084 | 0xb002,0xb024 |
34 * | MultiQ | 0xc00c | | 39 * | MultiQ | 0xc00c | |
35 * | Misc | 0xd010 | | 40 * | Misc | 0xd010 | |
36 * | Target Mode | 0xe06f | | 41 * | Target Mode | 0xe06f | |
@@ -2357,7 +2362,7 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2357 2362
2358/* 2363/*
2359 * This function is for formatting and logging debug information. 2364 * This function is for formatting and logging debug information.
2360 * It is to be used when vha is not available and pci is availble, 2365 * It is to be used when vha is not available and pci is available,
2361 * i.e., before host allocation. It formats the message and logs it 2366 * i.e., before host allocation. It formats the message and logs it
2362 * to the messages file. 2367 * to the messages file.
2363 * parameters: 2368 * parameters:
@@ -2452,7 +2457,7 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2452 2457
2453/* 2458/*
2454 * This function is for formatting and logging log messages. 2459 * This function is for formatting and logging log messages.
2455 * It is to be used when vha is not available and pci is availble, 2460 * It is to be used when vha is not available and pci is available,
2456 * i.e., before host allocation. It formats the message and logs 2461 * i.e., before host allocation. It formats the message and logs
2457 * it to the messages file. All the messages are logged irrespective 2462 * it to the messages file. All the messages are logged irrespective
2458 * of the value of ql2xextended_error_logging. 2463 * of the value of ql2xextended_error_logging.
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f278df8cce0f..8f911c0b1e74 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 39007f53aec0..a9725bf5527b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -115,6 +115,82 @@
115#define WRT_REG_DWORD(addr, data) writel(data,addr) 115#define WRT_REG_DWORD(addr, data) writel(data,addr)
116 116
117/* 117/*
118 * ISP83XX specific remote register addresses
119 */
120#define QLA83XX_LED_PORT0 0x00201320
121#define QLA83XX_LED_PORT1 0x00201328
122#define QLA83XX_IDC_DEV_STATE 0x22102384
123#define QLA83XX_IDC_MAJOR_VERSION 0x22102380
124#define QLA83XX_IDC_MINOR_VERSION 0x22102398
125#define QLA83XX_IDC_DRV_PRESENCE 0x22102388
126#define QLA83XX_IDC_DRIVER_ACK 0x2210238c
127#define QLA83XX_IDC_CONTROL 0x22102390
128#define QLA83XX_IDC_AUDIT 0x22102394
129#define QLA83XX_IDC_LOCK_RECOVERY 0x2210239c
130#define QLA83XX_DRIVER_LOCKID 0x22102104
131#define QLA83XX_DRIVER_LOCK 0x8111c028
132#define QLA83XX_DRIVER_UNLOCK 0x8111c02c
133#define QLA83XX_FLASH_LOCKID 0x22102100
134#define QLA83XX_FLASH_LOCK 0x8111c010
135#define QLA83XX_FLASH_UNLOCK 0x8111c014
136#define QLA83XX_DEV_PARTINFO1 0x221023e0
137#define QLA83XX_DEV_PARTINFO2 0x221023e4
138#define QLA83XX_FW_HEARTBEAT 0x221020b0
139#define QLA83XX_PEG_HALT_STATUS1 0x221020a8
140#define QLA83XX_PEG_HALT_STATUS2 0x221020ac
141
142/* 83XX: Macros defining 8200 AEN Reason codes */
143#define IDC_DEVICE_STATE_CHANGE BIT_0
144#define IDC_PEG_HALT_STATUS_CHANGE BIT_1
145#define IDC_NIC_FW_REPORTED_FAILURE BIT_2
146#define IDC_HEARTBEAT_FAILURE BIT_3
147
148/* 83XX: Macros defining 8200 AEN Error-levels */
149#define ERR_LEVEL_NON_FATAL 0x1
150#define ERR_LEVEL_RECOVERABLE_FATAL 0x2
151#define ERR_LEVEL_UNRECOVERABLE_FATAL 0x4
152
153/* 83XX: Macros for IDC Version */
154#define QLA83XX_SUPP_IDC_MAJOR_VERSION 0x01
155#define QLA83XX_SUPP_IDC_MINOR_VERSION 0x0
156
157/* 83XX: Macros for scheduling dpc tasks */
158#define QLA83XX_NIC_CORE_RESET 0x1
159#define QLA83XX_IDC_STATE_HANDLER 0x2
160#define QLA83XX_NIC_CORE_UNRECOVERABLE 0x3
161
162/* 83XX: Macros for defining IDC-Control bits */
163#define QLA83XX_IDC_RESET_DISABLED BIT_0
164#define QLA83XX_IDC_GRACEFUL_RESET BIT_1
165
166/* 83XX: Macros for different timeouts */
167#define QLA83XX_IDC_INITIALIZATION_TIMEOUT 30
168#define QLA83XX_IDC_RESET_ACK_TIMEOUT 10
169#define QLA83XX_MAX_LOCK_RECOVERY_WAIT (2 * HZ)
170
171/* 83XX: Macros for defining class in DEV-Partition Info register */
172#define QLA83XX_CLASS_TYPE_NONE 0x0
173#define QLA83XX_CLASS_TYPE_NIC 0x1
174#define QLA83XX_CLASS_TYPE_FCOE 0x2
175#define QLA83XX_CLASS_TYPE_ISCSI 0x3
176
177/* 83XX: Macros for IDC Lock-Recovery stages */
178#define IDC_LOCK_RECOVERY_STAGE1 0x1 /* Stage1: Intent for
179 * lock-recovery
180 */
181#define IDC_LOCK_RECOVERY_STAGE2 0x2 /* Stage2: Perform lock-recovery */
182
183/* 83XX: Macros for IDC Audit type */
184#define IDC_AUDIT_TIMESTAMP 0x0 /* IDC-AUDIT: Record timestamp of
185 * dev-state change to NEED-RESET
186 * or NEED-QUIESCENT
187 */
188#define IDC_AUDIT_COMPLETION 0x1 /* IDC-AUDIT: Record duration of
189 * reset-recovery completion is
190 * second
191 */
192
193/*
118 * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an 194 * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an
119 * 133Mhz slot. 195 * 133Mhz slot.
120 */ 196 */
@@ -129,6 +205,7 @@
129#define MAX_FIBRE_DEVICES_2400 2048 205#define MAX_FIBRE_DEVICES_2400 2048
130#define MAX_FIBRE_DEVICES_LOOP 128 206#define MAX_FIBRE_DEVICES_LOOP 128
131#define MAX_FIBRE_DEVICES_MAX MAX_FIBRE_DEVICES_2400 207#define MAX_FIBRE_DEVICES_MAX MAX_FIBRE_DEVICES_2400
208#define LOOPID_MAP_SIZE (ha->max_fibre_devices)
132#define MAX_FIBRE_LUNS 0xFFFF 209#define MAX_FIBRE_LUNS 0xFFFF
133#define MAX_HOST_COUNT 16 210#define MAX_HOST_COUNT 16
134 211
@@ -259,6 +336,7 @@ struct srb_iocb {
259#define SRB_ADISC_CMD 6 336#define SRB_ADISC_CMD 6
260#define SRB_TM_CMD 7 337#define SRB_TM_CMD 7
261#define SRB_SCSI_CMD 8 338#define SRB_SCSI_CMD 8
339#define SRB_BIDI_CMD 9
262 340
263typedef struct srb { 341typedef struct srb {
264 atomic_t ref_count; 342 atomic_t ref_count;
@@ -594,6 +672,20 @@ typedef struct {
594#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */ 672#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
595#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */ 673#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
596 674
675/* 83XX FCoE specific */
676#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
677
678/* Interrupt type codes */
679#define INTR_ROM_MB_SUCCESS 0x1
680#define INTR_ROM_MB_FAILED 0x2
681#define INTR_MB_SUCCESS 0x10
682#define INTR_MB_FAILED 0x11
683#define INTR_ASYNC_EVENT 0x12
684#define INTR_RSP_QUE_UPDATE 0x13
685#define INTR_RSP_QUE_UPDATE_83XX 0x14
686#define INTR_ATIO_QUE_UPDATE 0x1C
687#define INTR_ATIO_RSP_QUE_UPDATE 0x1D
688
597/* ISP mailbox loopback echo diagnostic error code */ 689/* ISP mailbox loopback echo diagnostic error code */
598#define MBS_LB_RESET 0x17 690#define MBS_LB_RESET 0x17
599/* 691/*
@@ -718,6 +810,7 @@ typedef struct {
718#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */ 810#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */
719#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */ 811#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
720#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */ 812#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
813#define MBC_PORT_RESET 0x120 /* Port Reset */
721#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */ 814#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
722#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */ 815#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */
723 816
@@ -1375,9 +1468,10 @@ typedef struct {
1375} cont_a64_entry_t; 1468} cont_a64_entry_t;
1376 1469
1377#define PO_MODE_DIF_INSERT 0 1470#define PO_MODE_DIF_INSERT 0
1378#define PO_MODE_DIF_REMOVE BIT_0 1471#define PO_MODE_DIF_REMOVE 1
1379#define PO_MODE_DIF_PASS BIT_1 1472#define PO_MODE_DIF_PASS 2
1380#define PO_MODE_DIF_REPLACE (BIT_0 + BIT_1) 1473#define PO_MODE_DIF_REPLACE 3
1474#define PO_MODE_DIF_TCP_CKSUM 6
1381#define PO_ENABLE_DIF_BUNDLING BIT_8 1475#define PO_ENABLE_DIF_BUNDLING BIT_8
1382#define PO_ENABLE_INCR_GUARD_SEED BIT_3 1476#define PO_ENABLE_INCR_GUARD_SEED BIT_3
1383#define PO_DISABLE_INCR_REF_TAG BIT_5 1477#define PO_DISABLE_INCR_REF_TAG BIT_5
@@ -1509,6 +1603,13 @@ typedef struct {
1509#define CS_RETRY 0x82 /* Driver defined */ 1603#define CS_RETRY 0x82 /* Driver defined */
1510#define CS_LOOP_DOWN_ABORT 0x83 /* Driver defined */ 1604#define CS_LOOP_DOWN_ABORT 0x83 /* Driver defined */
1511 1605
1606#define CS_BIDIR_RD_OVERRUN 0x700
1607#define CS_BIDIR_RD_WR_OVERRUN 0x707
1608#define CS_BIDIR_RD_OVERRUN_WR_UNDERRUN 0x715
1609#define CS_BIDIR_RD_UNDERRUN 0x1500
1610#define CS_BIDIR_RD_UNDERRUN_WR_OVERRUN 0x1507
1611#define CS_BIDIR_RD_WR_UNDERRUN 0x1515
1612#define CS_BIDIR_DMA 0x200
1512/* 1613/*
1513 * Status entry status flags 1614 * Status entry status flags
1514 */ 1615 */
@@ -2373,6 +2474,11 @@ struct qla_statistics {
2373 uint64_t output_bytes; 2474 uint64_t output_bytes;
2374}; 2475};
2375 2476
2477struct bidi_statistics {
2478 unsigned long long io_count;
2479 unsigned long long transfer_bytes;
2480};
2481
2376/* Multi queue support */ 2482/* Multi queue support */
2377#define MBC_INITIALIZE_MULTIQ 0x1f 2483#define MBC_INITIALIZE_MULTIQ 0x1f
2378#define QLA_QUE_PAGE 0X1000 2484#define QLA_QUE_PAGE 0X1000
@@ -2509,14 +2615,16 @@ struct qla_hw_data {
2509 uint32_t disable_msix_handshake :1; 2615 uint32_t disable_msix_handshake :1;
2510 uint32_t fcp_prio_enabled :1; 2616 uint32_t fcp_prio_enabled :1;
2511 uint32_t isp82xx_fw_hung:1; 2617 uint32_t isp82xx_fw_hung:1;
2618 uint32_t nic_core_hung:1;
2512 2619
2513 uint32_t quiesce_owner:1; 2620 uint32_t quiesce_owner:1;
2514 uint32_t thermal_supported:1; 2621 uint32_t thermal_supported:1;
2515 uint32_t isp82xx_reset_hdlr_active:1; 2622 uint32_t nic_core_reset_hdlr_active:1;
2516 uint32_t isp82xx_reset_owner:1; 2623 uint32_t nic_core_reset_owner:1;
2517 uint32_t isp82xx_no_md_cap:1; 2624 uint32_t isp82xx_no_md_cap:1;
2518 uint32_t host_shutting_down:1; 2625 uint32_t host_shutting_down:1;
2519 /* 30 bits */ 2626 uint32_t idc_compl_status:1;
2627 /* 32 bits */
2520 } flags; 2628 } flags;
2521 2629
2522 /* This spinlock is used to protect "io transactions", you must 2630 /* This spinlock is used to protect "io transactions", you must
@@ -2670,6 +2778,16 @@ struct qla_hw_data {
2670#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) 2778#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
2671#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED) 2779#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
2672#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha)) 2780#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha))
2781#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
2782/* Bit 21 of fw_attributes decides the MCTP capabilities */
2783#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
2784 ((ha)->fw_attributes_ext[0] & BIT_0))
2785#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha))
2786#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha))
2787#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
2788#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
2789#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
2790 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
2673 2791
2674 /* HBA serial number */ 2792 /* HBA serial number */
2675 uint8_t serial0; 2793 uint8_t serial0;
@@ -2753,6 +2871,7 @@ struct qla_hw_data {
2753 struct completion mbx_intr_comp; /* Used for completion notification */ 2871 struct completion mbx_intr_comp; /* Used for completion notification */
2754 struct completion dcbx_comp; /* For set port config notification */ 2872 struct completion dcbx_comp; /* For set port config notification */
2755 int notify_dcbx_comp; 2873 int notify_dcbx_comp;
2874 struct mutex selflogin_lock;
2756 2875
2757 /* Basic firmware related information. */ 2876 /* Basic firmware related information. */
2758 uint16_t fw_major_version; 2877 uint16_t fw_major_version;
@@ -2784,7 +2903,12 @@ struct qla_hw_data {
2784 int fw_dump_reading; 2903 int fw_dump_reading;
2785 dma_addr_t eft_dma; 2904 dma_addr_t eft_dma;
2786 void *eft; 2905 void *eft;
2787 2906/* Current size of mctp dump is 0x086064 bytes */
2907#define MCTP_DUMP_SIZE 0x086064
2908 dma_addr_t mctp_dump_dma;
2909 void *mctp_dump;
2910 int mctp_dumped;
2911 int mctp_dump_reading;
2788 uint32_t chain_offset; 2912 uint32_t chain_offset;
2789 struct dentry *dfs_dir; 2913 struct dentry *dfs_dir;
2790 struct dentry *dfs_fce; 2914 struct dentry *dfs_fce;
@@ -2896,8 +3020,8 @@ struct qla_hw_data {
2896 unsigned long mn_win_crb; 3020 unsigned long mn_win_crb;
2897 unsigned long ms_win_crb; 3021 unsigned long ms_win_crb;
2898 int qdr_sn_window; 3022 int qdr_sn_window;
2899 uint32_t nx_dev_init_timeout; 3023 uint32_t fcoe_dev_init_timeout;
2900 uint32_t nx_reset_timeout; 3024 uint32_t fcoe_reset_timeout;
2901 rwlock_t hw_lock; 3025 rwlock_t hw_lock;
2902 uint16_t portnum; /* port number */ 3026 uint16_t portnum; /* port number */
2903 int link_width; 3027 int link_width;
@@ -2918,6 +3042,20 @@ struct qla_hw_data {
2918 void *md_dump; 3042 void *md_dump;
2919 uint32_t md_dump_size; 3043 uint32_t md_dump_size;
2920 3044
3045 void *loop_id_map;
3046
3047 /* QLA83XX IDC specific fields */
3048 uint32_t idc_audit_ts;
3049
3050 /* DPC low-priority workqueue */
3051 struct workqueue_struct *dpc_lp_wq;
3052 struct work_struct idc_aen;
3053 /* DPC high-priority workqueue */
3054 struct workqueue_struct *dpc_hp_wq;
3055 struct work_struct nic_core_reset;
3056 struct work_struct idc_state_handler;
3057 struct work_struct nic_core_unrecoverable;
3058
2921 struct qlt_hw_data tgt; 3059 struct qlt_hw_data tgt;
2922}; 3060};
2923 3061
@@ -2985,6 +3123,13 @@ typedef struct scsi_qla_host {
2985 3123
2986 /* ISP configuration data. */ 3124 /* ISP configuration data. */
2987 uint16_t loop_id; /* Host adapter loop id */ 3125 uint16_t loop_id; /* Host adapter loop id */
3126 uint16_t self_login_loop_id; /* host adapter loop id
3127 * get it on self login
3128 */
3129 fc_port_t bidir_fcport; /* fcport used for bidir cmnds
3130 * no need of allocating it for
3131 * each command
3132 */
2988 3133
2989 port_id_t d_id; /* Host adapter port id */ 3134 port_id_t d_id; /* Host adapter port id */
2990 uint8_t marker_needed; 3135 uint8_t marker_needed;
@@ -3038,6 +3183,7 @@ typedef struct scsi_qla_host {
3038 int seconds_since_last_heartbeat; 3183 int seconds_since_last_heartbeat;
3039 struct fc_host_statistics fc_host_stat; 3184 struct fc_host_statistics fc_host_stat;
3040 struct qla_statistics qla_stats; 3185 struct qla_statistics qla_stats;
3186 struct bidi_statistics bidi_stats;
3041 3187
3042 atomic_t vref_count; 3188 atomic_t vref_count;
3043} scsi_qla_host_t; 3189} scsi_qla_host_t;
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 499c74e39ee5..706c4f7bc7c9 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 6d7d7758c797..59524aa0ab32 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -381,6 +381,44 @@ struct init_cb_24xx {
381/* 381/*
382 * ISP queue - command entry structure definition. 382 * ISP queue - command entry structure definition.
383 */ 383 */
384#define COMMAND_BIDIRECTIONAL 0x75
385struct cmd_bidir {
386 uint8_t entry_type; /* Entry type. */
387 uint8_t entry_count; /* Entry count. */
388 uint8_t sys_define; /* System defined */
389 uint8_t entry_status; /* Entry status. */
390
391 uint32_t handle; /* System handle. */
392
393 uint16_t nport_handle; /* N_PORT hanlde. */
394
395 uint16_t timeout; /* Commnad timeout. */
396
397 uint16_t wr_dseg_count; /* Write Data segment count. */
398 uint16_t rd_dseg_count; /* Read Data segment count. */
399
400 struct scsi_lun lun; /* FCP LUN (BE). */
401
402 uint16_t control_flags; /* Control flags. */
403#define BD_WRAP_BACK BIT_3
404#define BD_READ_DATA BIT_1
405#define BD_WRITE_DATA BIT_0
406
407 uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
408 uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
409
410 uint16_t reserved[2]; /* Reserved */
411
412 uint32_t rd_byte_count; /* Total Byte count Read. */
413 uint32_t wr_byte_count; /* Total Byte count write. */
414
415 uint8_t port_id[3]; /* PortID of destination port.*/
416 uint8_t vp_index;
417
418 uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
419 uint16_t fcp_data_dseg_len; /* Data segment length. */
420};
421
384#define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */ 422#define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */
385struct cmd_type_6 { 423struct cmd_type_6 {
386 uint8_t entry_type; /* Entry type. */ 424 uint8_t entry_type; /* Entry type. */
@@ -1130,7 +1168,7 @@ struct mid_db_entry_24xx {
1130/* 1168/*
1131 * Virtual Port Control IOCB 1169 * Virtual Port Control IOCB
1132 */ 1170 */
1133#define VP_CTRL_IOCB_TYPE 0x30 /* Vitual Port Control entry. */ 1171#define VP_CTRL_IOCB_TYPE 0x30 /* Virtual Port Control entry. */
1134struct vp_ctrl_entry_24xx { 1172struct vp_ctrl_entry_24xx {
1135 uint8_t entry_type; /* Entry type. */ 1173 uint8_t entry_type; /* Entry type. */
1136 uint8_t entry_count; /* Entry count. */ 1174 uint8_t entry_count; /* Entry count. */
@@ -1166,7 +1204,7 @@ struct vp_ctrl_entry_24xx {
1166/* 1204/*
1167 * Modify Virtual Port Configuration IOCB 1205 * Modify Virtual Port Configuration IOCB
1168 */ 1206 */
1169#define VP_CONFIG_IOCB_TYPE 0x31 /* Vitual Port Config entry. */ 1207#define VP_CONFIG_IOCB_TYPE 0x31 /* Virtual Port Config entry. */
1170struct vp_config_entry_24xx { 1208struct vp_config_entry_24xx {
1171 uint8_t entry_type; /* Entry type. */ 1209 uint8_t entry_type; /* Entry type. */
1172 uint8_t entry_count; /* Entry count. */ 1210 uint8_t entry_count; /* Entry count. */
@@ -1502,7 +1540,10 @@ struct access_chip_rsp_84xx {
1502/* 1540/*
1503 * ISP83xx mailbox commands 1541 * ISP83xx mailbox commands
1504 */ 1542 */
1505#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */ 1543#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */
1544#define MBC_READ_REMOTE_REG 0x0009 /* Read remote register */
1545#define MBC_RESTART_NIC_FIRMWARE 0x003d /* Restart NIC firmware */
1546#define MBC_SET_ACCESS_CONTROL 0x003e /* Access control command */
1506 1547
1507/* Flash access control option field bit definitions */ 1548/* Flash access control option field bit definitions */
1508#define FAC_OPT_FORCE_SEMAPHORE BIT_15 1549#define FAC_OPT_FORCE_SEMAPHORE BIT_15
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9eacd2df111b..6acb39785a46 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -48,7 +48,7 @@ extern void qla2x00_update_fcports(scsi_qla_host_t *);
48 48
49extern int qla2x00_abort_isp(scsi_qla_host_t *); 49extern int qla2x00_abort_isp(scsi_qla_host_t *);
50extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *); 50extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
51extern void qla82xx_quiescent_state_cleanup(scsi_qla_host_t *); 51extern void qla2x00_quiesce_io(scsi_qla_host_t *);
52 52
53extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); 53extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
54 54
@@ -76,6 +76,14 @@ extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
76 76
77extern fc_port_t * 77extern fc_port_t *
78qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t ); 78qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
79
80extern int __qla83xx_set_idc_control(scsi_qla_host_t *, uint32_t);
81extern int __qla83xx_get_idc_control(scsi_qla_host_t *, uint32_t *);
82extern void qla83xx_idc_audit(scsi_qla_host_t *, int);
83extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
84extern void qla83xx_reset_ownership(scsi_qla_host_t *);
85extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
86
79/* 87/*
80 * Global Data in qla_os.c source file. 88 * Global Data in qla_os.c source file.
81 */ 89 */
@@ -133,6 +141,20 @@ extern void qla2x00_relogin(struct scsi_qla_host *);
133extern void qla2x00_do_work(struct scsi_qla_host *); 141extern void qla2x00_do_work(struct scsi_qla_host *);
134extern void qla2x00_free_fcports(struct scsi_qla_host *); 142extern void qla2x00_free_fcports(struct scsi_qla_host *);
135 143
144extern void qla83xx_schedule_work(scsi_qla_host_t *, int);
145extern void qla83xx_service_idc_aen(struct work_struct *);
146extern void qla83xx_nic_core_unrecoverable_work(struct work_struct *);
147extern void qla83xx_idc_state_handler_work(struct work_struct *);
148extern void qla83xx_nic_core_reset_work(struct work_struct *);
149
150extern void qla83xx_idc_lock(scsi_qla_host_t *, uint16_t);
151extern void qla83xx_idc_unlock(scsi_qla_host_t *, uint16_t);
152extern int qla83xx_idc_state_handler(scsi_qla_host_t *);
153extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
154extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
155extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
156extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
157
136/* 158/*
137 * Global Functions in qla_mid.c source file. 159 * Global Functions in qla_mid.c source file.
138 */ 160 */
@@ -188,6 +210,8 @@ extern int qla2x00_start_sp(srb_t *);
188extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t); 210extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); 211extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
190extern int qla24xx_dif_start_scsi(srb_t *); 212extern int qla24xx_dif_start_scsi(srb_t *);
213extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
214extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
191 215
192extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); 216extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
193extern int qla2x00_issue_marker(scsi_qla_host_t *, int); 217extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
@@ -376,6 +400,9 @@ qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *);
376extern int 400extern int
377qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *); 401qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *);
378 402
403extern int
404qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
405
379/* 406/*
380 * Global Function Prototypes in qla_isr.c source file. 407 * Global Function Prototypes in qla_isr.c source file.
381 */ 408 */
@@ -419,7 +446,11 @@ extern void qla24xx_beacon_blink(struct scsi_qla_host *);
419extern void qla83xx_beacon_blink(struct scsi_qla_host *); 446extern void qla83xx_beacon_blink(struct scsi_qla_host *);
420extern int qla82xx_beacon_on(struct scsi_qla_host *); 447extern int qla82xx_beacon_on(struct scsi_qla_host *);
421extern int qla82xx_beacon_off(struct scsi_qla_host *); 448extern int qla82xx_beacon_off(struct scsi_qla_host *);
422extern int qla83xx_write_remote_reg(struct scsi_qla_host *, uint32_t, uint32_t); 449extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t);
450extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *);
451extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
452extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
453 uint32_t, uint16_t *);
423 454
424extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, 455extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
425 uint32_t, uint32_t); 456 uint32_t, uint32_t);
@@ -527,7 +558,6 @@ extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
527/* PCI related functions */ 558/* PCI related functions */
528extern int qla82xx_pci_config(struct scsi_qla_host *); 559extern int qla82xx_pci_config(struct scsi_qla_host *);
529extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int); 560extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
530extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *);
531extern int qla82xx_pci_region_offset(struct pci_dev *, int); 561extern int qla82xx_pci_region_offset(struct pci_dev *, int);
532extern int qla82xx_iospace_config(struct qla_hw_data *); 562extern int qla82xx_iospace_config(struct qla_hw_data *);
533 563
@@ -580,6 +610,7 @@ extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t);
580extern int qla82xx_idc_lock(struct qla_hw_data *); 610extern int qla82xx_idc_lock(struct qla_hw_data *);
581extern void qla82xx_idc_unlock(struct qla_hw_data *); 611extern void qla82xx_idc_unlock(struct qla_hw_data *);
582extern int qla82xx_device_state_handler(scsi_qla_host_t *); 612extern int qla82xx_device_state_handler(scsi_qla_host_t *);
613extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
583extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *); 614extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
584 615
585extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, 616extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
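The qla83xx_* prototypes added above come in pairs: a double-underscore variant (e.g. __qla83xx_set_drv_presence) that assumes the IDC lock is already held, and a plain wrapper that takes qla83xx_idc_lock()/qla83xx_idc_unlock() around it, as the qla_init.c hunks further down show for the DRV-ACK helpers. A minimal userspace sketch of that convention, with a pthread mutex standing in for the hardware IDC lock (all names and the register variable here are illustrative, not driver API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t idc_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t drv_presence;          /* stands in for the DRV-PRESENCE register */

/* __variant: caller already holds idc_lock */
static int __set_drv_presence(int portnum)
{
	drv_presence |= 1u << portnum;
	return 0;
}

/* plain variant: take the lock, call the __variant, drop the lock */
static int set_drv_presence(int portnum)
{
	int rval;

	pthread_mutex_lock(&idc_lock);
	rval = __set_drv_presence(portnum);
	pthread_mutex_unlock(&idc_lock);
	return rval;
}

int main(void)
{
	set_drv_presence(2);
	printf("drv_presence = 0x%x\n", drv_presence);   /* prints 0x4 */
	return 0;
}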
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 05260d25fe46..f4e4bd7c3f4d 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1131,7 +1131,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1131 return ret; 1131 return ret;
1132 1132
1133 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 1133 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1134 0xfa, mb, BIT_1|BIT_0); 1134 0xfa, mb, BIT_1);
1135 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { 1135 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1136 if (rval == QLA_MEMORY_ALLOC_FAILED) 1136 if (rval == QLA_MEMORY_ALLOC_FAILED)
1137 ql_dbg(ql_dbg_disc, vha, 0x2085, 1137 ql_dbg(ql_dbg_disc, vha, 0x2085,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a44653b42161..799a58bb9859 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -77,7 +77,7 @@ qla2x00_sp_free(void *data, void *ptr)
77 77
78/* Asynchronous Login/Logout Routines -------------------------------------- */ 78/* Asynchronous Login/Logout Routines -------------------------------------- */
79 79
80static inline unsigned long 80unsigned long
81qla2x00_get_async_timeout(struct scsi_qla_host *vha) 81qla2x00_get_async_timeout(struct scsi_qla_host *vha)
82{ 82{
83 unsigned long tmo; 83 unsigned long tmo;
@@ -429,6 +429,79 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
429/* QLogic ISP2x00 Hardware Support Functions. */ 429/* QLogic ISP2x00 Hardware Support Functions. */
430/****************************************************************************/ 430/****************************************************************************/
431 431
432int
433qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
434{
435 int rval = QLA_SUCCESS;
436 struct qla_hw_data *ha = vha->hw;
437 uint32_t idc_major_ver, idc_minor_ver;
438 uint16_t config[4];
439
440 qla83xx_idc_lock(vha, 0);
441
442 /* SV: TODO: Assign initialization timeout from
443 * flash-info / other param
444 */
445 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
446 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
447
448 /* Set our fcoe function presence */
449 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
450 ql_dbg(ql_dbg_p3p, vha, 0xb077,
451 "Error while setting DRV-Presence.\n");
452 rval = QLA_FUNCTION_FAILED;
453 goto exit;
454 }
455
456 /* Decide the reset ownership */
457 qla83xx_reset_ownership(vha);
458
459 /*
460 * On first protocol driver load:
461 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
462 * register.
463 * Others: Check compatibility with current IDC Major version.
464 */
465 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
466 if (ha->flags.nic_core_reset_owner) {
467 /* Set IDC Major version */
468 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
469 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
470
471 /* Clearing IDC-Lock-Recovery register */
472 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
473 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
474 /*
475 * Clear further IDC participation if we are not compatible with
476 * the current IDC Major Version.
477 */
478 ql_log(ql_log_warn, vha, 0xb07d,
479 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
480 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
481 __qla83xx_clear_drv_presence(vha);
482 rval = QLA_FUNCTION_FAILED;
483 goto exit;
484 }
485 /* Each function sets its supported Minor version. */
486 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
487 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
488 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
489
490 if (ha->flags.nic_core_reset_owner) {
491 memset(config, 0, sizeof(config));
492 if (!qla81xx_get_port_config(vha, config))
493 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
494 QLA8XXX_DEV_READY);
495 }
496
497 rval = qla83xx_idc_state_handler(vha);
498
499exit:
500 qla83xx_idc_unlock(vha, 0);
501
502 return rval;
503}
504
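qla83xx_nic_core_fw_load() above negotiates IDC versions: the reset owner writes the major version, while every function ORs its supported minor version into a 2-bit slot selected by (portnum * 2). A standalone sketch of that per-port packing and the matching extraction (the minor-version value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define SUPP_IDC_MINOR_VERSION 0x2u     /* illustrative, 2 bits wide */

static uint32_t set_minor_ver(uint32_t reg, unsigned portnum)
{
	/* each function owns bits [2*port+1 : 2*port] */
	return reg | (SUPP_IDC_MINOR_VERSION << (portnum * 2));
}

static uint32_t get_minor_ver(uint32_t reg, unsigned portnum)
{
	return (reg >> (portnum * 2)) & 0x3u;
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_minor_ver(reg, 0);
	reg = set_minor_ver(reg, 3);
	printf("reg=0x%08x port0=%u port3=%u port5=%u\n",
	    reg, get_minor_ver(reg, 0), get_minor_ver(reg, 3),
	    get_minor_ver(reg, 5));
	return 0;
}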
432/* 505/*
433* qla2x00_initialize_adapter 506* qla2x00_initialize_adapter
434* Initialize board. 507* Initialize board.
@@ -537,6 +610,14 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
537 } 610 }
538 } 611 }
539 612
613 /* Load the NIC Core f/w if we are the first protocol driver. */
614 if (IS_QLA8031(ha)) {
615 rval = qla83xx_nic_core_fw_load(vha);
616 if (rval)
617 ql_log(ql_log_warn, vha, 0x0124,
618 "Error in initializing NIC Core f/w.\n");
619 }
620
540 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) 621 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
541 qla24xx_read_fcp_prio_cfg(vha); 622 qla24xx_read_fcp_prio_cfg(vha);
542 623
@@ -686,7 +767,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
686 767
687 /* PCIe -- adjust Maximum Read Request Size (2048). */ 768 /* PCIe -- adjust Maximum Read Request Size (2048). */
688 if (pci_is_pcie(ha->pdev)) 769 if (pci_is_pcie(ha->pdev))
689 pcie_set_readrq(ha->pdev, 2048); 770 pcie_set_readrq(ha->pdev, 4096);
690 771
691 pci_disable_rom(ha->pdev); 772 pci_disable_rom(ha->pdev);
692 773
@@ -722,7 +803,7 @@ qla25xx_pci_config(scsi_qla_host_t *vha)
722 803
723 /* PCIe -- adjust Maximum Read Request Size (2048). */ 804 /* PCIe -- adjust Maximum Read Request Size (2048). */
724 if (pci_is_pcie(ha->pdev)) 805 if (pci_is_pcie(ha->pdev))
725 pcie_set_readrq(ha->pdev, 2048); 806 pcie_set_readrq(ha->pdev, 4096);
726 807
727 pci_disable_rom(ha->pdev); 808 pci_disable_rom(ha->pdev);
728 809
@@ -1480,7 +1561,8 @@ enable_82xx_npiv:
1480 "ISP Firmware failed checksum.\n"); 1561 "ISP Firmware failed checksum.\n");
1481 goto failed; 1562 goto failed;
1482 } 1563 }
1483 } 1564 } else
1565 goto failed;
1484 1566
1485 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { 1567 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1486 /* Enable proper parity. */ 1568 /* Enable proper parity. */
@@ -1825,7 +1907,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1825 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n"); 1907 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
1826 1908
1827 if (ha->flags.npiv_supported) { 1909 if (ha->flags.npiv_supported) {
1828 if (ha->operating_mode == LOOP) 1910 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
1829 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; 1911 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
1830 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); 1912 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1831 } 1913 }
@@ -2682,11 +2764,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2682 new_fcport = NULL; 2764 new_fcport = NULL;
2683 entries = MAX_FIBRE_DEVICES_LOOP; 2765 entries = MAX_FIBRE_DEVICES_LOOP;
2684 2766
2685 ql_dbg(ql_dbg_disc, vha, 0x2016,
2686 "Getting FCAL position map.\n");
2687 if (ql2xextended_error_logging & ql_dbg_disc)
2688 qla2x00_get_fcal_position_map(vha, NULL);
2689
2690 /* Get list of logged in devices. */ 2767 /* Get list of logged in devices. */
2691 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha)); 2768 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
2692 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, 2769 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
@@ -2753,6 +2830,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2753 if (loop_id > LAST_LOCAL_LOOP_ID) 2830 if (loop_id > LAST_LOCAL_LOOP_ID)
2754 continue; 2831 continue;
2755 2832
2833 memset(new_fcport, 0, sizeof(fc_port_t));
2834
2756 /* Fill in member data. */ 2835 /* Fill in member data. */
2757 new_fcport->d_id.b.domain = domain; 2836 new_fcport->d_id.b.domain = domain;
2758 new_fcport->d_id.b.area = area; 2837 new_fcport->d_id.b.area = area;
@@ -3285,7 +3364,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3285 */ 3364 */
3286 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3365 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3287 fcport->d_id.b24 = new_fcport->d_id.b24; 3366 fcport->d_id.b24 = new_fcport->d_id.b24;
3288 fcport->loop_id = FC_NO_LOOP_ID; 3367 qla2x00_clear_loop_id(fcport);
3289 fcport->flags |= (FCF_FABRIC_DEVICE | 3368 fcport->flags |= (FCF_FABRIC_DEVICE |
3290 FCF_LOGIN_NEEDED); 3369 FCF_LOGIN_NEEDED);
3291 break; 3370 break;
@@ -3306,7 +3385,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3306 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3385 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3307 fcport->d_id.b.domain, fcport->d_id.b.area, 3386 fcport->d_id.b.domain, fcport->d_id.b.area,
3308 fcport->d_id.b.al_pa); 3387 fcport->d_id.b.al_pa);
3309 fcport->loop_id = FC_NO_LOOP_ID; 3388 qla2x00_clear_loop_id(fcport);
3310 } 3389 }
3311 3390
3312 break; 3391 break;
@@ -3352,71 +3431,32 @@ int
3352qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) 3431qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3353{ 3432{
3354 int rval; 3433 int rval;
3355 int found;
3356 fc_port_t *fcport;
3357 uint16_t first_loop_id;
3358 struct qla_hw_data *ha = vha->hw; 3434 struct qla_hw_data *ha = vha->hw;
3359 struct scsi_qla_host *vp;
3360 struct scsi_qla_host *tvp;
3361 unsigned long flags = 0; 3435 unsigned long flags = 0;
3362 3436
3363 rval = QLA_SUCCESS; 3437 rval = QLA_SUCCESS;
3364 3438
3365 /* Save starting loop ID. */ 3439 spin_lock_irqsave(&ha->vport_slock, flags);
3366 first_loop_id = dev->loop_id;
3367
3368 for (;;) {
3369 /* Skip loop ID if already used by adapter. */
3370 if (dev->loop_id == vha->loop_id)
3371 dev->loop_id++;
3372
3373 /* Skip reserved loop IDs. */
3374 while (qla2x00_is_reserved_id(vha, dev->loop_id))
3375 dev->loop_id++;
3376
3377 /* Reset loop ID if passed the end. */
3378 if (dev->loop_id > ha->max_loop_id) {
3379 /* first loop ID. */
3380 dev->loop_id = ha->min_external_loopid;
3381 }
3382
3383 /* Check for loop ID being already in use. */
3384 found = 0;
3385 fcport = NULL;
3386
3387 spin_lock_irqsave(&ha->vport_slock, flags);
3388 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3389 list_for_each_entry(fcport, &vp->vp_fcports, list) {
3390 if (fcport->loop_id == dev->loop_id &&
3391 fcport != dev) {
3392 /* ID possibly in use */
3393 found++;
3394 break;
3395 }
3396 }
3397 if (found)
3398 break;
3399 }
3400 spin_unlock_irqrestore(&ha->vport_slock, flags);
3401 3440
3402 /* If not in use then it is free to use. */ 3441 dev->loop_id = find_first_zero_bit(ha->loop_id_map,
3403 if (!found) { 3442 LOOPID_MAP_SIZE);
3404 ql_dbg(ql_dbg_disc, dev->vha, 0x2086, 3443 if (dev->loop_id >= LOOPID_MAP_SIZE ||
3405 "Assigning new loopid=%x, portid=%x.\n", 3444 qla2x00_is_reserved_id(vha, dev->loop_id)) {
3406 dev->loop_id, dev->d_id.b24); 3445 dev->loop_id = FC_NO_LOOP_ID;
3407 break; 3446 rval = QLA_FUNCTION_FAILED;
3408 } 3447 } else
3448 set_bit(dev->loop_id, ha->loop_id_map);
3409 3449
3410 /* ID in use. Try next value. */ 3450 spin_unlock_irqrestore(&ha->vport_slock, flags);
3411 dev->loop_id++;
3412 3451
3413 /* If wrap around. No free ID to use. */ 3452 if (rval == QLA_SUCCESS)
3414 if (dev->loop_id == first_loop_id) { 3453 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
3415 dev->loop_id = FC_NO_LOOP_ID; 3454 "Assigning new loopid=%x, portid=%x.\n",
3416 rval = QLA_FUNCTION_FAILED; 3455 dev->loop_id, dev->d_id.b24);
3417 break; 3456 else
3418 } 3457 ql_log(ql_log_warn, dev->vha, 0x2087,
3419 } 3458 "No loop_id's available, portid=%x.\n",
3459 dev->d_id.b24);
3420 3460
3421 return (rval); 3461 return (rval);
3422} 3462}
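The rewritten qla2x00_find_new_loop_id() above drops the nested scan over every vport's fcport list and instead does a single find_first_zero_bit() lookup in ha->loop_id_map under vport_slock, setting the bit on success. A userspace sketch of the same allocate/free idea (plain uint32_t words stand in for the kernel bitmap; sizes are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAP_BITS  128
#define NO_ID     0xffffu

static uint32_t id_map[MAP_BITS / 32];

static unsigned find_first_zero(const uint32_t *map, unsigned nbits)
{
	for (unsigned i = 0; i < nbits; i++)
		if (!(map[i / 32] & (1u << (i % 32))))
			return i;
	return nbits;                       /* map full */
}

static uint16_t alloc_id(void)
{
	unsigned id = find_first_zero(id_map, MAP_BITS);

	if (id >= MAP_BITS)
		return NO_ID;               /* no free IDs */
	id_map[id / 32] |= 1u << (id % 32); /* claim it */
	return (uint16_t)id;
}

static void free_id(uint16_t id)
{
	if (id != NO_ID)
		id_map[id / 32] &= ~(1u << (id % 32));
}

int main(void)
{
	uint16_t a = alloc_id(), b = alloc_id();

	printf("a=%u b=%u\n", a, b);        /* 0 and 1 */
	free_id(a);
	printf("reused=%u\n", alloc_id());  /* 0 again */
	return 0;
}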
@@ -3616,7 +3656,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3616 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3656 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3617 fcport->d_id.b.domain, fcport->d_id.b.area, 3657 fcport->d_id.b.domain, fcport->d_id.b.area,
3618 fcport->d_id.b.al_pa); 3658 fcport->d_id.b.al_pa);
3619 fcport->loop_id = FC_NO_LOOP_ID; 3659 qla2x00_clear_loop_id(fcport);
3620 fcport->login_retry = 0; 3660 fcport->login_retry = 0;
3621 3661
3622 rval = 3; 3662 rval = 3;
@@ -3775,8 +3815,363 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3775 spin_unlock_irqrestore(&ha->vport_slock, flags); 3815 spin_unlock_irqrestore(&ha->vport_slock, flags);
3776} 3816}
3777 3817
3818/* Assumes idc_lock always held on entry */
3819void
3820qla83xx_reset_ownership(scsi_qla_host_t *vha)
3821{
3822 struct qla_hw_data *ha = vha->hw;
3823 uint32_t drv_presence, drv_presence_mask;
3824 uint32_t dev_part_info1, dev_part_info2, class_type;
3825 uint32_t class_type_mask = 0x3;
3826 uint16_t fcoe_other_function = 0xffff, i;
3827
3828 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
3829
3830 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
3831 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
3832 for (i = 0; i < 8; i++) {
3833 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
3834 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
3835 (i != ha->portnum)) {
3836 fcoe_other_function = i;
3837 break;
3838 }
3839 }
3840 if (fcoe_other_function == 0xffff) {
3841 for (i = 0; i < 8; i++) {
3842 class_type = ((dev_part_info2 >> (i * 4)) &
3843 class_type_mask);
3844 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
3845 ((i + 8) != ha->portnum)) {
3846 fcoe_other_function = i + 8;
3847 break;
3848 }
3849 }
3850 }
3851 /*
3852 * Prepare drv-presence mask based on fcoe functions present.
3853 * However consider only valid physical fcoe function numbers (0-15).
3854 */
3855 drv_presence_mask = ~((1 << (ha->portnum)) |
3856 ((fcoe_other_function == 0xffff) ?
3857 0 : (1 << (fcoe_other_function))));
3858
3859 /* We are the reset owner iff:
3860 * - No other protocol drivers present.
3861 * - This is the lowest among fcoe functions. */
3862 if (!(drv_presence & drv_presence_mask) &&
3863 (ha->portnum < fcoe_other_function)) {
3864 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
3865 "This host is Reset owner.\n");
3866 ha->flags.nic_core_reset_owner = 1;
3867 }
3868}
3869
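qla83xx_reset_ownership() above reads QLA83XX_DEV_PARTINFO1/2 as sixteen 4-bit slots (functions 0-7 in the first word, 8-15 in the second), keeps the low two bits of each slot as the class type, then masks its own port and the other FCoE function out of DRV-PRESENCE before deciding ownership. A small sketch of that decode (the register values and the FCoE class code below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define CLASS_TYPE_FCOE 0x2u            /* illustrative class code */

/* class type of function fn (0-15): 4-bit slot, low 2 bits used */
static unsigned class_type(uint32_t info1, uint32_t info2, unsigned fn)
{
	uint32_t word = (fn < 8) ? info1 : info2;

	return (word >> ((fn % 8) * 4)) & 0x3u;
}

int main(void)
{
	uint32_t info1 = 0x00002020;    /* pretend functions 1 and 3 are FCoE */
	uint32_t info2 = 0;
	uint32_t drv_presence = 0x0a;   /* functions 1 and 3 loaded */
	unsigned my_port = 1, other = 0xffff;

	for (unsigned fn = 0; fn < 16; fn++)
		if (fn != my_port &&
		    class_type(info1, info2, fn) == CLASS_TYPE_FCOE) {
			other = fn;
			break;
		}

	/* everyone except me and the other FCoE function */
	uint32_t mask = ~((1u << my_port) |
	    (other == 0xffff ? 0 : (1u << other)));

	int owner = !(drv_presence & mask) && my_port < other;
	printf("other=%u mask=0x%08x reset-owner=%d\n", other, mask, owner);
	return 0;
}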
3870int
3871__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
3872{
3873 int rval = QLA_SUCCESS;
3874 struct qla_hw_data *ha = vha->hw;
3875 uint32_t drv_ack;
3876
3877 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
3878 if (rval == QLA_SUCCESS) {
3879 drv_ack |= (1 << ha->portnum);
3880 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
3881 }
3882
3883 return rval;
3884}
3885
3886int
3887qla83xx_set_drv_ack(scsi_qla_host_t *vha)
3888{
3889 int rval = QLA_SUCCESS;
3890
3891 qla83xx_idc_lock(vha, 0);
3892 rval = __qla83xx_set_drv_ack(vha);
3893 qla83xx_idc_unlock(vha, 0);
3894
3895 return rval;
3896}
3897
3898int
3899__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
3900{
3901 int rval = QLA_SUCCESS;
3902 struct qla_hw_data *ha = vha->hw;
3903 uint32_t drv_ack;
3904
3905 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
3906 if (rval == QLA_SUCCESS) {
3907 drv_ack &= ~(1 << ha->portnum);
3908 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
3909 }
3910
3911 return rval;
3912}
3913
3914int
3915qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
3916{
3917 int rval = QLA_SUCCESS;
3918
3919 qla83xx_idc_lock(vha, 0);
3920 rval = __qla83xx_clear_drv_ack(vha);
3921 qla83xx_idc_unlock(vha, 0);
3922
3923 return rval;
3924}
3925
3926const char *
3927qla83xx_dev_state_to_string(uint32_t dev_state)
3928{
3929 switch (dev_state) {
3930 case QLA8XXX_DEV_COLD:
3931 return "COLD/RE-INIT";
3932 case QLA8XXX_DEV_INITIALIZING:
3933 return "INITIALIZING";
3934 case QLA8XXX_DEV_READY:
3935 return "READY";
3936 case QLA8XXX_DEV_NEED_RESET:
3937 return "NEED RESET";
3938 case QLA8XXX_DEV_NEED_QUIESCENT:
3939 return "NEED QUIESCENT";
3940 case QLA8XXX_DEV_FAILED:
3941 return "FAILED";
3942 case QLA8XXX_DEV_QUIESCENT:
3943 return "QUIESCENT";
3944 default:
3945 return "Unknown";
3946 }
3947}
3948
3949/* Assumes idc-lock always held on entry */
3950void
3951qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
3952{
3953 struct qla_hw_data *ha = vha->hw;
3954 uint32_t idc_audit_reg = 0, duration_secs = 0;
3955
3956 switch (audit_type) {
3957 case IDC_AUDIT_TIMESTAMP:
3958 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
3959 idc_audit_reg = (ha->portnum) |
3960 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
3961 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
3962 break;
3963
3964 case IDC_AUDIT_COMPLETION:
3965 duration_secs = ((jiffies_to_msecs(jiffies) -
3966 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
3967 idc_audit_reg = (ha->portnum) |
3968 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
3969 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
3970 break;
3971
3972 default:
3973 ql_log(ql_log_warn, vha, 0xb078,
3974 "Invalid audit type specified.\n");
3975 break;
3976 }
3977}
3978
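qla83xx_idc_audit() packs three fields into the audit register: the function number in the low bits, the audit type at bit 7, and a seconds value (a timestamp for IDC_AUDIT_TIMESTAMP, an elapsed duration for IDC_AUDIT_COMPLETION) from bit 8 upward. A sketch of that encode/decode, with illustrative names:

#include <stdint.h>
#include <stdio.h>

enum { AUDIT_TIMESTAMP = 0, AUDIT_COMPLETION = 1 };

static uint32_t audit_pack(unsigned portnum, unsigned type, uint32_t secs)
{
	return (portnum & 0x7fu) | ((type & 1u) << 7) | (secs << 8);
}

static void audit_unpack(uint32_t reg)
{
	printf("port=%u type=%s value=%u s\n",
	    reg & 0x7fu,
	    (reg >> 7) & 1u ? "completion" : "timestamp",
	    reg >> 8);
}

int main(void)
{
	audit_unpack(audit_pack(2, AUDIT_TIMESTAMP, 123456));
	audit_unpack(audit_pack(2, AUDIT_COMPLETION, 42));
	return 0;
}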
3979/* Assumes idc_lock always held on entry */
3980int
3981qla83xx_initiating_reset(scsi_qla_host_t *vha)
3982{
3983 struct qla_hw_data *ha = vha->hw;
3984 uint32_t idc_control, dev_state;
3985
3986 __qla83xx_get_idc_control(vha, &idc_control);
3987 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
3988 ql_log(ql_log_info, vha, 0xb080,
3989 "NIC Core reset has been disabled. idc-control=0x%x\n",
3990 idc_control);
3991 return QLA_FUNCTION_FAILED;
3992 }
3993
3994 /* Set NEED-RESET iff in READY state and we are the reset-owner */
3995 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
3996 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
3997 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
3998 QLA8XXX_DEV_NEED_RESET);
3999 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
4000 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
4001 } else {
4002 const char *state = qla83xx_dev_state_to_string(dev_state);
4003 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
4004
4005 /* SV: XXX: Is timeout required here? */
4006 /* Wait for IDC state change READY -> NEED_RESET */
4007 while (dev_state == QLA8XXX_DEV_READY) {
4008 qla83xx_idc_unlock(vha, 0);
4009 msleep(200);
4010 qla83xx_idc_lock(vha, 0);
4011 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4012 }
4013 }
4014
4015 /* Send IDC ack by writing to drv-ack register */
4016 __qla83xx_set_drv_ack(vha);
4017
4018 return QLA_SUCCESS;
4019}
4020
4021int
4022__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
4023{
4024 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
4025}
4026
4027int
4028qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
4029{
4030 int rval = QLA_SUCCESS;
4031
4032 qla83xx_idc_lock(vha, 0);
4033 rval = __qla83xx_set_idc_control(vha, idc_control);
4034 qla83xx_idc_unlock(vha, 0);
4035
4036 return rval;
4037}
4038
4039int
4040__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
4041{
4042 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
4043}
4044
4045int
4046qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
4047{
4048 int rval = QLA_SUCCESS;
4049
4050 qla83xx_idc_lock(vha, 0);
4051 rval = __qla83xx_get_idc_control(vha, idc_control);
4052 qla83xx_idc_unlock(vha, 0);
4053
4054 return rval;
4055}
4056
4057int
4058qla83xx_check_driver_presence(scsi_qla_host_t *vha)
4059{
4060 uint32_t drv_presence = 0;
4061 struct qla_hw_data *ha = vha->hw;
4062
4063 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4064 if (drv_presence & (1 << ha->portnum))
4065 return QLA_SUCCESS;
4066 else
4067 return QLA_TEST_FAILED;
4068}
4069
4070int
4071qla83xx_nic_core_reset(scsi_qla_host_t *vha)
4072{
4073 int rval = QLA_SUCCESS;
4074 struct qla_hw_data *ha = vha->hw;
4075
4076 ql_dbg(ql_dbg_p3p, vha, 0xb058,
4077 "Entered %s().\n", __func__);
4078
4079 if (vha->device_flags & DFLG_DEV_FAILED) {
4080 ql_log(ql_log_warn, vha, 0xb059,
4081 "Device in unrecoverable FAILED state.\n");
4082 return QLA_FUNCTION_FAILED;
4083 }
4084
4085 qla83xx_idc_lock(vha, 0);
4086
4087 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
4088 ql_log(ql_log_warn, vha, 0xb05a,
4089 "Function=0x%x has been removed from IDC participation.\n",
4090 ha->portnum);
4091 rval = QLA_FUNCTION_FAILED;
4092 goto exit;
4093 }
4094
4095 qla83xx_reset_ownership(vha);
4096
4097 rval = qla83xx_initiating_reset(vha);
4098
4099 /*
4100 * Perform reset if we are the reset-owner,
4101 * else wait till IDC state changes to READY/FAILED.
4102 */
4103 if (rval == QLA_SUCCESS) {
4104 rval = qla83xx_idc_state_handler(vha);
4105
4106 if (rval == QLA_SUCCESS)
4107 ha->flags.nic_core_hung = 0;
4108 __qla83xx_clear_drv_ack(vha);
4109 }
4110
4111exit:
4112 qla83xx_idc_unlock(vha, 0);
4113
4114 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
4115
4116 return rval;
4117}
4118
4119int
4120qla2xxx_mctp_dump(scsi_qla_host_t *vha)
4121{
4122 struct qla_hw_data *ha = vha->hw;
4123 int rval = QLA_FUNCTION_FAILED;
4124
4125 if (!IS_MCTP_CAPABLE(ha)) {
4126 /* This message can be removed from the final version */
4127 ql_log(ql_log_info, vha, 0x506d,
4128 "This board is not MCTP capable\n");
4129 return rval;
4130 }
4131
4132 if (!ha->mctp_dump) {
4133 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
4134 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
4135
4136 if (!ha->mctp_dump) {
4137 ql_log(ql_log_warn, vha, 0x506e,
4138 "Failed to allocate memory for mctp dump\n");
4139 return rval;
4140 }
4141 }
4142
4143#define MCTP_DUMP_STR_ADDR 0x00000000
4144 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
4145 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
4146 if (rval != QLA_SUCCESS) {
4147 ql_log(ql_log_warn, vha, 0x506f,
4148 "Failed to capture mctp dump\n");
4149 } else {
4150 ql_log(ql_log_info, vha, 0x5070,
4151 "Mctp dump capture for host (%ld/%p).\n",
4152 vha->host_no, ha->mctp_dump);
4153 ha->mctp_dumped = 1;
4154 }
4155
4156 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
4157 ha->flags.nic_core_reset_hdlr_active = 1;
4158 rval = qla83xx_restart_nic_firmware(vha);
4159 if (rval)
4160 /* NIC Core reset failed. */
4161 ql_log(ql_log_warn, vha, 0x5071,
4162 "Failed to restart nic firmware\n");
4163 else
4164 ql_dbg(ql_dbg_p3p, vha, 0xb084,
4165 "Restarted NIC firmware successfully.\n");
4166 ha->flags.nic_core_reset_hdlr_active = 0;
4167 }
4168
4169 return rval;
4170
4171}
4172
3778/* 4173/*
3779* qla82xx_quiescent_state_cleanup 4174* qla2x00_quiesce_io
3780* Description: This function will block the new I/Os 4175* Description: This function will block the new I/Os
3781* Its not aborting any I/Os as context 4176* Its not aborting any I/Os as context
3782* is not destroyed during quiescence 4177* is not destroyed during quiescence
@@ -3784,20 +4179,20 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3784* return : void 4179* return : void
3785*/ 4180*/
3786void 4181void
3787qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha) 4182qla2x00_quiesce_io(scsi_qla_host_t *vha)
3788{ 4183{
3789 struct qla_hw_data *ha = vha->hw; 4184 struct qla_hw_data *ha = vha->hw;
3790 struct scsi_qla_host *vp; 4185 struct scsi_qla_host *vp;
3791 4186
3792 ql_dbg(ql_dbg_p3p, vha, 0xb002, 4187 ql_dbg(ql_dbg_dpc, vha, 0x401d,
3793 "Performing ISP error recovery - ha=%p.\n", ha); 4188 "Quiescing I/O - ha=%p.\n", ha);
3794 4189
3795 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 4190 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
3796 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 4191 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3797 atomic_set(&vha->loop_state, LOOP_DOWN); 4192 atomic_set(&vha->loop_state, LOOP_DOWN);
3798 qla2x00_mark_all_devices_lost(vha, 0); 4193 qla2x00_mark_all_devices_lost(vha, 0);
3799 list_for_each_entry(vp, &ha->vp_list, list) 4194 list_for_each_entry(vp, &ha->vp_list, list)
3800 qla2x00_mark_all_devices_lost(vha, 0); 4195 qla2x00_mark_all_devices_lost(vp, 0);
3801 } else { 4196 } else {
3802 if (!atomic_read(&vha->loop_down_timer)) 4197 if (!atomic_read(&vha->loop_down_timer))
3803 atomic_set(&vha->loop_down_timer, 4198 atomic_set(&vha->loop_down_timer,
@@ -3913,6 +4308,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3913 if (vha->flags.online) { 4308 if (vha->flags.online) {
3914 qla2x00_abort_isp_cleanup(vha); 4309 qla2x00_abort_isp_cleanup(vha);
3915 4310
4311 if (IS_QLA8031(ha)) {
4312 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
4313 "Clearing fcoe driver presence.\n");
4314 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
4315 ql_dbg(ql_dbg_p3p, vha, 0xb073,
4316 "Error while clearing DRV-Presence.\n");
4317 }
4318
3916 if (unlikely(pci_channel_offline(ha->pdev) && 4319 if (unlikely(pci_channel_offline(ha->pdev) &&
3917 ha->flags.pci_channel_io_perm_failure)) { 4320 ha->flags.pci_channel_io_perm_failure)) {
3918 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 4321 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -4021,6 +4424,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4021 } 4424 }
4022 spin_unlock_irqrestore(&ha->vport_slock, flags); 4425 spin_unlock_irqrestore(&ha->vport_slock, flags);
4023 4426
4427 if (IS_QLA8031(ha)) {
4428 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
4429 "Setting back fcoe driver presence.\n");
4430 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
4431 ql_dbg(ql_dbg_p3p, vha, 0xb074,
4432 "Error while setting DRV-Presence.\n");
4433 }
4024 } else { 4434 } else {
4025 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n", 4435 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
4026 __func__); 4436 __func__);
@@ -5088,6 +5498,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5088 rval = 1; 5498 rval = 1;
5089 } 5499 }
5090 5500
5501 if (IS_T10_PI_CAPABLE(ha))
5502 nv->frame_payload_size &= ~7;
5503
5091 /* Reset Initialization control block */ 5504 /* Reset Initialization control block */
5092 memset(icb, 0, ha->init_cb_size); 5505 memset(icb, 0, ha->init_cb_size);
5093 5506
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 6e457643c639..c0462c04c885 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -57,6 +57,20 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
57 return fcp; 57 return fcp;
58} 58}
59 59
60static inline void
61qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
62{
63 int i;
64
65 if (IS_FWI2_CAPABLE(ha))
66 return;
67
68 for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
69 set_bit(i, ha->loop_id_map);
70 set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
71 set_bit(BROADCAST, ha->loop_id_map);
72}
73
60static inline int 74static inline int
61qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id) 75qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
62{ 76{
@@ -69,6 +83,18 @@ qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
69} 83}
70 84
71static inline void 85static inline void
86qla2x00_clear_loop_id(fc_port_t *fcport) {
87 struct qla_hw_data *ha = fcport->vha->hw;
88
89 if (fcport->loop_id == FC_NO_LOOP_ID ||
90 qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
91 return;
92
93 clear_bit(fcport->loop_id, ha->loop_id_map);
94 fcport->loop_id = FC_NO_LOOP_ID;
95}
96
97static inline void
72qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) 98qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
73{ 99{
74 struct dsd_dma *dsd_ptr, *tdsd_ptr; 100 struct dsd_dma *dsd_ptr, *tdsd_ptr;
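Together with the allocator change in qla_init.c, these two inline helpers close the loop-ID bitmap lifecycle: qla2x00_set_reserved_loop_ids() pre-sets the IDs the firmware reserves so they are never handed out, and qla2x00_clear_loop_id() only returns an ID to the map when it is neither FC_NO_LOOP_ID nor reserved. A userspace sketch of that guard (the reserved range below is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAP_BITS        64
#define NO_ID           0xffffu
#define FIRST_FREE_ID   8u              /* illustrative: IDs 0-7 reserved */

static uint32_t id_map[MAP_BITS / 32];

static void set_bit32(unsigned b)   { id_map[b / 32] |=  1u << (b % 32); }
static void clear_bit32(unsigned b) { id_map[b / 32] &= ~(1u << (b % 32)); }
static int  is_reserved(unsigned b) { return b < FIRST_FREE_ID; }

static void seed_reserved(void)
{
	for (unsigned i = 0; i < FIRST_FREE_ID; i++)
		set_bit32(i);           /* never hand these out */
}

static void clear_id(uint16_t *id)
{
	if (*id == NO_ID || is_reserved(*id))
		return;                 /* nothing to give back */
	clear_bit32(*id);
	*id = NO_ID;
}

int main(void)
{
	uint16_t port_id = 9;

	seed_reserved();
	set_bit32(port_id);             /* pretend it was allocated */
	clear_id(&port_id);
	printf("id now %u, word0=0x%08x\n", port_id, id_map[0]);
	return 0;
}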
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 70dbf53d9e0f..03b752632839 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -147,13 +147,6 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
147 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 147 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
148 uint8_t guard = scsi_host_get_guard(cmd->device->host); 148 uint8_t guard = scsi_host_get_guard(cmd->device->host);
149 149
150 /* We only support T10 DIF right now */
151 if (guard != SHOST_DIX_GUARD_CRC) {
152 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
153 "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
154 return 0;
155 }
156
157 /* We always use DIFF Bundling for best performance */ 150 /* We always use DIFF Bundling for best performance */
158 *fw_prot_opts = 0; 151 *fw_prot_opts = 0;
159 152
@@ -172,10 +165,11 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
172 *fw_prot_opts |= PO_MODE_DIF_REMOVE; 165 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
173 break; 166 break;
174 case SCSI_PROT_READ_PASS: 167 case SCSI_PROT_READ_PASS:
175 *fw_prot_opts |= PO_MODE_DIF_PASS;
176 break;
177 case SCSI_PROT_WRITE_PASS: 168 case SCSI_PROT_WRITE_PASS:
178 *fw_prot_opts |= PO_MODE_DIF_PASS; 169 if (guard & SHOST_DIX_GUARD_IP)
170 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
171 else
172 *fw_prot_opts |= PO_MODE_DIF_PASS;
179 break; 173 break;
180 default: /* Normal Request */ 174 default: /* Normal Request */
181 *fw_prot_opts |= PO_MODE_DIF_PASS; 175 *fw_prot_opts |= PO_MODE_DIF_PASS;
@@ -821,7 +815,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
821 unsigned int protcnt) 815 unsigned int protcnt)
822{ 816{
823 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 817 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
824 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
825 818
826 switch (scsi_get_prot_type(cmd)) { 819 switch (scsi_get_prot_type(cmd)) {
827 case SCSI_PROT_DIF_TYPE0: 820 case SCSI_PROT_DIF_TYPE0:
@@ -891,12 +884,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
891 pkt->ref_tag_mask[3] = 0xff; 884 pkt->ref_tag_mask[3] = 0xff;
892 break; 885 break;
893 } 886 }
894
895 ql_dbg(ql_dbg_io, vha, 0x3009,
896 "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
897 "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
898 pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
899 scsi_get_prot_type(cmd), cmd);
900} 887}
901 888
902struct qla2_sgx { 889struct qla2_sgx {
@@ -1068,9 +1055,6 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1068 int i; 1055 int i;
1069 uint16_t used_dsds = tot_dsds; 1056 uint16_t used_dsds = tot_dsds;
1070 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1057 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1071 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1072
1073 uint8_t *cp;
1074 1058
1075 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 1059 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
1076 dma_addr_t sle_dma; 1060 dma_addr_t sle_dma;
@@ -1113,19 +1097,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1113 cur_dsd = (uint32_t *)next_dsd; 1097 cur_dsd = (uint32_t *)next_dsd;
1114 } 1098 }
1115 sle_dma = sg_dma_address(sg); 1099 sle_dma = sg_dma_address(sg);
1116 ql_dbg(ql_dbg_io, vha, 0x300a, 1100
1117 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
1118 i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
1119 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 1101 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1120 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 1102 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1121 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 1103 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1122 avail_dsds--; 1104 avail_dsds--;
1123 1105
1124 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1125 cp = page_address(sg_page(sg)) + sg->offset;
1126 ql_dbg(ql_dbg_io, vha, 0x300b,
1127 "User data buffer=%p for cmd=%p.\n", cp, cmd);
1128 }
1129 } 1106 }
1130 /* Null termination */ 1107 /* Null termination */
1131 *cur_dsd++ = 0; 1108 *cur_dsd++ = 0;
@@ -1148,8 +1125,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1148 struct scsi_cmnd *cmd; 1125 struct scsi_cmnd *cmd;
1149 uint32_t *cur_dsd = dsd; 1126 uint32_t *cur_dsd = dsd;
1150 uint16_t used_dsds = tot_dsds; 1127 uint16_t used_dsds = tot_dsds;
1151 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1152 uint8_t *cp;
1153 1128
1154 cmd = GET_CMD_SP(sp); 1129 cmd = GET_CMD_SP(sp);
1155 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { 1130 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
@@ -1193,23 +1168,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1193 cur_dsd = (uint32_t *)next_dsd; 1168 cur_dsd = (uint32_t *)next_dsd;
1194 } 1169 }
1195 sle_dma = sg_dma_address(sg); 1170 sle_dma = sg_dma_address(sg);
1196 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) { 1171
1197 ql_dbg(ql_dbg_io, vha, 0x3027,
1198 "%s(): %p, sg_entry %d - "
1199 "addr=0x%x0x%x, len=%d.\n",
1200 __func__, cur_dsd, i,
1201 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
1202 }
1203 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 1172 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1204 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 1173 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1205 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 1174 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1206 1175
1207 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1208 cp = page_address(sg_page(sg)) + sg->offset;
1209 ql_dbg(ql_dbg_io, vha, 0x3028,
1210 "%s(): Protection Data buffer = %p.\n", __func__,
1211 cp);
1212 }
1213 avail_dsds--; 1176 avail_dsds--;
1214 } 1177 }
1215 /* Null termination */ 1178 /* Null termination */
@@ -1386,6 +1349,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1386 1349
1387 if (!qla2x00_hba_err_chk_enabled(sp)) 1350 if (!qla2x00_hba_err_chk_enabled(sp))
1388 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 1351 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1352 /* HBA error checking enabled */
1353 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1354 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1355 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1356 SCSI_PROT_DIF_TYPE2))
1357 fw_prot_opts |= BIT_10;
1358 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1359 SCSI_PROT_DIF_TYPE3)
1360 fw_prot_opts |= BIT_11;
1361 }
1389 1362
1390 if (!bundling) { 1363 if (!bundling) {
1391 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; 1364 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
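With HBA error checking left enabled, the hunk above adds one more protection-option bit on adapters that can handle uninitialised protection information: DIF Type 1 and Type 2 set bit 10, Type 3 sets bit 11, while disabled checking still sets the 0x10 "disable guard" bit. A compact sketch of that mapping (the enum and bit macros below are illustrative, not the firmware interface):

#include <stdint.h>
#include <stdio.h>

enum dif_type { DIF_TYPE0, DIF_TYPE1, DIF_TYPE2, DIF_TYPE3 };

#define OPT_DISABLE_GUARD   (1u << 4)   /* 0x10 in the hunk above */
#define OPT_PI_TYPE12       (1u << 10)
#define OPT_PI_TYPE3        (1u << 11)

static uint16_t prot_opts(int err_chk_enabled, int pi_uninit_capable,
    enum dif_type type)
{
	uint16_t opts = 0;

	if (!err_chk_enabled)
		opts |= OPT_DISABLE_GUARD;
	else if (pi_uninit_capable) {
		if (type == DIF_TYPE1 || type == DIF_TYPE2)
			opts |= OPT_PI_TYPE12;
		else if (type == DIF_TYPE3)
			opts |= OPT_PI_TYPE3;
	}
	return opts;
}

int main(void)
{
	printf("type1: 0x%04x  type3: 0x%04x  no-chk: 0x%04x\n",
	    prot_opts(1, 1, DIF_TYPE1),
	    prot_opts(1, 1, DIF_TYPE3),
	    prot_opts(0, 1, DIF_TYPE1));
	return 0;
}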
@@ -1858,7 +1831,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1858 } 1831 }
1859 if (index == MAX_OUTSTANDING_COMMANDS) { 1832 if (index == MAX_OUTSTANDING_COMMANDS) {
1860 ql_log(ql_log_warn, vha, 0x700b, 1833 ql_log(ql_log_warn, vha, 0x700b,
1861 "No room on oustanding cmd array.\n"); 1834 "No room on outstanding cmd array.\n");
1862 goto queuing_error; 1835 goto queuing_error;
1863 } 1836 }
1864 1837
@@ -2665,3 +2638,201 @@ done:
2665 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2638 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2666 return rval; 2639 return rval;
2667} 2640}
2641
2642static void
2643qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2644 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2645{
2646 uint16_t avail_dsds;
2647 uint32_t *cur_dsd;
2648 uint32_t req_data_len = 0;
2649 uint32_t rsp_data_len = 0;
2650 struct scatterlist *sg;
2651 int index;
2652 int entry_count = 1;
2653 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2654
2655 /*Update entry type to indicate bidir command */
2656 *((uint32_t *)(&cmd_pkt->entry_type)) =
2657 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2658
2659 /* Set the transfer direction, in this set both flags
2660 * Also set the BD_WRAP_BACK flag, firmware will take care
2661 * assigning DID=SID for outgoing pkts.
2662 */
2663 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2664 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2665 cmd_pkt->control_flags =
2666 __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2667 BD_WRAP_BACK);
2668
2669 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2670 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2671 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2672 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2673
2674 vha->bidi_stats.transfer_bytes += req_data_len;
2675 vha->bidi_stats.io_count++;
2676
2677 /* Only one dsd is available for bidirectional IOCB, remaining dsds
2678 * are bundled in continuation iocb
2679 */
2680 avail_dsds = 1;
2681 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2682
2683 index = 0;
2684
2685 for_each_sg(bsg_job->request_payload.sg_list, sg,
2686 bsg_job->request_payload.sg_cnt, index) {
2687 dma_addr_t sle_dma;
2688 cont_a64_entry_t *cont_pkt;
2689
2690 /* Allocate additional continuation packets */
2691 if (avail_dsds == 0) {
 2692 /* Continuation type 1 IOCB can accommodate
2693 * 5 DSDS
2694 */
2695 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2696 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2697 avail_dsds = 5;
2698 entry_count++;
2699 }
2700 sle_dma = sg_dma_address(sg);
2701 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2702 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2703 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2704 avail_dsds--;
2705 }
 2706 /* For read request DSD will always go to continuation IOCB
2707 * and follow the write DSD. If there is room on the current IOCB
2708 * then it is added to that IOCB else new continuation IOCB is
2709 * allocated.
2710 */
2711 for_each_sg(bsg_job->reply_payload.sg_list, sg,
2712 bsg_job->reply_payload.sg_cnt, index) {
2713 dma_addr_t sle_dma;
2714 cont_a64_entry_t *cont_pkt;
2715
2716 /* Allocate additional continuation packets */
2717 if (avail_dsds == 0) {
 2718 /* Continuation type 1 IOCB can accommodate
2719 * 5 DSDS
2720 */
2721 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2722 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2723 avail_dsds = 5;
2724 entry_count++;
2725 }
2726 sle_dma = sg_dma_address(sg);
2727 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2728 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2729 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2730 avail_dsds--;
2731 }
2732 /* This value should be same as number of IOCB required for this cmd */
2733 cmd_pkt->entry_count = entry_count;
2734}
2735
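qla25xx_build_bidir_iocb() keeps one data-segment descriptor in the bidirectional command IOCB and spills the rest into Continuation Type 1 IOCBs at five DSDs each, incrementing entry_count as it goes. A quick sketch of that arithmetic (this mirrors the comments above, not the actual qla24xx_calc_iocbs() implementation):

#include <stdio.h>

/* 1 DSD in the command IOCB, 5 per continuation IOCB */
static unsigned bidir_entry_count(unsigned tot_dsds)
{
	unsigned entries = 1;

	if (tot_dsds > 1)
		entries += (tot_dsds - 1 + 4) / 5;   /* ceil((n-1)/5) */
	return entries;
}

int main(void)
{
	for (unsigned n = 1; n <= 12; n++)
		printf("dsds=%2u -> iocbs=%u\n", n, bidir_entry_count(n));
	return 0;
}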
2736int
2737qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2738{
2739
2740 struct qla_hw_data *ha = vha->hw;
2741 unsigned long flags;
2742 uint32_t handle;
2743 uint32_t index;
2744 uint16_t req_cnt;
2745 uint16_t cnt;
2746 uint32_t *clr_ptr;
2747 struct cmd_bidir *cmd_pkt = NULL;
2748 struct rsp_que *rsp;
2749 struct req_que *req;
2750 int rval = EXT_STATUS_OK;
2751 device_reg_t __iomem *reg = ISP_QUE_REG(ha, vha->req->id);
2752
2753 rval = QLA_SUCCESS;
2754
2755 rsp = ha->rsp_q_map[0];
2756 req = vha->req;
2757
2758 /* Send marker if required */
2759 if (vha->marker_needed != 0) {
2760 if (qla2x00_marker(vha, req,
2761 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2762 return EXT_STATUS_MAILBOX;
2763 vha->marker_needed = 0;
2764 }
2765
2766 /* Acquire ring specific lock */
2767 spin_lock_irqsave(&ha->hardware_lock, flags);
2768
2769 /* Check for room in outstanding command list. */
2770 handle = req->current_outstanding_cmd;
2771 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2772 handle++;
2773 if (handle == MAX_OUTSTANDING_COMMANDS)
2774 handle = 1;
2775 if (!req->outstanding_cmds[handle])
2776 break;
2777 }
2778
2779 if (index == MAX_OUTSTANDING_COMMANDS) {
2780 rval = EXT_STATUS_BUSY;
2781 goto queuing_error;
2782 }
2783
2784 /* Calculate number of IOCB required */
2785 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2786
2787 /* Check for room on request queue. */
2788 if (req->cnt < req_cnt + 2) {
2789 if (ha->mqenable)
2790 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2791 else if (IS_QLA82XX(ha))
2792 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2793 else if (IS_FWI2_CAPABLE(ha))
2794 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2795 else
2796 cnt = qla2x00_debounce_register(
2797 ISP_REQ_Q_OUT(ha, &reg->isp));
2798
2799 if (req->ring_index < cnt)
2800 req->cnt = cnt - req->ring_index;
2801 else
2802 req->cnt = req->length -
2803 (req->ring_index - cnt);
2804 }
2805 if (req->cnt < req_cnt + 2) {
2806 rval = EXT_STATUS_BUSY;
2807 goto queuing_error;
2808 }
2809
2810 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2811 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2812
2813 /* Zero out remaining portion of packet. */
2814 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2815 clr_ptr = (uint32_t *)cmd_pkt + 2;
2816 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2817
2818 /* Set NPORT-ID (of vha)*/
2819 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2820 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2821 cmd_pkt->port_id[1] = vha->d_id.b.area;
2822 cmd_pkt->port_id[2] = vha->d_id.b.domain;
2823
2824 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2825 cmd_pkt->entry_status = (uint8_t) rsp->id;
2826 /* Build command packet. */
2827 req->current_outstanding_cmd = handle;
2828 req->outstanding_cmds[handle] = sp;
2829 sp->handle = handle;
2830 req->cnt -= req_cnt;
2831
2832 /* Send the command to the firmware */
2833 wmb();
2834 qla2x00_start_iocbs(vha, req);
2835queuing_error:
2836 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2837 return rval;
2838}
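The queue-space check in qla2x00_start_bidir() re-reads the hardware out-pointer and recomputes the free request-ring entries with wrap-around: when the producer index is below the consumer the gap is (out - in), otherwise (length - (in - out)). A standalone sketch of that computation:

#include <stdio.h>

/* free entries on a circular request ring of 'length' slots */
static unsigned ring_free(unsigned ring_index, unsigned out, unsigned length)
{
	if (ring_index < out)
		return out - ring_index;
	return length - (ring_index - out);
}

int main(void)
{
	printf("%u\n", ring_free(10, 200, 256));   /* 190 free */
	printf("%u\n", ring_free(250, 10, 256));   /* 16 free (wrapped) */
	return 0;
}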
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6f67a9d4998b..5733811ce8e7 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -294,6 +294,11 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
294 "%04x %04x %04x %04x %04x %04x %04x.\n", 294 "%04x %04x %04x %04x %04x %04x %04x.\n",
295 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 295 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
296 mb[4], mb[5], mb[6]); 296 mb[4], mb[5], mb[6]);
297 if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
298 vha->hw->flags.idc_compl_status = 1;
299 if (vha->hw->notify_dcbx_comp)
300 complete(&vha->hw->dcbx_comp);
301 }
297 302
298 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 303 /* Acknowledgement needed? [Notify && non-zero timeout]. */
299 timeout = (descr >> 8) & 0xf; 304 timeout = (descr >> 8) & 0xf;
@@ -332,6 +337,166 @@ qla2x00_get_link_speed_str(struct qla_hw_data *ha)
332 return link_speed; 337 return link_speed;
333} 338}
334 339
340void
341qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
342{
343 struct qla_hw_data *ha = vha->hw;
344
345 /*
346 * 8200 AEN Interpretation:
347 * mb[0] = AEN code
348 * mb[1] = AEN Reason code
349 * mb[2] = LSW of Peg-Halt Status-1 Register
350 * mb[6] = MSW of Peg-Halt Status-1 Register
351 * mb[3] = LSW of Peg-Halt Status-2 register
352 * mb[7] = MSW of Peg-Halt Status-2 register
353 * mb[4] = IDC Device-State Register value
354 * mb[5] = IDC Driver-Presence Register value
355 */
356 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
357 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
358 mb[0], mb[1], mb[2], mb[6]);
359 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
360 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
361 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
362
363 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
364 IDC_HEARTBEAT_FAILURE)) {
365 ha->flags.nic_core_hung = 1;
366 ql_log(ql_log_warn, vha, 0x5060,
367 "83XX: F/W Error Reported: Check if reset required.\n");
368
369 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
370 uint32_t protocol_engine_id, fw_err_code, err_level;
371
372 /*
373 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
374 * - PEG-Halt Status-1 Register:
375 * (LSW = mb[2], MSW = mb[6])
376 * Bits 0-7 = protocol-engine ID
377 * Bits 8-28 = f/w error code
378 * Bits 29-31 = Error-level
379 * Error-level 0x1 = Non-Fatal error
380 * Error-level 0x2 = Recoverable Fatal error
381 * Error-level 0x4 = UnRecoverable Fatal error
382 * - PEG-Halt Status-2 Register:
383 * (LSW = mb[3], MSW = mb[7])
384 */
385 protocol_engine_id = (mb[2] & 0xff);
386 fw_err_code = (((mb[2] & 0xff00) >> 8) |
387 ((mb[6] & 0x1fff) << 8));
388 err_level = ((mb[6] & 0xe000) >> 13);
389 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
390 "Register: protocol_engine_id=0x%x "
391 "fw_err_code=0x%x err_level=0x%x.\n",
392 protocol_engine_id, fw_err_code, err_level);
393 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
394 "Register: 0x%x%x.\n", mb[7], mb[3]);
395 if (err_level == ERR_LEVEL_NON_FATAL) {
396 ql_log(ql_log_warn, vha, 0x5063,
397 "Not a fatal error, f/w has recovered "
398 "iteself.\n");
399 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
400 ql_log(ql_log_fatal, vha, 0x5064,
401 "Recoverable Fatal error: Chip reset "
402 "required.\n");
403 qla83xx_schedule_work(vha,
404 QLA83XX_NIC_CORE_RESET);
405 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
406 ql_log(ql_log_fatal, vha, 0x5065,
407 "Unrecoverable Fatal error: Set FAILED "
408 "state, reboot required.\n");
409 qla83xx_schedule_work(vha,
410 QLA83XX_NIC_CORE_UNRECOVERABLE);
411 }
412 }
413
414 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
415 uint16_t peg_fw_state, nw_interface_link_up;
416 uint16_t nw_interface_signal_detect, sfp_status;
417 uint16_t htbt_counter, htbt_monitor_enable;
418 uint16_t sfp_additonal_info, sfp_multirate;
419 uint16_t sfp_tx_fault, link_speed, dcbx_status;
420
421 /*
422 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
423 * - PEG-to-FC Status Register:
424 * (LSW = mb[2], MSW = mb[6])
425 * Bits 0-7 = Peg-Firmware state
426 * Bit 8 = N/W Interface Link-up
427 * Bit 9 = N/W Interface signal detected
428 * Bits 10-11 = SFP Status
429 * SFP Status 0x0 = SFP+ transceiver not expected
430 * SFP Status 0x1 = SFP+ transceiver not present
431 * SFP Status 0x2 = SFP+ transceiver invalid
432 * SFP Status 0x3 = SFP+ transceiver present and
433 * valid
434 * Bits 12-14 = Heartbeat Counter
435 * Bit 15 = Heartbeat Monitor Enable
436 * Bits 16-17 = SFP Additional Info
 437 * SFP info 0x0 = Unrecognized transceiver for
438 * Ethernet
439 * SFP info 0x1 = SFP+ brand validation failed
440 * SFP info 0x2 = SFP+ speed validation failed
441 * SFP info 0x3 = SFP+ access error
442 * Bit 18 = SFP Multirate
443 * Bit 19 = SFP Tx Fault
444 * Bits 20-22 = Link Speed
445 * Bits 23-27 = Reserved
446 * Bits 28-30 = DCBX Status
447 * DCBX Status 0x0 = DCBX Disabled
448 * DCBX Status 0x1 = DCBX Enabled
449 * DCBX Status 0x2 = DCBX Exchange error
450 * Bit 31 = Reserved
451 */
452 peg_fw_state = (mb[2] & 0x00ff);
453 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
454 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
455 sfp_status = ((mb[2] & 0x0c00) >> 10);
456 htbt_counter = ((mb[2] & 0x7000) >> 12);
457 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
458 sfp_additonal_info = (mb[6] & 0x0003);
459 sfp_multirate = ((mb[6] & 0x0004) >> 2);
460 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
461 link_speed = ((mb[6] & 0x0070) >> 4);
462 dcbx_status = ((mb[6] & 0x7000) >> 12);
463
464 ql_log(ql_log_warn, vha, 0x5066,
465 "Peg-to-Fc Status Register:\n"
466 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
467 "nw_interface_signal_detect=0x%x"
468 "\nsfp_statis=0x%x.\n ", peg_fw_state,
469 nw_interface_link_up, nw_interface_signal_detect,
470 sfp_status);
471 ql_log(ql_log_warn, vha, 0x5067,
472 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
473 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
474 htbt_counter, htbt_monitor_enable,
475 sfp_additonal_info, sfp_multirate);
476 ql_log(ql_log_warn, vha, 0x5068,
477 "sfp_tx_fault=0x%x, link_state=0x%x, "
478 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
479 dcbx_status);
480
481 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
482 }
483
484 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
485 ql_log(ql_log_warn, vha, 0x5069,
486 "Heartbeat Failure encountered, chip reset "
487 "required.\n");
488
489 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
490 }
491 }
492
493 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
494 ql_log(ql_log_info, vha, 0x506a,
495 "IDC Device-State changed = 0x%x.\n", mb[4]);
496 qla83xx_schedule_work(vha, MBA_IDC_AEN);
497 }
498}
499
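qla83xx_handle_8200_aen() above rebuilds 32-bit status registers from pairs of 16-bit mailboxes (mb[2]/mb[6] for Peg-Halt Status-1) and carves out the protocol-engine ID (bits 0-7), firmware error code (bits 8-28) and error level (bits 29-31). A sketch that reassembles the register first and extracts the same fields as the masked/shifted expressions in the hunk (the sample mailbox values are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t mb2 = 0x3412;                   /* LSW of Peg-Halt Status-1 */
	uint16_t mb6 = 0x5678;                   /* MSW of Peg-Halt Status-1 */
	uint32_t reg = ((uint32_t)mb6 << 16) | mb2;

	unsigned engine_id   = reg & 0xff;            /* bits 0-7  */
	unsigned fw_err_code = (reg >> 8) & 0x1fffff; /* bits 8-28 */
	unsigned err_level   = reg >> 29;             /* bits 29-31 */

	printf("id=0x%x err=0x%x level=0x%x\n",
	    engine_id, fw_err_code, err_level);
	return 0;
}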
335/** 500/**
 336 * qla2x00_async_event() - Process asynchronous events. 501 * qla2x00_async_event() - Process asynchronous events.
337 * @ha: SCSI driver HA context 502 * @ha: SCSI driver HA context
@@ -681,8 +846,7 @@ skip_rio:
681 * it. Otherwise ignore it and Wait for RSCN to come in. 846 * it. Otherwise ignore it and Wait for RSCN to come in.
682 */ 847 */
683 atomic_set(&vha->loop_down_timer, 0); 848 atomic_set(&vha->loop_down_timer, 0);
684 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 849 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
685 atomic_read(&vha->loop_state) != LOOP_DEAD) {
686 ql_dbg(ql_dbg_async, vha, 0x5011, 850 ql_dbg(ql_dbg_async, vha, 0x5011,
687 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 851 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
688 mb[1], mb[2], mb[3]); 852 mb[1], mb[2], mb[3]);
@@ -822,11 +986,28 @@ skip_rio:
822 "FCF Configuration Error -- %04x %04x %04x.\n", 986 "FCF Configuration Error -- %04x %04x %04x.\n",
823 mb[1], mb[2], mb[3]); 987 mb[1], mb[2], mb[3]);
824 break; 988 break;
825 case MBA_IDC_COMPLETE:
826 case MBA_IDC_NOTIFY: 989 case MBA_IDC_NOTIFY:
990 /* See if we need to quiesce any I/O */
991 if (IS_QLA8031(vha->hw))
992 if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
993 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
994 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
995 qla2xxx_wake_dpc(vha);
996 }
997 case MBA_IDC_COMPLETE:
827 case MBA_IDC_TIME_EXT: 998 case MBA_IDC_TIME_EXT:
828 qla81xx_idc_event(vha, mb[0], mb[1]); 999 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
1000 qla81xx_idc_event(vha, mb[0], mb[1]);
829 break; 1001 break;
1002
1003 case MBA_IDC_AEN:
1004 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1005 mb[5] = RD_REG_WORD(&reg24->mailbox5);
1006 mb[6] = RD_REG_WORD(&reg24->mailbox6);
1007 mb[7] = RD_REG_WORD(&reg24->mailbox7);
1008 qla83xx_handle_8200_aen(vha, mb);
1009 break;
1010
830 default: 1011 default:
831 ql_dbg(ql_dbg_async, vha, 0x5057, 1012 ql_dbg(ql_dbg_async, vha, 0x5057,
832 "Unknown AEN:%04x %04x %04x %04x\n", 1013 "Unknown AEN:%04x %04x %04x %04x\n",
@@ -1414,7 +1595,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1414 1595
1415struct scsi_dif_tuple { 1596struct scsi_dif_tuple {
1416 __be16 guard; /* Checksum */ 1597 __be16 guard; /* Checksum */
1417 __be16 app_tag; /* APPL identifer */ 1598 __be16 app_tag; /* APPL identifier */
1418 __be32 ref_tag; /* Target LBA or indirect LBA */ 1599 __be32 ref_tag; /* Target LBA or indirect LBA */
1419}; 1600};
1420 1601
@@ -1546,6 +1727,149 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1546 return 1; 1727 return 1;
1547} 1728}
1548 1729
1730static void
1731qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1732 struct req_que *req, uint32_t index)
1733{
1734 struct qla_hw_data *ha = vha->hw;
1735 srb_t *sp;
1736 uint16_t comp_status;
1737 uint16_t scsi_status;
1738 uint16_t thread_id;
1739 uint32_t rval = EXT_STATUS_OK;
1740 struct fc_bsg_job *bsg_job = NULL;
1741 sts_entry_t *sts;
1742 struct sts_entry_24xx *sts24;
1743 sts = (sts_entry_t *) pkt;
1744 sts24 = (struct sts_entry_24xx *) pkt;
1745
1746 /* Validate handle. */
1747 if (index >= MAX_OUTSTANDING_COMMANDS) {
1748 ql_log(ql_log_warn, vha, 0x70af,
1749 "Invalid SCSI completion handle 0x%x.\n", index);
1750 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1751 return;
1752 }
1753
1754 sp = req->outstanding_cmds[index];
1755 if (sp) {
1756 /* Free outstanding command slot. */
1757 req->outstanding_cmds[index] = NULL;
1758 bsg_job = sp->u.bsg_job;
1759 } else {
1760 ql_log(ql_log_warn, vha, 0x70b0,
1761 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1762 req->id, index);
1763
1764 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1765 return;
1766 }
1767
1768 if (IS_FWI2_CAPABLE(ha)) {
1769 comp_status = le16_to_cpu(sts24->comp_status);
1770 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1771 } else {
1772 comp_status = le16_to_cpu(sts->comp_status);
1773 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1774 }
1775
1776 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1777 switch (comp_status) {
1778 case CS_COMPLETE:
1779 if (scsi_status == 0) {
1780 bsg_job->reply->reply_payload_rcv_len =
1781 bsg_job->reply_payload.payload_len;
1782 rval = EXT_STATUS_OK;
1783 }
1784 goto done;
1785
1786 case CS_DATA_OVERRUN:
1787 ql_dbg(ql_dbg_user, vha, 0x70b1,
1788 "Command completed with date overrun thread_id=%d\n",
1789 thread_id);
1790 rval = EXT_STATUS_DATA_OVERRUN;
1791 break;
1792
1793 case CS_DATA_UNDERRUN:
1794 ql_dbg(ql_dbg_user, vha, 0x70b2,
1795 "Command completed with date underrun thread_id=%d\n",
1796 thread_id);
1797 rval = EXT_STATUS_DATA_UNDERRUN;
1798 break;
1799 case CS_BIDIR_RD_OVERRUN:
1800 ql_dbg(ql_dbg_user, vha, 0x70b3,
1801 "Command completed with read data overrun thread_id=%d\n",
1802 thread_id);
1803 rval = EXT_STATUS_DATA_OVERRUN;
1804 break;
1805
1806 case CS_BIDIR_RD_WR_OVERRUN:
1807 ql_dbg(ql_dbg_user, vha, 0x70b4,
1808 "Command completed with read and write data overrun "
1809 "thread_id=%d\n", thread_id);
1810 rval = EXT_STATUS_DATA_OVERRUN;
1811 break;
1812
1813 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1814 ql_dbg(ql_dbg_user, vha, 0x70b5,
1815 "Command completed with read data over and write data "
1816 "underrun thread_id=%d\n", thread_id);
1817 rval = EXT_STATUS_DATA_OVERRUN;
1818 break;
1819
1820 case CS_BIDIR_RD_UNDERRUN:
1821 ql_dbg(ql_dbg_user, vha, 0x70b6,
1822 "Command completed with read data data underrun "
1823 "thread_id=%d\n", thread_id);
1824 rval = EXT_STATUS_DATA_UNDERRUN;
1825 break;
1826
1827 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1828 ql_dbg(ql_dbg_user, vha, 0x70b7,
1829 "Command completed with read data under and write data "
1830 "overrun thread_id=%d\n", thread_id);
1831 rval = EXT_STATUS_DATA_UNDERRUN;
1832 break;
1833
1834 case CS_BIDIR_RD_WR_UNDERRUN:
1835 ql_dbg(ql_dbg_user, vha, 0x70b8,
1836 "Command completed with read and write data underrun "
1837 "thread_id=%d\n", thread_id);
1838 rval = EXT_STATUS_DATA_UNDERRUN;
1839 break;
1840
1841 case CS_BIDIR_DMA:
1842 ql_dbg(ql_dbg_user, vha, 0x70b9,
1843 "Command completed with data DMA error thread_id=%d\n",
1844 thread_id);
1845 rval = EXT_STATUS_DMA_ERR;
1846 break;
1847
1848 case CS_TIMEOUT:
1849 ql_dbg(ql_dbg_user, vha, 0x70ba,
1850 "Command completed with timeout thread_id=%d\n",
1851 thread_id);
1852 rval = EXT_STATUS_TIMEOUT;
1853 break;
1854 default:
1855 ql_dbg(ql_dbg_user, vha, 0x70bb,
1856 "Command completed with completion status=0x%x "
1857 "thread_id=%d\n", comp_status, thread_id);
1858 rval = EXT_STATUS_ERR;
1859 break;
1860 }
1861 bsg_job->reply->reply_payload_rcv_len = 0;
1862
1863done:
1864 /* Return the vendor specific reply to API */
1865 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1866 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1867 /* Always return DID_OK; bsg will send the vendor-specific response
1868 * in this case only. */
1869 sp->done(vha, sp, (DID_OK << 16));
1870
1871}
1872
1549/** 1873/**
1550 * qla2x00_status_entry() - Process a Status IOCB entry. 1874 * qla2x00_status_entry() - Process a Status IOCB entry.
1551 * @ha: SCSI driver HA context 1875 * @ha: SCSI driver HA context
@@ -1573,12 +1897,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1573 struct req_que *req; 1897 struct req_que *req;
1574 int logit = 1; 1898 int logit = 1;
1575 int res = 0; 1899 int res = 0;
1900 uint16_t state_flags = 0;
1576 1901
1577 sts = (sts_entry_t *) pkt; 1902 sts = (sts_entry_t *) pkt;
1578 sts24 = (struct sts_entry_24xx *) pkt; 1903 sts24 = (struct sts_entry_24xx *) pkt;
1579 if (IS_FWI2_CAPABLE(ha)) { 1904 if (IS_FWI2_CAPABLE(ha)) {
1580 comp_status = le16_to_cpu(sts24->comp_status); 1905 comp_status = le16_to_cpu(sts24->comp_status);
1581 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1906 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1907 state_flags = le16_to_cpu(sts24->state_flags);
1582 } else { 1908 } else {
1583 comp_status = le16_to_cpu(sts->comp_status); 1909 comp_status = le16_to_cpu(sts->comp_status);
1584 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1910 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
@@ -1587,17 +1913,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1587 que = MSW(sts->handle); 1913 que = MSW(sts->handle);
1588 req = ha->req_q_map[que]; 1914 req = ha->req_q_map[que];
1589 1915
1590 /* Fast path completion. */
1591 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1592 qla2x00_process_completed_request(vha, req, handle);
1593
1594 return;
1595 }
1596
1597 /* Validate handle. */ 1916 /* Validate handle. */
1598 if (handle < MAX_OUTSTANDING_COMMANDS) { 1917 if (handle < MAX_OUTSTANDING_COMMANDS) {
1599 sp = req->outstanding_cmds[handle]; 1918 sp = req->outstanding_cmds[handle];
1600 req->outstanding_cmds[handle] = NULL;
1601 } else 1919 } else
1602 sp = NULL; 1920 sp = NULL;
1603 1921
@@ -1612,6 +1930,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1612 qla2xxx_wake_dpc(vha); 1930 qla2xxx_wake_dpc(vha);
1613 return; 1931 return;
1614 } 1932 }
1933
1934 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
1935 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
1936 return;
1937 }
1938
1939 /* Fast path completion. */
1940 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1941 qla2x00_process_completed_request(vha, req, handle);
1942
1943 return;
1944 }
1945
1946 req->outstanding_cmds[handle] = NULL;
1615 cp = GET_CMD_SP(sp); 1947 cp = GET_CMD_SP(sp);
1616 if (cp == NULL) { 1948 if (cp == NULL) {
1617 ql_dbg(ql_dbg_io, vha, 0x3018, 1949 ql_dbg(ql_dbg_io, vha, 0x3018,
@@ -1830,7 +2162,21 @@ check_scsi_status:
1830 2162
1831 case CS_DIF_ERROR: 2163 case CS_DIF_ERROR:
1832 logit = qla2x00_handle_dif_error(sp, sts24); 2164 logit = qla2x00_handle_dif_error(sp, sts24);
2165 res = cp->result;
1833 break; 2166 break;
2167
2168 case CS_TRANSPORT:
2169 res = DID_ERROR << 16;
2170
2171 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2172 break;
2173
2174 if (state_flags & BIT_4)
2175 scmd_printk(KERN_WARNING, cp,
2176 "Unsupported device '%s' found.\n",
2177 cp->device->vendor);
2178 break;
2179
1834 default: 2180 default:
1835 res = DID_ERROR << 16; 2181 res = DID_ERROR << 16;
1836 break; 2182 break;
@@ -2150,7 +2496,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
2150 unsigned long iter; 2496 unsigned long iter;
2151 uint32_t stat; 2497 uint32_t stat;
2152 uint32_t hccr; 2498 uint32_t hccr;
2153 uint16_t mb[4]; 2499 uint16_t mb[8];
2154 struct rsp_que *rsp; 2500 struct rsp_que *rsp;
2155 unsigned long flags; 2501 unsigned long flags;
2156 2502
@@ -2191,29 +2537,29 @@ qla24xx_intr_handler(int irq, void *dev_id)
2191 break; 2537 break;
2192 2538
2193 switch (stat & 0xff) { 2539 switch (stat & 0xff) {
2194 case 0x1: 2540 case INTR_ROM_MB_SUCCESS:
2195 case 0x2: 2541 case INTR_ROM_MB_FAILED:
2196 case 0x10: 2542 case INTR_MB_SUCCESS:
2197 case 0x11: 2543 case INTR_MB_FAILED:
2198 qla24xx_mbx_completion(vha, MSW(stat)); 2544 qla24xx_mbx_completion(vha, MSW(stat));
2199 status |= MBX_INTERRUPT; 2545 status |= MBX_INTERRUPT;
2200 2546
2201 break; 2547 break;
2202 case 0x12: 2548 case INTR_ASYNC_EVENT:
2203 mb[0] = MSW(stat); 2549 mb[0] = MSW(stat);
2204 mb[1] = RD_REG_WORD(&reg->mailbox1); 2550 mb[1] = RD_REG_WORD(&reg->mailbox1);
2205 mb[2] = RD_REG_WORD(&reg->mailbox2); 2551 mb[2] = RD_REG_WORD(&reg->mailbox2);
2206 mb[3] = RD_REG_WORD(&reg->mailbox3); 2552 mb[3] = RD_REG_WORD(&reg->mailbox3);
2207 qla2x00_async_event(vha, rsp, mb); 2553 qla2x00_async_event(vha, rsp, mb);
2208 break; 2554 break;
2209 case 0x13: 2555 case INTR_RSP_QUE_UPDATE:
2210 case 0x14: 2556 case INTR_RSP_QUE_UPDATE_83XX:
2211 qla24xx_process_response_queue(vha, rsp); 2557 qla24xx_process_response_queue(vha, rsp);
2212 break; 2558 break;
2213 case 0x1C: /* ATIO queue updated */ 2559 case INTR_ATIO_QUE_UPDATE:
2214 qlt_24xx_process_atio_queue(vha); 2560 qlt_24xx_process_atio_queue(vha);
2215 break; 2561 break;
2216 case 0x1D: /* ATIO and response queues updated */ 2562 case INTR_ATIO_RSP_QUE_UPDATE:
2217 qlt_24xx_process_atio_queue(vha); 2563 qlt_24xx_process_atio_queue(vha);
2218 qla24xx_process_response_queue(vha, rsp); 2564 qla24xx_process_response_queue(vha, rsp);
2219 break; 2565 break;
@@ -2224,6 +2570,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
2224 } 2570 }
2225 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2571 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2226 RD_REG_DWORD_RELAXED(&reg->hccr); 2572 RD_REG_DWORD_RELAXED(&reg->hccr);
2573 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2574 ndelay(3500);
2227 } 2575 }
2228 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2576 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2229 2577
@@ -2306,7 +2654,7 @@ qla24xx_msix_default(int irq, void *dev_id)
2306 int status; 2654 int status;
2307 uint32_t stat; 2655 uint32_t stat;
2308 uint32_t hccr; 2656 uint32_t hccr;
2309 uint16_t mb[4]; 2657 uint16_t mb[8];
2310 unsigned long flags; 2658 unsigned long flags;
2311 2659
2312 rsp = (struct rsp_que *) dev_id; 2660 rsp = (struct rsp_que *) dev_id;
@@ -2342,29 +2690,29 @@ qla24xx_msix_default(int irq, void *dev_id)
2342 break; 2690 break;
2343 2691
2344 switch (stat & 0xff) { 2692 switch (stat & 0xff) {
2345 case 0x1: 2693 case INTR_ROM_MB_SUCCESS:
2346 case 0x2: 2694 case INTR_ROM_MB_FAILED:
2347 case 0x10: 2695 case INTR_MB_SUCCESS:
2348 case 0x11: 2696 case INTR_MB_FAILED:
2349 qla24xx_mbx_completion(vha, MSW(stat)); 2697 qla24xx_mbx_completion(vha, MSW(stat));
2350 status |= MBX_INTERRUPT; 2698 status |= MBX_INTERRUPT;
2351 2699
2352 break; 2700 break;
2353 case 0x12: 2701 case INTR_ASYNC_EVENT:
2354 mb[0] = MSW(stat); 2702 mb[0] = MSW(stat);
2355 mb[1] = RD_REG_WORD(&reg->mailbox1); 2703 mb[1] = RD_REG_WORD(&reg->mailbox1);
2356 mb[2] = RD_REG_WORD(&reg->mailbox2); 2704 mb[2] = RD_REG_WORD(&reg->mailbox2);
2357 mb[3] = RD_REG_WORD(&reg->mailbox3); 2705 mb[3] = RD_REG_WORD(&reg->mailbox3);
2358 qla2x00_async_event(vha, rsp, mb); 2706 qla2x00_async_event(vha, rsp, mb);
2359 break; 2707 break;
2360 case 0x13: 2708 case INTR_RSP_QUE_UPDATE:
2361 case 0x14: 2709 case INTR_RSP_QUE_UPDATE_83XX:
2362 qla24xx_process_response_queue(vha, rsp); 2710 qla24xx_process_response_queue(vha, rsp);
2363 break; 2711 break;
2364 case 0x1C: /* ATIO queue updated */ 2712 case INTR_ATIO_QUE_UPDATE:
2365 qlt_24xx_process_atio_queue(vha); 2713 qlt_24xx_process_atio_queue(vha);
2366 break; 2714 break;
2367 case 0x1D: /* ATIO and response queues updated */ 2715 case INTR_ATIO_RSP_QUE_UPDATE:
2368 qlt_24xx_process_atio_queue(vha); 2716 qlt_24xx_process_atio_queue(vha);
2369 qla24xx_process_response_queue(vha, rsp); 2717 qla24xx_process_response_queue(vha, rsp);
2370 break; 2718 break;
@@ -2570,7 +2918,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2570skip_msix: 2918skip_msix:
2571 2919
2572 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2920 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2573 !IS_QLA8001(ha)) 2921 !IS_QLA8001(ha) && !IS_QLA82XX(ha))
2574 goto skip_msi; 2922 goto skip_msi;
2575 2923
2576 ret = pci_enable_msi(ha->pdev); 2924 ret = pci_enable_msi(ha->pdev);
@@ -2581,6 +2929,11 @@ skip_msix:
2581 } else 2929 } else
2582 ql_log(ql_log_warn, vha, 0x0039, 2930 ql_log(ql_log_warn, vha, 0x0039,
2583 "MSI-X; Falling back-to INTa mode -- %d.\n", ret); 2931 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
2932
2933 /* Skip INTx on ISP82xx. */
2934 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
2935 return QLA_FUNCTION_FAILED;
2936
2584skip_msi: 2937skip_msi:
2585 2938
2586 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 2939 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -2595,21 +2948,9 @@ skip_msi:
2595 2948
2596clear_risc_ints: 2949clear_risc_ints:
2597 2950
2598 /*
2599 * FIXME: Noted that 8014s were being dropped during NK testing.
2600 * Timing deltas during MSI-X/INTa transitions?
2601 */
2602 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
2603 goto fail;
2604 spin_lock_irq(&ha->hardware_lock); 2951 spin_lock_irq(&ha->hardware_lock);
2605 if (IS_FWI2_CAPABLE(ha)) { 2952 if (!IS_FWI2_CAPABLE(ha))
2606 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2607 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2608 } else {
2609 WRT_REG_WORD(&reg->isp.semaphore, 0); 2953 WRT_REG_WORD(&reg->isp.semaphore, 0);
2610 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2611 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2612 }
2613 spin_unlock_irq(&ha->hardware_lock); 2954 spin_unlock_irq(&ha->hardware_lock);
2614 2955
2615fail: 2956fail:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d5ce92c0a8fc..18c509fae555 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -75,7 +75,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
75 return QLA_FUNCTION_TIMEOUT; 75 return QLA_FUNCTION_TIMEOUT;
76 } 76 }
77 77
78 if (ha->flags.isp82xx_fw_hung) { 78 if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) {
79 /* Setting Link-Down error */ 79 /* Setting Link-Down error */
80 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 80 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
81 ql_log(ql_log_warn, vha, 0x1004, 81 ql_log(ql_log_warn, vha, 0x1004,
@@ -232,7 +232,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
232 ha->flags.mbox_int = 0; 232 ha->flags.mbox_int = 0;
233 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 233 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
234 234
235 if (ha->flags.isp82xx_fw_hung) { 235 if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) {
236 ha->flags.mbox_busy = 0; 236 ha->flags.mbox_busy = 0;
237 /* Setting Link-Down error */ 237 /* Setting Link-Down error */
238 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 238 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -369,7 +369,7 @@ premature_exit:
369 369
370mbx_done: 370mbx_done:
371 if (rval) { 371 if (rval) {
372 ql_dbg(ql_dbg_mbx, base_vha, 0x1020, 372 ql_log(ql_log_warn, base_vha, 0x1020,
373 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", 373 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
374 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command); 374 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
375 } else { 375 } else {
@@ -533,7 +533,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
533 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 533 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
534 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha)) 534 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
535 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; 535 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
536 if (IS_QLA83XX(vha->hw)) 536 if (IS_FWI2_CAPABLE(ha))
537 mcp->in_mb |= MBX_17|MBX_16|MBX_15; 537 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
538 mcp->flags = 0; 538 mcp->flags = 0;
539 mcp->tov = MBX_TOV_SECONDS; 539 mcp->tov = MBX_TOV_SECONDS;
@@ -559,18 +559,16 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
559 ha->phy_version[1] = mcp->mb[9] >> 8; 559 ha->phy_version[1] = mcp->mb[9] >> 8;
560 ha->phy_version[2] = mcp->mb[9] & 0xff; 560 ha->phy_version[2] = mcp->mb[9] & 0xff;
561 } 561 }
562 if (IS_QLA83XX(ha)) { 562 if (IS_FWI2_CAPABLE(ha)) {
563 if (mcp->mb[6] & BIT_15) { 563 ha->fw_attributes_h = mcp->mb[15];
564 ha->fw_attributes_h = mcp->mb[15]; 564 ha->fw_attributes_ext[0] = mcp->mb[16];
565 ha->fw_attributes_ext[0] = mcp->mb[16]; 565 ha->fw_attributes_ext[1] = mcp->mb[17];
566 ha->fw_attributes_ext[1] = mcp->mb[17]; 566 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139, 567 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
568 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", 568 __func__, mcp->mb[15], mcp->mb[6]);
569 __func__, mcp->mb[15], mcp->mb[6]); 569 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
570 } else 570 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
571 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f, 571 __func__, mcp->mb[17], mcp->mb[16]);
572 "%s: FwAttributes [Upper] invalid, MB6:%04x\n",
573 __func__, mcp->mb[6]);
574 } 572 }
575 573
576failed: 574failed:
@@ -3408,7 +3406,6 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3408 3406
3409 return rval; 3407 return rval;
3410} 3408}
3411
3412/* 84XX Support **************************************************************/ 3409/* 84XX Support **************************************************************/
3413 3410
3414struct cs84xx_mgmt_cmd { 3411struct cs84xx_mgmt_cmd {
@@ -4428,7 +4425,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4428 "Entered %s.\n", __func__); 4425 "Entered %s.\n", __func__);
4429 4426
4430 /* Integer part */ 4427 /* Integer part */
4431 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); 4428 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1,
4429 BIT_13|BIT_12|BIT_0);
4432 if (rval != QLA_SUCCESS) { 4430 if (rval != QLA_SUCCESS) {
4433 ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval); 4431 ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
4434 ha->flags.thermal_supported = 0; 4432 ha->flags.thermal_supported = 0;
@@ -4437,7 +4435,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4437 *temp = byte; 4435 *temp = byte;
4438 4436
4439 /* Fraction part */ 4437 /* Fraction part */
4440 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0); 4438 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1,
4439 BIT_13|BIT_12|BIT_0);
4441 if (rval != QLA_SUCCESS) { 4440 if (rval != QLA_SUCCESS) {
4442 ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval); 4441 ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
4443 ha->flags.thermal_supported = 0; 4442 ha->flags.thermal_supported = 0;
@@ -4741,7 +4740,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4741} 4740}
4742 4741
4743int 4742int
4744qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) 4743qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
4745{ 4744{
4746 int rval; 4745 int rval;
4747 struct qla_hw_data *ha = vha->hw; 4746 struct qla_hw_data *ha = vha->hw;
@@ -4814,3 +4813,186 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
4814 return rval; 4813 return rval;
4815} 4814}
4816 4815
4816int
4817qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
4818{
4819 int rval;
4820 mbx_cmd_t mc;
4821 mbx_cmd_t *mcp = &mc;
4822 struct qla_hw_data *ha = vha->hw;
4823 unsigned long retry_max_time = jiffies + (2 * HZ);
4824
4825 if (!IS_QLA83XX(ha))
4826 return QLA_FUNCTION_FAILED;
4827
4828 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
4829
4830retry_rd_reg:
4831 mcp->mb[0] = MBC_READ_REMOTE_REG;
4832 mcp->mb[1] = LSW(reg);
4833 mcp->mb[2] = MSW(reg);
4834 mcp->out_mb = MBX_2|MBX_1|MBX_0;
4835 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
4836 mcp->tov = MBX_TOV_SECONDS;
4837 mcp->flags = 0;
4838 rval = qla2x00_mailbox_command(vha, mcp);
4839
4840 if (rval != QLA_SUCCESS) {
4841 ql_dbg(ql_dbg_mbx, vha, 0x114c,
4842 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4843 rval, mcp->mb[0], mcp->mb[1]);
4844 } else {
4845 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
4846 if (*data == QLA8XXX_BAD_VALUE) {
4847 /*
4848 * During soft-reset CAMRAM register reads might
4849 * return 0xbad0bad0. So retry for MAX of 2 sec
4850 * while reading camram registers.
4851 */
4852 if (time_after(jiffies, retry_max_time)) {
4853 ql_dbg(ql_dbg_mbx, vha, 0x1141,
4854 "Failure to read CAMRAM register. "
4855 "data=0x%x.\n", *data);
4856 return QLA_FUNCTION_FAILED;
4857 }
4858 msleep(100);
4859 goto retry_rd_reg;
4860 }
4861 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
4862 }
4863
4864 return rval;
4865}
4866
4867int
4868qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
4869{
4870 int rval;
4871 mbx_cmd_t mc;
4872 mbx_cmd_t *mcp = &mc;
4873 struct qla_hw_data *ha = vha->hw;
4874
4875 if (!IS_QLA83XX(ha))
4876 return QLA_FUNCTION_FAILED;
4877
4878 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
4879
4880 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
4881 mcp->out_mb = MBX_0;
4882 mcp->in_mb = MBX_1|MBX_0;
4883 mcp->tov = MBX_TOV_SECONDS;
4884 mcp->flags = 0;
4885 rval = qla2x00_mailbox_command(vha, mcp);
4886
4887 if (rval != QLA_SUCCESS) {
4888 ql_dbg(ql_dbg_mbx, vha, 0x1144,
4889 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4890 rval, mcp->mb[0], mcp->mb[1]);
4891 ha->isp_ops->fw_dump(vha, 0);
4892 } else {
4893 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
4894 }
4895
4896 return rval;
4897}
4898
4899int
4900qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
4901 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
4902{
4903 int rval;
4904 mbx_cmd_t mc;
4905 mbx_cmd_t *mcp = &mc;
4906 uint8_t subcode = (uint8_t)options;
4907 struct qla_hw_data *ha = vha->hw;
4908
4909 if (!IS_QLA8031(ha))
4910 return QLA_FUNCTION_FAILED;
4911
4912 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
4913
4914 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
4915 mcp->mb[1] = options;
4916 mcp->out_mb = MBX_1|MBX_0;
4917 if (subcode & BIT_2) {
4918 mcp->mb[2] = LSW(start_addr);
4919 mcp->mb[3] = MSW(start_addr);
4920 mcp->mb[4] = LSW(end_addr);
4921 mcp->mb[5] = MSW(end_addr);
4922 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
4923 }
4924 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4925 if (!(subcode & (BIT_2 | BIT_5)))
4926 mcp->in_mb |= MBX_4|MBX_3;
4927 mcp->tov = MBX_TOV_SECONDS;
4928 mcp->flags = 0;
4929 rval = qla2x00_mailbox_command(vha, mcp);
4930
4931 if (rval != QLA_SUCCESS) {
4932 ql_dbg(ql_dbg_mbx, vha, 0x1147,
4933 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
4934 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
4935 mcp->mb[4]);
4936 ha->isp_ops->fw_dump(vha, 0);
4937 } else {
4938 if (subcode & BIT_5)
4939 *sector_size = mcp->mb[1];
4940 else if (subcode & (BIT_6 | BIT_7)) {
4941 ql_dbg(ql_dbg_mbx, vha, 0x1148,
4942 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
4943 } else if (subcode & (BIT_3 | BIT_4)) {
4944 ql_dbg(ql_dbg_mbx, vha, 0x1149,
4945 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
4946 }
4947 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
4948 }
4949
4950 return rval;
4951}
4952
4953int
4954qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4955 uint32_t size)
4956{
4957 int rval;
4958 mbx_cmd_t mc;
4959 mbx_cmd_t *mcp = &mc;
4960
4961 if (!IS_MCTP_CAPABLE(vha->hw))
4962 return QLA_FUNCTION_FAILED;
4963
4964 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
4965 "Entered %s.\n", __func__);
4966
4967 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4968 mcp->mb[1] = LSW(addr);
4969 mcp->mb[2] = MSW(req_dma);
4970 mcp->mb[3] = LSW(req_dma);
4971 mcp->mb[4] = MSW(size);
4972 mcp->mb[5] = LSW(size);
4973 mcp->mb[6] = MSW(MSD(req_dma));
4974 mcp->mb[7] = LSW(MSD(req_dma));
4975 mcp->mb[8] = MSW(addr);
4976 /* Setting RAM ID to valid */
4977 mcp->mb[10] |= BIT_7;
4978 /* For MCTP RAM ID is 0x40 */
4979 mcp->mb[10] |= 0x40;
4980
4981 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
4982 MBX_0;
4983
4984 mcp->in_mb = MBX_0;
4985 mcp->tov = MBX_TOV_SECONDS;
4986 mcp->flags = 0;
4987 rval = qla2x00_mailbox_command(vha, mcp);
4988
4989 if (rval != QLA_SUCCESS) {
4990 ql_dbg(ql_dbg_mbx, vha, 0x114e,
4991 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4992 } else {
4993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
4994 "Done %s.\n", __func__);
4995 }
4996
4997 return rval;
4998}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 3e8b32419e68..bd4708a422cd 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -476,7 +476,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
476 476
477 vha->req = base_vha->req; 477 vha->req = base_vha->req;
478 host->can_queue = base_vha->req->length + 128; 478 host->can_queue = base_vha->req->length + 128;
479 host->this_id = 255;
480 host->cmd_per_lun = 3; 479 host->cmd_per_lun = 3;
481 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 480 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
482 host->max_cmd_len = 32; 481 host->max_cmd_len = 32;
@@ -643,7 +642,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
643 &req->dma, GFP_KERNEL); 642 &req->dma, GFP_KERNEL);
644 if (req->ring == NULL) { 643 if (req->ring == NULL) {
645 ql_log(ql_log_fatal, base_vha, 0x00da, 644 ql_log(ql_log_fatal, base_vha, 0x00da,
646 "Failed to allocte memory for request_ring.\n"); 645 "Failed to allocate memory for request_ring.\n");
647 goto que_failed; 646 goto que_failed;
648 } 647 }
649 648
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 7cfdf2bd8edb..14cd361742fa 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1612,23 +1612,6 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha)
1612} 1612}
1613 1613
1614/* PCI related functions */ 1614/* PCI related functions */
1615char *
1616qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1617{
1618 struct qla_hw_data *ha = vha->hw;
1619 char lwstr[6];
1620 uint16_t lnk;
1621
1622 pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
1623 ha->link_width = (lnk >> 4) & 0x3f;
1624
1625 strcpy(str, "PCIe (");
1626 strcat(str, "2.5Gb/s ");
1627 snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
1628 strcat(str, lwstr);
1629 return str;
1630}
1631
1632int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) 1615int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
1633{ 1616{
1634 unsigned long val = 0; 1617 unsigned long val = 0;
@@ -2320,6 +2303,29 @@ void qla82xx_init_flags(struct qla_hw_data *ha)
2320} 2303}
2321 2304
2322inline void 2305inline void
2306qla82xx_set_idc_version(scsi_qla_host_t *vha)
2307{
2308 int idc_ver;
2309 uint32_t drv_active;
2310 struct qla_hw_data *ha = vha->hw;
2311
2312 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2313 if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) {
2314 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
2315 QLA82XX_IDC_VERSION);
2316 ql_log(ql_log_info, vha, 0xb082,
2317 "IDC version updated to %d\n", QLA82XX_IDC_VERSION);
2318 } else {
2319 idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION);
2320 if (idc_ver != QLA82XX_IDC_VERSION)
2321 ql_log(ql_log_info, vha, 0xb083,
2322 "qla2xxx driver IDC version %d is not compatible "
2323 "with IDC version %d of the other drivers\n",
2324 QLA82XX_IDC_VERSION, idc_ver);
2325 }
2326}
2327
2328inline void
2323qla82xx_set_drv_active(scsi_qla_host_t *vha) 2329qla82xx_set_drv_active(scsi_qla_host_t *vha)
2324{ 2330{
2325 uint32_t drv_active; 2331 uint32_t drv_active;
@@ -2353,7 +2359,7 @@ qla82xx_need_reset(struct qla_hw_data *ha)
2353 uint32_t drv_state; 2359 uint32_t drv_state;
2354 int rval; 2360 int rval;
2355 2361
2356 if (ha->flags.isp82xx_reset_owner) 2362 if (ha->flags.nic_core_reset_owner)
2357 return 1; 2363 return 1;
2358 else { 2364 else {
2359 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2365 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
@@ -2860,7 +2866,7 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
2860 timeout = msleep_interruptible(200); 2866 timeout = msleep_interruptible(200);
2861 if (timeout) { 2867 if (timeout) {
2862 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2868 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2863 QLA82XX_DEV_FAILED); 2869 QLA8XXX_DEV_FAILED);
2864 return QLA_FUNCTION_FAILED; 2870 return QLA_FUNCTION_FAILED;
2865 } 2871 }
2866 2872
@@ -2891,10 +2897,7 @@ dev_initialize:
2891 /* set to DEV_INITIALIZING */ 2897 /* set to DEV_INITIALIZING */
2892 ql_log(ql_log_info, vha, 0x009e, 2898 ql_log(ql_log_info, vha, 0x009e,
2893 "HW State: INITIALIZING.\n"); 2899 "HW State: INITIALIZING.\n");
2894 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING); 2900 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
2895
2896 /* Driver that sets device state to initializating sets IDC version */
2897 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
2898 2901
2899 qla82xx_idc_unlock(ha); 2902 qla82xx_idc_unlock(ha);
2900 rval = qla82xx_start_firmware(vha); 2903 rval = qla82xx_start_firmware(vha);
@@ -2904,14 +2907,14 @@ dev_initialize:
2904 ql_log(ql_log_fatal, vha, 0x00ad, 2907 ql_log(ql_log_fatal, vha, 0x00ad,
2905 "HW State: FAILED.\n"); 2908 "HW State: FAILED.\n");
2906 qla82xx_clear_drv_active(ha); 2909 qla82xx_clear_drv_active(ha);
2907 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); 2910 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED);
2908 return rval; 2911 return rval;
2909 } 2912 }
2910 2913
2911dev_ready: 2914dev_ready:
2912 ql_log(ql_log_info, vha, 0x00ae, 2915 ql_log(ql_log_info, vha, 0x00ae,
2913 "HW State: READY.\n"); 2916 "HW State: READY.\n");
2914 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); 2917 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
2915 2918
2916 return QLA_SUCCESS; 2919 return QLA_SUCCESS;
2917} 2920}
@@ -2935,7 +2938,7 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
2935 2938
2936 if (vha->flags.online) { 2939 if (vha->flags.online) {
2937 /*Block any further I/O and wait for pending cmnds to complete*/ 2940 /*Block any further I/O and wait for pending cmnds to complete*/
2938 qla82xx_quiescent_state_cleanup(vha); 2941 qla2x00_quiesce_io(vha);
2939 } 2942 }
2940 2943
2941 /* Set the quiescence ready bit */ 2944 /* Set the quiescence ready bit */
@@ -2960,7 +2963,7 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
2960 "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME, 2963 "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
2961 drv_active, drv_state); 2964 drv_active, drv_state);
2962 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2965 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2963 QLA82XX_DEV_READY); 2966 QLA8XXX_DEV_READY);
2964 ql_log(ql_log_info, vha, 0xb025, 2967 ql_log(ql_log_info, vha, 0xb025,
2965 "HW State: DEV_READY.\n"); 2968 "HW State: DEV_READY.\n");
2966 qla82xx_idc_unlock(ha); 2969 qla82xx_idc_unlock(ha);
@@ -2981,10 +2984,10 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
2981 } 2984 }
2982 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2985 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2983 /* everyone acked so set the state to DEV_QUIESCENCE */ 2986 /* everyone acked so set the state to DEV_QUIESCENCE */
2984 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) { 2987 if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
2985 ql_log(ql_log_info, vha, 0xb026, 2988 ql_log(ql_log_info, vha, 0xb026,
2986 "HW State: DEV_QUIESCENT.\n"); 2989 "HW State: DEV_QUIESCENT.\n");
2987 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT); 2990 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT);
2988 } 2991 }
2989} 2992}
2990 2993
@@ -3014,8 +3017,8 @@ qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
3014 return dev_state; 3017 return dev_state;
3015} 3018}
3016 3019
3017static void 3020void
3018qla82xx_dev_failed_handler(scsi_qla_host_t *vha) 3021qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
3019{ 3022{
3020 struct qla_hw_data *ha = vha->hw; 3023 struct qla_hw_data *ha = vha->hw;
3021 3024
@@ -3023,9 +3026,10 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3023 ql_log(ql_log_fatal, vha, 0x00b8, 3026 ql_log(ql_log_fatal, vha, 0x00b8,
3024 "Disabling the board.\n"); 3027 "Disabling the board.\n");
3025 3028
3026 qla82xx_idc_lock(ha); 3029 if (IS_QLA82XX(ha)) {
3027 qla82xx_clear_drv_active(ha); 3030 qla82xx_clear_drv_active(ha);
3028 qla82xx_idc_unlock(ha); 3031 qla82xx_idc_unlock(ha);
3032 }
3029 3033
3030 /* Set DEV_FAILED flag to disable timer */ 3034 /* Set DEV_FAILED flag to disable timer */
3031 vha->device_flags |= DFLG_DEV_FAILED; 3035 vha->device_flags |= DFLG_DEV_FAILED;
@@ -3064,7 +3068,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3064 } 3068 }
3065 3069
3066 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3070 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3067 if (!ha->flags.isp82xx_reset_owner) { 3071 if (!ha->flags.nic_core_reset_owner) {
3068 ql_dbg(ql_dbg_p3p, vha, 0xb028, 3072 ql_dbg(ql_dbg_p3p, vha, 0xb028,
3069 "reset_acknowledged by 0x%x\n", ha->portnum); 3073 "reset_acknowledged by 0x%x\n", ha->portnum);
3070 qla82xx_set_rst_ready(ha); 3074 qla82xx_set_rst_ready(ha);
@@ -3076,7 +3080,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3076 } 3080 }
3077 3081
3078 /* wait for 10 seconds for reset ack from all functions */ 3082 /* wait for 10 seconds for reset ack from all functions */
3079 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); 3083 reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
3080 3084
3081 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3085 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3082 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3086 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
@@ -3088,7 +3092,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3088 drv_state, drv_active, dev_state, active_mask); 3092 drv_state, drv_active, dev_state, active_mask);
3089 3093
3090 while (drv_state != drv_active && 3094 while (drv_state != drv_active &&
3091 dev_state != QLA82XX_DEV_INITIALIZING) { 3095 dev_state != QLA8XXX_DEV_INITIALIZING) {
3092 if (time_after_eq(jiffies, reset_timeout)) { 3096 if (time_after_eq(jiffies, reset_timeout)) {
3093 ql_log(ql_log_warn, vha, 0x00b5, 3097 ql_log(ql_log_warn, vha, 0x00b5,
3094 "Reset timeout.\n"); 3098 "Reset timeout.\n");
@@ -3099,7 +3103,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3099 qla82xx_idc_lock(ha); 3103 qla82xx_idc_lock(ha);
3100 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3104 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3101 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3105 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3102 if (ha->flags.isp82xx_reset_owner) 3106 if (ha->flags.nic_core_reset_owner)
3103 drv_active &= active_mask; 3107 drv_active &= active_mask;
3104 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3108 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3105 } 3109 }
@@ -3115,11 +3119,11 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3115 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); 3119 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3116 3120
3117 /* Force to DEV_COLD unless someone else is starting a reset */ 3121 /* Force to DEV_COLD unless someone else is starting a reset */
3118 if (dev_state != QLA82XX_DEV_INITIALIZING && 3122 if (dev_state != QLA8XXX_DEV_INITIALIZING &&
3119 dev_state != QLA82XX_DEV_COLD) { 3123 dev_state != QLA8XXX_DEV_COLD) {
3120 ql_log(ql_log_info, vha, 0x00b7, 3124 ql_log(ql_log_info, vha, 0x00b7,
3121 "HW State: COLD/RE-INIT.\n"); 3125 "HW State: COLD/RE-INIT.\n");
3122 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3126 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
3123 qla82xx_set_rst_ready(ha); 3127 qla82xx_set_rst_ready(ha);
3124 if (ql2xmdenable) { 3128 if (ql2xmdenable) {
3125 if (qla82xx_md_collect(vha)) 3129 if (qla82xx_md_collect(vha))
@@ -3226,8 +3230,10 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3226 int loopcount = 0; 3230 int loopcount = 0;
3227 3231
3228 qla82xx_idc_lock(ha); 3232 qla82xx_idc_lock(ha);
3229 if (!vha->flags.init_done) 3233 if (!vha->flags.init_done) {
3230 qla82xx_set_drv_active(vha); 3234 qla82xx_set_drv_active(vha);
3235 qla82xx_set_idc_version(vha);
3236 }
3231 3237
3232 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3238 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3233 old_dev_state = dev_state; 3239 old_dev_state = dev_state;
@@ -3237,7 +3243,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3237 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); 3243 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3238 3244
3239 /* wait for 30 seconds for device to go ready */ 3245 /* wait for 30 seconds for device to go ready */
3240 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 3246 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
3241 3247
3242 while (1) { 3248 while (1) {
3243 3249
@@ -3261,18 +3267,18 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3261 } 3267 }
3262 3268
3263 switch (dev_state) { 3269 switch (dev_state) {
3264 case QLA82XX_DEV_READY: 3270 case QLA8XXX_DEV_READY:
3265 ha->flags.isp82xx_reset_owner = 0; 3271 ha->flags.nic_core_reset_owner = 0;
3266 goto exit; 3272 goto rel_lock;
3267 case QLA82XX_DEV_COLD: 3273 case QLA8XXX_DEV_COLD:
3268 rval = qla82xx_device_bootstrap(vha); 3274 rval = qla82xx_device_bootstrap(vha);
3269 break; 3275 break;
3270 case QLA82XX_DEV_INITIALIZING: 3276 case QLA8XXX_DEV_INITIALIZING:
3271 qla82xx_idc_unlock(ha); 3277 qla82xx_idc_unlock(ha);
3272 msleep(1000); 3278 msleep(1000);
3273 qla82xx_idc_lock(ha); 3279 qla82xx_idc_lock(ha);
3274 break; 3280 break;
3275 case QLA82XX_DEV_NEED_RESET: 3281 case QLA8XXX_DEV_NEED_RESET:
3276 if (!ql2xdontresethba) 3282 if (!ql2xdontresethba)
3277 qla82xx_need_reset_handler(vha); 3283 qla82xx_need_reset_handler(vha);
3278 else { 3284 else {
@@ -3281,31 +3287,31 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3281 qla82xx_idc_lock(ha); 3287 qla82xx_idc_lock(ha);
3282 } 3288 }
3283 dev_init_timeout = jiffies + 3289 dev_init_timeout = jiffies +
3284 (ha->nx_dev_init_timeout * HZ); 3290 (ha->fcoe_dev_init_timeout * HZ);
3285 break; 3291 break;
3286 case QLA82XX_DEV_NEED_QUIESCENT: 3292 case QLA8XXX_DEV_NEED_QUIESCENT:
3287 qla82xx_need_qsnt_handler(vha); 3293 qla82xx_need_qsnt_handler(vha);
3288 /* Reset timeout value after quiescence handler */ 3294 /* Reset timeout value after quiescence handler */
3289 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\ 3295 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
3290 * HZ); 3296 * HZ);
3291 break; 3297 break;
3292 case QLA82XX_DEV_QUIESCENT: 3298 case QLA8XXX_DEV_QUIESCENT:
3293 /* Owner will exit and other will wait for the state 3299 /* Owner will exit and other will wait for the state
3294 * to get changed 3300 * to get changed
3295 */ 3301 */
3296 if (ha->flags.quiesce_owner) 3302 if (ha->flags.quiesce_owner)
3297 goto exit; 3303 goto rel_lock;
3298 3304
3299 qla82xx_idc_unlock(ha); 3305 qla82xx_idc_unlock(ha);
3300 msleep(1000); 3306 msleep(1000);
3301 qla82xx_idc_lock(ha); 3307 qla82xx_idc_lock(ha);
3302 3308
3303 /* Reset timeout value after quiescence handler */ 3309 /* Reset timeout value after quiescence handler */
3304 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\ 3310 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
3305 * HZ); 3311 * HZ);
3306 break; 3312 break;
3307 case QLA82XX_DEV_FAILED: 3313 case QLA8XXX_DEV_FAILED:
3308 qla82xx_dev_failed_handler(vha); 3314 qla8xxx_dev_failed_handler(vha);
3309 rval = QLA_FUNCTION_FAILED; 3315 rval = QLA_FUNCTION_FAILED;
3310 goto exit; 3316 goto exit;
3311 default: 3317 default:
@@ -3315,8 +3321,9 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3315 } 3321 }
3316 loopcount++; 3322 loopcount++;
3317 } 3323 }
3318exit: 3324rel_lock:
3319 qla82xx_idc_unlock(ha); 3325 qla82xx_idc_unlock(ha);
3326exit:
3320 return rval; 3327 return rval;
3321} 3328}
3322 3329
@@ -3364,22 +3371,30 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3364 struct qla_hw_data *ha = vha->hw; 3371 struct qla_hw_data *ha = vha->hw;
3365 3372
3366 /* don't poll if reset is going on */ 3373 /* don't poll if reset is going on */
3367 if (!ha->flags.isp82xx_reset_hdlr_active) { 3374 if (!ha->flags.nic_core_reset_hdlr_active) {
3368 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3375 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3369 if (qla82xx_check_temp(vha)) { 3376 if (qla82xx_check_temp(vha)) {
3370 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); 3377 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3371 ha->flags.isp82xx_fw_hung = 1; 3378 ha->flags.isp82xx_fw_hung = 1;
3372 qla82xx_clear_pending_mbx(vha); 3379 qla82xx_clear_pending_mbx(vha);
3373 } else if (dev_state == QLA82XX_DEV_NEED_RESET && 3380 } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
3374 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3381 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3375 ql_log(ql_log_warn, vha, 0x6001, 3382 ql_log(ql_log_warn, vha, 0x6001,
3376 "Adapter reset needed.\n"); 3383 "Adapter reset needed.\n");
3377 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3384 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3378 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3385 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
3379 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { 3386 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3380 ql_log(ql_log_warn, vha, 0x6002, 3387 ql_log(ql_log_warn, vha, 0x6002,
3381 "Quiescent needed.\n"); 3388 "Quiescent needed.\n");
3382 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3389 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3390 } else if (dev_state == QLA8XXX_DEV_FAILED &&
3391 !test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) &&
3392 vha->flags.online == 1) {
3393 ql_log(ql_log_warn, vha, 0xb055,
3394 "Adapter state is failed. Offlining.\n");
3395 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3396 ha->flags.isp82xx_fw_hung = 1;
3397 qla82xx_clear_pending_mbx(vha);
3383 } else { 3398 } else {
3384 if (qla82xx_check_fw_alive(vha)) { 3399 if (qla82xx_check_fw_alive(vha)) {
3385 ql_dbg(ql_dbg_timer, vha, 0x6011, 3400 ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3441,12 +3456,12 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha)
3441 uint32_t dev_state; 3456 uint32_t dev_state;
3442 3457
3443 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3458 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3444 if (dev_state == QLA82XX_DEV_READY) { 3459 if (dev_state == QLA8XXX_DEV_READY) {
3445 ql_log(ql_log_info, vha, 0xb02f, 3460 ql_log(ql_log_info, vha, 0xb02f,
3446 "HW State: NEED RESET\n"); 3461 "HW State: NEED RESET\n");
3447 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3462 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3448 QLA82XX_DEV_NEED_RESET); 3463 QLA8XXX_DEV_NEED_RESET);
3449 ha->flags.isp82xx_reset_owner = 1; 3464 ha->flags.nic_core_reset_owner = 1;
3450 ql_dbg(ql_dbg_p3p, vha, 0xb030, 3465 ql_dbg(ql_dbg_p3p, vha, 0xb030,
3451 "reset_owner is 0x%x\n", ha->portnum); 3466 "reset_owner is 0x%x\n", ha->portnum);
3452 } else 3467 } else
@@ -3477,7 +3492,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3477 "Device in failed state, exiting.\n"); 3492 "Device in failed state, exiting.\n");
3478 return QLA_SUCCESS; 3493 return QLA_SUCCESS;
3479 } 3494 }
3480 ha->flags.isp82xx_reset_hdlr_active = 1; 3495 ha->flags.nic_core_reset_hdlr_active = 1;
3481 3496
3482 qla82xx_idc_lock(ha); 3497 qla82xx_idc_lock(ha);
3483 qla82xx_set_reset_owner(vha); 3498 qla82xx_set_reset_owner(vha);
@@ -3491,7 +3506,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3491 3506
3492 if (rval == QLA_SUCCESS) { 3507 if (rval == QLA_SUCCESS) {
3493 ha->flags.isp82xx_fw_hung = 0; 3508 ha->flags.isp82xx_fw_hung = 0;
3494 ha->flags.isp82xx_reset_hdlr_active = 0; 3509 ha->flags.nic_core_reset_hdlr_active = 0;
3495 qla82xx_restart_isp(vha); 3510 qla82xx_restart_isp(vha);
3496 } 3511 }
3497 3512
@@ -4026,7 +4041,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
4026 4041
4027 if (r_addr & 0xf) { 4042 if (r_addr & 0xf) {
4028 ql_log(ql_log_warn, vha, 0xb033, 4043 ql_log(ql_log_warn, vha, 0xb033,
4029 "Read addr 0x%x not 16 bytes alligned\n", r_addr); 4044 "Read addr 0x%x not 16 bytes aligned\n", r_addr);
4030 return rval; 4045 return rval;
4031 } 4046 }
4032 4047
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6eb210e3cc63..6c953e8c08f0 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -542,14 +542,15 @@
542#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174)) 542#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
543 543
544/* Every driver should use these Device State */ 544/* Every driver should use these Device State */
545#define QLA82XX_DEV_COLD 1 545#define QLA8XXX_DEV_COLD 1
546#define QLA82XX_DEV_INITIALIZING 2 546#define QLA8XXX_DEV_INITIALIZING 2
547#define QLA82XX_DEV_READY 3 547#define QLA8XXX_DEV_READY 3
548#define QLA82XX_DEV_NEED_RESET 4 548#define QLA8XXX_DEV_NEED_RESET 4
549#define QLA82XX_DEV_NEED_QUIESCENT 5 549#define QLA8XXX_DEV_NEED_QUIESCENT 5
550#define QLA82XX_DEV_FAILED 6 550#define QLA8XXX_DEV_FAILED 6
551#define QLA82XX_DEV_QUIESCENT 7 551#define QLA8XXX_DEV_QUIESCENT 7
552#define MAX_STATES 8 /* Increment if new state added */ 552#define MAX_STATES 8 /* Increment if new state added */
553#define QLA8XXX_BAD_VALUE 0xbad0bad0
553 554
554#define QLA82XX_IDC_VERSION 1 555#define QLA82XX_IDC_VERSION 1
555#define QLA82XX_ROM_DEV_INIT_TIMEOUT 30 556#define QLA82XX_ROM_DEV_INIT_TIMEOUT 30
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d3052622e77a..d501bf5f806b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -113,11 +113,11 @@ MODULE_PARM_DESC(ql2xfdmienable,
113static int ql2xmaxqdepth = MAX_Q_DEPTH; 113static int ql2xmaxqdepth = MAX_Q_DEPTH;
114module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); 114module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
115MODULE_PARM_DESC(ql2xmaxqdepth, 115MODULE_PARM_DESC(ql2xmaxqdepth,
116 "Maximum queue depth to report for target devices."); 116 "Maximum queue depth to set for each LUN. "
117 "Default is 32.");
117 118
118/* Do not change the value of this after module load */ 119int ql2xenabledif = 2;
119int ql2xenabledif = 0; 120module_param(ql2xenabledif, int, S_IRUGO);
120module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
121MODULE_PARM_DESC(ql2xenabledif, 121MODULE_PARM_DESC(ql2xenabledif,
122 " Enable T10-CRC-DIF " 122 " Enable T10-CRC-DIF "
123 " Default is 0 - No DIF Support. 1 - Enable it" 123 " Default is 0 - No DIF Support. 1 - Enable it"
@@ -1078,7 +1078,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
1078 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 1078 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
1079 cmd->device->lun, type) != QLA_SUCCESS) { 1079 cmd->device->lun, type) != QLA_SUCCESS) {
1080 ql_log(ql_log_warn, vha, 0x800d, 1080 ql_log(ql_log_warn, vha, 0x800d,
1081 "wait for peding cmds failed for cmd=%p.\n", cmd); 1081 "wait for pending cmds failed for cmd=%p.\n", cmd);
1082 goto eh_reset_failed; 1082 goto eh_reset_failed;
1083 } 1083 }
1084 1084
@@ -1177,7 +1177,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1177eh_bus_reset_done: 1177eh_bus_reset_done:
1178 ql_log(ql_log_warn, vha, 0x802b, 1178 ql_log(ql_log_warn, vha, 0x802b,
1179 "BUS RESET %s nexus=%ld:%d:%d.\n", 1179 "BUS RESET %s nexus=%ld:%d:%d.\n",
1180 (ret == FAILED) ? "FAILED" : "SUCCEDED", vha->host_no, id, lun); 1180 (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
1181 1181
1182 return ret; 1182 return ret;
1183} 1183}
@@ -1357,6 +1357,9 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1357 scsi_qla_host_t *vha = shost_priv(sdev->host); 1357 scsi_qla_host_t *vha = shost_priv(sdev->host);
1358 struct req_que *req = vha->req; 1358 struct req_que *req = vha->req;
1359 1359
1360 if (IS_T10_PI_CAPABLE(vha->hw))
1361 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1362
1360 if (sdev->tagged_supported) 1363 if (sdev->tagged_supported)
1361 scsi_activate_tcq(sdev, req->max_q_depth); 1364 scsi_activate_tcq(sdev, req->max_q_depth);
1362 else 1365 else
@@ -1919,7 +1922,7 @@ static struct isp_operations qla82xx_isp_ops = {
1919 .nvram_config = qla81xx_nvram_config, 1922 .nvram_config = qla81xx_nvram_config,
1920 .update_fw_options = qla24xx_update_fw_options, 1923 .update_fw_options = qla24xx_update_fw_options,
1921 .load_risc = qla82xx_load_risc, 1924 .load_risc = qla82xx_load_risc,
1922 .pci_info_str = qla82xx_pci_info_str, 1925 .pci_info_str = qla24xx_pci_info_str,
1923 .fw_version_str = qla24xx_fw_version_str, 1926 .fw_version_str = qla24xx_fw_version_str,
1924 .intr_handler = qla82xx_intr_handler, 1927 .intr_handler = qla82xx_intr_handler,
1925 .enable_intrs = qla82xx_enable_intrs, 1928 .enable_intrs = qla82xx_enable_intrs,
@@ -2149,7 +2152,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2149 scsi_qla_host_t *base_vha = NULL; 2152 scsi_qla_host_t *base_vha = NULL;
2150 struct qla_hw_data *ha; 2153 struct qla_hw_data *ha;
2151 char pci_info[30]; 2154 char pci_info[30];
2152 char fw_str[30]; 2155 char fw_str[30], wq_name[30];
2153 struct scsi_host_template *sht; 2156 struct scsi_host_template *sht;
2154 int bars, mem_only = 0; 2157 int bars, mem_only = 0;
2155 uint16_t req_length = 0, rsp_length = 0; 2158 uint16_t req_length = 0, rsp_length = 0;
@@ -2203,12 +2206,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2203 ha->mem_only = mem_only; 2206 ha->mem_only = mem_only;
2204 spin_lock_init(&ha->hardware_lock); 2207 spin_lock_init(&ha->hardware_lock);
2205 spin_lock_init(&ha->vport_slock); 2208 spin_lock_init(&ha->vport_slock);
2209 mutex_init(&ha->selflogin_lock);
2206 2210
2207 /* Set ISP-type information. */ 2211 /* Set ISP-type information. */
2208 qla2x00_set_isp_flags(ha); 2212 qla2x00_set_isp_flags(ha);
2209 2213
2210 /* Set EEH reset type to fundamental if required by hba */ 2214 /* Set EEH reset type to fundamental if required by hba */
2211 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) 2215 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
2216 IS_QLA83XX(ha))
2212 pdev->needs_freset = 1; 2217 pdev->needs_freset = 1;
2213 2218
2214 ha->prev_topology = 0; 2219 ha->prev_topology = 0;
@@ -2318,6 +2323,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2318 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2323 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2319 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2324 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2320 } else if (IS_QLA83XX(ha)) { 2325 } else if (IS_QLA83XX(ha)) {
2326 ha->portnum = PCI_FUNC(ha->pdev->devfn);
2321 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2327 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2322 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2328 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2323 req_length = REQUEST_ENTRY_CNT_24XX; 2329 req_length = REQUEST_ENTRY_CNT_24XX;
@@ -2416,7 +2422,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2416 host->can_queue, base_vha->req, 2422 host->can_queue, base_vha->req,
2417 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 2423 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2418 host->max_id = ha->max_fibre_devices; 2424 host->max_id = ha->max_fibre_devices;
2419 host->this_id = 255;
2420 host->cmd_per_lun = 3; 2425 host->cmd_per_lun = 3;
2421 host->unique_id = host->host_no; 2426 host->unique_id = host->host_no;
2422 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 2427 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
@@ -2499,7 +2504,7 @@ que_init:
2499 if (IS_QLA82XX(ha)) { 2504 if (IS_QLA82XX(ha)) {
2500 qla82xx_idc_lock(ha); 2505 qla82xx_idc_lock(ha);
2501 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2506 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2502 QLA82XX_DEV_FAILED); 2507 QLA8XXX_DEV_FAILED);
2503 qla82xx_idc_unlock(ha); 2508 qla82xx_idc_unlock(ha);
2504 ql_log(ql_log_fatal, base_vha, 0x00d7, 2509 ql_log(ql_log_fatal, base_vha, 0x00d7,
2505 "HW State: FAILED.\n"); 2510 "HW State: FAILED.\n");
@@ -2542,6 +2547,20 @@ que_init:
2542 */ 2547 */
2543 qla2xxx_wake_dpc(base_vha); 2548 qla2xxx_wake_dpc(base_vha);
2544 2549
2550 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
2551 sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
2552 ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
2553 INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
2554
2555 sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
2556 ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
2557 INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
2558 INIT_WORK(&ha->idc_state_handler,
2559 qla83xx_idc_state_handler_work);
2560 INIT_WORK(&ha->nic_core_unrecoverable,
2561 qla83xx_nic_core_unrecoverable_work);
2562 }
2563
2545skip_dpc: 2564skip_dpc:
2546 list_add_tail(&base_vha->list, &ha->vp_list); 2565 list_add_tail(&base_vha->list, &ha->vp_list);
2547 base_vha->host->irq = ha->pdev->irq; 2566 base_vha->host->irq = ha->pdev->irq;
@@ -2557,7 +2576,7 @@ skip_dpc:
2557 2576
2558 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 2577 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2559 if (ha->fw_attributes & BIT_4) { 2578 if (ha->fw_attributes & BIT_4) {
2560 int prot = 0; 2579 int prot = 0, guard;
2561 base_vha->flags.difdix_supported = 1; 2580 base_vha->flags.difdix_supported = 1;
2562 ql_dbg(ql_dbg_init, base_vha, 0x00f1, 2581 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
2563 "Registering for DIF/DIX type 1 and 3 protection.\n"); 2582 "Registering for DIF/DIX type 1 and 3 protection.\n");
@@ -2570,7 +2589,14 @@ skip_dpc:
2570 | SHOST_DIX_TYPE1_PROTECTION 2589 | SHOST_DIX_TYPE1_PROTECTION
2571 | SHOST_DIX_TYPE2_PROTECTION 2590 | SHOST_DIX_TYPE2_PROTECTION
2572 | SHOST_DIX_TYPE3_PROTECTION); 2591 | SHOST_DIX_TYPE3_PROTECTION);
2573 scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC); 2592
2593 guard = SHOST_DIX_GUARD_CRC;
2594
2595 if (IS_PI_IPGUARD_CAPABLE(ha) &&
2596 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2597 guard |= SHOST_DIX_GUARD_IP;
2598
2599 scsi_host_set_guard(host, guard);
2574 } else 2600 } else
2575 base_vha->flags.difdix_supported = 0; 2601 base_vha->flags.difdix_supported = 0;
2576 } 2602 }
@@ -2750,6 +2776,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
2750 } 2776 }
2751 mutex_unlock(&ha->vport_lock); 2777 mutex_unlock(&ha->vport_lock);
2752 2778
2779 if (IS_QLA8031(ha)) {
2780 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
2781 "Clearing fcoe driver presence.\n");
2782 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
2783 ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
2784 "Error while clearing DRV-Presence.\n");
2785 }
2786
2753 set_bit(UNLOADING, &base_vha->dpc_flags); 2787 set_bit(UNLOADING, &base_vha->dpc_flags);
2754 2788
2755 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 2789 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
@@ -2771,6 +2805,21 @@ qla2x00_remove_one(struct pci_dev *pdev)
2771 ha->wq = NULL; 2805 ha->wq = NULL;
2772 } 2806 }
2773 2807
2808 /* Cancel all work and destroy DPC workqueues */
2809 if (ha->dpc_lp_wq) {
2810 cancel_work_sync(&ha->idc_aen);
2811 destroy_workqueue(ha->dpc_lp_wq);
2812 ha->dpc_lp_wq = NULL;
2813 }
2814
2815 if (ha->dpc_hp_wq) {
2816 cancel_work_sync(&ha->nic_core_reset);
2817 cancel_work_sync(&ha->idc_state_handler);
2818 cancel_work_sync(&ha->nic_core_unrecoverable);
2819 destroy_workqueue(ha->dpc_hp_wq);
2820 ha->dpc_hp_wq = NULL;
2821 }
2822
2774 /* Kill the kernel thread for this host */ 2823 /* Kill the kernel thread for this host */
2775 if (ha->dpc_thread) { 2824 if (ha->dpc_thread) {
2776 struct task_struct *t = ha->dpc_thread; 2825 struct task_struct *t = ha->dpc_thread;
@@ -2837,7 +2886,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2837 qla2x00_stop_dpc_thread(vha); 2886 qla2x00_stop_dpc_thread(vha);
2838 2887
2839 qla25xx_delete_queues(vha); 2888 qla25xx_delete_queues(vha);
2840
2841 if (ha->flags.fce_enabled) 2889 if (ha->flags.fce_enabled)
2842 qla2x00_disable_fce_trace(vha, NULL, NULL); 2890 qla2x00_disable_fce_trace(vha, NULL, NULL);
2843 2891
@@ -2872,6 +2920,7 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
2872 2920
2873 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) { 2921 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
2874 list_del(&fcport->list); 2922 list_del(&fcport->list);
2923 qla2x00_clear_loop_id(fcport);
2875 kfree(fcport); 2924 kfree(fcport);
2876 fcport = NULL; 2925 fcport = NULL;
2877 } 2926 }
@@ -3169,6 +3218,18 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3169 } 3218 }
3170 3219
3171 INIT_LIST_HEAD(&ha->vp_list); 3220 INIT_LIST_HEAD(&ha->vp_list);
3221
3222 /* Allocate memory for our loop_id bitmap */
3223 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
3224 GFP_KERNEL);
3225 if (!ha->loop_id_map)
3226 goto fail_async_pd;
3227 else {
3228 qla2x00_set_reserved_loop_ids(ha);
3229 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
3230 "loop_id_map=%p. \n", ha->loop_id_map);
3231 }
3232
3172 return 1; 3233 return 1;
3173 3234
3174fail_async_pd: 3235fail_async_pd:
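The loop_id_map allocation above sizes the bitmap as BITS_TO_LONGS(LOOPID_MAP_SIZE) longs, that is, one bit per possible loop ID rounded up to whole longs, zeroed so every ID starts free before qla2x00_set_reserved_loop_ids() marks the reserved ones. A small userspace sketch of the same sizing arithmetic; the LOOPID_MAP_SIZE value here is illustrative, not the driver's definition:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG		(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define LOOPID_MAP_SIZE		4096	/* illustrative bit count */

int main(void)
{
	size_t nlongs = BITS_TO_LONGS(LOOPID_MAP_SIZE);
	unsigned long *map = calloc(nlongs, sizeof(long));	/* kzalloc() analogue */

	if (!map)
		return 1;
	printf("%d loop-id bits -> %zu longs (%zu bytes)\n",
	       LOOPID_MAP_SIZE, nlongs, nlongs * sizeof(long));
	free(map);
	return 0;
}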
@@ -3280,6 +3341,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3280{ 3341{
3281 qla2x00_free_fw_dump(ha); 3342 qla2x00_free_fw_dump(ha);
3282 3343
3344 if (ha->mctp_dump)
3345 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
3346 ha->mctp_dump_dma);
3347
3283 if (ha->srb_mempool) 3348 if (ha->srb_mempool)
3284 mempool_destroy(ha->srb_mempool); 3349 mempool_destroy(ha->srb_mempool);
3285 3350
@@ -3352,6 +3417,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3352 kfree(ha->nvram); 3417 kfree(ha->nvram);
3353 kfree(ha->npiv_info); 3418 kfree(ha->npiv_info);
3354 kfree(ha->swl); 3419 kfree(ha->swl);
3420 kfree(ha->loop_id_map);
3355 3421
3356 ha->srb_mempool = NULL; 3422 ha->srb_mempool = NULL;
3357 ha->ctx_mempool = NULL; 3423 ha->ctx_mempool = NULL;
@@ -3687,13 +3753,651 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3687 } 3753 }
3688 3754
3689 if (fcport->login_retry == 0 && status != QLA_SUCCESS) 3755 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
3690 fcport->loop_id = FC_NO_LOOP_ID; 3756 qla2x00_clear_loop_id(fcport);
3691 } 3757 }
3692 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3758 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3693 break; 3759 break;
3694 } 3760 }
3695} 3761}
3696 3762
3763/* Schedule work on any of the dpc-workqueues */
3764void
3765qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
3766{
3767 struct qla_hw_data *ha = base_vha->hw;
3768
3769 switch (work_code) {
3770 case MBA_IDC_AEN: /* 0x8200 */
3771 if (ha->dpc_lp_wq)
3772 queue_work(ha->dpc_lp_wq, &ha->idc_aen);
3773 break;
3774
3775 case QLA83XX_NIC_CORE_RESET: /* 0x1 */
3776 if (!ha->flags.nic_core_reset_hdlr_active) {
3777 if (ha->dpc_hp_wq)
3778 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
3779 } else
3780 ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
3781 "NIC Core reset is already active. Skip "
3782 "scheduling it again.\n");
3783 break;
3784 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
3785 if (ha->dpc_hp_wq)
3786 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
3787 break;
3788 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
3789 if (ha->dpc_hp_wq)
3790 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
3791 break;
3792 default:
3793 ql_log(ql_log_warn, base_vha, 0xb05f,
3794 "Unknown work-code=0x%x.\n", work_code);
3795 }
3796
3797 return;
3798}
3799
3800/* Work: Perform NIC Core Unrecoverable state handling */
3801void
3802qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
3803{
3804 struct qla_hw_data *ha =
3805 container_of(work, struct qla_hw_data, nic_core_unrecoverable);
3806 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3807 uint32_t dev_state = 0;
3808
3809 qla83xx_idc_lock(base_vha, 0);
3810 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
3811 qla83xx_reset_ownership(base_vha);
3812 if (ha->flags.nic_core_reset_owner) {
3813 ha->flags.nic_core_reset_owner = 0;
3814 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
3815 QLA8XXX_DEV_FAILED);
3816 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
3817 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
3818 }
3819 qla83xx_idc_unlock(base_vha, 0);
3820}
3821
3822/* Work: Execute IDC state handler */
3823void
3824qla83xx_idc_state_handler_work(struct work_struct *work)
3825{
3826 struct qla_hw_data *ha =
3827 container_of(work, struct qla_hw_data, idc_state_handler);
3828 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3829 uint32_t dev_state = 0;
3830
3831 qla83xx_idc_lock(base_vha, 0);
3832 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
3833 if (dev_state == QLA8XXX_DEV_FAILED ||
3834 dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
3835 qla83xx_idc_state_handler(base_vha);
3836 qla83xx_idc_unlock(base_vha, 0);
3837}
3838
3839int
3840qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
3841{
3842 int rval = QLA_SUCCESS;
3843 unsigned long heart_beat_wait = jiffies + (1 * HZ);
3844 uint32_t heart_beat_counter1, heart_beat_counter2;
3845
3846 do {
3847 if (time_after(jiffies, heart_beat_wait)) {
3848 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
3849 "Nic Core f/w is not alive.\n");
3850 rval = QLA_FUNCTION_FAILED;
3851 break;
3852 }
3853
3854 qla83xx_idc_lock(base_vha, 0);
3855 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
3856 &heart_beat_counter1);
3857 qla83xx_idc_unlock(base_vha, 0);
3858 msleep(100);
3859 qla83xx_idc_lock(base_vha, 0);
3860 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
3861 &heart_beat_counter2);
3862 qla83xx_idc_unlock(base_vha, 0);
3863 } while (heart_beat_counter1 == heart_beat_counter2);
3864
3865 return rval;
3866}
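The heartbeat check above samples QLA83XX_FW_HEARTBEAT twice, roughly 100ms apart, under the IDC lock; any change between samples means the NIC core firmware is alive, and a flat counter for about one second means it is not. A self-contained sketch of that sampling pattern, where read_heartbeat() and sleep_ms() are stand-ins rather than driver APIs:

#include <stdio.h>
#include <time.h>

/* Stand-in for reading QLA83XX_FW_HEARTBEAT; pretend firmware ticks each call. */
static unsigned int read_heartbeat(void)
{
	static unsigned int counter;
	return counter++;
}

static void sleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

/* Returns 1 when two samples differ (alive), 0 on a ~1s timeout (not alive). */
static int fw_alive(void)
{
	time_t deadline = time(NULL) + 1;
	unsigned int first, second;

	do {
		if (time(NULL) > deadline)
			return 0;
		first = read_heartbeat();
		sleep_ms(100);
		second = read_heartbeat();
	} while (first == second);
	return 1;
}

int main(void)
{
	printf("fw alive: %d\n", fw_alive());
	return 0;
}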
3867
3868/* Work: Perform NIC Core Reset handling */
3869void
3870qla83xx_nic_core_reset_work(struct work_struct *work)
3871{
3872 struct qla_hw_data *ha =
3873 container_of(work, struct qla_hw_data, nic_core_reset);
3874 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3875 uint32_t dev_state = 0;
3876
3877 if (IS_QLA2031(ha)) {
3878 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
3879 ql_log(ql_log_warn, base_vha, 0xb081,
3880 "Failed to dump mctp\n");
3881 return;
3882 }
3883
3884 if (!ha->flags.nic_core_reset_hdlr_active) {
3885 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
3886 qla83xx_idc_lock(base_vha, 0);
3887 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
3888 &dev_state);
3889 qla83xx_idc_unlock(base_vha, 0);
3890 if (dev_state != QLA8XXX_DEV_NEED_RESET) {
3891 ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
3892 "Nic Core f/w is alive.\n");
3893 return;
3894 }
3895 }
3896
3897 ha->flags.nic_core_reset_hdlr_active = 1;
3898 if (qla83xx_nic_core_reset(base_vha)) {
3899 /* NIC Core reset failed. */
3900 ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
3901 "NIC Core reset failed.\n");
3902 }
3903 ha->flags.nic_core_reset_hdlr_active = 0;
3904 }
3905}
3906
3907/* Work: Handle 8200 IDC aens */
3908void
3909qla83xx_service_idc_aen(struct work_struct *work)
3910{
3911 struct qla_hw_data *ha =
3912 container_of(work, struct qla_hw_data, idc_aen);
3913 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3914 uint32_t dev_state, idc_control;
3915
3916 qla83xx_idc_lock(base_vha, 0);
3917 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
3918 qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
3919 qla83xx_idc_unlock(base_vha, 0);
3920 if (dev_state == QLA8XXX_DEV_NEED_RESET) {
3921 if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
3922 ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
3923 "Application requested NIC Core Reset.\n");
3924 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
3925 } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
3926 QLA_SUCCESS) {
3927 ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
3928 "Other protocol driver requested NIC Core Reset.\n");
3929 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
3930 }
3931 } else if (dev_state == QLA8XXX_DEV_FAILED ||
3932 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
3933 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
3934 }
3935}
3936
3937static void
3938qla83xx_wait_logic(void)
3939{
3940 int i;
3941
3942 /* Yield CPU */
3943 if (!in_interrupt()) {
3944 /*
3945 * Wait about 100ms before retrying again.
3946 * This controls the number of retries for a single
3947 * lock operation.
3948 */
3949 msleep(100);
3950 schedule();
3951 } else {
3952 for (i = 0; i < 20; i++)
3953 cpu_relax(); /* This is a nop instr on i386 */
3954 }
3955}
3956
3957int
3958qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
3959{
3960 int rval;
3961 uint32_t data;
3962 uint32_t idc_lck_rcvry_stage_mask = 0x3;
3963 uint32_t idc_lck_rcvry_owner_mask = 0x3c;
3964 struct qla_hw_data *ha = base_vha->hw;
3965
3966 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
3967 if (rval)
3968 return rval;
3969
3970 if ((data & idc_lck_rcvry_stage_mask) > 0) {
3971 return QLA_SUCCESS;
3972 } else {
3973 data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
3974 rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
3975 data);
3976 if (rval)
3977 return rval;
3978
3979 msleep(200);
3980
3981 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
3982 &data);
3983 if (rval)
3984 return rval;
3985
3986 if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
3987 data &= (IDC_LOCK_RECOVERY_STAGE2 |
3988 ~(idc_lck_rcvry_stage_mask));
3989 rval = qla83xx_wr_reg(base_vha,
3990 QLA83XX_IDC_LOCK_RECOVERY, data);
3991 if (rval)
3992 return rval;
3993
3994 /* Forcefully perform IDC UnLock */
3995 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
3996 &data);
3997 if (rval)
3998 return rval;
3999 /* Clear lock-id by setting 0xff */
4000 rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
4001 0xff);
4002 if (rval)
4003 return rval;
4004 /* Clear lock-recovery by setting 0x0 */
4005 rval = qla83xx_wr_reg(base_vha,
4006 QLA83XX_IDC_LOCK_RECOVERY, 0x0);
4007 if (rval)
4008 return rval;
4009 } else
4010 return QLA_SUCCESS;
4011 }
4012
4013 return rval;
4014}
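The 0x3 and 0x3c masks above decode QLA83XX_IDC_LOCK_RECOVERY as a two-bit recovery stage in bits 0-1 and the recovering function's port number in bits 2-5; claiming recovery writes stage 1 OR'd with (portnum << 2), and only the function whose port number appears in the owner field later forces the unlock. A tiny sketch of that bit packing, with illustrative constants:

#include <stdio.h>

#define LCK_RCVRY_STAGE_MASK	0x03	/* bits 0-1: recovery stage */
#define LCK_RCVRY_OWNER_MASK	0x3c	/* bits 2-5: recovering port */
#define LCK_RCVRY_STAGE1	0x01	/* illustrative stage value */

int main(void)
{
	unsigned int portnum = 2;
	unsigned int reg = LCK_RCVRY_STAGE1 | (portnum << 2);	/* claim recovery */

	printf("stage=%u owner=port%u\n",
	       reg & LCK_RCVRY_STAGE_MASK,
	       (reg & LCK_RCVRY_OWNER_MASK) >> 2);
	return 0;
}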
4015
4016int
4017qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
4018{
4019 int rval = QLA_SUCCESS;
4020 uint32_t o_drv_lockid, n_drv_lockid;
4021 unsigned long lock_recovery_timeout;
4022
4023 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
4024retry_lockid:
4025 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
4026 if (rval)
4027 goto exit;
4028
4029 /* MAX wait time before forcing IDC Lock recovery = 2 secs */
4030 if (time_after_eq(jiffies, lock_recovery_timeout)) {
4031 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
4032 return QLA_SUCCESS;
4033 else
4034 return QLA_FUNCTION_FAILED;
4035 }
4036
4037 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
4038 if (rval)
4039 goto exit;
4040
4041 if (o_drv_lockid == n_drv_lockid) {
4042 qla83xx_wait_logic();
4043 goto retry_lockid;
4044 } else
4045 return QLA_SUCCESS;
4046
4047exit:
4048 return rval;
4049}
4050
4051void
4052qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
4053{
4054 uint16_t options = (requester_id << 15) | BIT_6;
4055 uint32_t data;
4056 struct qla_hw_data *ha = base_vha->hw;
4057
4058 /* IDC-lock implementation using driver-lock/lock-id remote registers */
4059retry_lock:
4060 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
4061 == QLA_SUCCESS) {
4062 if (data) {
4063 /* Setting lock-id to our function-number */
4064 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
4065 ha->portnum);
4066 } else {
4067 ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
4068 "Failed to acquire IDC lock. retrying...\n");
4069
4070 /* Retry/Perform IDC-Lock recovery */
4071 if (qla83xx_idc_lock_recovery(base_vha)
4072 == QLA_SUCCESS) {
4073 qla83xx_wait_logic();
4074 goto retry_lock;
4075 } else
4076 ql_log(ql_log_warn, base_vha, 0xb075,
4077 "IDC Lock recovery FAILED.\n");
4078 }
4079
4080 }
4081
4082 return;
4083
4084 /* XXX: IDC-lock implementation using access-control mbx */
4085retry_lock2:
4086 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
4087 ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
4088 "Failed to acquire IDC lock. retrying...\n");
4089 /* Retry/Perform IDC-Lock recovery */
4090 if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
4091 qla83xx_wait_logic();
4092 goto retry_lock2;
4093 } else
4094 ql_log(ql_log_warn, base_vha, 0xb076,
4095 "IDC Lock recovery FAILED.\n");
4096 }
4097
4098 return;
4099}
4100
4101void
4102qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
4103{
4104 uint16_t options = (requester_id << 15) | BIT_7, retry;
4105 uint32_t data;
4106 struct qla_hw_data *ha = base_vha->hw;
4107
4108 /* IDC-unlock implementation using driver-unlock/lock-id
4109 * remote registers
4110 */
4111 retry = 0;
4112retry_unlock:
4113 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
4114 == QLA_SUCCESS) {
4115 if (data == ha->portnum) {
4116 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
4117 /* Clearing lock-id by setting 0xff */
4118 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
4119 } else if (retry < 10) {
4120 /* SV: XXX: IDC unlock retrying needed here? */
4121
4122 /* Retry for IDC-unlock */
4123 qla83xx_wait_logic();
4124 retry++;
4125 ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
4126 "Failed to release IDC lock, retrying=%d\n", retry);
4127 goto retry_unlock;
4128 }
4129 } else if (retry < 10) {
4130 /* Retry for IDC-unlock */
4131 qla83xx_wait_logic();
4132 retry++;
4133 ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
4134 "Failed to read drv-lockid, retrying=%d\n", retry);
4135 goto retry_unlock;
4136 }
4137
4138 return;
4139
4140 /* XXX: IDC-unlock implementation using access-control mbx */
4141 retry = 0;
4142retry_unlock2:
4143 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
4144 if (retry < 10) {
4145 /* Retry for IDC-unlock */
4146 qla83xx_wait_logic();
4147 retry++;
4148 ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
4149 "Failed to release IDC lock, retrying=%d\n", retry);
4150 goto retry_unlock2;
4151 }
4152 }
4153
4154 return;
4155}
4156
4157int
4158__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
4159{
4160 int rval = QLA_SUCCESS;
4161 struct qla_hw_data *ha = vha->hw;
4162 uint32_t drv_presence;
4163
4164 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4165 if (rval == QLA_SUCCESS) {
4166 drv_presence |= (1 << ha->portnum);
4167 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
4168 drv_presence);
4169 }
4170
4171 return rval;
4172}
4173
4174int
4175qla83xx_set_drv_presence(scsi_qla_host_t *vha)
4176{
4177 int rval = QLA_SUCCESS;
4178
4179 qla83xx_idc_lock(vha, 0);
4180 rval = __qla83xx_set_drv_presence(vha);
4181 qla83xx_idc_unlock(vha, 0);
4182
4183 return rval;
4184}
4185
4186int
4187__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
4188{
4189 int rval = QLA_SUCCESS;
4190 struct qla_hw_data *ha = vha->hw;
4191 uint32_t drv_presence;
4192
4193 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4194 if (rval == QLA_SUCCESS) {
4195 drv_presence &= ~(1 << ha->portnum);
4196 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
4197 drv_presence);
4198 }
4199
4200 return rval;
4201}
4202
4203int
4204qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
4205{
4206 int rval = QLA_SUCCESS;
4207
4208 qla83xx_idc_lock(vha, 0);
4209 rval = __qla83xx_clear_drv_presence(vha);
4210 qla83xx_idc_unlock(vha, 0);
4211
4212 return rval;
4213}
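Both presence helpers above are read-modify-write updates of the per-function DRV-PRESENCE bitmask, done under the IDC lock: bit portnum is set while the driver is loaded and cleared on unload, so peer functions can tell who is participating. A minimal sketch of the two bit operations, with the register modeled as a plain variable:

#include <stdio.h>

static unsigned int drv_presence;	/* models QLA83XX_IDC_DRV_PRESENCE */

static void set_presence(unsigned int portnum)
{
	drv_presence |= 1u << portnum;
}

static void clear_presence(unsigned int portnum)
{
	drv_presence &= ~(1u << portnum);
}

int main(void)
{
	set_presence(0);
	set_presence(3);
	printf("presence=%#x\n", drv_presence);	/* 0x9 */
	clear_presence(0);
	printf("presence=%#x\n", drv_presence);	/* 0x8 */
	return 0;
}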
4214
4215void
4216qla83xx_need_reset_handler(scsi_qla_host_t *vha)
4217{
4218 struct qla_hw_data *ha = vha->hw;
4219 uint32_t drv_ack, drv_presence;
4220 unsigned long ack_timeout;
4221
4222 /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
4223 ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
4224 while (1) {
4225 qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
4226 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4227 if (drv_ack == drv_presence)
4228 break;
4229
4230 if (time_after_eq(jiffies, ack_timeout)) {
4231 ql_log(ql_log_warn, vha, 0xb067,
4232 "RESET ACK TIMEOUT! drv_presence=0x%x "
4233 "drv_ack=0x%x\n", drv_presence, drv_ack);
4234 /*
4235 * The function(s) which did not ack in time are forced
4236 * to withdraw any further participation in the IDC
4237 * reset.
4238 */
4239 if (drv_ack != drv_presence)
4240 qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
4241 drv_ack);
4242 break;
4243 }
4244
4245 qla83xx_idc_unlock(vha, 0);
4246 msleep(1000);
4247 qla83xx_idc_lock(vha, 0);
4248 }
4249
4250 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
4251 ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
4252}
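The handshake above polls once per second until the DRV-ACK bitmap equals DRV-PRESENCE; when fcoe_reset_timeout expires first, the functions that never acknowledged are dropped from DRV-PRESENCE so the reset can proceed without them. A compact sketch of that convergence-and-eviction logic, with the two registers modeled as plain variables and one function that never acks:

#include <stdio.h>

int main(void)
{
	unsigned int drv_presence = 0x7;	/* ports 0-2 present */
	unsigned int drv_ack = 0;
	int second, timeout = 5;		/* models fcoe_reset_timeout */

	for (second = 0; second < timeout; second++) {
		drv_ack |= 1u << (second % 2);	/* only ports 0 and 1 ever ack */
		if (drv_ack == drv_presence)
			break;
	}
	if (drv_ack != drv_presence) {
		printf("ack timeout, evicting 0x%x\n", drv_presence & ~drv_ack);
		drv_presence = drv_ack;		/* withdraw the laggards */
	}
	printf("presence=%#x ack=%#x\n", drv_presence, drv_ack);
	return 0;
}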
4253
4254int
4255qla83xx_device_bootstrap(scsi_qla_host_t *vha)
4256{
4257 int rval = QLA_SUCCESS;
4258 uint32_t idc_control;
4259
4260 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
4261 ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
4262
4263 /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
4264 __qla83xx_get_idc_control(vha, &idc_control);
4265 idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
4266 __qla83xx_set_idc_control(vha, 0);
4267
4268 qla83xx_idc_unlock(vha, 0);
4269 rval = qla83xx_restart_nic_firmware(vha);
4270 qla83xx_idc_lock(vha, 0);
4271
4272 if (rval != QLA_SUCCESS) {
4273 ql_log(ql_log_fatal, vha, 0xb06a,
4274 "Failed to restart NIC f/w.\n");
4275 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
4276 ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
4277 } else {
4278 ql_dbg(ql_dbg_p3p, vha, 0xb06c,
4279 "Success in restarting nic f/w.\n");
4280 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
4281 ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
4282 }
4283
4284 return rval;
4285}
4286
4287/* Assumes idc_lock always held on entry */
4288int
4289qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
4290{
4291 struct qla_hw_data *ha = base_vha->hw;
4292 int rval = QLA_SUCCESS;
4293 unsigned long dev_init_timeout;
4294 uint32_t dev_state;
4295
4296 /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
4297 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
4298
4299 while (1) {
4300
4301 if (time_after_eq(jiffies, dev_init_timeout)) {
4302 ql_log(ql_log_warn, base_vha, 0xb06e,
4303 "Initialization TIMEOUT!\n");
4304 /* Init timeout. Disable further NIC Core
4305 * communication.
4306 */
4307 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
4308 QLA8XXX_DEV_FAILED);
4309 ql_log(ql_log_info, base_vha, 0xb06f,
4310 "HW State: FAILED.\n");
4311 }
4312
4313 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4314 switch (dev_state) {
4315 case QLA8XXX_DEV_READY:
4316 if (ha->flags.nic_core_reset_owner)
4317 qla83xx_idc_audit(base_vha,
4318 IDC_AUDIT_COMPLETION);
4319 ha->flags.nic_core_reset_owner = 0;
4320 ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
4321 "Reset_owner reset by 0x%x.\n",
4322 ha->portnum);
4323 goto exit;
4324 case QLA8XXX_DEV_COLD:
4325 if (ha->flags.nic_core_reset_owner)
4326 rval = qla83xx_device_bootstrap(base_vha);
4327 else {
4328 /* Wait for AEN to change device-state */
4329 qla83xx_idc_unlock(base_vha, 0);
4330 msleep(1000);
4331 qla83xx_idc_lock(base_vha, 0);
4332 }
4333 break;
4334 case QLA8XXX_DEV_INITIALIZING:
4335 /* Wait for AEN to change device-state */
4336 qla83xx_idc_unlock(base_vha, 0);
4337 msleep(1000);
4338 qla83xx_idc_lock(base_vha, 0);
4339 break;
4340 case QLA8XXX_DEV_NEED_RESET:
4341 if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
4342 qla83xx_need_reset_handler(base_vha);
4343 else {
4344 /* Wait for AEN to change device-state */
4345 qla83xx_idc_unlock(base_vha, 0);
4346 msleep(1000);
4347 qla83xx_idc_lock(base_vha, 0);
4348 }
4349 /* reset timeout value after need reset handler */
4350 dev_init_timeout = jiffies +
4351 (ha->fcoe_dev_init_timeout * HZ);
4352 break;
4353 case QLA8XXX_DEV_NEED_QUIESCENT:
4354 /* XXX: DEBUG for now */
4355 qla83xx_idc_unlock(base_vha, 0);
4356 msleep(1000);
4357 qla83xx_idc_lock(base_vha, 0);
4358 break;
4359 case QLA8XXX_DEV_QUIESCENT:
4360 /* XXX: DEBUG for now */
4361 if (ha->flags.quiesce_owner)
4362 goto exit;
4363
4364 qla83xx_idc_unlock(base_vha, 0);
4365 msleep(1000);
4366 qla83xx_idc_lock(base_vha, 0);
4367 dev_init_timeout = jiffies +
4368 (ha->fcoe_dev_init_timeout * HZ);
4369 break;
4370 case QLA8XXX_DEV_FAILED:
4371 if (ha->flags.nic_core_reset_owner)
4372 qla83xx_idc_audit(base_vha,
4373 IDC_AUDIT_COMPLETION);
4374 ha->flags.nic_core_reset_owner = 0;
4375 __qla83xx_clear_drv_presence(base_vha);
4376 qla83xx_idc_unlock(base_vha, 0);
4377 qla8xxx_dev_failed_handler(base_vha);
4378 rval = QLA_FUNCTION_FAILED;
4379 qla83xx_idc_lock(base_vha, 0);
4380 goto exit;
4381 case QLA8XXX_BAD_VALUE:
4382 qla83xx_idc_unlock(base_vha, 0);
4383 msleep(1000);
4384 qla83xx_idc_lock(base_vha, 0);
4385 break;
4386 default:
4387 ql_log(ql_log_warn, base_vha, 0xb071,
4388 "Unknown Device State: %x.\n", dev_state);
4389 qla83xx_idc_unlock(base_vha, 0);
4390 qla8xxx_dev_failed_handler(base_vha);
4391 rval = QLA_FUNCTION_FAILED;
4392 qla83xx_idc_lock(base_vha, 0);
4393 goto exit;
4394 }
4395 }
4396
4397exit:
4398 return rval;
4399}
4400
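The handler above is a polling state machine: each pass reads QLA83XX_IDC_DEV_STATE under the IDC lock, finishes immediately on READY or FAILED, and for transient states drops the lock, sleeps a second, and re-reads until the fcoe_dev_init_timeout deadline. A much-simplified, lock-free sketch of that control flow; the states and the simulated register below are illustrative only:

#include <stdio.h>

enum dev_state { DEV_COLD, DEV_INITIALIZING, DEV_READY, DEV_FAILED };

/* Simulated register: the device walks COLD -> INITIALIZING -> READY. */
static enum dev_state read_dev_state(void)
{
	static const enum dev_state seq[] = { DEV_COLD, DEV_INITIALIZING, DEV_READY };
	static unsigned int i;

	return seq[i < 2 ? i++ : 2];
}

int main(void)
{
	int retries = 10;	/* models the fcoe_dev_init_timeout deadline */

	while (retries--) {
		switch (read_dev_state()) {
		case DEV_READY:
			puts("READY: done");
			return 0;
		case DEV_FAILED:
			puts("FAILED: give up");
			return 1;
		case DEV_COLD:
			puts("COLD: bootstrap if reset owner, else wait");
			break;
		case DEV_INITIALIZING:
			puts("INITIALIZING: wait for the next AEN");
			break;
		}
		/* real code: qla83xx_idc_unlock(); msleep(1000); qla83xx_idc_lock(); */
	}
	puts("init timeout: mark the device FAILED");
	return 1;
}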
3697/************************************************************************** 4401/**************************************************************************
3698* qla2x00_do_dpc 4402* qla2x00_do_dpc
3699* This kernel thread is a task that is schedule by the interrupt handler 4403* This kernel thread is a task that is schedule by the interrupt handler
@@ -3749,7 +4453,7 @@ qla2x00_do_dpc(void *data)
3749 &base_vha->dpc_flags)) { 4453 &base_vha->dpc_flags)) {
3750 qla82xx_idc_lock(ha); 4454 qla82xx_idc_lock(ha);
3751 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4455 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3752 QLA82XX_DEV_FAILED); 4456 QLA8XXX_DEV_FAILED);
3753 qla82xx_idc_unlock(ha); 4457 qla82xx_idc_unlock(ha);
3754 ql_log(ql_log_info, base_vha, 0x4004, 4458 ql_log(ql_log_info, base_vha, 0x4004,
3755 "HW State: FAILED.\n"); 4459 "HW State: FAILED.\n");
@@ -3819,14 +4523,21 @@ qla2x00_do_dpc(void *data)
3819 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 4523 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3820 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 4524 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
3821 "Quiescence mode scheduled.\n"); 4525 "Quiescence mode scheduled.\n");
3822 qla82xx_device_state_handler(base_vha); 4526 if (IS_QLA82XX(ha)) {
3823 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); 4527 qla82xx_device_state_handler(base_vha);
3824 if (!ha->flags.quiesce_owner) { 4528 clear_bit(ISP_QUIESCE_NEEDED,
3825 qla2x00_perform_loop_resync(base_vha); 4529 &base_vha->dpc_flags);
3826 4530 if (!ha->flags.quiesce_owner) {
3827 qla82xx_idc_lock(ha); 4531 qla2x00_perform_loop_resync(base_vha);
3828 qla82xx_clear_qsnt_ready(base_vha); 4532
3829 qla82xx_idc_unlock(ha); 4533 qla82xx_idc_lock(ha);
4534 qla82xx_clear_qsnt_ready(base_vha);
4535 qla82xx_idc_unlock(ha);
4536 }
4537 } else {
4538 clear_bit(ISP_QUIESCE_NEEDED,
4539 &base_vha->dpc_flags);
4540 qla2x00_quiesce_io(base_vha);
3830 } 4541 }
3831 ql_dbg(ql_dbg_dpc, base_vha, 0x400a, 4542 ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
3832 "Quiescence mode end.\n"); 4543 "Quiescence mode end.\n");
@@ -4326,7 +5037,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
4326 qla82xx_idc_lock(ha); 5037 qla82xx_idc_lock(ha);
4327 5038
4328 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5039 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4329 QLA82XX_DEV_INITIALIZING); 5040 QLA8XXX_DEV_INITIALIZING);
4330 5041
4331 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, 5042 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
4332 QLA82XX_IDC_VERSION); 5043 QLA82XX_IDC_VERSION);
@@ -4350,12 +5061,12 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
4350 "HW State: FAILED.\n"); 5061 "HW State: FAILED.\n");
4351 qla82xx_clear_drv_active(ha); 5062 qla82xx_clear_drv_active(ha);
4352 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5063 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4353 QLA82XX_DEV_FAILED); 5064 QLA8XXX_DEV_FAILED);
4354 } else { 5065 } else {
4355 ql_log(ql_log_info, base_vha, 0x900c, 5066 ql_log(ql_log_info, base_vha, 0x900c,
4356 "HW State: READY.\n"); 5067 "HW State: READY.\n");
4357 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5068 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4358 QLA82XX_DEV_READY); 5069 QLA8XXX_DEV_READY);
4359 qla82xx_idc_unlock(ha); 5070 qla82xx_idc_unlock(ha);
4360 ha->flags.isp82xx_fw_hung = 0; 5071 ha->flags.isp82xx_fw_hung = 0;
4361 rval = qla82xx_restart_isp(base_vha); 5072 rval = qla82xx_restart_isp(base_vha);
@@ -4370,7 +5081,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
4370 "This devfn is not reset owner = 0x%x.\n", 5081 "This devfn is not reset owner = 0x%x.\n",
4371 ha->pdev->devfn); 5082 ha->pdev->devfn);
4372 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 5083 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4373 QLA82XX_DEV_READY)) { 5084 QLA8XXX_DEV_READY)) {
4374 ha->flags.isp82xx_fw_hung = 0; 5085 ha->flags.isp82xx_fw_hung = 0;
4375 rval = qla82xx_restart_isp(base_vha); 5086 rval = qla82xx_restart_isp(base_vha);
4376 qla82xx_idc_lock(ha); 5087 qla82xx_idc_lock(ha);
@@ -4495,6 +5206,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
4495 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, 5206 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
4496 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, 5207 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
4497 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, 5208 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
5209 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
4498 { 0 }, 5210 { 0 },
4499}; 5211};
4500MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 5212MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index d70f03008981..892a81e457bc 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index a683e766d1ae..32fdc2a66dd1 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -966,16 +966,16 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
966 QLA82XX_IDC_PARAM_ADDR , 8); 966 QLA82XX_IDC_PARAM_ADDR , 8);
967 967
968 if (*wptr == __constant_cpu_to_le32(0xffffffff)) { 968 if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
969 ha->nx_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT; 969 ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
970 ha->nx_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT; 970 ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
971 } else { 971 } else {
972 ha->nx_dev_init_timeout = le32_to_cpu(*wptr++); 972 ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr++);
973 ha->nx_reset_timeout = le32_to_cpu(*wptr); 973 ha->fcoe_reset_timeout = le32_to_cpu(*wptr);
974 } 974 }
975 ql_dbg(ql_dbg_init, vha, 0x004e, 975 ql_dbg(ql_dbg_init, vha, 0x004e,
976 "nx_dev_init_timeout=%d " 976 "fcoe_dev_init_timeout=%d "
977 "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout, 977 "fcoe_reset_timeout=%d.\n", ha->fcoe_dev_init_timeout,
978 ha->nx_reset_timeout); 978 ha->fcoe_reset_timeout);
979 return; 979 return;
980} 980}
981 981
@@ -1017,7 +1017,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
1017 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) 1017 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
1018 return; 1018 return;
1019 1019
1020 if (ha->flags.isp82xx_reset_hdlr_active) 1020 if (ha->flags.nic_core_reset_hdlr_active)
1021 return; 1021 return;
1022 1022
1023 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, 1023 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1662,6 +1662,23 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
1662 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1662 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1663} 1663}
1664 1664
1665static uint32_t
1666qla83xx_select_led_port(struct qla_hw_data *ha)
1667{
1668 uint32_t led_select_value = 0;
1669
1670 if (!IS_QLA83XX(ha))
1671 goto out;
1672
1673 if (ha->flags.port0)
1674 led_select_value = QLA83XX_LED_PORT0;
1675 else
1676 led_select_value = QLA83XX_LED_PORT1;
1677
1678out:
1679 return led_select_value;
1680}
1681
1665void 1682void
1666qla83xx_beacon_blink(struct scsi_qla_host *vha) 1683qla83xx_beacon_blink(struct scsi_qla_host *vha)
1667{ 1684{
@@ -1669,22 +1686,34 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
1669 struct qla_hw_data *ha = vha->hw; 1686 struct qla_hw_data *ha = vha->hw;
1670 uint16_t led_cfg[6]; 1687 uint16_t led_cfg[6];
1671 uint16_t orig_led_cfg[6]; 1688 uint16_t orig_led_cfg[6];
1689 uint32_t led_10_value, led_43_value;
1672 1690
1673 if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha)) 1691 if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha))
1674 return; 1692 return;
1675 1693
1676 if (IS_QLA2031(ha) && ha->beacon_blink_led) { 1694 if (!ha->beacon_blink_led)
1677 if (ha->flags.port0) 1695 return;
1678 led_select_value = 0x00201320; 1696
1679 else 1697 if (IS_QLA2031(ha)) {
1680 led_select_value = 0x00201328; 1698 led_select_value = qla83xx_select_led_port(ha);
1681 1699
1682 qla83xx_write_remote_reg(vha, led_select_value, 0x40002000); 1700 qla83xx_wr_reg(vha, led_select_value, 0x40002000);
1683 qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40002000); 1701 qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000);
1702 msleep(1000);
1703 qla83xx_wr_reg(vha, led_select_value, 0x40004000);
1704 qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000);
1705 } else if (IS_QLA8031(ha)) {
1706 led_select_value = qla83xx_select_led_port(ha);
1707
1708 qla83xx_rd_reg(vha, led_select_value, &led_10_value);
1709 qla83xx_rd_reg(vha, led_select_value + 0x10, &led_43_value);
1710 qla83xx_wr_reg(vha, led_select_value, 0x01f44000);
1711 msleep(500);
1712 qla83xx_wr_reg(vha, led_select_value, 0x400001f4);
1684 msleep(1000); 1713 msleep(1000);
1685 qla83xx_write_remote_reg(vha, led_select_value, 0x40004000); 1714 qla83xx_wr_reg(vha, led_select_value, led_10_value);
1686 qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40004000); 1715 qla83xx_wr_reg(vha, led_select_value + 0x10, led_43_value);
1687 } else if ((IS_QLA8031(ha) || IS_QLA81XX(ha)) && ha->beacon_blink_led) { 1716 } else if (IS_QLA81XX(ha)) {
1688 int rval; 1717 int rval;
1689 1718
1690 /* Save Current */ 1719 /* Save Current */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index f5fdb16bec9b..cfe934e1af42 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,15 +1,15 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.04.00.03-k" 10#define QLA2XXX_VERSION "8.04.00.07-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 4 13#define QLA_DRIVER_MINOR_VER 4
14#define QLA_DRIVER_PATCH_VER 0 14#define QLA_DRIVER_PATCH_VER 0
15#define QLA_DRIVER_BETA_VER 3 15#define QLA_DRIVER_BETA_VER 0