path: root/drivers/scsi/qla2xxx
author     Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-22 14:36:49 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-22 14:36:49 -0400
commit     e6f194d8f6f50da6837af637b2fd839c34185f7a (patch)
tree       f3c479a2bc24d49a150ff183e2614ee0f76cb366 /drivers/scsi/qla2xxx
parent     7578634990fb47cc30083fbd812689aa6deacfc0 (diff)
parent     b91421749a1840148d8c81637c03c0ace3f35269 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (60 commits)
  [SCSI] libsas: make ATA functions selectable by a config option
  [SCSI] bsg: unexport sg v3 helper functions
  [SCSI] bsg: fix bsg_unregister_queue
  [SCSI] bsg: make class backlinks
  [SCSI] 3w-9xxx: add support for 9690SA
  [SCSI] bsg: fix bsg_register_queue error path
  [SCSI] ESP: Increase ESP_BUS_TIMEOUT to 275.
  [SCSI] libsas: fix scr_read/write users and update the libata documentation
  [SCSI] mpt fusion: update Kconfig help
  [SCSI] scsi_transport_sas: add destructor for bsg
  [SCSI] iscsi_tcp: buggered kmalloc()
  [SCSI] qla2xxx: Update version number to 8.02.00-k2.
  [SCSI] qla2xxx: Add ISP25XX support.
  [SCSI] qla2xxx: Use pci_try_set_mwi().
  [SCSI] qla2xxx: Use PCI-X/PCI-Express read control interfaces.
  [SCSI] qla2xxx: Re-factor isp_operations to static structures.
  [SCSI] qla2xxx: Validate mid-layer 'underflow' during check-condition handling.
  [SCSI] qla2xxx: Correct setting of 'current' and 'supported' speeds during FDMI registration.
  [SCSI] qla2xxx: Generalize iIDMA support.
  [SCSI] qla2xxx: Generalize FW-Interface-2 support.
  ...
Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c      33
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c     1114
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h       38
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h       22
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h        36
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h        6
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c        82
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c     136
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h     4
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c      10
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c       55
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c       58
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c       380
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c       35
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h    2
15 files changed, 1503 insertions, 508 deletions
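
Most of the mechanical churn in the qla_attr.c diff below follows from the "Re-factor isp_operations to static structures" commit in this series: ha->isp_ops stops being a per-host embedded structure and becomes a pointer to a shared, per-ISP-family static ops table, so every call site changes from ha->isp_ops.fn() to ha->isp_ops->fn(). The following is a minimal, self-contained sketch of that pattern only; the struct members, helper name and main() harness are illustrative and are not the driver's real definitions.

/*
 * Sketch (not driver code): embedded ops struct -> pointer to a static table.
 */
#include <stdio.h>

struct host;

struct isp_operations {
	void (*read_nvram)(struct host *ha);
};

struct host {
	/* was: struct isp_operations isp_ops;  (one private copy per host) */
	const struct isp_operations *isp_ops;
};

static void fwi2_read_nvram(struct host *ha)
{
	(void)ha;
	printf("FWI-2 NVRAM read path\n");
}

/* One static ops table shared by every host of the same ISP family. */
static const struct isp_operations fwi2_ops = {
	.read_nvram = fwi2_read_nvram,
};

int main(void)
{
	struct host ha = { .isp_ops = &fwi2_ops };

	/* Call sites now dereference the pointer: isp_ops.fn() -> isp_ops->fn(). */
	ha.isp_ops->read_nvram(&ha);
	return 0;
}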
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3eb2208675ae..1612f9200a52 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -98,7 +98,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
98 98
99 /* Read NVRAM. */ 99 /* Read NVRAM. */
100 spin_lock_irqsave(&ha->hardware_lock, flags); 100 spin_lock_irqsave(&ha->hardware_lock, flags);
101 ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->nvram_base, 101 ha->isp_ops->read_nvram(ha, (uint8_t *)buf, ha->nvram_base,
102 ha->nvram_size); 102 ha->nvram_size);
103 spin_unlock_irqrestore(&ha->hardware_lock, flags); 103 spin_unlock_irqrestore(&ha->hardware_lock, flags);
104 104
@@ -119,7 +119,7 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
119 return 0; 119 return 0;
120 120
121 /* Checksum NVRAM. */ 121 /* Checksum NVRAM. */
122 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 122 if (IS_FWI2_CAPABLE(ha)) {
123 uint32_t *iter; 123 uint32_t *iter;
124 uint32_t chksum; 124 uint32_t chksum;
125 125
@@ -143,7 +143,7 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
143 143
144 /* Write NVRAM. */ 144 /* Write NVRAM. */
145 spin_lock_irqsave(&ha->hardware_lock, flags); 145 spin_lock_irqsave(&ha->hardware_lock, flags);
146 ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count); 146 ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count);
147 spin_unlock_irqrestore(&ha->hardware_lock, flags); 147 spin_unlock_irqrestore(&ha->hardware_lock, flags);
148 148
149 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 149 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
@@ -206,7 +206,7 @@ static struct bin_attribute sysfs_optrom_attr = {
206 .name = "optrom", 206 .name = "optrom",
207 .mode = S_IRUSR | S_IWUSR, 207 .mode = S_IRUSR | S_IWUSR,
208 }, 208 },
209 .size = OPTROM_SIZE_24XX, 209 .size = 0,
210 .read = qla2x00_sysfs_read_optrom, 210 .read = qla2x00_sysfs_read_optrom,
211 .write = qla2x00_sysfs_write_optrom, 211 .write = qla2x00_sysfs_write_optrom,
212}; 212};
@@ -252,7 +252,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
252 } 252 }
253 253
254 memset(ha->optrom_buffer, 0, ha->optrom_size); 254 memset(ha->optrom_buffer, 0, ha->optrom_size);
255 ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0, 255 ha->isp_ops->read_optrom(ha, ha->optrom_buffer, 0,
256 ha->optrom_size); 256 ha->optrom_size);
257 break; 257 break;
258 case 2: 258 case 2:
@@ -275,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
275 if (ha->optrom_state != QLA_SWRITING) 275 if (ha->optrom_state != QLA_SWRITING)
276 break; 276 break;
277 277
278 ha->isp_ops.write_optrom(ha, ha->optrom_buffer, 0, 278 ha->isp_ops->write_optrom(ha, ha->optrom_buffer, 0,
279 ha->optrom_size); 279 ha->optrom_size);
280 break; 280 break;
281 } 281 }
@@ -305,7 +305,8 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
305 305
306 /* Read NVRAM. */ 306 /* Read NVRAM. */
307 spin_lock_irqsave(&ha->hardware_lock, flags); 307 spin_lock_irqsave(&ha->hardware_lock, flags);
308 ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->vpd_base, ha->vpd_size); 308 ha->isp_ops->read_nvram(ha, (uint8_t *)buf, ha->vpd_base,
309 ha->vpd_size);
309 spin_unlock_irqrestore(&ha->hardware_lock, flags); 310 spin_unlock_irqrestore(&ha->hardware_lock, flags);
310 311
311 return ha->vpd_size; 312 return ha->vpd_size;
@@ -325,7 +326,7 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
325 326
326 /* Write NVRAM. */ 327 /* Write NVRAM. */
327 spin_lock_irqsave(&ha->hardware_lock, flags); 328 spin_lock_irqsave(&ha->hardware_lock, flags);
328 ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count); 329 ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count);
329 spin_unlock_irqrestore(&ha->hardware_lock, flags); 330 spin_unlock_irqrestore(&ha->hardware_lock, flags);
330 331
331 return count; 332 return count;
@@ -410,7 +411,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
410 int ret; 411 int ret;
411 412
412 for (iter = bin_file_entries; iter->name; iter++) { 413 for (iter = bin_file_entries; iter->name; iter++) {
413 if (iter->is4GBp_only && (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))) 414 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
414 continue; 415 continue;
415 416
416 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 417 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -429,7 +430,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
429 struct sysfs_entry *iter; 430 struct sysfs_entry *iter;
430 431
431 for (iter = bin_file_entries; iter->name; iter++) { 432 for (iter = bin_file_entries; iter->name; iter++) {
432 if (iter->is4GBp_only && (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))) 433 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
433 continue; 434 continue;
434 435
435 sysfs_remove_bin_file(&host->shost_gendev.kobj, 436 sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -437,7 +438,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
437 } 438 }
438 439
439 if (ha->beacon_blink_led == 1) 440 if (ha->beacon_blink_led == 1)
440 ha->isp_ops.beacon_off(ha); 441 ha->isp_ops->beacon_off(ha);
441} 442}
442 443
443/* Scsi_Host attributes. */ 444/* Scsi_Host attributes. */
@@ -455,7 +456,7 @@ qla2x00_fw_version_show(struct class_device *cdev, char *buf)
455 char fw_str[30]; 456 char fw_str[30];
456 457
457 return snprintf(buf, PAGE_SIZE, "%s\n", 458 return snprintf(buf, PAGE_SIZE, "%s\n",
458 ha->isp_ops.fw_version_str(ha, fw_str)); 459 ha->isp_ops->fw_version_str(ha, fw_str));
459} 460}
460 461
461static ssize_t 462static ssize_t
@@ -507,7 +508,7 @@ qla2x00_pci_info_show(struct class_device *cdev, char *buf)
507 char pci_info[30]; 508 char pci_info[30];
508 509
509 return snprintf(buf, PAGE_SIZE, "%s\n", 510 return snprintf(buf, PAGE_SIZE, "%s\n",
510 ha->isp_ops.pci_info_str(ha, pci_info)); 511 ha->isp_ops->pci_info_str(ha, pci_info));
511} 512}
512 513
513static ssize_t 514static ssize_t
@@ -652,9 +653,9 @@ qla2x00_beacon_store(struct class_device *cdev, const char *buf,
652 return -EINVAL; 653 return -EINVAL;
653 654
654 if (val) 655 if (val)
655 rval = ha->isp_ops.beacon_on(ha); 656 rval = ha->isp_ops->beacon_on(ha);
656 else 657 else
657 rval = ha->isp_ops.beacon_off(ha); 658 rval = ha->isp_ops->beacon_off(ha);
658 659
659 if (rval != QLA_SUCCESS) 660 if (rval != QLA_SUCCESS)
660 count = 0; 661 count = 0;
@@ -898,7 +899,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
898 pfc_host_stat = &ha->fc_host_stat; 899 pfc_host_stat = &ha->fc_host_stat;
899 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 900 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
900 901
901 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 902 if (IS_FWI2_CAPABLE(ha)) {
902 rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf, 903 rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf,
903 sizeof(stat_buf) / 4, mb_stat); 904 sizeof(stat_buf) / 4, mb_stat);
904 } else if (atomic_read(&ha->loop_state) == LOOP_READY && 905 } else if (atomic_read(&ha->loop_state) == LOOP_READY &&
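
The other recurring change in qla_attr.c above is that the open-coded IS_QLA24XX(ha) || IS_QLA54XX(ha) tests collapse into a single IS_FWI2_CAPABLE(ha) check (the "Generalize FW-Interface-2 support" commit), so the new ISP25XX parts are presumably picked up without touching every call site again. Below is a hedged, self-contained sketch of that idea; the DT_* bits and their values are invented for illustration and do not match the driver's actual device-type flags.

/*
 * Sketch (not driver code): fold per-chip tests into one capability macro.
 */
#include <stdio.h>

#define DT_ISP2422	0x01
#define DT_ISP5422	0x02
#define DT_ISP2532	0x04	/* new ISP25xx part */

#define IS_QLA24XX(dt)	((dt) & DT_ISP2422)
#define IS_QLA54XX(dt)	((dt) & DT_ISP5422)
#define IS_QLA25XX(dt)	((dt) & DT_ISP2532)

/* One predicate for "speaks firmware interface 2" instead of a chip list. */
#define IS_FWI2_CAPABLE(dt) \
	(IS_QLA24XX(dt) || IS_QLA54XX(dt) || IS_QLA25XX(dt))

int main(void)
{
	unsigned int dt = DT_ISP2532;

	if (IS_FWI2_CAPABLE(dt))
		printf("use the FWI-2 NVRAM/VPD/option-ROM attribute paths\n");
	return 0;
}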
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 996c47a63074..563d18f4ff50 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -37,6 +37,121 @@ qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
37 return ptr + (ha->response_q_length * sizeof(response_t)); 37 return ptr + (ha->response_q_length * sizeof(response_t));
38} 38}
39 39
40static int
41qla2xxx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
42 uint32_t cram_size, uint32_t *ext_mem, void **nxt)
43{
44 int rval;
45 uint32_t cnt, stat, timer, risc_address, ext_mem_cnt;
46 uint16_t mb[4];
47 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
48
49 rval = QLA_SUCCESS;
50 risc_address = ext_mem_cnt = 0;
51 memset(mb, 0, sizeof(mb));
52
53 /* Code RAM. */
54 risc_address = 0x20000;
55 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
56 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
57
58 for (cnt = 0; cnt < cram_size / 4 && rval == QLA_SUCCESS;
59 cnt++, risc_address++) {
60 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
61 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
62 RD_REG_WORD(&reg->mailbox8);
63 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
64
65 for (timer = 6000000; timer; timer--) {
66 /* Check for pending interrupts. */
67 stat = RD_REG_DWORD(&reg->host_status);
68 if (stat & HSRX_RISC_INT) {
69 stat &= 0xff;
70
71 if (stat == 0x1 || stat == 0x2 ||
72 stat == 0x10 || stat == 0x11) {
73 set_bit(MBX_INTERRUPT,
74 &ha->mbx_cmd_flags);
75
76 mb[0] = RD_REG_WORD(&reg->mailbox0);
77 mb[2] = RD_REG_WORD(&reg->mailbox2);
78 mb[3] = RD_REG_WORD(&reg->mailbox3);
79
80 WRT_REG_DWORD(&reg->hccr,
81 HCCRX_CLR_RISC_INT);
82 RD_REG_DWORD(&reg->hccr);
83 break;
84 }
85
86 /* Clear this intr; it wasn't a mailbox intr */
87 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
88 RD_REG_DWORD(&reg->hccr);
89 }
90 udelay(5);
91 }
92
93 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
94 rval = mb[0] & MBS_MASK;
95 code_ram[cnt] = htonl((mb[3] << 16) | mb[2]);
96 } else {
97 rval = QLA_FUNCTION_FAILED;
98 }
99 }
100
101 if (rval == QLA_SUCCESS) {
102 /* External Memory. */
103 risc_address = 0x100000;
104 ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1;
105 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
106 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
107 }
108 for (cnt = 0; cnt < ext_mem_cnt && rval == QLA_SUCCESS;
109 cnt++, risc_address++) {
110 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
111 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
112 RD_REG_WORD(&reg->mailbox8);
113 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
114
115 for (timer = 6000000; timer; timer--) {
116 /* Check for pending interrupts. */
117 stat = RD_REG_DWORD(&reg->host_status);
118 if (stat & HSRX_RISC_INT) {
119 stat &= 0xff;
120
121 if (stat == 0x1 || stat == 0x2 ||
122 stat == 0x10 || stat == 0x11) {
123 set_bit(MBX_INTERRUPT,
124 &ha->mbx_cmd_flags);
125
126 mb[0] = RD_REG_WORD(&reg->mailbox0);
127 mb[2] = RD_REG_WORD(&reg->mailbox2);
128 mb[3] = RD_REG_WORD(&reg->mailbox3);
129
130 WRT_REG_DWORD(&reg->hccr,
131 HCCRX_CLR_RISC_INT);
132 RD_REG_DWORD(&reg->hccr);
133 break;
134 }
135
136 /* Clear this intr; it wasn't a mailbox intr */
137 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
138 RD_REG_DWORD(&reg->hccr);
139 }
140 udelay(5);
141 }
142
143 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
144 rval = mb[0] & MBS_MASK;
145 ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]);
146 } else {
147 rval = QLA_FUNCTION_FAILED;
148 }
149 }
150
151 *nxt = rval == QLA_SUCCESS ? &ext_mem[cnt]: NULL;
152 return rval;
153}
154
40/** 155/**
41 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. 156 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
42 * @ha: HA context 157 * @ha: HA context
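
The newly added qla2xxx_dump_memory() above pulls the duplicated code-RAM and external-memory read loops out of qla24xx_fw_dump() so that the ISP24xx and the new ISP25xx dump paths can share them; on success it reports, through the nxt out-parameter, where the caller should append the queue and EFT data (see the qla24xx_fw_dump() hunk further down). The snippet below is a small, self-contained sketch of just that out-parameter pattern; dump_region(), its fake register read and the main() harness are invented for illustration and are not the driver's code.

/*
 * Sketch (not driver code): fill one dump region, return the next position.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl(): dump words are stored big-endian */

static int dump_region(uint32_t *dst, size_t nwords, uint32_t addr, void **nxt)
{
	size_t i;

	for (i = 0; i < nwords; i++)
		dst[i] = htonl(addr + (uint32_t)i);	/* stand-in for a RISC RAM word */

	*nxt = &dst[nwords];	/* caller appends queue/EFT data from here */
	return 0;
}

int main(void)
{
	uint32_t buf[8];
	void *nxt = NULL;

	if (dump_region(buf, 4, 0x20000, &nxt) == 0)
		printf("next region starts %zu bytes into the buffer\n",
		    (size_t)((char *)nxt - (char *)buf));
	return 0;
}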
@@ -633,11 +748,10 @@ void
633qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 748qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
634{ 749{
635 int rval; 750 int rval;
636 uint32_t cnt, timer; 751 uint32_t cnt;
637 uint32_t risc_address; 752 uint32_t risc_address;
638 uint16_t mb[4], wd; 753 uint16_t mb0, wd;
639 754
640 uint32_t stat;
641 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 755 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
642 uint32_t __iomem *dmp_reg; 756 uint32_t __iomem *dmp_reg;
643 uint32_t *iter_reg; 757 uint32_t *iter_reg;
@@ -645,10 +759,9 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
645 unsigned long flags; 759 unsigned long flags;
646 struct qla24xx_fw_dump *fw; 760 struct qla24xx_fw_dump *fw;
647 uint32_t ext_mem_cnt; 761 uint32_t ext_mem_cnt;
648 void *eft; 762 void *nxt;
649 763
650 risc_address = ext_mem_cnt = 0; 764 risc_address = ext_mem_cnt = 0;
651 memset(mb, 0, sizeof(mb));
652 flags = 0; 765 flags = 0;
653 766
654 if (!hardware_locked) 767 if (!hardware_locked)
@@ -701,250 +814,236 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
701 /* Shadow registers. */ 814 /* Shadow registers. */
702 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70); 815 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
703 RD_REG_DWORD(&reg->iobase_addr); 816 RD_REG_DWORD(&reg->iobase_addr);
704 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 817 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
705 WRT_REG_DWORD(dmp_reg, 0xB0000000); 818 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
706 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 819
707 fw->shadow_reg[0] = htonl(RD_REG_DWORD(dmp_reg)); 820 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
708 821 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
709 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 822
710 WRT_REG_DWORD(dmp_reg, 0xB0100000); 823 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
711 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 824 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
712 fw->shadow_reg[1] = htonl(RD_REG_DWORD(dmp_reg)); 825
713 826 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
714 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 827 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
715 WRT_REG_DWORD(dmp_reg, 0xB0200000); 828
716 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 829 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
717 fw->shadow_reg[2] = htonl(RD_REG_DWORD(dmp_reg)); 830 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
718 831
719 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 832 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
720 WRT_REG_DWORD(dmp_reg, 0xB0300000); 833 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
721 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 834
722 fw->shadow_reg[3] = htonl(RD_REG_DWORD(dmp_reg)); 835 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
723 836 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
724 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
725 WRT_REG_DWORD(dmp_reg, 0xB0400000);
726 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
727 fw->shadow_reg[4] = htonl(RD_REG_DWORD(dmp_reg));
728
729 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
730 WRT_REG_DWORD(dmp_reg, 0xB0500000);
731 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
732 fw->shadow_reg[5] = htonl(RD_REG_DWORD(dmp_reg));
733
734 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
735 WRT_REG_DWORD(dmp_reg, 0xB0600000);
736 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
737 fw->shadow_reg[6] = htonl(RD_REG_DWORD(dmp_reg));
738 837
739 /* Mailbox registers. */ 838 /* Mailbox registers. */
740 mbx_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 839 mbx_reg = &reg->mailbox0;
741 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 840 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
742 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); 841 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
743 842
744 /* Transfer sequence registers. */ 843 /* Transfer sequence registers. */
745 iter_reg = fw->xseq_gp_reg; 844 iter_reg = fw->xseq_gp_reg;
746 WRT_REG_DWORD(&reg->iobase_addr, 0xBF00); 845 WRT_REG_DWORD(&reg->iobase_addr, 0xBF00);
747 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 846 dmp_reg = &reg->iobase_window;
748 for (cnt = 0; cnt < 16; cnt++) 847 for (cnt = 0; cnt < 16; cnt++)
749 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 848 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
750 849
751 WRT_REG_DWORD(&reg->iobase_addr, 0xBF10); 850 WRT_REG_DWORD(&reg->iobase_addr, 0xBF10);
752 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 851 dmp_reg = &reg->iobase_window;
753 for (cnt = 0; cnt < 16; cnt++) 852 for (cnt = 0; cnt < 16; cnt++)
754 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 853 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
755 854
756 WRT_REG_DWORD(&reg->iobase_addr, 0xBF20); 855 WRT_REG_DWORD(&reg->iobase_addr, 0xBF20);
757 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 856 dmp_reg = &reg->iobase_window;
758 for (cnt = 0; cnt < 16; cnt++) 857 for (cnt = 0; cnt < 16; cnt++)
759 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 858 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
760 859
761 WRT_REG_DWORD(&reg->iobase_addr, 0xBF30); 860 WRT_REG_DWORD(&reg->iobase_addr, 0xBF30);
762 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 861 dmp_reg = &reg->iobase_window;
763 for (cnt = 0; cnt < 16; cnt++) 862 for (cnt = 0; cnt < 16; cnt++)
764 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 863 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
765 864
766 WRT_REG_DWORD(&reg->iobase_addr, 0xBF40); 865 WRT_REG_DWORD(&reg->iobase_addr, 0xBF40);
767 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 866 dmp_reg = &reg->iobase_window;
768 for (cnt = 0; cnt < 16; cnt++) 867 for (cnt = 0; cnt < 16; cnt++)
769 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 868 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
770 869
771 WRT_REG_DWORD(&reg->iobase_addr, 0xBF50); 870 WRT_REG_DWORD(&reg->iobase_addr, 0xBF50);
772 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 871 dmp_reg = &reg->iobase_window;
773 for (cnt = 0; cnt < 16; cnt++) 872 for (cnt = 0; cnt < 16; cnt++)
774 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 873 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
775 874
776 WRT_REG_DWORD(&reg->iobase_addr, 0xBF60); 875 WRT_REG_DWORD(&reg->iobase_addr, 0xBF60);
777 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 876 dmp_reg = &reg->iobase_window;
778 for (cnt = 0; cnt < 16; cnt++) 877 for (cnt = 0; cnt < 16; cnt++)
779 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 878 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
780 879
781 WRT_REG_DWORD(&reg->iobase_addr, 0xBF70); 880 WRT_REG_DWORD(&reg->iobase_addr, 0xBF70);
782 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 881 dmp_reg = &reg->iobase_window;
783 for (cnt = 0; cnt < 16; cnt++) 882 for (cnt = 0; cnt < 16; cnt++)
784 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 883 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
785 884
786 WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0); 885 WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0);
787 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 886 dmp_reg = &reg->iobase_window;
788 for (cnt = 0; cnt < sizeof(fw->xseq_0_reg) / 4; cnt++) 887 for (cnt = 0; cnt < sizeof(fw->xseq_0_reg) / 4; cnt++)
789 fw->xseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 888 fw->xseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
790 889
791 WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0); 890 WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0);
792 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 891 dmp_reg = &reg->iobase_window;
793 for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++) 892 for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++)
794 fw->xseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 893 fw->xseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
795 894
796 /* Receive sequence registers. */ 895 /* Receive sequence registers. */
797 iter_reg = fw->rseq_gp_reg; 896 iter_reg = fw->rseq_gp_reg;
798 WRT_REG_DWORD(&reg->iobase_addr, 0xFF00); 897 WRT_REG_DWORD(&reg->iobase_addr, 0xFF00);
799 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 898 dmp_reg = &reg->iobase_window;
800 for (cnt = 0; cnt < 16; cnt++) 899 for (cnt = 0; cnt < 16; cnt++)
801 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 900 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
802 901
803 WRT_REG_DWORD(&reg->iobase_addr, 0xFF10); 902 WRT_REG_DWORD(&reg->iobase_addr, 0xFF10);
804 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 903 dmp_reg = &reg->iobase_window;
805 for (cnt = 0; cnt < 16; cnt++) 904 for (cnt = 0; cnt < 16; cnt++)
806 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 905 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
807 906
808 WRT_REG_DWORD(&reg->iobase_addr, 0xFF20); 907 WRT_REG_DWORD(&reg->iobase_addr, 0xFF20);
809 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 908 dmp_reg = &reg->iobase_window;
810 for (cnt = 0; cnt < 16; cnt++) 909 for (cnt = 0; cnt < 16; cnt++)
811 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 910 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
812 911
813 WRT_REG_DWORD(&reg->iobase_addr, 0xFF30); 912 WRT_REG_DWORD(&reg->iobase_addr, 0xFF30);
814 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 913 dmp_reg = &reg->iobase_window;
815 for (cnt = 0; cnt < 16; cnt++) 914 for (cnt = 0; cnt < 16; cnt++)
816 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 915 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
817 916
818 WRT_REG_DWORD(&reg->iobase_addr, 0xFF40); 917 WRT_REG_DWORD(&reg->iobase_addr, 0xFF40);
819 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 918 dmp_reg = &reg->iobase_window;
820 for (cnt = 0; cnt < 16; cnt++) 919 for (cnt = 0; cnt < 16; cnt++)
821 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 920 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
822 921
823 WRT_REG_DWORD(&reg->iobase_addr, 0xFF50); 922 WRT_REG_DWORD(&reg->iobase_addr, 0xFF50);
824 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 923 dmp_reg = &reg->iobase_window;
825 for (cnt = 0; cnt < 16; cnt++) 924 for (cnt = 0; cnt < 16; cnt++)
826 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 925 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
827 926
828 WRT_REG_DWORD(&reg->iobase_addr, 0xFF60); 927 WRT_REG_DWORD(&reg->iobase_addr, 0xFF60);
829 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 928 dmp_reg = &reg->iobase_window;
830 for (cnt = 0; cnt < 16; cnt++) 929 for (cnt = 0; cnt < 16; cnt++)
831 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 930 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
832 931
833 WRT_REG_DWORD(&reg->iobase_addr, 0xFF70); 932 WRT_REG_DWORD(&reg->iobase_addr, 0xFF70);
834 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 933 dmp_reg = &reg->iobase_window;
835 for (cnt = 0; cnt < 16; cnt++) 934 for (cnt = 0; cnt < 16; cnt++)
836 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 935 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
837 936
838 WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0); 937 WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0);
839 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 938 dmp_reg = &reg->iobase_window;
840 for (cnt = 0; cnt < sizeof(fw->rseq_0_reg) / 4; cnt++) 939 for (cnt = 0; cnt < sizeof(fw->rseq_0_reg) / 4; cnt++)
841 fw->rseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 940 fw->rseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
842 941
843 WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0); 942 WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0);
844 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 943 dmp_reg = &reg->iobase_window;
845 for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++) 944 for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++)
846 fw->rseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 945 fw->rseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
847 946
848 WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0); 947 WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0);
849 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 948 dmp_reg = &reg->iobase_window;
850 for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++) 949 for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++)
851 fw->rseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 950 fw->rseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
852 951
853 /* Command DMA registers. */ 952 /* Command DMA registers. */
854 WRT_REG_DWORD(&reg->iobase_addr, 0x7100); 953 WRT_REG_DWORD(&reg->iobase_addr, 0x7100);
855 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 954 dmp_reg = &reg->iobase_window;
856 for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++) 955 for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++)
857 fw->cmd_dma_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 956 fw->cmd_dma_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
858 957
859 /* Queues. */ 958 /* Queues. */
860 iter_reg = fw->req0_dma_reg; 959 iter_reg = fw->req0_dma_reg;
861 WRT_REG_DWORD(&reg->iobase_addr, 0x7200); 960 WRT_REG_DWORD(&reg->iobase_addr, 0x7200);
862 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 961 dmp_reg = &reg->iobase_window;
863 for (cnt = 0; cnt < 8; cnt++) 962 for (cnt = 0; cnt < 8; cnt++)
864 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 963 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
865 964
866 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 965 dmp_reg = &reg->iobase_q;
867 for (cnt = 0; cnt < 7; cnt++) 966 for (cnt = 0; cnt < 7; cnt++)
868 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 967 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
869 968
870 iter_reg = fw->resp0_dma_reg; 969 iter_reg = fw->resp0_dma_reg;
871 WRT_REG_DWORD(&reg->iobase_addr, 0x7300); 970 WRT_REG_DWORD(&reg->iobase_addr, 0x7300);
872 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 971 dmp_reg = &reg->iobase_window;
873 for (cnt = 0; cnt < 8; cnt++) 972 for (cnt = 0; cnt < 8; cnt++)
874 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 973 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
875 974
876 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 975 dmp_reg = &reg->iobase_q;
877 for (cnt = 0; cnt < 7; cnt++) 976 for (cnt = 0; cnt < 7; cnt++)
878 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 977 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
879 978
880 iter_reg = fw->req1_dma_reg; 979 iter_reg = fw->req1_dma_reg;
881 WRT_REG_DWORD(&reg->iobase_addr, 0x7400); 980 WRT_REG_DWORD(&reg->iobase_addr, 0x7400);
882 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 981 dmp_reg = &reg->iobase_window;
883 for (cnt = 0; cnt < 8; cnt++) 982 for (cnt = 0; cnt < 8; cnt++)
884 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 983 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
885 984
886 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 985 dmp_reg = &reg->iobase_q;
887 for (cnt = 0; cnt < 7; cnt++) 986 for (cnt = 0; cnt < 7; cnt++)
888 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 987 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
889 988
890 /* Transmit DMA registers. */ 989 /* Transmit DMA registers. */
891 iter_reg = fw->xmt0_dma_reg; 990 iter_reg = fw->xmt0_dma_reg;
892 WRT_REG_DWORD(&reg->iobase_addr, 0x7600); 991 WRT_REG_DWORD(&reg->iobase_addr, 0x7600);
893 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 992 dmp_reg = &reg->iobase_window;
894 for (cnt = 0; cnt < 16; cnt++) 993 for (cnt = 0; cnt < 16; cnt++)
895 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 994 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
896 995
897 WRT_REG_DWORD(&reg->iobase_addr, 0x7610); 996 WRT_REG_DWORD(&reg->iobase_addr, 0x7610);
898 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 997 dmp_reg = &reg->iobase_window;
899 for (cnt = 0; cnt < 16; cnt++) 998 for (cnt = 0; cnt < 16; cnt++)
900 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 999 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
901 1000
902 iter_reg = fw->xmt1_dma_reg; 1001 iter_reg = fw->xmt1_dma_reg;
903 WRT_REG_DWORD(&reg->iobase_addr, 0x7620); 1002 WRT_REG_DWORD(&reg->iobase_addr, 0x7620);
904 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1003 dmp_reg = &reg->iobase_window;
905 for (cnt = 0; cnt < 16; cnt++) 1004 for (cnt = 0; cnt < 16; cnt++)
906 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1005 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
907 1006
908 WRT_REG_DWORD(&reg->iobase_addr, 0x7630); 1007 WRT_REG_DWORD(&reg->iobase_addr, 0x7630);
909 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1008 dmp_reg = &reg->iobase_window;
910 for (cnt = 0; cnt < 16; cnt++) 1009 for (cnt = 0; cnt < 16; cnt++)
911 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1010 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
912 1011
913 iter_reg = fw->xmt2_dma_reg; 1012 iter_reg = fw->xmt2_dma_reg;
914 WRT_REG_DWORD(&reg->iobase_addr, 0x7640); 1013 WRT_REG_DWORD(&reg->iobase_addr, 0x7640);
915 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1014 dmp_reg = &reg->iobase_window;
916 for (cnt = 0; cnt < 16; cnt++) 1015 for (cnt = 0; cnt < 16; cnt++)
917 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1016 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
918 1017
919 WRT_REG_DWORD(&reg->iobase_addr, 0x7650); 1018 WRT_REG_DWORD(&reg->iobase_addr, 0x7650);
920 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1019 dmp_reg = &reg->iobase_window;
921 for (cnt = 0; cnt < 16; cnt++) 1020 for (cnt = 0; cnt < 16; cnt++)
922 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1021 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
923 1022
924 iter_reg = fw->xmt3_dma_reg; 1023 iter_reg = fw->xmt3_dma_reg;
925 WRT_REG_DWORD(&reg->iobase_addr, 0x7660); 1024 WRT_REG_DWORD(&reg->iobase_addr, 0x7660);
926 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1025 dmp_reg = &reg->iobase_window;
927 for (cnt = 0; cnt < 16; cnt++) 1026 for (cnt = 0; cnt < 16; cnt++)
928 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1027 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
929 1028
930 WRT_REG_DWORD(&reg->iobase_addr, 0x7670); 1029 WRT_REG_DWORD(&reg->iobase_addr, 0x7670);
931 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1030 dmp_reg = &reg->iobase_window;
932 for (cnt = 0; cnt < 16; cnt++) 1031 for (cnt = 0; cnt < 16; cnt++)
933 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1032 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
934 1033
935 iter_reg = fw->xmt4_dma_reg; 1034 iter_reg = fw->xmt4_dma_reg;
936 WRT_REG_DWORD(&reg->iobase_addr, 0x7680); 1035 WRT_REG_DWORD(&reg->iobase_addr, 0x7680);
937 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1036 dmp_reg = &reg->iobase_window;
938 for (cnt = 0; cnt < 16; cnt++) 1037 for (cnt = 0; cnt < 16; cnt++)
939 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1038 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
940 1039
941 WRT_REG_DWORD(&reg->iobase_addr, 0x7690); 1040 WRT_REG_DWORD(&reg->iobase_addr, 0x7690);
942 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1041 dmp_reg = &reg->iobase_window;
943 for (cnt = 0; cnt < 16; cnt++) 1042 for (cnt = 0; cnt < 16; cnt++)
944 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1043 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
945 1044
946 WRT_REG_DWORD(&reg->iobase_addr, 0x76A0); 1045 WRT_REG_DWORD(&reg->iobase_addr, 0x76A0);
947 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1046 dmp_reg = &reg->iobase_window;
948 for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++) 1047 for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++)
949 fw->xmt_data_dma_reg[cnt] = 1048 fw->xmt_data_dma_reg[cnt] =
950 htonl(RD_REG_DWORD(dmp_reg++)); 1049 htonl(RD_REG_DWORD(dmp_reg++));
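
A second theme of the hunk above: the old code reached register windows by casting the register block to a byte pointer and adding magic offsets (0xC0, 0xE4, 0xF0, 0xFC), while the new code uses named members of struct device_reg_24xx (iobase_window, iobase_q, iobase_select, iobase_sdata), which presumably sit at those same offsets. The sketch below is self-contained and uses an invented demo_regs layout, not the driver's structure, to show why the named-member form addresses the same register while being type-checked and self-documenting.

/*
 * Sketch (not driver code): named register members vs. byte-offset casts.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_regs {
	uint32_t pad1[48];		/* 0x00 .. 0xBF */
	uint32_t iobase_window;		/* 0xC0 */
	uint32_t pad2[8];		/* 0xC4 .. 0xE3 */
	uint32_t iobase_q;		/* 0xE4 */
};

int main(void)
{
	struct demo_regs regs;
	struct demo_regs *reg = &regs;

	/* Old style: cast to bytes and add a magic offset. */
	uint32_t *by_offset = (uint32_t *)((uint8_t *)reg + 0xC0);

	/* New style: the same location, type-checked and self-documenting. */
	uint32_t *by_name = &reg->iobase_window;

	printf("same register: %d (iobase_window at offset 0x%zx)\n",
	    by_offset == by_name, offsetof(struct demo_regs, iobase_window));
	return 0;
}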
@@ -952,221 +1051,221 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
952 /* Receive DMA registers. */ 1051 /* Receive DMA registers. */
953 iter_reg = fw->rcvt0_data_dma_reg; 1052 iter_reg = fw->rcvt0_data_dma_reg;
954 WRT_REG_DWORD(&reg->iobase_addr, 0x7700); 1053 WRT_REG_DWORD(&reg->iobase_addr, 0x7700);
955 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1054 dmp_reg = &reg->iobase_window;
956 for (cnt = 0; cnt < 16; cnt++) 1055 for (cnt = 0; cnt < 16; cnt++)
957 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1056 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
958 1057
959 WRT_REG_DWORD(&reg->iobase_addr, 0x7710); 1058 WRT_REG_DWORD(&reg->iobase_addr, 0x7710);
960 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1059 dmp_reg = &reg->iobase_window;
961 for (cnt = 0; cnt < 16; cnt++) 1060 for (cnt = 0; cnt < 16; cnt++)
962 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1061 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
963 1062
964 iter_reg = fw->rcvt1_data_dma_reg; 1063 iter_reg = fw->rcvt1_data_dma_reg;
965 WRT_REG_DWORD(&reg->iobase_addr, 0x7720); 1064 WRT_REG_DWORD(&reg->iobase_addr, 0x7720);
966 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1065 dmp_reg = &reg->iobase_window;
967 for (cnt = 0; cnt < 16; cnt++) 1066 for (cnt = 0; cnt < 16; cnt++)
968 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1067 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
969 1068
970 WRT_REG_DWORD(&reg->iobase_addr, 0x7730); 1069 WRT_REG_DWORD(&reg->iobase_addr, 0x7730);
971 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1070 dmp_reg = &reg->iobase_window;
972 for (cnt = 0; cnt < 16; cnt++) 1071 for (cnt = 0; cnt < 16; cnt++)
973 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1072 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
974 1073
975 /* RISC registers. */ 1074 /* RISC registers. */
976 iter_reg = fw->risc_gp_reg; 1075 iter_reg = fw->risc_gp_reg;
977 WRT_REG_DWORD(&reg->iobase_addr, 0x0F00); 1076 WRT_REG_DWORD(&reg->iobase_addr, 0x0F00);
978 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1077 dmp_reg = &reg->iobase_window;
979 for (cnt = 0; cnt < 16; cnt++) 1078 for (cnt = 0; cnt < 16; cnt++)
980 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1079 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
981 1080
982 WRT_REG_DWORD(&reg->iobase_addr, 0x0F10); 1081 WRT_REG_DWORD(&reg->iobase_addr, 0x0F10);
983 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1082 dmp_reg = &reg->iobase_window;
984 for (cnt = 0; cnt < 16; cnt++) 1083 for (cnt = 0; cnt < 16; cnt++)
985 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1084 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
986 1085
987 WRT_REG_DWORD(&reg->iobase_addr, 0x0F20); 1086 WRT_REG_DWORD(&reg->iobase_addr, 0x0F20);
988 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1087 dmp_reg = &reg->iobase_window;
989 for (cnt = 0; cnt < 16; cnt++) 1088 for (cnt = 0; cnt < 16; cnt++)
990 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1089 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
991 1090
992 WRT_REG_DWORD(&reg->iobase_addr, 0x0F30); 1091 WRT_REG_DWORD(&reg->iobase_addr, 0x0F30);
993 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1092 dmp_reg = &reg->iobase_window;
994 for (cnt = 0; cnt < 16; cnt++) 1093 for (cnt = 0; cnt < 16; cnt++)
995 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1094 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
996 1095
997 WRT_REG_DWORD(&reg->iobase_addr, 0x0F40); 1096 WRT_REG_DWORD(&reg->iobase_addr, 0x0F40);
998 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1097 dmp_reg = &reg->iobase_window;
999 for (cnt = 0; cnt < 16; cnt++) 1098 for (cnt = 0; cnt < 16; cnt++)
1000 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1099 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1001 1100
1002 WRT_REG_DWORD(&reg->iobase_addr, 0x0F50); 1101 WRT_REG_DWORD(&reg->iobase_addr, 0x0F50);
1003 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1102 dmp_reg = &reg->iobase_window;
1004 for (cnt = 0; cnt < 16; cnt++) 1103 for (cnt = 0; cnt < 16; cnt++)
1005 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1104 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1006 1105
1007 WRT_REG_DWORD(&reg->iobase_addr, 0x0F60); 1106 WRT_REG_DWORD(&reg->iobase_addr, 0x0F60);
1008 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1107 dmp_reg = &reg->iobase_window;
1009 for (cnt = 0; cnt < 16; cnt++) 1108 for (cnt = 0; cnt < 16; cnt++)
1010 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1109 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1011 1110
1012 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70); 1111 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1013 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1112 dmp_reg = &reg->iobase_window;
1014 for (cnt = 0; cnt < 16; cnt++) 1113 for (cnt = 0; cnt < 16; cnt++)
1015 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1114 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1016 1115
1017 /* Local memory controller registers. */ 1116 /* Local memory controller registers. */
1018 iter_reg = fw->lmc_reg; 1117 iter_reg = fw->lmc_reg;
1019 WRT_REG_DWORD(&reg->iobase_addr, 0x3000); 1118 WRT_REG_DWORD(&reg->iobase_addr, 0x3000);
1020 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1119 dmp_reg = &reg->iobase_window;
1021 for (cnt = 0; cnt < 16; cnt++) 1120 for (cnt = 0; cnt < 16; cnt++)
1022 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1121 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1023 1122
1024 WRT_REG_DWORD(&reg->iobase_addr, 0x3010); 1123 WRT_REG_DWORD(&reg->iobase_addr, 0x3010);
1025 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1124 dmp_reg = &reg->iobase_window;
1026 for (cnt = 0; cnt < 16; cnt++) 1125 for (cnt = 0; cnt < 16; cnt++)
1027 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1126 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1028 1127
1029 WRT_REG_DWORD(&reg->iobase_addr, 0x3020); 1128 WRT_REG_DWORD(&reg->iobase_addr, 0x3020);
1030 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1129 dmp_reg = &reg->iobase_window;
1031 for (cnt = 0; cnt < 16; cnt++) 1130 for (cnt = 0; cnt < 16; cnt++)
1032 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1131 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1033 1132
1034 WRT_REG_DWORD(&reg->iobase_addr, 0x3030); 1133 WRT_REG_DWORD(&reg->iobase_addr, 0x3030);
1035 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1134 dmp_reg = &reg->iobase_window;
1036 for (cnt = 0; cnt < 16; cnt++) 1135 for (cnt = 0; cnt < 16; cnt++)
1037 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1136 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1038 1137
1039 WRT_REG_DWORD(&reg->iobase_addr, 0x3040); 1138 WRT_REG_DWORD(&reg->iobase_addr, 0x3040);
1040 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1139 dmp_reg = &reg->iobase_window;
1041 for (cnt = 0; cnt < 16; cnt++) 1140 for (cnt = 0; cnt < 16; cnt++)
1042 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1141 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1043 1142
1044 WRT_REG_DWORD(&reg->iobase_addr, 0x3050); 1143 WRT_REG_DWORD(&reg->iobase_addr, 0x3050);
1045 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1144 dmp_reg = &reg->iobase_window;
1046 for (cnt = 0; cnt < 16; cnt++) 1145 for (cnt = 0; cnt < 16; cnt++)
1047 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1146 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1048 1147
1049 WRT_REG_DWORD(&reg->iobase_addr, 0x3060); 1148 WRT_REG_DWORD(&reg->iobase_addr, 0x3060);
1050 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1149 dmp_reg = &reg->iobase_window;
1051 for (cnt = 0; cnt < 16; cnt++) 1150 for (cnt = 0; cnt < 16; cnt++)
1052 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1151 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1053 1152
1054 /* Fibre Protocol Module registers. */ 1153 /* Fibre Protocol Module registers. */
1055 iter_reg = fw->fpm_hdw_reg; 1154 iter_reg = fw->fpm_hdw_reg;
1056 WRT_REG_DWORD(&reg->iobase_addr, 0x4000); 1155 WRT_REG_DWORD(&reg->iobase_addr, 0x4000);
1057 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1156 dmp_reg = &reg->iobase_window;
1058 for (cnt = 0; cnt < 16; cnt++) 1157 for (cnt = 0; cnt < 16; cnt++)
1059 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1158 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1060 1159
1061 WRT_REG_DWORD(&reg->iobase_addr, 0x4010); 1160 WRT_REG_DWORD(&reg->iobase_addr, 0x4010);
1062 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1161 dmp_reg = &reg->iobase_window;
1063 for (cnt = 0; cnt < 16; cnt++) 1162 for (cnt = 0; cnt < 16; cnt++)
1064 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1163 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1065 1164
1066 WRT_REG_DWORD(&reg->iobase_addr, 0x4020); 1165 WRT_REG_DWORD(&reg->iobase_addr, 0x4020);
1067 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1166 dmp_reg = &reg->iobase_window;
1068 for (cnt = 0; cnt < 16; cnt++) 1167 for (cnt = 0; cnt < 16; cnt++)
1069 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1168 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1070 1169
1071 WRT_REG_DWORD(&reg->iobase_addr, 0x4030); 1170 WRT_REG_DWORD(&reg->iobase_addr, 0x4030);
1072 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1171 dmp_reg = &reg->iobase_window;
1073 for (cnt = 0; cnt < 16; cnt++) 1172 for (cnt = 0; cnt < 16; cnt++)
1074 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1173 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1075 1174
1076 WRT_REG_DWORD(&reg->iobase_addr, 0x4040); 1175 WRT_REG_DWORD(&reg->iobase_addr, 0x4040);
1077 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1176 dmp_reg = &reg->iobase_window;
1078 for (cnt = 0; cnt < 16; cnt++) 1177 for (cnt = 0; cnt < 16; cnt++)
1079 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1178 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1080 1179
1081 WRT_REG_DWORD(&reg->iobase_addr, 0x4050); 1180 WRT_REG_DWORD(&reg->iobase_addr, 0x4050);
1082 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1181 dmp_reg = &reg->iobase_window;
1083 for (cnt = 0; cnt < 16; cnt++) 1182 for (cnt = 0; cnt < 16; cnt++)
1084 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1183 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1085 1184
1086 WRT_REG_DWORD(&reg->iobase_addr, 0x4060); 1185 WRT_REG_DWORD(&reg->iobase_addr, 0x4060);
1087 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1186 dmp_reg = &reg->iobase_window;
1088 for (cnt = 0; cnt < 16; cnt++) 1187 for (cnt = 0; cnt < 16; cnt++)
1089 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1188 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1090 1189
1091 WRT_REG_DWORD(&reg->iobase_addr, 0x4070); 1190 WRT_REG_DWORD(&reg->iobase_addr, 0x4070);
1092 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1191 dmp_reg = &reg->iobase_window;
1093 for (cnt = 0; cnt < 16; cnt++) 1192 for (cnt = 0; cnt < 16; cnt++)
1094 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1193 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1095 1194
1096 WRT_REG_DWORD(&reg->iobase_addr, 0x4080); 1195 WRT_REG_DWORD(&reg->iobase_addr, 0x4080);
1097 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1196 dmp_reg = &reg->iobase_window;
1098 for (cnt = 0; cnt < 16; cnt++) 1197 for (cnt = 0; cnt < 16; cnt++)
1099 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1198 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1100 1199
1101 WRT_REG_DWORD(&reg->iobase_addr, 0x4090); 1200 WRT_REG_DWORD(&reg->iobase_addr, 0x4090);
1102 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1201 dmp_reg = &reg->iobase_window;
1103 for (cnt = 0; cnt < 16; cnt++) 1202 for (cnt = 0; cnt < 16; cnt++)
1104 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1203 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1105 1204
1106 WRT_REG_DWORD(&reg->iobase_addr, 0x40A0); 1205 WRT_REG_DWORD(&reg->iobase_addr, 0x40A0);
1107 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1206 dmp_reg = &reg->iobase_window;
1108 for (cnt = 0; cnt < 16; cnt++) 1207 for (cnt = 0; cnt < 16; cnt++)
1109 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1208 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1110 1209
1111 WRT_REG_DWORD(&reg->iobase_addr, 0x40B0); 1210 WRT_REG_DWORD(&reg->iobase_addr, 0x40B0);
1112 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1211 dmp_reg = &reg->iobase_window;
1113 for (cnt = 0; cnt < 16; cnt++) 1212 for (cnt = 0; cnt < 16; cnt++)
1114 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1213 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1115 1214
1116 /* Frame Buffer registers. */ 1215 /* Frame Buffer registers. */
1117 iter_reg = fw->fb_hdw_reg; 1216 iter_reg = fw->fb_hdw_reg;
1118 WRT_REG_DWORD(&reg->iobase_addr, 0x6000); 1217 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
1119 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1218 dmp_reg = &reg->iobase_window;
1120 for (cnt = 0; cnt < 16; cnt++) 1219 for (cnt = 0; cnt < 16; cnt++)
1121 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1220 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1122 1221
1123 WRT_REG_DWORD(&reg->iobase_addr, 0x6010); 1222 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
1124 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1223 dmp_reg = &reg->iobase_window;
1125 for (cnt = 0; cnt < 16; cnt++) 1224 for (cnt = 0; cnt < 16; cnt++)
1126 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1225 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1127 1226
1128 WRT_REG_DWORD(&reg->iobase_addr, 0x6020); 1227 WRT_REG_DWORD(&reg->iobase_addr, 0x6020);
1129 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1228 dmp_reg = &reg->iobase_window;
1130 for (cnt = 0; cnt < 16; cnt++) 1229 for (cnt = 0; cnt < 16; cnt++)
1131 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1230 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1132 1231
1133 WRT_REG_DWORD(&reg->iobase_addr, 0x6030); 1232 WRT_REG_DWORD(&reg->iobase_addr, 0x6030);
1134 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1233 dmp_reg = &reg->iobase_window;
1135 for (cnt = 0; cnt < 16; cnt++) 1234 for (cnt = 0; cnt < 16; cnt++)
1136 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1235 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1137 1236
1138 WRT_REG_DWORD(&reg->iobase_addr, 0x6040); 1237 WRT_REG_DWORD(&reg->iobase_addr, 0x6040);
1139 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1238 dmp_reg = &reg->iobase_window;
1140 for (cnt = 0; cnt < 16; cnt++) 1239 for (cnt = 0; cnt < 16; cnt++)
1141 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1240 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1142 1241
1143 WRT_REG_DWORD(&reg->iobase_addr, 0x6100); 1242 WRT_REG_DWORD(&reg->iobase_addr, 0x6100);
1144 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1243 dmp_reg = &reg->iobase_window;
1145 for (cnt = 0; cnt < 16; cnt++) 1244 for (cnt = 0; cnt < 16; cnt++)
1146 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1245 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1147 1246
1148 WRT_REG_DWORD(&reg->iobase_addr, 0x6130); 1247 WRT_REG_DWORD(&reg->iobase_addr, 0x6130);
1149 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1248 dmp_reg = &reg->iobase_window;
1150 for (cnt = 0; cnt < 16; cnt++) 1249 for (cnt = 0; cnt < 16; cnt++)
1151 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1250 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1152 1251
1153 WRT_REG_DWORD(&reg->iobase_addr, 0x6150); 1252 WRT_REG_DWORD(&reg->iobase_addr, 0x6150);
1154 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1253 dmp_reg = &reg->iobase_window;
1155 for (cnt = 0; cnt < 16; cnt++) 1254 for (cnt = 0; cnt < 16; cnt++)
1156 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1255 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1157 1256
1158 WRT_REG_DWORD(&reg->iobase_addr, 0x6170); 1257 WRT_REG_DWORD(&reg->iobase_addr, 0x6170);
1159 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1258 dmp_reg = &reg->iobase_window;
1160 for (cnt = 0; cnt < 16; cnt++) 1259 for (cnt = 0; cnt < 16; cnt++)
1161 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1260 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1162 1261
1163 WRT_REG_DWORD(&reg->iobase_addr, 0x6190); 1262 WRT_REG_DWORD(&reg->iobase_addr, 0x6190);
1164 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1263 dmp_reg = &reg->iobase_window;
1165 for (cnt = 0; cnt < 16; cnt++) 1264 for (cnt = 0; cnt < 16; cnt++)
1166 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1265 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1167 1266
1168 WRT_REG_DWORD(&reg->iobase_addr, 0x61B0); 1267 WRT_REG_DWORD(&reg->iobase_addr, 0x61B0);
1169 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1268 dmp_reg = &reg->iobase_window;
1170 for (cnt = 0; cnt < 16; cnt++) 1269 for (cnt = 0; cnt < 16; cnt++)
1171 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1270 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1172 1271
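
One detail visible throughout these dump routines: every captured register and memory word is passed through htonl()/htons() before it is stored, so the dump buffer ends up big-endian regardless of the host CPU, presumably so the dump format stays stable for an offline decoder on any architecture. A tiny self-contained illustration follows; the value is arbitrary.

/* Sketch: why the dump stores htonl()-converted words. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t live   = 0x12345678;	/* value as read from a register */
	uint32_t stored = htonl(live);	/* byte order used in the dump buffer */

	/* An offline decoder simply reverses it, on any architecture. */
	printf("decoded from dump: 0x%08x\n", ntohl(stored));
	return 0;
}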
@@ -1187,10 +1286,10 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1187 1286
1188 udelay(100); 1287 udelay(100);
1189 /* Wait for firmware to complete NVRAM accesses. */ 1288 /* Wait for firmware to complete NVRAM accesses. */
1190 mb[0] = (uint32_t) RD_REG_WORD(&reg->mailbox0); 1289 mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1191 for (cnt = 10000 ; cnt && mb[0]; cnt--) { 1290 for (cnt = 10000 ; cnt && mb0; cnt--) {
1192 udelay(5); 1291 udelay(5);
1193 mb[0] = (uint32_t) RD_REG_WORD(&reg->mailbox0); 1292 mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1194 barrier(); 1293 barrier();
1195 } 1294 }
1196 1295
@@ -1214,110 +1313,717 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1214 rval = QLA_FUNCTION_TIMEOUT; 1313 rval = QLA_FUNCTION_TIMEOUT;
1215 } 1314 }
1216 1315
1217 /* Memory. */ 1316 if (rval == QLA_SUCCESS)
1317 rval = qla2xxx_dump_memory(ha, fw->code_ram,
1318 sizeof(fw->code_ram), fw->ext_mem, &nxt);
1319
1218 if (rval == QLA_SUCCESS) { 1320 if (rval == QLA_SUCCESS) {
1219 /* Code RAM. */ 1321 nxt = qla2xxx_copy_queues(ha, nxt);
1220 risc_address = 0x20000; 1322 if (ha->eft)
1221 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED); 1323 memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
1222 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1223 } 1324 }
1224 for (cnt = 0; cnt < sizeof(fw->code_ram) / 4 && rval == QLA_SUCCESS;
1225 cnt++, risc_address++) {
1226 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
1227 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
1228 RD_REG_WORD(&reg->mailbox8);
1229 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
1230 1325
1231 for (timer = 6000000; timer; timer--) { 1326 if (rval != QLA_SUCCESS) {
1232 /* Check for pending interrupts. */ 1327 qla_printk(KERN_WARNING, ha,
1233 stat = RD_REG_DWORD(&reg->host_status); 1328 "Failed to dump firmware (%x)!!!\n", rval);
1234 if (stat & HSRX_RISC_INT) { 1329 ha->fw_dumped = 0;
1235 stat &= 0xff;
1236 1330
1237 if (stat == 0x1 || stat == 0x2 || 1331 } else {
1238 stat == 0x10 || stat == 0x11) { 1332 qla_printk(KERN_INFO, ha,
1239 set_bit(MBX_INTERRUPT, 1333 "Firmware dump saved to temp buffer (%ld/%p).\n",
1240 &ha->mbx_cmd_flags); 1334 ha->host_no, ha->fw_dump);
1335 ha->fw_dumped = 1;
1336 }
1241 1337
1242 mb[0] = RD_REG_WORD(&reg->mailbox0); 1338qla24xx_fw_dump_failed:
1243 mb[2] = RD_REG_WORD(&reg->mailbox2); 1339 if (!hardware_locked)
1244 mb[3] = RD_REG_WORD(&reg->mailbox3); 1340 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1341}
1245 1342
1246 WRT_REG_DWORD(&reg->hccr, 1343void
1247 HCCRX_CLR_RISC_INT); 1344qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1248 RD_REG_DWORD(&reg->hccr); 1345{
1249 break; 1346 int rval;
1250 } 1347 uint32_t cnt;
1348 uint32_t risc_address;
1349 uint16_t mb0, wd;
1251 1350
1252 /* Clear this intr; it wasn't a mailbox intr */ 1351 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1253 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1352 uint32_t __iomem *dmp_reg;
1254 RD_REG_DWORD(&reg->hccr); 1353 uint32_t *iter_reg;
1255 } 1354 uint16_t __iomem *mbx_reg;
1256 udelay(5); 1355 unsigned long flags;
1257 } 1356 struct qla25xx_fw_dump *fw;
1357 uint32_t ext_mem_cnt;
1358 void *nxt;
1258 1359
1259 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 1360 risc_address = ext_mem_cnt = 0;
1260 rval = mb[0] & MBS_MASK; 1361 flags = 0;
1261 fw->code_ram[cnt] = htonl((mb[3] << 16) | mb[2]); 1362
1262 } else { 1363 if (!hardware_locked)
1263 rval = QLA_FUNCTION_FAILED; 1364 spin_lock_irqsave(&ha->hardware_lock, flags);
1365
1366 if (!ha->fw_dump) {
1367 qla_printk(KERN_WARNING, ha,
1368 "No buffer available for dump!!!\n");
1369 goto qla25xx_fw_dump_failed;
1370 }
1371
1372 if (ha->fw_dumped) {
1373 qla_printk(KERN_WARNING, ha,
1374 "Firmware has been previously dumped (%p) -- ignoring "
1375 "request...\n", ha->fw_dump);
1376 goto qla25xx_fw_dump_failed;
1377 }
1378 fw = &ha->fw_dump->isp.isp25;
1379 qla2xxx_prep_dump(ha, ha->fw_dump);
1380
1381 rval = QLA_SUCCESS;
1382 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1383
1384 /* Pause RISC. */
1385 if ((RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0) {
1386 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET |
1387 HCCRX_CLR_HOST_INT);
1388 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
1389 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
1390 for (cnt = 30000;
1391 (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 &&
1392 rval == QLA_SUCCESS; cnt--) {
1393 if (cnt)
1394 udelay(100);
1395 else
1396 rval = QLA_FUNCTION_TIMEOUT;
1264 } 1397 }
1265 } 1398 }
1266 1399
1267 if (rval == QLA_SUCCESS) { 1400 if (rval == QLA_SUCCESS) {
1268 /* External Memory. */ 1401 /* Host interface registers. */
1269 risc_address = 0x100000; 1402 dmp_reg = (uint32_t __iomem *)(reg + 0);
1270 ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1; 1403 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1271 WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED); 1404 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1272 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1273 }
1274 for (cnt = 0; cnt < ext_mem_cnt && rval == QLA_SUCCESS;
1275 cnt++, risc_address++) {
1276 WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
1277 WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
1278 RD_REG_WORD(&reg->mailbox8);
1279 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
1280 1405
1281 for (timer = 6000000; timer; timer--) { 1406 /* Disable interrupts. */
1282 /* Check for pending interrupts. */ 1407 WRT_REG_DWORD(&reg->ictrl, 0);
1283 stat = RD_REG_DWORD(&reg->host_status); 1408 RD_REG_DWORD(&reg->ictrl);
1284 if (stat & HSRX_RISC_INT) {
1285 stat &= 0xff;
1286 1409
1287 if (stat == 0x1 || stat == 0x2 || 1410 /* Shadow registers. */
1288 stat == 0x10 || stat == 0x11) { 1411 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1289 set_bit(MBX_INTERRUPT, 1412 RD_REG_DWORD(&reg->iobase_addr);
1290 &ha->mbx_cmd_flags); 1413 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1414 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1291 1415
1292 mb[0] = RD_REG_WORD(&reg->mailbox0); 1416 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1293 mb[2] = RD_REG_WORD(&reg->mailbox2); 1417 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1294 mb[3] = RD_REG_WORD(&reg->mailbox3);
1295 1418
1296 WRT_REG_DWORD(&reg->hccr, 1419 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1297 HCCRX_CLR_RISC_INT); 1420 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1298 RD_REG_DWORD(&reg->hccr);
1299 break;
1300 }
1301 1421
1302 /* Clear this intr; it wasn't a mailbox intr */ 1422 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1303 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1423 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1304 RD_REG_DWORD(&reg->hccr); 1424
1305 } 1425 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1426 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1427
1428 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1429 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1430
1431 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1432 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1433
1434 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1435 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1436
1437 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1438 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1439
1440 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1441 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1442
1443 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1444 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1445
1446 /* RISC I/O register. */
1447 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1448 RD_REG_DWORD(&reg->iobase_addr);
1449 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1450
1451 /* Mailbox registers. */
1452 mbx_reg = &reg->mailbox0;
1453 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1454 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1455
1456 /* Transfer sequence registers. */
1457 iter_reg = fw->xseq_gp_reg;
1458 WRT_REG_DWORD(&reg->iobase_addr, 0xBF00);
1459 dmp_reg = &reg->iobase_window;
1460 for (cnt = 0; cnt < 16; cnt++)
1461 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1462
1463 WRT_REG_DWORD(&reg->iobase_addr, 0xBF10);
1464 dmp_reg = &reg->iobase_window;
1465 for (cnt = 0; cnt < 16; cnt++)
1466 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1467
1468 WRT_REG_DWORD(&reg->iobase_addr, 0xBF20);
1469 dmp_reg = &reg->iobase_window;
1470 for (cnt = 0; cnt < 16; cnt++)
1471 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1472
1473 WRT_REG_DWORD(&reg->iobase_addr, 0xBF30);
1474 dmp_reg = &reg->iobase_window;
1475 for (cnt = 0; cnt < 16; cnt++)
1476 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1477
1478 WRT_REG_DWORD(&reg->iobase_addr, 0xBF40);
1479 dmp_reg = &reg->iobase_window;
1480 for (cnt = 0; cnt < 16; cnt++)
1481 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1482
1483 WRT_REG_DWORD(&reg->iobase_addr, 0xBF50);
1484 dmp_reg = &reg->iobase_window;
1485 for (cnt = 0; cnt < 16; cnt++)
1486 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1487
1488 WRT_REG_DWORD(&reg->iobase_addr, 0xBF60);
1489 dmp_reg = &reg->iobase_window;
1490 for (cnt = 0; cnt < 16; cnt++)
1491 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1492
1493 WRT_REG_DWORD(&reg->iobase_addr, 0xBF70);
1494 dmp_reg = &reg->iobase_window;
1495 for (cnt = 0; cnt < 16; cnt++)
1496 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1497
1498 iter_reg = fw->xseq_0_reg;
1499 WRT_REG_DWORD(&reg->iobase_addr, 0xBFC0);
1500 dmp_reg = &reg->iobase_window;
1501 for (cnt = 0; cnt < 16; cnt++)
1502 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1503
1504 WRT_REG_DWORD(&reg->iobase_addr, 0xBFD0);
1505 dmp_reg = &reg->iobase_window;
1506 for (cnt = 0; cnt < 16; cnt++)
1507 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1508
1509 WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0);
1510 dmp_reg = &reg->iobase_window;
1511 for (cnt = 0; cnt < 16; cnt++)
1512 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1513
1514 WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0);
1515 dmp_reg = &reg->iobase_window;
1516 for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++)
1517 fw->xseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1518
1519 /* Receive sequence registers. */
1520 iter_reg = fw->rseq_gp_reg;
1521 WRT_REG_DWORD(&reg->iobase_addr, 0xFF00);
1522 dmp_reg = &reg->iobase_window;
1523 for (cnt = 0; cnt < 16; cnt++)
1524 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1525
1526 WRT_REG_DWORD(&reg->iobase_addr, 0xFF10);
1527 dmp_reg = &reg->iobase_window;
1528 for (cnt = 0; cnt < 16; cnt++)
1529 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1530
1531 WRT_REG_DWORD(&reg->iobase_addr, 0xFF20);
1532 dmp_reg = &reg->iobase_window;
1533 for (cnt = 0; cnt < 16; cnt++)
1534 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1535
1536 WRT_REG_DWORD(&reg->iobase_addr, 0xFF30);
1537 dmp_reg = &reg->iobase_window;
1538 for (cnt = 0; cnt < 16; cnt++)
1539 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1540
1541 WRT_REG_DWORD(&reg->iobase_addr, 0xFF40);
1542 dmp_reg = &reg->iobase_window;
1543 for (cnt = 0; cnt < 16; cnt++)
1544 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1545
1546 WRT_REG_DWORD(&reg->iobase_addr, 0xFF50);
1547 dmp_reg = &reg->iobase_window;
1548 for (cnt = 0; cnt < 16; cnt++)
1549 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1550
1551 WRT_REG_DWORD(&reg->iobase_addr, 0xFF60);
1552 dmp_reg = &reg->iobase_window;
1553 for (cnt = 0; cnt < 16; cnt++)
1554 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1555
1556 WRT_REG_DWORD(&reg->iobase_addr, 0xFF70);
1557 dmp_reg = &reg->iobase_window;
1558 for (cnt = 0; cnt < 16; cnt++)
1559 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1560
1561 iter_reg = fw->rseq_0_reg;
1562 WRT_REG_DWORD(&reg->iobase_addr, 0xFFC0);
1563 dmp_reg = &reg->iobase_window;
1564 for (cnt = 0; cnt < 16; cnt++)
1565 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1566
1567 WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0);
1568 dmp_reg = &reg->iobase_window;
1569 for (cnt = 0; cnt < 16; cnt++)
1570 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1571
1572 WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0);
1573 dmp_reg = &reg->iobase_window;
1574 for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++)
1575 fw->rseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1576
1577 WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0);
1578 dmp_reg = &reg->iobase_window;
1579 for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++)
1580 fw->rseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1581
1582 /* Auxiliary sequence registers. */
1583 iter_reg = fw->aseq_gp_reg;
1584 WRT_REG_DWORD(&reg->iobase_addr, 0xB000);
1585 dmp_reg = &reg->iobase_window;
1586 for (cnt = 0; cnt < 16; cnt++)
1587 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1588
1589 WRT_REG_DWORD(&reg->iobase_addr, 0xB010);
1590 dmp_reg = &reg->iobase_window;
1591 for (cnt = 0; cnt < 16; cnt++)
1592 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1593
1594 WRT_REG_DWORD(&reg->iobase_addr, 0xB020);
1595 dmp_reg = &reg->iobase_window;
1596 for (cnt = 0; cnt < 16; cnt++)
1597 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1598
1599 WRT_REG_DWORD(&reg->iobase_addr, 0xB030);
1600 dmp_reg = &reg->iobase_window;
1601 for (cnt = 0; cnt < 16; cnt++)
1602 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1603
1604 WRT_REG_DWORD(&reg->iobase_addr, 0xB040);
1605 dmp_reg = &reg->iobase_window;
1606 for (cnt = 0; cnt < 16; cnt++)
1607 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1608
1609 WRT_REG_DWORD(&reg->iobase_addr, 0xB050);
1610 dmp_reg = &reg->iobase_window;
1611 for (cnt = 0; cnt < 16; cnt++)
1612 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1613
1614 WRT_REG_DWORD(&reg->iobase_addr, 0xB060);
1615 dmp_reg = &reg->iobase_window;
1616 for (cnt = 0; cnt < 16; cnt++)
1617 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1618
1619 WRT_REG_DWORD(&reg->iobase_addr, 0xB070);
1620 dmp_reg = &reg->iobase_window;
1621 for (cnt = 0; cnt < 16; cnt++)
1622 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1623
1624 iter_reg = fw->aseq_0_reg;
1625 WRT_REG_DWORD(&reg->iobase_addr, 0xB0C0);
1626 dmp_reg = &reg->iobase_window;
1627 for (cnt = 0; cnt < 16; cnt++)
1628 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1629
1630 WRT_REG_DWORD(&reg->iobase_addr, 0xB0D0);
1631 dmp_reg = &reg->iobase_window;
1632 for (cnt = 0; cnt < 16; cnt++)
1633 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1634
1635 WRT_REG_DWORD(&reg->iobase_addr, 0xB0E0);
1636 dmp_reg = &reg->iobase_window;
1637 for (cnt = 0; cnt < sizeof(fw->aseq_1_reg) / 4; cnt++)
1638 fw->aseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1639
1640 WRT_REG_DWORD(&reg->iobase_addr, 0xB0F0);
1641 dmp_reg = &reg->iobase_window;
1642 for (cnt = 0; cnt < sizeof(fw->aseq_2_reg) / 4; cnt++)
1643 fw->aseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1644
1645 /* Command DMA registers. */
1646 WRT_REG_DWORD(&reg->iobase_addr, 0x7100);
1647 dmp_reg = &reg->iobase_window;
1648 for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++)
1649 fw->cmd_dma_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1650
1651 /* Queues. */
1652 iter_reg = fw->req0_dma_reg;
1653 WRT_REG_DWORD(&reg->iobase_addr, 0x7200);
1654 dmp_reg = &reg->iobase_window;
1655 for (cnt = 0; cnt < 8; cnt++)
1656 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1657
1658 dmp_reg = &reg->iobase_q;
1659 for (cnt = 0; cnt < 7; cnt++)
1660 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1661
1662 iter_reg = fw->resp0_dma_reg;
1663 WRT_REG_DWORD(&reg->iobase_addr, 0x7300);
1664 dmp_reg = &reg->iobase_window;
1665 for (cnt = 0; cnt < 8; cnt++)
1666 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1667
1668 dmp_reg = &reg->iobase_q;
1669 for (cnt = 0; cnt < 7; cnt++)
1670 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1671
1672 iter_reg = fw->req1_dma_reg;
1673 WRT_REG_DWORD(&reg->iobase_addr, 0x7400);
1674 dmp_reg = &reg->iobase_window;
1675 for (cnt = 0; cnt < 8; cnt++)
1676 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1677
1678 dmp_reg = &reg->iobase_q;
1679 for (cnt = 0; cnt < 7; cnt++)
1680 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1681
1682 /* Transmit DMA registers. */
1683 iter_reg = fw->xmt0_dma_reg;
1684 WRT_REG_DWORD(&reg->iobase_addr, 0x7600);
1685 dmp_reg = &reg->iobase_window;
1686 for (cnt = 0; cnt < 16; cnt++)
1687 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1688
1689 WRT_REG_DWORD(&reg->iobase_addr, 0x7610);
1690 dmp_reg = &reg->iobase_window;
1691 for (cnt = 0; cnt < 16; cnt++)
1692 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1693
1694 iter_reg = fw->xmt1_dma_reg;
1695 WRT_REG_DWORD(&reg->iobase_addr, 0x7620);
1696 dmp_reg = &reg->iobase_window;
1697 for (cnt = 0; cnt < 16; cnt++)
1698 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1699
1700 WRT_REG_DWORD(&reg->iobase_addr, 0x7630);
1701 dmp_reg = &reg->iobase_window;
1702 for (cnt = 0; cnt < 16; cnt++)
1703 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1704
1705 iter_reg = fw->xmt2_dma_reg;
1706 WRT_REG_DWORD(&reg->iobase_addr, 0x7640);
1707 dmp_reg = &reg->iobase_window;
1708 for (cnt = 0; cnt < 16; cnt++)
1709 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1710
1711 WRT_REG_DWORD(&reg->iobase_addr, 0x7650);
1712 dmp_reg = &reg->iobase_window;
1713 for (cnt = 0; cnt < 16; cnt++)
1714 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1715
1716 iter_reg = fw->xmt3_dma_reg;
1717 WRT_REG_DWORD(&reg->iobase_addr, 0x7660);
1718 dmp_reg = &reg->iobase_window;
1719 for (cnt = 0; cnt < 16; cnt++)
1720 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1721
1722 WRT_REG_DWORD(&reg->iobase_addr, 0x7670);
1723 dmp_reg = &reg->iobase_window;
1724 for (cnt = 0; cnt < 16; cnt++)
1725 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1726
1727 iter_reg = fw->xmt4_dma_reg;
1728 WRT_REG_DWORD(&reg->iobase_addr, 0x7680);
1729 dmp_reg = &reg->iobase_window;
1730 for (cnt = 0; cnt < 16; cnt++)
1731 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1732
1733 WRT_REG_DWORD(&reg->iobase_addr, 0x7690);
1734 dmp_reg = &reg->iobase_window;
1735 for (cnt = 0; cnt < 16; cnt++)
1736 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1737
1738 WRT_REG_DWORD(&reg->iobase_addr, 0x76A0);
1739 dmp_reg = &reg->iobase_window;
1740 for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++)
1741 fw->xmt_data_dma_reg[cnt] =
1742 htonl(RD_REG_DWORD(dmp_reg++));
1743
1744 /* Receive DMA registers. */
1745 iter_reg = fw->rcvt0_data_dma_reg;
1746 WRT_REG_DWORD(&reg->iobase_addr, 0x7700);
1747 dmp_reg = &reg->iobase_window;
1748 for (cnt = 0; cnt < 16; cnt++)
1749 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1750
1751 WRT_REG_DWORD(&reg->iobase_addr, 0x7710);
1752 dmp_reg = &reg->iobase_window;
1753 for (cnt = 0; cnt < 16; cnt++)
1754 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1755
1756 iter_reg = fw->rcvt1_data_dma_reg;
1757 WRT_REG_DWORD(&reg->iobase_addr, 0x7720);
1758 dmp_reg = &reg->iobase_window;
1759 for (cnt = 0; cnt < 16; cnt++)
1760 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1761
1762 WRT_REG_DWORD(&reg->iobase_addr, 0x7730);
1763 dmp_reg = &reg->iobase_window;
1764 for (cnt = 0; cnt < 16; cnt++)
1765 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1766
1767 /* RISC registers. */
1768 iter_reg = fw->risc_gp_reg;
1769 WRT_REG_DWORD(&reg->iobase_addr, 0x0F00);
1770 dmp_reg = &reg->iobase_window;
1771 for (cnt = 0; cnt < 16; cnt++)
1772 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1773
1774 WRT_REG_DWORD(&reg->iobase_addr, 0x0F10);
1775 dmp_reg = &reg->iobase_window;
1776 for (cnt = 0; cnt < 16; cnt++)
1777 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1778
1779 WRT_REG_DWORD(&reg->iobase_addr, 0x0F20);
1780 dmp_reg = &reg->iobase_window;
1781 for (cnt = 0; cnt < 16; cnt++)
1782 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1783
1784 WRT_REG_DWORD(&reg->iobase_addr, 0x0F30);
1785 dmp_reg = &reg->iobase_window;
1786 for (cnt = 0; cnt < 16; cnt++)
1787 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1788
1789 WRT_REG_DWORD(&reg->iobase_addr, 0x0F40);
1790 dmp_reg = &reg->iobase_window;
1791 for (cnt = 0; cnt < 16; cnt++)
1792 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1793
1794 WRT_REG_DWORD(&reg->iobase_addr, 0x0F50);
1795 dmp_reg = &reg->iobase_window;
1796 for (cnt = 0; cnt < 16; cnt++)
1797 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1798
1799 WRT_REG_DWORD(&reg->iobase_addr, 0x0F60);
1800 dmp_reg = &reg->iobase_window;
1801 for (cnt = 0; cnt < 16; cnt++)
1802 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1803
1804 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1805 dmp_reg = &reg->iobase_window;
1806 for (cnt = 0; cnt < 16; cnt++)
1807 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1808
1809 /* Local memory controller registers. */
1810 iter_reg = fw->lmc_reg;
1811 WRT_REG_DWORD(&reg->iobase_addr, 0x3000);
1812 dmp_reg = &reg->iobase_window;
1813 for (cnt = 0; cnt < 16; cnt++)
1814 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1815
1816 WRT_REG_DWORD(&reg->iobase_addr, 0x3010);
1817 dmp_reg = &reg->iobase_window;
1818 for (cnt = 0; cnt < 16; cnt++)
1819 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1820
1821 WRT_REG_DWORD(&reg->iobase_addr, 0x3020);
1822 dmp_reg = &reg->iobase_window;
1823 for (cnt = 0; cnt < 16; cnt++)
1824 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1825
1826 WRT_REG_DWORD(&reg->iobase_addr, 0x3030);
1827 dmp_reg = &reg->iobase_window;
1828 for (cnt = 0; cnt < 16; cnt++)
1829 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1830
1831 WRT_REG_DWORD(&reg->iobase_addr, 0x3040);
1832 dmp_reg = &reg->iobase_window;
1833 for (cnt = 0; cnt < 16; cnt++)
1834 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1835
1836 WRT_REG_DWORD(&reg->iobase_addr, 0x3050);
1837 dmp_reg = &reg->iobase_window;
1838 for (cnt = 0; cnt < 16; cnt++)
1839 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1840
1841 WRT_REG_DWORD(&reg->iobase_addr, 0x3060);
1842 dmp_reg = &reg->iobase_window;
1843 for (cnt = 0; cnt < 16; cnt++)
1844 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1845
1846 WRT_REG_DWORD(&reg->iobase_addr, 0x3070);
1847 dmp_reg = &reg->iobase_window;
1848 for (cnt = 0; cnt < 16; cnt++)
1849 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1850
1851 /* Fibre Protocol Module registers. */
1852 iter_reg = fw->fpm_hdw_reg;
1853 WRT_REG_DWORD(&reg->iobase_addr, 0x4000);
1854 dmp_reg = &reg->iobase_window;
1855 for (cnt = 0; cnt < 16; cnt++)
1856 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1857
1858 WRT_REG_DWORD(&reg->iobase_addr, 0x4010);
1859 dmp_reg = &reg->iobase_window;
1860 for (cnt = 0; cnt < 16; cnt++)
1861 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1862
1863 WRT_REG_DWORD(&reg->iobase_addr, 0x4020);
1864 dmp_reg = &reg->iobase_window;
1865 for (cnt = 0; cnt < 16; cnt++)
1866 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1867
1868 WRT_REG_DWORD(&reg->iobase_addr, 0x4030);
1869 dmp_reg = &reg->iobase_window;
1870 for (cnt = 0; cnt < 16; cnt++)
1871 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1872
1873 WRT_REG_DWORD(&reg->iobase_addr, 0x4040);
1874 dmp_reg = &reg->iobase_window;
1875 for (cnt = 0; cnt < 16; cnt++)
1876 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1877
1878 WRT_REG_DWORD(&reg->iobase_addr, 0x4050);
1879 dmp_reg = &reg->iobase_window;
1880 for (cnt = 0; cnt < 16; cnt++)
1881 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1882
1883 WRT_REG_DWORD(&reg->iobase_addr, 0x4060);
1884 dmp_reg = &reg->iobase_window;
1885 for (cnt = 0; cnt < 16; cnt++)
1886 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1887
1888 WRT_REG_DWORD(&reg->iobase_addr, 0x4070);
1889 dmp_reg = &reg->iobase_window;
1890 for (cnt = 0; cnt < 16; cnt++)
1891 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1892
1893 WRT_REG_DWORD(&reg->iobase_addr, 0x4080);
1894 dmp_reg = &reg->iobase_window;
1895 for (cnt = 0; cnt < 16; cnt++)
1896 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1897
1898 WRT_REG_DWORD(&reg->iobase_addr, 0x4090);
1899 dmp_reg = &reg->iobase_window;
1900 for (cnt = 0; cnt < 16; cnt++)
1901 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1902
1903 WRT_REG_DWORD(&reg->iobase_addr, 0x40A0);
1904 dmp_reg = &reg->iobase_window;
1905 for (cnt = 0; cnt < 16; cnt++)
1906 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1907
1908 WRT_REG_DWORD(&reg->iobase_addr, 0x40B0);
1909 dmp_reg = &reg->iobase_window;
1910 for (cnt = 0; cnt < 16; cnt++)
1911 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1912
1913 /* Frame Buffer registers. */
1914 iter_reg = fw->fb_hdw_reg;
1915 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
1916 dmp_reg = &reg->iobase_window;
1917 for (cnt = 0; cnt < 16; cnt++)
1918 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1919
1920 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
1921 dmp_reg = &reg->iobase_window;
1922 for (cnt = 0; cnt < 16; cnt++)
1923 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1924
1925 WRT_REG_DWORD(&reg->iobase_addr, 0x6020);
1926 dmp_reg = &reg->iobase_window;
1927 for (cnt = 0; cnt < 16; cnt++)
1928 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1929
1930 WRT_REG_DWORD(&reg->iobase_addr, 0x6030);
1931 dmp_reg = &reg->iobase_window;
1932 for (cnt = 0; cnt < 16; cnt++)
1933 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1934
1935 WRT_REG_DWORD(&reg->iobase_addr, 0x6040);
1936 dmp_reg = &reg->iobase_window;
1937 for (cnt = 0; cnt < 16; cnt++)
1938 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1939
1940 WRT_REG_DWORD(&reg->iobase_addr, 0x6100);
1941 dmp_reg = &reg->iobase_window;
1942 for (cnt = 0; cnt < 16; cnt++)
1943 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1944
1945 WRT_REG_DWORD(&reg->iobase_addr, 0x6130);
1946 dmp_reg = &reg->iobase_window;
1947 for (cnt = 0; cnt < 16; cnt++)
1948 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1949
1950 WRT_REG_DWORD(&reg->iobase_addr, 0x6150);
1951 dmp_reg = &reg->iobase_window;
1952 for (cnt = 0; cnt < 16; cnt++)
1953 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1954
1955 WRT_REG_DWORD(&reg->iobase_addr, 0x6170);
1956 dmp_reg = &reg->iobase_window;
1957 for (cnt = 0; cnt < 16; cnt++)
1958 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1959
1960 WRT_REG_DWORD(&reg->iobase_addr, 0x6190);
1961 dmp_reg = &reg->iobase_window;
1962 for (cnt = 0; cnt < 16; cnt++)
1963 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1964
1965 WRT_REG_DWORD(&reg->iobase_addr, 0x61B0);
1966 dmp_reg = &reg->iobase_window;
1967 for (cnt = 0; cnt < 16; cnt++)
1968 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1969
1970 WRT_REG_DWORD(&reg->iobase_addr, 0x6F00);
1971 dmp_reg = &reg->iobase_window;
1972 for (cnt = 0; cnt < 16; cnt++)
1973 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1974
1975 /* Reset RISC. */
1976 WRT_REG_DWORD(&reg->ctrl_status,
1977 CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
1978 for (cnt = 0; cnt < 30000; cnt++) {
1979 if ((RD_REG_DWORD(&reg->ctrl_status) &
1980 CSRX_DMA_ACTIVE) == 0)
1981 break;
1982
1983 udelay(10);
1984 }
1985
1986 WRT_REG_DWORD(&reg->ctrl_status,
1987 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
1988 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
1989
1990 udelay(100);
1991 /* Wait for firmware to complete NVRAM accesses. */
1992 mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1993 for (cnt = 10000 ; cnt && mb0; cnt--) {
1306 udelay(5); 1994 udelay(5);
1995 mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1996 barrier();
1307 } 1997 }
1308 1998
1309 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 1999 /* Wait for soft-reset to complete. */
1310 rval = mb[0] & MBS_MASK; 2000 for (cnt = 0; cnt < 30000; cnt++) {
1311 fw->ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]); 2001 if ((RD_REG_DWORD(&reg->ctrl_status) &
1312 } else { 2002 CSRX_ISP_SOFT_RESET) == 0)
1313 rval = QLA_FUNCTION_FAILED; 2003 break;
2004
2005 udelay(10);
1314 } 2006 }
2007 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2008 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
1315 } 2009 }
1316 2010
2011 for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 &&
2012 rval == QLA_SUCCESS; cnt--) {
2013 if (cnt)
2014 udelay(100);
2015 else
2016 rval = QLA_FUNCTION_TIMEOUT;
2017 }
2018
2019 if (rval == QLA_SUCCESS)
2020 rval = qla2xxx_dump_memory(ha, fw->code_ram,
2021 sizeof(fw->code_ram), fw->ext_mem, &nxt);
2022
1317 if (rval == QLA_SUCCESS) { 2023 if (rval == QLA_SUCCESS) {
1318 eft = qla2xxx_copy_queues(ha, &fw->ext_mem[cnt]); 2024 nxt = qla2xxx_copy_queues(ha, nxt);
1319 if (ha->eft) 2025 if (ha->eft)
1320 memcpy(eft, ha->eft, ntohl(ha->fw_dump->eft_size)); 2026 memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
1321 } 2027 }
1322 2028
1323 if (rval != QLA_SUCCESS) { 2029 if (rval != QLA_SUCCESS) {
@@ -1332,7 +2038,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1332 ha->fw_dumped = 1; 2038 ha->fw_dumped = 1;
1333 } 2039 }
1334 2040
1335qla24xx_fw_dump_failed: 2041qla25xx_fw_dump_failed:
1336 if (!hardware_locked) 2042 if (!hardware_locked)
1337 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2043 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1338} 2044}
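
Nearly every register block captured by qla25xx_fw_dump() above repeats the same three steps: select a window through iobase_addr, copy 16 (or 8) dwords out of iobase_window, and store them big-endian. A sketch of a helper that would factor this out -- the name is hypothetical, and it assumes the driver's RD_REG_DWORD/WRT_REG_DWORD accessors and the device_reg_24xx members extended later in this patch:

static uint32_t *
qla25xx_copy_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
    uint32_t count, uint32_t *buf)
{
	uint32_t cnt;
	uint32_t __iomem *dmp_reg = &reg->iobase_window;

	/* Select the register window, then stream it into the dump. */
	WRT_REG_DWORD(&reg->iobase_addr, iobase);
	for (cnt = 0; cnt < count; cnt++)
		*buf++ = htonl(RD_REG_DWORD(dmp_reg++));

	return buf;
}

With such a helper, the eight transfer-sequence windows (0xBF00 through 0xBF70) captured above would collapse to:

	iter_reg = fw->xseq_gp_reg;
	for (cnt = 0; cnt < 8; cnt++)
		iter_reg = qla25xx_copy_window(reg, 0xBF00 + cnt * 0x10,
		    16, iter_reg);
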
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 49dffeb78512..cca4b0d8253e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -213,6 +213,43 @@ struct qla24xx_fw_dump {
213 uint32_t ext_mem[1]; 213 uint32_t ext_mem[1];
214}; 214};
215 215
216struct qla25xx_fw_dump {
217 uint32_t host_status;
218 uint32_t host_reg[32];
219 uint32_t shadow_reg[11];
220 uint32_t risc_io_reg;
221 uint16_t mailbox_reg[32];
222 uint32_t xseq_gp_reg[128];
223 uint32_t xseq_0_reg[48];
224 uint32_t xseq_1_reg[16];
225 uint32_t rseq_gp_reg[128];
226 uint32_t rseq_0_reg[32];
227 uint32_t rseq_1_reg[16];
228 uint32_t rseq_2_reg[16];
229 uint32_t aseq_gp_reg[128];
230 uint32_t aseq_0_reg[32];
231 uint32_t aseq_1_reg[16];
232 uint32_t aseq_2_reg[16];
233 uint32_t cmd_dma_reg[16];
234 uint32_t req0_dma_reg[15];
235 uint32_t resp0_dma_reg[15];
236 uint32_t req1_dma_reg[15];
237 uint32_t xmt0_dma_reg[32];
238 uint32_t xmt1_dma_reg[32];
239 uint32_t xmt2_dma_reg[32];
240 uint32_t xmt3_dma_reg[32];
241 uint32_t xmt4_dma_reg[32];
242 uint32_t xmt_data_dma_reg[16];
243 uint32_t rcvt0_data_dma_reg[32];
244 uint32_t rcvt1_data_dma_reg[32];
245 uint32_t risc_gp_reg[128];
246 uint32_t lmc_reg[128];
247 uint32_t fpm_hdw_reg[192];
248 uint32_t fb_hdw_reg[192];
249 uint32_t code_ram[0x2000];
250 uint32_t ext_mem[1];
251};
252
216#define EFT_NUM_BUFFERS 4 253#define EFT_NUM_BUFFERS 4
217#define EFT_BYTES_PER_BUFFER 0x4000 254#define EFT_BYTES_PER_BUFFER 0x4000
218#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS)) 255#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@@ -246,5 +283,6 @@ struct qla2xxx_fw_dump {
246 struct qla2100_fw_dump isp21; 283 struct qla2100_fw_dump isp21;
247 struct qla2300_fw_dump isp23; 284 struct qla2300_fw_dump isp23;
248 struct qla24xx_fw_dump isp24; 285 struct qla24xx_fw_dump isp24;
286 struct qla25xx_fw_dump isp25;
249 } isp; 287 } isp;
250}; 288};
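
The trailing ext_mem[1] member of struct qla25xx_fw_dump is a variable-length tail: the fixed register/code-RAM area ends where ext_mem[] begins and the external-memory image is appended behind it. A sketch of that sizing, mirroring the qla2x00_alloc_fw_dump() change in the qla_init.c hunk further down (the helper name is illustrative, and the EFT/queue areas that the real allocation also reserves are ignored here):

static size_t
qla25xx_dump_size(uint32_t fw_memory_size)
{
	/* Registers and code RAM up to, but not including, ext_mem[]. */
	size_t fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
	/* External memory starts at RISC address 0x100000. */
	size_t mem_size = (fw_memory_size - 0x100000 + 1) * sizeof(uint32_t);

	return fixed_size + mem_size;
}
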
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a1ca590ba447..0c9f36c8a248 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1711,6 +1711,14 @@ struct ct_fdmi_hba_attributes {
1711#define FDMI_PORT_OS_DEVICE_NAME 5 1711#define FDMI_PORT_OS_DEVICE_NAME 5
1712#define FDMI_PORT_HOST_NAME 6 1712#define FDMI_PORT_HOST_NAME 6
1713 1713
1714#define FDMI_PORT_SPEED_1GB 0x1
1715#define FDMI_PORT_SPEED_2GB 0x2
1716#define FDMI_PORT_SPEED_10GB 0x4
1717#define FDMI_PORT_SPEED_4GB 0x8
1718#define FDMI_PORT_SPEED_8GB 0x10
1719#define FDMI_PORT_SPEED_16GB 0x20
1720#define FDMI_PORT_SPEED_UNKNOWN 0x8000
1721
1714struct ct_fdmi_port_attr { 1722struct ct_fdmi_port_attr {
1715 uint16_t type; 1723 uint16_t type;
1716 uint16_t len; 1724 uint16_t len;
@@ -2201,6 +2209,7 @@ typedef struct scsi_qla_host {
2201#define SWITCH_FOUND BIT_3 2209#define SWITCH_FOUND BIT_3
2202#define DFLG_NO_CABLE BIT_4 2210#define DFLG_NO_CABLE BIT_4
2203 2211
2212#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
2204 uint32_t device_type; 2213 uint32_t device_type;
2205#define DT_ISP2100 BIT_0 2214#define DT_ISP2100 BIT_0
2206#define DT_ISP2200 BIT_1 2215#define DT_ISP2200 BIT_1
@@ -2213,8 +2222,11 @@ typedef struct scsi_qla_host {
2213#define DT_ISP2432 BIT_8 2222#define DT_ISP2432 BIT_8
2214#define DT_ISP5422 BIT_9 2223#define DT_ISP5422 BIT_9
2215#define DT_ISP5432 BIT_10 2224#define DT_ISP5432 BIT_10
2216#define DT_ISP_LAST (DT_ISP5432 << 1) 2225#define DT_ISP2532 BIT_11
2226#define DT_ISP_LAST (DT_ISP2532 << 1)
2217 2227
2228#define DT_IIDMA BIT_26
2229#define DT_FWI2 BIT_27
2218#define DT_ZIO_SUPPORTED BIT_28 2230#define DT_ZIO_SUPPORTED BIT_28
2219#define DT_OEM_001 BIT_29 2231#define DT_OEM_001 BIT_29
2220#define DT_ISP2200A BIT_30 2232#define DT_ISP2200A BIT_30
@@ -2232,12 +2244,16 @@ typedef struct scsi_qla_host {
2232#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432) 2244#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
2233#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422) 2245#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
2234#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432) 2246#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
2247#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
2235 2248
2236#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ 2249#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2237 IS_QLA6312(ha) || IS_QLA6322(ha)) 2250 IS_QLA6312(ha) || IS_QLA6322(ha))
2238#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha)) 2251#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
2239#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha)) 2252#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
2253#define IS_QLA25XX(ha) (IS_QLA2532(ha))
2240 2254
2255#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2256#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
2241#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED) 2257#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
2242#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001) 2258#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
2243#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) 2259#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
@@ -2274,7 +2290,7 @@ typedef struct scsi_qla_host {
2274 uint16_t rsp_ring_index; /* Current index. */ 2290 uint16_t rsp_ring_index; /* Current index. */
2275 uint16_t response_q_length; 2291 uint16_t response_q_length;
2276 2292
2277 struct isp_operations isp_ops; 2293 struct isp_operations *isp_ops;
2278 2294
2279 /* Outstandings ISP commands. */ 2295 /* Outstandings ISP commands. */
2280 srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS]; 2296 srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
@@ -2298,6 +2314,7 @@ typedef struct scsi_qla_host {
2298#define PORT_SPEED_1GB 0x00 2314#define PORT_SPEED_1GB 0x00
2299#define PORT_SPEED_2GB 0x01 2315#define PORT_SPEED_2GB 0x01
2300#define PORT_SPEED_4GB 0x03 2316#define PORT_SPEED_4GB 0x03
2317#define PORT_SPEED_8GB 0x04
2301 uint16_t link_data_rate; /* F/W operating speed */ 2318 uint16_t link_data_rate; /* F/W operating speed */
2302 2319
2303 uint8_t current_topology; 2320 uint8_t current_topology;
@@ -2564,6 +2581,7 @@ typedef struct scsi_qla_host {
2564#define OPTROM_SIZE_2300 0x20000 2581#define OPTROM_SIZE_2300 0x20000
2565#define OPTROM_SIZE_2322 0x100000 2582#define OPTROM_SIZE_2322 0x100000
2566#define OPTROM_SIZE_24XX 0x100000 2583#define OPTROM_SIZE_24XX 0x100000
2584#define OPTROM_SIZE_25XX 0x200000
2567 2585
2568#include "qla_gbl.h" 2586#include "qla_gbl.h"
2569#include "qla_dbg.h" 2587#include "qla_dbg.h"
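
Two of the qla_def.h changes above meet at probe time: the new capability bits (DT_ISP2532, DT_FWI2, DT_IIDMA) and the switch of isp_ops from an embedded structure to a pointer at a static method table. The real assignments live in qla_os.c, outside this section; the fragment below is only a sketch of the intent, and the isp_operations member names are assumptions inferred from the ha->isp_ops-> call sites and the qla25xx_* prototypes elsewhere in this diff.

/* Hypothetical per-board method table; only a few members are shown,
 * the rest would be shared with the ISP24xx entries. */
static struct isp_operations qla25xx_isp_ops = {
	.pci_config	= qla25xx_pci_config,
	.reset_chip	= qla24xx_reset_chip,
	.fw_dump	= qla25xx_fw_dump,
	.read_nvram	= qla25xx_read_nvram_data,
	.write_nvram	= qla25xx_write_nvram_data,
};

/* Hypothetical probe-time flagging of the new ISP2532 part. */
case PCI_DEVICE_ID_QLOGIC_ISP2532:
	ha->device_type |= DT_ISP2532 | DT_FWI2 | DT_IIDMA;
	ha->isp_ops = &qla25xx_isp_ops;
	break;
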
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 63a11fef5d1b..99fe49618d61 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -8,14 +8,17 @@
8#define __QLA_FW_H 8#define __QLA_FW_H
9 9
10#define MBS_CHECKSUM_ERROR 0x4010 10#define MBS_CHECKSUM_ERROR 0x4010
11#define MBS_INVALID_PRODUCT_KEY 0x4020
11 12
12/* 13/*
13 * Firmware Options. 14 * Firmware Options.
14 */ 15 */
15#define FO1_ENABLE_PUREX BIT_10 16#define FO1_ENABLE_PUREX BIT_10
16#define FO1_DISABLE_LED_CTRL BIT_6 17#define FO1_DISABLE_LED_CTRL BIT_6
18#define FO1_ENABLE_8016 BIT_0
17#define FO2_ENABLE_SEL_CLASS2 BIT_5 19#define FO2_ENABLE_SEL_CLASS2 BIT_5
18#define FO3_NO_ABTS_ON_LINKDOWN BIT_14 20#define FO3_NO_ABTS_ON_LINKDOWN BIT_14
21#define FO3_HOLD_STS_IOCB BIT_12
19 22
20/* 23/*
21 * Port Database structure definition for ISP 24xx. 24 * Port Database structure definition for ISP 24xx.
@@ -341,7 +344,9 @@ struct init_cb_24xx {
341 * BIT 10 = Reserved 344 * BIT 10 = Reserved
342 * BIT 11 = Enable FC-SP Security 345 * BIT 11 = Enable FC-SP Security
343 * BIT 12 = FC Tape Enable 346 * BIT 12 = FC Tape Enable
344 * BIT 13-31 = Reserved 347 * BIT 13 = Reserved
348 * BIT 14 = Enable Target PRLI Control
349 * BIT 15-31 = Reserved
345 */ 350 */
346 uint32_t firmware_options_2; 351 uint32_t firmware_options_2;
347 352
@@ -363,7 +368,8 @@ struct init_cb_24xx {
363 * BIT 13 = Data Rate bit 0 368 * BIT 13 = Data Rate bit 0
364 * BIT 14 = Data Rate bit 1 369 * BIT 14 = Data Rate bit 1
365 * BIT 15 = Data Rate bit 2 370 * BIT 15 = Data Rate bit 2
366 * BIT 16-31 = Reserved 371 * BIT 16 = Enable 75 ohm Termination Select
372 * BIT 17-31 = Reserved
367 */ 373 */
368 uint32_t firmware_options_3; 374 uint32_t firmware_options_3;
369 375
@@ -435,6 +441,7 @@ struct cmd_type_7 {
435#define TMF_LUN_RESET BIT_12 441#define TMF_LUN_RESET BIT_12
436#define TMF_CLEAR_TASK_SET BIT_10 442#define TMF_CLEAR_TASK_SET BIT_10
437#define TMF_ABORT_TASK_SET BIT_9 443#define TMF_ABORT_TASK_SET BIT_9
444#define TMF_DSD_LIST_ENABLE BIT_2
438#define TMF_READ_DATA BIT_1 445#define TMF_READ_DATA BIT_1
439#define TMF_WRITE_DATA BIT_0 446#define TMF_WRITE_DATA BIT_0
440 447
@@ -589,7 +596,7 @@ struct els_entry_24xx {
589#define EST_SOFI3 (1 << 4) 596#define EST_SOFI3 (1 << 4)
590#define EST_SOFI2 (3 << 4) 597#define EST_SOFI2 (3 << 4)
591 598
592 uint32_t rx_xchg_address[2]; /* Receive exchange address. */ 599 uint32_t rx_xchg_address; /* Receive exchange address. */
593 uint16_t rx_dsd_count; 600 uint16_t rx_dsd_count;
594 601
595 uint8_t opcode; 602 uint8_t opcode;
@@ -650,6 +657,7 @@ struct logio_entry_24xx {
650 657
651 uint16_t control_flags; /* Control flags. */ 658 uint16_t control_flags; /* Control flags. */
652 /* Modifiers. */ 659 /* Modifiers. */
660#define LCF_INCLUDE_SNS BIT_10 /* Include SNS (FFFFFC) during LOGO. */
653#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */ 661#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */
654#define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */ 662#define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */
655#define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */ 663#define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */
@@ -779,6 +787,15 @@ struct device_reg_24xx {
779#define FA_RISC_CODE_ADDR 0x20000 787#define FA_RISC_CODE_ADDR 0x20000
780#define FA_RISC_CODE_SEGMENTS 2 788#define FA_RISC_CODE_SEGMENTS 2
781 789
790#define FA_FW_AREA_ADDR 0x40000
791#define FA_VPD_NVRAM_ADDR 0x48000
792#define FA_FEATURE_ADDR 0x4C000
793#define FA_FLASH_DESCR_ADDR 0x50000
794#define FA_HW_EVENT_ADDR 0x54000
795#define FA_BOOT_LOG_ADDR 0x58000
796#define FA_FW_DUMP0_ADDR 0x60000
797#define FA_FW_DUMP1_ADDR 0x70000
798
782 uint32_t flash_data; /* Flash/NVRAM BIOS data. */ 799 uint32_t flash_data; /* Flash/NVRAM BIOS data. */
783 800
784 uint32_t ctrl_status; /* Control/Status. */ 801 uint32_t ctrl_status; /* Control/Status. */
@@ -859,10 +876,13 @@ struct device_reg_24xx {
859#define HCCRX_CLR_RISC_INT 0xA0000000 876#define HCCRX_CLR_RISC_INT 0xA0000000
860 877
861 uint32_t gpiod; /* GPIO Data register. */ 878 uint32_t gpiod; /* GPIO Data register. */
879
862 /* LED update mask. */ 880 /* LED update mask. */
863#define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18) 881#define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18)
864 /* Data update mask. */ 882 /* Data update mask. */
865#define GPDX_DATA_UPDATE_MASK (BIT_17|BIT_16) 883#define GPDX_DATA_UPDATE_MASK (BIT_17|BIT_16)
884 /* Data update mask. */
885#define GPDX_DATA_UPDATE_2_MASK (BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
866 /* LED control mask. */ 886 /* LED control mask. */
867#define GPDX_LED_COLOR_MASK (BIT_4|BIT_3|BIT_2) 887#define GPDX_LED_COLOR_MASK (BIT_4|BIT_3|BIT_2)
868 /* LED bit values. Color names as 888 /* LED bit values. Color names as
@@ -877,6 +897,8 @@ struct device_reg_24xx {
877 uint32_t gpioe; /* GPIO Enable register. */ 897 uint32_t gpioe; /* GPIO Enable register. */
878 /* Enable update mask. */ 898 /* Enable update mask. */
879#define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16) 899#define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16)
900 /* Enable update mask. */
901#define GPEX_ENABLE_UPDATE_2_MASK (BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
880 /* Enable. */ 902 /* Enable. */
881#define GPEX_ENABLE (BIT_1|BIT_0) 903#define GPEX_ENABLE (BIT_1|BIT_0)
882 904
@@ -916,6 +938,14 @@ struct device_reg_24xx {
916 uint16_t mailbox29; 938 uint16_t mailbox29;
917 uint16_t mailbox30; 939 uint16_t mailbox30;
918 uint16_t mailbox31; 940 uint16_t mailbox31;
941
942 uint32_t iobase_window;
943 uint32_t unused_4[8]; /* Gap. */
944 uint32_t iobase_q;
945 uint32_t unused_5[2]; /* Gap. */
946 uint32_t iobase_select;
947 uint32_t unused_6[2]; /* Gap. */
948 uint32_t iobase_sdata;
919}; 949};
920 950
921/* MID Support ***************************************************************/ 951/* MID Support ***************************************************************/
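
The iobase_window, iobase_q, iobase_select and iobase_sdata members appended to struct device_reg_24xx above are what the new ISP25xx dump code indexes. As a sketch distilled from qla25xx_fw_dump() (the helper name is made up; the constants are the ones used there), shadow register n, for n from 0 to 10, is fetched like this:

static uint32_t
qla25xx_read_shadow(struct device_reg_24xx __iomem *reg, unsigned int n)
{
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);	/* shadow-reg window */
	RD_REG_DWORD(&reg->iobase_addr);		/* flush posted write */
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000 | (n << 20));
	return RD_REG_DWORD(&reg->iobase_sdata);
}
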
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b44eff2803ce..aa1e41152283 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -17,6 +17,7 @@ extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
17extern int qla2100_pci_config(struct scsi_qla_host *); 17extern int qla2100_pci_config(struct scsi_qla_host *);
18extern int qla2300_pci_config(struct scsi_qla_host *); 18extern int qla2300_pci_config(struct scsi_qla_host *);
19extern int qla24xx_pci_config(scsi_qla_host_t *); 19extern int qla24xx_pci_config(scsi_qla_host_t *);
20extern int qla25xx_pci_config(scsi_qla_host_t *);
20extern void qla2x00_reset_chip(struct scsi_qla_host *); 21extern void qla2x00_reset_chip(struct scsi_qla_host *);
21extern void qla24xx_reset_chip(struct scsi_qla_host *); 22extern void qla24xx_reset_chip(struct scsi_qla_host *);
22extern int qla2x00_chip_diag(struct scsi_qla_host *); 23extern int qla2x00_chip_diag(struct scsi_qla_host *);
@@ -281,6 +282,10 @@ extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
281 uint32_t); 282 uint32_t);
282extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 283extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
283 uint32_t); 284 uint32_t);
285extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
286 uint32_t);
287extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
288 uint32_t);
284 289
285extern int qla2x00_beacon_on(struct scsi_qla_host *); 290extern int qla2x00_beacon_on(struct scsi_qla_host *);
286extern int qla2x00_beacon_off(struct scsi_qla_host *); 291extern int qla2x00_beacon_off(struct scsi_qla_host *);
@@ -307,6 +312,7 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
307extern void qla2100_fw_dump(scsi_qla_host_t *, int); 312extern void qla2100_fw_dump(scsi_qla_host_t *, int);
308extern void qla2300_fw_dump(scsi_qla_host_t *, int); 313extern void qla2300_fw_dump(scsi_qla_host_t *, int);
309extern void qla24xx_fw_dump(scsi_qla_host_t *, int); 314extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
315extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
310extern void qla2x00_dump_regs(scsi_qla_host_t *); 316extern void qla2x00_dump_regs(scsi_qla_host_t *);
311extern void qla2x00_dump_buffer(uint8_t *, uint32_t); 317extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
312extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *); 318extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index a086b3f0df65..b06cbb8580d3 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -127,7 +127,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
127 DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n", 127 DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
128 ha->host_no, routine, ms_pkt->entry_status)); 128 ha->host_no, routine, ms_pkt->entry_status));
129 } else { 129 } else {
130 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 130 if (IS_FWI2_CAPABLE(ha))
131 comp_status = le16_to_cpu( 131 comp_status = le16_to_cpu(
132 ((struct ct_entry_24xx *)ms_pkt)->comp_status); 132 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
133 else 133 else
@@ -180,7 +180,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
180 180
181 /* Issue GA_NXT */ 181 /* Issue GA_NXT */
182 /* Prepare common MS IOCB */ 182 /* Prepare common MS IOCB */
183 ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GA_NXT_REQ_SIZE, GA_NXT_RSP_SIZE); 183 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GA_NXT_REQ_SIZE,
184 GA_NXT_RSP_SIZE);
184 185
185 /* Prepare CT request */ 186 /* Prepare CT request */
186 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GA_NXT_CMD, 187 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GA_NXT_CMD,
@@ -266,7 +267,8 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
266 267
267 /* Issue GID_PT */ 268 /* Issue GID_PT */
268 /* Prepare common MS IOCB */ 269 /* Prepare common MS IOCB */
269 ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GID_PT_REQ_SIZE, GID_PT_RSP_SIZE); 270 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GID_PT_REQ_SIZE,
271 GID_PT_RSP_SIZE);
270 272
271 /* Prepare CT request */ 273 /* Prepare CT request */
272 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD, 274 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD,
@@ -338,7 +340,7 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
338 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 340 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
339 /* Issue GPN_ID */ 341 /* Issue GPN_ID */
340 /* Prepare common MS IOCB */ 342 /* Prepare common MS IOCB */
341 ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GPN_ID_REQ_SIZE, 343 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GPN_ID_REQ_SIZE,
342 GPN_ID_RSP_SIZE); 344 GPN_ID_RSP_SIZE);
343 345
344 /* Prepare CT request */ 346 /* Prepare CT request */
@@ -399,7 +401,7 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
399 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 401 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
400 /* Issue GNN_ID */ 402 /* Issue GNN_ID */
401 /* Prepare common MS IOCB */ 403 /* Prepare common MS IOCB */
402 ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GNN_ID_REQ_SIZE, 404 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GNN_ID_REQ_SIZE,
403 GNN_ID_RSP_SIZE); 405 GNN_ID_RSP_SIZE);
404 406
405 /* Prepare CT request */ 407 /* Prepare CT request */
@@ -473,7 +475,8 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
473 475
474 /* Issue RFT_ID */ 476 /* Issue RFT_ID */
475 /* Prepare common MS IOCB */ 477 /* Prepare common MS IOCB */
476 ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RFT_ID_REQ_SIZE, RFT_ID_RSP_SIZE); 478 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFT_ID_REQ_SIZE,
479 RFT_ID_RSP_SIZE);
477 480
478 /* Prepare CT request */ 481 /* Prepare CT request */
479 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFT_ID_CMD, 482 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFT_ID_CMD,
@@ -528,7 +531,8 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
528 531
529 /* Issue RFF_ID */ 532 /* Issue RFF_ID */
530 /* Prepare common MS IOCB */ 533 /* Prepare common MS IOCB */
531 ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RFF_ID_REQ_SIZE, RFF_ID_RSP_SIZE); 534 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFF_ID_REQ_SIZE,
535 RFF_ID_RSP_SIZE);
532 536
533 /* Prepare CT request */ 537 /* Prepare CT request */
534 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFF_ID_CMD, 538 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFF_ID_CMD,
@@ -582,7 +586,8 @@ qla2x00_rnn_id(scsi_qla_host_t *ha)
582 586
583 /* Issue RNN_ID */ 587 /* Issue RNN_ID */
584 /* Prepare common MS IOCB */ 588 /* Prepare common MS IOCB */
585 ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RNN_ID_REQ_SIZE, RNN_ID_RSP_SIZE); 589 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RNN_ID_REQ_SIZE,
590 RNN_ID_RSP_SIZE);
586 591
587 /* Prepare CT request */ 592 /* Prepare CT request */
588 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RNN_ID_CMD, 593 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RNN_ID_CMD,
@@ -645,7 +650,7 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
645 /* Issue RSNN_NN */ 650 /* Issue RSNN_NN */
646 /* Prepare common MS IOCB */ 651 /* Prepare common MS IOCB */
647 /* Request size adjusted after CT preparation */ 652 /* Request size adjusted after CT preparation */
648 ms_pkt = ha->isp_ops.prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE); 653 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE);
649 654
650 /* Prepare CT request */ 655 /* Prepare CT request */
651 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD, 656 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD,
@@ -1102,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
1102 if (ha->flags.management_server_logged_in) 1107 if (ha->flags.management_server_logged_in)
1103 return ret; 1108 return ret;
1104 1109
1105 ha->isp_ops.fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1110 ha->isp_ops->fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1106 mb, BIT_1); 1111 mb, BIT_1);
1107 if (mb[0] != MBS_COMMAND_COMPLETE) { 1112 if (mb[0] != MBS_COMMAND_COMPLETE) {
1108 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1113 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
@@ -1198,7 +1203,7 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
1198 ms_iocb_entry_t *ms_pkt = ha->ms_iocb; 1203 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1199 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 1204 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1200 1205
1201 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1206 if (IS_FWI2_CAPABLE(ha)) {
1202 ct_pkt->cmd_byte_count = cpu_to_le32(req_size); 1207 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1203 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; 1208 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1204 } else { 1209 } else {
@@ -1253,7 +1258,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1253 /* Issue RHBA */ 1258 /* Issue RHBA */
1254 /* Prepare common MS IOCB */ 1259 /* Prepare common MS IOCB */
1255 /* Request size adjusted after CT preparation */ 1260 /* Request size adjusted after CT preparation */
1256 ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE); 1261 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
1257 1262
1258 /* Prepare CT request */ 1263 /* Prepare CT request */
1259 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD, 1264 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
@@ -1373,7 +1378,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1373 /* Firmware version */ 1378 /* Firmware version */
1374 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1379 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1375 eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); 1380 eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1376 ha->isp_ops.fw_version_str(ha, eiter->a.fw_version); 1381 ha->isp_ops->fw_version_str(ha, eiter->a.fw_version);
1377 alen = strlen(eiter->a.fw_version); 1382 alen = strlen(eiter->a.fw_version);
1378 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1383 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1379 eiter->len = cpu_to_be16(4 + alen); 1384 eiter->len = cpu_to_be16(4 + alen);
@@ -1439,7 +1444,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
1439 1444
1440 /* Issue RPA */ 1445 /* Issue RPA */
1441 /* Prepare common MS IOCB */ 1446 /* Prepare common MS IOCB */
1442 ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE, 1447 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
1443 DHBA_RSP_SIZE); 1448 DHBA_RSP_SIZE);
1444 1449
1445 /* Prepare CT request */ 1450 /* Prepare CT request */
@@ -1497,7 +1502,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1497 /* Issue RPA */ 1502 /* Issue RPA */
1498 /* Prepare common MS IOCB */ 1503 /* Prepare common MS IOCB */
1499 /* Request size adjusted after CT preparation */ 1504 /* Request size adjusted after CT preparation */
1500 ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE); 1505 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
1501 1506
1502 /* Prepare CT request */ 1507 /* Prepare CT request */
1503 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD, 1508 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
@@ -1527,12 +1532,20 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1527 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1532 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1528 eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); 1533 eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1529 eiter->len = __constant_cpu_to_be16(4 + 4); 1534 eiter->len = __constant_cpu_to_be16(4 + 4);
1530 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 1535 if (IS_QLA25XX(ha))
1531 eiter->a.sup_speed = __constant_cpu_to_be32(4); 1536 eiter->a.sup_speed = __constant_cpu_to_be32(
1537 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
1538 FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
1539 else if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
1540 eiter->a.sup_speed = __constant_cpu_to_be32(
1541 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
1542 FDMI_PORT_SPEED_4GB);
1532 else if (IS_QLA23XX(ha)) 1543 else if (IS_QLA23XX(ha))
1533 eiter->a.sup_speed = __constant_cpu_to_be32(2); 1544 eiter->a.sup_speed =__constant_cpu_to_be32(
1545 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB);
1534 else 1546 else
1535 eiter->a.sup_speed = __constant_cpu_to_be32(1); 1547 eiter->a.sup_speed = __constant_cpu_to_be32(
1548 FDMI_PORT_SPEED_1GB);
1536 size += 4 + 4; 1549 size += 4 + 4;
1537 1550
1538 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no, 1551 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no,
@@ -1543,14 +1556,25 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1543 eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED); 1556 eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1544 eiter->len = __constant_cpu_to_be16(4 + 4); 1557 eiter->len = __constant_cpu_to_be16(4 + 4);
1545 switch (ha->link_data_rate) { 1558 switch (ha->link_data_rate) {
1546 case 0: 1559 case PORT_SPEED_1GB:
1547 eiter->a.cur_speed = __constant_cpu_to_be32(1); 1560 eiter->a.cur_speed =
1561 __constant_cpu_to_be32(FDMI_PORT_SPEED_1GB);
1562 break;
1563 case PORT_SPEED_2GB:
1564 eiter->a.cur_speed =
1565 __constant_cpu_to_be32(FDMI_PORT_SPEED_2GB);
1566 break;
1567 case PORT_SPEED_4GB:
1568 eiter->a.cur_speed =
1569 __constant_cpu_to_be32(FDMI_PORT_SPEED_4GB);
1548 break; 1570 break;
1549 case 1: 1571 case PORT_SPEED_8GB:
1550 eiter->a.cur_speed = __constant_cpu_to_be32(2); 1572 eiter->a.cur_speed =
1573 __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
1551 break; 1574 break;
1552 case 3: 1575 default:
1553 eiter->a.cur_speed = __constant_cpu_to_be32(4); 1576 eiter->a.cur_speed =
1577 __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1554 break; 1578 break;
1555 } 1579 }
1556 size += 4 + 4; 1580 size += 4 + 4;
@@ -1562,7 +1586,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1562 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1586 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1563 eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); 1587 eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1564 eiter->len = __constant_cpu_to_be16(4 + 4); 1588 eiter->len = __constant_cpu_to_be16(4 + 4);
1565 max_frame_size = IS_QLA24XX(ha) || IS_QLA54XX(ha) ? 1589 max_frame_size = IS_FWI2_CAPABLE(ha) ?
1566 (uint32_t) icb24->frame_payload_size: 1590 (uint32_t) icb24->frame_payload_size:
1567 (uint32_t) ha->init_cb->frame_payload_size; 1591 (uint32_t) ha->init_cb->frame_payload_size;
1568 eiter->a.max_frame_size = cpu_to_be32(max_frame_size); 1592 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
@@ -1678,7 +1702,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1678 struct ct_sns_req *ct_req; 1702 struct ct_sns_req *ct_req;
1679 struct ct_sns_rsp *ct_rsp; 1703 struct ct_sns_rsp *ct_rsp;
1680 1704
1681 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 1705 if (!IS_IIDMA_CAPABLE(ha))
1682 return QLA_FUNCTION_FAILED; 1706 return QLA_FUNCTION_FAILED;
1683 1707
1684 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 1708 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
@@ -1686,7 +1710,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1686 memset(list[i].fabric_port_name, 0, WWN_SIZE); 1710 memset(list[i].fabric_port_name, 0, WWN_SIZE);
1687 1711
1688 /* Prepare common MS IOCB */ 1712 /* Prepare common MS IOCB */
1689 ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GFPN_ID_REQ_SIZE, 1713 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GFPN_ID_REQ_SIZE,
1690 GFPN_ID_RSP_SIZE); 1714 GFPN_ID_RSP_SIZE);
1691 1715
1692 /* Prepare CT request */ 1716 /* Prepare CT request */
@@ -1786,7 +1810,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1786 struct ct_sns_req *ct_req; 1810 struct ct_sns_req *ct_req;
1787 struct ct_sns_rsp *ct_rsp; 1811 struct ct_sns_rsp *ct_rsp;
1788 1812
1789 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 1813 if (!IS_IIDMA_CAPABLE(ha))
1790 return QLA_FUNCTION_FAILED; 1814 return QLA_FUNCTION_FAILED;
1791 if (!ha->flags.gpsc_supported) 1815 if (!ha->flags.gpsc_supported)
1792 return QLA_FUNCTION_FAILED; 1816 return QLA_FUNCTION_FAILED;
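
Earlier in the qla_gs.c diff, the current-speed switch in qla2x00_fdmi_rpa() replaces the raw 0/1/3 link_data_rate cases with the PORT_SPEED_* names and reports the new FDMI_PORT_SPEED_* bit values, falling back to "unknown" for anything else. Factored into a helper purely for illustration (hypothetical name; the constants are the ones added in qla_def.h earlier in this diff):

static uint32_t
qla2x00_fdmi_cur_speed(uint16_t link_data_rate)
{
	switch (link_data_rate) {
	case PORT_SPEED_1GB:
		return FDMI_PORT_SPEED_1GB;
	case PORT_SPEED_2GB:
		return FDMI_PORT_SPEED_2GB;
	case PORT_SPEED_4GB:
		return FDMI_PORT_SPEED_4GB;
	case PORT_SPEED_8GB:
		return FDMI_PORT_SPEED_8GB;
	default:
		return FDMI_PORT_SPEED_UNKNOWN;
	}
}
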
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index cc6ebb609e98..5ec798c2bf13 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -79,20 +79,20 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
79 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 79 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
80 80
81 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 81 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
82 rval = ha->isp_ops.pci_config(ha); 82 rval = ha->isp_ops->pci_config(ha);
83 if (rval) { 83 if (rval) {
84 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n", 84 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
85 ha->host_no)); 85 ha->host_no));
86 return (rval); 86 return (rval);
87 } 87 }
88 88
89 ha->isp_ops.reset_chip(ha); 89 ha->isp_ops->reset_chip(ha);
90 90
91 ha->isp_ops.get_flash_version(ha, ha->request_ring); 91 ha->isp_ops->get_flash_version(ha, ha->request_ring);
92 92
93 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 93 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
94 94
95 ha->isp_ops.nvram_config(ha); 95 ha->isp_ops->nvram_config(ha);
96 96
97 if (ha->flags.disable_serdes) { 97 if (ha->flags.disable_serdes) {
98 /* Mask HBA via NVRAM settings? */ 98 /* Mask HBA via NVRAM settings? */
@@ -108,7 +108,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
108 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 108 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
109 109
110 if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) { 110 if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
111 rval = ha->isp_ops.chip_diag(ha); 111 rval = ha->isp_ops->chip_diag(ha);
112 if (rval) 112 if (rval)
113 return (rval); 113 return (rval);
114 rval = qla2x00_setup_chip(ha); 114 rval = qla2x00_setup_chip(ha);
@@ -129,14 +129,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
129int 129int
130qla2100_pci_config(scsi_qla_host_t *ha) 130qla2100_pci_config(scsi_qla_host_t *ha)
131{ 131{
132 int ret;
133 uint16_t w; 132 uint16_t w;
134 uint32_t d; 133 uint32_t d;
135 unsigned long flags; 134 unsigned long flags;
136 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 135 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
137 136
138 pci_set_master(ha->pdev); 137 pci_set_master(ha->pdev);
139 ret = pci_set_mwi(ha->pdev); 138 pci_try_set_mwi(ha->pdev);
140 139
141 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 140 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
142 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 141 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@@ -164,7 +163,6 @@ qla2100_pci_config(scsi_qla_host_t *ha)
164int 163int
165qla2300_pci_config(scsi_qla_host_t *ha) 164qla2300_pci_config(scsi_qla_host_t *ha)
166{ 165{
167 int ret;
168 uint16_t w; 166 uint16_t w;
169 uint32_t d; 167 uint32_t d;
170 unsigned long flags = 0; 168 unsigned long flags = 0;
@@ -172,7 +170,7 @@ qla2300_pci_config(scsi_qla_host_t *ha)
172 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 170 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
173 171
174 pci_set_master(ha->pdev); 172 pci_set_master(ha->pdev);
175 ret = pci_set_mwi(ha->pdev); 173 pci_try_set_mwi(ha->pdev);
176 174
177 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 175 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
178 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 176 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@@ -250,15 +248,13 @@ qla2300_pci_config(scsi_qla_host_t *ha)
250int 248int
251qla24xx_pci_config(scsi_qla_host_t *ha) 249qla24xx_pci_config(scsi_qla_host_t *ha)
252{ 250{
253 int ret;
254 uint16_t w; 251 uint16_t w;
255 uint32_t d; 252 uint32_t d;
256 unsigned long flags = 0; 253 unsigned long flags = 0;
257 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 254 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
258 int pcix_cmd_reg, pcie_dctl_reg;
259 255
260 pci_set_master(ha->pdev); 256 pci_set_master(ha->pdev);
261 ret = pci_set_mwi(ha->pdev); 257 pci_try_set_mwi(ha->pdev);
262 258
263 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 259 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
264 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 260 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@@ -268,28 +264,12 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
268 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); 264 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
269 265
270 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */ 266 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
271 pcix_cmd_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX); 267 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
272 if (pcix_cmd_reg) { 268 pcix_set_mmrbc(ha->pdev, 2048);
273 uint16_t pcix_cmd;
274
275 pcix_cmd_reg += PCI_X_CMD;
276 pci_read_config_word(ha->pdev, pcix_cmd_reg, &pcix_cmd);
277 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
278 pcix_cmd |= 0x0008;
279 pci_write_config_word(ha->pdev, pcix_cmd_reg, pcix_cmd);
280 }
281 269
282 /* PCIe -- adjust Maximum Read Request Size (2048). */ 270 /* PCIe -- adjust Maximum Read Request Size (2048). */
283 pcie_dctl_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 271 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
284 if (pcie_dctl_reg) { 272 pcie_set_readrq(ha->pdev, 2048);
285 uint16_t pcie_dctl;
286
287 pcie_dctl_reg += PCI_EXP_DEVCTL;
288 pci_read_config_word(ha->pdev, pcie_dctl_reg, &pcie_dctl);
289 pcie_dctl &= ~PCI_EXP_DEVCTL_READRQ;
290 pcie_dctl |= 0x4000;
291 pci_write_config_word(ha->pdev, pcie_dctl_reg, pcie_dctl);
292 }
293 273
294 /* Reset expansion ROM address decode enable */ 274 /* Reset expansion ROM address decode enable */
295 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d); 275 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
@@ -307,6 +287,40 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
307} 287}
308 288
309/** 289/**
290 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
291 * @ha: HA context
292 *
293 * Returns 0 on success.
294 */
295int
296qla25xx_pci_config(scsi_qla_host_t *ha)
297{
298 uint16_t w;
299 uint32_t d;
300
301 pci_set_master(ha->pdev);
302 pci_try_set_mwi(ha->pdev);
303
304 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
305 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
306 w &= ~PCI_COMMAND_INTX_DISABLE;
307 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
308
309 /* PCIe -- adjust Maximum Read Request Size (2048). */
310 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
311 pcie_set_readrq(ha->pdev, 2048);
312
313 /* Reset expansion ROM address decode enable */
314 pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
315 d &= ~PCI_ROM_ADDRESS_ENABLE;
316 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
317
318 ha->chip_revision = ha->pdev->revision;
319
320 return QLA_SUCCESS;
321}
322
323/**
310 * qla2x00_isp_firmware() - Choose firmware image. 324 * qla2x00_isp_firmware() - Choose firmware image.
311 * @ha: HA context 325 * @ha: HA context
312 * 326 *
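
qla25xx_pci_config() is the PCIe-only variant for the new ISP25xx parts: compared with the ISP24xx routine it drops the latency-timer write and the PCI-X branch. It is not called directly; like the other pci_config methods it is presumably reached through the new ops table during adapter initialization, roughly as below (a hedged sketch -- the actual call site is outside this excerpt):

	static int example_init_pci(scsi_qla_host_t *ha)
	{
		/* Dispatches to qla25xx_pci_config() on ISP25xx hardware
		 * via the qla25xx_isp_ops table defined in qla_os.c below. */
		return ha->isp_ops->pci_config(ha);
	}
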
@@ -351,7 +365,7 @@ qla2x00_reset_chip(scsi_qla_host_t *ha)
351 uint32_t cnt; 365 uint32_t cnt;
352 uint16_t cmd; 366 uint16_t cmd;
353 367
354 ha->isp_ops.disable_intrs(ha); 368 ha->isp_ops->disable_intrs(ha);
355 369
356 spin_lock_irqsave(&ha->hardware_lock, flags); 370 spin_lock_irqsave(&ha->hardware_lock, flags);
357 371
@@ -551,7 +565,7 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
551void 565void
552qla24xx_reset_chip(scsi_qla_host_t *ha) 566qla24xx_reset_chip(scsi_qla_host_t *ha)
553{ 567{
554 ha->isp_ops.disable_intrs(ha); 568 ha->isp_ops->disable_intrs(ha);
555 569
556 /* Perform RISC reset. */ 570 /* Perform RISC reset. */
557 qla24xx_reset_risc(ha); 571 qla24xx_reset_risc(ha);
@@ -736,8 +750,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
736 fixed_size = offsetof(struct qla2300_fw_dump, data_ram); 750 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
737 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 751 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
738 sizeof(uint16_t); 752 sizeof(uint16_t);
739 } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 753 } else if (IS_FWI2_CAPABLE(ha)) {
740 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem); 754 fixed_size = IS_QLA25XX(ha) ?
755 offsetof(struct qla25xx_fw_dump, ext_mem):
756 offsetof(struct qla24xx_fw_dump, ext_mem);
741 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 757 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
742 sizeof(uint32_t); 758 sizeof(uint32_t);
743 759
@@ -879,7 +895,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
879 uint32_t srisc_address = 0; 895 uint32_t srisc_address = 0;
880 896
881 /* Load firmware sequences */ 897 /* Load firmware sequences */
882 rval = ha->isp_ops.load_risc(ha, &srisc_address); 898 rval = ha->isp_ops->load_risc(ha, &srisc_address);
883 if (rval == QLA_SUCCESS) { 899 if (rval == QLA_SUCCESS) {
884 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC " 900 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
885 "code.\n", ha->host_no)); 901 "code.\n", ha->host_no));
@@ -1130,12 +1146,12 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1130 /* Initialize response queue entries */ 1146 /* Initialize response queue entries */
1131 qla2x00_init_response_q_entries(ha); 1147 qla2x00_init_response_q_entries(ha);
1132 1148
1133 ha->isp_ops.config_rings(ha); 1149 ha->isp_ops->config_rings(ha);
1134 1150
1135 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1151 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1136 1152
1137 /* Update any ISP specific firmware options before initialization. */ 1153 /* Update any ISP specific firmware options before initialization. */
1138 ha->isp_ops.update_fw_options(ha); 1154 ha->isp_ops->update_fw_options(ha);
1139 1155
1140 DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no)); 1156 DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no));
1141 1157
@@ -1459,7 +1475,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1459 ha->nvram_base = 0x80; 1475 ha->nvram_base = 0x80;
1460 1476
1461 /* Get NVRAM data and calculate checksum. */ 1477 /* Get NVRAM data and calculate checksum. */
1462 ha->isp_ops.read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size); 1478 ha->isp_ops->read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size);
1463 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) 1479 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
1464 chksum += *ptr++; 1480 chksum += *ptr++;
1465 1481
@@ -2119,7 +2135,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2119 int rval; 2135 int rval;
2120 uint16_t port_speed, mb[6]; 2136 uint16_t port_speed, mb[6];
2121 2137
2122 if (!IS_QLA24XX(ha)) 2138 if (!IS_IIDMA_CAPABLE(ha))
2123 return; 2139 return;
2124 2140
2125 switch (be16_to_cpu(fcport->fp_speed)) { 2141 switch (be16_to_cpu(fcport->fp_speed)) {
@@ -2267,7 +2283,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2267 scsi_qla_host_t *pha = to_qla_parent(ha); 2283 scsi_qla_host_t *pha = to_qla_parent(ha);
2268 2284
2269 /* If FL port exists, then SNS is present */ 2285 /* If FL port exists, then SNS is present */
2270 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 2286 if (IS_FWI2_CAPABLE(ha))
2271 loop_id = NPH_F_PORT; 2287 loop_id = NPH_F_PORT;
2272 else 2288 else
2273 loop_id = SNS_FL_PORT; 2289 loop_id = SNS_FL_PORT;
@@ -2294,11 +2310,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2294 qla2x00_fdmi_register(ha); 2310 qla2x00_fdmi_register(ha);
2295 2311
2296 /* Ensure we are logged into the SNS. */ 2312 /* Ensure we are logged into the SNS. */
2297 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 2313 if (IS_FWI2_CAPABLE(ha))
2298 loop_id = NPH_SNS; 2314 loop_id = NPH_SNS;
2299 else 2315 else
2300 loop_id = SIMPLE_NAME_SERVER; 2316 loop_id = SIMPLE_NAME_SERVER;
2301 ha->isp_ops.fabric_login(ha, loop_id, 0xff, 0xff, 2317 ha->isp_ops->fabric_login(ha, loop_id, 0xff, 0xff,
2302 0xfc, mb, BIT_1 | BIT_0); 2318 0xfc, mb, BIT_1 | BIT_0);
2303 if (mb[0] != MBS_COMMAND_COMPLETE) { 2319 if (mb[0] != MBS_COMMAND_COMPLETE) {
2304 DEBUG2(qla_printk(KERN_INFO, ha, 2320 DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2355,7 +2371,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2355 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2371 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2356 fcport->port_type != FCT_INITIATOR && 2372 fcport->port_type != FCT_INITIATOR &&
2357 fcport->port_type != FCT_BROADCAST) { 2373 fcport->port_type != FCT_BROADCAST) {
2358 ha->isp_ops.fabric_logout(ha, 2374 ha->isp_ops->fabric_logout(ha,
2359 fcport->loop_id, 2375 fcport->loop_id,
2360 fcport->d_id.b.domain, 2376 fcport->d_id.b.domain,
2361 fcport->d_id.b.area, 2377 fcport->d_id.b.area,
@@ -2664,7 +2680,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2664 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2680 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2665 fcport->port_type != FCT_INITIATOR && 2681 fcport->port_type != FCT_INITIATOR &&
2666 fcport->port_type != FCT_BROADCAST) { 2682 fcport->port_type != FCT_BROADCAST) {
2667 ha->isp_ops.fabric_logout(ha, fcport->loop_id, 2683 ha->isp_ops->fabric_logout(ha, fcport->loop_id,
2668 fcport->d_id.b.domain, fcport->d_id.b.area, 2684 fcport->d_id.b.domain, fcport->d_id.b.area,
2669 fcport->d_id.b.al_pa); 2685 fcport->d_id.b.al_pa);
2670 fcport->loop_id = FC_NO_LOOP_ID; 2686 fcport->loop_id = FC_NO_LOOP_ID;
@@ -2919,7 +2935,7 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
2919 opts |= BIT_1; 2935 opts |= BIT_1;
2920 rval = qla2x00_get_port_database(ha, fcport, opts); 2936 rval = qla2x00_get_port_database(ha, fcport, opts);
2921 if (rval != QLA_SUCCESS) { 2937 if (rval != QLA_SUCCESS) {
2922 ha->isp_ops.fabric_logout(ha, fcport->loop_id, 2938 ha->isp_ops->fabric_logout(ha, fcport->loop_id,
2923 fcport->d_id.b.domain, fcport->d_id.b.area, 2939 fcport->d_id.b.domain, fcport->d_id.b.area,
2924 fcport->d_id.b.al_pa); 2940 fcport->d_id.b.al_pa);
2925 qla2x00_mark_device_lost(ha, fcport, 1, 0); 2941 qla2x00_mark_device_lost(ha, fcport, 1, 0);
@@ -2964,7 +2980,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
2964 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 2980 fcport->d_id.b.area, fcport->d_id.b.al_pa));
2965 2981
2966 /* Login fcport on switch. */ 2982 /* Login fcport on switch. */
2967 ha->isp_ops.fabric_login(ha, fcport->loop_id, 2983 ha->isp_ops->fabric_login(ha, fcport->loop_id,
2968 fcport->d_id.b.domain, fcport->d_id.b.area, 2984 fcport->d_id.b.domain, fcport->d_id.b.area,
2969 fcport->d_id.b.al_pa, mb, BIT_0); 2985 fcport->d_id.b.al_pa, mb, BIT_0);
2970 if (mb[0] == MBS_PORT_ID_USED) { 2986 if (mb[0] == MBS_PORT_ID_USED) {
@@ -3032,7 +3048,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3032 * dead. 3048 * dead.
3033 */ 3049 */
3034 *next_loopid = fcport->loop_id; 3050 *next_loopid = fcport->loop_id;
3035 ha->isp_ops.fabric_logout(ha, fcport->loop_id, 3051 ha->isp_ops->fabric_logout(ha, fcport->loop_id,
3036 fcport->d_id.b.domain, fcport->d_id.b.area, 3052 fcport->d_id.b.domain, fcport->d_id.b.area,
3037 fcport->d_id.b.al_pa); 3053 fcport->d_id.b.al_pa);
3038 qla2x00_mark_device_lost(ha, fcport, 1, 0); 3054 qla2x00_mark_device_lost(ha, fcport, 1, 0);
@@ -3050,7 +3066,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3050 fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); 3066 fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
3051 3067
3052 *next_loopid = fcport->loop_id; 3068 *next_loopid = fcport->loop_id;
3053 ha->isp_ops.fabric_logout(ha, fcport->loop_id, 3069 ha->isp_ops->fabric_logout(ha, fcport->loop_id,
3054 fcport->d_id.b.domain, fcport->d_id.b.area, 3070 fcport->d_id.b.domain, fcport->d_id.b.area,
3055 fcport->d_id.b.al_pa); 3071 fcport->d_id.b.al_pa);
3056 fcport->loop_id = FC_NO_LOOP_ID; 3072 fcport->loop_id = FC_NO_LOOP_ID;
@@ -3206,7 +3222,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3206 3222
3207 qla_printk(KERN_INFO, ha, 3223 qla_printk(KERN_INFO, ha,
3208 "Performing ISP error recovery - ha= %p.\n", ha); 3224 "Performing ISP error recovery - ha= %p.\n", ha);
3209 ha->isp_ops.reset_chip(ha); 3225 ha->isp_ops->reset_chip(ha);
3210 3226
3211 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 3227 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
3212 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 3228 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
@@ -3232,9 +3248,9 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3232 } 3248 }
3233 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3249 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3234 3250
3235 ha->isp_ops.get_flash_version(ha, ha->request_ring); 3251 ha->isp_ops->get_flash_version(ha, ha->request_ring);
3236 3252
3237 ha->isp_ops.nvram_config(ha); 3253 ha->isp_ops->nvram_config(ha);
3238 3254
3239 if (!qla2x00_restart_isp(ha)) { 3255 if (!qla2x00_restart_isp(ha)) {
3240 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3256 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
@@ -3249,7 +3265,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3249 3265
3250 ha->flags.online = 1; 3266 ha->flags.online = 1;
3251 3267
3252 ha->isp_ops.enable_intrs(ha); 3268 ha->isp_ops->enable_intrs(ha);
3253 3269
3254 ha->isp_abort_cnt = 0; 3270 ha->isp_abort_cnt = 0;
3255 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3271 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
@@ -3274,7 +3290,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3274 * The next call disables the board 3290 * The next call disables the board
3275 * completely. 3291 * completely.
3276 */ 3292 */
3277 ha->isp_ops.reset_adapter(ha); 3293 ha->isp_ops->reset_adapter(ha);
3278 ha->flags.online = 0; 3294 ha->flags.online = 0;
3279 clear_bit(ISP_ABORT_RETRY, 3295 clear_bit(ISP_ABORT_RETRY,
3280 &ha->dpc_flags); 3296 &ha->dpc_flags);
@@ -3331,7 +3347,7 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
3331 /* If firmware needs to be loaded */ 3347 /* If firmware needs to be loaded */
3332 if (qla2x00_isp_firmware(ha)) { 3348 if (qla2x00_isp_firmware(ha)) {
3333 ha->flags.online = 0; 3349 ha->flags.online = 0;
3334 if (!(status = ha->isp_ops.chip_diag(ha))) { 3350 if (!(status = ha->isp_ops->chip_diag(ha))) {
3335 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 3351 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3336 status = qla2x00_setup_chip(ha); 3352 status = qla2x00_setup_chip(ha);
3337 goto done; 3353 goto done;
@@ -3423,7 +3439,7 @@ qla2x00_reset_adapter(scsi_qla_host_t *ha)
3423 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3439 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3424 3440
3425 ha->flags.online = 0; 3441 ha->flags.online = 0;
3426 ha->isp_ops.disable_intrs(ha); 3442 ha->isp_ops->disable_intrs(ha);
3427 3443
3428 spin_lock_irqsave(&ha->hardware_lock, flags); 3444 spin_lock_irqsave(&ha->hardware_lock, flags);
3429 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 3445 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -3440,7 +3456,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
3440 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3456 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3441 3457
3442 ha->flags.online = 0; 3458 ha->flags.online = 0;
3443 ha->isp_ops.disable_intrs(ha); 3459 ha->isp_ops->disable_intrs(ha);
3444 3460
3445 spin_lock_irqsave(&ha->hardware_lock, flags); 3461 spin_lock_irqsave(&ha->hardware_lock, flags);
3446 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET); 3462 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
@@ -3498,7 +3514,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3498 3514
3499 /* Get NVRAM data and calculate checksum. */ 3515 /* Get NVRAM data and calculate checksum. */
3500 dptr = (uint32_t *)nv; 3516 dptr = (uint32_t *)nv;
3501 ha->isp_ops.read_nvram(ha, (uint8_t *)dptr, ha->nvram_base, 3517 ha->isp_ops->read_nvram(ha, (uint8_t *)dptr, ha->nvram_base,
3502 ha->nvram_size); 3518 ha->nvram_size);
3503 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3519 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
3504 chksum += le32_to_cpu(*dptr++); 3520 chksum += le32_to_cpu(*dptr++);
@@ -4012,7 +4028,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
4012{ 4028{
4013 int ret, retries; 4029 int ret, retries;
4014 4030
4015 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 4031 if (!IS_FWI2_CAPABLE(ha))
4016 return; 4032 return;
4017 if (!ha->fw_major_version) 4033 if (!ha->fw_major_version)
4018 return; 4034 return;
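
Most of the remaining qla_init.c churn is mechanical: the scsi_qla_host's embedded isp_operations becomes a pointer to a shared static table (the qla_def.h side of that change is not in this excerpt), so every ha->isp_ops.method(...) call turns into ha->isp_ops->method(...). In sketch form, using the load_risc call from qla2x00_setup_chip() above:

	/*
	 * Before: struct isp_operations isp_ops;   -- per-host copy, patched at probe
	 * After:  struct isp_operations *isp_ops;  -- points at a static per-family table
	 */
	static int example_load_firmware(scsi_qla_host_t *ha, uint32_t *srisc_address)
	{
		return ha->isp_ops->load_risc(ha, srisc_address);
	}
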
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index d3023338628f..8e3b04464cff 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -104,7 +104,7 @@ static __inline__ void qla2x00_poll(scsi_qla_host_t *);
104static inline void 104static inline void
105qla2x00_poll(scsi_qla_host_t *ha) 105qla2x00_poll(scsi_qla_host_t *ha)
106{ 106{
107 ha->isp_ops.intr_handler(0, ha); 107 ha->isp_ops->intr_handler(0, ha);
108} 108}
109 109
110static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *); 110static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *);
@@ -163,7 +163,7 @@ static inline int qla2x00_is_reserved_id(scsi_qla_host_t *, uint16_t);
163static inline int 163static inline int
164qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id) 164qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id)
165{ 165{
166 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 166 if (IS_FWI2_CAPABLE(ha))
167 return (loop_id > NPH_LAST_HANDLE); 167 return (loop_id > NPH_LAST_HANDLE);
168 168
169 return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) || 169 return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
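
This is the same consolidation applied throughout the series: open-coded IS_QLA24XX(ha) || IS_QLA54XX(ha) tests become IS_FWI2_CAPABLE(), and the iIDMA checks become IS_IIDMA_CAPABLE(). The macros themselves live in qla_def.h (not shown here); given the DT_FWI2/DT_IIDMA bits set per device ID in the qla_os.c hunks below, they are presumably simple device-type bit tests along these lines (assumed definitions, not quoted from the patch):

	#define IS_FWI2_CAPABLE(ha)	((ha)->device_type & DT_FWI2)
	#define IS_IIDMA_CAPABLE(ha)	((ha)->device_type & DT_IIDMA)
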
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c71863ff5489..3a5e78cb6b3f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -326,7 +326,7 @@ qla2x00_start_scsi(srb_t *sp)
326 tot_dsds = nseg; 326 tot_dsds = nseg;
327 327
328 /* Calculate the number of request entries needed. */ 328 /* Calculate the number of request entries needed. */
329 req_cnt = ha->isp_ops.calc_req_entries(tot_dsds); 329 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
330 if (ha->req_q_cnt < (req_cnt + 2)) { 330 if (ha->req_q_cnt < (req_cnt + 2)) {
331 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); 331 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
332 if (ha->req_ring_index < cnt) 332 if (ha->req_ring_index < cnt)
@@ -364,7 +364,7 @@ qla2x00_start_scsi(srb_t *sp)
364 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 364 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
365 365
366 /* Build IOCB segments */ 366 /* Build IOCB segments */
367 ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds); 367 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
368 368
369 /* Set total data segment count. */ 369 /* Set total data segment count. */
370 cmd_pkt->entry_count = (uint8_t)req_cnt; 370 cmd_pkt->entry_count = (uint8_t)req_cnt;
@@ -432,7 +432,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
432 mrk->entry_type = MARKER_TYPE; 432 mrk->entry_type = MARKER_TYPE;
433 mrk->modifier = type; 433 mrk->modifier = type;
434 if (type != MK_SYNC_ALL) { 434 if (type != MK_SYNC_ALL) {
435 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 435 if (IS_FWI2_CAPABLE(ha)) {
436 mrk24 = (struct mrk_entry_24xx *) mrk; 436 mrk24 = (struct mrk_entry_24xx *) mrk;
437 mrk24->nport_handle = cpu_to_le16(loop_id); 437 mrk24->nport_handle = cpu_to_le16(loop_id);
438 mrk24->lun[1] = LSB(lun); 438 mrk24->lun[1] = LSB(lun);
@@ -487,7 +487,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
487 for (timer = HZ; timer; timer--) { 487 for (timer = HZ; timer; timer--) {
488 if ((req_cnt + 2) >= ha->req_q_cnt) { 488 if ((req_cnt + 2) >= ha->req_q_cnt) {
489 /* Calculate number of free request entries. */ 489 /* Calculate number of free request entries. */
490 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 490 if (IS_FWI2_CAPABLE(ha))
491 cnt = (uint16_t)RD_REG_DWORD( 491 cnt = (uint16_t)RD_REG_DWORD(
492 &reg->isp24.req_q_out); 492 &reg->isp24.req_q_out);
493 else 493 else
@@ -561,7 +561,7 @@ qla2x00_isp_cmd(scsi_qla_host_t *ha)
561 ha->request_ring_ptr++; 561 ha->request_ring_ptr++;
562 562
563 /* Set chip new ring index. */ 563 /* Set chip new ring index. */
564 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 564 if (IS_FWI2_CAPABLE(ha)) {
565 WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index); 565 WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
566 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in); 566 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
567 } else { 567 } else {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0ba4c8d37879..b8f226ae2633 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -143,7 +143,7 @@ qla2300_intr_handler(int irq, void *dev_id)
143 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 143 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
144 RD_REG_WORD(&reg->hccr); 144 RD_REG_WORD(&reg->hccr);
145 145
146 ha->isp_ops.fw_dump(ha, 1); 146 ha->isp_ops->fw_dump(ha, 1);
147 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 147 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
148 break; 148 break;
149 } else if ((stat & HSR_RISC_INT) == 0) 149 } else if ((stat & HSR_RISC_INT) == 0)
@@ -247,7 +247,7 @@ void
247qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) 247qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
248{ 248{
249#define LS_UNKNOWN 2 249#define LS_UNKNOWN 2
250 static char *link_speeds[5] = { "1", "2", "?", "4", "10" }; 250 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
251 char *link_speed; 251 char *link_speed;
252 uint16_t handle_cnt; 252 uint16_t handle_cnt;
253 uint16_t cnt; 253 uint16_t cnt;
@@ -334,9 +334,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
334 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", 334 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
335 mb[1], mb[2], mb[3]); 335 mb[1], mb[2], mb[3]);
336 336
337 ha->isp_ops.fw_dump(ha, 1); 337 ha->isp_ops->fw_dump(ha, 1);
338 338
339 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 339 if (IS_FWI2_CAPABLE(ha)) {
340 if (mb[1] == 0 && mb[2] == 0) { 340 if (mb[1] == 0 && mb[2] == 0) {
341 qla_printk(KERN_ERR, ha, 341 qla_printk(KERN_ERR, ha,
342 "Unrecoverable Hardware Error: adapter " 342 "Unrecoverable Hardware Error: adapter "
@@ -601,7 +601,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
601 "scsi(%ld): [R|Z]IO update completion.\n", 601 "scsi(%ld): [R|Z]IO update completion.\n",
602 ha->host_no)); 602 ha->host_no));
603 603
604 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 604 if (IS_FWI2_CAPABLE(ha))
605 qla24xx_process_response_queue(ha); 605 qla24xx_process_response_queue(ha);
606 else 606 else
607 qla2x00_process_response_queue(ha); 607 qla2x00_process_response_queue(ha);
@@ -823,7 +823,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
823 823
824 sts = (sts_entry_t *) pkt; 824 sts = (sts_entry_t *) pkt;
825 sts24 = (struct sts_entry_24xx *) pkt; 825 sts24 = (struct sts_entry_24xx *) pkt;
826 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 826 if (IS_FWI2_CAPABLE(ha)) {
827 comp_status = le16_to_cpu(sts24->comp_status); 827 comp_status = le16_to_cpu(sts24->comp_status);
828 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 828 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
829 } else { 829 } else {
@@ -872,7 +872,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
872 fcport = sp->fcport; 872 fcport = sp->fcport;
873 873
874 sense_len = rsp_info_len = resid_len = fw_resid_len = 0; 874 sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
875 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 875 if (IS_FWI2_CAPABLE(ha)) {
876 sense_len = le32_to_cpu(sts24->sense_len); 876 sense_len = le32_to_cpu(sts24->sense_len);
877 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 877 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
878 resid_len = le32_to_cpu(sts24->rsp_residual_count); 878 resid_len = le32_to_cpu(sts24->rsp_residual_count);
@@ -891,7 +891,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
891 /* Check for any FCP transport errors. */ 891 /* Check for any FCP transport errors. */
892 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 892 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
893 /* Sense data lies beyond any FCP RESPONSE data. */ 893 /* Sense data lies beyond any FCP RESPONSE data. */
894 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 894 if (IS_FWI2_CAPABLE(ha))
895 sense_data += rsp_info_len; 895 sense_data += rsp_info_len;
896 if (rsp_info_len > 3 && rsp_info[3]) { 896 if (rsp_info_len > 3 && rsp_info[3]) {
897 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol " 897 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
@@ -990,7 +990,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
990 case CS_DATA_UNDERRUN: 990 case CS_DATA_UNDERRUN:
991 resid = resid_len; 991 resid = resid_len;
992 /* Use F/W calculated residual length. */ 992 /* Use F/W calculated residual length. */
993 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 993 if (IS_FWI2_CAPABLE(ha))
994 resid = fw_resid_len; 994 resid = fw_resid_len;
995 995
996 if (scsi_status & SS_RESIDUAL_UNDER) { 996 if (scsi_status & SS_RESIDUAL_UNDER) {
@@ -1062,6 +1062,25 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1062 cp->device->id, cp->device->lun, cp, 1062 cp->device->id, cp->device->lun, cp,
1063 cp->serial_number)); 1063 cp->serial_number));
1064 1064
1065 /*
 1066 * In case of an Underrun condition, set both the lscsi
1067 * status and the completion status to appropriate
1068 * values.
1069 */
1070 if (resid &&
1071 ((unsigned)(cp->request_bufflen - resid) <
1072 cp->underflow)) {
1073 DEBUG2(qla_printk(KERN_INFO, ha,
1074 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1075 "detected (%x of %x bytes)...returning "
1076 "error status.\n", ha->host_no,
1077 cp->device->channel, cp->device->id,
1078 cp->device->lun, resid,
1079 cp->request_bufflen));
1080
1081 cp->result = DID_ERROR << 16 | lscsi_status;
1082 }
1083
1065 if (sense_len) 1084 if (sense_len)
1066 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, 1085 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
1067 CMD_ACTUAL_SNSLEN(cp))); 1086 CMD_ACTUAL_SNSLEN(cp)));
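
The added block implements the 'validate mid-layer underflow' item from the commit list: on a CS_DATA_UNDERRUN completion, if the firmware-reported residual means fewer bytes were transferred than the mid-layer's cp->underflow minimum, the command is failed with DID_ERROR combined with the SCSI status instead of being treated as a clean completion. The condition in isolation (a sketch of the test above, not additional driver code):

	#include <scsi/scsi_cmnd.h>

	/* True when the transfer fell short of the mid-layer's minimum. */
	static int example_midlayer_underflow(struct scsi_cmnd *cmd, uint32_t resid)
	{
		return resid &&
		    (unsigned)(cmd->request_bufflen - resid) < cmd->underflow;
	}
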
@@ -1166,7 +1185,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1166 case CS_TIMEOUT: 1185 case CS_TIMEOUT:
1167 cp->result = DID_BUS_BUSY << 16; 1186 cp->result = DID_BUS_BUSY << 16;
1168 1187
1169 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1188 if (IS_FWI2_CAPABLE(ha)) {
1170 DEBUG2(printk(KERN_INFO 1189 DEBUG2(printk(KERN_INFO
1171 "scsi(%ld:%d:%d:%d): TIMEOUT status detected " 1190 "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1172 "0x%x-0x%x\n", ha->host_no, cp->device->channel, 1191 "0x%x-0x%x\n", ha->host_no, cp->device->channel,
@@ -1235,7 +1254,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1235 } 1254 }
1236 1255
1237 /* Move sense data. */ 1256 /* Move sense data. */
1238 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 1257 if (IS_FWI2_CAPABLE(ha))
1239 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 1258 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1240 memcpy(sp->request_sense_ptr, pkt->data, sense_sz); 1259 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1241 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz)); 1260 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
@@ -1483,7 +1502,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1483 1502
1484 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1503 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1485 "Dumping firmware!\n", hccr); 1504 "Dumping firmware!\n", hccr);
1486 ha->isp_ops.fw_dump(ha, 1); 1505 ha->isp_ops->fw_dump(ha, 1);
1487 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1506 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1488 break; 1507 break;
1489 } else if ((stat & HSRX_RISC_INT) == 0) 1508 } else if ((stat & HSRX_RISC_INT) == 0)
@@ -1617,7 +1636,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1617 1636
1618 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1637 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1619 "Dumping firmware!\n", hccr); 1638 "Dumping firmware!\n", hccr);
1620 ha->isp_ops.fw_dump(ha, 1); 1639 ha->isp_ops->fw_dump(ha, 1);
1621 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1640 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1622 break; 1641 break;
1623 } else if ((stat & HSRX_RISC_INT) == 0) 1642 } else if ((stat & HSRX_RISC_INT) == 0)
@@ -1739,11 +1758,11 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1739 int ret; 1758 int ret;
1740 1759
1741 /* If possible, enable MSI-X. */ 1760 /* If possible, enable MSI-X. */
1742 if (!IS_QLA2432(ha)) 1761 if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
1743 goto skip_msix; 1762 goto skip_msix;
1744 1763
1745 if (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX || 1764 if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
1746 !QLA_MSIX_FW_MODE_1(ha->fw_attributes)) { 1765 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1747 DEBUG2(qla_printk(KERN_WARNING, ha, 1766 DEBUG2(qla_printk(KERN_WARNING, ha,
1748 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 1767 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1749 ha->chip_revision, ha->fw_attributes)); 1768 ha->chip_revision, ha->fw_attributes));
@@ -1762,7 +1781,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1762 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); 1781 "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
1763skip_msix: 1782skip_msix:
1764 1783
1765 if (!IS_QLA24XX(ha)) 1784 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha))
1766 goto skip_msi; 1785 goto skip_msi;
1767 1786
1768 ret = pci_enable_msi(ha->pdev); 1787 ret = pci_enable_msi(ha->pdev);
@@ -1772,7 +1791,7 @@ skip_msix:
1772 } 1791 }
1773skip_msi: 1792skip_msi:
1774 1793
1775 ret = request_irq(ha->pdev->irq, ha->isp_ops.intr_handler, 1794 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1776 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); 1795 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
1777 if (!ret) { 1796 if (!ret) {
1778 ha->flags.inta_enabled = 1; 1797 ha->flags.inta_enabled = 1;
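
The qla_isr.c setup changes extend interrupt allocation to the ISP2532: MSI-X is now attempted on ISP2432 and ISP2532 (the chip-revision/firmware-mode gate is kept for the 2432 only), MSI on ISP24xx and ISP2532, and finally legacy INTx through the per-ISP handler. Reduced to a sketch, with the MSI-X setup helper and error handling omitted, the fallback chain looks roughly like:

	static int example_request_irqs(scsi_qla_host_t *ha)
	{
		int ret;

		/* 1) Try MSI-X first (ISP2432/ISP2532 only; helper omitted here). */

		/* 2) Fall back to MSI on ISP24xx and ISP2532. */
		if (IS_QLA24XX(ha) || IS_QLA2532(ha))
			pci_enable_msi(ha->pdev);

		/* 3) Last resort: shared legacy interrupt line. */
		ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
		    IRQF_DISABLED | IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
		if (!ret)
			ha->flags.inta_enabled = 1;
		return ret;
	}
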
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2cd0cff25928..d3746ec80a85 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -90,7 +90,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
90 spin_lock_irqsave(&ha->hardware_lock, flags); 90 spin_lock_irqsave(&ha->hardware_lock, flags);
91 91
92 /* Load mailbox registers. */ 92 /* Load mailbox registers. */
93 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 93 if (IS_FWI2_CAPABLE(ha))
94 optr = (uint16_t __iomem *)&reg->isp24.mailbox0; 94 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
95 else 95 else
96 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0); 96 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@@ -154,7 +154,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
154 154
155 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 155 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
156 156
157 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 157 if (IS_FWI2_CAPABLE(ha))
158 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); 158 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
159 else 159 else
160 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); 160 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -175,7 +175,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
175 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 175 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
176 ha->host_no, command)); 176 ha->host_no, command));
177 177
178 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 178 if (IS_FWI2_CAPABLE(ha))
179 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); 179 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
180 else 180 else
181 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); 181 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -228,7 +228,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
228 uint16_t mb0; 228 uint16_t mb0;
229 uint32_t ictrl; 229 uint32_t ictrl;
230 230
231 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 231 if (IS_FWI2_CAPABLE(ha)) {
232 mb0 = RD_REG_WORD(&reg->isp24.mailbox0); 232 mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
233 ictrl = RD_REG_DWORD(&reg->isp24.ictrl); 233 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
234 } else { 234 } else {
@@ -322,7 +322,7 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
322 322
323 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 323 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
324 324
325 if (MSW(risc_addr) || IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 325 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
326 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 326 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
327 mcp->mb[8] = MSW(risc_addr); 327 mcp->mb[8] = MSW(risc_addr);
328 mcp->out_mb = MBX_8|MBX_0; 328 mcp->out_mb = MBX_8|MBX_0;
@@ -336,7 +336,7 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
336 mcp->mb[6] = MSW(MSD(req_dma)); 336 mcp->mb[6] = MSW(MSD(req_dma));
337 mcp->mb[7] = LSW(MSD(req_dma)); 337 mcp->mb[7] = LSW(MSD(req_dma));
338 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 338 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
339 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 339 if (IS_FWI2_CAPABLE(ha)) {
340 mcp->mb[4] = MSW(risc_code_size); 340 mcp->mb[4] = MSW(risc_code_size);
341 mcp->mb[5] = LSW(risc_code_size); 341 mcp->mb[5] = LSW(risc_code_size);
342 mcp->out_mb |= MBX_5|MBX_4; 342 mcp->out_mb |= MBX_5|MBX_4;
@@ -387,7 +387,7 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
387 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 387 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
388 mcp->out_mb = MBX_0; 388 mcp->out_mb = MBX_0;
389 mcp->in_mb = MBX_0; 389 mcp->in_mb = MBX_0;
390 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 390 if (IS_FWI2_CAPABLE(ha)) {
391 mcp->mb[1] = MSW(risc_addr); 391 mcp->mb[1] = MSW(risc_addr);
392 mcp->mb[2] = LSW(risc_addr); 392 mcp->mb[2] = LSW(risc_addr);
393 mcp->mb[3] = 0; 393 mcp->mb[3] = 0;
@@ -410,7 +410,7 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
410 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 410 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
411 ha->host_no, rval, mcp->mb[0])); 411 ha->host_no, rval, mcp->mb[0]));
412 } else { 412 } else {
413 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 413 if (IS_FWI2_CAPABLE(ha)) {
414 DEBUG11(printk("%s(%ld): done exchanges=%x.\n", 414 DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
415 __func__, ha->host_no, mcp->mb[1])); 415 __func__, ha->host_no, mcp->mb[1]));
416 } else { 416 } else {
@@ -551,7 +551,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
551 mcp->mb[3] = fwopts[3]; 551 mcp->mb[3] = fwopts[3];
552 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 552 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
553 mcp->in_mb = MBX_0; 553 mcp->in_mb = MBX_0;
554 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 554 if (IS_FWI2_CAPABLE(ha)) {
555 mcp->in_mb |= MBX_1; 555 mcp->in_mb |= MBX_1;
556 } else { 556 } else {
557 mcp->mb[10] = fwopts[10]; 557 mcp->mb[10] = fwopts[10];
@@ -664,7 +664,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
664 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 664 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
665 mcp->out_mb = MBX_0; 665 mcp->out_mb = MBX_0;
666 mcp->in_mb = MBX_0; 666 mcp->in_mb = MBX_0;
667 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 667 if (IS_FWI2_CAPABLE(ha)) {
668 mcp->mb[1] = MSW(risc_addr); 668 mcp->mb[1] = MSW(risc_addr);
669 mcp->mb[2] = LSW(risc_addr); 669 mcp->mb[2] = LSW(risc_addr);
670 mcp->out_mb |= MBX_2|MBX_1; 670 mcp->out_mb |= MBX_2|MBX_1;
@@ -681,8 +681,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
681 681
682 if (rval != QLA_SUCCESS) { 682 if (rval != QLA_SUCCESS) {
683 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__, 683 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
684 ha->host_no, rval, (IS_QLA24XX(ha) || IS_QLA54XX(ha) ? 684 ha->host_no, rval, IS_FWI2_CAPABLE(ha) ?
685 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]))); 685 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
686 } else { 686 } else {
687 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 687 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
688 } 688 }
@@ -739,7 +739,7 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void* buffer, dma_addr_t phys_addr,
739 739
740 /* Mask reserved bits. */ 740 /* Mask reserved bits. */
741 sts_entry->entry_status &= 741 sts_entry->entry_status &=
742 IS_QLA24XX(ha) || IS_QLA54XX(ha) ? RF_MASK_24XX :RF_MASK; 742 IS_FWI2_CAPABLE(ha) ? RF_MASK_24XX :RF_MASK;
743 } 743 }
744 744
745 return rval; 745 return rval;
@@ -1085,7 +1085,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1085 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); 1085 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
1086 1086
1087 mcp->mb[0] = MBC_GET_PORT_DATABASE; 1087 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1088 if (opt != 0 && !IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 1088 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1089 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE; 1089 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1090 mcp->mb[2] = MSW(pd_dma); 1090 mcp->mb[2] = MSW(pd_dma);
1091 mcp->mb[3] = LSW(pd_dma); 1091 mcp->mb[3] = LSW(pd_dma);
@@ -1094,7 +1094,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1094 mcp->mb[9] = ha->vp_idx; 1094 mcp->mb[9] = ha->vp_idx;
1095 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 1095 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1096 mcp->in_mb = MBX_0; 1096 mcp->in_mb = MBX_0;
1097 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1097 if (IS_FWI2_CAPABLE(ha)) {
1098 mcp->mb[1] = fcport->loop_id; 1098 mcp->mb[1] = fcport->loop_id;
1099 mcp->mb[10] = opt; 1099 mcp->mb[10] = opt;
1100 mcp->out_mb |= MBX_10|MBX_1; 1100 mcp->out_mb |= MBX_10|MBX_1;
@@ -1107,15 +1107,15 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1107 mcp->mb[1] = fcport->loop_id << 8 | opt; 1107 mcp->mb[1] = fcport->loop_id << 8 | opt;
1108 mcp->out_mb |= MBX_1; 1108 mcp->out_mb |= MBX_1;
1109 } 1109 }
1110 mcp->buf_size = (IS_QLA24XX(ha) || IS_QLA54XX(ha) ? 1110 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1111 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE); 1111 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1112 mcp->flags = MBX_DMA_IN; 1112 mcp->flags = MBX_DMA_IN;
1113 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1113 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1114 rval = qla2x00_mailbox_command(ha, mcp); 1114 rval = qla2x00_mailbox_command(ha, mcp);
1115 if (rval != QLA_SUCCESS) 1115 if (rval != QLA_SUCCESS)
1116 goto gpd_error_out; 1116 goto gpd_error_out;
1117 1117
1118 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1118 if (IS_FWI2_CAPABLE(ha)) {
1119 pd24 = (struct port_database_24xx *) pd; 1119 pd24 = (struct port_database_24xx *) pd;
1120 1120
1121 /* Check for logged in state. */ 1121 /* Check for logged in state. */
@@ -1333,7 +1333,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
1333 1333
1334 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1334 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1335 1335
1336 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1336 if (IS_FWI2_CAPABLE(ha)) {
1337 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1337 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1338 mcp->mb[1] = BIT_6; 1338 mcp->mb[1] = BIT_6;
1339 mcp->mb[2] = 0; 1339 mcp->mb[2] = 0;
@@ -1637,7 +1637,7 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1637 mbx_cmd_t mc; 1637 mbx_cmd_t mc;
1638 mbx_cmd_t *mcp = &mc; 1638 mbx_cmd_t *mcp = &mc;
1639 1639
1640 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 1640 if (IS_FWI2_CAPABLE(ha))
1641 return qla24xx_login_fabric(ha, fcport->loop_id, 1641 return qla24xx_login_fabric(ha, fcport->loop_id,
1642 fcport->d_id.b.domain, fcport->d_id.b.area, 1642 fcport->d_id.b.domain, fcport->d_id.b.area,
1643 fcport->d_id.b.al_pa, mb_ret, opt); 1643 fcport->d_id.b.al_pa, mb_ret, opt);
@@ -1821,7 +1821,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
1821 ha->host_no)); 1821 ha->host_no));
1822 1822
1823 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1823 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1824 mcp->mb[1] = IS_QLA24XX(ha) || IS_QLA54XX(ha) ? BIT_3: 0; 1824 mcp->mb[1] = IS_FWI2_CAPABLE(ha) ? BIT_3: 0;
1825 mcp->mb[2] = 0; 1825 mcp->mb[2] = 0;
1826 mcp->mb[3] = 0; 1826 mcp->mb[3] = 0;
1827 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1827 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1871,7 +1871,7 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1871 1871
1872 mcp->mb[0] = MBC_GET_ID_LIST; 1872 mcp->mb[0] = MBC_GET_ID_LIST;
1873 mcp->out_mb = MBX_0; 1873 mcp->out_mb = MBX_0;
1874 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1874 if (IS_FWI2_CAPABLE(ha)) {
1875 mcp->mb[2] = MSW(id_list_dma); 1875 mcp->mb[2] = MSW(id_list_dma);
1876 mcp->mb[3] = LSW(id_list_dma); 1876 mcp->mb[3] = LSW(id_list_dma);
1877 mcp->mb[6] = MSW(MSD(id_list_dma)); 1877 mcp->mb[6] = MSW(MSD(id_list_dma));
@@ -2063,7 +2063,7 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2063 mcp->mb[7] = LSW(MSD(stat_buf_dma)); 2063 mcp->mb[7] = LSW(MSD(stat_buf_dma));
2064 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2064 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2065 mcp->in_mb = MBX_0; 2065 mcp->in_mb = MBX_0;
2066 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 2066 if (IS_FWI2_CAPABLE(ha)) {
2067 mcp->mb[1] = loop_id; 2067 mcp->mb[1] = loop_id;
2068 mcp->mb[4] = 0; 2068 mcp->mb[4] = 0;
2069 mcp->mb[10] = 0; 2069 mcp->mb[10] = 0;
@@ -2334,7 +2334,7 @@ qla2x00_system_error(scsi_qla_host_t *ha)
2334 mbx_cmd_t mc; 2334 mbx_cmd_t mc;
2335 mbx_cmd_t *mcp = &mc; 2335 mbx_cmd_t *mcp = &mc;
2336 2336
2337 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 2337 if (!IS_FWI2_CAPABLE(ha))
2338 return QLA_FUNCTION_FAILED; 2338 return QLA_FUNCTION_FAILED;
2339 2339
2340 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2340 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2444,7 +2444,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *ha)
2444 mbx_cmd_t mc; 2444 mbx_cmd_t mc;
2445 mbx_cmd_t *mcp = &mc; 2445 mbx_cmd_t *mcp = &mc;
2446 2446
2447 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 2447 if (!IS_FWI2_CAPABLE(ha))
2448 return QLA_FUNCTION_FAILED; 2448 return QLA_FUNCTION_FAILED;
2449 2449
2450 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2450 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2474,7 +2474,7 @@ qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma,
2474 mbx_cmd_t mc; 2474 mbx_cmd_t mc;
2475 mbx_cmd_t *mcp = &mc; 2475 mbx_cmd_t *mcp = &mc;
2476 2476
2477 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 2477 if (!IS_FWI2_CAPABLE(ha))
2478 return QLA_FUNCTION_FAILED; 2478 return QLA_FUNCTION_FAILED;
2479 2479
2480 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2480 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2514,7 +2514,7 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
2514 mbx_cmd_t mc; 2514 mbx_cmd_t mc;
2515 mbx_cmd_t *mcp = &mc; 2515 mbx_cmd_t *mcp = &mc;
2516 2516
2517 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 2517 if (!IS_FWI2_CAPABLE(ha))
2518 return QLA_FUNCTION_FAILED; 2518 return QLA_FUNCTION_FAILED;
2519 2519
2520 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2520 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2552,7 +2552,7 @@ qla2x00_get_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2552 mbx_cmd_t mc; 2552 mbx_cmd_t mc;
2553 mbx_cmd_t *mcp = &mc; 2553 mbx_cmd_t *mcp = &mc;
2554 2554
2555 if (!IS_QLA24XX(ha)) 2555 if (!IS_IIDMA_CAPABLE(ha))
2556 return QLA_FUNCTION_FAILED; 2556 return QLA_FUNCTION_FAILED;
2557 2557
2558 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2558 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2595,7 +2595,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2595 mbx_cmd_t mc; 2595 mbx_cmd_t mc;
2596 mbx_cmd_t *mcp = &mc; 2596 mbx_cmd_t *mcp = &mc;
2597 2597
2598 if (!IS_QLA24XX(ha)) 2598 if (!IS_IIDMA_CAPABLE(ha))
2599 return QLA_FUNCTION_FAILED; 2599 return QLA_FUNCTION_FAILED;
2600 2600
2601 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2601 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 92376f9dfdd5..c488996cb958 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -265,6 +265,8 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
265 strcpy(str, "PCIe ("); 265 strcpy(str, "PCIe (");
266 if (lspeed == 1) 266 if (lspeed == 1)
267 strcat(str, "2.5Gb/s "); 267 strcat(str, "2.5Gb/s ");
268 else if (lspeed == 2)
269 strcat(str, "5.0Gb/s ");
268 else 270 else
269 strcat(str, "<unknown> "); 271 strcat(str, "<unknown> ");
270 snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); 272 snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
@@ -343,6 +345,12 @@ qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
343 strcat(str, "[IP] "); 345 strcat(str, "[IP] ");
344 if (ha->fw_attributes & BIT_2) 346 if (ha->fw_attributes & BIT_2)
345 strcat(str, "[Multi-ID] "); 347 strcat(str, "[Multi-ID] ");
348 if (ha->fw_attributes & BIT_3)
349 strcat(str, "[SB-2] ");
350 if (ha->fw_attributes & BIT_4)
351 strcat(str, "[T10 CRC] ");
352 if (ha->fw_attributes & BIT_5)
353 strcat(str, "[VI] ");
346 if (ha->fw_attributes & BIT_13) 354 if (ha->fw_attributes & BIT_13)
347 strcat(str, "[Experimental]"); 355 strcat(str, "[Experimental]");
348 return str; 356 return str;
@@ -681,7 +689,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
681 DEBUG3(qla2x00_print_scsi_cmd(cmd)); 689 DEBUG3(qla2x00_print_scsi_cmd(cmd));
682 690
683 spin_unlock_irqrestore(&pha->hardware_lock, flags); 691 spin_unlock_irqrestore(&pha->hardware_lock, flags);
684 if (ha->isp_ops.abort_command(ha, sp)) { 692 if (ha->isp_ops->abort_command(ha, sp)) {
685 DEBUG2(printk("%s(%ld): abort_command " 693 DEBUG2(printk("%s(%ld): abort_command "
686 "mbx failed.\n", __func__, ha->host_no)); 694 "mbx failed.\n", __func__, ha->host_no));
687 } else { 695 } else {
@@ -813,7 +821,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
813#if defined(LOGOUT_AFTER_DEVICE_RESET) 821#if defined(LOGOUT_AFTER_DEVICE_RESET)
814 if (ret == SUCCESS) { 822 if (ret == SUCCESS) {
815 if (fcport->flags & FC_FABRIC_DEVICE) { 823 if (fcport->flags & FC_FABRIC_DEVICE) {
816 ha->isp_ops.fabric_logout(ha, fcport->loop_id); 824 ha->isp_ops->fabric_logout(ha, fcport->loop_id);
817 qla2x00_mark_device_lost(ha, fcport, 0, 0); 825 qla2x00_mark_device_lost(ha, fcport, 0, 0);
818 } 826 }
819 } 827 }
@@ -1105,7 +1113,7 @@ static int
1105qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport) 1113qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
1106{ 1114{
1107 /* Abort Target command will clear Reservation */ 1115 /* Abort Target command will clear Reservation */
1108 return ha->isp_ops.abort_target(reset_fcport); 1116 return ha->isp_ops->abort_target(reset_fcport);
1109} 1117}
1110 1118
1111static int 1119static int
@@ -1184,8 +1192,8 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
1184 !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) { 1192 !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
1185 /* Ok, a 64bit DMA mask is applicable. */ 1193 /* Ok, a 64bit DMA mask is applicable. */
1186 ha->flags.enable_64bit_addressing = 1; 1194 ha->flags.enable_64bit_addressing = 1;
1187 ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_64; 1195 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1188 ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_64; 1196 ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1189 return; 1197 return;
1190 } 1198 }
1191 } 1199 }
@@ -1194,6 +1202,193 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
1194 pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK); 1202 pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK);
1195} 1203}
1196 1204
1205static void
1206qla2x00_enable_intrs(scsi_qla_host_t *ha)
1207{
1208 unsigned long flags = 0;
1209 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1210
1211 spin_lock_irqsave(&ha->hardware_lock, flags);
1212 ha->interrupts_on = 1;
1213 /* enable risc and host interrupts */
1214 WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1215 RD_REG_WORD(&reg->ictrl);
1216 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1217
1218}
1219
1220static void
1221qla2x00_disable_intrs(scsi_qla_host_t *ha)
1222{
1223 unsigned long flags = 0;
1224 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1225
1226 spin_lock_irqsave(&ha->hardware_lock, flags);
1227 ha->interrupts_on = 0;
1228 /* disable risc and host interrupts */
1229 WRT_REG_WORD(&reg->ictrl, 0);
1230 RD_REG_WORD(&reg->ictrl);
1231 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1232}
1233
1234static void
1235qla24xx_enable_intrs(scsi_qla_host_t *ha)
1236{
1237 unsigned long flags = 0;
1238 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1239
1240 spin_lock_irqsave(&ha->hardware_lock, flags);
1241 ha->interrupts_on = 1;
1242 WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1243 RD_REG_DWORD(&reg->ictrl);
1244 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1245}
1246
1247static void
1248qla24xx_disable_intrs(scsi_qla_host_t *ha)
1249{
1250 unsigned long flags = 0;
1251 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1252
1253 spin_lock_irqsave(&ha->hardware_lock, flags);
1254 ha->interrupts_on = 0;
1255 WRT_REG_DWORD(&reg->ictrl, 0);
1256 RD_REG_DWORD(&reg->ictrl);
1257 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1258}
1259
1260static struct isp_operations qla2100_isp_ops = {
1261 .pci_config = qla2100_pci_config,
1262 .reset_chip = qla2x00_reset_chip,
1263 .chip_diag = qla2x00_chip_diag,
1264 .config_rings = qla2x00_config_rings,
1265 .reset_adapter = qla2x00_reset_adapter,
1266 .nvram_config = qla2x00_nvram_config,
1267 .update_fw_options = qla2x00_update_fw_options,
1268 .load_risc = qla2x00_load_risc,
1269 .pci_info_str = qla2x00_pci_info_str,
1270 .fw_version_str = qla2x00_fw_version_str,
1271 .intr_handler = qla2100_intr_handler,
1272 .enable_intrs = qla2x00_enable_intrs,
1273 .disable_intrs = qla2x00_disable_intrs,
1274 .abort_command = qla2x00_abort_command,
1275 .abort_target = qla2x00_abort_target,
1276 .fabric_login = qla2x00_login_fabric,
1277 .fabric_logout = qla2x00_fabric_logout,
1278 .calc_req_entries = qla2x00_calc_iocbs_32,
1279 .build_iocbs = qla2x00_build_scsi_iocbs_32,
1280 .prep_ms_iocb = qla2x00_prep_ms_iocb,
1281 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
1282 .read_nvram = qla2x00_read_nvram_data,
1283 .write_nvram = qla2x00_write_nvram_data,
1284 .fw_dump = qla2100_fw_dump,
1285 .beacon_on = NULL,
1286 .beacon_off = NULL,
1287 .beacon_blink = NULL,
1288 .read_optrom = qla2x00_read_optrom_data,
1289 .write_optrom = qla2x00_write_optrom_data,
1290 .get_flash_version = qla2x00_get_flash_version,
1291};
1292
1293static struct isp_operations qla2300_isp_ops = {
1294 .pci_config = qla2300_pci_config,
1295 .reset_chip = qla2x00_reset_chip,
1296 .chip_diag = qla2x00_chip_diag,
1297 .config_rings = qla2x00_config_rings,
1298 .reset_adapter = qla2x00_reset_adapter,
1299 .nvram_config = qla2x00_nvram_config,
1300 .update_fw_options = qla2x00_update_fw_options,
1301 .load_risc = qla2x00_load_risc,
1302 .pci_info_str = qla2x00_pci_info_str,
1303 .fw_version_str = qla2x00_fw_version_str,
1304 .intr_handler = qla2300_intr_handler,
1305 .enable_intrs = qla2x00_enable_intrs,
1306 .disable_intrs = qla2x00_disable_intrs,
1307 .abort_command = qla2x00_abort_command,
1308 .abort_target = qla2x00_abort_target,
1309 .fabric_login = qla2x00_login_fabric,
1310 .fabric_logout = qla2x00_fabric_logout,
1311 .calc_req_entries = qla2x00_calc_iocbs_32,
1312 .build_iocbs = qla2x00_build_scsi_iocbs_32,
1313 .prep_ms_iocb = qla2x00_prep_ms_iocb,
1314 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
1315 .read_nvram = qla2x00_read_nvram_data,
1316 .write_nvram = qla2x00_write_nvram_data,
1317 .fw_dump = qla2300_fw_dump,
1318 .beacon_on = qla2x00_beacon_on,
1319 .beacon_off = qla2x00_beacon_off,
1320 .beacon_blink = qla2x00_beacon_blink,
1321 .read_optrom = qla2x00_read_optrom_data,
1322 .write_optrom = qla2x00_write_optrom_data,
1323 .get_flash_version = qla2x00_get_flash_version,
1324};
1325
1326static struct isp_operations qla24xx_isp_ops = {
1327 .pci_config = qla24xx_pci_config,
1328 .reset_chip = qla24xx_reset_chip,
1329 .chip_diag = qla24xx_chip_diag,
1330 .config_rings = qla24xx_config_rings,
1331 .reset_adapter = qla24xx_reset_adapter,
1332 .nvram_config = qla24xx_nvram_config,
1333 .update_fw_options = qla24xx_update_fw_options,
1334 .load_risc = qla24xx_load_risc,
1335 .pci_info_str = qla24xx_pci_info_str,
1336 .fw_version_str = qla24xx_fw_version_str,
1337 .intr_handler = qla24xx_intr_handler,
1338 .enable_intrs = qla24xx_enable_intrs,
1339 .disable_intrs = qla24xx_disable_intrs,
1340 .abort_command = qla24xx_abort_command,
1341 .abort_target = qla24xx_abort_target,
1342 .fabric_login = qla24xx_login_fabric,
1343 .fabric_logout = qla24xx_fabric_logout,
1344 .calc_req_entries = NULL,
1345 .build_iocbs = NULL,
1346 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1347 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1348 .read_nvram = qla24xx_read_nvram_data,
1349 .write_nvram = qla24xx_write_nvram_data,
1350 .fw_dump = qla24xx_fw_dump,
1351 .beacon_on = qla24xx_beacon_on,
1352 .beacon_off = qla24xx_beacon_off,
1353 .beacon_blink = qla24xx_beacon_blink,
1354 .read_optrom = qla24xx_read_optrom_data,
1355 .write_optrom = qla24xx_write_optrom_data,
1356 .get_flash_version = qla24xx_get_flash_version,
1357};
1358
1359static struct isp_operations qla25xx_isp_ops = {
1360 .pci_config = qla25xx_pci_config,
1361 .reset_chip = qla24xx_reset_chip,
1362 .chip_diag = qla24xx_chip_diag,
1363 .config_rings = qla24xx_config_rings,
1364 .reset_adapter = qla24xx_reset_adapter,
1365 .nvram_config = qla24xx_nvram_config,
1366 .update_fw_options = qla24xx_update_fw_options,
1367 .load_risc = qla24xx_load_risc,
1368 .pci_info_str = qla24xx_pci_info_str,
1369 .fw_version_str = qla24xx_fw_version_str,
1370 .intr_handler = qla24xx_intr_handler,
1371 .enable_intrs = qla24xx_enable_intrs,
1372 .disable_intrs = qla24xx_disable_intrs,
1373 .abort_command = qla24xx_abort_command,
1374 .abort_target = qla24xx_abort_target,
1375 .fabric_login = qla24xx_login_fabric,
1376 .fabric_logout = qla24xx_fabric_logout,
1377 .calc_req_entries = NULL,
1378 .build_iocbs = NULL,
1379 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1380 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1381 .read_nvram = qla25xx_read_nvram_data,
1382 .write_nvram = qla25xx_write_nvram_data,
1383 .fw_dump = qla25xx_fw_dump,
1384 .beacon_on = qla24xx_beacon_on,
1385 .beacon_off = qla24xx_beacon_off,
1386 .beacon_blink = qla24xx_beacon_blink,
1387 .read_optrom = qla24xx_read_optrom_data,
1388 .write_optrom = qla24xx_write_optrom_data,
1389 .get_flash_version = qla24xx_get_flash_version,
1390};
1391
1197static inline void 1392static inline void
1198qla2x00_set_isp_flags(scsi_qla_host_t *ha) 1393qla2x00_set_isp_flags(scsi_qla_host_t *ha)
1199{ 1394{
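
These four static tables replace the long run of ha->isp_ops.* assignments deleted from qla2x00_probe_one() further down; each host now simply points at the table for its chip family (the ISP2100 assignment, ha->isp_ops = &qla2100_isp_ops, is visible below). Note that qla25xx_isp_ops reuses the ISP24xx methods except for PCI configuration, the NVRAM accessors and the firmware-dump routine. A hedged sketch of how the selection presumably ends up looking in probe (only the 2100 branch is actually shown in this diff):

	static void example_assign_isp_ops(scsi_qla_host_t *ha)
	{
		if (IS_QLA2100(ha))
			ha->isp_ops = &qla2100_isp_ops;
		else if (IS_QLA25XX(ha))	/* test before the generic FWI2 check */
			ha->isp_ops = &qla25xx_isp_ops;
		else if (IS_FWI2_CAPABLE(ha))	/* ISP24xx / ISP54xx */
			ha->isp_ops = &qla24xx_isp_ops;
		else
			ha->isp_ops = &qla2300_isp_ops;	/* remaining legacy families */
	}
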
@@ -1238,19 +1433,32 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
1238 case PCI_DEVICE_ID_QLOGIC_ISP2422: 1433 case PCI_DEVICE_ID_QLOGIC_ISP2422:
1239 ha->device_type |= DT_ISP2422; 1434 ha->device_type |= DT_ISP2422;
1240 ha->device_type |= DT_ZIO_SUPPORTED; 1435 ha->device_type |= DT_ZIO_SUPPORTED;
1436 ha->device_type |= DT_FWI2;
1437 ha->device_type |= DT_IIDMA;
1241 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1438 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1242 break; 1439 break;
1243 case PCI_DEVICE_ID_QLOGIC_ISP2432: 1440 case PCI_DEVICE_ID_QLOGIC_ISP2432:
1244 ha->device_type |= DT_ISP2432; 1441 ha->device_type |= DT_ISP2432;
1245 ha->device_type |= DT_ZIO_SUPPORTED; 1442 ha->device_type |= DT_ZIO_SUPPORTED;
1443 ha->device_type |= DT_FWI2;
1444 ha->device_type |= DT_IIDMA;
1246 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1445 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1247 break; 1446 break;
1248 case PCI_DEVICE_ID_QLOGIC_ISP5422: 1447 case PCI_DEVICE_ID_QLOGIC_ISP5422:
1249 ha->device_type |= DT_ISP5422; 1448 ha->device_type |= DT_ISP5422;
1449 ha->device_type |= DT_FWI2;
1250 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1450 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1251 break; 1451 break;
1252 case PCI_DEVICE_ID_QLOGIC_ISP5432: 1452 case PCI_DEVICE_ID_QLOGIC_ISP5432:
1253 ha->device_type |= DT_ISP5432; 1453 ha->device_type |= DT_ISP5432;
1454 ha->device_type |= DT_FWI2;
1455 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1456 break;
1457 case PCI_DEVICE_ID_QLOGIC_ISP2532:
1458 ha->device_type |= DT_ISP2532;
1459 ha->device_type |= DT_ZIO_SUPPORTED;
1460 ha->device_type |= DT_FWI2;
1461 ha->device_type |= DT_IIDMA;
1254 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1462 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1255 break; 1463 break;
1256 } 1464 }
@@ -1323,61 +1531,6 @@ iospace_error_exit:
1323} 1531}
1324 1532
1325static void 1533static void
1326qla2x00_enable_intrs(scsi_qla_host_t *ha)
1327{
1328 unsigned long flags = 0;
1329 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1330
1331 spin_lock_irqsave(&ha->hardware_lock, flags);
1332 ha->interrupts_on = 1;
1333 /* enable risc and host interrupts */
1334 WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1335 RD_REG_WORD(&reg->ictrl);
1336 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1337
1338}
1339
1340static void
1341qla2x00_disable_intrs(scsi_qla_host_t *ha)
1342{
1343 unsigned long flags = 0;
1344 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1345
1346 spin_lock_irqsave(&ha->hardware_lock, flags);
1347 ha->interrupts_on = 0;
1348 /* disable risc and host interrupts */
1349 WRT_REG_WORD(&reg->ictrl, 0);
1350 RD_REG_WORD(&reg->ictrl);
1351 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1352}
1353
1354static void
1355qla24xx_enable_intrs(scsi_qla_host_t *ha)
1356{
1357 unsigned long flags = 0;
1358 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1359
1360 spin_lock_irqsave(&ha->hardware_lock, flags);
1361 ha->interrupts_on = 1;
1362 WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1363 RD_REG_DWORD(&reg->ictrl);
1364 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1365}
1366
1367static void
1368qla24xx_disable_intrs(scsi_qla_host_t *ha)
1369{
1370 unsigned long flags = 0;
1371 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1372
1373 spin_lock_irqsave(&ha->hardware_lock, flags);
1374 ha->interrupts_on = 0;
1375 WRT_REG_DWORD(&reg->ictrl, 0);
1376 RD_REG_DWORD(&reg->ictrl);
1377 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1378}
1379
1380static void
1381qla2xxx_scan_start(struct Scsi_Host *shost) 1534qla2xxx_scan_start(struct Scsi_Host *shost)
1382{ 1535{
1383 scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata; 1536 scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
@@ -1422,7 +1575,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1422 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 1575 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
1423 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || 1576 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
1424 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 1577 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
1425 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432) 1578 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
1579 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532)
1426 sht = &qla24xx_driver_template; 1580 sht = &qla24xx_driver_template;
1427 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 1581 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
1428 if (host == NULL) { 1582 if (host == NULL) {
@@ -1466,33 +1620,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1466 ha->max_q_depth = ql2xmaxqdepth; 1620 ha->max_q_depth = ql2xmaxqdepth;
1467 1621
1468 /* Assign ISP specific operations. */ 1622 /* Assign ISP specific operations. */
1469 ha->isp_ops.pci_config = qla2100_pci_config;
1470 ha->isp_ops.reset_chip = qla2x00_reset_chip;
1471 ha->isp_ops.chip_diag = qla2x00_chip_diag;
1472 ha->isp_ops.config_rings = qla2x00_config_rings;
1473 ha->isp_ops.reset_adapter = qla2x00_reset_adapter;
1474 ha->isp_ops.nvram_config = qla2x00_nvram_config;
1475 ha->isp_ops.update_fw_options = qla2x00_update_fw_options;
1476 ha->isp_ops.load_risc = qla2x00_load_risc;
1477 ha->isp_ops.pci_info_str = qla2x00_pci_info_str;
1478 ha->isp_ops.fw_version_str = qla2x00_fw_version_str;
1479 ha->isp_ops.intr_handler = qla2100_intr_handler;
1480 ha->isp_ops.enable_intrs = qla2x00_enable_intrs;
1481 ha->isp_ops.disable_intrs = qla2x00_disable_intrs;
1482 ha->isp_ops.abort_command = qla2x00_abort_command;
1483 ha->isp_ops.abort_target = qla2x00_abort_target;
1484 ha->isp_ops.fabric_login = qla2x00_login_fabric;
1485 ha->isp_ops.fabric_logout = qla2x00_fabric_logout;
1486 ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_32;
1487 ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_32;
1488 ha->isp_ops.prep_ms_iocb = qla2x00_prep_ms_iocb;
1489 ha->isp_ops.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb;
1490 ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
1491 ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
1492 ha->isp_ops.fw_dump = qla2100_fw_dump;
1493 ha->isp_ops.read_optrom = qla2x00_read_optrom_data;
1494 ha->isp_ops.write_optrom = qla2x00_write_optrom_data;
1495 ha->isp_ops.get_flash_version = qla2x00_get_flash_version;
1496 if (IS_QLA2100(ha)) { 1623 if (IS_QLA2100(ha)) {
1497 host->max_id = MAX_TARGETS_2100; 1624 host->max_id = MAX_TARGETS_2100;
1498 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1625 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
@@ -1501,6 +1628,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1501 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1628 ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
1502 host->sg_tablesize = 32; 1629 host->sg_tablesize = 32;
1503 ha->gid_list_info_size = 4; 1630 ha->gid_list_info_size = 4;
1631 ha->isp_ops = &qla2100_isp_ops;
1504 } else if (IS_QLA2200(ha)) { 1632 } else if (IS_QLA2200(ha)) {
1505 host->max_id = MAX_TARGETS_2200; 1633 host->max_id = MAX_TARGETS_2200;
1506 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1634 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1508,21 +1636,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1508 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1636 ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
1509 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1637 ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
1510 ha->gid_list_info_size = 4; 1638 ha->gid_list_info_size = 4;
1639 ha->isp_ops = &qla2100_isp_ops;
1511 } else if (IS_QLA23XX(ha)) { 1640 } else if (IS_QLA23XX(ha)) {
1512 host->max_id = MAX_TARGETS_2200; 1641 host->max_id = MAX_TARGETS_2200;
1513 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1642 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1514 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1643 ha->request_q_length = REQUEST_ENTRY_CNT_2200;
1515 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1644 ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
1516 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1645 ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
1517 ha->isp_ops.pci_config = qla2300_pci_config;
1518 ha->isp_ops.intr_handler = qla2300_intr_handler;
1519 ha->isp_ops.fw_dump = qla2300_fw_dump;
1520 ha->isp_ops.beacon_on = qla2x00_beacon_on;
1521 ha->isp_ops.beacon_off = qla2x00_beacon_off;
1522 ha->isp_ops.beacon_blink = qla2x00_beacon_blink;
1523 ha->gid_list_info_size = 6; 1646 ha->gid_list_info_size = 6;
1524 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1647 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1525 ha->optrom_size = OPTROM_SIZE_2322; 1648 ha->optrom_size = OPTROM_SIZE_2322;
1649 ha->isp_ops = &qla2300_isp_ops;
1526 } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1650 } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
1527 host->max_id = MAX_TARGETS_2200; 1651 host->max_id = MAX_TARGETS_2200;
1528 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1652 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1531,36 +1655,20 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1531 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1655 ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
1532 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1656 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1533 ha->mgmt_svr_loop_id = 10 + ha->vp_idx; 1657 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1534 ha->isp_ops.pci_config = qla24xx_pci_config;
1535 ha->isp_ops.reset_chip = qla24xx_reset_chip;
1536 ha->isp_ops.chip_diag = qla24xx_chip_diag;
1537 ha->isp_ops.config_rings = qla24xx_config_rings;
1538 ha->isp_ops.reset_adapter = qla24xx_reset_adapter;
1539 ha->isp_ops.nvram_config = qla24xx_nvram_config;
1540 ha->isp_ops.update_fw_options = qla24xx_update_fw_options;
1541 ha->isp_ops.load_risc = qla24xx_load_risc;
1542 ha->isp_ops.pci_info_str = qla24xx_pci_info_str;
1543 ha->isp_ops.fw_version_str = qla24xx_fw_version_str;
1544 ha->isp_ops.intr_handler = qla24xx_intr_handler;
1545 ha->isp_ops.enable_intrs = qla24xx_enable_intrs;
1546 ha->isp_ops.disable_intrs = qla24xx_disable_intrs;
1547 ha->isp_ops.abort_command = qla24xx_abort_command;
1548 ha->isp_ops.abort_target = qla24xx_abort_target;
1549 ha->isp_ops.fabric_login = qla24xx_login_fabric;
1550 ha->isp_ops.fabric_logout = qla24xx_fabric_logout;
1551 ha->isp_ops.prep_ms_iocb = qla24xx_prep_ms_iocb;
1552 ha->isp_ops.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb;
1553 ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
1554 ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
1555 ha->isp_ops.fw_dump = qla24xx_fw_dump;
1556 ha->isp_ops.read_optrom = qla24xx_read_optrom_data;
1557 ha->isp_ops.write_optrom = qla24xx_write_optrom_data;
1558 ha->isp_ops.beacon_on = qla24xx_beacon_on;
1559 ha->isp_ops.beacon_off = qla24xx_beacon_off;
1560 ha->isp_ops.beacon_blink = qla24xx_beacon_blink;
1561 ha->isp_ops.get_flash_version = qla24xx_get_flash_version;
1562 ha->gid_list_info_size = 8; 1658 ha->gid_list_info_size = 8;
1563 ha->optrom_size = OPTROM_SIZE_24XX; 1659 ha->optrom_size = OPTROM_SIZE_24XX;
1660 ha->isp_ops = &qla24xx_isp_ops;
1661 } else if (IS_QLA25XX(ha)) {
1662 host->max_id = MAX_TARGETS_2200;
1663 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1664 ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
1665 ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
1666 ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
1667 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1668 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1669 ha->gid_list_info_size = 8;
1670 ha->optrom_size = OPTROM_SIZE_25XX;
1671 ha->isp_ops = &qla25xx_isp_ops;
1564 } 1672 }
1565 host->can_queue = ha->request_q_length + 128; 1673 host->can_queue = ha->request_q_length + 128;
1566 1674
@@ -1628,11 +1736,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1628 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1736 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
1629 ha->host_no, ha)); 1737 ha->host_no, ha));
1630 1738
1631 ha->isp_ops.disable_intrs(ha); 1739 ha->isp_ops->disable_intrs(ha);
1632 1740
1633 spin_lock_irqsave(&ha->hardware_lock, flags); 1741 spin_lock_irqsave(&ha->hardware_lock, flags);
1634 reg = ha->iobase; 1742 reg = ha->iobase;
1635 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1743 if (IS_FWI2_CAPABLE(ha)) {
1636 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); 1744 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1637 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT); 1745 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1638 } else { 1746 } else {
@@ -1654,7 +1762,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1654 } 1762 }
1655 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1763 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1656 1764
1657 ha->isp_ops.enable_intrs(ha); 1765 ha->isp_ops->enable_intrs(ha);
1658 1766
1659 pci_set_drvdata(pdev, ha); 1767 pci_set_drvdata(pdev, ha);
1660 1768
@@ -1679,9 +1787,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1679 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 1787 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
1680 qla2x00_version_str, ha->model_number, 1788 qla2x00_version_str, ha->model_number,
1681 ha->model_desc ? ha->model_desc: "", pdev->device, 1789 ha->model_desc ? ha->model_desc: "", pdev->device,
1682 ha->isp_ops.pci_info_str(ha, pci_info), pci_name(pdev), 1790 ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev),
1683 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no, 1791 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
1684 ha->isp_ops.fw_version_str(ha, fw_str)); 1792 ha->isp_ops->fw_version_str(ha, fw_str));
1685 1793
1686 return 0; 1794 return 0;
1687 1795
@@ -1747,7 +1855,7 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1747 1855
1748 /* turn-off interrupts on the card */ 1856 /* turn-off interrupts on the card */
1749 if (ha->interrupts_on) 1857 if (ha->interrupts_on)
1750 ha->isp_ops.disable_intrs(ha); 1858 ha->isp_ops->disable_intrs(ha);
1751 1859
1752 qla2x00_mem_free(ha); 1860 qla2x00_mem_free(ha);
1753 1861
@@ -2025,7 +2133,7 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
2025 } 2133 }
2026 memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt)); 2134 memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt));
2027 2135
2028 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 2136 if (IS_FWI2_CAPABLE(ha)) {
2029 /* 2137 /*
2030 * Get consistent memory allocated for SFP 2138 * Get consistent memory allocated for SFP
2031 * block. 2139 * block.
@@ -2305,7 +2413,7 @@ qla2x00_do_dpc(void *data)
2305 if (fcport->flags & FCF_FABRIC_DEVICE) { 2413 if (fcport->flags & FCF_FABRIC_DEVICE) {
2306 if (fcport->flags & 2414 if (fcport->flags &
2307 FCF_TAPE_PRESENT) 2415 FCF_TAPE_PRESENT)
2308 ha->isp_ops.fabric_logout( 2416 ha->isp_ops->fabric_logout(
2309 ha, fcport->loop_id, 2417 ha, fcport->loop_id,
2310 fcport->d_id.b.domain, 2418 fcport->d_id.b.domain,
2311 fcport->d_id.b.area, 2419 fcport->d_id.b.area,
@@ -2385,10 +2493,10 @@ qla2x00_do_dpc(void *data)
2385 } 2493 }
2386 2494
2387 if (!ha->interrupts_on) 2495 if (!ha->interrupts_on)
2388 ha->isp_ops.enable_intrs(ha); 2496 ha->isp_ops->enable_intrs(ha);
2389 2497
2390 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) 2498 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
2391 ha->isp_ops.beacon_blink(ha); 2499 ha->isp_ops->beacon_blink(ha);
2392 2500
2393 qla2x00_do_dpc_all_vps(ha); 2501 qla2x00_do_dpc_all_vps(ha);
2394 2502
@@ -2617,18 +2725,20 @@ qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
2617 2725
2618/* Firmware interface routines. */ 2726/* Firmware interface routines. */
2619 2727
2620#define FW_BLOBS 5 2728#define FW_BLOBS 6
2621#define FW_ISP21XX 0 2729#define FW_ISP21XX 0
2622#define FW_ISP22XX 1 2730#define FW_ISP22XX 1
2623#define FW_ISP2300 2 2731#define FW_ISP2300 2
2624#define FW_ISP2322 3 2732#define FW_ISP2322 3
2625#define FW_ISP24XX 4 2733#define FW_ISP24XX 4
2734#define FW_ISP25XX 5
2626 2735
2627#define FW_FILE_ISP21XX "ql2100_fw.bin" 2736#define FW_FILE_ISP21XX "ql2100_fw.bin"
2628#define FW_FILE_ISP22XX "ql2200_fw.bin" 2737#define FW_FILE_ISP22XX "ql2200_fw.bin"
2629#define FW_FILE_ISP2300 "ql2300_fw.bin" 2738#define FW_FILE_ISP2300 "ql2300_fw.bin"
2630#define FW_FILE_ISP2322 "ql2322_fw.bin" 2739#define FW_FILE_ISP2322 "ql2322_fw.bin"
2631#define FW_FILE_ISP24XX "ql2400_fw.bin" 2740#define FW_FILE_ISP24XX "ql2400_fw.bin"
2741#define FW_FILE_ISP25XX "ql2500_fw.bin"
2632 2742
2633static DECLARE_MUTEX(qla_fw_lock); 2743static DECLARE_MUTEX(qla_fw_lock);
2634 2744
@@ -2638,6 +2748,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
2638 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, 2748 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
2639 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 2749 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
2640 { .name = FW_FILE_ISP24XX, }, 2750 { .name = FW_FILE_ISP24XX, },
2751 { .name = FW_FILE_ISP25XX, },
2641}; 2752};
2642 2753
2643struct fw_blob * 2754struct fw_blob *
@@ -2656,6 +2767,8 @@ qla2x00_request_firmware(scsi_qla_host_t *ha)
2656 blob = &qla_fw_blobs[FW_ISP2322]; 2767 blob = &qla_fw_blobs[FW_ISP2322];
2657 } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 2768 } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
2658 blob = &qla_fw_blobs[FW_ISP24XX]; 2769 blob = &qla_fw_blobs[FW_ISP24XX];
2770 } else if (IS_QLA25XX(ha)) {
2771 blob = &qla_fw_blobs[FW_ISP25XX];
2659 } 2772 }
2660 2773
2661 down(&qla_fw_lock); 2774 down(&qla_fw_lock);
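FW_BLOBS, the FW_ISPxxxx index macros, and qla_fw_blobs[] must stay in step, which is why adding ISP25XX support touches all three plus the selection chain in qla2x00_request_firmware() shown above. A usage sketch of how a caller might consume the result; the fw member of struct fw_blob and the helper below are assumptions for illustration, not part of this hunk:

/* Hypothetical helper; assumes qla2x00_request_firmware() caches the
 * image obtained via request_firmware() in blob->fw. */
static int
qla2xxx_fw_image_available(scsi_qla_host_t *ha)
{
	struct fw_blob *blob = qla2x00_request_firmware(ha);

	if (blob == NULL || blob->fw == NULL)
		return 0;
	/* blob->fw->data and blob->fw->size describe the image to load. */
	return 1;
}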
@@ -2699,6 +2812,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
2699 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, 2812 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
2700 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 2813 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
2701 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 2814 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
2815 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
2702 { 0 }, 2816 { 0 },
2703}; 2817};
2704MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 2818MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
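The new qla2xxx_pci_tbl entry is what lets the PCI core bind this driver to ISP2532 boards in the first place; without it the probe-path changes above would never run. The device ID macro itself is defined in the driver headers rather than in this hunk; QLogic IDs have historically matched the ISP part number, so the expected value (an assumption here) is:

/* Assumed definition, normally in qla_def.h; vendor ID 0x1077 is QLogic. */
#define PCI_DEVICE_ID_QLOGIC_ISP2532	0x2532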
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 206bda093da2..a925a3f179f9 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -766,6 +766,29 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
766 return ret; 766 return ret;
767} 767}
768 768
769uint8_t *
770qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
771 uint32_t bytes)
772{
773 uint32_t i;
774 uint32_t *dwptr;
775
776 /* Dword reads to flash. */
777 dwptr = (uint32_t *)buf;
778 for (i = 0; i < bytes >> 2; i++, naddr++)
779 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
780 flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr)));
781
782 return buf;
783}
784
785int
786qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
787 uint32_t bytes)
788{
789 return qla24xx_write_flash_data(ha, (uint32_t *)buf,
790 FA_VPD_NVRAM_ADDR | naddr, bytes >> 2);
791}
769 792
770static inline void 793static inline void
771qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 794qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
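qla25xx_read_nvram_data() and qla25xx_write_nvram_data() above reach the NVRAM/VPD region through the ISP24xx flash-dword primitives (qla24xx_read_flash_dword, qla24xx_write_flash_data) addressed at FA_VPD_NVRAM_ADDR. For the sysfs and nvram_config paths to pick them up, they are presumably referenced from the ISP25xx ops table introduced earlier in this patch; a sketch of the assumed wiring, showing only the two NVRAM hooks:

/* Sketch -- the real qla25xx_isp_ops initializer is defined elsewhere. */
static struct isp_operations qla25xx_isp_ops = {
	/* ... handlers shared with the ISP24xx table ... */
	.read_nvram	= qla25xx_read_nvram_data,
	.write_nvram	= qla25xx_write_nvram_data,
};

Callers keep going through ha->isp_ops->read_nvram() and ha->isp_ops->write_nvram(), so no new chip checks are needed at the call sites.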
@@ -919,7 +942,7 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
919 else 942 else
920 ha->beacon_color_state = QLA_LED_GRN_ON; 943 ha->beacon_color_state = QLA_LED_GRN_ON;
921 944
922 ha->isp_ops.beacon_blink(ha); /* This turns green LED off */ 945 ha->isp_ops->beacon_blink(ha); /* This turns green LED off */
923 946
924 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 947 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
925 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7; 948 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
@@ -1031,7 +1054,7 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
1031 ha->beacon_blink_led = 0; 1054 ha->beacon_blink_led = 0;
1032 ha->beacon_color_state = QLA_LED_ALL_ON; 1055 ha->beacon_color_state = QLA_LED_ALL_ON;
1033 1056
1034 ha->isp_ops.beacon_blink(ha); /* Will flip to all off. */ 1057 ha->isp_ops->beacon_blink(ha); /* Will flip to all off. */
1035 1058
1036 /* Give control back to firmware. */ 1059 /* Give control back to firmware. */
1037 spin_lock_irqsave(&ha->hardware_lock, flags); 1060 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1419,7 +1442,7 @@ qla2x00_suspend_hba(struct scsi_qla_host *ha)
1419 1442
1420 /* Suspend HBA. */ 1443 /* Suspend HBA. */
1421 scsi_block_requests(ha->host); 1444 scsi_block_requests(ha->host);
1422 ha->isp_ops.disable_intrs(ha); 1445 ha->isp_ops->disable_intrs(ha);
1423 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1446 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1424 1447
1425 /* Pause RISC. */ 1448 /* Pause RISC. */
@@ -1705,7 +1728,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1705{ 1728{
1706 /* Suspend HBA. */ 1729 /* Suspend HBA. */
1707 scsi_block_requests(ha->host); 1730 scsi_block_requests(ha->host);
1708 ha->isp_ops.disable_intrs(ha); 1731 ha->isp_ops->disable_intrs(ha);
1709 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1732 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1710 1733
1711 /* Go with read. */ 1734 /* Go with read. */
@@ -1713,7 +1736,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1713 1736
1714 /* Resume HBA. */ 1737 /* Resume HBA. */
1715 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1738 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1716 ha->isp_ops.enable_intrs(ha); 1739 ha->isp_ops->enable_intrs(ha);
1717 scsi_unblock_requests(ha->host); 1740 scsi_unblock_requests(ha->host);
1718 1741
1719 return buf; 1742 return buf;
@@ -1727,7 +1750,7 @@ qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1727 1750
1728 /* Suspend HBA. */ 1751 /* Suspend HBA. */
1729 scsi_block_requests(ha->host); 1752 scsi_block_requests(ha->host);
1730 ha->isp_ops.disable_intrs(ha); 1753 ha->isp_ops->disable_intrs(ha);
1731 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1754 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1732 1755
1733 /* Go with write. */ 1756 /* Go with write. */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index fd2f10a25348..dd1f8ceb79c4 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.00-k1" 10#define QLA2XXX_VERSION "8.02.00-k2"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2