path: root/drivers/scsi/qla4xxx
author    Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 22:01:32 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 22:01:32 -0400
commit    3151367f8778a1789d6f6e6f6c642681b6cd6d64 (patch)
tree      1869d5429a25abd994ae94079808b8db060ec6f3 /drivers/scsi/qla4xxx
parent    16642a2e7be23bbda013fc32d8f6c68982eab603 (diff)
parent    fe709ed827d370e6b0c0a9f9456da1c22bdcd118 (diff)
Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James Bottomley:
 "This is a large set of updates, mostly for drivers (qla2xxx [including
  support for new 83xx based card], qla4xxx, mpt2sas, bfa, zfcp, hpsa,
  be2iscsi, isci, lpfc, ipr, ibmvfc, ibmvscsi, megaraid_sas).

  There's also a rework for tape adding virtually unlimited numbers of
  tape drives plus a set of dif fixes for sd and a fix for a live lock
  on hot remove of SCSI devices.

  This round includes a signed tag pull of isci-for-3.6

  Signed-off-by: James Bottomley <JBottomley@Parallels.com>"

Fix up trivial conflict in drivers/scsi/qla2xxx/qla_nx.c due to new PCI
helper function use in a function that was removed by this pull.

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (198 commits)
  [SCSI] st: remove st_mutex
  [SCSI] sd: Ensure we correctly disable devices with unknown protection type
  [SCSI] hpsa: gen8plus Smart Array IDs
  [SCSI] qla4xxx: Update driver version to 5.03.00-k1
  [SCSI] qla4xxx: Disable generating pause frames for ISP83XX
  [SCSI] qla4xxx: Fix double clearing of risc_intr for ISP83XX
  [SCSI] qla4xxx: IDC implementation for Loopback
  [SCSI] qla4xxx: update copyrights in LICENSE.qla4xxx
  [SCSI] qla4xxx: Fix panic while rmmod
  [SCSI] qla4xxx: Fail probe_adapter if IRQ allocation fails
  [SCSI] qla4xxx: Prevent MSI/MSI-X falling back to INTx for ISP82XX
  [SCSI] qla4xxx: Update idc reg in case of PCI AER
  [SCSI] qla4xxx: Fix double IDC locking in qla4_8xxx_error_recovery
  [SCSI] qla4xxx: Clear interrupt while unloading driver for ISP83XX
  [SCSI] qla4xxx: Print correct IDC version
  [SCSI] qla4xxx: Added new mbox cmd to pass driver version to FW
  [SCSI] scsi_dh_alua: Enable STPG for unavailable ports
  [SCSI] scsi_remove_target: fix softlockup regression on hot remove
  [SCSI] ibmvscsi: Fix host config length field overflow
  [SCSI] ibmvscsi: Remove backend abstraction
  ...
Diffstat (limited to 'drivers/scsi/qla4xxx')
-rw-r--r--  drivers/scsi/qla4xxx/Kconfig       |    4
-rw-r--r--  drivers/scsi/qla4xxx/Makefile      |    2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.c    | 1611
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.h    |  283
-rw-r--r--  drivers/scsi/qla4xxx/ql4_attr.c    |   26
-rw-r--r--  drivers/scsi/qla4xxx/ql4_dbg.c     |   32
-rw-r--r--  drivers/scsi/qla4xxx/ql4_dbg.h     |    2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h     |   65
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h      |   59
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h    |   94
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c    |   23
-rw-r--r--  drivers/scsi/qla4xxx/ql4_inline.h  |    2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c    |   28
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c     |  406
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c     |  186
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nvram.c   |    2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nvram.h   |    2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c      | 1432
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.h      |  198
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c      |  494
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h |    4
21 files changed, 3991 insertions, 964 deletions
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index f1ad02ea212b..e4dc7c733c29 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -4,5 +4,5 @@ config SCSI_QLA_ISCSI
 	select SCSI_ISCSI_ATTRS
 	select ISCSI_BOOT_SYSFS
 	---help---
-	This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
-	iSCSI host adapter family.
+	This driver supports the QLogic 40xx (ISP4XXX), 8022 (ISP82XX)
+	and 8032 (ISP83XX) iSCSI host adapter family.
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 5b44139ff43d..4230977748cf 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
 qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
-		ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o
+		ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o ql4_83xx.o
 
 obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
 
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
new file mode 100644
index 000000000000..6e9af20be12f
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -0,0 +1,1611 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include <linux/ratelimit.h>
9
10#include "ql4_def.h"
11#include "ql4_version.h"
12#include "ql4_glbl.h"
13#include "ql4_dbg.h"
14#include "ql4_inline.h"
15
16uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
17{
18 return readl((void __iomem *)(ha->nx_pcibase + addr));
19}
20
21void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
22{
23 writel(val, (void __iomem *)(ha->nx_pcibase + addr));
24}
25
26static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
27{
28 uint32_t val;
29 int ret_val = QLA_SUCCESS;
30
31 qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
32 val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
33 if (val != addr) {
34 ql4_printk(KERN_ERR, ha, "%s: Failed to set register window: addr written 0x%x, read 0x%x!\n",
35 __func__, addr, val);
36 ret_val = QLA_ERROR;
37 }
38
39 return ret_val;
40}
41
42int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
43 uint32_t *data)
44{
45 int ret_val;
46
47 ret_val = qla4_83xx_set_win_base(ha, addr);
48
49 if (ret_val == QLA_SUCCESS)
50 *data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
51 else
52 ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
53 __func__, addr);
54
55 return ret_val;
56}
57
58int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
59 uint32_t data)
60{
61 int ret_val;
62
63 ret_val = qla4_83xx_set_win_base(ha, addr);
64
65 if (ret_val == QLA_SUCCESS)
66 qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
67 else
68 ql4_printk(KERN_ERR, ha, "%s: failed write to addr 0x%x, data 0x%x\n",
69 __func__, addr, data);
70
71 return ret_val;
72}
73
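The two indirect accessors above follow a classic windowed-register pattern: the per-function CRB window register is programmed with the target address, read back to confirm the window actually moved, and only then is the data port (QLA83XX_WILDCARD) touched. Below is a minimal userspace sketch of that pattern with the hardware mocked by a two-element array; every name in it is illustrative, not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* Mock "registers": index 0 is the window register, index 1 the data port. */
static uint32_t mock_regs[2];

static void mock_wr(int reg, uint32_t val) { mock_regs[reg] = val; }
static uint32_t mock_rd(int reg) { return mock_regs[reg]; }

/* Program the window, then verify it by reading it back, as the driver does. */
static int set_win_base(uint32_t addr)
{
    mock_wr(0, addr);
    if (mock_rd(0) != addr) {
        fprintf(stderr, "window readback mismatch\n");
        return -1;
    }
    return 0;
}

/* Indirect read: move the window first, then read the data port. */
static int rd_indirect(uint32_t addr, uint32_t *data)
{
    if (set_win_base(addr))
        return -1;
    *data = mock_rd(1);
    return 0;
}

int main(void)
{
    uint32_t val;

    mock_regs[1] = 0xCAFEF00D;  /* pretend device data behind the window */
    if (rd_indirect(0x42110018, &val) == 0)
        printf("read 0x%08X through the window\n", val);
    return 0;
}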
74static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
75{
76 int lock_owner;
77 int timeout = 0;
78 uint32_t lock_status = 0;
79 int ret_val = QLA_SUCCESS;
80
81 while (lock_status == 0) {
82 lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
83 if (lock_status)
84 break;
85
86 if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
87 lock_owner = qla4_83xx_rd_reg(ha,
88 QLA83XX_FLASH_LOCK_ID);
89 ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
90 __func__, ha->func_num, lock_owner);
91 ret_val = QLA_ERROR;
92 break;
93 }
94 msleep(20);
95 }
96
97 qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
98 return ret_val;
99}
100
101static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
102{
103 /* Reading FLASH_UNLOCK register unlocks the Flash */
104 qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
105 qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
106}
107
108int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
109 uint8_t *p_data, int u32_word_count)
110{
111 int i;
112 uint32_t u32_word;
113 uint32_t addr = flash_addr;
114 int ret_val = QLA_SUCCESS;
115
116 ret_val = qla4_83xx_flash_lock(ha);
117 if (ret_val == QLA_ERROR)
118 goto exit_lock_error;
119
120 if (addr & 0x03) {
121 ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
122 __func__, addr);
123 ret_val = QLA_ERROR;
124 goto exit_flash_read;
125 }
126
127 for (i = 0; i < u32_word_count; i++) {
128 ret_val = qla4_83xx_wr_reg_indirect(ha,
129 QLA83XX_FLASH_DIRECT_WINDOW,
130 (addr & 0xFFFF0000));
131 if (ret_val == QLA_ERROR) {
132 ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
133 __func__, addr);
134 goto exit_flash_read;
135 }
136
137 ret_val = qla4_83xx_rd_reg_indirect(ha,
138 QLA83XX_FLASH_DIRECT_DATA(addr),
139 &u32_word);
140 if (ret_val == QLA_ERROR) {
141 ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
142 __func__, addr);
143 goto exit_flash_read;
144 }
145
146 *(__le32 *)p_data = le32_to_cpu(u32_word);
147 p_data = p_data + 4;
148 addr = addr + 4;
149 }
150
151exit_flash_read:
152 qla4_83xx_flash_unlock(ha);
153
154exit_lock_error:
155 return ret_val;
156}
157
158int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
159 uint32_t flash_addr, uint8_t *p_data,
160 int u32_word_count)
161{
162 uint32_t i;
163 uint32_t u32_word;
164 uint32_t flash_offset;
165 uint32_t addr = flash_addr;
166 int ret_val = QLA_SUCCESS;
167
168 flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);
169
170 if (addr & 0x3) {
171 ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
172 __func__, addr);
173 ret_val = QLA_ERROR;
174 goto exit_lockless_read;
175 }
176
177 ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
178 addr);
179 if (ret_val == QLA_ERROR) {
180 ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
181 __func__, addr);
182 goto exit_lockless_read;
183 }
184
185 /* Check if data is spread across multiple sectors */
186 if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
187 (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
188
189 /* Multi sector read */
190 for (i = 0; i < u32_word_count; i++) {
191 ret_val = qla4_83xx_rd_reg_indirect(ha,
192 QLA83XX_FLASH_DIRECT_DATA(addr),
193 &u32_word);
194 if (ret_val == QLA_ERROR) {
195 ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
196 __func__, addr);
197 goto exit_lockless_read;
198 }
199
200 *(__le32 *)p_data = le32_to_cpu(u32_word);
201 p_data = p_data + 4;
202 addr = addr + 4;
203 flash_offset = flash_offset + 4;
204
205 if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
206 /* This write is needed once for each sector */
207 ret_val = qla4_83xx_wr_reg_indirect(ha,
208 QLA83XX_FLASH_DIRECT_WINDOW,
209 addr);
210 if (ret_val == QLA_ERROR) {
211 ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
212 __func__, addr);
213 goto exit_lockless_read;
214 }
215 flash_offset = 0;
216 }
217 }
218 } else {
219 /* Single sector read */
220 for (i = 0; i < u32_word_count; i++) {
221 ret_val = qla4_83xx_rd_reg_indirect(ha,
222 QLA83XX_FLASH_DIRECT_DATA(addr),
223 &u32_word);
224 if (ret_val == QLA_ERROR) {
225 ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
226 __func__, addr);
227 goto exit_lockless_read;
228 }
229
230 *(__le32 *)p_data = le32_to_cpu(u32_word);
231 p_data = p_data + 4;
232 addr = addr + 4;
233 }
234 }
235
236exit_lockless_read:
237 return ret_val;
238}
239
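The multi-sector branch above re-programs QLA83XX_FLASH_DIRECT_WINDOW each time a read crosses a 64 KB flash sector boundary (QLA83XX_FLASH_SECTOR_SIZE in ql4_83xx.h). A small standalone sketch of just that bookkeeping, with the register accesses replaced by printouts:

#include <stdio.h>
#include <stdint.h>

#define FLASH_SECTOR_SIZE 65536u    /* matches QLA83XX_FLASH_SECTOR_SIZE */

int main(void)
{
    uint32_t addr = 0x0000FFF8;     /* two dwords below a sector boundary */
    uint32_t offset = addr & (FLASH_SECTOR_SIZE - 1);

    for (int i = 0; i < 4; i++) {   /* a 4-dword read spanning two sectors */
        printf("read dword at 0x%08X\n", addr);
        addr += 4;
        offset += 4;
        if (offset > FLASH_SECTOR_SIZE - 1) {
            /* Crossed into the next sector: the window must be rewritten. */
            printf("  -> reprogram FLASH_DIRECT_WINDOW to 0x%08X\n", addr);
            offset = 0;
        }
    }
    return 0;
}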
240void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
241{
242 if (qla4_83xx_flash_lock(ha))
243 ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);
244
245 /*
246 * Either we got the lock or someone else is holding it;
247 * since we are resetting, forcefully unlock.
248 */
249 qla4_83xx_flash_unlock(ha);
250}
251
252/**
253 * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory
254 * @ha: Pointer to adapter structure
255 * @addr: MS memory address to write to
256 * @data: Pointer to the data to be written
257 * @count: Number of 128-bit words to write
258 *
259 * Return: On success return QLA_SUCCESS
260 * On error return QLA_ERROR
261 **/
262static int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
263 uint32_t *data, uint32_t count)
264{
265 int i, j;
266 uint32_t agt_ctrl;
267 unsigned long flags;
268 int ret_val = QLA_SUCCESS;
269
270 /* Only 128-bit aligned access */
271 if (addr & 0xF) {
272 ret_val = QLA_ERROR;
273 goto exit_ms_mem_write;
274 }
275
276 write_lock_irqsave(&ha->hw_lock, flags);
277
278 /* Write address */
279 ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
280 if (ret_val == QLA_ERROR) {
281 ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
282 __func__);
283 goto exit_ms_mem_write_unlock;
284 }
285
286 for (i = 0; i < count; i++, addr += 16) {
287 if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
288 QLA8XXX_ADDR_QDR_NET_MAX)) ||
289 (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
290 QLA8XXX_ADDR_DDR_NET_MAX)))) {
291 ret_val = QLA_ERROR;
292 goto exit_ms_mem_write_unlock;
293 }
294
295 ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
296 addr);
297 /* Write data */
298 ret_val |= qla4_83xx_wr_reg_indirect(ha,
299 MD_MIU_TEST_AGT_WRDATA_LO,
300 *data++);
301 ret_val |= qla4_83xx_wr_reg_indirect(ha,
302 MD_MIU_TEST_AGT_WRDATA_HI,
303 *data++);
304 ret_val |= qla4_83xx_wr_reg_indirect(ha,
305 MD_MIU_TEST_AGT_WRDATA_ULO,
306 *data++);
307 ret_val |= qla4_83xx_wr_reg_indirect(ha,
308 MD_MIU_TEST_AGT_WRDATA_UHI,
309 *data++);
310 if (ret_val == QLA_ERROR) {
311 ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
312 __func__);
313 goto exit_ms_mem_write_unlock;
314 }
315
316 /* Check write status */
317 ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
318 MIU_TA_CTL_WRITE_ENABLE);
319 ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
320 MIU_TA_CTL_WRITE_START);
321 if (ret_val == QLA_ERROR) {
322 ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
323 __func__);
324 goto exit_ms_mem_write_unlock;
325 }
326
327 for (j = 0; j < MAX_CTL_CHECK; j++) {
328 ret_val = qla4_83xx_rd_reg_indirect(ha,
329 MD_MIU_TEST_AGT_CTRL,
330 &agt_ctrl);
331 if (ret_val == QLA_ERROR) {
332 ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
333 __func__);
334 goto exit_ms_mem_write_unlock;
335 }
336 if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
337 break;
338 }
339
340 /* Status check failed */
341 if (j >= MAX_CTL_CHECK) {
342 printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
343 __func__);
344 ret_val = QLA_ERROR;
345 goto exit_ms_mem_write_unlock;
346 }
347 }
348
349exit_ms_mem_write_unlock:
350 write_unlock_irqrestore(&ha->hw_lock, flags);
351
352exit_ms_mem_write:
353 return ret_val;
354}
355
356#define INTENT_TO_RECOVER 0x01
357#define PROCEED_TO_RECOVER 0x02
358
359static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
360{
361
362 uint32_t lock = 0, lockid;
363 int ret_val = QLA_ERROR;
364
365 lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
366
367 /* Check for other Recovery in progress, go wait */
368 if ((lockid & 0x3) != 0)
369 goto exit_lock_recovery;
370
371 /* Intent to Recover */
372 ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
373 (ha->func_num << 2) | INTENT_TO_RECOVER);
374
375 msleep(200);
376
377 /* Check Intent to Recover is advertised */
378 lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
379 if ((lockid & 0x3C) != (ha->func_num << 2))
380 goto exit_lock_recovery;
381
382 ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
383 __func__, ha->func_num);
384
385 /* Proceed to Recover */
386 ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
387 (ha->func_num << 2) | PROCEED_TO_RECOVER);
388
389 /* Force Unlock */
390 ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
391 ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);
392
393 /* Clear bits 0-5 in IDC_RECOVERY register */
394 ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);
395
396 /* Get lock */
397 lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
398 if (lock) {
399 lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
400 lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
401 ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
402 ret_val = QLA_SUCCESS;
403 }
404
405exit_lock_recovery:
406 return ret_val;
407}
408
409#define QLA83XX_DRV_LOCK_MSLEEP 200
410
411int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
412{
413 int timeout = 0;
414 uint32_t status = 0;
415 int ret_val = QLA_SUCCESS;
416 uint32_t first_owner = 0;
417 uint32_t tmo_owner = 0;
418 uint32_t lock_id;
419 uint32_t func_num;
420 uint32_t lock_cnt;
421
422 while (status == 0) {
423 status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
424 if (status) {
425 /* Increment lock counter (bits 8-31) and update func_num
426 * (bits 0-7) on getting a successful lock */
427 lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
428 lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
429 qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
430 break;
431 }
432
433 if (timeout == 0)
434 /* Save counter + ID of function holding the lock for
435 * first failure */
436 first_owner = ha->isp_ops->rd_reg_direct(ha,
437 QLA83XX_DRV_LOCK_ID);
438
439 if (++timeout >=
440 (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
441 tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
442 func_num = tmo_owner & 0xFF;
443 lock_cnt = tmo_owner >> 8;
444 ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
445 __func__, ha->func_num, func_num, lock_cnt,
446 (first_owner & 0xFF));
447
448 if (first_owner != tmo_owner) {
449 /* Some other driver got lock, OR same driver
450 * got lock again (counter value changed), when
451 * we were waiting for lock.
452 * Retry for another 2 sec */
453 ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
454 __func__, ha->func_num);
455 timeout = 0;
456 } else {
457 /* Same driver holding lock > 2sec.
458 * Force Recovery */
459 ret_val = qla4_83xx_lock_recovery(ha);
460 if (ret_val == QLA_SUCCESS) {
461 /* Recovered and got lock */
462 ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
463 __func__, ha->func_num);
464 break;
465 }
466 /* Recovery Failed, some other function
467 * has the lock, wait for 2secs and retry */
468 ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
469 __func__, ha->func_num);
470 timeout = 0;
471 }
472 }
473 msleep(QLA83XX_DRV_LOCK_MSLEEP);
474 }
475
476 return ret_val;
477}
478
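QLA83XX_DRV_LOCK_ID, as manipulated above, packs the owning PCI function number into bits 0-7 and a lock counter into bits 8-31; every successful acquisition bumps the counter and stamps the new owner, and the timeout path decodes both fields to decide whether the same holder is stuck. A standalone sketch of that encoding (illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Bump the counter (bits 8-31) and stamp our function number (bits 0-7). */
static uint32_t lock_id_acquire(uint32_t old_id, uint32_t func_num)
{
    return ((old_id + (1u << 8)) & ~0xFFu) | func_num;
}

int main(void)
{
    uint32_t id = 0;

    id = lock_id_acquire(id, 3);    /* func 3 takes the lock */
    id = lock_id_acquire(id, 5);    /* later, func 5 takes it */

    /* Decode as the timeout path does: owner in bits 0-7, count above. */
    printf("owner func = %u, lock count = %u\n", id & 0xFF, id >> 8);
    return 0;
}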
479void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
480{
481 int id;
482
483 id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
484
485 if ((id & 0xFF) != ha->func_num) {
486 ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
487 __func__, ha->func_num, (id & 0xFF));
488 return;
489 }
490
491 /* Keep the lock counter value, set the func_num field to 0xFF */
492 qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
493 qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
494}
495
496void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
497{
498 uint32_t idc_ctrl;
499
500 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
501 idc_ctrl |= DONTRESET_BIT0;
502 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
503 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
504 idc_ctrl));
505}
506
507void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
508{
509 uint32_t idc_ctrl;
510
511 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
512 idc_ctrl &= ~DONTRESET_BIT0;
513 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
514 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
515 idc_ctrl));
516}
517
518int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
519{
520 uint32_t idc_ctrl;
521
522 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
523 return idc_ctrl & DONTRESET_BIT0;
524}
525
526/*-------------------------IDC State Machine ---------------------*/
527
528enum {
529 UNKNOWN_CLASS = 0,
530 NIC_CLASS,
531 FCOE_CLASS,
532 ISCSI_CLASS
533};
534
535struct device_info {
536 int func_num;
537 int device_type;
538 int port_num;
539};
540
541static int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
542{
543 uint32_t drv_active;
544 uint32_t dev_part, dev_part1, dev_part2;
545 int i;
546 struct device_info device_map[16];
547 int func_nibble;
548 int nibble;
549 int nic_present = 0;
550 int iscsi_present = 0;
551 int iscsi_func_low = 0;
552
553 /* Use the dev_partition register to determine the PCI function number
554 * and then check drv_active register to see which driver is loaded */
555 dev_part1 = qla4_83xx_rd_reg(ha,
556 ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
557 dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
558 drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);
559
560 /* Each function has 4 bits in dev_partition Info register,
561 * Lower 2 bits - device type, Upper 2 bits - physical port number */
562 dev_part = dev_part1;
563 for (i = nibble = 0; i <= 15; i++, nibble++) {
564 func_nibble = dev_part & (0xF << (nibble * 4));
565 func_nibble >>= (nibble * 4);
566 device_map[i].func_num = i;
567 device_map[i].device_type = func_nibble & 0x3;
568 device_map[i].port_num = func_nibble & 0xC;
569
570 if (device_map[i].device_type == NIC_CLASS) {
571 if (drv_active & (1 << device_map[i].func_num)) {
572 nic_present++;
573 break;
574 }
575 } else if (device_map[i].device_type == ISCSI_CLASS) {
576 if (drv_active & (1 << device_map[i].func_num)) {
577 if (!iscsi_present ||
578 (iscsi_present &&
579 (iscsi_func_low > device_map[i].func_num)))
580 iscsi_func_low = device_map[i].func_num;
581
582 iscsi_present++;
583 }
584 }
585
586 /* For function_num[8..15] get info from dev_part2 register */
587 if (nibble == 7) {
588 nibble = 0;
589 dev_part = dev_part2;
590 }
591 }
592
593 /* NIC, iSCSI and FCoE take reset ownership in that order of precedence:
594 * NIC over iSCSI and FCoE, and iSCSI over FCoE, depending on which
595 * drivers are present. */
596 if (!nic_present && (ha->func_num == iscsi_func_low)) {
597 DEBUG2(ql4_printk(KERN_INFO, ha,
598 "%s: can reset - NIC not present and lower iSCSI function is %d\n",
599 __func__, ha->func_num));
600 return 1;
601 }
602
603 return 0;
604}
605
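qla4_83xx_can_perform_reset() above walks two 32-bit DEV_PART_INFO registers, one 4-bit nibble per PCI function: the low 2 bits of each nibble give the device class and the high 2 bits the physical port. A self-contained sketch of that decode over an invented register value (the class names mirror the enum above):

#include <stdio.h>
#include <stdint.h>

enum { UNKNOWN_CLASS = 0, NIC_CLASS, FCOE_CLASS, ISCSI_CLASS };

static const char * const class_name[] = { "unknown", "nic", "fcoe", "iscsi" };

int main(void)
{
    /* Invented partition info: func0 = NIC/port0, func1 = iSCSI/port0,
     * func2 = FCoE/port1, remaining functions unknown. */
    uint32_t dev_part = (NIC_CLASS << 0) | (ISCSI_CLASS << 4) |
                        ((FCOE_CLASS | (1 << 2)) << 8);

    for (int func = 0; func < 8; func++) {
        uint32_t nibble = (dev_part >> (func * 4)) & 0xF;

        printf("func %d: class=%s port=%u\n", func,
               class_name[nibble & 0x3], (nibble & 0xC) >> 2);
    }
    return 0;
}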
606/**
607 * qla4_83xx_need_reset_handler - Code to start reset sequence
608 * @ha: pointer to adapter structure
609 *
610 * Note: IDC lock must be held upon entry
611 **/
612void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
613{
614 uint32_t dev_state, drv_state, drv_active;
615 unsigned long reset_timeout, dev_init_timeout;
616
617 ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
618 __func__);
619
620 if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
621 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
622 __func__));
623 qla4_8xxx_set_rst_ready(ha);
624
625 /* Non-reset owners ACK Reset and wait for device INIT state
626 * as part of Reset Recovery by Reset Owner */
627 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
628
629 do {
630 if (time_after_eq(jiffies, dev_init_timeout)) {
631 ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
632 __func__);
633 break;
634 }
635
636 ha->isp_ops->idc_unlock(ha);
637 msleep(1000);
638 ha->isp_ops->idc_lock(ha);
639
640 dev_state = qla4_8xxx_rd_direct(ha,
641 QLA8XXX_CRB_DEV_STATE);
642 } while (dev_state == QLA8XXX_DEV_NEED_RESET);
643 } else {
644 qla4_8xxx_set_rst_ready(ha);
645 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
646 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
647 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
648
649 ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
650 __func__, drv_state, drv_active);
651
652 while (drv_state != drv_active) {
653 if (time_after_eq(jiffies, reset_timeout)) {
654 ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
655 __func__, DRIVER_NAME, drv_state,
656 drv_active);
657 break;
658 }
659
660 ha->isp_ops->idc_unlock(ha);
661 msleep(1000);
662 ha->isp_ops->idc_lock(ha);
663
664 drv_state = qla4_8xxx_rd_direct(ha,
665 QLA8XXX_CRB_DRV_STATE);
666 drv_active = qla4_8xxx_rd_direct(ha,
667 QLA8XXX_CRB_DRV_ACTIVE);
668 }
669
670 if (drv_state != drv_active) {
671 ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
672 __func__, (drv_active ^ drv_state));
673 drv_active = drv_active & drv_state;
674 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
675 drv_active);
676 }
677
678 clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
679 /* Start Reset Recovery */
680 qla4_8xxx_device_bootstrap(ha);
681 }
682}
683
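In the reset-owner branch above, drv_active and drv_state act as bitmasks of active and reset-acknowledged functions respectively, so the XOR names the functions that never acked and the AND drops them from drv_active before recovery proceeds. A short illustration with invented masks:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t drv_active = 0x0000000B;   /* funcs 0, 1 and 3 loaded       */
    uint32_t drv_state  = 0x00000009;   /* funcs 0 and 3 acked the reset */

    printf("non-acking functions mask: 0x%08X\n", drv_active ^ drv_state);

    /* The reset owner drops the stragglers before bootstrapping the device. */
    drv_active &= drv_state;
    printf("drv_active after cleanup:  0x%08X\n", drv_active);
    return 0;
}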
684void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
685{
686 uint32_t idc_params, ret_val;
687
688 ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
689 (uint8_t *)&idc_params, 1);
690 if (ret_val == QLA_SUCCESS) {
691 ha->nx_dev_init_timeout = idc_params & 0xFFFF;
692 ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
693 } else {
694 ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
695 ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
696 }
697
698 DEBUG2(ql4_printk(KERN_DEBUG, ha,
699 "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
700 __func__, ha->nx_dev_init_timeout,
701 ha->nx_reset_timeout));
702}
703
704/*-------------------------Reset Sequence Functions-----------------------*/
705
706static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
707{
708 uint8_t *phdr;
709
710 if (!ha->reset_tmplt.buff) {
711 ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
712 __func__);
713 return;
714 }
715
716 phdr = ha->reset_tmplt.buff;
717
718 DEBUG2(ql4_printk(KERN_INFO, ha,
719 "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
720 *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
721 *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
722 *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
723 *(phdr+13), *(phdr+14), *(phdr+15)));
724}
725
726static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
727{
728 uint8_t *p_cache;
729 uint32_t src, count, size;
730 uint64_t dest;
731 int ret_val = QLA_SUCCESS;
732
733 src = QLA83XX_BOOTLOADER_FLASH_ADDR;
734 dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
735 size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);
736
737 /* 128 bit alignment check */
738 if (size & 0xF)
739 size = (size + 16) & ~0xF;
740
741 /* 16 byte count */
742 count = size/16;
743
744 p_cache = vmalloc(size);
745 if (p_cache == NULL) {
746 ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
747 __func__);
748 ret_val = QLA_ERROR;
749 goto exit_copy_bootloader;
750 }
751
752 ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
753 size / sizeof(uint32_t));
754 if (ret_val == QLA_ERROR) {
755 ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
756 __func__);
757 goto exit_copy_error;
758 }
759 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
760 __func__));
761
762 /* 128 bit/16 byte write to MS memory */
763 ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
764 count);
765 if (ret_val == QLA_ERROR) {
766 ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
767 __func__);
768 goto exit_copy_error;
769 }
770
771 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
772 __func__, size));
773
774exit_copy_error:
775 vfree(p_cache);
776
777exit_copy_bootloader:
778 return ret_val;
779}
780
781static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
782{
783 uint32_t val, ret_val = QLA_ERROR;
784 int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
785
786 do {
787 val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
788 if (val == PHAN_INITIALIZE_COMPLETE) {
789 DEBUG2(ql4_printk(KERN_INFO, ha,
790 "%s: Command Peg initialization complete. State=0x%x\n",
791 __func__, val));
792 ret_val = QLA_SUCCESS;
793 break;
794 }
795 msleep(CRB_CMDPEG_CHECK_DELAY);
796 } while (--retries);
797
798 return ret_val;
799}
800
801/**
802 * qla4_83xx_poll_reg - Poll the given CRB addr for up to duration msecs
803 * until the value read, ANDed with test_mask, equals test_result.
804 *
805 * @ha : Pointer to adapter structure
806 * @addr : CRB register address
807 * @duration : Poll for total of "duration" msecs
808 * @test_mask : Mask to AND with the value read
809 * @test_result : Compare (value&test_mask) with test_result.
810 **/
811static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
812 int duration, uint32_t test_mask,
813 uint32_t test_result)
814{
815 uint32_t value;
816 uint8_t retries;
817 int ret_val = QLA_SUCCESS;
818
819 ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
820 if (ret_val == QLA_ERROR)
821 goto exit_poll_reg;
822
823 retries = duration / 10;
824 do {
825 if ((value & test_mask) != test_result) {
826 msleep(duration / 10);
827 ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
828 if (ret_val == QLA_ERROR)
829 goto exit_poll_reg;
830
831 ret_val = QLA_ERROR;
832 } else {
833 ret_val = QLA_SUCCESS;
834 break;
835 }
836 } while (retries--);
837
838exit_poll_reg:
839 if (ret_val == QLA_ERROR) {
840 ha->reset_tmplt.seq_error++;
841 ql4_printk(KERN_ERR, ha, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
842 __func__, value, test_mask, test_result);
843 }
844
845 return ret_val;
846}
847
848static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
849{
850 uint32_t sum = 0;
851 uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
852 int u16_count = ha->reset_tmplt.hdr->size / sizeof(uint16_t);
853 int ret_val;
854
855 while (u16_count-- > 0)
856 sum += *buff++;
857
858 while (sum >> 16)
859 sum = (sum & 0xFFFF) + (sum >> 16);
860
861 /* checksum of 0 indicates a valid template */
862 if (~sum) {
863 ret_val = QLA_SUCCESS;
864 } else {
865 ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
866 __func__);
867 ret_val = QLA_ERROR;
868 }
869
870 return ret_val;
871}
872
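The check above sums every 16-bit word of the template (checksum field included) and folds the carries back into the low 16 bits; the intent, per the "checksum of 0" comment, is that the header's checksum field makes the folded sum of a valid template come out to negative zero. A standalone sketch of generating and verifying such a checksum over an invented buffer; it mirrors that intent rather than the literal ~sum test above.

#include <stdio.h>
#include <stdint.h>

/* Sum 16-bit words and fold the carries back in, as the driver does. */
static uint16_t fold_sum16(const uint16_t *words, int count)
{
    uint32_t sum = 0;

    while (count-- > 0)
        sum += *words++;
    while (sum >> 16)
        sum = (sum & 0xFFFF) + (sum >> 16);
    return (uint16_t)sum;
}

int main(void)
{
    /* Toy "template": word 5 plays the role of the header checksum field. */
    uint16_t tmpl[8] = { 0x0101, 0xCAFE, 0x0040, 0x0002, 0x0028, 0, 0x1234, 0xBEEF };

    /* Generate: choose the checksum word so the total folds to 0xFFFF
     * (negative zero in one's-complement arithmetic). */
    tmpl[5] = (uint16_t)(0xFFFF - fold_sum16(tmpl, 8));

    /* Verify: a template built this way folds back to 0xFFFF. */
    printf("folded sum with checksum in place: 0x%04X\n", (unsigned)fold_sum16(tmpl, 8));
    return 0;
}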
873/**
874 * qla4_83xx_read_reset_template - Read Reset Template from Flash
875 * @ha: Pointer to adapter structure
876 **/
877void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
878{
879 uint8_t *p_buff;
880 uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
881 uint32_t ret_val;
882
883 ha->reset_tmplt.seq_error = 0;
884 ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
885 if (ha->reset_tmplt.buff == NULL) {
886 ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
887 __func__);
888 goto exit_read_reset_template;
889 }
890
891 p_buff = ha->reset_tmplt.buff;
892 addr = QLA83XX_RESET_TEMPLATE_ADDR;
893
894 tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
895 sizeof(uint32_t);
896
897 DEBUG2(ql4_printk(KERN_INFO, ha,
898 "%s: Read template hdr size %d from Flash\n",
899 __func__, tmplt_hdr_def_size));
900
901 /* Copy template header from flash */
902 ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
903 tmplt_hdr_def_size);
904 if (ret_val != QLA_SUCCESS) {
905 ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
906 __func__);
907 goto exit_read_template_error;
908 }
909
910 ha->reset_tmplt.hdr =
911 (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;
912
913 /* Validate the template header size and signature */
914 tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
915 if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
916 (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
917 ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
918 __func__, tmplt_hdr_size, tmplt_hdr_def_size);
919 goto exit_read_template_error;
920 }
921
922 addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
923 p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
924 tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
925 ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);
926
927 DEBUG2(ql4_printk(KERN_INFO, ha,
928 "%s: Read rest of the template size %d\n",
929 __func__, ha->reset_tmplt.hdr->size));
930
931 /* Copy rest of the template */
932 ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
933 tmplt_hdr_def_size);
934 if (ret_val != QLA_SUCCESS) {
935 ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
936 __func__);
937 goto exit_read_template_error;
938 }
939
940 /* Integrity check */
941 if (qla4_83xx_reset_seq_checksum_test(ha)) {
942 ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
943 __func__);
944 goto exit_read_template_error;
945 }
946 DEBUG2(ql4_printk(KERN_INFO, ha,
947 "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
948 __func__));
949
950 /* Get STOP, START, INIT sequence offsets */
951 ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
952 ha->reset_tmplt.hdr->init_seq_offset;
953 ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
954 ha->reset_tmplt.hdr->start_seq_offset;
955 ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
956 ha->reset_tmplt.hdr->hdr_size;
957 qla4_83xx_dump_reset_seq_hdr(ha);
958
959 goto exit_read_reset_template;
960
961exit_read_template_error:
962 vfree(ha->reset_tmplt.buff);
963
964exit_read_reset_template:
965 return;
966}
967
968/**
969 * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
970 *
971 * @ha : Pointer to adapter structure
972 * @raddr : CRB address to read from
973 * @waddr : CRB address to write to
974 **/
975static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
976 uint32_t raddr, uint32_t waddr)
977{
978 uint32_t value;
979
980 qla4_83xx_rd_reg_indirect(ha, raddr, &value);
981 qla4_83xx_wr_reg_indirect(ha, waddr, value);
982}
983
984/**
985 * qla4_83xx_rmw_crb_reg - Read Modify Write crb register
986 *
987 * This function reads the value from raddr, ANDs it with test_mask, shifts
988 * it left/right, ORs/XORs it with values from the RMW header and writes the result to waddr.
989 *
990 * @ha : Pointer to adapter structure
991 * @raddr : CRB address to read from
992 * @waddr : CRB address to write to
993 * @p_rmw_hdr : header with shift/or/xor values.
994 **/
995static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
996 uint32_t waddr,
997 struct qla4_83xx_rmw *p_rmw_hdr)
998{
999 uint32_t value;
1000
1001 if (p_rmw_hdr->index_a)
1002 value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
1003 else
1004 qla4_83xx_rd_reg_indirect(ha, raddr, &value);
1005
1006 value &= p_rmw_hdr->test_mask;
1007 value <<= p_rmw_hdr->shl;
1008 value >>= p_rmw_hdr->shr;
1009 value |= p_rmw_hdr->or_value;
1010 value ^= p_rmw_hdr->xor_value;
1011
1012 qla4_83xx_wr_reg_indirect(ha, waddr, value);
1013
1014 return;
1015}
1016
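The read-modify-write helper above applies a fixed pipeline to the value it reads: AND with test_mask, shift left, shift right, OR, then XOR, with every operand taken from the template's RMW header. A tiny standalone sketch of that pipeline with made-up header values:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the fields of struct qla4_83xx_rmw that drive the transform. */
struct rmw_params {
    uint32_t test_mask, xor_value, or_value;
    uint8_t shl, shr;
};

static uint32_t apply_rmw(uint32_t value, const struct rmw_params *p)
{
    value &= p->test_mask;      /* keep only the field of interest */
    value <<= p->shl;           /* reposition it ...               */
    value >>= p->shr;
    value |= p->or_value;       /* set required bits               */
    value ^= p->xor_value;      /* and toggle selected bits        */
    return value;
}

int main(void)
{
    /* Example: extract bits 8..15, move them down to bits 0..7, set bit 31. */
    struct rmw_params p = {
        .test_mask = 0x0000FF00, .shl = 0, .shr = 8,
        .or_value = 0x80000000, .xor_value = 0,
    };

    printf("0x%08X -> 0x%08X\n", 0x1234ABCDu, apply_rmw(0x1234ABCDu, &p));
    return 0;
}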
1017static void qla4_83xx_write_list(struct scsi_qla_host *ha,
1018 struct qla4_83xx_reset_entry_hdr *p_hdr)
1019{
1020 struct qla4_83xx_entry *p_entry;
1021 uint32_t i;
1022
1023 p_entry = (struct qla4_83xx_entry *)
1024 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1025
1026 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1027 qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
1028 if (p_hdr->delay)
1029 udelay((uint32_t)(p_hdr->delay));
1030 }
1031}
1032
1033static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
1034 struct qla4_83xx_reset_entry_hdr *p_hdr)
1035{
1036 struct qla4_83xx_entry *p_entry;
1037 uint32_t i;
1038
1039 p_entry = (struct qla4_83xx_entry *)
1040 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1041
1042 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1043 qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
1044 if (p_hdr->delay)
1045 udelay((uint32_t)(p_hdr->delay));
1046 }
1047}
1048
1049static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
1050 struct qla4_83xx_reset_entry_hdr *p_hdr)
1051{
1052 long delay;
1053 struct qla4_83xx_entry *p_entry;
1054 struct qla4_83xx_poll *p_poll;
1055 uint32_t i;
1056 uint32_t value;
1057
1058 p_poll = (struct qla4_83xx_poll *)
1059 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1060
1061 /* Entries start after the 8-byte qla4_83xx_poll header, which
1062 * contains the test_mask and test_value. */
1063 p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
1064 sizeof(struct qla4_83xx_poll));
1065
1066 delay = (long)p_hdr->delay;
1067 if (!delay) {
1068 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1069 qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
1070 p_poll->test_mask,
1071 p_poll->test_value);
1072 }
1073 } else {
1074 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1075 if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
1076 p_poll->test_mask,
1077 p_poll->test_value)) {
1078 qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
1079 &value);
1080 qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
1081 &value);
1082 }
1083 }
1084 }
1085}
1086
1087static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
1088 struct qla4_83xx_reset_entry_hdr *p_hdr)
1089{
1090 long delay;
1091 struct qla4_83xx_quad_entry *p_entry;
1092 struct qla4_83xx_poll *p_poll;
1093 uint32_t i;
1094
1095 p_poll = (struct qla4_83xx_poll *)
1096 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1097 p_entry = (struct qla4_83xx_quad_entry *)
1098 ((char *)p_poll + sizeof(struct qla4_83xx_poll));
1099 delay = (long)p_hdr->delay;
1100
1101 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1102 qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
1103 p_entry->dr_value);
1104 qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
1105 p_entry->ar_value);
1106 if (delay) {
1107 if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
1108 p_poll->test_mask,
1109 p_poll->test_value)) {
1110 DEBUG2(ql4_printk(KERN_INFO, ha,
1111 "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
1112 __func__, i,
1113 ha->reset_tmplt.seq_index));
1114 }
1115 }
1116 }
1117}
1118
1119static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
1120 struct qla4_83xx_reset_entry_hdr *p_hdr)
1121{
1122 struct qla4_83xx_entry *p_entry;
1123 struct qla4_83xx_rmw *p_rmw_hdr;
1124 uint32_t i;
1125
1126 p_rmw_hdr = (struct qla4_83xx_rmw *)
1127 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1128 p_entry = (struct qla4_83xx_entry *)
1129 ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
1130
1131 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1132 qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
1133 p_rmw_hdr);
1134 if (p_hdr->delay)
1135 udelay((uint32_t)(p_hdr->delay));
1136 }
1137}
1138
1139static void qla4_83xx_pause(struct scsi_qla_host *ha,
1140 struct qla4_83xx_reset_entry_hdr *p_hdr)
1141{
1142 if (p_hdr->delay)
1143 mdelay((uint32_t)((long)p_hdr->delay));
1144}
1145
1146static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
1147 struct qla4_83xx_reset_entry_hdr *p_hdr)
1148{
1149 long delay;
1150 int index;
1151 struct qla4_83xx_quad_entry *p_entry;
1152 struct qla4_83xx_poll *p_poll;
1153 uint32_t i;
1154 uint32_t value;
1155
1156 p_poll = (struct qla4_83xx_poll *)
1157 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1158 p_entry = (struct qla4_83xx_quad_entry *)
1159 ((char *)p_poll + sizeof(struct qla4_83xx_poll));
1160 delay = (long)p_hdr->delay;
1161
1162 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1163 qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
1164 p_entry->ar_value);
1165 if (delay) {
1166 if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
1167 p_poll->test_mask,
1168 p_poll->test_value)) {
1169 DEBUG2(ql4_printk(KERN_INFO, ha,
1170 "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
1171 __func__, i,
1172 ha->reset_tmplt.seq_index));
1173 } else {
1174 index = ha->reset_tmplt.array_index;
1175 qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
1176 &value);
1177 ha->reset_tmplt.array[index++] = value;
1178
1179 if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
1180 ha->reset_tmplt.array_index = 1;
1181 }
1182 }
1183 }
1184}
1185
1186static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
1187 struct qla4_83xx_reset_entry_hdr *p_hdr)
1188{
1189 ha->reset_tmplt.seq_end = 1;
1190}
1191
1192static void qla4_83xx_template_end(struct scsi_qla_host *ha,
1193 struct qla4_83xx_reset_entry_hdr *p_hdr)
1194{
1195 ha->reset_tmplt.template_end = 1;
1196
1197 if (ha->reset_tmplt.seq_error == 0) {
1198 DEBUG2(ql4_printk(KERN_INFO, ha,
1199 "%s: Reset sequence completed SUCCESSFULLY.\n",
1200 __func__));
1201 } else {
1202 ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
1203 __func__);
1204 }
1205}
1206
1207/**
1208 * qla4_83xx_process_reset_template - Process reset template.
1209 *
1210 * Process all entries in the reset template until an entry with the SEQ_END
1211 * opcode, which marks the end of reset template processing. Each entry has a
1212 * Reset Entry header carrying the opcode/command, the size of the entry, the
1213 * number of entries in the sub-sequence and a delay in microsecs or timeout in millisecs.
1214 *
1215 * @ha : Pointer to adapter structure
1216 * @p_buff : Common reset entry header.
1217 **/
1218static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
1219 char *p_buff)
1220{
1221 int index, entries;
1222 struct qla4_83xx_reset_entry_hdr *p_hdr;
1223 char *p_entry = p_buff;
1224
1225 ha->reset_tmplt.seq_end = 0;
1226 ha->reset_tmplt.template_end = 0;
1227 entries = ha->reset_tmplt.hdr->entries;
1228 index = ha->reset_tmplt.seq_index;
1229
1230 for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) {
1231
1232 p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
1233 switch (p_hdr->cmd) {
1234 case OPCODE_NOP:
1235 break;
1236 case OPCODE_WRITE_LIST:
1237 qla4_83xx_write_list(ha, p_hdr);
1238 break;
1239 case OPCODE_READ_WRITE_LIST:
1240 qla4_83xx_read_write_list(ha, p_hdr);
1241 break;
1242 case OPCODE_POLL_LIST:
1243 qla4_83xx_poll_list(ha, p_hdr);
1244 break;
1245 case OPCODE_POLL_WRITE_LIST:
1246 qla4_83xx_poll_write_list(ha, p_hdr);
1247 break;
1248 case OPCODE_READ_MODIFY_WRITE:
1249 qla4_83xx_read_modify_write(ha, p_hdr);
1250 break;
1251 case OPCODE_SEQ_PAUSE:
1252 qla4_83xx_pause(ha, p_hdr);
1253 break;
1254 case OPCODE_SEQ_END:
1255 qla4_83xx_seq_end(ha, p_hdr);
1256 break;
1257 case OPCODE_TMPL_END:
1258 qla4_83xx_template_end(ha, p_hdr);
1259 break;
1260 case OPCODE_POLL_READ_LIST:
1261 qla4_83xx_poll_read_list(ha, p_hdr);
1262 break;
1263 default:
1264 ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
1265 __func__, p_hdr->cmd, index);
1266 break;
1267 }
1268
1269 /* Set pointer to next entry in the sequence. */
1270 p_entry += p_hdr->size;
1271 }
1272
1273 ha->reset_tmplt.seq_index = index;
1274}
1275
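The dispatcher above walks a byte buffer of variable-size entries: each starts with a common header giving the opcode, the total size of the entry (header included), a count and a delay, and the walk simply advances by p_hdr->size until it hits SEQ_END. A self-contained sketch of that traversal over a hand-built buffer; the struct layout mirrors qla4_83xx_reset_entry_hdr and the opcode values are the ones defined in ql4_83xx.h.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define OPCODE_NOP     0x0000
#define OPCODE_SEQ_END 0x0040

struct entry_hdr {              /* mirrors qla4_83xx_reset_entry_hdr */
    uint16_t cmd, size, count, delay;
};

int main(void)
{
    uint8_t buf[64];
    size_t off = 0;

    /* Build two entries: a NOP with 8 bytes of payload, then SEQ_END. */
    struct entry_hdr nop = { OPCODE_NOP, sizeof(struct entry_hdr) + 8, 1, 0 };
    struct entry_hdr end = { OPCODE_SEQ_END, sizeof(struct entry_hdr), 0, 0 };

    memcpy(buf, &nop, sizeof(nop));
    memcpy(buf + nop.size, &end, sizeof(end));

    /* Walk: advance by each entry's self-declared size until SEQ_END. */
    for (;;) {
        struct entry_hdr hdr;

        memcpy(&hdr, buf + off, sizeof(hdr));
        printf("entry at offset %zu: cmd=0x%04X size=%u\n",
               off, (unsigned)hdr.cmd, (unsigned)hdr.size);
        if (hdr.cmd == OPCODE_SEQ_END)
            break;
        off += hdr.size;
    }
    return 0;
}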
1276static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
1277{
1278 ha->reset_tmplt.seq_index = 0;
1279 qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);
1280
1281 if (ha->reset_tmplt.seq_end != 1)
1282 ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
1283 __func__);
1284}
1285
1286static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
1287{
1288 qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);
1289
1290 if (ha->reset_tmplt.template_end != 1)
1291 ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
1292 __func__);
1293}
1294
1295static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
1296{
1297 qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);
1298
1299 if (ha->reset_tmplt.seq_end != 1)
1300 ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
1301 __func__);
1302}
1303
1304static int qla4_83xx_restart(struct scsi_qla_host *ha)
1305{
1306 int ret_val = QLA_SUCCESS;
1307
1308 qla4_83xx_process_stop_seq(ha);
1309
1310 /* Collect minidump */
1311 if (!test_and_clear_bit(AF_83XX_NO_FW_DUMP, &ha->flags))
1312 qla4_8xxx_get_minidump(ha);
1313
1314 qla4_83xx_process_init_seq(ha);
1315
1316 if (qla4_83xx_copy_bootloader(ha)) {
1317 ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
1318 __func__);
1319 ret_val = QLA_ERROR;
1320 goto exit_restart;
1321 }
1322
1323 qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
1324 qla4_83xx_process_start_seq(ha);
1325
1326exit_restart:
1327 return ret_val;
1328}
1329
1330int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
1331{
1332 int ret_val = QLA_SUCCESS;
1333
1334 ret_val = qla4_83xx_restart(ha);
1335 if (ret_val == QLA_ERROR) {
1336 ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
1337 goto exit_start_fw;
1338 } else {
1339 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
1340 __func__));
1341 }
1342
1343 ret_val = qla4_83xx_check_cmd_peg_status(ha);
1344 if (ret_val == QLA_ERROR)
1345 ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
1346 __func__);
1347
1348exit_start_fw:
1349 return ret_val;
1350}
1351
1352/*----------------------Interrupt Related functions ---------------------*/
1353
1354void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
1355{
1356 uint32_t mb_int, ret;
1357
1358 if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
1359 qla4_8xxx_mbx_intr_disable(ha);
1360
1361 ret = readl(&ha->qla4_83xx_reg->mbox_int);
1362 mb_int = ret & ~INT_ENABLE_FW_MB;
1363 writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1364 writel(1, &ha->qla4_83xx_reg->leg_int_mask);
1365}
1366
1367void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
1368{
1369 uint32_t mb_int;
1370
1371 qla4_8xxx_mbx_intr_enable(ha);
1372 mb_int = INT_ENABLE_FW_MB;
1373 writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1374 writel(0, &ha->qla4_83xx_reg->leg_int_mask);
1375
1376 set_bit(AF_INTERRUPTS_ON, &ha->flags);
1377}
1378
1379void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
1380 int incount)
1381{
1382 int i;
1383
1384 /* Load all mailbox registers, except mailbox 0. */
1385 for (i = 1; i < incount; i++)
1386 writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);
1387
1388 writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);
1389
1390 /* Set the Host Interrupt register to 1 to tell the firmware that
1391 * a mailbox command is pending. After reading the mailbox
1392 * command, the firmware clears the host interrupt register. */
1393 writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
1394}
1395
1396void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
1397{
1398 int intr_status;
1399
1400 intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
1401 if (intr_status) {
1402 ha->mbox_status_count = outcount;
1403 ha->isp_ops->interrupt_service_routine(ha, intr_status);
1404 }
1405}
1406
1407/**
1408 * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
1409 * @ha: pointer to host adapter structure.
1410 **/
1411int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
1412{
1413 int rval;
1414 uint32_t dev_state;
1415
1416 ha->isp_ops->idc_lock(ha);
1417 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
1418
1419 if (ql4xdontresethba)
1420 qla4_83xx_set_idc_dontreset(ha);
1421
1422 if (dev_state == QLA8XXX_DEV_READY) {
1423 /* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
1424 * recovery */
1425 if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
1426 ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
1427 __func__);
1428 rval = QLA_ERROR;
1429 goto exit_isp_reset;
1430 }
1431
1432 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
1433 __func__));
1434 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
1435 QLA8XXX_DEV_NEED_RESET);
1436
1437 } else {
1438 /* If device_state is NEED_RESET, go ahead with the
1439 * reset, irrespective of ql4xdontresethba. This allows a
1440 * non-reset-owner to force a reset: the non-reset-owner sets
1441 * the IDC_CTRL BIT0 to prevent the reset-owner from doing a reset
1442 * and then forces a reset by setting device_state to
1443 * NEED_RESET. */
1444 DEBUG2(ql4_printk(KERN_INFO, ha,
1445 "%s: HW state already set to NEED_RESET\n",
1446 __func__));
1447 }
1448
1449 /* For ISP8324, the reset owner is NIC, iSCSI or FCoE based on priority
1450 * and which drivers are present. Unlike ISP8022, the function setting
1451 * NEED_RESET may not be the reset owner. */
1452 if (qla4_83xx_can_perform_reset(ha))
1453 set_bit(AF_8XXX_RST_OWNER, &ha->flags);
1454
1455 ha->isp_ops->idc_unlock(ha);
1456 rval = qla4_8xxx_device_state_handler(ha);
1457
1458 ha->isp_ops->idc_lock(ha);
1459 qla4_8xxx_clear_rst_ready(ha);
1460exit_isp_reset:
1461 ha->isp_ops->idc_unlock(ha);
1462
1463 if (rval == QLA_SUCCESS)
1464 clear_bit(AF_FW_RECOVERY, &ha->flags);
1465
1466 return rval;
1467}
1468
1469static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
1470{
1471 u32 val = 0, val1 = 0;
1472 int i, status = QLA_SUCCESS;
1473
1474 status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
1475 DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));
1476
1477 /* Port 0 Rx Buffer Pause Threshold Registers. */
1478 DEBUG2(ql4_printk(KERN_INFO, ha,
1479 "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
1480 for (i = 0; i < 8; i++) {
1481 status = qla4_83xx_rd_reg_indirect(ha,
1482 QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
1483 DEBUG2(pr_info("0x%x ", val));
1484 }
1485
1486 DEBUG2(pr_info("\n"));
1487
1488 /* Port 1 Rx Buffer Pause Threshold Registers. */
1489 DEBUG2(ql4_printk(KERN_INFO, ha,
1490 "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
1491 for (i = 0; i < 8; i++) {
1492 status = qla4_83xx_rd_reg_indirect(ha,
1493 QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
1494 DEBUG2(pr_info("0x%x ", val));
1495 }
1496
1497 DEBUG2(pr_info("\n"));
1498
1499 /* Port 0 RxB Traffic Class Max Cell Registers. */
1500 DEBUG2(ql4_printk(KERN_INFO, ha,
1501 "Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
1502 for (i = 0; i < 4; i++) {
1503 status = qla4_83xx_rd_reg_indirect(ha,
1504 QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
1505 DEBUG2(pr_info("0x%x ", val));
1506 }
1507
1508 DEBUG2(pr_info("\n"));
1509
1510 /* Port 1 RxB Traffic Class Max Cell Registers. */
1511 DEBUG2(ql4_printk(KERN_INFO, ha,
1512 "Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
1513 for (i = 0; i < 4; i++) {
1514 status = qla4_83xx_rd_reg_indirect(ha,
1515 QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
1516 DEBUG2(pr_info("0x%x ", val));
1517 }
1518
1519 DEBUG2(pr_info("\n"));
1520
1521 /* Port 0 RxB Rx Traffic Class Stats. */
1522 DEBUG2(ql4_printk(KERN_INFO, ha,
1523 "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
1524 for (i = 7; i >= 0; i--) {
1525 status = qla4_83xx_rd_reg_indirect(ha,
1526 QLA83XX_PORT0_RXB_TC_STATS,
1527 &val);
1528 val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
1529 qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
1530 (val | (i << 29)));
1531 status = qla4_83xx_rd_reg_indirect(ha,
1532 QLA83XX_PORT0_RXB_TC_STATS,
1533 &val);
1534 DEBUG2(pr_info("0x%x ", val));
1535 }
1536
1537 DEBUG2(pr_info("\n"));
1538
1539 /* Port 1 RxB Rx Traffic Class Stats. */
1540 DEBUG2(ql4_printk(KERN_INFO, ha,
1541 "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
1542 for (i = 7; i >= 0; i--) {
1543 status = qla4_83xx_rd_reg_indirect(ha,
1544 QLA83XX_PORT1_RXB_TC_STATS,
1545 &val);
1546 val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
1547 qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
1548 (val | (i << 29)));
1549 status = qla4_83xx_rd_reg_indirect(ha,
1550 QLA83XX_PORT1_RXB_TC_STATS,
1551 &val);
1552 DEBUG2(pr_info("0x%x ", val));
1553 }
1554
1555 DEBUG2(pr_info("\n"));
1556
1557 status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
1558 &val);
1559 status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
1560 &val1);
1561
1562 DEBUG2(ql4_printk(KERN_INFO, ha,
1563 "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
1564 val, val1));
1565}
1566
1567static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
1568{
1569 int i;
1570
1571 /* set SRE-Shim Control Register */
1572 qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
1573 QLA83XX_SET_PAUSE_VAL);
1574
1575 for (i = 0; i < 8; i++) {
1576 /* Port 0 Rx Buffer Pause Threshold Registers. */
1577 qla4_83xx_wr_reg_indirect(ha,
1578 QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
1579 QLA83XX_SET_PAUSE_VAL);
1580 /* Port 1 Rx Buffer Pause Threshold Registers. */
1581 qla4_83xx_wr_reg_indirect(ha,
1582 QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
1583 QLA83XX_SET_PAUSE_VAL);
1584 }
1585
1586 for (i = 0; i < 4; i++) {
1587 /* Port 0 RxB Traffic Class Max Cell Registers. */
1588 qla4_83xx_wr_reg_indirect(ha,
1589 QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
1590 QLA83XX_SET_TC_MAX_CELL_VAL);
1591 /* Port 1 RxB Traffic Class Max Cell Registers. */
1592 qla4_83xx_wr_reg_indirect(ha,
1593 QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
1594 QLA83XX_SET_TC_MAX_CELL_VAL);
1595 }
1596
1597 qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
1598 QLA83XX_SET_PAUSE_VAL);
1599 qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
1600 QLA83XX_SET_PAUSE_VAL);
1601
1602 ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
1603}
1604
1605void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
1606{
1607 ha->isp_ops->idc_lock(ha);
1608 qla4_83xx_dump_pause_control_regs(ha);
1609 __qla4_83xx_disable_pause(ha);
1610 ha->isp_ops->idc_unlock(ha);
1611}
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
new file mode 100644
index 000000000000..6a00f903f2a6
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -0,0 +1,283 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#ifndef __QL483XX_H
9#define __QL483XX_H
10
11/* Indirectly Mapped Registers */
12#define QLA83XX_FLASH_SPI_STATUS 0x2808E010
13#define QLA83XX_FLASH_SPI_CONTROL 0x2808E014
14#define QLA83XX_FLASH_STATUS 0x42100004
15#define QLA83XX_FLASH_CONTROL 0x42110004
16#define QLA83XX_FLASH_ADDR 0x42110008
17#define QLA83XX_FLASH_WRDATA 0x4211000C
18#define QLA83XX_FLASH_RDDATA 0x42110018
19#define QLA83XX_FLASH_DIRECT_WINDOW 0x42110030
20#define QLA83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA))
21
22/* Directly Mapped Registers in 83xx register table */
23
24/* Flash access regs */
25#define QLA83XX_FLASH_LOCK 0x3850
26#define QLA83XX_FLASH_UNLOCK 0x3854
27#define QLA83XX_FLASH_LOCK_ID 0x3500
28
29/* Driver Lock regs */
30#define QLA83XX_DRV_LOCK 0x3868
31#define QLA83XX_DRV_UNLOCK 0x386C
32#define QLA83XX_DRV_LOCK_ID 0x3504
33#define QLA83XX_DRV_LOCKRECOVERY 0x379C
34
35/* IDC version */
36#define QLA83XX_IDC_VER_MAJ_VALUE 0x1
37#define QLA83XX_IDC_VER_MIN_VALUE 0x0
38
39/* IDC Registers : Driver Coexistence Defines */
40#define QLA83XX_CRB_IDC_VER_MAJOR 0x3780
41#define QLA83XX_CRB_IDC_VER_MINOR 0x3798
42#define QLA83XX_IDC_DRV_CTRL 0x3790
43#define QLA83XX_IDC_DRV_AUDIT 0x3794
44#define QLA83XX_SRE_SHIM_CONTROL 0x0D200284
45#define QLA83XX_PORT0_RXB_PAUSE_THRS 0x0B2003A4
46#define QLA83XX_PORT1_RXB_PAUSE_THRS 0x0B2013A4
47#define QLA83XX_PORT0_RXB_TC_MAX_CELL 0x0B200388
48#define QLA83XX_PORT1_RXB_TC_MAX_CELL 0x0B201388
49#define QLA83XX_PORT0_RXB_TC_STATS 0x0B20039C
50#define QLA83XX_PORT1_RXB_TC_STATS 0x0B20139C
51#define QLA83XX_PORT2_IFB_PAUSE_THRS 0x0B200704
52#define QLA83XX_PORT3_IFB_PAUSE_THRS 0x0B201704
53
54/* value to program into the pause threshold registers */
55#define QLA83XX_SET_PAUSE_VAL 0x0
56#define QLA83XX_SET_TC_MAX_CELL_VAL 0x03FF03FF
57
58/* qla_83xx_reg_tbl registers */
59#define QLA83XX_PEG_HALT_STATUS1 0x34A8
60#define QLA83XX_PEG_HALT_STATUS2 0x34AC
61#define QLA83XX_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */
62#define QLA83XX_FW_CAPABILITIES 0x3528
63#define QLA83XX_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */
64#define QLA83XX_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */
65#define QLA83XX_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */
66#define QLA83XX_CRB_DRV_SCRATCH 0x3548
67#define QLA83XX_CRB_DEV_PART_INFO1 0x37E0
68#define QLA83XX_CRB_DEV_PART_INFO2 0x37E4
69
70#define QLA83XX_FW_VER_MAJOR 0x3550
71#define QLA83XX_FW_VER_MINOR 0x3554
72#define QLA83XX_FW_VER_SUB 0x3558
73#define QLA83XX_NPAR_STATE 0x359C
74#define QLA83XX_FW_IMAGE_VALID 0x35FC
75#define QLA83XX_CMDPEG_STATE 0x3650
76#define QLA83XX_ASIC_TEMP 0x37B4
77#define QLA83XX_FW_API 0x356C
78#define QLA83XX_DRV_OP_MODE 0x3570
79
80static const uint32_t qla4_83xx_reg_tbl[] = {
81 QLA83XX_PEG_HALT_STATUS1,
82 QLA83XX_PEG_HALT_STATUS2,
83 QLA83XX_PEG_ALIVE_COUNTER,
84 QLA83XX_CRB_DRV_ACTIVE,
85 QLA83XX_CRB_DEV_STATE,
86 QLA83XX_CRB_DRV_STATE,
87 QLA83XX_CRB_DRV_SCRATCH,
88 QLA83XX_CRB_DEV_PART_INFO1,
89 QLA83XX_CRB_IDC_VER_MAJOR,
90 QLA83XX_FW_VER_MAJOR,
91 QLA83XX_FW_VER_MINOR,
92 QLA83XX_FW_VER_SUB,
93 QLA83XX_CMDPEG_STATE,
94 QLA83XX_ASIC_TEMP,
95};
96
97#define QLA83XX_CRB_WIN_BASE 0x3800
98#define QLA83XX_CRB_WIN_FUNC(f) (QLA83XX_CRB_WIN_BASE+((f)*4))
99#define QLA83XX_SEM_LOCK_BASE 0x3840
100#define QLA83XX_SEM_UNLOCK_BASE 0x3844
101#define QLA83XX_SEM_LOCK_FUNC(f) (QLA83XX_SEM_LOCK_BASE+((f)*8))
102#define QLA83XX_SEM_UNLOCK_FUNC(f) (QLA83XX_SEM_UNLOCK_BASE+((f)*8))
103#define QLA83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
104#define QLA83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
105#define QLA83XX_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
106#define QLA83XX_LINK_SPEED_FACTOR 10
107
108/* FLASH API Defines */
109#define QLA83xx_FLASH_MAX_WAIT_USEC 100
110#define QLA83XX_FLASH_LOCK_TIMEOUT 10000
111#define QLA83XX_FLASH_SECTOR_SIZE 65536
112#define QLA83XX_DRV_LOCK_TIMEOUT 2000
113#define QLA83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
114#define QLA83XX_FLASH_WRITE_CMD 0xdacdacda
115#define QLA83XX_FLASH_BUFFER_WRITE_CMD 0xcadcadca
116#define QLA83XX_FLASH_READ_RETRY_COUNT 2000
117#define QLA83XX_FLASH_STATUS_READY 0x6
118#define QLA83XX_FLASH_BUFFER_WRITE_MIN 2
119#define QLA83XX_FLASH_BUFFER_WRITE_MAX 64
120#define QLA83XX_FLASH_STATUS_REG_POLL_DELAY 1
121#define QLA83XX_ERASE_MODE 1
122#define QLA83XX_WRITE_MODE 2
123#define QLA83XX_DWORD_WRITE_MODE 3
124
125#define QLA83XX_GLOBAL_RESET 0x38CC
126#define QLA83XX_WILDCARD 0x38F0
127#define QLA83XX_INFORMANT 0x38FC
128#define QLA83XX_HOST_MBX_CTRL 0x3038
129#define QLA83XX_FW_MBX_CTRL 0x303C
130#define QLA83XX_BOOTLOADER_ADDR 0x355C
131#define QLA83XX_BOOTLOADER_SIZE 0x3560
132#define QLA83XX_FW_IMAGE_ADDR 0x3564
133#define QLA83XX_MBX_INTR_ENABLE 0x1000
134#define QLA83XX_MBX_INTR_MASK 0x1200
135
136/* IDC Control Register bit defines */
137#define DONTRESET_BIT0 0x1
138#define GRACEFUL_RESET_BIT1 0x2
139
140#define QLA83XX_HALT_STATUS_INFORMATIONAL (0x1 << 29)
141#define QLA83XX_HALT_STATUS_FW_RESET (0x2 << 29)
142#define QLA83XX_HALT_STATUS_UNRECOVERABLE (0x4 << 29)
143
144/* Firmware image definitions */
145#define QLA83XX_BOOTLOADER_FLASH_ADDR 0x10000
146#define QLA83XX_BOOT_FROM_FLASH 0
147
148#define QLA83XX_IDC_PARAM_ADDR 0x3e8020
149/* Reset template definitions */
150#define QLA83XX_MAX_RESET_SEQ_ENTRIES 16
151#define QLA83XX_RESTART_TEMPLATE_SIZE 0x2000
152#define QLA83XX_RESET_TEMPLATE_ADDR 0x4F0000
153#define QLA83XX_RESET_SEQ_VERSION 0x0101
154
155/* Reset template entry opcodes */
156#define OPCODE_NOP 0x0000
157#define OPCODE_WRITE_LIST 0x0001
158#define OPCODE_READ_WRITE_LIST 0x0002
159#define OPCODE_POLL_LIST 0x0004
160#define OPCODE_POLL_WRITE_LIST 0x0008
161#define OPCODE_READ_MODIFY_WRITE 0x0010
162#define OPCODE_SEQ_PAUSE 0x0020
163#define OPCODE_SEQ_END 0x0040
164#define OPCODE_TMPL_END 0x0080
165#define OPCODE_POLL_READ_LIST 0x0100
166
167/* Template Header */
168#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE
169struct qla4_83xx_reset_template_hdr {
170 __le16 version;
171 __le16 signature;
172 __le16 size;
173 __le16 entries;
174 __le16 hdr_size;
175 __le16 checksum;
176 __le16 init_seq_offset;
177 __le16 start_seq_offset;
178} __packed;
179
180/* Common Entry Header. */
181struct qla4_83xx_reset_entry_hdr {
182 __le16 cmd;
183 __le16 size;
184 __le16 count;
185 __le16 delay;
186} __packed;
187
188/* Generic poll entry type. */
189struct qla4_83xx_poll {
190 __le32 test_mask;
191 __le32 test_value;
192} __packed;
193
194/* Read modify write entry type. */
195struct qla4_83xx_rmw {
196 __le32 test_mask;
197 __le32 xor_value;
198 __le32 or_value;
199 uint8_t shl;
200 uint8_t shr;
201 uint8_t index_a;
202 uint8_t rsvd;
203} __packed;
204
205/* Generic Entry Item with 2 DWords. */
206struct qla4_83xx_entry {
207 __le32 arg1;
208 __le32 arg2;
209} __packed;
210
211/* Generic Entry Item with 4 DWords.*/
212struct qla4_83xx_quad_entry {
213 __le32 dr_addr;
214 __le32 dr_value;
215 __le32 ar_addr;
216 __le32 ar_value;
217} __packed;
218
219struct qla4_83xx_reset_template {
220 int seq_index;
221 int seq_error;
222 int array_index;
223 uint32_t array[QLA83XX_MAX_RESET_SEQ_ENTRIES];
224 uint8_t *buff;
225 uint8_t *stop_offset;
226 uint8_t *start_offset;
227 uint8_t *init_offset;
228 struct qla4_83xx_reset_template_hdr *hdr;
229 uint8_t seq_end;
230 uint8_t template_end;
231};
232
233/* POLLRD Entry */
234struct qla83xx_minidump_entry_pollrd {
235 struct qla8xxx_minidump_entry_hdr h;
236 uint32_t select_addr;
237 uint32_t read_addr;
238 uint32_t select_value;
239 uint16_t select_value_stride;
240 uint16_t op_count;
241 uint32_t poll_wait;
242 uint32_t poll_mask;
243 uint32_t data_size;
244 uint32_t rsvd_1;
245};
246
247/* RDMUX2 Entry */
248struct qla83xx_minidump_entry_rdmux2 {
249 struct qla8xxx_minidump_entry_hdr h;
250 uint32_t select_addr_1;
251 uint32_t select_addr_2;
252 uint32_t select_value_1;
253 uint32_t select_value_2;
254 uint32_t op_count;
255 uint32_t select_value_mask;
256 uint32_t read_addr;
257 uint8_t select_value_stride;
258 uint8_t data_size;
259 uint8_t rsvd[2];
260};
261
262/* POLLRDMWR Entry */
263struct qla83xx_minidump_entry_pollrdmwr {
264 struct qla8xxx_minidump_entry_hdr h;
265 uint32_t addr_1;
266 uint32_t addr_2;
267 uint32_t value_1;
268 uint32_t value_2;
269 uint32_t poll_wait;
270 uint32_t poll_mask;
271 uint32_t modify_mask;
272 uint32_t data_size;
273};
274
275/* IDC additional information */
276struct qla4_83xx_idc_information {
277 uint32_t request_desc; /* IDC request descriptor */
278 uint32_t info1; /* IDC additional info */
279 uint32_t info2; /* IDC additional info */
280 uint32_t info3; /* IDC additional info */
281};
282
283#endif
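
The per-function macros above (QLA83XX_CRB_WIN_FUNC, QLA83XX_SEM_LOCK_FUNC, QLA83XX_LINK_STATE, QLA83XX_LINK_SPEED) derive a register offset from the PCI function number. A minimal user-space sketch of how they expand; only the macro definitions are copied from the header above, while the main() harness and the choice of four functions are illustrative, not driver code:

#include <stdio.h>

#define QLA83XX_CRB_WIN_BASE		0x3800
#define QLA83XX_CRB_WIN_FUNC(f)		(QLA83XX_CRB_WIN_BASE + ((f) * 4))
#define QLA83XX_SEM_LOCK_BASE		0x3840
#define QLA83XX_SEM_LOCK_FUNC(f)	(QLA83XX_SEM_LOCK_BASE + ((f) * 8))
#define QLA83XX_LINK_STATE(f)		(0x3698 + ((f) > 7 ? 4 : 0))
#define QLA83XX_LINK_SPEED(f)		(0x36E0 + (((f) >> 2) * 4))

int main(void)
{
	unsigned int f;

	/* Print the offsets each PCI function would use. */
	for (f = 0; f < 4; f++)
		printf("func %u: crb_win=0x%04X sem_lock=0x%04X link_state=0x%04X link_speed=0x%04X\n",
		       f,
		       (unsigned int)QLA83XX_CRB_WIN_FUNC(f),
		       (unsigned int)QLA83XX_SEM_LOCK_FUNC(f),
		       (unsigned int)QLA83XX_LINK_STATE(f),
		       (unsigned int)QLA83XX_LINK_SPEED(f));
	return 0;
}
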
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index c681b2a355e1..76819b71ada7 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -17,7 +17,7 @@ qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
17 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 17 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
18 struct device, kobj))); 18 struct device, kobj)));
19 19
20 if (!is_qla8022(ha)) 20 if (is_qla40XX(ha))
21 return -EINVAL; 21 return -EINVAL;
22 22
23 if (!test_bit(AF_82XX_DUMP_READING, &ha->flags)) 23 if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
@@ -38,7 +38,7 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
38 long reading; 38 long reading;
39 int ret = 0; 39 int ret = 0;
40 40
41 if (!is_qla8022(ha)) 41 if (is_qla40XX(ha))
42 return -EINVAL; 42 return -EINVAL;
43 43
44 if (off != 0) 44 if (off != 0)
@@ -75,21 +75,21 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
75 break; 75 break;
76 case 2: 76 case 2:
77 /* Reset HBA */ 77 /* Reset HBA */
78 qla4_8xxx_idc_lock(ha); 78 ha->isp_ops->idc_lock(ha);
79 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 79 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
80 if (dev_state == QLA82XX_DEV_READY) { 80 if (dev_state == QLA8XXX_DEV_READY) {
81 ql4_printk(KERN_INFO, ha, 81 ql4_printk(KERN_INFO, ha,
82 "%s: Setting Need reset, reset_owner is 0x%x.\n", 82 "%s: Setting Need reset, reset_owner is 0x%x.\n",
83 __func__, ha->func_num); 83 __func__, ha->func_num);
84 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 84 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
85 QLA82XX_DEV_NEED_RESET); 85 QLA8XXX_DEV_NEED_RESET);
86 set_bit(AF_82XX_RST_OWNER, &ha->flags); 86 set_bit(AF_8XXX_RST_OWNER, &ha->flags);
87 } else 87 } else
88 ql4_printk(KERN_INFO, ha, 88 ql4_printk(KERN_INFO, ha,
89 "%s: Reset not performed as device state is 0x%x\n", 89 "%s: Reset not performed as device state is 0x%x\n",
90 __func__, dev_state); 90 __func__, dev_state);
91 91
92 qla4_8xxx_idc_unlock(ha); 92 ha->isp_ops->idc_unlock(ha);
93 break; 93 break;
94 default: 94 default:
95 /* do nothing */ 95 /* do nothing */
@@ -150,7 +150,7 @@ qla4xxx_fw_version_show(struct device *dev,
150{ 150{
151 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); 151 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
152 152
153 if (is_qla8022(ha)) 153 if (is_qla80XX(ha))
154 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", 154 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
155 ha->firmware_version[0], 155 ha->firmware_version[0],
156 ha->firmware_version[1], 156 ha->firmware_version[1],
@@ -214,7 +214,7 @@ qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr,
214{ 214{
215 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); 215 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
216 216
217 if (!is_qla8022(ha)) 217 if (is_qla40XX(ha))
218 return -ENOSYS; 218 return -ENOSYS;
219 219
220 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt); 220 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt);
@@ -226,7 +226,7 @@ qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr,
226{ 226{
227 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); 227 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
228 228
229 if (!is_qla8022(ha)) 229 if (is_qla40XX(ha))
230 return -ENOSYS; 230 return -ENOSYS;
231 231
232 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num); 232 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num);
@@ -238,7 +238,7 @@ qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr,
238{ 238{
239 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); 239 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
240 240
241 if (!is_qla8022(ha)) 241 if (is_qla40XX(ha))
242 return -ENOSYS; 242 return -ENOSYS;
243 243
244 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt); 244 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
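
The ql4_attr.c hunks above replace direct qla4_8xxx_* calls with indirection through ha->isp_ops and the generic QLA8XXX_* register indices, so the same sysfs code can drive both ISP82xx and ISP83xx. A rough user-space sketch of that pattern only; the demo_* names, the regs[] array and the state values are invented for illustration and are not the driver's:

#include <stdint.h>
#include <stdio.h>

struct demo_hba;

struct demo_isp_ops {                          /* stand-in for struct isp_operations */
	void     (*idc_lock)(struct demo_hba *);
	void     (*idc_unlock)(struct demo_hba *);
	uint32_t (*rd_reg_direct)(struct demo_hba *, uint32_t);
	void     (*wr_reg_direct)(struct demo_hba *, uint32_t, uint32_t);
};

struct demo_hba {
	const struct demo_isp_ops *isp_ops;
	uint32_t regs[64];                     /* fake register file */
};

static void demo_lock(struct demo_hba *ha)   { (void)ha; /* grab IDC lock */ }
static void demo_unlock(struct demo_hba *ha) { (void)ha; /* drop IDC lock */ }
static uint32_t demo_rd(struct demo_hba *ha, uint32_t off) { return ha->regs[off]; }
static void demo_wr(struct demo_hba *ha, uint32_t off, uint32_t val) { ha->regs[off] = val; }

static const struct demo_isp_ops demo_ops = {
	.idc_lock      = demo_lock,
	.idc_unlock    = demo_unlock,
	.rd_reg_direct = demo_rd,
	.wr_reg_direct = demo_wr,
};

/* Chip-agnostic caller, shaped like the reworked "Reset HBA" case above. */
static void request_reset(struct demo_hba *ha, uint32_t dev_state_off)
{
	ha->isp_ops->idc_lock(ha);
	if (ha->isp_ops->rd_reg_direct(ha, dev_state_off) == 1 /* "ready" */)
		ha->isp_ops->wr_reg_direct(ha, dev_state_off, 2 /* "need reset" */);
	ha->isp_ops->idc_unlock(ha);
}

int main(void)
{
	struct demo_hba ha = { .isp_ops = &demo_ops };

	ha.regs[3] = 1;
	request_reset(&ha, 3);
	printf("dev_state is now %u\n", (unsigned int)ha.regs[3]);
	return 0;
}
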
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 8d58ae274829..77b7c594010f 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -37,7 +37,7 @@ void qla4xxx_dump_registers(struct scsi_qla_host *ha)
37 if (is_qla8022(ha)) { 37 if (is_qla8022(ha)) {
38 for (i = 1; i < MBOX_REG_COUNT; i++) 38 for (i = 1; i < MBOX_REG_COUNT; i++)
39 printk(KERN_INFO "mailbox[%d] = 0x%08X\n", 39 printk(KERN_INFO "mailbox[%d] = 0x%08X\n",
40 i, readl(&ha->qla4_8xxx_reg->mailbox_in[i])); 40 i, readl(&ha->qla4_82xx_reg->mailbox_in[i]));
41 return; 41 return;
42 } 42 }
43 43
@@ -131,3 +131,31 @@ void qla4xxx_dump_registers(struct scsi_qla_host *ha)
131 &ha->reg->ctrl_status); 131 &ha->reg->ctrl_status);
132 } 132 }
133} 133}
134
135void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha)
136{
137 uint32_t halt_status1, halt_status2;
138
139 halt_status1 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
140 halt_status2 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS2);
141
142 if (is_qla8022(ha)) {
143 ql4_printk(KERN_INFO, ha,
144 "scsi(%ld): %s, ISP8022 Dumping hw/fw registers:\n"
145 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
146 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
147 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
148 " PEG_NET_4_PC: 0x%x\n", ha->host_no,
149 __func__, halt_status1, halt_status2,
150 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c),
151 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c),
152 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c),
153 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c),
154 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c));
155 } else if (is_qla8032(ha)) {
156 ql4_printk(KERN_INFO, ha,
157 "scsi(%ld): %s, ISP8324 Dumping hw/fw registers:\n"
158 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n",
159 ha->host_no, __func__, halt_status1, halt_status2);
160 }
161}
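
The new qla4_8xxx_dump_peg_reg() above only prints PEG_HALT_STATUS1/2; on ISP83xx the top bits of halt status 1 classify the stop (see the QLA83XX_HALT_STATUS_* defines in ql4_83xx.h earlier in this diff). A small user-space sketch of that decode; the halt_class() helper, the priority order and the sample values are illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Same values as the QLA83XX_HALT_STATUS_* defines (0x1/0x2/0x4 << 29),
 * written as unsigned constants for a clean user-space build. */
#define HALT_STATUS_INFORMATIONAL	0x20000000u
#define HALT_STATUS_FW_RESET		0x40000000u
#define HALT_STATUS_UNRECOVERABLE	0x80000000u

static const char *halt_class(uint32_t halt_status1)
{
	if (halt_status1 & HALT_STATUS_UNRECOVERABLE)
		return "unrecoverable";
	if (halt_status1 & HALT_STATUS_FW_RESET)
		return "firmware requests reset";
	if (halt_status1 & HALT_STATUS_INFORMATIONAL)
		return "informational";
	return "no class bit set";
}

int main(void)
{
	printf("0x%08X -> %s\n", 0x20001234u, halt_class(0x20001234u));
	printf("0x%08X -> %s\n", 0x80000001u, halt_class(0x80000001u));
	return 0;
}
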
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
index abd83602cdda..5b0afc18ef18 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.h
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 7fdba7f1ffb7..329d553eae94 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -42,6 +42,7 @@
42#include "ql4_nx.h" 42#include "ql4_nx.h"
43#include "ql4_fw.h" 43#include "ql4_fw.h"
44#include "ql4_nvram.h" 44#include "ql4_nvram.h"
45#include "ql4_83xx.h"
45 46
46#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010 47#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
47#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010 48#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
@@ -59,6 +60,10 @@
59#define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022 60#define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022
60#endif 61#endif
61 62
63#ifndef PCI_DEVICE_ID_QLOGIC_ISP8324
64#define PCI_DEVICE_ID_QLOGIC_ISP8324 0x8032
65#endif
66
62#define ISP4XXX_PCI_FN_1 0x1 67#define ISP4XXX_PCI_FN_1 0x1
63#define ISP4XXX_PCI_FN_2 0x3 68#define ISP4XXX_PCI_FN_2 0x3
64 69
@@ -388,8 +393,10 @@ struct isp_operations {
388 void (*disable_intrs) (struct scsi_qla_host *); 393 void (*disable_intrs) (struct scsi_qla_host *);
389 void (*enable_intrs) (struct scsi_qla_host *); 394 void (*enable_intrs) (struct scsi_qla_host *);
390 int (*start_firmware) (struct scsi_qla_host *); 395 int (*start_firmware) (struct scsi_qla_host *);
396 int (*restart_firmware) (struct scsi_qla_host *);
391 irqreturn_t (*intr_handler) (int , void *); 397 irqreturn_t (*intr_handler) (int , void *);
392 void (*interrupt_service_routine) (struct scsi_qla_host *, uint32_t); 398 void (*interrupt_service_routine) (struct scsi_qla_host *, uint32_t);
399 int (*need_reset) (struct scsi_qla_host *);
393 int (*reset_chip) (struct scsi_qla_host *); 400 int (*reset_chip) (struct scsi_qla_host *);
394 int (*reset_firmware) (struct scsi_qla_host *); 401 int (*reset_firmware) (struct scsi_qla_host *);
395 void (*queue_iocb) (struct scsi_qla_host *); 402 void (*queue_iocb) (struct scsi_qla_host *);
@@ -397,6 +404,15 @@ struct isp_operations {
397 uint16_t (*rd_shdw_req_q_out) (struct scsi_qla_host *); 404 uint16_t (*rd_shdw_req_q_out) (struct scsi_qla_host *);
398 uint16_t (*rd_shdw_rsp_q_in) (struct scsi_qla_host *); 405 uint16_t (*rd_shdw_rsp_q_in) (struct scsi_qla_host *);
399 int (*get_sys_info) (struct scsi_qla_host *); 406 int (*get_sys_info) (struct scsi_qla_host *);
407 uint32_t (*rd_reg_direct) (struct scsi_qla_host *, ulong);
408 void (*wr_reg_direct) (struct scsi_qla_host *, ulong, uint32_t);
409 int (*rd_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t *);
410 int (*wr_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t);
411 int (*idc_lock) (struct scsi_qla_host *);
412 void (*idc_unlock) (struct scsi_qla_host *);
413 void (*rom_lock_recovery) (struct scsi_qla_host *);
414 void (*queue_mailbox_command) (struct scsi_qla_host *, uint32_t *, int);
415 void (*process_mailbox_interrupt) (struct scsi_qla_host *, int);
400}; 416};
401 417
402struct ql4_mdump_size_table { 418struct ql4_mdump_size_table {
@@ -497,8 +513,9 @@ struct scsi_qla_host {
497#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */ 513#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
498#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */ 514#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
499#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */ 515#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
500#define AF_82XX_RST_OWNER 25 /* 0x02000000 */ 516#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
501#define AF_82XX_DUMP_READING 26 /* 0x04000000 */ 517#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
518#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */
502 519
503 unsigned long dpc_flags; 520 unsigned long dpc_flags;
504 521
@@ -514,7 +531,7 @@ struct scsi_qla_host {
514#define DPC_RESET_ACTIVE 20 /* 0x00040000 */ 531#define DPC_RESET_ACTIVE 20 /* 0x00040000 */
515#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/ 532#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/
516#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/ 533#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/
517 534#define DPC_POST_IDC_ACK 23 /* 0x00200000 */
518 535
519 struct Scsi_Host *host; /* pointer to host data */ 536 struct Scsi_Host *host; /* pointer to host data */
520 uint32_t tot_ddbs; 537 uint32_t tot_ddbs;
@@ -647,7 +664,7 @@ struct scsi_qla_host {
647 uint8_t acb_version; 664 uint8_t acb_version;
648 665
649 /* qla82xx specific fields */ 666 /* qla82xx specific fields */
650 struct device_reg_82xx __iomem *qla4_8xxx_reg; /* Base I/O address */ 667 struct device_reg_82xx __iomem *qla4_82xx_reg; /* Base I/O address */
651 unsigned long nx_pcibase; /* Base I/O address */ 668 unsigned long nx_pcibase; /* Base I/O address */
652 uint8_t *nx_db_rd_ptr; /* Doorbell read pointer */ 669 uint8_t *nx_db_rd_ptr; /* Doorbell read pointer */
653 unsigned long nx_db_wr_ptr; /* Door bell write pointer */ 670 unsigned long nx_db_wr_ptr; /* Door bell write pointer */
@@ -733,6 +750,13 @@ struct scsi_qla_host {
733#define MAX_MRB 128 750#define MAX_MRB 128
734 struct mrb *active_mrb_array[MAX_MRB]; 751 struct mrb *active_mrb_array[MAX_MRB];
735 uint32_t mrb_index; 752 uint32_t mrb_index;
753
754 uint32_t *reg_tbl;
755 struct qla4_83xx_reset_template reset_tmplt;
756 struct device_reg_83xx __iomem *qla4_83xx_reg; /* Base I/O address
757 for ISP8324 */
758 uint32_t pf_bit;
759 struct qla4_83xx_idc_information idc_info;
736}; 760};
737 761
738struct ql4_task_data { 762struct ql4_task_data {
@@ -752,7 +776,7 @@ struct ql4_task_data {
752 776
753struct qla_endpoint { 777struct qla_endpoint {
754 struct Scsi_Host *host; 778 struct Scsi_Host *host;
755 struct sockaddr dst_addr; 779 struct sockaddr_storage dst_addr;
756}; 780};
757 781
758struct qla_conn { 782struct qla_conn {
@@ -795,13 +819,20 @@ static inline int is_qla8022(struct scsi_qla_host *ha)
795 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022; 819 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
796} 820}
797 821
798/* Note: Currently AER/EEH is now supported only for 8022 cards 822static inline int is_qla8032(struct scsi_qla_host *ha)
799 * This function needs to be updated when AER/EEH is enabled 823{
800 * for other cards. 824 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324;
801 */ 825}
826
827static inline int is_qla80XX(struct scsi_qla_host *ha)
828{
829 return is_qla8022(ha) || is_qla8032(ha);
830}
831
802static inline int is_aer_supported(struct scsi_qla_host *ha) 832static inline int is_aer_supported(struct scsi_qla_host *ha)
803{ 833{
804 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022; 834 return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) ||
835 (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324));
805} 836}
806 837
807static inline int adapter_up(struct scsi_qla_host *ha) 838static inline int adapter_up(struct scsi_qla_host *ha)
@@ -942,6 +973,20 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
942 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 973 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
943 974
944} 975}
976
977static inline int qla4_8xxx_rd_direct(struct scsi_qla_host *ha,
978 const uint32_t crb_reg)
979{
980 return ha->isp_ops->rd_reg_direct(ha, ha->reg_tbl[crb_reg]);
981}
982
983static inline void qla4_8xxx_wr_direct(struct scsi_qla_host *ha,
984 const uint32_t crb_reg,
985 const uint32_t value)
986{
987 ha->isp_ops->wr_reg_direct(ha, ha->reg_tbl[crb_reg], value);
988}
989
945/*---------------------------------------------------------------------------*/ 990/*---------------------------------------------------------------------------*/
946 991
947/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */ 992/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
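
The new qla4_8xxx_rd_direct()/qla4_8xxx_wr_direct() inlines above take a generic CRB index and translate it through ha->reg_tbl before calling the chip's rd_reg_direct/wr_reg_direct hook. A minimal sketch of that two-step lookup; the enum names and table layout are illustrative, and only the two offsets are copied from ql4_83xx.h:

#include <stdint.h>
#include <stdio.h>

enum { CRB_DEV_STATE, CRB_DRV_ACTIVE, CRB_MAX };       /* generic indices */

static const uint32_t demo_83xx_reg_tbl[CRB_MAX] = {
	[CRB_DEV_STATE]  = 0x3784,                     /* QLA83XX_CRB_DEV_STATE */
	[CRB_DRV_ACTIVE] = 0x3788,                     /* QLA83XX_CRB_DRV_ACTIVE */
};

/* Stand-in for isp_ops->rd_reg_direct(): just report the offset used. */
static uint32_t demo_rd_reg_direct(uint32_t off)
{
	printf("reading CRB offset 0x%04X\n", (unsigned int)off);
	return 0;
}

/* Shape of qla4_8xxx_rd_direct(): index -> chip offset -> register read. */
static uint32_t demo_rd_direct(const uint32_t *reg_tbl, uint32_t crb_index)
{
	return demo_rd_reg_direct(reg_tbl[crb_index]);
}

int main(void)
{
	demo_rd_direct(demo_83xx_reg_tbl, CRB_DEV_STATE);
	demo_rd_direct(demo_83xx_reg_tbl, CRB_DRV_ACTIVE);
	return 0;
}
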
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 7240948fb929..1c4795020357 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -65,6 +65,40 @@ struct device_reg_82xx {
65#define ISRX_82XX_RISC_INT BIT_0 /* RISC interrupt. */ 65#define ISRX_82XX_RISC_INT BIT_0 /* RISC interrupt. */
66}; 66};
67 67
68/* ISP 83xx I/O Register Set structure */
69struct device_reg_83xx {
70 __le32 mailbox_in[16]; /* 0x0000 */
71 __le32 reserve1[496]; /* 0x0040 */
72 __le32 mailbox_out[16]; /* 0x0800 */
73 __le32 reserve2[496];
74 __le32 mbox_int; /* 0x1000 */
75 __le32 reserve3[63];
76 __le32 req_q_out; /* 0x1100 */
77 __le32 reserve4[63];
78
79 __le32 rsp_q_in; /* 0x1200 */
80 __le32 reserve5[1919];
81
82 __le32 req_q_in; /* 0x3000 */
83 __le32 reserve6[3];
84 __le32 iocb_int_mask; /* 0x3010 */
85 __le32 reserve7[3];
86 __le32 rsp_q_out; /* 0x3020 */
87 __le32 reserve8[3];
88 __le32 anonymousbuff; /* 0x3030 */
89 __le32 mb_int_mask; /* 0x3034 */
90
91 __le32 host_intr; /* 0x3038 - Host Interrupt Register */
92 __le32 risc_intr; /* 0x303C - RISC Interrupt Register */
93 __le32 reserve9[544];
94 __le32 leg_int_ptr; /* 0x38C0 - Legacy Interrupt Pointer Register */
95 __le32 leg_int_trig; /* 0x38C4 - Legacy Interrupt Trigger Control */
96 __le32 leg_int_mask; /* 0x38C8 - Legacy Interrupt Mask Register */
97};
98
99#define INT_ENABLE_FW_MB (1 << 2)
100#define INT_MASK_FW_MB (1 << 2)
101
68/* remote register set (access via PCI memory read/write) */ 102/* remote register set (access via PCI memory read/write) */
69struct isp_reg { 103struct isp_reg {
70#define MBOX_REG_COUNT 8 104#define MBOX_REG_COUNT 8
@@ -356,6 +390,9 @@ struct qla_flt_region {
356#define LOGOUT_OPTION_CLOSE_SESSION 0x0002 390#define LOGOUT_OPTION_CLOSE_SESSION 0x0002
357#define LOGOUT_OPTION_RELOGIN 0x0004 391#define LOGOUT_OPTION_RELOGIN 0x0004
358#define LOGOUT_OPTION_FREE_DDB 0x0008 392#define LOGOUT_OPTION_FREE_DDB 0x0008
393#define MBOX_CMD_SET_PARAM 0x0059
394#define SET_DRVR_VERSION 0x200
395#define MAX_DRVR_VER_LEN 24
359#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A 396#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A
360#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060 397#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060
361#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061 398#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061
@@ -417,6 +454,10 @@ struct qla_flt_region {
417#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */ 454#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */
418#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077 455#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
419 456
457#define MBOX_CMD_IDC_ACK 0x0101
458#define MBOX_CMD_PORT_RESET 0x0120
459#define MBOX_CMD_SET_PORT_CONFIG 0x0122
460
420/* Mailbox status definitions */ 461/* Mailbox status definitions */
421#define MBOX_COMPLETION_STATUS 4 462#define MBOX_COMPLETION_STATUS 4
422#define MBOX_STS_BUSY 0x0007 463#define MBOX_STS_BUSY 0x0007
@@ -453,6 +494,8 @@ struct qla_flt_region {
453#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C 494#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
454#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D 495#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
455#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E 496#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
497#define MBOX_ASTS_IDC_COMPLETE 0x8100
498#define MBOX_ASTS_IDC_NOTIFY 0x8101
456#define MBOX_ASTS_TXSCVR_INSERTED 0x8130 499#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
457#define MBOX_ASTS_TXSCVR_REMOVED 0x8131 500#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
458 501
@@ -1195,9 +1238,12 @@ struct ql_iscsi_stats {
1195 uint8_t reserved2[264]; /* 0x0308 - 0x040F */ 1238 uint8_t reserved2[264]; /* 0x0308 - 0x040F */
1196}; 1239};
1197 1240
1198#define QLA82XX_DBG_STATE_ARRAY_LEN 16 1241#define QLA8XXX_DBG_STATE_ARRAY_LEN 16
1199#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8 1242#define QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN 8
1200#define QLA82XX_DBG_RSVD_ARRAY_LEN 8 1243#define QLA8XXX_DBG_RSVD_ARRAY_LEN 8
1244#define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN 16
1245#define QLA83XX_SS_OCM_WNDREG_INDEX 3
1246#define QLA83XX_SS_PCI_INDEX 0
1201 1247
1202struct qla4_8xxx_minidump_template_hdr { 1248struct qla4_8xxx_minidump_template_hdr {
1203 uint32_t entry_type; 1249 uint32_t entry_type;
@@ -1214,8 +1260,9 @@ struct qla4_8xxx_minidump_template_hdr {
1214 uint32_t driver_info_word3; 1260 uint32_t driver_info_word3;
1215 uint32_t driver_info_word4; 1261 uint32_t driver_info_word4;
1216 1262
1217 uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN]; 1263 uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN];
1218 uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN]; 1264 uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN];
1265 uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN];
1219}; 1266};
1220 1267
1221#endif /* _QLA4X_FW_H */ 1268#endif /* _QLA4X_FW_H */
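
The comments in the new struct device_reg_83xx give the intended byte offsets; the reserveN[] padding arrays are what make them line up. A user-space sketch that checks a few of those offsets with offsetof(); plain uint32_t stands in for __le32, and the struct is re-declared here only for the check:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reg_83xx_layout {
	uint32_t mailbox_in[16];   /* expect 0x0000 */
	uint32_t reserve1[496];
	uint32_t mailbox_out[16];  /* expect 0x0800 */
	uint32_t reserve2[496];
	uint32_t mbox_int;         /* expect 0x1000 */
	uint32_t reserve3[63];
	uint32_t req_q_out;        /* expect 0x1100 */
	uint32_t reserve4[63];
	uint32_t rsp_q_in;         /* expect 0x1200 */
	uint32_t reserve5[1919];
	uint32_t req_q_in;         /* expect 0x3000 */
	uint32_t reserve6[3];
	uint32_t iocb_int_mask;    /* expect 0x3010 */
	uint32_t reserve7[3];
	uint32_t rsp_q_out;        /* expect 0x3020 */
	uint32_t reserve8[3];
	uint32_t anonymousbuff;    /* expect 0x3030 */
	uint32_t mb_int_mask;      /* expect 0x3034 */
	uint32_t host_intr;        /* expect 0x3038 */
	uint32_t risc_intr;        /* expect 0x303C */
	uint32_t reserve9[544];
	uint32_t leg_int_ptr;      /* expect 0x38C0 */
	uint32_t leg_int_trig;     /* expect 0x38C4 */
	uint32_t leg_int_mask;     /* expect 0x38C8 */
};

int main(void)
{
	printf("mailbox_out 0x%04zX\n", offsetof(struct reg_83xx_layout, mailbox_out));
	printf("mbox_int    0x%04zX\n", offsetof(struct reg_83xx_layout, mbox_int));
	printf("req_q_in    0x%04zX\n", offsetof(struct reg_83xx_layout, req_q_in));
	printf("leg_int_ptr 0x%04zX\n", offsetof(struct reg_83xx_layout, leg_int_ptr));
	return 0;
}
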
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 5b2525c4139e..57a5a3cf5770 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -109,28 +109,28 @@ uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
109void qla4_8xxx_pci_config(struct scsi_qla_host *); 109void qla4_8xxx_pci_config(struct scsi_qla_host *);
110int qla4_8xxx_iospace_config(struct scsi_qla_host *ha); 110int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
111int qla4_8xxx_load_risc(struct scsi_qla_host *); 111int qla4_8xxx_load_risc(struct scsi_qla_host *);
112irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id); 112irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id);
113void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha); 113void qla4_82xx_queue_iocb(struct scsi_qla_host *ha);
114void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha); 114void qla4_82xx_complete_iocb(struct scsi_qla_host *ha);
115 115
116int qla4_8xxx_crb_win_lock(struct scsi_qla_host *); 116int qla4_82xx_crb_win_lock(struct scsi_qla_host *);
117void qla4_8xxx_crb_win_unlock(struct scsi_qla_host *); 117void qla4_82xx_crb_win_unlock(struct scsi_qla_host *);
118int qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *); 118int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
119void qla4_8xxx_wr_32(struct scsi_qla_host *, ulong, u32); 119void qla4_82xx_wr_32(struct scsi_qla_host *, ulong, u32);
120int qla4_8xxx_rd_32(struct scsi_qla_host *, ulong); 120uint32_t qla4_82xx_rd_32(struct scsi_qla_host *, ulong);
121int qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int); 121int qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int);
122int qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int); 122int qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int);
123int qla4_8xxx_isp_reset(struct scsi_qla_host *ha); 123int qla4_82xx_isp_reset(struct scsi_qla_host *ha);
124void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha, 124void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
125 uint32_t intr_status); 125 uint32_t intr_status);
126uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha); 126uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
127uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha); 127uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
128int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha); 128int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha);
129void qla4_8xxx_watchdog(struct scsi_qla_host *ha); 129void qla4_8xxx_watchdog(struct scsi_qla_host *ha);
130int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha); 130int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha);
131int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha); 131int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
132void qla4_8xxx_enable_intrs(struct scsi_qla_host *ha); 132void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
133void qla4_8xxx_disable_intrs(struct scsi_qla_host *ha); 133void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
134int qla4_8xxx_enable_msix(struct scsi_qla_host *ha); 134int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
135void qla4_8xxx_disable_msix(struct scsi_qla_host *ha); 135void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
136irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id); 136irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
@@ -138,8 +138,8 @@ irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
138irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id); 138irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
139void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha); 139void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha);
140void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha); 140void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha);
141int qla4_8xxx_idc_lock(struct scsi_qla_host *ha); 141int qla4_82xx_idc_lock(struct scsi_qla_host *ha);
142void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha); 142void qla4_82xx_idc_unlock(struct scsi_qla_host *ha);
143int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha); 143int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
144void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha); 144void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
145void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); 145void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
@@ -203,6 +203,62 @@ int qla4xxx_req_template_size(struct scsi_qla_host *ha);
203void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha); 203void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
204void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha); 204void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
205void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha); 205void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
206int qla4_82xx_try_start_fw(struct scsi_qla_host *ha);
207int qla4_8xxx_need_reset(struct scsi_qla_host *ha);
208int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data);
209int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data);
210void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha);
211void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
212 int incount);
213void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
214void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
215 int incount);
216void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
217void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha);
218void qla4_83xx_disable_intrs(struct scsi_qla_host *ha);
219void qla4_83xx_enable_intrs(struct scsi_qla_host *ha);
220int qla4_83xx_start_firmware(struct scsi_qla_host *ha);
221irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id);
222void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
223 uint32_t intr_status);
224int qla4_83xx_isp_reset(struct scsi_qla_host *ha);
225void qla4_83xx_queue_iocb(struct scsi_qla_host *ha);
226void qla4_83xx_complete_iocb(struct scsi_qla_host *ha);
227uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
228uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
229uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr);
230void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val);
231int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
232 uint32_t *data);
233int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
234 uint32_t data);
235int qla4_83xx_drv_lock(struct scsi_qla_host *ha);
236void qla4_83xx_drv_unlock(struct scsi_qla_host *ha);
237void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha);
238void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
239 int incount);
240void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
241void qla4_83xx_read_reset_template(struct scsi_qla_host *ha);
242void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha);
243int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha);
244int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
245 uint32_t flash_addr, uint8_t *p_data,
246 int u32_word_count);
247void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha);
248void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha);
249int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
250 uint8_t *p_data, int u32_word_count);
251void qla4_83xx_get_idc_param(struct scsi_qla_host *ha);
252void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
253void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
254int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
255void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
256int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha);
257int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha);
258int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
259int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
260int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
261void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
206 262
207extern int ql4xextended_error_logging; 263extern int ql4xextended_error_logging;
208extern int ql4xdontresethba; 264extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index ddd9472066cb..1aca1b4f70b8 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -102,11 +102,18 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
102 102
103 if (is_qla8022(ha)) { 103 if (is_qla8022(ha)) {
104 writel(0, 104 writel(0,
105 (unsigned long __iomem *)&ha->qla4_8xxx_reg->req_q_out); 105 (unsigned long __iomem *)&ha->qla4_82xx_reg->req_q_out);
106 writel(0, 106 writel(0,
107 (unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_in); 107 (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in);
108 writel(0, 108 writel(0,
109 (unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_out); 109 (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out);
110 } else if (is_qla8032(ha)) {
111 writel(0,
112 (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in);
113 writel(0,
114 (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_in);
115 writel(0,
116 (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_out);
110 } else { 117 } else {
111 /* 118 /*
112 * Initialize DMA Shadow registers. The firmware is really 119 * Initialize DMA Shadow registers. The firmware is really
@@ -524,7 +531,7 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
524 /* For 82xx, stop firmware before initializing because if BIOS 531 /* For 82xx, stop firmware before initializing because if BIOS
525 * has previously initialized firmware, then driver's initialize 532 * has previously initialized firmware, then driver's initialize
526 * firmware will fail. */ 533 * firmware will fail. */
527 if (is_qla8022(ha)) 534 if (is_qla80XX(ha))
528 qla4_8xxx_stop_firmware(ha); 535 qla4_8xxx_stop_firmware(ha);
529 536
530 ql4_printk(KERN_INFO, ha, "Initializing firmware..\n"); 537 ql4_printk(KERN_INFO, ha, "Initializing firmware..\n");
@@ -537,7 +544,7 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
537 if (!qla4xxx_fw_ready(ha)) 544 if (!qla4xxx_fw_ready(ha))
538 return status; 545 return status;
539 546
540 if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags)) 547 if (is_qla80XX(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
541 qla4xxx_alloc_fw_dump(ha); 548 qla4xxx_alloc_fw_dump(ha);
542 549
543 return qla4xxx_get_firmware_status(ha); 550 return qla4xxx_get_firmware_status(ha);
@@ -946,9 +953,9 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
946 953
947 set_bit(AF_ONLINE, &ha->flags); 954 set_bit(AF_ONLINE, &ha->flags);
948exit_init_hba: 955exit_init_hba:
949 if (is_qla8022(ha) && (status == QLA_ERROR)) { 956 if (is_qla80XX(ha) && (status == QLA_ERROR)) {
950 /* Since interrupts are registered in start_firmware for 957 /* Since interrupts are registered in start_firmware for
951 * 82xx, release them here if initialize_adapter fails */ 958 * 80XX, release them here if initialize_adapter fails */
952 qla4xxx_free_irqs(ha); 959 qla4xxx_free_irqs(ha);
953 } 960 }
954 961
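
The init path above now branches on is_qla80XX(), which is simply is_qla8022() || is_qla8032() keyed off the PCI device ID (see the ql4_def.h hunks earlier). A tiny user-space sketch of those predicates; the table of device IDs in main() is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_DEVICE_ID_QLOGIC_ISP8022	0x8022
#define PCI_DEVICE_ID_QLOGIC_ISP8324	0x8032

static bool is_8022(uint16_t device) { return device == PCI_DEVICE_ID_QLOGIC_ISP8022; }
static bool is_8032(uint16_t device) { return device == PCI_DEVICE_ID_QLOGIC_ISP8324; }
static bool is_80XX(uint16_t device) { return is_8022(device) || is_8032(device); }

int main(void)
{
	const uint16_t ids[] = { 0x4010, 0x4032, 0x8022, 0x8032 };
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("device 0x%04X: 80XX family? %s\n",
		       (unsigned int)ids[i], is_80XX(ids[i]) ? "yes" : "no");
	return 0;
}
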
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 62f90bdec5d5..6f4decd44c6a 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 2a2022a6bb9b..f48f37a281d1 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -192,35 +192,47 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
192 } 192 }
193} 193}
194 194
195void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
196{
197 writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
198 readl(&ha->qla4_83xx_reg->req_q_in);
199}
200
201void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
202{
203 writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
204 readl(&ha->qla4_83xx_reg->rsp_q_out);
205}
206
195/** 207/**
196 * qla4_8xxx_queue_iocb - Tell ISP it's got new request(s) 208 * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
197 * @ha: pointer to host adapter structure. 209 * @ha: pointer to host adapter structure.
198 * 210 *
199 * This routine notifies the ISP that one or more new request 211 * This routine notifies the ISP that one or more new request
200 * queue entries have been placed on the request queue. 212 * queue entries have been placed on the request queue.
201 **/ 213 **/
202void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha) 214void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
203{ 215{
204 uint32_t dbval = 0; 216 uint32_t dbval = 0;
205 217
206 dbval = 0x14 | (ha->func_num << 5); 218 dbval = 0x14 | (ha->func_num << 5);
207 dbval = dbval | (0 << 8) | (ha->request_in << 16); 219 dbval = dbval | (0 << 8) | (ha->request_in << 16);
208 220
209 qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in); 221 qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
210} 222}
211 223
212/** 224/**
213 * qla4_8xxx_complete_iocb - Tell ISP we're done with response(s) 225 * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
214 * @ha: pointer to host adapter structure. 226 * @ha: pointer to host adapter structure.
215 * 227 *
216 * This routine notifies the ISP that one or more response/completion 228 * This routine notifies the ISP that one or more response/completion
217 * queue entries have been processed by the driver. 229 * queue entries have been processed by the driver.
218 * This also clears the interrupt. 230 * This also clears the interrupt.
219 **/ 231 **/
220void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha) 232void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
221{ 233{
222 writel(ha->response_out, &ha->qla4_8xxx_reg->rsp_q_out); 234 writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
223 readl(&ha->qla4_8xxx_reg->rsp_q_out); 235 readl(&ha->qla4_82xx_reg->rsp_q_out);
224} 236}
225 237
226/** 238/**
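
The 82xx queue_iocb path above builds a doorbell value before updating the producer index; the field positions can be read straight off the expression (constant 0x14, PCI function at bit 5, a zero field at bit 8, request-in index at bit 16). A small sketch of just that packing; the sample func_num/request_in values are illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_dbval(uint32_t func_num, uint32_t request_in)
{
	/* 0x14 | function << 5 | 0 << 8 | producer index << 16,
	 * mirroring the expression in qla4_82xx_queue_iocb() above */
	return 0x14 | (func_num << 5) | (0 << 8) | (request_in << 16);
}

int main(void)
{
	printf("dbval = 0x%08X\n", (unsigned int)pack_dbval(2, 0x30));
	return 0;
}
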
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index fc542a9bb106..15ea81465ce4 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -126,7 +126,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
126 ql4_printk(KERN_WARNING, ha, "%s invalid status entry: " 126 ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
127 "handle=0x%0x, srb=%p\n", __func__, 127 "handle=0x%0x, srb=%p\n", __func__,
128 sts_entry->handle, srb); 128 sts_entry->handle, srb);
129 if (is_qla8022(ha)) 129 if (is_qla80XX(ha))
130 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 130 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
131 else 131 else
132 set_bit(DPC_RESET_HA, &ha->dpc_flags); 132 set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -243,56 +243,72 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
243 243
244 scsi_set_resid(cmd, residual); 244 scsi_set_resid(cmd, residual);
245 245
246 /* 246 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
247 * If there is scsi_status, it takes precedense over 247
248 * underflow condition. 248 /* Both the firmware and target reported UNDERRUN:
249 */ 249 *
250 if (scsi_status != 0) { 250 * MID-LAYER UNDERFLOW case:
251 cmd->result = DID_OK << 16 | scsi_status; 251 * Some kernels do not properly detect midlayer
252 * underflow, so we manually check it and return
253 * ERROR if the minimum required data was not
254 * received.
255 *
256 * ALL OTHER cases:
257 * Fall thru to check scsi_status
258 */
259 if (!scsi_status && (scsi_bufflen(cmd) - residual) <
260 cmd->underflow) {
261 DEBUG2(ql4_printk(KERN_INFO, ha,
262 "scsi%ld:%d:%d:%d: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n",
263 ha->host_no,
264 cmd->device->channel,
265 cmd->device->id,
266 cmd->device->lun, __func__,
267 scsi_bufflen(cmd),
268 residual));
252 269
253 if (scsi_status != SCSI_CHECK_CONDITION) 270 cmd->result = DID_ERROR << 16;
254 break; 271 break;
272 }
273
274 } else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
275 scsi_status != SAM_STAT_BUSY) {
255 276
256 /* Copy Sense Data into sense buffer. */
257 qla4xxx_copy_sense(ha, sts_entry, srb);
258 } else {
259 /* 277 /*
260 * If RISC reports underrun and target does not 278 * The firmware reports UNDERRUN, but the target does
261 * report it then we must have a lost frame, so 279 * not report it:
262 * tell upper layer to retry it by reporting a 280 *
263 * bus busy. 281 * scsi_status | host_byte device_byte
282 * | (19:16) (7:0)
283 * ============= | ========= ===========
284 * TASK_SET_FULL | DID_OK scsi_status
285 * BUSY | DID_OK scsi_status
286 * ALL OTHERS | DID_ERROR scsi_status
287 *
288 * Note: If scsi_status is task set full or busy,
289 * then this else if would fall thru to check the
290 * scsi_status and return DID_OK.
264 */ 291 */
265 if ((sts_entry->iscsiFlags &
266 ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
267 cmd->result = DID_BUS_BUSY << 16;
268 } else if ((scsi_bufflen(cmd) - residual) <
269 cmd->underflow) {
270 /*
271 * Handle mid-layer underflow???
272 *
273 * For kernels less than 2.4, the driver must
274 * return an error if an underflow is detected.
275 * For kernels equal-to and above 2.4, the
276 * mid-layer will appearantly handle the
277 * underflow by detecting the residual count --
278 * unfortunately, we do not see where this is
279 * actually being done. In the interim, we
280 * will return DID_ERROR.
281 */
282 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
283 "Mid-layer Data underrun1, "
284 "xferlen = 0x%x, "
285 "residual = 0x%x\n", ha->host_no,
286 cmd->device->channel,
287 cmd->device->id,
288 cmd->device->lun, __func__,
289 scsi_bufflen(cmd), residual));
290 292
291 cmd->result = DID_ERROR << 16; 293 DEBUG2(ql4_printk(KERN_INFO, ha,
292 } else { 294 "scsi%ld:%d:%d:%d: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
293 cmd->result = DID_OK << 16; 295 ha->host_no,
294 } 296 cmd->device->channel,
297 cmd->device->id,
298 cmd->device->lun, __func__,
299 residual,
300 scsi_bufflen(cmd)));
301
302 cmd->result = DID_ERROR << 16 | scsi_status;
303 goto check_scsi_status;
295 } 304 }
305
306 cmd->result = DID_OK << 16 | scsi_status;
307
308check_scsi_status:
309 if (scsi_status == SAM_STAT_CHECK_CONDITION)
310 qla4xxx_copy_sense(ha, sts_entry, srb);
311
296 break; 312 break;
297 313
298 case SCS_DEVICE_LOGGED_OUT: 314 case SCS_DEVICE_LOGGED_OUT:
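
The rewritten UNDERRUN branch above boils down to a small decision table: if the target also reported the underrun, fail only when the mid-layer minimum (cmd->underflow) was not met; if only the firmware saw it (dropped frames) and the target status is not TASK SET FULL or BUSY, fail with the SCSI status attached; otherwise return DID_OK with the SCSI status. A user-space sketch of that table; the simplified result encoding and the sample calls are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DID_OK			0x00
#define DID_ERROR		0x07
#define SAM_STAT_BUSY		0x08
#define SAM_STAT_TASK_SET_FULL	0x28

static uint32_t underrun_result(bool fw_residual_under, uint8_t scsi_status,
				uint32_t bufflen, uint32_t residual,
				uint32_t underflow)
{
	if (fw_residual_under) {
		/* Target reported it too: only fail if the mid-layer
		 * minimum (cmd->underflow) was not satisfied. */
		if (!scsi_status && (bufflen - residual) < underflow)
			return DID_ERROR << 16;
	} else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
		   scsi_status != SAM_STAT_BUSY) {
		/* Firmware saw dropped frames the target never reported. */
		return (DID_ERROR << 16) | scsi_status;
	}
	return (DID_OK << 16) | scsi_status;
}

int main(void)
{
	printf("0x%08X\n", underrun_result(true,  0,             4096, 4096, 512));
	printf("0x%08X\n", underrun_result(false, 0,             4096,  100, 0));
	printf("0x%08X\n", underrun_result(false, SAM_STAT_BUSY, 4096,  100, 0));
	return 0;
}
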
@@ -578,6 +594,14 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
578{ 594{
579 int i; 595 int i;
580 uint32_t mbox_sts[MBOX_AEN_REG_COUNT]; 596 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
597 __le32 __iomem *mailbox_out;
598
599 if (is_qla8032(ha))
600 mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
601 else if (is_qla8022(ha))
602 mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
603 else
604 mailbox_out = &ha->reg->mailbox[0];
581 605
582 if ((mbox_status == MBOX_STS_BUSY) || 606 if ((mbox_status == MBOX_STS_BUSY) ||
583 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) || 607 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
@@ -590,9 +614,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
590 * location and set mailbox command done flag 614 * location and set mailbox command done flag
591 */ 615 */
592 for (i = 0; i < ha->mbox_status_count; i++) 616 for (i = 0; i < ha->mbox_status_count; i++)
593 ha->mbox_status[i] = is_qla8022(ha) 617 ha->mbox_status[i] = readl(&mailbox_out[i]);
594 ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
595 : readl(&ha->reg->mailbox[i]);
596 618
597 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags); 619 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
598 620
@@ -601,9 +623,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
601 } 623 }
602 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) { 624 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
603 for (i = 0; i < MBOX_AEN_REG_COUNT; i++) 625 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
604 mbox_sts[i] = is_qla8022(ha) 626 mbox_sts[i] = readl(&mailbox_out[i]);
605 ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
606 : readl(&ha->reg->mailbox[i]);
607 627
608 /* Immediately process the AENs that don't require much work. 628 /* Immediately process the AENs that don't require much work.
609 * Only queue the database_changed AENs */ 629 * Only queue the database_changed AENs */
@@ -619,7 +639,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
619 ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__); 639 ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
620 qla4xxx_dump_registers(ha); 640 qla4xxx_dump_registers(ha);
621 641
622 if (ql4xdontresethba) { 642 if ((is_qla8022(ha) && ql4xdontresethba) ||
643 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
623 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n", 644 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
624 ha->host_no, __func__)); 645 ha->host_no, __func__));
625 } else { 646 } else {
@@ -635,7 +656,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
635 case MBOX_ASTS_DHCP_LEASE_EXPIRED: 656 case MBOX_ASTS_DHCP_LEASE_EXPIRED:
636 DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, " 657 DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
637 "Reset HA\n", ha->host_no, mbox_status)); 658 "Reset HA\n", ha->host_no, mbox_status));
638 if (is_qla8022(ha)) 659 if (is_qla80XX(ha))
639 set_bit(DPC_RESET_HA_FW_CONTEXT, 660 set_bit(DPC_RESET_HA_FW_CONTEXT,
640 &ha->dpc_flags); 661 &ha->dpc_flags);
641 else 662 else
@@ -700,7 +721,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
700 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); 721 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
701 else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && 722 else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
702 (mbox_sts[2] == ACB_STATE_VALID)) { 723 (mbox_sts[2] == ACB_STATE_VALID)) {
703 if (is_qla8022(ha)) 724 if (is_qla80XX(ha))
704 set_bit(DPC_RESET_HA_FW_CONTEXT, 725 set_bit(DPC_RESET_HA_FW_CONTEXT,
705 &ha->dpc_flags); 726 &ha->dpc_flags);
706 else 727 else
@@ -785,6 +806,43 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
785 " removed\n", ha->host_no, mbox_sts[0])); 806 " removed\n", ha->host_no, mbox_sts[0]));
786 break; 807 break;
787 808
809 case MBOX_ASTS_IDC_NOTIFY:
810 {
811 uint32_t opcode;
812 if (is_qla8032(ha)) {
813 DEBUG2(ql4_printk(KERN_INFO, ha,
814 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
815 ha->host_no, mbox_sts[0],
816 mbox_sts[1], mbox_sts[2],
817 mbox_sts[3], mbox_sts[4]));
818 opcode = mbox_sts[1] >> 16;
819 if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
820 (opcode == MBOX_CMD_PORT_RESET)) {
821 set_bit(DPC_POST_IDC_ACK,
822 &ha->dpc_flags);
823 ha->idc_info.request_desc = mbox_sts[1];
824 ha->idc_info.info1 = mbox_sts[2];
825 ha->idc_info.info2 = mbox_sts[3];
826 ha->idc_info.info3 = mbox_sts[4];
827 qla4xxx_wake_dpc(ha);
828 }
829 }
830 break;
831 }
832
833 case MBOX_ASTS_IDC_COMPLETE:
834 if (is_qla8032(ha)) {
835 DEBUG2(ql4_printk(KERN_INFO, ha,
836 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
837 ha->host_no, mbox_sts[0],
838 mbox_sts[1], mbox_sts[2],
839 mbox_sts[3], mbox_sts[4]));
840 DEBUG2(ql4_printk(KERN_INFO, ha,
841 "scsi:%ld: AEN %04x IDC Complete notification\n",
842 ha->host_no, mbox_sts[0]));
843 }
844 break;
845
788 default: 846 default:
789 DEBUG2(printk(KERN_WARNING 847 DEBUG2(printk(KERN_WARNING
790 "scsi%ld: AEN %04x UNKNOWN\n", 848 "scsi%ld: AEN %04x UNKNOWN\n",
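
For the new MBOX_ASTS_IDC_NOTIFY case above, the sub-opcode sits in the upper 16 bits of mbox_sts[1], and only SET_PORT_CONFIG / PORT_RESET requests schedule the deferred DPC_POST_IDC_ACK work. A minimal sketch of just that decode; the helper name and the sample mailbox contents are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MBOX_CMD_PORT_RESET		0x0120
#define MBOX_CMD_SET_PORT_CONFIG	0x0122

static bool idc_notify_needs_ack(const uint32_t mbox_sts[5])
{
	uint32_t opcode = mbox_sts[1] >> 16;    /* sub-opcode in bits 31:16 */

	return opcode == MBOX_CMD_SET_PORT_CONFIG ||
	       opcode == MBOX_CMD_PORT_RESET;
}

int main(void)
{
	uint32_t reset_sts[5] = { 0x8101, MBOX_CMD_PORT_RESET << 16, 0, 0, 0 };
	uint32_t other_sts[5] = { 0x8101, 0x0042u << 16, 0, 0, 0 };

	printf("port reset  -> ack needed: %s\n", idc_notify_needs_ack(reset_sts) ? "yes" : "no");
	printf("other event -> ack needed: %s\n", idc_notify_needs_ack(other_sts) ? "yes" : "no");
	return 0;
}
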
@@ -799,14 +857,31 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
799 } 857 }
800} 858}
801 859
860void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
861 uint32_t intr_status)
862{
863 /* Process mailbox/asynch event interrupt.*/
864 if (intr_status) {
865 qla4xxx_isr_decode_mailbox(ha,
866 readl(&ha->qla4_83xx_reg->mailbox_out[0]));
867 /* clear the interrupt */
868 writel(0, &ha->qla4_83xx_reg->risc_intr);
869 } else {
870 qla4xxx_process_response_queue(ha);
871 }
872
873 /* clear the interrupt */
874 writel(0, &ha->qla4_83xx_reg->mb_int_mask);
875}
876
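
The new qla4_83xx_interrupt_service_routine() above has a simple split: a non-zero intr_status means a mailbox/async-event completion (decode mailbox_out[0], clear risc_intr), zero means response-queue work, and mb_int_mask is written back to zero in both paths. A rough user-space sketch of that flow only; the fake_* registers and helpers are invented for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_risc_intr;                 /* stand-ins for the mapped */
static uint32_t fake_mb_int_mask = 0xffffffff;  /* 83xx registers           */

static void decode_mailbox(uint32_t mbox0)  { printf("mailbox status 0x%X\n", (unsigned int)mbox0); }
static void process_response_queue(void)    { printf("draining response queue\n"); }

static void isr_83xx_style(uint32_t intr_status, uint32_t mailbox_out0)
{
	if (intr_status) {
		decode_mailbox(mailbox_out0);   /* mailbox / async event path */
		fake_risc_intr = 0;             /* clear the RISC interrupt   */
	} else {
		process_response_queue();       /* IOCB completion path       */
	}
	fake_mb_int_mask = 0;                   /* same final write as the routine above */
}

int main(void)
{
	isr_83xx_style(1, 0x8014);
	isr_83xx_style(0, 0);
	printf("risc_intr=0x%X mb_int_mask=0x%X\n",
	       (unsigned int)fake_risc_intr, (unsigned int)fake_mb_int_mask);
	return 0;
}
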
802/** 877/**
803 * qla4_8xxx_interrupt_service_routine - isr 878 * qla4_82xx_interrupt_service_routine - isr
804 * @ha: pointer to host adapter structure. 879 * @ha: pointer to host adapter structure.
805 * 880 *
806 * This is the main interrupt service routine. 881 * This is the main interrupt service routine.
807 * hardware_lock locked upon entry. runs in interrupt context. 882 * hardware_lock locked upon entry. runs in interrupt context.
808 **/ 883 **/
809void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha, 884void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
810 uint32_t intr_status) 885 uint32_t intr_status)
811{ 886{
812 /* Process response queue interrupt. */ 887 /* Process response queue interrupt. */
@@ -816,11 +891,11 @@ void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
816 /* Process mailbox/asynch event interrupt.*/ 891 /* Process mailbox/asynch event interrupt.*/
817 if (intr_status & HSRX_RISC_MB_INT) 892 if (intr_status & HSRX_RISC_MB_INT)
818 qla4xxx_isr_decode_mailbox(ha, 893 qla4xxx_isr_decode_mailbox(ha,
819 readl(&ha->qla4_8xxx_reg->mailbox_out[0])); 894 readl(&ha->qla4_82xx_reg->mailbox_out[0]));
820 895
821 /* clear the interrupt */ 896 /* clear the interrupt */
822 writel(0, &ha->qla4_8xxx_reg->host_int); 897 writel(0, &ha->qla4_82xx_reg->host_int);
823 readl(&ha->qla4_8xxx_reg->host_int); 898 readl(&ha->qla4_82xx_reg->host_int);
824} 899}
825 900
826/** 901/**
@@ -850,12 +925,12 @@ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
850} 925}
851 926
852/** 927/**
853 * qla4_8xxx_spurious_interrupt - processes spurious interrupt 928 * qla4_82xx_spurious_interrupt - processes spurious interrupt
854 * @ha: pointer to host adapter structure. 929 * @ha: pointer to host adapter structure.
855 * @reqs_count: . 930 * @reqs_count: .
856 * 931 *
857 **/ 932 **/
858static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha, 933static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
859 uint8_t reqs_count) 934 uint8_t reqs_count)
860{ 935{
861 if (reqs_count) 936 if (reqs_count)
@@ -863,9 +938,9 @@ static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
863 938
864 DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n")); 939 DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
865 if (is_qla8022(ha)) { 940 if (is_qla8022(ha)) {
866 writel(0, &ha->qla4_8xxx_reg->host_int); 941 writel(0, &ha->qla4_82xx_reg->host_int);
867 if (test_bit(AF_INTx_ENABLED, &ha->flags)) 942 if (test_bit(AF_INTx_ENABLED, &ha->flags))
868 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 943 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
869 0xfbff); 944 0xfbff);
870 } 945 }
871 ha->spurious_int_count++; 946 ha->spurious_int_count++;
@@ -968,11 +1043,11 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
968} 1043}
969 1044
970/** 1045/**
971 * qla4_8xxx_intr_handler - hardware interrupt handler. 1046 * qla4_82xx_intr_handler - hardware interrupt handler.
972 * @irq: Unused 1047 * @irq: Unused
973 * @dev_id: Pointer to host adapter structure 1048 * @dev_id: Pointer to host adapter structure
974 **/ 1049 **/
975irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id) 1050irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
976{ 1051{
977 struct scsi_qla_host *ha = dev_id; 1052 struct scsi_qla_host *ha = dev_id;
978 uint32_t intr_status; 1053 uint32_t intr_status;
@@ -984,11 +1059,11 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
984 return IRQ_HANDLED; 1059 return IRQ_HANDLED;
985 1060
986 ha->isr_count++; 1061 ha->isr_count++;
987 status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR); 1062 status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
988 if (!(status & ha->nx_legacy_intr.int_vec_bit)) 1063 if (!(status & ha->nx_legacy_intr.int_vec_bit))
989 return IRQ_NONE; 1064 return IRQ_NONE;
990 1065
991 status = qla4_8xxx_rd_32(ha, ISR_INT_STATE_REG); 1066 status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
992 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) { 1067 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
993 DEBUG2(ql4_printk(KERN_INFO, ha, 1068 DEBUG2(ql4_printk(KERN_INFO, ha,
994 "%s legacy Int not triggered\n", __func__)); 1069 "%s legacy Int not triggered\n", __func__));
@@ -996,30 +1071,30 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
996 } 1071 }
997 1072
998 /* clear the interrupt */ 1073 /* clear the interrupt */
999 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); 1074 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
1000 1075
1001 /* read twice to ensure write is flushed */ 1076 /* read twice to ensure write is flushed */
1002 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR); 1077 qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1003 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR); 1078 qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1004 1079
1005 spin_lock_irqsave(&ha->hardware_lock, flags); 1080 spin_lock_irqsave(&ha->hardware_lock, flags);
1006 while (1) { 1081 while (1) {
1007 if (!(readl(&ha->qla4_8xxx_reg->host_int) & 1082 if (!(readl(&ha->qla4_82xx_reg->host_int) &
1008 ISRX_82XX_RISC_INT)) { 1083 ISRX_82XX_RISC_INT)) {
1009 qla4_8xxx_spurious_interrupt(ha, reqs_count); 1084 qla4_82xx_spurious_interrupt(ha, reqs_count);
1010 break; 1085 break;
1011 } 1086 }
1012 intr_status = readl(&ha->qla4_8xxx_reg->host_status); 1087 intr_status = readl(&ha->qla4_82xx_reg->host_status);
1013 if ((intr_status & 1088 if ((intr_status &
1014 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) { 1089 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
1015 qla4_8xxx_spurious_interrupt(ha, reqs_count); 1090 qla4_82xx_spurious_interrupt(ha, reqs_count);
1016 break; 1091 break;
1017 } 1092 }
1018 1093
1019 ha->isp_ops->interrupt_service_routine(ha, intr_status); 1094 ha->isp_ops->interrupt_service_routine(ha, intr_status);
1020 1095
1021 /* Enable Interrupt */ 1096 /* Enable Interrupt */
1022 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); 1097 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
1023 1098
1024 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) 1099 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
1025 break; 1100 break;
@@ -1029,6 +1104,59 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
1029 return IRQ_HANDLED; 1104 return IRQ_HANDLED;
1030} 1105}
1031 1106
1107#define LEG_INT_PTR_B31 (1 << 31)
1108#define LEG_INT_PTR_B30 (1 << 30)
1109#define PF_BITS_MASK (0xF << 16)
1110
1111/**
1112 * qla4_83xx_intr_handler - hardware interrupt handler.
1113 * @irq: Unused
1114 * @dev_id: Pointer to host adapter structure
1115 **/
1116irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
1117{
1118 struct scsi_qla_host *ha = dev_id;
1119 uint32_t leg_int_ptr = 0;
1120 unsigned long flags = 0;
1121
1122 ha->isr_count++;
1123 leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
1124
1125 /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
1126 if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
1127 ql4_printk(KERN_ERR, ha,
1128 "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
1129 __func__);
1130 return IRQ_NONE;
1131 }
1132
1133 /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
1134 if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
1135 ql4_printk(KERN_ERR, ha,
1136 "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
1137 __func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit);
1138 return IRQ_NONE;
1139 }
1140
1141 /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
1142 * Control register and poll till Legacy Interrupt Pointer register
1143 * bit30 is 0.
1144 */
1145 writel(0, &ha->qla4_83xx_reg->leg_int_trig);
1146 do {
1147 leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
1148 if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit)
1149 break;
1150 } while (leg_int_ptr & LEG_INT_PTR_B30);
1151
1152 spin_lock_irqsave(&ha->hardware_lock, flags);
1153 leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr);
1154 ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr);
1155 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1156
1157 return IRQ_HANDLED;
1158}
1159
1032irqreturn_t 1160irqreturn_t
1033qla4_8xxx_msi_handler(int irq, void *dev_id) 1161qla4_8xxx_msi_handler(int irq, void *dev_id)
1034{ 1162{
@@ -1043,15 +1171,46 @@ qla4_8xxx_msi_handler(int irq, void *dev_id)
1043 1171
1044 ha->isr_count++; 1172 ha->isr_count++;
1045 /* clear the interrupt */ 1173 /* clear the interrupt */
1046 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); 1174 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
1047 1175
1048 /* read twice to ensure write is flushed */ 1176 /* read twice to ensure write is flushed */
1049 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR); 1177 qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1050 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR); 1178 qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1051 1179
1052 return qla4_8xxx_default_intr_handler(irq, dev_id); 1180 return qla4_8xxx_default_intr_handler(irq, dev_id);
1053} 1181}
1054 1182
1183static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id)
1184{
1185 struct scsi_qla_host *ha = dev_id;
1186 unsigned long flags;
1187 uint32_t ival = 0;
1188
1189 spin_lock_irqsave(&ha->hardware_lock, flags);
1190
1191 ival = readl(&ha->qla4_83xx_reg->risc_intr);
1192 if (ival == 0) {
1193 ql4_printk(KERN_INFO, ha,
1194 "%s: It is a spurious mailbox interrupt!\n",
1195 __func__);
1196 ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
1197 ival &= ~INT_MASK_FW_MB;
1198 writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
1199 goto exit;
1200 }
1201
1202 qla4xxx_isr_decode_mailbox(ha,
1203 readl(&ha->qla4_83xx_reg->mailbox_out[0]));
1204 writel(0, &ha->qla4_83xx_reg->risc_intr);
1205 ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
1206 ival &= ~INT_MASK_FW_MB;
1207 writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
1208 ha->isr_count++;
1209exit:
1210 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1211 return IRQ_HANDLED;
1212}
1213
1055/** 1214/**
1056 * qla4_8xxx_default_intr_handler - hardware interrupt handler. 1215 * qla4_8xxx_default_intr_handler - hardware interrupt handler.
1057 * @irq: Unused 1216 * @irq: Unused
@@ -1068,29 +1227,32 @@ qla4_8xxx_default_intr_handler(int irq, void *dev_id)
1068 uint32_t intr_status; 1227 uint32_t intr_status;
1069 uint8_t reqs_count = 0; 1228 uint8_t reqs_count = 0;
1070 1229
1071 spin_lock_irqsave(&ha->hardware_lock, flags); 1230 if (is_qla8032(ha)) {
1072 while (1) { 1231 qla4_83xx_mailbox_intr_handler(irq, dev_id);
1073 if (!(readl(&ha->qla4_8xxx_reg->host_int) & 1232 } else {
1074 ISRX_82XX_RISC_INT)) { 1233 spin_lock_irqsave(&ha->hardware_lock, flags);
1075 qla4_8xxx_spurious_interrupt(ha, reqs_count); 1234 while (1) {
1076 break; 1235 if (!(readl(&ha->qla4_82xx_reg->host_int) &
1077 } 1236 ISRX_82XX_RISC_INT)) {
1237 qla4_82xx_spurious_interrupt(ha, reqs_count);
1238 break;
1239 }
1078 1240
1079 intr_status = readl(&ha->qla4_8xxx_reg->host_status); 1241 intr_status = readl(&ha->qla4_82xx_reg->host_status);
1080 if ((intr_status & 1242 if ((intr_status &
1081 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) { 1243 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
1082 qla4_8xxx_spurious_interrupt(ha, reqs_count); 1244 qla4_82xx_spurious_interrupt(ha, reqs_count);
1083 break; 1245 break;
1084 } 1246 }
1085 1247
1086 ha->isp_ops->interrupt_service_routine(ha, intr_status); 1248 ha->isp_ops->interrupt_service_routine(ha, intr_status);
1087 1249
1088 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) 1250 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
1089 break; 1251 break;
1252 }
1253 ha->isr_count++;
1254 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1090 } 1255 }
1091
1092 ha->isr_count++;
1093 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1094 return IRQ_HANDLED; 1256 return IRQ_HANDLED;
1095} 1257}
1096 1258
@@ -1099,13 +1261,25 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
1099{ 1261{
1100 struct scsi_qla_host *ha = dev_id; 1262 struct scsi_qla_host *ha = dev_id;
1101 unsigned long flags; 1263 unsigned long flags;
1264 uint32_t ival = 0;
1102 1265
1103 spin_lock_irqsave(&ha->hardware_lock, flags); 1266 spin_lock_irqsave(&ha->hardware_lock, flags);
1104 qla4xxx_process_response_queue(ha); 1267 if (is_qla8032(ha)) {
1105 writel(0, &ha->qla4_8xxx_reg->host_int); 1268 ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
1106 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1269 if (ival == 0) {
1107 1270 ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n",
1271 __func__);
1272 goto exit_msix_rsp_q;
1273 }
1274 qla4xxx_process_response_queue(ha);
1275 writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
1276 } else {
1277 qla4xxx_process_response_queue(ha);
1278 writel(0, &ha->qla4_82xx_reg->host_int);
1279 }
1108 ha->isr_count++; 1280 ha->isr_count++;
1281exit_msix_rsp_q:
1282 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1109 return IRQ_HANDLED; 1283 return IRQ_HANDLED;
1110} 1284}
1111 1285
@@ -1177,11 +1351,18 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1177{ 1351{
1178 int ret; 1352 int ret;
1179 1353
1180 if (!is_qla8022(ha)) 1354 if (is_qla40XX(ha))
1181 goto try_intx; 1355 goto try_intx;
1182 1356
1183 if (ql4xenablemsix == 2) 1357 if (ql4xenablemsix == 2) {
1358 /* Note: MSI Interrupts not supported for ISP8324 */
1359 if (is_qla8032(ha)) {
1360 ql4_printk(KERN_INFO, ha, "%s: MSI interrupts not supported for ISP8324, falling back to INTx mode\n",
1361 __func__);
1362 goto try_intx;
1363 }
1184 goto try_msi; 1364 goto try_msi;
1365 }
1185 1366
1186 if (ql4xenablemsix == 0 || ql4xenablemsix != 1) 1367 if (ql4xenablemsix == 0 || ql4xenablemsix != 1)
1187 goto try_intx; 1368 goto try_intx;
@@ -1192,6 +1373,12 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1192 DEBUG2(ql4_printk(KERN_INFO, ha, 1373 DEBUG2(ql4_printk(KERN_INFO, ha,
1193 "MSI-X: Enabled (0x%X).\n", ha->revision_id)); 1374 "MSI-X: Enabled (0x%X).\n", ha->revision_id));
1194 goto irq_attached; 1375 goto irq_attached;
1376 } else {
1377 if (is_qla8032(ha)) {
1378 ql4_printk(KERN_INFO, ha, "%s: ISP8324: MSI-X: falling back to INTx mode. ret = %d\n",
1379 __func__, ret);
1380 goto try_intx;
1381 }
1195 } 1382 }
1196 1383
1197 ql4_printk(KERN_WARNING, ha, 1384 ql4_printk(KERN_WARNING, ha,
@@ -1214,9 +1401,15 @@ try_msi:
1214 pci_disable_msi(ha->pdev); 1401 pci_disable_msi(ha->pdev);
1215 } 1402 }
1216 } 1403 }
1217 ql4_printk(KERN_WARNING, ha,
1218 "MSI: Falling back-to INTx mode -- %d.\n", ret);
1219 1404
1405 /*
1406 * Prevent interrupts from falling back to INTx mode in cases where
1407 * interrupts cannot be acquired through MSI-X or MSI mode.
1408 */
1409 if (is_qla8022(ha)) {
1410 ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret);
1411 goto irq_not_attached;
1412 }
1220try_intx: 1413try_intx:
1221 /* Trying INTx */ 1414 /* Trying INTx */
1222 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 1415 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -1230,7 +1423,7 @@ try_intx:
1230 ql4_printk(KERN_WARNING, ha, 1423 ql4_printk(KERN_WARNING, ha,
1231 "INTx: Failed to reserve interrupt %d already in" 1424 "INTx: Failed to reserve interrupt %d already in"
1232 " use.\n", ha->pdev->irq); 1425 " use.\n", ha->pdev->irq);
1233 return ret; 1426 goto irq_not_attached;
1234 } 1427 }
1235 1428
1236irq_attached: 1429irq_attached:
@@ -1238,6 +1431,7 @@ irq_attached:
1238 ha->host->irq = ha->pdev->irq; 1431 ha->host->irq = ha->pdev->irq;
1239 ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n", 1432 ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
1240 __func__, ha->pdev->irq); 1433 __func__, ha->pdev->irq);
1434irq_not_attached:
1241 return ret; 1435 return ret;
1242} 1436}
1243 1437
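
The new qla4_83xx_intr_handler() above only services a legacy interrupt after two checks on the Legacy Interrupt Pointer register: bit 31 must be set (the interrupt is valid) and the PCIe function field in bits [19:16] must match the adapter's pf_bit. A minimal user-space sketch of just those bit tests follows; the register value and pf_bit used here are made-up illustrations, not driver data.

#include <stdint.h>
#include <stdio.h>

#define LEG_INT_PTR_B31  (1u << 31)    /* interrupt valid */
#define PF_BITS_MASK     (0xFu << 16)  /* PCIe function bits [19:16] */

/* Mirror the validity checks done before servicing the interrupt. */
static int leg_int_is_ours(uint32_t leg_int_ptr, uint32_t pf_bit)
{
        if (!(leg_int_ptr & LEG_INT_PTR_B31))       /* not a valid legacy intr */
                return 0;
        if ((leg_int_ptr & PF_BITS_MASK) != pf_bit) /* raised for another PF   */
                return 0;
        return 1;
}

int main(void)
{
        uint32_t pf_bit = 0x2u << 16;                 /* pretend we are function 2 */
        uint32_t valid  = LEG_INT_PTR_B31 | pf_bit;
        uint32_t other  = LEG_INT_PTR_B31 | (0x1u << 16);

        printf("valid, ours:     %d\n", leg_int_is_ours(valid, pf_bit));  /* 1 */
        printf("valid, not ours: %d\n", leg_int_is_ours(other, pf_bit));  /* 0 */
        printf("bit31 clear:     %d\n", leg_int_is_ours(pf_bit, pf_bit)); /* 0 */
        return 0;
}
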
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index cab8f665a41f..3d41034191f0 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -9,7 +9,39 @@
9#include "ql4_glbl.h" 9#include "ql4_glbl.h"
10#include "ql4_dbg.h" 10#include "ql4_dbg.h"
11#include "ql4_inline.h" 11#include "ql4_inline.h"
12#include "ql4_version.h"
12 13
14void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
15 int in_count)
16{
17 int i;
18
19 /* Load all mailbox registers, except mailbox 0. */
20 for (i = 1; i < in_count; i++)
21 writel(mbx_cmd[i], &ha->reg->mailbox[i]);
22
23 /* Wakeup firmware */
24 writel(mbx_cmd[0], &ha->reg->mailbox[0]);
25 readl(&ha->reg->mailbox[0]);
26 writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
27 readl(&ha->reg->ctrl_status);
28}
29
30void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
31{
32 int intr_status;
33
34 intr_status = readl(&ha->reg->ctrl_status);
35 if (intr_status & INTR_PENDING) {
36 /*
37 * Service the interrupt.
38 * The ISR will save the mailbox status registers
39 * to a temporary storage location in the adapter structure.
40 */
41 ha->mbox_status_count = out_count;
42 ha->isp_ops->interrupt_service_routine(ha, intr_status);
43 }
44}
13 45
14/** 46/**
15 * qla4xxx_mailbox_command - issues mailbox commands 47 * qla4xxx_mailbox_command - issues mailbox commands
@@ -30,7 +62,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
30 int status = QLA_ERROR; 62 int status = QLA_ERROR;
31 uint8_t i; 63 uint8_t i;
32 u_long wait_count; 64 u_long wait_count;
33 uint32_t intr_status;
34 unsigned long flags = 0; 65 unsigned long flags = 0;
35 uint32_t dev_state; 66 uint32_t dev_state;
36 67
@@ -77,7 +108,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
77 msleep(10); 108 msleep(10);
78 } 109 }
79 110
80 if (is_qla8022(ha)) { 111 if (is_qla80XX(ha)) {
81 if (test_bit(AF_FW_RECOVERY, &ha->flags)) { 112 if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
82 DEBUG2(ql4_printk(KERN_WARNING, ha, 113 DEBUG2(ql4_printk(KERN_WARNING, ha,
83 "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n", 114 "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
@@ -85,10 +116,10 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
85 goto mbox_exit; 116 goto mbox_exit;
86 } 117 }
87 /* Do not send any mbx cmd if h/w is in failed state*/ 118 /* Do not send any mbx cmd if h/w is in failed state*/
88 qla4_8xxx_idc_lock(ha); 119 ha->isp_ops->idc_lock(ha);
89 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 120 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
90 qla4_8xxx_idc_unlock(ha); 121 ha->isp_ops->idc_unlock(ha);
91 if (dev_state == QLA82XX_DEV_FAILED) { 122 if (dev_state == QLA8XXX_DEV_FAILED) {
92 ql4_printk(KERN_WARNING, ha, 123 ql4_printk(KERN_WARNING, ha,
93 "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n", 124 "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
94 ha->host_no, __func__); 125 ha->host_no, __func__);
@@ -102,30 +133,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
102 for (i = 0; i < outCount; i++) 133 for (i = 0; i < outCount; i++)
103 ha->mbox_status[i] = 0; 134 ha->mbox_status[i] = 0;
104 135
105 if (is_qla8022(ha)) { 136 /* Queue the mailbox command to the firmware */
106 /* Load all mailbox registers, except mailbox 0. */ 137 ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, inCount);
107 DEBUG5(
108 printk("scsi%ld: %s: Cmd ", ha->host_no, __func__);
109 for (i = 0; i < inCount; i++)
110 printk("mb%d=%04x ", i, mbx_cmd[i]);
111 printk("\n"));
112
113 for (i = 1; i < inCount; i++)
114 writel(mbx_cmd[i], &ha->qla4_8xxx_reg->mailbox_in[i]);
115 writel(mbx_cmd[0], &ha->qla4_8xxx_reg->mailbox_in[0]);
116 readl(&ha->qla4_8xxx_reg->mailbox_in[0]);
117 writel(HINT_MBX_INT_PENDING, &ha->qla4_8xxx_reg->hint);
118 } else {
119 /* Load all mailbox registers, except mailbox 0. */
120 for (i = 1; i < inCount; i++)
121 writel(mbx_cmd[i], &ha->reg->mailbox[i]);
122
123 /* Wakeup firmware */
124 writel(mbx_cmd[0], &ha->reg->mailbox[0]);
125 readl(&ha->reg->mailbox[0]);
126 writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
127 readl(&ha->reg->ctrl_status);
128 }
129 138
130 spin_unlock_irqrestore(&ha->hardware_lock, flags); 139 spin_unlock_irqrestore(&ha->hardware_lock, flags);
131 140
@@ -167,37 +176,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
167 */ 176 */
168 177
169 spin_lock_irqsave(&ha->hardware_lock, flags); 178 spin_lock_irqsave(&ha->hardware_lock, flags);
170 if (is_qla8022(ha)) { 179 ha->isp_ops->process_mailbox_interrupt(ha, outCount);
171 intr_status =
172 readl(&ha->qla4_8xxx_reg->host_int);
173 if (intr_status & ISRX_82XX_RISC_INT) {
174 ha->mbox_status_count = outCount;
175 intr_status =
176 readl(&ha->qla4_8xxx_reg->host_status);
177 ha->isp_ops->interrupt_service_routine(
178 ha, intr_status);
179 if (test_bit(AF_INTERRUPTS_ON,
180 &ha->flags) &&
181 test_bit(AF_INTx_ENABLED,
182 &ha->flags))
183 qla4_8xxx_wr_32(ha,
184 ha->nx_legacy_intr.tgt_mask_reg,
185 0xfbff);
186 }
187 } else {
188 intr_status = readl(&ha->reg->ctrl_status);
189 if (intr_status & INTR_PENDING) {
190 /*
191 * Service the interrupt.
192 * The ISR will save the mailbox status
193 * registers to a temporary storage
194 * location in the adapter structure.
195 */
196 ha->mbox_status_count = outCount;
197 ha->isp_ops->interrupt_service_routine(
198 ha, intr_status);
199 }
200 }
201 spin_unlock_irqrestore(&ha->hardware_lock, flags); 180 spin_unlock_irqrestore(&ha->hardware_lock, flags);
202 msleep(10); 181 msleep(10);
203 } 182 }
@@ -205,7 +184,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
205 184
206 /* Check for mailbox timeout. */ 185 /* Check for mailbox timeout. */
207 if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) { 186 if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
208 if (is_qla8022(ha) && 187 if (is_qla80XX(ha) &&
209 test_bit(AF_FW_RECOVERY, &ha->flags)) { 188 test_bit(AF_FW_RECOVERY, &ha->flags)) {
210 DEBUG2(ql4_printk(KERN_INFO, ha, 189 DEBUG2(ql4_printk(KERN_INFO, ha,
211 "scsi%ld: %s: prematurely completing mbx cmd as " 190 "scsi%ld: %s: prematurely completing mbx cmd as "
@@ -222,9 +201,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
222 if (is_qla8022(ha)) { 201 if (is_qla8022(ha)) {
223 ql4_printk(KERN_INFO, ha, 202 ql4_printk(KERN_INFO, ha,
224 "disabling pause transmit on port 0 & 1.\n"); 203 "disabling pause transmit on port 0 & 1.\n");
225 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 204 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
226 CRB_NIU_XG_PAUSE_CTL_P0 | 205 CRB_NIU_XG_PAUSE_CTL_P0 |
227 CRB_NIU_XG_PAUSE_CTL_P1); 206 CRB_NIU_XG_PAUSE_CTL_P1);
207 } else if (is_qla8032(ha)) {
208 ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
209 __func__);
210 qla4_83xx_disable_pause(ha);
228 } 211 }
229 goto mbox_exit; 212 goto mbox_exit;
230 } 213 }
@@ -373,7 +356,7 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
373 memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); 356 memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
374 357
375 if (is_qla8022(ha)) 358 if (is_qla8022(ha))
376 qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0); 359 qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0);
377 360
378 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; 361 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
379 mbox_cmd[1] = 0; 362 mbox_cmd[1] = 0;
@@ -566,7 +549,7 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
566 __constant_cpu_to_le16(FWOPT_SESSION_MODE | 549 __constant_cpu_to_le16(FWOPT_SESSION_MODE |
567 FWOPT_INITIATOR_MODE); 550 FWOPT_INITIATOR_MODE);
568 551
569 if (is_qla8022(ha)) 552 if (is_qla80XX(ha))
570 init_fw_cb->fw_options |= 553 init_fw_cb->fw_options |=
571 __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB); 554 __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
572 555
@@ -1695,7 +1678,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
1695 conn = cls_conn->dd_data; 1678 conn = cls_conn->dd_data;
1696 qla_conn = conn->dd_data; 1679 qla_conn = conn->dd_data;
1697 sess = conn->session; 1680 sess = conn->session;
1698 dst_addr = &qla_conn->qla_ep->dst_addr; 1681 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1699 1682
1700 if (dst_addr->sa_family == AF_INET6) 1683 if (dst_addr->sa_family == AF_INET6)
1701 options |= IPV6_DEFAULT_DDB_ENTRY; 1684 options |= IPV6_DEFAULT_DDB_ENTRY;
@@ -1953,3 +1936,72 @@ int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
1953 } 1936 }
1954 return status; 1937 return status;
1955} 1938}
1939
1940/**
1941 * qla4_8xxx_set_param - set driver version in firmware.
1942 * @ha: Pointer to host adapter structure.
1943 * @param: Parameter to set, i.e. driver version
1944 **/
1945int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param)
1946{
1947 uint32_t mbox_cmd[MBOX_REG_COUNT];
1948 uint32_t mbox_sts[MBOX_REG_COUNT];
1949 uint32_t status;
1950
1951 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1952 memset(&mbox_sts, 0, sizeof(mbox_sts));
1953
1954 mbox_cmd[0] = MBOX_CMD_SET_PARAM;
1955 if (param == SET_DRVR_VERSION) {
1956 mbox_cmd[1] = SET_DRVR_VERSION;
1957 strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
1958 MAX_DRVR_VER_LEN);
1959 } else {
1960 ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n",
1961 __func__, param);
1962 status = QLA_ERROR;
1963 goto exit_set_param;
1964 }
1965
1966 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, mbox_cmd,
1967 mbox_sts);
1968 if (status == QLA_ERROR)
1969 ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
1970 __func__, mbox_sts[0]);
1971
1972exit_set_param:
1973 return status;
1974}
1975
1976/**
1977 * qla4_83xx_post_idc_ack - post IDC ACK
1978 * @ha: Pointer to host adapter structure.
1979 *
1980 * Posts IDC ACK for IDC Request Notification AEN.
1981 **/
1982int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
1983{
1984 uint32_t mbox_cmd[MBOX_REG_COUNT];
1985 uint32_t mbox_sts[MBOX_REG_COUNT];
1986 int status;
1987
1988 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1989 memset(&mbox_sts, 0, sizeof(mbox_sts));
1990
1991 mbox_cmd[0] = MBOX_CMD_IDC_ACK;
1992 mbox_cmd[1] = ha->idc_info.request_desc;
1993 mbox_cmd[2] = ha->idc_info.info1;
1994 mbox_cmd[3] = ha->idc_info.info2;
1995 mbox_cmd[4] = ha->idc_info.info3;
1996
1997 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
1998 mbox_cmd, mbox_sts);
1999 if (status == QLA_ERROR)
2000 ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
2001 mbox_sts[0]);
2002 else
2003 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n",
2004 __func__));
2005
2006 return status;
2007}
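
The ql4_mbx.c hunks above pull the chip-specific mailbox handling out of qla4xxx_mailbox_command(): instead of open-coded is_qla8022() branches, the common path now calls through ha->isp_ops->queue_mailbox_command() and ->process_mailbox_interrupt(). A stand-alone sketch of that ops-table dispatch pattern follows; the struct and function names (mbox_ops, isp40xx_*, isp83xx_*) are illustrative only, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Per-chip operations the common code dispatches through. */
struct mbox_ops {
        void (*queue_mailbox_command)(uint32_t *mbx_cmd, int in_count);
        void (*process_mailbox_interrupt)(int out_count);
};

static void isp40xx_queue(uint32_t *mbx_cmd, int in_count)
{
        printf("40xx: queue %d mailbox words, mb0=0x%x\n",
               in_count, (unsigned)mbx_cmd[0]);
}

static void isp40xx_process(int out_count)
{
        printf("40xx: collect %d mailbox status words\n", out_count);
}

static void isp83xx_queue(uint32_t *mbx_cmd, int in_count)
{
        printf("83xx: queue %d mailbox words, mb0=0x%x\n",
               in_count, (unsigned)mbx_cmd[0]);
}

static void isp83xx_process(int out_count)
{
        printf("83xx: collect %d mailbox status words\n", out_count);
}

static const struct mbox_ops isp40xx_ops = { isp40xx_queue, isp40xx_process };
static const struct mbox_ops isp83xx_ops = { isp83xx_queue, isp83xx_process };

/* Common mailbox path: no chip checks, only indirect calls, which is
 * what the patch achieves with ha->isp_ops in qla4xxx_mailbox_command(). */
static void mailbox_command(const struct mbox_ops *ops, uint32_t *mbx_cmd,
                            int in_count, int out_count)
{
        ops->queue_mailbox_command(mbx_cmd, in_count);
        ops->process_mailbox_interrupt(out_count);
}

int main(void)
{
        uint32_t cmd[8] = { 0x56 };     /* made-up opcode, not a real MBOX_CMD value */

        mailbox_command(&isp40xx_ops, cmd, 4, 2);
        mailbox_command(&isp83xx_ops, cmd, 4, 2);
        return 0;
}
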
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 7851f314ba96..325db1f2c091 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index 945cc328f57f..dba0514d1c70 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 807bf76f1b6a..499a92db1cf6 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -10,6 +10,7 @@
10#include <linux/ratelimit.h> 10#include <linux/ratelimit.h>
11#include "ql4_def.h" 11#include "ql4_def.h"
12#include "ql4_glbl.h" 12#include "ql4_glbl.h"
13#include "ql4_inline.h"
13 14
14#include <asm-generic/io-64-nonatomic-lo-hi.h> 15#include <asm-generic/io-64-nonatomic-lo-hi.h>
15 16
@@ -27,7 +28,7 @@
27#define CRB_BLK(off) ((off >> 20) & 0x3f) 28#define CRB_BLK(off) ((off >> 20) & 0x3f)
28#define CRB_SUBBLK(off) ((off >> 16) & 0xf) 29#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
29#define CRB_WINDOW_2M (0x130060) 30#define CRB_WINDOW_2M (0x130060)
30#define CRB_HI(off) ((qla4_8xxx_crb_hub_agt[CRB_BLK(off)] << 20) | \ 31#define CRB_HI(off) ((qla4_82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
31 ((off) & 0xf0000)) 32 ((off) & 0xf0000))
32#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL) 33#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
33#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL) 34#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
@@ -51,7 +52,7 @@ static int qla4_8xxx_crb_table_initialized;
51 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ 52 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
52 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) 53 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
53static void 54static void
54qla4_8xxx_crb_addr_transform_setup(void) 55qla4_82xx_crb_addr_transform_setup(void)
55{ 56{
56 qla4_8xxx_crb_addr_transform(XDMA); 57 qla4_8xxx_crb_addr_transform(XDMA);
57 qla4_8xxx_crb_addr_transform(TIMR); 58 qla4_8xxx_crb_addr_transform(TIMR);
@@ -268,7 +269,7 @@ static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
268/* 269/*
269 * top 12 bits of crb internal address (hub, agent) 270 * top 12 bits of crb internal address (hub, agent)
270 */ 271 */
271static unsigned qla4_8xxx_crb_hub_agt[64] = { 272static unsigned qla4_82xx_crb_hub_agt[64] = {
272 0, 273 0,
273 QLA82XX_HW_CRB_HUB_AGT_ADR_PS, 274 QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
274 QLA82XX_HW_CRB_HUB_AGT_ADR_MN, 275 QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
@@ -353,7 +354,7 @@ static char *qdev_state[] = {
353 * side effect: lock crb window 354 * side effect: lock crb window
354 */ 355 */
355static void 356static void
356qla4_8xxx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off) 357qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
357{ 358{
358 u32 win_read; 359 u32 win_read;
359 360
@@ -373,96 +374,115 @@ qla4_8xxx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
373} 374}
374 375
375void 376void
376qla4_8xxx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data) 377qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
377{ 378{
378 unsigned long flags = 0; 379 unsigned long flags = 0;
379 int rv; 380 int rv;
380 381
381 rv = qla4_8xxx_pci_get_crb_addr_2M(ha, &off); 382 rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
382 383
383 BUG_ON(rv == -1); 384 BUG_ON(rv == -1);
384 385
385 if (rv == 1) { 386 if (rv == 1) {
386 write_lock_irqsave(&ha->hw_lock, flags); 387 write_lock_irqsave(&ha->hw_lock, flags);
387 qla4_8xxx_crb_win_lock(ha); 388 qla4_82xx_crb_win_lock(ha);
388 qla4_8xxx_pci_set_crbwindow_2M(ha, &off); 389 qla4_82xx_pci_set_crbwindow_2M(ha, &off);
389 } 390 }
390 391
391 writel(data, (void __iomem *)off); 392 writel(data, (void __iomem *)off);
392 393
393 if (rv == 1) { 394 if (rv == 1) {
394 qla4_8xxx_crb_win_unlock(ha); 395 qla4_82xx_crb_win_unlock(ha);
395 write_unlock_irqrestore(&ha->hw_lock, flags); 396 write_unlock_irqrestore(&ha->hw_lock, flags);
396 } 397 }
397} 398}
398 399
399int 400uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha, ulong off)
400qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
401{ 401{
402 unsigned long flags = 0; 402 unsigned long flags = 0;
403 int rv; 403 int rv;
404 u32 data; 404 u32 data;
405 405
406 rv = qla4_8xxx_pci_get_crb_addr_2M(ha, &off); 406 rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
407 407
408 BUG_ON(rv == -1); 408 BUG_ON(rv == -1);
409 409
410 if (rv == 1) { 410 if (rv == 1) {
411 write_lock_irqsave(&ha->hw_lock, flags); 411 write_lock_irqsave(&ha->hw_lock, flags);
412 qla4_8xxx_crb_win_lock(ha); 412 qla4_82xx_crb_win_lock(ha);
413 qla4_8xxx_pci_set_crbwindow_2M(ha, &off); 413 qla4_82xx_pci_set_crbwindow_2M(ha, &off);
414 } 414 }
415 data = readl((void __iomem *)off); 415 data = readl((void __iomem *)off);
416 416
417 if (rv == 1) { 417 if (rv == 1) {
418 qla4_8xxx_crb_win_unlock(ha); 418 qla4_82xx_crb_win_unlock(ha);
419 write_unlock_irqrestore(&ha->hw_lock, flags); 419 write_unlock_irqrestore(&ha->hw_lock, flags);
420 } 420 }
421 return data; 421 return data;
422} 422}
423 423
424/* Minidump related functions */ 424/* Minidump related functions */
425static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off, 425int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data)
426 u32 data, uint8_t flag)
427{ 426{
428 uint32_t win_read, off_value, rval = QLA_SUCCESS; 427 uint32_t win_read, off_value;
428 int rval = QLA_SUCCESS;
429 429
430 off_value = off & 0xFFFF0000; 430 off_value = off & 0xFFFF0000;
431 writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); 431 writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
432 432
433 /* Read back value to make sure write has gone through before trying 433 /*
434 * Read back value to make sure write has gone through before trying
434 * to use it. 435 * to use it.
435 */ 436 */
436 win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); 437 win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
437 if (win_read != off_value) { 438 if (win_read != off_value) {
438 DEBUG2(ql4_printk(KERN_INFO, ha, 439 DEBUG2(ql4_printk(KERN_INFO, ha,
439 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n", 440 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
440 __func__, off_value, win_read, off)); 441 __func__, off_value, win_read, off));
441 return QLA_ERROR; 442 rval = QLA_ERROR;
443 } else {
444 off_value = off & 0x0000FFFF;
445 *data = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
446 ha->nx_pcibase));
442 } 447 }
448 return rval;
449}
443 450
444 off_value = off & 0x0000FFFF; 451int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data)
452{
453 uint32_t win_read, off_value;
454 int rval = QLA_SUCCESS;
455
456 off_value = off & 0xFFFF0000;
457 writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
445 458
446 if (flag) 459 /* Read back value to make sure write has gone through before trying
460 * to use it.
461 */
462 win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
463 if (win_read != off_value) {
464 DEBUG2(ql4_printk(KERN_INFO, ha,
465 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
466 __func__, off_value, win_read, off));
467 rval = QLA_ERROR;
468 } else {
469 off_value = off & 0x0000FFFF;
447 writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M + 470 writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
448 ha->nx_pcibase)); 471 ha->nx_pcibase));
449 else 472 }
450 rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
451 ha->nx_pcibase));
452
453 return rval; 473 return rval;
454} 474}
455 475
456#define CRB_WIN_LOCK_TIMEOUT 100000000 476#define CRB_WIN_LOCK_TIMEOUT 100000000
457 477
458int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha) 478int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
459{ 479{
460 int i; 480 int i;
461 int done = 0, timeout = 0; 481 int done = 0, timeout = 0;
462 482
463 while (!done) { 483 while (!done) {
464 /* acquire semaphore3 from PCI HW block */ 484 /* acquire semaphore3 from PCI HW block */
465 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); 485 done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
466 if (done == 1) 486 if (done == 1)
467 break; 487 break;
468 if (timeout >= CRB_WIN_LOCK_TIMEOUT) 488 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
@@ -478,32 +498,32 @@ int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
478 cpu_relax(); /* This is a nop instr on i386 */ 498 cpu_relax(); /* This is a nop instr on i386 */
479 } 499 }
480 } 500 }
481 qla4_8xxx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num); 501 qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
482 return 0; 502 return 0;
483} 503}
484 504
485void qla4_8xxx_crb_win_unlock(struct scsi_qla_host *ha) 505void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
486{ 506{
487 qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); 507 qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
488} 508}
489 509
490#define IDC_LOCK_TIMEOUT 100000000 510#define IDC_LOCK_TIMEOUT 100000000
491 511
492/** 512/**
493 * qla4_8xxx_idc_lock - hw_lock 513 * qla4_82xx_idc_lock - hw_lock
494 * @ha: pointer to adapter structure 514 * @ha: pointer to adapter structure
495 * 515 *
496 * General purpose lock used to synchronize access to 516 * General purpose lock used to synchronize access to
497 * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc. 517 * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc.
498 **/ 518 **/
499int qla4_8xxx_idc_lock(struct scsi_qla_host *ha) 519int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
500{ 520{
501 int i; 521 int i;
502 int done = 0, timeout = 0; 522 int done = 0, timeout = 0;
503 523
504 while (!done) { 524 while (!done) {
505 /* acquire semaphore5 from PCI HW block */ 525 /* acquire semaphore5 from PCI HW block */
506 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK)); 526 done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
507 if (done == 1) 527 if (done == 1)
508 break; 528 break;
509 if (timeout >= IDC_LOCK_TIMEOUT) 529 if (timeout >= IDC_LOCK_TIMEOUT)
@@ -522,13 +542,13 @@ int qla4_8xxx_idc_lock(struct scsi_qla_host *ha)
522 return 0; 542 return 0;
523} 543}
524 544
525void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha) 545void qla4_82xx_idc_unlock(struct scsi_qla_host *ha)
526{ 546{
527 qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); 547 qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
528} 548}
529 549
530int 550int
531qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off) 551qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
532{ 552{
533 struct crb_128M_2M_sub_block_map *m; 553 struct crb_128M_2M_sub_block_map *m;
534 554
@@ -562,44 +582,40 @@ qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
562 return 1; 582 return 1;
563} 583}
564 584
565/* PCI Windowing for DDR regions. */
566#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
567 (((addr) <= (high)) && ((addr) >= (low)))
568
569/* 585/*
570* check memory access boundary. 586* check memory access boundary.
571* used by test agent. support ddr access only for now 587* used by test agent. support ddr access only for now
572*/ 588*/
573static unsigned long 589static unsigned long
574qla4_8xxx_pci_mem_bound_check(struct scsi_qla_host *ha, 590qla4_82xx_pci_mem_bound_check(struct scsi_qla_host *ha,
575 unsigned long long addr, int size) 591 unsigned long long addr, int size)
576{ 592{
577 if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, 593 if (!QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
578 QLA82XX_ADDR_DDR_NET_MAX) || 594 QLA8XXX_ADDR_DDR_NET_MAX) ||
579 !QLA82XX_ADDR_IN_RANGE(addr + size - 1, 595 !QLA8XXX_ADDR_IN_RANGE(addr + size - 1,
580 QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || 596 QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX) ||
581 ((size != 1) && (size != 2) && (size != 4) && (size != 8))) { 597 ((size != 1) && (size != 2) && (size != 4) && (size != 8))) {
582 return 0; 598 return 0;
583 } 599 }
584 return 1; 600 return 1;
585} 601}
586 602
587static int qla4_8xxx_pci_set_window_warning_count; 603static int qla4_82xx_pci_set_window_warning_count;
588 604
589static unsigned long 605static unsigned long
590qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr) 606qla4_82xx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
591{ 607{
592 int window; 608 int window;
593 u32 win_read; 609 u32 win_read;
594 610
595 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, 611 if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
596 QLA82XX_ADDR_DDR_NET_MAX)) { 612 QLA8XXX_ADDR_DDR_NET_MAX)) {
597 /* DDR network side */ 613 /* DDR network side */
598 window = MN_WIN(addr); 614 window = MN_WIN(addr);
599 ha->ddr_mn_window = window; 615 ha->ddr_mn_window = window;
600 qla4_8xxx_wr_32(ha, ha->mn_win_crb | 616 qla4_82xx_wr_32(ha, ha->mn_win_crb |
601 QLA82XX_PCI_CRBSPACE, window); 617 QLA82XX_PCI_CRBSPACE, window);
602 win_read = qla4_8xxx_rd_32(ha, ha->mn_win_crb | 618 win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
603 QLA82XX_PCI_CRBSPACE); 619 QLA82XX_PCI_CRBSPACE);
604 if ((win_read << 17) != window) { 620 if ((win_read << 17) != window) {
605 ql4_printk(KERN_WARNING, ha, 621 ql4_printk(KERN_WARNING, ha,
@@ -607,8 +623,8 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
607 __func__, window, win_read); 623 __func__, window, win_read);
608 } 624 }
609 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; 625 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
610 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0, 626 } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
611 QLA82XX_ADDR_OCM0_MAX)) { 627 QLA8XXX_ADDR_OCM0_MAX)) {
612 unsigned int temp1; 628 unsigned int temp1;
613 /* if bits 19:18&17:11 are on */ 629 /* if bits 19:18&17:11 are on */
614 if ((addr & 0x00ff800) == 0xff800) { 630 if ((addr & 0x00ff800) == 0xff800) {
@@ -618,9 +634,9 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
618 634
619 window = OCM_WIN(addr); 635 window = OCM_WIN(addr);
620 ha->ddr_mn_window = window; 636 ha->ddr_mn_window = window;
621 qla4_8xxx_wr_32(ha, ha->mn_win_crb | 637 qla4_82xx_wr_32(ha, ha->mn_win_crb |
622 QLA82XX_PCI_CRBSPACE, window); 638 QLA82XX_PCI_CRBSPACE, window);
623 win_read = qla4_8xxx_rd_32(ha, ha->mn_win_crb | 639 win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
624 QLA82XX_PCI_CRBSPACE); 640 QLA82XX_PCI_CRBSPACE);
625 temp1 = ((window & 0x1FF) << 7) | 641 temp1 = ((window & 0x1FF) << 7) |
626 ((window & 0x0FFFE0000) >> 17); 642 ((window & 0x0FFFE0000) >> 17);
@@ -630,14 +646,14 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
630 } 646 }
631 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; 647 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
632 648
633 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, 649 } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
634 QLA82XX_P3_ADDR_QDR_NET_MAX)) { 650 QLA82XX_P3_ADDR_QDR_NET_MAX)) {
635 /* QDR network side */ 651 /* QDR network side */
636 window = MS_WIN(addr); 652 window = MS_WIN(addr);
637 ha->qdr_sn_window = window; 653 ha->qdr_sn_window = window;
638 qla4_8xxx_wr_32(ha, ha->ms_win_crb | 654 qla4_82xx_wr_32(ha, ha->ms_win_crb |
639 QLA82XX_PCI_CRBSPACE, window); 655 QLA82XX_PCI_CRBSPACE, window);
640 win_read = qla4_8xxx_rd_32(ha, 656 win_read = qla4_82xx_rd_32(ha,
641 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); 657 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
642 if (win_read != window) { 658 if (win_read != window) {
643 printk("%s: Written MSwin (0x%x) != Read " 659 printk("%s: Written MSwin (0x%x) != Read "
@@ -650,8 +666,8 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
650 * peg gdb frequently accesses memory that doesn't exist, 666 * peg gdb frequently accesses memory that doesn't exist,
651 * this limits the chit chat so debugging isn't slowed down. 667 * this limits the chit chat so debugging isn't slowed down.
652 */ 668 */
653 if ((qla4_8xxx_pci_set_window_warning_count++ < 8) || 669 if ((qla4_82xx_pci_set_window_warning_count++ < 8) ||
654 (qla4_8xxx_pci_set_window_warning_count%64 == 0)) { 670 (qla4_82xx_pci_set_window_warning_count%64 == 0)) {
655 printk("%s: Warning:%s Unknown address range!\n", 671 printk("%s: Warning:%s Unknown address range!\n",
656 __func__, DRIVER_NAME); 672 __func__, DRIVER_NAME);
657 } 673 }
@@ -661,7 +677,7 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
661} 677}
662 678
663/* check if address is in the same windows as the previous access */ 679/* check if address is in the same windows as the previous access */
664static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha, 680static int qla4_82xx_pci_is_same_window(struct scsi_qla_host *ha,
665 unsigned long long addr) 681 unsigned long long addr)
666{ 682{
667 int window; 683 int window;
@@ -669,20 +685,20 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
669 685
670 qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX; 686 qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
671 687
672 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, 688 if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
673 QLA82XX_ADDR_DDR_NET_MAX)) { 689 QLA8XXX_ADDR_DDR_NET_MAX)) {
674 /* DDR network side */ 690 /* DDR network side */
675 BUG(); /* MN access can not come here */ 691 BUG(); /* MN access can not come here */
676 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0, 692 } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
677 QLA82XX_ADDR_OCM0_MAX)) { 693 QLA8XXX_ADDR_OCM0_MAX)) {
678 return 1; 694 return 1;
679 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1, 695 } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM1,
680 QLA82XX_ADDR_OCM1_MAX)) { 696 QLA8XXX_ADDR_OCM1_MAX)) {
681 return 1; 697 return 1;
682 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, 698 } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
683 qdr_max)) { 699 qdr_max)) {
684 /* QDR network side */ 700 /* QDR network side */
685 window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f; 701 window = ((addr - QLA8XXX_ADDR_QDR_NET) >> 22) & 0x3f;
686 if (ha->qdr_sn_window == window) 702 if (ha->qdr_sn_window == window)
687 return 1; 703 return 1;
688 } 704 }
@@ -690,7 +706,7 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
690 return 0; 706 return 0;
691} 707}
692 708
693static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha, 709static int qla4_82xx_pci_mem_read_direct(struct scsi_qla_host *ha,
694 u64 off, void *data, int size) 710 u64 off, void *data, int size)
695{ 711{
696 unsigned long flags; 712 unsigned long flags;
@@ -707,9 +723,9 @@ static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
707 * If attempting to access unknown address or straddle hw windows, 723 * If attempting to access unknown address or straddle hw windows,
708 * do not access. 724 * do not access.
709 */ 725 */
710 start = qla4_8xxx_pci_set_window(ha, off); 726 start = qla4_82xx_pci_set_window(ha, off);
711 if ((start == -1UL) || 727 if ((start == -1UL) ||
712 (qla4_8xxx_pci_is_same_window(ha, off + size - 1) == 0)) { 728 (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
713 write_unlock_irqrestore(&ha->hw_lock, flags); 729 write_unlock_irqrestore(&ha->hw_lock, flags);
714 printk(KERN_ERR"%s out of bound pci memory access. " 730 printk(KERN_ERR"%s out of bound pci memory access. "
715 "offset is 0x%llx\n", DRIVER_NAME, off); 731 "offset is 0x%llx\n", DRIVER_NAME, off);
@@ -763,7 +779,7 @@ static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
763} 779}
764 780
765static int 781static int
766qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off, 782qla4_82xx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
767 void *data, int size) 783 void *data, int size)
768{ 784{
769 unsigned long flags; 785 unsigned long flags;
@@ -780,9 +796,9 @@ qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
780 * If attempting to access unknown address or straddle hw windows, 796 * If attempting to access unknown address or straddle hw windows,
781 * do not access. 797 * do not access.
782 */ 798 */
783 start = qla4_8xxx_pci_set_window(ha, off); 799 start = qla4_82xx_pci_set_window(ha, off);
784 if ((start == -1UL) || 800 if ((start == -1UL) ||
785 (qla4_8xxx_pci_is_same_window(ha, off + size - 1) == 0)) { 801 (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
786 write_unlock_irqrestore(&ha->hw_lock, flags); 802 write_unlock_irqrestore(&ha->hw_lock, flags);
787 printk(KERN_ERR"%s out of bound pci memory access. " 803 printk(KERN_ERR"%s out of bound pci memory access. "
788 "offset is 0x%llx\n", DRIVER_NAME, off); 804 "offset is 0x%llx\n", DRIVER_NAME, off);
@@ -835,13 +851,13 @@ qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
835#define MTU_FUDGE_FACTOR 100 851#define MTU_FUDGE_FACTOR 100
836 852
837static unsigned long 853static unsigned long
838qla4_8xxx_decode_crb_addr(unsigned long addr) 854qla4_82xx_decode_crb_addr(unsigned long addr)
839{ 855{
840 int i; 856 int i;
841 unsigned long base_addr, offset, pci_base; 857 unsigned long base_addr, offset, pci_base;
842 858
843 if (!qla4_8xxx_crb_table_initialized) 859 if (!qla4_8xxx_crb_table_initialized)
844 qla4_8xxx_crb_addr_transform_setup(); 860 qla4_82xx_crb_addr_transform_setup();
845 861
846 pci_base = ADDR_ERROR; 862 pci_base = ADDR_ERROR;
847 base_addr = addr & 0xfff00000; 863 base_addr = addr & 0xfff00000;
@@ -860,10 +876,10 @@ qla4_8xxx_decode_crb_addr(unsigned long addr)
860} 876}
861 877
862static long rom_max_timeout = 100; 878static long rom_max_timeout = 100;
863static long qla4_8xxx_rom_lock_timeout = 100; 879static long qla4_82xx_rom_lock_timeout = 100;
864 880
865static int 881static int
866qla4_8xxx_rom_lock(struct scsi_qla_host *ha) 882qla4_82xx_rom_lock(struct scsi_qla_host *ha)
867{ 883{
868 int i; 884 int i;
869 int done = 0, timeout = 0; 885 int done = 0, timeout = 0;
@@ -871,10 +887,10 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
871 while (!done) { 887 while (!done) {
872 /* acquire semaphore2 from PCI HW block */ 888 /* acquire semaphore2 from PCI HW block */
873 889
874 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); 890 done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
875 if (done == 1) 891 if (done == 1)
876 break; 892 break;
877 if (timeout >= qla4_8xxx_rom_lock_timeout) 893 if (timeout >= qla4_82xx_rom_lock_timeout)
878 return -1; 894 return -1;
879 895
880 timeout++; 896 timeout++;
@@ -887,24 +903,24 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
887 cpu_relax(); /* This is a nop instr on i386 */ 903 cpu_relax(); /* This is a nop instr on i386 */
888 } 904 }
889 } 905 }
890 qla4_8xxx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); 906 qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
891 return 0; 907 return 0;
892} 908}
893 909
894static void 910static void
895qla4_8xxx_rom_unlock(struct scsi_qla_host *ha) 911qla4_82xx_rom_unlock(struct scsi_qla_host *ha)
896{ 912{
897 qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 913 qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
898} 914}
899 915
900static int 916static int
901qla4_8xxx_wait_rom_done(struct scsi_qla_host *ha) 917qla4_82xx_wait_rom_done(struct scsi_qla_host *ha)
902{ 918{
903 long timeout = 0; 919 long timeout = 0;
904 long done = 0 ; 920 long done = 0 ;
905 921
906 while (done == 0) { 922 while (done == 0) {
907 done = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 923 done = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
908 done &= 2; 924 done &= 2;
909 timeout++; 925 timeout++;
910 if (timeout >= rom_max_timeout) { 926 if (timeout >= rom_max_timeout) {
@@ -917,40 +933,41 @@ qla4_8xxx_wait_rom_done(struct scsi_qla_host *ha)
917} 933}
918 934
919static int 935static int
920qla4_8xxx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp) 936qla4_82xx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
921{ 937{
922 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); 938 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
923 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 939 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
924 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); 940 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
925 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb); 941 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
926 if (qla4_8xxx_wait_rom_done(ha)) { 942 if (qla4_82xx_wait_rom_done(ha)) {
927 printk("%s: Error waiting for rom done\n", DRIVER_NAME); 943 printk("%s: Error waiting for rom done\n", DRIVER_NAME);
928 return -1; 944 return -1;
929 } 945 }
930 /* reset abyte_cnt and dummy_byte_cnt */ 946 /* reset abyte_cnt and dummy_byte_cnt */
931 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 947 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
932 udelay(10); 948 udelay(10);
933 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); 949 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
934 950
935 *valp = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); 951 *valp = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
936 return 0; 952 return 0;
937} 953}
938 954
939static int 955static int
940qla4_8xxx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp) 956qla4_82xx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
941{ 957{
942 int ret, loops = 0; 958 int ret, loops = 0;
943 959
944 while ((qla4_8xxx_rom_lock(ha) != 0) && (loops < 50000)) { 960 while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
945 udelay(100); 961 udelay(100);
946 loops++; 962 loops++;
947 } 963 }
948 if (loops >= 50000) { 964 if (loops >= 50000) {
949 printk("%s: qla4_8xxx_rom_lock failed\n", DRIVER_NAME); 965 ql4_printk(KERN_WARNING, ha, "%s: qla4_82xx_rom_lock failed\n",
966 DRIVER_NAME);
950 return -1; 967 return -1;
951 } 968 }
952 ret = qla4_8xxx_do_rom_fast_read(ha, addr, valp); 969 ret = qla4_82xx_do_rom_fast_read(ha, addr, valp);
953 qla4_8xxx_rom_unlock(ha); 970 qla4_82xx_rom_unlock(ha);
954 return ret; 971 return ret;
955} 972}
956 973
@@ -959,7 +976,7 @@ qla4_8xxx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
959 * to put the ISP into operational state 976 * to put the ISP into operational state
960 **/ 977 **/
961static int 978static int
962qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) 979qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
963{ 980{
964 int addr, val; 981 int addr, val;
965 int i ; 982 int i ;
@@ -973,68 +990,68 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
973 }; 990 };
974 991
975 /* Halt all the individual PEGs and other blocks of the ISP */ 992
976 qla4_8xxx_rom_lock(ha); 993 qla4_82xx_rom_lock(ha);
977 994
978 /* disable all I2Q */ 995 /* disable all I2Q */
979 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0); 996 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
980 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0); 997 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
981 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0); 998 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
982 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0); 999 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
983 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0); 1000 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
984 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0); 1001 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
985 1002
986 /* disable all niu interrupts */ 1003 /* disable all niu interrupts */
987 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); 1004 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
988 /* disable xge rx/tx */ 1005 /* disable xge rx/tx */
989 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); 1006 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
990 /* disable xg1 rx/tx */ 1007 /* disable xg1 rx/tx */
991 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); 1008 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
992 /* disable sideband mac */ 1009 /* disable sideband mac */
993 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00); 1010 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
994 /* disable ap0 mac */ 1011 /* disable ap0 mac */
995 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00); 1012 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
996 /* disable ap1 mac */ 1013 /* disable ap1 mac */
997 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00); 1014 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
998 1015
999 /* halt sre */ 1016 /* halt sre */
1000 val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); 1017 val = qla4_82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
1001 qla4_8xxx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); 1018 qla4_82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
1002 1019
1003 /* halt epg */ 1020 /* halt epg */
1004 qla4_8xxx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); 1021 qla4_82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
1005 1022
1006 /* halt timers */ 1023 /* halt timers */
1007 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); 1024 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
1008 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); 1025 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
1009 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); 1026 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
1010 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); 1027 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
1011 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); 1028 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1012 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0); 1029 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
1013 1030
1014 /* halt pegs */ 1031 /* halt pegs */
1015 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); 1032 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
1016 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); 1033 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
1017 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); 1034 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
1018 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); 1035 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
1019 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); 1036 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1020 msleep(5); 1037 msleep(5);
1021 1038
1022 /* big hammer */ 1039 /* big hammer */
1023 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 1040 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
1024 /* don't reset CAM block on reset */ 1041 /* don't reset CAM block on reset */
1025 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); 1042 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
1026 else 1043 else
1027 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); 1044 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
1028 1045
1029 qla4_8xxx_rom_unlock(ha); 1046 qla4_82xx_rom_unlock(ha);
1030 1047
1031 /* Read the signature value from the flash. 1048 /* Read the signature value from the flash.
1032 * Offset 0: Contains signature (0xcafecafe) 1049
1033 * Offset 4: Offset and number of addr/value pairs 1050 * Offset 4: Offset and number of addr/value pairs
1034 * that are present in the CRB initialize sequence 1051
1035 */ 1052 */
1036 if (qla4_8xxx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || 1053 if (qla4_82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1037 qla4_8xxx_rom_fast_read(ha, 4, &n) != 0) { 1054 qla4_82xx_rom_fast_read(ha, 4, &n) != 0) {
1038 ql4_printk(KERN_WARNING, ha, 1055 ql4_printk(KERN_WARNING, ha,
1039 "[ERROR] Reading crb_init area: n: %08x\n", n); 1056 "[ERROR] Reading crb_init area: n: %08x\n", n);
1040 return -1; 1057 return -1;
@@ -1065,8 +1082,8 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
1065 } 1082 }
1066 1083
1067 for (i = 0; i < n; i++) { 1084 for (i = 0; i < n; i++) {
1068 if (qla4_8xxx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 || 1085 if (qla4_82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
1069 qla4_8xxx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 1086 qla4_82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) !=
1070 0) { 1087 0) {
1071 kfree(buf); 1088 kfree(buf);
1072 return -1; 1089 return -1;
@@ -1080,7 +1097,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
1080 /* Translate internal CRB initialization 1097 /* Translate internal CRB initialization
1081 * address to PCI bus address 1098 * address to PCI bus address
1082 */ 1099 */
1083 off = qla4_8xxx_decode_crb_addr((unsigned long)buf[i].addr) + 1100 off = qla4_82xx_decode_crb_addr((unsigned long)buf[i].addr) +
1084 QLA82XX_PCI_CRBSPACE; 1101 QLA82XX_PCI_CRBSPACE;
1085 /* Not all CRB addr/value pairs are to be written, 1102
1086 * some of them are skipped 1103 * some of them are skipped
@@ -1125,7 +1142,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
1125 continue; 1142 continue;
1126 } 1143 }
1127 1144
1128 qla4_8xxx_wr_32(ha, off, buf[i].data); 1145 qla4_82xx_wr_32(ha, off, buf[i].data);
1129 1146
1130 /* ISP requires much bigger delay to settle down, 1147 /* ISP requires much bigger delay to settle down,
1131 * else crb_window returns 0xffffffff 1148 * else crb_window returns 0xffffffff
@@ -1142,25 +1159,25 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
1142 kfree(buf); 1159 kfree(buf);
1143 1160
1144 /* Resetting the data and instruction cache */ 1161 /* Resetting the data and instruction cache */
1145 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e); 1162 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
1146 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8); 1163 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
1147 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8); 1164 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
1148 1165
1149 /* Clear all protocol processing engines */ 1166 /* Clear all protocol processing engines */
1150 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0); 1167 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
1151 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0); 1168 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
1152 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0); 1169 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
1153 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0); 1170 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
1154 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0); 1171 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
1155 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0); 1172 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
1156 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0); 1173 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
1157 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0); 1174 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
1158 1175
1159 return 0; 1176 return 0;
1160} 1177}
1161 1178
1162static int 1179static int
1163qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) 1180qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
1164{ 1181{
1165 int i, rval = 0; 1182 int i, rval = 0;
1166 long size = 0; 1183 long size = 0;
@@ -1175,14 +1192,14 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
1175 ha->host_no, __func__, flashaddr, image_start)); 1192 ha->host_no, __func__, flashaddr, image_start));
1176 1193
1177 for (i = 0; i < size; i++) { 1194 for (i = 0; i < size; i++) {
1178 if ((qla4_8xxx_rom_fast_read(ha, flashaddr, (int *)&low)) || 1195 if ((qla4_82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1179 (qla4_8xxx_rom_fast_read(ha, flashaddr + 4, 1196 (qla4_82xx_rom_fast_read(ha, flashaddr + 4,
1180 (int *)&high))) { 1197 (int *)&high))) {
1181 rval = -1; 1198 rval = -1;
1182 goto exit_load_from_flash; 1199 goto exit_load_from_flash;
1183 } 1200 }
1184 data = ((u64)high << 32) | low ; 1201 data = ((u64)high << 32) | low ;
1185 rval = qla4_8xxx_pci_mem_write_2M(ha, memaddr, &data, 8); 1202 rval = qla4_82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
1186 if (rval) 1203 if (rval)
1187 goto exit_load_from_flash; 1204 goto exit_load_from_flash;
1188 1205
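The load-from-flash loop above copies the firmware image in 8-byte strides: each pass fast-reads two adjacent 32-bit flash words and combines them as ((u64)high << 32) | low before handing the result to the 2M memory writer. A small standalone sketch of that pairing with the flash read stubbed out; flash_read32() and the sample data are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Fake flash of little-endian 32-bit words; stands in for rom_fast_read(). */
static const uint32_t flash_words[] = {
	0x11111111, 0x22222222, 0x33333333, 0x44444444,
};

static int flash_read32(uint32_t byte_addr, uint32_t *out)
{
	uint32_t idx = byte_addr / 4;

	if (byte_addr % 4 || idx >= sizeof(flash_words) / sizeof(flash_words[0]))
		return -1;
	*out = flash_words[idx];
	return 0;
}

int main(void)
{
	uint32_t low, high, flashaddr = 0;
	uint64_t data, memaddr = 0;
	long i, size = 2;		/* number of 8-byte chunks to copy */

	for (i = 0; i < size; i++) {
		/* two adjacent 32-bit flash words become one 64-bit write */
		if (flash_read32(flashaddr, &low) ||
		    flash_read32(flashaddr + 4, &high))
			return 1;
		data = ((uint64_t)high << 32) | low;
		printf("mem[0x%llx] <= 0x%016llx\n",
		       (unsigned long long)memaddr, (unsigned long long)data);
		flashaddr += 8;
		memaddr += 8;
	}
	return 0;
}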
@@ -1197,20 +1214,20 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
1197 udelay(100); 1214 udelay(100);
1198 1215
1199 read_lock(&ha->hw_lock); 1216 read_lock(&ha->hw_lock);
1200 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); 1217 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1201 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); 1218 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1202 read_unlock(&ha->hw_lock); 1219 read_unlock(&ha->hw_lock);
1203 1220
1204exit_load_from_flash: 1221exit_load_from_flash:
1205 return rval; 1222 return rval;
1206} 1223}
1207 1224
1208static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start) 1225static int qla4_82xx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
1209{ 1226{
1210 u32 rst; 1227 u32 rst;
1211 1228
1212 qla4_8xxx_wr_32(ha, CRB_CMDPEG_STATE, 0); 1229 qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
1213 if (qla4_8xxx_pinit_from_rom(ha, 0) != QLA_SUCCESS) { 1230 if (qla4_82xx_pinit_from_rom(ha, 0) != QLA_SUCCESS) {
1214 printk(KERN_WARNING "%s: Error during CRB Initialization\n", 1231 printk(KERN_WARNING "%s: Error during CRB Initialization\n",
1215 __func__); 1232 __func__);
1216 return QLA_ERROR; 1233 return QLA_ERROR;
@@ -1223,12 +1240,12 @@ static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
1223 * To get around this, QM is brought out of reset. 1240 * To get around this, QM is brought out of reset.
1224 */ 1241 */
1225 1242
1226 rst = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET); 1243 rst = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
1227 /* unreset qm */ 1244 /* unreset qm */
1228 rst &= ~(1 << 28); 1245 rst &= ~(1 << 28);
1229 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst); 1246 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
1230 1247
1231 if (qla4_8xxx_load_from_flash(ha, image_start)) { 1248 if (qla4_82xx_load_from_flash(ha, image_start)) {
1232 printk("%s: Error trying to load fw from flash!\n", __func__); 1249 printk("%s: Error trying to load fw from flash!\n", __func__);
1233 return QLA_ERROR; 1250 return QLA_ERROR;
1234 } 1251 }
@@ -1237,7 +1254,7 @@ static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
1237} 1254}
1238 1255
1239int 1256int
1240qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha, 1257qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha,
1241 u64 off, void *data, int size) 1258 u64 off, void *data, int size)
1242{ 1259{
1243 int i, j = 0, k, start, end, loop, sz[2], off0[2]; 1260 int i, j = 0, k, start, end, loop, sz[2], off0[2];
@@ -1249,12 +1266,12 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
1249 * If not MN, go check for MS or invalid. 1266 * If not MN, go check for MS or invalid.
1250 */ 1267 */
1251 1268
1252 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) 1269 if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1253 mem_crb = QLA82XX_CRB_QDR_NET; 1270 mem_crb = QLA82XX_CRB_QDR_NET;
1254 else { 1271 else {
1255 mem_crb = QLA82XX_CRB_DDR_NET; 1272 mem_crb = QLA82XX_CRB_DDR_NET;
1256 if (qla4_8xxx_pci_mem_bound_check(ha, off, size) == 0) 1273 if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
1257 return qla4_8xxx_pci_mem_read_direct(ha, 1274 return qla4_82xx_pci_mem_read_direct(ha,
1258 off, data, size); 1275 off, data, size);
1259 } 1276 }
1260 1277
@@ -1270,16 +1287,16 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
1270 1287
1271 for (i = 0; i < loop; i++) { 1288 for (i = 0; i < loop; i++) {
1272 temp = off8 + (i << shift_amount); 1289 temp = off8 + (i << shift_amount);
1273 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); 1290 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
1274 temp = 0; 1291 temp = 0;
1275 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp); 1292 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
1276 temp = MIU_TA_CTL_ENABLE; 1293 temp = MIU_TA_CTL_ENABLE;
1277 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); 1294 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1278 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; 1295 temp = MIU_TA_CTL_START_ENABLE;
1279 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); 1296 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1280 1297
1281 for (j = 0; j < MAX_CTL_CHECK; j++) { 1298 for (j = 0; j < MAX_CTL_CHECK; j++) {
1282 temp = qla4_8xxx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); 1299 temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1283 if ((temp & MIU_TA_CTL_BUSY) == 0) 1300 if ((temp & MIU_TA_CTL_BUSY) == 0)
1284 break; 1301 break;
1285 } 1302 }
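The read path above drives the MIU test agent: program ADDR_LO/ADDR_HI, write ENABLE and then START_ENABLE into the control register, and poll up to MAX_CTL_CHECK times for the BUSY bit to drop before the RDDATA window is sampled. A userspace model of that busy-poll; the register mock and the flag values are local to the sketch, not the hardware definitions.

#include <stdint.h>
#include <stdio.h>

#define MIU_TA_CTL_START  0x1
#define MIU_TA_CTL_ENABLE 0x2
#define MIU_TA_CTL_BUSY   0x8
#define MAX_CTL_CHECK     1000

/* Simulated control register: BUSY clears after a few polls. */
static uint32_t ctl_reg;
static int busy_countdown;

static void wr32(uint32_t val)
{
	ctl_reg = val;
	if (val & MIU_TA_CTL_START) {
		ctl_reg |= MIU_TA_CTL_BUSY;
		busy_countdown = 3;	/* pretend the access takes 3 polls */
	}
}

static uint32_t rd32(void)
{
	if ((ctl_reg & MIU_TA_CTL_BUSY) && --busy_countdown == 0)
		ctl_reg &= ~MIU_TA_CTL_BUSY;
	return ctl_reg;
}

int main(void)
{
	uint32_t temp;
	int j;

	/* kick off the access: enable first, then start+enable */
	wr32(MIU_TA_CTL_ENABLE);
	wr32(MIU_TA_CTL_START | MIU_TA_CTL_ENABLE);

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = rd32();
		if ((temp & MIU_TA_CTL_BUSY) == 0)
			break;
	}
	if (j == MAX_CTL_CHECK) {
		fprintf(stderr, "MIU test agent stayed busy, failing the read\n");
		return 1;
	}
	printf("agent idle after %d polls; RDDATA registers may be read now\n", j);
	return 0;
}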
@@ -1294,7 +1311,7 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
1294 start = off0[i] >> 2; 1311 start = off0[i] >> 2;
1295 end = (off0[i] + sz[i] - 1) >> 2; 1312 end = (off0[i] + sz[i] - 1) >> 2;
1296 for (k = start; k <= end; k++) { 1313 for (k = start; k <= end; k++) {
1297 temp = qla4_8xxx_rd_32(ha, 1314 temp = qla4_82xx_rd_32(ha,
1298 mem_crb + MIU_TEST_AGT_RDDATA(k)); 1315 mem_crb + MIU_TEST_AGT_RDDATA(k));
1299 word[i] |= ((uint64_t)temp << (32 * (k & 1))); 1316 word[i] |= ((uint64_t)temp << (32 * (k & 1)));
1300 } 1317 }
@@ -1328,7 +1345,7 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
1328} 1345}
1329 1346
1330int 1347int
1331qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha, 1348qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha,
1332 u64 off, void *data, int size) 1349 u64 off, void *data, int size)
1333{ 1350{
1334 int i, j, ret = 0, loop, sz[2], off0; 1351 int i, j, ret = 0, loop, sz[2], off0;
@@ -1339,12 +1356,12 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
1339 /* 1356 /*
1340 * If not MN, go check for MS or invalid. 1357 * If not MN, go check for MS or invalid.
1341 */ 1358 */
1342 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) 1359 if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1343 mem_crb = QLA82XX_CRB_QDR_NET; 1360 mem_crb = QLA82XX_CRB_QDR_NET;
1344 else { 1361 else {
1345 mem_crb = QLA82XX_CRB_DDR_NET; 1362 mem_crb = QLA82XX_CRB_DDR_NET;
1346 if (qla4_8xxx_pci_mem_bound_check(ha, off, size) == 0) 1363 if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
1347 return qla4_8xxx_pci_mem_write_direct(ha, 1364 return qla4_82xx_pci_mem_write_direct(ha,
1348 off, data, size); 1365 off, data, size);
1349 } 1366 }
1350 1367
@@ -1359,7 +1376,7 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
1359 startword = (off & 0xf)/8; 1376 startword = (off & 0xf)/8;
1360 1377
1361 for (i = 0; i < loop; i++) { 1378 for (i = 0; i < loop; i++) {
1362 if (qla4_8xxx_pci_mem_read_2M(ha, off8 + 1379 if (qla4_82xx_pci_mem_read_2M(ha, off8 +
1363 (i << shift_amount), &word[i * scale], 8)) 1380 (i << shift_amount), &word[i * scale], 8))
1364 return -1; 1381 return -1;
1365 } 1382 }
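The pre-read loop above exists because the memory agent moves data in 16-byte units: a write that is not aligned to that size first reads the touched chunks back, patches only the caller's bytes, and then writes the merged chunks out again. A plain-buffer sketch of that read-modify-write merge; rmw_write() and the backing array are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK 16	/* the agent reads/writes 16 bytes at a time */

/* Pretend backing store, addressed in CHUNK-sized units. */
static uint8_t backing[64];

/* Read-modify-write: only bytes [off, off+size) change; the rest of each
 * touched 16-byte chunk is preserved by reading it back first. */
static void rmw_write(uint64_t off, const void *data, int size)
{
	uint64_t start = off & ~(uint64_t)(CHUNK - 1);
	uint64_t end = (off + size + CHUNK - 1) & ~(uint64_t)(CHUNK - 1);
	uint8_t scratch[sizeof(backing)];

	/* 1) read back every chunk the write touches */
	memcpy(scratch + start, backing + start, end - start);
	/* 2) patch the caller's bytes into the scratch copy */
	memcpy(scratch + off, data, size);
	/* 3) write the full chunks back out */
	memcpy(backing + start, scratch + start, end - start);
}

int main(void)
{
	uint8_t five[5] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
	unsigned i;

	memset(backing, 0x11, sizeof(backing));
	rmw_write(14, five, sizeof(five));	/* straddles two 16-byte chunks */

	for (i = 0; i < 32; i++)
		printf("%02x%s", backing[i], (i % 16 == 15) ? "\n" : " ");
	return 0;
}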
@@ -1395,27 +1412,27 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
1395 1412
1396 for (i = 0; i < loop; i++) { 1413 for (i = 0; i < loop; i++) {
1397 temp = off8 + (i << shift_amount); 1414 temp = off8 + (i << shift_amount);
1398 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); 1415 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1399 temp = 0; 1416 temp = 0;
1400 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp); 1417 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1401 temp = word[i * scale] & 0xffffffff; 1418 temp = word[i * scale] & 0xffffffff;
1402 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); 1419 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1403 temp = (word[i * scale] >> 32) & 0xffffffff; 1420 temp = (word[i * scale] >> 32) & 0xffffffff;
1404 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); 1421 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1405 temp = word[i*scale + 1] & 0xffffffff; 1422 temp = word[i*scale + 1] & 0xffffffff;
1406 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO, 1423 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO,
1407 temp); 1424 temp);
1408 temp = (word[i*scale + 1] >> 32) & 0xffffffff; 1425 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1409 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI, 1426 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI,
1410 temp); 1427 temp);
1411 1428
1412 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; 1429 temp = MIU_TA_CTL_WRITE_ENABLE;
1413 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp); 1430 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
1414 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; 1431 temp = MIU_TA_CTL_WRITE_START;
1415 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp); 1432 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
1416 1433
1417 for (j = 0; j < MAX_CTL_CHECK; j++) { 1434 for (j = 0; j < MAX_CTL_CHECK; j++) {
1418 temp = qla4_8xxx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); 1435 temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1419 if ((temp & MIU_TA_CTL_BUSY) == 0) 1436 if ((temp & MIU_TA_CTL_BUSY) == 0)
1420 break; 1437 break;
1421 } 1438 }
@@ -1433,14 +1450,14 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
1433 return ret; 1450 return ret;
1434} 1451}
1435 1452
1436static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val) 1453static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
1437{ 1454{
1438 u32 val = 0; 1455 u32 val = 0;
1439 int retries = 60; 1456 int retries = 60;
1440 1457
1441 if (!pegtune_val) { 1458 if (!pegtune_val) {
1442 do { 1459 do {
1443 val = qla4_8xxx_rd_32(ha, CRB_CMDPEG_STATE); 1460 val = qla4_82xx_rd_32(ha, CRB_CMDPEG_STATE);
1444 if ((val == PHAN_INITIALIZE_COMPLETE) || 1461 if ((val == PHAN_INITIALIZE_COMPLETE) ||
1445 (val == PHAN_INITIALIZE_ACK)) 1462 (val == PHAN_INITIALIZE_ACK))
1446 return 0; 1463 return 0;
@@ -1450,7 +1467,7 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
1450 } while (--retries); 1467 } while (--retries);
1451 1468
1452 if (!retries) { 1469 if (!retries) {
1453 pegtune_val = qla4_8xxx_rd_32(ha, 1470 pegtune_val = qla4_82xx_rd_32(ha,
1454 QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); 1471 QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1455 printk(KERN_WARNING "%s: init failed, " 1472 printk(KERN_WARNING "%s: init failed, "
1456 "pegtune_val = %x\n", __func__, pegtune_val); 1473 "pegtune_val = %x\n", __func__, pegtune_val);
@@ -1460,21 +1477,21 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
1460 return 0; 1477 return 0;
1461} 1478}
1462 1479
1463static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha) 1480static int qla4_82xx_rcvpeg_ready(struct scsi_qla_host *ha)
1464{ 1481{
1465 uint32_t state = 0; 1482 uint32_t state = 0;
1466 int loops = 0; 1483 int loops = 0;
1467 1484
1468 /* Window 1 call */ 1485 /* Window 1 call */
1469 read_lock(&ha->hw_lock); 1486 read_lock(&ha->hw_lock);
1470 state = qla4_8xxx_rd_32(ha, CRB_RCVPEG_STATE); 1487 state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
1471 read_unlock(&ha->hw_lock); 1488 read_unlock(&ha->hw_lock);
1472 1489
1473 while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) { 1490 while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) {
1474 udelay(100); 1491 udelay(100);
1475 /* Window 1 call */ 1492 /* Window 1 call */
1476 read_lock(&ha->hw_lock); 1493 read_lock(&ha->hw_lock);
1477 state = qla4_8xxx_rd_32(ha, CRB_RCVPEG_STATE); 1494 state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
1478 read_unlock(&ha->hw_lock); 1495 read_unlock(&ha->hw_lock);
1479 1496
1480 loops++; 1497 loops++;
@@ -1494,11 +1511,21 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
1494{ 1511{
1495 uint32_t drv_active; 1512 uint32_t drv_active;
1496 1513
1497 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1514 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1498 drv_active |= (1 << (ha->func_num * 4)); 1515
1516 /*
 		1517	 * For ISP8324, the drv_active register has 1 bit per function,
 		1518	 * so shift 1 by func_num to set the bit for this function.
 		1519	 * For ISP8022, drv_active has 4 bits per function.
1520 */
1521 if (is_qla8032(ha))
1522 drv_active |= (1 << ha->func_num);
1523 else
1524 drv_active |= (1 << (ha->func_num * 4));
1525
1499 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n", 1526 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
1500 __func__, ha->host_no, drv_active); 1527 __func__, ha->host_no, drv_active);
1501 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 1528 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
1502} 1529}
1503 1530
1504void 1531void
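The pattern introduced here, and repeated in the following hunks, is the per-function layout of the DRV_ACTIVE/DRV_STATE registers: ISP8324 (is_qla8032) assigns one bit per PCI function, while ISP8022 keeps a 4-bit nibble per function, with the low bit of the nibble used for active/reset-ready and the next bit for quiescent. A standalone sketch of the resulting masks; is_8032, func_mask() and the helpers are illustrative stand-ins for the driver code.

#include <stdint.h>
#include <stdio.h>

/* Bit (or nibble) assigned to a PCI function in DRV_ACTIVE/DRV_STATE.
 * bit_in_nibble is 0 for active/reset-ready, 1 for quiescent (ISP8022). */
static uint32_t func_mask(int is_8032, unsigned func_num,
			  unsigned bit_in_nibble)
{
	if (is_8032)
		return 1u << func_num;			/* 1 bit per function  */
	return (1u << bit_in_nibble) << (func_num * 4);	/* 4 bits per function */
}

static uint32_t set_drv_active(uint32_t reg, int is_8032, unsigned func)
{
	return reg | func_mask(is_8032, func, 0);
}

static uint32_t clear_drv_active(uint32_t reg, int is_8032, unsigned func)
{
	return reg & ~func_mask(is_8032, func, 0);
}

static uint32_t set_qsnt_ready(uint32_t reg, int is_8032, unsigned func)
{
	/* on ISP8022 this is the 2 << (func * 4) bit seen in the driver */
	return reg | func_mask(is_8032, func, is_8032 ? 0 : 1);
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_drv_active(reg, 0, 2);	/* ISP8022, function 2 */
	printf("8022 drv_active: 0x%08x\n", reg);		/* 0x00000100 */
	printf("8022 qsnt bit:   0x%08x\n", set_qsnt_ready(0, 0, 2));	/* 0x200 */
	printf("8032 drv_active: 0x%08x\n", set_drv_active(0, 1, 2));	/* 0x004 */
	printf("clear again:     0x%08x\n", clear_drv_active(reg, 0, 2));
	return 0;
}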
@@ -1506,50 +1533,87 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
1506{ 1533{
1507 uint32_t drv_active; 1534 uint32_t drv_active;
1508 1535
1509 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1536 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1510 drv_active &= ~(1 << (ha->func_num * 4)); 1537
1538 /*
 		1539	 * For ISP8324, the drv_active register has 1 bit per function,
 		1540	 * so shift 1 by func_num to clear the bit for this function.
 		1541	 * For ISP8022, drv_active has 4 bits per function.
1542 */
1543 if (is_qla8032(ha))
1544 drv_active &= ~(1 << (ha->func_num));
1545 else
1546 drv_active &= ~(1 << (ha->func_num * 4));
1547
1511 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n", 1548 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
1512 __func__, ha->host_no, drv_active); 1549 __func__, ha->host_no, drv_active);
1513 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 1550 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
1514} 1551}
1515 1552
1516static inline int 1553inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha)
1517qla4_8xxx_need_reset(struct scsi_qla_host *ha)
1518{ 1554{
1519 uint32_t drv_state, drv_active; 1555 uint32_t drv_state, drv_active;
1520 int rval; 1556 int rval;
1521 1557
1522 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1558 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1523 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1559 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1524 rval = drv_state & (1 << (ha->func_num * 4)); 1560
1561 /*
 		1562	 * For ISP8324, the drv_state register has 1 bit per function,
 		1563	 * so shift 1 by func_num to test the bit for this function.
 		1564	 * For ISP8022, drv_state has 4 bits per function.
1565 */
1566 if (is_qla8032(ha))
1567 rval = drv_state & (1 << ha->func_num);
1568 else
1569 rval = drv_state & (1 << (ha->func_num * 4));
1570
1525 if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active) 1571 if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active)
1526 rval = 1; 1572 rval = 1;
1527 1573
1528 return rval; 1574 return rval;
1529} 1575}
1530 1576
1531static inline void 1577void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
1532qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
1533{ 1578{
1534 uint32_t drv_state; 1579 uint32_t drv_state;
1535 1580
1536 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1581 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1537 drv_state |= (1 << (ha->func_num * 4)); 1582
1583 /*
 		1584	 * For ISP8324, the drv_state register has 1 bit per function,
 		1585	 * so shift 1 by func_num to set the bit for this function.
 		1586	 * For ISP8022, drv_state has 4 bits per function.
1587 */
1588 if (is_qla8032(ha))
1589 drv_state |= (1 << ha->func_num);
1590 else
1591 drv_state |= (1 << (ha->func_num * 4));
1592
1538 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n", 1593 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
1539 __func__, ha->host_no, drv_state); 1594 __func__, ha->host_no, drv_state);
1540 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 1595 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
1541} 1596}
1542 1597
1543static inline void 1598void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
1544qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
1545{ 1599{
1546 uint32_t drv_state; 1600 uint32_t drv_state;
1547 1601
1548 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1602 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1549 drv_state &= ~(1 << (ha->func_num * 4)); 1603
1604 /*
 		1605	 * For ISP8324, the drv_state register has 1 bit per function,
 		1606	 * so shift 1 by func_num to clear the bit for this function.
 		1607	 * For ISP8022, drv_state has 4 bits per function.
1608 */
1609 if (is_qla8032(ha))
1610 drv_state &= ~(1 << ha->func_num);
1611 else
1612 drv_state &= ~(1 << (ha->func_num * 4));
1613
1550 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n", 1614 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
1551 __func__, ha->host_no, drv_state); 1615 __func__, ha->host_no, drv_state);
1552 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 1616 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
1553} 1617}
1554 1618
1555static inline void 1619static inline void
@@ -1557,33 +1621,43 @@ qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
1557{ 1621{
1558 uint32_t qsnt_state; 1622 uint32_t qsnt_state;
1559 1623
1560 qsnt_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1624 qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1561 qsnt_state |= (2 << (ha->func_num * 4)); 1625
1562 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); 1626 /*
 		1627	 * For ISP8324, the drv_state register has 1 bit per function,
 		1628	 * so shift 1 by func_num to set the quiescent bit for this function.
 		1629	 * For ISP8022, drv_state has 4 bits per function.
1630 */
1631 if (is_qla8032(ha))
1632 qsnt_state |= (1 << ha->func_num);
1633 else
1634 qsnt_state |= (2 << (ha->func_num * 4));
1635
1636 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, qsnt_state);
1563} 1637}
1564 1638
1565 1639
1566static int 1640static int
1567qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start) 1641qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
1568{ 1642{
1569 uint16_t lnk; 1643 uint16_t lnk;
1570 1644
1571 /* scrub dma mask expansion register */ 1645 /* scrub dma mask expansion register */
1572 qla4_8xxx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555); 1646 qla4_82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
1573 1647
1574 /* Overwrite stale initialization register values */ 1648 /* Overwrite stale initialization register values */
1575 qla4_8xxx_wr_32(ha, CRB_CMDPEG_STATE, 0); 1649 qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
1576 qla4_8xxx_wr_32(ha, CRB_RCVPEG_STATE, 0); 1650 qla4_82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
1577 qla4_8xxx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); 1651 qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
1578 qla4_8xxx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); 1652 qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
1579 1653
1580 if (qla4_8xxx_load_fw(ha, image_start) != QLA_SUCCESS) { 1654 if (qla4_82xx_load_fw(ha, image_start) != QLA_SUCCESS) {
1581 printk("%s: Error trying to start fw!\n", __func__); 1655 printk("%s: Error trying to start fw!\n", __func__);
1582 return QLA_ERROR; 1656 return QLA_ERROR;
1583 } 1657 }
1584 1658
1585 /* Handshake with the card before we register the devices. */ 1659 /* Handshake with the card before we register the devices. */
1586 if (qla4_8xxx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) { 1660 if (qla4_82xx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) {
1587 printk("%s: Error during card handshake!\n", __func__); 1661 printk("%s: Error during card handshake!\n", __func__);
1588 return QLA_ERROR; 1662 return QLA_ERROR;
1589 } 1663 }
@@ -1593,11 +1667,10 @@ qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
1593 ha->link_width = (lnk >> 4) & 0x3f; 1667 ha->link_width = (lnk >> 4) & 0x3f;
1594 1668
1595 /* Synchronize with Receive peg */ 1669 /* Synchronize with Receive peg */
1596 return qla4_8xxx_rcvpeg_ready(ha); 1670 return qla4_82xx_rcvpeg_ready(ha);
1597} 1671}
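After the firmware handshake, the code above reads PCI_EXP_LNKSTA and caches the negotiated link width as (lnk >> 4) & 0x3f, which is the standard Link Status layout (speed in bits 0-3, width in bits 4-9). A two-line decode for reference; the sample register value is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t lnk = 0x0041;			/* example LNKSTA: x4 at 2.5 GT/s */
	unsigned speed = lnk & 0xf;		/* bits 0-3: current link speed   */
	unsigned width = (lnk >> 4) & 0x3f;	/* bits 4-9: negotiated width     */

	printf("link speed code %u, width x%u\n", speed, width);
	return 0;
}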
1598 1672
1599static int 1673int qla4_82xx_try_start_fw(struct scsi_qla_host *ha)
1600qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
1601{ 1674{
1602 int rval = QLA_ERROR; 1675 int rval = QLA_ERROR;
1603 1676
@@ -1615,7 +1688,7 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
1615 1688
1616 ql4_printk(KERN_INFO, ha, 1689 ql4_printk(KERN_INFO, ha,
1617 "FW: Attempting to load firmware from flash...\n"); 1690 "FW: Attempting to load firmware from flash...\n");
1618 rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw); 1691 rval = qla4_82xx_start_firmware(ha, ha->hw.flt_region_fw);
1619 1692
1620 if (rval != QLA_SUCCESS) { 1693 if (rval != QLA_SUCCESS) {
1621 ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash" 1694 ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
@@ -1626,9 +1699,9 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
1626 return rval; 1699 return rval;
1627} 1700}
1628 1701
1629static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha) 1702void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
1630{ 1703{
1631 if (qla4_8xxx_rom_lock(ha)) { 1704 if (qla4_82xx_rom_lock(ha)) {
1632 /* Someone else is holding the lock. */ 1705 /* Someone else is holding the lock. */
1633 dev_info(&ha->pdev->dev, "Resetting rom_lock\n"); 1706 dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
1634 } 1707 }
@@ -1638,25 +1711,25 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
1638 * else died while holding it. 1711 * else died while holding it.
1639 * In either case, unlock. 1712 * In either case, unlock.
1640 */ 1713 */
1641 qla4_8xxx_rom_unlock(ha); 1714 qla4_82xx_rom_unlock(ha);
1642} 1715}
1643 1716
1644static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha, 1717static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
1645 struct qla82xx_minidump_entry_hdr *entry_hdr, 1718 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1646 uint32_t **d_ptr) 1719 uint32_t **d_ptr)
1647{ 1720{
1648 uint32_t r_addr, r_stride, loop_cnt, i, r_value; 1721 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
1649 struct qla82xx_minidump_entry_crb *crb_hdr; 1722 struct qla8xxx_minidump_entry_crb *crb_hdr;
1650 uint32_t *data_ptr = *d_ptr; 1723 uint32_t *data_ptr = *d_ptr;
1651 1724
1652 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 1725 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1653 crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr; 1726 crb_hdr = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
1654 r_addr = crb_hdr->addr; 1727 r_addr = crb_hdr->addr;
1655 r_stride = crb_hdr->crb_strd.addr_stride; 1728 r_stride = crb_hdr->crb_strd.addr_stride;
1656 loop_cnt = crb_hdr->op_count; 1729 loop_cnt = crb_hdr->op_count;
1657 1730
1658 for (i = 0; i < loop_cnt; i++) { 1731 for (i = 0; i < loop_cnt; i++) {
1659 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0); 1732 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
1660 *data_ptr++ = cpu_to_le32(r_addr); 1733 *data_ptr++ = cpu_to_le32(r_addr);
1661 *data_ptr++ = cpu_to_le32(r_value); 1734 *data_ptr++ = cpu_to_le32(r_value);
1662 r_addr += r_stride; 1735 r_addr += r_stride;
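From this hunk on, the minidump walkers stop calling the 82xx-only qla4_8xxx_md_rw_32() and go through ha->isp_ops->rd_reg_indirect()/wr_reg_indirect() instead, so the same template handlers can serve ISP8022 and ISP8324. A stripped-down model of that function-pointer dispatch; the struct layout, the fake_hba type and the backends are invented for illustration and do not mirror the driver's real struct isp_operations.

#include <stdint.h>
#include <stdio.h>

struct fake_hba;

/* Per-chip register accessors, picked once at probe time. */
struct isp_ops {
	int (*rd_reg_indirect)(struct fake_hba *ha, uint32_t addr, uint32_t *val);
	int (*wr_reg_indirect)(struct fake_hba *ha, uint32_t addr, uint32_t val);
};

struct fake_hba {
	const struct isp_ops *isp_ops;
	uint32_t regs[16];		/* toy register file */
};

static int rd_82xx(struct fake_hba *ha, uint32_t addr, uint32_t *val)
{
	*val = ha->regs[addr % 16];
	return 0;
}

static int wr_82xx(struct fake_hba *ha, uint32_t addr, uint32_t val)
{
	ha->regs[addr % 16] = val;
	return 0;
}

static const struct isp_ops ops_82xx = {
	.rd_reg_indirect = rd_82xx,
	.wr_reg_indirect = wr_82xx,
};

/* A chip-agnostic walker in the style of the rdcrb handler. */
static void dump_crb(struct fake_hba *ha, uint32_t addr, uint32_t stride,
		     uint32_t count, uint32_t *out)
{
	uint32_t i, val;

	for (i = 0; i < count; i++) {
		ha->isp_ops->rd_reg_indirect(ha, addr, &val);
		*out++ = addr;
		*out++ = val;
		addr += stride;
	}
}

int main(void)
{
	struct fake_hba ha = { .isp_ops = &ops_82xx };
	uint32_t dump[8];

	ha.isp_ops->wr_reg_indirect(&ha, 3, 0xdeadbeef);
	dump_crb(&ha, 2, 1, 3, dump);
	printf("addr 0x%x -> 0x%08x\n", dump[2], dump[3]);	/* register 3 */
	return 0;
}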
@@ -1665,19 +1738,19 @@ static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
1665} 1738}
1666 1739
1667static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha, 1740static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1668 struct qla82xx_minidump_entry_hdr *entry_hdr, 1741 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1669 uint32_t **d_ptr) 1742 uint32_t **d_ptr)
1670{ 1743{
1671 uint32_t addr, r_addr, c_addr, t_r_addr; 1744 uint32_t addr, r_addr, c_addr, t_r_addr;
1672 uint32_t i, k, loop_count, t_value, r_cnt, r_value; 1745 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
1673 unsigned long p_wait, w_time, p_mask; 1746 unsigned long p_wait, w_time, p_mask;
1674 uint32_t c_value_w, c_value_r; 1747 uint32_t c_value_w, c_value_r;
1675 struct qla82xx_minidump_entry_cache *cache_hdr; 1748 struct qla8xxx_minidump_entry_cache *cache_hdr;
1676 int rval = QLA_ERROR; 1749 int rval = QLA_ERROR;
1677 uint32_t *data_ptr = *d_ptr; 1750 uint32_t *data_ptr = *d_ptr;
1678 1751
1679 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 1752 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1680 cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr; 1753 cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
1681 1754
1682 loop_count = cache_hdr->op_count; 1755 loop_count = cache_hdr->op_count;
1683 r_addr = cache_hdr->read_addr; 1756 r_addr = cache_hdr->read_addr;
@@ -1691,16 +1764,16 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1691 p_mask = cache_hdr->cache_ctrl.poll_mask; 1764 p_mask = cache_hdr->cache_ctrl.poll_mask;
1692 1765
1693 for (i = 0; i < loop_count; i++) { 1766 for (i = 0; i < loop_count; i++) {
1694 qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1); 1767 ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
1695 1768
1696 if (c_value_w) 1769 if (c_value_w)
1697 qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1); 1770 ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
1698 1771
1699 if (p_mask) { 1772 if (p_mask) {
1700 w_time = jiffies + p_wait; 1773 w_time = jiffies + p_wait;
1701 do { 1774 do {
1702 c_value_r = qla4_8xxx_md_rw_32(ha, c_addr, 1775 ha->isp_ops->rd_reg_indirect(ha, c_addr,
1703 0, 0); 1776 &c_value_r);
1704 if ((c_value_r & p_mask) == 0) { 1777 if ((c_value_r & p_mask) == 0) {
1705 break; 1778 break;
1706 } else if (time_after_eq(jiffies, w_time)) { 1779 } else if (time_after_eq(jiffies, w_time)) {
@@ -1712,7 +1785,7 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1712 1785
1713 addr = r_addr; 1786 addr = r_addr;
1714 for (k = 0; k < r_cnt; k++) { 1787 for (k = 0; k < r_cnt; k++) {
1715 r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0); 1788 ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
1716 *data_ptr++ = cpu_to_le32(r_value); 1789 *data_ptr++ = cpu_to_le32(r_value);
1717 addr += cache_hdr->read_ctrl.read_addr_stride; 1790 addr += cache_hdr->read_ctrl.read_addr_stride;
1718 } 1791 }
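When a cache entry carries a poll mask, the handler above arms a deadline (w_time = jiffies + p_wait) and keeps re-reading the control register until the masked bits clear or time_after_eq() reports the deadline has passed, at which point the dump is abandoned. A userspace analogue of that deadline poll using a monotonic clock; the simulated register and the timings are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Fake control register whose poll bit clears after roughly 50 ms. */
static uint32_t read_ctrl(const struct timespec *start)
{
	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	long ms = (now.tv_sec - start->tv_sec) * 1000 +
		  (now.tv_nsec - start->tv_nsec) / 1000000;
	return ms < 50 ? 0x1 : 0x0;	/* bit 0 = still busy */
}

int main(void)
{
	const uint32_t p_mask = 0x1;	/* bits that must clear   */
	const long p_wait_ms = 500;	/* deadline, like p_wait  */
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		uint32_t c_value_r = read_ctrl(&start);

		if ((c_value_r & p_mask) == 0) {
			printf("controller ready, reading cache words\n");
			return 0;
		}
		clock_gettime(CLOCK_MONOTONIC, &now);
		long waited = (now.tv_sec - start.tv_sec) * 1000 +
			      (now.tv_nsec - start.tv_nsec) / 1000000;
		if (waited >= p_wait_ms) {	/* time_after_eq() analogue */
			fprintf(stderr, "poll timed out, abandoning dump\n");
			return 1;
		}
	}
}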
@@ -1724,9 +1797,9 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1724} 1797}
1725 1798
1726static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha, 1799static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1727 struct qla82xx_minidump_entry_hdr *entry_hdr) 1800 struct qla8xxx_minidump_entry_hdr *entry_hdr)
1728{ 1801{
1729 struct qla82xx_minidump_entry_crb *crb_entry; 1802 struct qla8xxx_minidump_entry_crb *crb_entry;
1730 uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS; 1803 uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
1731 uint32_t crb_addr; 1804 uint32_t crb_addr;
1732 unsigned long wtime; 1805 unsigned long wtime;
@@ -1736,58 +1809,59 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1736 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 1809 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1737 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) 1810 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
1738 ha->fw_dump_tmplt_hdr; 1811 ha->fw_dump_tmplt_hdr;
1739 crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr; 1812 crb_entry = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
1740 1813
1741 crb_addr = crb_entry->addr; 1814 crb_addr = crb_entry->addr;
1742 for (i = 0; i < crb_entry->op_count; i++) { 1815 for (i = 0; i < crb_entry->op_count; i++) {
1743 opcode = crb_entry->crb_ctrl.opcode; 1816 opcode = crb_entry->crb_ctrl.opcode;
1744 if (opcode & QLA82XX_DBG_OPCODE_WR) { 1817 if (opcode & QLA8XXX_DBG_OPCODE_WR) {
1745 qla4_8xxx_md_rw_32(ha, crb_addr, 1818 ha->isp_ops->wr_reg_indirect(ha, crb_addr,
1746 crb_entry->value_1, 1); 1819 crb_entry->value_1);
1747 opcode &= ~QLA82XX_DBG_OPCODE_WR; 1820 opcode &= ~QLA8XXX_DBG_OPCODE_WR;
1748 } 1821 }
1749 if (opcode & QLA82XX_DBG_OPCODE_RW) { 1822 if (opcode & QLA8XXX_DBG_OPCODE_RW) {
1750 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); 1823 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
1751 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1); 1824 ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
1752 opcode &= ~QLA82XX_DBG_OPCODE_RW; 1825 opcode &= ~QLA8XXX_DBG_OPCODE_RW;
1753 } 1826 }
1754 if (opcode & QLA82XX_DBG_OPCODE_AND) { 1827 if (opcode & QLA8XXX_DBG_OPCODE_AND) {
1755 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); 1828 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
1756 read_value &= crb_entry->value_2; 1829 read_value &= crb_entry->value_2;
1757 opcode &= ~QLA82XX_DBG_OPCODE_AND; 1830 opcode &= ~QLA8XXX_DBG_OPCODE_AND;
1758 if (opcode & QLA82XX_DBG_OPCODE_OR) { 1831 if (opcode & QLA8XXX_DBG_OPCODE_OR) {
1759 read_value |= crb_entry->value_3; 1832 read_value |= crb_entry->value_3;
1760 opcode &= ~QLA82XX_DBG_OPCODE_OR; 1833 opcode &= ~QLA8XXX_DBG_OPCODE_OR;
1761 } 1834 }
1762 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1); 1835 ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
1763 } 1836 }
1764 if (opcode & QLA82XX_DBG_OPCODE_OR) { 1837 if (opcode & QLA8XXX_DBG_OPCODE_OR) {
1765 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); 1838 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
1766 read_value |= crb_entry->value_3; 1839 read_value |= crb_entry->value_3;
1767 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1); 1840 ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
1768 opcode &= ~QLA82XX_DBG_OPCODE_OR; 1841 opcode &= ~QLA8XXX_DBG_OPCODE_OR;
1769 } 1842 }
1770 if (opcode & QLA82XX_DBG_OPCODE_POLL) { 1843 if (opcode & QLA8XXX_DBG_OPCODE_POLL) {
1771 poll_time = crb_entry->crb_strd.poll_timeout; 1844 poll_time = crb_entry->crb_strd.poll_timeout;
1772 wtime = jiffies + poll_time; 1845 wtime = jiffies + poll_time;
1773 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); 1846 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
1774 1847
1775 do { 1848 do {
1776 if ((read_value & crb_entry->value_2) == 1849 if ((read_value & crb_entry->value_2) ==
1777 crb_entry->value_1) 1850 crb_entry->value_1) {
1778 break; 1851 break;
1779 else if (time_after_eq(jiffies, wtime)) { 1852 } else if (time_after_eq(jiffies, wtime)) {
1780 /* capturing dump failed */ 1853 /* capturing dump failed */
1781 rval = QLA_ERROR; 1854 rval = QLA_ERROR;
1782 break; 1855 break;
1783 } else 1856 } else {
1784 read_value = qla4_8xxx_md_rw_32(ha, 1857 ha->isp_ops->rd_reg_indirect(ha,
1785 crb_addr, 0, 0); 1858 crb_addr, &read_value);
1859 }
1786 } while (1); 1860 } while (1);
1787 opcode &= ~QLA82XX_DBG_OPCODE_POLL; 1861 opcode &= ~QLA8XXX_DBG_OPCODE_POLL;
1788 } 1862 }
1789 1863
1790 if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { 1864 if (opcode & QLA8XXX_DBG_OPCODE_RDSTATE) {
1791 if (crb_entry->crb_strd.state_index_a) { 1865 if (crb_entry->crb_strd.state_index_a) {
1792 index = crb_entry->crb_strd.state_index_a; 1866 index = crb_entry->crb_strd.state_index_a;
1793 addr = tmplt_hdr->saved_state_array[index]; 1867 addr = tmplt_hdr->saved_state_array[index];
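A CNTRL template entry packs several operations into a single opcode bitmask (WR, RW, AND, OR, POLL, RDSTATE, WRSTATE, MDSTATE); the handler above tests each flag in turn, performs the action through the indirect accessors, and strips the flag from its local copy as it goes. A toy dispatcher showing that strip-as-you-go pattern on a fake register; the OP_* flag values are placeholders, not the QLA8XXX_DBG_OPCODE_* constants.

#include <stdint.h>
#include <stdio.h>

/* Placeholder opcode flags -- the real QLA8XXX_DBG_OPCODE_* values differ. */
#define OP_WR	0x01
#define OP_RW	0x02
#define OP_AND	0x04
#define OP_OR	0x08

static uint32_t reg;	/* stands in for the CRB register at crb_addr */

static void process_control_entry(uint32_t opcode, uint32_t value_1,
				  uint32_t value_2, uint32_t value_3)
{
	uint32_t read_value;

	if (opcode & OP_WR) {			/* plain write */
		reg = value_1;
		opcode &= ~OP_WR;
	}
	if (opcode & OP_RW) {			/* read back, write same value */
		read_value = reg;
		reg = read_value;
		opcode &= ~OP_RW;
	}
	if (opcode & OP_AND) {			/* read-AND(-OR)-write */
		read_value = reg & value_2;
		opcode &= ~OP_AND;
		if (opcode & OP_OR) {
			read_value |= value_3;
			opcode &= ~OP_OR;
		}
		reg = read_value;
	}
	if (opcode & OP_OR) {			/* standalone read-OR-write */
		reg |= value_3;
		opcode &= ~OP_OR;
	}
	/* POLL/RDSTATE/WRSTATE/MDSTATE would be handled in the same style. */
}

int main(void)
{
	process_control_entry(OP_WR, 0xff00ff00, 0, 0);
	process_control_entry(OP_AND | OP_OR, 0, 0x0000ff00, 0x0000000f);
	printf("register ends up as 0x%08x\n", reg);	/* 0x0000ff0f */
	return 0;
}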
@@ -1795,13 +1869,13 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1795 addr = crb_addr; 1869 addr = crb_addr;
1796 } 1870 }
1797 1871
1798 read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0); 1872 ha->isp_ops->rd_reg_indirect(ha, addr, &read_value);
1799 index = crb_entry->crb_ctrl.state_index_v; 1873 index = crb_entry->crb_ctrl.state_index_v;
1800 tmplt_hdr->saved_state_array[index] = read_value; 1874 tmplt_hdr->saved_state_array[index] = read_value;
1801 opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; 1875 opcode &= ~QLA8XXX_DBG_OPCODE_RDSTATE;
1802 } 1876 }
1803 1877
1804 if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { 1878 if (opcode & QLA8XXX_DBG_OPCODE_WRSTATE) {
1805 if (crb_entry->crb_strd.state_index_a) { 1879 if (crb_entry->crb_strd.state_index_a) {
1806 index = crb_entry->crb_strd.state_index_a; 1880 index = crb_entry->crb_strd.state_index_a;
1807 addr = tmplt_hdr->saved_state_array[index]; 1881 addr = tmplt_hdr->saved_state_array[index];
@@ -1817,11 +1891,11 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1817 read_value = crb_entry->value_1; 1891 read_value = crb_entry->value_1;
1818 } 1892 }
1819 1893
1820 qla4_8xxx_md_rw_32(ha, addr, read_value, 1); 1894 ha->isp_ops->wr_reg_indirect(ha, addr, read_value);
1821 opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; 1895 opcode &= ~QLA8XXX_DBG_OPCODE_WRSTATE;
1822 } 1896 }
1823 1897
1824 if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { 1898 if (opcode & QLA8XXX_DBG_OPCODE_MDSTATE) {
1825 index = crb_entry->crb_ctrl.state_index_v; 1899 index = crb_entry->crb_ctrl.state_index_v;
1826 read_value = tmplt_hdr->saved_state_array[index]; 1900 read_value = tmplt_hdr->saved_state_array[index];
1827 read_value <<= crb_entry->crb_ctrl.shl; 1901 read_value <<= crb_entry->crb_ctrl.shl;
@@ -1831,7 +1905,7 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1831 read_value |= crb_entry->value_3; 1905 read_value |= crb_entry->value_3;
1832 read_value += crb_entry->value_1; 1906 read_value += crb_entry->value_1;
1833 tmplt_hdr->saved_state_array[index] = read_value; 1907 tmplt_hdr->saved_state_array[index] = read_value;
1834 opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; 1908 opcode &= ~QLA8XXX_DBG_OPCODE_MDSTATE;
1835 } 1909 }
1836 crb_addr += crb_entry->crb_strd.addr_stride; 1910 crb_addr += crb_entry->crb_strd.addr_stride;
1837 } 1911 }
@@ -1840,15 +1914,15 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1840} 1914}
1841 1915
1842static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha, 1916static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
1843 struct qla82xx_minidump_entry_hdr *entry_hdr, 1917 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1844 uint32_t **d_ptr) 1918 uint32_t **d_ptr)
1845{ 1919{
1846 uint32_t r_addr, r_stride, loop_cnt, i, r_value; 1920 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
1847 struct qla82xx_minidump_entry_rdocm *ocm_hdr; 1921 struct qla8xxx_minidump_entry_rdocm *ocm_hdr;
1848 uint32_t *data_ptr = *d_ptr; 1922 uint32_t *data_ptr = *d_ptr;
1849 1923
1850 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 1924 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1851 ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr; 1925 ocm_hdr = (struct qla8xxx_minidump_entry_rdocm *)entry_hdr;
1852 r_addr = ocm_hdr->read_addr; 1926 r_addr = ocm_hdr->read_addr;
1853 r_stride = ocm_hdr->read_addr_stride; 1927 r_stride = ocm_hdr->read_addr_stride;
1854 loop_cnt = ocm_hdr->op_count; 1928 loop_cnt = ocm_hdr->op_count;
@@ -1863,20 +1937,20 @@ static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
1863 r_addr += r_stride; 1937 r_addr += r_stride;
1864 } 1938 }
1865 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n", 1939 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
1866 __func__, (loop_cnt * sizeof(uint32_t)))); 1940 __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t))));
1867 *d_ptr = data_ptr; 1941 *d_ptr = data_ptr;
1868} 1942}
1869 1943
1870static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha, 1944static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
1871 struct qla82xx_minidump_entry_hdr *entry_hdr, 1945 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1872 uint32_t **d_ptr) 1946 uint32_t **d_ptr)
1873{ 1947{
1874 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; 1948 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
1875 struct qla82xx_minidump_entry_mux *mux_hdr; 1949 struct qla8xxx_minidump_entry_mux *mux_hdr;
1876 uint32_t *data_ptr = *d_ptr; 1950 uint32_t *data_ptr = *d_ptr;
1877 1951
1878 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 1952 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1879 mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr; 1953 mux_hdr = (struct qla8xxx_minidump_entry_mux *)entry_hdr;
1880 r_addr = mux_hdr->read_addr; 1954 r_addr = mux_hdr->read_addr;
1881 s_addr = mux_hdr->select_addr; 1955 s_addr = mux_hdr->select_addr;
1882 s_stride = mux_hdr->select_value_stride; 1956 s_stride = mux_hdr->select_value_stride;
@@ -1884,8 +1958,8 @@ static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
1884 loop_cnt = mux_hdr->op_count; 1958 loop_cnt = mux_hdr->op_count;
1885 1959
1886 for (i = 0; i < loop_cnt; i++) { 1960 for (i = 0; i < loop_cnt; i++) {
1887 qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1); 1961 ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
1888 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0); 1962 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
1889 *data_ptr++ = cpu_to_le32(s_value); 1963 *data_ptr++ = cpu_to_le32(s_value);
1890 *data_ptr++ = cpu_to_le32(r_value); 1964 *data_ptr++ = cpu_to_le32(r_value);
1891 s_value += s_stride; 1965 s_value += s_stride;
@@ -1894,16 +1968,16 @@ static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
1894} 1968}
1895 1969
1896static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha, 1970static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
1897 struct qla82xx_minidump_entry_hdr *entry_hdr, 1971 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1898 uint32_t **d_ptr) 1972 uint32_t **d_ptr)
1899{ 1973{
1900 uint32_t addr, r_addr, c_addr, t_r_addr; 1974 uint32_t addr, r_addr, c_addr, t_r_addr;
1901 uint32_t i, k, loop_count, t_value, r_cnt, r_value; 1975 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
1902 uint32_t c_value_w; 1976 uint32_t c_value_w;
1903 struct qla82xx_minidump_entry_cache *cache_hdr; 1977 struct qla8xxx_minidump_entry_cache *cache_hdr;
1904 uint32_t *data_ptr = *d_ptr; 1978 uint32_t *data_ptr = *d_ptr;
1905 1979
1906 cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr; 1980 cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
1907 loop_count = cache_hdr->op_count; 1981 loop_count = cache_hdr->op_count;
1908 r_addr = cache_hdr->read_addr; 1982 r_addr = cache_hdr->read_addr;
1909 c_addr = cache_hdr->control_addr; 1983 c_addr = cache_hdr->control_addr;
@@ -1914,11 +1988,11 @@ static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
1914 r_cnt = cache_hdr->read_ctrl.read_addr_cnt; 1988 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
1915 1989
1916 for (i = 0; i < loop_count; i++) { 1990 for (i = 0; i < loop_count; i++) {
1917 qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1); 1991 ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
1918 qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1); 1992 ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
1919 addr = r_addr; 1993 addr = r_addr;
1920 for (k = 0; k < r_cnt; k++) { 1994 for (k = 0; k < r_cnt; k++) {
1921 r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0); 1995 ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
1922 *data_ptr++ = cpu_to_le32(r_value); 1996 *data_ptr++ = cpu_to_le32(r_value);
1923 addr += cache_hdr->read_ctrl.read_addr_stride; 1997 addr += cache_hdr->read_ctrl.read_addr_stride;
1924 } 1998 }
@@ -1928,27 +2002,27 @@ static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
1928} 2002}
1929 2003
1930static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha, 2004static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
1931 struct qla82xx_minidump_entry_hdr *entry_hdr, 2005 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1932 uint32_t **d_ptr) 2006 uint32_t **d_ptr)
1933{ 2007{
1934 uint32_t s_addr, r_addr; 2008 uint32_t s_addr, r_addr;
1935 uint32_t r_stride, r_value, r_cnt, qid = 0; 2009 uint32_t r_stride, r_value, r_cnt, qid = 0;
1936 uint32_t i, k, loop_cnt; 2010 uint32_t i, k, loop_cnt;
1937 struct qla82xx_minidump_entry_queue *q_hdr; 2011 struct qla8xxx_minidump_entry_queue *q_hdr;
1938 uint32_t *data_ptr = *d_ptr; 2012 uint32_t *data_ptr = *d_ptr;
1939 2013
1940 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 2014 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1941 q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr; 2015 q_hdr = (struct qla8xxx_minidump_entry_queue *)entry_hdr;
1942 s_addr = q_hdr->select_addr; 2016 s_addr = q_hdr->select_addr;
1943 r_cnt = q_hdr->rd_strd.read_addr_cnt; 2017 r_cnt = q_hdr->rd_strd.read_addr_cnt;
1944 r_stride = q_hdr->rd_strd.read_addr_stride; 2018 r_stride = q_hdr->rd_strd.read_addr_stride;
1945 loop_cnt = q_hdr->op_count; 2019 loop_cnt = q_hdr->op_count;
1946 2020
1947 for (i = 0; i < loop_cnt; i++) { 2021 for (i = 0; i < loop_cnt; i++) {
1948 qla4_8xxx_md_rw_32(ha, s_addr, qid, 1); 2022 ha->isp_ops->wr_reg_indirect(ha, s_addr, qid);
1949 r_addr = q_hdr->read_addr; 2023 r_addr = q_hdr->read_addr;
1950 for (k = 0; k < r_cnt; k++) { 2024 for (k = 0; k < r_cnt; k++) {
1951 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0); 2025 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
1952 *data_ptr++ = cpu_to_le32(r_value); 2026 *data_ptr++ = cpu_to_le32(r_value);
1953 r_addr += r_stride; 2027 r_addr += r_stride;
1954 } 2028 }
@@ -1960,17 +2034,17 @@ static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
1960#define MD_DIRECT_ROM_WINDOW 0x42110030 2034#define MD_DIRECT_ROM_WINDOW 0x42110030
1961#define MD_DIRECT_ROM_READ_BASE 0x42150000 2035#define MD_DIRECT_ROM_READ_BASE 0x42150000
1962 2036
1963static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha, 2037static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha,
1964 struct qla82xx_minidump_entry_hdr *entry_hdr, 2038 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1965 uint32_t **d_ptr) 2039 uint32_t **d_ptr)
1966{ 2040{
1967 uint32_t r_addr, r_value; 2041 uint32_t r_addr, r_value;
1968 uint32_t i, loop_cnt; 2042 uint32_t i, loop_cnt;
1969 struct qla82xx_minidump_entry_rdrom *rom_hdr; 2043 struct qla8xxx_minidump_entry_rdrom *rom_hdr;
1970 uint32_t *data_ptr = *d_ptr; 2044 uint32_t *data_ptr = *d_ptr;
1971 2045
1972 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 2046 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1973 rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr; 2047 rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
1974 r_addr = rom_hdr->read_addr; 2048 r_addr = rom_hdr->read_addr;
1975 loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); 2049 loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
1976 2050
@@ -1979,11 +2053,11 @@ static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
1979 __func__, r_addr, loop_cnt)); 2053 __func__, r_addr, loop_cnt));
1980 2054
1981 for (i = 0; i < loop_cnt; i++) { 2055 for (i = 0; i < loop_cnt; i++) {
1982 qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, 2056 ha->isp_ops->wr_reg_indirect(ha, MD_DIRECT_ROM_WINDOW,
1983 (r_addr & 0xFFFF0000), 1); 2057 (r_addr & 0xFFFF0000));
1984 r_value = qla4_8xxx_md_rw_32(ha, 2058 ha->isp_ops->rd_reg_indirect(ha,
1985 MD_DIRECT_ROM_READ_BASE + 2059 MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF),
1986 (r_addr & 0x0000FFFF), 0, 0); 2060 &r_value);
1987 *data_ptr++ = cpu_to_le32(r_value); 2061 *data_ptr++ = cpu_to_le32(r_value);
1988 r_addr += sizeof(uint32_t); 2062 r_addr += sizeof(uint32_t);
1989 } 2063 }
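The 82xx rdrom handler above reads flash through a 64 KB window: the upper half of the address selects the window via MD_DIRECT_ROM_WINDOW and the low 16 bits index into MD_DIRECT_ROM_READ_BASE. A small model of that address split over an array standing in for the ROM; only the arithmetic is the point, and set_window()/read_window() are made up.

#include <stdint.h>
#include <stdio.h>

/* Fake 256 KB ROM, filled so each word encodes its own byte address. */
static uint32_t rom[0x40000 / 4];

static uint32_t window_base;		/* what MD_DIRECT_ROM_WINDOW would hold */

static void set_window(uint32_t val)	{ window_base = val; }

static uint32_t read_window(uint32_t low_off)
{
	/* hardware maps READ_BASE + low 16 bits onto window_base + offset */
	return rom[(window_base + low_off) / 4];
}

int main(void)
{
	uint32_t i, r_addr = 0x1fff8;	/* crosses a 64 KB window boundary */

	for (i = 0; i < sizeof(rom) / sizeof(rom[0]); i++)
		rom[i] = i * 4;

	for (i = 0; i < 4; i++) {
		set_window(r_addr & 0xFFFF0000);	/* select the window */
		uint32_t val = read_window(r_addr & 0x0000FFFF);
		printf("flash[0x%05x] = 0x%08x\n", r_addr, val);
		r_addr += sizeof(uint32_t);
	}
	return 0;
}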
@@ -1995,17 +2069,17 @@ static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
1995#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 2069#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
1996 2070
1997static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, 2071static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
1998 struct qla82xx_minidump_entry_hdr *entry_hdr, 2072 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1999 uint32_t **d_ptr) 2073 uint32_t **d_ptr)
2000{ 2074{
2001 uint32_t r_addr, r_value, r_data; 2075 uint32_t r_addr, r_value, r_data;
2002 uint32_t i, j, loop_cnt; 2076 uint32_t i, j, loop_cnt;
2003 struct qla82xx_minidump_entry_rdmem *m_hdr; 2077 struct qla8xxx_minidump_entry_rdmem *m_hdr;
2004 unsigned long flags; 2078 unsigned long flags;
2005 uint32_t *data_ptr = *d_ptr; 2079 uint32_t *data_ptr = *d_ptr;
2006 2080
2007 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 2081 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2008 m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr; 2082 m_hdr = (struct qla8xxx_minidump_entry_rdmem *)entry_hdr;
2009 r_addr = m_hdr->read_addr; 2083 r_addr = m_hdr->read_addr;
2010 loop_cnt = m_hdr->read_data_size/16; 2084 loop_cnt = m_hdr->read_data_size/16;
2011 2085
@@ -2033,17 +2107,19 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2033 2107
2034 write_lock_irqsave(&ha->hw_lock, flags); 2108 write_lock_irqsave(&ha->hw_lock, flags);
2035 for (i = 0; i < loop_cnt; i++) { 2109 for (i = 0; i < loop_cnt; i++) {
2036 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1); 2110 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
2111 r_addr);
2037 r_value = 0; 2112 r_value = 0;
2038 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1); 2113 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI,
2114 r_value);
2039 r_value = MIU_TA_CTL_ENABLE; 2115 r_value = MIU_TA_CTL_ENABLE;
2040 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); 2116 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
2041 r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; 2117 r_value = MIU_TA_CTL_START_ENABLE;
2042 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); 2118 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
2043 2119
2044 for (j = 0; j < MAX_CTL_CHECK; j++) { 2120 for (j = 0; j < MAX_CTL_CHECK; j++) {
2045 r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, 2121 ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
2046 0, 0); 2122 &r_value);
2047 if ((r_value & MIU_TA_CTL_BUSY) == 0) 2123 if ((r_value & MIU_TA_CTL_BUSY) == 0)
2048 break; 2124 break;
2049 } 2125 }
@@ -2057,9 +2133,9 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2057 } 2133 }
2058 2134
2059 for (j = 0; j < 4; j++) { 2135 for (j = 0; j < 4; j++) {
2060 r_data = qla4_8xxx_md_rw_32(ha, 2136 ha->isp_ops->rd_reg_indirect(ha,
2061 MD_MIU_TEST_AGT_RDDATA[j], 2137 MD_MIU_TEST_AGT_RDDATA[j],
2062 0, 0); 2138 &r_data);
2063 *data_ptr++ = cpu_to_le32(r_data); 2139 *data_ptr++ = cpu_to_le32(r_data);
2064 } 2140 }
2065 2141
@@ -2074,25 +2150,215 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2074 return QLA_SUCCESS; 2150 return QLA_SUCCESS;
2075} 2151}
2076 2152
2077static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha, 2153static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
2078 struct qla82xx_minidump_entry_hdr *entry_hdr, 2154 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2079 int index) 2155 int index)
2080{ 2156{
2081 entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; 2157 entry_hdr->d_ctrl.driver_flags |= QLA8XXX_DBG_SKIPPED_FLAG;
2082 DEBUG2(ql4_printk(KERN_INFO, ha, 2158 DEBUG2(ql4_printk(KERN_INFO, ha,
2083 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", 2159 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
2084 ha->host_no, index, entry_hdr->entry_type, 2160 ha->host_no, index, entry_hdr->entry_type,
2085 entry_hdr->d_ctrl.entry_capture_mask)); 2161 entry_hdr->d_ctrl.entry_capture_mask));
2086} 2162}
2087 2163
2164/* ISP83xx functions to process new minidump entries... */
2165static uint32_t qla83xx_minidump_process_pollrd(struct scsi_qla_host *ha,
2166 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2167 uint32_t **d_ptr)
2168{
2169 uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
2170 uint16_t s_stride, i;
2171 uint32_t *data_ptr = *d_ptr;
2172 uint32_t rval = QLA_SUCCESS;
2173 struct qla83xx_minidump_entry_pollrd *pollrd_hdr;
2174
2175 pollrd_hdr = (struct qla83xx_minidump_entry_pollrd *)entry_hdr;
2176 s_addr = le32_to_cpu(pollrd_hdr->select_addr);
2177 r_addr = le32_to_cpu(pollrd_hdr->read_addr);
2178 s_value = le32_to_cpu(pollrd_hdr->select_value);
2179 s_stride = le32_to_cpu(pollrd_hdr->select_value_stride);
2180
2181 poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
2182 poll_mask = le32_to_cpu(pollrd_hdr->poll_mask);
2183
2184 for (i = 0; i < le32_to_cpu(pollrd_hdr->op_count); i++) {
2185 ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
2186 poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
2187 while (1) {
2188 ha->isp_ops->rd_reg_indirect(ha, s_addr, &r_value);
2189
2190 if ((r_value & poll_mask) != 0) {
2191 break;
2192 } else {
2193 msleep(1);
2194 if (--poll_wait == 0) {
2195 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
2196 __func__);
2197 rval = QLA_ERROR;
2198 goto exit_process_pollrd;
2199 }
2200 }
2201 }
2202 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
2203 *data_ptr++ = cpu_to_le32(s_value);
2204 *data_ptr++ = cpu_to_le32(r_value);
2205 s_value += s_stride;
2206 }
2207
2208 *d_ptr = data_ptr;
2209
2210exit_process_pollrd:
2211 return rval;
2212}
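Unlike the older handlers, the new ISP83xx entry parsers run every template field through le32_to_cpu() before use, since the minidump template is stored little-endian in flash. A userspace equivalent for one cut-down header, with le32toh() playing the role of le32_to_cpu(); the struct below is a reduced stand-in, not the driver's full qla83xx_minidump_entry_pollrd.

#include <endian.h>	/* le32toh(): userspace analogue of le32_to_cpu() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Cut-down pollrd-style header as it sits in flash: all fields LE32. */
struct pollrd_hdr_le {
	uint32_t select_addr;
	uint32_t read_addr;
	uint32_t select_value;
	uint32_t poll_wait;
	uint32_t poll_mask;
	uint32_t op_count;
};

int main(void)
{
	/* Raw bytes as they would be read from flash (little-endian). */
	uint8_t raw[sizeof(struct pollrd_hdr_le)] = {
		0x30, 0x00, 0x11, 0x42,   0x00, 0x00, 0x15, 0x42,
		0x01, 0x00, 0x00, 0x00,   0x0a, 0x00, 0x00, 0x00,
		0x01, 0x00, 0x00, 0x00,   0x04, 0x00, 0x00, 0x00,
	};
	struct pollrd_hdr_le hdr;

	memcpy(&hdr, raw, sizeof(hdr));

	/* Convert each field before use, as the 83xx handlers do. */
	printf("select_addr 0x%08x read_addr 0x%08x op_count %u\n",
	       (unsigned)le32toh(hdr.select_addr),
	       (unsigned)le32toh(hdr.read_addr),
	       (unsigned)le32toh(hdr.op_count));
	return 0;
}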
2213
2214static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
2215 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2216 uint32_t **d_ptr)
2217{
2218 uint32_t sel_val1, sel_val2, t_sel_val, data, i;
2219 uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
2220 struct qla83xx_minidump_entry_rdmux2 *rdmux2_hdr;
2221 uint32_t *data_ptr = *d_ptr;
2222
2223 rdmux2_hdr = (struct qla83xx_minidump_entry_rdmux2 *)entry_hdr;
2224 sel_val1 = le32_to_cpu(rdmux2_hdr->select_value_1);
2225 sel_val2 = le32_to_cpu(rdmux2_hdr->select_value_2);
2226 sel_addr1 = le32_to_cpu(rdmux2_hdr->select_addr_1);
2227 sel_addr2 = le32_to_cpu(rdmux2_hdr->select_addr_2);
2228 sel_val_mask = le32_to_cpu(rdmux2_hdr->select_value_mask);
2229 read_addr = le32_to_cpu(rdmux2_hdr->read_addr);
2230
2231 for (i = 0; i < rdmux2_hdr->op_count; i++) {
2232 ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val1);
2233 t_sel_val = sel_val1 & sel_val_mask;
2234 *data_ptr++ = cpu_to_le32(t_sel_val);
2235
2236 ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
2237 ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
2238
2239 *data_ptr++ = cpu_to_le32(data);
2240
2241 ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val2);
2242 t_sel_val = sel_val2 & sel_val_mask;
2243 *data_ptr++ = cpu_to_le32(t_sel_val);
2244
2245 ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
2246 ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
2247
2248 *data_ptr++ = cpu_to_le32(data);
2249
2250 sel_val1 += rdmux2_hdr->select_value_stride;
2251 sel_val2 += rdmux2_hdr->select_value_stride;
2252 }
2253
2254 *d_ptr = data_ptr;
2255}
2256
2257static uint32_t qla83xx_minidump_process_pollrdmwr(struct scsi_qla_host *ha,
2258 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2259 uint32_t **d_ptr)
2260{
2261 uint32_t poll_wait, poll_mask, r_value, data;
2262 uint32_t addr_1, addr_2, value_1, value_2;
2263 uint32_t *data_ptr = *d_ptr;
2264 uint32_t rval = QLA_SUCCESS;
2265 struct qla83xx_minidump_entry_pollrdmwr *poll_hdr;
2266
2267 poll_hdr = (struct qla83xx_minidump_entry_pollrdmwr *)entry_hdr;
2268 addr_1 = le32_to_cpu(poll_hdr->addr_1);
2269 addr_2 = le32_to_cpu(poll_hdr->addr_2);
2270 value_1 = le32_to_cpu(poll_hdr->value_1);
2271 value_2 = le32_to_cpu(poll_hdr->value_2);
2272 poll_mask = le32_to_cpu(poll_hdr->poll_mask);
2273
2274 ha->isp_ops->wr_reg_indirect(ha, addr_1, value_1);
2275
2276 poll_wait = le32_to_cpu(poll_hdr->poll_wait);
2277 while (1) {
2278 ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
2279
2280 if ((r_value & poll_mask) != 0) {
2281 break;
2282 } else {
2283 msleep(1);
2284 if (--poll_wait == 0) {
2285 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_1\n",
2286 __func__);
2287 rval = QLA_ERROR;
2288 goto exit_process_pollrdmwr;
2289 }
2290 }
2291 }
2292
2293 ha->isp_ops->rd_reg_indirect(ha, addr_2, &data);
2294 data &= le32_to_cpu(poll_hdr->modify_mask);
2295 ha->isp_ops->wr_reg_indirect(ha, addr_2, data);
2296 ha->isp_ops->wr_reg_indirect(ha, addr_1, value_2);
2297
2298 poll_wait = le32_to_cpu(poll_hdr->poll_wait);
2299 while (1) {
2300 ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
2301
2302 if ((r_value & poll_mask) != 0) {
2303 break;
2304 } else {
2305 msleep(1);
2306 if (--poll_wait == 0) {
2307 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_2\n",
2308 __func__);
2309 rval = QLA_ERROR;
2310 goto exit_process_pollrdmwr;
2311 }
2312 }
2313 }
2314
2315 *data_ptr++ = cpu_to_le32(addr_2);
2316 *data_ptr++ = cpu_to_le32(data);
2317 *d_ptr = data_ptr;
2318
2319exit_process_pollrdmwr:
2320 return rval;
2321}
2322
2323static uint32_t qla4_83xx_minidump_process_rdrom(struct scsi_qla_host *ha,
2324 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2325 uint32_t **d_ptr)
2326{
2327 uint32_t fl_addr, u32_count, rval;
2328 struct qla8xxx_minidump_entry_rdrom *rom_hdr;
2329 uint32_t *data_ptr = *d_ptr;
2330
2331 rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
2332 fl_addr = le32_to_cpu(rom_hdr->read_addr);
2333 u32_count = le32_to_cpu(rom_hdr->read_data_size)/sizeof(uint32_t);
2334
2335 DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
2336 __func__, fl_addr, u32_count));
2337
2338 rval = qla4_83xx_lockless_flash_read_u32(ha, fl_addr,
2339 (u8 *)(data_ptr), u32_count);
2340
2341 if (rval == QLA_ERROR) {
 		2342	 		ql4_printk(KERN_ERR, ha, "%s: Flash Read Error, Count=%d\n",
2343 __func__, u32_count);
2344 goto exit_process_rdrom;
2345 }
2346
2347 data_ptr += u32_count;
2348 *d_ptr = data_ptr;
2349
2350exit_process_rdrom:
2351 return rval;
2352}
2353
2088/** 2354/**
2089 * qla82xx_collect_md_data - Retrieve firmware minidump data. 2355 * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
2090 * @ha: pointer to adapter structure 2356 * @ha: pointer to adapter structure
2091 **/ 2357 **/
2092static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) 2358static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2093{ 2359{
2094 int num_entry_hdr = 0; 2360 int num_entry_hdr = 0;
2095 struct qla82xx_minidump_entry_hdr *entry_hdr; 2361 struct qla8xxx_minidump_entry_hdr *entry_hdr;
2096 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr; 2362 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
2097 uint32_t *data_ptr; 2363 uint32_t *data_ptr;
2098 uint32_t data_collected = 0; 2364 uint32_t data_collected = 0;
@@ -2128,10 +2394,14 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2128 timestamp = (u32)(jiffies_to_msecs(now) / 1000); 2394 timestamp = (u32)(jiffies_to_msecs(now) / 1000);
2129 tmplt_hdr->driver_timestamp = timestamp; 2395 tmplt_hdr->driver_timestamp = timestamp;
2130 2396
2131 entry_hdr = (struct qla82xx_minidump_entry_hdr *) 2397 entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
2132 (((uint8_t *)ha->fw_dump_tmplt_hdr) + 2398 (((uint8_t *)ha->fw_dump_tmplt_hdr) +
2133 tmplt_hdr->first_entry_offset); 2399 tmplt_hdr->first_entry_offset);
2134 2400
2401 if (is_qla8032(ha))
2402 tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] =
2403 tmplt_hdr->ocm_window_reg[ha->func_num];
2404
2135 /* Walk through the entry headers - validate/perform required action */ 2405 /* Walk through the entry headers - validate/perform required action */
2136 for (i = 0; i < num_entry_hdr; i++) { 2406 for (i = 0; i < num_entry_hdr; i++) {
2137 if (data_collected >= ha->fw_dump_size) { 2407 if (data_collected >= ha->fw_dump_size) {
@@ -2144,7 +2414,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2144 if (!(entry_hdr->d_ctrl.entry_capture_mask & 2414 if (!(entry_hdr->d_ctrl.entry_capture_mask &
2145 ha->fw_dump_capture_mask)) { 2415 ha->fw_dump_capture_mask)) {
2146 entry_hdr->d_ctrl.driver_flags |= 2416 entry_hdr->d_ctrl.driver_flags |=
2147 QLA82XX_DBG_SKIPPED_FLAG; 2417 QLA8XXX_DBG_SKIPPED_FLAG;
2148 goto skip_nxt_entry; 2418 goto skip_nxt_entry;
2149 } 2419 }
2150 2420
@@ -2157,65 +2427,105 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2157 * debug data 2427 * debug data
2158 */ 2428 */
2159 switch (entry_hdr->entry_type) { 2429 switch (entry_hdr->entry_type) {
2160 case QLA82XX_RDEND: 2430 case QLA8XXX_RDEND:
2161 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2431 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2162 break; 2432 break;
2163 case QLA82XX_CNTRL: 2433 case QLA8XXX_CNTRL:
2164 rval = qla4_8xxx_minidump_process_control(ha, 2434 rval = qla4_8xxx_minidump_process_control(ha,
2165 entry_hdr); 2435 entry_hdr);
2166 if (rval != QLA_SUCCESS) { 2436 if (rval != QLA_SUCCESS) {
2167 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2437 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2168 goto md_failed; 2438 goto md_failed;
2169 } 2439 }
2170 break; 2440 break;
2171 case QLA82XX_RDCRB: 2441 case QLA8XXX_RDCRB:
2172 qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr, 2442 qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
2173 &data_ptr); 2443 &data_ptr);
2174 break; 2444 break;
2175 case QLA82XX_RDMEM: 2445 case QLA8XXX_RDMEM:
2176 rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, 2446 rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
2177 &data_ptr); 2447 &data_ptr);
2178 if (rval != QLA_SUCCESS) { 2448 if (rval != QLA_SUCCESS) {
2179 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2449 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2180 goto md_failed; 2450 goto md_failed;
2181 } 2451 }
2182 break; 2452 break;
2183 case QLA82XX_BOARD: 2453 case QLA8XXX_BOARD:
2184 case QLA82XX_RDROM: 2454 case QLA8XXX_RDROM:
2185 qla4_8xxx_minidump_process_rdrom(ha, entry_hdr, 2455 if (is_qla8022(ha)) {
2186 &data_ptr); 2456 qla4_82xx_minidump_process_rdrom(ha, entry_hdr,
2457 &data_ptr);
2458 } else if (is_qla8032(ha)) {
2459 rval = qla4_83xx_minidump_process_rdrom(ha,
2460 entry_hdr,
2461 &data_ptr);
2462 if (rval != QLA_SUCCESS)
2463 qla4_8xxx_mark_entry_skipped(ha,
2464 entry_hdr,
2465 i);
2466 }
2187 break; 2467 break;
2188 case QLA82XX_L2DTG: 2468 case QLA8XXX_L2DTG:
2189 case QLA82XX_L2ITG: 2469 case QLA8XXX_L2ITG:
2190 case QLA82XX_L2DAT: 2470 case QLA8XXX_L2DAT:
2191 case QLA82XX_L2INS: 2471 case QLA8XXX_L2INS:
2192 rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr, 2472 rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
2193 &data_ptr); 2473 &data_ptr);
2194 if (rval != QLA_SUCCESS) { 2474 if (rval != QLA_SUCCESS) {
2195 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2475 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2196 goto md_failed; 2476 goto md_failed;
2197 } 2477 }
2198 break; 2478 break;
2199 case QLA82XX_L1DAT: 2479 case QLA8XXX_L1DTG:
2200 case QLA82XX_L1INS: 2480 case QLA8XXX_L1ITG:
2481 case QLA8XXX_L1DAT:
2482 case QLA8XXX_L1INS:
2201 qla4_8xxx_minidump_process_l1cache(ha, entry_hdr, 2483 qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
2202 &data_ptr); 2484 &data_ptr);
2203 break; 2485 break;
2204 case QLA82XX_RDOCM: 2486 case QLA8XXX_RDOCM:
2205 qla4_8xxx_minidump_process_rdocm(ha, entry_hdr, 2487 qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
2206 &data_ptr); 2488 &data_ptr);
2207 break; 2489 break;
2208 case QLA82XX_RDMUX: 2490 case QLA8XXX_RDMUX:
2209 qla4_8xxx_minidump_process_rdmux(ha, entry_hdr, 2491 qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
2210 &data_ptr); 2492 &data_ptr);
2211 break; 2493 break;
2212 case QLA82XX_QUEUE: 2494 case QLA8XXX_QUEUE:
2213 qla4_8xxx_minidump_process_queue(ha, entry_hdr, 2495 qla4_8xxx_minidump_process_queue(ha, entry_hdr,
2214 &data_ptr); 2496 &data_ptr);
2215 break; 2497 break;
2216 case QLA82XX_RDNOP: 2498 case QLA83XX_POLLRD:
2499 if (!is_qla8032(ha)) {
2500 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2501 break;
2502 }
2503 rval = qla83xx_minidump_process_pollrd(ha, entry_hdr,
2504 &data_ptr);
2505 if (rval != QLA_SUCCESS)
2506 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2507 break;
2508 case QLA83XX_RDMUX2:
2509 if (!is_qla8032(ha)) {
2510 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2511 break;
2512 }
2513 qla83xx_minidump_process_rdmux2(ha, entry_hdr,
2514 &data_ptr);
2515 break;
2516 case QLA83XX_POLLRDMWR:
2517 if (!is_qla8032(ha)) {
2518 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2519 break;
2520 }
2521 rval = qla83xx_minidump_process_pollrdmwr(ha, entry_hdr,
2522 &data_ptr);
2523 if (rval != QLA_SUCCESS)
2524 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2525 break;
2526 case QLA8XXX_RDNOP:
2217 default: 2527 default:
2218 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2528 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2219 break; 2529 break;
2220 } 2530 }
2221 2531
@@ -2224,7 +2534,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2224 ha->fw_dump_tmplt_size)); 2534 ha->fw_dump_tmplt_size));
2225skip_nxt_entry: 2535skip_nxt_entry:
2226 /* next entry in the template */ 2536 /* next entry in the template */
2227 entry_hdr = (struct qla82xx_minidump_entry_hdr *) 2537 entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
2228 (((uint8_t *)entry_hdr) + 2538 (((uint8_t *)entry_hdr) +
2229 entry_hdr->entry_size); 2539 entry_hdr->entry_size);
2230 } 2540 }
@@ -2264,33 +2574,45 @@ static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
2264 kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp); 2574 kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
2265} 2575}
2266 2576
2577void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)
2578{
2579 if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
2580 !test_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
2581 if (!qla4_8xxx_collect_md_data(ha)) {
2582 qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
2583 set_bit(AF_82XX_FW_DUMPED, &ha->flags);
2584 } else {
2585 ql4_printk(KERN_INFO, ha, "%s: Unable to collect minidump\n",
2586 __func__);
2587 }
2588 }
2589}
2590
2267/** 2591/**
2268 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw 2592 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
2269 * @ha: pointer to adapter structure 2593 * @ha: pointer to adapter structure
2270 * 2594 *
2271 * Note: IDC lock must be held upon entry 2595 * Note: IDC lock must be held upon entry
2272 **/ 2596 **/
2273static int 2597int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
2274qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
2275{ 2598{
2276 int rval = QLA_ERROR; 2599 int rval = QLA_ERROR;
2277 int i, timeout; 2600 int i, timeout;
2278 uint32_t old_count, count; 2601 uint32_t old_count, count, idc_ctrl;
2279 int need_reset = 0, peg_stuck = 1; 2602 int need_reset = 0, peg_stuck = 1;
2280 2603
2281 need_reset = qla4_8xxx_need_reset(ha); 2604 need_reset = ha->isp_ops->need_reset(ha);
2282 2605 old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
2283 old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2284 2606
2285 for (i = 0; i < 10; i++) { 2607 for (i = 0; i < 10; i++) {
2286 timeout = msleep_interruptible(200); 2608 timeout = msleep_interruptible(200);
2287 if (timeout) { 2609 if (timeout) {
2288 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2610 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
2289 QLA82XX_DEV_FAILED); 2611 QLA8XXX_DEV_FAILED);
2290 return rval; 2612 return rval;
2291 } 2613 }
2292 2614
2293 count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 2615 count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
2294 if (count != old_count) 2616 if (count != old_count)
2295 peg_stuck = 0; 2617 peg_stuck = 0;
2296 } 2618 }
@@ -2298,13 +2620,13 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
2298 if (need_reset) { 2620 if (need_reset) {
2299 /* We are trying to perform a recovery here. */ 2621 /* We are trying to perform a recovery here. */
2300 if (peg_stuck) 2622 if (peg_stuck)
2301 qla4_8xxx_rom_lock_recovery(ha); 2623 ha->isp_ops->rom_lock_recovery(ha);
2302 goto dev_initialize; 2624 goto dev_initialize;
2303 } else { 2625 } else {
2304 /* Start of day for this ha context. */ 2626 /* Start of day for this ha context. */
2305 if (peg_stuck) { 2627 if (peg_stuck) {
2306 /* Either we are the first or recovery in progress. */ 2628 /* Either we are the first or recovery in progress. */
2307 qla4_8xxx_rom_lock_recovery(ha); 2629 ha->isp_ops->rom_lock_recovery(ha);
2308 goto dev_initialize; 2630 goto dev_initialize;
2309 } else { 2631 } else {
2310 /* Firmware already running. */ 2632 /* Firmware already running. */
@@ -2316,46 +2638,53 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
2316dev_initialize: 2638dev_initialize:
2317 /* set to DEV_INITIALIZING */ 2639 /* set to DEV_INITIALIZING */
2318 ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); 2640 ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
2319 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING); 2641 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
2642 QLA8XXX_DEV_INITIALIZING);
2320 2643
2321 /* Driver that sets device state to initializating sets IDC version */ 2644 /*
2322 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION); 2645 * For ISP8324, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after
2323 2646 * device goes to INIT state.
2324 qla4_8xxx_idc_unlock(ha); 2647 */
2325 if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) && 2648 if (is_qla8032(ha)) {
2326 !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) { 2649 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
2327 if (!qla4_8xxx_collect_md_data(ha)) { 2650 if (idc_ctrl & GRACEFUL_RESET_BIT1) {
2328 qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP); 2651 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
2329 } else { 2652 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
2330 ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n"); 2653 set_bit(AF_83XX_NO_FW_DUMP, &ha->flags);
2331 clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
2332 } 2654 }
2333 } 2655 }
2334 rval = qla4_8xxx_try_start_fw(ha); 2656
2335 qla4_8xxx_idc_lock(ha); 2657 ha->isp_ops->idc_unlock(ha);
2658
2659 if (is_qla8022(ha))
2660 qla4_8xxx_get_minidump(ha);
2661
2662 rval = ha->isp_ops->restart_firmware(ha);
2663 ha->isp_ops->idc_lock(ha);
2336 2664
2337 if (rval != QLA_SUCCESS) { 2665 if (rval != QLA_SUCCESS) {
2338 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 2666 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
2339 qla4_8xxx_clear_drv_active(ha); 2667 qla4_8xxx_clear_drv_active(ha);
2340 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); 2668 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
2669 QLA8XXX_DEV_FAILED);
2341 return rval; 2670 return rval;
2342 } 2671 }
2343 2672
2344dev_ready: 2673dev_ready:
2345 ql4_printk(KERN_INFO, ha, "HW State: READY\n"); 2674 ql4_printk(KERN_INFO, ha, "HW State: READY\n");
2346 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); 2675 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
2347 2676
2348 return rval; 2677 return rval;
2349} 2678}
2350 2679
2351/** 2680/**
2352 * qla4_8xxx_need_reset_handler - Code to start reset sequence 2681 * qla4_82xx_need_reset_handler - Code to start reset sequence
2353 * @ha: pointer to adapter structure 2682 * @ha: pointer to adapter structure
2354 * 2683 *
2355 * Note: IDC lock must be held upon entry 2684 * Note: IDC lock must be held upon entry
2356 **/ 2685 **/
2357static void 2686static void
2358qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha) 2687qla4_82xx_need_reset_handler(struct scsi_qla_host *ha)
2359{ 2688{
2360 uint32_t dev_state, drv_state, drv_active; 2689 uint32_t dev_state, drv_state, drv_active;
2361 uint32_t active_mask = 0xFFFFFFFF; 2690 uint32_t active_mask = 0xFFFFFFFF;
@@ -2365,12 +2694,12 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
2365 "Performing ISP error recovery\n"); 2694 "Performing ISP error recovery\n");
2366 2695
2367 if (test_and_clear_bit(AF_ONLINE, &ha->flags)) { 2696 if (test_and_clear_bit(AF_ONLINE, &ha->flags)) {
2368 qla4_8xxx_idc_unlock(ha); 2697 qla4_82xx_idc_unlock(ha);
2369 ha->isp_ops->disable_intrs(ha); 2698 ha->isp_ops->disable_intrs(ha);
2370 qla4_8xxx_idc_lock(ha); 2699 qla4_82xx_idc_lock(ha);
2371 } 2700 }
2372 2701
2373 if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) { 2702 if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
2374 DEBUG2(ql4_printk(KERN_INFO, ha, 2703 DEBUG2(ql4_printk(KERN_INFO, ha,
2375 "%s(%ld): reset acknowledged\n", 2704 "%s(%ld): reset acknowledged\n",
2376 __func__, ha->host_no)); 2705 __func__, ha->host_no));
@@ -2382,8 +2711,8 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
2382 /* wait for 10 seconds for reset ack from all functions */ 2711 /* wait for 10 seconds for reset ack from all functions */
2383 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); 2712 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
2384 2713
2385 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2714 drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2386 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2715 drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2387 2716
2388 ql4_printk(KERN_INFO, ha, 2717 ql4_printk(KERN_INFO, ha,
2389 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", 2718 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
@@ -2401,31 +2730,31 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
2401 * When reset_owner times out, check which functions 2730 * When reset_owner times out, check which functions
2402 * acked/did not ack 2731 * acked/did not ack
2403 */ 2732 */
2404 if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) { 2733 if (test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
2405 ql4_printk(KERN_INFO, ha, 2734 ql4_printk(KERN_INFO, ha,
2406 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", 2735 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
2407 __func__, ha->host_no, drv_state, 2736 __func__, ha->host_no, drv_state,
2408 drv_active); 2737 drv_active);
2409 } 2738 }
2410 qla4_8xxx_idc_unlock(ha); 2739 qla4_82xx_idc_unlock(ha);
2411 msleep(1000); 2740 msleep(1000);
2412 qla4_8xxx_idc_lock(ha); 2741 qla4_82xx_idc_lock(ha);
2413 2742
2414 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2743 drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2415 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2744 drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2416 } 2745 }
2417 2746
2418 /* Clear RESET OWNER as we are not going to use it any further */ 2747 /* Clear RESET OWNER as we are not going to use it any further */
2419 clear_bit(AF_82XX_RST_OWNER, &ha->flags); 2748 clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
2420 2749
2421 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2750 dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2422 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state, 2751 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
2423 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 2752 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
2424 2753
2425 /* Force to DEV_COLD unless someone else is starting a reset */ 2754 /* Force to DEV_COLD unless someone else is starting a reset */
2426 if (dev_state != QLA82XX_DEV_INITIALIZING) { 2755 if (dev_state != QLA8XXX_DEV_INITIALIZING) {
2427 ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); 2756 ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
2428 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 2757 qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
2429 qla4_8xxx_set_rst_ready(ha); 2758 qla4_8xxx_set_rst_ready(ha);
2430 } 2759 }
2431} 2760}
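[Editor's note] qla4_82xx_need_reset_handler() above waits for every active function to acknowledge the reset before forcing DEV_COLD. A hedged sketch of that poll-until-deadline pattern; the reader callbacks and the simple bitmask comparison are stand-ins, not the driver's exact DRV_STATE/DRV_ACTIVE encoding:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

/* Hypothetical readers standing in for the CRB_DRV_STATE/DRV_ACTIVE reads. */
static int demo_wait_for_reset_ack(unsigned long timeout_secs,
				   u32 (*rd_drv_state)(void),
				   u32 (*rd_drv_active)(void))
{
	unsigned long deadline = jiffies + timeout_secs * HZ;
	u32 drv_state = rd_drv_state();
	u32 drv_active = rd_drv_active();

	/* Simplified ack test: every active function has set its bit. */
	while ((drv_state & drv_active) != drv_active) {
		if (time_after_eq(jiffies, deadline))
			return -ETIMEDOUT;
		msleep(1000);	/* the real handler drops the IDC lock here */
		drv_state = rd_drv_state();
		drv_active = rd_drv_active();
	}

	return 0;
}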
@@ -2437,9 +2766,104 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
2437void 2766void
2438qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha) 2767qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha)
2439{ 2768{
2440 qla4_8xxx_idc_lock(ha); 2769 ha->isp_ops->idc_lock(ha);
2441 qla4_8xxx_set_qsnt_ready(ha); 2770 qla4_8xxx_set_qsnt_ready(ha);
2442 qla4_8xxx_idc_unlock(ha); 2771 ha->isp_ops->idc_unlock(ha);
2772}
2773
2774static void qla4_82xx_set_idc_ver(struct scsi_qla_host *ha)
2775{
2776 int idc_ver;
2777 uint32_t drv_active;
2778
2779 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
2780 if (drv_active == (1 << (ha->func_num * 4))) {
2781 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION,
2782 QLA82XX_IDC_VERSION);
2783 ql4_printk(KERN_INFO, ha,
2784 "%s: IDC version updated to %d\n", __func__,
2785 QLA82XX_IDC_VERSION);
2786 } else {
2787 idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
2788 if (QLA82XX_IDC_VERSION != idc_ver) {
2789 ql4_printk(KERN_INFO, ha,
2790 "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
2791 __func__, QLA82XX_IDC_VERSION, idc_ver);
2792 }
2793 }
2794}
2795
2796static int qla4_83xx_set_idc_ver(struct scsi_qla_host *ha)
2797{
2798 int idc_ver;
2799 uint32_t drv_active;
2800 int rval = QLA_SUCCESS;
2801
2802 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
2803 if (drv_active == (1 << ha->func_num)) {
2804 idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
2805 idc_ver &= (~0xFF);
2806 idc_ver |= QLA83XX_IDC_VER_MAJ_VALUE;
2807 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, idc_ver);
2808 ql4_printk(KERN_INFO, ha,
2809 "%s: IDC version updated to %d\n", __func__,
2810 idc_ver);
2811 } else {
2812 idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
2813 idc_ver &= 0xFF;
2814 if (QLA83XX_IDC_VER_MAJ_VALUE != idc_ver) {
2815 ql4_printk(KERN_INFO, ha,
2816 "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
2817 __func__, QLA83XX_IDC_VER_MAJ_VALUE,
2818 idc_ver);
2819 rval = QLA_ERROR;
2820 goto exit_set_idc_ver;
2821 }
2822 }
2823
2824 /* Update IDC_MINOR_VERSION */
2825 idc_ver = qla4_83xx_rd_reg(ha, QLA83XX_CRB_IDC_VER_MINOR);
2826 idc_ver &= ~(0x03 << (ha->func_num * 2));
2827 idc_ver |= (QLA83XX_IDC_VER_MIN_VALUE << (ha->func_num * 2));
2828 qla4_83xx_wr_reg(ha, QLA83XX_CRB_IDC_VER_MINOR, idc_ver);
2829
2830exit_set_idc_ver:
2831 return rval;
2832}
2833
2834int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
2835{
2836 uint32_t drv_active;
2837 int rval = QLA_SUCCESS;
2838
2839 if (test_bit(AF_INIT_DONE, &ha->flags))
2840 goto exit_update_idc_reg;
2841
2842 ha->isp_ops->idc_lock(ha);
2843 qla4_8xxx_set_drv_active(ha);
2844
2845 /*
2846 * If we are the first driver to load and
2847 * ql4xdontresethba is not set, clear IDC_CTRL BIT0.
2848 */
2849 if (is_qla8032(ha)) {
2850 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
2851 if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba)
2852 qla4_83xx_clear_idc_dontreset(ha);
2853 }
2854
2855 if (is_qla8022(ha)) {
2856 qla4_82xx_set_idc_ver(ha);
2857 } else if (is_qla8032(ha)) {
2858 rval = qla4_83xx_set_idc_ver(ha);
2859 if (rval == QLA_ERROR)
2860 qla4_8xxx_clear_drv_active(ha);
2861 }
2862
2863 ha->isp_ops->idc_unlock(ha);
2864
2865exit_update_idc_reg:
2866 return rval;
2443} 2867}
2444 2868
2445/** 2869/**
@@ -2454,13 +2878,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
2454 int rval = QLA_SUCCESS; 2878 int rval = QLA_SUCCESS;
2455 unsigned long dev_init_timeout; 2879 unsigned long dev_init_timeout;
2456 2880
2457 if (!test_bit(AF_INIT_DONE, &ha->flags)) { 2881 rval = qla4_8xxx_update_idc_reg(ha);
2458 qla4_8xxx_idc_lock(ha); 2882 if (rval == QLA_ERROR)
2459 qla4_8xxx_set_drv_active(ha); 2883 goto exit_state_handler;
2460 qla4_8xxx_idc_unlock(ha);
2461 }
2462 2884
2463 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2885 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
2464 DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", 2886 DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
2465 dev_state, dev_state < MAX_STATES ? 2887 dev_state, dev_state < MAX_STATES ?
2466 qdev_state[dev_state] : "Unknown")); 2888 qdev_state[dev_state] : "Unknown"));
@@ -2468,7 +2890,7 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
2468 /* wait for 30 seconds for device to go ready */ 2890 /* wait for 30 seconds for device to go ready */
2469 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 2891 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
2470 2892
2471 qla4_8xxx_idc_lock(ha); 2893 ha->isp_ops->idc_lock(ha);
2472 while (1) { 2894 while (1) {
2473 2895
2474 if (time_after_eq(jiffies, dev_init_timeout)) { 2896 if (time_after_eq(jiffies, dev_init_timeout)) {
@@ -2477,65 +2899,75 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
2477 DRIVER_NAME, 2899 DRIVER_NAME,
2478 dev_state, dev_state < MAX_STATES ? 2900 dev_state, dev_state < MAX_STATES ?
2479 qdev_state[dev_state] : "Unknown"); 2901 qdev_state[dev_state] : "Unknown");
2480 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2902 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
2481 QLA82XX_DEV_FAILED); 2903 QLA8XXX_DEV_FAILED);
2482 } 2904 }
2483 2905
2484 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2906 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
2485 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", 2907 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
2486 dev_state, dev_state < MAX_STATES ? 2908 dev_state, dev_state < MAX_STATES ?
2487 qdev_state[dev_state] : "Unknown"); 2909 qdev_state[dev_state] : "Unknown");
2488 2910
2489 /* NOTE: Make sure idc unlocked upon exit of switch statement */ 2911 /* NOTE: Make sure idc unlocked upon exit of switch statement */
2490 switch (dev_state) { 2912 switch (dev_state) {
2491 case QLA82XX_DEV_READY: 2913 case QLA8XXX_DEV_READY:
2492 goto exit; 2914 goto exit;
2493 case QLA82XX_DEV_COLD: 2915 case QLA8XXX_DEV_COLD:
2494 rval = qla4_8xxx_device_bootstrap(ha); 2916 rval = qla4_8xxx_device_bootstrap(ha);
2495 goto exit; 2917 goto exit;
2496 case QLA82XX_DEV_INITIALIZING: 2918 case QLA8XXX_DEV_INITIALIZING:
2497 qla4_8xxx_idc_unlock(ha); 2919 ha->isp_ops->idc_unlock(ha);
2498 msleep(1000); 2920 msleep(1000);
2499 qla4_8xxx_idc_lock(ha); 2921 ha->isp_ops->idc_lock(ha);
2500 break; 2922 break;
2501 case QLA82XX_DEV_NEED_RESET: 2923 case QLA8XXX_DEV_NEED_RESET:
2502 if (!ql4xdontresethba) { 2924 /*
2503 qla4_8xxx_need_reset_handler(ha); 2925 * For ISP8324, if NEED_RESET is set by any driver,
2504 /* Update timeout value after need 2926 * it should be honored, irrespective of IDC_CTRL
2505 * reset handler */ 2927 * DONTRESET_BIT0
2506 dev_init_timeout = jiffies + 2928 */
2507 (ha->nx_dev_init_timeout * HZ); 2929 if (is_qla8032(ha)) {
2508 } else { 2930 qla4_83xx_need_reset_handler(ha);
2509 qla4_8xxx_idc_unlock(ha); 2931 } else if (is_qla8022(ha)) {
2510 msleep(1000); 2932 if (!ql4xdontresethba) {
2511 qla4_8xxx_idc_lock(ha); 2933 qla4_82xx_need_reset_handler(ha);
2934 /* Update timeout value after need
2935 * reset handler */
2936 dev_init_timeout = jiffies +
2937 (ha->nx_dev_init_timeout * HZ);
2938 } else {
2939 ha->isp_ops->idc_unlock(ha);
2940 msleep(1000);
2941 ha->isp_ops->idc_lock(ha);
2942 }
2512 } 2943 }
2513 break; 2944 break;
2514 case QLA82XX_DEV_NEED_QUIESCENT: 2945 case QLA8XXX_DEV_NEED_QUIESCENT:
2515 /* idc locked/unlocked in handler */ 2946 /* idc locked/unlocked in handler */
2516 qla4_8xxx_need_qsnt_handler(ha); 2947 qla4_8xxx_need_qsnt_handler(ha);
2517 break; 2948 break;
2518 case QLA82XX_DEV_QUIESCENT: 2949 case QLA8XXX_DEV_QUIESCENT:
2519 qla4_8xxx_idc_unlock(ha); 2950 ha->isp_ops->idc_unlock(ha);
2520 msleep(1000); 2951 msleep(1000);
2521 qla4_8xxx_idc_lock(ha); 2952 ha->isp_ops->idc_lock(ha);
2522 break; 2953 break;
2523 case QLA82XX_DEV_FAILED: 2954 case QLA8XXX_DEV_FAILED:
2524 qla4_8xxx_idc_unlock(ha); 2955 ha->isp_ops->idc_unlock(ha);
2525 qla4xxx_dead_adapter_cleanup(ha); 2956 qla4xxx_dead_adapter_cleanup(ha);
2526 rval = QLA_ERROR; 2957 rval = QLA_ERROR;
2527 qla4_8xxx_idc_lock(ha); 2958 ha->isp_ops->idc_lock(ha);
2528 goto exit; 2959 goto exit;
2529 default: 2960 default:
2530 qla4_8xxx_idc_unlock(ha); 2961 ha->isp_ops->idc_unlock(ha);
2531 qla4xxx_dead_adapter_cleanup(ha); 2962 qla4xxx_dead_adapter_cleanup(ha);
2532 rval = QLA_ERROR; 2963 rval = QLA_ERROR;
2533 qla4_8xxx_idc_lock(ha); 2964 ha->isp_ops->idc_lock(ha);
2534 goto exit; 2965 goto exit;
2535 } 2966 }
2536 } 2967 }
2537exit: 2968exit:
2538 qla4_8xxx_idc_unlock(ha); 2969 ha->isp_ops->idc_unlock(ha);
2970exit_state_handler:
2539 return rval; 2971 return rval;
2540} 2972}
2541 2973
@@ -2544,8 +2976,13 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
2544 int retval; 2976 int retval;
2545 2977
2546 /* clear the interrupt */ 2978 /* clear the interrupt */
2547 writel(0, &ha->qla4_8xxx_reg->host_int); 2979 if (is_qla8032(ha)) {
2548 readl(&ha->qla4_8xxx_reg->host_int); 2980 writel(0, &ha->qla4_83xx_reg->risc_intr);
2981 readl(&ha->qla4_83xx_reg->risc_intr);
2982 } else if (is_qla8022(ha)) {
2983 writel(0, &ha->qla4_82xx_reg->host_int);
2984 readl(&ha->qla4_82xx_reg->host_int);
2985 }
2549 2986
2550 retval = qla4_8xxx_device_state_handler(ha); 2987 retval = qla4_8xxx_device_state_handler(ha);
2551 2988
@@ -2579,13 +3016,13 @@ flash_data_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
2579} 3016}
2580 3017
2581static uint32_t * 3018static uint32_t *
2582qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr, 3019qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
2583 uint32_t faddr, uint32_t length) 3020 uint32_t faddr, uint32_t length)
2584{ 3021{
2585 uint32_t i; 3022 uint32_t i;
2586 uint32_t val; 3023 uint32_t val;
2587 int loops = 0; 3024 int loops = 0;
2588 while ((qla4_8xxx_rom_lock(ha) != 0) && (loops < 50000)) { 3025 while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
2589 udelay(100); 3026 udelay(100);
2590 cond_resched(); 3027 cond_resched();
2591 loops++; 3028 loops++;
@@ -2597,7 +3034,7 @@ qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
2597 3034
2598 /* Dword reads to flash. */ 3035 /* Dword reads to flash. */
2599 for (i = 0; i < length/4; i++, faddr += 4) { 3036 for (i = 0; i < length/4; i++, faddr += 4) {
2600 if (qla4_8xxx_do_rom_fast_read(ha, faddr, &val)) { 3037 if (qla4_82xx_do_rom_fast_read(ha, faddr, &val)) {
2601 ql4_printk(KERN_WARNING, ha, 3038 ql4_printk(KERN_WARNING, ha,
2602 "Do ROM fast read failed\n"); 3039 "Do ROM fast read failed\n");
2603 goto done_read; 3040 goto done_read;
@@ -2606,7 +3043,7 @@ qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
2606 } 3043 }
2607 3044
2608done_read: 3045done_read:
2609 qla4_8xxx_rom_unlock(ha); 3046 qla4_82xx_rom_unlock(ha);
2610 return dwptr; 3047 return dwptr;
2611} 3048}
2612 3049
@@ -2614,10 +3051,10 @@ done_read:
2614 * Address and length are byte address 3051 * Address and length are byte address
2615 **/ 3052 **/
2616static uint8_t * 3053static uint8_t *
2617qla4_8xxx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 3054qla4_82xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
2618 uint32_t offset, uint32_t length) 3055 uint32_t offset, uint32_t length)
2619{ 3056{
2620 qla4_8xxx_read_flash_data(ha, (uint32_t *)buf, offset, length); 3057 qla4_82xx_read_flash_data(ha, (uint32_t *)buf, offset, length);
2621 return buf; 3058 return buf;
2622} 3059}
2623 3060
@@ -2644,7 +3081,7 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
2644 const char *loc, *locations[] = { "DEF", "FLT" }; 3081 const char *loc, *locations[] = { "DEF", "FLT" };
2645 uint16_t *wptr; 3082 uint16_t *wptr;
2646 uint16_t cnt, chksum; 3083 uint16_t cnt, chksum;
2647 uint32_t start; 3084 uint32_t start, status;
2648 struct qla_flt_header *flt; 3085 struct qla_flt_header *flt;
2649 struct qla_flt_region *region; 3086 struct qla_flt_region *region;
2650 struct ql82xx_hw_data *hw = &ha->hw; 3087 struct ql82xx_hw_data *hw = &ha->hw;
@@ -2653,8 +3090,18 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
2653 wptr = (uint16_t *)ha->request_ring; 3090 wptr = (uint16_t *)ha->request_ring;
2654 flt = (struct qla_flt_header *)ha->request_ring; 3091 flt = (struct qla_flt_header *)ha->request_ring;
2655 region = (struct qla_flt_region *)&flt[1]; 3092 region = (struct qla_flt_region *)&flt[1];
2656 qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring, 3093
2657 flt_addr << 2, OPTROM_BURST_SIZE); 3094 if (is_qla8022(ha)) {
3095 qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
3096 flt_addr << 2, OPTROM_BURST_SIZE);
3097 } else if (is_qla8032(ha)) {
3098 status = qla4_83xx_flash_read_u32(ha, flt_addr << 2,
3099 (uint8_t *)ha->request_ring,
3100 0x400);
3101 if (status != QLA_SUCCESS)
3102 goto no_flash_data;
3103 }
3104
2658 if (*wptr == __constant_cpu_to_le16(0xffff)) 3105 if (*wptr == __constant_cpu_to_le16(0xffff))
2659 goto no_flash_data; 3106 goto no_flash_data;
2660 if (flt->version != __constant_cpu_to_le16(1)) { 3107 if (flt->version != __constant_cpu_to_le16(1)) {
@@ -2730,7 +3177,7 @@ done:
2730} 3177}
2731 3178
2732static void 3179static void
2733qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha) 3180qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
2734{ 3181{
2735#define FLASH_BLK_SIZE_4K 0x1000 3182#define FLASH_BLK_SIZE_4K 0x1000
2736#define FLASH_BLK_SIZE_32K 0x8000 3183#define FLASH_BLK_SIZE_32K 0x8000
@@ -2748,7 +3195,7 @@ qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha)
2748 3195
2749 wptr = (uint16_t *)ha->request_ring; 3196 wptr = (uint16_t *)ha->request_ring;
2750 fdt = (struct qla_fdt_layout *)ha->request_ring; 3197 fdt = (struct qla_fdt_layout *)ha->request_ring;
2751 qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring, 3198 qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
2752 hw->flt_region_fdt << 2, OPTROM_BURST_SIZE); 3199 hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);
2753 3200
2754 if (*wptr == __constant_cpu_to_le16(0xffff)) 3201 if (*wptr == __constant_cpu_to_le16(0xffff))
@@ -2797,7 +3244,7 @@ done:
2797} 3244}
2798 3245
2799static void 3246static void
2800qla4_8xxx_get_idc_param(struct scsi_qla_host *ha) 3247qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
2801{ 3248{
2802#define QLA82XX_IDC_PARAM_ADDR 0x003e885c 3249#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
2803 uint32_t *wptr; 3250 uint32_t *wptr;
@@ -2805,7 +3252,7 @@ qla4_8xxx_get_idc_param(struct scsi_qla_host *ha)
2805 if (!is_qla8022(ha)) 3252 if (!is_qla8022(ha))
2806 return; 3253 return;
2807 wptr = (uint32_t *)ha->request_ring; 3254 wptr = (uint32_t *)ha->request_ring;
2808 qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring, 3255 qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
2809 QLA82XX_IDC_PARAM_ADDR , 8); 3256 QLA82XX_IDC_PARAM_ADDR , 8);
2810 3257
2811 if (*wptr == __constant_cpu_to_le32(0xffffffff)) { 3258 if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
@@ -2823,6 +3270,39 @@ qla4_8xxx_get_idc_param(struct scsi_qla_host *ha)
2823 return; 3270 return;
2824} 3271}
2825 3272
3273void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
3274 int in_count)
3275{
3276 int i;
3277
3278 /* Load all mailbox registers, except mailbox 0. */
3279 for (i = 1; i < in_count; i++)
3280 writel(mbx_cmd[i], &ha->qla4_82xx_reg->mailbox_in[i]);
3281
3282 /* Wakeup firmware */
3283 writel(mbx_cmd[0], &ha->qla4_82xx_reg->mailbox_in[0]);
3284 readl(&ha->qla4_82xx_reg->mailbox_in[0]);
3285 writel(HINT_MBX_INT_PENDING, &ha->qla4_82xx_reg->hint);
3286 readl(&ha->qla4_82xx_reg->hint);
3287}
3288
3289void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
3290{
3291 int intr_status;
3292
3293 intr_status = readl(&ha->qla4_82xx_reg->host_int);
3294 if (intr_status & ISRX_82XX_RISC_INT) {
3295 ha->mbox_status_count = out_count;
3296 intr_status = readl(&ha->qla4_82xx_reg->host_status);
3297 ha->isp_ops->interrupt_service_routine(ha, intr_status);
3298
3299 if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
3300 test_bit(AF_INTx_ENABLED, &ha->flags))
3301 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
3302 0xfbff);
3303 }
3304}
3305
2826int 3306int
2827qla4_8xxx_get_flash_info(struct scsi_qla_host *ha) 3307qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
2828{ 3308{
@@ -2834,8 +3314,12 @@ qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
2834 return ret; 3314 return ret;
2835 3315
2836 qla4_8xxx_get_flt_info(ha, flt_addr); 3316 qla4_8xxx_get_flt_info(ha, flt_addr);
2837 qla4_8xxx_get_fdt_info(ha); 3317 if (is_qla8022(ha)) {
2838 qla4_8xxx_get_idc_param(ha); 3318 qla4_82xx_get_fdt_info(ha);
3319 qla4_82xx_get_idc_param(ha);
3320 } else if (is_qla8032(ha)) {
3321 qla4_83xx_get_idc_param(ha);
3322 }
2839 3323
2840 return QLA_SUCCESS; 3324 return QLA_SUCCESS;
2841} 3325}
@@ -2869,36 +3353,36 @@ qla4_8xxx_stop_firmware(struct scsi_qla_host *ha)
2869} 3353}
2870 3354
2871/** 3355/**
2872 * qla4_8xxx_isp_reset - Resets ISP and aborts all outstanding commands. 3356 * qla4_82xx_isp_reset - Resets ISP and aborts all outstanding commands.
2873 * @ha: pointer to host adapter structure. 3357 * @ha: pointer to host adapter structure.
2874 **/ 3358 **/
2875int 3359int
2876qla4_8xxx_isp_reset(struct scsi_qla_host *ha) 3360qla4_82xx_isp_reset(struct scsi_qla_host *ha)
2877{ 3361{
2878 int rval; 3362 int rval;
2879 uint32_t dev_state; 3363 uint32_t dev_state;
2880 3364
2881 qla4_8xxx_idc_lock(ha); 3365 qla4_82xx_idc_lock(ha);
2882 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3366 dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2883 3367
2884 if (dev_state == QLA82XX_DEV_READY) { 3368 if (dev_state == QLA8XXX_DEV_READY) {
2885 ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); 3369 ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
2886 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3370 qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2887 QLA82XX_DEV_NEED_RESET); 3371 QLA8XXX_DEV_NEED_RESET);
2888 set_bit(AF_82XX_RST_OWNER, &ha->flags); 3372 set_bit(AF_8XXX_RST_OWNER, &ha->flags);
2889 } else 3373 } else
2890 ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n"); 3374 ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
2891 3375
2892 qla4_8xxx_idc_unlock(ha); 3376 qla4_82xx_idc_unlock(ha);
2893 3377
2894 rval = qla4_8xxx_device_state_handler(ha); 3378 rval = qla4_8xxx_device_state_handler(ha);
2895 3379
2896 qla4_8xxx_idc_lock(ha); 3380 qla4_82xx_idc_lock(ha);
2897 qla4_8xxx_clear_rst_ready(ha); 3381 qla4_8xxx_clear_rst_ready(ha);
2898 qla4_8xxx_idc_unlock(ha); 3382 qla4_82xx_idc_unlock(ha);
2899 3383
2900 if (rval == QLA_SUCCESS) { 3384 if (rval == QLA_SUCCESS) {
2901 ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n"); 3385 ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_82xx_isp_reset\n");
2902 clear_bit(AF_FW_RECOVERY, &ha->flags); 3386 clear_bit(AF_FW_RECOVERY, &ha->flags);
2903 } 3387 }
2904 3388
@@ -2979,8 +3463,7 @@ exit_validate_mac82:
2979 3463
2980/* Interrupt handling helpers. */ 3464/* Interrupt handling helpers. */
2981 3465
2982static int 3466int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
2983qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
2984{ 3467{
2985 uint32_t mbox_cmd[MBOX_REG_COUNT]; 3468 uint32_t mbox_cmd[MBOX_REG_COUNT];
2986 uint32_t mbox_sts[MBOX_REG_COUNT]; 3469 uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3001,8 +3484,7 @@ qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
3001 return QLA_SUCCESS; 3484 return QLA_SUCCESS;
3002} 3485}
3003 3486
3004static int 3487int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
3005qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
3006{ 3488{
3007 uint32_t mbox_cmd[MBOX_REG_COUNT]; 3489 uint32_t mbox_cmd[MBOX_REG_COUNT];
3008 uint32_t mbox_sts[MBOX_REG_COUNT]; 3490 uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3025,26 +3507,26 @@ qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
3025} 3507}
3026 3508
3027void 3509void
3028qla4_8xxx_enable_intrs(struct scsi_qla_host *ha) 3510qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
3029{ 3511{
3030 qla4_8xxx_mbx_intr_enable(ha); 3512 qla4_8xxx_mbx_intr_enable(ha);
3031 3513
3032 spin_lock_irq(&ha->hardware_lock); 3514 spin_lock_irq(&ha->hardware_lock);
3033 /* BIT 10 - reset */ 3515 /* BIT 10 - reset */
3034 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); 3516 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
3035 spin_unlock_irq(&ha->hardware_lock); 3517 spin_unlock_irq(&ha->hardware_lock);
3036 set_bit(AF_INTERRUPTS_ON, &ha->flags); 3518 set_bit(AF_INTERRUPTS_ON, &ha->flags);
3037} 3519}
3038 3520
3039void 3521void
3040qla4_8xxx_disable_intrs(struct scsi_qla_host *ha) 3522qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
3041{ 3523{
3042 if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags)) 3524 if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
3043 qla4_8xxx_mbx_intr_disable(ha); 3525 qla4_8xxx_mbx_intr_disable(ha);
3044 3526
3045 spin_lock_irq(&ha->hardware_lock); 3527 spin_lock_irq(&ha->hardware_lock);
3046 /* BIT 10 - set */ 3528 /* BIT 10 - set */
3047 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); 3529 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
3048 spin_unlock_irq(&ha->hardware_lock); 3530 spin_unlock_irq(&ha->hardware_lock);
3049} 3531}
3050 3532
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 30258479f100..9dc0bbfe50d5 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -25,6 +25,8 @@
25#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 25#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
26#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 26#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
27#define CRB_TEMP_STATE QLA82XX_REG(0x1b4) 27#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
28#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
29#define CRB_CMDPEG_CHECK_DELAY 500
28 30
29#define qla82xx_get_temp_val(x) ((x) >> 16) 31#define qla82xx_get_temp_val(x) ((x) >> 16)
30#define qla82xx_get_temp_state(x) ((x) & 0xffff) 32#define qla82xx_get_temp_state(x) ((x) & 0xffff)
@@ -490,8 +492,8 @@ enum {
490 * Base addresses of major components on-chip. 492 * Base addresses of major components on-chip.
491 * ====================== BASE ADDRESSES ON-CHIP ====================== 493 * ====================== BASE ADDRESSES ON-CHIP ======================
492 */ 494 */
493#define QLA82XX_ADDR_DDR_NET (0x0000000000000000ULL) 495#define QLA8XXX_ADDR_DDR_NET (0x0000000000000000ULL)
494#define QLA82XX_ADDR_DDR_NET_MAX (0x000000000fffffffULL) 496#define QLA8XXX_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
495 497
496/* Imbus address bit used to indicate a host address. This bit is 498/* Imbus address bit used to indicate a host address. This bit is
497 * eliminated by the pcie bar and bar select before presentation 499 * eliminated by the pcie bar and bar select before presentation
@@ -500,14 +502,15 @@ enum {
500#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL) 502#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL)
501#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL) 503#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL)
502#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL) 504#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
503#define QLA82XX_ADDR_OCM0 (0x0000000200000000ULL) 505#define QLA8XXX_ADDR_OCM0 (0x0000000200000000ULL)
504#define QLA82XX_ADDR_OCM0_MAX (0x00000002000fffffULL) 506#define QLA8XXX_ADDR_OCM0_MAX (0x00000002000fffffULL)
505#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL) 507#define QLA8XXX_ADDR_OCM1 (0x0000000200400000ULL)
506#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL) 508#define QLA8XXX_ADDR_OCM1_MAX (0x00000002004fffffULL)
507#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) 509#define QLA8XXX_ADDR_QDR_NET (0x0000000300000000ULL)
508 510
509#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL) 511#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
510#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) 512#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
513#define QLA8XXX_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
511 514
512#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 515#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
513#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000 516#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000
@@ -517,6 +520,10 @@ enum {
517#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000 520#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000
518#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff 521#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff
519 522
523/* PCI Windowing for DDR regions. */
524#define QLA8XXX_ADDR_IN_RANGE(addr, low, high) \
525 (((addr) <= (high)) && ((addr) >= (low)))
526
520/* 527/*
521 * Register offsets for MN 528 * Register offsets for MN
522 */ 529 */
@@ -540,6 +547,11 @@ enum {
540#define MIU_TA_CTL_WRITE 4 547#define MIU_TA_CTL_WRITE 4
541#define MIU_TA_CTL_BUSY 8 548#define MIU_TA_CTL_BUSY 8
542 549
550#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
551#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE |\
552 MIU_TA_CTL_START)
553#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
554
543/*CAM RAM */ 555/*CAM RAM */
544# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000) 556# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
545# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg)) 557# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
@@ -565,20 +577,53 @@ enum {
565/* Driver Coexistence Defines */ 577/* Driver Coexistence Defines */
566#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138)) 578#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138))
567#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140)) 579#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140))
568#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
569#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
570#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144)) 580#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144))
571#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148)) 581#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148))
572#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c)) 582#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
583#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
584
585enum qla_regs {
586 QLA8XXX_PEG_HALT_STATUS1 = 0,
587 QLA8XXX_PEG_HALT_STATUS2,
588 QLA8XXX_PEG_ALIVE_COUNTER,
589 QLA8XXX_CRB_DRV_ACTIVE,
590 QLA8XXX_CRB_DEV_STATE,
591 QLA8XXX_CRB_DRV_STATE,
592 QLA8XXX_CRB_DRV_SCRATCH,
593 QLA8XXX_CRB_DEV_PART_INFO,
594 QLA8XXX_CRB_DRV_IDC_VERSION,
595 QLA8XXX_FW_VERSION_MAJOR,
596 QLA8XXX_FW_VERSION_MINOR,
597 QLA8XXX_FW_VERSION_SUB,
598 QLA8XXX_CRB_CMDPEG_STATE,
599 QLA8XXX_CRB_TEMP_STATE,
600};
601
602static const uint32_t qla4_82xx_reg_tbl[] = {
603 QLA82XX_PEG_HALT_STATUS1,
604 QLA82XX_PEG_HALT_STATUS2,
605 QLA82XX_PEG_ALIVE_COUNTER,
606 QLA82XX_CRB_DRV_ACTIVE,
607 QLA82XX_CRB_DEV_STATE,
608 QLA82XX_CRB_DRV_STATE,
609 QLA82XX_CRB_DRV_SCRATCH,
610 QLA82XX_CRB_DEV_PART_INFO,
611 QLA82XX_CRB_DRV_IDC_VERSION,
612 QLA82XX_FW_VERSION_MAJOR,
613 QLA82XX_FW_VERSION_MINOR,
614 QLA82XX_FW_VERSION_SUB,
615 CRB_CMDPEG_STATE,
616 CRB_TEMP_STATE,
617};
573 618
574/* Every driver should use these Device State */ 619/* Every driver should use these Device State */
575#define QLA82XX_DEV_COLD 1 620#define QLA8XXX_DEV_COLD 1
576#define QLA82XX_DEV_INITIALIZING 2 621#define QLA8XXX_DEV_INITIALIZING 2
577#define QLA82XX_DEV_READY 3 622#define QLA8XXX_DEV_READY 3
578#define QLA82XX_DEV_NEED_RESET 4 623#define QLA8XXX_DEV_NEED_RESET 4
579#define QLA82XX_DEV_NEED_QUIESCENT 5 624#define QLA8XXX_DEV_NEED_QUIESCENT 5
580#define QLA82XX_DEV_FAILED 6 625#define QLA8XXX_DEV_FAILED 6
581#define QLA82XX_DEV_QUIESCENT 7 626#define QLA8XXX_DEV_QUIESCENT 7
582#define MAX_STATES 8 /* Increment if new state added */ 627#define MAX_STATES 8 /* Increment if new state added */
583 628
584#define QLA82XX_IDC_VERSION 0x1 629#define QLA82XX_IDC_VERSION 0x1
@@ -795,47 +840,51 @@ struct crb_addr_pair {
795/* Minidump related */ 840/* Minidump related */
796 841
797/* Entry Type Defines */ 842/* Entry Type Defines */
798#define QLA82XX_RDNOP 0 843#define QLA8XXX_RDNOP 0
799#define QLA82XX_RDCRB 1 844#define QLA8XXX_RDCRB 1
800#define QLA82XX_RDMUX 2 845#define QLA8XXX_RDMUX 2
801#define QLA82XX_QUEUE 3 846#define QLA8XXX_QUEUE 3
802#define QLA82XX_BOARD 4 847#define QLA8XXX_BOARD 4
803#define QLA82XX_RDOCM 6 848#define QLA8XXX_RDOCM 6
804#define QLA82XX_PREGS 7 849#define QLA8XXX_PREGS 7
805#define QLA82XX_L1DTG 8 850#define QLA8XXX_L1DTG 8
806#define QLA82XX_L1ITG 9 851#define QLA8XXX_L1ITG 9
807#define QLA82XX_L1DAT 11 852#define QLA8XXX_L1DAT 11
808#define QLA82XX_L1INS 12 853#define QLA8XXX_L1INS 12
809#define QLA82XX_L2DTG 21 854#define QLA8XXX_L2DTG 21
810#define QLA82XX_L2ITG 22 855#define QLA8XXX_L2ITG 22
811#define QLA82XX_L2DAT 23 856#define QLA8XXX_L2DAT 23
812#define QLA82XX_L2INS 24 857#define QLA8XXX_L2INS 24
813#define QLA82XX_RDROM 71 858#define QLA83XX_POLLRD 35
814#define QLA82XX_RDMEM 72 859#define QLA83XX_RDMUX2 36
815#define QLA82XX_CNTRL 98 860#define QLA83XX_POLLRDMWR 37
816#define QLA82XX_RDEND 255 861#define QLA8XXX_RDROM 71
862#define QLA8XXX_RDMEM 72
863#define QLA8XXX_CNTRL 98
864#define QLA83XX_TLHDR 99
865#define QLA8XXX_RDEND 255
817 866
818/* Opcodes for Control Entries. 867/* Opcodes for Control Entries.
819 * These Flags are bit fields. 868 * These Flags are bit fields.
820 */ 869 */
821#define QLA82XX_DBG_OPCODE_WR 0x01 870#define QLA8XXX_DBG_OPCODE_WR 0x01
822#define QLA82XX_DBG_OPCODE_RW 0x02 871#define QLA8XXX_DBG_OPCODE_RW 0x02
823#define QLA82XX_DBG_OPCODE_AND 0x04 872#define QLA8XXX_DBG_OPCODE_AND 0x04
824#define QLA82XX_DBG_OPCODE_OR 0x08 873#define QLA8XXX_DBG_OPCODE_OR 0x08
825#define QLA82XX_DBG_OPCODE_POLL 0x10 874#define QLA8XXX_DBG_OPCODE_POLL 0x10
826#define QLA82XX_DBG_OPCODE_RDSTATE 0x20 875#define QLA8XXX_DBG_OPCODE_RDSTATE 0x20
827#define QLA82XX_DBG_OPCODE_WRSTATE 0x40 876#define QLA8XXX_DBG_OPCODE_WRSTATE 0x40
828#define QLA82XX_DBG_OPCODE_MDSTATE 0x80 877#define QLA8XXX_DBG_OPCODE_MDSTATE 0x80
829 878
830/* Driver Flags */ 879/* Driver Flags */
831#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */ 880#define QLA8XXX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
832#define QLA82XX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size 881#define QLA8XXX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
833 * mismatch */ 882 * mismatch */
834 883
835/* Driver_code is for driver to write some info about the entry 884/* Driver_code is for driver to write some info about the entry
836 * currently not used. 885 * currently not used.
837 */ 886 */
838struct qla82xx_minidump_entry_hdr { 887struct qla8xxx_minidump_entry_hdr {
839 uint32_t entry_type; 888 uint32_t entry_type;
840 uint32_t entry_size; 889 uint32_t entry_size;
841 uint32_t entry_capture_size; 890 uint32_t entry_capture_size;
@@ -848,8 +897,8 @@ struct qla82xx_minidump_entry_hdr {
848}; 897};
849 898
850/* Read CRB entry header */ 899/* Read CRB entry header */
851struct qla82xx_minidump_entry_crb { 900struct qla8xxx_minidump_entry_crb {
852 struct qla82xx_minidump_entry_hdr h; 901 struct qla8xxx_minidump_entry_hdr h;
853 uint32_t addr; 902 uint32_t addr;
854 struct { 903 struct {
855 uint8_t addr_stride; 904 uint8_t addr_stride;
@@ -871,8 +920,8 @@ struct qla82xx_minidump_entry_crb {
871 uint32_t value_3; 920 uint32_t value_3;
872}; 921};
873 922
874struct qla82xx_minidump_entry_cache { 923struct qla8xxx_minidump_entry_cache {
875 struct qla82xx_minidump_entry_hdr h; 924 struct qla8xxx_minidump_entry_hdr h;
876 uint32_t tag_reg_addr; 925 uint32_t tag_reg_addr;
877 struct { 926 struct {
878 uint16_t tag_value_stride; 927 uint16_t tag_value_stride;
@@ -895,8 +944,8 @@ struct qla82xx_minidump_entry_cache {
895}; 944};
896 945
897/* Read OCM */ 946/* Read OCM */
898struct qla82xx_minidump_entry_rdocm { 947struct qla8xxx_minidump_entry_rdocm {
899 struct qla82xx_minidump_entry_hdr h; 948 struct qla8xxx_minidump_entry_hdr h;
900 uint32_t rsvd_0; 949 uint32_t rsvd_0;
901 uint32_t rsvd_1; 950 uint32_t rsvd_1;
902 uint32_t data_size; 951 uint32_t data_size;
@@ -908,24 +957,24 @@ struct qla82xx_minidump_entry_rdocm {
908}; 957};
909 958
910/* Read Memory */ 959/* Read Memory */
911struct qla82xx_minidump_entry_rdmem { 960struct qla8xxx_minidump_entry_rdmem {
912 struct qla82xx_minidump_entry_hdr h; 961 struct qla8xxx_minidump_entry_hdr h;
913 uint32_t rsvd[6]; 962 uint32_t rsvd[6];
914 uint32_t read_addr; 963 uint32_t read_addr;
915 uint32_t read_data_size; 964 uint32_t read_data_size;
916}; 965};
917 966
918/* Read ROM */ 967/* Read ROM */
919struct qla82xx_minidump_entry_rdrom { 968struct qla8xxx_minidump_entry_rdrom {
920 struct qla82xx_minidump_entry_hdr h; 969 struct qla8xxx_minidump_entry_hdr h;
921 uint32_t rsvd[6]; 970 uint32_t rsvd[6];
922 uint32_t read_addr; 971 uint32_t read_addr;
923 uint32_t read_data_size; 972 uint32_t read_data_size;
924}; 973};
925 974
926/* Mux entry */ 975/* Mux entry */
927struct qla82xx_minidump_entry_mux { 976struct qla8xxx_minidump_entry_mux {
928 struct qla82xx_minidump_entry_hdr h; 977 struct qla8xxx_minidump_entry_hdr h;
929 uint32_t select_addr; 978 uint32_t select_addr;
930 uint32_t rsvd_0; 979 uint32_t rsvd_0;
931 uint32_t data_size; 980 uint32_t data_size;
@@ -937,8 +986,8 @@ struct qla82xx_minidump_entry_mux {
937}; 986};
938 987
939/* Queue entry */ 988/* Queue entry */
940struct qla82xx_minidump_entry_queue { 989struct qla8xxx_minidump_entry_queue {
941 struct qla82xx_minidump_entry_hdr h; 990 struct qla8xxx_minidump_entry_hdr h;
942 uint32_t select_addr; 991 uint32_t select_addr;
943 struct { 992 struct {
944 uint16_t queue_id_stride; 993 uint16_t queue_id_stride;
@@ -956,23 +1005,6 @@ struct qla82xx_minidump_entry_queue {
956 } rd_strd; 1005 } rd_strd;
957}; 1006};
958 1007
959#define QLA82XX_MINIDUMP_OCM0_SIZE (256 * 1024)
960#define QLA82XX_MINIDUMP_L1C_SIZE (256 * 1024)
961#define QLA82XX_MINIDUMP_L2C_SIZE 1572864
962#define QLA82XX_MINIDUMP_COMMON_STR_SIZE 0
963#define QLA82XX_MINIDUMP_FCOE_STR_SIZE 0
964#define QLA82XX_MINIDUMP_MEM_SIZE 0
965#define QLA82XX_MAX_ENTRY_HDR 4
966
967struct qla82xx_minidump {
968 uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
969 uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
970 uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
971 uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
972 uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
973 uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
974};
975
976#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129 1008#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
977#define RQST_TMPLT_SIZE 0x0 1009#define RQST_TMPLT_SIZE 0x0
978#define RQST_TMPLT 0x1 1010#define RQST_TMPLT 0x1
@@ -982,6 +1014,16 @@ struct qla82xx_minidump {
982#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 1014#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
983#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 1015#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
984 1016
1017#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0
1018#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4
1019#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0
1020#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4
1021
1022#define MD_MIU_TEST_AGT_RDDATA_LO 0x410000A8
1023#define MD_MIU_TEST_AGT_RDDATA_HI 0x410000AC
1024#define MD_MIU_TEST_AGT_RDDATA_ULO 0x410000B8
1025#define MD_MIU_TEST_AGT_RDDATA_UHI 0x410000BC
1026
985static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 1027static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
986 0x410000AC, 0x410000B8, 0x410000BC }; 1028 0x410000AC, 0x410000B8, 0x410000BC };
987#endif 1029#endif
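[Editor's note] The enum qla_regs plus qla4_82xx_reg_tbl[] pair introduced above is what lets the generation-neutral qla4_8xxx_rd_direct()/wr_direct() calls in ql4_nx.c resolve a logical register to a chip-specific CRB offset. A hedged sketch of that lookup; demo_ha, rd_reg and qla4_demo_rd_direct are hypothetical names, not the driver's API:

#include <linux/types.h>

struct demo_ha {
	const u32 *reg_tbl;				/* per-chip offsets, indexed by enum qla_regs */
	u32 (*rd_reg)(struct demo_ha *ha, u32 off);	/* chip-specific 32-bit CRB read */
};

/* e.g. qla4_demo_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) reads the right
 * register on either ISP8022 or ISP8324 without an is_qla80xx() test. */
static u32 qla4_demo_rd_direct(struct demo_ha *ha, unsigned int idx)
{
	return ha->rd_reg(ha, ha->reg_tbl[idx]);
}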
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 79243b76d17e..fbc546e893ac 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -18,6 +18,7 @@
18#include "ql4_glbl.h" 18#include "ql4_glbl.h"
19#include "ql4_dbg.h" 19#include "ql4_dbg.h"
20#include "ql4_inline.h" 20#include "ql4_inline.h"
21#include "ql4_83xx.h"
21 22
22/* 23/*
23 * Driver version 24 * Driver version
@@ -160,7 +161,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
160static int qla4xxx_slave_alloc(struct scsi_device *device); 161static int qla4xxx_slave_alloc(struct scsi_device *device);
161static int qla4xxx_slave_configure(struct scsi_device *device); 162static int qla4xxx_slave_configure(struct scsi_device *device);
162static void qla4xxx_slave_destroy(struct scsi_device *sdev); 163static void qla4xxx_slave_destroy(struct scsi_device *sdev);
163static umode_t ql4_attr_is_visible(int param_type, int param); 164static umode_t qla4_attr_is_visible(int param_type, int param);
164static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 165static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
165static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth, 166static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
166 int reason); 167 int reason);
@@ -203,7 +204,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
203 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | 204 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
204 CAP_DATADGST | CAP_LOGIN_OFFLOAD | 205 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
205 CAP_MULTI_R2T, 206 CAP_MULTI_R2T,
206 .attr_is_visible = ql4_attr_is_visible, 207 .attr_is_visible = qla4_attr_is_visible,
207 .create_session = qla4xxx_session_create, 208 .create_session = qla4xxx_session_create,
208 .destroy_session = qla4xxx_session_destroy, 209 .destroy_session = qla4xxx_session_destroy,
209 .start_conn = qla4xxx_conn_start, 210 .start_conn = qla4xxx_conn_start,
@@ -315,7 +316,7 @@ exit_send_ping:
315 return rval; 316 return rval;
316} 317}
317 318
318static umode_t ql4_attr_is_visible(int param_type, int param) 319static umode_t qla4_attr_is_visible(int param_type, int param)
319{ 320{
320 switch (param_type) { 321 switch (param_type) {
321 case ISCSI_HOST_PARAM: 322 case ISCSI_HOST_PARAM:
@@ -1366,7 +1367,7 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1366 1367
1367 conn = cls_conn->dd_data; 1368 conn = cls_conn->dd_data;
1368 qla_conn = conn->dd_data; 1369 qla_conn = conn->dd_data;
1369 dst_addr = &qla_conn->qla_ep->dst_addr; 1370 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1370 1371
1371 switch (param) { 1372 switch (param) {
1372 case ISCSI_PARAM_CONN_PORT: 1373 case ISCSI_PARAM_CONN_PORT:
@@ -2315,8 +2316,17 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2315 if (ha->nx_pcibase) 2316 if (ha->nx_pcibase)
2316 iounmap( 2317 iounmap(
2317 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 2318 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2318 } else if (ha->reg) 2319 } else if (is_qla8032(ha)) {
2320 if (ha->nx_pcibase)
2321 iounmap(
2322 (struct device_reg_83xx __iomem *)ha->nx_pcibase);
2323 } else if (ha->reg) {
2319 iounmap(ha->reg); 2324 iounmap(ha->reg);
2325 }
2326
2327 if (ha->reset_tmplt.buff)
2328 vfree(ha->reset_tmplt.buff);
2329
2320 pci_release_regions(ha->pdev); 2330 pci_release_regions(ha->pdev);
2321} 2331}
2322 2332
@@ -2420,7 +2430,7 @@ static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2420 uint32_t temp, temp_state, temp_val; 2430 uint32_t temp, temp_state, temp_val;
2421 int status = QLA_SUCCESS; 2431 int status = QLA_SUCCESS;
2422 2432
2423 temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE); 2433 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
2424 2434
2425 temp_state = qla82xx_get_temp_state(temp); 2435 temp_state = qla82xx_get_temp_state(temp);
2426 temp_val = qla82xx_get_temp_val(temp); 2436 temp_val = qla82xx_get_temp_val(temp);
@@ -2456,7 +2466,8 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2456 uint32_t fw_heartbeat_counter; 2466 uint32_t fw_heartbeat_counter;
2457 int status = QLA_SUCCESS; 2467 int status = QLA_SUCCESS;
2458 2468
2459 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 2469 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
2470 QLA8XXX_PEG_ALIVE_COUNTER);
2460 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ 2471 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2461 if (fw_heartbeat_counter == 0xffffffff) { 2472 if (fw_heartbeat_counter == 0xffffffff) {
2462 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " 2473 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
@@ -2470,28 +2481,7 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2470 /* FW not alive after 2 seconds */ 2481 /* FW not alive after 2 seconds */
2471 if (ha->seconds_since_last_heartbeat == 2) { 2482 if (ha->seconds_since_last_heartbeat == 2) {
2472 ha->seconds_since_last_heartbeat = 0; 2483 ha->seconds_since_last_heartbeat = 0;
2473 2484 qla4_8xxx_dump_peg_reg(ha);
2474 ql4_printk(KERN_INFO, ha,
2475 "scsi(%ld): %s, Dumping hw/fw registers:\n "
2476 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
2477 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
2478 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
2479 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
2480 ha->host_no, __func__,
2481 qla4_8xxx_rd_32(ha,
2482 QLA82XX_PEG_HALT_STATUS1),
2483 qla4_8xxx_rd_32(ha,
2484 QLA82XX_PEG_HALT_STATUS2),
2485 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
2486 0x3c),
2487 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
2488 0x3c),
2489 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
2490 0x3c),
2491 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
2492 0x3c),
2493 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
2494 0x3c));
2495 status = QLA_ERROR; 2485 status = QLA_ERROR;
2496 } 2486 }
2497 } else 2487 } else
@@ -2501,6 +2491,48 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2501 return status; 2491 return status;
2502} 2492}
2503 2493
2494static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
2495{
2496 uint32_t halt_status;
2497 int halt_status_unrecoverable = 0;
2498
2499 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
2500
2501 if (is_qla8022(ha)) {
2502 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
2503 __func__);
2504 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2505 CRB_NIU_XG_PAUSE_CTL_P0 |
2506 CRB_NIU_XG_PAUSE_CTL_P1);
2507
2508 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2509 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
2510 __func__);
2511 if (halt_status & HALT_STATUS_UNRECOVERABLE)
2512 halt_status_unrecoverable = 1;
2513 } else if (is_qla8032(ha)) {
2514 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
2515 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
2516 __func__);
2517 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
2518 halt_status_unrecoverable = 1;
2519 }
2520
2521 /*
2522 * Since we cannot change dev_state in interrupt context,
2523 * set appropriate DPC flag then wakeup DPC
2524 */
2525 if (halt_status_unrecoverable) {
2526 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2527 } else {
2528 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
2529 __func__);
2530 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2531 }
2532 qla4xxx_mailbox_premature_completion(ha);
2533 qla4xxx_wake_dpc(ha);
2534}
2535
2504/** 2536/**
2505 * qla4_8xxx_watchdog - Poll dev state 2537 * qla4_8xxx_watchdog - Poll dev state
2506 * @ha: Pointer to host adapter structure. 2538 * @ha: Pointer to host adapter structure.
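
The added qla4_8xxx_process_fw_error() above sticks to the rule its comment states: dev_state cannot be changed from interrupt context, so the handler only sets a bit in ha->dpc_flags and wakes the DPC thread, which performs the actual recovery later. A rough user-space sketch of that hand-off pattern follows; the names and the pthread worker are invented for illustration and this is not driver code.

/* Sketch only: models "set a flag in IRQ context, let a worker do the
 * heavy lifting" with POSIX threads.  All names are hypothetical. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool need_reset = false;      /* stands in for a dpc_flags bit */
static pthread_mutex_t dpc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  dpc_wake = PTHREAD_COND_INITIALIZER;

/* Would run in interrupt context: do no real work, just flag and wake. */
static void report_fw_error(void)
{
    atomic_store(&need_reset, true);
    pthread_mutex_lock(&dpc_lock);
    pthread_cond_signal(&dpc_wake);         /* analogous to waking the DPC */
    pthread_mutex_unlock(&dpc_lock);
}

/* The "DPC": sleeps until woken, then performs the expensive recovery. */
static void *dpc_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&dpc_lock);
    while (!atomic_load(&need_reset))
        pthread_cond_wait(&dpc_wake, &dpc_lock);
    pthread_mutex_unlock(&dpc_lock);
    puts("worker: performing chip reset outside interrupt context");
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, dpc_thread, NULL);
    sleep(1);                               /* pretend the firmware just died */
    report_fw_error();
    pthread_join(tid, NULL);
    return 0;
}
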
@@ -2509,31 +2541,33 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2509 **/ 2541 **/
2510void qla4_8xxx_watchdog(struct scsi_qla_host *ha) 2542void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2511{ 2543{
2512 uint32_t dev_state, halt_status; 2544 uint32_t dev_state;
2513 2545
2514 /* don't poll if reset is going on */ 2546 /* don't poll if reset is going on */
2515 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 2547 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2516 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 2548 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2517 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 2549 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2518 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2550 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
2519 2551
2520 if (qla4_8xxx_check_temp(ha)) { 2552 if (qla4_8xxx_check_temp(ha)) {
2521 ql4_printk(KERN_INFO, ha, "disabling pause" 2553 if (is_qla8022(ha)) {
2522 " transmit on port 0 & 1.\n"); 2554 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
2523 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 2555 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2524 CRB_NIU_XG_PAUSE_CTL_P0 | 2556 CRB_NIU_XG_PAUSE_CTL_P0 |
2525 CRB_NIU_XG_PAUSE_CTL_P1); 2557 CRB_NIU_XG_PAUSE_CTL_P1);
2558 }
2526 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 2559 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2527 qla4xxx_wake_dpc(ha); 2560 qla4xxx_wake_dpc(ha);
2528 } else if (dev_state == QLA82XX_DEV_NEED_RESET && 2561 } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
2529 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 2562 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2530 if (!ql4xdontresethba) { 2563 if (is_qla8032(ha) ||
2564 (is_qla8022(ha) && !ql4xdontresethba)) {
2531 ql4_printk(KERN_INFO, ha, "%s: HW State: " 2565 ql4_printk(KERN_INFO, ha, "%s: HW State: "
2532 "NEED RESET!\n", __func__); 2566 "NEED RESET!\n", __func__);
2533 set_bit(DPC_RESET_HA, &ha->dpc_flags); 2567 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2534 qla4xxx_wake_dpc(ha); 2568 qla4xxx_wake_dpc(ha);
2535 } 2569 }
2536 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 2570 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
2537 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 2571 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2538 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", 2572 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
2539 __func__); 2573 __func__);
@@ -2541,36 +2575,8 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2541 qla4xxx_wake_dpc(ha); 2575 qla4xxx_wake_dpc(ha);
2542 } else { 2576 } else {
2543 /* Check firmware health */ 2577 /* Check firmware health */
2544 if (qla4_8xxx_check_fw_alive(ha)) { 2578 if (qla4_8xxx_check_fw_alive(ha))
2545 ql4_printk(KERN_INFO, ha, "disabling pause" 2579 qla4_8xxx_process_fw_error(ha);
2546 " transmit on port 0 & 1.\n");
2547 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2548 CRB_NIU_XG_PAUSE_CTL_P0 |
2549 CRB_NIU_XG_PAUSE_CTL_P1);
2550 halt_status = qla4_8xxx_rd_32(ha,
2551 QLA82XX_PEG_HALT_STATUS1);
2552
2553 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2554 ql4_printk(KERN_ERR, ha, "%s:"
2555 " Firmware aborted with"
2556 " error code 0x00006700."
2557 " Device is being reset\n",
2558 __func__);
2559
2560 /* Since we cannot change dev_state in interrupt
2561 * context, set appropriate DPC flag then wakeup
2562 * DPC */
2563 if (halt_status & HALT_STATUS_UNRECOVERABLE)
2564 set_bit(DPC_HA_UNRECOVERABLE,
2565 &ha->dpc_flags);
2566 else {
2567 ql4_printk(KERN_INFO, ha, "%s: detect "
2568 "abort needed!\n", __func__);
2569 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2570 }
2571 qla4xxx_mailbox_premature_completion(ha);
2572 qla4xxx_wake_dpc(ha);
2573 }
2574 } 2580 }
2575 } 2581 }
2576} 2582}
@@ -2652,11 +2658,10 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
2652 if (!pci_channel_offline(ha->pdev)) 2658 if (!pci_channel_offline(ha->pdev))
2653 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 2659 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
2654 2660
2655 if (is_qla8022(ha)) { 2661 if (is_qla80XX(ha))
2656 qla4_8xxx_watchdog(ha); 2662 qla4_8xxx_watchdog(ha);
2657 }
2658 2663
2659 if (!is_qla8022(ha)) { 2664 if (is_qla40XX(ha)) {
2660 /* Check for heartbeat interval. */ 2665 /* Check for heartbeat interval. */
2661 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && 2666 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
2662 ha->heartbeat_interval != 0) { 2667 ha->heartbeat_interval != 0) {
@@ -2941,6 +2946,14 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2941 2946
2942 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 2947 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2943 2948
2949 if (is_qla8032(ha) &&
2950 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2951 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
2952 __func__);
2953 /* disable pause frame for ISP83xx */
2954 qla4_83xx_disable_pause(ha);
2955 }
2956
2944 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 2957 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2945 2958
2946 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 2959 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
@@ -2953,9 +2966,9 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2953 goto recover_ha_init_adapter; 2966 goto recover_ha_init_adapter;
2954 } 2967 }
2955 2968
2956 /* For the ISP-82xx adapter, issue a stop_firmware if invoked 2969 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked
2957 * from eh_host_reset or ioctl module */ 2970 * from eh_host_reset or ioctl module */
2958 if (is_qla8022(ha) && !reset_chip && 2971 if (is_qla80XX(ha) && !reset_chip &&
2959 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 2972 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2960 2973
2961 DEBUG2(ql4_printk(KERN_INFO, ha, 2974 DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -2978,13 +2991,13 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2978 } 2991 }
2979 2992
2980 /* Issue full chip reset if recovering from a catastrophic error, 2993 /* Issue full chip reset if recovering from a catastrophic error,
2981 * or if stop_firmware fails for ISP-82xx. 2994 * or if stop_firmware fails for ISP-8xxx.
2982 * This is the default case for ISP-4xxx */ 2995 * This is the default case for ISP-4xxx */
2983 if (!is_qla8022(ha) || reset_chip) { 2996 if (is_qla40XX(ha) || reset_chip) {
2984 if (!is_qla8022(ha)) 2997 if (is_qla40XX(ha))
2985 goto chip_reset; 2998 goto chip_reset;
2986 2999
2987 /* Check if 82XX firmware is alive or not 3000 /* Check if 8XXX firmware is alive or not
2988 * We may have arrived here from NEED_RESET 3001 * We may have arrived here from NEED_RESET
2989 * detection only */ 3002 * detection only */
2990 if (test_bit(AF_FW_RECOVERY, &ha->flags)) 3003 if (test_bit(AF_FW_RECOVERY, &ha->flags))
@@ -3000,10 +3013,10 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
3000 set_current_state(TASK_UNINTERRUPTIBLE); 3013 set_current_state(TASK_UNINTERRUPTIBLE);
3001 schedule_timeout(HZ); 3014 schedule_timeout(HZ);
3002 } 3015 }
3003 3016chip_reset:
3004 if (!test_bit(AF_FW_RECOVERY, &ha->flags)) 3017 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3005 qla4xxx_cmd_wait(ha); 3018 qla4xxx_cmd_wait(ha);
3006chip_reset: 3019
3007 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 3020 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3008 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 3021 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3009 DEBUG2(ql4_printk(KERN_INFO, ha, 3022 DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -3021,7 +3034,7 @@ recover_ha_init_adapter:
3021 /* For ISP-4xxx, force function 1 to always initialize 3034 /* For ISP-4xxx, force function 1 to always initialize
3022 * before function 3 to prevent both functions from 3035 * before function 3 to prevent both functions from
3023 * stepping on top of the other */ 3036 * stepping on top of the other */
3024 if (!is_qla8022(ha) && (ha->mac_index == 3)) 3037 if (is_qla40XX(ha) && (ha->mac_index == 3))
3025 ssleep(6); 3038 ssleep(6);
3026 3039
3027 /* NOTE: AF_ONLINE flag set upon successful completion of 3040 /* NOTE: AF_ONLINE flag set upon successful completion of
@@ -3039,11 +3052,12 @@ recover_ha_init_adapter:
3039 * Since we don't want to block the DPC for too long 3052 * Since we don't want to block the DPC for too long
3040 * with multiple resets in the same thread, 3053 * with multiple resets in the same thread,
3041 * utilize DPC to retry */ 3054 * utilize DPC to retry */
3042 if (is_qla8022(ha)) { 3055 if (is_qla80XX(ha)) {
3043 qla4_8xxx_idc_lock(ha); 3056 ha->isp_ops->idc_lock(ha);
3044 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3057 dev_state = qla4_8xxx_rd_direct(ha,
3045 qla4_8xxx_idc_unlock(ha); 3058 QLA8XXX_CRB_DEV_STATE);
3046 if (dev_state == QLA82XX_DEV_FAILED) { 3059 ha->isp_ops->idc_unlock(ha);
3060 if (dev_state == QLA8XXX_DEV_FAILED) {
3047 ql4_printk(KERN_INFO, ha, "%s: don't retry " 3061 ql4_printk(KERN_INFO, ha, "%s: don't retry "
3048 "recover adapter. H/W is in Failed " 3062 "recover adapter. H/W is in Failed "
3049 "state\n", __func__); 3063 "state\n", __func__);
@@ -3168,6 +3182,7 @@ int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3168 struct iscsi_session *sess; 3182 struct iscsi_session *sess;
3169 struct ddb_entry *ddb_entry; 3183 struct ddb_entry *ddb_entry;
3170 struct scsi_qla_host *ha; 3184 struct scsi_qla_host *ha;
3185 int status = QLA_SUCCESS;
3171 3186
3172 sess = cls_session->dd_data; 3187 sess = cls_session->dd_data;
3173 ddb_entry = sess->dd_data; 3188 ddb_entry = sess->dd_data;
@@ -3175,11 +3190,20 @@ int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3175 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 3190 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3176 " unblock user space session\n", ha->host_no, __func__, 3191 " unblock user space session\n", ha->host_no, __func__,
3177 ddb_entry->fw_ddb_index); 3192 ddb_entry->fw_ddb_index);
3178 iscsi_conn_start(ddb_entry->conn);
3179 iscsi_conn_login_event(ddb_entry->conn,
3180 ISCSI_CONN_STATE_LOGGED_IN);
3181 3193
3182 return QLA_SUCCESS; 3194 if (!iscsi_is_session_online(cls_session)) {
3195 iscsi_conn_start(ddb_entry->conn);
3196 iscsi_conn_login_event(ddb_entry->conn,
3197 ISCSI_CONN_STATE_LOGGED_IN);
3198 } else {
3199 ql4_printk(KERN_INFO, ha,
3200 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
3201 ha->host_no, __func__, ddb_entry->fw_ddb_index,
3202 cls_session->sid);
3203 status = QLA_ERROR;
3204 }
3205
3206 return status;
3183} 3207}
3184 3208
3185static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 3209static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
@@ -3373,15 +3397,26 @@ static void qla4xxx_do_dpc(struct work_struct *work)
3373 /* post events to application */ 3397 /* post events to application */
3374 qla4xxx_do_work(ha); 3398 qla4xxx_do_work(ha);
3375 3399
3376 if (is_qla8022(ha)) { 3400 if (is_qla80XX(ha)) {
3377 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 3401 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3378 qla4_8xxx_idc_lock(ha); 3402 if (is_qla8032(ha)) {
3379 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3403 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3380 QLA82XX_DEV_FAILED); 3404 __func__);
3381 qla4_8xxx_idc_unlock(ha); 3405 /* disable pause frame for ISP83xx */
3406 qla4_83xx_disable_pause(ha);
3407 }
3408
3409 ha->isp_ops->idc_lock(ha);
3410 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
3411 QLA8XXX_DEV_FAILED);
3412 ha->isp_ops->idc_unlock(ha);
3382 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 3413 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3383 qla4_8xxx_device_state_handler(ha); 3414 qla4_8xxx_device_state_handler(ha);
3384 } 3415 }
3416
3417 if (test_and_clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags))
3418 qla4_83xx_post_idc_ack(ha);
3419
3385 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 3420 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3386 qla4_8xxx_need_qsnt_handler(ha); 3421 qla4_8xxx_need_qsnt_handler(ha);
3387 } 3422 }
@@ -3391,7 +3426,8 @@ static void qla4xxx_do_dpc(struct work_struct *work)
3391 (test_bit(DPC_RESET_HA, &ha->dpc_flags) || 3426 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3392 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 3427 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3393 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 3428 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3394 if (ql4xdontresethba) { 3429 if ((is_qla8022(ha) && ql4xdontresethba) ||
3430 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
3395 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 3431 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3396 ha->host_no, __func__)); 3432 ha->host_no, __func__));
3397 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 3433 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -3477,6 +3513,18 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3477 ha->isp_ops->disable_intrs(ha); 3513 ha->isp_ops->disable_intrs(ha);
3478 } 3514 }
3479 3515
3516 if (is_qla40XX(ha)) {
3517 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
3518 &ha->reg->ctrl_status);
3519 readl(&ha->reg->ctrl_status);
3520 } else if (is_qla8022(ha)) {
3521 writel(0, &ha->qla4_82xx_reg->host_int);
3522 readl(&ha->qla4_82xx_reg->host_int);
3523 } else if (is_qla8032(ha)) {
3524 writel(0, &ha->qla4_83xx_reg->risc_intr);
3525 readl(&ha->qla4_83xx_reg->risc_intr);
3526 }
3527
3480 /* Remove timer thread, if present */ 3528 /* Remove timer thread, if present */
3481 if (ha->timer_active) 3529 if (ha->timer_active)
3482 qla4xxx_stop_timer(ha); 3530 qla4xxx_stop_timer(ha);
@@ -3492,10 +3540,10 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3492 /* Put firmware in known state */ 3540 /* Put firmware in known state */
3493 ha->isp_ops->reset_firmware(ha); 3541 ha->isp_ops->reset_firmware(ha);
3494 3542
3495 if (is_qla8022(ha)) { 3543 if (is_qla80XX(ha)) {
3496 qla4_8xxx_idc_lock(ha); 3544 ha->isp_ops->idc_lock(ha);
3497 qla4_8xxx_clear_drv_active(ha); 3545 qla4_8xxx_clear_drv_active(ha);
3498 qla4_8xxx_idc_unlock(ha); 3546 ha->isp_ops->idc_unlock(ha);
3499 } 3547 }
3500 3548
3501 /* Detach interrupts */ 3549 /* Detach interrupts */
@@ -3542,16 +3590,20 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3542 /* Mapping of IO base pointer, door bell read and write pointer */ 3590 /* Mapping of IO base pointer, door bell read and write pointer */
3543 3591
3544 /* mapping of IO base pointer */ 3592 /* mapping of IO base pointer */
3545 ha->qla4_8xxx_reg = 3593 if (is_qla8022(ha)) {
3546 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase + 3594 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *)
3547 0xbc000 + (ha->pdev->devfn << 11)); 3595 ((uint8_t *)ha->nx_pcibase + 0xbc000 +
3596 (ha->pdev->devfn << 11));
3597 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3598 QLA82XX_CAM_RAM_DB2);
3599 } else if (is_qla8032(ha)) {
3600 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
3601 ((uint8_t *)ha->nx_pcibase);
3602 }
3548 3603
3549 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ 3604 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
3550 db_len = pci_resource_len(pdev, 4); 3605 db_len = pci_resource_len(pdev, 4);
3551 3606
3552 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3553 QLA82XX_CAM_RAM_DB2);
3554
3555 return 0; 3607 return 0;
3556iospace_error_exit: 3608iospace_error_exit:
3557 return -ENOMEM; 3609 return -ENOMEM;
@@ -3639,23 +3691,64 @@ static struct isp_operations qla4xxx_isp_ops = {
3639 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 3691 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
3640 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 3692 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
3641 .get_sys_info = qla4xxx_get_sys_info, 3693 .get_sys_info = qla4xxx_get_sys_info,
3694 .queue_mailbox_command = qla4xxx_queue_mbox_cmd,
3695 .process_mailbox_interrupt = qla4xxx_process_mbox_intr,
3642}; 3696};
3643 3697
3644static struct isp_operations qla4_8xxx_isp_ops = { 3698static struct isp_operations qla4_82xx_isp_ops = {
3645 .iospace_config = qla4_8xxx_iospace_config, 3699 .iospace_config = qla4_8xxx_iospace_config,
3646 .pci_config = qla4_8xxx_pci_config, 3700 .pci_config = qla4_8xxx_pci_config,
3647 .disable_intrs = qla4_8xxx_disable_intrs, 3701 .disable_intrs = qla4_82xx_disable_intrs,
3648 .enable_intrs = qla4_8xxx_enable_intrs, 3702 .enable_intrs = qla4_82xx_enable_intrs,
3649 .start_firmware = qla4_8xxx_load_risc, 3703 .start_firmware = qla4_8xxx_load_risc,
3650 .intr_handler = qla4_8xxx_intr_handler, 3704 .restart_firmware = qla4_82xx_try_start_fw,
3651 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine, 3705 .intr_handler = qla4_82xx_intr_handler,
3652 .reset_chip = qla4_8xxx_isp_reset, 3706 .interrupt_service_routine = qla4_82xx_interrupt_service_routine,
3707 .need_reset = qla4_8xxx_need_reset,
3708 .reset_chip = qla4_82xx_isp_reset,
3653 .reset_firmware = qla4_8xxx_stop_firmware, 3709 .reset_firmware = qla4_8xxx_stop_firmware,
3654 .queue_iocb = qla4_8xxx_queue_iocb, 3710 .queue_iocb = qla4_82xx_queue_iocb,
3655 .complete_iocb = qla4_8xxx_complete_iocb, 3711 .complete_iocb = qla4_82xx_complete_iocb,
3656 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out, 3712 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out,
3657 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in, 3713 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in,
3658 .get_sys_info = qla4_8xxx_get_sys_info, 3714 .get_sys_info = qla4_8xxx_get_sys_info,
3715 .rd_reg_direct = qla4_82xx_rd_32,
3716 .wr_reg_direct = qla4_82xx_wr_32,
3717 .rd_reg_indirect = qla4_82xx_md_rd_32,
3718 .wr_reg_indirect = qla4_82xx_md_wr_32,
3719 .idc_lock = qla4_82xx_idc_lock,
3720 .idc_unlock = qla4_82xx_idc_unlock,
3721 .rom_lock_recovery = qla4_82xx_rom_lock_recovery,
3722 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd,
3723 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
3724};
3725
3726static struct isp_operations qla4_83xx_isp_ops = {
3727 .iospace_config = qla4_8xxx_iospace_config,
3728 .pci_config = qla4_8xxx_pci_config,
3729 .disable_intrs = qla4_83xx_disable_intrs,
3730 .enable_intrs = qla4_83xx_enable_intrs,
3731 .start_firmware = qla4_8xxx_load_risc,
3732 .restart_firmware = qla4_83xx_start_firmware,
3733 .intr_handler = qla4_83xx_intr_handler,
3734 .interrupt_service_routine = qla4_83xx_interrupt_service_routine,
3735 .need_reset = qla4_8xxx_need_reset,
3736 .reset_chip = qla4_83xx_isp_reset,
3737 .reset_firmware = qla4_8xxx_stop_firmware,
3738 .queue_iocb = qla4_83xx_queue_iocb,
3739 .complete_iocb = qla4_83xx_complete_iocb,
3740 .rd_shdw_req_q_out = qla4_83xx_rd_shdw_req_q_out,
3741 .rd_shdw_rsp_q_in = qla4_83xx_rd_shdw_rsp_q_in,
3742 .get_sys_info = qla4_8xxx_get_sys_info,
3743 .rd_reg_direct = qla4_83xx_rd_reg,
3744 .wr_reg_direct = qla4_83xx_wr_reg,
3745 .rd_reg_indirect = qla4_83xx_rd_reg_indirect,
3746 .wr_reg_indirect = qla4_83xx_wr_reg_indirect,
3747 .idc_lock = qla4_83xx_drv_lock,
3748 .idc_unlock = qla4_83xx_drv_unlock,
3749 .rom_lock_recovery = qla4_83xx_rom_lock_recovery,
3750 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd,
3751 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
3659}; 3752};
3660 3753
3661uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 3754uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
@@ -3663,9 +3756,14 @@ uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3663 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); 3756 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
3664} 3757}
3665 3758
3666uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 3759uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3667{ 3760{
3668 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out)); 3761 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
3762}
3763
3764uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3765{
3766 return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->req_q_out));
3669} 3767}
3670 3768
3671uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 3769uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
@@ -3673,9 +3771,14 @@ uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3673 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); 3771 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
3674} 3772}
3675 3773
3676uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 3774uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3677{ 3775{
3678 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in)); 3776 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
3777}
3778
3779uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3780{
3781 return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->rsp_q_in));
3679} 3782}
3680 3783
3681static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) 3784static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
@@ -5050,30 +5153,36 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5050 ha->pdev = pdev; 5153 ha->pdev = pdev;
5051 ha->host = host; 5154 ha->host = host;
5052 ha->host_no = host->host_no; 5155 ha->host_no = host->host_no;
5156 ha->func_num = PCI_FUNC(ha->pdev->devfn);
5053 5157
5054 pci_enable_pcie_error_reporting(pdev); 5158 pci_enable_pcie_error_reporting(pdev);
5055 5159
5056 /* Setup Runtime configurable options */ 5160 /* Setup Runtime configurable options */
5057 if (is_qla8022(ha)) { 5161 if (is_qla8022(ha)) {
5058 ha->isp_ops = &qla4_8xxx_isp_ops; 5162 ha->isp_ops = &qla4_82xx_isp_ops;
5059 rwlock_init(&ha->hw_lock); 5163 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
5060 ha->qdr_sn_window = -1; 5164 ha->qdr_sn_window = -1;
5061 ha->ddr_mn_window = -1; 5165 ha->ddr_mn_window = -1;
5062 ha->curr_window = 255; 5166 ha->curr_window = 255;
5063 ha->func_num = PCI_FUNC(ha->pdev->devfn);
5064 nx_legacy_intr = &legacy_intr[ha->func_num]; 5167 nx_legacy_intr = &legacy_intr[ha->func_num];
5065 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; 5168 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
5066 ha->nx_legacy_intr.tgt_status_reg = 5169 ha->nx_legacy_intr.tgt_status_reg =
5067 nx_legacy_intr->tgt_status_reg; 5170 nx_legacy_intr->tgt_status_reg;
5068 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 5171 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
5069 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 5172 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
5173 } else if (is_qla8032(ha)) {
5174 ha->isp_ops = &qla4_83xx_isp_ops;
5175 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
5070 } else { 5176 } else {
5071 ha->isp_ops = &qla4xxx_isp_ops; 5177 ha->isp_ops = &qla4xxx_isp_ops;
5072 } 5178 }
5073 5179
5074 /* Set EEH reset type to fundamental if required by hba */ 5180 if (is_qla80XX(ha)) {
5075 if (is_qla8022(ha)) 5181 rwlock_init(&ha->hw_lock);
5182 ha->pf_bit = ha->func_num << 16;
5183 /* Set EEH reset type to fundamental if required by hba */
5076 pdev->needs_freset = 1; 5184 pdev->needs_freset = 1;
5185 }
5077 5186
5078 /* Configure PCI I/O space. */ 5187 /* Configure PCI I/O space. */
5079 ret = ha->isp_ops->iospace_config(ha); 5188 ret = ha->isp_ops->iospace_config(ha);
@@ -5094,6 +5203,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5094 init_completion(&ha->disable_acb_comp); 5203 init_completion(&ha->disable_acb_comp);
5095 5204
5096 spin_lock_init(&ha->hardware_lock); 5205 spin_lock_init(&ha->hardware_lock);
5206 spin_lock_init(&ha->work_lock);
5097 5207
5098 /* Initialize work list */ 5208 /* Initialize work list */
5099 INIT_LIST_HEAD(&ha->work_list); 5209 INIT_LIST_HEAD(&ha->work_list);
@@ -5128,8 +5238,20 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5128 if (ret) 5238 if (ret)
5129 goto probe_failed; 5239 goto probe_failed;
5130 5240
5131 if (is_qla8022(ha)) 5241 if (is_qla80XX(ha))
5132 (void) qla4_8xxx_get_flash_info(ha); 5242 qla4_8xxx_get_flash_info(ha);
5243
5244 if (is_qla8032(ha)) {
5245 qla4_83xx_read_reset_template(ha);
5246 /*
5247 * NOTE: If ql4xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
5248 * If DONTRESET_BIT0 is set, drivers should not set dev_state
5249 * to NEED_RESET. But if NEED_RESET is set, drivers should
5250 * honor the reset.
5251 */
5252 if (ql4xdontresethba == 1)
5253 qla4_83xx_set_idc_dontreset(ha);
5254 }
5133 5255
5134 /* 5256 /*
5135 * Initialize the Host adapter request/response queues and 5257 * Initialize the Host adapter request/response queues and
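
The NOTE above boils down to a small decision rule: with DONTRESET_BIT0 set the driver never initiates a reset itself, but a NEED_RESET state already posted by another function is still honored. A tiny sketch of that rule follows; the helper names and result strings are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the decision rule described in the NOTE; purely illustrative. */
static const char *reset_decision(bool dontreset_bit, bool need_reset_pending)
{
    if (need_reset_pending)
        return "honor the reset another function already requested";
    if (dontreset_bit)
        return "stay put: DONTRESET is set, never request a reset ourselves";
    return "set dev_state to NEED_RESET and start recovery";
}

int main(void)
{
    printf("%s\n", reset_decision(true,  false));
    printf("%s\n", reset_decision(true,  true));
    printf("%s\n", reset_decision(false, false));
    return 0;
}
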
@@ -5137,14 +5259,20 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5137 * NOTE: interrupts enabled upon successful completion 5259 * NOTE: interrupts enabled upon successful completion
5138 */ 5260 */
5139 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 5261 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
5262
5263 /* Don't retry adapter initialization if IRQ allocation failed */
5264 if (!test_bit(AF_IRQ_ATTACHED, &ha->flags))
5265 goto skip_retry_init;
5266
5140 while ((!test_bit(AF_ONLINE, &ha->flags)) && 5267 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
5141 init_retry_count++ < MAX_INIT_RETRIES) { 5268 init_retry_count++ < MAX_INIT_RETRIES) {
5142 5269
5143 if (is_qla8022(ha)) { 5270 if (is_qla80XX(ha)) {
5144 qla4_8xxx_idc_lock(ha); 5271 ha->isp_ops->idc_lock(ha);
5145 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 5272 dev_state = qla4_8xxx_rd_direct(ha,
5146 qla4_8xxx_idc_unlock(ha); 5273 QLA82XX_CRB_DEV_STATE);
5147 if (dev_state == QLA82XX_DEV_FAILED) { 5274 ha->isp_ops->idc_unlock(ha);
5275 if (dev_state == QLA8XXX_DEV_FAILED) {
5148 ql4_printk(KERN_WARNING, ha, "%s: don't retry " 5276 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
5149 "initialize adapter. H/W is in failed state\n", 5277 "initialize adapter. H/W is in failed state\n",
5150 __func__); 5278 __func__);
@@ -5160,16 +5288,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5160 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 5288 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
5161 } 5289 }
5162 5290
5291skip_retry_init:
5163 if (!test_bit(AF_ONLINE, &ha->flags)) { 5292 if (!test_bit(AF_ONLINE, &ha->flags)) {
5164 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); 5293 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
5165 5294
5166 if (is_qla8022(ha) && ql4xdontresethba) { 5295 if ((is_qla8022(ha) && ql4xdontresethba) ||
5296 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
5167 /* Put the device in failed state. */ 5297 /* Put the device in failed state. */
5168 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); 5298 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
5169 qla4_8xxx_idc_lock(ha); 5299 ha->isp_ops->idc_lock(ha);
5170 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5300 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
5171 QLA82XX_DEV_FAILED); 5301 QLA8XXX_DEV_FAILED);
5172 qla4_8xxx_idc_unlock(ha); 5302 ha->isp_ops->idc_unlock(ha);
5173 } 5303 }
5174 ret = -ENODEV; 5304 ret = -ENODEV;
5175 goto remove_host; 5305 goto remove_host;
@@ -5195,12 +5325,13 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5195 goto remove_host; 5325 goto remove_host;
5196 } 5326 }
5197 5327
5198 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc 5328 /*
5329 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
5199 * (which is called indirectly by qla4xxx_initialize_adapter), 5330 * (which is called indirectly by qla4xxx_initialize_adapter),
5200 * so that irqs will be registered after crbinit but before 5331 * so that irqs will be registered after crbinit but before
5201 * mbx_intr_enable. 5332 * mbx_intr_enable.
5202 */ 5333 */
5203 if (!is_qla8022(ha)) { 5334 if (is_qla40XX(ha)) {
5204 ret = qla4xxx_request_irqs(ha); 5335 ret = qla4xxx_request_irqs(ha);
5205 if (ret) { 5336 if (ret) {
5206 ql4_printk(KERN_WARNING, ha, "Failed to reserve " 5337 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
@@ -5226,6 +5357,10 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5226 ha->host_no, ha->firmware_version[0], ha->firmware_version[1], 5357 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
5227 ha->patch_number, ha->build_number); 5358 ha->patch_number, ha->build_number);
5228 5359
5360 /* Set the driver version */
5361 if (is_qla80XX(ha))
5362 qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
5363
5229 if (qla4xxx_setup_boot_info(ha)) 5364 if (qla4xxx_setup_boot_info(ha))
5230 ql4_printk(KERN_ERR, ha, 5365 ql4_printk(KERN_ERR, ha,
5231 "%s: No iSCSI boot target configured\n", __func__); 5366 "%s: No iSCSI boot target configured\n", __func__);
@@ -5333,9 +5468,16 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
5333{ 5468{
5334 struct scsi_qla_host *ha; 5469 struct scsi_qla_host *ha;
5335 5470
5471 /*
5472 * If the PCI device is disabled then it means probe_adapter had
5473 * failed and resources already cleaned up on probe_adapter exit.
5474 */
5475 if (!pci_is_enabled(pdev))
5476 return;
5477
5336 ha = pci_get_drvdata(pdev); 5478 ha = pci_get_drvdata(pdev);
5337 5479
5338 if (!is_qla8022(ha)) 5480 if (is_qla40XX(ha))
5339 qla4xxx_prevent_other_port_reinit(ha); 5481 qla4xxx_prevent_other_port_reinit(ha);
5340 5482
5341 /* destroy iface from sysfs */ 5483 /* destroy iface from sysfs */
@@ -5755,7 +5897,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
5755 5897
5756 ha = to_qla_host(cmd->device->host); 5898 ha = to_qla_host(cmd->device->host);
5757 5899
5758 if (ql4xdontresethba) { 5900 if (is_qla8032(ha) && ql4xdontresethba)
5901 qla4_83xx_set_idc_dontreset(ha);
5902
5903 /*
5904 * For ISP8324, if IDC_CTRL DONTRESET_BIT0 is set by other
5905 * protocol drivers, we should not set device_state to
5906 * NEED_RESET
5907 */
5908 if (ql4xdontresethba ||
5909 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
5759 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 5910 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5760 ha->host_no, __func__)); 5911 ha->host_no, __func__));
5761 5912
@@ -5779,7 +5930,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
5779 } 5930 }
5780 5931
5781 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 5932 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5782 if (is_qla8022(ha)) 5933 if (is_qla80XX(ha))
5783 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5934 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5784 else 5935 else
5785 set_bit(DPC_RESET_HA, &ha->dpc_flags); 5936 set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -5874,7 +6025,7 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
5874 break; 6025 break;
5875 case SCSI_FIRMWARE_RESET: 6026 case SCSI_FIRMWARE_RESET:
5876 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 6027 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5877 if (is_qla8022(ha)) 6028 if (is_qla80XX(ha))
5878 /* set firmware context reset */ 6029 /* set firmware context reset */
5879 set_bit(DPC_RESET_HA_FW_CONTEXT, 6030 set_bit(DPC_RESET_HA_FW_CONTEXT,
5880 &ha->dpc_flags); 6031 &ha->dpc_flags);
@@ -6013,32 +6164,43 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
6013 "0x%x is the owner\n", ha->host_no, __func__, 6164 "0x%x is the owner\n", ha->host_no, __func__,
6014 ha->pdev->devfn); 6165 ha->pdev->devfn);
6015 6166
6016 qla4_8xxx_idc_lock(ha); 6167 ha->isp_ops->idc_lock(ha);
6017 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6168 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
6018 QLA82XX_DEV_COLD); 6169 QLA8XXX_DEV_COLD);
6019 6170 ha->isp_ops->idc_unlock(ha);
6020 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, 6171
6021 QLA82XX_IDC_VERSION); 6172 rval = qla4_8xxx_update_idc_reg(ha);
6173 if (rval == QLA_ERROR) {
6174 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
6175 ha->host_no, __func__);
6176 ha->isp_ops->idc_lock(ha);
6177 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
6178 QLA8XXX_DEV_FAILED);
6179 ha->isp_ops->idc_unlock(ha);
6180 goto exit_error_recovery;
6181 }
6022 6182
6023 qla4_8xxx_idc_unlock(ha);
6024 clear_bit(AF_FW_RECOVERY, &ha->flags); 6183 clear_bit(AF_FW_RECOVERY, &ha->flags);
6025 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 6184 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
6026 qla4_8xxx_idc_lock(ha);
6027 6185
6028 if (rval != QLA_SUCCESS) { 6186 if (rval != QLA_SUCCESS) {
6029 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 6187 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6030 "FAILED\n", ha->host_no, __func__); 6188 "FAILED\n", ha->host_no, __func__);
6189 ha->isp_ops->idc_lock(ha);
6031 qla4_8xxx_clear_drv_active(ha); 6190 qla4_8xxx_clear_drv_active(ha);
6032 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6191 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
6033 QLA82XX_DEV_FAILED); 6192 QLA8XXX_DEV_FAILED);
6193 ha->isp_ops->idc_unlock(ha);
6034 } else { 6194 } else {
6035 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 6195 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6036 "READY\n", ha->host_no, __func__); 6196 "READY\n", ha->host_no, __func__);
6037 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6197 ha->isp_ops->idc_lock(ha);
6038 QLA82XX_DEV_READY); 6198 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
6199 QLA8XXX_DEV_READY);
6039 /* Clear driver state register */ 6200 /* Clear driver state register */
6040 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); 6201 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
6041 qla4_8xxx_set_drv_active(ha); 6202 qla4_8xxx_set_drv_active(ha);
6203 ha->isp_ops->idc_unlock(ha);
6042 ret = qla4xxx_request_irqs(ha); 6204 ret = qla4xxx_request_irqs(ha);
6043 if (ret) { 6205 if (ret) {
6044 ql4_printk(KERN_WARNING, ha, "Failed to " 6206 ql4_printk(KERN_WARNING, ha, "Failed to "
@@ -6050,13 +6212,12 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
6050 rval = QLA_SUCCESS; 6212 rval = QLA_SUCCESS;
6051 } 6213 }
6052 } 6214 }
6053 qla4_8xxx_idc_unlock(ha);
6054 } else { 6215 } else {
6055 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " 6216 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
6056 "the reset owner\n", ha->host_no, __func__, 6217 "the reset owner\n", ha->host_no, __func__,
6057 ha->pdev->devfn); 6218 ha->pdev->devfn);
6058 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 6219 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
6059 QLA82XX_DEV_READY)) { 6220 QLA8XXX_DEV_READY)) {
6060 clear_bit(AF_FW_RECOVERY, &ha->flags); 6221 clear_bit(AF_FW_RECOVERY, &ha->flags);
6061 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 6222 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
6062 if (rval == QLA_SUCCESS) { 6223 if (rval == QLA_SUCCESS) {
@@ -6071,11 +6232,12 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
6071 rval = QLA_SUCCESS; 6232 rval = QLA_SUCCESS;
6072 } 6233 }
6073 } 6234 }
6074 qla4_8xxx_idc_lock(ha); 6235 ha->isp_ops->idc_lock(ha);
6075 qla4_8xxx_set_drv_active(ha); 6236 qla4_8xxx_set_drv_active(ha);
6076 qla4_8xxx_idc_unlock(ha); 6237 ha->isp_ops->idc_unlock(ha);
6077 } 6238 }
6078 } 6239 }
6240exit_error_recovery:
6079 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 6241 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
6080 return rval; 6242 return rval;
6081} 6243}
@@ -6114,7 +6276,7 @@ qla4xxx_pci_slot_reset(struct pci_dev *pdev)
6114 6276
6115 ha->isp_ops->disable_intrs(ha); 6277 ha->isp_ops->disable_intrs(ha);
6116 6278
6117 if (is_qla8022(ha)) { 6279 if (is_qla80XX(ha)) {
6118 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { 6280 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
6119 ret = PCI_ERS_RESULT_RECOVERED; 6281 ret = PCI_ERS_RESULT_RECOVERED;
6120 goto exit_slot_reset; 6282 goto exit_slot_reset;
@@ -6180,6 +6342,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
6180 .subvendor = PCI_ANY_ID, 6342 .subvendor = PCI_ANY_ID,
6181 .subdevice = PCI_ANY_ID, 6343 .subdevice = PCI_ANY_ID,
6182 }, 6344 },
6345 {
6346 .vendor = PCI_VENDOR_ID_QLOGIC,
6347 .device = PCI_DEVICE_ID_QLOGIC_ISP8324,
6348 .subvendor = PCI_ANY_ID,
6349 .subdevice = PCI_ANY_ID,
6350 },
6183 {0, 0}, 6351 {0, 0},
6184}; 6352};
6185MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 6353MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
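
The extra pci_device_id entry for ISP8324 is all the PCI core needs to start binding the existing driver to the new chip: the table is exported with MODULE_DEVICE_TABLE and matched against each device before probe is called. A generic, self-contained sketch of that wiring is shown below; the IDs and names are placeholders, not the qla4xxx values.

#include <linux/module.h>
#include <linux/pci.h>

/* Placeholder IDs for illustration only. */
#define DEMO_VENDOR_ID  0x1077
#define DEMO_DEVICE_ID  0x1234

static const struct pci_device_id demo_pci_tbl[] = {
    { PCI_DEVICE(DEMO_VENDOR_ID, DEMO_DEVICE_ID) },
    { }                     /* terminating entry, like the {0, 0} above */
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    dev_info(&pdev->dev, "demo: bound to %04x:%04x\n", id->vendor, id->device);
    return pci_enable_device(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
    pci_disable_device(pdev);
}

static struct pci_driver demo_pci_driver = {
    .name     = "demo_pci",
    .id_table = demo_pci_tbl,
    .probe    = demo_probe,
    .remove   = demo_remove,
};
module_pci_driver(demo_pci_driver);

MODULE_LICENSE("GPL");
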
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 725034f4252c..f6df2ea91ab5 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k18" 8#define QLA4XXX_DRIVER_VERSION "5.03.00-k1"